text stringlengths 0 1.05M | meta dict |
|---|---|
# Adopted form SfePy project, see http://sfepy.org
# Thanks to Robert Cimrman
import numpy as nm
import os
import os.path as op
import fnmatch
import shutil
from base import output, Struct, basestr
try:
import tables as pt
except:
pt = None
class InDir(Struct):
    """
    Remember the directory a given file lives in, and resolve other
    file names relative to that directory.

    Examples
    --------
    >>> indir = InDir('output/file1')
    >>> print indir('file2')
    """
    def __init__(self, filename):
        full_path = op.join(os.getcwd(), filename)
        self.dir = op.split(full_path)[0]

    def __call__(self, filename):
        return op.join(self.dir, filename)
def ensure_path(filename):
    """
    Create any missing intermediate directories on the path to
    `filename`.
    """
    parent = os.path.dirname(filename)
    if not parent:
        return
    if not os.path.exists(parent):
        os.makedirs(parent)
def locate_files(pattern, root_dir=os.curdir):
    """
    Yield paths of all files matching the given filename pattern in and
    below the supplied root directory.
    """
    top = os.path.abspath(root_dir)
    for dirpath, _dirnames, filenames in os.walk(top):
        for name in fnmatch.filter(filenames, pattern):
            yield os.path.join(dirpath, name)
def remove_files(root_dir):
    """
    Remove all files and directories in the supplied root directory
    (the root directory itself is kept).

    The original implementation walked the whole tree with `os.walk`,
    but joined every file name against `root_dir` instead of the
    current `dirpath`, and descended into directories it had already
    removed with `shutil.rmtree`. A single pass over the top level is
    both correct and sufficient, since `rmtree` removes subtrees whole.
    """
    root = os.path.abspath(root_dir)
    for name in os.listdir(root):
        path = os.path.join(root, name)
        if os.path.isdir(path):
            shutil.rmtree(path)
        else:
            os.remove(path)
##
# 27.04.2006, c
def get_trunk(filename):
    """Return the file name stripped of both directory and extension."""
    base = op.basename(filename)
    return op.splitext(base)[0]
def edit_filename(filename, prefix='', suffix='', new_ext=None):
    """
    Edit a file name by adding a prefix, inserting a suffix in front of
    the file name extension or replacing the extension.

    Fixes the original implementation, which accepted and documented
    `prefix` but never used it. The prefix is prepended to the base
    file name (not the directory part), so the default ``prefix=''``
    keeps the old behaviour exactly.

    Parameters
    ----------
    filename : str
        The file name.
    prefix : str
        The prefix to be added in front of the base file name.
    suffix : str
        The suffix to be inserted before the extension.
    new_ext : str, optional
        If not None, it replaces the original file name extension.

    Returns
    -------
    new_filename : str
        The new file name.
    """
    path, name = os.path.split(filename)
    base, ext = os.path.splitext(name)
    if new_ext is None:
        new_name = prefix + base + suffix + ext
    else:
        new_name = prefix + base + suffix + new_ext
    return os.path.join(path, new_name)
def get_print_info(n_step, fill=None):
    """
    Return the maximum number of digits needed for range(n_step) and
    the corresponding '%d'-style format string. `fill` is an optional
    fill character for the format (e.g. '0' for zero padding).

    Examples:
    >>> get_print_info(11)
    (2, '%2d')
    >>> get_print_info(8)
    (1, '%1d')
    >>> get_print_info(100)
    (2, '%2d')
    >>> get_print_info(101)
    (3, '%3d')
    >>> get_print_info(101, fill='0')
    (3, '%03d')
    """
    if n_step <= 1:
        # Nothing to enumerate.
        return 0, None

    n_digit = int(nm.log10(n_step - 1) + 1)
    if fill is None:
        format = '%%%dd' % n_digit
    else:
        format = '%%%s%dd' % (fill, n_digit)
    return n_digit, format
def skip_read_line(fd, no_eof=False):
    """
    Read and return the first non-empty, non-comment ('#') line from
    the given file object, stripped of surrounding whitespace. At EOF,
    return an empty string if `no_eof` is False, otherwise raise
    EOFError.
    """
    stripped = ''
    while True:
        try:
            raw = fd.readline()
        except EOFError:
            break

        if not raw:
            # End of file reached.
            if no_eof:
                raise EOFError
            break

        stripped = raw.strip()
        if stripped and not stripped.startswith('#'):
            break

    return stripped
def read_token(fd):
    """
    Read a single token (sequence of non-whitespace characters) from
    the given file object.

    Notes
    -----
    Consumes the first whitespace character after the token.
    """
    token = ''

    # Skip leading whitespace; an EOF here yields the empty token.
    while True:
        ch = fd.read(1)
        if not ch:
            return token
        if not ch.isspace():
            break

    # Accumulate until whitespace or EOF.
    while True:
        token += ch
        ch = fd.read(1)
        if not ch or ch.isspace():
            break

    return token
def read_array(fd, n_row, n_col, dtype):
    """
    Read a NumPy array of shape `(n_row, n_col)` of whitespace-separated
    numbers from the given file object and cast it to `dtype`.

    If `n_col` is None, determine the number of columns automatically
    from the next line (the file position is restored afterwards).

    Raises ValueError when fewer than `n_row * n_col` values could be
    read.
    """
    if n_col is None:
        # Peek at the next line to count columns, then rewind.
        pos = fd.tell()
        n_col = len(fd.readline().split())
        fd.seek(pos)

    count = n_row * n_col
    val = nm.fromfile(fd, sep=' ', count=count)
    if val.shape[0] < count:
        raise ValueError('(%d, %d) array reading failed!' % (n_row, n_col))

    return nm.asarray(val, dtype=dtype).reshape((n_row, n_col))
##
# c: 05.02.2008, r: 05.02.2008
def read_list(fd, n_item, dtype):
    """
    Read exactly `n_item` values of type `dtype` from the file object,
    one or more whitespace-separated values per line. Returns a list of
    per-line lists; raises ValueError if a line overshoots `n_item`.
    """
    vals = []
    n_read = 0
    while n_read < n_item:
        row = [dtype(tok) for tok in fd.readline().split()]
        vals.append(row)
        n_read += len(row)
        if n_read > n_item:
            output('corrupted row?', row, n_read, n_item)
            raise ValueError

    return vals
def write_dict_hdf5(filename, adict, level=0, group=None, fd=None):
    """
    Recursively dump a (possibly nested) dictionary into a HDF5 file.

    Each key is stored as a node named '_' + str(key); nested dicts
    become HDF5 groups. `level`, `group` and `fd` are internal
    recursion arguments - callers pass only `filename` and `adict`.

    NOTE(review): uses the legacy PyTables 2.x camel-case API
    (openFile/createGroup/createArray) and Python 2 dict.iteritems().
    """
    if level == 0:
        fd = pt.openFile(filename, mode='w', title='Recursive dict dump')
        group = '/'
    for key, val in adict.iteritems():
        if isinstance(val, dict):
            # Nested dict -> subgroup, recurse with the same open file.
            group2 = fd.createGroup(group, '_' + str(key), '%s group' % key)
            write_dict_hdf5(filename, val, level + 1, group2, fd)
        else:
            fd.createArray(group, '_' + str(key), val, '%s data' % key)
    if level == 0:
        # Only the top-level call owns (and closes) the file.
        fd.close()
def read_dict_hdf5(filename, level=0, group=None, fd=None):
    """
    Read back a dictionary written by write_dict_hdf5().

    The leading '_' added to node names on writing is stripped again.
    `level`, `group` and `fd` are internal recursion arguments.

    NOTE(review): legacy PyTables 2.x API and Python 2 iteritems(), as
    in write_dict_hdf5().
    """
    out = {}
    if level == 0:
        fd = pt.openFile(filename, mode='r')
        group = fd.root
    # Subgroups correspond to nested dicts.
    for name, gr in group._v_groups.iteritems():
        name = name.replace('_', '', 1)
        out[name] = read_dict_hdf5(filename, level + 1, gr, fd)
    # Leaves correspond to plain values.
    for name, data in group._v_leaves.iteritems():
        name = name.replace('_', '', 1)
        out[name] = data.read()
    if level == 0:
        fd.close()
    return out
##
# 02.07.2007, c
def write_sparse_matrix_hdf5(filename, mtx, name='a sparse matrix'):
    """
    Write a SciPy sparse matrix into a HDF5 file.

    Assume CSR/CSC: stores '/info' (dtype, shape, format) and '/data'
    (data, indptr, indices), matching read_sparse_matrix_hdf5().

    NOTE(review): Python 2 print statements; the bare `except` prints a
    hint and re-raises, so errors are not swallowed.
    """
    fd = pt.openFile(filename, mode='w', title=name)
    try:
        info = fd.createGroup('/', 'info')
        fd.createArray(info, 'dtype', mtx.dtype.str)
        fd.createArray(info, 'shape', mtx.shape)
        fd.createArray(info, 'format', mtx.format)

        data = fd.createGroup('/', 'data')
        fd.createArray(data, 'data', mtx.data)
        fd.createArray(data, 'indptr', mtx.indptr)
        fd.createArray(data, 'indices', mtx.indices)
    except:
        print 'matrix must be in SciPy sparse CSR/CSC format!'
        print mtx.__repr__()
        raise
    fd.close()
##
# 02.07.2007, c
# 08.10.2007
def read_sparse_matrix_hdf5(filename, output_format=None):
    """
    Read a sparse matrix written by write_sparse_matrix_hdf5().

    If `output_format` ('csr' or 'csc') is given, the matrix is built
    in that format regardless of the stored one, and its indices are
    sorted before returning.

    NOTE(review): the 'coo' branch reads 'rows'/'cols' nodes that
    write_sparse_matrix_hdf5() never creates, and `constructors` has no
    'coo' key - with output_format None a stored 'coo' matrix raises
    KeyError before reaching that branch. Python 2 print statement.
    """
    import scipy.sparse as sp
    constructors = {'csr' : sp.csr_matrix, 'csc' : sp.csc_matrix}

    fd = pt.openFile(filename, mode='r')
    info = fd.root.info
    data = fd.root.data

    # Stored scalars may come back as length-1 arrays; unwrap them.
    format = info.format.read()
    if not isinstance(format, basestr):
        format = format[0]
    dtype = info.dtype.read()
    if not isinstance(dtype, basestr):
        dtype = dtype[0]

    if output_format is None:
        constructor = constructors[format]
    else:
        constructor = constructors[output_format]

    if format in ['csc', 'csr']:
        mtx = constructor((data.data.read(),
                           data.indices.read(), data.indptr.read()),
                          shape=info.shape.read(), dtype=dtype)
    elif format == 'coo':
        mtx = constructor((data.data.read(),
                           nm.c_[data.rows.read(), data.cols.read()].T),
                          shape=info.shape.read(), dtype=dtype)
    else:
        print format
        raise ValueError
    fd.close()

    if output_format in ['csc', 'csr']:
        mtx.sort_indices()

    return mtx
| {
"repo_name": "vlukes/dicom2fem",
"path": "dicom2fem/ioutils.py",
"copies": "1",
"size": "8061",
"license": "bsd-3-clause",
"hash": -4072875990405852700,
"line_mean": 24.6719745223,
"line_max": 76,
"alpha_frac": 0.5686639375,
"autogenerated": false,
"ratio": 3.544854881266491,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9600903741652798,
"avg_score": 0.0025230154227386184,
"num_lines": 314
} |
# Adopted form SfePy project, see http://sfepy.org
# Thanks to Robert Cimrman
import sys
from copy import copy
import os.path as op
import numpy as nm
from base import (complex_types, dict_from_keys_init,
assert_, is_derived_class,
insert_static_method, output, get_default,
get_default_attr, Struct, basestr)
from ioutils \
import skip_read_line, read_token, read_array, read_list, pt
# Map file name suffixes to mesh format names.
supported_formats = {
    '.mesh' : 'medit',
    '.vtk' : 'vtk',
    '.node' : 'tetgen',
    '.txt' : 'comsol',
    '.h5' : 'hdf5',
    # Order is important, avs_ucd does not guess -> it is the default.
    '.inp' : ('abaqus', 'avs_ucd'),
    '.hmascii' : 'hmascii',
    '.mesh3d' : 'mesh3d',
    '.bdf' : 'nastran',
    '.neu' : 'gambit',
    '.med' : 'med',
    '.cdb' : 'ansys_cdb',
}

# Map mesh formats to read and write capabilities.
# 'r' ... read mesh
# 'w' ... write mesh
# 'rn' ... read nodes for boundary conditions
# 'wn' ... write nodes for boundary conditions
supported_capabilities = {
    'medit' : ['r', 'w'],
    'vtk' : ['r', 'w'],
    'tetgen' : ['r'],
    'comsol' : ['r', 'w'],
    'hdf5' : ['r', 'w'],
    'abaqus' : ['r'],
    'avs_ucd' : ['r'],
    'hmascii' : ['r', 'w'],
    'mesh3d' : ['r'],
    'nastran' : ['r', 'w'],
    'gambit' : ['r', 'rn'],
    'med' : ['r'],
    'ansys_cdb' : ['r'],
}
def output_writable_meshes():
    """Report (via output()) the names of all writable mesh formats."""
    output('Supported writable mesh formats are:')
    for fmt, capabilities in supported_capabilities.iteritems():
        if 'w' in capabilities:
            output(fmt)
##
# c: 15.02.2008, r: 15.02.2008
def sort_by_mat_id(conns_in):
    """
    Stable-sort each connectivity group by the material id stored in
    its last column; return the connectivities (without the mat_id
    column) and the mat_ids as two separate lists. Empty groups become
    empty lists.
    """
    conns = []
    mat_ids = []
    for conn in conns_in:
        if conn.shape[0] == 0:
            conns.append([])
            mat_ids.append([])
            continue

        # Mergesort keeps the original order of equal mat_ids.
        order = nm.argsort(conn[:, -1], kind='mergesort')
        reordered = conn[order]
        conns.append(reordered[:, :-1].copy())
        mat_ids.append(reordered[:, -1].copy())

    return conns, mat_ids
def sort_by_mat_id2(conns_in, mat_ids_in):
    """
    Stable-sort each connectivity group by its separately supplied
    material ids; return the reordered connectivities and mat_ids.
    Empty groups become empty lists.
    """
    conns = []
    mat_ids = []
    for ig, conn in enumerate(conns_in):
        if conn.shape[0] == 0:
            conns.append([])
            mat_ids.append([])
            continue

        mat_id = mat_ids_in[ig]
        order = nm.argsort(mat_id, kind='mergesort')
        conns.append(conn[order])
        mat_ids.append(mat_id[order])

    return conns, mat_ids
##
# conns_in must be sorted by mat_id within a group!
# c: 16.06.2005, r: 15.02.2008
def split_by_mat_id(conns_in, mat_ids_in, descs_in):
    """
    Split each connectivity group into contiguous runs of constant
    mat_id, producing one output group per run.

    `conns_in` must already be sorted by mat_id within each group (see
    sort_by_mat_id()).
    """
    conns = []
    mat_ids = []
    descs = []

    for ig, conn in enumerate(conns_in):
        # Pad with a sentinel so every run boundary shows up as a
        # difference between neighbouring entries.
        sentinel = nm.array([-1], nm.int32)
        padded = nm.concatenate((sentinel, mat_ids_in[ig], sentinel))
        breaks = nm.where(padded[1:] != padded[:-1])[0]

        for i0, i1 in zip(breaks[:-1], breaks[1:]):
            conns.append(conn[i0:i1, :].copy())
            mat_ids.append(mat_ids_in[ig][i0:i1])
            descs.append(descs_in[ig])

    return (conns, mat_ids, descs)
##
# 12.10.2005, c
def write_bb(fd, array, dtype):
    """
    Write `array` to `fd` in the medit .bb format; `dtype` is the medit
    solution type code written into the header.
    """
    n_row, n_col = array.shape
    fd.write('3 %d %d %d\n' % (n_col, n_row, dtype))
    row_format = ' '.join(['%.5e'] * n_col + ['\n'])
    for row in array:
        fd.write(row_format % tuple(row))
##
# c: 03.10.2005, r: 08.02.2008
def join_conn_groups(conns, descs, mat_ids, concat=False):
    """
    Join element groups that share the same element type (desc).

    With `concat` True, the mat_id column is appended to each joined
    connectivity and only (conns, descs) are returned; otherwise the
    mat_ids are returned as a separate list.
    """
    by_desc = dict_from_keys_init(descs, list)
    for ig, desc in enumerate(descs):
        by_desc[desc].append(ig)
    groups = [igs for igs in by_desc.values() if igs]

    descs_out, conns_out, mat_ids_out = [], [], []
    for group in groups:
        n_ep = conns[group[0]].shape[1]
        joined_conn = nm.zeros((0, n_ep), nm.int32)
        joined_mat_id = nm.zeros((0,), nm.int32)
        for ig in group:
            joined_conn = nm.concatenate((joined_conn, conns[ig]))
            joined_mat_id = nm.concatenate((joined_mat_id, mat_ids[ig]))

        if concat:
            joined_conn = nm.concatenate((joined_conn,
                                          joined_mat_id[:, nm.newaxis]), 1)
        else:
            mat_ids_out.append(joined_mat_id)
        conns_out.append(joined_conn)
        descs_out.append(descs[group[0]])

    if concat:
        return conns_out, descs_out
    else:
        return conns_out, descs_out, mat_ids_out
def convert_complex_output(out_in):
    """
    Split complex-valued entries of the output dictionary `out_in` into
    two entries holding the real and imaginary parts, keyed
    'real(<key>)' and 'imag(<key>)'. Real-valued entries pass through
    unchanged.
    """
    out = {}
    for key, val in out_in.iteritems():
        if val.data.dtype not in complex_types:
            out[key] = val
            continue

        re_val = copy(val)
        re_val.data = val.data.real
        out['real(%s)' % key] = re_val

        im_val = copy(val)
        im_val.data = val.data.imag
        out['imag(%s)' % key] = im_val

    return out
##
# c: 05.02.2008
class MeshIO( Struct ):
    """
    The abstract class for importing and exporting meshes.
    Read the docstring of the Mesh() class. Basically all you need to do is to
    implement the read() method::
        def read(self, mesh, **kwargs):
            nodes = ...
            conns = ...
            mat_ids = ...
            descs = ...
            mesh._set_data(nodes, conns, mat_ids, descs)
            return mesh
    See the Mesh class' docstring how the nodes, conns, mat_ids and descs
    should look like. You just need to read them from your specific format from
    disk.
    To write a mesh to disk, just implement the write() method and use the
    information from the mesh instance (e.g. nodes, conns, mat_ids and descs)
    to construct your specific format.
    The methods read_dimension(), read_bounding_box() should be implemented in
    subclasses, as it is often possible to get that kind of information without
    reading the whole mesh file.
    Optionally, subclasses can implement read_data() to read also computation
    results. This concerns mainly the subclasses with implemented write()
    supporting the 'out' kwarg.
    The default implementation of read_last_step() just returns 0. It should be
    reimplemented in subclasses capable of storing several steps.
    """
    format = None
    # Message used by all the abstract-method stubs below.
    call_msg = 'called an abstract MeshIO instance!'

    def __init__( self, filename, **kwargs ):
        # `filename` may also be an open file object (see
        # get_filename_trunk()) or a callable (see UserMeshIO).
        Struct.__init__( self, filename = filename, **kwargs )
        self.set_float_format()

    def get_filename_trunk(self):
        """Return the file name without its extension, or a placeholder
        when the mesh comes from an open file object."""
        # NOTE(review): `file` is the Python 2 built-in file type.
        if isinstance(self.filename, file):
            trunk = 'from_descriptor'
        else:
            trunk = op.splitext(self.filename)[0]
        return trunk

    def read_dimension( self, ret_fd = False ):
        """Abstract: return the space dimension (and the open file if
        `ret_fd` is True)."""
        raise ValueError(MeshIO.call_msg)

    def read_bounding_box( self, ret_fd = False, ret_dim = False ):
        """Abstract: return the mesh bounding box (optionally also the
        dimension and/or the open file)."""
        raise ValueError(MeshIO.call_msg)

    def read_last_step(self):
        """The default implementation: just return 0 as the last step."""
        return 0

    def read_times(self, filename=None):
        """
        Read true time step data from individual time steps.
        Returns
        -------
        steps : array
            The time steps.
        times : array
            The times of the time steps.
        nts : array
            The normalized times of the time steps, in [0, 1].
        Notes
        -----
        The default implementation returns empty arrays.
        """
        aux = nm.array([], dtype=nm.float64)
        return aux.astype(nm.int32), aux, aux

    def read(self, mesh, omit_facets=False, **kwargs):
        """Abstract: fill `mesh` with data read from self.filename."""
        raise ValueError(MeshIO.call_msg)

    def write(self, filename, mesh, **kwargs):
        """Abstract: write `mesh` (and optional output data) to a file."""
        raise ValueError(MeshIO.call_msg)

    def read_data( self, step, filename = None ):
        """Abstract: read computation results of the given time step."""
        raise ValueError(MeshIO.call_msg)

    def set_float_format( self, format = None ):
        # Default float output format is '%e'.
        self.float_format = get_default( format, '%e' )

    def get_vector_format( self, dim ):
        # Format string for writing one `dim`-vector per line.
        return ' '.join( [self.float_format] * dim )
class UserMeshIO(MeshIO):
    """
    Special MeshIO subclass that delegates reading and writing of a
    mesh to a user-supplied callable, passed in place of a file name.

    The callable is invoked as ``function(mesh, mode='read')`` or
    ``function(mesh, mode='write')``.
    """
    format = 'function'

    def __init__(self, filename, **kwargs):
        assert_(hasattr(filename, '__call__'))
        self.function = filename

        name = 'function:%s' % self.function.__name__
        MeshIO.__init__(self, filename=name, **kwargs)

    def get_filename_trunk(self):
        return self.filename

    def read(self, mesh, *args, **kwargs):
        result = self.function(mesh, mode='read')
        if result is not None:
            # The function may return a brand new mesh.
            mesh = result

        self.filename = mesh.name
        return mesh

    def write(self, filename, mesh, *args, **kwargs):
        self.function(mesh, mode='write')
##
# c: 05.02.2008
class MeditMeshIO( MeshIO ):
    """
    Reader/writer for medit .mesh files.

    NOTE(review): Python 2 idioms (print statement, dict.iteritems());
    indentation reconstructed from a whitespace-stripped dump.
    """
    format = 'medit'

    def read_dimension(self, ret_fd=False):
        """Scan the file for the 'Dimension' keyword and return the
        space dimension (and the open file if `ret_fd` is True)."""
        fd = open(self.filename, 'r')
        while 1:
            line = skip_read_line(fd, no_eof=True).split()
            if line[0] == 'Dimension':
                if len(line) == 2:
                    # Dimension and value on the same line.
                    dim = int(line[1])
                else:
                    # Value on the following line.
                    dim = int(fd.readline())
                break
        if ret_fd:
            return dim, fd
        else:
            fd.close()
            return dim

    def read_bounding_box(self, ret_fd=False, ret_dim=False):
        """Read only the vertices and return their bounding box as a
        (2, dim) array of (min, max) rows."""
        fd = open(self.filename, 'r')
        # NOTE(review): `fd` opened above is immediately replaced by the
        # one returned from read_dimension() - the first open leaks.
        dim, fd = self.read_dimension(ret_fd=True)
        while 1:
            line = skip_read_line(fd, no_eof=True).split()
            if line[0] == 'Vertices':
                num = int( read_token( fd ) )
                nod = read_array( fd, num, dim + 1, nm.float64 )
                break
        # Last column of `nod` is the vertex group, hence `:dim`.
        bbox = nm.vstack( (nm.amin( nod[:,:dim], 0 ),
                           nm.amax( nod[:,:dim], 0 )) )
        if ret_dim:
            if ret_fd:
                return bbox, dim, fd
            else:
                fd.close()
                return bbox, dim
        else:
            if ret_fd:
                return bbox, fd
            else:
                fd.close()
                return bbox

    def read(self, mesh, omit_facets=False, **kwargs):
        """Read vertices and cells into `mesh`. With `omit_facets`,
        cells of lower dimension than the mesh are skipped."""
        dim, fd = self.read_dimension(ret_fd=True)
        conns_in = []
        descs = []

        def _read_cells(dimension, size):
            # Read one cell section; last column is the material id.
            num = int(read_token(fd))
            data = read_array(fd, num, size + 1, nm.int32)
            if omit_facets and (dimension < dim): return
            # Convert 1-based medit node numbers to 0-based.
            data[:, :-1] -= 1
            conns_in.append(data)
            descs.append('%i_%i' % (dimension, size))

        while 1:
            line = skip_read_line(fd).split()
            if not line:
                break
            ls = line[0]
            if (ls == 'Vertices'):
                num = int( read_token( fd ) )
                nod = read_array( fd, num, dim + 1, nm.float64 )
            elif (ls == 'Tetrahedra'):
                _read_cells(3, 4)
            elif (ls == 'Hexahedra'):
                _read_cells(3, 8)
            elif (ls == 'Triangles'):
                _read_cells(2, 3)
            elif (ls == 'Quadrilaterals'):
                _read_cells(2, 4)
            elif ls == 'End':
                break
            elif line[0] == '#':
                continue
            else:
                output('skipping unknown entity: %s' % line)
                continue
        fd.close()

        conns_in, mat_ids = sort_by_mat_id( conns_in )

        # Detect wedges and pyramides -> separate groups.
        # Degenerate hexahedra encode wedges (two equal nodes) and
        # pyramids (three equal nodes) in their last four nodes.
        if ('3_8' in descs):
            ic = descs.index( '3_8' )
            conn_in = conns_in.pop( ic )
            mat_id_in = mat_ids.pop(ic)
            flag = nm.zeros( (conn_in.shape[0],), nm.int32 )
            for ii, el in enumerate( conn_in ):
                if (el[4] == el[5]):
                    if (el[5] == el[6]):
                        flag[ii] = 2    # pyramid
                    else:
                        flag[ii] = 1    # wedge
            conn = []
            desc = []
            mat_id = []
            ib = nm.where( flag == 0 )[0]
            if (len( ib ) > 0):
                conn.append( conn_in[ib] )
                mat_id.append(mat_id_in[ib])
                desc.append( '3_8' )
            iw = nm.where( flag == 1 )[0]
            if (len( iw ) > 0):
                ar = nm.array( [0,1,2,3,4,6], nm.int32 )
                conn.append(conn_in[iw[:, None], ar])
                mat_id.append(mat_id_in[iw])
                desc.append( '3_6' )
            ip = nm.where( flag == 2 )[0]
            if (len( ip ) > 0):
                ar = nm.array( [0,1,2,3,4], nm.int32 )
                conn.append(conn_in[ip[:, None], ar])
                mat_id.append(mat_id_in[ip])
                desc.append( '3_5' )
            ## print "brick split:", ic, ":", ib, iw, ip, desc
            # Replace the original '3_8' group by the split groups.
            conns_in[ic:ic] = conn
            mat_ids[ic:ic] = mat_id
            del( descs[ic] )
            descs[ic:ic] = desc

        conns, mat_ids, descs = split_by_mat_id( conns_in, mat_ids, descs )
        # nod: coordinates in columns :dim, vertex groups in the last one.
        mesh._set_data( nod[:,:-1], nod[:,-1], conns, mat_ids, descs )
        return mesh

    def write( self, filename, mesh, out = None, **kwargs ):
        """Write `mesh` in the medit format; node numbers are converted
        back to 1-based, the mat_id is appended to each cell line."""
        fd = open( filename, 'w' )
        coors = mesh.coors
        # concat=True appends mat_id as the last connectivity column.
        conns, desc = join_conn_groups( mesh.conns, mesh.descs,
                                        mesh.mat_ids, concat = True )
        n_nod, dim = coors.shape
        fd.write( "MeshVersionFormatted 1\nDimension %d\n" % dim )
        fd.write( "Vertices\n%d\n" % n_nod )
        format = self.get_vector_format( dim ) + ' %d\n'
        for ii in range( n_nod ):
            nn = tuple( coors[ii] ) + (mesh.ngroups[ii],)
            fd.write( format % tuple( nn ) )
        for ig, conn in enumerate( conns ):
            # `nn[-1] - 1` undoes the +1 applied to the mat_id column.
            if (desc[ig] == "1_2"):
                fd.write( "Edges\n%d\n" % conn.shape[0] )
                for ii in range( conn.shape[0] ):
                    nn = conn[ii] + 1
                    fd.write( "%d %d %d\n" \
                              % (nn[0], nn[1], nn[2] - 1) )
            elif (desc[ig] == "2_4"):
                fd.write( "Quadrilaterals\n%d\n" % conn.shape[0] )
                for ii in range( conn.shape[0] ):
                    nn = conn[ii] + 1
                    fd.write( "%d %d %d %d %d\n" \
                              % (nn[0], nn[1], nn[2], nn[3], nn[4] - 1) )
            elif (desc[ig] == "2_3"):
                fd.write( "Triangles\n%d\n" % conn.shape[0] )
                for ii in range( conn.shape[0] ):
                    nn = conn[ii] + 1
                    fd.write( "%d %d %d %d\n" % (nn[0], nn[1], nn[2], nn[3] - 1) )
            elif (desc[ig] == "3_4"):
                fd.write( "Tetrahedra\n%d\n" % conn.shape[0] )
                for ii in range( conn.shape[0] ):
                    nn = conn[ii] + 1
                    fd.write( "%d %d %d %d %d\n"
                              % (nn[0], nn[1], nn[2], nn[3], nn[4] - 1) )
            elif (desc[ig] == "3_8"):
                fd.write( "Hexahedra\n%d\n" % conn.shape[0] )
                for ii in range( conn.shape[0] ):
                    nn = conn[ii] + 1
                    fd.write( "%d %d %d %d %d %d %d %d %d\n"
                              % (nn[0], nn[1], nn[2], nn[3], nn[4], nn[5],
                                 nn[6], nn[7], nn[8] - 1) )
            else:
                print 'unknown element type!', desc[ig]
                raise ValueError
        fd.close()
        if out is not None:
            # Writing of output data is not supported for medit files.
            for key, val in out.iteritems():
                raise NotImplementedError
# Header template for legacy ASCII VTK files; filled with
# (step, time, normalized time, generator name).
vtk_header = r"""# vtk DataFile Version 2.0
step %d time %e normalized time %e, generated by %s
ASCII
DATASET UNSTRUCTURED_GRID
"""
# Map SfePy element descriptions to VTK cell type ids.
vtk_cell_types = {'2_2' : 3, '2_4' : 9, '2_3' : 5,
                  '3_2' : 3, '3_4' : 10, '3_8' : 12}
# Map VTK cell type ids to space dimension.
# NOTE(review): the key 3 (line cell) appears twice - '3 : 2' is
# silently overwritten by '3 : 3', so type 3 always maps to 3.
vtk_dims = {3 : 2, 9 : 2, 5 : 2, 3 : 3, 10 : 3, 12 : 3}
# Map (VTK cell type, dimension) back to element descriptions; VTK
# pixels (8) and voxels (11) map to quads / hexahedra.
vtk_inverse_cell_types = {(3, 2) : '2_2', (5, 2) : '2_3',
                          (8, 2) : '2_4', (9, 2) : '2_4',
                          (3, 3) : '3_2', (10, 3) : '3_4',
                          (11, 3) : '3_8', (12, 3) : '3_8' }
# Node order permutations turning VTK pixel/voxel connectivity into
# quad/hexahedron connectivity.
vtk_remap = {8 : nm.array([0, 1, 3, 2], dtype=nm.int32),
             11 : nm.array([0, 1, 3, 2, 4, 5, 7, 6], dtype=nm.int32)}
vtk_remap_keys = vtk_remap.keys()
##
# c: 05.02.2008
class VTKMeshIO( MeshIO ):
    """
    Reader/writer for legacy ASCII VTK files (DATASET UNSTRUCTURED_GRID).

    NOTE(review): Python 2 syntax throughout (dict.iteritems(),
    `raise NotImplementedError, nc`); indentation reconstructed from a
    whitespace-stripped dump.
    """
    format = 'vtk'

    def read_coors(self, ret_fd=False):
        """Read the POINTS section as an (n_nod, 3) float64 array
        (and return the open file if `ret_fd` is True)."""
        fd = open( self.filename, 'r' )
        while 1:
            line = skip_read_line(fd, no_eof=True).split()
            if line[0] == 'POINTS':
                n_nod = int( line[1] )
                coors = read_array(fd, n_nod, 3, nm.float64)
                break
        if ret_fd:
            return coors, fd
        else:
            fd.close()
            return coors

    def get_dimension(self, coors):
        """Deduce the space dimension: 2 iff all z coordinates differ by
        (nearly) zero, else 3."""
        dz = nm.diff(coors[:,2])
        if nm.allclose(dz, 0.0):
            dim = 2
        else:
            dim = 3
        return dim

    def read_dimension( self, ret_fd = False ):
        """Return the space dimension deduced from the point coordinates."""
        coors, fd = self.read_coors(ret_fd=True)
        dim = self.get_dimension(coors)
        if ret_fd:
            return dim, fd
        else:
            fd.close()
            return dim

    def read_bounding_box( self, ret_fd = False, ret_dim = False ):
        """Return the (2, dim) bounding box of the point coordinates."""
        coors, fd = self.read_coors(ret_fd=True)
        dim = self.get_dimension(coors)
        bbox = nm.vstack( (nm.amin( coors[:,:dim], 0 ),
                           nm.amax( coors[:,:dim], 0 )) )
        if ret_dim:
            if ret_fd:
                return bbox, dim, fd
            else:
                fd.close()
                return bbox, dim
        else:
            if ret_fd:
                return bbox, fd
            else:
                fd.close()
                return bbox

    def read( self, mesh, **kwargs ):
        """Parse the VTK file with a small state machine: header ->
        points -> cells -> cell types -> optional mat_id / node_groups
        sections; then fill `mesh`."""
        fd = open( self.filename, 'r' )
        mode = 'header'
        mode_status = 0
        coors = conns = desc = mat_id = node_grps = None
        finished = 0
        while 1:
            line = skip_read_line(fd)
            if not line:
                break
            if mode == 'header':
                if mode_status == 0:
                    if line.strip() == 'ASCII':
                        mode_status = 1
                elif mode_status == 1:
                    if line.strip() == 'DATASET UNSTRUCTURED_GRID':
                        mode_status = 0
                        mode = 'points'
            elif mode == 'points':
                line = line.split()
                if line[0] == 'POINTS':
                    n_nod = int( line[1] )
                    coors = read_array(fd, n_nod, 3, nm.float64)
                    mode = 'cells'
            elif mode == 'cells':
                line = line.split()
                if line[0] == 'CELLS':
                    n_el, n_val = map( int, line[1:3] )
                    raw_conn = read_list( fd, n_val, int )
                    mode = 'cell_types'
            elif mode == 'cell_types':
                line = line.split()
                if line[0] == 'CELL_TYPES':
                    assert_( int( line[1] ) == n_el )
                    cell_types = read_array(fd, n_el, 1, nm.int32)
                    mode = 'cp_data'
            elif mode == 'cp_data':
                line = line.split()
                if line[0] == 'CELL_DATA':
                    assert_( int( line[1] ) == n_el )
                    mode_status = 1
                    mode = 'mat_id'
                elif line[0] == 'POINT_DATA':
                    assert_( int( line[1] ) == n_nod )
                    mode_status = 1
                    mode = 'node_groups'
            elif mode == 'mat_id':
                if mode_status == 1:
                    if 'SCALARS mat_id int' in line.strip():
                        mode_status = 2
                elif mode_status == 2:
                    if line.strip() == 'LOOKUP_TABLE default':
                        mat_id = read_list( fd, n_el, int )
                        mode_status = 0
                        mode = 'cp_data'
                        finished += 1
            elif mode == 'node_groups':
                if mode_status == 1:
                    if 'SCALARS node_groups int' in line.strip():
                        mode_status = 2
                elif mode_status == 2:
                    if line.strip() == 'LOOKUP_TABLE default':
                        node_grps = read_list( fd, n_nod, int )
                        mode_status = 0
                        mode = 'cp_data'
                        finished += 1
            elif finished >= 2:
                break
        fd.close()

        if mat_id is None:
            mat_id = [[0]] * n_el
        else:
            # Flatten to one id per element when lines held several.
            if len(mat_id) < n_el:
                mat_id = [[ii] for jj in mat_id for ii in jj]
        if node_grps is None:
            node_grps = [0] * n_nod
        else:
            if len(node_grps) < n_nod:
                node_grps = [ii for jj in node_grps for ii in jj]

        dim = self.get_dimension(coors)
        if dim == 2:
            coors = coors[:,:2]
        coors = nm.ascontiguousarray( coors )
        cell_types = cell_types.squeeze()

        # Group raw connectivities by (cell type, dim); the leading
        # per-row node count is dropped, mat_id appended as last column.
        dconns = {}
        for iel, row in enumerate( raw_conn ):
            ct = cell_types[iel]
            key = (ct, dim)
            if key not in vtk_inverse_cell_types:
                continue
            ct = vtk_inverse_cell_types[key]
            # NOTE(review): `ct` is re-bound to the description string
            # here; the setdefault key is still the (type, dim) tuple.
            dconns.setdefault(key, []).append(row[1:] + mat_id[iel])

        desc = []
        conns = []
        for key, conn in dconns.iteritems():
            ct = key[0]
            sct = vtk_inverse_cell_types[key]
            desc.append(sct)
            aconn = nm.array(conn, dtype = nm.int32)
            if ct in vtk_remap_keys: # Remap pixels and voxels.
                aconn[:, :-1] = aconn[:, vtk_remap[ct]]
            conns.append(aconn)

        conns_in, mat_ids = sort_by_mat_id( conns )
        conns, mat_ids, descs = split_by_mat_id( conns_in, mat_ids, desc )
        mesh._set_data( coors, node_grps, conns, mat_ids, descs )
        return mesh

    def write(self, filename, mesh, out=None, ts=None, **kwargs):
        """Write `mesh`, and optionally output data `out` (vertex and
        cell modes), as a legacy ASCII VTK file. `ts` provides the time
        step info written into the header."""

        def _reshape_tensors(data, dim, sym, nc):
            # Expand symmetric/full tensor data to full 3x3 rows.
            if dim == 3:
                if nc == sym:
                    aux = data[:, [0,3,4,3,1,5,4,5,2]]
                elif nc == (dim * dim):
                    aux = data[:, [0,3,4,6,1,5,7,8,2]]
                else:
                    aux = data.reshape((data.shape[0], dim*dim))
            else:
                zz = nm.zeros((data.shape[0], 1), dtype=nm.float64)
                if nc == sym:
                    aux = nm.c_[data[:,[0,2]], zz, data[:,[2,1]],
                                zz, zz, zz, zz]
                elif nc == (dim * dim):
                    aux = nm.c_[data[:,[0,2]], zz, data[:,[3,1]],
                                zz, zz, zz, zz]
                else:
                    aux = nm.c_[data[:,0,[0,1]], zz, data[:,1,[0,1]],
                                zz, zz, zz, zz]
            return aux

        def _write_tensors(data):
            # NOTE(review): iterates the closure variable `aux`, not its
            # own `data` argument - works only because all callers
            # assign the argument to `aux` first.
            format = self.get_vector_format(3)
            format = '\n'.join([format] * 3) + '\n\n'
            for row in aux:
                fd.write(format % tuple(row))

        if ts is None:
            step, time, nt = 0, 0.0, 0.0
        else:
            step, time, nt = ts.step, ts.time, ts.nt

        fd = open( filename, 'w' )
        fd.write(vtk_header % (step, time, nt, op.basename(sys.argv[0])))

        n_nod, dim = mesh.coors.shape
        # Number of independent components of a symmetric dim x dim
        # tensor (Python 2 integer division).
        sym = dim * (dim + 1) / 2

        fd.write( '\nPOINTS %d float\n' % n_nod )
        aux = mesh.coors
        if dim == 2:
            # VTK always stores 3D points; pad z with zeros.
            aux = nm.hstack((aux, nm.zeros((aux.shape[0], 1), dtype=aux.dtype)))
        format = self.get_vector_format( 3 ) + '\n'
        for row in aux:
            fd.write( format % tuple( row ) )

        n_el, n_els, n_e_ps = mesh.n_el, mesh.n_els, mesh.n_e_ps
        # Each cell line carries its node count plus the node ids.
        total_size = nm.dot( n_els, n_e_ps + 1 )
        fd.write( '\nCELLS %d %d\n' % (n_el, total_size) )
        ct = []
        for ig, conn in enumerate( mesh.conns ):
            nn = n_e_ps[ig] + 1
            ct += [vtk_cell_types[mesh.descs[ig]]] * n_els[ig]
            format = ' '.join( ['%d'] * nn + ['\n'] )
            for row in conn:
                fd.write( format % ((nn-1,) + tuple( row )) )

        fd.write( '\nCELL_TYPES %d\n' % n_el )
        fd.write( ''.join( ['%d\n' % ii for ii in ct] ) )

        fd.write( '\nPOINT_DATA %d\n' % n_nod )
        # node groups
        fd.write( '\nSCALARS node_groups int 1\nLOOKUP_TABLE default\n' )
        fd.write( ''.join( ['%d\n' % ii for ii in mesh.ngroups] ) )

        if out is not None:
            point_keys = [key for key, val in out.iteritems()
                          if val.mode == 'vertex']
        else:
            point_keys = {}

        for key in point_keys:
            val = out[key]
            nr, nc = val.data.shape
            if nc == 1:
                fd.write( '\nSCALARS %s float %d\n' % (key, nc) )
                fd.write( 'LOOKUP_TABLE default\n' )
                format = self.float_format + '\n'
                for row in val.data:
                    fd.write( format % row )
            elif nc == dim:
                fd.write( '\nVECTORS %s float\n' % key )
                if dim == 2:
                    aux = nm.hstack( (val.data,
                                      nm.zeros( (nr, 1), dtype = nm.float64 ) ) )
                else:
                    aux = val.data
                format = self.get_vector_format( 3 ) + '\n'
                for row in aux:
                    fd.write( format % tuple( row ) )
            elif (nc == sym) or (nc == (dim * dim)):
                fd.write('\nTENSORS %s float\n' % key)
                aux = _reshape_tensors(val.data, dim, sym, nc)
                _write_tensors(aux)
            else:
                raise NotImplementedError, nc

        if out is not None:
            cell_keys = [key for key, val in out.iteritems()
                         if val.mode == 'cell']
        else:
            cell_keys = {}

        fd.write( '\nCELL_DATA %d\n' % n_el )
        # cells - mat_id
        fd.write( 'SCALARS mat_id int 1\nLOOKUP_TABLE default\n' )
        aux = nm.hstack(mesh.mat_ids).tolist()
        fd.write( ''.join( ['%d\n' % ii for ii in aux] ) )

        for key in cell_keys:
            val = out[key]
            # Cell data shape: (n_el, 1, nr, nc).
            ne, aux, nr, nc = val.data.shape
            if (nr == 1) and (nc == 1):
                fd.write( '\nSCALARS %s float %d\n' % (key, nc) )
                fd.write( 'LOOKUP_TABLE default\n' )
                format = self.float_format + '\n'
                aux = val.data.squeeze()
                if len(aux.shape) == 0:
                    fd.write(format % aux)
                else:
                    for row in aux:
                        fd.write(format % row)
            elif (nr == dim) and (nc == 1):
                fd.write( '\nVECTORS %s float\n' % key )
                if dim == 2:
                    aux = nm.hstack( (val.data.squeeze(),
                                      nm.zeros( (ne, 1), dtype = nm.float64 ) ) )
                else:
                    aux = val.data
                format = self.get_vector_format( 3 ) + '\n'
                for row in aux:
                    fd.write( format % tuple( row.squeeze() ) )
            elif (((nr == sym) or (nr == (dim * dim))) and (nc == 1)) \
                 or ((nr == dim) and (nc == dim)):
                fd.write('\nTENSORS %s float\n' % key)
                data = val.data.squeeze()
                aux = _reshape_tensors(data, dim, sym, nr)
                _write_tensors(aux)
            else:
                raise NotImplementedError, (nr, nc)

        fd.close()

    def read_data( self, step, filename = None ):
        """Point data only!"""
        # NOTE(review): `filename` is resolved via get_default() but the
        # open() below still uses self.filename - the argument has no
        # effect.
        filename = get_default( filename, self.filename )
        out = {}
        fd = open( self.filename, 'r' )
        # Seek to the POINT_DATA section.
        while 1:
            line = skip_read_line(fd, no_eof=True).split()
            if line[0] == 'POINT_DATA':
                break
        n_nod = int(line[1])
        while 1:
            line = skip_read_line(fd)
            if not line:
                break
            line = line.split()
            if line[0] == 'SCALARS':
                name, dtype, nc = line[1:]
                assert_(int(nc) == 1)
                fd.readline() # skip lookup table line
                data = nm.zeros((n_nod,), dtype=nm.float64)
                ii = 0
                while ii < n_nod:
                    data[ii] = float(fd.readline())
                    ii += 1
                out[name] = Struct( name = name,
                                    mode = 'vertex',
                                    data = data,
                                    dofs = None )
            elif line[0] == 'VECTORS':
                name, dtype = line[1:]
                data = []
                ii = 0
                while ii < n_nod:
                    data.append([float(val) for val in fd.readline().split()])
                    ii += 1
                out[name] = Struct( name = name,
                                    mode = 'vertex',
                                    data = nm.array(data, dtype=nm.float64),
                                    dofs = None )
            elif line[0] == 'CELL_DATA':
                # Cell data is not read by this method.
                break
            line = fd.readline()
        fd.close()
        return out
##
# c: 15.02.2008
class TetgenMeshIO( MeshIO ):
    """
    Reader for TetGen .node/.ele file pairs (linear triangles and
    tetrahedra only).

    NOTE(review): Python 2 constructs throughout (`file()`, `has_key`,
    print statement, raising a string); `MyBar` used in read() is not
    defined in this module - presumably a progress-bar class from the
    original SfePy code. Indentation reconstructed from a
    whitespace-stripped dump.
    """
    format = "tetgen"

    def read( self, mesh, **kwargs ):
        """Read <trunk>.node and <trunk>.ele and fill `mesh`."""
        import os
        fname = os.path.splitext(self.filename)[0]
        nodes=self.getnodes(fname+".node", MyBar(" nodes:"))
        etype, elements, regions = self.getele(fname+".ele",
                                               MyBar(" elements:"))
        descs = []
        conns = []
        mat_ids = []
        # TetGen numbers nodes from 1; convert to 0-based.
        elements = nm.array( elements, dtype = nm.int32 )-1
        for key, value in regions.iteritems():
            descs.append( etype )
            mat_ids.append( nm.ones_like(value) * key )
            conns.append( elements[nm.array(value)-1].copy() )
        mesh._set_data( nodes, None, conns, mat_ids, descs )
        return mesh

    @staticmethod
    def getnodes(fnods, up=None, verbose=False):
        """
        Reads t.1.nodes, returns a list of nodes.
        Example:
        >>> self.getnodes("t.1.node", MyBar("nodes:"))
        [(0.0, 0.0, 0.0), (4.0, 0.0, 0.0), (0.0, 4.0, 0.0), (-4.0, 0.0, 0.0),
        (0.0, 0.0, 4.0), (0.0, -4.0, 0.0), (0.0, -0.0, -4.0), (-2.0, 0.0,
        -2.0), (-2.0, 2.0, 0.0), (0.0, 2.0, -2.0), (0.0, -2.0, -2.0), (2.0,
        0.0, -2.0), (2.0, 2.0, 0.0), ... ]
        """
        f=open(fnods)
        # Header: npoints, dim, nattrib, nbound.
        l=[int(x) for x in f.readline().split()]
        npoints,dim,nattrib,nbound=l
        if dim == 2:
            # Pad 2D nodes with a zero z coordinate.
            ndapp = [0.0]
        else:
            ndapp = []
        if verbose and up is not None: up.init(npoints)
        nodes=[]
        for line in f:
            if line[0]=="#": continue
            l=[float(x) for x in line.split()]
            l = l[:(dim + 1)]
            # First value is the 1-based node number; check consistency.
            assert_( int(l[0])==len(nodes)+1 )
            l = l[1:]
            nodes.append(tuple(l + ndapp))
            if verbose and up is not None: up.update(len(nodes))
        assert_( npoints==len(nodes) )
        return nodes

    @staticmethod
    def getele(fele, up=None, verbose=False):
        """
        Reads t.1.ele, returns a list of elements.
        Example:
        >>> elements, regions = self.getele("t.1.ele", MyBar("elements:"))
        >>> elements
        [(20, 154, 122, 258), (86, 186, 134, 238), (15, 309, 170, 310), (146,
        229, 145, 285), (206, 207, 125, 211), (99, 193, 39, 194), (185, 197,
        158, 225), (53, 76, 74, 6), (19, 138, 129, 313), (23, 60, 47, 96),
        (119, 321, 1, 329), (188, 296, 122, 322), (30, 255, 177, 256), ...]
        >>> regions
        {100: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
        19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36,
        37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54,
        55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 7, ...],
        ...}
        """
        f=file(fele)
        # Header: ntetra, nnod (nodes per element), nattrib.
        l=[int(x) for x in f.readline().split()]
        ntetra,nnod,nattrib=l
        #we have either linear or quadratic tetrahedra:
        elem = None
        if nnod in [4,10]:
            elem = '3_4'
            linear = (nnod == 4)
        if nnod in [3, 7]:
            elem = '2_3'
            linear = (nnod == 3)
        if elem is None or not linear:
            raise Exception("Only linear triangle and tetrahedra reader is implemented")
        if verbose and up is not None: up.init(ntetra)
        # if nattrib!=1:
        #     raise "tetgen didn't assign an entity number to each element (option -A)"
        els=[]
        regions={}
        for line in f:
            if line[0]=="#": continue
            l=[int(x) for x in line.split()]
            if elem == '2_3':
                assert_((len(l) - 1 - nattrib) == 3 )
                els.append((l[1],l[2],l[3]))
            if elem == '3_4':
                assert_((len(l) - 1 - nattrib) == 4 )
                els.append((l[1],l[2],l[3],l[4]))
            if nattrib == 1:
                regionnum = l[-1]
            else:
                regionnum = 1
            if regionnum==0:
                print "see %s, element # %d"%(fele,l[0])
                # NOTE(review): raising a string is Python 2.3-era
                # syntax, rejected by later interpreters.
                raise "there are elements not belonging to any physical entity"
            if regions.has_key(regionnum):
                regions[regionnum].append(l[0])
            else:
                regions[regionnum]=[l[0]]
            assert_( l[0]==len(els) )
            if verbose and up is not None: up.update(l[0])
        return elem, els, regions

    def write( self, filename, mesh, out = None, **kwargs ):
        """Writing TetGen files is not supported."""
        raise NotImplementedError

    def read_dimension(self):
        # TetGen only supports 3D mesh
        return 3

    def read_bounding_box( self ):
        """Reading the bounding box is not supported for TetGen files."""
        raise NotImplementedError
##
# c: 20.03.2008
##
# c: 20.03.2008
class ComsolMeshIO( MeshIO ):
    """
    Reader/writer for COMSOL Multiphysics text mesh files (.mphtxt).

    The reader is a small state machine ('header' -> 'points' -> 'cells')
    driven by the sequential structure of the file; the writer emits the
    same record layout.
    """
    format = 'comsol'

    ##
    # c: 20.03.2008, r: 20.03.2008
    def _read_commented_int( self ):
        # Read the next non-empty line and parse the integer preceding the
        # '#' comment, e.g. '4 # number of ...' -> 4.
        return int( skip_read_line( self.fd ).split( '#' )[0] )

    def _skip_comment(self):
        # Consume one whitespace-delimited token and the rest of its line.
        read_token(self.fd)
        self.fd.readline()

    ##
    # c: 20.03.2008, r: 20.03.2008
    def read( self, mesh, **kwargs ):
        """
        Read a COMSOL mesh into `mesh` and return it.

        Supported cell types: tri, quad, tet, hex; other element records
        are skipped. Quad and hex node orderings are permuted to SfePy's
        convention.
        """
        self.fd = fd = open( self.filename, 'r' )
        mode = 'header'

        coors = conns = desc = None
        while 1:
            if mode == 'header':
                line = skip_read_line( fd )

                # Tags and types records are not used - skip them.
                n_tags = self._read_commented_int()
                for ii in xrange( n_tags ):
                    skip_read_line( fd )
                n_types = self._read_commented_int()
                for ii in xrange( n_types ):
                    skip_read_line( fd )

                skip_read_line( fd )
                assert_( skip_read_line( fd ).split()[1] == 'Mesh' )
                skip_read_line( fd )
                dim = self._read_commented_int()
                assert_( (dim == 2) or (dim == 3) )
                n_nod = self._read_commented_int()
                # Lowest mesh point index (not used further).
                i0 = self._read_commented_int()
                mode = 'points'

            elif mode == 'points':
                self._skip_comment()
                coors = read_array( fd, n_nod, dim, nm.float64 )
                mode = 'cells'

            elif mode == 'cells':
                n_types = self._read_commented_int()
                conns = []
                descs = []
                mat_ids = []
                for it in xrange( n_types ):
                    t_name = skip_read_line( fd ).split()[1]
                    n_ep = self._read_commented_int()
                    n_el = self._read_commented_int()
                    self._skip_comment()

                    # Element connectivities for this type.
                    aux = read_array(fd, n_el, n_ep, nm.int32)
                    if t_name == 'tri':
                        conns.append(aux)
                        descs.append('2_3')
                        is_conn = True
                    elif t_name == 'quad':
                        # Rearrange element node order to match SfePy.
                        aux = aux[:,(0,1,3,2)]
                        conns.append(aux)
                        descs.append('2_4')
                        is_conn = True
                    elif t_name == 'hex':
                        # Rearrange element node order to match SfePy.
                        aux = aux[:,(0,1,3,2,4,5,7,6)]
                        conns.append(aux)
                        descs.append('3_8')
                        is_conn = True
                    elif t_name == 'tet':
                        conns.append(aux)
                        descs.append('3_4')
                        is_conn = True
                    else:
                        # Unsupported element type - its records below are
                        # still consumed to keep the file position in sync.
                        is_conn = False

                    # Skip parameters.
                    n_pv = self._read_commented_int()
                    n_par = self._read_commented_int()
                    for ii in xrange( n_par ):
                        skip_read_line( fd )

                    n_domain = self._read_commented_int()
                    assert_( n_domain == n_el )
                    if is_conn:
                        self._skip_comment()
                        # Domain numbers serve as material ids.
                        mat_id = read_array( fd, n_domain, 1, nm.int32 )
                        mat_ids.append( mat_id )
                    else:
                        for ii in xrange( n_domain ):
                            skip_read_line( fd )

                    # Skip up/down pairs.
                    n_ud = self._read_commented_int()
                    for ii in xrange( n_ud ):
                        skip_read_line( fd )
                break

        fd.close()
        self.fd = None

        # Append material ids as an extra column for sorting/splitting.
        conns2 = []
        for ii, conn in enumerate( conns ):
            conns2.append( nm.c_[conn, mat_ids[ii]] )

        conns_in, mat_ids = sort_by_mat_id( conns2 )
        conns, mat_ids, descs = split_by_mat_id( conns_in, mat_ids, descs )
        mesh._set_data( coors, None, conns, mat_ids, descs )

        return mesh

    def write( self, filename, mesh, out = None, **kwargs ):
        """
        Write `mesh` in the COMSOL text format. Output of `out` data is not
        supported.
        """

        def write_elements( fd, ig, conn, mat_ids, type_name,
                            npe, format, norder, nm_params ):
            # Write one element type record: connectivities (reordered via
            # `norder`), parameters and domain (material) numbers.
            fd.write( "# Type #%d\n\n" % ig )
            fd.write( "%s # type name\n\n\n" % type_name )
            fd.write( "%d # number of nodes per element\n" % npe)
            fd.write( "%d # number of elements\n" % conn.shape[0] )
            fd.write( "# Elements\n" )
            for ii in range( conn.shape[0] ):
                nn = conn[ii] # Zero based
                fd.write( format % tuple( nn[norder] ) )
            fd.write( "\n%d # number of parameter values per element\n"
                      % nm_params)
            # Top level always 0?
            fd.write( "0 # number of parameters\n" )
            fd.write( "# Parameters\n\n" )
            fd.write( "%d # number of domains\n"
                      % sum([mi.shape[0] for mi in mat_ids]) )
            fd.write( "# Domains\n" )
            for mi in mat_ids:
                # Domains in comsol have to be > 0
                # NOTE(review): the shift below does not guarantee
                # positivity for mi.min() < -1; abs() is applied anyway
                # when writing - verify the intent.
                if (mi <= 0).any():
                    mi += mi.min() + 1
                for dom in mi:
                    fd.write("%d\n" % abs(dom))
            fd.write( "\n0 # number of up/down pairs\n" )
            fd.write( "# Up/down\n" )

        fd = open( filename, 'w' )

        coors = mesh.coors
        conns, desc, mat_ids = join_conn_groups( mesh.conns, mesh.descs,
                                                 mesh.mat_ids )

        n_nod, dim = coors.shape

        # Header
        fd.write( "# Created by SfePy\n\n\n" )
        fd.write( "# Major & minor version\n" )
        fd.write( "0 1\n" )
        fd.write( "1 # number of tags\n" )
        fd.write( "# Tags\n" )
        fd.write( "2 m1\n" )
        fd.write( "1 # number of types\n" )
        fd.write( "# Types\n" )
        fd.write( "3 obj\n\n" )

        # Record
        fd.write( "# --------- Object 0 ----------\n\n" )
        fd.write( "0 0 1\n" ) # version unused serializable
        fd.write( "4 Mesh # class\n" )
        fd.write( "1 # version\n" )
        fd.write( "%d # sdim\n" % dim )
        fd.write( "%d # number of mesh points\n" % n_nod )
        fd.write( "0 # lowest mesh point index\n\n" ) # Always zero in SfePy

        fd.write( "# Mesh point coordinates\n" )
        format = self.get_vector_format( dim ) + '\n'
        for ii in range( n_nod ):
            nn = tuple( coors[ii] )
            fd.write( format % tuple( nn ) )

        fd.write( "\n%d # number of element types\n\n\n" % len(conns) )

        for ig, conn in enumerate( conns ):
            if (desc[ig] == "2_4"):
                write_elements( fd, ig, conn, mat_ids,
                                "4 quad", 4, "%d %d %d %d\n", [0, 1, 3, 2], 8 )
            elif (desc[ig] == "2_3"):
                # TODO: Verify number of parameters for tri element
                write_elements( fd, ig, conn, mat_ids,
                                "3 tri", 3, "%d %d %d\n", [0, 1, 2], 4 )
            elif (desc[ig] == "3_4"):
                # TODO: Verify number of parameters for tet element
                write_elements( fd, ig, conn, mat_ids,
                                "3 tet", 4, "%d %d %d %d\n", [0, 1, 2, 3], 16 )
            elif (desc[ig] == "3_8"):
                write_elements( fd, ig, conn, mat_ids,
                                "3 hex", 8, "%d %d %d %d %d %d %d %d\n",
                                [0, 1, 3, 2, 4, 5, 7, 6], 24 )
            else:
                print 'unknown element type!', desc[ig]
                raise ValueError

        fd.close()

        if out is not None:
            for key, val in out.iteritems():
                raise NotImplementedError
##
# c: 23.06.2008
##
# c: 23.06.2008
class HDF5MeshIO( MeshIO ):
    """
    Reader/writer for SfePy's own HDF5 output format (requires pytables).

    The file holds the mesh in '/mesh', optional time stepper data in
    '/ts', bookkeeping in '/tstat' and '/last_step', and per-time-step
    results in '/step<i>' groups.
    """
    format = "hdf5"

    import string
    # Translation table mapping every character that is not a letter, digit
    # or underscore to '_'; used to mangle data keys into valid HDF5 group
    # names.
    _all = ''.join( map( chr, range( 256 ) ) )
    _letters = string.letters + string.digits + '_'
    _rubbish = ''.join( [ch for ch in set( _all ) - set( _letters )] )
    _tr = string.maketrans( _rubbish, '_' * len( _rubbish ) )

    def read( self, mesh, **kwargs ):
        """Read the mesh stored in the '/mesh' group into `mesh`."""
        fd = pt.openFile( self.filename, mode = "r" )

        mesh_group = fd.root.mesh

        mesh.name = mesh_group.name.read()
        coors = mesh_group.coors.read()
        ngroups = mesh_group.ngroups.read()

        n_gr = mesh_group.n_gr.read()
        conns = []
        descs = []
        mat_ids = []
        # One 'group<i>' child per connectivity group.
        for ig in xrange( n_gr ):
            gr_name = 'group%d' % ig
            group = mesh_group._f_getChild( gr_name )
            conns.append( group.conn.read() )
            mat_ids.append( group.mat_id.read() )
            descs.append( group.desc.read() )

        fd.close()
        mesh._set_data( coors, ngroups, conns, mat_ids, descs )

        return mesh

    def write( self, filename, mesh, out = None, ts = None, **kwargs ):
        """
        Write the mesh (on the first step only) and optionally the `out`
        data of the current time step.

        The file is created anew when `ts` is None or `ts.step` == 0;
        subsequent steps are appended in 'r+' mode.
        """
        from time import asctime

        if pt is None:
            output( 'pytables not imported!' )
            raise ValueError

        step = get_default_attr(ts, 'step', 0)
        if step == 0:
            # A new file.
            fd = pt.openFile( filename, mode = "w",
                              title = "SfePy output file" )

            mesh_group = fd.createGroup( '/', 'mesh', 'mesh' )

            fd.createArray( mesh_group, 'name', mesh.name, 'name' )
            fd.createArray( mesh_group, 'coors', mesh.coors, 'coors' )
            fd.createArray( mesh_group, 'ngroups', mesh.ngroups, 'ngroups' )
            fd.createArray( mesh_group, 'n_gr', len( mesh.conns ), 'n_gr' )
            for ig, conn in enumerate( mesh.conns ):
                conn_group = fd.createGroup( mesh_group, 'group%d' % ig,
                                             'connectivity group' )
                fd.createArray( conn_group, 'conn', conn, 'connectivity' )
                fd.createArray( conn_group, 'mat_id', mesh.mat_ids[ig], 'material id' )
                fd.createArray( conn_group, 'desc', mesh.descs[ig], 'element Type' )

            if ts is not None:
                ts_group = fd.createGroup( '/', 'ts', 'time stepper' )
                fd.createArray( ts_group, 't0', ts.t0, 'initial time' )
                fd.createArray( ts_group, 't1', ts.t1, 'final time' )
                fd.createArray( ts_group, 'dt', ts.dt, 'time step' )
                fd.createArray( ts_group, 'n_step', ts.n_step, 'n_step' )

            tstat_group = fd.createGroup( '/', 'tstat', 'global time statistics' )
            fd.createArray( tstat_group, 'created', asctime(),
                            'file creation time' )
            # Placeholder of fixed length, replaced on every write below.
            fd.createArray( tstat_group, 'finished', '.' * 24,
                            'file closing time' )

            fd.createArray( fd.root, 'last_step', nm.array( [0], dtype = nm.int32 ),
                            'last saved step' )

            fd.close()

        if out is not None:
            if ts is None:
                step, time, nt = 0, 0.0, 0.0
            else:
                step, time, nt = ts.step, ts.time, ts.nt

            # Existing file.
            fd = pt.openFile( filename, mode = "r+" )

            step_group = fd.createGroup( '/', 'step%d' % step, 'time step data' )
            ts_group = fd.createGroup(step_group, 'ts', 'time stepper')
            fd.createArray(ts_group, 'step', step, 'step')
            fd.createArray(ts_group, 't', time, 'time')
            fd.createArray(ts_group, 'nt', nt, 'normalized time')

            name_dict = {}
            for key, val in out.iteritems():
                #  print key
                dofs = get_default(val.dofs, (-1,))
                shape = val.get('shape', val.data.shape)
                var_name = val.get('var_name', 'None')

                # Mangle the key into a valid HDF5 group name.
                group_name = '__' + key.translate( self._tr )
                data_group = fd.createGroup(step_group, group_name,
                                            '%s data' % key)
                fd.createArray( data_group, 'data', val.data, 'data' )
                fd.createArray( data_group, 'mode', val.mode, 'mode' )
                fd.createArray( data_group, 'dofs', dofs, 'dofs' )
                fd.createArray( data_group, 'shape', shape, 'shape' )
                fd.createArray( data_group, 'name', val.name, 'object name' )
                fd.createArray( data_group, 'var_name',
                                var_name, 'object parent name' )
                fd.createArray( data_group, 'dname', key, 'data name' )
                if val.mode == 'full':
                    fd.createArray(data_group, 'field_name', val.field_name,
                                   'field name')

                name_dict[key] = group_name

            # Remember the key <-> group name mapping on the step group.
            step_group._v_attrs.name_dict = name_dict
            fd.root.last_step[0] = step

            # Refresh the file closing time stamp.
            fd.removeNode( fd.root.tstat.finished )
            fd.createArray( fd.root.tstat, 'finished', asctime(),
                            'file closing time' )

            fd.close()

    def read_last_step(self, filename=None):
        """Return the number of the last saved step."""
        filename = get_default( filename, self.filename )
        fd = pt.openFile( filename, mode = "r" )
        last_step = fd.root.last_step[0]
        fd.close()
        return last_step

    def read_time_stepper( self, filename = None ):
        """Return (t0, t1, dt, n_step) read from the '/ts' group."""
        filename = get_default( filename, self.filename )
        fd = pt.openFile( filename, mode = "r" )

        try:
            ts_group = fd.root.ts
            out = (ts_group.t0.read(), ts_group.t1.read(),
                   ts_group.dt.read(), ts_group.n_step.read())

        except:
            raise ValueError('no time stepper found!')

        finally:
            fd.close()

        return out

    def read_times(self, filename=None):
        """
        Read true time step data from individual time steps.

        Returns
        -------
        steps : array
            The time steps.
        times : array
            The times of the time steps.
        nts : array
            The normalized times of the time steps, in [0, 1].
        """
        filename = get_default(filename, self.filename)
        fd = pt.openFile(filename, mode='r')

        steps = sorted(int(name[4:]) for name in fd.root._v_groups.keys()
                       if name.startswith('step'))

        times = []
        nts = []
        for step in steps:
            ts_group = fd.getNode(fd.root, 'step%d/ts' % step)

            times.append(ts_group.t.read())
            nts.append(ts_group.nt.read())
        fd.close()

        steps = nm.asarray(steps, dtype=nm.int32)
        times = nm.asarray(times, dtype=nm.float64)
        nts = nm.asarray(nts, dtype=nm.float64)

        return steps, times, nts

    def _get_step_group( self, step, filename = None ):
        # Open the file and return (fd, 'step<step>' group), or (None, None)
        # when the step group is missing. The caller must close fd.
        filename = get_default( filename, self.filename )
        fd = pt.openFile( filename, mode = "r" )

        gr_name = 'step%d' % step
        try:
            step_group = fd.getNode( fd.root, gr_name )
        except:
            output( 'step %d data not found - premature end of file?' % step )
            fd.close()
            return None, None

        return fd, step_group

    def read_data( self, step, filename = None ):
        """Read all data of the given step as a dict of Structs."""
        fd, step_group = self._get_step_group( step, filename = filename )
        if fd is None: return None

        out = {}
        for data_group in step_group:
            try:
                key = data_group.dname.read()

            except pt.exceptions.NoSuchNodeError:
                # Not a data group (e.g. the 'ts' group) - skip it.
                continue

            name = data_group.name.read()
            mode = data_group.mode.read()
            data = data_group.data.read()
            dofs = tuple(data_group.dofs.read())
            try:
                shape = tuple(data_group.shape.read())

            except pt.exceptions.NoSuchNodeError:
                # Older files have no shape record.
                shape = data.shape

            if mode == 'full':
                field_name = data_group.field_name.read()

            else:
                field_name = None

            out[key] = Struct(name=name, mode=mode, data=data,
                              dofs=dofs, shape=shape, field_name=field_name)

            # (-1,) is the stored placeholder for "no dofs".
            if out[key].dofs == (-1,):
                out[key].dofs = None

        fd.close()

        return out

    def read_data_header( self, dname, step = 0, filename = None ):
        """Return (mode, group name) of the data named `dname` in `step`."""
        fd, step_group = self._get_step_group( step, filename = filename )
        if fd is None: return None

        groups = step_group._v_groups
        for name, data_group in groups.iteritems():
            try:
                key = data_group.dname.read()

            except pt.exceptions.NoSuchNodeError:
                continue

            if key == dname:
                mode = data_group.mode.read()
                fd.close()
                return mode, name

        fd.close()
        raise KeyError, 'non-existent data: %s' % dname

    def read_time_history( self, node_name, indx, filename = None ):
        """
        Read the history of data in node `node_name` at the `indx` items,
        over all saved steps.
        """
        filename = get_default( filename, self.filename )
        fd = pt.openFile( filename, mode = "r" )

        th = dict_from_keys_init( indx, list )
        for step in xrange( fd.root.last_step[0] + 1 ):
            gr_name = 'step%d' % step

            step_group = fd.getNode( fd.root, gr_name )
            data = step_group._f_getChild( node_name ).data

            for ii in indx:
                th[ii].append( nm.array( data[ii] ) )

        fd.close()

        for key, val in th.iteritems():
            aux = nm.array( val )
            if aux.ndim == 4: # cell data.
                aux = aux[:,0,:,0]
            th[key] = aux

        return th

    def read_variables_time_history( self, var_names, ts, filename = None ):
        """Read the whole time histories of the given variables."""
        filename = get_default( filename, self.filename )
        fd = pt.openFile( filename, mode = "r" )

        # All ts.n_step steps must have been saved.
        assert_( (fd.root.last_step[0] + 1) == ts.n_step )

        ths = dict_from_keys_init( var_names, list )

        arr = nm.asarray
        for step in xrange( ts.n_step ):
            gr_name = 'step%d' % step
            step_group = fd.getNode( fd.root, gr_name )
            name_dict = step_group._v_attrs.name_dict
            for var_name in var_names:
                data = step_group._f_getChild( name_dict[var_name] ).data
                ths[var_name].append( arr( data.read() ) )

        fd.close()

        return ths
class MEDMeshIO( MeshIO ):
    """
    Reader for Salome MED files (HDF5-based, requires pytables).

    Only a subset of MED cell types is supported, see `med_descs` below.
    """
    format = "med"

    def read( self, mesh, **kwargs ):
        """Read the first mesh found in the MED file into `mesh`."""
        fd = pt.openFile( self.filename, mode = "r" )

        mesh_root = fd.root.ENS_MAA

        #TODO: Loop through multiple meshes?
        mesh_group = mesh_root._f_getChild(mesh_root._v_groups.keys()[0])

        mesh.name = mesh_group._v_name

        coors = mesh_group.NOE.COO.read()
        n_nodes = mesh_group.NOE.COO.getAttr('NBR')
        # Unflatten the node coordinate array
        coors = coors.reshape(coors.shape[0]/n_nodes,n_nodes).transpose()
        dim = coors.shape[1]

        # Node (vertex) group numbers.
        ngroups = mesh_group.NOE.FAM.read()
        assert_((ngroups >= 0).all())

        # Dict to map MED element names to SfePy descs
        #NOTE: The commented lines are elements which
        # produce KeyError in SfePy
        med_descs = {
            'TE4' : '3_4',
            #'T10' : '3_10',
            #'PY5' : '3_5',
            #'P13' : '3_13',
            'HE8' : '3_8',
            #'H20' : '3_20',
            #'PE6' : '3_6',
            #'P15' : '3_15',
            #TODO: Polyhedrons (POE) - need special handling
            'TR3' : '2_3',
            #'TR6' : '2_6',
            'QU4' : '2_4',
            #'QU8' : '2_8',
            #TODO: Polygons (POG) - need special handling
            #'SE2' : '1_2',
            #'SE3' : '1_3',
        }

        conns = []
        descs = []
        mat_ids = []

        for md, desc in med_descs.iteritems():
            # Only cells matching the space dimension are read.
            if int(desc[0]) != dim: continue

            try:
                group = mesh_group.MAI._f_getChild(md)

                conn = group.NOD.read()
                n_conns = group.NOD.getAttr('NBR')
                # (0 based indexing in numpy vs. 1 based in MED)
                conn = conn.reshape(conn.shape[0]/n_conns,n_conns).transpose()-1
                conns.append( conn )

                # MED stores cell family numbers as non-positive values.
                mat_id = group.FAM.read()
                assert_((mat_id <= 0).all())
                mat_id = nm.abs(mat_id)
                mat_ids.append( mat_id )

                descs.append( med_descs[md] )

            except pt.exceptions.NoSuchNodeError:
                # This element type is not present in the file.
                pass

        fd.close()
        mesh._set_data( coors, ngroups, conns, mat_ids, descs )

        return mesh
class Mesh3DMeshIO( MeshIO ):
    """
    Reader for the mesh3d format: sections of vertices, tetrahedra,
    hexahedra, prisms, triangles and quadrilaterals, in that order.
    Only tetrahedra and hexahedra are turned into mesh cells.
    """
    format = "mesh3d"

    def read(self, mesh, **kwargs):
        """Read the mesh3d file into `mesh` and return it."""
        f = open(self.filename)
        # read the whole file:
        vertices = self._read_section(f, integer=False)
        tetras = self._read_section(f)
        hexes = self._read_section(f)
        # The following sections are read only to advance the file
        # position; their contents are not used.
        prisms = self._read_section(f)
        tris = self._read_section(f)
        quads = self._read_section(f)

        # substract 1 from all elements, because we count from 0:
        conns = []
        mat_ids = []
        descs = []
        if len(tetras) > 0:
            conns.append(tetras - 1)
            mat_ids.append([0]*len(tetras))
            descs.append("3_4")
        if len(hexes) > 0:
            conns.append(hexes - 1)
            mat_ids.append([0]*len(hexes))
            descs.append("3_8")

        mesh._set_data( vertices, None, conns, mat_ids, descs )
        return mesh

    def read_dimension(self):
        """Return the space dimension - mesh3d meshes are always 3D."""
        return 3

    def _read_line(self, f):
        """
        Reads one non empty line (if it's a comment, it skips it).
        """
        l = f.readline().strip()
        while l == "" or l[0] == "#": # comment or an empty line
            l = f.readline().strip()
        return l

    def _read_section(self, f, integer=True):
        """
        Reads one section from the mesh3d file.

        integer ... if True, all numbers are passed to int(), otherwise to
            float(), before returning

        Some examples how a section can look like:

        2
        1 2 5 4 7 8 11 10
        2 3 6 5 8 9 12 11

        or

        5
        1 2 3 4     1
        1 2 6 5     1
        2 3 7 6     1
        3 4 8 7     1
        4 1 5 8     1

        or

        0
        """
        if integer:
            dtype=int
        else:
            dtype=float
        l = self._read_line(f)
        N = int(l)
        rows = []
        for i in range(N):
            l = self._read_line(f)
            row = nm.fromstring(l, sep=" ", dtype=dtype)
            rows.append(row)
        return nm.array(rows)
def mesh_from_groups(mesh, ids, coors, ngroups,
                     tris, mat_tris, quads, mat_quads,
                     tetras, mat_tetras, hexas, mat_hexas):
    """
    Fill `mesh` from the given vertex ids/coordinates and the per-cell-type
    connectivity and material id groups, and return it.

    Vertex ids may be sparse - they are remapped to contiguous 0-based
    indices before building the connectivities.
    """
    ids = nm.asarray(ids, dtype=nm.int32)
    coors = nm.asarray(coors, dtype=nm.float64)

    # Lookup table: original vertex id -> contiguous 0-based index.
    remap = nm.zeros((ids.max() + 1,), dtype=nm.int32)
    remap[ids] = nm.arange(coors.shape[0], dtype=nm.int32)

    cell_groups = (tris, quads, tetras, hexas)
    conns = [remap[nm.array(group, dtype=nm.int32)]
             for group in cell_groups]
    mat_ids = [nm.array(mats, dtype=nm.int32)
               for mats in (mat_tris, mat_quads, mat_tetras, mat_hexas)]
    descs = ['2_3', '2_4', '3_4', '3_8']

    conns, mat_ids = sort_by_mat_id2(conns, mat_ids)
    conns, mat_ids, descs = split_by_mat_id(conns, mat_ids, descs)
    mesh._set_data(coors, ngroups, conns, mat_ids, descs)

    return mesh
class AVSUCDMeshIO( MeshIO ):
    """
    Reader for the AVS UCD ASCII format. Only 'tet' and 'hex' cells are
    read; writing is not supported.
    """
    format = 'avs_ucd'

    def guess( filename ):
        # The format cannot be detected reliably - accept any file.
        return True
    guess = staticmethod( guess )

    def read( self, mesh, **kwargs ):
        """Read nodes and tet/hex cells into `mesh` and return it."""
        fd = open( self.filename, 'r' )

        # Skip all comments.
        while 1:
            line = fd.readline()
            if line and (line[0] != '#'):
                break

        header = [int(ii) for ii in line.split()]
        n_nod, n_el = header[0:2]

        ids = nm.zeros( (n_nod,), dtype = nm.int32 )
        dim = 3
        coors = nm.zeros( (n_nod, dim), dtype = nm.float64 )
        for ii in xrange( n_nod ):
            line = fd.readline().split()
            ids[ii] = int( line[0] )
            coors[ii] = [float( coor ) for coor in line[1:]]

        mat_tetras = []
        tetras = []
        mat_hexas = []
        hexas = []
        for ii in xrange( n_el ):
            # Cell line: id, material, type name, vertex ids.
            line = fd.readline().split()
            if line[2] == 'tet':
                mat_tetras.append( int( line[1] ) )
                tetras.append( [int( ic ) for ic in line[3:]] )
            elif line[2] == 'hex':
                mat_hexas.append( int( line[1] ) )
                hexas.append( [int( ic ) for ic in line[3:]] )
        fd.close()

        mesh = mesh_from_groups(mesh, ids, coors, None,
                                [], [], [], [],
                                tetras, mat_tetras, hexas, mat_hexas)

        return mesh

    def read_dimension(self):
        """Return the space dimension - always 3 for this reader."""
        return 3

    def write( self, filename, mesh, out = None, **kwargs ):
        # Writing AVS UCD files is not supported.
        raise NotImplementedError
class HypermeshAsciiMeshIO( MeshIO ):
    """
    Reader/writer for Altair HyperMesh ASCII (.hmascii) mesh files.

    The reader supports '*node', '*tetra4' and '*hexa8' cards only; other
    cards are ignored.
    """
    format = 'hmascii'

    def read( self, mesh, **kwargs ):
        """Read nodes and tetra4/hexa8 cells into `mesh` and return it."""
        fd = open( self.filename, 'r' )

        ids = []
        coors = []
        tetras = []
        mat_tetras = []
        hexas = []
        mat_hexas = []

        for line in fd:
            if line and (line[0] == '*'):
                # Card payload is the comma-separated list inside '(...)'.
                if line[1:5] == 'node':
                    line = line.strip()[6:-1].split(',')
                    ids.append( int( line[0] ) )
                    coors.append( [float( coor ) for coor in line[1:4]] )

                elif line[1:7] == 'tetra4':
                    # Fields: id, component (material), 4 vertex ids.
                    line = line.strip()[8:-1].split(',')
                    mat_tetras.append( int( line[1] ) )
                    tetras.append( [int( ic ) for ic in line[2:6]] )

                elif line[1:6] == 'hexa8':
                    # Fields: id, component (material), 8 vertex ids.
                    line = line.strip()[7:-1].split(',')
                    mat_hexas.append( int( line[1] ) )
                    hexas.append( [int( ic ) for ic in line[2:10]] )
        fd.close()

        mesh = mesh_from_groups(mesh, ids, coors, None,
                                [], [], [], [],
                                tetras, mat_tetras, hexas, mat_hexas)

        return mesh

    def read_dimension(self):
        """Return the space dimension - always 3 for this reader."""
        return 3

    def write(self, filename, mesh, out=None, **kwargs):
        """
        Write `mesh` in the HyperMesh ASCII format. Output of `out` data is
        not supported.
        """
        fd = open(filename, 'w')

        coors = mesh.coors
        conns, desc = join_conn_groups(mesh.conns, mesh.descs,
                                       mesh.mat_ids, concat=True)

        n_nod, dim = coors.shape

        fd.write("HYPERMESH Input Deck Generated by Sfepy MeshIO\n")
        fd.write("*filetype(ASCII)\n*version(11.0.0.47)\n\n")
        fd.write("BEGIN DATA\n")

        fd.write("BEGIN NODES\n")
        # NOTE(review): the node card hard-codes three coordinates - a 2D
        # mesh would not fill the format; verify callers only pass 3D here.
        for ii in range(n_nod):
            nn = (ii + 1, ) + tuple(coors[ii])
            fd.write("*node(%d,%f,%f,%f,0,0,0,1,1)\n" % nn)
        fd.write("END NODES\n\n")

        fd.write("BEGIN COMPONENTS\n")
        for ig, conn in enumerate(conns):
            fd.write('*component(%d,"component%d",0,1,0)\n' % (ig + 1, ig + 1))
            # The last column of `conn` is the material id (dropped here);
            # vertex indices are converted to 1-based.
            if (desc[ig] == "1_2"):
                for ii in range(conn.shape[0]):
                    nn = (ii + 1,) + tuple(conn[ii,:-1] + 1)
                    fd.write("*bar2(%d,1,%d,%d,0)\n" % nn)
            elif (desc[ig] == "2_4"):
                for ii in range(conn.shape[0]):
                    nn = (ii + 1,) + tuple(conn[ii,:-1] + 1)
                    fd.write("*quad4(%d,1,%d,%d,%d,%d,0)\n" % nn)
            elif (desc[ig] == "2_3"):
                for ii in range(conn.shape[0]):
                    nn = (ii + 1,) + tuple(conn[ii,:-1] + 1)
                    fd.write("*tria3(%d,1,%d,%d,%d,0)\n" % nn)
            elif (desc[ig] == "3_4"):
                for ii in range(conn.shape[0]):
                    nn = (ii + 1,) + tuple(conn[ii,:-1] + 1)
                    fd.write("*tetra4(%d,1,%d,%d,%d,%d,0)\n" % nn)
            elif (desc[ig] == "3_8"):
                for ii in range(conn.shape[0]):
                    nn = (ii + 1,) + tuple(conn[ii,:-1] + 1)
                    fd.write("*hex8(%d,1,%d,%d,%d,%d,%d,%d,%d,%d,0)\n" % nn)
            else:
                raise ValueError('unknown element type! (%s)' % desc[ig])
        # Fixed: the components section was erroneously closed with a
        # second "BEGIN COMPONENTS" line.
        fd.write("END COMPONENTS\n\n")
        fd.write("END DATA\n")

        fd.close()

        if out is not None:
            for key, val in out.iteritems():
                raise NotImplementedError
class AbaqusMeshIO( MeshIO ):
    """
    Reader for Abaqus input decks (.inp). Supported cards: '*node',
    '*element' (C3D8, C3D4, CPS*/CPE* quads and triangles) and '*nset'.
    Writing is not supported.
    """
    format = 'abaqus'

    def guess( filename ):
        # A file is accepted when a '*node' card occurs within the first
        # 100 lines.
        ok = False
        fd = open( filename, 'r' )
        for ii in xrange(100):
            try:
                line = fd.readline().strip().split(',')
            except:
                break
            if line[0].lower() == '*node':
                ok = True
                break
        fd.close()

        return ok
    guess = staticmethod( guess )

    def read( self, mesh, **kwargs ):
        """Read nodes, elements and node sets into `mesh` and return it."""
        fd = open( self.filename, 'r' )

        ids = []
        coors = []
        tetras = []
        mat_tetras = []
        hexas = []
        mat_hexas = []
        tris = []
        mat_tris = []
        quads = []
        mat_quads = []
        nsets = {}

        ing = 1     # node set (vertex group) counter
        dim = 0     # deduced from the first node line
        line = fd.readline().split(',')
        while 1:
            if not line[0]: break

            token = line[0].strip().lower()
            if token == '*node':
                while 1:
                    line = fd.readline().split(',')
                    if (not line[0]) or (line[0][0] == '*'): break
                    if dim == 0:
                        dim = len(line) - 1
                    ids.append( int( line[0] ) )
                    if dim == 2:
                        coors.append( [float( coor ) for coor in line[1:3]] )
                    else:
                        coors.append( [float( coor ) for coor in line[1:4]] )

            elif token == '*element':

                if line[1].find( 'C3D8' ) >= 0:
                    while 1:
                        line = fd.readline().split(',')
                        if (not line[0]) or (line[0][0] == '*'): break
                        mat_hexas.append( 0 )
                        hexas.append( [int( ic ) for ic in line[1:9]] )

                elif line[1].find( 'C3D4' ) >= 0:
                    while 1:
                        line = fd.readline().split(',')
                        if (not line[0]) or (line[0][0] == '*'): break
                        mat_tetras.append( 0 )
                        tetras.append( [int( ic ) for ic in line[1:5]] )

                elif line[1].find('CPS') >= 0 or line[1].find('CPE') >= 0:
                    # Plane stress/strain elements: '4' -> quads,
                    # '3' -> triangles.
                    if line[1].find('4') >= 0:
                        while 1:
                            line = fd.readline().split(',')
                            if (not line[0]) or (line[0][0] == '*'): break
                            mat_quads.append( 0 )
                            quads.append( [int( ic ) for ic in line[1:5]] )
                    elif line[1].find('3') >= 0:
                        while 1:
                            line = fd.readline().split(',')
                            if (not line[0]) or (line[0][0] == '*'): break
                            mat_tris.append( 0 )
                            tris.append( [int( ic ) for ic in line[1:4]] )
                    else:
                        raise ValueError('unknown element type! (%s)' % line[1])
                else:
                    raise ValueError('unknown element type! (%s)' % line[1])

            elif token == '*nset':
                # Generated node sets are not supported - skip the card.
                if line[-1].strip().lower() == 'generate':
                    line = fd.readline()
                    continue
                while 1:
                    line = fd.readline().strip().split(',')
                    if (not line[0]) or (line[0][0] == '*'): break
                    # Drop an empty field caused by a trailing comma.
                    if not line[-1]: line = line[:-1]
                    aux = [int( ic ) for ic in line]
                    nsets.setdefault(ing, []).extend( aux )
                ing += 1

            else:
                line = fd.readline().split(',')

        fd.close()

        # Mark vertex groups according to the node sets (1-based node ids).
        ngroups = nm.zeros( (len(coors),), dtype = nm.int32 )
        for ing, ii in nsets.iteritems():
            ngroups[nm.array(ii)-1] = ing

        mesh = mesh_from_groups(mesh, ids, coors, ngroups,
                                tris, mat_tris, quads, mat_quads,
                                tetras, mat_tetras, hexas, mat_hexas)

        return mesh

    def read_dimension(self):
        """Return the space dimension deduced from the first '*node' card."""
        # NOTE(review): if the first token is not '*node', the outer loop
        # does not advance `line` and may spin forever; also `dim` is
        # unbound when no '*node' card exists - verify against real decks.
        fd = open( self.filename, 'r' )
        line = fd.readline().split(',')
        while 1:
            if not line[0]: break
            token = line[0].strip().lower()
            if token == '*node':
                while 1:
                    line = fd.readline().split(',')
                    if (not line[0]) or (line[0][0] == '*'): break
                    dim = len(line) - 1
        fd.close()
        return dim

    def write( self, filename, mesh, out = None, **kwargs ):
        # Writing Abaqus files is not supported.
        raise NotImplementedError
class BDFMeshIO( MeshIO ):
    """
    Reader/writer for NASTRAN bulk data files (.bdf). Supported cards:
    GRID/GRID*, CHEXA, CTETRA, CQUAD4, CTRIA3, SPC/SPC*.
    """
    format = 'nastran'

    def read_dimension( self, ret_fd = False ):
        """
        Return the space dimension: 3 when any CHEXA/CTETRA card is
        present, 2 otherwise. If `ret_fd` is True, also return the open
        file object.
        """
        fd = open( self.filename, 'r' )
        el3d = 0
        # NOTE(review): no explicit EOF break is present here - at end of
        # file `line[0]` raises IndexError on the empty string; verify.
        while 1:
            try:
                line = fd.readline()
            except:
                output( "reading " + fd.name + " failed!" )
                raise
            if len( line ) == 1: continue
            if line[0] == '$': continue
            aux = line.split()
            if aux[0] == 'CHEXA':
                el3d += 1
            elif aux[0] == 'CTETRA':
                el3d += 1
        if el3d > 0:
            dim = 3
        else:
            dim = 2
        if ret_fd:
            return dim, fd
        else:
            fd.close()
            return dim

    def read( self, mesh, **kwargs ):
        """Read the bulk data file into `mesh` and return it."""
        def mfloat( s ):
            # Handle the NASTRAN shorthand exponent notation where 'e' is
            # omitted, e.g. '1.23-05' -> 1.23e-05 (3-character exponents).
            if len( s ) > 3:
                if s[-3] == '-':
                    return float( s[:-3]+'e'+s[-3:] )
            return float( s )

        import string

        fd = open( self.filename, 'r' )

        el = {'3_8' : [], '3_4' : [], '2_4' : [], '2_3' : []}
        nod = []

        # `cmd` carries the state for two-line (continued) cards:
        # 'GRIDX' after a GRID* card, 'CHEXAX' after a CHEXA card.
        cmd = ''
        dim = 2

        conns_in = []
        descs = []

        node_grp = None     # vertex groups from SPC cards, if any
        while 1:
            try:
                line = fd.readline()
            except EOFError:
                break
            except:
                output( "reading " + fd.name + " failed!" )
                raise

            if (len( line ) == 0): break
            if len( line ) < 4: continue
            if line[0] == '$': continue

            row = line.strip().split()
            if row[0] == 'GRID':
                # Small-field GRID card: coordinates are the last three
                # 8-character fields of the line.
                cs = line.strip()[-24:]
                aux = [ cs[0:8], cs[8:16], cs[16:24] ]
                nod.append( [mfloat(ii) for ii in aux] );
            elif row[0] == 'GRID*':
                # Large-field GRID card, finished on the following line.
                aux = row[1:4];
                cmd = 'GRIDX';
            elif row[0] == 'CHEXA':
                # CHEXA card: 6 vertices here, 2 more on the next line.
                aux = [int(ii)-1 for ii in row[3:9]]
                aux2 = int(row[2])
                aux3 = row[9]
                cmd ='CHEXAX'
            elif row[0] == 'CTETRA':
                aux = [int(ii)-1 for ii in row[3:]]
                aux.append( int(row[2]) )
                el['3_4'].append( aux )
                dim = 3
            elif row[0] == 'CQUAD4':
                aux = [int(ii)-1 for ii in row[3:]]
                aux.append( int(row[2]) )
                el['2_4'].append( aux )
            elif row[0] == 'CTRIA3':
                aux = [int(ii)-1 for ii in row[3:]]
                aux.append( int(row[2]) )
                el['2_3'].append( aux )
            elif cmd == 'GRIDX':
                # Continuation of a GRID* card.
                cmd = ''
                aux2 = row[1]
                if aux2[-1] == '0':
                    aux2 = aux2[:-1]
                aux3 = aux[1:]
                aux3.append( aux2 )
                nod.append( [float(ii) for ii in aux3] );
            elif cmd == 'CHEXAX':
                # Continuation of a CHEXA card.
                cmd = ''
                aux4 = row[0]
                aux5 = string.find( aux4, aux3 )
                aux.append( int(aux4[(aux5+len(aux3)):])-1 )
                aux.extend( [int(ii)-1 for ii in row[1:]] )
                aux.append( aux2 )
                el['3_8'].append( aux )
                dim = 3
            elif row[0] == 'SPC' or row[0] == 'SPC*':
                # Single point constraints define vertex groups.
                if node_grp is None:
                    node_grp = [0] * len(nod)
                node_grp[int(row[2]) - 1] = int(row[1])

        for elem in el.keys():
            if len(el[elem]) > 0:
                conns_in.append( el[elem] )
                descs.append( elem )
        fd.close()

        nod = nm.array( nod, nm.float64 )
        if dim == 2:
            nod = nod[:,:2].copy()
        conns_in = nm.array( conns_in, nm.int32 )

        conns_in, mat_ids = sort_by_mat_id( conns_in )
        conns, mat_ids, descs = split_by_mat_id( conns_in, mat_ids, descs )
        mesh._set_data(nod, node_grp, conns, mat_ids, descs)

        return mesh

    @staticmethod
    def format_str(str, idx, n=8):
        # Format the integers in `str` into fixed-width (`n`-character)
        # fields, inserting a '+<idx>' continuation marker after the 8th
        # field as required by the NASTRAN small-field format.
        out = ''
        for ii, istr in enumerate(str):
            aux = '%d' % istr
            out += aux + ' ' * (n - len(aux))
            if ii == 7:
                out += '+%07d\n+%07d' % (idx, idx)
        return out

    def write(self, filename, mesh, out=None, **kwargs):
        """
        Write `mesh` as a NASTRAN bulk data file. Output of `out` data is
        not supported.
        """
        # NOTE(review): the literal field widths in the card/format strings
        # below look collapsed relative to the 8/16-character NASTRAN
        # fields - verify the spacing against a reference file.
        fd = open(filename, 'w')

        coors = mesh.coors
        conns, desc = join_conn_groups(mesh.conns, mesh.descs,
                                       mesh.mat_ids, concat=True)

        n_nod, dim = coors.shape

        fd.write("$NASTRAN Bulk Data File created by SfePy\n")

        fd.write("$\nBEGIN BULK\n")
        fd.write("$\n$ ELEMENT CONNECTIVITY\n$\n")

        iel = 0
        mats = {}
        for ig, conn in enumerate(conns):
            for ii in range(conn.shape[0]):
                iel += 1
                # The last column of `conn` holds the material id.
                nn = conn[ii][:-1] + 1
                mat = conn[ii][-1]
                if mat in mats:
                    mats[mat] += 1
                else:
                    mats[mat] = 0
                if (desc[ig] == "2_4"):
                    fd.write("CQUAD4 %s\n" %\
                             self.format_str([ii + 1, mat,
                                              nn[0], nn[1], nn[2], nn[3]],
                                             iel))
                elif (desc[ig] == "2_3"):
                    fd.write("CTRIA3 %s\n" %\
                             self.format_str([ii + 1, mat,
                                              nn[0], nn[1], nn[2]], iel))
                elif (desc[ig] == "3_4"):
                    fd.write("CTETRA %s\n" %\
                             self.format_str([ii + 1, mat,
                                              nn[0], nn[1], nn[2], nn[3]],
                                             iel))
                elif (desc[ig] == "3_8"):
                    fd.write("CHEXA %s\n" %\
                             self.format_str([ii + 1, mat, nn[0], nn[1], nn[2],
                                              nn[3], nn[4], nn[5], nn[6],
                                              nn[7]], iel))
                else:
                    raise ValueError('unknown element type! (%s)' % desc[ig])

        fd.write("$\n$ NODAL COORDINATES\n$\n")
        format = 'GRID* %s % 08E % 08E\n'
        if coors.shape[1] == 3:
            # 3D: the z coordinate is formatted per node below.
            format += '* % 08E0 \n'
        else:
            # 2D: pre-fill the z field with 0.0 once.
            format += '* % 08E0 \n' % 0.0
        for ii in range(n_nod):
            sii = str(ii + 1)
            fd.write(format % ((sii + ' ' * (8 - len(sii)), )
                               + tuple(coors[ii])))

        fd.write("$\n$ GEOMETRY\n$\n1 ")
        fd.write("0.000000E+00 0.000000E+00\n")
        fd.write("* 0.000000E+00 0.000000E+00\n* \n")

        fd.write("$\n$ MATERIALS\n$\n")
        matkeys = mats.keys()
        matkeys.sort()
        for ii, imat in enumerate(matkeys):
            fd.write("$ material%d : Isotropic\n" % imat)
            aux = str(imat)
            fd.write("MAT1* %s " % (aux + ' ' * (8 - len(aux))))
            fd.write("0.000000E+00 0.000000E+00\n")
            fd.write("* 0.000000E+00 0.000000E+00\n")
        fd.write("$\n$ GEOMETRY\n$\n")
        for ii, imat in enumerate(matkeys):
            fd.write("$ material%d : solid%d\n" % (imat, imat))
            fd.write("PSOLID* %s\n" % self.format_str([ii + 1, imat], 0, 16))
            fd.write("* \n")
        fd.write("ENDDATA\n")

        fd.close()
class NEUMeshIO( MeshIO ):
    """
    Reader for GAMBIT neutral files (.neu): nodes, cells, element groups
    (used as material ids) and boundary node sets. Writing is not
    supported.
    """
    format = 'gambit'

    def read_dimension( self, ret_fd = False ):
        """
        Return the space dimension from the NUMNP record. If `ret_fd` is
        True, also return the open file object.
        """
        fd = open( self.filename, 'r' )

        # NOTE(review): if the NUMNP record is missing, this loop never
        # advances `row` and `dim` stays unbound - verify.
        row = fd.readline().split()
        while 1:
            if not row: break
            if len( row ) == 0: continue

            if (row[0] == 'NUMNP'):
                row = fd.readline().split()
                n_nod, n_el, dim = row[0], row[1], int( row[4] )
                break;

        if ret_fd:
            return dim, fd
        else:
            fd.close()
            return dim

    def read( self, mesh, **kwargs ):
        """Read the GAMBIT neutral file into `mesh` and return it."""
        el = {'3_8' : [], '3_4' : [], '2_4' : [], '2_3' : []}
        nod = []

        conns_in = []
        descs = []

        group_ids = []
        group_n_els = []
        groups = []
        nodal_bcs = {}

        fd = open( self.filename, 'r' )

        row = fd.readline().split()
        while 1:
            if not row: break
            # NOTE(review): this test is unreachable - `not row` above
            # already covers an empty list.
            if len( row ) == 0: continue

            if (row[0] == 'NUMNP'):
                row = fd.readline().split()
                n_nod, n_el, dim = row[0], row[1], int( row[4] )

            elif (row[0] == 'NODAL'):
                row = fd.readline().split()
                while not( row[0] == 'ENDOFSECTION' ):
                    nod.append( row[1:] )
                    row = fd.readline().split()

            elif (row[0] == 'ELEMENTS/CELLS'):
                row = fd.readline().split()
                while not(row[0] == 'ENDOFSECTION'):
                    # The element id is kept as the last entry, to be
                    # replaced by the group (material) id later.
                    elid = [row[0]]
                    gtype = int(row[1])
                    if gtype == 6:
                        el['3_4'].append(row[3:]+elid)
                    elif gtype == 4:
                        # Hexahedra may continue on the next line.
                        rr = row[3:]
                        if (len(rr) < 8):
                            rr.extend(fd.readline().split())
                        el['3_8'].append(rr+elid)
                    elif gtype == 3:
                        el['2_3'].append(row[3:]+elid)
                    elif gtype == 2:
                        el['2_4'].append(row[3:]+elid)
                    row = fd.readline().split()

            elif (row[0] == 'GROUP:'):
                group_ids.append( row[1] )
                g_n_el = int( row[3] )
                group_n_els.append( g_n_el )
                name = fd.readline().strip()

                els = []
                row = fd.readline().split()
                row = fd.readline().split()
                while not( row[0] == 'ENDOFSECTION' ):
                    els.extend( row )
                    row = fd.readline().split()
                if g_n_el != len( els ):
                    # NOTE(review): the message interpolates the total
                    # `n_el` instead of the group count `g_n_el` - verify.
                    print 'wrong number of group elements! (%d == %d)'\
                          % (n_el, len( els ))
                    raise ValueError
                groups.append( els )

            elif (row[0] == 'BOUNDARY'):
                # Boundary condition section: a named set of node ids.
                row = fd.readline().split()
                key = row[0]
                num = int(row[2])
                inod = read_array(fd, num, None, nm.int32) - 1
                nodal_bcs[key] = inod.squeeze()

                row = fd.readline().split()
                assert_(row[0] == 'ENDOFSECTION')

            else:
                row = fd.readline().split()

        fd.close()

        if int( n_el ) != sum( group_n_els ):
            print 'wrong total number of group elements! (%d == %d)'\
                  % (int( n_el ), len( group_n_els ))

        # Element id -> group (material) id.
        mat_ids = [None] * int( n_el )
        for ii, els in enumerate( groups ):
            for iel in els:
                mat_ids[int( iel ) - 1] = group_ids[ii]

        for elem in el.keys():
            if len(el[elem]) > 0:
                for iel in el[elem]:
                    # Convert to 0-based ints; replace the trailing element
                    # id by its material id.
                    for ii in range( len( iel ) ):
                        iel[ii] = int( iel[ii] ) - 1
                    iel[-1] = mat_ids[iel[-1]]
                conns_in.append( el[elem] )
                descs.append( elem )

        nod = nm.array( nod, nm.float64 )
        conns_in = nm.array( conns_in, nm.int32 )

        conns_in, mat_ids = sort_by_mat_id( conns_in )
        conns, mat_ids, descs = split_by_mat_id( conns_in, mat_ids, descs )

        mesh._set_data(nod, None, conns, mat_ids, descs, nodal_bcs=nodal_bcs)

        return mesh

    def write( self, filename, mesh, out = None, **kwargs ):
        # Writing GAMBIT neutral files is not supported.
        raise NotImplementedError
class ANSYSCDBMeshIO( MeshIO ):
    """
    Reader for ANSYS CDB files: NBLOCK (nodes, fixed-width fields described
    by a FORTRAN-like format line) and EBLOCK (elements) records. Writing
    is not supported.
    """
    format = 'ansys_cdb'

    @staticmethod
    def make_format(format):
        # Parse a list of FORTRAN-like field descriptors (e.g. '3i8',
        # '6e16.9') into `idx`, a list of (start, end) column slices, and
        # `dtype`, the corresponding type letters ('i' or 'e').
        idx = [];
        dtype = [];
        start = 0;

        for iform in format:
            ret = iform.partition('i')
            if not ret[1]:
                ret = iform.partition('e')
            if not ret[1]:
                raise ValueError
            aux = ret[2].partition('.')
            step = int(aux[0])
            for j in range(int(ret[0])):
                idx.append((start, start+step))
                start += step
                dtype.append(ret[1])
        return idx, dtype

    def write( self, filename, mesh, out = None, **kwargs ):
        # Writing ANSYS CDB files is not supported.
        raise NotImplementedError

    def read_bounding_box( self ):
        # Reading the bounding box alone is not implemented for this format.
        raise NotImplementedError

    def read_dimension( self, ret_fd = False ):
        # CDB meshes are always treated as 3D.
        return 3

    def read(self, mesh, **kwargs):
        """Read NBLOCK nodes and EBLOCK tet/hex cells into `mesh`."""
        ids = []
        coors = []
        elems = []

        fd = open( self.filename, 'r' )

        while True:
            row = fd.readline()
            if not row: break
            if len(row) == 0: continue

            row = row.split(',')
            if (row[0] == 'NBLOCK'):
                nval = int(row[1])
                attr = row[2]
                # The next line describes the fixed-width node fields.
                format = fd.readline()
                format = format.strip()[1:-1].split(',')
                idx, dtype = self.make_format(format)
                while True:
                    row = fd.readline()
                    # The block terminator line starts with 'N'.
                    if row[0] == 'N':
                        break
                    line = []
                    for ival in range(nval):
                        db, de = idx[ival]
                        line.append(row[db:de])
                    ids.append(int(line[0]))
                    coors.append([float( coor ) for coor in line[3:]])

            elif (row[0] == 'EBLOCK'):
                nval = int(row[1])
                attr = row[2]
                nel = int(row[3])
                # The element format line is not needed by read_array().
                format = fd.readline()
                elems = read_array(fd, nel, nval, nm.int32)
        fd.close()

        # Column 8 holds the number of nodes per element; columns 11+ the
        # connectivity; column 0 the material id.
        tetras_idx = nm.where(elems[:,8] == 4)[0]
        hexas_idx = nm.where(elems[:,8] == 8)[0]

        el_hexas = elems[hexas_idx,11:]
        el_tetras = elems[tetras_idx,11:]

        # hack for stupid export filters
        if el_hexas[0,-4] == el_hexas[0,-1]:
            # Degenerate hexahedra (repeated vertices) are really
            # tetrahedra.
            el_tetras = el_hexas[:,[0,1,2,4]]
            tetras_idx = hexas_idx
            hexas_idx = []
            el_hexas = []

        ngroups = nm.zeros((len(coors),), dtype = nm.int32)

        mesh = mesh_from_groups(mesh, ids, coors, ngroups,
                                [], [], [], [],
                                el_tetras, elems[tetras_idx,0],
                                el_hexas, elems[hexas_idx,0])

        return mesh
def guess_format(filename, ext, formats, io_table):
    """
    Guess the format of filename, candidates are in formats.
    """
    for format in formats:
        output('guessing %s' % format)
        try:
            ok = io_table[format].guess(filename)
        except AttributeError:
            # The reader has no guess() method - try the next candidate.
            ok = False
        if ok:
            return format

    raise NotImplementedError('cannot guess format of a *%s file!' % ext)
##
# c: 05.02.2008, r: 05.02.2008
# Build the mesh-format -> MeshIO-subclass lookup table from everything
# defined above.
# NOTE: the list() is required - under Python 3, vars().items() is a live
# view, and creating the loop variables `key`, `var` in the module
# namespace while iterating it raises RuntimeError.
var_dict = list(vars().items())
io_table = {}

for key, var in var_dict:
    try:
        if is_derived_class(var, MeshIO):
            io_table[var.format] = var
    except TypeError:
        pass
del var_dict
def any_from_filename(filename, prefix_dir=None):
    """
    Create a MeshIO instance according to the kind of `filename`.

    Parameters
    ----------
    filename : str, function or MeshIO subclass instance
        The name of the mesh file. It can be also a user-supplied function
        accepting two arguments: `mesh`, `mode`, where `mesh` is a Mesh
        instance and `mode` is one of 'read','write', or a MeshIO subclass
        instance.
    prefix_dir : str
        The directory name to prepend to `filename`.

    Returns
    -------
    io : MeshIO subclass instance
        The MeshIO subclass instance corresponding to the kind of `filename`.
    """
    # Non-string inputs: already an io object, or a user callback.
    if not isinstance(filename, basestr):
        if isinstance(filename, MeshIO):
            return filename
        return UserMeshIO(filename)

    ext = op.splitext(filename)[1].lower()
    try:
        format = supported_formats[ext]
    except KeyError:
        raise ValueError('unsupported mesh file suffix! (%s)' % ext)

    # A tuple means an ambiguous suffix - ask the readers themselves.
    if isinstance(format, tuple):
        format = guess_format(filename, ext, format, io_table)

    if prefix_dir is not None:
        filename = op.normpath(op.join(prefix_dir, filename))

    return io_table[format](filename)

insert_static_method(MeshIO, any_from_filename)
del any_from_filename
def for_format(filename, format=None, writable=False, prefix_dir=None):
    """
    Create a MeshIO instance for file `filename` with forced `format`.

    Parameters
    ----------
    filename : str
        The name of the mesh file.
    format : str
        One of supported formats. If None,
        :func:`MeshIO.any_from_filename()` is called instead.
    writable : bool
        If True, verify that the mesh format is writable.
    prefix_dir : str
        The directory name to prepend to `filename`.

    Returns
    -------
    io : MeshIO subclass instance
        The MeshIO subclass instance corresponding to the `format`.
    """
    ext = op.splitext(filename)[1].lower()
    _format = supported_formats.get(ext, None)
    format = get_default(format, _format)

    if format is None:
        return MeshIO.any_from_filename(filename, prefix_dir=prefix_dir)

    if not isinstance(format, basestr):
        raise ValueError('ambigous suffix! (%s -> %s)' % (ext, format))

    if format not in io_table:
        raise ValueError('unknown output mesh format! (%s)' % format)

    if writable and ('w' not in supported_capabilities[format]):
        output_writable_meshes()
        msg = ('write support not implemented for output mesh format "%s",'
               ' see above!' % format)
        raise ValueError(msg)

    if prefix_dir is not None:
        filename = op.normpath(op.join(prefix_dir, filename))

    return io_table[format](filename)

insert_static_method(MeshIO, for_format)
del for_format
| {
"repo_name": "vlukes/dicom2fem",
"path": "dicom2fem/meshio.py",
"copies": "1",
"size": "86894",
"license": "bsd-3-clause",
"hash": -2948851048050807300,
"line_mean": 31.5689655172,
"line_max": 88,
"alpha_frac": 0.4448063157,
"autogenerated": false,
"ratio": 3.5685420944558524,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4513348410155852,
"avg_score": null,
"num_lines": null
} |
# Adopted form SfePy project, see http://sfepy.org
# Thanks to Robert Cimrman
import sys
from copy import copy
import os.path as op
import numpy as nm
from .base import (
complex_types,
dict_from_keys_init,
assert_,
is_derived_class,
insert_static_method,
output,
get_default,
get_default_attr,
Struct,
basestr,
)
from .ioutils import skip_read_line, read_token, read_array, read_list, pt
if sys.version_info.major == 2:
from string import letters as string_letters
from string import maketrans as string_maketrans
else:
from string import ascii_letters as string_letters
string_maketrans = str.maketrans
# from bytes import maketrans as string_maketrans
# from bytearrays import maketrans as string_maketrans
# File suffix -> mesh format name. A tuple value marks an ambiguous suffix
# whose actual format must be guessed from the file contents.
supported_formats = {
    '.mesh': 'medit',
    '.vtk': 'vtk',
    '.node': 'tetgen',
    '.txt': 'comsol',
    '.h5': 'hdf5',
    # Order is important, avs_ucd does not guess -> it is the default.
    '.inp': ('abaqus', 'avs_ucd'),
    '.hmascii': 'hmascii',
    '.mesh3d': 'mesh3d',
    '.bdf': 'nastran',
    '.neu': 'gambit',
    '.med': 'med',
    '.cdb': 'ansys_cdb',
}

# Map mesh formats to read and write capabilities.
# 'r' ... read mesh
# 'w' ... write mesh
# 'rn' ... read nodes for boundary conditions
# 'wn' ... write nodes for boundary conditions
supported_capabilities = {
    'medit': ['r', 'w'],
    'vtk': ['r', 'w'],
    'tetgen': ['r'],
    'comsol': ['r', 'w'],
    'hdf5': ['r', 'w'],
    'abaqus': ['r'],
    'avs_ucd': ['r'],
    'hmascii': ['r', 'w'],
    'mesh3d': ['r'],
    'nastran': ['r', 'w'],
    'gambit': ['r', 'rn'],
    'med': ['r'],
    'ansys_cdb': ['r'],
}
def output_writable_meshes():
    """Print the names of all mesh formats that support writing ('w')."""
    output('Supported writable mesh formats are:')
    # dict.iteritems() does not exist on Python 3; items() works on both.
    for key, val in supported_capabilities.items():
        if 'w' in val:
            output(key)
##
# c: 15.02.2008, r: 15.02.2008
def sort_by_mat_id(conns_in):
    """
    Stable-sort each connectivity group by its material id (the last
    column), preserving the relative order of equal ids, and split the
    ids off into a separate array per group.
    """
    conns = []
    mat_ids = []
    for conn in conns_in:
        if conn.shape[0] == 0:
            # Empty groups stay as plain empty lists.
            conns.append([])
            mat_ids.append([])
            continue

        order = nm.argsort(conn[:, -1], kind='mergesort')
        ordered = conn[order]
        conns.append(ordered[:, :-1].copy())
        mat_ids.append(ordered[:, -1].copy())

    return conns, mat_ids
def sort_by_mat_id2(conns_in, mat_ids_in):
    """
    Stable-sort each connectivity group by the corresponding material ids
    (given separately), preserving the relative order of equal ids.
    """
    conns = []
    mat_ids = []
    for ig, conn in enumerate(conns_in):
        if conn.shape[0] == 0:
            conns.append([])
            mat_ids.append([])
            continue

        mat_id = mat_ids_in[ig]
        order = nm.argsort(mat_id, kind='mergesort')
        conns.append(conn[order])
        mat_ids.append(mat_id[order])

    return conns, mat_ids
##
# conns_in must be sorted by mat_id within a group!
# c: 16.06.2005, r: 15.02.2008
def split_by_mat_id(conns_in, mat_ids_in, descs_in):
    """
    Split each group into contiguous runs of equal material id, one output
    group per run. The element descriptor of the source group is repeated
    for each run.
    """
    conns = []
    mat_ids = []
    descs = []

    for ig, conn in enumerate(conns_in):
        # Pad with -1 sentinels so every run boundary (including the first
        # and last) shows up as a change between neighbours.
        sentinel = nm.array([-1], nm.int32)
        padded = nm.concatenate((sentinel, mat_ids_in[ig], sentinel))
        bounds = nm.where(padded[1:] != padded[:-1])[0]

        for ib, ie in zip(bounds[:-1], bounds[1:]):
            conns.append(conn[ib:ie, :].copy())
            mat_ids.append(mat_ids_in[ig][ib:ie])
            descs.append(descs_in[ig])

    return (conns, mat_ids, descs)
##
# 12.10.2005, c
def write_bb(fd, array, dtype):
    """Write `array` to `fd` as a medit .bb-style data block."""
    n_row, n_col = array.shape[0], array.shape[1]
    fd.write('3 %d %d %d\n' % (n_col, n_row, dtype))

    row_format = ' '.join(['%.5e'] * n_col + ['\n'])
    for row in array:
        fd.write(row_format % tuple(row))
##
# c: 03.10.2005, r: 08.02.2008
def join_conn_groups(conns, descs, mat_ids, concat=False):
    """
    Join groups of the same element type.

    With `concat` True the material ids are appended as an extra
    connectivity column and (conns, descs) is returned; otherwise
    (conns, descs, mat_ids) is returned.
    """
    # Collect group indices per element descriptor.
    el = dict_from_keys_init(descs, list)
    for ig, desc in enumerate(descs):
        el[desc].append(ig)
    groups = [ii for ii in el.values() if ii]

    descs_out, conns_out, mat_ids_out = [], [], []
    for group in groups:
        n_ep = conns[group[0]].shape[1]

        joined_conn = nm.zeros((0, n_ep), nm.int32)
        joined_ids = nm.zeros((0,), nm.int32)
        for ig in group:
            joined_conn = nm.concatenate((joined_conn, conns[ig]))
            joined_ids = nm.concatenate((joined_ids, mat_ids[ig]))

        if concat:
            joined_conn = nm.concatenate((joined_conn,
                                          joined_ids[:, nm.newaxis]), 1)
        else:
            mat_ids_out.append(joined_ids)
        conns_out.append(joined_conn)
        descs_out.append(descs[group[0]])

    if concat:
        return conns_out, descs_out
    return conns_out, descs_out, mat_ids_out
def convert_complex_output(out_in):
    """
    Convert complex values in the output dictionary `out_in` to pairs of
    real and imaginary parts.
    """
    out = {}
    # dict.iteritems() does not exist on Python 3; items() works on both.
    for key, val in out_in.items():
        if val.data.dtype in complex_types:
            rval = copy(val)
            rval.data = val.data.real
            out['real(%s)' % key] = rval

            ival = copy(val)
            ival.data = val.data.imag
            out['imag(%s)' % key] = ival
        else:
            out[key] = val

    return out
##
# c: 05.02.2008
class MeshIO(Struct):
    """
    The abstract class for importing and exporting meshes.

    Read the docstring of the Mesh() class. Basically all you need to do is
    to implement the read() method::

        def read(self, mesh, **kwargs):
            nodes = ...
            conns = ...
            mat_ids = ...
            descs = ...
            mesh._set_data(nodes, conns, mat_ids, descs)
            return mesh

    See the Mesh class' docstring how the nodes, conns, mat_ids and descs
    should look like. You just need to read them from your specific format
    from disk.

    To write a mesh to disk, just implement the write() method and use the
    information from the mesh instance (e.g. nodes, conns, mat_ids and
    descs) to construct your specific format.

    The methods read_dimension(), read_bounding_box() should be implemented
    in subclasses, as it is often possible to get that kind of information
    without reading the whole mesh file.

    Optionally, subclasses can implement read_data() to read also
    computation results. This concerns mainly the subclasses with
    implemented write() supporting the 'out' kwarg.

    The default implementation of read_last_step() just returns 0. It
    should be reimplemented in subclasses capable of storing several steps.
    """
    format = None
    call_msg = 'called an abstract MeshIO instance!'

    def __init__(self, filename, **kwargs):
        Struct.__init__(self, filename=filename, **kwargs)
        self.set_float_format()

    def get_filename_trunk(self):
        # The original tested `isinstance(self.filename, file)`, but the
        # `file` builtin does not exist on Python 3. A string filename gets
        # its extension stripped; anything else (an open descriptor) gets a
        # placeholder name.
        if isinstance(self.filename, basestr):
            trunk = op.splitext(self.filename)[0]
        else:
            trunk = 'from_descriptor'
        return trunk

    def read_dimension(self, ret_fd=False):
        raise ValueError(MeshIO.call_msg)

    def read_bounding_box(self, ret_fd=False, ret_dim=False):
        raise ValueError(MeshIO.call_msg)

    def read_last_step(self):
        """The default implementation: just return 0 as the last step."""
        return 0

    def read_times(self, filename=None):
        """
        Read true time step data from individual time steps.

        Returns
        -------
        steps : array
            The time steps.
        times : array
            The times of the time steps.
        nts : array
            The normalized times of the time steps, in [0, 1].

        Notes
        -----
        The default implementation returns empty arrays.
        """
        aux = nm.array([], dtype=nm.float64)
        return aux.astype(nm.int32), aux, aux

    def read(self, mesh, omit_facets=False, **kwargs):
        raise ValueError(MeshIO.call_msg)

    def write(self, filename, mesh, **kwargs):
        raise ValueError(MeshIO.call_msg)

    def read_data(self, step, filename=None):
        raise ValueError(MeshIO.call_msg)

    def set_float_format(self, format=None):
        self.float_format = get_default(format, '%e')

    def get_vector_format(self, dim):
        return ' '.join([self.float_format] * dim)
class UserMeshIO(MeshIO):
    """
    Special MeshIO subclass that enables reading and writing a mesh using a
    user-supplied function.
    """
    format = 'function'

    def __init__(self, filename, **kwargs):
        # `filename` is actually a callback: function(mesh, mode).
        assert_(hasattr(filename, '__call__'))
        self.function = filename

        MeshIO.__init__(self,
                        filename='function:%s' % self.function.__name__,
                        **kwargs)

    def get_filename_trunk(self):
        return self.filename

    def read(self, mesh, *args, **kwargs):
        aux = self.function(mesh, mode='read')
        if aux is not None:
            # The callback may return a brand new mesh.
            mesh = aux

        self.filename = mesh.name
        return mesh

    def write(self, filename, mesh, *args, **kwargs):
        self.function(mesh, mode='write')
##
# c: 05.02.2008
class MeditMeshIO(MeshIO):
    """Reader/writer for medit .mesh files."""
    format = 'medit'

    def read_dimension(self, ret_fd=False):
        fd = open(self.filename, 'r')
        while 1:
            line = skip_read_line(fd, no_eof=True).split()
            if line[0] == 'Dimension':
                if len(line) == 2:
                    dim = int(line[1])
                else:
                    # The dimension may be on the following line.
                    dim = int(fd.readline())
                break

        if ret_fd:
            return dim, fd
        else:
            fd.close()
            return dim

    def read_bounding_box(self, ret_fd=False, ret_dim=False):
        # BUG FIX: the original opened self.filename here and then
        # immediately shadowed the descriptor with the one returned by
        # read_dimension(), leaking the first handle.
        dim, fd = self.read_dimension(ret_fd=True)

        while 1:
            line = skip_read_line(fd, no_eof=True).split()
            if line[0] == 'Vertices':
                num = int(read_token(fd))
                nod = read_array(fd, num, dim + 1, nm.float64)
                break

        bbox = nm.vstack((nm.amin(nod[:, :dim], 0),
                          nm.amax(nod[:, :dim], 0)))

        if ret_dim:
            if ret_fd:
                return bbox, dim, fd
            else:
                fd.close()
                return bbox, dim
        else:
            if ret_fd:
                return bbox, fd
            else:
                fd.close()
                return bbox

    def read(self, mesh, omit_facets=False, **kwargs):
        dim, fd = self.read_dimension(ret_fd=True)

        conns_in = []
        descs = []

        def _read_cells(dimension, size):
            # Read one cell block; the data must be consumed even when the
            # block is skipped, to keep the file position consistent.
            num = int(read_token(fd))
            data = read_array(fd, num, size + 1, nm.int32)
            if omit_facets and (dimension < dim):
                return

            data[:, :-1] -= 1
            conns_in.append(data)
            descs.append('%i_%i' % (dimension, size))

        while 1:
            line = skip_read_line(fd).split()
            if not line:
                break

            ls = line[0]
            if ls == 'Vertices':
                num = int(read_token(fd))
                nod = read_array(fd, num, dim + 1, nm.float64)

            elif ls == 'Tetrahedra':
                _read_cells(3, 4)
            elif ls == 'Hexahedra':
                _read_cells(3, 8)
            elif ls == 'Triangles':
                _read_cells(2, 3)
            elif ls == 'Quadrilaterals':
                _read_cells(2, 4)
            elif ls == 'End':
                break
            elif line[0] == '#':
                continue
            else:
                output('skipping unknown entity: %s' % line)
                continue

        fd.close()

        conns_in, mat_ids = sort_by_mat_id(conns_in)

        # Detect wedges and pyramides -> separate groups.
        if '3_8' in descs:
            ic = descs.index('3_8')

            conn_in = conns_in.pop(ic)
            mat_id_in = mat_ids.pop(ic)

            # flag: 0 = true hexahedron, 1 = wedge, 2 = pyramid
            # (detected by repeated node ids).
            flag = nm.zeros((conn_in.shape[0],), nm.int32)
            for ii, el in enumerate(conn_in):
                if el[4] == el[5]:
                    if el[5] == el[6]:
                        flag[ii] = 2
                    else:
                        flag[ii] = 1

            conn = []
            desc = []
            mat_id = []

            ib = nm.where(flag == 0)[0]
            if len(ib) > 0:
                conn.append(conn_in[ib])
                mat_id.append(mat_id_in[ib])
                desc.append('3_8')

            iw = nm.where(flag == 1)[0]
            if len(iw) > 0:
                ar = nm.array([0, 1, 2, 3, 4, 6], nm.int32)
                conn.append(conn_in[iw[:, None], ar])
                mat_id.append(mat_id_in[iw])
                desc.append('3_6')

            ip = nm.where(flag == 2)[0]
            if len(ip) > 0:
                ar = nm.array([0, 1, 2, 3, 4], nm.int32)
                conn.append(conn_in[ip[:, None], ar])
                mat_id.append(mat_id_in[ip])
                desc.append('3_5')

            conns_in[ic:ic] = conn
            mat_ids[ic:ic] = mat_id
            del descs[ic]
            descs[ic:ic] = desc

        conns, mat_ids, descs = split_by_mat_id(conns_in, mat_ids, descs)
        # The last vertex column carries the node group.
        mesh._set_data(nod[:, :-1], nod[:, -1], conns, mat_ids, descs)

        return mesh

    def write(self, filename, mesh, out=None, **kwargs):
        fd = open(filename, 'w')

        coors = mesh.coors
        conns, desc = join_conn_groups(mesh.conns, mesh.descs,
                                       mesh.mat_ids, concat=True)

        n_nod, dim = coors.shape

        fd.write('MeshVersionFormatted 1\nDimension %d\n' % dim)

        fd.write('Vertices\n%d\n' % n_nod)
        format = self.get_vector_format(dim) + ' %d\n'
        for ii in range(n_nod):
            nn = tuple(coors[ii]) + (mesh.ngroups[ii],)
            fd.write(format % tuple(nn))

        # Node indices are 1-based in medit; the appended mat_id column is
        # written back without the +1 (hence the trailing '- 1').
        for ig, conn in enumerate(conns):
            if desc[ig] == '1_2':
                fd.write('Edges\n%d\n' % conn.shape[0])
                for ii in range(conn.shape[0]):
                    nn = conn[ii] + 1
                    fd.write('%d %d %d\n' % (nn[0], nn[1], nn[2] - 1))
            elif desc[ig] == '2_4':
                fd.write('Quadrilaterals\n%d\n' % conn.shape[0])
                for ii in range(conn.shape[0]):
                    nn = conn[ii] + 1
                    fd.write('%d %d %d %d %d\n'
                             % (nn[0], nn[1], nn[2], nn[3], nn[4] - 1))
            elif desc[ig] == '2_3':
                fd.write('Triangles\n%d\n' % conn.shape[0])
                for ii in range(conn.shape[0]):
                    nn = conn[ii] + 1
                    fd.write('%d %d %d %d\n'
                             % (nn[0], nn[1], nn[2], nn[3] - 1))
            elif desc[ig] == '3_4':
                fd.write('Tetrahedra\n%d\n' % conn.shape[0])
                for ii in range(conn.shape[0]):
                    nn = conn[ii] + 1
                    fd.write('%d %d %d %d %d\n'
                             % (nn[0], nn[1], nn[2], nn[3], nn[4] - 1))
            elif desc[ig] == '3_8':
                fd.write('Hexahedra\n%d\n' % conn.shape[0])
                for ii in range(conn.shape[0]):
                    nn = conn[ii] + 1
                    fd.write('%d %d %d %d %d %d %d %d %d\n'
                             % (nn[0], nn[1], nn[2], nn[3], nn[4],
                                nn[5], nn[6], nn[7], nn[8] - 1))
            else:
                print('unknown element type!', desc[ig])
                raise ValueError

        fd.close()

        if out is not None:
            # dict.iteritems() does not exist on Python 3.
            for key, val in out.items():
                raise NotImplementedError
# Header template for legacy ASCII VTK files.
vtk_header = r"""# vtk DataFile Version 2.0
step %d time %e normalized time %e, generated by %s
ASCII
DATASET UNSTRUCTURED_GRID
"""
# SfePy element descriptor -> VTK cell type id.
vtk_cell_types = {'2_2': 3, '2_4': 9, '2_3': 5,
                  '3_2': 3, '3_4': 10, '3_8': 12}
# VTK cell type id -> space dimension.
# NOTE(review): the original literal listed the key 3 twice
# ({3: 2, ..., 3: 3, ...}); the later entry silently wins, so the
# effective mapping is written out explicitly here. Lines (type 3) are
# thus treated as 3D - confirm that this was the intent.
vtk_dims = {3: 3, 9: 2, 5: 2, 10: 3, 12: 3}
# (VTK cell type id, space dimension) -> SfePy element descriptor.
vtk_inverse_cell_types = {
    (3, 2): '2_2',
    (5, 2): '2_3',
    (8, 2): '2_4',
    (9, 2): '2_4',
    (3, 3): '3_2',
    (10, 3): '3_4',
    (11, 3): '3_8',
    (12, 3): '3_8',
}
# Node reordering for VTK pixel (8) and voxel (11) cells.
vtk_remap = {
    8: nm.array([0, 1, 3, 2], dtype=nm.int32),
    11: nm.array([0, 1, 3, 2, 4, 5, 7, 6], dtype=nm.int32),
}
# list() so that membership tests behave identically on Python 2 and 3.
vtk_remap_keys = list(vtk_remap.keys())
##
# c: 05.02.2008
class VTKMeshIO(MeshIO):
    """Reader/writer for legacy ASCII VTK unstructured-grid files."""
    format = 'vtk'

    def read_coors(self, ret_fd=False):
        fd = open(self.filename, 'r')
        while 1:
            line = skip_read_line(fd, no_eof=True).split()
            if line[0] == 'POINTS':
                n_nod = int(line[1])
                coors = read_array(fd, n_nod, 3, nm.float64)
                break

        if ret_fd:
            return coors, fd
        else:
            fd.close()
            return coors

    def get_dimension(self, coors):
        # A mesh whose z coordinates are all equal is treated as 2D.
        dz = nm.diff(coors[:, 2])
        if nm.allclose(dz, 0.0):
            dim = 2
        else:
            dim = 3
        return dim

    def read_dimension(self, ret_fd=False):
        coors, fd = self.read_coors(ret_fd=True)
        dim = self.get_dimension(coors)

        if ret_fd:
            return dim, fd
        else:
            fd.close()
            return dim

    ##
    # c: 22.07.2008
    def read_bounding_box(self, ret_fd=False, ret_dim=False):
        coors, fd = self.read_coors(ret_fd=True)
        dim = self.get_dimension(coors)

        bbox = nm.vstack((nm.amin(coors[:, :dim], 0),
                          nm.amax(coors[:, :dim], 0)))

        if ret_dim:
            if ret_fd:
                return bbox, dim, fd
            else:
                fd.close()
                return bbox, dim
        else:
            if ret_fd:
                return bbox, fd
            else:
                fd.close()
                return bbox

    ##
    # c: 05.02.2008, r: 10.07.2008
    def read(self, mesh, **kwargs):
        """Read the mesh via a simple section-driven state machine."""
        fd = open(self.filename, 'r')
        mode = 'header'
        mode_status = 0
        coors = conns = desc = mat_id = node_grps = None
        finished = 0
        while 1:
            line = skip_read_line(fd)
            if not line:
                break

            if mode == 'header':
                if mode_status == 0:
                    if line.strip() == 'ASCII':
                        mode_status = 1
                elif mode_status == 1:
                    if line.strip() == 'DATASET UNSTRUCTURED_GRID':
                        mode_status = 0
                        mode = 'points'

            elif mode == 'points':
                line = line.split()
                if line[0] == 'POINTS':
                    n_nod = int(line[1])
                    coors = read_array(fd, n_nod, 3, nm.float64)
                    mode = 'cells'

            elif mode == 'cells':
                line = line.split()
                if line[0] == 'CELLS':
                    n_el, n_val = map(int, line[1:3])
                    raw_conn = read_list(fd, n_val, int)
                    mode = 'cell_types'

            elif mode == 'cell_types':
                line = line.split()
                if line[0] == 'CELL_TYPES':
                    assert_(int(line[1]) == n_el)
                    cell_types = read_array(fd, n_el, 1, nm.int32)
                    mode = 'cp_data'

            elif mode == 'cp_data':
                line = line.split()
                if line[0] == 'CELL_DATA':
                    assert_(int(line[1]) == n_el)
                    mode_status = 1
                    mode = 'mat_id'
                elif line[0] == 'POINT_DATA':
                    assert_(int(line[1]) == n_nod)
                    mode_status = 1
                    mode = 'node_groups'

            elif mode == 'mat_id':
                if mode_status == 1:
                    if 'SCALARS mat_id int' in line.strip():
                        mode_status = 2
                elif mode_status == 2:
                    if line.strip() == 'LOOKUP_TABLE default':
                        mat_id = read_list(fd, n_el, int)
                        mode_status = 0
                        mode = 'cp_data'
                        finished += 1

            elif mode == 'node_groups':
                if mode_status == 1:
                    if 'SCALARS node_groups int' in line.strip():
                        mode_status = 2
                elif mode_status == 2:
                    if line.strip() == 'LOOKUP_TABLE default':
                        node_grps = read_list(fd, n_nod, int)
                        mode_status = 0
                        mode = 'cp_data'
                        finished += 1

            elif finished >= 2:
                break
        fd.close()

        if mat_id is None:
            mat_id = [[0]] * n_el
        else:
            if len(mat_id) < n_el:
                # Multiple ids per input row - flatten to one id per cell.
                mat_id = [[ii] for jj in mat_id for ii in jj]

        if node_grps is None:
            node_grps = [0] * n_nod
        else:
            if len(node_grps) < n_nod:
                node_grps = [ii for jj in node_grps for ii in jj]

        dim = self.get_dimension(coors)
        if dim == 2:
            coors = coors[:, :2]
        coors = nm.ascontiguousarray(coors)

        cell_types = cell_types.squeeze()

        dconns = {}
        for iel, row in enumerate(raw_conn):
            ct = cell_types[iel]
            key = (ct, dim)
            if key not in vtk_inverse_cell_types:
                # Unsupported cell type - skip.
                continue
            # The first entry of the raw row is the node count.
            dconns.setdefault(key, []).append(row[1:] + mat_id[iel])

        desc = []
        conns = []
        # dict.iteritems() does not exist on Python 3.
        for key, conn in dconns.items():
            ct = key[0]
            sct = vtk_inverse_cell_types[key]
            desc.append(sct)

            aconn = nm.array(conn, dtype=nm.int32)
            if ct in vtk_remap_keys:  # Remap pixels and voxels.
                aconn[:, :-1] = aconn[:, vtk_remap[ct]]

            conns.append(aconn)

        conns_in, mat_ids = sort_by_mat_id(conns)
        conns, mat_ids, descs = split_by_mat_id(conns_in, mat_ids, desc)
        mesh._set_data(coors, node_grps, conns, mat_ids, descs)

        return mesh

    def write(self, filename, mesh, out=None, ts=None, **kwargs):

        def _reshape_tensors(data, dim, sym, nc):
            # Expand symmetric/full tensor components into full 3x3 rows.
            if dim == 3:
                if nc == sym:
                    aux = data[:, [0, 3, 4, 3, 1, 5, 4, 5, 2]]
                elif nc == (dim * dim):
                    aux = data[:, [0, 3, 4, 6, 1, 5, 7, 8, 2]]
                else:
                    aux = data.reshape((data.shape[0], dim * dim))
            else:
                zz = nm.zeros((data.shape[0], 1), dtype=nm.float64)
                if nc == sym:
                    aux = nm.c_[data[:, [0, 2]], zz,
                                data[:, [2, 1]], zz,
                                zz, zz, zz]
                elif nc == (dim * dim):
                    aux = nm.c_[data[:, [0, 2]], zz,
                                data[:, [3, 1]], zz,
                                zz, zz, zz]
                else:
                    aux = nm.c_[data[:, 0, [0, 1]], zz,
                                data[:, 1, [0, 1]], zz,
                                zz, zz, zz]
            return aux

        def _write_tensors(data):
            format = self.get_vector_format(3)
            format = '\n'.join([format] * 3) + '\n\n'
            # BUG FIX: the original iterated over `aux` from the enclosing
            # scope instead of the `data` argument (it only worked because
            # every caller happened to pass a variable named aux).
            for row in data:
                fd.write(format % tuple(row))

        if ts is None:
            step, time, nt = 0, 0.0, 0.0
        else:
            step, time, nt = ts.step, ts.time, ts.nt

        fd = open(filename, 'w')
        fd.write(vtk_header % (step, time, nt, op.basename(sys.argv[0])))

        n_nod, dim = mesh.coors.shape
        # Number of independent symmetric tensor components; integer
        # division so that `sym` stays an int on Python 3.
        sym = dim * (dim + 1) // 2

        fd.write('\nPOINTS %d float\n' % n_nod)

        aux = mesh.coors
        if dim == 2:
            # VTK points are always 3D - pad z with zeros.
            aux = nm.hstack((aux, nm.zeros((aux.shape[0], 1),
                                           dtype=aux.dtype)))

        format = self.get_vector_format(3) + '\n'
        for row in aux:
            fd.write(format % tuple(row))

        n_el, n_els, n_e_ps = mesh.n_el, mesh.n_els, mesh.n_e_ps
        total_size = nm.dot(n_els, n_e_ps + 1)
        fd.write('\nCELLS %d %d\n' % (n_el, total_size))

        ct = []
        for ig, conn in enumerate(mesh.conns):
            nn = n_e_ps[ig] + 1
            ct += [vtk_cell_types[mesh.descs[ig]]] * n_els[ig]
            format = ' '.join(['%d'] * nn + ['\n'])
            for row in conn:
                fd.write(format % ((nn - 1,) + tuple(row)))

        fd.write('\nCELL_TYPES %d\n' % n_el)
        fd.write(''.join(['%d\n' % ii for ii in ct]))

        fd.write('\nPOINT_DATA %d\n' % n_nod)

        # node groups
        fd.write('\nSCALARS node_groups int 1\nLOOKUP_TABLE default\n')
        fd.write(''.join(['%d\n' % ii for ii in mesh.ngroups]))

        if out is not None:
            point_keys = [key for key, val in out.items()
                          if val.mode == 'vertex']
        else:
            point_keys = {}

        for key in point_keys:
            val = out[key]
            nr, nc = val.data.shape
            if nc == 1:
                fd.write('\nSCALARS %s float %d\n' % (key, nc))
                fd.write('LOOKUP_TABLE default\n')
                format = self.float_format + '\n'
                for row in val.data:
                    fd.write(format % row)
            elif nc == dim:
                fd.write('\nVECTORS %s float\n' % key)
                if dim == 2:
                    aux = nm.hstack((val.data,
                                     nm.zeros((nr, 1), dtype=nm.float64)))
                else:
                    aux = val.data
                format = self.get_vector_format(3) + '\n'
                for row in aux:
                    fd.write(format % tuple(row))
            elif (nc == sym) or (nc == (dim * dim)):
                fd.write('\nTENSORS %s float\n' % key)
                aux = _reshape_tensors(val.data, dim, sym, nc)
                _write_tensors(aux)
            else:
                raise NotImplementedError(nc)

        if out is not None:
            cell_keys = [key for key, val in out.items()
                         if val.mode == 'cell']
        else:
            cell_keys = {}

        fd.write('\nCELL_DATA %d\n' % n_el)

        # cells - mat_id
        fd.write('SCALARS mat_id int 1\nLOOKUP_TABLE default\n')
        aux = nm.hstack(mesh.mat_ids).tolist()
        fd.write(''.join(['%d\n' % ii for ii in aux]))

        for key in cell_keys:
            val = out[key]
            ne, aux, nr, nc = val.data.shape
            if (nr == 1) and (nc == 1):
                fd.write('\nSCALARS %s float %d\n' % (key, nc))
                fd.write('LOOKUP_TABLE default\n')
                format = self.float_format + '\n'
                aux = val.data.squeeze()
                if len(aux.shape) == 0:
                    fd.write(format % aux)
                else:
                    for row in aux:
                        fd.write(format % row)
            elif (nr == dim) and (nc == 1):
                fd.write('\nVECTORS %s float\n' % key)
                if dim == 2:
                    aux = nm.hstack((val.data.squeeze(),
                                     nm.zeros((ne, 1), dtype=nm.float64)))
                else:
                    aux = val.data
                format = self.get_vector_format(3) + '\n'
                for row in aux:
                    fd.write(format % tuple(row.squeeze()))
            elif (((nr == sym) or (nr == (dim * dim))) and (nc == 1)) \
                    or ((nr == dim) and (nc == dim)):
                fd.write('\nTENSORS %s float\n' % key)
                data = val.data.squeeze()
                aux = _reshape_tensors(data, dim, sym, nr)
                _write_tensors(aux)
            else:
                raise NotImplementedError((nr, nc))

        fd.close()

    def read_data(self, step, filename=None):
        """Point data only!"""
        filename = get_default(filename, self.filename)

        out = {}

        # BUG FIX: the original opened self.filename, ignoring the
        # `filename` argument it had just resolved.
        fd = open(filename, 'r')
        while 1:
            line = skip_read_line(fd, no_eof=True).split()
            if line[0] == 'POINT_DATA':
                break
        n_nod = int(line[1])

        while 1:
            line = skip_read_line(fd)
            if not line:
                break
            line = line.split()

            if line[0] == 'SCALARS':
                name, dtype, nc = line[1:]
                assert_(int(nc) == 1)
                fd.readline()  # skip lookup table line

                data = nm.zeros((n_nod,), dtype=nm.float64)
                ii = 0
                while ii < n_nod:
                    data[ii] = float(fd.readline())
                    ii += 1
                out[name] = Struct(name=name, mode='vertex', data=data,
                                   dofs=None)

            elif line[0] == 'VECTORS':
                name, dtype = line[1:]

                data = []
                ii = 0
                while ii < n_nod:
                    data.append([float(val)
                                 for val in fd.readline().split()])
                    ii += 1
                out[name] = Struct(name=name, mode='vertex',
                                   data=nm.array(data, dtype=nm.float64),
                                   dofs=None)

            elif line[0] == 'CELL_DATA':
                break

            line = fd.readline()
        fd.close()

        return out
##
# c: 15.02.2008
class TetgenMeshIO(MeshIO):
    """Reader for TetGen .node/.ele file pairs (linear tri/tet only)."""
    format = 'tetgen'

    ##
    # c: 15.02.2008, r: 15.02.2008
    def read(self, mesh, **kwargs):
        import os
        fname = os.path.splitext(self.filename)[0]
        nodes = self.getnodes(fname + '.node', MyBar(' nodes:'))
        etype, elements, regions = self.getele(fname + '.ele',
                                               MyBar(' elements:'))
        descs = []
        conns = []
        mat_ids = []
        # TetGen uses 1-based node/element numbering.
        elements = nm.array(elements, dtype=nm.int32) - 1
        # dict.iteritems() does not exist on Python 3.
        for key, value in regions.items():
            descs.append(etype)
            mat_ids.append(nm.ones_like(value) * key)
            conns.append(elements[nm.array(value) - 1].copy())

        mesh._set_data(nodes, None, conns, mat_ids, descs)
        return mesh

    ##
    # c: 15.02.2008, r: 15.02.2008
    @staticmethod
    def getnodes(fnods, up=None, verbose=False):
        """
        Reads t.1.nodes, returns a list of nodes.

        Example:

        >>> self.getnodes("t.1.node", MyBar("nodes:"))
        [(0.0, 0.0, 0.0), (4.0, 0.0, 0.0), (0.0, 4.0, 0.0), ...]
        """
        f = open(fnods)
        l = [int(x) for x in f.readline().split()]
        npoints, dim, nattrib, nbound = l
        # 2D nodes get a zero z coordinate appended.
        if dim == 2:
            ndapp = [0.0]
        else:
            ndapp = []

        if verbose and up is not None:
            up.init(npoints)
        nodes = []
        for line in f:
            if line[0] == '#':
                continue
            l = [float(x) for x in line.split()]
            l = l[:(dim + 1)]
            assert_(int(l[0]) == len(nodes) + 1)
            l = l[1:]
            nodes.append(tuple(l + ndapp))
            if verbose and up is not None:
                up.update(len(nodes))
        assert_(npoints == len(nodes))
        return nodes

    ##
    # c: 15.02.2008, r: 15.02.2008
    @staticmethod
    def getele(fele, up=None, verbose=False):
        """
        Reads t.1.ele, returns the element type, a list of elements and a
        mapping of region number to element numbers.

        Example:

        >>> elements, regions = self.getele("t.1.ele", MyBar("elements:"))
        """
        # The `file` builtin does not exist on Python 3.
        f = open(fele)
        l = [int(x) for x in f.readline().split()]
        ntetra, nnod, nattrib = l
        # we have either linear or quadratic tetrahedra:
        elem = None
        if nnod in [4, 10]:
            elem = '3_4'
            linear = (nnod == 4)
        if nnod in [3, 7]:
            elem = '2_3'
            linear = (nnod == 3)
        if elem is None or not linear:
            raise Exception("Only linear triangle and tetrahedra reader is implemented")

        if verbose and up is not None:
            up.init(ntetra)

        els = []
        regions = {}
        for line in f:
            if line[0] == '#':
                continue
            l = [int(x) for x in line.split()]
            if elem == '2_3':
                assert_((len(l) - 1 - nattrib) == 3)
                els.append((l[1], l[2], l[3]))
            if elem == '3_4':
                assert_((len(l) - 1 - nattrib) == 4)
                els.append((l[1], l[2], l[3], l[4]))
            if nattrib == 1:
                regionnum = l[-1]
            else:
                regionnum = 1

            if regionnum == 0:
                print("see %s, element # %d" % (fele, l[0]))
                # Raising a plain string is invalid Python (TypeError at
                # runtime); raise a real exception instead.
                raise RuntimeError("there are elements not belonging to any physical entity")

            # dict.has_key() does not exist on Python 3.
            if regionnum in regions:
                regions[regionnum].append(l[0])
            else:
                regions[regionnum] = [l[0]]

            assert_(l[0] == len(els))
            if verbose and up is not None:
                up.update(l[0])
        return elem, els, regions

    ##
    # c: 26.03.2008, r: 26.03.2008
    def write(self, filename, mesh, out=None, **kwargs):
        raise NotImplementedError

    def read_dimension(self):
        # TetGen only supports 3D mesh
        return 3

    ##
    # c: 22.07.2008
    def read_bounding_box(self):
        raise NotImplementedError
##
# c: 20.03.2008
class ComsolMeshIO(MeshIO):
format = "comsol"
##
# c: 20.03.2008, r: 20.03.2008
def _read_commented_int(self):
    """Read the next non-empty line and return its integer value,
    ignoring any trailing '#'-comment."""
    value = skip_read_line(self.fd).split('#')[0]
    return int(value)
def _skip_comment(self):
    """Consume a comment token and the remainder of its line."""
    read_token(self.fd)
    self.fd.readline()
##
# c: 20.03.2008, r: 20.03.2008
def read(self, mesh, **kwargs):
    """
    Read a COMSOL .txt mesh: header, point coordinates, then per-type
    element blocks (tri/quad/tet/hex are kept, others skipped).
    NOTE: xrange was replaced by range throughout - xrange does not exist
    on Python 3 (range is equivalent there; on Python 2 the loop counts
    here are small, so a list range is harmless).
    """
    self.fd = fd = open(self.filename, 'r')
    mode = 'header'

    coors = conns = desc = None
    while 1:
        if mode == 'header':
            line = skip_read_line(fd)

            n_tags = self._read_commented_int()
            for ii in range(n_tags):
                skip_read_line(fd)

            n_types = self._read_commented_int()
            for ii in range(n_types):
                skip_read_line(fd)

            skip_read_line(fd)
            assert_(skip_read_line(fd).split()[1] == 'Mesh')
            skip_read_line(fd)
            dim = self._read_commented_int()
            assert_((dim == 2) or (dim == 3))
            n_nod = self._read_commented_int()
            i0 = self._read_commented_int()
            mode = 'points'

        elif mode == 'points':
            self._skip_comment()
            coors = read_array(fd, n_nod, dim, nm.float64)
            mode = 'cells'

        elif mode == 'cells':

            n_types = self._read_commented_int()
            conns = []
            descs = []
            mat_ids = []
            for it in range(n_types):
                t_name = skip_read_line(fd).split()[1]
                n_ep = self._read_commented_int()
                n_el = self._read_commented_int()

                self._skip_comment()
                aux = read_array(fd, n_el, n_ep, nm.int32)
                if t_name == 'tri':
                    conns.append(aux)
                    descs.append('2_3')
                    is_conn = True
                elif t_name == 'quad':
                    # Rearrange element node order to match SfePy.
                    aux = aux[:, (0, 1, 3, 2)]
                    conns.append(aux)
                    descs.append('2_4')
                    is_conn = True
                elif t_name == 'hex':
                    # Rearrange element node order to match SfePy.
                    aux = aux[:, (0, 1, 3, 2, 4, 5, 7, 6)]
                    conns.append(aux)
                    descs.append('3_8')
                    is_conn = True
                elif t_name == 'tet':
                    conns.append(aux)
                    descs.append('3_4')
                    is_conn = True
                else:
                    is_conn = False

                # Skip parameters.
                n_pv = self._read_commented_int()
                n_par = self._read_commented_int()
                for ii in range(n_par):
                    skip_read_line(fd)

                n_domain = self._read_commented_int()
                assert_(n_domain == n_el)
                if is_conn:
                    self._skip_comment()
                    mat_id = read_array(fd, n_domain, 1, nm.int32)
                    mat_ids.append(mat_id)
                else:
                    for ii in range(n_domain):
                        skip_read_line(fd)

                # Skip up/down pairs.
                n_ud = self._read_commented_int()
                for ii in range(n_ud):
                    skip_read_line(fd)
            break

    fd.close()
    self.fd = None

    # Append mat_ids as the last connectivity column, then sort/split.
    conns2 = []
    for ii, conn in enumerate(conns):
        conns2.append(nm.c_[conn, mat_ids[ii]])

    conns_in, mat_ids = sort_by_mat_id(conns2)
    conns, mat_ids, descs = split_by_mat_id(conns_in, mat_ids, descs)
    mesh._set_data(coors, None, conns, mat_ids, descs)

    return mesh
def write(self, filename, mesh, out=None, **kwargs):
    """
    Write `mesh` into `filename` in the COMSOL mphtxt ASCII format.

    `out` data are not supported - passing a non-empty dict raises
    NotImplementedError.
    """
    def write_elements(
        fd, ig, conn, mat_ids, type_name, npe, format, norder, nm_params
    ):
        # Write one element-type record: header, connectivity rows (node
        # order permuted back from SfePy to COMSOL via `norder`), an empty
        # parameter section, the per-element domain numbers and an empty
        # up/down section.
        fd.write("# Type #%d\n\n" % ig)
        fd.write("%s # type name\n\n\n" % type_name)
        fd.write("%d # number of nodes per element\n" % npe)
        fd.write("%d # number of elements\n" % conn.shape[0])
        fd.write("# Elements\n")
        for ii in range(conn.shape[0]):
            nn = conn[ii]  # Zero based
            fd.write(format % tuple(nn[norder]))
        fd.write("\n%d # number of parameter values per element\n" % nm_params)
        # Top level always 0?
        fd.write("0 # number of parameters\n")
        fd.write("# Parameters\n\n")
        fd.write("%d # number of domains\n" % sum([mi.shape[0] for mi in mat_ids]))
        fd.write("# Domains\n")
        for mi in mat_ids:
            # Domains in comsol have to be > 0
            if (mi <= 0).any():
                # NOTE(review): shifting by mi.min() + 1 does not make all
                # entries positive when mi.min() < 0; the abs() below masks
                # this - confirm the intended transformation.
                mi += mi.min() + 1
            for dom in mi:
                fd.write("%d\n" % abs(dom))
        fd.write("\n0 # number of up/down pairs\n")
        fd.write("# Up/down\n")

    fd = open(filename, "w")

    coors = mesh.coors
    conns, desc, mat_ids = join_conn_groups(mesh.conns, mesh.descs, mesh.mat_ids)

    n_nod, dim = coors.shape

    # Header
    fd.write("# Created by SfePy\n\n\n")
    fd.write("# Major & minor version\n")
    fd.write("0 1\n")
    fd.write("1 # number of tags\n")
    fd.write("# Tags\n")
    fd.write("2 m1\n")
    fd.write("1 # number of types\n")
    fd.write("# Types\n")
    fd.write("3 obj\n\n")

    # Record
    fd.write("# --------- Object 0 ----------\n\n")
    fd.write("0 0 1\n")  # version unused serializable
    fd.write("4 Mesh # class\n")
    fd.write("1 # version\n")
    fd.write("%d # sdim\n" % dim)
    fd.write("%d # number of mesh points\n" % n_nod)
    fd.write("0 # lowest mesh point index\n\n")  # Always zero in SfePy

    fd.write("# Mesh point coordinates\n")

    format = self.get_vector_format(dim) + "\n"
    for ii in range(n_nod):
        nn = tuple(coors[ii])
        fd.write(format % tuple(nn))

    fd.write("\n%d # number of element types\n\n\n" % len(conns))

    # The node permutations and parameter counts mirror what read() undoes.
    for ig, conn in enumerate(conns):
        if desc[ig] == "2_4":
            write_elements(
                fd, ig, conn, mat_ids, "4 quad", 4, "%d %d %d %d\n", [0, 1, 3, 2], 8
            )
        elif desc[ig] == "2_3":
            # TODO: Verify number of parameters for tri element
            write_elements(
                fd, ig, conn, mat_ids, "3 tri", 3, "%d %d %d\n", [0, 1, 2], 4
            )
        elif desc[ig] == "3_4":
            # TODO: Verify number of parameters for tet element
            write_elements(
                fd, ig, conn, mat_ids, "3 tet", 4, "%d %d %d %d\n", [0, 1, 2, 3], 16
            )
        elif desc[ig] == "3_8":
            write_elements(
                fd,
                ig,
                conn,
                mat_ids,
                "3 hex",
                8,
                "%d %d %d %d %d %d %d %d\n",
                [0, 1, 3, 2, 4, 5, 7, 6],
                24,
            )
        else:
            print("unknown element type!", desc[ig])
            raise ValueError

    fd.close()

    if out is not None:
        # Writing of output data is not supported for this format.
        for key, val in out.iteritems():
            raise NotImplementedError
##
# c: 23.06.2008
class HDF5MeshIO(MeshIO):
    """
    Mesh and simulation-output I/O backed by a PyTables HDF5 file.

    File layout:

    - ``/mesh`` : static mesh data (name, coors, ngroups, n_gr, and one
      ``group%d`` subgroup per connectivity group),
    - ``/step%d`` : output data of one time step,
    - ``/ts`` : time stepper parameters (t0, t1, dt, n_step),
    - ``/tstat`` : file creation/closing time stamps,
    - ``/last_step`` : number of the last saved step.

    NOTE: uses the old camel-case PyTables 2.x API (openFile,
    createGroup, createArray, ...).
    """

    format = "hdf5"

    import string

    # Translation table mapping every byte that is not a letter, digit or
    # '_' to '_' - used to sanitize arbitrary data keys into valid HDF5
    # node names, see write().
    _all = "".join(map(chr, range(256)))
    _letters = string_letters + string.digits + "_"
    _rubbish = "".join([ch for ch in set(_all) - set(_letters)])
    _tr = string_maketrans(_rubbish, "_" * len(_rubbish))

    def read(self, mesh, **kwargs):
        """Fill `mesh` from the ``/mesh`` group and return it."""
        fd = pt.openFile(self.filename, mode="r")

        mesh_group = fd.root.mesh

        mesh.name = mesh_group.name.read()
        coors = mesh_group.coors.read()
        ngroups = mesh_group.ngroups.read()

        n_gr = mesh_group.n_gr.read()

        conns = []
        descs = []
        mat_ids = []
        # One 'group%d' subgroup per connectivity (element) group.
        for ig in xrange(n_gr):
            gr_name = "group%d" % ig
            group = mesh_group._f_getChild(gr_name)
            conns.append(group.conn.read())
            mat_ids.append(group.mat_id.read())
            descs.append(group.desc.read())

        fd.close()
        mesh._set_data(coors, ngroups, conns, mat_ids, descs)

        return mesh

    def write(self, filename, mesh, out=None, ts=None, **kwargs):
        """
        Write `mesh` (only when a new file is started, i.e. step 0) and,
        if given, the output data `out` of time step `ts`.

        Parameters
        ----------
        out : dict, optional
            Data to store; values are Struct-like with data, mode, dofs,
            name, and optionally shape, var_name, field_name.
        ts : time stepper, optional
            If None, a single snapshot is stored as step 0.
        """
        from time import asctime

        if pt is None:
            output("pytables not imported!")
            raise ValueError

        # Without a time stepper, everything is stored as step 0.
        step = get_default_attr(ts, "step", 0)
        if step == 0:
            # A new file.
            fd = pt.openFile(filename, mode="w", title="SfePy output file")

            mesh_group = fd.createGroup("/", "mesh", "mesh")

            fd.createArray(mesh_group, "name", mesh.name, "name")
            fd.createArray(mesh_group, "coors", mesh.coors, "coors")
            fd.createArray(mesh_group, "ngroups", mesh.ngroups, "ngroups")
            fd.createArray(mesh_group, "n_gr", len(mesh.conns), "n_gr")
            for ig, conn in enumerate(mesh.conns):
                conn_group = fd.createGroup(
                    mesh_group, "group%d" % ig, "connectivity group"
                )
                fd.createArray(conn_group, "conn", conn, "connectivity")
                fd.createArray(conn_group, "mat_id", mesh.mat_ids[ig], "material id")
                fd.createArray(conn_group, "desc", mesh.descs[ig], "element Type")

            if ts is not None:
                ts_group = fd.createGroup("/", "ts", "time stepper")
                fd.createArray(ts_group, "t0", ts.t0, "initial time")
                fd.createArray(ts_group, "t1", ts.t1, "final time")
                fd.createArray(ts_group, "dt", ts.dt, "time step")
                fd.createArray(ts_group, "n_step", ts.n_step, "n_step")

            tstat_group = fd.createGroup("/", "tstat", "global time statistics")
            fd.createArray(tstat_group, "created", asctime(), "file creation time")
            # Placeholder of fixed width, replaced on every data write below.
            fd.createArray(tstat_group, "finished", "." * 24, "file closing time")

            fd.createArray(
                fd.root, "last_step", nm.array([0], dtype=nm.int32), "last saved step"
            )

            fd.close()

        if out is not None:
            if ts is None:
                step, time, nt = 0, 0.0, 0.0
            else:
                step, time, nt = ts.step, ts.time, ts.nt

            # Existing file.
            fd = pt.openFile(filename, mode="r+")

            step_group = fd.createGroup("/", "step%d" % step, "time step data")
            ts_group = fd.createGroup(step_group, "ts", "time stepper")
            fd.createArray(ts_group, "step", step, "step")
            fd.createArray(ts_group, "t", time, "time")
            fd.createArray(ts_group, "nt", nt, "normalized time")

            name_dict = {}
            for key, val in out.iteritems():
                # print key
                # (-1,) is a sentinel for unknown dofs, undone in read_data().
                dofs = get_default(val.dofs, (-1,))
                shape = val.get("shape", val.data.shape)
                var_name = val.get("var_name", "None")

                # Sanitize the key into a valid HDF5 node name.
                group_name = "__" + key.translate(self._tr)
                data_group = fd.createGroup(step_group, group_name, "%s data" % key)
                fd.createArray(data_group, "data", val.data, "data")
                fd.createArray(data_group, "mode", val.mode, "mode")
                fd.createArray(data_group, "dofs", dofs, "dofs")
                fd.createArray(data_group, "shape", shape, "shape")
                fd.createArray(data_group, "name", val.name, "object name")
                fd.createArray(data_group, "var_name", var_name, "object parent name")
                # The original (unsanitized) key, used when reading back.
                fd.createArray(data_group, "dname", key, "data name")
                if val.mode == "full":
                    fd.createArray(
                        data_group, "field_name", val.field_name, "field name"
                    )

                name_dict[key] = group_name

            # Map original keys -> sanitized group names.
            step_group._v_attrs.name_dict = name_dict
            fd.root.last_step[0] = step

            # Refresh the closing-time stamp.
            fd.removeNode(fd.root.tstat.finished)
            fd.createArray(fd.root.tstat, "finished", asctime(), "file closing time")
            fd.close()

    def read_last_step(self, filename=None):
        """Return the number of the last saved step."""
        filename = get_default(filename, self.filename)
        fd = pt.openFile(filename, mode="r")
        last_step = fd.root.last_step[0]
        fd.close()
        return last_step

    def read_time_stepper(self, filename=None):
        """
        Return (t0, t1, dt, n_step) from the ``/ts`` group; raise
        ValueError when the group is missing.
        """
        filename = get_default(filename, self.filename)
        fd = pt.openFile(filename, mode="r")

        try:
            ts_group = fd.root.ts
            out = (
                ts_group.t0.read(),
                ts_group.t1.read(),
                ts_group.dt.read(),
                ts_group.n_step.read(),
            )
        except:
            raise ValueError("no time stepper found!")
        finally:
            fd.close()

        return out

    def read_times(self, filename=None):
        """
        Read true time step data from individual time steps.

        Returns
        -------
        steps : array
            The time steps.
        times : array
            The times of the time steps.
        nts : array
            The normalized times of the time steps, in [0, 1].
        """
        filename = get_default(filename, self.filename)
        fd = pt.openFile(filename, mode="r")

        # Step numbers are encoded in the 'step%d' group names.
        steps = sorted(
            int(name[4:])
            for name in fd.root._v_groups.keys()
            if name.startswith("step")
        )

        times = []
        nts = []
        for step in steps:
            ts_group = fd.getNode(fd.root, "step%d/ts" % step)
            times.append(ts_group.t.read())
            nts.append(ts_group.nt.read())
        fd.close()

        steps = nm.asarray(steps, dtype=nm.int32)
        times = nm.asarray(times, dtype=nm.float64)
        nts = nm.asarray(nts, dtype=nm.float64)

        return steps, times, nts

    def _get_step_group(self, step, filename=None):
        """
        Open the file and return (fd, step group) for the given step, or
        (None, None) when the step is missing. The caller closes `fd`.
        """
        filename = get_default(filename, self.filename)
        fd = pt.openFile(filename, mode="r")

        gr_name = "step%d" % step
        try:
            step_group = fd.getNode(fd.root, gr_name)
        except:
            output("step %d data not found - premature end of file?" % step)
            fd.close()
            return None, None

        return fd, step_group

    def read_data(self, step, filename=None):
        """
        Read all data of the given step into a dict of Structs keyed by
        the original data names; return None when the step is missing.
        """
        fd, step_group = self._get_step_group(step, filename=filename)
        if fd is None:
            return None

        out = {}
        for data_group in step_group:
            try:
                key = data_group.dname.read()
            except pt.exceptions.NoSuchNodeError:
                # Not a data group (e.g. the 'ts' subgroup) - skip.
                continue

            name = data_group.name.read()
            mode = data_group.mode.read()
            data = data_group.data.read()
            dofs = tuple(data_group.dofs.read())
            try:
                shape = tuple(data_group.shape.read())
            except pt.exceptions.NoSuchNodeError:
                # Older files may not store the shape explicitly.
                shape = data.shape

            if mode == "full":
                field_name = data_group.field_name.read()
            else:
                field_name = None

            out[key] = Struct(
                name=name,
                mode=mode,
                data=data,
                dofs=dofs,
                shape=shape,
                field_name=field_name,
            )

            # Undo the (-1,) unknown-dofs sentinel, see write().
            if out[key].dofs == (-1,):
                out[key].dofs = None

        fd.close()

        return out

    def read_data_header(self, dname, step=0, filename=None):
        """
        Return (mode, group name) of the data named `dname` in the given
        step; raise KeyError when it is not present.
        """
        fd, step_group = self._get_step_group(step, filename=filename)
        if fd is None:
            return None

        groups = step_group._v_groups
        for name, data_group in groups.iteritems():
            try:
                key = data_group.dname.read()
            except pt.exceptions.NoSuchNodeError:
                continue

            if key == dname:
                mode = data_group.mode.read()
                fd.close()
                return mode, name

        fd.close()
        raise KeyError("non-existent data: %s" % dname)

    def read_time_history(self, node_name, indx, filename=None):
        """
        Collect the history of data items `indx` of the node `node_name`
        over all saved steps; return a dict {index : array}.
        """
        filename = get_default(filename, self.filename)
        fd = pt.openFile(filename, mode="r")

        th = dict_from_keys_init(indx, list)
        for step in xrange(fd.root.last_step[0] + 1):
            gr_name = "step%d" % step

            step_group = fd.getNode(fd.root, gr_name)
            data = step_group._f_getChild(node_name).data

            for ii in indx:
                th[ii].append(nm.array(data[ii]))

        fd.close()

        for key, val in th.iteritems():
            aux = nm.array(val)
            if aux.ndim == 4:  # cell data.
                aux = aux[:, 0, :, 0]
            th[key] = aux

        return th

    def read_variables_time_history(self, var_names, ts, filename=None):
        """
        Read the full history of the given variables; the file must
        contain exactly ts.n_step saved steps.
        """
        filename = get_default(filename, self.filename)
        fd = pt.openFile(filename, mode="r")

        assert_((fd.root.last_step[0] + 1) == ts.n_step)

        ths = dict_from_keys_init(var_names, list)

        arr = nm.asarray
        for step in xrange(ts.n_step):
            gr_name = "step%d" % step
            step_group = fd.getNode(fd.root, gr_name)
            # name_dict maps original keys -> sanitized group names.
            name_dict = step_group._v_attrs.name_dict

            for var_name in var_names:
                data = step_group._f_getChild(name_dict[var_name]).data
                ths[var_name].append(arr(data.read()))

        fd.close()

        return ths
class MEDMeshIO(MeshIO):
    """
    Reader for MED (Salome) HDF5-based mesh files (read-only).

    Only the first mesh under ``/ENS_MAA`` is read.
    """

    format = "med"

    def read(self, mesh, **kwargs):
        """Read coordinates, node groups and cells into `mesh`."""
        fd = pt.openFile(self.filename, mode="r")

        mesh_root = fd.root.ENS_MAA

        # TODO: Loop through multiple meshes?
        # list() so that indexing also works on Python 3, where dict
        # views are not subscriptable.
        mesh_group = mesh_root._f_getChild(list(mesh_root._v_groups.keys())[0])

        mesh.name = mesh_group._v_name

        coors = mesh_group.NOE.COO.read()
        n_nodes = mesh_group.NOE.COO.getAttr("NBR")
        # Unflatten the component-wise coordinate array; '//' keeps the
        # shape integral on Python 3 (plain '/' would yield a float).
        coors = coors.reshape(coors.shape[0] // n_nodes, n_nodes).transpose()
        dim = coors.shape[1]

        ngroups = mesh_group.NOE.FAM.read()
        assert_((ngroups >= 0).all())

        # Dict to map MED element names to SfePy descs
        # NOTE: The commented lines are elements which
        # produce KeyError in SfePy
        med_descs = {
            "TE4": "3_4",
            #'T10' : '3_10',
            #'PY5' : '3_5',
            #'P13' : '3_13',
            "HE8": "3_8",
            #'H20' : '3_20',
            #'PE6' : '3_6',
            #'P15' : '3_15',
            # TODO: Polyhedrons (POE) - need special handling
            "TR3": "2_3",
            #'TR6' : '2_6',
            "QU4": "2_4",
            #'QU8' : '2_8',
            # TODO: Polygons (POG) - need special handling
            #'SE2' : '1_2',
            #'SE3' : '1_3',
        }

        conns = []
        descs = []
        mat_ids = []

        # .items() iterates identically on Python 2 and 3 (iteritems()
        # is Python 2 only).
        for md, desc in med_descs.items():
            # Only read cells whose topological dimension matches the mesh.
            if int(desc[0]) != dim:
                continue

            try:
                group = mesh_group.MAI._f_getChild(md)

                conn = group.NOD.read()
                n_conns = group.NOD.getAttr("NBR")
                # (0 based indexing in numpy vs. 1 based in MED)
                conn = conn.reshape(conn.shape[0] // n_conns, n_conns).transpose() - 1
                conns.append(conn)

                mat_id = group.FAM.read()
                # MED stores cell families as non-positive numbers.
                assert_((mat_id <= 0).all())
                mat_id = nm.abs(mat_id)
                mat_ids.append(mat_id)

                descs.append(desc)

            except pt.exceptions.NoSuchNodeError:
                # The element type is simply not present in this mesh.
                pass

        fd.close()
        mesh._set_data(coors, ngroups, conns, mat_ids, descs)

        return mesh
class Mesh3DMeshIO(MeshIO):
    """
    Reader for the mesh3d format: a sequence of counted sections
    (vertices, tetras, hexes, prisms, tris, quads).
    """

    format = "mesh3d"

    def read(self, mesh, **kwargs):
        """
        Read vertices and tetra/hexa cells into `mesh`; prism, triangle
        and quad sections are read (to advance the file) but ignored.
        """
        # BUG FIX: use a context manager - the original handle was never
        # closed (resource leak).
        with open(self.filename) as f:
            # read the whole file:
            vertices = self._read_section(f, integer=False)
            tetras = self._read_section(f)
            hexes = self._read_section(f)
            prisms = self._read_section(f)
            tris = self._read_section(f)
            quads = self._read_section(f)

        # substract 1 from all elements, because we count from 0:
        conns = []
        mat_ids = []
        descs = []
        if len(tetras) > 0:
            conns.append(tetras - 1)
            mat_ids.append([0] * len(tetras))
            descs.append("3_4")
        if len(hexes) > 0:
            conns.append(hexes - 1)
            mat_ids.append([0] * len(hexes))
            descs.append("3_8")

        mesh._set_data(vertices, None, conns, mat_ids, descs)
        return mesh

    def read_dimension(self):
        # mesh3d files always describe 3D meshes.
        return 3

    def _read_line(self, f):
        """
        Reads one non empty line (if it's a comment, it skips it).
        """
        # NOTE(review): at EOF readline() returns '' forever, so a
        # truncated file makes this loop spin - confirm inputs are
        # well-formed.
        l = f.readline().strip()
        while l == "" or l[0] == "#":  # comment or an empty line
            l = f.readline().strip()
        return l

    def _read_section(self, f, integer=True):
        """
        Reads one section from the mesh3d file.

        integer ... if True, all numbers are passed to int(), otherwise to
        float(), before returning

        A section is a count N on its own line followed by N rows of
        whitespace-separated numbers (N may be 0).
        """
        if integer:
            dtype = int
        else:
            dtype = float
        l = self._read_line(f)
        N = int(l)
        rows = []
        for i in range(N):
            l = self._read_line(f)
            row = nm.fromstring(l, sep=" ", dtype=dtype)
            rows.append(row)
        return nm.array(rows)
def mesh_from_groups(
    mesh,
    ids,
    coors,
    ngroups,
    tris,
    mat_tris,
    quads,
    mat_quads,
    tetras,
    mat_tetras,
    hexas,
    mat_hexas,
):
    """
    Fill `mesh` from per-cell-type connectivities given in terms of
    arbitrary (possibly sparse) node ids, together with the coordinates
    and material ids, and return it.
    """
    ids = nm.asarray(ids, dtype=nm.int32)
    coors = nm.asarray(coors, dtype=nm.float64)
    n_nod = coors.shape[0]

    # Renumbering table: external node id -> 0-based contiguous index.
    remap = nm.zeros((ids.max() + 1,), dtype=nm.int32)
    remap[ids] = nm.arange(n_nod, dtype=nm.int32)

    conns = [
        remap[nm.array(cells, dtype=nm.int32)]
        for cells in (tris, quads, tetras, hexas)
    ]
    mat_ids = [
        nm.array(mats, dtype=nm.int32)
        for mats in (mat_tris, mat_quads, mat_tetras, mat_hexas)
    ]
    descs = ["2_3", "2_4", "3_4", "3_8"]

    conns, mat_ids = sort_by_mat_id2(conns, mat_ids)
    conns, mat_ids, descs = split_by_mat_id(conns, mat_ids, descs)
    mesh._set_data(coors, ngroups, conns, mat_ids, descs)

    return mesh
class AVSUCDMeshIO(MeshIO):
    """Reader for AVS UCD ASCII meshes (volume cells 'tet' and 'hex' only)."""

    format = "avs_ucd"

    @staticmethod
    def guess(filename):
        # There is no cheap signature test for this format.
        return True

    def read(self, mesh, **kwargs):
        """Read node coordinates and tet/hex connectivities into `mesh`."""
        fd = open(self.filename, "r")

        # Skip the leading comment block.
        line = fd.readline()
        while not (line and line[0] != "#"):
            line = fd.readline()

        counts = [int(word) for word in line.split()]
        n_nod, n_el = counts[0], counts[1]

        dim = 3
        ids = nm.zeros((n_nod,), dtype=nm.int32)
        coors = nm.zeros((n_nod, dim), dtype=nm.float64)
        for inod in xrange(n_nod):
            words = fd.readline().split()
            ids[inod] = int(words[0])
            coors[inod] = [float(word) for word in words[1:]]

        tetras, mat_tetras = [], []
        hexas, mat_hexas = [], []
        for icell in xrange(n_el):
            words = fd.readline().split()
            cell_type = words[2]
            if cell_type == "tet":
                mat_tetras.append(int(words[1]))
                tetras.append([int(iw) for iw in words[3:]])
            elif cell_type == "hex":
                mat_hexas.append(int(words[1]))
                hexas.append([int(iw) for iw in words[3:]])
        fd.close()

        mesh = mesh_from_groups(
            mesh, ids, coors, None, [], [], [], [], tetras, mat_tetras, hexas, mat_hexas
        )

        return mesh

    def read_dimension(self):
        # Coordinates are always read as 3D.
        return 3

    def write(self, filename, mesh, out=None, **kwargs):
        raise NotImplementedError
class HypermeshAsciiMeshIO(MeshIO):
    """
    Reader/writer for Altair HyperMesh ASCII (hmascii) decks.

    Reading supports ``*node``, ``*tetra4`` and ``*hexa8`` records;
    writing supports 1_2, 2_3, 2_4, 3_4 and 3_8 element groups.
    """

    format = "hmascii"

    def read(self, mesh, **kwargs):
        """Read nodes and tetra4/hexa8 cells (material id = 2nd field)."""
        fd = open(self.filename, "r")

        ids = []
        coors = []
        tetras = []
        mat_tetras = []
        hexas = []
        mat_hexas = []

        for line in fd:
            if line and (line[0] == "*"):
                # Records look like *node(id,x,y,z,...); strip the
                # keyword, parentheses and split the argument list.
                if line[1:5] == "node":
                    line = line.strip()[6:-1].split(",")
                    ids.append(int(line[0]))
                    coors.append([float(coor) for coor in line[1:4]])
                elif line[1:7] == "tetra4":
                    line = line.strip()[8:-1].split(",")
                    mat_tetras.append(int(line[1]))
                    tetras.append([int(ic) for ic in line[2:6]])
                elif line[1:6] == "hexa8":
                    line = line.strip()[7:-1].split(",")
                    mat_hexas.append(int(line[1]))
                    hexas.append([int(ic) for ic in line[2:10]])
        fd.close()

        mesh = mesh_from_groups(
            mesh, ids, coors, None, [], [], [], [], tetras, mat_tetras, hexas, mat_hexas
        )

        return mesh

    def read_dimension(self):
        # Nodes are always read with three coordinates.
        return 3

    def write(self, filename, mesh, out=None, **kwargs):
        """
        Write the mesh as a HyperMesh ASCII deck; `out` data are not
        supported.
        """
        fd = open(filename, "w")

        coors = mesh.coors
        conns, desc = join_conn_groups(
            mesh.conns, mesh.descs, mesh.mat_ids, concat=True
        )

        n_nod, dim = coors.shape

        fd.write("HYPERMESH Input Deck Generated by Sfepy MeshIO\n")
        fd.write("*filetype(ASCII)\n*version(11.0.0.47)\n\n")
        fd.write("BEGIN DATA\n")

        fd.write("BEGIN NODES\n")
        # NOTE(review): the node record hard-codes three float fields,
        # so only dim == 3 meshes are written correctly - confirm.
        for ii in range(n_nod):
            nn = (ii + 1,) + tuple(coors[ii])
            fd.write("*node(%d,%f,%f,%f,0,0,0,1,1)\n" % nn)
        fd.write("END NODES\n\n")

        fd.write("BEGIN COMPONENTS\n")
        for ig, conn in enumerate(conns):
            fd.write('*component(%d,"component%d",0,1,0)\n' % (ig + 1, ig + 1))
            # The last column of each row is the material id appended by
            # join_conn_groups(concat=True); node numbers become 1-based.
            if desc[ig] == "1_2":
                for ii in range(conn.shape[0]):
                    nn = (ii + 1,) + tuple(conn[ii, :-1] + 1)
                    fd.write("*bar2(%d,1,%d,%d,0)\n" % nn)
            elif desc[ig] == "2_4":
                for ii in range(conn.shape[0]):
                    nn = (ii + 1,) + tuple(conn[ii, :-1] + 1)
                    fd.write("*quad4(%d,1,%d,%d,%d,%d,0)\n" % nn)
            elif desc[ig] == "2_3":
                for ii in range(conn.shape[0]):
                    nn = (ii + 1,) + tuple(conn[ii, :-1] + 1)
                    fd.write("*tria3(%d,1,%d,%d,%d,0)\n" % nn)
            elif desc[ig] == "3_4":
                for ii in range(conn.shape[0]):
                    nn = (ii + 1,) + tuple(conn[ii, :-1] + 1)
                    fd.write("*tetra4(%d,1,%d,%d,%d,%d,0)\n" % nn)
            elif desc[ig] == "3_8":
                for ii in range(conn.shape[0]):
                    nn = (ii + 1,) + tuple(conn[ii, :-1] + 1)
                    fd.write("*hex8(%d,1,%d,%d,%d,%d,%d,%d,%d,%d,0)\n" % nn)
            else:
                raise ValueError("unknown element type! (%s)" % desc[ig])
        # BUG FIX: the closing marker was written as "BEGIN COMPONENTS",
        # leaving the BEGIN/END pair unbalanced (cf. BEGIN/END NODES).
        fd.write("END COMPONENTS\n\n")
        fd.write("END DATA\n")
        fd.close()

        if out is not None:
            # Writing of output data is not supported for this format.
            for key, val in out.iteritems():
                raise NotImplementedError
class AbaqusMeshIO(MeshIO):
    """
    Reader for ABAQUS keyword input decks.

    Supports C3D4 (tetra), C3D8 (hexa) and 2D CPS*/CPE* (tria/quad)
    elements; *NSET sections are collected into node groups.
    """

    format = "abaqus"

    def guess(filename):
        # Accept the file when a '*node' keyword occurs within the first
        # 100 lines.
        ok = False
        fd = open(filename, "r")
        for ii in xrange(100):
            try:
                line = fd.readline().strip().split(",")
            except:
                break
            if line[0].lower() == "*node":
                ok = True
                break
        fd.close()
        return ok

    guess = staticmethod(guess)

    def read(self, mesh, **kwargs):
        """Read nodes, elements and node sets into `mesh`."""
        fd = open(self.filename, "r")

        ids = []
        coors = []
        tetras = []
        mat_tetras = []
        hexas = []
        mat_hexas = []
        tris = []
        mat_tris = []
        quads = []
        mat_quads = []
        nsets = {}
        ing = 1

        dim = 0
        line = fd.readline().split(",")
        while 1:
            if not line[0]:
                break
            token = line[0].strip().lower()
            if token == "*node":
                while 1:
                    line = fd.readline().split(",")
                    if (not line[0]) or (line[0][0] == "*"):
                        break
                    # The dimension is taken from the first node record.
                    if dim == 0:
                        dim = len(line) - 1
                    ids.append(int(line[0]))
                    if dim == 2:
                        coors.append([float(coor) for coor in line[1:3]])
                    else:
                        coors.append([float(coor) for coor in line[1:4]])
            elif token == "*element":
                if line[1].find("C3D8") >= 0:
                    while 1:
                        line = fd.readline().split(",")
                        if (not line[0]) or (line[0][0] == "*"):
                            break
                        mat_hexas.append(0)
                        hexas.append([int(ic) for ic in line[1:9]])
                elif line[1].find("C3D4") >= 0:
                    while 1:
                        line = fd.readline().split(",")
                        if (not line[0]) or (line[0][0] == "*"):
                            break
                        mat_tetras.append(0)
                        tetras.append([int(ic) for ic in line[1:5]])
                elif line[1].find("CPS") >= 0 or line[1].find("CPE") >= 0:
                    if line[1].find("4") >= 0:
                        while 1:
                            line = fd.readline().split(",")
                            if (not line[0]) or (line[0][0] == "*"):
                                break
                            mat_quads.append(0)
                            quads.append([int(ic) for ic in line[1:5]])
                    elif line[1].find("3") >= 0:
                        while 1:
                            line = fd.readline().split(",")
                            if (not line[0]) or (line[0][0] == "*"):
                                break
                            mat_tris.append(0)
                            tris.append([int(ic) for ic in line[1:4]])
                    else:
                        raise ValueError("unknown element type! (%s)" % line[1])
                else:
                    raise ValueError("unknown element type! (%s)" % line[1])
            elif token == "*nset":
                # Generated (from-to-step) node sets are not supported.
                # NOTE(review): the skipped line is left unsplit here, so
                # the next iteration inspects single characters - confirm
                # against decks using 'generate'.
                if line[-1].strip().lower() == "generate":
                    line = fd.readline()
                    continue
                while 1:
                    line = fd.readline().strip().split(",")
                    if (not line[0]) or (line[0][0] == "*"):
                        break
                    # Drop the empty item caused by a trailing comma.
                    if not line[-1]:
                        line = line[:-1]
                    aux = [int(ic) for ic in line]
                    nsets.setdefault(ing, []).extend(aux)
                ing += 1
            else:
                line = fd.readline().split(",")
        fd.close()

        ngroups = nm.zeros((len(coors),), dtype=nm.int32)
        for ing, ii in nsets.iteritems():
            # Node ids are 1-based in the deck.
            ngroups[nm.array(ii) - 1] = ing

        mesh = mesh_from_groups(
            mesh,
            ids,
            coors,
            ngroups,
            tris,
            mat_tris,
            quads,
            mat_quads,
            tetras,
            mat_tetras,
            hexas,
            mat_hexas,
        )

        return mesh

    def read_dimension(self):
        """
        Return the space dimension inferred from the first '*node'
        section (number of coordinates per node record).
        """
        fd = open(self.filename, "r")
        line = fd.readline().split(",")
        while 1:
            if not line[0]:
                break
            token = line[0].strip().lower()
            if token == "*node":
                while 1:
                    line = fd.readline().split(",")
                    if (not line[0]) or (line[0][0] == "*"):
                        break
                    dim = len(line) - 1
                fd.close()
                return dim
            else:
                # BUG FIX: advance to the next line (cf. read()); without
                # this the loop never progressed past a non-'*node' line.
                line = fd.readline().split(",")
        fd.close()

    def write(self, filename, mesh, out=None, **kwargs):
        raise NotImplementedError
class BDFMeshIO(MeshIO):
    """
    Reader/writer for NASTRAN bulk data files (*.bdf).

    Reading supports GRID/GRID* nodes, CHEXA, CTETRA, CQUAD4 and CTRIA3
    elements (material ids taken from the property field) and SPC lines
    as node groups.
    """

    format = "nastran"

    def read_dimension(self, ret_fd=False):
        """
        Detect the space dimension: 3 when a CHEXA or CTETRA element is
        present, 2 otherwise.
        """
        fd = open(self.filename, "r")
        el3d = 0
        while 1:
            try:
                line = fd.readline()
            except:
                output("reading " + fd.name + " failed!")
                raise

            # BUG FIX: stop at end of file (readline() returns '') - the
            # same guard exists in read(); without it the loop failed on
            # the empty string.
            if len(line) == 0:
                break
            if len(line) == 1:
                continue
            if line[0] == "$":
                continue
            aux = line.split()

            if aux[0] == "CHEXA":
                el3d += 1
            elif aux[0] == "CTETRA":
                el3d += 1

        if el3d > 0:
            dim = 3
        else:
            dim = 2

        if ret_fd:
            return dim, fd
        else:
            fd.close()
            return dim

    def read(self, mesh, **kwargs):
        """Read nodes, elements and SPC node groups into `mesh`."""
        def mfloat(s):
            # Parse NASTRAN's short float notation where the 'e' of the
            # exponent may be omitted, e.g. '1.23-4' means 1.23e-4.
            if len(s) > 3:
                if s[-3] == "-":
                    return float(s[:-3] + "e" + s[-3:])
            return float(s)

        fd = open(self.filename, "r")

        el = {"3_8": [], "3_4": [], "2_4": [], "2_3": []}
        nod = []

        cmd = ""
        dim = 2
        conns_in = []
        descs = []
        node_grp = None

        while 1:
            try:
                line = fd.readline()
            except EOFError:
                break
            except:
                output("reading " + fd.name + " failed!")
                raise

            if len(line) == 0:
                break
            if len(line) < 4:
                continue
            if line[0] == "$":
                continue

            row = line.strip().split()

            if row[0] == "GRID":
                # Fixed-width single-line node record: the last 24
                # characters hold three 8-character coordinates.
                cs = line.strip()[-24:]
                aux = [cs[0:8], cs[8:16], cs[16:24]]
                nod.append([mfloat(ii) for ii in aux])
            elif row[0] == "GRID*":
                # Long-format node record, continued on the next line.
                aux = row[1:4]
                cmd = "GRIDX"
            elif row[0] == "CHEXA":
                # CHEXA is continued on the next line, too.
                aux = [int(ii) - 1 for ii in row[3:9]]
                aux2 = int(row[2])
                aux3 = row[9]
                cmd = "CHEXAX"
            elif row[0] == "CTETRA":
                aux = [int(ii) - 1 for ii in row[3:]]
                aux.append(int(row[2]))
                el["3_4"].append(aux)
                dim = 3
            elif row[0] == "CQUAD4":
                aux = [int(ii) - 1 for ii in row[3:]]
                aux.append(int(row[2]))
                el["2_4"].append(aux)
            elif row[0] == "CTRIA3":
                aux = [int(ii) - 1 for ii in row[3:]]
                aux.append(int(row[2]))
                el["2_3"].append(aux)
            elif cmd == "GRIDX":
                # Continuation of a GRID* record.
                cmd = ""
                aux2 = row[1]
                if aux2[-1] == "0":
                    aux2 = aux2[:-1]
                aux3 = aux[1:]
                aux3.append(aux2)
                nod.append([float(ii) for ii in aux3])
            elif cmd == "CHEXAX":
                # Continuation of a CHEXA record: the first field starts
                # with the continuation marker (aux3) glued to the node
                # number.
                cmd = ""
                aux4 = row[0]
                # str.find() instead of the Python 2 only string.find().
                aux5 = aux4.find(aux3)
                aux.append(int(aux4[(aux5 + len(aux3)) :]) - 1)
                aux.extend([int(ii) - 1 for ii in row[1:]])
                aux.append(aux2)
                el["3_8"].append(aux)
                dim = 3
            elif row[0] == "SPC" or row[0] == "SPC*":
                if node_grp is None:
                    node_grp = [0] * len(nod)
                node_grp[int(row[2]) - 1] = int(row[1])

        for elem in el.keys():
            if len(el[elem]) > 0:
                conns_in.append(el[elem])
                descs.append(elem)
        fd.close()

        nod = nm.array(nod, nm.float64)
        if dim == 2:
            nod = nod[:, :2].copy()
        conns_in = nm.array(conns_in, nm.int32)

        conns_in, mat_ids = sort_by_mat_id(conns_in)
        conns, mat_ids, descs = split_by_mat_id(conns_in, mat_ids, descs)
        mesh._set_data(nod, node_grp, conns, mat_ids, descs)

        return mesh

    @staticmethod
    def format_str(str, idx, n=8):
        """
        Format the integers in `str` (note: shadows the builtin) into
        fixed-width fields of `n` characters; after eight fields a
        '+%07d' continuation marker built from `idx` plus a newline is
        inserted.
        """
        out = ""
        for ii, istr in enumerate(str):
            aux = "%d" % istr
            out += aux + " " * (n - len(aux))
            if ii == 7:
                out += "+%07d\n+%07d" % (idx, idx)
        return out

    def write(self, filename, mesh, out=None, **kwargs):
        """Write the mesh as a NASTRAN bulk data file."""
        fd = open(filename, "w")

        coors = mesh.coors
        conns, desc = join_conn_groups(
            mesh.conns, mesh.descs, mesh.mat_ids, concat=True
        )

        n_nod, dim = coors.shape

        fd.write("$NASTRAN Bulk Data File created by SfePy\n")
        fd.write("$\nBEGIN BULK\n")
        fd.write("$\n$ ELEMENT CONNECTIVITY\n$\n")

        iel = 0
        mats = {}
        for ig, conn in enumerate(conns):
            for ii in range(conn.shape[0]):
                iel += 1
                # The last column appended by join_conn_groups(concat=True)
                # is the material id; node numbers become 1-based.
                nn = conn[ii][:-1] + 1
                mat = conn[ii][-1]
                # NOTE(review): counts start at 0, so mats[mat] ends up
                # one less than the number of elements of that material;
                # only the keys are used below.
                if mat in mats:
                    mats[mat] += 1
                else:
                    mats[mat] = 0
                # NOTE(review): 'ii + 1' restarts element ids for each
                # group - ids may repeat across groups; confirm.
                if desc[ig] == "2_4":
                    fd.write(
                        "CQUAD4 %s\n"
                        % self.format_str(
                            [ii + 1, mat, nn[0], nn[1], nn[2], nn[3]], iel
                        )
                    )
                elif desc[ig] == "2_3":
                    fd.write(
                        "CTRIA3 %s\n"
                        % self.format_str([ii + 1, mat, nn[0], nn[1], nn[2]], iel)
                    )
                elif desc[ig] == "3_4":
                    fd.write(
                        "CTETRA %s\n"
                        % self.format_str(
                            [ii + 1, mat, nn[0], nn[1], nn[2], nn[3]], iel
                        )
                    )
                elif desc[ig] == "3_8":
                    fd.write(
                        "CHEXA %s\n"
                        % self.format_str(
                            [
                                ii + 1,
                                mat,
                                nn[0],
                                nn[1],
                                nn[2],
                                nn[3],
                                nn[4],
                                nn[5],
                                nn[6],
                                nn[7],
                            ],
                            iel,
                        )
                    )
                else:
                    raise ValueError("unknown element type! (%s)" % desc[ig])

        fd.write("$\n$ NODAL COORDINATES\n$\n")
        format = "GRID* %s % 08E % 08E\n"
        if coors.shape[1] == 3:
            format += "* % 08E0 \n"
        else:
            # 2D: emit a constant zero z-coordinate.
            format += "* % 08E0 \n" % 0.0
        for ii in range(n_nod):
            sii = str(ii + 1)
            fd.write(format % ((sii + " " * (8 - len(sii)),) + tuple(coors[ii])))

        fd.write("$\n$ GEOMETRY\n$\n1 ")
        fd.write("0.000000E+00 0.000000E+00\n")
        fd.write("* 0.000000E+00 0.000000E+00\n* \n")

        fd.write("$\n$ MATERIALS\n$\n")
        # sorted() works on both Python 2 and 3 (list.sort() on keys()
        # is Python 2 only); the order is unchanged.
        matkeys = sorted(mats.keys())
        for ii, imat in enumerate(matkeys):
            fd.write("$ material%d : Isotropic\n" % imat)
            aux = str(imat)
            fd.write("MAT1* %s " % (aux + " " * (8 - len(aux))))
            fd.write("0.000000E+00 0.000000E+00\n")
            fd.write("* 0.000000E+00 0.000000E+00\n")
        fd.write("$\n$ GEOMETRY\n$\n")
        for ii, imat in enumerate(matkeys):
            fd.write("$ material%d : solid%d\n" % (imat, imat))
            fd.write("PSOLID* %s\n" % self.format_str([ii + 1, imat], 0, 16))
            fd.write("* \n")
        fd.write("ENDDATA\n")

        fd.close()
class NEUMeshIO(MeshIO):
    """
    Reader for GAMBIT neutral (*.neu) mesh files.

    Supported cell types: 2 (quad), 3 (tri), 4 (hexa), 6 (tetra).
    Element groups become material ids; BOUNDARY sections become nodal
    boundary conditions.
    """

    format = "gambit"

    def read_dimension(self, ret_fd=False):
        """Return the dimension from the NUMNP header line."""
        fd = open(self.filename, "r")

        row = fd.readline().split()
        while 1:
            if not row:
                break

            if row[0] == "NUMNP":
                row = fd.readline().split()
                n_nod, n_el, dim = row[0], row[1], int(row[4])
                break
            else:
                # BUG FIX: advance to the next line (cf. read()); without
                # this the loop never progressed past a non-NUMNP line.
                row = fd.readline().split()

        if ret_fd:
            return dim, fd
        else:
            fd.close()
            return dim

    def read(self, mesh, **kwargs):
        """Read nodes, cells, element groups and boundary node sets."""
        el = {"3_8": [], "3_4": [], "2_4": [], "2_3": []}
        nod = []

        conns_in = []
        descs = []

        group_ids = []
        group_n_els = []
        groups = []
        nodal_bcs = {}

        fd = open(self.filename, "r")

        row = fd.readline().split()
        while 1:
            if not row:
                break

            if row[0] == "NUMNP":
                row = fd.readline().split()
                n_nod, n_el, dim = row[0], row[1], int(row[4])

            elif row[0] == "NODAL":
                row = fd.readline().split()
                while not (row[0] == "ENDOFSECTION"):
                    nod.append(row[1:])
                    row = fd.readline().split()

            elif row[0] == "ELEMENTS/CELLS":
                row = fd.readline().split()
                while not (row[0] == "ENDOFSECTION"):
                    elid = [row[0]]
                    gtype = int(row[1])
                    if gtype == 6:
                        el["3_4"].append(row[3:] + elid)
                    elif gtype == 4:
                        # Hexa connectivity may continue on the next line.
                        rr = row[3:]
                        if len(rr) < 8:
                            rr.extend(fd.readline().split())
                        el["3_8"].append(rr + elid)
                    elif gtype == 3:
                        el["2_3"].append(row[3:] + elid)
                    elif gtype == 2:
                        el["2_4"].append(row[3:] + elid)
                    row = fd.readline().split()

            elif row[0] == "GROUP:":
                group_ids.append(row[1])
                g_n_el = int(row[3])
                group_n_els.append(g_n_el)
                name = fd.readline().strip()

                els = []
                row = fd.readline().split()
                row = fd.readline().split()
                while not (row[0] == "ENDOFSECTION"):
                    els.extend(row)
                    row = fd.readline().split()
                if g_n_el != len(els):
                    # BUG FIX: report the group element count g_n_el; the
                    # original used n_el - a *string* from the NUMNP line -
                    # which both misreports and breaks the %d formatting.
                    print(
                        "wrong number of group elements! (%d == %d)"
                        % (g_n_el, len(els))
                    )
                    raise ValueError
                groups.append(els)

            elif row[0] == "BOUNDARY":
                row = fd.readline().split()
                key = row[0]
                num = int(row[2])
                # Node ids are 1-based in the file.
                inod = read_array(fd, num, None, nm.int32) - 1
                nodal_bcs[key] = inod.squeeze()

                row = fd.readline().split()
                assert_(row[0] == "ENDOFSECTION")

            else:
                row = fd.readline().split()

        fd.close()

        if int(n_el) != sum(group_n_els):
            # BUG FIX: print the compared quantity sum(group_n_els); the
            # original printed len(group_n_els), the number of groups.
            print(
                "wrong total number of group elements! (%d == %d)"
                % (int(n_el), sum(group_n_els))
            )

        # Element id -> group (material) id.
        mat_ids = [None] * int(n_el)
        for ii, els in enumerate(groups):
            for iel in els:
                mat_ids[int(iel) - 1] = group_ids[ii]

        # Convert node numbers to 0-based ints and replace the trailing
        # element id by its material id.
        for elem in el.keys():
            if len(el[elem]) > 0:
                for iel in el[elem]:
                    for ii in range(len(iel)):
                        iel[ii] = int(iel[ii]) - 1
                    iel[-1] = mat_ids[iel[-1]]
                conns_in.append(el[elem])
                descs.append(elem)

        nod = nm.array(nod, nm.float64)
        conns_in = nm.array(conns_in, nm.int32)

        conns_in, mat_ids = sort_by_mat_id(conns_in)
        conns, mat_ids, descs = split_by_mat_id(conns_in, mat_ids, descs)
        mesh._set_data(nod, None, conns, mat_ids, descs, nodal_bcs=nodal_bcs)

        return mesh

    def write(self, filename, mesh, out=None, **kwargs):
        raise NotImplementedError
class ANSYSCDBMeshIO(MeshIO):
    """
    Reader for ANSYS CDB (blocked format) files (read-only).

    Only NBLOCK (nodes) and EBLOCK (elements) sections are parsed;
    tetrahedra and hexahedra are extracted from the element block.
    """

    format = "ansys_cdb"

    @staticmethod
    def make_format(format):
        """
        Parse a list of Fortran-like field descriptors (e.g. '3i9',
        '6e21.13e3') into fixed-width field bounds.

        Returns
        -------
        idx : list of (start, end)
            Character offsets of each field on a line.
        dtype : list of str
            'i' or 'e', one entry per descriptor group.
        """
        idx = []
        dtype = []
        start = 0
        for iform in format:
            # Each descriptor is <count><i|e><width>[.<precision>[e<x>]].
            ret = iform.partition("i")
            if not ret[1]:
                ret = iform.partition("e")
                if not ret[1]:
                    raise ValueError
            aux = ret[2].partition(".")
            step = int(aux[0])
            for j in range(int(ret[0])):
                idx.append((start, start + step))
                start += step
            dtype.append(ret[1])
        return idx, dtype

    def write(self, filename, mesh, out=None, **kwargs):
        raise NotImplementedError

    def read_bounding_box(self):
        raise NotImplementedError

    def read_dimension(self, ret_fd=False):
        # The coordinates are always read as 3D.
        return 3

    def read(self, mesh, **kwargs):
        """Read the mesh; node ids are remapped by mesh_from_groups()."""
        ids = []
        coors = []
        elems = []
        fd = open(self.filename, "r")

        while True:
            row = fd.readline()
            if not row:
                break
            # NOTE: dead check - `not row` above already covers it.
            if len(row) == 0:
                continue

            row = row.split(",")
            if row[0] == "NBLOCK":
                # NBLOCK,<n-fields>,<attr>; the next line holds the field
                # format, e.g. (3i9,6e21.13e3).
                nval = int(row[1])
                attr = row[2]
                format = fd.readline()
                format = format.strip()[1:-1].split(",")
                idx, dtype = self.make_format(format)
                while True:
                    row = fd.readline()
                    # Stop on a line starting with 'N' - presumably the
                    # 'N,R5.3,...' block terminator; confirm.
                    if row[0] == "N":
                        break
                    # Cut the fixed-width fields per the parsed format.
                    line = []
                    for ival in range(nval):
                        db, de = idx[ival]
                        line.append(row[db:de])
                    ids.append(int(line[0]))
                    coors.append([float(coor) for coor in line[3:]])

            elif row[0] == "EBLOCK":
                nval = int(row[1])
                attr = row[2]
                nel = int(row[3])
                format = fd.readline()
                elems = read_array(fd, nel, nval, nm.int32)

        fd.close()

        # NOTE(review): fixed column positions - column 8 read as the
        # per-element node count, columns 11+ as the connectivity - assume
        # the default solid-element EBLOCK layout; confirm.
        tetras_idx = nm.where(elems[:, 8] == 4)[0]
        hexas_idx = nm.where(elems[:, 8] == 8)[0]

        el_hexas = elems[hexas_idx, 11:]
        el_tetras = elems[tetras_idx, 11:]

        # hack for stupid export filters: when repeated nodes are detected
        # in the first hexa, the hexas are presumably degenerate tetras -
        # reinterpret them; confirm.
        if el_hexas[0, -4] == el_hexas[0, -1]:
            el_tetras = el_hexas[:, [0, 1, 2, 4]]
            tetras_idx = hexas_idx
            hexas_idx = []
            el_hexas = []

        ngroups = nm.zeros((len(coors),), dtype=nm.int32)

        mesh = mesh_from_groups(
            mesh,
            ids,
            coors,
            ngroups,
            [],
            [],
            [],
            [],
            el_tetras,
            elems[tetras_idx, 0],
            el_hexas,
            elems[hexas_idx, 0],
        )

        return mesh
def guess_format(filename, ext, formats, io_table):
    """
    Guess the format of filename, candidates are in formats.

    Probes each candidate's guess() hook in order and returns the first
    matching format name; raises NotImplementedError when none accepts
    the file (or when a candidate has no guess() at all).
    """
    for format in formats:
        output("guessing %s" % format)
        try:
            if io_table[format].guess(filename):
                return format
        except AttributeError:
            # Candidate class has no guess() hook - try the next one.
            pass

    raise NotImplementedError("cannot guess format of a *%s file!" % ext)
##
# c: 05.02.2008, r: 05.02.2008
# Build the format-name -> MeshIO-subclass dispatch table from all the
# classes defined above. Materialize the (name, object) pairs eagerly:
# on Python 3 vars() returns a live view and creating io_table below
# would otherwise mutate the module namespace while iterating it. This
# also removes the need for the version check (whose `sys` was not
# visibly imported) and the unused `import copy`.
var_dict = list(vars().items())
io_table = {}

for key, var in var_dict:
    try:
        if is_derived_class(var, MeshIO):
            io_table[var.format] = var
    except TypeError:
        # Non-class entries (functions, modules, data) are skipped.
        pass
del var_dict
def any_from_filename(filename, prefix_dir=None):
    """
    Create a MeshIO instance according to the kind of `filename`.

    Parameters
    ----------
    filename : str, function or MeshIO subclass instance
        The name of the mesh file. It can be also a user-supplied function
        accepting two arguments: `mesh`, `mode`, where `mesh` is a Mesh
        instance and `mode` is one of 'read','write', or a MeshIO subclass
        instance.
    prefix_dir : str
        The directory name to prepend to `filename`.

    Returns
    -------
    io : MeshIO subclass instance
        The MeshIO subclass instance corresponding to the kind of `filename`.
    """
    if not isinstance(filename, basestr):
        # Not a file name: either an already constructed MeshIO instance,
        # or a user callback to be wrapped in UserMeshIO.
        if isinstance(filename, MeshIO):
            return filename

        else:
            return UserMeshIO(filename)

    ext = op.splitext(filename)[1].lower()
    try:
        format = supported_formats[ext]
    except KeyError:
        raise ValueError("unsupported mesh file suffix! (%s)" % ext)

    if isinstance(format, tuple):
        # The suffix is shared by several formats - probe the file.
        format = guess_format(filename, ext, format, io_table)

    if prefix_dir is not None:
        filename = op.normpath(op.join(prefix_dir, filename))

    return io_table[format](filename)

# Attach as the static method MeshIO.any_from_filename() and drop the
# module-level name.
insert_static_method(MeshIO, any_from_filename)
del any_from_filename
def for_format(filename, format=None, writable=False, prefix_dir=None):
    """
    Create a MeshIO instance for file `filename` with forced `format`.
    Parameters
    ----------
    filename : str
        The name of the mesh file.
    format : str
        One of supported formats. If None,
        :func:`MeshIO.any_from_filename()` is called instead.
    writable : bool
        If True, verify that the mesh format is writable.
    prefix_dir : str
        The directory name to prepend to `filename`.
    Returns
    -------
    io : MeshIO subclass instance
        The MeshIO subclass instance corresponding to the `format`.
    Raises
    ------
    ValueError
        If the format is ambiguous, unknown, or not writable when
        `writable` is set.
    """
    ext = op.splitext(filename)[1].lower()
    try:
        _format = supported_formats[ext]
    except KeyError:
        _format = None
    # An explicitly given format wins; otherwise fall back to the suffix.
    format = get_default(format, _format)
    if format is None:
        io = MeshIO.any_from_filename(filename, prefix_dir=prefix_dir)
    else:
        if not isinstance(format, basestr):
            # A tuple here means the suffix maps to several candidate
            # formats - the caller must disambiguate explicitly.
            # (Typo "ambigous" fixed.)
            raise ValueError("ambiguous suffix! (%s -> %s)" % (ext, format))
        if format not in io_table:
            raise ValueError("unknown output mesh format! (%s)" % format)
        if writable and ("w" not in supported_capabilities[format]):
            output_writable_meshes()
            msg = (
                'write support not implemented for output mesh format "%s",'
                " see above!" % format
            )
            raise ValueError(msg)
        if prefix_dir is not None:
            filename = op.normpath(op.join(prefix_dir, filename))
        io = io_table[format](filename)
    return io
# Expose for_format() as the static method MeshIO.for_format() and drop
# the module-level name.
insert_static_method(MeshIO, for_format)
del for_format
| {
"repo_name": "mjirik/dicom2fem",
"path": "dicom2fem/meshio.py",
"copies": "1",
"size": "86767",
"license": "bsd-3-clause",
"hash": -8770243911784632000,
"line_mean": 29.5625220148,
"line_max": 88,
"alpha_frac": 0.4490647366,
"autogenerated": false,
"ratio": 3.596112400530504,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9542491978614428,
"avg_score": 0.0005370317032151406,
"num_lines": 2839
} |
# Adopted form SfePy project, see http://sfepy.org
# Thanks to Robert Cimrman
import time
import numpy as nm
import scipy.sparse as sp
from base import Struct, get_default, output, assert_
from meshio import MeshIO
##
# 28.05.2007, c
def make_point_cells( indx, dim ):
    """Build point-cell connectivity: each index repeated dim + 1 times."""
    # Broadcast the index column into every one of the dim + 1 vertex slots.
    n_cells = indx.shape[0]
    conn = nm.empty( (n_cells, dim + 1), dtype = nm.int32 )
    conn[:] = indx[:, None]
    return conn
##
# 23.05.2007, updated from matlab version, r: 05.05.2008
def find_map( x1, x2, eps = 1e-8, allow_double = False, join = True ):
    """
    Find a mapping between common coordinates in x1 and x2, such that
    x1[cmap[:,0]] == x2[cmap[:,1]]

    Coordinates are compared after rounding to the precision `eps`.
    Coinciding nodes within a single array are reported; they raise
    ValueError unless `allow_double` is True.  With `join` True a single
    (n, 2) index array is returned, otherwise the two index arrays are
    returned separately.
    """
    off, dim = x1.shape
    # ir marks the origin of each stacked column: 0 for x1 points,
    # `off` for x2 points.
    ir = nm.zeros( (off + x2.shape[0],), dtype = nm.int32 )
    ir[off:] = off
    # Quantize both coordinate sets so the comparison is robust to
    # round-off below `eps`.
    x1 = nm.round( x1.T / eps ) * eps
    x2 = nm.round( x2.T / eps ) * eps
    xx = nm.c_[x1, x2]
    keys = [xx[ii] for ii in range( dim )]
    iis = nm.lexsort( keys = keys )
    xs = xx.T[iis]
    ## import scipy.io as io
    ## io.write_array( 'sss1', x1.T )
    ## io.write_array( 'sss2', x2.T )
    ## io.write_array( 'sss', xs, precision = 16 )
    ## pause()
    # After lexsort, equal points are adjacent - detect them via the
    # distance between consecutive sorted points.
    xd = nm.sqrt( nm.sum( nm.diff( xs, axis = 0 )**2.0, axis = 1 ) )
    ii = nm.where( xd < eps )[0]
    off1, off2 = ir[iis][ii], ir[iis][ii+1]
    # Convert global (stacked) column positions back to per-array indices.
    i1, i2 = iis[ii] - off1, iis[ii+1] - off2
    # Pairs whose members come from the same array are double nodes.
    dns = nm.where( off1 == off2 )[0]
    if dns.size:
        print 'double node(s) in:'
        for dn in dns:
            if off1[dn] == 0:
                print 'x1: %d %d -> %s %s' % (i1[dn], i2[dn],
                                              x1[:,i1[dn]], x1[:,i2[dn]])
            else:
                print 'x2: %d %d -> %s %s' % (i1[dn], i2[dn],
                                              x2[:,i1[dn]], x2[:,i2[dn]])
        if not allow_double:
            raise ValueError
    if join:
        cmap = nm.c_[i1, i2]
        return cmap
    else:
        return i1, i2
def merge_mesh( x1, ngroups1, conns1, x2, ngroups2, conns2, cmap, eps = 1e-8 ):
    """Merge two meshes in common coordinates found in x1, x2.

    `cmap` is the common-node map as returned by find_map(): column 0
    indexes into x1, column 1 into x2.  The merged connectivities keep
    the x1 node numbering and remap the x2 nodes after it.
    """
    n1 = x1.shape[0]
    n2 = x2.shape[0]
    # Sanity check: mapped nodes must actually coincide.
    # NOTE(review): the last coordinate column is excluded ([:-1]) from
    # the comparison - confirm this is intentional.
    err = nm.sum( nm.sum( nm.abs( x1[cmap[:,0],:-1] - x2[cmap[:,1],:-1] ) ) )
    if abs( err ) > (10.0 * eps):
        print 'nonmatching meshes!', err
        raise ValueError
    # mask selects the x2 nodes that are NOT shared with x1.
    mask = nm.ones( (n2,), dtype = nm.int32 )
    mask[cmap[:,1]] = 0
    # print mask, nm.cumsum( mask )
    # New index of each kept x2 node: appended after the n1 x1 nodes;
    # shared x2 nodes are redirected to their x1 counterparts.
    remap = nm.cumsum( mask ) + n1 - 1
    remap[cmap[:,1]] = cmap[:,0]
    # print remap
    i2 = nm.setdiff1d( nm.arange( n2, dtype = nm.int32 ),
                       cmap[:,1] )
    xx = nm.r_[x1, x2[i2]]
    ngroups = nm.r_[ngroups1, ngroups2[i2]]
    conns = []
    for ii in xrange( len( conns1 ) ):
        conn = nm.vstack( (conns1[ii], remap[conns2[ii]]) )
        conns.append( conn )
    return xx, ngroups, conns
def fix_double_nodes(coor, ngroups, conns, eps):
    """
    Detect and attempt fixing double nodes in a mesh.
    The double nodes are nodes having the same coordinates
    w.r.t. precision given by `eps`.

    Returns the de-duplicated (coor, ngroups, conns).
    """
    n_nod, dim = coor.shape
    # find_map() against an empty array reports duplicates within coor.
    cmap = find_map( coor, nm.zeros( (0,dim) ), eps = eps, allow_double = True )
    if cmap.size:
        output('double nodes in input mesh!')
        output('trying to fix...')
    # NOTE(review): n_nod is not updated inside the loop while coor
    # shrinks - confirm multi-pass behavior is correct.
    while cmap.size:
        print cmap.size
        # Just like in Variable.equation_mapping()...
        ii = nm.argsort( cmap[:,1] )
        scmap = cmap[ii]
        eq = nm.arange( n_nod )
        # Mark the duplicate of each pair for removal...
        eq[scmap[:,1]] = -1
        eqi = eq[eq >= 0]
        # ...and renumber the surviving nodes consecutively.
        eq[eqi] = nm.arange( eqi.shape[0] )
        remap = eq.copy()
        # Redirect removed nodes to their surviving counterparts.
        remap[scmap[:,1]] = eq[scmap[:,0]]
        print coor.shape
        coor = coor[eqi]
        ngroups = ngroups[eqi]
        print coor.shape
        ccs = []
        for conn in conns:
            ccs.append( remap[conn] )
        conns = ccs
        # Repeat until no duplicates remain.
        cmap = find_map( coor, nm.zeros( (0,dim) ), eps = eps,
                         allow_double = True )
    output('...done')
    return coor, ngroups, conns
def get_min_edge_size(coor, conns):
    """
    Get the smallest edge length.

    All pairs of local vertices within each cell are examined, so this is
    a lower bound on the true minimum edge size for cell types having
    non-adjacent vertex pairs.
    """
    smallest = 1e16
    for conn in conns:
        n_ep = conn.shape[1]
        # Every unordered pair (ir < ic) of local vertex columns.
        for ir in range(n_ep - 1):
            base = coor[conn[:, ir]]
            for ic in range(ir + 1, n_ep):
                delta = coor[conn[:, ic]] - base
                candidate = nm.sqrt(nm.sum(delta ** 2.0, axis=1).min())
                if candidate < smallest:
                    smallest = candidate
    return smallest
##
# 25.05.2007, c
def get_min_vertex_distance( coor, guess ):
    """Can miss the minimum, but is enough for our purposes."""
    # Sort by x.
    ix = nm.argsort( coor[:,0] )
    scoor = coor[ix]
    mvd = 1e16
    # Get mvd in chunks potentially smaller than guess.
    n_coor = coor.shape[0]
    print n_coor
    i0 = i1 = 0
    x0 = scoor[i0,0]
    while 1:
        # Grow the window while its x-extent stays below `guess`.
        while ((scoor[i1,0] - x0) < guess) and (i1 < (n_coor - 1)):
            i1 += 1
        # print i0, i1, x0, scoor[i1,0]
        # Brute-force search within the current window.
        aim, aa1, aa2, aux = get_min_vertex_distance_naive( scoor[i0:i1+1] )
        if aux < mvd:
            im, a1, a2 = aim, aa1 + i0, aa2 + i0
        mvd = min( mvd, aux )
        # Advance with half-overlap so pairs straddling a window boundary
        # are not (all) missed.
        # NOTE(review): i0 may reach n_coor before the break check below,
        # which would make scoor[i0,0] fail - confirm termination for
        # small/degenerate inputs.
        i0 = i1 = int( 0.5 * (i1 + i0 ) ) + 1
        # i0 += 1
        x0 = scoor[i0,0]
        # print '-', i0
        if i1 == n_coor - 1: break
    print im, ix[a1], ix[a2], a1, a2, scoor[a1], scoor[a2]
    return mvd
##
# c: 25.05.2007, r: 05.05.2008
def get_min_vertex_distance_naive( coor ):
    """
    Locate the closest pair of vertices by brute force over all pairs.

    Returns (position within the pair list, first vertex index, second
    vertex index, distance) of the minimum-distance pair.
    """
    idx = nm.arange( coor.shape[0] )
    cols, rows = nm.meshgrid( idx, idx )
    cols = cols.flatten()
    rows = rows.flatten()
    # Keep each unordered pair exactly once (first index strictly smaller).
    keep = nm.where( cols < rows )
    first, second = cols[keep], rows[keep]
    sq_dist = nm.sum( (coor[first] - coor[second])**2.0, axis = 1 )
    best = sq_dist.argmin()
    return best, first[best], second[best], nm.sqrt( sq_dist[best] )
def make_mesh( coor, ngroups, conns, mesh_in ):
    """Create a mesh reusing mat_ids and descs of mesh_in."""
    # Every element of a group inherits the group's original material id.
    mat_ids = [
        nm.full( (conn.shape[0],), mesh_in.mat_ids[ig][0], dtype = nm.int32 )
        for ig, conn in enumerate( conns )
    ]
    return Mesh.from_data( 'merged mesh', coor, ngroups, conns,
                           mat_ids, mesh_in.descs )
def make_inverse_connectivity(conns, n_nod, ret_offsets=True):
    """
    For each mesh node referenced in the connectivity conns, make a list of
    elements it belongs to.

    Parameters
    ----------
    conns : list of arrays
        Connectivities per element group; each row lists a cell's nodes.
    n_nod : int
        The total number of nodes.
    ret_offsets : bool
        If True, return (offsets, iconn) where offsets index per-node
        slices of iconn; otherwise return (n_els, iconn) with per-node
        element counts.

    Returns
    -------
    offsets or n_els : array
    iconn : array
        Flattened (group, element) index pairs, grouped per node.
    """
    from itertools import chain
    # range() instead of xrange(): identical iteration on Python 2 and
    # keeps the function working on Python 3 (xrange was removed).
    iconn = [[] for ii in range(n_nod)]
    n_els = [0] * n_nod
    for ig, conn in enumerate(conns):
        for iel, row in enumerate(conn):
            for node in row:
                iconn[node].extend([ig, iel])
                n_els[node] += 1
    n_els = nm.array(n_els, dtype=nm.int32)
    iconn = nm.fromiter(chain(*iconn), nm.int32)
    if ret_offsets:
        offsets = nm.cumsum(nm.r_[0, n_els], dtype=nm.int32)
        return offsets, iconn
    else:
        return n_els, iconn
##
# Mesh.
# 13.12.2004, c
# 02.01.2005
class Mesh( Struct ):
    """
    Contains the FEM mesh together with all utilities related to it.
    Input and output is handled by the MeshIO class and subclasses.
    The Mesh class only contains the real mesh - nodes, connectivity,
    regions, plus methods for doing operations on this mesh.
    Example of creating and working with a mesh::
    In [1]: from sfepy.fem import Mesh
    In [2]: m = Mesh.from_file("meshes/3d/cylinder.vtk")
    sfepy: reading mesh (meshes/3d/cylinder.vtk)...
    sfepy: ...done in 0.04 s
    In [3]: m.coors
    Out[3]:
    array([[ 1.00000000e-01, 2.00000000e-02, -1.22460635e-18],
    [ 1.00000000e-01, 1.80193774e-02, 8.67767478e-03],
    [ 1.00000000e-01, 1.24697960e-02, 1.56366296e-02],
    ...,
    [ 8.00298527e-02, 5.21598617e-03, -9.77772215e-05],
    [ 7.02544004e-02, 3.61610291e-04, -1.16903153e-04],
    [ 3.19633596e-02, -1.00335972e-02, 9.60460305e-03]])
    In [4]: m.ngroups
    Out[4]: array([0, 0, 0, ..., 0, 0, 0])
    In [5]: m.conns
    Out[5]:
    [array([[ 28, 60, 45, 29],
    [ 28, 60, 57, 45],
    [ 28, 57, 27, 45],
    ...,
    [353, 343, 260, 296],
    [353, 139, 181, 140],
    [353, 295, 139, 140]])]
    In [6]: m.mat_ids
    Out[6]: [array([6, 6, 6, ..., 6, 6, 6])]
    In [7]: m.descs
    Out[7]: ['3_4']
    In [8]: m
    Out[8]: Mesh:meshes/3d/cylinder
    In [9]: print m
    Mesh:meshes/3d/cylinder
    conns:
    [array([[ 28, 60, 45, 29],
    [ 28, 60, 57, 45],
    [ 28, 57, 27, 45],
    ...,
    [353, 343, 260, 296],
    [353, 139, 181, 140],
    [353, 295, 139, 140]])]
    coors:
    [[ 1.00000000e-01 2.00000000e-02 -1.22460635e-18]
    [ 1.00000000e-01 1.80193774e-02 8.67767478e-03]
    [ 1.00000000e-01 1.24697960e-02 1.56366296e-02]
    ...,
    [ 8.00298527e-02 5.21598617e-03 -9.77772215e-05]
    [ 7.02544004e-02 3.61610291e-04 -1.16903153e-04]
    [ 3.19633596e-02 -1.00335972e-02 9.60460305e-03]]
    descs:
    ['3_4']
    dim:
    3
    el_offsets:
    [ 0 1348]
    io:
    None
    mat_ids:
    [array([6, 6, 6, ..., 6, 6, 6])]
    n_e_ps:
    [4]
    n_el:
    1348
    n_els:
    [1348]
    n_nod:
    354
    name:
    meshes/3d/cylinder
    ngroups:
    [0 0 0 ..., 0 0 0]
    setup_done:
    0
    The Mesh().coors is an array of node coordinates and Mesh().conns is the
    list of elements of each type (see Mesh().desc), so for example if you want
    to know the coordinates of the nodes of the fifth finite element of the
    type 3_4 do::
    In [10]: m.descs
    Out[10]: ['3_4']
    So now you know that the finite elements of the type 3_4 are in a.conns[0]::
    In [11]: m.coors[m.conns[0][4]]
    Out[11]:
    array([[ 1.00000000e-01, 1.80193774e-02, -8.67767478e-03],
    [ 1.00000000e-01, 1.32888539e-02, -4.35893200e-04],
    [ 1.00000000e-01, 2.00000000e-02, -1.22460635e-18],
    [ 9.22857574e-02, 1.95180454e-02, -4.36416134e-03]])
    The element ids are of the form "<dimension>_<number of nodes>", i.e.:
    - 2_2 ... line
    - 2_3 ... triangle
    - 2_4 ... quadrangle
    - 3_2 ... line
    - 3_4 ... tetrahedron
    - 3_8 ... hexahedron
    """
    def from_surface( surf_faces, mesh_in ):
        """
        Create a mesh given a set of surface faces and the original mesh.
        """
        aux = nm.concatenate([faces.ravel() for faces in surf_faces])
        inod = nm.unique(aux)
        n_nod = len( inod )
        n_nod_m, dim = mesh_in.coors.shape
        # Map the used global vertex ids to a contiguous 0..n_nod-1 range.
        aux = nm.arange( n_nod, dtype=nm.int32 )
        remap = nm.zeros( (n_nod_m,), nm.int32 )
        remap[inod] = aux
        mesh = Mesh( mesh_in.name + "_surf" )
        mesh.coors = mesh_in.coors[inod]
        mesh.ngroups = mesh_in.ngroups[inod]
        # Face vertex count -> element type descriptor.
        sfm = {3 : "2_3", 4 : "2_4"}
        mesh.conns = []
        mesh.descs = []
        mesh.mat_ids = []
        for ii, sf in enumerate( surf_faces ):
            n_el, n_fp = sf.shape
            conn = remap[sf]
            # Material id = index of the surface face group.
            mat_id = nm.empty( (conn.shape[0],), dtype = nm.int32 )
            mat_id.fill( ii )
            mesh.descs.append( sfm[n_fp] )
            mesh.conns.append( conn )
            mesh.mat_ids.append( mat_id )
        mesh._set_shape_info()
        return mesh
    from_surface = staticmethod( from_surface )
    @staticmethod
    def from_file(filename=None, io='auto', prefix_dir=None,
                  omit_facets=False):
        """
        Read a mesh from a file.
        Parameters
        ----------
        filename : string or function or MeshIO instance or Mesh instance
            The name of file to read the mesh from. For convenience, a
            mesh creation function or a MeshIO instance or directly a Mesh
            instance can be passed in place of the file name.
        io : *MeshIO instance
            Passing *MeshIO instance has precedence over filename.
        prefix_dir : str
            If not None, the filename is relative to that directory.
        omit_facets : bool
            If True, do not read cells of lower dimension than the space
            dimension (faces and/or edges). Only some MeshIO subclasses
            support this!
        """
        # A Mesh passed in place of a file name is returned as is.
        if isinstance(filename, Mesh):
            return filename
        if io == 'auto':
            if filename is None:
                output( 'filename or io must be specified!' )
                raise ValueError
            else:
                io = MeshIO.any_from_filename(filename, prefix_dir=prefix_dir)
        output('reading mesh (%s)...' % (io.filename))
        tt = time.clock()
        trunk = io.get_filename_trunk()
        mesh = Mesh(trunk)
        mesh = io.read(mesh, omit_facets=omit_facets)
        output('...done in %.2f s' % (time.clock() - tt))
        mesh._set_shape_info()
        return mesh
    @staticmethod
    def from_region(region, mesh_in, save_edges=False, save_faces=False,
                    localize=False, is_surface=False):
        """
        Create a mesh corresponding to a given region.
        """
        mesh = Mesh( mesh_in.name + "_reg" )
        mesh.coors = mesh_in.coors.copy()
        mesh.ngroups = mesh_in.ngroups.copy()
        mesh.conns = []
        mesh.descs = []
        mesh.mat_ids = []
        if not is_surface:
            if region.has_cells():
                # Copy over the region's cells, group by group.
                for ig in region.igs:
                    mesh.descs.append( mesh_in.descs[ig] )
                    els = region.get_cells( ig )
                    mesh.mat_ids.append( mesh_in.mat_ids[ig][els,:].copy() )
                    mesh.conns.append( mesh_in.conns[ig][els,:].copy() )
            if save_edges:
                ed = region.domain.ed
                for ig in region.igs:
                    edges = region.get_edges( ig )
                    mesh.descs.append( '1_2' )
                    mesh.mat_ids.append( ed.data[edges,0] + 1 )
                    mesh.conns.append( ed.data[edges,-2:].copy() )
            if save_faces:
                mesh._append_region_faces(region)
            if save_edges or save_faces:
                # Add point cells for all region vertices.
                mesh.descs.append( {2 : '2_3', 3 : '3_4'}[mesh_in.dim] )
                mesh.mat_ids.append( -nm.ones_like( region.all_vertices ) )
                mesh.conns.append(make_point_cells(region.all_vertices,
                                                   mesh_in.dim))
        else:
            mesh._append_region_faces(region, force_faces=True)
        mesh._set_shape_info()
        if localize:
            mesh.localize( region.all_vertices )
        return mesh
    def from_data( name, coors, ngroups, conns, mat_ids, descs, igs = None ):
        """
        Create a mesh from mesh data.
        """
        if igs is None:
            igs = range( len( conns ) )
        mesh = Mesh(name)
        mesh._set_data(coors = coors,
                       ngroups = ngroups,
                       conns = [conns[ig] for ig in igs],
                       mat_ids = [mat_ids[ig] for ig in igs],
                       descs = [descs[ig] for ig in igs])
        mesh._set_shape_info()
        return mesh
    from_data = staticmethod( from_data )
    def __init__(self, name='mesh', filename=None,
                 prefix_dir=None, **kwargs):
        """Create a Mesh.
        Parameters
        ----------
        name : str
            Object name.
        filename : str
            Loads a mesh from the specified file, if not None.
        prefix_dir : str
            If not None, the filename is relative to that directory.
        """
        Struct.__init__(self, name=name, **kwargs)
        if filename is None:
            self.io = None
            self.setup_done = 0
        else:
            io = MeshIO.any_from_filename(filename, prefix_dir=prefix_dir)
            output( 'reading mesh (%s)...' % (io.filename) )
            tt = time.clock()
            io.read(self)
            output( '...done in %.2f s' % (time.clock() - tt) )
            self._set_shape_info()
    def copy(self, name=None):
        """Make a deep copy of self.
        Parameters
        ----------
        name : str
            Name of the copied mesh.
        """
        return Struct.copy(self, deep=True, name=name)
    ##
    # 04.08.2006, c
    # 29.09.2006
    def _set_shape_info( self ):
        """Derive redundant shape attributes (n_nod, dim, n_els, n_e_ps,
        el_offsets, n_el, dims) from coors, conns and descs."""
        self.n_nod, self.dim = self.coors.shape
        self.n_els = nm.array( [conn.shape[0] for conn in self.conns] )
        self.n_e_ps = nm.array( [conn.shape[1] for conn in self.conns] )
        self.el_offsets = nm.cumsum( nm.r_[0, self.n_els] )
        self.n_el = nm.sum( self.n_els )
        # Element dimension is the first character of each descriptor.
        self.dims = [int(ii[0]) for ii in self.descs]
    def _set_data(self, coors, ngroups, conns, mat_ids, descs, nodal_bcs=None):
        """
        Set mesh data.
        Parameters
        ----------
        coors : array
            Coordinates of mesh nodes.
        ngroups : array
            Node groups.
        conns : list of arrays
            The array of mesh elements (connectivities) for each element group.
        mat_ids : list of arrays
            The array of material ids for each element group.
        descs: list of strings
            The element type for each element group.
        nodal_bcs : dict of arrays, optional
            The nodes defining regions for boundary conditions referred
            to by the dict keys in problem description files.
        """
        self.coors = nm.ascontiguousarray(coors)
        if ngroups is None:
            self.ngroups = nm.zeros((self.coors.shape[0],), dtype=nm.int32)
        else:
            self.ngroups = nm.ascontiguousarray(ngroups)
        self.conns = [nm.asarray(conn, dtype=nm.int32) for conn in conns]
        self.mat_ids = [nm.asarray(mat_id, dtype=nm.int32)
                        for mat_id in mat_ids]
        self.descs = descs
        self.nodal_bcs = get_default(nodal_bcs, {})
    def _append_region_faces(self, region, force_faces=False):
        """Append the region's surface faces (triangles and/or quads) to
        the mesh connectivities, one entry per element group."""
        fa = region.domain.get_facets(force_faces=force_faces)[1]
        if fa is None:
            return
        for ig in region.igs:
            faces = region.get_surface_entities(ig)
            fdata = fa.facets[faces]
            # Rows with -1 in the last column are triangles, others quads.
            i3 = nm.where(fdata[:,-1] == -1)[0]
            i4 = nm.where(fdata[:,-1] != -1)[0]
            if i3.size:
                self.descs.append('2_3')
                self.mat_ids.append(fa.indices[i3,0] + 1)
                self.conns.append(fdata[i3,:-1])
            if i4.size:
                self.descs.append('2_4')
                self.mat_ids.append(fa.indices[i4,0] + 1)
                self.conns.append(fdata[i4])
    def write(self, filename=None, io=None,
              coors=None, igs=None, out=None, float_format=None, **kwargs):
        """
        Write mesh + optional results in `out` to a file.
        Parameters
        ----------
        filename : str, optional
            The file name. If None, the mesh name is used instead.
        io : MeshIO instance or 'auto', optional
            Passing 'auto' respects the extension of `filename`.
        coors : array, optional
            The coordinates that can be used instead of the mesh coordinates.
        igs : array_like, optional
            Passing a list of group ids selects only those groups for writing.
        out : dict, optional
            The output data attached to the mesh vertices and/or cells.
        float_format : str, optional
            The format string used to print floats in case of a text file
            format.
        **kwargs : dict, optional
            Additional arguments that can be passed to the `MeshIO` instance.
        """
        if filename is None:
            filename = self.name + '.mesh'
        if io is None:
            io = self.io
            if io is None:
                io = 'auto'
        if io == 'auto':
            io = MeshIO.any_from_filename( filename )
        if coors is None:
            coors = self.coors
        if igs is None:
            igs = range( len( self.conns ) )
        # Write via a temporary mesh restricted to the selected groups.
        aux_mesh = Mesh.from_data( self.name, coors, self.ngroups,
                                   self.conns, self.mat_ids, self.descs, igs )
        io.set_float_format( float_format )
        io.write( filename, aux_mesh, out, **kwargs )
    ##
    # 23.05.2007, c
    def get_bounding_box( self ):
        """Return the bounding box as [[min coords], [max coords]]."""
        return nm.vstack( (nm.amin( self.coors, 0 ), nm.amax( self.coors, 0 )) )
    def get_element_coors(self, ig=None):
        """
        Get the coordinates of vertices elements in group `ig`.
        Parameters
        ----------
        ig : int, optional
            The element group. If None, the coordinates for all groups
            are returned, filled with zeros at places of missing
            vertices, i.e. where elements having less then the full number
            of vertices (`n_ep_max`) are.
        Returns
        -------
        coors : array
            The coordinates in an array of shape `(n_el, n_ep_max, dim)`.
        """
        cc = self.coors
        n_ep_max = self.n_e_ps.max()
        coors = nm.empty((self.n_el, n_ep_max, self.dim), dtype=cc.dtype)
        for ig, conn in enumerate(self.conns):
            i1, i2 = self.el_offsets[ig], self.el_offsets[ig + 1]
            coors[i1:i2, :conn.shape[1], :] = cc[conn]
        return coors
    def localize(self, inod):
        """
        Strips nodes not in inod and remaps connectivities.
        Omits elements where remap[conn] contains -1...
        """
        remap = nm.empty((self.n_nod,), dtype=nm.int32)
        remap.fill(-1)
        remap[inod] = nm.arange(inod.shape[0], dtype=nm.int32)
        self.coors = self.coors[inod]
        self.ngroups = self.ngroups[inod]
        conns = []
        mat_ids = []
        for ig, conn in enumerate(self.conns):
            if conn.shape[0] == 0:
                continue
            aux = remap[conn]
            # Drop elements referencing any stripped (-1) node.
            ii = nm.unique(nm.where(aux == -1)[0])
            ii = nm.setdiff1d(nm.arange(conn.shape[0], dtype=nm.int32), ii)
            conns.append(aux[ii])
            mat_ids.append(self.mat_ids[ig][ii])
        self.conns = conns
        self.mat_ids = mat_ids
        self._set_shape_info()
    def transform_coors(self, mtx_t, ref_coors=None):
        """
        Transform coordinates of the mesh by the given transformation matrix.
        Parameters
        ----------
        mtx_t : array
            The transformation matrix `T` (2D array). It is applied
            depending on its shape:
            - `(dim, dim): x = T * x`
            - `(dim, dim + 1): x = T[:, :-1] * x + T[:, -1]`
        ref_coors : array, optional
            Alternative coordinates to use for the transformation instead
            of the mesh coordinates, with the same shape as `self.coors`.
        """
        if ref_coors is None:
            ref_coors = self.coors
        if mtx_t.shape[1] > self.coors.shape[1]:
            # Affine: rotation/scaling part plus translation column.
            self.coors[:] = nm.dot(ref_coors, mtx_t[:,:-1].T) + mtx_t[:,-1]
        else:
            self.coors[:] = nm.dot(ref_coors, mtx_t.T)
    # def create_conn_graph(self, verbose=True):
    # """
    # Create a graph of mesh connectivity.
    # Returns
    # -------
    # graph : csr_matrix
    # The mesh connectivity graph as a SciPy CSR matrix.
    # """
    # from extmods.mesh import create_mesh_graph
    # shape = (self.n_nod, self.n_nod)
    # output('graph shape:', shape, verbose=verbose)
    # if nm.prod(shape) == 0:
    # output('no graph (zero size)!', verbose=verbose)
    # return None
    # output('assembling mesh graph...', verbose=verbose)
    # tt = time.clock()
    # nnz, prow, icol = create_mesh_graph(shape[0], shape[1],
    # len(self.conns),
    # self.conns, self.conns)
    # output('...done in %.2f s' % (time.clock() - tt), verbose=verbose)
    # output('graph nonzeros: %d (%.2e%% fill)' \
    # % (nnz, float(nnz) / nm.prod(shape)))
    # data = nm.ones((nnz,), dtype=nm.bool)
    # graph = sp.csr_matrix((data, icol, prow), shape)
    # return graph
    def explode_groups(self, eps, return_emap=False):
        """
        Explode the mesh element groups by `eps`, i.e. split group
        interface nodes and shrink each group towards its centre by
        `eps`.
        Parameters
        ----------
        eps : float in `[0.0, 1.0]`
            The group shrinking factor.
        return_emap : bool, optional
            If True, also return the mapping against original mesh
            coordinates that result in the exploded mesh coordinates.
            The mapping can be used to map mesh vertex data to the
            exploded mesh vertices.
        Returns
        -------
        mesh : Mesh
            The new mesh with exploded groups.
        emap : spmatrix, optional
            The maping for exploding vertex values. Only provided if
            `return_emap` is True.
        """
        assert_(0.0 <= eps <= 1.0)
        remap = nm.empty((self.n_nod,), dtype=nm.int32)
        offset = 0
        if return_emap:
            rows, cols = [], []
        coors = []
        ngroups = []
        conns = []
        mat_ids = []
        descs = []
        for ig, conn in enumerate(self.conns):
            nodes = nm.unique(conn)
            group_coors = self.coors[nodes]
            n_nod = group_coors.shape[0]
            # Shrink the group's nodes towards its centroid by eps.
            centre = group_coors.sum(axis=0) / float(n_nod)
            vectors = group_coors - centre[None, :]
            new_coors = centre + (vectors * eps)
            # Each group gets its own copy of the nodes it uses.
            remap[nodes] = nm.arange(n_nod, dtype=nm.int32) + offset
            new_conn = remap[conn]
            coors.append(new_coors)
            ngroups.append(self.ngroups[nodes])
            conns.append(new_conn)
            mat_ids.append(self.mat_ids[ig])
            descs.append(self.descs[ig])
            offset += n_nod
            if return_emap:
                cols.append(nodes)
                rows.append(remap[nodes])
        coors = nm.concatenate(coors, axis=0)
        ngroups = nm.concatenate(ngroups, axis=0)
        mesh = Mesh.from_data('exploded_' + self.name,
                              coors, ngroups, conns, mat_ids, descs)
        if return_emap:
            # Sparse 0/1 matrix mapping original vertices to exploded ones.
            rows = nm.concatenate(rows)
            cols = nm.concatenate(cols)
            data = nm.ones(rows.shape[0], dtype=nm.float64)
            emap = sp.coo_matrix((data, (rows, cols)),
                                 shape=(mesh.n_nod, self.n_nod))
            return mesh, emap
        else:
            return mesh
| {
"repo_name": "vlukes/dicom2fem",
"path": "dicom2fem/mesh.py",
"copies": "1",
"size": "27478",
"license": "bsd-3-clause",
"hash": -2501733847062933500,
"line_mean": 31.1380116959,
"line_max": 80,
"alpha_frac": 0.5035300968,
"autogenerated": false,
"ratio": 3.3591687041564793,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43626988009564793,
"avg_score": null,
"num_lines": null
} |
# Adopted form SfePy project, see http://sfepy.org
# Thanks to Robert Cimrman
import time
import numpy as nm
import scipy.sparse as sp
from .base import Struct, get_default, output, assert_
from .meshio import MeshIO
##
# 28.05.2007, c
def make_point_cells(indx, dim):
    """Build point-cell connectivity: every cell repeats its index dim + 1 times."""
    # Repeat the index column across all dim + 1 vertex slots at once.
    column = nm.asarray(indx, dtype=nm.int32).reshape(-1, 1)
    return nm.repeat(column, dim + 1, axis=1)
##
# 23.05.2007, updated from matlab version, r: 05.05.2008
def find_map(x1, x2, eps=1e-8, allow_double=False, join=True):
    """
    Find a mapping between common coordinates in x1 and x2, such that
    x1[cmap[:,0]] == x2[cmap[:,1]]

    Coordinates are compared after rounding to the precision `eps`.
    Coinciding nodes within a single array are reported; they raise
    ValueError unless `allow_double` is True.  With `join` True a single
    (n, 2) index array is returned, otherwise the two index arrays are
    returned separately.
    """
    off, dim = x1.shape
    # ir marks the origin of each stacked column: 0 for x1 points,
    # `off` for x2 points.
    ir = nm.zeros((off + x2.shape[0],), dtype=nm.int32)
    ir[off:] = off
    # Quantize both coordinate sets so the comparison is robust to
    # round-off below `eps`.
    x1 = nm.round(x1.T / eps) * eps
    x2 = nm.round(x2.T / eps) * eps
    xx = nm.c_[x1, x2]
    keys = [xx[ii] for ii in range(dim)]
    iis = nm.lexsort(keys=keys)
    xs = xx.T[iis]
    ## import scipy.io as io
    ## io.write_array( 'sss1', x1.T )
    ## io.write_array( 'sss2', x2.T )
    ## io.write_array( 'sss', xs, precision = 16 )
    ## pause()
    # After lexsort, equal points are adjacent - detect them via the
    # distance between consecutive sorted points.
    xd = nm.sqrt(nm.sum(nm.diff(xs, axis=0) ** 2.0, axis=1))
    ii = nm.where(xd < eps)[0]
    off1, off2 = ir[iis][ii], ir[iis][ii + 1]
    # Convert global (stacked) column positions back to per-array indices.
    i1, i2 = iis[ii] - off1, iis[ii + 1] - off2
    # Pairs whose members come from the same array are double nodes.
    dns = nm.where(off1 == off2)[0]
    if dns.size:
        print("double node(s) in:")
        for dn in dns:
            if off1[dn] == 0:
                print(
                    "x1: %d %d -> %s %s"
                    % (i1[dn], i2[dn], x1[:, i1[dn]], x1[:, i2[dn]])
                )
            else:
                print(
                    "x2: %d %d -> %s %s"
                    % (i1[dn], i2[dn], x2[:, i1[dn]], x2[:, i2[dn]])
                )
        if not allow_double:
            raise ValueError
    if join:
        cmap = nm.c_[i1, i2]
        return cmap
    else:
        return i1, i2
def merge_mesh(x1, ngroups1, conns1, x2, ngroups2, conns2, cmap, eps=1e-8):
    """Merge two meshes in common coordinates found in x1, x2.

    `cmap` is the common-node map as returned by find_map(): column 0
    indexes into x1, column 1 into x2.  The merged connectivities keep
    the x1 node numbering and remap the x2 nodes after it.

    Returns (xx, ngroups, conns): merged coordinates, node groups and
    per-group connectivities.

    Raises ValueError when the mapped nodes do not coincide within eps.
    """
    n1 = x1.shape[0]
    n2 = x2.shape[0]
    # Sanity check: mapped nodes must actually coincide.
    # NOTE(review): the last coordinate column is excluded ([:-1]) from
    # the comparison - confirm this is intentional.
    err = nm.sum(nm.sum(nm.abs(x1[cmap[:, 0], :-1] - x2[cmap[:, 1], :-1])))
    if abs(err) > (10.0 * eps):
        print("nonmatching meshes!", err)
        raise ValueError
    # mask selects the x2 nodes that are NOT shared with x1.
    mask = nm.ones((n2,), dtype=nm.int32)
    mask[cmap[:, 1]] = 0
    # New index of each kept x2 node: appended after the n1 x1 nodes;
    # shared x2 nodes are redirected to their x1 counterparts.
    remap = nm.cumsum(mask) + n1 - 1
    remap[cmap[:, 1]] = cmap[:, 0]
    i2 = nm.setdiff1d(nm.arange(n2, dtype=nm.int32), cmap[:, 1])
    xx = nm.r_[x1, x2[i2]]
    ngroups = nm.r_[ngroups1, ngroups2[i2]]
    conns = []
    # range() instead of xrange(): xrange does not exist on Python 3 and
    # range() iterates identically on Python 2.
    for ii in range(len(conns1)):
        conn = nm.vstack((conns1[ii], remap[conns2[ii]]))
        conns.append(conn)
    return xx, ngroups, conns
def fix_double_nodes(coor, ngroups, conns, eps):
    """
    Detect and attempt fixing double nodes in a mesh.
    The double nodes are nodes having the same coordinates
    w.r.t. precision given by `eps`.

    Returns the de-duplicated (coor, ngroups, conns).
    """
    n_nod, dim = coor.shape
    # find_map() against an empty array reports duplicates within coor.
    cmap = find_map(coor, nm.zeros((0, dim)), eps=eps, allow_double=True)
    if cmap.size:
        output("double nodes in input mesh!")
        output("trying to fix...")
    # NOTE(review): n_nod is not updated inside the loop while coor
    # shrinks - confirm multi-pass behavior is correct.
    while cmap.size:
        print(cmap.size)
        # Just like in Variable.equation_mapping()...
        ii = nm.argsort(cmap[:, 1])
        scmap = cmap[ii]
        eq = nm.arange(n_nod)
        # Mark the duplicate of each pair for removal...
        eq[scmap[:, 1]] = -1
        eqi = eq[eq >= 0]
        # ...and renumber the surviving nodes consecutively.
        eq[eqi] = nm.arange(eqi.shape[0])
        remap = eq.copy()
        # Redirect removed nodes to their surviving counterparts.
        remap[scmap[:, 1]] = eq[scmap[:, 0]]
        print(coor.shape)
        coor = coor[eqi]
        ngroups = ngroups[eqi]
        print(coor.shape)
        ccs = []
        for conn in conns:
            ccs.append(remap[conn])
        conns = ccs
        # Repeat until no duplicates remain.
        cmap = find_map(coor, nm.zeros((0, dim)), eps=eps, allow_double=True)
    output("...done")
    return coor, ngroups, conns
def get_min_edge_size(coor, conns):
    """
    Get the smallest edge length.

    All pairs of local vertices within each cell are examined, so this is
    a lower bound on the true minimum edge size for cell types having
    non-adjacent vertex pairs.
    """
    smallest = 1e16
    for conn in conns:
        n_ep = conn.shape[1]
        # Every unordered pair (ir < ic) of local vertex columns.
        for ir in range(n_ep - 1):
            base = coor[conn[:, ir]]
            for ic in range(ir + 1, n_ep):
                delta = coor[conn[:, ic]] - base
                candidate = nm.sqrt(nm.sum(delta ** 2.0, axis=1).min())
                if candidate < smallest:
                    smallest = candidate
    return smallest
##
# 25.05.2007, c
def get_min_vertex_distance(coor, guess):
    """Can miss the minimum, but is enough for our purposes."""
    # Sort by x.
    ix = nm.argsort(coor[:, 0])
    scoor = coor[ix]
    mvd = 1e16
    # Get mvd in chunks potentially smaller than guess.
    n_coor = coor.shape[0]
    print(n_coor)
    i0 = i1 = 0
    x0 = scoor[i0, 0]
    while 1:
        # Grow the window while its x-extent stays below `guess`.
        while ((scoor[i1, 0] - x0) < guess) and (i1 < (n_coor - 1)):
            i1 += 1
        # print i0, i1, x0, scoor[i1,0]
        # Brute-force search within the current window.
        aim, aa1, aa2, aux = get_min_vertex_distance_naive(scoor[i0 : i1 + 1])
        if aux < mvd:
            im, a1, a2 = aim, aa1 + i0, aa2 + i0
        mvd = min(mvd, aux)
        # Advance with half-overlap so pairs straddling a window boundary
        # are not (all) missed.
        # NOTE(review): i0 may reach n_coor before the break check below,
        # which would make scoor[i0, 0] fail - confirm termination for
        # small/degenerate inputs.
        i0 = i1 = int(0.5 * (i1 + i0)) + 1
        # i0 += 1
        x0 = scoor[i0, 0]
        # print '-', i0
        if i1 == n_coor - 1:
            break
    print(im, ix[a1], ix[a2], a1, a2, scoor[a1], scoor[a2])
    return mvd
##
# c: 25.05.2007, r: 05.05.2008
def get_min_vertex_distance_naive(coor):
    """
    Locate the closest pair of vertices by brute force over all pairs.

    Returns (position within the pair list, first vertex index, second
    vertex index, distance) of the minimum-distance pair.
    """
    idx = nm.arange(coor.shape[0])
    cols, rows = nm.meshgrid(idx, idx)
    cols = cols.flatten()
    rows = rows.flatten()
    # Keep each unordered pair exactly once (first index strictly smaller).
    keep = nm.where(cols < rows)
    first, second = cols[keep], rows[keep]
    sq_dist = nm.sum((coor[first] - coor[second]) ** 2.0, axis=1)
    best = sq_dist.argmin()
    return best, first[best], second[best], nm.sqrt(sq_dist[best])
def make_mesh(coor, ngroups, conns, mesh_in):
    """Create a mesh reusing mat_ids and descs of mesh_in."""
    mat_ids = []
    for group, conn in enumerate(conns):
        # Every element of a group inherits the group's original
        # material id.
        group_id = mesh_in.mat_ids[group][0]
        mat_ids.append(nm.full((conn.shape[0],), group_id, dtype=nm.int32))
    return Mesh.from_data(
        "merged mesh", coor, ngroups, conns, mat_ids, mesh_in.descs
    )
def make_inverse_connectivity(conns, n_nod, ret_offsets=True):
    """
    For each mesh node referenced in the connectivity conns, make a list of
    elements it belongs to.

    Parameters
    ----------
    conns : list of arrays
        Connectivities per element group; each row lists a cell's nodes.
    n_nod : int
        The total number of nodes.
    ret_offsets : bool
        If True, return (offsets, iconn) where offsets index per-node
        slices of iconn; otherwise return (n_els, iconn) with per-node
        element counts.

    Returns
    -------
    offsets or n_els : array
    iconn : array
        Flattened (group, element) index pairs, grouped per node.
    """
    from itertools import chain
    # range() instead of xrange(): xrange does not exist on Python 3 and
    # range() iterates identically on Python 2.
    iconn = [[] for ii in range(n_nod)]
    n_els = [0] * n_nod
    for ig, conn in enumerate(conns):
        for iel, row in enumerate(conn):
            for node in row:
                iconn[node].extend([ig, iel])
                n_els[node] += 1
    n_els = nm.array(n_els, dtype=nm.int32)
    iconn = nm.fromiter(chain(*iconn), nm.int32)
    if ret_offsets:
        offsets = nm.cumsum(nm.r_[0, n_els], dtype=nm.int32)
        return offsets, iconn
    else:
        return n_els, iconn
##
# Mesh.
# 13.12.2004, c
# 02.01.2005
class Mesh(Struct):
"""
Contains the FEM mesh together with all utilities related to it.
Input and output is handled by the MeshIO class and subclasses.
The Mesh class only contains the real mesh - nodes, connectivity,
regions, plus methods for doing operations on this mesh.
Example of creating and working with a mesh::
In [1]: from sfepy.fem import Mesh
In [2]: m = Mesh.from_file("meshes/3d/cylinder.vtk")
sfepy: reading mesh (meshes/3d/cylinder.vtk)...
sfepy: ...done in 0.04 s
In [3]: m.coors
Out[3]:
array([[ 1.00000000e-01, 2.00000000e-02, -1.22460635e-18],
[ 1.00000000e-01, 1.80193774e-02, 8.67767478e-03],
[ 1.00000000e-01, 1.24697960e-02, 1.56366296e-02],
...,
[ 8.00298527e-02, 5.21598617e-03, -9.77772215e-05],
[ 7.02544004e-02, 3.61610291e-04, -1.16903153e-04],
[ 3.19633596e-02, -1.00335972e-02, 9.60460305e-03]])
In [4]: m.ngroups
Out[4]: array([0, 0, 0, ..., 0, 0, 0])
In [5]: m.conns
Out[5]:
[array([[ 28, 60, 45, 29],
[ 28, 60, 57, 45],
[ 28, 57, 27, 45],
...,
[353, 343, 260, 296],
[353, 139, 181, 140],
[353, 295, 139, 140]])]
In [6]: m.mat_ids
Out[6]: [array([6, 6, 6, ..., 6, 6, 6])]
In [7]: m.descs
Out[7]: ['3_4']
In [8]: m
Out[8]: Mesh:meshes/3d/cylinder
In [9]: print m
Mesh:meshes/3d/cylinder
conns:
[array([[ 28, 60, 45, 29],
[ 28, 60, 57, 45],
[ 28, 57, 27, 45],
...,
[353, 343, 260, 296],
[353, 139, 181, 140],
[353, 295, 139, 140]])]
coors:
[[ 1.00000000e-01 2.00000000e-02 -1.22460635e-18]
[ 1.00000000e-01 1.80193774e-02 8.67767478e-03]
[ 1.00000000e-01 1.24697960e-02 1.56366296e-02]
...,
[ 8.00298527e-02 5.21598617e-03 -9.77772215e-05]
[ 7.02544004e-02 3.61610291e-04 -1.16903153e-04]
[ 3.19633596e-02 -1.00335972e-02 9.60460305e-03]]
descs:
['3_4']
dim:
3
el_offsets:
[ 0 1348]
io:
None
mat_ids:
[array([6, 6, 6, ..., 6, 6, 6])]
n_e_ps:
[4]
n_el:
1348
n_els:
[1348]
n_nod:
354
name:
meshes/3d/cylinder
ngroups:
[0 0 0 ..., 0 0 0]
setup_done:
0
The Mesh().coors is an array of node coordinates and Mesh().conns is the
list of elements of each type (see Mesh().desc), so for example if you want
to know the coordinates of the nodes of the fifth finite element of the
type 3_4 do::
In [10]: m.descs
Out[10]: ['3_4']
So now you know that the finite elements of the type 3_4 are in a.conns[0]::
In [11]: m.coors[m.conns[0][4]]
Out[11]:
array([[ 1.00000000e-01, 1.80193774e-02, -8.67767478e-03],
[ 1.00000000e-01, 1.32888539e-02, -4.35893200e-04],
[ 1.00000000e-01, 2.00000000e-02, -1.22460635e-18],
[ 9.22857574e-02, 1.95180454e-02, -4.36416134e-03]])
The element ids are of the form "<dimension>_<number of nodes>", i.e.:
- 2_2 ... line
- 2_3 ... triangle
- 2_4 ... quadrangle
- 3_2 ... line
- 3_4 ... tetrahedron
- 3_8 ... hexahedron
"""
def from_surface(surf_faces, mesh_in):
"""
Create a mesh given a set of surface faces and the original mesh.
"""
aux = nm.concatenate([faces.ravel() for faces in surf_faces])
inod = nm.unique(aux)
n_nod = len(inod)
n_nod_m, dim = mesh_in.coors.shape
aux = nm.arange(n_nod, dtype=nm.int32)
remap = nm.zeros((n_nod_m,), nm.int32)
remap[inod] = aux
mesh = Mesh(mesh_in.name + "_surf")
mesh.coors = mesh_in.coors[inod]
mesh.ngroups = mesh_in.ngroups[inod]
sfm = {3: "2_3", 4: "2_4"}
mesh.conns = []
mesh.descs = []
mesh.mat_ids = []
for ii, sf in enumerate(surf_faces):
n_el, n_fp = sf.shape
conn = remap[sf]
mat_id = nm.empty((conn.shape[0],), dtype=nm.int32)
mat_id.fill(ii)
mesh.descs.append(sfm[n_fp])
mesh.conns.append(conn)
mesh.mat_ids.append(mat_id)
mesh._set_shape_info()
return mesh
from_surface = staticmethod(from_surface)
    @staticmethod
    def from_file(filename=None, io="auto", prefix_dir=None, omit_facets=False):
        """
        Read a mesh from a file.
        Parameters
        ----------
        filename : string or function or MeshIO instance or Mesh instance
            The name of file to read the mesh from. For convenience, a
            mesh creation function or a MeshIO instance or directly a Mesh
            instance can be passed in place of the file name.
        io : *MeshIO instance
            Passing *MeshIO instance has precedence over filename.
        prefix_dir : str
            If not None, the filename is relative to that directory.
        omit_facets : bool
            If True, do not read cells of lower dimension than the space
            dimension (faces and/or edges). Only some MeshIO subclasses
            support this!
        """
        # A Mesh instance passes through unchanged.
        if isinstance(filename, Mesh):
            return filename
        if io == "auto":
            if filename is None:
                output("filename or io must be specified!")
                raise ValueError
            else:
                # Select a MeshIO subclass according to the file name.
                io = MeshIO.any_from_filename(filename, prefix_dir=prefix_dir)
        output("reading mesh (%s)..." % (io.filename))
        # NOTE(review): time.clock() was removed in Python 3.8 - this module
        # targets Python 2.
        tt = time.clock()
        trunk = io.get_filename_trunk()
        mesh = Mesh(trunk)
        mesh = io.read(mesh, omit_facets=omit_facets)
        output("...done in %.2f s" % (time.clock() - tt))
        mesh._set_shape_info()
        return mesh
    @staticmethod
    def from_region(
        region,
        mesh_in,
        save_edges=False,
        save_faces=False,
        localize=False,
        is_surface=False,
    ):
        """
        Create a mesh corresponding to a given region.

        Parameters
        ----------
        region : Region
            The region to convert.
        mesh_in : Mesh
            The mesh the region refers to.
        save_edges : bool
            If True, also store the region edges as '1_2' cells.
        save_faces : bool
            If True, also store the region faces (see
            ``_append_region_faces()``).
        localize : bool
            If True, strip nodes not used by the region (see ``localize()``).
        is_surface : bool
            If True, only the region faces are stored (forced).
        """
        mesh = Mesh(mesh_in.name + "_reg")
        mesh.coors = mesh_in.coors.copy()
        mesh.ngroups = mesh_in.ngroups.copy()
        mesh.conns = []
        mesh.descs = []
        mesh.mat_ids = []
        if not is_surface:
            if region.has_cells():
                # Copy the region cells group by group.
                for ig in region.igs:
                    mesh.descs.append(mesh_in.descs[ig])
                    els = region.get_cells(ig)
                    mesh.mat_ids.append(mesh_in.mat_ids[ig][els, :].copy())
                    mesh.conns.append(mesh_in.conns[ig][els, :].copy())
            if save_edges:
                ed = region.domain.ed
                for ig in region.igs:
                    edges = region.get_edges(ig)
                    mesh.descs.append("1_2")
                    # Material ids derived from edge data (1-based).
                    mesh.mat_ids.append(ed.data[edges, 0] + 1)
                    mesh.conns.append(ed.data[edges, -2:].copy())
            if save_faces:
                mesh._append_region_faces(region)
            if save_edges or save_faces:
                # NOTE(review): appends cells built from the region vertices
                # (make_point_cells) with material id -1 - verify intent.
                mesh.descs.append({2: "2_3", 3: "3_4"}[mesh_in.dim])
                mesh.mat_ids.append(-nm.ones_like(region.all_vertices))
                mesh.conns.append(make_point_cells(region.all_vertices, mesh_in.dim))
        else:
            mesh._append_region_faces(region, force_faces=True)
        mesh._set_shape_info()
        if localize:
            mesh.localize(region.all_vertices)
        return mesh
def from_data(name, coors, ngroups, conns, mat_ids, descs, igs=None):
"""
Create a mesh from mesh data.
"""
if igs is None:
igs = range(len(conns))
mesh = Mesh(name)
mesh._set_data(
coors=coors,
ngroups=ngroups,
conns=[conns[ig] for ig in igs],
mat_ids=[mat_ids[ig] for ig in igs],
descs=[descs[ig] for ig in igs],
)
mesh._set_shape_info()
return mesh
from_data = staticmethod(from_data)
    def __init__(self, name="mesh", filename=None, prefix_dir=None, **kwargs):
        """Create a Mesh.
        Parameters
        ----------
        name : str
            Object name.
        filename : str
            Loads a mesh from the specified file, if not None.
        prefix_dir : str
            If not None, the filename is relative to that directory.
        """
        Struct.__init__(self, name=name, **kwargs)
        if filename is None:
            # Empty mesh - data are to be set later (see _set_data()).
            self.io = None
            self.setup_done = 0
        else:
            io = MeshIO.any_from_filename(filename, prefix_dir=prefix_dir)
            output("reading mesh (%s)..." % (io.filename))
            # NOTE(review): time.clock() is Python 2 only (removed in 3.8).
            tt = time.clock()
            io.read(self)
            output("...done in %.2f s" % (time.clock() - tt))
            self._set_shape_info()
    def copy(self, name=None):
        """Make a deep copy of self.
        Parameters
        ----------
        name : str
            Name of the copied mesh.
        """
        # Delegates to Struct.copy() with deep copying forced.
        return Struct.copy(self, deep=True, name=name)
##
# 04.08.2006, c
# 29.09.2006
def _set_shape_info(self):
self.n_nod, self.dim = self.coors.shape
self.n_els = nm.array([conn.shape[0] for conn in self.conns])
self.n_e_ps = nm.array([conn.shape[1] for conn in self.conns])
self.el_offsets = nm.cumsum(nm.r_[0, self.n_els])
self.n_el = nm.sum(self.n_els)
self.dims = [int(ii[0]) for ii in self.descs]
def _set_data(self, coors, ngroups, conns, mat_ids, descs, nodal_bcs=None):
"""
Set mesh data.
Parameters
----------
coors : array
Coordinates of mesh nodes.
ngroups : array
Node groups.
conns : list of arrays
The array of mesh elements (connectivities) for each element group.
mat_ids : list of arrays
The array of material ids for each element group.
descs: list of strings
The element type for each element group.
nodal_bcs : dict of arrays, optional
The nodes defining regions for boundary conditions referred
to by the dict keys in problem description files.
"""
self.coors = nm.ascontiguousarray(coors)
if ngroups is None:
self.ngroups = nm.zeros((self.coors.shape[0],), dtype=nm.int32)
else:
self.ngroups = nm.ascontiguousarray(ngroups)
self.conns = [nm.asarray(conn, dtype=nm.int32) for conn in conns]
self.mat_ids = [nm.asarray(mat_id, dtype=nm.int32) for mat_id in mat_ids]
self.descs = descs
self.nodal_bcs = get_default(nodal_bcs, {})
    def _append_region_faces(self, region, force_faces=False):
        """
        Append the surface faces of `region` to the mesh connectivities,
        as separate '2_3' (triangle) and '2_4' (quadrangle) groups.
        Material ids are taken from ``fa.indices`` + 1.
        """
        fa = region.domain.get_facets(force_faces=force_faces)[1]
        if fa is None:
            return
        for ig in region.igs:
            faces = region.get_surface_entities(ig)
            fdata = fa.facets[faces]
            # Faces with -1 in the last column have only three vertices.
            i3 = nm.where(fdata[:, -1] == -1)[0]
            i4 = nm.where(fdata[:, -1] != -1)[0]
            if i3.size:
                self.descs.append("2_3")
                self.mat_ids.append(fa.indices[i3, 0] + 1)
                # Strip the unused fourth vertex column.
                self.conns.append(fdata[i3, :-1])
            if i4.size:
                self.descs.append("2_4")
                self.mat_ids.append(fa.indices[i4, 0] + 1)
                self.conns.append(fdata[i4])
    def write(
        self,
        filename=None,
        io=None,
        coors=None,
        igs=None,
        out=None,
        float_format=None,
        lc_all="C",
        **kwargs
    ):
        """
        Write mesh + optional results in `out` to a file.
        Parameters
        ----------
        :param filename: str, optional
            The file name. If None, the mesh name is used instead.
        :param io : MeshIO instance or 'auto', optional
            Passing 'auto' respects the extension of `filename`.
        :param coors: array, optional
            The coordinates that can be used instead of the mesh coordinates.
        :param igs: array_like, optional
            Passing a list of group ids selects only those groups for writing.
        :param out: dict, optional
            The output data attached to the mesh vertices and/or cells.
        :param float_format: str, optional
            The format string used to print floats in case of a text file
            format.
        :param lc_all: "C" or None. Locale system settings. It can be used to
            specify float format with dot or comma.
            Dot format (f.e. 1.23) if "C" is used.
            If is set to None, no operation is done. Format is system default.
        **kwargs : dict, optional
            Additional arguments that can be passed to the `MeshIO` instance.
        """
        if filename is None:
            filename = self.name + ".mesh"
        # Fall back: explicit io -> stored io -> deduce from the file name.
        if io is None:
            io = self.io
        if io is None:
            io = "auto"
        if io == "auto":
            io = MeshIO.any_from_filename(filename)
        if coors is None:
            coors = self.coors
        if igs is None:
            igs = range(len(self.conns))
        # Build an auxiliary mesh restricted to the selected groups.
        aux_mesh = Mesh.from_data(
            self.name, coors, self.ngroups, self.conns, self.mat_ids, self.descs, igs
        )
        if lc_all is not None:
            # Force the locale so floats are written with the expected
            # decimal separator (process-wide side effect!).
            import locale
            locale.setlocale(locale.LC_ALL, lc_all)
        io.set_float_format(float_format)
        io.write(filename, aux_mesh, out, **kwargs)
##
# 23.05.2007, c
def get_bounding_box(self):
return nm.vstack((nm.amin(self.coors, 0), nm.amax(self.coors, 0)))
    def get_element_coors(self, ig=None):
        """
        Get the coordinates of vertices elements in group `ig`.
        Parameters
        ----------
        ig : int, optional
            The element group. If None, the coordinates for all groups
            are returned, filled with zeros at places of missing
            vertices, i.e. where elements having less then the full number
            of vertices (`n_ep_max`) are.
        Returns
        -------
        coors : array
            The coordinates in an array of shape `(n_el, n_ep_max, dim)`.
        """
        # NOTE(review): the `ig` argument is never used - the loop below
        # rebinds it and all groups are always returned.
        # NOTE(review): the array is allocated with nm.empty(), so the
        # missing-vertex entries are uninitialized, not zero as the
        # docstring claims.
        cc = self.coors
        n_ep_max = self.n_e_ps.max()
        coors = nm.empty((self.n_el, n_ep_max, self.dim), dtype=cc.dtype)
        for ig, conn in enumerate(self.conns):
            i1, i2 = self.el_offsets[ig], self.el_offsets[ig + 1]
            coors[i1:i2, : conn.shape[1], :] = cc[conn]
        return coors
    def localize(self, inod):
        """
        Strips nodes not in inod and remaps connectivities.
        Omits elements where remap[conn] contains -1...
        """
        # -1 marks nodes that are being removed.
        remap = nm.empty((self.n_nod,), dtype=nm.int32)
        remap.fill(-1)
        remap[inod] = nm.arange(inod.shape[0], dtype=nm.int32)
        self.coors = self.coors[inod]
        self.ngroups = self.ngroups[inod]
        conns = []
        mat_ids = []
        for ig, conn in enumerate(self.conns):
            if conn.shape[0] == 0:
                continue
            aux = remap[conn]
            # Rows (elements) referencing any removed node...
            ii = nm.unique(nm.where(aux == -1)[0])
            # ...are dropped; keep the complement.
            ii = nm.setdiff1d(nm.arange(conn.shape[0], dtype=nm.int32), ii)
            conns.append(aux[ii])
            mat_ids.append(self.mat_ids[ig][ii])
        self.conns = conns
        self.mat_ids = mat_ids
        self._set_shape_info()
def transform_coors(self, mtx_t, ref_coors=None):
"""
Transform coordinates of the mesh by the given transformation matrix.
Parameters
----------
mtx_t : array
The transformation matrix `T` (2D array). It is applied
depending on its shape:
- `(dim, dim): x = T * x`
- `(dim, dim + 1): x = T[:, :-1] * x + T[:, -1]`
ref_coors : array, optional
Alternative coordinates to use for the transformation instead
of the mesh coordinates, with the same shape as `self.coors`.
"""
if ref_coors is None:
ref_coors = self.coors
if mtx_t.shape[1] > self.coors.shape[1]:
self.coors[:] = nm.dot(ref_coors, mtx_t[:, :-1].T) + mtx_t[:, -1]
else:
self.coors[:] = nm.dot(ref_coors, mtx_t.T)
# def create_conn_graph(self, verbose=True):
# """
# Create a graph of mesh connectivity.
# Returns
# -------
# graph : csr_matrix
# The mesh connectivity graph as a SciPy CSR matrix.
# """
# from extmods.mesh import create_mesh_graph
# shape = (self.n_nod, self.n_nod)
# output('graph shape:', shape, verbose=verbose)
# if nm.prod(shape) == 0:
# output('no graph (zero size)!', verbose=verbose)
# return None
# output('assembling mesh graph...', verbose=verbose)
# tt = time.clock()
# nnz, prow, icol = create_mesh_graph(shape[0], shape[1],
# len(self.conns),
# self.conns, self.conns)
# output('...done in %.2f s' % (time.clock() - tt), verbose=verbose)
# output('graph nonzeros: %d (%.2e%% fill)' \
# % (nnz, float(nnz) / nm.prod(shape)))
# data = nm.ones((nnz,), dtype=nm.bool)
# graph = sp.csr_matrix((data, icol, prow), shape)
# return graph
    def explode_groups(self, eps, return_emap=False):
        """
        Explode the mesh element groups by `eps`, i.e. split group
        interface nodes and shrink each group towards its centre by
        `eps`.
        Parameters
        ----------
        eps : float in `[0.0, 1.0]`
            The group shrinking factor.
        return_emap : bool, optional
            If True, also return the mapping against original mesh
            coordinates that result in the exploded mesh coordinates.
            The mapping can be used to map mesh vertex data to the
            exploded mesh vertices.
        Returns
        -------
        mesh : Mesh
            The new mesh with exploded groups.
        emap : spmatrix, optional
            The maping for exploding vertex values. Only provided if
            `return_emap` is True.
        """
        assert_(0.0 <= eps <= 1.0)
        remap = nm.empty((self.n_nod,), dtype=nm.int32)
        offset = 0
        if return_emap:
            rows, cols = [], []
        coors = []
        ngroups = []
        conns = []
        mat_ids = []
        descs = []
        for ig, conn in enumerate(self.conns):
            nodes = nm.unique(conn)
            group_coors = self.coors[nodes]
            n_nod = group_coors.shape[0]
            # Scale the group nodes towards the group centre by eps
            # (eps == 1 keeps positions, eps == 0 collapses to the centre).
            centre = group_coors.sum(axis=0) / float(n_nod)
            vectors = group_coors - centre[None, :]
            new_coors = centre + (vectors * eps)
            # Every group gets its own copy of (former interface) nodes.
            remap[nodes] = nm.arange(n_nod, dtype=nm.int32) + offset
            new_conn = remap[conn]
            coors.append(new_coors)
            ngroups.append(self.ngroups[nodes])
            conns.append(new_conn)
            mat_ids.append(self.mat_ids[ig])
            descs.append(self.descs[ig])
            offset += n_nod
            if return_emap:
                cols.append(nodes)
                rows.append(remap[nodes])
        coors = nm.concatenate(coors, axis=0)
        ngroups = nm.concatenate(ngroups, axis=0)
        mesh = Mesh.from_data(
            "exploded_" + self.name, coors, ngroups, conns, mat_ids, descs
        )
        if return_emap:
            # One 1.0 entry per (new node, original node) pair.
            rows = nm.concatenate(rows)
            cols = nm.concatenate(cols)
            data = nm.ones(rows.shape[0], dtype=nm.float64)
            emap = sp.coo_matrix((data, (rows, cols)), shape=(mesh.n_nod, self.n_nod))
            return mesh, emap
        else:
            return mesh
| {
"repo_name": "mjirik/dicom2fem",
"path": "dicom2fem/mesh.py",
"copies": "1",
"size": "27575",
"license": "bsd-3-clause",
"hash": -6933944286560148000,
"line_mean": 29.948372615,
"line_max": 86,
"alpha_frac": 0.5108612874,
"autogenerated": false,
"ratio": 3.3375695957395304,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.934574200141538,
"avg_score": 0.0005377763448299989,
"num_lines": 891
} |
# Adopted form SfePy project, see http://sfepy.org
# Thanks to Robert Cimrman
import time, sys, os
from copy import copy, deepcopy
from types import UnboundMethodType
import numpy as nm
import scipy.sparse as sp
# Scalar dtypes treated as real/complex by this module.
real_types = [nm.float64]
complex_types = [nm.complex128]
# Abbreviate long array printouts.
nm.set_printoptions( threshold = 100 )
def output(mesg):
    """Print a message to standard output."""
    print(mesg)
##
# 22.09.2005, c
# 24.10.2005
# Provide a minimal sorted() substitute for Python < 2.4, which had no
# such builtin (copy + in-place sort; no key/reverse support).
if sys.version[:5] < '2.4.0':
    def sorted( sequence ):
        tmp = copy( sequence )
        tmp.sort()
        return tmp
# Portable base string type: basestring on Python 2, str on Python 3.
if sys.version[0] < '3':
    basestr = basestring
else:
    basestr = str
def get_debug():
    """
    Utility function providing ``debug()`` function.
    """
    try:
        import IPython
    except ImportError:
        debug = None
    else:
        # Remember the current excepthook so debug() can restore it -
        # an embedded IPython shell may replace it.
        old_excepthook = sys.excepthook
        def debug(frame=None):
            # NOTE(review): lexicographic version comparison; works for
            # the '0.x' versions this code targets.
            if IPython.__version__ >= '0.11':
                from IPython.core.debugger import Pdb
                try:
                    # Reuse an already-running IPython instance if any.
                    ip = get_ipython()
                except NameError:
                    from IPython.frontend.terminal.embed \
                         import InteractiveShellEmbed
                    ip = InteractiveShellEmbed()
                colors = ip.colors
            else:
                # Legacy (pre-0.11) IPython API.
                from IPython.Debugger import Pdb
                from IPython.Shell import IPShell
                from IPython import ipapi
                ip = ipapi.get()
                if ip is None:
                    IPShell(argv=[''])
                    ip = ipapi.get()
                colors = ip.options.colors
            sys.excepthook = old_excepthook
            if frame is None:
                # Debug the caller's frame by default.
                frame = sys._getframe().f_back
            Pdb(colors).set_trace(frame)
    if debug is None:
        # No IPython available - fall back to plain pdb.
        import pdb
        debug = pdb.set_trace
    debug.__doc__ = """
    Start debugger on line where it is called, roughly equivalent to::
        import pdb; pdb.set_trace()
    First, this function tries to start an `IPython`-enabled
    debugger using the `IPython` API.
    When this fails, the plain old `pdb` is used instead.
    """
    return debug
# Module-level debug() entry point.
debug = get_debug()
def mark_time(times, msg=None):
    """
    Time measurement utility.
    Measures times of execution between subsequent calls using
    time.clock(). The time is printed if the msg argument is not None.
    Examples
    --------
    >>> times = []
    >>> mark_time(times)
    ... do something
    >>> mark_time(times, 'elapsed')
    elapsed 0.1
    ... do something else
    >>> mark_time(times, 'elapsed again')
    elapsed again 0.05
    >>> times
    [0.10000000000000001, 0.050000000000000003]
    """
    # NOTE(review): time.clock() and the print statement below are
    # Python 2 only (time.clock() was removed in Python 3.8).
    tt = time.clock()
    times.append(tt)
    if (msg is not None) and (len(times) > 1):
        print msg, times[-1] - times[-2]
def import_file(filename, package_name=None):
    """
    Import a file as a module. The module is explicitly reloaded to
    prevent undesirable interactions.

    Parameters
    ----------
    filename : str
        Path of the Python file to import.
    package_name : str, optional
        If given, import the module as `package_name.<module>`.

    Returns
    -------
    mod : module
        The imported (and possibly reloaded) module object.
    """
    path = os.path.dirname(filename)
    # Temporarily extend sys.path so the bare module name resolves.
    if not path in sys.path:
        sys.path.append( path )
        remove_path = True
    else:
        remove_path = False
    name = os.path.splitext(os.path.basename(filename))[0]
    # Reload when a module of this name was already imported before.
    if name in sys.modules:
        force_reload = True
    else:
        force_reload = False
    if package_name is not None:
        mod = __import__('.'.join((package_name, name)), fromlist=[name])
    else:
        mod = __import__(name)
    if force_reload:
        # NOTE(review): builtin reload() is Python 2 only.
        reload(mod)
    if remove_path:
        # Undo the sys.path extension made above.
        sys.path.pop(-1)
    return mod
def try_imports(imports, fail_msg=None):
    """
    Try import statements until one succeeds.
    Parameters
    ----------
    imports : list
        The list of import statements.
    fail_msg : str
        If not None and no statement succeeds, a `ValueError` is raised with
        the given message, appended to all failed messages.
    Returns
    -------
    locals : dict
        The dictionary of imported modules.
    """
    # NOTE(review): `exec imp` and `except Exception, inst` are Python 2
    # only syntax. The returned locals() also contains the helper
    # variables of this function (msgs, imp, ...), not just the imports.
    msgs = []
    for imp in imports:
        try:
            exec imp
            break
        except Exception, inst:
            msgs.append(str(inst))
    else:
        # No import succeeded (the loop was not broken out of).
        if fail_msg is not None:
            msgs.append(fail_msg)
            raise ValueError('\n'.join(msgs))
    return locals()
def assert_(condition, msg='assertion failed!'):
    """Validation helper: raise ValueError(`msg`) when `condition` is false.

    Unlike the ``assert`` statement, this check is not stripped under
    ``python -O``.
    """
    if condition:
        return
    raise ValueError(msg)
##
# c: 06.04.2005, r: 05.05.2008
def pause( msg = None ):
    """
    Prints the line number and waits for a keypress.
    If you press:
    "q" ............. it will call sys.exit()
    any other key ... it will continue execution of the program
    This is useful for debugging.
    """
    # Inspect the caller's frame to report file/function/line.
    f = sys._getframe(1)
    ff = f.f_code
    # NOTE(review): Python 2 print statements below.
    if (msg):
        print '%s, %d: %s(), %d: %s' % (ff.co_filename, ff.co_firstlineno,
                                        ff.co_name, f.f_lineno, msg)
    else:
        print '%s, %d: %s(), %d' % (ff.co_filename, ff.co_firstlineno,
                                    ff.co_name, f.f_lineno)
    # NOTE(review): spause() is not defined in this part of the file -
    # presumably provided elsewhere in the module; verify.
    spause()
##
# 02.01.2005
class Struct( object ):
    """
    General-purpose bag of attributes: keyword arguments passed to the
    constructor become instance attributes. Provides merging (`+`, `+=`),
    pretty printing (`__str__`, `str_all`) and dict-like helpers
    (`get`, `update`, `set_default`, `to_dict`, `copy`).
    """
    # 03.10.2005, c
    # 26.10.2005
    def __init__( self, **kwargs ):
        if kwargs:
            self.__dict__.update( kwargs )
    def _format_sequence(self, seq, threshold):
        """Format `seq` for printing; sequences longer than `threshold`
        show only the edge items around an ellipsis."""
        # NOTE(review): integer division - under Python 3 this would be a
        # float and break the slicing below (Python 2 module).
        threshold_half = threshold / 2
        if len(seq) > threshold:
            out = ', '.join(str(ii) for ii in seq[:threshold_half]) \
                  + ', ..., ' \
                  + ', '.join(str(ii) for ii in seq[-threshold_half:])
        else:
            out = str(seq)
        return out
    # 08.03.2005
    def __str__(self):
        """Print instance class, name and items in alphabetical order.
        If the class instance has '_str_attrs' attribute, only the attributes
        listed there are taken into account. Other attributes are provided only
        as a list of attribute names (no values).
        For attributes that are Struct instances, if
        the listed attribute name ends with '.', the attribute is printed fully
        by calling str(). Otherwise only its class name/name are printed.
        Attributes that are NumPy arrays or SciPy sparse matrices are
        printed in a brief form.
        Only keys of dict attributes are printed. For the dict keys as
        well as list or tuple attributes only several edge items are
        printed if their length is greater than the threshold value 20.
        """
        return self._str()
    def _str(self, keys=None, threshold=20):
        """Build the string representation restricted to `keys`
        (see __str__() for the formatting rules)."""
        ss = '%s' % self.__class__.__name__
        if hasattr(self, 'name'):
            ss += ':%s' % self.name
        ss += '\n'
        if keys is None:
            keys = self.__dict__.keys()
        str_attrs = sorted(self.get('_str_attrs', keys))
        printed_keys = []
        for key in str_attrs:
            if key[-1] == '.':
                # A trailing '.' requests full printing of the attribute.
                key = key[:-1]
                full_print = True
            else:
                full_print = False
            printed_keys.append(key)
            try:
                val = getattr(self, key)
            except AttributeError:
                continue
            if isinstance(val, Struct):
                if not full_print:
                    # Brief form: class name (and name attribute) only.
                    ss += ' %s:\n %s' % (key, val.__class__.__name__)
                    if hasattr(val, 'name'):
                        ss += ':%s' % val.name
                    ss += '\n'
                else:
                    aux = '\n' + str(val)
                    aux = aux.replace('\n', '\n ');
                    ss += ' %s:\n%s\n' % (key, aux[1:])
            elif isinstance(val, dict):
                # Only the keys of dict attributes are printed.
                sval = self._format_sequence(val.keys(), threshold)
                sval = sval.replace('\n', '\n ')
                ss += ' %s:\n dict with keys: %s\n' % (key, sval)
            elif isinstance(val, list):
                sval = self._format_sequence(val, threshold)
                sval = sval.replace('\n', '\n ')
                ss += ' %s:\n list: %s\n' % (key, sval)
            elif isinstance(val, tuple):
                sval = self._format_sequence(val, threshold)
                sval = sval.replace('\n', '\n ')
                ss += ' %s:\n tuple: %s\n' % (key, sval)
            elif isinstance(val, nm.ndarray):
                # Arrays print as shape/dtype summaries only.
                ss += ' %s:\n %s array of %s\n' \
                      % (key, val.shape, val.dtype)
            elif isinstance(val, sp.spmatrix):
                ss += ' %s:\n %s spmatrix of %s, %d nonzeros\n' \
                      % (key, val.shape, val.dtype, val.nnz)
            else:
                aux = '\n' + str(val)
                aux = aux.replace('\n', '\n ');
                ss += ' %s:\n%s\n' % (key, aux[1:])
        # Attributes not selected for printing are listed by name only.
        other_keys = sorted(set(keys).difference(set(printed_keys)))
        if len(other_keys):
            ss += ' other attributes:\n %s\n' \
                  % '\n '.join(key for key in other_keys)
        return ss.rstrip()
    def __repr__( self ):
        ss = "%s" % self.__class__.__name__
        if hasattr( self, 'name' ):
            ss += ":%s" % self.name
        return ss
    ##
    # 28.08.2007, c
    def __add__( self, other ):
        """Merge Structs. Attributes of new are those of self unless an
        attribute and its counterpart in other are both Structs - these are
        merged then."""
        new = copy( self )
        for key, val in other.__dict__.iteritems():
            if hasattr( new, key ):
                sval = getattr( self, key )
                if issubclass( sval.__class__, Struct ) and \
                       issubclass( val.__class__, Struct ):
                    # Both are Structs -> merge recursively.
                    setattr( new, key, sval + val )
                else:
                    # Otherwise self's value wins.
                    setattr( new, key, sval )
            else:
                setattr( new, key, val )
        return new
    ##
    # 28.08.2007, c
    def __iadd__( self, other ):
        """Merge Structs in place. Attributes of self are left unchanged
        unless an attribute and its counterpart in other are both Structs -
        these are merged then."""
        for key, val in other.__dict__.iteritems():
            if hasattr( self, key ):
                sval = getattr( self, key )
                if issubclass( sval.__class__, Struct ) and \
                       issubclass( val.__class__, Struct ):
                    setattr( self, key, sval + val )
            else:
                setattr( self, key, val )
        return self
    def str_class(self):
        """
        As __str__(), but for class attributes.
        """
        return self._str(self.__class__.__dict__.keys())
    # 08.03.2005, c
    def str_all( self ):
        """Print all attributes fully, recursing into Struct values."""
        ss = "%s\n" % self.__class__
        for key, val in self.__dict__.iteritems():
            if issubclass( self.__dict__[key].__class__, Struct ):
                ss += " %s:\n" % key
                aux = "\n" + self.__dict__[key].str_all()
                aux = aux.replace( "\n", "\n " );
                ss += aux[1:] + "\n"
            else:
                aux = "\n" + str( val )
                aux = aux.replace( "\n", "\n " );
                ss += " %s:\n%s\n" % (key, aux[1:])
        return( ss.rstrip() )
    ##
    # 09.07.2007, c
    def to_dict( self ):
        # Shallow copy of the attribute dictionary.
        return copy( self.__dict__ )
    def get(self, key, default=None, msg_if_none=None):
        """
        A dict-like get() for Struct attributes.
        """
        out = getattr(self, key, default)
        if (out is None) and (msg_if_none is not None):
            raise ValueError(msg_if_none)
        return out
    def update(self, other, **kwargs):
        """
        A dict-like update for Struct attributes.
        """
        if other is None: return
        if not isinstance(other, dict):
            # Accept another Struct (or anything with to_dict()).
            other = other.to_dict()
        self.__dict__.update(other, **kwargs)
    def set_default(self, key, default=None):
        """
        Behaves like dict.setdefault().
        """
        return self.__dict__.setdefault(key, default)
    def copy(self, deep=False, name=None):
        """Make a (deep) copy of self.
        Parameters:
        deep : bool
            Make a deep copy.
        name : str
            Name of the copy, with default self.name + '_copy'.
        """
        if deep:
            other = deepcopy(self)
        else:
            other = copy(self)
        if hasattr(self, 'name'):
            other.name = get_default(name, self.name + '_copy')
        return other
#
# 12.07.2007, c
class IndexedStruct( Struct ):
    """Struct whose attributes can also be accessed as items, i.e.
    ``obj['key']`` is equivalent to ``obj.key``."""
    def __getitem__( self, key ):
        """Return the attribute called `key`."""
        return getattr( self, key )
    def __setitem__( self, key, val ):
        """Bind the attribute called `key` to `val`."""
        setattr( self, key, val )
##
# 14.07.2006, c
class Container( Struct ):
def __init__( self, objs = None, **kwargs ):
Struct.__init__( self, **kwargs )
if objs is not None:
self._objs = objs
self.update()
else:
self._objs = []
self.names = []
def update( self, objs = None ):
if objs is not None:
self._objs = objs
self.names = [obj.name for obj in self._objs]
def __setitem__(self, ii, obj):
try:
if isinstance(ii, basestr):
if ii in self.names:
ii = self.names.index(ii)
else:
ii = len(self.names)
elif not isinstance(ii, int):
raise ValueError('bad index type! (%s)' % type(ii))
if ii >= len(self.names):
self._objs.append( obj )
self.names.append( obj.name )
else:
self._objs[ii] = obj
self.names[ii] = obj.name
except (IndexError, ValueError), msg:
raise IndexError(msg)
def __getitem__(self, ii):
try:
if isinstance(ii, basestr):
ii = self.names.index(ii)
elif not isinstance( ii, int ):
raise ValueError('bad index type! (%s)' % type(ii))
return self._objs[ii]
except (IndexError, ValueError), msg:
raise IndexError(msg)
def __iter__( self ):
return self._objs.__iter__()
##
# 18.07.2006, c
def __len__( self ):
return len( self._objs )
def insert(self, ii, obj):
self._objs.insert(ii, obj)
self.names.insert(ii, obj.name)
def append( self, obj ):
self[len(self.names)] = obj
def extend(self, objs):
"""
Extend the container items by the sequence `objs`.
"""
for obj in objs:
self.append(obj)
def get(self, ii, default=None, msg_if_none=None):
"""
Get an item from Container - a wrapper around
Container.__getitem__() with defaults and custom error message.
Parameters
----------
ii : int or str
The index or name of the item.
default : any, optional
The default value returned in case the item `ii` does not exist.
msg_if_none : str, optional
If not None, and if `default` is None and the item `ii` does
not exist, raise ValueError with this message.
"""
try:
out = self[ii]
except (IndexError, ValueError):
if default is not None:
out = default
else:
if msg_if_none is not None:
raise ValueError(msg_if_none)
else:
raise
return out
def remove_name( self, name ):
ii = self.names.index[name]
del self.names[ii]
del self._objs[ii]
##
# dict-like methods.
def itervalues( self ):
return self._objs.__iter__()
def iterkeys( self ):
return self.get_names().__iter__()
def iteritems( self ):
for obj in self._objs:
yield obj.name, obj
##
# 20.09.2006, c
def has_key( self, ii ):
if isinstance( ii, int ):
if (ii < len( self )) and (ii >= (-len( self ))):
return True
else:
return False
elif isinstance(ii, basestr):
try:
self.names.index( ii )
return True
except:
return False
else:
raise IndexError('unsupported index type: %s' % ii)
##
# 12.06.2007, c
def print_names( self ):
print [obj.name for obj in self._objs]
def get_names( self ):
return [obj.name for obj in self._objs]
def as_dict(self):
"""
Return stored objects in a dictionary with object names as keys.
"""
out = {}
for key, val in self.iteritems():
out[key] = val
return out
##
# 30.11.2004, c
# 01.12.2004
# 01.12.2004
class OneTypeList( list ):
    """
    List accepting only instances of a single class (`item_class`);
    items can also be looked up by their `name` attribute.
    """
    def __init__(self, item_class, seq=None):
        self.item_class = item_class
        if seq is not None:
            for obj in seq:
                self.append(obj)
    def __setitem__( self, key, value ):
        """Type-checked item/slice assignment."""
        if (type( value ) in (list, tuple)):
            # Slice assignment: check every element.
            for ii, val in enumerate( value ):
                if not isinstance(val, self.item_class):
                    raise TypeError
        else:
            if not isinstance(value, self.item_class):
                raise TypeError
        list.__setitem__( self, key, value )
    ##
    # 21.11.2005, c
    def __getitem__( self, ii ):
        """Get an item by integer index or by its `name` attribute."""
        if isinstance( ii, int ):
            return list.__getitem__( self, ii )
        elif isinstance(ii, basestr):
            ir = self.find( ii, ret_indx = True )
            if ir:
                return list.__getitem__( self, ir[0] )
            else:
                # NOTE(review): Python 2 only raise syntax.
                raise IndexError, ii
        else:
            raise IndexError, ii
    def __str__( self ):
        ss = "[\n"
        for ii in self:
            aux = "\n" + ii.__str__()
            aux = aux.replace( "\n", "\n " );
            ss += aux[1:] + "\n"
        ss += "]"
        return( ss )
    def find( self, name, ret_indx = False ):
        """Linear search by `name`; returns the item (or (index, item)
        when `ret_indx` is set), or None when not found."""
        for ii, item in enumerate( self ):
            if item.name == name:
                if ret_indx:
                    return ii, item
                else:
                    return item
        return None
    ##
    # 12.06.2007, c
    def print_names( self ):
        print [ii.name for ii in self]
    def get_names( self ):
        return [ii.name for ii in self]
def print_structs(objs):
    """Print Struct instances in a container, works recursively. Debugging
    utility function."""
    # NOTE(review): Python 2 print statements and dict.iteritems().
    if isinstance(objs, dict):
        for key, vals in objs.iteritems():
            print key
            print_structs(vals)
    elif isinstance(objs, list):
        for vals in objs:
            print_structs(vals)
    else:
        # Leaf value - print it directly.
        print objs
def iter_dict_of_lists(dol, return_keys=False):
    """
    Iterate over all items in a dictionary of lists.

    Parameters
    ----------
    dol : dict
        Dictionary whose values are lists (or other sequences).
    return_keys : bool
        If True, yield `(key, index, value)` triples instead of the bare
        values.
    """
    # .items() instead of the Python 2-only .iteritems(): identical
    # iteration behavior on Python 2, and works on Python 3 as well.
    for key, vals in dol.items():
        for ii, val in enumerate(vals):
            if return_keys:
                yield key, ii, val
            else:
                yield val
##
# 19.07.2005, c
# 26.05.2006
# 17.10.2007
def dict_to_struct( *args, **kwargs ):
    """Convert a dict instance to a Struct instance.

    Recognized keyword arguments:
    - `level` : int, the current recursion depth (internal use).
    - `flag` : tuple of 0/1 per nesting level; 1 converts that level to
      a `constructor` instance, 0 keeps it a plain dict. Missing levels
      default to 0.
    - `constructor` : class used for converted levels (default Struct).
      Note: it is not forwarded to recursive calls, so nested converted
      levels always use Struct.
    """
    try:
        level = kwargs['level']
    except:
        level = 0
    try:
        flag = kwargs['flag']
    except:
        flag = (1,)
    # For level 0 only...
    try:
        constructor = kwargs['constructor']
    except:
        constructor = Struct
    out = []
    for arg in args:
        if type( arg ) == dict:
            # flag[level] decides the representation of this level.
            if flag[level]:
                aux = constructor()
            else:
                aux = {}
            for key, val in arg.iteritems():
                if type( val ) == dict:
                    try:
                        flag[level+1]
                    except:
                        # Extend flag with 0 (keep dict) for deeper levels.
                        flag = flag + (0,)
                    val2 = dict_to_struct( val, level = level + 1, flag = flag )
                    if flag[level]:
                        aux.__dict__[key] = val2
                    else:
                        aux[key] = val2
                else:
                    if flag[level]:
                        aux.__dict__[key] = val
                    else:
                        aux[key] = val
            out.append( aux )
        else:
            # Non-dict arguments pass through unchanged.
            out.append( arg )
    # A single argument is returned unwrapped.
    if len( out ) == 1:
        out = out[0]
    return out
##
# 23.01.2006, c
def is_sequence( var ):
    """Return True when `var` is a list or a tuple (or a subclass),
    False otherwise."""
    return isinstance( var, (tuple, list) )
##
# 17.10.2007, c
def is_derived_class( cls, parent ):
    """Return True when `cls` is a strict subclass of `parent`
    (i.e. a subclass other than `parent` itself)."""
    if cls is parent:
        return False
    return issubclass( cls, parent )
##
# 23.10.2007, c
def insert_static_method( cls, function ):
    """Attach `function` to class `cls` as a static method, under the
    function's own name."""
    method = staticmethod( function )
    setattr( cls, function.__name__, method )
##
# 23.10.2007, c
def insert_method( instance, function ):
    """Attach `function` to `instance` as a bound method, under the
    function's own name."""
    # NOTE(review): types.UnboundMethodType is Python 2 only.
    setattr( instance, function.__name__,
             UnboundMethodType( function, instance, instance.__class__ ) )
def use_method_with_name( instance, method, new_name ):
    """Expose `method` on `instance` under the attribute name `new_name`."""
    setattr( instance, new_name, method )
def insert_as_static_method( cls, name, function ):
    """Attach `function` to class `cls` as a static method called `name`."""
    wrapped = staticmethod( function )
    setattr( cls, name, wrapped )
def find_subclasses(context, classes, omit_unnamed=False, name_attr='name'):
    """Find subclasses of the given classes in the given context.

    Parameters
    ----------
    context : dict
        Name -> object mapping to search, e.g. ``vars()`` of a module.
    classes : list
        The base classes whose strict subclasses are collected.
    omit_unnamed : bool
        If True, skip subclasses without a true `name_attr` value.
    name_attr : str
        The attribute used as the table key, when present.

    Returns
    -------
    table : dict
        Mapping of names to the found subclasses.

    Examples
    --------
    >>> solver_table = find_subclasses(vars(),
                                       [LinearSolver, NonlinearSolver,
                                        TimeSteppingSolver, EigenvalueSolver,
                                        OptimizationSolver])
    """
    # (Docstring example fixed: the code below calls context.items()
    # itself, so `context` must be the dict, not its items.)
    var_dict = context.items()
    table = {}
    for key, var in var_dict:
        try:
            for cls in classes:
                if is_derived_class(var, cls):
                    if hasattr(var, name_attr):
                        key = getattr(var, name_attr)
                        if omit_unnamed and not key:
                            continue
                    elif omit_unnamed:
                        continue
                    else:
                        # NOTE(review): for a class object this yields the
                        # metaclass name - verify this fallback is intended.
                        key = var.__class__.__name__
                    table[key] = var
                    break
        except TypeError:
            # is_derived_class() raises TypeError for non-class objects.
            pass
    return table
def load_classes(filenames, classes, package_name=None, ignore_errors=False,
                 name_attr='name'):
    """
    For each filename in filenames, load all subclasses of classes listed.

    Parameters
    ----------
    filenames : list of str
        The files to import.
    classes : list
        The base classes whose subclasses are collected.
    package_name : str, optional
        Passed to ``import_file()``.
    ignore_errors : bool
        If True, only warn about modules that cannot be imported.
    name_attr : str
        Passed to ``find_subclasses()``.

    Returns
    -------
    table : dict
        Mapping of names to the found subclasses, merged over all files.
    """
    table = {}
    for filename in filenames:
        if not ignore_errors:
            mod = import_file(filename, package_name=package_name)
        else:
            try:
                mod = import_file(filename, package_name=package_name)
            except:
                output('WARNING: module %s cannot be imported!' % filename)
                # Fixed: output() takes a single message argument - the
                # original passed two arguments here, raising TypeError.
                output('reason:\n%s' % sys.exc_info()[1])
                continue
        table.update(find_subclasses(vars(mod), classes, omit_unnamed=True,
                                     name_attr=name_attr))
    return table
def update_dict_recursively(dst, src, tuples_too=False,
                            overwrite_by_none=True):
    """
    Update `dst` dictionary recursively using items in `src` dictionary.
    Parameters
    ----------
    dst : dict
        The destination dictionary.
    src : dict
        The source dictionary.
    tuples_too : bool
        If True, recurse also into dictionaries that are members of tuples.
    overwrite_by_none : bool
        If False, do not overwrite destination dictionary values by None.
    Returns
    -------
    dst : dict
        The destination dictionary.
    """
    def tuplezip(a):
        # `a` is a (src_item, dst_item) pair: recurse into dict pairs
        # (destination values win), otherwise keep the destination item.
        if isinstance(a[0], dict) and isinstance(a[1], dict):
            return update_dict_recursively(a[0], a[1], True)
        return a[1]
    for key in src:
        if key in dst:
            if isinstance(src[key], dict) and isinstance(dst[key], dict):
                dst[key] = update_dict_recursively(dst[key],
                                                   src[key], tuples_too)
                continue
            if tuples_too and isinstance(dst[key], tuple) \
                   and isinstance(src[key], tuple):
                # Fixed: the original sliced the result of map(), which is
                # Python 2 only and redundant anyway - zip() already
                # truncates to min(len(src[key]), len(dst[key])).
                dst[key] = tuple(map(tuplezip, zip(src[key], dst[key])))
                continue
        if overwrite_by_none or not src[key] is None:
            dst[key] = src[key]
    return dst
def edit_tuple_strings(str_tuple, old, new, recur=False):
    """
    Replace substrings `old` with `new` in items of tuple
    `str_tuple`. Non-string items are just copied to the new tuple.
    Parameters
    ----------
    str_tuple : tuple
        The tuple with string values.
    old : str
        The old substring.
    new : str
        The new substring.
    recur : bool
        If True, edit items that are tuples recursively.
    Returns
    -------
    new_tuple : tuple
        The tuple with edited strings.
    """
    edited = []
    for entry in str_tuple:
        if isinstance(entry, basestr):
            edited.append(entry.replace(old, new))
        elif recur and isinstance(entry, tuple):
            # Descend into nested tuples on request.
            edited.append(edit_tuple_strings(entry, old, new, recur=True))
        else:
            edited.append(entry)
    return tuple(edited)
def edit_dict_strings(str_dict, old, new, recur=False):
    """
    Replace substrings `old` with `new` in string values of dictionary
    `str_dict`. Both `old` and `new` can be lists of the same length - items
    in `old` are replaced by items in `new` with the same index.
    Parameters
    ----------
    str_dict : dict
        The dictionary with string values or tuples containing strings.
    old : str or list of str
        The old substring or list of substrings.
    new : str or list of str
        The new substring or list of substrings.
    recur : bool
        If True, edit tuple values recursively.
    Returns
    -------
    new_dict : dict
        The dictionary with edited strings.
    """
    if isinstance(old, basestr):
        # Single substitution pass over all values.
        new_dict = {}
        for key, val in str_dict.iteritems():
            if isinstance(val, basestr):
                new_dict[key] = val.replace(old, new)
            elif isinstance(val, tuple):
                new_dict[key] = edit_tuple_strings(val, old, new, recur=recur)
            else:
                raise ValueError('unsupported value! (%s)' % type(val))
    else:
        # List form: apply the substitutions one by one, in order.
        assert_(len(old) == len(new))
        new_dict = dict(str_dict)
        for ii, _old in enumerate(old):
            new_dict.update(edit_dict_strings(new_dict, _old, new[ii],
                                              recur=recur))
    return new_dict
def invert_dict(d, is_val_tuple=False, unique=True):
    """
    Invert a dictionary by making its values keys and vice versa.

    Parameters
    ----------
    d : dict
        The input dictionary.
    is_val_tuple : bool
        If True, the `d` values are tuples and new keys are the tuple items.
    unique : bool
        If True, the `d` values are unique and so the mapping is
        one to one. If False, the `d` values (possibly) repeat, so the inverted
        dictionary will have as items lists of corresponding keys.

    Returns
    -------
    di : dict
        The inverted dictionary.
    """
    di = {}
    # items() replaces the Python-2-only iteritems().
    for key, val in d.items():
        if unique:
            if is_val_tuple:
                for v in val:
                    di[v] = key
            else:
                di[val] = key
        else:
            if is_val_tuple:
                for v in val:
                    di.setdefault(v, []).append(key)
            else:
                di.setdefault(val, []).append(key)
    return di
def remap_dict(d, map):
    """
    Utility function to remap state dict keys according to var_map.

    `map` is {new_key : old_key}; the result is {new_key : d[old_key]}.
    (The parameter name shadows the builtin, but is kept for backward
    compatibility with keyword callers.)
    """
    # items() replaces the Python-2-only iteritems().
    return dict((new_key, d[key]) for new_key, key in map.items())
##
# 24.08.2006, c
# 05.09.2006
def dict_from_keys_init( keys, seq_class = None ):
    """Return a dict over `keys`: all values None, or, when `seq_class`
    is given, a fresh `seq_class()` per key (values are not shared)."""
    if seq_class is None:
        return dict.fromkeys( keys )
    return dict((key, seq_class()) for key in keys)
##
# 16.10.2006, c
def dict_extend( d1, d2 ):
    """In-place extend each (list) value of `d1` with the same-key value
    of `d2`. items() replaces the Python-2-only iteritems()."""
    for key, val in d1.items():
        val.extend( d2[key] )
def get_subdict(adict, keys):
    """
    Get a sub-dictionary of `adict` with given `keys`.
    Keys missing from `adict` are silently skipped.
    """
    return {key: adict[key] for key in keys if key in adict}
def set_defaults( dict_, defaults ):
    """In-place insert into `dict_` every key of `defaults` it does not
    already have; existing values win. items() replaces the Python-2-only
    iteritems()."""
    for key, val in defaults.items():
        dict_.setdefault( key, val )
##
# c: 12.03.2007, r: 04.04.2008
def get_default( arg, default, msg_if_none = None ):
    """Return `arg`, or `default` when `arg` is None.  Raise ValueError
    with `msg_if_none` when the result is still None and a message was
    supplied."""
    out = default if arg is None else arg
    if out is None and msg_if_none is not None:
        raise ValueError( msg_if_none )
    return out
##
# c: 28.04.2008, r: 28.04.2008
def get_default_attr( obj, attr, default, msg_if_none = None ):
    """Return ``getattr(obj, attr)`` falling back to `default`.  Raise
    ValueError with `msg_if_none` when the result is None and a message
    was supplied."""
    # getattr with a default is equivalent to the hasattr/getattr pair.
    out = getattr( obj, attr, default )
    if out is None and msg_if_none is not None:
        raise ValueError( msg_if_none )
    return out
def get_arguments(omit=None):
    """Get the calling function's arguments as a dict.

    Uses frame introspection (``stack()[1]`` is the caller's frame), so it
    must be called directly from the function whose arguments are wanted.

    Parameters
    ----------
    omit : sequence of str, optional
        Argument names to leave out of the result.

    Returns
    -------
    args : dict
        Mapping of the caller's argument names to their current values.
    """
    from inspect import getargvalues, stack
    if omit is None:
        omit = []
    # getargvalues() -> (args, varargs, keywords, locals) of the caller.
    _args, _, _, _vars = getargvalues(stack()[1][0])
    args = {}
    for name in _args:
        if name in omit: continue
        args[name] = _vars[name]
    return args
def check_names(names1, names2, msg):
    """Check if all names in names1 are in names2, otherwise raise IndexError
    with the provided message msg (msg must contain a single '%s' slot for
    the missing names)."""
    missing = set(names1) - set(names2)
    if missing:
        raise IndexError(msg % ', '.join(missing))
##
# c: 27.02.2008, r: 27.02.2008
def select_by_names( objs_all, names, replace = None, simple = True ):
    """Select values of `objs_all` whose ``.name`` is in `names`.

    When `replace` is ``(attr, repl)``, each selected object is shallow
    copied and its `attr` is replaced: with ``old % repl`` if `simple`,
    otherwise with ``repl.get(obj.name, old)``.

    items() replaces the Python-2-only iteritems().
    """
    objs = {}
    for key, val in objs_all.items():
        if val.name not in names:
            continue
        if replace is None:
            objs[key] = val
            continue
        new_val = copy( val )
        old_attr = getattr( val, replace[0] )
        if simple:
            new_attr = old_attr % replace[1]
        else:
            new_attr = replace[1].get( val.name, old_attr )
        setattr( new_val, replace[0], new_attr )
        objs[key] = new_val
    return objs
def ordered_iteritems(adict):
    """Yield (key, value) pairs of `adict` with keys in ascending order.

    The keys are materialized first: Python 3 dict views can be neither
    indexed nor reliably converted by nm.argsort().
    """
    keys = list(adict.keys())
    order = nm.argsort(keys)
    for ii in order:
        key = keys[ii]
        yield key, adict[key]
def dict_to_array(adict):
    """
    Convert a dictionary of 1D arrays of the same lengths with
    non-negative integer keys to a single 2D array.

    Rows whose index is not a key of `adict` are filled with -1.
    """
    # list() is needed on Python 3: nm.array() of a dict view yields a
    # 0-d object array instead of an integer vector.
    keys = list(adict.keys())
    ik = nm.array(keys, dtype=nm.int32)
    assert_((ik >= 0).all())
    if ik.shape[0] == 0:
        return nm.zeros((0,), dtype=nm.int32)
    aux = adict[ik[0]]
    out = nm.empty((ik.max() + 1, aux.shape[0]), dtype=aux.dtype)
    out.fill(-1)
    # items() replaces the Python-2-only iteritems().
    for key, val in adict.items():
        out[key] = val
    return out
def as_float_or_complex(val):
    """
    Try to cast val to Python float, and if this fails, to Python
    complex type.

    Raises
    ------
    ValueError
        If neither cast succeeds.
    """
    # Narrowed from bare excepts: float()/complex() signal failure with
    # TypeError or ValueError.
    for cast in (float, complex):
        try:
            return cast(val)
        except (TypeError, ValueError):
            pass
    raise ValueError('cannot cast %s to float or complex!' % val)
| {
"repo_name": "vlukes/dicom2fem",
"path": "dicom2fem/base.py",
"copies": "1",
"size": "31975",
"license": "bsd-3-clause",
"hash": -2300470462240647000,
"line_mean": 26.0287404903,
"line_max": 80,
"alpha_frac": 0.5068021892,
"autogenerated": false,
"ratio": 4.046443938243483,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.008627402919195936,
"num_lines": 1183
} |
# Adopted form SfePy project, see http://sfepy.org
# Thanks to Robert Cimrman
import time, sys, os
from copy import copy, deepcopy
# from types import UnboundMethodType
import numpy as nm
import scipy.sparse as sp
# Recognized scalar dtypes for real and complex data.
real_types = [nm.float64]
complex_types = [nm.complex128]
# Abbreviate printouts of arrays with more than 100 elements.
nm.set_printoptions(threshold=100)
def output(mesg):
    """Write `mesg` (stringified) and a newline to standard output."""
    sys.stdout.write("%s\n" % (mesg,))
##
# 22.09.2005, c
# 24.10.2005
# Backport of the builtin sorted() for Python < 2.4.
# NOTE(review): this compares version *strings* lexicographically, not
# numerically -- fine for the 2.x/3.x range, fragile in general.
if sys.version[:5] < "2.4.0":
    def sorted(sequence):
        # Return a sorted copy, leaving `sequence` untouched.
        tmp = copy(sequence)
        tmp.sort()
        return tmp
# Portable base string type: basestring on Python 2, str on Python 3.
# Use the numeric version_info instead of comparing version strings --
# the string compare ('sys.version[0] < "3"') breaks for Python >= 10.
if sys.version_info[0] < 3:
    basestr = basestring
else:
    basestr = str
def get_debug():
    """
    Utility function providing ``debug()`` function.

    Start debugger on line where it is called, roughly equivalent to::

        import pdb; pdb.set_trace()

    First, this function tries to start an `IPython`-enabled
    debugger using the `IPython` API.

    When this fails, the plain old `pdb` is used instead.
    """
    try:
        import IPython
    except ImportError:
        debug = None
    else:
        # Remember the current excepthook so debug() can restore it after
        # IPython embedding may have replaced it.
        old_excepthook = sys.excepthook
        """
        Start debugger on line where it is called, roughly equivalent to::
        import pdb; pdb.set_trace()
        First, this function tries to start an `IPython`-enabled
        debugger using the `IPython` API.
        When this fails, the plain old `pdb` is used instead.
        """
        def debug(frame=None):
            # NOTE(review): lexicographic version compare -- e.g. "0.9" >=
            # "0.11" is True; works only because modern IPython is >= 0.11.
            if IPython.__version__ >= "0.11":
                from IPython.core.debugger import Pdb
                try:
                    # Running inside an IPython shell already?
                    ip = get_ipython()
                except NameError:
                    from IPython.frontend.terminal.embed import InteractiveShellEmbed
                    ip = InteractiveShellEmbed()
                colors = ip.colors
            else:
                # Legacy (pre-0.11) IPython API.
                from IPython.Debugger import Pdb
                from IPython.Shell import IPShell
                from IPython import ipapi
                ip = ipapi.get()
                if ip is None:
                    IPShell(argv=[""])
                    ip = ipapi.get()
                colors = ip.options.colors
            sys.excepthook = old_excepthook
            if frame is None:
                # Default to the caller's frame.
                frame = sys._getframe().f_back
            Pdb(colors).set_trace(frame)
    if debug is None:
        # No IPython available: fall back to plain pdb.
        import pdb
        debug = pdb.set_trace
    # debug.__doc__ = """
    # Start debugger on line where it is called, roughly equivalent to::
    #
    # import pdb; pdb.set_trace()
    #
    # First, this function tries to start an `IPython`-enabled
    # debugger using the `IPython` API.
    #
    # When this fails, the plain old `pdb` is used instead.
    # """
    return debug
debug = get_debug()
def mark_time(times, msg=None):
    """
    Time measurement utility.

    Measures times of execution between subsequent calls. The elapsed
    time is printed if the msg argument is not None.

    Examples
    --------
    >>> times = []
    >>> mark_time(times)
    ... do something
    >>> mark_time(times, 'elapsed')
    elapsed 0.1
    ... do something else
    >>> mark_time(times, 'elapsed again')
    elapsed again 0.05
    >>> times
    [0.10000000000000001, 0.050000000000000003]
    """
    try:
        # time.clock() was removed in Python 3.8; perf_counter() (3.3+)
        # is the recommended replacement.
        tt = time.perf_counter()
    except AttributeError:
        # Python 2 fallback.
        tt = time.clock()
    times.append(tt)
    if (msg is not None) and (len(times) > 1):
        print(msg, times[-1] - times[-2])
def import_file(filename, package_name=None):
    """
    Import a file as a module. The module is explicitly reloaded to
    prevent undesirable interactions.

    Parameters
    ----------
    filename : str
        Path of the Python file to import.
    package_name : str, optional
        Import the module as a member of this package.
    """
    try:
        # Python 3: reload() lives in importlib.
        from importlib import reload as _reload
    except ImportError:
        # Python 2: reload() is a builtin.
        _reload = reload
    path = os.path.dirname(filename)
    if not path in sys.path:
        sys.path.append(path)
        remove_path = True
    else:
        remove_path = False
    name = os.path.splitext(os.path.basename(filename))[0]
    # Reload only if the module was imported before, to avoid stale state.
    if name in sys.modules:
        force_reload = True
    else:
        force_reload = False
    if package_name is not None:
        mod = __import__(".".join((package_name, name)), fromlist=[name])
    else:
        mod = __import__(name)
    if force_reload:
        _reload(mod)
    if remove_path:
        sys.path.pop(-1)
    return mod
def try_imports(imports, fail_msg=None):
    """
    Try import statements until one succeeds.

    Parameters
    ----------
    imports : list
        The list of import statements.
    fail_msg : str
        If not None and no statement succeeds, a `ValueError` is raised with
        the given message, appended to all failed messages.

    Returns
    -------
    locals : dict
        The dictionary of imported modules (plus this function's other
        locals, as a side effect of returning locals()).
    """
    msgs = []
    for imp in imports:
        try:
            # NOTE(review): names created by exec() inside a function are
            # not usable as ordinary locals on Python 3; they do appear in
            # the locals() dict returned below in CPython -- verify when
            # porting to another interpreter.
            exec(imp)
            break
        except Exception as inst:
            msgs.append(str(inst))
    else:
        # The loop completed without a successful import.
        if fail_msg is not None:
            msgs.append(fail_msg)
            raise ValueError("\n".join(msgs))
    return locals()
def assert_(condition, msg="assertion failed!"):
    """Raise ValueError with `msg` unless `condition` is truthy."""
    if condition:
        return
    raise ValueError(msg)
##
# c: 06.04.2005, r: 05.05.2008
def pause(msg=None):
    """
    Prints the line number and waits for a keypress.

    If you press:
    "q" ............. it will call sys.exit()
    any other key ... it will continue execution of the program

    This is useful for debugging.
    """
    # Caller's frame and code object give file, function and line info.
    f = sys._getframe(1)
    ff = f.f_code
    if msg:
        print(
            "%s, %d: %s(), %d: %s"
            % (ff.co_filename, ff.co_firstlineno, ff.co_name, f.f_lineno, msg)
        )
    else:
        print(
            "%s, %d: %s(), %d"
            % (ff.co_filename, ff.co_firstlineno, ff.co_name, f.f_lineno)
        )
    # NOTE(review): spause() is not defined in this chunk; presumably
    # provided elsewhere in this module -- confirm.
    spause()
##
# 02.01.2005
class Struct(object):
    """
    A generic bag of attributes set from constructor keyword arguments,
    with dict-like helpers (get(), update(), set_default()), pretty
    printing, merging via ``+`` / ``+=`` and (deep) copying.
    """
    # 03.10.2005, c
    # 26.10.2005
    def __init__(self, **kwargs):
        if kwargs:
            self.__dict__.update(kwargs)

    def _format_sequence(self, seq, threshold):
        """Abbreviate `seq` to its edge items when longer than `threshold`."""
        # Integer division: a float slice index raises TypeError on Python 3.
        threshold_half = threshold // 2
        if len(seq) > threshold:
            out = (
                ", ".join(str(ii) for ii in seq[:threshold_half])
                + ", ..., "
                + ", ".join(str(ii) for ii in seq[-threshold_half:])
            )
        else:
            out = str(seq)
        return out

    # 08.03.2005
    def __str__(self):
        """Print instance class, name and items in alphabetical order.

        If the class instance has '_str_attrs' attribute, only the attributes
        listed there are taken into account. Other attributes are provided only
        as a list of attribute names (no values).

        For attributes that are Struct instances, if
        the listed attribute name ends with '.', the attribute is printed fully
        by calling str(). Otherwise only its class name/name are printed.

        Attributes that are NumPy arrays or SciPy sparse matrices are
        printed in a brief form.

        Only keys of dict attributes are printed. For the dict keys as
        well as list or tuple attributes only several edge items are
        printed if their length is greater than the threshold value 20.
        """
        return self._str()

    def _str(self, keys=None, threshold=20):
        ss = "%s" % self.__class__.__name__
        if hasattr(self, "name"):
            ss += ":%s" % self.name
        ss += "\n"
        if keys is None:
            keys = self.__dict__.keys()
        str_attrs = sorted(self.get("_str_attrs", keys))
        printed_keys = []
        for key in str_attrs:
            # A trailing '.' requests a full print of a Struct attribute.
            if key[-1] == ".":
                key = key[:-1]
                full_print = True
            else:
                full_print = False
            printed_keys.append(key)
            try:
                val = getattr(self, key)
            except AttributeError:
                continue
            if isinstance(val, Struct):
                if not full_print:
                    ss += "  %s:\n    %s" % (key, val.__class__.__name__)
                    if hasattr(val, "name"):
                        ss += ":%s" % val.name
                    ss += "\n"
                else:
                    aux = "\n" + str(val)
                    aux = aux.replace("\n", "\n    ")
                    ss += "  %s:\n%s\n" % (key, aux[1:])
            elif isinstance(val, dict):
                # list() so the view can be sliced by _format_sequence()
                # on Python 3.
                sval = self._format_sequence(list(val.keys()), threshold)
                sval = sval.replace("\n", "\n    ")
                ss += "  %s:\n    dict with keys: %s\n" % (key, sval)
            elif isinstance(val, list):
                sval = self._format_sequence(val, threshold)
                sval = sval.replace("\n", "\n    ")
                ss += "  %s:\n    list: %s\n" % (key, sval)
            elif isinstance(val, tuple):
                sval = self._format_sequence(val, threshold)
                sval = sval.replace("\n", "\n    ")
                ss += "  %s:\n    tuple: %s\n" % (key, sval)
            elif isinstance(val, nm.ndarray):
                ss += "  %s:\n    %s array of %s\n" % (key, val.shape, val.dtype)
            elif isinstance(val, sp.spmatrix):
                ss += "  %s:\n    %s spmatrix of %s, %d nonzeros\n" % (
                    key,
                    val.shape,
                    val.dtype,
                    val.nnz,
                )
            else:
                aux = "\n" + str(val)
                aux = aux.replace("\n", "\n    ")
                ss += "  %s:\n%s\n" % (key, aux[1:])
        other_keys = sorted(set(keys).difference(set(printed_keys)))
        if len(other_keys):
            ss += "  other attributes:\n    %s\n" % "\n    ".join(
                key for key in other_keys
            )
        return ss.rstrip()

    def __repr__(self):
        ss = "%s" % self.__class__.__name__
        if hasattr(self, "name"):
            ss += ":%s" % self.name
        return ss

    ##
    # 28.08.2007, c
    def __add__(self, other):
        """Merge Structs. Attributes of new are those of self unless an
        attribute and its counterpart in other are both Structs - these are
        merged then."""
        new = copy(self)
        # items() replaces the Python-2-only iteritems().
        for key, val in other.__dict__.items():
            if hasattr(new, key):
                sval = getattr(self, key)
                if issubclass(sval.__class__, Struct) and issubclass(
                    val.__class__, Struct
                ):
                    setattr(new, key, sval + val)
                else:
                    setattr(new, key, sval)
            else:
                setattr(new, key, val)
        return new

    ##
    # 28.08.2007, c
    def __iadd__(self, other):
        """Merge Structs in place. Attributes of self are left unchanged
        unless an attribute and its counterpart in other are both Structs -
        these are merged then."""
        for key, val in other.__dict__.items():
            if hasattr(self, key):
                sval = getattr(self, key)
                if issubclass(sval.__class__, Struct) and issubclass(
                    val.__class__, Struct
                ):
                    setattr(self, key, sval + val)
                else:
                    setattr(self, key, val)
        return self

    def str_class(self):
        """
        As __str__(), but for class attributes.
        """
        return self._str(self.__class__.__dict__.keys())

    # 08.03.2005, c
    def str_all(self):
        """Recursive full print of all attributes, Structs included."""
        ss = "%s\n" % self.__class__
        for key, val in self.__dict__.items():
            if issubclass(self.__dict__[key].__class__, Struct):
                ss += "  %s:\n" % key
                aux = "\n" + self.__dict__[key].str_all()
                aux = aux.replace("\n", "\n    ")
                ss += aux[1:] + "\n"
            else:
                aux = "\n" + str(val)
                aux = aux.replace("\n", "\n    ")
                ss += "  %s:\n%s\n" % (key, aux[1:])
        return ss.rstrip()

    ##
    # 09.07.2007, c
    def to_dict(self):
        """Return a shallow copy of the attribute dictionary."""
        return copy(self.__dict__)

    def get(self, key, default=None, msg_if_none=None):
        """
        A dict-like get() for Struct attributes.
        """
        out = getattr(self, key, default)
        if (out is None) and (msg_if_none is not None):
            raise ValueError(msg_if_none)
        return out

    def update(self, other, **kwargs):
        """
        A dict-like update for Struct attributes.
        """
        if other is None:
            return
        if not isinstance(other, dict):
            other = other.to_dict()
        self.__dict__.update(other, **kwargs)

    def set_default(self, key, default=None):
        """
        Behaves like dict.setdefault().
        """
        return self.__dict__.setdefault(key, default)

    def copy(self, deep=False, name=None):
        """Make a (deep) copy of self.

        Parameters:

        deep : bool
            Make a deep copy.
        name : str
            Name of the copy, with default self.name + '_copy'.
        """
        if deep:
            other = deepcopy(self)
        else:
            other = copy(self)
        if hasattr(self, "name"):
            # get_default() is defined elsewhere in this module.
            other.name = get_default(name, self.name + "_copy")
        return other
#
# 12.07.2007, c
class IndexedStruct(Struct):
    """Struct whose attributes are also reachable via item access:
    ``obj['key']`` is equivalent to ``obj.key``."""
    ##
    # 12.07.2007, c
    def __getitem__(self, key):
        return getattr(self, key)
    ##
    # 12.07.2007, c
    def __setitem__(self, key, val):
        setattr(self, key, val)
##
# 14.07.2006, c
class Container(Struct):
    """
    An ordered collection of objects carrying a ``.name`` attribute,
    addressable both by integer position and by name, with a dict-like
    iteration API.
    """
    def __init__(self, objs=None, **kwargs):
        Struct.__init__(self, **kwargs)
        if objs is not None:
            self._objs = objs
            self.update()
        else:
            self._objs = []
            self.names = []

    def update(self, objs=None):
        """Replace the stored objects and rebuild the name index.
        NOTE: overrides Struct.update() with a different signature."""
        if objs is not None:
            self._objs = objs
        self.names = [obj.name for obj in self._objs]

    def __setitem__(self, ii, obj):
        """Set by position or name; a new index/name appends."""
        try:
            if isinstance(ii, basestr):
                if ii in self.names:
                    ii = self.names.index(ii)
                else:
                    ii = len(self.names)
            elif not isinstance(ii, int):
                raise ValueError("bad index type! (%s)" % type(ii))
            if ii >= len(self.names):
                self._objs.append(obj)
                self.names.append(obj.name)
            else:
                self._objs[ii] = obj
                self.names[ii] = obj.name
        except (IndexError, ValueError) as msg:
            raise IndexError(msg)

    def __getitem__(self, ii):
        """Get by position (int) or name (str)."""
        try:
            if isinstance(ii, basestr):
                ii = self.names.index(ii)
            elif not isinstance(ii, int):
                raise ValueError("bad index type! (%s)" % type(ii))
            return self._objs[ii]
        except (IndexError, ValueError) as msg:
            raise IndexError(msg)

    def __iter__(self):
        return self._objs.__iter__()

    ##
    # 18.07.2006, c
    def __len__(self):
        return len(self._objs)

    def insert(self, ii, obj):
        self._objs.insert(ii, obj)
        self.names.insert(ii, obj.name)

    def append(self, obj):
        self[len(self.names)] = obj

    def extend(self, objs):
        """
        Extend the container items by the sequence `objs`.
        """
        for obj in objs:
            self.append(obj)

    def get(self, ii, default=None, msg_if_none=None):
        """
        Get an item from Container - a wrapper around
        Container.__getitem__() with defaults and custom error message.

        Parameters
        ----------
        ii : int or str
            The index or name of the item.
        default : any, optional
            The default value returned in case the item `ii` does not exist.
        msg_if_none : str, optional
            If not None, and if `default` is None and the item `ii` does
            not exist, raise ValueError with this message.
        """
        try:
            out = self[ii]
        except (IndexError, ValueError):
            if default is not None:
                out = default
            else:
                if msg_if_none is not None:
                    raise ValueError(msg_if_none)
                else:
                    raise
        return out

    def remove_name(self, name):
        """Remove the item called `name` and its name entry.

        Fix: list.index is a method -- the original subscripted it
        (``self.names.index[name]``), which always raised TypeError.
        """
        ii = self.names.index(name)
        del self.names[ii]
        del self._objs[ii]

    ##
    # dict-like methods.
    def itervalues(self):
        return self._objs.__iter__()

    def iterkeys(self):
        return self.get_names().__iter__()

    def iteritems(self):
        for obj in self._objs:
            yield obj.name, obj

    ##
    # 20.09.2006, c
    def has_key(self, ii):
        """True if `ii` is a valid position (int) or a known name (str)."""
        if isinstance(ii, int):
            if (ii < len(self)) and (ii >= (-len(self))):
                return True
            else:
                return False
        elif isinstance(ii, basestr):
            try:
                self.names.index(ii)
                return True
            # Narrowed from a bare except: a missing name is a ValueError.
            except ValueError:
                return False
        else:
            raise IndexError("unsupported index type: %s" % ii)

    ##
    # 12.06.2007, c
    def print_names(self):
        print([obj.name for obj in self._objs])

    def get_names(self):
        return [obj.name for obj in self._objs]

    def as_dict(self):
        """
        Return stored objects in a dictionary with object names as keys.
        """
        out = {}
        for key, val in self.iteritems():
            out[key] = val
        return out
##
# 30.11.2004, c
# 01.12.2004
# 01.12.2004
class OneTypeList(list):
    """A list restricted to instances of a single class (`item_class`),
    whose items can also be looked up by their ``.name`` attribute."""
    def __init__(self, item_class, seq=None):
        # The only accepted item type; checked on every __setitem__().
        self.item_class = item_class
        if seq is not None:
            for obj in seq:
                self.append(obj)
    def __setitem__(self, key, value):
        # Validate either every item of a list/tuple value (slice
        # assignment) or the single value itself.
        if type(value) in (list, tuple):
            for ii, val in enumerate(value):
                if not isinstance(val, self.item_class):
                    raise TypeError
        else:
            if not isinstance(value, self.item_class):
                raise TypeError
        list.__setitem__(self, key, value)
    ##
    # 21.11.2005, c
    def __getitem__(self, ii):
        # Integer index -> plain list access; string -> lookup by name.
        if isinstance(ii, int):
            return list.__getitem__(self, ii)
        elif isinstance(ii, basestr):
            ir = self.find(ii, ret_indx=True)
            if ir:
                return list.__getitem__(self, ir[0])
            else:
                raise IndexError(ii)
        else:
            raise IndexError(ii)
    def __str__(self):
        ss = "[\n"
        for ii in self:
            aux = "\n" + ii.__str__()
            aux = aux.replace("\n", "\n  ")
            ss += aux[1:] + "\n"
        ss += "]"
        return ss
    def find(self, name, ret_indx=False):
        """Return the first item whose ``.name`` equals `name` (or the
        (index, item) pair when `ret_indx` is True); None if absent."""
        for ii, item in enumerate(self):
            if item.name == name:
                if ret_indx:
                    return ii, item
                else:
                    return item
        return None
    ##
    # 12.06.2007, c
    def print_names(self):
        print([ii.name for ii in self])
    def get_names(self):
        return [ii.name for ii in self]
def print_structs(objs):
    """Print Struct instances in a container, works recursively. Debugging
    utility function. items() replaces the Python-2-only iteritems()."""
    if isinstance(objs, dict):
        for key, vals in objs.items():
            print(key)
            print_structs(vals)
    elif isinstance(objs, list):
        for vals in objs:
            print_structs(vals)
    else:
        print(objs)
def iter_dict_of_lists(dol, return_keys=False):
    """Iterate over all items of all list values in the dict `dol`;
    yield (key, position, item) triples when `return_keys` is True.
    items() replaces the Python-2-only iteritems()."""
    for key, vals in dol.items():
        for ii, val in enumerate(vals):
            if return_keys:
                yield key, ii, val
            else:
                yield val
##
# 19.07.2005, c
# 26.05.2006
# 17.10.2007
def dict_to_struct(*args, **kwargs):
    """Convert a dict instance to a Struct instance.

    Keyword arguments: `level` (recursion depth), `flag` (per-level tuple;
    a truthy entry converts that level to `constructor`, a falsy one keeps
    a plain dict) and `constructor` (level-0 class, Struct by default;
    note it is not forwarded into recursive calls).
    """
    level = kwargs.get("level", 0)
    flag = kwargs.get("flag", (1,))
    # For level 0 only...  Lazy lookup so Struct is only referenced when
    # no explicit constructor was given.
    try:
        constructor = kwargs["constructor"]
    except KeyError:
        constructor = Struct
    out = []
    for arg in args:
        if type(arg) == dict:
            if flag[level]:
                aux = constructor()
            else:
                aux = {}
            # items() replaces the Python-2-only iteritems().
            for key, val in arg.items():
                if type(val) == dict:
                    try:
                        flag[level + 1]
                    except IndexError:
                        # Extend the flag tuple with a "keep as dict" entry.
                        flag = flag + (0,)
                    val2 = dict_to_struct(val, level=level + 1, flag=flag)
                    if flag[level]:
                        aux.__dict__[key] = val2
                    else:
                        aux[key] = val2
                else:
                    if flag[level]:
                        aux.__dict__[key] = val
                    else:
                        aux[key] = val
            out.append(aux)
        else:
            # Non-dict arguments pass through unchanged.
            out.append(arg)
    if len(out) == 1:
        out = out[0]
    return out
##
# 23.01.2006, c
def is_sequence(var):
    """True if `var` is an instance of (a subclass of) tuple or list."""
    return issubclass(var.__class__, (tuple, list))
##
# 17.10.2007, c
def is_derived_class(cls, parent):
    """True if `cls` is a strict subclass of `parent` (not `parent` itself)."""
    if cls is parent:
        return False
    return issubclass(cls, parent)
##
# 23.10.2007, c
def insert_static_method(cls, function):
    """Attach `function` to class `cls` as a static method of the same name."""
    name = function.__name__
    setattr(cls, name, staticmethod(function))
##
# 23.10.2007, c
# def insert_method( instance, function ):
# setattr( instance, function.__name__,
# UnboundMethodType( function, instance, instance.__class__ ) )
def use_method_with_name(instance, method, new_name):
    """Bind `method` on `instance` under the attribute name `new_name`."""
    setattr(instance, new_name, method)
def insert_as_static_method(cls, name, function):
    """Attach `function` to class `cls` as a static method called `name`."""
    setattr(cls, name, staticmethod(function))
def find_subclasses(context, classes, omit_unnamed=False, name_attr="name"):
    """Find subclasses of the given classes in the given context.

    `context` is a mapping of names to objects (e.g. ``vars(module)``);
    ``.items()`` is called on it here.

    Examples
    --------
    >>> solver_table = find_subclasses(vars(),
                                       [LinearSolver, NonlinearSolver,
                                        TimeSteppingSolver, EigenvalueSolver,
                                        OptimizationSolver])
    """
    var_dict = context.items()
    table = {}
    for key, var in var_dict:
        try:
            for cls in classes:
                if is_derived_class(var, cls):
                    # Prefer the class's own name attribute as the key.
                    if hasattr(var, name_attr):
                        key = getattr(var, name_attr)
                        if omit_unnamed and not key:
                            continue
                    elif omit_unnamed:
                        continue
                    else:
                        # NOTE(review): `var` is a class here, so
                        # var.__class__.__name__ is its *metaclass* name
                        # (usually 'type'), not var.__name__ -- confirm
                        # whether this is intended.
                        key = var.__class__.__name__
                    table[key] = var
                    break
        except TypeError:
            # is_derived_class() raises TypeError for non-class objects.
            pass
    return table
def load_classes(
    filenames, classes, package_name=None, ignore_errors=False, name_attr="name"
):
    """
    For each filename in filenames, load all subclasses of classes listed.

    When `ignore_errors` is set, modules that fail to import are skipped
    with a warning instead of propagating the exception.
    """
    table = {}
    for filename in filenames:
        if not ignore_errors:
            mod = import_file(filename, package_name=package_name)
        else:
            try:
                mod = import_file(filename, package_name=package_name)
            # Narrowed from a bare except.
            except Exception:
                output("WARNING: module %s cannot be imported!" % filename)
                # Fix: output() takes a single argument; the original call
                # output("reason:\n", sys.exc_info()[1]) raised TypeError.
                output("reason:\n%s" % sys.exc_info()[1])
                continue
        table.update(
            find_subclasses(vars(mod), classes, omit_unnamed=True, name_attr=name_attr)
        )
    return table
def update_dict_recursively(dst, src, tuples_too=False, overwrite_by_none=True):
    """
    Update `dst` dictionary recursively using items in `src` dictionary.

    Parameters
    ----------
    dst : dict
        The destination dictionary.
    src : dict
        The source dictionary.
    tuples_too : bool
        If True, recurse also into dictionaries that are members of tuples.
    overwrite_by_none : bool
        If False, do not overwrite destination dictionary values by None.

    Returns
    -------
    dst : dict
        The destination dictionary.
    """
    def tuplezip(a):
        # Merge paired tuple members when both are dicts, else take src's.
        if isinstance(a[0], dict) and isinstance(a[1], dict):
            return update_dict_recursively(a[0], a[1], True)
        return a[1]

    for key in src:
        if key in dst:
            if isinstance(src[key], dict) and isinstance(dst[key], dict):
                dst[key] = update_dict_recursively(dst[key], src[key], tuples_too)
                continue
            if (
                tuples_too
                and isinstance(dst[key], tuple)
                and isinstance(src[key], tuple)
            ):
                # list() is needed on Python 3, where map() returns an
                # iterator that cannot be sliced.
                dst[key] = tuple(
                    list(map(tuplezip, zip(src[key], dst[key])))[: len(dst[key])]
                )
                continue
        if overwrite_by_none or src[key] is not None:
            dst[key] = src[key]
    return dst
def edit_tuple_strings(str_tuple, old, new, recur=False):
    """
    Return a copy of `str_tuple` in which every string item has the
    substring `old` replaced by `new`.  Non-string items are copied
    unchanged; when `recur` is True, tuple items are processed
    recursively.

    Parameters
    ----------
    str_tuple : tuple
        The tuple with string values.
    old : str
        The old substring.
    new : str
        The new substring.
    recur : bool
        If True, edit items that are tuples recursively.

    Returns
    -------
    new_tuple : tuple
        The tuple with edited strings.
    """
    def _edit(item):
        if isinstance(item, basestr):
            return item.replace(old, new)
        if recur and isinstance(item, tuple):
            return edit_tuple_strings(item, old, new, recur=True)
        return item

    return tuple(_edit(item) for item in str_tuple)
def edit_dict_strings(str_dict, old, new, recur=False):
    """
    Replace substrings `old` with `new` in string values of dictionary
    `str_dict`. Both `old` and `new` can be lists of the same length - items
    in `old` are replaced by items in `new` with the same index.

    Parameters
    ----------
    str_dict : dict
        The dictionary with string values or tuples containing strings.
    old : str or list of str
        The old substring or list of substrings.
    new : str or list of str
        The new substring or list of substrings.
    recur : bool
        If True, edit tuple values recursively.

    Returns
    -------
    new_dict : dict
        The dictionary with edited strings.
    """
    if isinstance(old, basestr):
        new_dict = {}
        # items() replaces the Python-2-only iteritems().
        for key, val in str_dict.items():
            if isinstance(val, basestr):
                new_dict[key] = val.replace(old, new)
            elif isinstance(val, tuple):
                new_dict[key] = edit_tuple_strings(val, old, new, recur=recur)
            else:
                raise ValueError("unsupported value! (%s)" % type(val))
    else:
        assert_(len(old) == len(new))
        new_dict = dict(str_dict)
        # Apply each old->new pair in turn, accumulating the edits.
        for ii, _old in enumerate(old):
            new_dict.update(edit_dict_strings(new_dict, _old, new[ii], recur=recur))
    return new_dict
def invert_dict(d, is_val_tuple=False, unique=True):
    """
    Invert a dictionary by making its values keys and vice versa.

    Parameters
    ----------
    d : dict
        The input dictionary.
    is_val_tuple : bool
        If True, the `d` values are tuples and new keys are the tuple items.
    unique : bool
        If True, the `d` values are unique and so the mapping is
        one to one. If False, the `d` values (possibly) repeat, so the inverted
        dictionary will have as items lists of corresponding keys.

    Returns
    -------
    di : dict
        The inverted dictionary.
    """
    di = {}
    # items() replaces the Python-2-only iteritems().
    for key, val in d.items():
        if unique:
            if is_val_tuple:
                for v in val:
                    di[v] = key
            else:
                di[val] = key
        else:
            if is_val_tuple:
                for v in val:
                    di.setdefault(v, []).append(key)
            else:
                di.setdefault(val, []).append(key)
    return di
def remap_dict(d, map):
    """
    Utility function to remap state dict keys according to var_map.

    `map` is {new_key : old_key}; the result is {new_key : d[old_key]}.
    (The parameter name shadows the builtin, but is kept for backward
    compatibility with keyword callers.)
    """
    # items() replaces the Python-2-only iteritems().
    return dict((new_key, d[key]) for new_key, key in map.items())
##
# 24.08.2006, c
# 05.09.2006
def dict_from_keys_init(keys, seq_class=None):
    """Return a dict over `keys`: all values None, or, when `seq_class`
    is given, a fresh `seq_class()` per key (values are not shared)."""
    if seq_class is None:
        return dict.fromkeys(keys)
    return dict((key, seq_class()) for key in keys)
##
# 16.10.2006, c
def dict_extend(d1, d2):
    """In-place extend each (list) value of `d1` with the same-key value
    of `d2`. items() replaces the Python-2-only iteritems()."""
    for key, val in d1.items():
        val.extend(d2[key])
def get_subdict(adict, keys):
    """
    Get a sub-dictionary of `adict` with given `keys`.
    Keys missing from `adict` are silently skipped.
    """
    return {key: adict[key] for key in keys if key in adict}
def set_defaults(dict_, defaults):
    """In-place insert into `dict_` every key of `defaults` it does not
    already have; existing values win. items() replaces the Python-2-only
    iteritems()."""
    for key, val in defaults.items():
        dict_.setdefault(key, val)
##
# c: 12.03.2007, r: 04.04.2008
def get_default(arg, default, msg_if_none=None):
    """Return `arg`, or `default` when `arg` is None.  Raise ValueError
    with `msg_if_none` when the result is still None and a message was
    supplied."""
    out = default if arg is None else arg
    if out is None and msg_if_none is not None:
        raise ValueError(msg_if_none)
    return out
##
# c: 28.04.2008, r: 28.04.2008
def get_default_attr(obj, attr, default, msg_if_none=None):
    """Return ``getattr(obj, attr)`` falling back to `default`.  Raise
    ValueError with `msg_if_none` when the result is None and a message
    was supplied."""
    # getattr with a default is equivalent to the hasattr/getattr pair.
    out = getattr(obj, attr, default)
    if out is None and msg_if_none is not None:
        raise ValueError(msg_if_none)
    return out
def get_arguments(omit=None):
    """Get the calling function's arguments as a dict.

    Uses frame introspection (``stack()[1]`` is the caller's frame), so it
    must be called directly from the function whose arguments are wanted.

    Parameters
    ----------
    omit : sequence of str, optional
        Argument names to leave out of the result.

    Returns
    -------
    args : dict
        Mapping of the caller's argument names to their current values.
    """
    from inspect import getargvalues, stack
    if omit is None:
        omit = []
    # getargvalues() -> (args, varargs, keywords, locals) of the caller.
    _args, _, _, _vars = getargvalues(stack()[1][0])
    args = {}
    for name in _args:
        if name in omit:
            continue
        args[name] = _vars[name]
    return args
def check_names(names1, names2, msg):
    """Check if all names in names1 are in names2, otherwise raise IndexError
    with the provided message msg (msg must contain a single '%s' slot for
    the missing names)."""
    missing = set(names1) - set(names2)
    if missing:
        raise IndexError(msg % ", ".join(missing))
##
# c: 27.02.2008, r: 27.02.2008
def select_by_names(objs_all, names, replace=None, simple=True):
    """Select values of `objs_all` whose ``.name`` is in `names`.

    When `replace` is ``(attr, repl)``, each selected object is shallow
    copied and its `attr` is replaced: with ``old % repl`` if `simple`,
    otherwise with ``repl.get(obj.name, old)``.

    items() replaces the Python-2-only iteritems().
    """
    objs = {}
    for key, val in objs_all.items():
        if val.name not in names:
            continue
        if replace is None:
            objs[key] = val
            continue
        new_val = copy(val)
        old_attr = getattr(val, replace[0])
        if simple:
            new_attr = old_attr % replace[1]
        else:
            new_attr = replace[1].get(val.name, old_attr)
        setattr(new_val, replace[0], new_attr)
        objs[key] = new_val
    return objs
def ordered_iteritems(adict):
    """Yield (key, value) pairs of `adict` with keys in ascending order.

    The keys are materialized first: Python 3 dict views can be neither
    indexed nor reliably converted by nm.argsort().
    """
    keys = list(adict.keys())
    order = nm.argsort(keys)
    for ii in order:
        key = keys[ii]
        yield key, adict[key]
def dict_to_array(adict):
    """
    Convert a dictionary of 1D arrays of the same lengths with
    non-negative integer keys to a single 2D array.

    Rows whose index is not a key of `adict` are filled with -1.
    """
    # list() is needed on Python 3: nm.array() of a dict view yields a
    # 0-d object array instead of an integer vector.
    keys = list(adict.keys())
    ik = nm.array(keys, dtype=nm.int32)
    assert_((ik >= 0).all())
    if ik.shape[0] == 0:
        return nm.zeros((0,), dtype=nm.int32)
    aux = adict[ik[0]]
    out = nm.empty((ik.max() + 1, aux.shape[0]), dtype=aux.dtype)
    out.fill(-1)
    # items() replaces the Python-2-only iteritems().
    for key, val in adict.items():
        out[key] = val
    return out
def as_float_or_complex(val):
    """
    Try to cast val to Python float, and if this fails, to Python
    complex type.

    Raises
    ------
    ValueError
        If neither cast succeeds.
    """
    # Narrowed from bare excepts: float()/complex() signal failure with
    # TypeError or ValueError.
    for cast in (float, complex):
        try:
            return cast(val)
        except (TypeError, ValueError):
            pass
    raise ValueError("cannot cast %s to float or complex!" % val)
| {
"repo_name": "mjirik/dicom2fem",
"path": "dicom2fem/base.py",
"copies": "1",
"size": "32325",
"license": "bsd-3-clause",
"hash": -527888655861369100,
"line_mean": 24.5735759494,
"line_max": 87,
"alpha_frac": 0.5128847641,
"autogenerated": false,
"ratio": 4.0080595164290145,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0005516933127033901,
"num_lines": 1264
} |
# adopted from http://gremu.net/blog/2010/django-admin-read-only-permission/
from django.contrib.gis import admin
from django.core.exceptions import PermissionDenied
from ajax_select.fields import autoselect_fields_check_can_add
class ReadOnlyAdmin(admin.OSMGeoAdmin):
    """ in order to get + popup functions subclass this or do
    the same hook inside of your get_form """
    # NOTE(review): subclasses are expected to define `user_readonly`,
    # `user_readonly_inlines` and `inlines_list` -- confirm against users
    # of this class.
    def get_form(self, request, obj=None, **kwargs):
        # Mark the WKT geometry widget read-only for read-only users.
        form = super(ReadOnlyAdmin, self).get_form(request, obj, **kwargs)
        if 'featuregeometrywkt' in form.declared_fields and self.__user_is_readonly(request):
            form.declared_fields['featuregeometrywkt'].widget.attrs['readonly'] = True
        # This is commented since django ajax selects doesn't seem to work with it
        # autoselect_fields_check_can_add(form, self.model, request.user)
        return form
    def has_add_permission(self, request, obj=None):
        """Read-only users may not add objects.

        Arguments:
        - `request`:
        - `obj`:
        """
        return not self.__user_is_readonly(request)
    def has_delete_permission(self, request, obj=None):
        """Read-only users may not delete objects.

        Arguments:
        - `request`:
        - `obj`:
        """
        return not self.__user_is_readonly(request)
    def get_actions(self, request):
        # Strip destructive bulk actions for read-only users.
        actions = super(ReadOnlyAdmin, self).get_actions(request)
        if self.__user_is_readonly(request):
            if 'delete_selected' in actions:
                del actions['delete_selected']
            elif 'duplicate_results_event' in actions:
                del actions['duplicate_results_event']
        return actions
    def change_view(self, request, object_id, form_url='', extra_context=None):
        # NOTE(review): this mutates admin-instance state (readonly_fields,
        # inlines, save_as) per request; admin instances are shared, so this
        # looks racy under concurrent requests -- confirm.
        if self.__user_is_readonly(request):
            self.save_as = False
            self.readonly_fields = self.user_readonly
            self.inlines = self.user_readonly_inlines
            extra_context = extra_context or {}
            # Hide all save buttons in the read-only view.
            extra_context['show_save'] = False
            extra_context['show_save_as_new'] = False
            extra_context['show_save_and_continue'] = False
            try:
                return super(ReadOnlyAdmin, self).change_view(
                    request, object_id, extra_context=extra_context)
            except PermissionDenied:
                pass
            # Fall back to a read-only rendering; POSTs are forbidden.
            if request.method == 'POST':
                raise PermissionDenied
            request.readonly = True
            return super(ReadOnlyAdmin, self).change_view(
                request, object_id, form_url, extra_context=extra_context)
        else:
            self.readonly_fields = list()
            self.form = self.form
            self.inlines = self.inlines_list
            request.readonly = False
            return super(ReadOnlyAdmin, self).change_view(
                request, object_id, form_url, extra_context=extra_context)
    @staticmethod
    def __user_is_readonly(request):
        # A user is read-only iff they belong to the "readonly" group.
        groups = [x.name for x in request.user.groups.all()]
        return "readonly" in groups
| {
"repo_name": "ocefpaf/ODM2-Admin",
"path": "odm2admin/readonlyadmin.py",
"copies": "2",
"size": "3019",
"license": "mit",
"hash": -8767030944464297000,
"line_mean": 34.5176470588,
"line_max": 93,
"alpha_frac": 0.606492216,
"autogenerated": false,
"ratio": 4.246132208157524,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5852624424157524,
"avg_score": null,
"num_lines": null
} |
# Adopted from https://github.com/airaria/TextBrewer
# Apache License Version 2.0
from abc import ABC, abstractmethod
import torch
# x is between 0 and 1
from hanlp_common.configurable import AutoConfigurable
def linear_growth_weight_scheduler(x):
    # Weight grows linearly with training progress x (x is in [0, 1]).
    return x
def linear_decay_weight_scheduler(x):
    # Weight decays linearly from 1 to 0 as training progress x goes 0 -> 1.
    return 1 - x
def constant_temperature_scheduler(logits_S, logits_T, base_temperature):
    """Ignore the logits and always distill at ``base_temperature``.

    Remember to detach logits_S.
    """
    return base_temperature
def flsw_temperature_scheduler_builder(beta, gamma, eps=1e-4, *args):
    """Build the FLSW temperature scheduler (adapted from arXiv:1911.07471).

    The per-sample temperature grows where student and teacher logits
    disagree (small cosine similarity) and shrinks where they agree.
    """

    def flsw_temperature_scheduler(logits_S, logits_T, base_temperature):
        with torch.no_grad():
            s = logits_S.detach()
            t = logits_T.detach()
            s = s / (torch.norm(s, dim=-1, keepdim=True) + eps)
            t = t / (torch.norm(t, dim=-1, keepdim=True) + eps)
            # w is large when the normalized logits point in different directions.
            w = torch.pow(1 - (s * t).sum(dim=-1), gamma)
            tau = base_temperature + (w.mean() - w) * beta
        return tau

    return flsw_temperature_scheduler
def cwsm_temperature_scheduler_builder(beta, *args):
'''
adapted from arXiv:1911.07471
'''
def cwsm_temperature_scheduler(logits_S, logits_T, base_temperature):
v = logits_S.detach()
with torch.no_grad():
v = torch.softmax(v, dim=-1)
v_max = v.max(dim=-1)[0]
w = 1 / (v_max + 1e-3)
tau = base_temperature + (w.mean() - w) * beta
return tau
return cwsm_temperature_scheduler
class LinearTeacherAnnealingScheduler(object):
    """Tracks training progress as a fraction in ``[0, 1]``.

    Call :meth:`step` once per training step; ``float(scheduler)`` then
    yields ``current_step / total_steps``.
    """

    def __init__(self, num_training_steps: int) -> None:
        super().__init__()
        self._num_training_steps = num_training_steps
        self._current_training_steps = 0

    def step(self):
        """Advance the schedule by one training step."""
        self._current_training_steps += 1

    def __float__(self):
        # Fraction of training completed so far.
        return self._current_training_steps / self._num_training_steps
class TemperatureScheduler(ABC, AutoConfigurable):
    """Base class for distillation temperature schedulers."""

    def __init__(self, base_temperature) -> None:
        super().__init__()
        self.base_temperature = base_temperature

    def __call__(self, logits_S, logits_T):
        # Delegate to the subclass-provided forward.
        return self.forward(logits_S, logits_T)

    @abstractmethod
    def forward(self, logits_S, logits_T):
        """Return the temperature to use for this pair of logits."""
        raise NotImplementedError()

    @staticmethod
    def from_name(name):
        """Instantiate a scheduler by its registry name ('constant', 'flsw', 'cwsm')."""
        classes = {
            'constant': ConstantScheduler,
            'flsw': FlswScheduler,
            'cwsm': CwsmScheduler,
        }
        assert name in classes, f'Unsupported temperature scheduler {name}. Expect one from {list(classes.keys())}.'
        return classes[name]()
class FunctionalScheduler(TemperatureScheduler):
    """Adapts a plain scheduler function into a :class:`TemperatureScheduler`."""

    def __init__(self, scheduler_func, base_temperature) -> None:
        super().__init__(base_temperature)
        self._scheduler_func = scheduler_func

    def forward(self, logits_S, logits_T):
        func = self._scheduler_func
        return func(logits_S, logits_T, self.base_temperature)
class ConstantScheduler(TemperatureScheduler):
    """Always distills at the fixed ``base_temperature``."""

    def forward(self, logits_S, logits_T):
        return self.base_temperature
class FlswScheduler(FunctionalScheduler):
    """FLSW temperature scheduler (arXiv:1911.07471)."""

    def __init__(self, beta=1, gamma=1, eps=1e-4, base_temperature=8):
        super().__init__(flsw_temperature_scheduler_builder(beta, gamma, eps),
                         base_temperature)
        self.beta = beta
        self.gamma = gamma
        self.eps = eps
class CwsmScheduler(FunctionalScheduler):
    """CWSM temperature scheduler (arXiv:1911.07471)."""

    def __init__(self, beta=1, base_temperature=8):
        super().__init__(cwsm_temperature_scheduler_builder(beta),
                         base_temperature)
        self.beta = beta
| {
"repo_name": "hankcs/HanLP",
"path": "hanlp/components/distillation/schedulers.py",
"copies": "1",
"size": "3585",
"license": "apache-2.0",
"hash": 3873581398111176700,
"line_mean": 27.9112903226,
"line_max": 116,
"alpha_frac": 0.6292887029,
"autogenerated": false,
"ratio": 3.635902636916836,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47651913398168355,
"avg_score": null,
"num_lines": null
} |
# Adopted from https://github.com/airaria/TextBrewer
# Apache License Version 2.0
import torch
import torch.nn.functional as F
from hanlp_common.configurable import AutoConfigurable
def kd_mse_loss(logits_S, logits_T, temperature=1):
    '''
    MSE between temperature-scaled student and teacher logits.

    :param logits_S: Tensor of shape (batch_size, length, num_labels) or (batch_size, num_labels)
    :param logits_T: Tensor of shape (batch_size, length, num_labels) or (batch_size, num_labels)
    :param temperature: A float or a tensor of shape (batch_size, length) or (batch_size,)
    '''
    if isinstance(temperature, torch.Tensor) and temperature.dim() > 0:
        # Broadcast a per-position temperature over the label dimension.
        temperature = temperature.unsqueeze(-1)
    return F.mse_loss(logits_S / temperature, logits_T / temperature)
def kd_ce_loss(logits_S, logits_T, temperature=1):
    '''
    Soft cross entropy between temperature-scaled student and teacher logits.

    :param logits_S: Tensor of shape (batch_size, length, num_labels) or (batch_size, num_labels)
    :param logits_T: Tensor of shape (batch_size, length, num_labels) or (batch_size, num_labels)
    :param temperature: A float or a tensor of shape (batch_size, length) or (batch_size,)
    '''
    if isinstance(temperature, torch.Tensor) and temperature.dim() > 0:
        # Broadcast a per-position temperature over the label dimension.
        temperature = temperature.unsqueeze(-1)
    scaled_S = logits_S / temperature
    scaled_T = logits_T / temperature
    # Teacher distribution against student log-probabilities.
    return -(F.softmax(scaled_T, dim=-1) * F.log_softmax(scaled_S, dim=-1)).sum(dim=-1).mean()
def att_mse_loss(attention_S, attention_T, mask=None):
    '''
    MSE between attention matrices, optionally restricted to unmasked positions.

    :param torch.Tensor attention_S: tensor of shape (*batch_size*, *num_heads*, *length*, *length*)
    :param torch.Tensor attention_T: tensor of shape (*batch_size*, *num_heads*, *length*, *length*)
    :param torch.Tensor mask: tensor of shape (*batch_size*, *length*); positions where ``mask==0`` are ignored
    '''
    if mask is None:
        # Zero out large-negative pre-softmax filler positions before comparing.
        cleaned_S = torch.where(attention_S <= -1e-3, torch.zeros_like(attention_S), attention_S)
        cleaned_T = torch.where(attention_T <= -1e-3, torch.zeros_like(attention_T), attention_T)
        return F.mse_loss(cleaned_S, cleaned_T)
    mask = mask.to(attention_S).unsqueeze(1).expand(-1, attention_S.size(1), -1)  # (bs, num_heads, len)
    valid_count = torch.pow(mask.sum(dim=2), 2).sum()
    sq_err = F.mse_loss(attention_S, attention_T, reduction='none')
    return (sq_err * mask.unsqueeze(-1) * mask.unsqueeze(2)).sum() / valid_count
def att_mse_sum_loss(attention_S, attention_T, mask=None):
    '''
    MSE between attention matrices after summing over the head dimension.

    * 4D inputs (*batch_size*, *num_heads*, *length*, *length*) are first summed
      along `num_heads`; 3D inputs are used as-is.
    * If `mask` is given, positions where ``mask==0`` are ignored.

    :param torch.Tensor attention_S: tensor of shape (*batch_size*, *num_heads*, *length*, *length*) or (*batch_size*, *length*, *length*)
    :param torch.Tensor attention_T: tensor of shape (*batch_size*, *num_heads*, *length*, *length*) or (*batch_size*, *length*, *length*)
    :param torch.Tensor mask: tensor of shape (*batch_size*, *length*)
    '''
    if len(attention_S.size()) == 4:
        # Collapse the head dimension first.
        attention_T = attention_T.sum(dim=1)
        attention_S = attention_S.sum(dim=1)
    if mask is None:
        # Zero out large-negative pre-softmax filler positions before comparing.
        cleaned_S = torch.where(attention_S <= -1e-3, torch.zeros_like(attention_S), attention_S)
        cleaned_T = torch.where(attention_T <= -1e-3, torch.zeros_like(attention_T), attention_T)
        return F.mse_loss(cleaned_S, cleaned_T)
    mask = mask.to(attention_S)
    valid_count = torch.pow(mask.sum(dim=1), 2).sum()
    sq_err = F.mse_loss(attention_S, attention_T, reduction='none')
    return (sq_err * mask.unsqueeze(-1) * mask.unsqueeze(1)).sum() / valid_count
def att_ce_loss(attention_S, attention_T, mask=None):
    '''
    Cross entropy between attention matrices; softmax is applied on ``dim=-1``.

    :param torch.Tensor attention_S: tensor of shape (*batch_size*, *num_heads*, *length*, *length*)
    :param torch.Tensor attention_T: tensor of shape (*batch_size*, *num_heads*, *length*, *length*)
    :param torch.Tensor mask: tensor of shape (*batch_size*, *length*); positions where ``mask==0`` are ignored
    '''
    probs_T = F.softmax(attention_T, dim=-1)
    log_probs_S = F.log_softmax(attention_S, dim=-1)
    if mask is None:
        # Drop teacher probabilities at large-negative filler positions.
        probs_T_select = torch.where(attention_T <= -1e-3, torch.zeros_like(attention_T), probs_T)
        return -(probs_T_select * log_probs_S).sum(dim=-1).mean()
    mask = mask.to(attention_S).unsqueeze(1).expand(-1, attention_S.size(1), -1)  # (bs, num_heads, len)
    per_position = (probs_T * log_probs_S * mask.unsqueeze(2)).sum(dim=-1)
    return -(per_position * mask).sum() / mask.sum()
def att_ce_mean_loss(attention_S, attention_T, mask=None):
    '''
    Cross entropy between attention matrices after averaging over heads;
    softmax is applied on ``dim=-1``.

    * 4D inputs (*batch_size*, *num_heads*, *length*, *length*) are first
      averaged along `num_heads`; 3D inputs are used as-is.
    * If `mask` is given, positions where ``mask==0`` are ignored.

    :param torch.Tensor attention_S: tensor of shape (*batch_size*, *num_heads*, *length*, *length*) or (*batch_size*, *length*, *length*)
    :param torch.Tensor attention_T: tensor of shape (*batch_size*, *num_heads*, *length*, *length*) or (*batch_size*, *length*, *length*)
    :param torch.Tensor mask: tensor of shape (*batch_size*, *length*)
    '''
    if len(attention_S.size()) == 4:
        attention_S = attention_S.mean(dim=1)  # (bs, len, len)
        attention_T = attention_T.mean(dim=1)
    probs_T = F.softmax(attention_T, dim=-1)
    log_probs_S = F.log_softmax(attention_S, dim=-1)
    if mask is None:
        # Drop teacher probabilities at large-negative filler positions.
        probs_T_select = torch.where(attention_T <= -1e-3, torch.zeros_like(attention_T), probs_T)
        return -(probs_T_select * log_probs_S).sum(dim=-1).mean()
    mask = mask.to(attention_S)
    per_position = (probs_T * log_probs_S * mask.unsqueeze(1)).sum(dim=-1)
    return -(per_position * mask).sum() / mask.sum()
def hid_mse_loss(state_S, state_T, mask=None):
    '''
    MSE between hidden states, optionally restricted to unmasked positions.

    If the hidden sizes of student and teacher differ, a 'proj' option is
    required in `intermediate_matches` to match the dimensions.

    :param torch.Tensor state_S: tensor of shape (*batch_size*, *length*, *hidden_size*)
    :param torch.Tensor state_T: tensor of shape (*batch_size*, *length*, *hidden_size*)
    :param torch.Tensor mask: tensor of shape (*batch_size*, *length*); positions where ``mask==0`` are ignored
    '''
    if mask is None:
        return F.mse_loss(state_S, state_T)
    mask = mask.to(state_S)
    # Normalize by the number of unmasked scalar entries.
    valid_count = mask.sum() * state_S.size(-1)
    sq_err = F.mse_loss(state_S, state_T, reduction='none')
    return (sq_err * mask.unsqueeze(-1)).sum() / valid_count
def cos_loss(state_S, state_T, mask=None):
    '''
    * Computes the cosine similarity loss between the inputs. This is the loss used in DistilBERT, see `DistilBERT <https://arxiv.org/abs/1910.01108>`_
    * If the `mask` is given, masks the positions where ``mask==0``.
    * If the hidden sizes of student and teacher are different, 'proj' option is required in `intermediate_matches` to match the dimensions.

    :param torch.Tensor state_S: tensor of shape (*batch_size*, *length*, *hidden_size*)
    :param torch.Tensor state_T: tensor of shape (*batch_size*, *length*, *hidden_size*)
    :param torch.Tensor mask: tensor of shape (*batch_size*, *length*)
    '''
    if mask is None:
        state_S = state_S.view(-1, state_S.size(-1))
        state_T = state_T.view(-1, state_T.size(-1))
    else:
        # torch.masked_select requires a bool mask; the previous float cast
        # (mask.to(state_S)) raises a RuntimeError on modern PyTorch.
        mask = mask.to(torch.bool).unsqueeze(-1).expand_as(state_S)  # (bs, len, dim)
        state_S = torch.masked_select(state_S, mask).view(-1, state_S.size(-1))  # (bs * select, dim)
        state_T = torch.masked_select(state_T, mask).view(-1, state_T.size(-1))  # (bs * select, dim)
    # Target 1 for every pair: maximize cosine similarity.
    target = state_S.new_ones(state_S.size(0))
    loss = F.cosine_embedding_loss(state_S, state_T, target, reduction='mean')
    return loss
def pkd_loss(state_S, state_T, mask=None):
    '''
    Normalized-vector MSE at position 0 along the `length` dimension, as used
    in BERT-PKD (`Patient Knowledge Distillation for BERT Model Compression
    <https://arxiv.org/abs/1908.09355>`_). If the hidden sizes of student and
    teacher differ, a 'proj' option is required in `intermediate_matches`.

    :param torch.Tensor state_S: tensor of shape (*batch_size*, *length*, *hidden_size*)
    :param torch.Tensor state_T: tensor of shape (*batch_size*, *length*, *hidden_size*)
    :param mask: not used.
    '''
    cls_S = state_S[:, 0]  # (batch_size, hidden_dim)
    cls_T = state_T[:, 0]  # (batch_size, hidden_dim)
    # L2-normalize both [CLS] vectors before comparing.
    unit_S = cls_S / torch.norm(cls_S, dim=1, keepdim=True)
    unit_T = cls_T / torch.norm(cls_T, dim=1, keepdim=True)
    return (unit_S - unit_T).pow(2).sum(dim=-1).mean()
def fsp_loss(state_S, state_T, mask=None):
r'''
* Takes in two lists of matrics `state_S` and `state_T`. Each list contains two matrices of the shape (*batch_size*, *length*, *hidden_size*). Computes the similarity matrix between the two matrices in `state_S` ( with the resulting shape (*batch_size*, *hidden_size*, *hidden_size*) ) and the ones in B ( with the resulting shape (*batch_size*, *hidden_size*, *hidden_size*) ), then computes the mse loss between the similarity matrices:
.. math::
loss = mean((S_{1}^T \cdot S_{2} - T_{1}^T \cdot T_{2})^2)
* It is a Variant of FSP loss in `A Gift from Knowledge Distillation: Fast Optimization, Network Minimization and Transfer Learning <http://openaccess.thecvf.com/content_cvpr_2017/papers/Yim_A_Gift_From_CVPR_2017_paper.pdf>`_.
* If the `inputs_mask` is given, masks the positions where ``input_mask==0``.
* If the hidden sizes of student and teacher are different, 'proj' option is required in `inetermediate_matches` to match the dimensions.
:param torch.tensor state_S: list of two tensors, each tensor is of the shape (*batch_size*, *length*, *hidden_size*)
:param torch.tensor state_T: list of two tensors, each tensor is of the shape (*batch_size*, *length*, *hidden_size*)
:param torch.tensor mask: tensor of the shape (*batch_size*, *length*)
Example in `intermediate_matches`::
intermediate_matches = [
{'layer_T':[0,0], 'layer_S':[0,0], 'feature':'hidden','loss': 'fsp', 'weight' : 1, 'proj':['linear',384,768]},
...]
'''
if mask is None:
state_S_0 = state_S[0] # (batch_size , length, hidden_dim)
state_S_1 = state_S[1] # (batch_size, length, hidden_dim)
state_T_0 = state_T[0]
state_T_1 = state_T[1]
gram_S = torch.bmm(state_S_0.transpose(1, 2), state_S_1) / state_S_1.size(
1) # (batch_size, hidden_dim, hidden_dim)
gram_T = torch.bmm(state_T_0.transpose(1, 2), state_T_1) / state_T_1.size(1)
else:
mask = mask.to(state_S[0]).unsqueeze(-1)
lengths = mask.sum(dim=1, keepdim=True)
state_S_0 = state_S[0] * mask
state_S_1 = state_S[1] * mask
state_T_0 = state_T[0] * mask
state_T_1 = state_T[1] * mask
gram_S = torch.bmm(state_S_0.transpose(1, 2), state_S_1) / lengths
gram_T = torch.bmm(state_T_0.transpose(1, 2), state_T_1) / lengths
loss = F.mse_loss(gram_S, gram_T)
return loss
def mmd_loss(state_S, state_T, mask=None):
r'''
* Takes in two lists of matrices `state_S` and `state_T`. Each list contains 2 matrices of the shape (*batch_size*, *length*, *hidden_size*). `hidden_size` of matrices in `State_S` doesn't need to be the same as that of `state_T`. Computes the similarity matrix between the two matrices in `state_S` ( with the resulting shape (*batch_size*, *length*, *length*) ) and the ones in B ( with the resulting shape (*batch_size*, *length*, *length*) ), then computes the mse loss between the similarity matrices:
.. math::
loss = mean((S_{1} \cdot S_{2}^T - T_{1} \cdot T_{2}^T)^2)
* It is a Variant of the NST loss in `Like What You Like: Knowledge Distill via Neuron Selectivity Transfer <https://arxiv.org/abs/1707.01219>`_
* If the `inputs_mask` is given, masks the positions where ``input_mask==0``.
:param torch.tensor state_S: list of two tensors, each tensor is of the shape (*batch_size*, *length*, *hidden_size*)
:param torch.tensor state_T: list of two tensors, each tensor is of the shape (*batch_size*, *length*, *hidden_size*)
:param torch.tensor mask: tensor of the shape (*batch_size*, *length*)
Example in `intermediate_matches`::
intermediate_matches = [
{'layer_T':[0,0], 'layer_S':[0,0], 'feature':'hidden','loss': 'nst', 'weight' : 1},
...]
'''
state_S_0 = state_S[0] # (batch_size , length, hidden_dim_S)
state_S_1 = state_S[1] # (batch_size , length, hidden_dim_S)
state_T_0 = state_T[0] # (batch_size , length, hidden_dim_T)
state_T_1 = state_T[1] # (batch_size , length, hidden_dim_T)
if mask is None:
gram_S = torch.bmm(state_S_0, state_S_1.transpose(1, 2)) / state_S_1.size(2) # (batch_size, length, length)
gram_T = torch.bmm(state_T_0, state_T_1.transpose(1, 2)) / state_T_1.size(2)
loss = F.mse_loss(gram_S, gram_T)
else:
mask = mask.to(state_S[0])
valid_count = torch.pow(mask.sum(dim=1), 2).sum()
gram_S = torch.bmm(state_S_0, state_S_1.transpose(1, 2)) / state_S_1.size(1) # (batch_size, length, length)
gram_T = torch.bmm(state_T_0, state_T_1.transpose(1, 2)) / state_T_1.size(1)
loss = (F.mse_loss(gram_S, gram_T, reduction='none') * mask.unsqueeze(-1) * mask.unsqueeze(
1)).sum() / valid_count
return loss
class KnowledgeDistillationLoss(AutoConfigurable):
    """Looks up one of this module's loss functions by name and wraps it as a
    configurable callable."""

    def __init__(self, name) -> None:
        super().__init__()
        self.name = name
        import sys
        # Resolve the loss function defined at this module's top level.
        self._loss = getattr(sys.modules[__name__], name)

    def __call__(self, *args, **kwargs):
        return self._loss(*args, **kwargs)
| {
"repo_name": "hankcs/HanLP",
"path": "hanlp/components/distillation/losses.py",
"copies": "1",
"size": "15061",
"license": "apache-2.0",
"hash": 8504197230807629000,
"line_mean": 51.8456140351,
"line_max": 510,
"alpha_frac": 0.6354823717,
"autogenerated": false,
"ratio": 3.1370547802541138,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4272537151954114,
"avg_score": null,
"num_lines": null
} |
# Adopted from https://github.com/allenai/allennlp under Apache Licence 2.0.
# Changed the packaging and created a subclass CharCNNEmbedding
from typing import Union, Tuple, Optional, Callable
import torch
from torch import nn
from alnlp.modules.cnn_encoder import CnnEncoder
from alnlp.modules.time_distributed import TimeDistributed
from hanlp_common.configurable import AutoConfigurable
from hanlp.common.transform import VocabDict, ToChar
from hanlp.common.vocab import Vocab
from hanlp.layers.embeddings.embedding import EmbeddingDim, Embedding
class CharCNN(nn.Module):
    def __init__(self,
                 field: str,
                 embed: Union[int, Embedding], num_filters: int,
                 ngram_filter_sizes: Tuple[int, ...] = (2, 3, 4, 5),
                 conv_layer_activation: str = 'ReLU',
                 output_dim: Optional[int] = None,
                 vocab_size=None) -> None:
        """A `CnnEncoder` is a combination of multiple convolution layers and max pooling layers.

        The input to this module is of shape `(batch_size, num_tokens, input_dim)`, and the
        output is of shape `(batch_size, output_dim)`. There is one convolution layer per
        ngram filter size, each producing `num_filters` outputs; after max pooling the result
        has dimensionality `len(ngram_filter_sizes) * num_filters`, optionally projected down
        to `output_dim`. See "A Sensitivity Analysis of (and Practitioners' Guide to)
        Convolutional Neural Networks for Sentence Classification", Zhang and Wallace 2016,
        and allennlp.modules.seq2vec_encoders.cnn_encoder.CnnEncoder (Apache 2.0).

        Args:
            field: The field in samples this encoder will work on.
            embed: An embedding module (``nn.Module``) or the feature size to create an
                ``nn.Embedding`` from.
            num_filters: Output dim of each convolutional layer (number of "filters").
            ngram_filter_sizes: Number and sizes of the convolutional layers.
            conv_layer_activation: Activation to use after the convolution layers.
            output_dim: Optional projection size; if ``None`` the max-pooled features of size
                `len(ngram_filter_sizes) * num_filters` are returned directly.
            vocab_size: The size of the character vocab (only used when ``embed`` is an int).

        Returns:
            A tensor of shape `(batch_size, output_dim)`.
        """
        super().__init__()
        EmbeddingDim.__init__(self)
        if isinstance(embed, int):
            # Feature size given: build the embedding layer ourselves.
            embed = nn.Embedding(num_embeddings=vocab_size,
                                 embedding_dim=embed)
        elif not isinstance(embed, nn.Module):
            # Previously any non-int hit an unconditional `raise`, which broke
            # CharCNNEmbedding.module() passing an already-built nn.Module.
            raise ValueError(f'Unrecognized type for {embed}')
        self.field = field
        self.embed = TimeDistributed(embed)
        self.encoder = TimeDistributed(
            CnnEncoder(embed.embedding_dim, num_filters, ngram_filter_sizes, conv_layer_activation, output_dim))
        self.embedding_dim = output_dim or num_filters * len(ngram_filter_sizes)

    def forward(self, batch: dict, **kwargs):
        """Encode the `<field>_char_id` tensor of the batch into per-token vectors."""
        tokens: torch.Tensor = batch[f'{self.field}_char_id']
        # Non-negative ids are real characters; negatives mark padding.
        mask = tokens.ge(0)
        x = self.embed(tokens)
        return self.encoder(x, mask)

    def get_output_dim(self) -> int:
        """Size of the per-token output vectors."""
        return self.embedding_dim
class CharCNNEmbedding(Embedding, AutoConfigurable):
    def __init__(self,
                 field,
                 embed: Union[int, Embedding],
                 num_filters: int,
                 ngram_filter_sizes: Tuple[int, ...] = (2, 3, 4, 5),
                 conv_layer_activation: str = 'ReLU',
                 output_dim: Optional[int] = None,
                 min_word_length=None
                 ) -> None:
        """Configurable factory for a character-level CNN token embedding.

        Args:
            field: The character field in samples this encoder will work on.
            embed: An ``Embedding`` object or the feature size to create an ``Embedding`` object.
            num_filters: Output dim of each convolutional layer (number of "filters").
            ngram_filter_sizes: Number and sizes of the convolutional layers.
            conv_layer_activation: Activation to use after the convolution layers.
            output_dim: Optional projection size; if ``None`` the pooled features of size
                `len(ngram_filter_sizes) * num_filters` are returned directly.
            min_word_length: Minimum number of characters a token must be padded to so the
                largest ngram filter fits; defaults to ``max(ngram_filter_sizes)``.
        """
        super().__init__()
        self.min_word_length = max(ngram_filter_sizes) if min_word_length is None else min_word_length
        self.output_dim = output_dim
        self.conv_layer_activation = conv_layer_activation
        self.ngram_filter_sizes = ngram_filter_sizes
        self.num_filters = num_filters
        self.embed = embed
        self.field = field

    def transform(self, vocabs: VocabDict, **kwargs) -> Optional[Callable]:
        """Create the character vocab if missing and return a ``ToChar`` transform."""
        if isinstance(self.embed, Embedding):
            self.embed.transform(vocabs=vocabs)
        name = self.vocab_name
        if name not in vocabs:
            vocabs[name] = Vocab()
        return ToChar(self.field, name, min_word_length=self.min_word_length,
                      pad=vocabs[name].safe_pad_token)

    @property
    def vocab_name(self):
        """Name of the character vocabulary, derived from the field."""
        return f'{self.field}_char'

    def module(self, vocabs: VocabDict, **kwargs) -> Optional[nn.Module]:
        """Build the :class:`CharCNN` module backed by the configured embedding."""
        embed = self.embed
        if isinstance(embed, Embedding):
            embed = embed.module(vocabs=vocabs)
        return CharCNN(self.field,
                       embed,
                       self.num_filters,
                       self.ngram_filter_sizes,
                       self.conv_layer_activation,
                       self.output_dim,
                       vocab_size=len(vocabs[self.vocab_name]))
| {
"repo_name": "hankcs/HanLP",
"path": "hanlp/layers/embeddings/char_cnn.py",
"copies": "1",
"size": "7436",
"license": "apache-2.0",
"hash": -3051699420792094700,
"line_mean": 49.5714285714,
"line_max": 123,
"alpha_frac": 0.6280602637,
"autogenerated": false,
"ratio": 4.2,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5328060263700001,
"avg_score": null,
"num_lines": null
} |
# Adopted from https://github.com/allenai/allennlp under Apache Licence 2.0.
# Changed the packaging.
from typing import List, Set, Tuple, Dict
import numpy
def decode_mst(
    energy: numpy.ndarray, length: int, has_labels: bool = True
) -> Tuple[numpy.ndarray, numpy.ndarray]:
    """Decode the *maximum* spanning arborescence with Chu-Liu-Edmonds.

    Adopted from https://github.com/allenai/allennlp/blob/master/allennlp/nn/chu_liu_edmonds.py
    (Apache License 2.0). Note: counter to typical intuition, this decodes the
    _maximum_ spanning tree.

    Args:
        energy: Edge scores. Shape (num_labels, timesteps, timesteps) when
            ``has_labels`` is True, else (timesteps, timesteps).
        length: True sequence length (the energy may come from a padded batch).
        has_labels: Whether the graph has labels.

    Returns:
        A ``(heads, head_type)`` pair of int32 arrays of size ``max_length``;
        ``head_type`` is ``None`` when ``has_labels`` is False.
    """
    if has_labels and energy.ndim != 3:
        raise ValueError("The dimension of the energy array is not equal to 3.")
    elif not has_labels and energy.ndim != 2:
        raise ValueError("The dimension of the energy array is not equal to 2.")
    max_length = energy.shape[-1]

    # Clip the (possibly padded) energy matrix to the true sequence length.
    if has_labels:
        energy = energy[:, :length, :length]
        # Best label for each arc, then reduce to an unlabeled score matrix.
        label_id_matrix = energy.argmax(axis=0)
        energy = energy.max(axis=0)
    else:
        energy = energy[:length, :length]
        label_id_matrix = None

    original_score_matrix = energy
    score_matrix = numpy.array(original_score_matrix, copy=True)

    old_input = numpy.zeros([length, length], dtype=numpy.int32)
    old_output = numpy.zeros([length, length], dtype=numpy.int32)
    current_nodes = [True] * length
    representatives: List[Set[int]] = []

    for node1 in range(length):
        original_score_matrix[node1, node1] = 0.0
        score_matrix[node1, node1] = 0.0
        representatives.append({node1})
        for node2 in range(node1 + 1, length):
            old_input[node1, node2] = node1
            old_output[node1, node2] = node2
            old_input[node2, node1] = node2
            old_output[node2, node1] = node1

    final_edges: Dict[int, int] = {}
    # The main algorithm operates in place.
    chu_liu_edmonds(
        length, score_matrix, current_nodes, final_edges, old_input, old_output, representatives
    )

    heads = numpy.zeros([max_length], numpy.int32)
    head_type = numpy.ones([max_length], numpy.int32) if has_labels else None
    for child, parent in final_edges.items():
        heads[child] = parent
        if has_labels:
            head_type[child] = label_id_matrix[parent, child]
    return heads, head_type
def chu_liu_edmonds(
    length: int,
    score_matrix: numpy.ndarray,
    current_nodes: List[bool],
    final_edges: Dict[int, int],
    old_input: numpy.ndarray,
    old_output: numpy.ndarray,
    representatives: List[Set[int]],
):
    """Applies the Chu-Liu-Edmonds algorithm recursively to a graph with edge
    weights defined by ``score_matrix``.

    Note that this function operates in place: ``score_matrix``,
    ``current_nodes``, ``final_edges``, ``old_input``, ``old_output`` and
    ``representatives`` are all modified.

    Args:
        length: The number of nodes.
        score_matrix: The score matrix representing the scores for pairs of
            nodes; ``score_matrix[parent, child]`` is the score of that arc.
        current_nodes: The nodes which are representatives in the graph. A
            representative at its most basic represents a node, but as the
            algorithm progresses, individual nodes will represent collapsed
            cycles in the graph.
        final_edges: An empty dictionary which will be populated with the
            ``child -> parent`` edges of the maximum spanning tree.
        old_input: Original parent endpoints of (possibly contracted) arcs.
        old_output: Original child endpoints of (possibly contracted) arcs.
        representatives: A list containing the nodes that a particular node
            is representing at this iteration in the graph.

    Returns:
        Nothing - all variables are modified in place.
    """
    # Set the initial graph to be the greedy best one: each node's parent is
    # its highest-scoring incoming arc (node 0 is the root, parent -1).
    parents = [-1]
    for node1 in range(1, length):
        parents.append(0)
        if current_nodes[node1]:
            max_score = score_matrix[0, node1]
            for node2 in range(1, length):
                if node2 == node1 or not current_nodes[node2]:
                    continue
                new_score = score_matrix[node2, node1]
                if new_score > max_score:
                    max_score = new_score
                    parents[node1] = node2
    # Check if this solution has a cycle.
    has_cycle, cycle = _find_cycle(parents, length, current_nodes)
    # If there are no cycles, find all edges and return.
    if not has_cycle:
        final_edges[0] = -1
        for node in range(1, length):
            if not current_nodes[node]:
                continue
            # Map the (possibly contracted) arc back to its original endpoints.
            parent = old_input[parents[node], node]
            child = old_output[parents[node], node]
            final_edges[child] = parent
        return
    # Otherwise, we have a cycle so we need to remove an edge.
    # From here until the recursive call is the contraction stage of the algorithm.
    cycle_weight = 0.0
    # Find the weight of the cycle.
    index = 0
    for node in cycle:
        index += 1
        cycle_weight += score_matrix[parents[node], node]
    # For each node in the graph, find the maximum weight incoming
    # and outgoing edge into the cycle.
    cycle_representative = cycle[0]
    for node in range(length):
        if not current_nodes[node] or node in cycle:
            continue
        in_edge_weight = float("-inf")
        in_edge = -1
        out_edge_weight = float("-inf")
        out_edge = -1
        for node_in_cycle in cycle:
            if score_matrix[node_in_cycle, node] > in_edge_weight:
                in_edge_weight = score_matrix[node_in_cycle, node]
                in_edge = node_in_cycle
            # Add the new edge score to the cycle weight
            # and subtract the edge we're considering removing.
            score = (
                cycle_weight
                + score_matrix[node, node_in_cycle]
                - score_matrix[parents[node_in_cycle], node_in_cycle]
            )
            if score > out_edge_weight:
                out_edge_weight = score
                out_edge = node_in_cycle
        # Redirect the best arcs through the cycle's representative node.
        score_matrix[cycle_representative, node] = in_edge_weight
        old_input[cycle_representative, node] = old_input[in_edge, node]
        old_output[cycle_representative, node] = old_output[in_edge, node]
        score_matrix[node, cycle_representative] = out_edge_weight
        old_output[node, cycle_representative] = old_output[node, out_edge]
        old_input[node, cycle_representative] = old_input[node, out_edge]
    # For the next recursive iteration, we want to consider the cycle as a
    # single node. Here we collapse the cycle into the first node in the
    # cycle (first node is arbitrary), set all the other nodes not be
    # considered in the next iteration. We also keep track of which
    # representatives we are considering this iteration because we need
    # them below to check if we're done.
    considered_representatives: List[Set[int]] = []
    for i, node_in_cycle in enumerate(cycle):
        considered_representatives.append(set())
        if i > 0:
            # We need to consider at least one
            # node in the cycle, arbitrarily choose
            # the first.
            current_nodes[node_in_cycle] = False
        for node in representatives[node_in_cycle]:
            considered_representatives[i].add(node)
            if i > 0:
                representatives[cycle_representative].add(node)
    chu_liu_edmonds(
        length, score_matrix, current_nodes, final_edges, old_input, old_output, representatives
    )
    # Expansion stage.
    # check each node in cycle, if one of its representatives
    # is a key in the final_edges, it is the one we need.
    found = False
    key_node = -1
    for i, node in enumerate(cycle):
        for cycle_rep in considered_representatives[i]:
            if cycle_rep in final_edges:
                key_node = node
                found = True
                break
        if found:
            break
    # Walk the cycle from the broken edge, restoring the original arcs.
    previous = parents[key_node]
    while previous != key_node:
        child = old_output[parents[previous], previous]
        parent = old_input[parents[previous], previous]
        final_edges[child] = parent
        previous = parents[previous]
def _find_cycle(
parents: List[int], length: int, current_nodes: List[bool]
) -> Tuple[bool, List[int]]:
added = [False for _ in range(length)]
added[0] = True
cycle = set()
has_cycle = False
for i in range(1, length):
if has_cycle:
break
# don't redo nodes we've already
# visited or aren't considering.
if added[i] or not current_nodes[i]:
continue
# Initialize a new possible cycle.
this_cycle = set()
this_cycle.add(i)
added[i] = True
has_cycle = True
next_node = i
while parents[next_node] not in this_cycle:
next_node = parents[next_node]
# If we see a node we've already processed,
# we can stop, because the node we are
# processing would have been in that cycle.
if added[next_node]:
has_cycle = False
break
added[next_node] = True
this_cycle.add(next_node)
if has_cycle:
original = next_node
cycle.add(original)
next_node = parents[original]
while next_node != original:
cycle.add(next_node)
next_node = parents[next_node]
break
return has_cycle, list(cycle)
| {
"repo_name": "hankcs/HanLP",
"path": "hanlp/components/parsers/chu_liu_edmonds.py",
"copies": "1",
"size": "10923",
"license": "apache-2.0",
"hash": 7361882799741844000,
"line_mean": 33.7866242038,
"line_max": 96,
"alpha_frac": 0.6064268058,
"autogenerated": false,
"ratio": 3.9864963503649635,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5092923156164963,
"avg_score": null,
"num_lines": null
} |
# adopted from https://github.com/danvk/RangeHTTPServer to allow CORS
import os
import re
try:
from http.server import SimpleHTTPRequestHandler
except ImportError:
from SimpleHTTPServer import SimpleHTTPRequestHandler
def copy_byte_range(infile, outfile, start=None, stop=None, bufsize=16*1024):
    '''Like shutil.copyfileobj, but only copy a range of the streams.

    Both start and stop are inclusive byte offsets; either may be None,
    meaning "from the current position" / "to the end of the stream".
    '''
    if start is not None:
        infile.seek(start)
    while True:
        # Bug fix: the previous check was `if stop`, which treated a stop
        # offset of 0 as "no limit" and copied the whole stream for the
        # byte range 0-0.
        if stop is not None:
            to_read = min(bufsize, stop + 1 - infile.tell())
        else:
            to_read = bufsize
        buf = infile.read(to_read)
        if not buf:
            break
        outfile.write(buf)
# Matches e.g. 'bytes=123-456' or 'bytes=123-' (open-ended range).
BYTE_RANGE_RE = re.compile(r'bytes=(\d+)-(\d+)?$')


def parse_byte_range(byte_range):
    '''Returns the two numbers in 'bytes=123-456' or throws ValueError.

    The last number or both numbers may be None.
    '''
    if byte_range.strip() == '':
        return None, None
    m = BYTE_RANGE_RE.match(byte_range)
    if not m:
        raise ValueError('Invalid byte range %s' % byte_range)
    # `x and int(x)` maps a missing group (None) to None, digits to int.
    first, last = [x and int(x) for x in m.groups()]
    # Bug fix: compare with `is not None` so a last byte of 0 (e.g.
    # 'bytes=5-0') is still rejected instead of slipping through because
    # 0 is falsy.
    if last is not None and last < first:
        raise ValueError('Invalid byte range %s' % byte_range)
    return first, last
class RangeRequestHandler(SimpleHTTPRequestHandler):
    """Adds support for HTTP 'Range' requests to SimpleHTTPRequestHandler.

    The approach is to:
    - Override send_head to look for 'Range' and respond appropriately.
    - Override copyfile to only transmit a range when requested.
    - Override end_headers to add a permissive CORS header to every reply.
    """

    def send_head(self):
        """Open the requested file and emit status line + headers.

        Returns the open file object (the caller copies and closes it) or
        None on error. Note this handler always answers 206 with a
        Content-Range, serving the whole file when no Range was supplied —
        presumably intentional for this CORS helper; confirm with clients.
        """
        if 'Range' not in self.headers:
            self.range = None
        else:
            try:
                self.range = parse_byte_range(self.headers['Range'])
            except ValueError:
                self.send_error(400, 'Invalid byte range')
                return None
        # Bug fix: the original `first, last = self.range if self.range
        # else 0, None` parsed as the 2-tuple `((range or 0), None)`, so
        # `first` received the whole range tuple whenever a Range header
        # was present, crashing on `first >= file_len` below.
        if self.range:
            first, last = self.range
        else:
            first, last = 0, None
        if first is None:
            first = 0
        # Mirroring SimpleHTTPServer.py here
        path = self.translate_path(self.path)
        ctype = self.guess_type(path)
        try:
            f = open(path, 'rb')
        except IOError:
            self.send_error(404, 'File not found')
            return None
        fs = os.fstat(f.fileno())
        file_len = fs[6]
        if first >= file_len:
            # Close the file before bailing out; the caller never sees it.
            f.close()
            self.send_error(416, 'Requested Range Not Satisfiable')
            return None
        self.send_response(206)
        # Bug fix: `ctype` was computed but a hard-coded 'text/html' was
        # sent, mislabeling every non-HTML file.
        self.send_header('Content-type', ctype)
        self.send_header('Accept-Ranges', 'bytes')
        if last is None or last >= file_len:
            last = file_len - 1
        response_length = last - first + 1
        self.send_header('Content-Range',
                         'bytes %s-%s/%s' % (first, last, file_len))
        self.send_header('Content-Length', str(response_length))
        self.send_header('Last-Modified', self.date_time_string(fs.st_mtime))
        self.end_headers()
        return f

    def end_headers(self):
        # Allow cross-origin access from any page (CORS).
        self.send_header('Access-Control-Allow-Origin', '*')
        return SimpleHTTPRequestHandler.end_headers(self)

    def copyfile(self, source, outputfile):
        """Copy either the whole file or just the requested byte range."""
        if not self.range:
            return SimpleHTTPRequestHandler.copyfile(self, source, outputfile)
        # SimpleHTTPRequestHandler uses shutil.copyfileobj, which doesn't let
        # you stop the copying before the end of the file.
        start, stop = self.range  # set in send_head()
        copy_byte_range(source, outputfile, start, stop)
#class CORSHTTPRequestHandler(RangeHTTPServer.SimpleHTTPRequestHandler):
# def send_head(self):
# """Common code for GET and HEAD commands.
#
# This sends the response code and MIME headers.
#
# Return value is either a file object (which has to be copied
# to the outputfile by the caller unless the command was HEAD,
# and must be closed by the caller under all circumstances), or
# None, in which case the caller has nothing further to do.
#
# """
# path = self.translate_path(self.path)
# f = None
# if os.path.isdir(path):
# if not self.path.endswith('/'):
# # redirect browser - doing basically what apache does
# self.send_response(301)
# self.send_header("Location", self.path + "/")
# self.end_headers()
# return None
# for index in "index.html", "index.htm":
# index = os.path.join(path, index)
# if os.path.exists(index):
# path = index
# break
# else:
# return self.list_directory(path)
# ctype = self.guess_type(path)
# try:
# # Always read in binary mode. Opening files in text mode may cause
# # newline translations, making the actual size of the content
# # transmitted *less* than the content-length!
# f = open(path, 'rb')
# except IOError:
# self.send_error(404, "File not found")
# return None
# self.send_response(200)
# self.send_header("Content-type", ctype)
# fs = os.fstat(f.fileno())
# self.send_header("Content-Length", str(fs[6]))
# self.send_header("Last-Modified", self.date_time_string(fs.st_mtime))
# self.send_header("Access-Control-Allow-Origin", "*")
# self.end_headers()
# return f
if __name__ == "__main__":
    # Minimal CLI: serve the current directory with Range + CORS support.
    import sys
    try:
        # Python 3 module names, aliased to the Python 2 spellings.
        import http.server as SimpleHTTPServer
        import socketserver as SocketServer
    except ImportError:
        # Python 2 fallback.
        import SimpleHTTPServer
        import SocketServer

    # Port comes from the first CLI argument, defaulting to 8000.
    port = int(sys.argv[1]) if len(sys.argv) > 1 else 8000
    httpd = SocketServer.TCPServer(("", port), RangeRequestHandler)
    print("serving at port", port)
    httpd.serve_forever()
| {
"repo_name": "NabaviLab/CNV-Visualizer",
"path": "scripts/cors_server.py",
"copies": "1",
"size": "5850",
"license": "mit",
"hash": -809937830573544100,
"line_mean": 33.8214285714,
"line_max": 79,
"alpha_frac": 0.5994871795,
"autogenerated": false,
"ratio": 3.931451612903226,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9998850901632821,
"avg_score": 0.006417578154080975,
"num_lines": 168
} |
# Adopted from https://github.com/KiroSummer/A_Syntax-aware_MTL_Framework_for_Chinese_SRL
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
from torch.autograd import Variable
from .layer import DropoutLayer, HighwayLSTMCell, VariationalLSTMCell
def initializer_1d(input_tensor, initializer):
    """Apply a 2-D `initializer` to a 1-D tensor by round-tripping its shape.

    Many init functions (e.g. xavier) require at least two dimensions, so
    the vector is viewed as a column matrix, initialized, then flattened.
    """
    assert len(input_tensor.size()) == 1
    as_column = input_tensor.view(-1, 1)
    as_column = initializer(as_column)
    return as_column.view(-1)
class HighwayBiLSTM(nn.Module):
    """A module that runs multiple steps of HighwayBiLSTM.

    Wraps per-layer ``HighwayLSTMCell``s (forward and, optionally, backward)
    and applies variational dropout masks that are resampled once per batch.
    """
    def __init__(self, input_size, hidden_size, num_layers=1, batch_first=False, bidirectional=False, dropout_in=0,
                 dropout_out=0):
        super(HighwayBiLSTM, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.batch_first = batch_first
        self.bidirectional = bidirectional
        self.dropout_in = dropout_in
        self.dropout_out = dropout_out
        self.num_directions = 2 if bidirectional else 1
        # Per-layer cells plus two dropout layers per direction.
        self.fcells, self.f_dropout, self.f_hidden_dropout = [], [], []
        self.bcells, self.b_dropout, self.b_hidden_dropout = [], [], []
        for layer in range(num_layers):
            layer_input_size = input_size if layer == 0 else hidden_size
            self.fcells.append(HighwayLSTMCell(input_size=layer_input_size, hidden_size=hidden_size))
            self.f_dropout.append(DropoutLayer(hidden_size, self.dropout_out))
            self.f_hidden_dropout.append(DropoutLayer(hidden_size, self.dropout_out))
            if self.bidirectional:
                self.bcells.append(HighwayLSTMCell(input_size=hidden_size, hidden_size=hidden_size))
                self.b_dropout.append(DropoutLayer(hidden_size, self.dropout_out))
                self.b_hidden_dropout.append(DropoutLayer(hidden_size, self.dropout_out))
        # NOTE(review): f_hidden_dropout/b_hidden_dropout are NOT wrapped in
        # nn.ModuleList here (unlike StackedHighwayBiLSTM below), so they are
        # not registered as submodules; they are also only ever passed as the
        # unused `hidden_drop` argument of the step functions — confirm
        # whether they should be removed or wired up.
        self.fcells, self.bcells = nn.ModuleList(self.fcells), nn.ModuleList(self.bcells)
        self.f_dropout, self.b_dropout = nn.ModuleList(self.f_dropout), nn.ModuleList(self.b_dropout)
    def reset_dropout_layer(self, batch_size):
        # Resample the shared dropout masks once per batch (variational dropout).
        for layer in range(self.num_layers):
            self.f_dropout[layer].reset_dropout_mask(batch_size)
            if self.bidirectional:
                self.b_dropout[layer].reset_dropout_mask(batch_size)
    @staticmethod
    def _forward_rnn(cell, gate, input, masks, initial, drop_masks=None, hidden_drop=None):
        # Unroll `cell` left-to-right over time. `gate` and `hidden_drop`
        # are accepted but unused; `drop_masks` is applied inside the cell.
        max_time = input.size(0)
        output = []
        hx = initial
        for time in range(max_time):
            h_next, c_next = cell(input[time], mask=masks[time], hx=hx, dropout=drop_masks)
            hx = (h_next, c_next)
            output.append(h_next)
        output = torch.stack(output, 0)
        return output, hx
    @staticmethod
    def _forward_brnn(cell, gate, input, masks, initial, drop_masks=None, hidden_drop=None):
        # Same as _forward_rnn but unrolled right-to-left; the collected
        # outputs are reversed back into chronological order.
        max_time = input.size(0)
        output = []
        hx = initial
        for time in reversed(list(range(max_time))):
            h_next, c_next = cell(input[time], mask=masks[time], hx=hx, dropout=drop_masks)
            hx = (h_next, c_next)
            output.append(h_next)
        output.reverse()
        output = torch.stack(output, 0)
        return output, hx
    def forward(self, input, masks, initial=None):
        """Run the stacked (bi)directional highway LSTM.

        Args:
            input: time-major (seq, batch, input_size), or batch-major when
                ``batch_first`` is set.
            masks: float mask, 1 for real tokens, 0 for padding; transposed
                alongside ``input`` when ``batch_first``.
            initial: optional (h0, c0); zeros are used when None.

        Returns:
            The top-layer output sequence and the stacked ``(h_n, c_n)``.
        """
        if self.batch_first:
            input = input.transpose(0, 1)  # transpose: return the transpose matrix
            masks = torch.unsqueeze(masks.transpose(0, 1), dim=2)
        max_time, batch_size, _ = input.size()
        self.reset_dropout_layer(batch_size)  # reset the dropout each batch forward
        masks = masks.expand(-1, -1, self.hidden_size)  # expand: -1 means not expand that dimension
        if initial is None:
            initial = Variable(input.data.new(batch_size, self.hidden_size).zero_())
            initial = (initial, initial)  # h0, c0
        h_n, c_n = [], []
        for layer in range(self.num_layers):
            # hidden_mask, hidden_drop = None, None
            hidden_mask, hidden_drop = self.f_dropout[layer], self.f_hidden_dropout[layer]
            layer_output, (layer_h_n, layer_c_n) = HighwayBiLSTM._forward_rnn(cell=self.fcells[layer], \
                                                                              gate=None, input=input, masks=masks,
                                                                              initial=initial, \
                                                                              drop_masks=hidden_mask,
                                                                              hidden_drop=hidden_drop)
            h_n.append(layer_h_n)
            c_n.append(layer_c_n)
            if self.bidirectional:
                hidden_mask, hidden_drop = self.b_dropout[layer], self.b_hidden_dropout[layer]
                # The backward cell consumes the forward layer's output, not
                # the raw layer input.
                blayer_output, (blayer_h_n, blayer_c_n) = HighwayBiLSTM._forward_brnn(cell=self.bcells[layer], \
                                                                                      gate=None, input=layer_output,
                                                                                      masks=masks, initial=initial, \
                                                                                      drop_masks=hidden_mask,
                                                                                      hidden_drop=hidden_drop)
                h_n.append(blayer_h_n)
                c_n.append(blayer_c_n)
            # NOTE(review): when bidirectional, the next layer sees only the
            # backward output (no concatenation) — confirm this matches the
            # upstream design rather than being an oversight.
            input = blayer_output if self.bidirectional else layer_output
        h_n, c_n = torch.stack(h_n, 0), torch.stack(c_n, 0)
        if self.batch_first:
            input = input.transpose(1, 0)  # transpose: return the transpose matrix
        return input, (h_n, c_n)
class StackedHighwayBiLSTM(nn.Module):
    """A module that runs multiple steps of HighwayBiLSTM.

    Unlike ``HighwayBiLSTM`` above, this stack (1) feeds the SAME layer
    input to both directions, (2) concatenates the two directions before
    the next layer, (3) owns learnable initial states per layer/direction,
    and (4) applies a sigmoid highway gate between consecutive layers.
    Only the bidirectional configuration is supported (asserted below).
    """
    def __init__(self, input_size, hidden_size, num_layers=1, batch_first=False, \
                 bidirectional=False, dropout_in=0, dropout_out=0):
        super(StackedHighwayBiLSTM, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.batch_first = batch_first
        self.bidirectional = bidirectional
        self.dropout_in = dropout_in
        self.dropout_out = dropout_out
        self.num_directions = 2 if bidirectional else 1
        self.fcells, self.f_dropout, self.f_hidden_dropout = [], [], []
        self.bcells, self.b_dropout, self.b_hidden_dropout = [], [], []
        # Learnable (h0, c0) per layer and direction (row 0 = h, row 1 = c).
        self.f_initial, self.b_initial = [], []
        for layer in range(num_layers):
            # Layers above the first consume the concatenated bi-output.
            layer_input_size = input_size if layer == 0 else 2 * hidden_size if self.bidirectional else hidden_size
            self.fcells.append(VariationalLSTMCell(input_size=layer_input_size, hidden_size=hidden_size))
            self.f_dropout.append(DropoutLayer(hidden_size, self.dropout_out))
            self.f_hidden_dropout.append(DropoutLayer(hidden_size, self.dropout_out))
            self.f_initial.append(nn.Parameter(torch.Tensor(2, self.hidden_size)))
            assert self.bidirectional is True
            self.bcells.append(VariationalLSTMCell(input_size=layer_input_size, hidden_size=hidden_size))
            self.b_dropout.append(DropoutLayer(hidden_size, self.dropout_out))
            self.b_hidden_dropout.append(DropoutLayer(hidden_size, self.dropout_out))
            self.b_initial.append(nn.Parameter(torch.Tensor(2, self.hidden_size)))
        # Projections for the inter-layer highway gates (one per junction).
        self.lstm_project_layer = nn.ModuleList([nn.Linear(2 * self.hidden_size, 2 * self.hidden_size)
                                                 for _ in range(num_layers - 1)])
        self.fcells, self.bcells = nn.ModuleList(self.fcells), nn.ModuleList(self.bcells)
        self.f_dropout, self.b_dropout = nn.ModuleList(self.f_dropout), nn.ModuleList(self.b_dropout)
        self.f_hidden_dropout, self.b_hidden_dropout = \
            nn.ModuleList(self.f_hidden_dropout), nn.ModuleList(self.b_hidden_dropout)
        self.f_initial, self.b_initial = nn.ParameterList(self.f_initial), nn.ParameterList(self.b_initial)
        self.reset_parameters()
    def reset_parameters(self):
        # Xavier init for the learnable initial states and highway layers;
        # note the nn.Parameter tensors above are otherwise uninitialized.
        for layer_initial in [self.f_initial, self.b_initial]:
            for initial in layer_initial:
                init.xavier_uniform_(initial)
        for layer in self.lstm_project_layer:
            init.xavier_uniform_(layer.weight)
            initializer_1d(layer.bias, init.xavier_uniform_)
    def reset_dropout_layer(self, batch_size):
        # Resample all variational dropout masks once per batch.
        for layer in range(self.num_layers):
            self.f_dropout[layer].reset_dropout_mask(batch_size)
            self.f_hidden_dropout[layer].reset_dropout_mask(batch_size)
            if self.bidirectional:
                self.b_dropout[layer].reset_dropout_mask(batch_size)
                self.b_hidden_dropout[layer].reset_dropout_mask(batch_size)
    def reset_state(self, batch_size):
        # Expand the learnable (h0, c0) rows to the batch dimension.
        f_states, b_states = [], []
        for f_layer_initial, b_layer_initial in zip(self.f_initial, self.b_initial):
            f_states.append([f_layer_initial[0].expand(batch_size, -1), f_layer_initial[1].expand(batch_size, -1)])
            b_states.append([b_layer_initial[0].expand(batch_size, -1), b_layer_initial[1].expand(batch_size, -1)])
        return f_states, b_states
    @staticmethod
    def _forward_rnn(cell, gate, input, masks, initial, drop_masks=None, hidden_drop=None):
        # Unroll `cell` forward in time; `gate`/`hidden_drop` are unused.
        max_time = input.size(0)
        output = []
        hx = initial
        for time in range(max_time):
            h_next, c_next = cell(input[time], mask=masks[time], hx=hx, dropout=drop_masks)
            hx = (h_next, c_next)
            output.append(h_next)
        output = torch.stack(output, 0)
        return output, hx
    @staticmethod
    def _forward_brnn(cell, gate, input, masks, initial, drop_masks=None, hidden_drop=None):
        # Backward-in-time unroll; outputs re-reversed to chronological order.
        max_time = input.size(0)
        output = []
        hx = initial
        for time in reversed(list(range(max_time))):
            h_next, c_next = cell(input[time], mask=masks[time], hx=hx, dropout=drop_masks)
            hx = (h_next, c_next)
            output.append(h_next)
        output.reverse()
        output = torch.stack(output, 0)
        return output, hx
    def forward(self, input, masks, initial=None):
        """Run the stacked highway BiLSTM.

        Note: the ``initial`` argument is accepted for interface parity but
        never used — the learnable per-layer states from ``reset_state``
        are always taken instead.

        Returns:
            ``(top_output, (h_n, c_n), per_layer_outputs)``.
        """
        if self.batch_first:
            input = input.transpose(0, 1)  # transpose: return the transpose matrix
            masks = torch.unsqueeze(masks.transpose(0, 1), dim=2)
        max_time, batch_size, _ = input.size()
        self.reset_dropout_layer(batch_size)  # reset the dropout each batch forward
        f_states, b_states = self.reset_state(batch_size)
        masks = masks.expand(-1, -1, self.hidden_size)  # expand: -1 means not expand that dimension
        h_n, c_n = [], []
        outputs = []
        for layer in range(self.num_layers):
            hidden_mask, hidden_drop = self.f_dropout[layer], self.f_hidden_dropout[layer]
            layer_output, (layer_h_n, layer_c_n) = \
                StackedHighwayBiLSTM._forward_rnn(cell=self.fcells[layer],
                                                  gate=None, input=input, masks=masks, initial=f_states[layer],
                                                  drop_masks=hidden_mask, hidden_drop=hidden_drop)
            h_n.append(layer_h_n)
            c_n.append(layer_c_n)
            assert self.bidirectional is True
            hidden_mask, hidden_drop = self.b_dropout[layer], self.b_hidden_dropout[layer]
            # Both directions read the same `input` (not the forward output).
            blayer_output, (blayer_h_n, blayer_c_n) = \
                StackedHighwayBiLSTM._forward_brnn(cell=self.bcells[layer],
                                                   gate=None, input=input, masks=masks, initial=b_states[layer],
                                                   drop_masks=hidden_mask, hidden_drop=hidden_drop)
            h_n.append(blayer_h_n)
            c_n.append(blayer_c_n)
            output = torch.cat([layer_output, blayer_output], 2) if self.bidirectional else layer_output
            output = F.dropout(output, self.dropout_out, self.training)
            if layer > 0:  # Highway
                # Sigmoid gate interpolates between this layer's output and
                # its input (shapes match for every layer above the first).
                highway_gates = torch.sigmoid(self.lstm_project_layer[layer - 1].forward(output))
                output = highway_gates * output + (1 - highway_gates) * input
            if self.batch_first:
                outputs.append(output.transpose(1, 0))
            else:
                outputs.append(output)
            input = output
        h_n, c_n = torch.stack(h_n, 0), torch.stack(c_n, 0)
        if self.batch_first:
            output = output.transpose(1, 0)  # transpose: return the transpose matrix
        return output, (h_n, c_n), outputs
| {
"repo_name": "hankcs/HanLP",
"path": "hanlp/components/srl/span_rank/highway_variational_lstm.py",
"copies": "1",
"size": "12805",
"license": "apache-2.0",
"hash": 3651464124923053600,
"line_mean": 50.22,
"line_max": 117,
"alpha_frac": 0.5814135103,
"autogenerated": false,
"ratio": 3.662757437070938,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9730049232095888,
"avg_score": 0.0028243430550099506,
"num_lines": 250
} |
# Adopted from https://github.com/KiroSummer/A_Syntax-aware_MTL_Framework_for_Chinese_SRL
import torch
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
import torch.nn.functional as F
from hanlp.components.srl.span_rank.util import block_orth_normal_initializer
def get_tensor_np(t):
    """Return the tensor's contents as a numpy array on the host CPU."""
    host_tensor = t.data.cpu()
    return host_tensor.numpy()
def orthonormal_initializer(output_size, input_size):
    """adopted from Timothy Dozat https://github.com/tdozat/Parser/blob/master/lib/linalg.py

    Builds a roughly orthonormal (output_size, input_size) matrix by
    gradient descent on ||Q^T Q - I||^2 / 2, retrying with a halved
    learning rate whenever the iteration diverges.

    Args:
        output_size: number of rows of the returned matrix.
        input_size: number of columns of the returned matrix.

    Returns:
        A float32 numpy array of shape (output_size, input_size).
    """
    print((output_size, input_size))
    I = np.eye(output_size)
    lr = .1
    eps = .05 / (output_size + input_size)
    success = False
    tries = 0
    while not success and tries < 10:
        Q = np.random.randn(input_size, output_size) / np.sqrt(output_size)
        for i in range(100):
            QTQmI = Q.T.dot(Q) - I
            loss = np.sum(QTQmI ** 2 / 2)
            Q2 = Q ** 2
            Q -= lr * Q.dot(QTQmI) / (
                    np.abs(Q2 + Q2.sum(axis=0, keepdims=True) + Q2.sum(axis=1, keepdims=True) - 1) + eps)
            if np.max(Q) > 1e6 or loss > 1e6 or not np.isfinite(loss):
                # Diverged: shrink the step size and retry with a fresh Q.
                tries += 1
                lr /= 2
                break
        else:
            # Bug fix: `success = True` previously executed even after the
            # divergence `break`, so the retry loop never retried and a
            # diverged Q could be returned as a "success". The for-else
            # makes success conditional on completing all 100 updates.
            success = True
    if success:
        print(('Orthogonal pretrainer loss: %.2e' % loss))
    else:
        print('Orthogonal pretrainer failed, using non-orthogonal random matrix')
        Q = np.random.randn(input_size, output_size) / np.sqrt(output_size)
    return np.transpose(Q.astype(np.float32))
class LayerNorm(nn.Module):
    """Layer normalization with learnable per-feature gain and bias.

    Normalizes the last dimension to zero mean and unit variance
    (unbiased std, matching ``Tensor.std``), then rescales by gamma/beta.
    """
    def __init__(self, features, eps=1e-8):
        super(LayerNorm, self).__init__()
        self.gamma = nn.Parameter(torch.ones(features))
        self.beta = nn.Parameter(torch.zeros(features))
        self.eps = eps

    def forward(self, x):
        mu = x.mean(-1, keepdim=True)
        sigma = x.std(-1, keepdim=True)
        normalized = (x - mu) / (sigma + self.eps)
        return self.gamma * normalized + self.beta
class DropoutLayer3D(nn.Module):
    """Variational dropout with a mask shared across a whole forward pass.

    The Bernoulli keep-mask is sampled explicitly via
    ``reset_dropout_mask`` (once per batch) instead of per call; at eval
    time activations are scaled by the keep probability instead.
    """
    def __init__(self, input_size, dropout_rate=0.0):
        super(DropoutLayer3D, self).__init__()
        self.dropout_rate = dropout_rate
        self.input_size = input_size
        keep = torch.FloatTensor(self.input_size).fill_(1 - self.dropout_rate)
        self.drop_mask = Variable(torch.bernoulli(keep), requires_grad=False)
        if torch.cuda.is_available():
            self.drop_mask = self.drop_mask.cuda()

    def reset_dropout_mask(self, batch_size, length):
        # Fresh (batch, length, feature) mask for the next forward pass.
        keep = torch.FloatTensor(batch_size, length, self.input_size).fill_(1 - self.dropout_rate)
        self.drop_mask = Variable(torch.bernoulli(keep), requires_grad=False)
        if torch.cuda.is_available():
            self.drop_mask = self.drop_mask.cuda()

    def forward(self, x):
        if not self.training:  # eval: deterministic rescaling
            return x * (1.0 - self.dropout_rate)
        return torch.mul(x, self.drop_mask)
class DropoutLayer(nn.Module):
    """2-D variant of the variational dropout layer.

    A (batch, feature) Bernoulli keep-mask is resampled explicitly via
    ``reset_dropout_mask``; eval mode scales by the keep probability.
    """
    def __init__(self, input_size, dropout_rate=0.0):
        super(DropoutLayer, self).__init__()
        self.dropout_rate = dropout_rate
        self.input_size = input_size
        keep_prob = torch.Tensor(self.input_size).fill_(1 - self.dropout_rate)
        self.drop_mask = torch.bernoulli(keep_prob)

    def reset_dropout_mask(self, batch_size):
        # New mask per batch; one row per example.
        keep_prob = torch.Tensor(batch_size, self.input_size).fill_(1 - self.dropout_rate)
        self.drop_mask = torch.bernoulli(keep_prob)

    def forward(self, x):
        if not self.training:  # eval: deterministic rescaling
            return x * (1.0 - self.dropout_rate)
        return torch.mul(x, self.drop_mask.to(x.device))
class NonLinear(nn.Module):
    """A single linear projection followed by an optional activation."""
    def __init__(self, input_size, hidden_size, activation=None):
        super(NonLinear, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.linear = nn.Linear(in_features=input_size, out_features=hidden_size)
        if activation is None:
            # Identity when no activation is supplied.
            self._activate = lambda x: x
        elif callable(activation):
            self._activate = activation
        else:
            raise ValueError("activation must be callable: type={}".format(type(activation)))
        self.reset_parameters()

    def forward(self, x):
        return self._activate(self.linear(x))

    def reset_parameters(self):
        # Xavier weights, zero bias.
        nn.init.xavier_uniform_(self.linear.weight)
        nn.init.zeros_(self.linear.bias)
class Biaffine(nn.Module):
    """Biaffine scorer: score(i, j) = x1_i^T U x2_j (plus optional biases).

    Implemented as one wide linear map over input1 followed by a batched
    matmul with input2, yielding (batch, len2, len1, out_features).
    """
    def __init__(self, in1_features, in2_features, out_features,
                 bias=(True, True)):
        super(Biaffine, self).__init__()
        self.in1_features = in1_features
        self.in2_features = in2_features
        self.out_features = out_features
        self.bias = bias
        # A trailing all-ones column is appended to each side whose bias
        # flag is set, folding the bias terms into the bilinear weight.
        self.linear_input_size = in1_features + int(bias[0])
        self.linear_output_size = out_features * (in2_features + int(bias[1]))
        self.linear = nn.Linear(in_features=self.linear_input_size,
                                out_features=self.linear_output_size,
                                bias=False)
        self.reset_parameters()

    def reset_parameters(self):
        torch.nn.init.xavier_uniform_(self.linear.weight)

    def forward(self, input1, input2):
        batch_size, len1, dim1 = input1.size()
        batch_size, len2, dim2 = input2.size()
        if self.bias[0]:
            ones = input1.data.new(batch_size, len1, 1).zero_().fill_(1)
            input1 = torch.cat((input1, Variable(ones)), dim=2)
            dim1 += 1
        if self.bias[1]:
            ones = input2.data.new(batch_size, len2, 1).zero_().fill_(1)
            input2 = torch.cat((input2, Variable(ones)), dim=2)
            dim2 += 1
        # (b, len1*out, dim2): each output channel gets its own slice of U.
        affine = self.linear(input1).view(batch_size, len1 * self.out_features, dim2)
        # Batched product against (b, dim2, len2), then swap to (b, len2, ...).
        scores = torch.bmm(affine, input2.transpose(1, 2)).transpose(1, 2)
        return scores.contiguous().view(batch_size, len2, len1, self.out_features)

    def __repr__(self):
        return self.__class__.__name__ + ' (' \
               + 'in1_features=' + str(self.in1_features) \
               + ', in2_features=' + str(self.in2_features) \
               + ', out_features=' + str(self.out_features) + ')'
class HighwayLSTMCell(nn.Module):
    """Single-step highway LSTM cell.

    On top of the usual LSTM gates (i, f, o) it computes a transform gate
    ``t`` and a linear "carry" projection ``k`` of the input, so the output
    interpolates between the LSTM path and a direct input path:
    ``h = t * o * tanh(c) + (1 - t) * k``.
    """
    def __init__(self, input_size, hidden_size):
        super(HighwayLSTMCell, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        # 6 input blocks: i, f, o, t, j plus the highway carry k.
        self.linear_ih = nn.Linear(in_features=input_size,
                                   out_features=6 * hidden_size)
        # 5 recurrent blocks: i, f, o, t, j (no recurrence into k).
        self.linear_hh = nn.Linear(in_features=hidden_size,
                                   out_features=5 * hidden_size,
                                   bias=False)
        self.reset_parameters()  # reset all the param in the MyLSTMCell

    def reset_parameters(self):
        # Block-wise orthonormal init, one block per gate.
        weight_ih = block_orth_normal_initializer([self.input_size, ], [self.hidden_size] * 6)
        self.linear_ih.weight.data.copy_(weight_ih)
        weight_hh = block_orth_normal_initializer([self.hidden_size, ], [self.hidden_size] * 5)
        self.linear_hh.weight.data.copy_(weight_hh)
        # Consistency fix: use the in-place `constant_` (the bare
        # `nn.init.constant` alias is deprecated), matching
        # VariationalLSTMCell.reset_parameters below.
        nn.init.constant_(self.linear_ih.bias, 0.0)

    def forward(self, x, mask=None, hx=None, dropout=None):
        """One time step.

        Args:
            x: (batch, input_size) input at this step.
            mask: float mask, 1 for real tokens, 0 for padding.
            hx: (h, c) from the previous step; required.
            dropout: optional callable applied to the new hidden state.

        Returns:
            The new (h, c) pair, with padded positions carrying the old state.
        """
        assert mask is not None and hx is not None
        _h, _c = hx
        _x = self.linear_ih(x)  # compute the x
        preact = self.linear_hh(_h) + _x[:, :self.hidden_size * 5]
        i, f, o, t, j = preact.chunk(chunks=5, dim=1)
        # `torch.sigmoid`/`torch.tanh` replace the deprecated F.sigmoid/
        # F.tanh (consistent with VariationalLSTMCell); the +1.0 biases the
        # forget gate toward remembering early in training.
        i, f, o, t, j = torch.sigmoid(i), torch.sigmoid(f + 1.0), torch.sigmoid(o), torch.sigmoid(t), torch.tanh(j)
        k = _x[:, self.hidden_size * 5:]
        c = f * _c + i * j
        c = mask * c + (1.0 - mask) * _c  # keep the old cell state on padding
        h = t * o * torch.tanh(c) + (1.0 - t) * k
        if dropout is not None:
            h = dropout(h)
        h = mask * h + (1.0 - mask) * _h
        return h, c
class VariationalLSTMCell(nn.Module):
    """A simplified coupled-gate LSTM step used with variational dropout.

    A single gate ``i`` acts as both input gate and forget complement
    (``c = (1 - i) * c_prev + i * tanh(j)``), so only three blocks
    (i, j, o) are projected; `dropout` is applied to the previous hidden
    state before the projection.
    """
    def __init__(self, input_size, hidden_size):
        super(VariationalLSTMCell, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.linear = nn.Linear(in_features=input_size + self.hidden_size, out_features=3 * hidden_size)
        self.reset_parameters()  # reset all the param in the MyLSTMCell

    def reset_parameters(self):
        # Block-orthonormal weights, zero bias.
        weight = block_orth_normal_initializer([self.input_size + self.hidden_size, ], [self.hidden_size] * 3)
        self.linear.weight.data.copy_(weight)
        nn.init.constant_(self.linear.bias, 0.0)

    def forward(self, x, mask=None, hx=None, dropout=None):
        """One step; padded positions (mask == 0) are zeroed out."""
        assert mask is not None and hx is not None
        prev_h, prev_c = hx
        prev_h = dropout(prev_h)
        gates = self.linear(torch.cat([x, prev_h], 1))  # compute the x
        i, j, o = gates.chunk(3, dim=1)
        i = torch.sigmoid(i)
        new_c = mask * ((1.0 - i) * prev_c + i * torch.tanh(j))
        new_h = mask * (torch.tanh(new_c) * torch.sigmoid(o))
        return new_h, new_c
class VariationalLSTM(nn.Module):
    """A module that runs multiple steps of LSTM.

    Plain stacked (bi)LSTM built from ``nn.LSTMCell``s, with variational
    (per-batch, shared-over-time) input and hidden dropout applied manually.
    """
    def __init__(self, input_size, hidden_size, num_layers=1, batch_first=False, \
                 bidirectional=False, dropout_in=0, dropout_out=0):
        super(VariationalLSTM, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.batch_first = batch_first
        self.bidirectional = bidirectional
        self.dropout_in = dropout_in
        self.dropout_out = dropout_out
        self.num_directions = 2 if bidirectional else 1
        self.fcells = []
        self.bcells = []
        for layer in range(num_layers):
            layer_input_size = input_size if layer == 0 else hidden_size * self.num_directions
            self.fcells.append(nn.LSTMCell(input_size=layer_input_size, hidden_size=hidden_size))
            if self.bidirectional:
                self.bcells.append(nn.LSTMCell(input_size=layer_input_size, hidden_size=hidden_size))
        # Register the cell parameters under cuDNN-style attribute names
        # (weight_ih_l0, bias_hh_l0_reverse, ...) so they appear in
        # named_parameters() even though the cells live in plain lists.
        self._all_weights = []
        for layer in range(num_layers):
            layer_params = (self.fcells[layer].weight_ih, self.fcells[layer].weight_hh, \
                            self.fcells[layer].bias_ih, self.fcells[layer].bias_hh)
            suffix = ''
            param_names = ['weight_ih_l{}{}', 'weight_hh_l{}{}']
            param_names += ['bias_ih_l{}{}', 'bias_hh_l{}{}']
            param_names = [x.format(layer, suffix) for x in param_names]
            for name, param in zip(param_names, layer_params):
                setattr(self, name, param)
            self._all_weights.append(param_names)
            if self.bidirectional:
                layer_params = (self.bcells[layer].weight_ih, self.bcells[layer].weight_hh, \
                                self.bcells[layer].bias_ih, self.bcells[layer].bias_hh)
                suffix = '_reverse'
                param_names = ['weight_ih_l{}{}', 'weight_hh_l{}{}']
                param_names += ['bias_ih_l{}{}', 'bias_hh_l{}{}']
                param_names = [x.format(layer, suffix) for x in param_names]
                for name, param in zip(param_names, layer_params):
                    setattr(self, name, param)
                self._all_weights.append(param_names)
        self.reset_parameters()
    def reset_parameters(self):  # modified by kiro
        # NOTE(review): `print(name)` looks like leftover debug output, and
        # `nn.init.orthogonal`/`nn.init.normal` are the deprecated aliases
        # of `orthogonal_`/`normal_` — consider cleaning both up.
        for name, param in self.named_parameters():
            print(name)
            if "weight" in name:
                # for i in range(4):
                #     nn.init.orthogonal(self.__getattr__(name)[self.hidden_size*i:self.hidden_size*(i+1),:])
                nn.init.orthogonal(self.__getattr__(name))
            if "bias" in name:
                nn.init.normal(self.__getattr__(name), 0.0, 0.01)
                # nn.init.constant(self.__getattr__(name), 1.0)  # different from zhang's 0
    @staticmethod
    def _forward_rnn(cell, input, masks, initial, drop_masks):
        # Forward unroll. Padded steps are reset to the initial state so
        # padding never contaminates the recurrence; hidden dropout is
        # applied to the state fed to the NEXT step, not to the output.
        max_time = input.size(0)
        output = []
        hx = initial
        for time in range(max_time):
            h_next, c_next = cell(input=input[time], hx=hx)
            h_next = h_next * masks[time] + initial[0] * (1 - masks[time])
            c_next = c_next * masks[time] + initial[1] * (1 - masks[time])
            output.append(h_next)
            if drop_masks is not None: h_next = h_next * drop_masks
            hx = (h_next, c_next)
        output = torch.stack(output, 0)
        return output, hx
    @staticmethod
    def _forward_brnn(cell, input, masks, initial, drop_masks):
        # Backward unroll, mirrored from _forward_rnn; outputs re-reversed
        # into chronological order.
        max_time = input.size(0)
        output = []
        hx = initial
        for time in reversed(list(range(max_time))):
            h_next, c_next = cell(input=input[time], hx=hx)
            h_next = h_next * masks[time] + initial[0] * (1 - masks[time])
            c_next = c_next * masks[time] + initial[1] * (1 - masks[time])
            output.append(h_next)
            if drop_masks is not None: h_next = h_next * drop_masks
            hx = (h_next, c_next)
        output.reverse()
        output = torch.stack(output, 0)
        return output, hx
    def forward(self, input, masks, initial=None):
        """Run the stacked (bi)LSTM over `input` with float `masks`.

        Dropout masks are sampled once per layer per call (training only)
        and shared across all time steps (variational dropout).
        """
        if self.batch_first:
            input = input.transpose(0, 1)  # transpose: return the transpose matrix
            masks = torch.unsqueeze(masks.transpose(0, 1), dim=2)
        max_time, batch_size, _ = input.size()
        masks = masks.expand(-1, -1, self.hidden_size)  # expand: -1 means not expand that dimension
        if initial is None:
            initial = Variable(input.data.new(batch_size, self.hidden_size).zero_())
            initial = (initial, initial)  # h0, c0
        h_n = []
        c_n = []
        for layer in range(self.num_layers):
            max_time, batch_size, input_size = input.size()
            input_mask, hidden_mask = None, None
            if self.training:  # when training, use the dropout
                # Inverted-dropout mask over the layer input, shared over time.
                input_mask = input.data.new(batch_size, input_size).fill_(1 - self.dropout_in)
                input_mask = Variable(torch.bernoulli(input_mask), requires_grad=False)
                input_mask = input_mask / (1 - self.dropout_in)
                # permute: exchange the dimension
                input_mask = torch.unsqueeze(input_mask, dim=2).expand(-1, -1, max_time).permute(2, 0, 1)
                input = input * input_mask
                # Hidden-state dropout mask, also shared over time.
                hidden_mask = input.data.new(batch_size, self.hidden_size).fill_(1 - self.dropout_out)
                hidden_mask = Variable(torch.bernoulli(hidden_mask), requires_grad=False)
                hidden_mask = hidden_mask / (1 - self.dropout_out)
            layer_output, (layer_h_n, layer_c_n) = VariationalLSTM._forward_rnn(cell=self.fcells[layer], \
                                                                                input=input, masks=masks,
                                                                                initial=initial,
                                                                                drop_masks=hidden_mask)
            if self.bidirectional:
                # Both directions share the same hidden dropout mask.
                blayer_output, (blayer_h_n, blayer_c_n) = VariationalLSTM._forward_brnn(cell=self.bcells[layer], \
                                                                                        input=input, masks=masks,
                                                                                        initial=initial,
                                                                                        drop_masks=hidden_mask)
            h_n.append(torch.cat([layer_h_n, blayer_h_n], 1) if self.bidirectional else layer_h_n)
            c_n.append(torch.cat([layer_c_n, blayer_c_n], 1) if self.bidirectional else layer_c_n)
            input = torch.cat([layer_output, blayer_output], 2) if self.bidirectional else layer_output
        h_n = torch.stack(h_n, 0)
        c_n = torch.stack(c_n, 0)
        if self.batch_first:
            input = input.transpose(1, 0)  # transpose: return the transpose matrix
        return input, (h_n, c_n)
| {
"repo_name": "hankcs/HanLP",
"path": "hanlp/components/srl/span_rank/layer.py",
"copies": "1",
"size": "16572",
"license": "apache-2.0",
"hash": 5060657347899016000,
"line_mean": 41.7113402062,
"line_max": 118,
"alpha_frac": 0.5599203476,
"autogenerated": false,
"ratio": 3.4705759162303664,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4530496263830366,
"avg_score": null,
"num_lines": null
} |
# Adopted from https://github.com/KiroSummer/A_Syntax-aware_MTL_Framework_for_Chinese_SRL
# Inference functions for the SRL model.
import numpy as np
def decode_spans(span_starts, span_ends, span_scores, labels_inv):
    """Greedily pick non-duplicate labeled spans in order of confidence.

    Args:
        span_starts: [num_candidates,]
        span_ends: [num_candidates,]
        span_scores: [num_candidates, num_labels]
        labels_inv: label id -> label string mapping.

    Returns:
        List of (start, end, label_string) tuples, best-scoring first.
    """
    best_labels = np.argmax(span_scores, axis=1)  # [num_candidates]
    candidates = sorted(
        zip(span_starts, span_ends, best_labels, span_scores),
        key=lambda item: item[3][item[2]],
        reverse=True,
    )
    picked = []
    seen = {}
    for start, end, label, _ in candidates:
        # Drop null-labeled spans and duplicates of an already-chosen span.
        if label == 0 or (start, end) in seen:
            continue
        picked.append((start, end, labels_inv[label]))
        seen[(start, end)] = label
    return picked
def greedy_decode(predict_dict, srl_labels_inv):
    """Greedily decode SRL predicate-argument structures.

    For every predicate, candidate arguments are taken in order of
    decreasing score and kept only if they do not overlap the predicate
    token or any argument already accepted for that predicate.

    Args:
        predict_dict: dict of name -> numpy array with keys "arg_starts",
            "arg_ends", "predicates", "arg_labels" and "srl_scores".
        srl_labels_inv: mapping from SRL label id to label string.

    Returns:
        (pred_to_args, num_suppressed_args): a map from predicate position
        to its list of (start, end, label) arguments, and the count of
        arguments dropped because of overlaps.
    """
    arg_starts = predict_dict["arg_starts"]
    arg_ends = predict_dict["arg_ends"]
    predicates = predict_dict["predicates"]
    arg_labels = predict_dict["arg_labels"]
    scores = predict_dict["srl_scores"]

    # Length of the token range we need overlap flags for.
    if len(arg_ends) > 0 and len(predicates) > 0:
        max_len = max(np.max(arg_ends), np.max(predicates)) + 1
    else:
        max_len = 1

    pred_to_args = {}
    num_suppressed_args = 0
    for j, pred_id in enumerate(predicates):
        # Collect every non-null candidate argument for this predicate.
        candidates = []
        for i, (arg_start, arg_end) in enumerate(zip(arg_starts, arg_ends)):
            label_id = arg_labels[i][j]
            if label_id == 0:
                continue  # null label
            candidates.append((arg_start, arg_end, srl_labels_inv[label_id],
                               scores[i][j][label_id]))
        # Higher-scoring candidates win overlap conflicts.
        candidates.sort(key=lambda c: c[3], reverse=True)

        covered = [False] * max_len
        covered[pred_id] = True  # arguments may not overlap the predicate
        kept = []
        for arg_start, arg_end, label, _score in candidates:
            if max(covered[arg_start:arg_end + 1]):
                continue  # overlaps something already accepted
            kept.append((arg_start, arg_end, label))
            for k in range(arg_start, arg_end + 1):
                covered[k] = True
        # Only record predicates that keep at least one argument.
        if kept:
            pred_to_args[pred_id] = kept
        num_suppressed_args += len(candidates) - len(kept)
    return pred_to_args, num_suppressed_args
# Bit flags for core argument labels (PropBank ARG0-5/ARGA and the short
# A0-5/AA aliases map to the same bits), used by the DP decoder below to
# track which core roles a partial decode has already consumed.
_CORE_ARGS = {"ARG0": 1, "ARG1": 2, "ARG2": 4, "ARG3": 8, "ARG4": 16, "ARG5": 32, "ARGA": 64,
              "A0": 1, "A1": 2, "A2": 4, "A3": 8, "A4": 16, "A5": 32, "AA": 64}
def get_predicted_clusters(top_span_starts, top_span_ends, predicted_antecedents):
    """Build coreference clusters from predicted antecedent links.

    Args:
        top_span_starts: start index per candidate mention.
        top_span_ends: end index per candidate mention.
        predicted_antecedents: for mention i, the index of its predicted
            antecedent, or a negative value when it has none.

    Returns:
        (predicted_clusters, mention_to_predicted): a list of mention-tuple
        clusters, and a map from each mention to the cluster tuple that
        contains it.
    """
    cluster_ids = {}  # mention -> cluster index
    clusters = []
    for i, antecedent_idx in enumerate(predicted_antecedents):
        if antecedent_idx < 0:
            continue  # no antecedent predicted for this mention
        assert i > antecedent_idx  # antecedents must precede their mention
        antecedent = (int(top_span_starts[antecedent_idx]),
                      int(top_span_ends[antecedent_idx]))
        if antecedent not in cluster_ids:
            # First time this antecedent is seen: open a new cluster.
            cluster_ids[antecedent] = len(clusters)
            clusters.append([antecedent])
        cluster_id = cluster_ids[antecedent]
        mention = (int(top_span_starts[i]), int(top_span_ends[i]))
        clusters[cluster_id].append(mention)
        cluster_ids[mention] = cluster_id

    clusters = [tuple(cluster) for cluster in clusters]
    mention_to_cluster = {m: clusters[idx] for m, idx in cluster_ids.items()}
    return clusters, mention_to_cluster
def _decode_non_overlapping_spans(starts, ends, scores, max_len, labels_inv, pred_id):
labels = np.argmax(scores, axis=1)
spans = []
for i, (start, end, label) in enumerate(zip(starts, ends, labels)):
if label <= 0:
continue
label_str = labels_inv[label]
if pred_id is not None and label_str == "V":
continue
spans.append((start, end, label_str, scores[i][label]))
spans = sorted(spans, key=lambda x: x[3], reverse=True)
flags = np.zeros([max_len], dtype=bool)
if pred_id is not None:
flags[pred_id] = True
new_spans = []
for start, end, label_str, score in spans:
if not max(flags[start:end + 1]):
new_spans.append((start, end, label_str)) # , score))
for k in range(start, end + 1):
flags[k] = True
return new_spans
def _dp_decode_non_overlapping_spans(starts, ends, scores, max_len, labels_inv, pred_id, u_constraint=False):
    """Dynamic-programming decoding of non-overlapping argument spans.

    Finds a score-maximising set of non-overlapping spans by DP over token
    positions.  When `u_constraint` is True, each core argument label (see
    `_CORE_ARGS`) may be used at most once; the set of used core roles is
    tracked as a bitmask ("role state").

    Args:
        starts, ends: candidate span boundaries.
        scores: [num_args, num_roles]; column 0 must be the zero dummy score.
        max_len: sentence length.
        labels_inv: role id -> label string.
        pred_id: predicate position (spans covering it are skipped), or None.
        u_constraint: enforce the unique-core-argument constraint.

    Returns:
        List of (start, end, label) spans in left-to-right order.
    """
    num_roles = scores.shape[1]  # [num_arg, num_roles]
    labels = np.argmax(scores, axis=1).astype(np.int64)
    spans = list(zip(starts, ends, list(range(len(starts)))))
    spans = sorted(spans, key=lambda x: (x[0], x[1]))  # sort according to the span start index
    # f[t][rs]: best score of a decode covering tokens < t with role state rs.
    # Initialised slightly negative so unreached states never win.
    if u_constraint:
        f = np.zeros([max_len + 1, 128], dtype=float) - 0.1
    else:  # This one
        f = np.zeros([max_len + 1, 1], dtype=float) - 0.1
    f[0, 0] = 0
    states = {0: set([0])}  # A dictionary from id to list of binary core-arg states.
    pointers = {}  # A dictionary from states to (arg_id, role, prev_t, prev_rs)
    # Single-element list so the closure below can rebind the best state.
    best_state = [(0, 0)]

    def _update_state(t0, rs0, t1, rs1, delta, arg_id, role):
        # Relax the transition (t0, rs0) -> (t1, rs1) by taking span arg_id
        # with the given role; record a backpointer when it improves f.
        if f[t0][rs0] + delta > f[t1][rs1]:
            f[t1][rs1] = f[t0][rs0] + delta
            if t1 not in states:
                states[t1] = set()
            states[t1].update([rs1])
            pointers[(t1, rs1)] = (arg_id, role, t0, rs0)  # the pointers store
            if f[t1][rs1] > f[best_state[0][0]][best_state[0][1]]:
                best_state[0] = (t1, rs1)

    for start, end, i in spans:  # [arg_start, arg_end, arg_span_id]
        assert scores[i][0] == 0  # dummy score
        # The extra dummy score should be same for all states, so we can safely skip arguments overlap
        # with the predicate.
        if pred_id is not None and start <= pred_id and pred_id <= end:  # skip the span contains the predicate
            continue
        r0 = labels[i]  # Locally best role assignment.
        # Strictly better to incorporate a dummy span if it has the highest local score.
        if r0 == 0:  # labels_inv[r0] == "O"
            continue
        r0_str = labels_inv[r0]
        # Enumerate explored states.
        t_states = [t for t in list(states.keys()) if t <= start]  # collect the state which is before the current span
        for t in t_states:  # for each state
            role_states = states[t]
            # Update states if best role is not a core arg.
            if not u_constraint or r0_str not in _CORE_ARGS:  # True; this one
                for rs in role_states:  # the set type in the value in the state dict
                    _update_state(t, rs, end + 1, rs, scores[i][r0], i, r0)  # update the state
            else:
                # Core-arg case: try every positive-scoring role whose core
                # bit is not already set in the state.
                for rs in role_states:
                    for r in range(1, num_roles):
                        if scores[i][r] > 0:
                            r_str = labels_inv[r]
                            core_state = _CORE_ARGS.get(r_str, 0)
                            # print start, end, i, r_str, core_state, rs
                            if core_state & rs == 0:
                                _update_state(t, rs, end + 1, rs | core_state, scores[i][r], i, r)
    # Backtrack to decode.
    new_spans = []
    t, rs = best_state[0]
    while (t, rs) in pointers:
        i, r, t0, rs0 = pointers[(t, rs)]
        new_spans.append((int(starts[i]), int(ends[i]), labels_inv[r]))
        t = t0
        rs = rs0
    return new_spans[::-1]
def srl_decode(sentence_lengths, predict_dict, srl_labels_inv, config):  # decode the predictions.
    """Decode SRL predictions for a batch of sentences.

    Args:
        sentence_lengths: per-sentence token counts; its length is the
            batch size.
        predict_dict: dict of name -> tensor with keys "num_args",
            "num_preds", "predicates", "arg_starts", "arg_ends" and
            "srl_scores".
        srl_labels_inv: SRL label id -> label string.
        config: object providing `enforce_srl_constraint` and
            `use_gold_predicates`.

    Returns:
        One dict per sentence, mapping predicate position to its list of
        (start, end, label) argument spans sorted by position.
    """
    num_sentences = len(sentence_lengths)
    predictions = [{} for _ in range(num_sentences)]

    # Convert to numpy once for the whole batch.  These tensors do not
    # change between sentences, so doing the (potentially GPU -> CPU)
    # copies inside the per-sentence loop repeated them num_sentences
    # times for no benefit.
    num_args_all = predict_dict["num_args"].cpu().numpy()
    num_preds_all = predict_dict["num_preds"].cpu().numpy()
    predicates_all = predict_dict["predicates"].cpu().numpy()
    arg_starts_all = predict_dict["arg_starts"].cpu().numpy()
    arg_ends_all = predict_dict["arg_ends"].cpu().numpy()
    srl_scores_all = predict_dict["srl_scores"].detach().cpu().numpy()

    # Sentence-level predictions.
    for i in range(num_sentences):
        num_args = num_args_all[i]  # the number of the candidate argument spans
        num_preds = num_preds_all[i]  # the number of the candidate predicates
        # for each predicate id, exec the decode process
        for j, pred_id in enumerate(predicates_all[i][:num_preds]):
            arg_spans = _dp_decode_non_overlapping_spans(
                arg_starts_all[i][:num_args],
                arg_ends_all[i][:num_args],
                srl_scores_all[i, :num_args, j, :],
                sentence_lengths[i], srl_labels_inv, pred_id,
                config.enforce_srl_constraint)
            # To avoid warnings in the eval script: emit the predicate
            # itself as a "V" span when gold predicates are used.
            if config.use_gold_predicates:
                arg_spans.append((pred_id, pred_id, "V"))
            if arg_spans:
                predictions[i][int(pred_id)] = sorted(arg_spans, key=lambda x: (x[0], x[1]))
    return predictions
| {
"repo_name": "hankcs/HanLP",
"path": "hanlp/components/srl/span_rank/inference_utils.py",
"copies": "1",
"size": "10363",
"license": "apache-2.0",
"hash": 5019844949522396000,
"line_mean": 41.646090535,
"line_max": 119,
"alpha_frac": 0.585255235,
"autogenerated": false,
"ratio": 3.3810766721044048,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.945022666587989,
"avg_score": 0.003221048244903099,
"num_lines": 243
} |
# Adopted from https://github.com/lazyprogrammer/machine_learning_examples/blob/master/nlp_class2/tfidf_tsne.py
import json
import numpy as np
import matplotlib.pyplot as plt
from sklearn.utils import shuffle
from sklearn.manifold import TSNE
from datetime import datetime
# import os
# import sys
# sys.path.append(os.path.abspath('..'))
from utils import get_wikipedia_data, find_analogies, get_news_data_with_price
# from util import find_analogies
from sklearn.feature_extraction.text import TfidfTransformer
def tsne_on_wikipedia():
    """Build TF-IDF word vectors from Wikipedia text, embed with t-SNE, plot.

    Builds a term-document count matrix over paragraphs, applies TF-IDF,
    projects the vocabulary to 2-D with t-SNE, scatter-plots and annotates
    every word, then runs a few word-analogy sanity checks on the 2-D
    embedding.  Side effects: writes w2v_word2idx.json and opens a
    matplotlib window.  (Python 2 code: print statements, xrange,
    iteritems.)
    """
    sentences, word2idx = get_wikipedia_data('file', 5000, by_paragraph=True)
    with open('w2v_word2idx.json', 'w') as f:
        json.dump(word2idx, f)
    # build term document matrix
    V = len(word2idx)
    N = len(sentences)
    print V, N
    # create raw counts first
    A = np.zeros((V, N))
    j = 0
    for sentence in sentences:
        for i in sentence:
            A[i,j] += 1
        j += 1
    print 'finished getting raw counts'
    transformer = TfidfTransformer()
    A = transformer.fit_transform(A)
    A = A.toarray()  # back to a dense array for TSNE
    idx2word = {v:k for k, v in word2idx.iteritems()}
    # plot the data in 2-D
    tsne = TSNE()
    Z = tsne.fit_transform(A)
    print 'Z.shape:', Z.shape
    plt.scatter(Z[:,0], Z[:,1])
    for i in xrange(V):
        try:
            plt.annotate(s=idx2word[i].encode('utf8'), xy=(Z[i,0], Z[i,1]))
        except:
            # some words fail to encode/annotate; just report and continue
            print 'bad string:', idx2word[i]
    plt.show()
    # Use the 2-D t-SNE coordinates as the word embedding for analogies.
    We = Z
    # find_analogies('king', 'man', 'woman', We, word2idx)
    find_analogies('france', 'paris', 'london', We, word2idx)
    find_analogies('france', 'paris', 'rome', We, word2idx)
    find_analogies('paris', 'france', 'italy', We, word2idx)
def tsne_on_news():
    """Placeholder: only loads the news data for now.

    NOTE(review): this calls get_news_data_with_price() with no arguments,
    but the version in utils.py takes a required `filename` parameter --
    confirm the intended module, otherwise this raises TypeError.
    """
    get_news_data_with_price()

if __name__ == '__main__':
    tsne_on_news()
"repo_name": "WayneDW/Sentiment-Analysis-in-Event-Driven-Stock-Price-Movement-Prediction",
"path": "archived/tfidf_tsne.py",
"copies": "1",
"size": "1827",
"license": "mit",
"hash": 7551169774974377000,
"line_mean": 27.1230769231,
"line_max": 111,
"alpha_frac": 0.6338259442,
"autogenerated": false,
"ratio": 3.107142857142857,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4240968801342857,
"avg_score": null,
"num_lines": null
} |
# Adopted from https://github.com/lazyprogrammer/machine_learning_examples/blob/master/rnn_class/util.py
# Adopted form https://github.com/lazyprogrammer/machine_learning_examples/blob/master/nlp_class2/util.py
import numpy as np
import pandas as pd
import string
import os
import operator
from nltk import pos_tag, word_tokenize
from datetime import datetime
import time # for debug
from nltk.corpus import stopwords
# English stopword set shared by my_tokenizer below.
eng_stop = set(stopwords.words('english'))

def remove_punctuation(s):
    """Strip all punctuation characters from `s`.

    Uses the Python 2 form of str.translate (deletechars argument); under
    Python 3 this call would raise TypeError.
    """
    return s.translate(None, string.punctuation)
def my_tokenizer(s):
    """Lowercase, strip punctuation, split on whitespace, drop stopwords."""
    s = remove_punctuation(s)
    s = s.lower()  # downcase
    # remove stopwords
    return [i for i in s.split() if i not in eng_stop]
def get_wikipedia_data(filename, n_vocab, by_paragraph=False):
    """Read a Wikipedia text dump and build an indexed corpus.

    Tokenizes each sentence (or whole paragraph when `by_paragraph` is
    True), assigns integer ids to words, then restricts the vocabulary to
    the `n_vocab` most frequent words, remapping everything else to a
    single UNKNOWN id.  (Python 2 code.)

    Args:
        filename: file under ./input/ to read.
        n_vocab: size of the reduced vocabulary (before adding UNKNOWN).
        by_paragraph: treat each line as one unit instead of splitting
            on '. '.

    Returns:
        (sentences, word2idx): sentences as lists of word ids in the
        reduced vocabulary, and the word -> id mapping (includes START,
        END and UNKNOWN).
    """
    prefix = './input/'
    # return variables
    sentences = []
    word2idx = {'START': 0, 'END': 1}
    idx2word = ['START', 'END']
    current_idx = 2
    # START/END get infinite counts so they always survive the vocab cut.
    word_idx_count = {0: float('inf'), 1: float('inf')}
    print "reading:", filename
    for line in open(prefix + filename):
        line = line.strip()
        # don't count headers, structured data, lists, etc...
        if line and line[0] not in ('[', '*', '-', '|', '=', '{', '}'):
            if by_paragraph:
                sentence_lines = [line]
            else:
                sentence_lines = line.split('. ')
            for sentence in sentence_lines:
                tokens = my_tokenizer(sentence)
                for t in tokens:
                    if t not in word2idx:
                        word2idx[t] = current_idx
                        idx2word.append(t)
                        current_idx += 1
                    idx = word2idx[t]
                    word_idx_count[idx] = word_idx_count.get(idx, 0) + 1
                sentence_by_idx = [word2idx[t] for t in tokens]
                sentences.append(sentence_by_idx)
    print '# of unique words: ', len(word2idx)
    # restrict vocab size
    sorted_word_idx_count = sorted(word_idx_count.items(), key=operator.itemgetter(1), reverse=True)
    word2idx_small = {}
    new_idx = 0
    idx_new_idx_map = {}
    for idx, count in sorted_word_idx_count[:n_vocab]:
        word = idx2word[idx]
        print word, count
        word2idx_small[word] = new_idx
        idx_new_idx_map[idx] = new_idx
        new_idx += 1
    # let 'unknown' be the last token
    word2idx_small['UNKNOWN'] = new_idx
    unknown = new_idx
    assert('START' in word2idx_small)
    assert('END' in word2idx_small)
    # assert('king' in word2idx_small)
    # assert('queen' in word2idx_small)
    # assert('man' in word2idx_small)
    # assert('woman' in word2idx_small)
    # map old idx to new idx
    sentences_small = []
    for sentence in sentences:
        # drop length-1 sentences; remap out-of-vocab ids to UNKNOWN
        if len(sentence) > 1:
            new_sentence = [idx_new_idx_map[idx] if idx in idx_new_idx_map else unknown for idx in sentence]
            sentences_small.append(new_sentence)
    return sentences_small, word2idx_small
def find_analogies(w1, w2, w3, We, word2idx):
    """Print the word whose embedding best completes the analogy w1 - w2 + w3.

    Searches all words (except the three inputs) under both Euclidean and
    cosine distance and prints the closest match for each metric.
    (Python 2 code: print statements, iteritems.)

    Args:
        w1, w2, w3: the analogy words (e.g. 'king', 'man', 'woman').
        We: embedding matrix indexed by word id.
        word2idx: word -> row index into We.
    """
    king = We[word2idx[w1]]
    man = We[word2idx[w2]]
    woman = We[word2idx[w3]]
    v0 = king - man + woman

    def dist1(a, b):
        # Euclidean distance
        return np.linalg.norm(a - b)

    def dist2(a, b):
        # cosine distance
        return 1 - a.dot(b) / (np.linalg.norm(a) * np.linalg.norm(b))

    for dist, name in [(dist1, 'Euclidean'), (dist2, 'cosine')]:
        min_dist = float('inf')
        best_word = ''
        for word, idx in word2idx.iteritems():
            if word not in (w1, w2, w3):
                v1 = We[idx]
                d = dist(v0, v1)
                if d < min_dist:
                    min_dist = d
                    best_word = word
        print "closest match by", name, "distance:", best_word
        print w1, "-", w2, "=", best_word, "-", w3
def get_news_data_with_price(filename, prefix='./input/'):
    """Load a news CSV and return (X, Y) when a filtered version exists.

    Looks for a cached filtered file keyed by the raw file's row count;
    when found, returns its feature matrix and label column.  Otherwise
    only prints the raw data's head (the filter-and-save step is not
    implemented in this function).  (Python 2 code.)

    NOTE(review): DataFrame.as_matrix() was removed in pandas >= 1.0; this
    targets an old pandas -- use df.values if ever modernized.
    """
    df = pd.read_csv(prefix+filename, header=None)
    # use line numbers to check if data is filtered or not
    lineNo = df.shape[0]
    filtered_filename = './filtered/'+ str(lineNo) + '_' + filename
    print 'try to read', filtered_filename
    if os.path.isfile(filtered_filename):
        df = pd.read_csv(filtered_filename)
        data = df.as_matrix()
        X = data[:, :-1]  # all columns but the last are features
        Y = data[:, -1]   # last column is the label
        print 'Done!'
        return X, Y
    # save if new
    print "filtered data doesn't exist, filter and save"
    df.columns = ['Ticker', 'Comp_name', 'Date', 'Title', 'Summary']
    print df.head()
def main():
    """Entry point: currently just exercises the news loader."""
    # get_wikipedia_data('file', 5000)
    get_news_data_with_price('news_bloomberg_part0.csv')

if __name__ == '__main__':
    main()
| {
"repo_name": "WayneDW/Sentiment-Analysis-in-Event-Driven-Stock-Price-Movement-Prediction",
"path": "archived/utils.py",
"copies": "1",
"size": "4611",
"license": "mit",
"hash": 1564728552200417300,
"line_mean": 31.7021276596,
"line_max": 108,
"alpha_frac": 0.5755801345,
"autogenerated": false,
"ratio": 3.4206231454005933,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4496203279900593,
"avg_score": null,
"num_lines": null
} |
# Adopted from InspIRCd
# https://github.com/inspircd/inspircd/blob/master/include/numerics.h
# IRC numeric reply/error codes, kept as 3-character strings because that is
# how they appear on the wire.  Names and values follow InspIRCd's
# include/numerics.h; the trailing comments note which RFC or ircd each
# numeric comes from.
RPL_WELCOME = "001" # 2812, not 1459
RPL_YOURHOSTIS = "002" # 2812, not 1459
RPL_SERVERCREATED = "003" # 2812, not 1459
RPL_SERVERVERSION = "004" # 2812, not 1459
RPL_ISUPPORT = "005" # not RFC, extremely common though (defined as RPL_BOUNCE in 2812, widely ignored)
RPL_MAP = "006" # unrealircd
RPL_ENDMAP = "007" # unrealircd
RPL_SNOMASKIS = "008" # unrealircd
RPL_REDIR = "010"
RPL_YOURUUID = "042" # taken from ircnet
RPL_UMODEIS = "221"
RPL_RULES = "232" # unrealircd
RPL_LUSERCLIENT = "251"
RPL_LUSEROP = "252"
RPL_LUSERUNKNOWN = "253"
RPL_LUSERCHANNELS = "254"
RPL_LUSERME = "255"
RPL_ADMINME = "256"
RPL_ADMINLOC1 = "257"
RPL_ADMINLOC2 = "258"
RPL_ADMINEMAIL = "259"
RPL_LOCALUSERS = "265"
RPL_GLOBALUSERS = "266"
RPL_MAPUSERS = "270" # insp-specific
RPL_AWAY = "301"
RPL_SYNTAX = "304" # insp-specific
RPL_UNAWAY = "305"
RPL_NOWAWAY = "306"
RPL_RULESTART = "308" # unrealircd
RPL_RULESEND = "309" # unrealircd
RPL_WHOISSERVER = "312"
RPL_WHOWASUSER = "314"
RPL_ENDOFWHO = "315"
RPL_ENDOFWHOIS = "318"
RPL_LISTSTART = "321"
RPL_LIST = "322"
RPL_LISTEND = "323"
RPL_CHANNELMODEIS = "324"
RPL_CHANNELCREATED = "329" # ???
RPL_NOTOPICSET = "331"
RPL_TOPIC = "332"
RPL_TOPICTIME = "333" # not RFC, extremely common though
RPL_INVITING = "341"
RPL_INVITELIST = "346" # insp-specific (stolen from ircu)
RPL_ENDOFINVITELIST = "347" # insp-specific (stolen from ircu)
RPL_VERSION = "351"
RPL_NAMREPLY = "353"
RPL_LINKS = "364"
RPL_ENDOFLINKS = "365"
RPL_ENDOFNAMES = "366"
RPL_ENDOFWHOWAS = "369"
RPL_INFO = "371"
RPL_ENDOFINFO = "374"
RPL_MOTD = "372"
RPL_MOTDSTART = "375"
RPL_ENDOFMOTD = "376"
RPL_WHOWASIP = "379"
RPL_YOUAREOPER = "381"
RPL_REHASHING = "382"
RPL_TIME = "391"
RPL_YOURDISPLAYEDHOST = "396" # from charybdis/etc, common convention
# Error range of numerics
ERR_NOSUCHNICK = "401"
ERR_NOSUCHSERVER = "402"
ERR_NOSUCHCHANNEL = "403" # used to indicate an invalid channel name also, so don't rely on RFC text (don't do that anyway!)
ERR_CANNOTSENDTOCHAN = "404"
ERR_TOOMANYCHANNELS = "405"
ERR_WASNOSUCHNICK = "406"
ERR_INVALIDCAPSUBCOMMAND = "410" # ratbox/charybdis(?)
ERR_NOTEXTTOSEND = "412"
ERR_UNKNOWNCOMMAND = "421"
ERR_NOMOTD = "422"
ERR_ERRONEUSNICKNAME = "432"
ERR_NICKNAMEINUSE = "433"
ERR_NORULES = "434" # unrealircd
ERR_USERNOTINCHANNEL = "441"
ERR_NOTONCHANNEL = "442"
ERR_USERONCHANNEL = "443"
ERR_CANTCHANGENICK = "447" # unrealircd, probably
ERR_NOTREGISTERED = "451"
ERR_NEEDMOREPARAMS = "461"
ERR_ALREADYREGISTERED = "462"
ERR_YOUREBANNEDCREEP = "465"
ERR_UNKNOWNMODE = "472"
ERR_BADCHANNELKEY = "475"
ERR_INVITEONLYCHAN = "473"
ERR_CHANNELISFULL = "471"
ERR_BANNEDFROMCHAN = "474"
ERR_BANLISTFULL = "478"
ERR_NOPRIVILEGES = "481" # rfc, beware though, we use this for other things opers may not do also
ERR_CHANOPRIVSNEEDED = "482" # rfc, beware though, we use this for other things like trying to kick a uline
ERR_RESTRICTED = "484"
ERR_ALLMUSTSSL = "490" # unrealircd
ERR_NOOPERHOST = "491"
ERR_NOCTCPALLOWED = "492" # XXX: bzzzz. 1459 defines this as ERR_NOSERVICEHOST, research it more and perhaps change this! (ERR_CANNOTSENDTOCHAN?)
# wtf, we also use this for m_noinvite. UGLY!
ERR_DELAYREJOIN = "495" # insp-specific, XXX: we should use 'resource temporarily unavailable' from ircnet/ratbox or whatever
ERR_UNKNOWNSNOMASK = "501" # insp-specific
ERR_USERSDONTMATCH = "502"
ERR_CANTJOINOPERSONLY = "520" # unrealircd, but crap to have so many numerics for cant join..
ERR_CANTSENDTOUSER = "531" # ???
RPL_COMMANDS = "702" # insp-specific
RPL_COMMANDSEND = "703" # insp-specific
ERR_CHANOPEN = "713"
ERR_KNOCKONCHAN = "714"
ERR_WORDFILTERED = "936" # insp-specific, would be nice if we could get rid of this..
ERR_CANTUNLOADMODULE = "972" # insp-specific
RPL_UNLOADEDMODULE = "973" # insp-specific
ERR_CANTLOADMODULE = "974" # insp-specific
RPL_LOADEDMODULE = "975" # insp-specific
# String commands, for convenience
NICK = "NICK"
USER = "USER"
JOIN = "JOIN"
PART = "PART"
QUIT = "QUIT"
MODE = "MODE"
PING = "PING"
PONG = "PONG"
PRIVMSG = "PRIVMSG"
NOTICE = "NOTICE"
TOPIC = "TOPIC"
KICK = "KICK"
INVITE = "INVITE"
PASS = "PASS"
| {
"repo_name": "minus7/asif",
"path": "asif/command_codes.py",
"copies": "1",
"size": "6430",
"license": "mit",
"hash": 834228490929906400,
"line_mean": 39.9554140127,
"line_max": 160,
"alpha_frac": 0.4622083981,
"autogenerated": false,
"ratio": 3.3178534571723426,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9191860776254739,
"avg_score": 0.0176402158035206,
"num_lines": 157
} |
# Adopted from python-twitter's get_access_key.py # http://code.google.com/p/python-twitter/
import urllib, urllib2
import oauth2 as oauth
import twitter
try:
import json
except:
import simplejson as json
try:
from urlparse import parse_qsl
except:
from cgi import parse_qsl
from django.conf import settings
from django.core.urlresolvers import reverse
from django.contrib.sites.models import Site
from django.http import HttpResponse
from models import TwitterAccount
# Twitter REST / OAuth 1.0a endpoints (legacy v1 API).
USER_URL = 'http://api.twitter.com/1/users/show/%s.json'
REQUEST_TOKEN_URL = 'https://api.twitter.com/oauth/request_token'
ACCESS_TOKEN_URL = 'https://api.twitter.com/oauth/access_token'
AUTHORIZATION_URL = 'https://api.twitter.com/oauth/authorize'
SIGNIN_URL = 'https://api.twitter.com/oauth/authenticate'

# Application credentials come from Django settings.
CONSUMER_KEY = settings.TWITTER_CONSUMER_KEY
CONSUMER_SECRET = settings.TWITTER_CONSUMER_SECRET

# Module-level OAuth client shared by the helpers below.
signature_method_hmac_sha1 = oauth.SignatureMethod_HMAC_SHA1()
oauth_consumer = oauth.Consumer(key=CONSUMER_KEY, secret=CONSUMER_SECRET)
oauth_client = oauth.Client(oauth_consumer)
def get_request_token():
    """Fetch an OAuth request token from Twitter (network call).

    The callback URL is built from the current Django Site and the
    'twitter-access-token' URL name.

    Returns:
        dict of the parsed token response on HTTP 200.
        NOTE(review): on failure this returns an HttpResponse(500) instead
        of raising -- callers must check the return type.
    """
    current_site = Site.objects.get_current()
    callback_url = 'http://%s%s' % (current_site.domain, reverse('twitter-access-token'))
    resp, content = oauth_client.request(REQUEST_TOKEN_URL, 'POST', 'oauth_callback=%s' % callback_url)
    if resp['status'] == '200':
        return dict(parse_qsl(content))
    return HttpResponse('Oops, something borked', mimetype='text/plain', status=500)
def get_authorization_url(request_token):
    """Build the Twitter authorization URL for an OAuth request token."""
    token = request_token['oauth_token']
    return '{0}?oauth_token={1}'.format(AUTHORIZATION_URL, token)
def get_access_token(request_token, oauth_verifier):
    """Exchange an authorized request token + verifier for an access token.

    Network call against Twitter's access-token endpoint.

    Returns:
        dict of the parsed access-token response on HTTP 200, else None
        (implicitly).
    """
    token = oauth.Token(request_token['oauth_token'], request_token['oauth_token_secret'])
    token.set_verifier(oauth_verifier)
    # Fresh client signed with the (now verified) request token; shadows
    # the module-level oauth_client on purpose.
    oauth_client = oauth.Client(oauth_consumer, token)
    resp, content = oauth_client.request(ACCESS_TOKEN_URL, method='POST', body='oauth_verifier=%s' % oauth_verifier)
    access_token = dict(parse_qsl(content))
    if resp['status'] == '200':
        return access_token
def get_image_url(username):
    """Return the profile image URL for a Twitter username (network call)."""
    result = json.load(urllib.urlopen(USER_URL % username))
    return result['profile_image_url']
| {
"repo_name": "jaysoo/django-twitter",
"path": "django_twitter/utils.py",
"copies": "1",
"size": "2218",
"license": "mit",
"hash": -6911303535511000000,
"line_mean": 34.7741935484,
"line_max": 116,
"alpha_frac": 0.7339945897,
"autogenerated": false,
"ratio": 3.360606060606061,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9474804959183796,
"avg_score": 0.02395913822445284,
"num_lines": 62
} |
"""adopteitor URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf import settings
from django.conf.urls import include, url
from django.contrib import admin
from rest_framework import routers
from rest_framework.urlpatterns import format_suffix_patterns
import views
from authentication.views import AccountViewSet, LoginView, LogoutView
from django.contrib.auth import get_user_model
User = get_user_model()

# DRF router: one REST endpoint per registered viewset.
router = routers.DefaultRouter()
router.register(r'users', views.UserViewSet)
router.register(r'groups', views.GroupViewSet)
# base_name given explicitly here -- presumably AnimalViewSet defines no
# queryset to derive it from; confirm against views.py.
router.register(r'Animal', views.AnimalViewSet, base_name="Animal")
router.register(r'Persona', views.PersonaViewSet)
router.register(r'FormularioAdopcion', views.FormularioAdopcionViewSet)
router.register(r'AdoptarAnimal', views.AdoptarAnimalViewSet)
router.register(r'accounts', AccountViewSet)
router.register(r'Subscripcion', views.SubscripcionViewSet)
router.register(r'Ipn', views.IpnViewSet)

urlpatterns = [
    url(r'^api/v1/', include(router.urls)),
    url(r'^admin/', include(admin.site.urls)),
    # NOTE(review): this catch-all mounts the router at the site root in
    # addition to /api/v1/ above -- confirm both mounts are intended.
    url(r'^', include(router.urls)),
    url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),
    url(r'^login/$', LoginView.as_view(), name='login'),
    url(r'^logout/$', LogoutView.as_view(), name='logout'),
]
| {
"repo_name": "smarbos/adopteitor-server",
"path": "urls.py",
"copies": "1",
"size": "1845",
"license": "mit",
"hash": 3384562134153681000,
"line_mean": 40.9318181818,
"line_max": 82,
"alpha_frac": 0.745799458,
"autogenerated": false,
"ratio": 3.372943327239488,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4618742785239488,
"avg_score": null,
"num_lines": null
} |
"""A double-ended queue with an optional maximum size."""
import time
from collections import deque
from greennet import greenlet
from greennet import get_hub
from greennet.hub import Wait
class QueueWait(Wait):
    """Abstract class to wait for a Queue event.

    Subclasses set `_wait_attr` to the name of the Queue attribute (a
    deque of waiters) they register on, so `timeout` can de-register
    from the right list.
    """
    __slots__ = ('queue',)

    def __init__(self, task, queue, expires):
        super(QueueWait, self).__init__(task, expires)
        self.queue = queue  # the Queue this wait is registered with

    def timeout(self):
        # Remove ourselves from the queue's wait list before signalling
        # the timeout to the waiting task.
        getattr(self.queue, self._wait_attr).remove(self)
        super(QueueWait, self).timeout()
class PopWait(QueueWait):
    """Wait for a pop to happen."""
    __slots__ = ()
    _wait_attr = '_pop_waits'  # Queue deque holding these waiters
class AppendWait(QueueWait):
    """Wait for an append to happen."""
    __slots__ = ()
    _wait_attr = '_append_waits'  # Queue deque holding these waiters
class Queue(object):
    """A double-ended queue with an optional maximum size.

    Tasks will be suspended when they try to pop from an empty Queue or
    append to a full Queue until the operation can complete.
    """

    def __init__(self, maxlen=None, hub=None):
        # maxlen=None means unbounded; hub defaults to the process hub.
        self.queue = deque()
        self.maxlen = maxlen
        self.hub = get_hub() if hub is None else hub
        # FIFO lists of tasks blocked waiting for an append / a pop.
        self._append_waits = deque()
        self._pop_waits = deque()

    def __len__(self):
        """len(q) <==> q.__len__()

        >>> q = Queue()
        >>> len(q)
        0
        >>> q.append('an item')
        >>> len(q)
        1
        """
        return len(self.queue)

    def full(self):
        """Returns True if the Queue is full, else False.

        >>> q = Queue(1)
        >>> q.full()
        False
        >>> q.append('an item')
        >>> q.full()
        True
        >>> q.pop()
        'an item'
        >>> q.full()
        False
        """
        if self.maxlen is None:
            return False
        return len(self.queue) >= self.maxlen

    def _wait_for_append(self, timeout):
        """Suspend the current task until an append happens.

        Call this if popping from an empty Queue.
        """
        expires = None if timeout is None else time.time() + timeout
        wait = AppendWait(greenlet.getcurrent(), self, expires)
        if timeout is not None:
            self.hub._add_timeout(wait)
        self._append_waits.append(wait)
        # Yields to the hub; returns once an append (or a timeout) wakes us.
        self.hub.run()

    def _wait_for_pop(self, timeout):
        """Suspend the current task until a pop happens.

        Call this if appending to a full Queue.
        """
        expires = None if timeout is None else time.time() + timeout
        wait = PopWait(greenlet.getcurrent(), self, expires)
        if timeout is not None:
            self.hub._add_timeout(wait)
        self._pop_waits.append(wait)
        self.hub.run()

    def _popped(self):
        """Called when the Queue is reduced in size."""
        # Wake exactly one task waiting for room, if any.
        if self._pop_waits:
            wait = self._pop_waits.popleft()
            if wait.expires is not None:
                self.hub._remove_timeout(wait)
            self.hub.schedule(wait.task)

    def _appended(self):
        """Called when the Queue increases in size."""
        # Wake exactly one task waiting for an item, if any.
        if self._append_waits:
            wait = self._append_waits.popleft()
            if wait.expires is not None:
                self.hub._remove_timeout(wait)
            self.hub.schedule(wait.task)

    def wait_until_empty(self, timeout=None):
        """Suspend the current task until the Queue is empty.

        >>> q = Queue()
        >>> q.wait_until_empty()
        >>> q.append('an item')
        >>> q.wait_until_empty(0)
        Traceback (most recent call last):
        ...
        Timeout
        """
        if not self.queue:
            return
        expires = None if timeout is None else time.time() + timeout
        wait = PopWait(greenlet.getcurrent(), self, expires)
        if timeout is not None:
            self.hub._add_timeout(wait)
        while self.queue:
            self._pop_waits.append(wait)
            self.hub.run()
        # Pass the pop notification along so other pop-waiters still run.
        self._popped()

    def pop(self, timeout=None):
        """Pop an item from the right side of the Queue.

        >>> q = Queue()
        >>> q.append('an item')
        >>> q.append('another item')
        >>> q.pop()
        'another item'
        >>> q.pop()
        'an item'
        >>> q.pop(0)
        Traceback (most recent call last):
        ...
        Timeout
        """
        # NOTE(review): assumes the wakeup from _wait_for_append implies an
        # item is available -- confirm hub scheduling makes this race-free
        # with multiple concurrent poppers.
        if not self.queue:
            self._wait_for_append(timeout)
        item = self.queue.pop()
        self._popped()
        return item

    def popleft(self, timeout=None):
        """Pop an item from the left side of the Queue.

        >>> q = Queue()
        >>> q.append('an item')
        >>> q.append('another item')
        >>> q.popleft()
        'an item'
        >>> q.popleft()
        'another item'
        >>> q.popleft(0)
        Traceback (most recent call last):
        ...
        Timeout
        """
        if not self.queue:
            self._wait_for_append(timeout)
        item = self.queue.popleft()
        self._popped()
        return item

    def clear(self):
        """Remove all items from the Queue.

        >>> q = Queue()
        >>> q.append('an item')
        >>> len(q)
        1
        >>> q.clear()
        >>> len(q)
        0
        """
        self.queue.clear()
        self._popped()

    def append(self, item, timeout=None):
        """Append an item to the right side of the Queue.

        >>> q = Queue(2)
        >>> q.append('an item')
        >>> len(q)
        1
        >>> q.append('another item')
        >>> len(q)
        2
        >>> q.append('a third item', 0)
        Traceback (most recent call last):
        ...
        Timeout
        >>> len(q)
        2
        >>> q.popleft()
        'an item'
        >>> q.popleft()
        'another item'
        """
        if self.full():
            self._wait_for_pop(timeout)
        self.queue.append(item)
        self._appended()

    def appendleft(self, item, timeout=None):
        """Append an item to the left side of the Queue.

        >>> q = Queue(2)
        >>> q.appendleft('an item')
        >>> len(q)
        1
        >>> q.appendleft('another item')
        >>> len(q)
        2
        >>> q.appendleft('a third item', 0)
        Traceback (most recent call last):
        ...
        Timeout
        >>> len(q)
        2
        >>> q.popleft()
        'another item'
        >>> q.popleft()
        'an item'
        """
        if self.full():
            self._wait_for_pop(timeout)
        self.queue.appendleft(item)
        self._appended()
if __name__ == '__main__':
    # Run the doctests embedded in the Queue docstrings above.
    import doctest
    doctest.testmod()
| {
"repo_name": "dhain/greennet",
"path": "greennet/queue.py",
"copies": "1",
"size": "6766",
"license": "mit",
"hash": 5157170247582765000,
"line_mean": 25.5333333333,
"line_max": 71,
"alpha_frac": 0.49527047,
"autogenerated": false,
"ratio": 4.095641646489105,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5090912116489105,
"avg_score": null,
"num_lines": null
} |
"""A doubly-linked list"""
class LLNode(object):
    """A single node in the list.

    The pointers to the next and previous nodes should not be manipulated
    directly, but only through the LList class. Directly setting the pointers
    can create an inconsistent LList object.

    Attributes:
        value: the value stored at this node
        prev: the previous node in the list
        next: the next node in the list
    """
    def __init__(self, value, prev_node, next_node):
        self.value = value
        self.prev = prev_node
        self.next = next_node

    def __repr__(self):
        return 'LLNode({})'.format(self.value)


class LList(object):
    """Doubly-linked list class.

    Implemented as a cycle of LLNode objects, with a single sentinel LLNode to
    delimit the start/end of the list.

    Args:
        values (optional): an iterable of values to initially populate the LList

    Attributes:
        sentinel: the LLNode pointing to the start and end of the LList
    """
    def __init__(self, values=None):
        self.sentinel = LLNode(None, None, None)
        self.sentinel.next = self.sentinel.prev = self.sentinel
        self._len = 0
        if values is not None:
            for val in values:
                # Bug fix: append() already increments _len via insert();
                # the original also incremented here, double-counting the
                # length.
                self.append(val)

    def __bool__(self):
        # Non-empty iff the sentinel points at something other than itself.
        return self.sentinel.next is not self.sentinel

    @property
    def head(self):
        """The first LLNode in the list, or None if it is empty."""
        if not self:
            return None
        return self.sentinel.next

    @property
    def tail(self):
        """The last LLNode in the list, or None if it is empty."""
        if not self:
            return None
        return self.sentinel.prev

    def insert(self, value, prev):
        """Insert `value` in a new LLNode immediately following `prev`."""
        node = LLNode(value, prev, prev.next)
        # Bug fix: update the old successor's back-pointer *before*
        # overwriting prev.next.  The original did `prev.next = node` and
        # then `prev.next.prev = node`, which set node.prev to node itself
        # and left the old successor still pointing back at prev.
        node.next.prev = node
        prev.next = node
        self._len += 1
        return node

    def push(self, value):
        """Push `value` onto the beginning of the LList in a new LLNode."""
        return self.insert(value, self.sentinel)

    def append(self, value):
        """Append `value` to the end of the LList in a new LLNode."""
        return self.insert(value, self.sentinel.prev)

    def extend(self, other):
        """Append every value of LList `other` to the end of `self`.

        This removes all the nodes from `other`."""
        if not other:
            return
        tail = self.tail if self else self.sentinel
        # Splice other's node chain directly onto the end of self.
        tail.next = other.head
        other.head.prev = tail
        other.tail.next = self.sentinel
        self.sentinel.prev = other.tail
        self._len += len(other)
        other.sentinel.prev = other.sentinel.next = other.sentinel
        other.clear()

    def clear(self):
        """Remove every node from `self`.

        The nodes themselves will be left in an inconsistent state (i.e.,
        pointers will not be set to None).
        """
        self._len = 0
        self.sentinel.next = self.sentinel.prev = self.sentinel

    def remove(self, node):
        """Remove `node` from `self`."""
        if node is self.sentinel:
            raise ValueError("node out of bounds")
        node.prev.next, node.next.prev = node.next, node.prev
        node.next = node.prev = None
        # Bug fix: the original never decremented the cached length here.
        self._len -= 1

    def __len__(self):
        """Get the length of the list.

        This uses only O(1) time but the correctness requires that the linked
        list is only manipulated via the methods of this class (and any
        subclasses)

        Returns:
            int: the length of the list.
        """
        return self._len

    def __iter__(self):
        def llist_iter():
            """Generator yielding the list one node at a time."""
            node = self.sentinel
            while node.next != self.sentinel:
                node = node.next
                yield node
        return llist_iter()

    def __repr__(self):
        # Bug fix: values need not be strings; convert before joining
        # (str() keeps identical output for string values).
        inner = ', '.join(str(node.value) for node in self)
        return 'LList({})'.format(inner)
| {
"repo_name": "johnwilmes/py-data-structures",
"path": "py_data_structures/llist.py",
"copies": "1",
"size": "4127",
"license": "mit",
"hash": 3317669000760705000,
"line_mean": 29.5703703704,
"line_max": 80,
"alpha_frac": 0.5825054519,
"autogenerated": false,
"ratio": 4.185598377281948,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0007808834969328796,
"num_lines": 135
} |
# adapted from http://docs.scipy.org/doc/scipy-0.15.1/reference/generated/scipy.signal.correlate2d.html
import matplotlib.pyplot as plt
import numpy as np
from scipy import signal
from scipy import misc
print("cross correlation demo")
# Zero-mean the image (subtract the global mean), then collapse the RGB
# channels by summing over the last axis to get a 2-D array.
face = misc.face() - misc.face().mean()
face = face.sum(-1)
template = np.copy(face[700:800, 310:380])  # right eye
template -= template.mean()
noisyface = face + np.random.randn(*face.shape) * 50  # add noise
corr = signal.correlate2d(noisyface, template, boundary='symm', mode='same')
# Bug fix: the correlation peak is at np.argmax(corr) itself. The old code
# negated the flat index (-1*np.argmax(corr)), which unravels to an
# unrelated position counted from the end of the array.
y, x = np.unravel_index(np.argmax(corr), corr.shape)  # find the match
fig, ((ax_orig, ax_template), (ax_noisy, ax_corr)) = plt.subplots(2, 2)
ax_orig.imshow(face, cmap='gray')
ax_orig.set_title('Original')
ax_orig.set_axis_off()
ax_orig.plot(x, y, 'ro')
ax_template.imshow(template, cmap='gray')
ax_template.set_title('Template')
ax_template.set_axis_off()
ax_noisy.imshow(noisyface, cmap='gray')
ax_noisy.set_title('Noisy')
ax_noisy.set_axis_off()
ax_noisy.plot(x, y, 'ro')
ax_corr.imshow(corr, cmap='gray')
ax_corr.set_title('Cross-correlation')
ax_corr.set_axis_off()
fig.show()
| {
"repo_name": "probml/pyprobml",
"path": "scripts/xcorr_demo.py",
"copies": "1",
"size": "1116",
"license": "mit",
"hash": -2837330267262520300,
"line_mean": 26.9,
"line_max": 103,
"alpha_frac": 0.7123655914,
"autogenerated": false,
"ratio": 2.70873786407767,
"config_test": false,
"has_no_keywords": true,
"few_assignments": false,
"quality_score": 0.39211034554776697,
"avg_score": null,
"num_lines": null
} |
""" A drag drawn line. """
from __future__ import with_statement
from enable.api import Line
from traits.api import Instance
from drawing_tool import DrawingTool
class DragLine(DrawingTool):
    """
    A drag drawn line. This is not a straight line, but can be a free-form,
    curved path.
    """
    # Override the vertex color so as to not draw it.
    vertex_color = (0.0, 0.0, 0.0, 0.0)
    # Because this class subclasses DrawingTool and not Line, it contains
    # an instance of the Line primitive.
    line = Instance(Line, args=())
    # Override the default value of this inherited trait
    draw_mode="overlay"
    def reset(self):
        """ Reset to an empty line and return to the 'normal' event state. """
        self.line.vertex_color = self.vertex_color
        self.line.points = []
        self.event_state = "normal"
        return
    #------------------------------------------------------------------------
    # "complete" state
    #------------------------------------------------------------------------
    def complete_draw(self, gc):
        """ Draw the completed line. """
        # Solid (non-dashed) stroke marks the finished path.
        self.line.line_dash = None
        self.line._draw_mainlayer(gc)
        return
    #------------------------------------------------------------------------
    # "drawing" state
    #------------------------------------------------------------------------
    def drawing_draw(self, gc):
        """ Draw the in-progress path with a dashed stroke. """
        self.line.line_dash = (4.0, 2.0)
        self.line._draw_mainlayer(gc)
        return
    def drawing_left_up(self, event):
        """ Handle the left mouse button coming up in the 'drawing' state. """
        self.event_state = 'complete'
        event.window.set_pointer('arrow')
        self.request_redraw()
        self.complete = True
        event.handled = True
        return
    def drawing_mouse_move(self, event):
        """ Handle the mouse moving in 'drawing' state. """
        last_point = self.line.points[-1]
        # If we have moved, we need to add a point.
        # NOTE(review): x is offset by +self.x but y by -self.y -- looks
        # asymmetric; confirm against the component coordinate transform.
        if last_point != (event.x + self.x, event.y - self.y):
            self.line.points.append((event.x + self.x, event.y - self.y))
            self.request_redraw()
        return
    #------------------------------------------------------------------------
    # "normal" state
    #------------------------------------------------------------------------
    def normal_left_down(self, event):
        """ Handle the left button down in the 'normal' state. """
        self.line.points.append((event.x + self.x, event.y - self.y))
        self.event_state = 'drawing'
        event.window.set_pointer('pencil')
        event.handled = True
        self.request_redraw()
        return
| {
"repo_name": "tommy-u/enable",
"path": "enable/drawing/drag_line.py",
"copies": "1",
"size": "2630",
"license": "bsd-3-clause",
"hash": -7839375331935217000,
"line_mean": 31.0731707317,
"line_max": 78,
"alpha_frac": 0.4931558935,
"autogenerated": false,
"ratio": 4.361525704809287,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5354681598309287,
"avg_score": null,
"num_lines": null
} |
""" A drag drawn polygon. """
from __future__ import with_statement
from enable.primitives.api import Polygon
from enable.api import Pointer
from pyface.action.api import MenuManager
from traits.api import Delegate, Instance
from drawing_tool import DrawingTool
class DragPolygon(DrawingTool):
    """ A drag drawn polygon. """
    # The underlying polygon primitive that accumulates the drawn points.
    poly = Instance(Polygon, args=())
    draw_mode = "overlay"
    #### Visible style. ####
    # Override the vertex color so as to not draw it.
    vertex_color = Delegate('poly', modify=True)
    # Override the vertex size so as to not draw it.
    vertex_size = Delegate('poly', modify=True)
    background_color = Delegate('poly', modify=True)
    #### Pointers. ####
    # Pointer for the complete state.
    complete_pointer = Pointer('cross')
    # Pointer for the drawing state.
    drawing_pointer = Pointer('cross')
    # Pointer for the normal state.
    normal_pointer = Pointer('cross')
    #### Miscellaneous. ####
    # The context menu for the polygon.
    menu = Instance(MenuManager)
    def reset(self):
        """ Reset to an empty polygon and return to the 'normal' state. """
        # Hide the vertices while drawing.
        self.vertex_color = (0,0,0,0)
        self.vertex_size = 0
        self.poly.model.points = []
        self.event_state = "normal"
        return
    ###########################################################################
    # 'Component' interface.
    ###########################################################################
    #### 'complete' state #####################################################
    def complete_draw ( self, gc ):
        """ Draw the completed polygon. """
        with gc:
            # Solid border marks the finished, closed polygon.
            self.poly.border_dash = None
            self.poly._draw_closed(gc)
        return
    def complete_left_down ( self, event ):
        """ Draw a new polygon. """
        self.reset()
        self.normal_left_down( event )
        return
    def complete_right_down ( self, event ):
        """ Do the context menu if available. """
        if self.menu is not None:
            if self._is_in((event.x + self.x, event.y - self.y)):
                menu = self.menu.create_menu(event.window.control)
                ### FIXME : The call to _flip_y is necessary but inappropriate.
                menu.show(event.x, event.window._flip_y(event.y))
        return
    #### 'drawing' state ######################################################
    def drawing_draw ( self, gc ):
        """ Draw the polygon while in 'drawing' state. """
        with gc:
            # Dashed border indicates an in-progress (open) polygon.
            self.poly.border_dash = (4.0, 2.0)
            self.poly._draw_open(gc)
        return
    def drawing_left_up ( self, event ):
        """ Handle the left mouse button coming up in 'drawing' state. """
        self.event_state = 'complete'
        self.pointer = self.complete_pointer
        self.request_redraw()
        self.complete = True
        return
    def drawing_mouse_move ( self, event ):
        """ Handle the mouse moving in 'drawing' state. """
        last_point = self.poly.model.points[-1]
        # If we have moved, we need to add a point.
        if last_point != (event.x + self.x, event.y - self.y):
            self.poly.model.points.append((event.x + self.x, event.y - self.y))
            self.request_redraw()
        return
    #### 'normal' state #######################################################
    def normal_left_down ( self, event ):
        """ Handle the left button down in the 'normal' state. """
        self.poly.model.points.append((event.x + self.x, event.y - self.y))
        self.event_state = 'drawing'
        self.pointer = self.drawing_pointer
        self.request_redraw()
        return
    def normal_mouse_move ( self, event ):
        """ Handle the mouse moving in the 'normal' state. """
        self.pointer = self.normal_pointer
        return
#### EOF ######################################################################
| {
"repo_name": "tommy-u/enable",
"path": "enable/drawing/drag_polygon.py",
"copies": "1",
"size": "3886",
"license": "bsd-3-clause",
"hash": 8425185192589923000,
"line_mean": 27.3649635036,
"line_max": 79,
"alpha_frac": 0.5247040659,
"autogenerated": false,
"ratio": 4.284454244762955,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5309158310662955,
"avg_score": null,
"num_lines": null
} |
"""Adres API tests."""
import unittest
import postcodepy
from postcodepy import typedefs
from postcodepy import PostcodeError
from . import unittestsetup
try:
from nose_parameterized import parameterized, param
except:
print("*** Please install 'nose_parameterized' to run these tests ***")
exit(1)
import os
import sys
access_key = None
access_secret = None
api = None
@typedefs.translate_addresstype
@typedefs.translate_purposes
def parse_response(r, pc):
    """manipulate the response.

    Identity passthrough: the real work is done by the decorators, which
    presumably translate the 'addressType' and 'purposes' fields of `r`
    (the forced-lookup-error cases in Test_Adres_API suggest this) --
    verify against postcodepy.typedefs.
    """
    return r
class Test_Adres_API(unittest.TestCase):
    """Tests for Adres API."""
    def setUp(self):
        """setup for tests.
        provides an api instance
        """
        global access_key
        global access_secret
        global api
        try:
            access_key, access_secret = unittestsetup.auth()
        except Exception as e:
            sys.stderr.write("%s" % e)
            exit(2)
        api = postcodepy.API(environment='live',
                             access_key=access_key,
                             access_secret=access_secret)
    @parameterized.expand([
        ("Rijksmuseum",
         ('1071XX', 1),
         'verblijfsobject',
         ["bijeenkomstfunctie"],
         "Amsterdam",
         "Museumstraat",
         ),
        ("Sportschool",
         ('8431NJ', 23),
         'verblijfsobject',
         ["overige gebruiksfunctie"],
         "Oosterwolde",
         "Veengang",
         ),
        ("Gamma",
         ('8431NJ', 8),
         'verblijfsobject',
         ["kantoorfunctie", "winkelfunctie"],
         "Oosterwolde",
         "Veengang",
         ),
        ("Industrieterrein Douwe Egberts Joure",
         ('8501ZD', 1),
         'verblijfsobject',
         ["industriefunctie", "kantoorfunctie", "overige gebruiksfunctie"],
         "Joure",
         "Leeuwarderweg",
         ),
        ("Ziekenhuis Tjongerschans Heerenveen",
         ('8441PW', 44),
         'verblijfsobject',
         ["gezondheidszorgfunctie"],
         "Heerenveen",
         "Thialfweg",
         ),
        ("De Marwei te Leeuwarden",
         ('8936AS', 7),
         'verblijfsobject',
         ["celfunctie"],
         "Leeuwarden",
         "Holstmeerweg",
         ),
        ("Hotel de Zon Oosterwolde",
         ('8431ET', 1),
         'verblijfsobject',
         ["overige gebruiksfunctie"],
         "Oosterwolde",
         "Stationsstraat",
         ),
        ("Hotel de Zon Oosterwolde",
         ('8431ET', 1),
         'error_building',
         ["overige gebruiksfunctie"],
         "Oosterwolde",
         "Stationsstraat",
         1
         ),
        ("Hotel de Zon Oosterwolde",
         ('8431ET', 1),
         'verblijfsobject',
         ["overige gebruiksfunctie", "cannot_find"],
         "Oosterwolde",
         "Stationsstraat",
         2
         ),
    ])
    def test_Postcode_and_translation(self, description,
                                      pc, addressType,
                                      purpose, city, street, errFlag=0):
        """verify response data."""
        retValue = api.get_postcodedata(*pc)
        if errFlag == 1:
            # force a lookup error
            retValue['addressType'] = "error_building"
        if errFlag == 2:
            # force a lookup error
            retValue['purposes'].append("cannot_find")
        retValue = parse_response(retValue, pc)
        # Bug fix: the old code compared `retValue['purposes'].sort()` with
        # `purpose.sort()`. list.sort() sorts in place and returns None, so
        # that clause compared None == None and was always vacuously true.
        # Compare sorted copies instead so the purposes are really checked.
        self.assertTrue(retValue['addressType'] == addressType and
                        sorted(retValue['purposes']) == sorted(purpose) and
                        retValue['city'] == city and
                        retValue['street'] == street)
    def test_PostcodeDataWithAdditionOK(self):
        """TEST: retrieval of data.
        should return testvalues for city, street, housenumber, and
        housenumber addition
        """
        pc = ('7514BP', 129, 'A')
        retValue = api.get_postcodedata(*pc)
        self.assertEqual((retValue['city'],
                          retValue['street'],
                          retValue['houseNumber'],
                          retValue['houseNumberAddition']),
                         ("Enschede", "Lasondersingel", 129, "A"))
    def test_PostcodeDataWithAdditionFail(self):
        """TEST: retrieval of data.
        should fail with ERRHouseNumberAdditionInvalid exception
        """
        pc = ('7514BP', 129, 'B')
        with self.assertRaises(PostcodeError) as cm:
            retValue = api.get_postcodedata(*pc)
        caught_exception = cm.exception
        exp_exception = PostcodeError("ERRHouseNumberAdditionInvalid")
        self.assertEqual(exp_exception.exceptionId,
                         caught_exception.exceptionId)
    def test_PostcodeNoData(self):
        """TEST: no data for this postcode.
        a request that should fail with:
        PostcodeNl_Service_PostcodeAddress_AddressNotFoundException
        """
        pc = ('1077XX', 1)
        with self.assertRaises(PostcodeError) as cm:
            api.get_postcodedata(*pc)
        caught_exception = cm.exception
        expected_exception = PostcodeError(
            "PostcodeNl_Service_PostcodeAddress_AddressNotFoundException", {
                "exception": "Combination does not exist.",
                "exceptionId": "PostcodeNl_Service_PostcodeAddress_"
                               "AddressNotFoundException"})
        self.assertEqual(expected_exception.msg,
                         caught_exception.msg)
    def test_PostcodeWrongFormat(self):
        """TEST: no data for this postcode.
        a request that should fail with:
        PostcodeNl_Controller_Address_InvalidPostcodeException
        """
        pc = ('1071 X', 1)
        with self.assertRaises(PostcodeError) as cm:
            api.get_postcodedata(*pc)
        caught_exception = cm.exception
        expected_exception = PostcodeError(
            "PostcodeNl_Controller_Address_InvalidPostcodeException", {
                "exception": "Postcode does not use format `1234AB`.",
                "exceptionId": "PostcodeNl_Controller_Address_"
                               "InvalidPostcodeException"})
        self.assertEqual(expected_exception.msg,
                         caught_exception.msg)
    def test_PostcodeInvalidUserAccount(self):
        """TEST: invalid useraccount.
        test should fail with:
        PostcodeNl_Controller_Plugin_HttpBasicAuthentication_NotAuthorizedException
        """
        # make the key faulty by adding an extra character
        api = postcodepy.API(environment='live',
                             access_key="1"+access_key,
                             access_secret=access_secret)
        pc = ('1077XX', 1)
        with self.assertRaises(PostcodeError) as cm:
            api.get_postcodedata(*pc)
        caught_exception = cm.exception
        expected_exception = PostcodeError(
            "PostcodeNl_Controller_Plugin_HttpBasic"
            "Authentication_NotAuthorizedException", {
                "exception": "User `1%s` not correct." % access_key,
                "exceptionId": "PostcodeNl_Controller_Plugin_HttpBasic"
                               "Authentication_NotAuthorizedException"})
        self.assertEqual(expected_exception.msg,
                         caught_exception.msg)
    def test_PostcodeInvalidUserSecret(self):
        """TEST: invalid secret.
        test should fail with:
        PostcodeNl_Controller_Plugin_HttpBasicAuthentication_PasswordNotCorrectException
        """
        # make the secret faulty by adding an extra character
        api = postcodepy.API(environment='live',
                             access_key=access_key,
                             access_secret="1"+access_secret)
        pc = ('1077XX', 1)
        with self.assertRaises(PostcodeError) as cm:
            api.get_postcodedata(*pc)
        caught_exception = cm.exception
        expected_exception = PostcodeError(
            "PostcodeNl_Controller_Plugin_HttpBasic"
            "Authentication_PasswordNotCorrectException", {
                "exception": "Password not correct.",
                "exceptionId": "PostcodeNl_Controller_Plugin_HttpBasic"
                               "Authentication_PasswordNotCorrectException"})
        self.assertEqual(expected_exception.msg,
                         caught_exception.msg)
    def test_FailArgNotPassedSecret(self):
        """TEST: no secret provided.
        a request that should fail with a ERRauthAccessUnknownSecret
        """
        with self.assertRaises(PostcodeError) as cm:
            api = postcodepy.API(environment='live', access_key=access_key)
        caught_exception = cm.exception
        exp_exception = PostcodeError("ERRauthAccessUnknownSecret")
        self.assertEqual(exp_exception.exceptionId,
                         caught_exception.exceptionId)
    def test_FailArgNotPassedKey(self):
        """TEST: no key provided.
        a request that should fail with a ERRauthAccessUnknownKey
        """
        with self.assertRaises(PostcodeError) as cm:
            api = postcodepy.API(environment='live',
                                 access_secret=access_secret)
        caught_exception = cm.exception
        expect_exception = PostcodeError("ERRauthAccessUnknownKey")
        self.assertEqual(expect_exception.exceptionId,
                         caught_exception.exceptionId)
    def test_request(self):
        """TEST: faulty URL.
        a request that should fail with 'A Connection error occurred.'
        """
        # Make the REST-API url point to some faulty url
        api.api_url = "https://some/ur/l"
        pc = ('1071 XX', 1)
        with self.assertRaises(PostcodeError) as cm:
            api.get_postcodedata(*pc)
        caught_exception = cm.exception
        expected_exception = PostcodeError(
            "ERRrequest", {
                "exception": "A Connection error occurred.",
                "exceptionId": "ERRrequest"})
        self.assertEqual(expected_exception.msg,
                         caught_exception.msg)
# Allow running this test module directly (python test_adres_api.py).
if __name__ == "__main__":
    unittest.main()
| {
"repo_name": "hootnot/postcode-api-wrapper",
"path": "tests/test_adres_api.py",
"copies": "1",
"size": "10115",
"license": "mit",
"hash": 5456670431273982000,
"line_mean": 32.7166666667,
"line_max": 88,
"alpha_frac": 0.5672763223,
"autogenerated": false,
"ratio": 4.233989116785266,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0004166666666666667,
"num_lines": 300
} |
# Adrian deWynter, 2016
# Check that Spark is working
# NOTE: written for a Databricks PySpark notebook running Python 2 --
# `sqlContext` and `display` are globals injected by the notebook runtime,
# and the bare `print x` statements are Python 2 syntax.
from pyspark.sql import Row
data = [('Alice', 1), ('Bob', 2), ('Bill', 4)]
df = sqlContext.createDataFrame(data, ['name', 'age'])
fil = df.filter(df.age > 3).collect()
print fil
# If the Spark job doesn't work properly this will raise an AssertionError
assert fil == [Row(u'Bill', 4)]
# Check loading data with sqlContext.read.text
import os.path
baseDir = os.path.join('databricks-datasets', 'cs100')
inputPath = os.path.join('lab1', 'data-001', 'shakespeare.txt')
fileName = os.path.join(baseDir, inputPath)
dataDF = sqlContext.read.text(fileName)
shakespeareCount = dataDF.count()
print shakespeareCount
# If the text file didn't load properly an AssertionError will be raised
assert shakespeareCount == 122395
# Check matplotlib plotting
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from math import log
# function for generating plot layout
def preparePlot(xticks, yticks, figsize=(10.5, 6), hideLabels=False, gridColor='#999999', gridWidth=1.0):
    """Create a styled matplotlib figure/axes with the given tick marks."""
    plt.close()
    fig, ax = plt.subplots(figsize=figsize, facecolor='white', edgecolor='white')
    ax.axes.tick_params(labelcolor='#999999', labelsize='10')
    for axis, ticks in [(ax.get_xaxis(), xticks), (ax.get_yaxis(), yticks)]:
        axis.set_ticks_position('none')
        axis.set_ticks(ticks)
        axis.label.set_color('#999999')
        if hideLabels: axis.set_ticklabels([])
    plt.grid(color=gridColor, linewidth=gridWidth, linestyle='-')
    # NOTE(review): map() runs eagerly only under Python 2; on Python 3 this
    # would be a lazy no-op and the spines would stay visible -- confirm the
    # target runtime before porting.
    map(lambda position: ax.spines[position].set_visible(False), ['bottom', 'top', 'left', 'right'])
    return fig, ax
# generate layout and plot data
x = range(1, 50)
y = [log(x1 ** 2) for x1 in x]
fig, ax = preparePlot(range(5, 60, 10), range(0, 12, 1))
plt.scatter(x, y, s=14**2, c='#d6ebf2', edgecolors='#8cbfd0', alpha=0.75)
ax.set_xlabel(r'$range(1, 50)$'), ax.set_ylabel(r'$\log_e(x^2)$')
display(fig)
pass
"repo_name": "adewynter/Tools",
"path": "Notebooks/Spark/Before starting.py",
"copies": "1",
"size": "1903",
"license": "mit",
"hash": -7091245741693466000,
"line_mean": 35.6153846154,
"line_max": 105,
"alpha_frac": 0.6931161324,
"autogenerated": false,
"ratio": 3.059485530546624,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4252601662946624,
"avg_score": null,
"num_lines": null
} |
# Adrian deWynter, 2016
# Implementation of Adam, as per the original paper available at https://arxiv.org/pdf/1412.6980.pdf
# I tried to make it as versatile as possible, but there are caveats:
# Most operations are element-wise, and it's written with NN use in mind.
# TODO: Adam's update rule is unimplemented.
# TODO: Fix Adam so it works with pointers to a gradient function, theta.
# TODO: Adam should have separate gradient, cost, train functions.
import numpy as np
import math
import theano
# alpha = stepsize
# beta1, beta2 = exponential decay rates for the moment estimates
# f = Our training function (stochastic obective function) f(\theta).
# X,Y = Inputs to the training function. I keep calling it training but it's more like a logistic function.
# You know what I mean.
# theta = parameter vector
# The square on the elements is due to the Hadamard product of vectors.
def Adam(theta,f,X,Y,batchMode=False,batchSize=1000,alpha=0.001,beta1=0.9,beta2=0.999,epsilon=1e-8):
# Initialize first and second moment vectors
# Initialize t, and batch size
m_t = [0 for _ in range(len(theta))]
v_t = [0 for _ in range(len(theta))]
alpha_t = alpha
theta_prime = theta # This is a dangerous line to write.
t = 0
batch = 0
converged = False
# I shun while boolean loops, but Adam converges (in online, convex programming)
while not converged:
t = t+1
if batchMode:
inputs = X[batch:batch+batchSize, :, :]
outputs = Y[batch:batch+batchSize, :]
batch = (batch + batchSize) % X.shape[0]
cost = f(inputs, outputs)
gradient = theano.grad(cost, theta)
else:
cost =f(X,Y)
gradient = theano.grad(cost, theta)
m_t = [beta1*m+(1-beta1)*g for (m, g) in zip(m_t, gradient)]
v_t = [beta2*v+(1-beta2)*g**2 for (v, g) in zip(v_t, gradient)]
mhat_t = [m*(1./(1-beta1**t)) for m in m_t]
vhat_t = [v*(1./(1-beta2**t)) for v in v_t]
alpha_t = alpha*(np.sqrt(1-beta2**2)/(1-beta1**t))
theta_prime = [p.get_value()-alpha_t*m1/(np.sqrt(m2)+epsilon) for (p,m1,m2) in zip(theta, mhat_t, vhat_t)]
delta = [abs(p.get_value()-p_new) for (p, p_new) in zip(theta, theta_prime)]
converged = all((d < 0.5 * alpha_t).all() for d in delta)
theta = theta_prime
# I really hate not having some sort of benchmark on what my computer is doing
if t%100==1 or converged:
print "Cost at t="+str(t-1)+": "+str(cost)
return theta | {
"repo_name": "adewynter/Tools",
"path": "MLandDS/MachineLearning/Adam.py",
"copies": "1",
"size": "2599",
"license": "mit",
"hash": -5465066992736361000,
"line_mean": 39.625,
"line_max": 114,
"alpha_frac": 0.6217776068,
"autogenerated": false,
"ratio": 3.208641975308642,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9244880384514726,
"avg_score": 0.017107839518783242,
"num_lines": 64
} |
# Adrian deWynter, 2016
# Implementation of:
# - GCD
# - LCM
# - LCMM
# - LCM (sequence)
# - XOR-based swap
# - power set generator
def gcd(a, b):
    """Return greatest common divisor using Euclid's Algorithm."""
    while b:
        a, b = b, a % b
    return a
def lcm(a, b):
    """Return lowest common multiple."""
    return a * b // gcd(a, b)
def lcmm(*args):
    """Return lcm of args."""
    # reduce is a builtin only on Python 2; importing it from functools
    # works on both Python 2.6+ and Python 3.
    from functools import reduce
    return reduce(lcm, args)
def lcm_seq(seq):
    """Return lcm of sequence."""
    from functools import reduce
    return reduce(lcm, seq)
def quickSwap(x, y):
    """Swap two integers via XOR and return the pair (y, x).

    The previous version only reassigned its local bindings, so the swap
    was never observable by the caller; returning the swapped pair makes
    the function actually usable.
    """
    x = x ^ y
    y = x ^ y
    x = x ^ y
    return x, y
"""Power sets."""
def powerSetLengthN(x,y):
    """Enumerate the length-len(y) ordered sub-words of `x`.

    NOTE(review): `word` is built and then discarded on every iteration --
    presumably this was meant to `yield word` (or collect the words);
    confirm the intended use before relying on it.
    """
    import itertools
    indexes = [i for i in range(len(x))]
    for c in itertools.combinations(indexes,len(y)):
        word = "".join([x[j] for j in list(c)])
def powerSet():
    """Enumerate the power set of `x` via bitmask counting.

    NOTE(review): relies on a global/enclosing `x` that is not defined at
    module level, and both `ans` (the subset as a string) and `ix` (the
    chosen indices) are discarded each iteration -- presumably these were
    meant to be yielded; confirm intent before use.
    """
    n = 2**len(x)
    for i in xrange(n):
        ans = ""
        ix = []
        for j in xrange(len(x)):
            # Bit j of i selects whether element j is in this subset.
            if i & (1<<j):
                ans += x[j]
                ix.append(j)
# Timing
import time
class Timer(object):
    """Context manager measuring the wall-clock time of its `with` body.

    Attributes (set on exit):
        start, end: timestamps from time.time().
        secs: elapsed seconds.
        msecs: elapsed milliseconds.

    Args:
        verbose: if True, print the elapsed time on exit.
    """
    def __init__(self, verbose=False):
        self.verbose = verbose
    def __enter__(self):
        self.start = time.time()
        return self
    def __exit__(self, *args):
        self.end = time.time()
        self.secs = self.end - self.start
        self.msecs = self.secs * 1000  # millisecs
        if self.verbose:
            # Parenthesized single-argument form so this works under both
            # Python 2 (where it was a print statement) and Python 3.
            print('elapsed time: %f ms' % self.msecs)
# Usage:
'''
from timer import Timer
from redis import Redis
rdb = Redis()
with Timer() as t:
rdb.lpush("foo", "bar")
print "=> elasped lpush: %s s" % t.secs
with Timer() as t:
rdb.lpop("foo")
print "=> elasped lpop: %s s" % t.secs
'''
'''
def matchSubsequence(a,b,ptr,idx):
# print("-----------------")
# print("received {}".format(idx))
j = 0
indexes = []
idx_ = idx[:]
ix = idx_.pop(0)
i = 0
while i < len(a):
# Make sure we stay within range, and terminate
# early if needed.
if j >= len(b) or len(indexes) == len(b):
break
if a[i] == b[j]:
# Acceptable range:
if j == ptr and i <= ix:
# print("ptr: {} ix: {}, idx : {}".format(ptr,ix,idx_))
if idx_ != []:
ix = idx_.pop(0)
# print("i {} j {}".format(i,j))
# print("pop {}, i: {}".format(ix,i))
i+= 1
ptr = min(len(b)-1,ptr + 1)
# Is a match, but we must skip
#elif len(indexes) == ptr and i <= ptr:
# print("count: {} diff: {} ptr: {}".format(len(indexes),len(b) - ptr,ptr))
# i+=1
# ptr +=1
else:
j += 1
#print(indexes,i)
indexes.append(i)
else:
i += 1
if len(indexes) == len(b):
print("ptr: {} ix: {} indexes: {}".format(ptr,ix,indexes))
return tryDict(indexes,indexes[ptr - len(b):])
else:
return -1
def bruteForce(x,y):
count = 0
for i in xrange(len(y)-1,-1,-1):
idx = [len(y) -1]
for j in range(len(x)):
ans = matchSubsequence(x,y,i,idx)
if ans != -1:
idx = ans
idx_ = [len(y) -1]
for j in range(len(x)):
x_ = list(x)
x_[j] = "x"
x_ = "".join(x_)
ans = matchSubsequence(x_,y,i,idx_)
if ans != -1:
idx_ = ans
''' | {
"repo_name": "adewynter/Tools",
"path": "Algorithms/numberTheory/util.py",
"copies": "1",
"size": "3478",
"license": "mit",
"hash": -6212684117061600000,
"line_mean": 22.0397350993,
"line_max": 89,
"alpha_frac": 0.459746981,
"autogenerated": false,
"ratio": 3.217391304347826,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9111823225007003,
"avg_score": 0.013063012068164548,
"num_lines": 151
} |
# Adrian deWynter, 2016
import heapq
import math
def dijkstra(adj, cost, N, s):
visited = {}
ans = {}
Q = []
for k,v in adj.iteritems():
if k != s:
heapq.heappush(Q, [float('inf'), k, float('inf')])
visited[k] = 0
ans[k] = -1
heapq.heappush(Q, [0, s, 0])
visited[s] = 0
ans[s] = 0
while Q:
source = heapq.heappop(Q)
u = source[1]
w = source[0]
p = source[2]
if visited[u] != 1:
visited[u] == 1
for v in adj[u]:
index = -1
for i in Q:
if i[1] == v:
index = Q.index(i)
if index != -1:
if cost[(u, v)] == -1:
temp = [-1, v, u]
ans[v] = -1
del Q[index]
heapq.heapify(Q)
else:
alt = cost[(u, v)] + w
if alt < Q[index][0]:
temp = [alt, v, u]
ans[v] = alt
del Q[index]
heapq.heappush(Q, temp)
out = ''
for k,v in ans.iteritems():
if k != s:
out = out + str(v) + ' '
print out[:-1]
# Driver (Python 2): read T test cases; each case is "V E", then E lines
# of "u v w" (undirected weighted edges, 1-based vertices), then the
# source vertex s.
T = input()
for i in range(0, T):
    val = map(int, raw_input().strip().split(' '))
    graph = []
    cost = {}
    V = val[0]
    E = val[1]
    adj = {k: [] for k in range(1, V+1)}
    # NOTE(review): the inner loop reuses `i` from the outer loop --
    # harmless here because the outer value is not used again, but fragile.
    for i in range(0, E):
        graph.append(map(int,raw_input().strip().split(' ')))
        a = graph[i]
        # Undirected graph: record the weight for both orientations.
        cost[(a[0], a[1])] = a[2]
        cost[(a[1], a[0])] = a[2]
        adj[a[0]].append(a[1])
        adj[a[1]].append(a[0])
    s = input()
    dijkstra(adj,cost, V, s)
| {
"repo_name": "adewynter/Tools",
"path": "Algorithms/graphAlgorithms/dijkstra.py",
"copies": "1",
"size": "2037",
"license": "mit",
"hash": -1901752516514913500,
"line_mean": 22.6860465116,
"line_max": 62,
"alpha_frac": 0.3190967108,
"autogenerated": false,
"ratio": 3.67027027027027,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9357975053263583,
"avg_score": 0.02627838556133732,
"num_lines": 86
} |
# Adrian deWynter, 2016
import heapq
def dijkstra(adj, cost, N, s):
visited = {}
ans = {}
Q = []
for k in range(1, N+1):
if k != s:
heapq.heappush(Q, [99999, k, 99999])
visited[k] = 0
ans[k] = 99999
heapq.heappush(Q, [0, s, 0])
visited[s] = 0
ans[s] = 0
while Q:
source = heapq.heappop(Q)
u = source[1]
w = source[0]
p = source[2]
if visited[u] != 1:
visited[u] == 1
for v in adj[u]:
alt = cost[(u, v)]
index = -1
for i in Q:
if i[1] == v:
index = Q.index(i)
if index != -1:
if alt < Q[index][0]:
temp = [alt, v, u]
ans[v] = alt
del Q[index]
heapq.heappush(Q, temp)
out = 0
for k,v in ans.iteritems():
out = out + v
print out
# Driver (Python 2): read one graph as "V E", then E lines of "u v w"
# (undirected weighted edges, 1-based vertices), then the start vertex s.
val = map(int, raw_input().strip().split(' '))
graph = []
cost = {}
V = val[0]
E = val[1]
adj = {k: [] for k in range(1, V+1)}
for i in range(0, E):
    graph.append(map(int,raw_input().strip().split(' ')))
    a = graph[i]
    # Undirected graph: record the weight for both orientations.
    cost[(a[0], a[1])] = a[2]
    cost[(a[1], a[0])] = a[2]
    adj[a[0]].append(a[1])
    adj[a[1]].append(a[0])
s = input()
prim(adj,cost, V, s)
| {
"repo_name": "adewynter/Tools",
"path": "Algorithms/graphAlgorithms/prim.py",
"copies": "1",
"size": "1512",
"license": "mit",
"hash": 148112077675762530,
"line_mean": 20.6,
"line_max": 57,
"alpha_frac": 0.3670634921,
"autogenerated": false,
"ratio": 3.210191082802548,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4077254574902548,
"avg_score": null,
"num_lines": null
} |
# Adrian deWynter, 2016
# Scratch cheat-sheet of common NLTK workflows. Many lines are illustrative
# snippets (placeholder paths/URLs, undefined `n`), not a runnable script.
import nltk
# Opens the interactive NLTK corpus downloader (blocking GUI/CLI prompt).
nltk.download()
## Working with custom files
# NOTE(review): 'PATH' is a placeholder; `file` also shadows the builtin.
file = open('PATH')
temp = file.read()
tokens = nltk.word_tokenize(temp)
text = nltk.Text(tokens)
# can also be an URL
from urllib import request
url = ""
response = request.urlopen(url)
raw = response.read().decode('encoding')
# then just tokenize as usual
# can also strip raw text:
# NOTE(review): str.strip() returns a new string; the result is discarded.
for line in file:
    line.strip()
## Preprocessing
#remove stopwords
from nltk.corpus import stopwords
stopwords = stopwords.words('english')
# stopwords = ['custom list']
textNew = [w for w in text if w not in stopwords]
#normalization
#g = isalpha() #keep alphabetic chars
#f = lower() #lowercase
#[w.f for w in text if w.g]
set(text) # drop duplicates
## Analysis
# POS tagging
nltk.pos_tag(tokens)
# n > 1 words
nltk.bigrams(text) # there's also trigrams
nltk.ngrams(text, n)
text.collocations()
# frequency distribution
freq = nltk.FreqDist(text) #note FreqDist takes in a LIST.
freq.plot(n, cumulative=False) # n most frequent words
freq.tabulate() # table
text.count('thisword')
text.concordance('word') # sentiment analysis
## ML
labels = [('name', 'label')] # list of features and labels
import random
random.shuffle(labels)
# here we can get smart and classify based on the last letter, for example.
# If you work with big data, do NOT use lists or you'll run out of memory.
# from nltk.classify import apply_features
# train_set, test_set = apply_features(encoding_function, labeled_names[500:]), apply_features(encoding_function, labeled_names[:500])
train_set, test_set = labels[500:], labels[:500]
classifier = nltk.NaiveBayesClassifier.train(train_set)
classifier.classify('name')
print(nltk.classify.accuracy(classifier, test_set))
classifier.show_most_informative_features(5)
"repo_name": "adewynter/Tools",
"path": "MLandDS/NLTK.py",
"copies": "1",
"size": "1762",
"license": "mit",
"hash": 5558307196375385000,
"line_mean": 28.3833333333,
"line_max": 134,
"alpha_frac": 0.7412031782,
"autogenerated": false,
"ratio": 3.23302752293578,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44742307011357796,
"avg_score": null,
"num_lines": null
} |
# Adrian deWynter, 2016
import pandas as pd
import numpy as np
import scipy.io
from sklearn.decomposition import PCA, RandomizedPCA
from plyfile import PlyData, PlyElement
from sklearn import manifold
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import matplotlib
import random, math, datetime
def isoPCAExample():
    """Isomap embedding example on the faces dataset (face_data.mat).

    NOTE(review): references `N`, `x`, `y` and `Plot2D`, none of which are
    defined in this module -- as written this function raises NameError.
    """
    mat = scipy.io.loadmat('Datasets/face_data.mat')
    df = pd.DataFrame(mat['images']).T
    num_images, num_pixels = df.shape
    num_pixels = int(math.sqrt(num_pixels))
    # Rotate each image: reshape the flat row to 2-D, transpose, re-flatten.
    for i in range(num_images): df.loc[i,:] = df.loc[i,:].reshape(num_pixels, num_pixels).T.reshape(-1)
    # Reduce the dataframe df down to THREE components
    iso = manifold.Isomap(n_neighbors=4,n_components=N)
    iso.fit(df)
    # NOTE(review): this local `manifold` shadows the imported sklearn
    # `manifold` module for the rest of the function.
    manifold = iso.transform(df)
    Plot2D(manifold,"isomap",x,y)
    plt.show()
# Every 100 data samples, we save 1.
reduce_factor = 100
# Apply ggplot styling to all subsequent matplotlib figures.
matplotlib.style.use('ggplot')
def armadilloExample():
    """Compare PCA vs RandomizedPCA on the Stanford armadillo point cloud.

    Loads the .ply scan (every `reduce_factor`-th vertex), renders the
    original 3-D cloud, then the two 2-D projections with build times.

    NOTE(review): sklearn's RandomizedPCA was deprecated and later removed
    (PCA(svd_solver='randomized') is the modern equivalent) -- confirm the
    installed sklearn version before running.
    """
    def do_PCA(armadillo):
        # Project the 3-D cloud onto its top two principal components.
        pca = PCA(n_components=2)
        pca.fit(armadillo)
        return pca.transform(armadillo)
    def do_RandomizedPCA(armadillo):
        # Same projection using the randomized SVD solver.
        pca = RandomizedPCA(n_components = 2)
        pca.fit(armadillo)
        return pca.transform(armadillo)
    # Load up the scanned armadillo
    # Axes are permuted (z, x, y) when building the frame, presumably to
    # orient the model upright for display -- verify against the scan.
    plyfile = PlyData.read('Datasets/stanford_armadillo.ply')
    armadillo = pd.DataFrame({
        'x':plyfile['vertex']['z'][::reduce_factor],
        'y':plyfile['vertex']['x'][::reduce_factor],
        'z':plyfile['vertex']['y'][::reduce_factor]
    })
    # Render the original Armadillo
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    ax.set_title('Armadillo 3D')
    ax.set_xlabel('X')
    ax.set_ylabel('Y')
    ax.set_zlabel('Z')
    ax.scatter(armadillo.x, armadillo.y, armadillo.z, c='green', marker='.', alpha=0.75)
    # Render the newly transformed PCA armadillo!
    t1 = datetime.datetime.now()
    pca = do_PCA(armadillo)
    time_delta = datetime.datetime.now() - t1
    if not pca is None:
        fig = plt.figure()
        ax = fig.add_subplot(111)
        ax.set_title('PCA, build time: ' + str(time_delta))
        ax.scatter(pca[:,0], pca[:,1], c='blue', marker='.', alpha=0.75)
    # Render the newly transformed RandomizedPCA armadillo!
    t1 = datetime.datetime.now()
    rpca = do_RandomizedPCA(armadillo)
    time_delta = datetime.datetime.now() - t1
    if not rpca is None:
        fig = plt.figure()
        ax = fig.add_subplot(111)
        ax.set_title('RandomizedPCA, build time: ' + str(time_delta))
        ax.scatter(rpca[:,0], rpca[:,1], c='red', marker='.', alpha=0.75)
    plt.show()
"repo_name": "adewynter/Tools",
"path": "MLandDS/DataScience/appliedExamples.py",
"copies": "1",
"size": "2419",
"license": "mit",
"hash": 10942366052931042,
"line_mean": 27.1395348837,
"line_max": 100,
"alpha_frac": 0.7007027697,
"autogenerated": false,
"ratio": 2.7027932960893857,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8632983162130445,
"avg_score": 0.05410258073178809,
"num_lines": 86
} |
# Adrian deWynter, 2016
import pandas as pd
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from sklearn import linear_model
matplotlib.style.use('ggplot')
def drawLine(model, X_test, y_test, title):
    """Scatter the test data, overlay the model's fit line, and print
    extrapolated life-expectancy predictions for 2014/2030/2045.

    Python 2 print statements; `model` is any fitted sklearn regressor with
    predict()/score().
    """
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.scatter(X_test, y_test, c='g', marker='o')
    ax.plot(X_test, model.predict(X_test), color='orange', linewidth=1, alpha=0.7)
    print "Est 2014 " + title + " Life Expectancy: ", model.predict([[2014]])[0]
    print "Est 2030 " + title + " Life Expectancy: ", model.predict([[2030]])[0]
    print "Est 2045 " + title + " Life Expectancy: ", model.predict([[2045]])[0]
    # R^2 on the held-out data is appended to the plot title.
    score = model.score(X_test, y_test)
    title += " R2: " + str(score)
    ax.set_title(title)
    plt.show()
# Tab-separated life-expectancy table; must contain 'Year' and 'WhiteMale' columns.
X = pd.read_csv('Datasets/life_expectancy.csv', sep="\t")
# Create our LR model
model = linear_model.LinearRegression()
# Cleanup, slicing
# Train on pre-1986 rows, test on 1986-onward rows ('WhiteMale' is the target).
X_train = X[X['Year'] < 1986]
y_train = X_train['WhiteMale']
y_train = pd.DataFrame(y_train)
# NOTE(review): positional axis argument (drop(..., 1)) is deprecated in
# newer pandas — axis=1 keyword preferred; behavior unchanged here.
X_train = X_train.drop('WhiteMale',1)
X_test = X[X['Year'] >= 1986]
y_test = X_test['WhiteMale']
y_test = pd.DataFrame(y_test)
X_test = X_test.drop('WhiteMale',1)
# Train and extrapolate
model.fit(X_train, y_train)
drawLine(model, pd.DataFrame(X_test['Year']),y_test,"WhiteMale")
# A correlation matrix would be amazing here.
plt.show() | {
"repo_name": "adewynter/Tools",
"path": "MLandDS/MachineLearning/LinearRegression.py",
"copies": "1",
"size": "1362",
"license": "mit",
"hash": 4197394115512178700,
"line_mean": 24.7647058824,
"line_max": 79,
"alpha_frac": 0.6534508076,
"autogenerated": false,
"ratio": 2.6811023622047245,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8774107402825214,
"avg_score": 0.012089153395902056,
"num_lines": 51
} |
# Adrian deWynter, 2016
import sys
import queue
class Vertex:
	"""A graph node storing its outgoing edges as {neighbor: distance}."""

	def __init__(self):
		# neighbor label -> smallest edge weight seen so far
		self.edges = {}

	def getEdges(self):
		"""Return the neighbor -> distance mapping."""
		return self.edges

	def addEdge(self, value, distance):
		"""Record an edge to `value`, keeping only the smallest weight on duplicates."""
		known = self.edges.get(value)
		if known is None or distance < known:
			self.edges[value] = distance
class Graph:
	"""A fixed-size graph whose vertices are labelled 1..N."""

	def __init__(self, N):
		self.N = N
		# One empty Vertex per label, 1-indexed.
		self.vertices = {label: Vertex() for label in range(1, N + 1)}

	def getN(self):
		"""Return the number of vertices."""
		return self.N

	def getVertices(self):
		"""Return the label -> Vertex mapping."""
		return self.vertices

	def getVertex(self, value):
		"""Return the Vertex with label `value` (KeyError if absent)."""
		return self.vertices[value]

	def addVertex(self, value, vertex):
		"""Install (or replace) the Vertex stored under `value`."""
		self.vertices[value] = vertex
class Dijkstra:
    """Single-source shortest paths via Dijkstra's algorithm (lazy deletion)."""
    def __init__(self, graph):
        # `graph` must expose getN() and getVertex(label).getEdges().
        self.graph = graph
    def run(self, s):
        """Compute distances from source `s` and print them space-separated
        (-1 for unreachable vertices), skipping `s` itself."""
        visited = {s: 0}
        Q = queue.PriorityQueue()
        self.updateAgenda(s, visited, Q)
        while not Q.empty():
            d, v = Q.get()
            # Lazy deletion: stale (larger-distance) queue entries for an
            # already-settled vertex are simply skipped here.
            if v not in visited:
                visited[v] = d
                self.updateAgenda(v, visited, Q)
        for i in range(1, self.graph.getN() + 1):
            if (i != s):
                d = -1 if i not in visited else visited[i]
                print(d, end=" ")
        print()
    def updateAgenda(self, parent, solved, Q):
        """Push (distance-through-parent, neighbor) for every edge of `parent`."""
        edges = self.graph.getVertex(parent).getEdges()
        for value, distance in edges.items():
            Q.put((solved[parent] + distance, value))
# Driver (HackerRank-style stdin): T test cases; each gives "N M", then M
# undirected weighted edges "u v w", then the source vertex s on its own line.
T = int(input())
for _ in range(T):
    N, M = tuple(map(int, sys.stdin.readline().split(" ")))
    G = Graph(N)
    for _ in range(M):
        u, v, w = tuple(map(int, sys.stdin.readline().split(" ")))
        # Undirected graph: record the edge in both directions.
        G.getVertex(u).addEdge(v, w)
        G.getVertex(v).addEdge(u, w)
    d = Dijkstra(G)
    s = int(input())
    d.run(s)
| {
"repo_name": "adewynter/Tools",
"path": "Algorithms/graphAlgorithms/Dijkstra_forreal.py",
"copies": "1",
"size": "1930",
"license": "mit",
"hash": 99598304461555870,
"line_mean": 21.7176470588,
"line_max": 67,
"alpha_frac": 0.4968911917,
"autogenerated": false,
"ratio": 3.634651600753296,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4631542792453296,
"avg_score": null,
"num_lines": null
} |
# Adrian deWynter, 2016
# Insertion sort and bubble sort are just the same thing
# only that insertion sort will generate a new array every time...
def insertionSort(ar):
    """Insertion-sort `ar` in place and return the number of swaps performed.

    The swap count equals the number of inversions in the input.

    Args:
        ar: mutable list of mutually comparable items; sorted in place.

    Returns:
        int: total adjacent swaps performed.

    Bug fixed: the original started its outer loop at index 1 with the
    condition `index < len(ar) - 1`, so the element at position 1 was never
    inserted into the sorted prefix — e.g. [2, 1, 3] and every 2-element
    unsorted list came back unsorted. (Also: xrange -> range.)
    """
    swaps = 0
    # Pass `index` inserts ar[index + 1] into the sorted prefix ar[:index + 1].
    for index in range(len(ar) - 1):
        for i in range(index, -1, -1):
            if ar[i + 1] < ar[i]:
                ar[i], ar[i + 1] = ar[i + 1], ar[i]
                swaps += 1
            else:
                # Prefix left of i is already sorted; nothing more to do.
                break
    return swaps
# Tally list: quickSort appends a 1 per base-case call and per element
# routed to the left partition (used by the script below as a counter).
Q = []
def quickSort(ar):
	"""Return a sorted copy of `ar` using the first element as pivot.

	Side effect: appends to the module-level list Q exactly as the
	original did — one entry per base-case call (len <= 1) and one per
	element placed in the smaller-than-pivot partition.
	"""
	if len(ar) <= 1:
		Q.append(1)
		return ar
	pivot = ar[0]
	smaller, larger = [], []
	for item in ar[1:]:
		if item < pivot:
			smaller.append(item)
			Q.append(1)
		else:
			larger.append(item)
	result = quickSort(smaller)
	result.append(pivot)
	result.extend(quickSort(larger))
	return result
# Python 2 driver: `input()` evaluates the first line (element count, unused
# beyond reading), `raw_input` reads the space-separated array.
T = input()
ar = map(int, raw_input().strip().split(' '))
# quickSort's return value is discarded; only its side effect on Q
# (the comparison/base-case tally) is used below.
quickSort(ar)
print insertionSort(ar) - sum(Q)
# Where N is the maximum number of digits per value
# Where M is the size of the digit alphabet (10 for decimal numbers)
def radixSort(ar,N,M):
	"""LSD radix sort: N stable passes over base-M digits; returns a new list.

	Bugs fixed: the original swapped N and M — it looped `for x in range(M)`,
	indexed its M buckets with `% N` (IndexError whenever N > M), and
	hard-coded base 10 in the digit shift while sizing buckets by M.
	Also uses // so the digit shift is integer division in Python 3 too.
	"""
	for x in range(N):
		# One bucket per possible digit value, filled in stable order.
		buckets = [[] for _ in range(M)]
		for y in ar:
			buckets[(y // M**x) % M].append(y)
		# Concatenate buckets back into a single list for the next pass.
		ar = []
		for section in buckets:
			ar.extend(section)
	return ar
"repo_name": "adewynter/Tools",
"path": "Algorithms/sortingAndSearch/sorting.py",
"copies": "1",
"size": "1374",
"license": "mit",
"hash": 338078053452162300,
"line_mean": 21.9166666667,
"line_max": 66,
"alpha_frac": 0.5087336245,
"autogenerated": false,
"ratio": 3.3925925925925924,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44013262170925926,
"avg_score": null,
"num_lines": null
} |
# Adrian deWynter, 2016
# I wrote this application because I needed to calculate
# how much paid time out (PTO) I could take given a certain
# day. Lel.
# Only works in 'Murica because we have different holidays :)
# I think I omitted static holidays (Thanksgiving, for example)
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import time
from datetime import date
import math
from matplotlib.table import Table
today,tomonth,toyear,toweekday = tuple(map(int, time.strftime("%d/%m/%Y/%u").split('/')))
def is_leap_year(year):
    """Gregorian rule: leap iff divisible by 4, except centuries not divisible by 400."""
    return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)
#I like this word better. "Leap" makes it sound so... dull.
# If I ever go back in time, I'll punch Pope Gregory XVII in the face. Or politely
# remind him that people tend to settle on the old ways too quickly, and the tech
# of the time can't accurately measure the length of a year. I mean, how hard would
# it be (aside from making Swiss watchmakers' lives more difficult) to ADD 57.6 minutes
# every midnight of a new year? It's cool, and not complicated. It adds an aura of
# romanticism and mystery,too! Kind of like the martian time slip
# BUG FIX: the original condition `toyear%4 == 0 and (toyear%100 == 0 and
# toyear%400 != 0)` was inverted — it flagged exactly the NON-leap century
# years (1900, 2100, ...) and missed every ordinary leap year such as 2016.
isIntercalary = is_leap_year(toyear)
def isWeekend(a,b,date=False):
    """Decide whether grid cell (a, b) of the 20x20 year layout is a weekend.

    Reads module globals `todayCode` (today's grid cell) and `toweekday`
    (ISO day-of-week of today). With date=True returns 'sun'/'sat'/'none'
    instead of a bool.

    NOTE(review): the `%6` in the Saturday test looks suspicious — for a
    mod-7 week one would expect `(linearDistance+toweekday)%7 == 6`;
    confirm against the rendered calendar before changing.
    """
    # Basically we need to calculate the norm mod 7 of (a,b) wrt today.
    # If it's (7-dow) or (6-dow), it's a weekend.
    x = todayCode[0] - a
    y = todayCode[1] - b
    # Cells are laid out row-major in a 20-wide grid, hence the factor 20.
    linearDistance = (x + 20*y)%7
    if (linearDistance+toweekday)%7==0:
        if date:
            return 'sun'
        return True
    elif (linearDistance+toweekday)%6==0:
        if date:
            return 'sat'
        return True
    if date:
        return 'none'
    return False
def has31DaysOrWhat(month):
	"""Return True when `month` (1-12) has 31 days.

	The 31-day months are the odd ones through July (1, 3, 5, 7) and the
	even ones from August on (8, 10, 12). February is treated like any
	other short month here (callers special-case it).
	"""
	if month < 8:
		return month % 2 == 1
	return month % 2 == 0
def getThanksgiving():
    """Return the 20x20-grid cell of this year's Thanksgiving (4th Thursday
    of November), or (0, 0) if it has already passed.

    Depends on the current date (date.today()) and the module globals
    `toyear`, `toweekday` and `todayCode`, so the result changes day to day.
    """
    days=(date.today()-date(toyear,11,1)).days
    if days<=0:
        #November 1st falls on a...
        nov1st=(toweekday+((abs(days)-1)%7))%7
        x = 1
        # Count Thursdays until the 4th one; starts at -1 so the 4th
        # Thursday leaves thursdays == 3 (three full weeks past the first).
        thursdays=-1
        while (nov1st+x) < 31: # We probably can make this faster by solving (m+7x)mod7 but I'm tired
            if (nov1st+x)%7==4:
                thursdays=thursdays+1
            x =x+1
        # Translate "abs(days) + 7*thursdays days from today" into grid coords.
        # NOTE(review): `a` wraps with %20 but `b` adds an un-wrapped offset —
        # asymmetric on purpose? Verify against the rendered calendar.
        a = (todayCode[0] + abs(days) + 7*thursdays)%20
        b = todayCode[1] + (toweekday + abs(days) + 7*thursdays)%20
        return (a,b)
    else:
        # It already passed ):
        return (0,0)
# In 20x20 grid format. (0,0) corresponds to January 1st.
# Interestingly, Washington's Birthday (2/15) has the same
# value in grid category AND Gregorian category.
federalHolidays = [(0,0),(0,17),(2,5),(7,9),(9,4),(12,7),(14,2),(15,13),(16,18),(17,18)]
day = 1
month = 1
# year[x][y] holds the day-of-month for grid cell (x, y), filled row-major.
year = [[] for _ in range(20)]
todayCode = (0,0)
for x in range(20):
    for y in range(20):
        # Get the i,j cell coordinates.
        if today == day and tomonth == month:
            todayCode = (x,y)
        # Boy the correctness analysis on this one is tough.
        # February needs its own rollover (28 or 29 days in a leap year).
        if month==2:
            year[x].append(day)
            day = day + 1
            if day >= 28:
                if (isIntercalary and day>29) or (not isIntercalary and day>28) :
                    day = 1
                    month = 3
        else:
            year[x].append(day)
            day = day +1
            # All other months roll over after 31 or 30 days.
            if (has31DaysOrWhat(month) and day > 31) or (not has31DaysOrWhat(month) and day > 30):
                month = month+1
                day = 1
# Thanksgiving is the only holiday whose grid cell moves year to year.
federalHolidays[8] = getThanksgiving()
# Alright, now we can actually start.
def main():
    """Render the 20x20 day grid built above as a colour-coded table."""
    df = pd.DataFrame((year),columns=[str(x) for x in range(20)])
    calendar(df)
    plt.tight_layout()
    plt.show()
def calendar(data):
    """Draw the 20x20 day grid as a matplotlib Table, colour-coding each cell.

    Colours: magenta = today, lightslategray = already passed, slategray =
    spills into next year, tan = first of a month, teal = weekend,
    powderblue = federal holiday (observed), navajowhite = ordinary workday.
    Reads module globals todayCode, year, federalHolidays and isWeekend.
    """
    fig, ax = plt.subplots()
    ax.set_axis_off()
    tb = Table(ax, bbox=[0,0,1,1])
    rows, cols = data.shape
    width, height = 1.0/cols,1.0/rows
    # nextDay carries a Sunday-holiday over to the following cell
    # (observed-holiday rule). NOTE(review): in the sat branch `color`
    # keeps the 'teal' set just above — appears intentional, verify.
    nextDay = False
    # Draw our table
    for (i,j), header in np.ndenumerate(data):
        # Determine color code.
        if i == todayCode[0] and j == todayCode[1]:
            color = 'magenta' #Today
        elif (i < todayCode[0]) or (i==todayCode[0] and j<todayCode[1]):
            color = 'lightslategray' #Yesterday
        elif i == 19 or (i==18 and j > 4):
            color = 'slategray' #Next year
        elif year[i][j] == 1:
            color = 'tan' # The first of the month
        elif (i,j) in federalHolidays or nextDay:
            if isWeekend(i,j):
                color = 'teal'
            if isWeekend(i,j,True) == 'sun':
                nextDay = True
            elif isWeekend(i,j,True) == 'sat':
                # Saturday holiday: also repaint the preceding (Friday) cell.
                tb.add_cell(i, j-1, width, height, text=int(header),loc='center', facecolor='powderblue')
            else:
                color = 'powderblue' #Someone's birthday
                nextDay = False
        elif isWeekend(i,j):
            color = 'teal' #Gee I wonder which one is this
        else:
            color = 'navajowhite'
        tb.add_cell(i, j, width, height, text=int(header),loc='center', facecolor=color)
    # Input data
    for i, label in enumerate(data.index):
        tb.add_cell(i, -1, width, height, text=label, loc='right',
                    edgecolor='none', facecolor='none')
    for j, label in enumerate(data.columns):
        tb.add_cell(-1, j, width, height/2, text=label, loc='center',
                    edgecolor='none', facecolor='none')
    ax.add_table(tb)
    ax.set_title("How many workdays are there left this year?", y=1.05)
    return fig
if __name__ == '__main__':
main() | {
"repo_name": "adewynter/Tools",
"path": "Scripts/ptoCalculator.py",
"copies": "1",
"size": "4876",
"license": "mit",
"hash": -3569800904870501000,
"line_mean": 27.3546511628,
"line_max": 95,
"alpha_frac": 0.661197703,
"autogenerated": false,
"ratio": 2.6835443037974684,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3844742006797468,
"avg_score": null,
"num_lines": null
} |
# Adrian deWynter, 2016
#Longest common subsequence
def LCS(A, B):
M = [[None]*(len(B) + 1) for _ in xrange(len(A) + 1)]
for i in range(len(A) + 1):
for j in range(len(B) + 1):
if i == 0 or j == 0:
M[i][j] = 0
elif A[i - 1] == B[j - 1]:
M[i][j] = M[i - 1][j - 1] + 1
else:
M[i][j] = max(M[i - 1][j], M[i][j - 1])
index = M[len(A)][len(B)]
L = ['']*(index + 1)
L[index] = '\0'
i = len(A)
j = len(B)
while i > 0 and j > 0:
if A[i - 1] == B[j - 1]:
L[index - 1] = str(A[i - 1]) + ' '
i -= 1
j -= 1
index -= 1
elif M[i - 1][j] > M[i][j - 1]:
i -= 1
else:
j -= 1
L[-1] = L[-1][:-1]
return ''.join(L)
# Python 2 driver: first line holds the two lengths "m n" (read but the
# sequences' own lengths are what actually matter), then one sequence per line.
m, n = tuple(map(int, raw_input().strip().split(' ')))
A = map(int, raw_input().strip().split(' '))
B = map(int, raw_input().strip().split(' '))
print LCS(A, B)
| {
"repo_name": "adewynter/Tools",
"path": "Algorithms/dynamicProgramming/LCS.py",
"copies": "1",
"size": "1050",
"license": "mit",
"hash": -8236671503420694000,
"line_mean": 22.8636363636,
"line_max": 57,
"alpha_frac": 0.3476190476,
"autogenerated": false,
"ratio": 2.7777777777777777,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3625396825377778,
"avg_score": null,
"num_lines": null
} |
# Adrian deWynter, 2016
'''
Adrian deWynter (2016)
Notebook corresponding to an Apache Spark class I once took.
This one implements a math review.
'''
#####
# Remember, databricks has a built-in function (display) that isn't available elsewhere.
# This code isn't meant to run anywhere that isn't Spark -- and some databricks functions
# may still be lunging around.
# I removed testing code and most of the stuff that could be used to correctly identify
# this file when someone is looking up the answers.
#
# Index:
# ------
# 1 - NumPy
# 2 - Spark, lambda functions
#####
'''
###########
# 1 - NumPy
###########
'''
# Scalar multiplication
import numpy as np
simpleArray = np.array([1, 2, 3])
# Perform the scalar product of 5 and the numpy array
timesFive = 5*simpleArray
print 'simpleArray\n{0}'.format(simpleArray)
print '\ntimesFive\n{0}'.format(timesFive)
# Element-wise multiplication and dot product
u = np.arange(0, 5, .5)
v = np.arange(5, 10, .5)
elementWise = u*v
dotProduct = u.dot(v)
print 'u: {0}'.format(u)
print 'v: {0}'.format(v)
print '\nelementWise\n{0}'.format(elementWise)
print '\ndotProduct\n{0}'.format(dotProduct)
# Matrices - AA^{T}, A^{-1}
from numpy.linalg import inv
A = np.matrix([[1,2,3,4],[5,6,7,8]])
print 'A:\n{0}'.format(A)
# Print A transpose
print '\nA transpose:\n{0}'.format(A.T)
# Multiply A by A transpose
AAt = A*(A.T)
print '\nAAt:\n{0}'.format(AAt)
# Invert AAt with np.linalg.inv()
AAtInv = np.linalg.inv(AAt)
print '\nAAtInv:\n{0}'.format(AAtInv)
# Show inverse times matrix equals identity
# We round due to numerical precision
print '\nAAtInv * AAt:\n{0}'.format((AAtInv * AAt).round(4))
# Slices
features = np.array([1, 2, 3, 4])
print 'features:\n{0}'.format(features)
# The last three elements of features
lastThree = features[-3:]
print '\nlastThree:\n{0}'.format(lastThree)
# Combining ndarray objects
zeros = np.zeros(8)
ones = np.ones(8)
print 'zeros:\n{0}'.format(zeros)
print '\nones:\n{0}'.format(ones)
zerosThenOnes = np.hstack((zeros, ones)) # A 1 by 16 array
zerosAboveOnes = np.vstack((zeros, ones)) # A 2 by 8 array
print '\nzerosThenOnes:\n{0}'.format(zerosThenOnes)
print '\nzerosAboveOnes:\n{0}'.format(zerosAboveOnes)
'''
#############################
# 2 - Spark, lambda functions
#############################
'''
# DenseVector
from pyspark.mllib.linalg import DenseVector
numpyVector = np.array([-3, -4, 5])
print '\nnumpyVector:\n{0}'.format(numpyVector)
# Create a DenseVector consisting of the values [3.0, 4.0, 5.0]
myDenseVector = DenseVector([3.0, 4.0, 5.0])
# Calculate the dot product between the two vectors.
denseDotProduct = numpyVector.dot(myDenseVector)
print 'myDenseVector:\n{0}'.format(myDenseVector)
print '\ndenseDotProduct:\n{0}'.format(denseDotProduct)
# Lambda functions
# Example function
def addS(x):
    """Return `x` with the letter 's' concatenated (naive pluralization)."""
    suffix = 's'
    return x + suffix
print type(addS)
print addS
print addS('cat')
# As a lambda
addSLambda = lambda x: x + 's'
print type(addSLambda)
print addSLambda
print addSLambda('cat')
multiplyByTen = lambda x: x*10
print multiplyByTen(5)
print '\n', multiplyByTen
# Code using def that we will recreate with lambdas
def plus(x, y):
    """Return the sum of x and y."""
    return x + y
def minus(x, y):
    """Return x minus y."""
    return x - y
functions = [plus, minus]
print functions[0](4, 5)
print functions[1](4, 5)
lambdaFunctions = [lambda x,y: x+y , lambda x,y: x-y]
print lambdaFunctions[0](4, 5)
print lambdaFunctions[1](4, 5)
a1 = lambda x: x[0] + x[1]
a2 = lambda (x0, x1): x0 + x1
print 'a1( (3,4) ) = {0}'.format( a1( (3,4) ) )
print 'a2( (3,4) ) = {0}'.format( a2( (3,4) ) )
# Two-parameter function
b1 = lambda x, y: (x[0] + y[0], x[1] + y[1])
b2 = lambda (x0, x1), (y0, y1): (x0 + y0, x1 + y1)
print '\nb1( (1,2), (3,4) ) = {0}'.format( b1( (1,2), (3,4) ) )
print 'b2( (1,2), (3,4) ) = {0}'.format( b2( (1,2), (3,4) ) )
# Takes in a tuple of two values and swaps their order
swap1 = lambda x: (x[1], x[0])
swap2 = lambda (x0, x1): (x1, x0)
print 'swap1((1, 2)) = {0}'.format(swap1((1, 2)))
print 'swap2((1, 2)) = {0}'.format(swap2((1, 2)))
swapOrder = lambda x: (x[1], x[2], x[0])
print 'swapOrder((1, 2, 3)) = {0}'.format(swapOrder((1, 2, 3)))
sumThree = lambda x,y,z: (x[0]+y[0]+z[0],x[1]+y[1]+z[1])
print 'sumThree((1, 2), (3, 4), (5, 6)) = {0}'.format(sumThree((1, 2), (3, 4), (5, 6)))
# Lambda functions - advanced applications
class FunctionalWrapper(object):
    """Wrap a list to expose RDD-like map/reduce/filter methods.

    Python 2 semantics throughout: map() and filter() return lists, and
    reduce() is the builtin (moved to functools in Python 3).
    """
    def __init__(self, data):
        # The wrapped sequence; attribute/item access is delegated to it.
        self.data = data
    def map(self, function):
        """Call map on the items in data using the provided function"""
        return FunctionalWrapper(map(function, self.data))
    def reduce(self, function):
        """Call reduce on the items in data using the provided function"""
        return reduce(function, self.data)
    def filter(self, function):
        """Call filter on the items in data using the provided function"""
        return FunctionalWrapper(filter(function, self.data))
    def __eq__(self, other):
        # Value equality: same class and same wrapped data.
        # NOTE(review): __hash__ is not defined alongside __eq__, so
        # instances keep identity hashing in Python 2 — fine for this demo.
        return (isinstance(other, self.__class__)
            and self.__dict__ == other.__dict__)
    def __getattr__(self, name): return getattr(self.data, name)
    def __getitem__(self, k): return self.data.__getitem__(k)
    def __repr__(self): return 'FunctionalWrapper({0})'.format(repr(self.data))
    def __str__(self): return 'FunctionalWrapper({0})'.format(str(self.data))
# Map example
mapData = FunctionalWrapper(range(5))
f = lambda x: x + 3
# Imperative programming: loop through and create a new object by applying f
mapResult = FunctionalWrapper([]) # Initialize the result
for element in mapData:
mapResult.append(f(element)) # Apply f and save the new value
print 'Result from for loop: {0}'.format(mapResult)
# Functional programming: use map rather than a for loop
print 'Result from map call: {0}'.format(mapData.map(f))
dataset = FunctionalWrapper(range(10))
# Multiply each element by 5
mapResult = dataset.map(lambda x: x*5)
# Keep the even elements
# Note that "x % 2" evaluates to the remainder of x divided by 2
filterResult = dataset.filter(lambda x: (x+1)%2)
# Sum the elements
reduceResult = dataset.reduce(lambda x,y: x+y)
print 'mapResult: {0}'.format(mapResult)
print '\nfilterResult: {0}'.format(filterResult)
print '\nreduceResult: {0}'.format(reduceResult)
# Composability
# Example of a multi-line expression statement
# Note that placing parentheses around the expression allows it to exist on multiple lines without
# causing a syntax error.
(dataset
.map(lambda x: x + 2)
.reduce(lambda x, y: x * y))
# Multiply the elements in dataset by five, keep just the even values, and sum those values
finalSum = dataset.map(lambda x: x*5).filter(lambda x: (x+1)%2).reduce(lambda x,y: x+y)
print finalSum | {
"repo_name": "adewynter/Tools",
"path": "Notebooks/Spark-ML/Math review.py",
"copies": "1",
"size": "6663",
"license": "mit",
"hash": 2638941619262329300,
"line_mean": 30.1401869159,
"line_max": 98,
"alpha_frac": 0.6644154285,
"autogenerated": false,
"ratio": 2.9262187088274043,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.40906341373274047,
"avg_score": null,
"num_lines": null
} |
# Adrian deWynter, 2016
'''
Adrian deWynter (2016)
Notebook corresponding to an Apache Spark class I once took.
This one implements (another) a word count application.
'''
#####
# Remember, databricks has a built-in function (display) that isn't available elsewhere.
# This code isn't meant to run anywhere that isn't Spark -- and some databricks functions
# may still be lunging around.
# I removed testing code and most of the stuff that could be used to correctly identify
# this file when someone is looking up the answers.
#
# Index:
# ------
# 1 - Setup
# 2 - Counting and uniqueness
# 3 - String manipulation
#####
'''
###########
# 1 - Setup
###########
'''
# Create a base RDD with parallelize and use pair RDDs to count words.
wordsList = ['cat', 'elephant', 'rat', 'rat', 'cat']
wordsRDD = sc.parallelize(wordsList, 4)
# Print out the type of wordsRDD
print type(wordsRDD)
# Use a map() transformation to add the letter 's' to each string in the base RDD/
def makePlural(word):
    """Return `word` with a trailing 's' (naive pluralization)."""
    suffix = 's'
    return word + suffix
print makePlural('cat')
pluralRDD = wordsRDD.map(makePlural)
print pluralRDD.collect()
# Create the same RDD using a lambda function.
pluralLambdaRDD = wordsRDD.map(lambda x: x+"s")
print pluralLambdaRDD.collect()
# Now use map() and a lambda function to return the number of characters in each word.
pluralLengths = (pluralRDD.map(lambda x: len(x)).collect())
print pluralLengths
# Create a pair RDD
wordPairs = wordsRDD.map(lambda x: (x, 1))
print wordPairs.collect()
'''
#############################
# 2 - Counting and uniqueness
#############################
'''
# Count the number of times a particular word appears in the RDD.
wordsGrouped = wordPairs.groupByKey()
for key, value in wordsGrouped.collect():
print '{0}: {1}'.format(key, list(value))
wordCountsGrouped = wordsGrouped.map(lambda (x, y): (x, len(y)))#sum(1 for _ in y)))
print wordCountsGrouped.collect()
wordCounts = wordPairs.reduceByKey(lambda x, y: x+y)
print wordCounts.collect()
# All together
wordCountsCollected = (wordsRDD.map(lambda x: (x, 1)).reduceByKey(lambda x,y: x+y).collect())
print wordCountsCollected
# Calculate the number of unique words in wordsRDD
uniqueWords = wordsRDD.distinct().count()
print uniqueWords
# Find the mean number of words per unique word in wordCounts.
from operator import add
totalCount = (wordCounts.map(lambda (x,y): y).reduce(lambda x,y: x+y))
average = totalCount / float(wordCounts.distinct().count())
print totalCount
print round(average, 2)
'''
#########################
# 3 - String manipulation
#########################
'''
# Creates a pair RDD with word counts from an RDD of words.
# Args: an RDD consisting of words.
# Returns: an RDD consisting of (word, count) tuples.
def wordCount(wordListRDD):
    """Map each word to (word, 1), then sum the 1s per word with reduceByKey."""
    return wordListRDD.map(lambda x: (x, 1)).reduceByKey(lambda x,y: x+y)
print wordCount(wordsRDD).collect()
import re
print removePunctuation('Hi, you!')
print removePunctuation(' No under_score!')
print removePunctuation(' * Remove punctuation then spaces * ')
import os.path
fileName = ""
shakespeareRDD = sc.textFile(fileName, 8).map(removePunctuation)
print '\n'.join(shakespeareRDD
.zipWithIndex() # to (line, lineNum)
.map(lambda (l, num): '{0}: {1}'.format(num, l)) # to 'lineNum: line'
.take(15))
# Apply a transformation that will split each element of the RDD by its spaces
shakespeareWordsRDD = shakespeareRDD.flatMap(lambda x: x.split(' '))
shakespeareWordCount = shakespeareWordsRDD.count()
print shakespeareWordsRDD.top(5)
print shakespeareWordCount
# Filter out the empty elements.
shakeWordsRDD = shakespeareWordsRDD.filter(lambda x: x != '')
shakeWordCount = shakeWordsRDD.count()
print shakeWordCount
# Count the words
top15WordsAndCounts = shakeWordsRDD.map(lambda x: (x, 1)).reduceByKey(lambda x, y: x+y).takeOrdered(15, key = lambda x: -x[1])
print '\n'.join(map(lambda (w, c): '{0}: {1}'.format(w, c), top15WordsAndCounts)) | {
"repo_name": "adewynter/Tools",
"path": "Notebooks/Spark-ML/Word count.py",
"copies": "1",
"size": "4165",
"license": "mit",
"hash": -2722293236089868000,
"line_mean": 29.8592592593,
"line_max": 126,
"alpha_frac": 0.6931572629,
"autogenerated": false,
"ratio": 3.342696629213483,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4535853892113483,
"avg_score": null,
"num_lines": null
} |
# Adrian deWynter, 2016
'''
Adrian deWynter (2016)
Notebook corresponding to an Apache Spark class I once took.
This one implements a supervised learning pipeline with the Million Song Dataset.
'''
#####
# Remember, databricks has a built-in function (display) that isn't available elsewhere.
# This code isn't meant to run anywhere that isn't Spark -- and some databricks functions
# may still be lunging around.
# I removed testing code and most of the stuff that could be used to correctly identify
# this file when someone is looking up the answers.
#
# Index:
# ------
# 1 - Setup and analysis
# 2 - Train and evaluate - baseline
# 3 - Train and evaluate - gradient descent
# 4 - Train and evaluate - grid search
# 5 - Feature analysis
#####
'''
########################
# 1 - Setup and analysis
########################
'''
# Store the raw data in a df, with each element of
# the df representing a data point as a comma-delimited string.
# Each string starts with the label (a year) followed by numerical audio features.
import os.path
file_name = os.path.join('millionsong.txt')
raw_data_df = sqlContext.read.load(file_name, 'text')
num_points = raw_data_df.count()
print num_points
sample_points = raw_data_df.take(5)
print sample_points
import numpy as np
from pyspark.sql import functions as sql_functions
# Converts a df of comma separated unicode strings into a df of LabeledPoints.
# Args: df: df where each row is a comma separated unicode string.
# First element in the string is the label and the remaining elements are the features.
# Returns a df: Each row is converted into a LabeledPoint, which consists of a label and
# features.
def parse_point(df):
    """Parse each 'label,f1,f2,...' row into a LabeledPoint; return as a DataFrame.

    NOTE(review): LabeledPoint is not imported in this section — presumably
    `from pyspark.mllib.regression import LabeledPoint` ran in an earlier
    notebook cell; confirm. Each row's string is split twice (minor waste).
    Python 2: map() returning a sliceable list is relied upon here.
    """
    return df.map(lambda x: LabeledPoint(float(x.value.split(',')[0]), map(float, x.value.split(','))[1:])).toDF()
parsed_points_df = parse_point(raw_data_df)
first_point_features = parsed_points_df.select("features").first()[0]
first_point_label = parsed_points_df.select("label").first()[0]
print first_point_features, first_point_label
d = len(first_point_features)
print d
# Look at the raw features for 50 data points by generating a heatmap
# that visualizes each feature and shows the variation of each feature
# across the 50 sample data points.
import matplotlib.pyplot as plt
import matplotlib.cm as cm
data_values = (parsed_points_df
.rdd
.map(lambda lp: lp.features.toArray())
.takeSample(False, 50, 47))
def prepare_plot(xticks, yticks, figsize=(10.5, 6), hideLabels=False, gridColor='#999999',
                 gridWidth=1.0):
    """Create a white figure/axes pair with muted ticks and a light grid.

    Args:
        xticks, yticks: tick positions for the x and y axes.
        figsize: figure size in inches.
        hideLabels: when True, suppress tick labels entirely.
        gridColor, gridWidth: grid line styling.

    Returns:
        (fig, ax) ready for plotting.
    """
    plt.close()
    fig, ax = plt.subplots(figsize=figsize, facecolor='white', edgecolor='white')
    ax.axes.tick_params(labelcolor='#999999', labelsize='10')
    for axis, ticks in [(ax.get_xaxis(), xticks), (ax.get_yaxis(), yticks)]:
        axis.set_ticks_position('none')
        axis.set_ticks(ticks)
        axis.label.set_color('#999999')
        if hideLabels: axis.set_ticklabels([])
    plt.grid(color=gridColor, linewidth=gridWidth, linestyle='-')
    # Hide all four spines. NOTE(review): relies on Python 2's eager map();
    # under Python 3 this lazy map would never run.
    map(lambda position: ax.spines[position].set_visible(False), ['bottom', 'top', 'left', 'right'])
    return fig, ax
# generate layout and plot
fig, ax = prepare_plot(np.arange(.5, 11, 1), np.arange(.5, 49, 1), figsize=(8,7), hideLabels=True,
gridColor='#eeeeee', gridWidth=1.1)
image = plt.imshow(data_values,interpolation='nearest', aspect='auto', cmap=cm.Greys)
for x, y, s in zip(np.arange(-.125, 12, 1), np.repeat(-.75, 12), [str(x) for x in range(12)]):
plt.text(x, y, s, color='#999999', size='10')
plt.text(4.7, -3, 'Feature', color='#999999', size='11'), ax.set_ylabel('Observation')
display(fig)
# Examine the labels to find the range of song years.
content_stats = parsed_points_df.groupBy()
min_year = parsed_points_df.groupBy().min('label').collect()[0][0]
max_year = parsed_points_df.groupBy().max('label').collect()[0][0]
print min_year, max_year
# Shift the labels
parsed_data_df = parsed_points_df.map(lambda x: LabeledPoint(x.label - min_year, x.features)).toDF()
print '\n{0}'.format(parsed_data_df.first())
# Look at the labels before and after shifting them.
old_data = (parsed_points_df
.rdd
.map(lambda lp: (lp.label, 1))
.reduceByKey(lambda x, y: x + y)
.collect())
x, y = zip(*old_data)
# generate layout and plot data
fig, ax = prepare_plot(np.arange(1920, 2050, 20), np.arange(0, 150, 20))
plt.scatter(x, y, s=14**2, c='#d6ebf2', edgecolors='#8cbfd0', alpha=0.75)
ax.set_xlabel('Year'), ax.set_ylabel('Count')
display(fig)
# get data for plot
new_data = (parsed_points_df
.rdd
.map(lambda lp: (lp.label, 1))
.reduceByKey(lambda x, y: x + y)
.collect())
x, y = zip(*new_data)
# generate layout and plot data
fig, ax = prepare_plot(np.arange(0, 120, 20), np.arange(0, 120, 20))
plt.scatter(x, y, s=14**2, c='#d6ebf2', edgecolors='#8cbfd0', alpha=0.75)
ax.set_xlabel('Year (shifted)'), ax.set_ylabel('Count')
display(fig)
pass
# Split the dataset into training, validation and test sets.
weights = [.8, .1, .1]
seed = 42
parsed_train_data_df, parsed_val_data_df, parsed_test_data_df = parsed_data_df.randomSplit(weights, seed)
parsed_train_data_df.cache()
parsed_val_data_df.cache()
parsed_test_data_df.cache()
n_train = parsed_train_data_df.count()
n_val = parsed_val_data_df.count()
n_test = parsed_test_data_df.count()
print n_train, n_val, n_test, n_train + n_val + n_test
print parsed_data_df.count()
'''
###################################
# 2 - Train and evaluate - baseline
###################################
'''
# Use the average label in the training set as the constant prediction value.
average_train_year = (parsed_train_data_df.select('label').groupBy().avg()).first()[0]
print average_train_year
# Compute the RMSE given a dataset of (prediction, label)_tuples.
from pyspark.ml.evaluation import RegressionEvaluator
preds_and_labels = [(1., 3.), (2., 1.), (2., 2.)]
preds_and_labels_df = sqlContext.createdf(preds_and_labels, ["prediction", "label"])
evaluator = RegressionEvaluator(predictionCol="prediction", labelCol="label")
# Calculates the RMSE
# Args: A df consisting of (prediction, label) tuples.
# Returns The square root of the mean of the squared errors.
def calc_RMSE(dataset):
    """Evaluate `dataset` with the module-level RegressionEvaluator using the
    'rmse' metric (depends on the `evaluator` defined in the cell above)."""
    return evaluator.evaluate(dataset, {evaluator.metricName: "rmse"})
example_rmse = calc_RMSE(preds_and_labels_df)
print example_rmse
# Calculate the training, validation and test RMSE of our baseline model.
preds_and_labels_train = parsed_train_data_df.map( lambda x: (x.label, average_train_year))
preds_and_labels_train_df = sqlContext.createdf(preds_and_labels_train, ["prediction", "label"])
rmse_train_base = calc_RMSE(preds_and_labels_train_df)
preds_and_labels_val = parsed_val_data_df.map( lambda x:( x.label, average_train_year))
preds_and_labels_val_df = sqlContext.createdf(preds_and_labels_val, ["prediction", "label"])
rmse_val_base = calc_RMSE(preds_and_labels_val_df)
preds_and_labels_test = parsed_test_data_df.map( lambda x:( x.label, average_train_year))
preds_and_labels_test_df = sqlContext.createdf(preds_and_labels_test, ["prediction", "label"])
rmse_test_base = calc_RMSE(preds_and_labels_test_df)
print 'Baseline Train RMSE = {0:.3f}'.format(rmse_train_base)
print 'Baseline Validation RMSE = {0:.3f}'.format(rmse_val_base)
print 'Baseline Test RMSE = {0:.3f}'.format(rmse_test_base)
# Visualize predictions on the validation dataset.
from matplotlib.colors import ListedColormap, Normalize
from matplotlib.cm import get_cmap
cmap = get_cmap('YlOrRd')
norm = Normalize()
# Calculates the squared error for a single prediction.
def squared_error(label, prediction):
  """Return the squared error (label - prediction)^2 as a float."""
  residual = label - prediction
  return float(residual * residual)
# Ground-truth labels from the validation set, as a numpy array.
actual = np.asarray(parsed_val_data_df
                    .select('label')
                    .collect())
# Per-point squared error; here prediction == label, so every error is 0
# (this first plot is the "perfect prediction" reference diagonal).
# NOTE: `lambda (l, p): ...` is Python-2-only tuple-parameter syntax.
error = np.asarray(parsed_val_data_df
                   .rdd
                   .map(lambda lp: (lp.label, lp.label))
                   .map(lambda (l, p): squared_error(l, p))
                   .collect())
clrs = cmap(np.asarray(norm(error)))[:,0:3]
fig, ax = prepare_plot(np.arange(0, 100, 20), np.arange(0, 100, 20))
plt.scatter(actual, actual, s=14**2, c=clrs, edgecolors='#888888', alpha=0.75, linewidths=0.5)
ax.set_xlabel('Predicted'), ax.set_ylabel('Actual')
display(fig)
# Second plot: the constant baseline prediction against the true labels.
predictions = np.asarray(parsed_val_data_df
                         .rdd
                         .map(lambda lp: average_train_year)
                         .collect())
error = np.asarray(parsed_val_data_df
                   .rdd
                   .map(lambda lp: (average_train_year, lp.label))
                   .map(lambda (l, p): squared_error(l, p))
                   .collect())
norm = Normalize()
clrs = cmap(np.asarray(norm(error)))[:,0:3]
fig, ax = prepare_plot(np.arange(53.0, 55.0, 0.5), np.arange(0, 100, 20))
ax.set_xlim(53, 55)
plt.scatter(predictions, actual, s=14**2, c=clrs, edgecolors='#888888', alpha=0.75, linewidths=0.3)
ax.set_xlabel('Predicted'), ax.set_ylabel('Actual')
display(fig)
'''
###########################################
# 3 - Train and evaluate - gradient descent
###########################################
'''
# Use linear regression: train a model via gradient descent.
# Recall that the gradient descent update for linear regression is:
# \mathbf{w}_{i+1} = \mathbf{w}_i - \alpha_i \sum_j (\mathbf{w}_i^\top\mathbf{x}_j - y_j) \mathbf{x}_j
# where i is the iteration number of the gradient descent algorithm, and j identifies the observation.
#
from pyspark.mllib.linalg import DenseVector
# Calculates the gradient summand for a given weight and LabeledPoint.
# Args: an array of model weights/betas, and the LabeledPoint for a single observation.
# Returns: the gradient summand.
def gradient_summand(weights, lp):
  """Compute one observation's contribution to the least-squares gradient.

  Args:
    weights: current model weights (vector-like, supports .dot).
    lp: LabeledPoint-like object exposing .features and .label.

  Returns:
    (w . x - y) * x for this observation.

  The original took `weights.transpose()` first; for a 1-D vector the
  transpose is the identity, so it is omitted here.
  """
  features = lp.features
  residual = weights.dot(features) - lp.label
  return residual * features
# Sanity-check gradient_summand on two hand-computable examples.
# NOTE(review): LabeledPoint is used here but only DenseVector is imported
# above -- presumably LabeledPoint was imported earlier in the full notebook.
example_w = DenseVector([1, 1, 1])
example_lp = LabeledPoint(2.0, [3, 1, 4])
summand_one = gradient_summand(example_w, example_lp)
print summand_one
example_w = DenseVector([.24, 1.2, -1.4])
example_lp = LabeledPoint(3.0, [-1.4, 4.2, 2.1])
summand_two = gradient_summand(example_w, example_lp)
print summand_two
# Calculates predictions and returns a (prediction, label) tuple.
# Args: an array with one weight for each feature in trainData, and
# a LabeledPoint that contains the correct label and the features for the data point.
# Returns: (prediction, label)
def get_labeled_prediction(weights, observation):
  """Score one observation and pair the prediction with its true label.

  Args:
    weights: array of model weights, one per feature.
    observation: LabeledPoint-like object with .features and .label.

  Returns:
    (prediction, label) as a pair of floats.
  """
  # Materialize as a list rather than `map(float, ...)`: under Python 3
  # map() returns an iterator that numpy.dot cannot consume; under
  # Python 2 a list is exactly what map() produced, so behavior is equal.
  feats = [float(value) for value in observation.features]
  label = float(observation.label)
  return float(weights.dot(feats)), label
# Demonstrate get_labeled_prediction on a tiny in-memory RDD.
weights = np.array([1.0, 1.5])
prediction_example = sc.parallelize([LabeledPoint(2, np.array([1.0, .5])),
                                     LabeledPoint(1.5, np.array([.5, .5]))])
preds_and_labels_example = prediction_example.map(lambda lp: get_labeled_prediction(weights, lp))
print preds_and_labels_example.collect()
# Calculates the weights and error for a linear regression model trained with gradient descent.
# Args: The labeled data for use in training the model, and the number of iterations.
# Returns: a tuple of (weights, training errors). Training errors contain RMSE for each iteration of the algorithm.
def linreg_gradient_descent(train_data, num_iters):
  """Train a linear regression model with batch gradient descent.

  Args:
    train_data: RDD of LabeledPoint used to fit the model.
    num_iters (int): number of gradient-descent iterations.

  Returns:
    (weights, error_train): the final weight vector and an array with the
    training RMSE recorded at the start of each iteration.
  """
  # The length of the training data
  n = train_data.count()
  # The number of features in the training data
  d = len(train_data.first().features)
  w = np.zeros(d)
  alpha = 1.0
  # We will compute and store the training error after each iteration
  error_train = np.zeros(num_iters)
  for i in range(num_iters):
    # Score the whole training set with the current weights so RMSE can
    # be tracked via the shared calc_RMSE helper.
    preds_and_labels_train = train_data.map(lambda x: get_labeled_prediction(w, x))
    # NOTE(review): `createdf` looks like a scrubbed `createDataFrame`;
    # confirm before running. Building a DataFrame every iteration serves
    # only error tracking.
    preds_and_labels_train_df = sqlContext.createdf(preds_and_labels_train, ["prediction", "label"])
    error_train[i] = calc_RMSE(preds_and_labels_train_df)
    # Calculate the gradient.
    gradient = train_data.map(lambda x: gradient_summand(w, x)).sum()
    # Update the weights
    # with a decaying step size: alpha / (n * sqrt(iteration + 1)).
    alpha_i = alpha / (n * np.sqrt(i+1))
    w -= alpha_i*gradient
  return w, error_train
# Train a linear regression model and evaluate its accuracy on the validation set.
num_iters = 50
weights_LR0, error_train_LR0 = linreg_gradient_descent(parsed_train_data_df, num_iters)
preds_and_labels = parsed_val_data_df.map( lambda x: get_labeled_prediction(weights_LR0, x))
preds_and_labels_df = sqlContext.createdf(preds_and_labels, ["prediction", "label"])
rmse_val_LR0 = calc_RMSE(preds_and_labels_df)
print 'Validation RMSE:\n\tBaseline = {0:.3f}\n\tLR0 = {1:.3f}'.format(rmse_val_base,
                                                                       rmse_val_LR0)
# Visualize the log of the training error as a function of iteration.
norm = Normalize()
clrs = cmap(np.asarray(norm(np.log(error_train_LR0))))[:,0:3]
fig, ax = prepare_plot(np.arange(0, 60, 10), np.arange(2, 6, 1))
ax.set_ylim(2, 6)
plt.scatter(range(0, num_iters), np.log(error_train_LR0), s=14**2, c=clrs, edgecolors='#888888', alpha=0.75)
ax.set_xlabel('Iteration'), ax.set_ylabel(r'$\log_e(errorTrainLR0)$')
display(fig)
# Same curve on a linear scale, skipping the first 6 noisy iterations.
norm = Normalize()
clrs = cmap(np.asarray(norm(error_train_LR0[6:])))[:,0:3]
fig, ax = prepare_plot(np.arange(0, 60, 10), np.arange(17, 22, 1))
ax.set_ylim(17.8, 21.2)
plt.scatter(range(0, num_iters-6), error_train_LR0[6:], s=14**2, c=clrs, edgecolors='#888888', alpha=0.75)
ax.set_xticklabels(map(str, range(6, 66, 10)))
ax.set_xlabel('Iteration'), ax.set_ylabel(r'Training Error')
display(fig)
'''
######################################
# 4 - Train and evaluate - grid search
######################################
'''
# Add an intercept, use regularization, and more iterations.
from pyspark.ml.regression import LinearRegression
# Values to use when training the linear regression model
num_iters = 500 # iterations
reg = 1e-1 # regParam
alpha = .2 # elasticNetParam
use_intercept = True # intercept
lin_reg = LinearRegression(regParam=reg, maxIter=num_iters, elasticNetParam=alpha, fitIntercept=use_intercept)
first_model = lin_reg.fit(parsed_train_data_df)
# coeffsLR1 stores the model coefficients; interceptLR1 stores the model intercept
coeffs_LR1 = first_model.coefficients
intercept_LR1 = first_model.intercept
print coeffs_LR1, intercept_LR1
# Now use the model to make predictions
sample_prediction = first_model.transform(parsed_train_data_df)
display(sample_prediction)
# Evaluate the accuracy of this model on the validation set
val_pred_df = first_model.transform(parsed_val_data_df)
rmse_val_LR1 = calc_RMSE(val_pred_df)
print ('Validation RMSE:\n\tBaseline = {0:.3f}\n\tLR0 = {1:.3f}' +
       '\n\tLR1 = {2:.3f}').format(rmse_val_base, rmse_val_LR0, rmse_val_LR1)
# Perform grid search to find a good regularization parameter.
best_RMSE = rmse_val_LR1
best_reg_param = reg
best_model = first_model
num_iters = 500 # iterations
alpha = .2 # elasticNetParam
use_intercept = True # intercept
# Keep whichever regParam yields the lowest validation RMSE.
for reg in [1e-10, 1e-5, 1.0]:
  lin_reg = LinearRegression(maxIter=num_iters, regParam=reg, elasticNetParam=alpha, fitIntercept=use_intercept)
  model = lin_reg.fit(parsed_train_data_df)
  val_pred_df = model.transform(parsed_val_data_df)
  rmse_val_grid = calc_RMSE(val_pred_df)
  print rmse_val_grid
  if rmse_val_grid < best_RMSE:
    best_RMSE = rmse_val_grid
    best_reg_param = reg
    best_model = model
rmse_val_LR_grid = best_RMSE
print ('Validation RMSE:\n\tBaseline = {0:.3f}\n\tLR0 = {1:.3f}\n\tLR1 = {2:.3f}\n' +
       '\tLRGrid = {3:.3f}').format(rmse_val_base, rmse_val_LR0, rmse_val_LR1, rmse_val_LR_grid)
# Create a color-coded scatter plot visualizing tuples storing the predicted value from this model and the true label
parsed_val_df = best_model.transform(parsed_val_data_df)
predictions = np.asarray(parsed_val_df
                         .select('prediction')
                         .collect())
actual = np.asarray(parsed_val_df
                    .select('label')
                    .collect())
error = np.asarray(parsed_val_df
                   .rdd
                   .map(lambda lp: squared_error(lp.label, lp.prediction))
                   .collect())
norm = Normalize()
clrs = cmap(np.asarray(norm(error)))[:,0:3]
fig, ax = prepare_plot(np.arange(0, 120, 20), np.arange(0, 120, 20))
ax.set_xlim(15, 82), ax.set_ylim(-5, 105)
plt.scatter(predictions, actual, s=14**2, c=clrs, edgecolors='#888888', alpha=0.75, linewidths=.5)
ax.set_xlabel('Predicted'), ax.set_ylabel(r'Actual')
display(fig)
# Perform a visualization of hyperparameter search:
# Create a heat map where the brighter colors correspond to lower RMSE values.
from matplotlib.colors import LinearSegmentedColormap
# Saved parameters and results, to save the time required to run 36 models
num_iters = 500
reg_params = [1.0, 2.0, 4.0, 8.0, 16.0, 32.0]
alpha_params = [0.0, .1, .2, .4, .8, 1.0]
# Precomputed validation RMSE for the full 6x6 (alpha x reg) grid.
rmse_val = np.array([[ 15.317156766552452, 15.327211561989827, 15.357152971253697, 15.455092206273847, 15.73774335576239,
                       16.36423857334287, 15.315019185101972, 15.305949211619886, 15.355590337955194, 15.573049001631558,
                       16.231992712117222, 17.700179790697746, 15.305266383061921, 15.301104931027034, 15.400125020566225,
                       15.824676190630191, 17.045905140628836, 19.365558346037535, 15.292810983243772, 15.333756681057828,
                       15.620051033979871, 16.631757941340428, 18.948786862836954, 20.91796910560631, 15.308301384150049,
                       15.522394576046239, 16.414106221093316, 18.655978799189178, 20.91796910560631, 20.91796910560631,
                       15.33442896030322, 15.680134490745722, 16.86502909075323, 19.72915603626022, 20.91796910560631,
                       20.91796910560631 ]])
num_rows, num_cols = len(alpha_params), len(reg_params)
rmse_val = np.array(rmse_val)
rmse_val.shape = (num_rows, num_cols)
fig, ax = prepare_plot(np.arange(0, num_cols, 1), np.arange(0, num_rows, 1), figsize=(8, 7), hideLabels=True,
                       gridWidth=0.)
ax.set_xticklabels(reg_params), ax.set_yticklabels(alpha_params)
ax.set_xlabel('Regularization Parameter'), ax.set_ylabel('Alpha')
colors = LinearSegmentedColormap.from_list('blue', ['#0022ff', '#000055'], gamma=.2)
image = plt.imshow(rmse_val,interpolation='nearest', aspect='auto',
                   cmap = colors)
display(fig)
# Zoom into the top left
alpha_params_zoom, reg_params_zoom = alpha_params[1:5], reg_params[:4]
rmse_val_zoom = rmse_val[1:5, :4]
num_rows, num_cols = len(alpha_params_zoom), len(reg_params_zoom)
fig, ax = prepare_plot(np.arange(0, num_cols, 1), np.arange(0, num_rows, 1), figsize=(8, 7), hideLabels=True,
                       gridWidth=0.)
ax.set_xticklabels(reg_params_zoom), ax.set_yticklabels(alpha_params_zoom)
ax.set_xlabel('Regularization Parameter'), ax.set_ylabel('Alpha')
colors = LinearSegmentedColormap.from_list('blue', ['#0022ff', '#000055'], gamma=.2)
image = plt.imshow(rmse_val_zoom, interpolation='nearest', aspect='auto',
                   cmap = colors)
display(fig)
'''
######################
# 5 - Feature analysis
######################
'''
# Add 2-way interactions
import itertools
# Creates a new LabeledPoint that includes two-way interactions.
# Note: For features [x, y] the two-way interactions would be:
# [x^2, x*y, y*x, y^2]
# and these would be appended to the original [x, y] feature list.
# Args: lp (LabeledPoint): The label and features for this observation.
# Returns: a new LabeledPoint with the same label as lp, and features
# which include the features from the argument, followed by the
# two-way interaction features.
def two_way_interactions(lp):
  """Return a new LabeledPoint with lp's label and lp's features followed
  by every ordered pairwise product x_i * x_j (including i == j and both
  orders, i.e. the full Cartesian product of the feature indices)."""
  feats = lp.features
  size = len(feats)
  cross_terms = [feats[a] * feats[b] for a in range(size) for b in range(size)]
  return LabeledPoint(lp.label, np.hstack((feats, cross_terms)))
print two_way_interactions(LabeledPoint(0.0, [2, 3]))
# Expand train/val/test feature vectors with the interaction terms.
train_data_interact_df = parsed_train_data_df.map(two_way_interactions).toDF()
val_data_interact_df = parsed_val_data_df.map(two_way_interactions).toDF()
test_data_interact_df = parsed_test_data_df.map(two_way_interactions).toDF()
# Build an interaction model
num_iters = 500
reg = 1e-10
alpha = .2
use_intercept = True
lin_reg = LinearRegression(maxIter=num_iters, regParam=reg, elasticNetParam=alpha, fitIntercept=use_intercept)
model_interact = lin_reg.fit(train_data_interact_df)
preds_and_labels_interact_df = model_interact.transform(val_data_interact_df)
rmse_val_interact = calc_RMSE(preds_and_labels_interact_df)
print ('Validation RMSE:\n\tBaseline = {0:.3f}\n\tLR0 = {1:.3f}\n\tLR1 = {2:.3f}\n\tLRGrid = ' +
       '{3:.3f}\n\tLRInteract = {4:.3f}').format(rmse_val_base, rmse_val_LR0, rmse_val_LR1,
                                                 rmse_val_LR_grid, rmse_val_interact)
# Evaluate the new model on the test dataset.
# Note: we haven't used the test set to evaluate any of our models.
# Therefore our evaluation provides us with an unbiased estimate
# for how our model will perform on new data.
# Otherwise our estimate of RMSE would likely be overly optimistic!
preds_and_labels_test_df = model_interact.transform(test_data_interact_df)
rmse_test_interact = calc_RMSE(preds_and_labels_test_df)
print ('Test RMSE:\n\tBaseline = {0:.3f}\n\tLRInteract = {1:.3f}'
       .format(rmse_test_base, rmse_test_interact))
# Use a pipeline to create the interaction model
from pyspark.ml import Pipeline
from pyspark.ml.feature import PolynomialExpansion
num_iters = 500
reg = 1e-10
alpha = .2
use_intercept = True
# PolynomialExpansion(degree=2) generates the same interaction terms as
# two_way_interactions, but as a reusable pipeline stage.
polynomial_expansion = PolynomialExpansion(degree=2, inputCol='features', outputCol='polyFeatures')
linear_regression = LinearRegression(maxIter=num_iters, regParam=reg, elasticNetParam=alpha,
                                     fitIntercept=use_intercept, featuresCol='polyFeatures')
pipeline = Pipeline(stages=[polynomial_expansion, linear_regression])
pipeline_model = pipeline.fit(parsed_train_data_df)
predictions_df = pipeline_model.transform(parsed_test_data_df)
evaluator = RegressionEvaluator()
rmse_test_pipeline = evaluator.evaluate(predictions_df, {evaluator.metricName: "rmse"})
print('RMSE for test data set using pipelines: {0:.3f}'.format(rmse_test_pipeline)) | {
"repo_name": "adewynter/Tools",
"path": "Notebooks/Spark-ML/Linear Regression.py",
"copies": "1",
"size": "22285",
"license": "mit",
"hash": 7212473767967123000,
"line_mean": 41.2884250474,
"line_max": 122,
"alpha_frac": 0.675072919,
"autogenerated": false,
"ratio": 3.12333566923616,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42984085882361595,
"avg_score": null,
"num_lines": null
} |
# Adrian deWynter, 2016
'''
Adrian deWynter (2016)
Notebook corresponding to an Apache Spark class I once took.
This one is a (very) basic intro to Spark.
'''
#####
# Remember, databricks has a built-in function (display) that isn't available elsewhere.
# This code isn't meant to run anywhere that isn't Spark -- and some databricks functions
# may still be lurking around.
# I removed testing code and most of the stuff that could be used to correctly identify
# this file when someone is looking up the answers.
#
# Index:
# ------
# 1 - Setup
# 2 - Transformations
# 3 - Operations
#####
'''
###########
# 1 - Setup
###########
'''
# Create a Python collection of 10,000 people
from faker import Factory
fake = Factory.create()
# Fixed seed so the generated dataset is reproducible across runs.
fake.seed(4321)
# Each entry consists of last_name, first_name, ssn, job, and age (at least 1)
from pyspark.sql import Row
def fake_entry():
  """Create one synthetic person Row:
  (last_name, first_name, ssn, occupation, age) with age >= 1.

  The faker calls are made in the same order as the original
  (name, ssn, job, date_time) so the seeded RNG stream is unchanged.
  """
  parts = fake.name().split()
  ssn = fake.ssn()
  job = fake.job()
  age = abs(2016 - fake.date_time().year) + 1
  return Row(parts[1], parts[0], ssn, job, age)
# Create a helper function to call a function repeatedly
def repeat(times, func, *args, **kwargs):
  """Yield the result of calling ``func(*args, **kwargs)`` `times` times.

  Uses range() instead of xrange(): xrange was removed in Python 3 and
  would raise NameError there, while iteration behavior is identical on
  Python 2.
  """
  for _ in range(times):
    yield func(*args, **kwargs)
data = list(repeat(10000, fake_entry))
# NOTE(review): `createdf` / `registerdfAsTable` are not SQLContext methods --
# presumably `createDataFrame` / `registerDataFrameAsTable` before scrubbing.
dataDF = sqlContext.createdf(data, ('last_name', 'first_name', 'ssn', 'occupation', 'age'))
dataDF.printSchema()
# Register the newly created df as a named table.
sqlContext.registerdfAsTable(dataDF, 'df')
# How many partitions will the df be split into?
dataDF.rdd.getNumPartitions()
'''
#####################
# 2 - Transformations
#####################
'''
newDF = dataDF.distinct().select('*')
newDF.explain(True)
# Same columns, but with every age decremented by one.
subDF = dataDF.select('last_name', 'first_name', 'ssn', 'occupation', (dataDF.age - 1).alias('age'))
# Look at the query plan.
subDF.explain(True)
# Collect the data
results = subDF.collect()
print results
subDF.show()
subDF.show(n=30, truncate=False)
print dataDF.count()
print subDF.count()
# Filter
filteredDF = subDF.filter(subDF.age < 10)
filteredDF.show(truncate=False)
filteredDF.count()
# Lambdas and UDFs
from pyspark.sql.types import BooleanType
less_ten = udf(lambda s: s < 10, BooleanType())
lambdaDF = subDF.filter(less_ten(subDF.age))
lambdaDF.show()
lambdaDF.count()
# Let's collect the even values less than 10
even = udf(lambda s: s % 2 == 0, BooleanType())
evenDF = lambdaDF.filter(even(lambdaDF.age))
evenDF.show()
evenDF.count()
print "first: {0}\n".format(filteredDF.first())
print "Four of them: {0}\n".format(filteredDF.take(4))
# Get the five oldest people in the list. To do that, sort by age in descending order.
display(dataDF.orderBy(dataDF.age.desc()).take(5))
display(dataDF.orderBy('age').take(5))
print dataDF.count()
print dataDF.distinct().count()
# Distinct
tempDF = sqlContext.createdf([("Joe", 1), ("Joe", 1), ("Anna", 15), ("Anna", 12), ("Ravi", 5)], ('name', 'score'))
tempDF.show()
tempDF.distinct().show()
# Drop duplicates
print dataDF.count()
print dataDF.dropDuplicates(['first_name', 'last_name']).count()
dataDF.drop('occupation').drop('age').show()
# Aggregation functions typically create a new column and return a new df.
dataDF.groupBy('occupation').count().show(truncate=False)
dataDF.groupBy().avg('age').show(truncate=False)
# We can also use groupBy() to do other useful aggregations:
print "Maximum age: {0}".format(dataDF.groupBy().max('age').first()[0])
print "Minimum age: {0}".format(dataDF.groupBy().min('age').first()[0])
'''
################
# 3 - Operations
################
'''
# When analyzing data, the sample() transformation is often quite useful.
# Returns a new df with a random sample of elements from the dataset.
# withReplacement argument - when withReplacement=True you can get the same item back multiple times.
# fraction - specifies the fraction elements in the dataset you want to return.
# seed - allows you to specify a seed value, so that reproducible results can be obtained.
sampledDF = dataDF.sample(withReplacement=False, fraction=0.10)
print sampledDF.count()
sampledDF.show()
print dataDF.sample(withReplacement=False, fraction=0.05).count()
# Cache the df
filteredDF.cache()
# Trigger an action
print filteredDF.count()
# Check if it is cached
print filteredDF.is_cached
# If we are done with the df we can unpersist it so that its memory can be reclaimed
filteredDF.unpersist()
# Check if it is cached
print filteredDF.is_cached
# Cleaner code through lambda use
myUDF = udf(lambda v: v < 10)
subDF.filter(myUDF(subDF.age) == True)
# Final version
from pyspark.sql.functions import *
(dataDF
 .filter(dataDF.age > 20)
 .select(concat(dataDF.first_name, lit(' '), dataDF.last_name), dataDF.occupation)
 .show(truncate=False)
) | {
"repo_name": "adewynter/Tools",
"path": "Notebooks/Spark/Intro to Spark.py",
"copies": "1",
"size": "4665",
"license": "mit",
"hash": 4513615648289129500,
"line_mean": 29.1032258065,
"line_max": 114,
"alpha_frac": 0.7026795284,
"autogenerated": false,
"ratio": 3.221685082872928,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44243646112729285,
"avg_score": null,
"num_lines": null
} |
# Adrian deWynter, 2016
'''
Adrian deWynter (2016)
Notebook corresponding to an Apache Spark class I once took.
This one pertains to analysis of logs and traffic to a website.
'''
#####
# Remember, databricks has a built-in function (display) that isn't available elsewhere.
# This code isn't meant to run anywhere that isn't Spark -- and some databricks functions
# may still be lurking around.
# I removed testing code and most of the stuff that could be used to correctly identify
# this file when someone is looking up the answers.
#
# Index:
# ------
# 1 - Cleanup
# 2 - Traffic Analysis
# 3 - 404 Analysis
#####
import re
import datetime
'''
#############
# 1 - Cleanup
#############
'''
# Quick test of the regular expression library
# (lookbehind: match 'def' only when preceded by 'abc').
m = re.search('(?<=abc)def', 'abcdef')
# Bare expression: its value only shows in a notebook cell, not a script.
m.group(0)
# Quick test of the datetime library
print 'This was last run on: {0}'.format(datetime.datetime.now())
# List sqlContext's attributes
dir(sqlContext)
# Use help to obtain more detailed information
help(sqlContext)
# Help can be used on any Python object
help(map)
# `help(Test)` removed: `Test` is not defined anywhere in this scrubbed
# notebook (it referenced the course's removed testing helper) and would
# raise NameError at runtime.
# Specify path to downloaded log file
import sys
import os
log_file_path = 'dbfs:/' + os.path.join('databricks-datasets', 'cs100', 'lab2', 'data-001', 'apache.access.log.PROJECT')
# One string column named 'value' per raw log line.
base_df = sqlContext.read.text(log_file_path)
# Let's look at the schema
base_df.printSchema()
base_df.show(truncate=False)
# If you're familiar with web servers at all, you'll recognize that this is in Common Log Format:
#
# _remotehost rfc931 authuser [date] "request" status bytes_
#
# | field | meaning |
# | ------------- | ---------------------------------------------------------------------- |
# | _remotehost_ | Remote hostname (or IP number if DNS hostname is not available). |
# | _rfc931_ | The remote logname of the user. We don't really care about this field. |
# | _authuser_ | The username of the remote user, as authenticated by the HTTP server. |
# | _[date]_ | The date and time of the request. |
# | _"request"_ | The request, exactly as it came from the browser or client. |
# | _status_ | The HTTP status code the server sent back to the client. |
# | _bytes_ | The number of bytes (Content-Length) transferred to the client. |
#
#
# Next, we have to parse it into individual columns. We'll use the special built-in regexp_extract()
# function to do the parsing.
from pyspark.sql.functions import split, regexp_extract
split_df = base_df.select(regexp_extract('value', r'^([^\s]+\s)', 1).alias('host'),
                          regexp_extract('value', r'^.*\[(\d\d/\w{3}/\d{4}:\d{2}:\d{2}:\d{2} -\d{4})]', 1).alias('timestamp'),
                          regexp_extract('value', r'^.*"\w+\s+([^\s]+)\s+HTTP.*"', 1).alias('path'),
                          regexp_extract('value', r'^.*"\s+([^\s]+)', 1).cast('integer').alias('status'),
                          regexp_extract('value', r'^.*\s+(\d+)$', 1).cast('integer').alias('content_size'))
split_df.show(truncate=False)
# First, let's verify that there are no null rows in the original data set.
base_df.filter(base_df['value'].isNull()).count()
# A row is "bad" if any parsed field failed to extract / cast.
bad_rows_df = split_df.filter(split_df['host'].isNull() |
                              split_df['timestamp'].isNull() |
                              split_df['path'].isNull() |
                              split_df['status'].isNull() |
                              split_df['content_size'].isNull())
bad_rows_df.count()
# Not good. We have some null values. Something went wrong. Which columns are affected?
# NOTE: this `sum` is pyspark's column sum and shadows the builtin.
from pyspark.sql.functions import col, sum
def count_null(col_name):
  """Build an aggregate expression counting NULLs in `col_name`,
  aliased back to the column's own name.

  Relies on pyspark's `col` and `sum` imported above (the latter
  shadows the builtin sum).
  """
  null_as_int = col(col_name).isNull().cast('integer')
  return sum(null_as_int).alias(col_name)
# Build up a list of column expressions, one per column.
exprs = []
for col_name in split_df.columns:
  exprs.append(count_null(col_name))
# Run the aggregation. The *exprs converts the list of expressions into
# variable function arguments.
split_df.agg(*exprs).show()
# Our original parsing regular expression for that column was:
# regexp_extract('value', r'^.*\s+(\d+)$', 1).cast('integer').alias('content_size')
# Let's see if there are any lines that do not end with one or more digits.
bad_content_size_df = base_df.filter(~ base_df['value'].rlike(r'\d+$'))
bad_content_size_df.count()
from pyspark.sql.functions import lit, concat
# Append '*' to make trailing whitespace visible in the output.
bad_content_size_df.select(concat(bad_content_size_df['value'], lit('*'))).show(truncate=False)
# Fix the rows with null content_size
# The easiest solution is to replace the null values in split_df with 0.
cleaned_df = split_df.na.fill({'content_size': 0})
# Ensure that there are no nulls left.
exprs = []
for col_name in cleaned_df.columns:
  exprs.append(count_null(col_name))
cleaned_df.agg(*exprs).show()
# Parse the timestamp field into an actual timestamp. The Common Log Format time is somewhat non-standard.
# Map Common Log Format three-letter month abbreviations to month numbers.
month_map = {
  'Jan': 1, 'Feb': 2, 'Mar': 3, 'Apr': 4, 'May': 5, 'Jun': 6, 'Jul': 7,
  'Aug': 8, 'Sep': 9, 'Oct': 10, 'Nov': 11, 'Dec': 12
}
def parse_clf_time(s):
  """Convert an Apache CLF timestamp [dd/Mmm/yyyy:hh:mm:ss (+/-)zzzz]
  into a 'yyyy-MM-dd hh:mm:ss' string suitable for CAST('timestamp').

  The numeric timezone offset at the end of `s` is ignored.
  """
  year = int(s[7:11])
  month = month_map[s[3:6]]
  day = int(s[0:2])
  hour = int(s[12:14])
  minute = int(s[15:17])
  second = int(s[18:20])
  return "{0:04d}-{1:02d}-{2:02d} {3:02d}:{4:02d}:{5:02d}".format(
    year, month, day, hour, minute, second)
# Wrap the parser as a UDF and replace the raw string column with a real
# timestamp column named 'time'.
u_parse_time = udf(parse_clf_time)
logs_df = cleaned_df.select('*', u_parse_time(cleaned_df['timestamp']).cast('timestamp').alias('time')).drop('timestamp')
total_log_entries = logs_df.count()
logs_df.printSchema()
display(logs_df)
logs_df.cache()
# Calculate statistics based on the content size.
content_size_summary_df = logs_df.describe(['content_size'])
content_size_summary_df.show()
# Alternatively, we can use SQL to directly calculate these statistics.
from pyspark.sql import functions as sqlFunctions
content_size_stats = (logs_df
                      .agg(sqlFunctions.min(logs_df['content_size']),
                           sqlFunctions.avg(logs_df['content_size']),
                           sqlFunctions.max(logs_df['content_size']))
                      .first())
print 'Using SQL functions:'
print 'Content Size Avg: {1:,.2f}; Min: {0:.2f}; Max: {2:,.0f}'.format(*content_size_stats)
# Count of log lines per HTTP status code.
status_to_count_df =(logs_df
                     .groupBy('status')
                     .count()
                     .sort('status')
                     .cache())
status_to_count_length = status_to_count_df.count()
print 'Found %d response codes' % status_to_count_length
status_to_count_df.show()
# Now, let's visualize the results from the last example.
display(status_to_count_df)
# Log-scale the counts so rare status codes remain visible.
log_status_to_count_df = status_to_count_df.withColumn('log(count)', sqlFunctions.log(status_to_count_df['count']))
display(log_status_to_count_df)
# We might want to make more adjustments.
from spark_notebook_helpers import prepareSubplot, np, plt, cm
data = log_status_to_count_df.drop('count').collect()
x, y = zip(*data)
index = np.arange(len(x))
bar_width = 0.7
colorMap = 'Accent'
cmap = cm.get_cmap(colorMap)
fig, ax = prepareSubplot(np.arange(0, 6, 1), np.arange(0, 14, 2))
plt.bar(index, y, width=bar_width, color=cmap(0))
plt.xticks(index + bar_width/2.0, x)
display(fig)
'''
######################
# 2 - Traffic Analysis
######################
'''
# Any hosts that has accessed the server more than 10 times.
host_sum_df =(logs_df
              .groupBy('host')
              .count())
host_more_than_10_df = (host_sum_df
                        .filter(host_sum_df['count'] > 10)
                        .select(host_sum_df['host']))
print 'Any 20 hosts that have accessed more then 10 times:\n'
host_more_than_10_df.show(truncate=False)
# Now, let's visualize the number of hits to paths (URIs) in the log.
# We previously imported the prepareSubplot function and the matplotlib.pyplot library,
# so we do not need to import them again.
paths_df = (logs_df
            .groupBy('path')
            .count()
            .sort('count', ascending=False))
paths_counts = (paths_df
                .select('path', 'count')
                .map(lambda r: (r[0], r[1]))
                .collect())
paths, counts = zip(*paths_counts)
colorMap = 'Accent'
cmap = cm.get_cmap(colorMap)
index = np.arange(1000)
# Plot only the 1000 most-hit paths.
fig, ax = prepareSubplot(np.arange(0, 1000, 100), np.arange(0, 70000, 10000))
plt.xlabel('Paths')
plt.ylabel('Number of Hits')
plt.plot(index, counts[:1000], color=cmap(0), linewidth=3)
plt.axhline(linewidth=2, color='#999999')
display(fig)
display(paths_df)
# DataFrame containing all accesses that did not return a code 200
from pyspark.sql.functions import desc
not200DF = logs_df.filter(logs_df['status'] != 200)
status_to_count_length = status_to_count_df.count()
print 'Found %d response codes' % status_to_count_length
status_to_count_df.show()
# Sorted DataFrame containing all paths and the number of times they were accessed with non-200 return code
logs_sum_df = not200DF.groupBy('path').count().sort('count', ascending=False)
print 'Top Ten failed URLs:'
logs_sum_df.show(10, False)
# How many unique hosts are there in the entire log?
unique_host_count = (logs_df.select('host').distinct()).count()
print 'Unique hosts: {0}'.format(unique_host_count)
# Let's determine the number of unique hosts in the entire log on a day-by-day basis.
#
# | column | explanation |
# | ------- | -------------------------------------------------- |
# | day | the day of the month |
# | count | the number of unique requesting hosts for that day |
from pyspark.sql.functions import dayofmonth
day_to_host_pair_df = logs_df.select('host', dayofmonth('time').alias('day'))
day_group_hosts_df = day_to_host_pair_df.drop_duplicates().groupBy('day').avg()
# Distinct (day, host) pairs counted per day = unique hosts per day.
daily_hosts_df = logs_df.select(dayofmonth('time').alias('day'), 'host').drop_duplicates().groupBy('day').count().cache()
print 'Unique hosts per day:'
daily_hosts_df.show(30, False)
# Plot a line graph of the unique hosts requests by day.
days_with_hosts = logs_df.select(dayofmonth('time').alias('day')).drop_duplicates().collect()
temp = logs_df.select(dayofmonth('time').alias('day'), 'host').drop_duplicates().groupBy('day').count()
hosts = temp.select('count').alias('host').collect()
# Unwrap the single-field Row objects into plain Python values.
# (List comprehensions replace the verbose ans=[] / append / rebind
# accumulator pattern; behavior is identical.)
hosts = [row[0] for row in hosts]
days_with_hosts = [row[0] for row in days_with_hosts]
print(days_with_hosts)
print(hosts)
# Line plot: unique requesting hosts per day of the month.
fig, ax = prepareSubplot(np.arange(0, 30, 5), np.arange(0, 5000, 1000))
colorMap = 'Dark2'
cmap = cm.get_cmap(colorMap)
plt.plot(days_with_hosts, hosts, color=cmap(0), linewidth=3)
plt.axis([0, max(days_with_hosts), 0, max(hosts)+500])
plt.xlabel('Day')
plt.ylabel('Hosts')
plt.axhline(linewidth=3, color='#999999')
plt.axvline(linewidth=2, color='#999999')
display(fig)
display(daily_hosts_df)
# Determine the average number of requests on a day-by-day basis.
total_req_per_day_df = logs_df.select(dayofmonth('time').alias('day')).groupBy('day').count()
# Join total requests per day with unique hosts per day, then divide.
avg_daily_req_per_host_df = (total_req_per_day_df.alias("temp")
                             .join(daily_hosts_df.alias("host"), ["day"])
                             .select(col("day"), (col("temp.count") / col("host.count")).alias("avg_reqs_per_host_per_day"))).cache()
print 'Average number of daily requests per Hosts is:\n'
avg_daily_req_per_host_df.show()
# Plot a line graph of the average daily requests per unique host by day.
days_with_avg = (avg_daily_req_per_host_df.select('day')).collect()
avgs = (avg_daily_req_per_host_df.select('avg_reqs_per_host_per_day')).collect()
# Unwrap the single-field Row objects into plain Python values.
# (List comprehensions replace the verbose ans=[] / append / rebind
# accumulator pattern; behavior is identical.)
days_with_avg = [row[0] for row in days_with_avg]
avgs = [row[0] for row in avgs]
print(days_with_avg)
print(avgs)
# Line plot: average requests per unique host, per day of the month.
fig, ax = prepareSubplot(np.arange(0, 20, 5), np.arange(0, 16, 2))
colorMap = 'Set3'
cmap = cm.get_cmap(colorMap)
plt.plot(days_with_avg, avgs, color=cmap(0), linewidth=3)
plt.axis([0, max(days_with_avg), 0, max(avgs)+2])
plt.xlabel('Day')
plt.ylabel('Average')
plt.axhline(linewidth=3, color='#999999')
plt.axvline(linewidth=2, color='#999999')
display(fig)
display(avg_daily_req_per_host_df)
'''
##################
# 3 - 404 Analysis
##################
'''
# Create a df containing only log records with a 404 status code.
not_found_df = logs_df.select('status').filter(logs_df['status'] == 404).cache()
# NOTE: Python-2-only parse -- the statement is `print` of
# ('...').format(...); under Python 3 it would call .format on None.
print('Found {0} 404 URLs').format(not_found_df.count())
# Print out a list up to 40 distinct paths that generate 404 errors.
not_found_paths_df = logs_df.select('status', 'path').filter(logs_df['status'] == 404)
unique_not_found_paths_df = not_found_paths_df.select('path').drop_duplicates()
print '404 URLS:\n'
unique_not_found_paths_df.show(n=40, truncate=False)
# Print out a list of the top twenty paths that generate the most 404 errors.
top_20_not_found_df = not_found_paths_df.select('path').groupBy('path').count().sort('count', ascending=False)
print 'Top Twenty 404 URLs:\n'
top_20_not_found_df.show(n=20, truncate=False)
# Print out a list of the top twenty-five hosts that generate the most 404 errors.
hosts_404_count_df = logs_df.select('status', 'host').filter(logs_df['status'] == 404).groupBy('host').count().sort('count', ascending=False)
print 'Top 25 hosts that generated errors:\n'
hosts_404_count_df.show(n=25, truncate=False)
# Break down the 404 requests by day and get the daily counts sorted by day in another dataframe
errors_by_date_sorted_df = logs_df.select('status', dayofmonth('time').alias('day')).filter(logs_df['status'] == 404).groupBy('day').count().sort('day').cache()
print '404 Errors by day:\n'
errors_by_date_sorted_df.show()
# Use matplotlib to plot a line or bar graph of the 404 response codes by day.
days_with_errors_404 = errors_by_date_sorted_df.select('day').collect()
errors_404_by_day = errors_by_date_sorted_df.select('count').collect()
# Unwrap the single-field Row objects into plain Python values.
# (List comprehensions replace the verbose ans=[] / append / rebind
# accumulator pattern; behavior is identical.)
errors_404_by_day = [row[0] for row in errors_404_by_day]
days_with_errors_404 = [row[0] for row in days_with_errors_404]
print days_with_errors_404
print errors_404_by_day
# Line plot: 404 responses per day of the month.
fig, ax = prepareSubplot(np.arange(0, 20, 5), np.arange(0, 600, 100))
colorMap = 'rainbow'
cmap = cm.get_cmap(colorMap)
plt.plot(days_with_errors_404, errors_404_by_day, color=cmap(0), linewidth=3)
plt.axis([0, max(days_with_errors_404), 0, max(errors_404_by_day)])
plt.xlabel('Day')
plt.ylabel('404 Errors')
plt.axhline(linewidth=3, color='#999999')
plt.axvline(linewidth=2, color='#999999')
display(fig)
# Plot a line or bar graph of the 404 response codes by day.
display(errors_by_date_sorted_df)
# Top five days by 404 count.
top_err_date_df = errors_by_date_sorted_df.sort('count', ascending=False)
print 'Top Five Dates for 404 Requests:\n'
top_err_date_df.show(5)
# Using not_found_df create a DataFrame containing the number of requests that had a 404 return code for each hour of the day (midnight starts at 0).
from pyspark.sql.functions import hour
hour_records_sorted_df = logs_df.select('status', hour('time').alias('hour')).filter(logs_df['status'] == 404).groupBy('hour').count().sort('hour').cache()
print 'Top hours for 404 requests:\n'
hour_records_sorted_df.show(24)
# Plot a line or bar graph of the 404 response codes by hour.
hours_with_not_found = hour_records_sorted_df.select('hour').collect()
not_found_counts_per_hour = hour_records_sorted_df.select('count').collect()
ans = []
for i in not_found_counts_per_hour:
ans.append(i[0])
not_found_counts_per_hour = ans
ans = []
for i in hours_with_not_found:
ans.append(i[0])
hours_with_not_found = ans
print hours_with_not_found
print not_found_counts_per_hour
fig, ax = prepareSubplot(np.arange(0, 25, 5), np.arange(0, 500, 50))
colorMap = 'seismic'
cmap = cm.get_cmap(colorMap)
plt.plot(hours_with_not_found, not_found_counts_per_hour, color=cmap(0), linewidth=3)
plt.axis([0, max(hours_with_not_found), 0, max(not_found_counts_per_hour)])
plt.xlabel('Hour')
plt.ylabel('404 Errors')
plt.axhline(linewidth=3, color='#999999')
plt.axvline(linewidth=2, color='#999999')
display(fig)
display(hour_records_sorted_df) | {
"repo_name": "adewynter/Tools",
"path": "Notebooks/Spark/Traffic analysis.py",
"copies": "1",
"size": "16244",
"license": "mit",
"hash": -6202390171504634000,
"line_mean": 35.9204545455,
"line_max": 160,
"alpha_frac": 0.6546417139,
"autogenerated": false,
"ratio": 3.125048095421316,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42796898093213154,
"avg_score": null,
"num_lines": null
} |
# Adrian deWynter, 2016
'''
Adrian deWynter (2016)
Notebook corresponding to an Apache Spark class I once took.
This one pertains to analysis of texts, more specifically word count.
'''
#####
# Remember, databricks has a built-in function (display) that isn't available elsewhere.
# This code isn't meant to run anywhere that isn't Spark -- and some databricks functions
# may still be hanging around.
# I removed testing code and most of the stuff that could be used to correctly identify
# this file when someone is looking up the answers.
#
# Index:
# ------
# 1 - Basic string operations
# 2 - DF operations with words
# 3 - Shakespearean Analysis
#####
'''
#############################
# 1 - Basic string operations
#############################
'''
# Perform an operation that adds an 's' to each word in a df
# NOTE(review): `createdf` is not a real SQLContext method (the author obfuscated
# names); presumably this was `createDataFrame` -- confirm before running.
wordsDF = sqlContext.createdf([('cat',), ('elephant',), ('rat',), ('rat',), ('cat', )], ['word'])
wordsDF.show()
print type(wordsDF)
wordsDF.printSchema()
from pyspark.sql.functions import lit, concat
# Append a literal 's' to every word; alias keeps the column name 'word'.
pluralDF = wordsDF.select(concat(wordsDF.word, lit('s')).alias('word'))
pluralDF.show()
# Use the SQL length function to find the number of characters in each word
from pyspark.sql.functions import length
pluralLengthsDF = pluralDF.select(length('word'))
pluralLengthsDF.show()
'''
##############################
# 2 - DF operations with words
##############################
'''
# Find the counts of words and the number of times that these words occur.
wordCountsDF = (wordsDF.groupBy(wordsDF.word).count())
wordCountsDF.show()
# Calculate the number of unique words in wordsDF
from spark_notebook_helpers import printdfs
#This function returns all the dfs in the notebook and their corresponding column names.
printdfs(True)
# NOTE(review): wordCountsDF already has one row per distinct word, so this
# second groupBy/count is redundant -- counting wordCountsDF directly would do.
uniqueWordsCount = wordCountsDF.groupBy(wordCountsDF.word).count()
uniqueWordsCount = uniqueWordsCount.count()
print uniqueWordsCount
# Find the mean number of occurrences of words in wordCountsDF.
averageCount = (wordCountsDF.groupBy().mean('count')).head()[0]
print averageCount
# Creates a df with word counts.
# Args: wordListDF (df of str): A df consisting of one string column called 'word'.
# Returns df of (str, int): A df containing 'word' and 'count' columns.
def wordCount(wordListDF):
    return wordListDF.groupBy(wordListDF.word).count()
wordCount(wordsDF).show()
from pyspark.sql.functions import regexp_replace, trim, col, lower
# Removes punctuation, changes to lower case, and strips leading and trailing spaces.
# Args: a Column containing a sentence.
# Returns a Column named 'sentence' with clean-up operations applied.
def removePunctuation(column):
    """Strip punctuation/underscores, lower-case, and trim a sentence Column.

    Returns a Column aliased 'sentence' with everything outside [0-9a-zA-Z ]
    removed, surrounding whitespace trimmed, and the text lower-cased.
    """
    cleaned = regexp_replace(column, "[^0-9a-zA-Z ]", "")
    return lower(trim(cleaned)).alias("sentence")
# Demo removePunctuation on a tiny hand-built DataFrame.
sentenceDF = sqlContext.createdf([('Hi, you!',),
                                  (' No under_score!',),
                                  (' * Remove punctuation then spaces * ',)], ['sentence'])
sentenceDF.show(truncate=False)
(sentenceDF
 .select(removePunctuation(col('sentence')))
 .show(truncate=False))
'''
#############################
# 1 - Shakesepearian Analysis
#############################
'''
# Use http://www.gutenberg.org/ebooks/100
# NOTE(review): fileName was blanked out by the author; point it at the
# Gutenberg complete-works text before running.
fileName = ""
shakespeareDF = sqlContext.read.text(fileName).select(removePunctuation(col('value')))
shakespeareDF.show(15, truncate=False)
# Split each 'sentence' in the df by its spaces
# Transform from a df that contains lists of words into a df with each word in its own row.
# Remove the rows that contain ''.
from pyspark.sql.functions import split, explode
shakeWordsDF = shakespeareDF.select(split(shakespeareDF.sentence, ' ').alias("sentence"))
shakeWordsDF = shakeWordsDF.select(explode(shakeWordsDF.sentence).alias("word"))
shakeWordsDF = shakeWordsDF.filter("word != ''")
shakeWordsDF.show()
shakeWordsDFCount = shakeWordsDF.count()
print shakeWordsDFCount
# Apply the wordCount() function to produce a list of word counts.
from pyspark.sql.functions import desc
topWordsAndCountsDF = wordCount(shakeWordsDF).orderBy("count", ascending=False)
topWordsAndCountsDF.show() | {
"repo_name": "adewynter/Tools",
"path": "Notebooks/Spark/Text analysis.py",
"copies": "1",
"size": "4091",
"license": "mit",
"hash": 3825622731199805000,
"line_mean": 33.9743589744,
"line_max": 104,
"alpha_frac": 0.6968956245,
"autogenerated": false,
"ratio": 3.715712988192552,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.986265843394483,
"avg_score": 0.009990035749544492,
"num_lines": 117
} |
# Adrian deWynter, 2016
def BFS(adj, cost, s, V):
    """Breadth-first search from s over the directed graph `adj` and print,
    for every node 1..V except s, its distance from s (each edge costs 6),
    or -1 if the node is unreachable.

    Fixes over the original:
    - the `else` branch relaxed a stale loop variable `n` left over from a
      previous iteration, which could corrupt distances;
    - nodes that appear as edge endpoints but are unreachable were printed
      as 0 (their initial `cost` value) instead of -1.
    The original's side effect of writing computed distances back into
    `cost` is preserved for reached nodes.
    """
    dist = {s: 0}
    queue = [s]
    while queue:
        u = queue.pop(0)
        for n in adj.get(u, []):
            if n not in dist:
                # Unweighted BFS: first visit is the shortest path.
                dist[n] = dist[u] + 6
                queue.append(n)
    # Preserve the original's in-place update of the caller's cost dict.
    for node in dist:
        cost[node] = dist[node]
    parts = []
    for k in range(1, V + 1):
        if k == s:
            continue
        parts.append(str(dist.get(k, -1)))
    print(' '.join(parts))
# Driver: T test cases; each gives V vertices, E directed edges, then a
# source s.  Python 2 only (input()/raw_input()).
T = input()
for i in range(0, T):
    V, E = tuple(map(int, raw_input().strip().split(' ')))
    G = []  # NOTE(review): unused
    adj = {}
    cost = {}
    vs = 0  # NOTE(review): unused
    # NOTE(review): this inner loop reuses the outer loop variable `i`;
    # harmless here, but fragile.  Edges are stored directed (u -> v only).
    for i in range(0, E):
        u, v = tuple(map(int, raw_input().strip().split(' ')))
        if u in adj:
            adj[u].append(v)
        else:
            adj[u] = [v]
        if u not in cost:
            cost[u] = 0
        if v not in cost:
            cost[v] = 0
    # Nodes that appear in no edge get sentinel distance -1 (unreachable).
    for i in range(0, V):
        if i+1 in cost:
            continue
        cost[i + 1] = -1
    s = input()
    BFS(adj, cost, s, V)
| {
"repo_name": "adewynter/Tools",
"path": "Algorithms/graphAlgorithms/BFSshortestreach.py",
"copies": "1",
"size": "1352",
"license": "mit",
"hash": -4483557394305601000,
"line_mean": 20.140625,
"line_max": 62,
"alpha_frac": 0.3150887574,
"autogenerated": false,
"ratio": 3.5116883116883115,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9135270658831901,
"avg_score": 0.038301282051282054,
"num_lines": 64
} |
# Adrian deWynter, 2016
'''Python definitions of factoring algorithms, and prime list generation.'''
def factor1(n):
    """Return the list of prime factors of n (with multiplicity), by trial
    division with every integer d >= 2.

    Uses floor division: identical to `/` on Python 2 ints, but under
    Python 3 the original `n = n/d` silently turned n into a float.
    """
    d = 2
    factors = [ ] #empty list
    while n > 1:
        if n % d == 0:
            factors.append(d)
            n = n // d
        else:
            d = d + 1
    return factors
# The next version is slightly improved, because it only checks odd numbers
# for possible prime factors, after first checking 2.
def factor2(n):
    """Return the list of prime factors of n (with multiplicity).

    Like factor1 but after testing 2 only odd divisors are tried.
    Floor division keeps n an int on Python 3 (the original `/` did not).
    """
    d = 2
    factors = [ ]
    while n > 1:
        if n % d == 0:
            factors.append(d)
            n = n // d
        else:
            d += 1 + d % 2   # 2 -> 3, odd -> odd + 2
    return factors
# The conditions n > 1 above are equivalent to n >= d, since the last divisor
# found is when n equals d.
# The next version is improved even more, because it checks only
# up to the square root of the input for possible factors:
def factor(n, startFrom=2):
    """Return a list of prime factors of n (with multiplicity), knowing the
    minimum possible factor is >= startFrom.

    Only divisors up to sqrt(n) are tried; whatever remains (> 1) is prime
    and appended last.  Floor division keeps n an int on Python 3 (the
    original `n = n/d` silently produced floats there).
    """
    if n <= 1: return [ ]
    d = startFrom
    factors = [ ]
    while n >= d*d:
        if n % d == 0:
            factors.append(d)
            n = n // d
        else:
            d += 1 + d % 2   # 2 -> 3, odd -> odd + 2
    factors.append(n)
    return factors
def countConsecutiveSame(seq):
    '''Given a sequence, return a list of (item, consecutive_repetitions).

    Uses itertools.groupby (run-length encoding) instead of the manual scan
    with a NotImplemented sentinel.  This also fixes the original's wrong
    [(NotImplemented, 0)] result for empty-but-truthy iterables such as an
    exhausted generator.
    '''
    from itertools import groupby
    return [(item, sum(1 for _ in run)) for item, run in groupby(seq)]
def factorMultiplicity(n):
    # Return n's factorization as (prime, exponent) pairs: factor() yields
    # primes in non-decreasing order, so run-length encoding gives exponents.
    return countConsecutiveSame(factor(n))
def listPrimes(n):
    '''Return a list of all primes < n using the Sieve of Eratosthenes.

    Even indices above 2 are never consulted, so only odd candidates are
    tracked; composites are crossed out starting from p*p.
    '''
    if n <= 2:
        return []
    is_candidate = [True] * n
    primes = [2]
    for p in range(3, n, 2):          # odd numbers only
        if is_candidate[p]:           # survived every smaller prime -> prime
            primes.append(p)
            for multiple in range(p * p, n, p):
                is_candidate[multiple] = False
    return primes
# If you have precalculated a list of all prime factors conceivable
# (for instance using primeSieve), then you can be even more efficient by
# only using prime divisors, rather than all odd ones:
def factorGivenPrimes(n, primes):
    """Return a list of prime factors of n (with multiplicity), given an
    initial part of the sequence of all primes in order.

    If the supplied primes are exhausted before n is reduced below the
    square of the last prime, fall back to trial division via factor().
    Floor division keeps n an int on Python 3 (the original `n /= p`
    produced floats there).
    """
    p = 0   # in case primes seq empty
    factors = []
    for p in primes:
        while n % p == 0:
            n //= p
            factors.append(p)
        if n < p*p:
            # No factor of n can be >= p now except n itself.
            if n > 1:
                factors.append(n)
            return factors
    return factors + factor(n, p+2)   # revert to brute force if not enough primes
# Demo / smoke test (Python 2 print statements).
if __name__ == '__main__':
    print 'Primes < 50:\n', listPrimes(50)
    print 'Factorizations:'
    for x in range(15):
        print x, ':', factorMultiplicity(x)
for x in range(185, 200):
print x, ':', factorGivenPrimes(x, listPrimes(8)) | {
"repo_name": "adewynter/Tools",
"path": "Algorithms/numberTheory/factoring.py",
"copies": "1",
"size": "3409",
"license": "mit",
"hash": -5712238215096947000,
"line_mean": 29.7207207207,
"line_max": 80,
"alpha_frac": 0.5734819595,
"autogenerated": false,
"ratio": 3.6035940803382664,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9613957803670646,
"avg_score": 0.012623647233524044,
"num_lines": 111
} |
# Adrian deWynter, 2016
# This dataset has call records for 10 users tracked over the course of 3 years.
# Use K Means to find out where the users live, work, and commute.
import pandas as pd
from datetime import timedelta
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
import matplotlib
# People are likely to behave differently on weekends:
#
# On Weekends:
# 1. People probably don't go into work
# 2. They probably sleep in late on Saturday
# 3. They probably run a bunch of random errands, since they couldn't during the week
# 4. They should be home, at least during the very late hours, e.g. 1-4 AM
#
# On Weekdays:
# 1. People probably are at work during normal working hours
# 2. They probably are at home in the early morning and during the late night
# 3. They probably spend time commuting between work and home everyday
matplotlib.style.use('ggplot')
def showandtell(title=None):
    """Render the current matplotlib figure and terminate the script.

    If `title` is given, the figure is first saved to '<title>.png'.
    (PEP 8: compare against None with `is not`, not `!=`.)
    """
    if title is not None: plt.savefig(title + ".png", bbox_inches='tight', dpi=300)
    plt.show()
    exit()
# Print a summary of a fitted KMeans model: total inertia, then each
# cluster's centroid coordinates and number of assigned samples.
# Python 2 print statements.
def clusterInfo(model):
    print "Cluster Analysis Inertia: ", model.inertia_
    print '------------------------------------------'
    for i in range(len(model.cluster_centers_)):
        print "\n Cluster ", i
        print " Centroid ", model.cluster_centers_[i]
        print " #Samples ", (model.labels_==i).sum()
# Find the cluster with the least number of attached nodes
def clusterWithFewestSamples(model):
# Ensure there's at least on cluster
minSamples = len(model.labels_)
minCluster = 0
for i in range(len(model.cluster_centers_)):
if minSamples > (model.labels_==i).sum():
minCluster = i
minSamples = (model.labels_==i).sum()
print "\n Cluster With Fewest Samples: ", minCluster
return (model.labels_==minCluster)
# Since both Lat and Lon are (approximately) on the same scale,
# no feature scaling is required.
def doKMeans(data, clusters=0):
    """Fit K-Means on the tower lat/lon columns of `data` and return the model.

    Lat and Lon are on comparable scales, so no feature scaling is applied.
    """
    coords = data[['TowerLat', 'TowerLon']]
    model = KMeans(n_clusters=clusters)
    model.fit(coords)
    model.predict(coords)
    return model
# Load the call-detail records and normalise date/time columns.
df = pd.read_csv('Datasets/CDR.csv')
df.CallDate = pd.to_datetime(df.CallDate)
df.CallTime = pd.to_timedelta(df.CallTime)
# Get a distinct list of "In" phone numbers
unique = df.In.unique().tolist()
# Filter out all data not belonging to user #8
idno = 8
print "\n\nExamining person: ", idno
user1 = df[df['In'] == unique[idno]]
# Workplace heuristic: weekday calls before 5pm.
user1 = user1[user1.DOW != 'Sun']
user1 = user1[user1.DOW != 'Sat']
user1 = user1[user1.CallTime < '17:00:00']
# Home
# user1 = (user1[user1.DOW == 'Sat']).append(user1[user1.DOW == 'Sun'])
# user1 = (user1[user1.CallTime < '06:00:00']).append(user1[user1.CallTime > '22:00:00'])
# Plot the cell towers the user connected to
fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(user1.TowerLon,user1.TowerLat, c='g', marker='o', alpha=0.2)
# NOTE(review): title says "Weekend" but the filter above keeps weekdays.
ax.set_title('Weekend Calls (<5pm)')
# Run K-Means
# There really should only be two areas of concentration.
# We tune K with the goal that all centroids except two will
# remove the outliers.
model = doKMeans(user1, 4)
print model.cluster_centers_
print unique[idno]
# We will decide which cluster is home / work by mean call time (CallTime)
# The cluster with the most samples will be the workplace, and the cluster
# with the second most samples will be the user's home.
# That means that the cluster with the least samples, in between home and
# work, is the commute route.
midWayClusterIndices = clusterWithFewestSamples(model)
midWaySamples = user1[midWayClusterIndices]
print " Commute time: ", midWaySamples.CallTime.mean()
# Centroids plotted as red crosses (note lat/lon swapped to match scatter).
ax.scatter(model.cluster_centers_[:,1], model.cluster_centers_[:,0], s=169, c='r', marker='x', alpha=0.8, linewidths=2)
showandtell('Weekday Calls Centroids') | {
"repo_name": "adewynter/Tools",
"path": "MLandDS/MachineLearning/Kmeans-CellTowers.py",
"copies": "1",
"size": "3733",
"license": "mit",
"hash": -5375882972834845000,
"line_mean": 34.5619047619,
"line_max": 119,
"alpha_frac": 0.7173854808,
"autogenerated": false,
"ratio": 3.0648604269293926,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4282245907729393,
"avg_score": null,
"num_lines": null
} |
# Adrian deWynter, 2016
# This dataset is nasty, so we are also going to use some PCA.
import numpy as np
import pandas as pd
from sklearn import preprocessing
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
import matplotlib
import math
# Plot configuration: label points with their index vs. plain scatter,
# and whether to overlay the PCA feature vectors.
PLOT_TYPE_TEXT = False
PLOT_VECTORS = True
matplotlib.style.use('ggplot')
c = ['red', 'green', 'blue', 'orange', 'yellow', 'brown']
# Draws features on PCA space: one arrow per original column, scaled by the
# extent of the projected data.  Returns the axes used.
def drawVectors(transformed_features, components_, columns, plt):
    num_columns = len(columns)
    # Scale each component direction by the data's extent along that axis.
    xvector = components_[0] * max(transformed_features[:,0])
    yvector = components_[1] * max(transformed_features[:,1])
    # Sort each column by its length (not PCA columns)
    important_features = { columns[i] : math.sqrt(xvector[i]**2 + yvector[i]**2) for i in range(num_columns) }
    important_features = sorted(zip(important_features.values(), important_features.keys()), reverse=True)
    print "Projected Features by importance:\n", important_features
    ax = plt.axes()
    for i in range(num_columns):
        # Project each original feature on the PCA axes
        plt.arrow(0, 0, xvector[i], yvector[i], color='b', width=0.0005, head_width=0.02, alpha=0.75, zorder=600000)
        plt.text(xvector[i]*1.2, yvector[i]*1.2, list(columns)[i], color='b', alpha=0.75, zorder=600000)
    return ax
# Fit a randomized PCA with the requested number of components and return
# the fitted model (used only for 2-D visualization here).
# NOTE(review): RandomizedPCA was deprecated and removed in modern
# scikit-learn; PCA(svd_solver='randomized') is presumably the replacement
# -- confirm against the installed sklearn version.
def doPCA(data, dimensions=2):
    from sklearn.decomposition import RandomizedPCA
    model = RandomizedPCA(n_components=dimensions)
    model.fit(data)
    return model
def doKMeans(data, clusters=0):
    """Fit K-Means with `clusters` centroids; return (cluster_centers_, labels_)."""
    model = KMeans(n_clusters=clusters)
    model.fit(data)
    model.predict(data)
    return model.cluster_centers_, model.labels_
# Load the wholesale-customers dataset and drop rows with missing values.
df = pd.read_csv('Datasets/Wholesale customers data.csv')
df = df.fillna(value=0)
# Assume single-location wholesale
df = df.drop('Channel',1)
df = df.drop('Region',1)
# We don't care much for outlier customers: mark the 5 smallest and 5
# largest rows of every column for removal.
drop = {}
for col in df.columns:
    # Bottom 5
    sort = df.sort_values(by=col, ascending=True)
    if len(sort) > 5: sort=sort[:5]
    for index in sort.index: drop[index] = True
    # Top 5
    sort = df.sort_values(by=col, ascending=False)
    if len(sort) > 5: sort=sort[:5]
    for index in sort.index: drop[index] = True
print "Dropping {0} Outliers...".format(len(drop))
df.drop(inplace=True, labels=drop.keys(), axis=0)
print df.describe()
# Alternative preprocessing strategies, left disabled on purpose:
#T = preprocessing.StandardScaler().fit_transform(df)
#T = preprocessing.MinMaxScaler().fit_transform(df)
#T = preprocessing.normalize(df)
#T = preprocessing.scale(df)
T = df
# There are so few features that doing PCA ahead of time isn't really necessary
# Do KMeans
n_clusters = 3
centroids, labels = doKMeans(T, n_clusters)
print centroids
# Do PCA to visualize the results (project samples and centroids to 2-D).
display_pca = doPCA(T)
T = display_pca.transform(T)
CC = display_pca.transform(centroids)
# Visualize all the samples
fig = plt.figure()
ax = fig.add_subplot(111)
if PLOT_TYPE_TEXT:
    # Label each sample with its dataframe index, colored by cluster.
    for i in range(len(T)): ax.text(T[i,0], T[i,1], df.index[i], color=c[labels[i]], alpha=0.75, zorder=600000)
    ax.set_xlim(min(T[:,0])*1.2, max(T[:,0])*1.2)
    ax.set_ylim(min(T[:,1])*1.2, max(T[:,1])*1.2)
else:
    # Plot a regular scatter plot
    sample_colors = [ c[labels[i]] for i in range(len(T)) ]
    ax.scatter(T[:, 0], T[:, 1], c=sample_colors, marker='o', alpha=0.2)
# Plot the centroids
ax.scatter(CC[:, 0], CC[:, 1], marker='x', s=169, linewidths=3, zorder=1000, c=c)
for i in range(len(centroids)): ax.text(CC[i, 0], CC[i, 1], str(i), zorder=500010, fontsize=18, color=c[i])
if PLOT_VECTORS: drawVectors(T, display_pca.components_, df.columns, plt)
# Attach the cluster label to each row for inspection.
df['label'] = pd.Series(labels, index=df.index)
print df
plt.show() | {
"repo_name": "adewynter/Tools",
"path": "MLandDS/MachineLearning/Kmeans-CustomerAnalysis.py",
"copies": "1",
"size": "3531",
"license": "mit",
"hash": 3102393988818930000,
"line_mean": 31.7037037037,
"line_max": 110,
"alpha_frac": 0.7063154914,
"autogenerated": false,
"ratio": 2.7979397781299524,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.40042552695299527,
"avg_score": null,
"num_lines": null
} |
# Adrian deWynter, 2017
# Implementation of:
# - Factorial function
# - Number of zeros in factorial
# - Big number mod M
# - Equilateral Pascal's Triangle
# A recursive implementation of factorial.
def factorial(N):
    """Return N! for a non-negative integer N.

    Iterative rather than recursive: the recursive original hit Python's
    recursion limit for N around 1000 (and recursed forever on negative N);
    here N < 2 simply yields 1.
    """
    result = 1
    for k in range(2, N + 1):
        result *= k
    return result
# Calculate the number of zeros in a
# factorial
# Calculate the number of trailing zeros of N!.
def noZeros(N):
    """Print the number of trailing zeros in N! .

    Trailing zeros come from factors of 10 = 2*5, and 5s are the scarcer
    factor, so the count is N//5 + N//25 + N//125 + ...  (Legendre).  The
    original stopped at N//25, undercounting for N >= 125, and used Python
    2-only `/` division.
    """
    zeros = 0
    power = 5
    while power <= N:
        zeros += N // power
        power *= 5
    print(zeros)
# Given three numbers, A,B and M, calculate (A*B)%M. Careful
# from overflow.
# Given three numbers, A, B and M, calculate (A*B)%M without forming the
# full product (relevant in fixed-width languages; Python ints don't overflow).
def bigNumberModM(A,B,M):
    """Return (A*B) % M using Russian-peasant (double-and-add) multiplication.

    Accumulate A shifted by each set bit of B, reducing mod M at every step
    so intermediate values stay below 2*M.  O(log B).

    The original body had two defects: its first loop never incremented its
    counter (infinite loop for B > 0), and the halving/squaring part computed
    neither A*B nor A**B correctly.
    """
    result = 0
    a = A % M
    b = B
    while b > 0:
        if b % 2 == 1:
            result = (result + a) % M
        a = (a + a) % M   # a doubles as b halves
        b //= 2
    return result
# From Pascal's triangle, determine if the sides
# a,b,c form an equilateral triangle
# From Pascal's triangle, determine if the sides
# a,b,c form an equilateral triangle
# NOTE(review): a, b, c are 1-based positions in the row-major enumeration of
# Pascal's triangle; the math inverts k = i(i+1)/2 to recover row numbers.
# The exact alignment rules are hard to verify from here -- treat with care.
def isEquilateral(a,b,c):
    import math
    d = c - b
    # The last number of the nth row is n(n+1)/2
    #
    # This solves to x = i(i + 1)/2 + 1
    # Or 2*(x - 1) = i^2 + i0
    lastRow_c = math.floor((math.sqrt(4*(2*(c-1))+1) - 1)/2)
    lastRow_b = math.floor((math.sqrt(4*(2*(b-1))+1) - 1)/2)
    # b and c must sit on the same row for the triangle to close.
    if lastRow_b != lastRow_c: return False
    # Otherwise go find the relevant element of the row.
    firstRow = lastRow_c + 1 - d
    # The way we'll ensure if it's aligned is first to verify
    # that it exists on the row:
    if a > firstRow*(firstRow+1)/2 or a < (firstRow-1)*(firstRow)/2 + 1:
        return False
    # Finally we just shift the triangle to the left and verify
    # that it is the same
    D = b - (lastRow_c+1)*lastRow_c/2
    if (firstRow-1)*(firstRow)/2 + D != a: return False
return True | {
"repo_name": "adewynter/Tools",
"path": "Algorithms/numberTheory/util2.py",
"copies": "1",
"size": "1680",
"license": "mit",
"hash": 1398668973971843600,
"line_mean": 19.0119047619,
"line_max": 69,
"alpha_frac": 0.6113095238,
"autogenerated": false,
"ratio": 2.393162393162393,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8136720366337251,
"avg_score": 0.07355031012502829,
"num_lines": 84
} |
# Adrian deWynter, 2017
# Implementation of various algorithms
# applied to strings
# Given a long string find the greater
# number that is also a palindrome.
# Given a number (or its decimal string) find the smallest strictly greater
# number that is also a palindrome.
def nextPalindrome(S):
    """Return the smallest palindromic integer strictly greater than S.

    S may be an int or a numeric string.  The original crashed on every
    input: `S + 1` fails for strings and `x[::-1]` fails for ints; here the
    value is incremented as an int and tested as a string.
    """
    n = int(S) + 1
    while str(n) != str(n)[::-1]:
        n += 1
    return n
# Given two words A,B find if A = rot(B)
# Given two words A,B find if B is a rotation of A.
def isRotation(A,B):
    """Return True iff B is a cyclic rotation of A.

    Every rotation of A is a substring of A+A, but the converse needs the
    strings to have equal length -- the original omitted that check, so
    e.g. isRotation("abc", "a") wrongly returned True.
    """
    return len(A) == len(B) and B in A + A
# Print all possible combinations of a certain
# s \in {0,1}^* for a given wildcard (*)
# Print all possible combinations of a certain
# s \in {0,1}^* for a given wildcard (*)
# s is a mutable list of characters; each '*' is expanded to '1' then '0'.
# Python 2 print statement.
def wS(s,i):
    if i == len(s):
        print "".join(s)
    else:
        if s[i] == "*":
            # Try both values for the wildcard, restoring nothing: the
            # second assignment overwrites the first before recursing.
            s[i] = "1"
            wS(s,i+1)
            s[i] = "0"
            wS(s,i+1)
        else:
            wS(s,i+1)
def allNonRepeatingWordsInTwoSentences(a, b):
    """Print (space-separated) every word that appears exactly once across
    the two sentences a and b.  O(len(a) + len(b)).

    The original crashed immediately (it hashed the *list* returned by
    split), iterated characters instead of words, and its final filter kept
    repeated words rather than unique ones; this is the intended behavior
    per its own docstring comment.
    """
    counts = {}
    for word in a.split(" ") + b.split(" "):
        counts[word] = counts.get(word, 0) + 1
    # dict preserves insertion order (Python 3.7+), so output order follows
    # first appearance in a, then b.
    unique = [w for w in counts if counts[w] == 1]
    print(" ".join(unique))
# Split a string into the minimum number of substrings
# such that each substring is a palindrome.
# This doesn't really work.
# Instead maintain an array:
# mincuts[i] = min cuts until i in S
# ispalindrome[i][j]
def minSubPalindromes(S):
    """Print the minimum number of cuts needed to partition S into palindromic
    substrings (0 if S is already a palindrome).

    Classic DP, as the comment above sketches: ispalindrome[i][j] built
    bottom-up by substring length, then mincuts[j] over prefixes.  The
    original body referenced an undefined `j` (NameError) and computed an
    unrelated LCS-style table; the author flagged it as not working.
    """
    n = len(S)
    if n == 0:
        print(0)
        return
    # is_pal[i][j]: S[i..j] is a palindrome.
    is_pal = [[False] * n for _ in range(n)]
    for i in range(n):
        is_pal[i][i] = True
    for length in range(2, n + 1):
        for i in range(n - length + 1):
            j = i + length - 1
            if S[i] == S[j] and (length == 2 or is_pal[i + 1][j - 1]):
                is_pal[i][j] = True
    # cuts[j]: min cuts for the prefix S[0..j].
    cuts = [0] * n
    for j in range(n):
        if is_pal[0][j]:
            cuts[j] = 0
        else:
            cuts[j] = min(cuts[i] + 1 for i in range(j) if is_pal[i + 1][j])
    print(cuts[-1])
# Longest word made of words.
# I have no idea what it does.
def longestWordsMadeOfWords(W):
    """Return the longest word in W that is the concatenation of two other
    entries of W, or None if no such word exists.

    The original looped forever whenever every split of a word had both
    halves present (its index was only advanced on a failed split), and it
    sorted alphabetically rather than by length, so "longest" was wrong.
    """
    words = set(W)
    for w in sorted(W, key=len, reverse=True):
        # Try every two-way split of w; both halves must be words themselves.
        for i in range(1, len(w)):
            if w[:i] in words and w[i:] in words:
                return w
    return None
# Find smallest window if a string A containing all
# characters of another string B
# Find smallest window if a string A containing all
# characters of another string B
# NOTE(review): this body is broken and should not be trusted: the inner
# comparison uses A[i] == A[j] (j indexes B's range, not B's characters),
# the DP recurrence does not model windows, and the function returns 1 or
# None rather than a window.  A correct version needs the standard
# two-pointer sliding-window algorithm.
def smallestWindow(A,B):
    M = [[0 for _ in range(len(A))] for _ in range(len(B))]
    M[0] = [1 if B[0] == A[i] else 0 for i in range(len(A))]
    for i in range(len(B)): M[i][0] = 1 if A[0] == B[i] else 0
    for i in range(1,len(A)):
        for j in range(1,len(B)):
            if A[i] == A[j]:
                M[i][j] = max(1, M[i-1][j-1],M[i-1][j],M[i][j-1])
    if M[-1][-1] == len(B): return 1
# Alphabetical order:
def alienAlphabet(A):
node = None
def insertNode(node,v):
node_ = Node()
node_.value = v
node_.next = None
node.next = node_
for k,v in A:
node = Node()
node.value = k[0]
for c in range(1,len(k)):
if node.value != k[c]:
node_ = node
while node.next is not None:
if node.value == k[c]:
break
else:
if node.next.value != k[c]:
insertNode(node,k[c])
node = node.next
if node.next is None and node.value != k[c]:
insertNode(node,k[c])
while node.next is not None: print node.value
# Find minimum nnumber of operations that can
# be performed to turn s1 into s2
# Find minimum number of operations (insert/delete/substitute) that can
# be performed to turn s1 into s2
def minNum(s1, s2):
    """Return the Levenshtein edit distance between s1 and s2.

    The original returned the result of an exponential-time naive recursion
    and then carried a dead (unreachable) DP fragment that used `max` and
    would not have computed the distance anyway.  This is the standard
    O(len(s1)*len(s2)) dynamic program with a rolling row.
    """
    m, n = len(s1), len(s2)
    # prev[j]: distance between s1[:i-1] and s2[:j].
    prev = list(range(n + 1))
    for i in range(1, m + 1):
        curr = [i] + [0] * n
        for j in range(1, n + 1):
            cost = 0 if s1[i - 1] == s2[j - 1] else 1
            curr[j] = min(prev[j] + 1,        # deletion
                          curr[j - 1] + 1,    # insertion
                          prev[j - 1] + cost) # substitution / match
        prev = curr
    return prev[n]
# Find all positions where the anagram of a substring
# S exists in A
# Complexity: O(A + S)
def needleHaystack(S,A):
indexes = []
T = sufixTree(A)
i = 0
while i < len(S):
k = T.findSubstring(S)
if k = len(S): indexes.append(k)
S = getNextAnagram(S)
return indexes
left,right = 0,0
count = len(S)
indexes = []
dic = {}
for c in S:
if c in S:
dic[c] = dic[c] + 1
else:
dic[c] = 0
while right < len(A):
right = right + 1
if A[right] in dic and A[right] >= 0:
A[right] = A[right] - 1
count = count -1
if count == 0: indexes.append(left)
left = left + 1
if right - left == len(S) and left in A and A[left] >= 0:
A[left] = A[left] + 1
count = count + 1
return indexes
| {
"repo_name": "adewynter/Tools",
"path": "Algorithms/stringOps.py",
"copies": "1",
"size": "4317",
"license": "mit",
"hash": -1284095204729518800,
"line_mean": 17.7695652174,
"line_max": 60,
"alpha_frac": 0.5788742182,
"autogenerated": false,
"ratio": 2.3060897435897436,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.33849639617897437,
"avg_score": null,
"num_lines": null
} |
# Adrian deWynter, 2017
# Random exercises for linked lists
# and stuff I couldn't fit in the
# other categories.
# Sum of two linked lists -- pick the smallest
# and padd with zeros
# Sum of two linked lists -- pick the smallest
# and padd with zeros
# NOTE(review): this is an unfinished sketch: `ans` holds [digit, index]
# pairs, the indexing `L1[i+1]` overruns for plain lists, and the equal/
# longer-L1 branch is an empty `pass`.  Do not use as-is.
def twoNums(L1,L2):
    # Assume this is a linked list
    ans = [[0,i] for i in range(max(len(L2),len(L1)))]
    carry = 0
    # Something like this
    if len(L1) < len(L2):
        for i in range(len(L1)+1):
            number = L1[i+1] + L2[i+1] + carry
            carry = 0 if number < 10 else 1
            ans[i][0] = number%10
    else:
        pass # The same
    return ans
# Given a number find next greater number with
# the same digits
# Given a number find the next greater number with the same digits.
def nextGreater(x_):
    """Return the smallest number greater than x_ formed by permuting the
    digits of x_, or x_ itself if no greater permutation exists.

    Standard next-permutation algorithm.  The original sorted the digits in
    descending order (the *maximum* permutation), left its swap step as a
    "swap stuff" stub, and therefore returned the wrong value for any input
    with three or more distinct digits (e.g. 125 -> 521 instead of 152).
    """
    digits = list(str(x_))
    # Find rightmost position whose digit is smaller than its successor.
    i = len(digits) - 2
    while i >= 0 and digits[i] >= digits[i + 1]:
        i -= 1
    if i < 0:
        return x_   # digits are non-increasing: already the largest arrangement
    # Swap it with the smallest larger digit to its right, then sort the tail.
    j = len(digits) - 1
    while digits[j] <= digits[i]:
        j -= 1
    digits[i], digits[j] = digits[j], digits[i]
    digits[i + 1:] = reversed(digits[i + 1:])
    return int("".join(digits))
# Clone a linked list such that (value, next, random)
# Clone a linked list whose nodes are (value, next, random-ish 'arbitrary'):
# classic interleaving trick -- insert a copy after each node, wire the
# arbitrary pointers via the interleaved copies, then unzip the two lists.
# NOTE(review): depends on an undefined `node` class, and the final unzip
# loop's pointer arithmetic looks fragile (it returns `tmp`, which by then
# has been advanced to the tail region) -- verify before use.
def cloneLinkedList(start):
    # Insert a copy of n directly after n.
    def insert(n):
        tmp = n.next
        n.next = node(n.data)
        n.next.next = tmp
    # Insert a copy of n after n
    n = start
    while n.next is not None:
        insert(n)
        n = n.next
    # Copy the arbitrary node: the copy of X.arbitrary is X.arbitrary.next.
    n = start
    while n.next is not None:
        n.next.arbitrary = n.arbitrary.next
        n = n.next.next if n.next.next is not None else n.next
    # Copy our new n into tmp (separate originals from copies).
    n,tmp = start,n.next
    while n.next is not None and tmp.next is not None:
        n = n.next.next if n.next.next is not None else n.next
        tmp = tmp.next.next if tmp.next.next is not None else tmp.next
    n = n.next
    tmp = tmp.next
    tmp.next = None
    return tmp
# Hamiltonian path to visit all petrol stations
# in O(n)
# Circular petrol-pump tour: M is a sequence of (liters, dist) pairs; find a
# starting pump from which the whole circle can be driven without the tank
# going negative, in O(n) with two pointers.
# NOTE(review): the "Hamiltonian path / NP-c" commentary is misleading --
# this is the standard linear gas-station problem; also the window arithmetic
# here (tank seeded from M[0], start advanced while subtracting) differs from
# the textbook formulation, so verify against known inputs before trusting it.
def findCircularTour(M):
    # Imagine M to be a structure of the form
    # (liters,dist) \in M
    # Find Hamiltonian path. This is an NP-c
    # problem analogous to the one above.
    #sorted(M, key=lambda x: x[2])
    start,end = 0,1
    distance = 0
    # Pick closest pump
    tank = M[0][0] - M[0][1]
    # So here's how it goes:
    # - We go over all the pumps in a circle.
    # - If our tank becomes negative, we just move to
    # the next thing.
    # - We will only handle a single data structure,
    # so we will keep two pointers.
    # Go around until we either return to the start
    # or we run out of gas.
    while start != end or tank < 0:
        # Find a good starting point. Note we don't
        # really have to sort the array.
        while tank < 0 and start != end:
            # Shrink the window from the left, refunding that pump's net gain.
            tank = tank - (M[start][0] - M[start][1])
            start = (start+1)%len(M)
        # No possible solution
        if start == 0: return -1
        tank = tank + M[end][0] - M[end][1]
        end = (end + 1)%len(M)
    return start
# A person can only skip one or none
def waysToReachNthStair(steps):
def fib(n):
if n <= 1: return n
return fib(n-1) + fib(n -2)
print fib(steps+1)
# Find lowest common ancestor
# Find the lowest common ancestor of two values in a BST.
def LCA(root, node1, node2):
    """Return the lowest-common-ancestor node of values node1 and node2 in
    the BST rooted at `root` (None if root is None).

    Iterative descent: while both values lie strictly on the same side of
    the current node, move down that side; the first node that splits them
    (or equals one of them) is the LCA.
    """
    current = root
    while current is not None:
        if node1 < current.data and node2 < current.data:
            current = current.left
        elif node1 > current.data and node2 > current.data:
            current = current.right
        else:
            return current
    return current
# Find kth smallest element in BST:
# Find kth smallest element in BST:
def ksmallest(node, k):
    """Print the k-th smallest value (1-based) in the BST rooted at `node`.

    Iterative in-order traversal with an explicit stack; the k-th node
    visited in-order is the k-th smallest.  Prints nothing if the tree has
    fewer than k nodes.

    The original did not parse (`def ksmallest(node=root,k)` puts a
    non-default parameter after a default one, and `root` was undefined),
    and its parent-pointer walk was incoherent.
    """
    stack = []
    count = 0
    while stack or node is not None:
        # Descend as far left as possible.
        while node is not None:
            stack.append(node)
            node = node.left
        node = stack.pop()
        count += 1
        if count == k:
            print(node.value)
            return
        node = node.right
# Median of two sorted arrays:
# Median of two sorted arrays:
def medianAB(A, B):
    """Print the upper median of the merged sorted arrays A and B.

    Walks both arrays in merge order until floor((len(A)+len(B))/2) + 1
    elements have been consumed; the last one consumed is printed (matching
    the original's single-value "upper median" convention for even totals).

    Fixes: the original indexed past the end of whichever array was
    exhausted first (IndexError), and `(len(A)+len(B))/2` became a float on
    Python 3.  Prints None when both arrays are empty (the original crashed).
    """
    if not A and not B:
        print(None)
        return
    median = 0
    midpoint = (len(A) + len(B)) // 2
    i = j = 0
    while i + j <= midpoint:
        # Take from A when B is exhausted, or when A's head is the smaller.
        if j >= len(B) or (i < len(A) and A[i] <= B[j]):
            median = A[i]
            i += 1
        else:
            median = B[j]
            j += 1
    print(median)
import re
class blackList(object):
    """Hold a set of regex filters and test strings against them.

    Fixes over the original: match() returned True when the pattern did
    NOT match (inverted `is None`), and isInBlackList() called it with
    pattern and string swapped -- together they made the class useless.
    """
    # For regexes
    def matchString(self, s):
        # Scratch look-behind examples (not used by the filter API).
        m = re.search('(?<=abc)def', 'abcdef')
        # Fixed-width look-behind; the original '(?<=-\w+)' is variable-width
        # and raises re.error at runtime.
        m = re.search(r'(?<=-)\w+', 'spam-egg')
    def __init__(self):
        self.filters = set()
    def match(self, f, s):
        # True when pattern f matches somewhere in s.
        return re.search(f, s) is not None
    def addFilter(self, f):
        self.filters.add(f)
    def isInBlackList(self, s):
        # s is blacklisted when any stored pattern matches it.
        for f in self.filters:
            if self.match(f, s):
                return True
        return False
# From a continuous stream of random numbers,
# maintain the median.
from heapq import heappush,heappop
# Two-heap running median: `maxim` is a max-heap (stored negated) holding the
# lower half of the stream, `minim` a min-heap holding the upper half.  The
# lower half is kept equal in size to, or one larger than, the upper half,
# so the median is always the lower half's root (the lower median for even
# counts).
minim,maxim = [],[]
median = -1
# Max-heap helpers on top of heapq's min-heap (store negated values).
def maxheappush(a,x): heappush(a,-1*x)
def maxheappop(a): return -1*heappop(a)
def maintainMedian(x):
    """Fold x into the stream; update and return the current median.

    Fixes over the original: `median` was assigned without a `global`
    declaration, so the very first read raised UnboundLocalError; the heap
    roles/rebalancing were inverted; and the final `median = -1*maxim[0]`
    read the wrong heap.  Returning the median is new but backward
    compatible (the original returned None, which no caller used).
    """
    global median
    # Route x to the half it belongs to.
    if not maxim or x <= -maxim[0]:
        maxheappush(maxim, x)
    else:
        heappush(minim, x)
    # Rebalance so len(maxim) is len(minim) or len(minim) + 1.
    if len(maxim) > len(minim) + 1:
        heappush(minim, maxheappop(maxim))
    elif len(minim) > len(maxim):
        maxheappush(maxim, heappop(minim))
    median = -maxim[0]
    return median
"repo_name": "adewynter/Tools",
"path": "Algorithms/exercises.py",
"copies": "1",
"size": "4427",
"license": "mit",
"hash": 8002738199428586000,
"line_mean": 18.1688311688,
"line_max": 64,
"alpha_frac": 0.6428732776,
"autogenerated": false,
"ratio": 2.5648899188876015,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.847709522118838,
"avg_score": 0.04613359505984454,
"num_lines": 231
} |
# Adrian deWynter
import bisect
import random
# Predefined classes
# Non deterministic finite automaton
# Non deterministic finite automaton
class NFA(object):
    """NFA over arbitrary hashable states, built for Levenshtein automata.

    Two sentinel input symbols: EPSILON (free move consuming no input) and
    ANY (wildcard matching any one input symbol).  States are created
    implicitly by add_transition().
    """
    EPSILON,ANY = object(),object()
    def __init__(self, start_state):
        # transitions: state -> {input_symbol -> set of destination states}
        self.transitions = {}
        self.final_states = set()
        self._start_state = start_state
    # Start state expanded through its epsilon-closure, as a hashable frozenset.
    @property
    def start_state(self): return frozenset(self._expand(set([self._start_state])))
    # Register src --input--> dest, creating nested containers on demand.
    def add_transition(self, src, input, dest):
        self.transitions.setdefault(src, {}).setdefault(input, set()).add(dest)
    # Mark a state as accepting.
    def add_final_state(self, state): self.final_states.add(state)
    # Truthy iff any of `states` is accepting.
    def is_final(self, states): return self.final_states.intersection(states)
    # Epsilon-closure: grow `states` in place with everything reachable via
    # EPSILON edges; returns the same (enlarged) set.
    def _expand(self, states):
        frontier = set(states)
        while frontier:
            state = frontier.pop()
            new_states = self.transitions.get(state, {}).get(NFA.EPSILON, set()).difference(states)
            frontier.update(new_states)
            states.update(new_states)
        return states
    # Epsilon-closed set of states reachable from `states` on `input`
    # (wildcard ANY edges are always taken too), as a frozenset.
    def next_state(self, states, input):
        dest_states = set()
        for state in states:
            state_transitions = self.transitions.get(state, {})
            dest_states.update(state_transitions.get(input, []))
            dest_states.update(state_transitions.get(NFA.ANY, []))
        return frozenset(self._expand(dest_states))
    # All input symbols (including the sentinels) usable from any of `states`.
    def get_inputs(self, states):
        inputs = set()
        for state in states:
            inputs.update(self.transitions.get(state, {}).keys())
        return inputs
    # Subset construction: each DFA state is a frozenset of NFA states.
    def to_dfa(self):
        dfa = DFA(self.start_state)
        frontier = [self.start_state]
        seen = set()
        while frontier:
            current = frontier.pop()
            inputs = self.get_inputs(current)
            for input in inputs:
                if input == NFA.EPSILON: continue
                new_state = self.next_state(current, input)
                if new_state not in seen:
                    frontier.append(new_state)
                    seen.add(new_state)
                    if self.is_final(new_state):
                        dfa.add_final_state(new_state)
                # ANY becomes the DFA state's default ("everything else") edge.
                if input == NFA.ANY:
                    dfa.set_default_transition(current, new_state)
                else:
                    dfa.add_transition(current, input, new_state)
        return dfa
# Deterministic finite automaton
class DFA(object):
    """DFA produced by NFA.to_dfa(), with support for finding the
    lexicographically smallest accepted string >= a given input
    (the key operation for dictionary-skipping fuzzy search).
    """
    def __init__(self, start_state):
        self.start_state = start_state
        # transitions: state -> {input_symbol -> destination state}
        self.transitions = {}
        # defaults: state -> destination taken for any symbol without an
        # explicit edge (comes from the NFA's ANY wildcard).
        self.defaults = {}
        self.final_states = set()
    def add_transition(self, src, input, dest): self.transitions.setdefault(src, {})[input] = dest
    def set_default_transition(self, src, dest): self.defaults[src] = dest
    def add_final_state(self, state): self.final_states.add(state)
    def is_final(self, state): return state in self.final_states
    # Destination of (src, input), falling back to src's default edge or None.
    def next_state(self, src, input):
        state_transitions = self.transitions.get(src, {})
        return state_transitions.get(input, self.defaults.get(src, None))
    # Smallest (lexicographically) accepted string >= `input`, or None.
    def next_valid_string(self, input):
        state = self.start_state
        stack = []
        # Evaluate the DFA as far as possible
        for i, x in enumerate(input):
            stack.append((input[:i], state, x))
            state = self.next_state(state, x)
            if not state: break
        else:
            stack.append((input[:i+1], state, None))
        if self.is_final(state):
            # Input word is already valid
            return input
        # Perform a 'wall following' search for the lexicographically smallest
        # accepting state.
        while stack:
            path, state, x = stack.pop()
            x = self.find_next_edge(state, x)
            if x:
                path += x
                state = self.next_state(state, x)
                if self.is_final(state):
                    return path
                stack.append((path, state, None))
        return None
    # Smallest outgoing edge label from s strictly greater than x (None means
    # "smallest of all").  NOTE(review): uses Python 2-only unichr(); under
    # Python 3 this would need chr().
    def find_next_edge(self, s, x):
        if x is None:
            x = u'\0'
        else:
            x = unichr(ord(x) + 1)
        state_transitions = self.transitions.get(s, {})
        if x in state_transitions or s in self.defaults:
            return x
        labels = sorted(state_transitions.keys())
        pos = bisect.bisect_left(labels, x)
        if pos < len(labels):
            return labels[pos]
        return None
def levenshtein_automata(term, k):
    """Build an NFA accepting every word within Levenshtein distance `k`
    of `term`. States are (position-in-term, errors-used) pairs."""
    nfa = NFA((0, 0))
    for pos, ch in enumerate(term):
        for err in range(k + 1):
            # Correct character
            nfa.add_transition((pos, err), ch, (pos + 1, err))
            if err < k:
                # Deletion
                nfa.add_transition((pos, err), NFA.ANY, (pos, err + 1))
                # Insertion
                nfa.add_transition((pos, err), NFA.EPSILON, (pos + 1, err + 1))
                # Substitution
                nfa.add_transition((pos, err), NFA.ANY, (pos + 1, err + 1))
    for err in range(k + 1):
        if err < k:
            nfa.add_transition((len(term), err), NFA.ANY, (len(term), err + 1))
        nfa.add_final_state((len(term), err))
    return nfa
def find_all_matches(word, k, lookup_func):
    """Yield every dictionary entry within Levenshtein distance `k` of
    `word`, probing the dictionary through `lookup_func` (which must
    return the smallest entry >= its argument, or a falsy value)."""
    dfa = levenshtein_automata(word, k).to_dfa()
    candidate = dfa.next_valid_string(u'\0')
    while candidate:
        # `probe` renamed from `next` to avoid shadowing the builtin.
        probe = lookup_func(candidate)
        if not probe:
            return
        if candidate == probe:
            yield candidate
            probe = probe + u'\0'
        candidate = dfa.next_valid_string(probe)
class Matcher(object):
    """Probe-counting lookup over a sorted word list.

    Calling the instance with a word returns the first list entry that
    compares >= the word (None past the end) and counts the probe.
    """
    def __init__(self, l):
        self.l = l
        self.probes = 0
    def __call__(self, w):
        self.probes = self.probes + 1
        idx = bisect.bisect_left(self.l, w)
        return self.l[idx] if idx < len(self.l) else None
def levenshtein(s1, s2):
    """Return the Levenshtein edit distance between sequences s1 and s2.

    Uses the classic two-row dynamic programming scheme: O(len(s1) *
    len(s2)) time, O(min(len(s1), len(s2))) extra space.
    """
    # Keep the shorter sequence as s2 so the rows stay small.
    if len(s1) < len(s2):
        return levenshtein(s2, s1)
    if not s1:
        return len(s2)
    # BUGFIX: `xrange` is Python 2 only; `range` is behavior-identical
    # here (only indexed, never mutated) and works on both versions.
    previous_row = range(len(s2) + 1)
    for i, c1 in enumerate(s1):
        current_row = [i + 1]
        for j, c2 in enumerate(s2):
            # previous_row and current_row are one entry longer than s2,
            # hence the j+1 offset for the insertion cost.
            insertions = previous_row[j + 1] + 1
            deletions = current_row[j] + 1
            substitutions = previous_row[j] + (c1 != c2)
            current_row.append(min(insertions, deletions, substitutions))
        previous_row = current_row
    return previous_row[-1]
class BKNode(object):
    """Node of a Burkhard-Keller tree: every child edge is labeled with
    the Levenshtein distance between this node's term and the child's."""
    def __init__(self, term):
        self.term = term
        self.children = {}  # distance -> BKNode
    def insert(self, other):
        # Follow the edge labeled with the distance to `other`,
        # creating a new leaf when that edge does not exist yet.
        distance = levenshtein(self.term, other)
        if distance in self.children:
            self.children[distance].insert(other)
        else:
            self.children[distance] = BKNode(other)
    def search(self, term, k, results=None):
        # Collect into `results` every stored term within distance k of
        # `term`; the running `counter` tallies nodes visited. Children
        # outside [distance-k, distance+k] are pruned by the triangle
        # inequality.
        if results is None:
            results = []
        distance = levenshtein(self.term, term)
        counter = 1
        if distance <= k:
            results.append(self.term)
        for i in range(max(0, distance - k), distance + k + 1):
            child = self.children.get(i)
            if child:
                counter += child.search(term, k, results)
return counter | {
"repo_name": "adewynter/Tools",
"path": "Algorithms/dataStructures/levensheinAutomata.py",
"copies": "1",
"size": "5905",
"license": "mit",
"hash": 8737966731999849000,
"line_mean": 25.4843049327,
"line_max": 118,
"alpha_frac": 0.666553768,
"autogenerated": false,
"ratio": 2.8132444020962364,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8390889349488008,
"avg_score": 0.11778176412164577,
"num_lines": 223
} |
# Adrian deWynter
######################################################
# General info
#####################################################
# Takes in a document and turns it into a k-ary tree.
# We need to modify the binary tree structure to support
# siblings. I.e., for the sample provided in the .pdf, the
# output would be:
#
# root (a placeholder)
# |
# heading 1
# / \
# heading 2 heading 2
# / | \ \
# heading 3 heading 3 heading 3 heading 3
# ..etc
######################################################
# Notes
#####################################################
# - I thought of using Scala but I picked Python for two
# reasons:
# - It's easy to read
# - I've been coding in Python for years (as opposed
# to weeks, with Scala)
# - In practice, Python is very inefficient and I wouldn't
# recommend it for larger files.
# - I made a few assumptions to keep the code short and
# readable:
# - Document isn't degenerate (no sequences of the form
# HEADING n HEADING n+1 HEADING n + x; x > 1)
# - Document is as described in the example (every header
# will be stored in a single line, and starts with
# HEADING)
# - We don't mind using Python
######################################################
# Usage
#####################################################
# In the python shell:
# > import parser
# > p = parser.documentToOutline()
# > p.parseDocument(<filename>)
# To print pretty (as requested in the .pdf):
# > p.printPretty()
# To traverse the tree (and print by level):
# > p.traverseAndPrintByLevel()
class documentToOutline(object):
    """Parse a document of "HEADING <level> <title>" lines into a k-ary
    outline tree rooted at a placeholder node.

    Nodes are linked in both directions (parent and children), so
    insertion is O(1) and climbing back up for the sibling/ancestor
    cases is a walk over at most the current depth.
    """
    # Basic node class so we can implement
    # any algorithms we could use (plus we
    # use this as the unit for our k-ary
    # tree here
    class Node(object):
        # Default values yield a root node.
        def __init__(self, parent=None, children=None, level=0, value=None):
            self.parent = parent
            # BUGFIX: the original used a mutable default (children=[]),
            # so every default-constructed node -- e.g. the root created
            # on each parseDocument call -- shared one children list
            # across parses on the same instance.
            self.children = [] if children is None else children
            self.level = level
            self.value = value
    # This is just a placeholder so we can
    # generate several trees on a single instance.
    # That saves space! (not much, but anything
    # counts!)
    def __init__(self):
        pass
    # Create our tree.
    # Builds the k-ary tree and allows output from other functions.
    # Note that for k = 2 it's easier to just allocate values as a
    # function of the depth and sort the array, thus building a tree
    # in (also) O(n log n)
    def parseDocument(self, file="test"):
        """Read `file` and build the outline tree into self.root.

        On malformed input (a heading more than one level deeper than
        its predecessor) or a document without headings, self.root is
        reset to None and an error message is printed.
        """
        # We can imagine our tree to be a doubly linked
        # list, which allows us to do O(1) insertions
        # at the cost of O(n) walks. (Walks are optimized,
        # see below.) Moreover, we will have at most one
        # O(n) walk provided our document constraints hold.
        with open(file) as f:
            self.root = self.Node()
            lastNode = self.root
            for line in f:
                # Ignore anything that is not a heading line.
                if line.split(" ")[0] == "HEADING":
                    thisLevel = int(line.split(" ")[1])
                    thisContent = line.replace('\r\n', '').replace('\n', '').split(" ", 2)[-1]
                    thisNode = self.Node(lastNode, [], thisLevel, thisContent)
                    # Handle the three cases:
                    # - It is a child of our last node.
                    #   (Remember, levels must increase in +1 steps.)
                    if thisLevel > lastNode.level:
                        if thisLevel - lastNode.level != 1:
                            # Single-argument print with parentheses is
                            # valid on both Python 2 and Python 3.
                            print("Error parsing the file. Please ensure the headings are correctly formatted.")
                            self.root = None
                            return
                        lastNode.children.append(thisNode)
                    # - It is a sibling of the last node.
                    elif thisLevel == lastNode.level:
                        thisNode.parent = lastNode.parent
                        lastNode.parent.children.append(thisNode)
                    # - It is the sibling of one of the ancestors.
                    elif thisLevel < lastNode.level:
                        thisNode = self.traverseAndInsert(thisNode, lastNode)
                    lastNode = thisNode
        if lastNode == self.root:
            self.root = None
            print("Error: no headings to parse. Please ensure the format is correct!")
        else:
            print("Tree created successfully.")
    # A modified naive binary insertion algorithm,
    # generalized to our k-ary tree. It traverses *up*
    # from the current node (instead of down from the
    # root), inserts the target as a sibling and
    # returns it.
    def traverseAndInsert(self, targetNode, parentNode=None):
        """Climb from `parentNode` towards the root until a node with
        targetNode's level is found, attach targetNode as its sibling,
        and return targetNode."""
        if parentNode is None: parentNode = self.root
        while parentNode.level >= 0:
            if targetNode.level == parentNode.level:
                parentNode.parent.children.append(targetNode)
                targetNode.parent = parentNode.parent
                break
            # NOTE(review): if no matching level exists this walks past
            # the root (parent is None) and raises; the documented input
            # constraints rule that case out.
            parentNode = parentNode.parent
        return targetNode
    # Print the tree as shown in the example; as a
    # DFS-run algorithm (i.e., print the table of
    # contents as opposed to the tree.)
    def printPretty(self, parentNode=None):
        """Depth-first print of node values (a table of contents)."""
        # Make sure we aren't trying to print a broken tree:
        if self.root is None:
            print("Error printing the tree. Have you initialized it?")
            return
        # To print from the root:
        if parentNode is None: parentNode = self.root
        if parentNode.children != []:
            for child in parentNode.children:
                print(child.value)
                self.printPretty(child)
    # Print the tree as a hierarchical data structure
    # (analogous to printing through BFS).
    # Not what was asked for, but a useful tool.
    def traverseAndPrintByLevel(self, parentNode=None):
        """Print each node's list of children, descending depth-first."""
        # Make sure we aren't trying to print a broken tree:
        if self.root is None:
            print("Error printing the tree. Have you initialized it?")
            return
        # To print from the root:
        if parentNode is None: parentNode = self.root
        if parentNode.children != []:
            print([c.value for c in parentNode.children])
            for child in parentNode.children:
                self.traverseAndPrintByLevel(child)
if __name__ == "__main__":
pass | {
"repo_name": "adewynter/Tools",
"path": "Algorithms/Exercises/Python/parser.py",
"copies": "1",
"size": "5954",
"license": "mit",
"hash": -5297029871998038000,
"line_mean": 28.6268656716,
"line_max": 90,
"alpha_frac": 0.6451125294,
"autogenerated": false,
"ratio": 3.512684365781711,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46577968951817106,
"avg_score": null,
"num_lines": null
} |
"""Adrian language AST nodes."""
from dataclasses import dataclass, field
from typing import Optional, Tuple, List
# Types and expressions
class Type:
    """Base class for every AST type node."""
    pass
class Expression:
    """Base class for every AST expression node."""
    pass
# @Cleanup: rearrange fields; do we need is_only_named field?
# @Cleanup: move to ArgumentDeclaration
@dataclass
class Argument:
    """An argument: its type, optional default expression and name.

    `is_only_named` marks keyword-only arguments, which must then carry
    both a name and a default (checked in __post_init__).
    """
    type_: Type
    default_expression: Optional[Expression] = None
    name: Optional[str] = None
    is_only_named: bool = False
    def __post_init__(self):
        if self.is_only_named:
            assert(self.default_expression is not None and self.name is not None)
class Statement:
    """Base class for every AST statement node."""
    pass
class Void(Type):
    """The no-value (void) type."""
    pass
@dataclass
class IntrinsicType(Type):
    """A compiler-builtin type, referenced by name."""
    name: str
@dataclass
class FunctionType(Type):
    """The type of a function: its argument list and return type."""
    arguments: List[Argument]
    return_type: Type
@dataclass
class Name(Type, Expression):
    """An identifier, usable both as a type and as an expression; keeps
    the source position, mangled and unmangled spellings, the optional
    module qualifier and an optional inferred type annotation."""
    line_pos: Tuple[int, int]
    name: str
    without_mangling: str
    module: Optional[str]
    type_annotation: Optional[Type] = None
@dataclass
class ParameterDeclaration:
    """A declared parameter with its outer and inner names."""
    outer_name: Name
    inner_name: Name
    type_: Optional[Type] = None
@dataclass
class NamedParameter(Type):
    """A name/type pair used as a parameter inside a type."""
    name: Name
    type_: Type
@dataclass
class ParameterizedType(Type):
    """A type applied to parameters, with the parenthesis positions."""
    name: Name
    parentheses_line_pos: Tuple[Tuple[int, int], Tuple[int, int]]
    parameters: List[Type]
@dataclass
class IntrinsicFunction(Expression):
    """A compiler-builtin function, referenced by name."""
    line_pos: Tuple[int, int]
    name: str
    arguments_type_annotation: Optional[List[Argument]] = None
    return_type_annotation: Optional[Type] = None
@dataclass
class IntrinsicStruct(Expression):
    """A compiler-builtin struct, referenced by name."""
    line_pos: Tuple[int, int]
    name: str
class Literal(Expression):
    """Base class for literal expressions."""
    pass
@dataclass
class StringLiteral(Literal):
    """A string literal with its source position."""
    line_pos: Tuple[int, int]
    value: str
    type_annotation: Optional[Type] = None
@dataclass
class IntLiteral(Literal):
    """An integer literal; `as_string` keeps the original spelling."""
    line_pos: Tuple[int, int]
    value: int
    as_string: str
    type_annotation: Optional[Type] = None
@dataclass
class LiteralType(Type):
    """A type identified by the Python type of a literal."""
    literal_type: type
@dataclass
class Member(Expression):
    """A member-access chain on `base`; requires at least one member."""
    base: Expression
    members: List[str]
    type_annotation: Optional[Type] = None
    def __post_init__(self):
        assert(self.members)
@dataclass
class Call(Statement, Expression):
    """A call: callee applied to arguments, with parenthesis positions."""
    callee: Expression
    parentheses_line_pos: Tuple[Tuple[int, int], Tuple[int, int]]
    arguments: List[Expression]
@dataclass
class FunctionCall(Statement, Expression):
    """A call resolved to a function."""
    callee: Expression
    parentheses_line_pos: Tuple[Tuple[int, int], Tuple[int, int]]
    arguments: List[Expression]
    type_annotation: Optional[Type] = None
@dataclass
class StructCall(Statement, Expression):
    """A call resolved to a struct constructor."""
    callee: Expression
    parentheses_line_pos: Tuple[Tuple[int, int], Tuple[int, int]]
    arguments: List[Expression]
    type_annotation: Optional[Type] = None
@dataclass
class IntrinsicFunctionCall(Statement, Expression):
    """A call whose callee is a compiler-builtin function."""
    callee: IntrinsicFunction
    parentheses_line_pos: Tuple[Tuple[int, int], Tuple[int, int]]
    arguments: List[Expression]
    type_annotation: Optional[Type] = None
@dataclass
class IntrinsicStructCall(Expression):
    """A call whose callee is a compiler-builtin struct."""
    callee: IntrinsicStruct
    parentheses_line_pos: Tuple[Tuple[int, int], Tuple[int, int]]
    arguments: List[Expression]
    type_annotation: Optional[Type] = None
@dataclass
class MethodCall(Expression):
    """A method invocation on `callee` with the given arguments."""
    callee: Expression
    method: str
    parentheses_line_pos: Tuple[Tuple[int, int], Tuple[int, int]]
    arguments: List[Expression]
    type_annotation: Optional[Type] = None
@dataclass
class NamedArgument(Expression):
    """A name = expression argument; the name must be unqualified."""
    name: Name
    expression: Expression
    type_annotation: Optional[Type] = None
    def __post_init__(self):
        assert(self.name.module is None)
@dataclass
class ArgumentDeclaration:
    """A declared argument: outer/inner names, type, and an optional
    default (the `=` token and the default expression go together);
    both names must be unqualified."""
    outer_name: Name
    inner_name: Name
    colon: Tuple[int, int]
    type_: Type
    eq_sign: Optional[Tuple[int, int]]
    default_expression: Optional[Expression]
    def __post_init__(self):
        assert(self.outer_name.module is None and self.inner_name.module is None)
        if self.eq_sign is not None or self.default_expression is not None:
            assert(self.eq_sign is not None and self.default_expression is not None)
@dataclass
class Annotation:
    """An @name annotation with optional parenthesized arguments."""
    at_sign: Tuple[int, int]
    name: Name
    parentheses_line_pos: Optional[Tuple[Tuple[int, int], Tuple[int, int]]]
    arguments: List[Name]
# Declarations
@dataclass
class ConstantDeclaration(Statement):
    """A `let` declaration; must carry a type, an initializer, or both,
    each with its matching punctuation token (checked below).
    NOTE: the checks use `assert`, so they vanish under `python -O`."""
    let_keyword: Tuple[int, int]
    name: Name
    colon: Optional[Tuple[int, int]]
    type_: Optional[Type]
    eq_sign: Optional[Tuple[int, int]]
    expression: Optional[Expression]
    annotations: List[Annotation] = field(default_factory=list)
    def __post_init__(self):
        assert(self.name.module is None)
        if self.type_ is None:
            assert(self.colon is None)
            assert(self.eq_sign is not None and self.expression is not None)
        elif self.expression is None:
            assert(self.eq_sign is None)
            assert(self.colon is not None and self.type_ is not None)
        else:
            assert(self.colon is not None or self.eq_sign is not None)
@dataclass
class InterfaceDeclaration(Statement):
    """An interface declaration: optional parameters (iff parenthesized),
    an optional `is`-operator position requiring interfaces, and a body."""
    interface_keyword: Tuple[int, int]
    name: Name
    parentheses_line_pos: Optional[Tuple[Tuple[int, int], Tuple[int, int]]]
    parameters: List[ParameterDeclaration]
    is_operator_line_pos: Optional[Tuple[int, int]]
    interfaces: List[Type]
    body: List[Statement]
    def __post_init__(self):
        assert(self.name.module is None)
        if self.is_operator_line_pos is not None:
            assert(self.interfaces)
        if self.parentheses_line_pos is not None:
            assert(self.parameters)
@dataclass
class FunctionPrototypeDeclaration(Statement):
    """A `fun` signature (arguments and return type) without a body."""
    fun_keyword: Tuple[int, int]
    name: Name
    parentheses_line_pos: Tuple[Tuple[int, int], Tuple[int, int]]
    arguments: List[ArgumentDeclaration]
    arrow_line_pos: Tuple[int, int]
    return_type: Type
    def __post_init__(self):
        assert(self.name.module is None)
@dataclass
class FieldDeclaration(Statement):
    """A typed field with an optional initializer; an `=` token requires
    an initializer expression."""
    name: Name
    type_: Type
    eq_sign: Optional[Tuple[int, int]]
    expression: Optional[Expression]
    def __post_init__(self):
        assert(self.name.module is None)
        if self.eq_sign is not None:
            assert(self.expression is not None)
@dataclass
class StructDeclaration(Statement):
    """A struct declaration; same shape and invariants as
    InterfaceDeclaration but introduced by the `struct` keyword."""
    struct_keyword: Tuple[int, int]
    name: Name
    parentheses_line_pos: Optional[Tuple[Tuple[int, int], Tuple[int, int]]]
    parameters: List[ParameterDeclaration]
    is_operator_line_pos: Optional[Tuple[int, int]]
    interfaces: List[Type]
    body: List[Statement]
    def __post_init__(self):
        assert(self.name.module is None)
        if self.is_operator_line_pos is not None:
            assert(self.interfaces)
        if self.parentheses_line_pos is not None:
            assert(self.parameters)
@dataclass
class FunctionDeclaration(Statement):
    """A full `fun` declaration: signature plus a statement body."""
    fun_keyword: Tuple[int, int]
    name: Name
    parentheses_line_pos: Tuple[Tuple[int, int], Tuple[int, int]]
    arguments: List[ArgumentDeclaration]
    arrow_line_pos: Tuple[int, int]
    return_type: Type
    body: List[Statement]
    def __post_init__(self):
        assert(self.name.module is None)
@dataclass
class Return(Statement):
    """A `return expression` statement."""
    line_pos: Tuple[int, int]
    expression: Expression
@dataclass
class Reassignment(Statement):
    """An assignment to an existing target: left target, the operator's
    position and spelling, and the right-hand expression."""
    left: Expression
    operator_line_pos: Tuple[int, int]
    operator: str
    right: Expression
| {
"repo_name": "adrian-lang/adrian",
"path": "adrian-cpp-compiler-in-py/adrian_cpp_py/adrian_ast.py",
"copies": "1",
"size": "7494",
"license": "bsd-3-clause",
"hash": 8281820577030127000,
"line_mean": 22.6403785489,
"line_max": 84,
"alpha_frac": 0.6817453963,
"autogenerated": false,
"ratio": 3.677134445534838,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9858309574488074,
"avg_score": 0.00011405346935265604,
"num_lines": 317
} |
# Adrian Rosebrock CV boilerplate
# import the necessary packages
from django.views.decorators.csrf import csrf_exempt
from django.http import JsonResponse
import numpy as np
import urllib
import cv2
import base64
@csrf_exempt
def detection(request):
    """Django view: detect ripe/unripe coffee berries in an image.

    Accepts a POST carrying either an "image" file upload or a "url"
    form field, thresholds the BGR image for red (ripe) and green
    (unripe) ranges, circles and labels each blob whose enclosing-circle
    radius is in (15, 80], and returns JSON with the annotated image
    base64-encoded under "image".

    NOTE(review): cv2.findContours returning 3 values is the OpenCV 3.x
    API (4.x returns 2), and base64.b64encode returns bytes on Python 3,
    which JsonResponse cannot serialize -- this targets Python 2 with
    OpenCV 3; confirm before porting.
    """
    # initialize the data dictionary to be returned by the request
    data = {"success": False}
    # check to see if this is a post request
    if request.method == "POST":
        # check to see if an image was uploaded
        if request.FILES.get("image", None) is not None:
            # grab the uploaded image
            image = _grab_image(stream=request.FILES["image"])
        # otherwise, assume that a URL was passed in
        else:
            # grab the URL from the request
            url = request.POST.get("url", None)
            # if the URL is None, then return an error
            if url is None:
                data["error"] = "No URL provided."
                return JsonResponse(data)
            # load the image and convert
            image = _grab_image(url=url)
        # START WRAPPING OF COMPUTER VISION APP
        # v1 = useless
        # Insert code here to process the image and update
        # img_grey = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        # Otsu's thresholding after Gaussian filtering
        # blur = cv2.GaussianBlur(img_grey, (5, 5), 0)
        # blur = cv2.bilateralFilter(img_grey, 5,200,200)
        # retval,th3 = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
        # th3 = cv2.adaptiveThreshold(blur, 255, cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY, 11, 2)
        # v1 ends here
        # v2 start here
        # Colour bounds, applied directly in BGR space (no HSV conversion).
        red_upper = np.array([207, 128, 255], np.uint8)
        red_lower = np.array([0, 00, 159], np.uint8)
        green_upper = np.array([40, 171, 139], np.uint8)
        green_lower = np.array([12, 38, 12], np.uint8)
        # convert to HSV if we want to use video as input
        # hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
        # Construct mask for the ripe one; erode+dilate removes speckle.
        mask = cv2.inRange(image, red_lower, red_upper)
        mask = cv2.erode(mask, None, iterations=2)
        mask = cv2.dilate(mask, None, iterations=2)
        green_mask = cv2.inRange(image, green_lower, green_upper)
        green_mask = cv2.erode(green_mask, None, iterations=2)
        green_mask = cv2.dilate(green_mask, None, iterations=2)
        # Final Step
        # Contour drawing: circle and label each plausible-sized blob.
        im2, contours, hierarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        for c in contours:
            (x, y), radius = cv2.minEnclosingCircle(c)
            center = (int(x), int(y))
            radius = int(radius)
            if (radius > 15) and (radius <= 80):
                cv2.circle(image, center, 1, (0, 255, 0), 2)
                cv2.putText(image, "Ripe", (int(x), int(y)), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)
        im2, contours, hierarchy = cv2.findContours(green_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        for c in contours:
            (x, y), radius = cv2.minEnclosingCircle(c)
            center = (int(x), int(y))
            radius = int(radius)
            if (radius > 15) and (radius <= 80):
                cv2.circle(image, center, 1, (0, 255, 0), 2)
                cv2.putText(image, "Not Ripe", (int(x), int(y)), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)
        # the `data` dictionary with your results
        retval, buffer = cv2.imencode('.jpg', image)
        data["image"] = base64.b64encode(buffer)
        # END WRAPPING OF COMPUTER VISION APP
        # update the data dictionary
        data["success"] = True
    # return a JSON response
    # data = json.dumps(data)
    return JsonResponse(data)
def _grab_image(path=None, stream=None, url=None):
    """Load an image as an OpenCV array from a disk path, an uploaded
    file-like stream, or a URL; exactly one source should be given.

    NOTE(review): the path branch reads with flag 0 (grayscale) while
    the stream/url branches decode with IMREAD_COLOR -- confirm this
    asymmetry is intended. `urllib.urlopen` is the Python 2 API.
    """
    # if the path is not None, then load the image from disk
    if path is not None:
        image = cv2.imread(path, 0)
    # otherwise, the image does not reside on disk
    else:
        # if the URL is not None, then download the image
        if url is not None:
            resp = urllib.urlopen(url)
            data = resp.read()
        # if the stream is not None, then the image has been uploaded
        elif stream is not None:
            data = stream.read()
        # convert the image to a NumPy array and then read it into
        # OpenCV format
        image = np.asarray(bytearray(data), dtype="uint8")
        image = cv2.imdecode(image, cv2.IMREAD_COLOR)
    # return the image
    return image
| {
"repo_name": "RoasteryHub/lavie-selekopi",
"path": "KopiSelection/views.py",
"copies": "1",
"size": "4572",
"license": "mit",
"hash": -7081262267436723000,
"line_mean": 35.2857142857,
"line_max": 115,
"alpha_frac": 0.5962379703,
"autogenerated": false,
"ratio": 3.450566037735849,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9542748818581298,
"avg_score": 0.0008110378909100697,
"num_lines": 126
} |
# Adrian Rosebrock Gradient descent with Python
# http://www.pyimagesearch.com/2016/10/10/gradient-descent-with-python/
# import the necessary packages
import matplotlib.pyplot as plt
from sklearn.datasets.samples_generator import make_blobs
import numpy as np
import argparse
def sigmoid_activation(x):
    """Sigmoid activation: map a real scalar or numpy array into (0, 1)
    via 1 / (1 + e^-x), elementwise for arrays."""
    denominator = 1 + np.exp(-x)
    return 1.0 / denominator
# construct the argument parse and parse the arguments
# NOTE(review): --epochs is declared type=float; np.arange below accepts
# that, but an int type would be cleaner for an epoch count.
ap = argparse.ArgumentParser()
ap.add_argument("-e", "--epochs", type=float, default=100,
    help="# of epochs")
ap.add_argument("-a", "--alpha", type=float, default=0.01,
    help="learning rate")
args = vars(ap.parse_args())
# generate a 2-class classification problem with 250 data points,
# where each data point is a 2D feature vector
(X, y) = make_blobs(n_samples=250, n_features=2, centers=2,
    cluster_std=1.05, random_state=20)
# insert a column of 1's as the first entry in the feature
# vector -- this is a little trick that allows us to treat
# the bias as a trainable parameter *within* the weight matrix
# rather than an entirely separate variable
X = np.c_[np.ones((X.shape[0])), X]
# initialize our weight matrix such it has the same number of
# columns as our input features
print("[INFO] starting training...")
W = np.random.uniform(size=(X.shape[1],))
# initialize a list to store the loss value for each epoch
lossHistory = []
# loop over the desired number of epochs
for epoch in np.arange(0, args["epochs"]):
    # take the dot product between our features `X` and the
    # weight matrix `W`, then pass this value through the
    # sigmoid activation function, thereby giving us our
    # predictions on the dataset
    preds = sigmoid_activation(X.dot(W))
    # now that we have our predictions, we need to determine
    # our `error`, which is the difference between our predictions
    # and the true values
    error = preds - y
    # given our `error`, we can compute the total loss value as
    # the sum of squared loss -- ideally, our loss should
    # decrease as we continue training
    loss = np.sum(error ** 2)
    lossHistory.append(loss)
    print("[INFO] epoch #{}, loss={:.7f}".format(epoch + 1, loss))
    # the gradient update is therefore the dot product between
    # the transpose of `X` and our error, scaled by the total
    # number of data points in `X`
    gradient = X.T.dot(error) / X.shape[0]
    # in the update stage, all we need to do is nudge our weight
    # matrix in the opposite direction of the gradient (hence the
    # term "gradient descent") by taking a small step towards a
    # set of "more optimal" parameters
    W += -args["alpha"] * gradient
# to demonstrate how to use our weight matrix as a classifier,
# let's look over our a sample of training examples
for i in np.random.choice(250, 10):
    # compute the prediction by taking the dot product of the
    # current feature vector with the weight matrix W, then
    # passing it through the sigmoid activation function
    activation = sigmoid_activation(X[i].dot(W))
    # the sigmoid function is defined over the range y=[0, 1],
    # so we can use 0.5 as our threshold -- if `activation` is
    # below 0.5, it's class `0`; otherwise it's class `1`
    label = 0 if activation < 0.5 else 1
    # show our output classification
    print("activation={:.4f}; predicted_label={}, true_label={}".format(
        activation, label, y[i]))
# compute the line of best fit by setting the sigmoid function
# to 0 and solving for X2 in terms of X1
# NOTE(review): X still contains the bias column plus both features, so
# plt.plot(X, Y) below plots Y against every column of X -- confirm this
# reproduces the intended decision-boundary figure.
Y = (-W[0] - (W[1] * X)) / W[2]
# plot the original data along with our line of best fit
plt.figure()
plt.scatter(X[:, 1], X[:, 2], marker="o", c=y)
plt.plot(X, Y, "r-")
# construct a figure that plots the loss over time
fig = plt.figure()
plt.plot(np.arange(0, args["epochs"]), lossHistory)
fig.suptitle("Training Loss")
plt.xlabel("Epoch #")
plt.ylabel("Loss")
plt.show() | {
"repo_name": "mbayon/TFG-MachineLearning",
"path": "Gradient-Descent-Roosebrock/gradient-descent-rosebrock.py",
"copies": "1",
"size": "3811",
"license": "mit",
"hash": -6732967676216497000,
"line_mean": 35.3047619048,
"line_max": 71,
"alpha_frac": 0.7173970087,
"autogenerated": false,
"ratio": 3.39964317573595,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.461704018443595,
"avg_score": null,
"num_lines": null
} |
# A driver for rendering 2D images using the FijiBento alignment project
# The input is a directory that contains image files (tilespecs) where each file is of a single section,
# and the output is a 2D montage of these sections
#
# requires:
# - java (executed from the command line)
# -
import sys
import os
import argparse
import json
import utils
from render_2d import render_2d
from normalize_coordinates import normalize_coordinates
# Command line parser
parser = argparse.ArgumentParser(description='A driver that does a 2D rendering of tilespec images.')
parser.add_argument('tiles_fname', metavar='tiles_fname', type=str,
                    help='a tile_spec (json) file that contains a single section to be rendered')
parser.add_argument('-w', '--workspace_dir', type=str,
                    help='a directory where the output files of the different stages will be kept (default: ./2d_render_workdir)',
                    default='./2d_render_workdir')
parser.add_argument('-o', '--output_fname', type=str,
                    help='the output file (default: ./[tiles_fname].tif)',
                    default=None)
parser.add_argument('-j', '--jar_file', type=str,
                    help='the jar file that includes the render (default: ../target/render-0.0.1-SNAPSHOT.jar)',
                    default='../target/render-0.0.1-SNAPSHOT.jar')
parser.add_argument('-t', '--threads_num', type=int,
                    help='the number of threads to use (default: number of cores in the system)',
                    default=None)
args = parser.parse_args()
# Parenthesized single-argument print works on both Python 2 and 3.
print(args)
utils.create_dir(args.workspace_dir)
norm_dir = os.path.join(args.workspace_dir, "normalized")
utils.create_dir(norm_dir)
tiles_fname_basename = os.path.basename(args.tiles_fname)
tiles_fname_prefix = os.path.splitext(tiles_fname_basename)[0]
# Normalize the json file (skipped when a normalized copy already exists)
norm_json = os.path.join(norm_dir, tiles_fname_basename)
if not os.path.exists(norm_json):
    normalize_coordinates(args.tiles_fname, norm_dir, args.jar_file)
# Render the normalized json file
out_fname = args.output_fname
if out_fname is None:
    # BUGFIX: the original passed None straight to os.path.exists (a
    # TypeError); honor the default promised in the CLI help instead.
    out_fname = './{0}.tif'.format(tiles_fname_prefix)
if not os.path.exists(out_fname):
    render_2d(norm_json, out_fname, -1, args.jar_file, args.threads_num)
| {
"repo_name": "Rhoana/rh_aligner",
"path": "old/2d_render_driver.py",
"copies": "1",
"size": "2202",
"license": "mit",
"hash": -2138760151976209700,
"line_mean": 36.3220338983,
"line_max": 130,
"alpha_frac": 0.6825613079,
"autogenerated": false,
"ratio": 3.627677100494234,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9693127039494143,
"avg_score": 0.023422273780018415,
"num_lines": 59
} |
# A driver for rendering 2D images using the FijiBento alignment project
# The input is a tilespec (json) file of a single section,
# and the output is a directory with squared tiles of the 2D montage of the sections
#
# requires:
# - java (executed from the command line)
# -
import sys
import os
import argparse
import json
import utils
from render_tiles_2d import render_tiles_2d
from normalize_coordinates import normalize_coordinates
from create_zoomed_tiles import create_zoomed_tiles
# Command line parser
parser = argparse.ArgumentParser(description='A driver that does a 2D rendering of tilespec images.')
parser.add_argument('tiles_fname', metavar='tiles_fname', type=str,
                    help='a tile_spec (json) file that contains a single section to be rendered')
parser.add_argument('-w', '--workspace_dir', type=str,
                    help='a directory where the output files of the different stages will be kept (default: ./2d_render_workdir)',
                    default='./2d_render_workdir')
parser.add_argument('-o', '--output_dir', type=str,
                    help='the output directory (default: ./output_tiles)',
                    default='./output_tiles')
parser.add_argument('-j', '--jar_file', type=str,
                    help='the jar file that includes the render (default: ../target/render-0.0.1-SNAPSHOT.jar)',
                    default='../target/render-0.0.1-SNAPSHOT.jar')
parser.add_argument('-t', '--threads_num', type=int,
                    help='the number of threads to use (default: number of cores in the system)',
                    default=None)
parser.add_argument('-s', '--tile_size', type=int,
                    help='the size (square side) of each tile (default: 512)',
                    default=512)
parser.add_argument('--avoid_mipmaps', action="store_true",
                    help='Do not create mipmaps after the full scale tiling')
parser.add_argument('-b', '--blend_type', type=str,
                    help='the mosaics blending type',
                    default=None)
parser.add_argument('--output_type', type=str,
                    help='The output type format',
                    default='jpg')
parser.add_argument('--output_pattern', type=str,
                    help='The output file name pattern where "%row%col" will be replaced by "_tr[row]-tc[rol]_" with the row and column numbers',
                    default=None)
args = parser.parse_args()
# NOTE: Python 2 print statement; this script targets Python 2.
print args
utils.create_dir(args.workspace_dir)
norm_dir = os.path.join(args.workspace_dir, "normalized")
utils.create_dir(norm_dir)
utils.create_dir(args.output_dir)
tiles_fname_basename = os.path.basename(args.tiles_fname)
tiles_fname_prefix = os.path.splitext(tiles_fname_basename)[0]
# Normalize the json file (skipped when a normalized copy already exists)
norm_json = os.path.join(norm_dir, tiles_fname_basename)
if not os.path.exists(norm_json):
    normalize_coordinates(args.tiles_fname, norm_dir, args.jar_file)
# Render the normalized json file into full-resolution tiles under "<output_dir>/0"
out_pattern = args.output_pattern
if out_pattern is None:
    # "%rowcol" is substituted by render_tiles_2d with the tile row/column.
    out_pattern = '{}%rowcolmontaged'.format(tiles_fname_prefix)
out_0_dir = os.path.join(args.output_dir, "0")
if not os.path.exists(out_0_dir):
    render_tiles_2d(norm_json, out_0_dir, args.tile_size, args.output_type,
                    args.jar_file, out_pattern, args.blend_type, args.threads_num)
# create the zoomed tiles (mipmap pyramid levels 1..N), unless disabled
if not args.avoid_mipmaps:
    out_1_dir = os.path.join(args.output_dir, "1")
    if not os.path.exists(out_1_dir):
        create_zoomed_tiles(args.output_dir, True, args.threads_num)
| {
"repo_name": "Rhoana/rh_aligner",
"path": "old/2d_render_tiles_driver.py",
"copies": "1",
"size": "3533",
"license": "mit",
"hash": -124119443825206060,
"line_mean": 40.5647058824,
"line_max": 145,
"alpha_frac": 0.658080951,
"autogenerated": false,
"ratio": 3.561491935483871,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4719572886483871,
"avg_score": null,
"num_lines": null
} |
# A driver for running 2D alignment using the FijiBento alignment project
# The input is a directory that contains image files (tiles), and the output is a 2D montage of these files
# Activates ComputeSIFTFeaturs -> MatchSIFTFeatures -> OptimizeMontageTransfrom
# and the result can then be rendered if needed
#
# requires:
# - java (executed from the command line)
# -
import sys
import os
import argparse
import json
import itertools
from bounding_box import BoundingBox
import time
from filter_tiles import filter_tiles
from create_sift_features_cv2 import create_sift_features
from create_surf_features_cv2 import create_surf_features
#from match_sift_features import match_sift_features
from match_sift_features_and_filter_cv2 import match_single_sift_features_and_filter
from json_concat import json_concat
from optimize_2d_mfovs import optimize_2d_mfovs
from utils import write_list_to_file
def load_tilespecs(tile_file):
    """Read a tile-spec JSON file and return the parsed content.

    Accepts either a plain filesystem path or a 'file://' URL; the URL
    scheme prefix is stripped before opening the file.
    """
    local_path = tile_file.replace('file://', '')
    with open(local_path, 'r') as fh:
        return json.load(fh)
# Command line parser
# NOTE: this is a Python 2 script (print statements below).
parser = argparse.ArgumentParser(description='A driver that does a 2D affine alignment of images.')
parser.add_argument('tiles_fname', metavar='tiles_json', type=str,
                    help='a tile_spec file that contains all the images to be aligned in json format')
parser.add_argument('-w', '--workspace_dir', type=str,
                    help='a directory where the output files of the different stages will be kept (default: current directory)',
                    default='.')
parser.add_argument('-o', '--output_file_name', type=str,
                    help='the file that includes the output to be rendered in json format (default: output.json)',
                    default='output.json')
parser.add_argument('-c', '--conf_file_name', type=str,
                    help='the configuration file with the parameters for each step of the alignment process in json format (uses default parameters, if )',
                    default=None)
parser.add_argument('-t', '--threads_num', type=int,
                    help='the number of threads to use (default: 1)',
                    default=None)
args = parser.parse_args()
print args
# create a workspace directory if not found
if not os.path.exists(args.workspace_dir):
    os.makedirs(args.workspace_dir)
# Prefix (input file name without extension) used to name all intermediates.
tiles_fname_prefix = os.path.splitext(os.path.basename(args.tiles_fname))[0]
# read tile spec and find the features for each tile
tilespecs = load_tilespecs(args.tiles_fname)
# Maps imageUrl -> path of its features file (filled by the loop below).
all_features = {}
# Paths of the per-pair match json files produced for overlapping tiles.
all_matched_features = []
start_time = time.time()
# Compute SIFT features for every tile; existing outputs are reused so the
# run is resumable.
for i, ts in enumerate(tilespecs):
    imgurl = ts["mipmapLevels"]["0"]["imageUrl"]
    # Tile identifier: image file name without its extension.
    tile_fname = os.path.basename(imgurl).split('.')[0]
    # create the features of these tiles
    features_json = os.path.join(args.workspace_dir, "{0}_sifts_{1}.hdf5".format(tiles_fname_prefix, tile_fname))
    if not os.path.exists(features_json):
        create_sift_features(args.tiles_fname, features_json, i, args.conf_file_name)
    all_features[imgurl] = features_json
print 'Features computation took {0:1.4f} seconds'.format(time.time() - start_time)
# read every pair of overlapping tiles, and match their sift features
# TODO: add all tiles to a kd-tree so it will be faster to find overlap between tiles
# iterate over the tiles, and for each tile, find intersecting tiles that overlap,
# and match their features
# Nested loop:
#    for each tile_i in range[0..N):
#        for each tile_j in range[tile_i..N)]
start_time = time.time()
# NOTE(review): `indices` is initialized here but never populated or used.
indices = []
for pair in itertools.combinations(xrange(len(tilespecs)), 2):
    idx1 = pair[0]
    idx2 = pair[1]
    ts1 = tilespecs[idx1]
    ts2 = tilespecs[idx2]
    # if the two tiles intersect, match them
    bbox1 = BoundingBox.fromList(ts1["bbox"])
    bbox2 = BoundingBox.fromList(ts2["bbox"])
    if bbox1.overlap(bbox2):
        imageUrl1 = ts1["mipmapLevels"]["0"]["imageUrl"]
        imageUrl2 = ts2["mipmapLevels"]["0"]["imageUrl"]
        tile_fname1 = os.path.basename(imageUrl1).split('.')[0]
        tile_fname2 = os.path.basename(imageUrl2).split('.')[0]
        print "Matching features of tiles: {0} and {1}".format(imageUrl1, imageUrl2)
        index_pair = [idx1, idx2]
        match_json = os.path.join(args.workspace_dir, "{0}_sift_matches_{1}_{2}.json".format(tiles_fname_prefix, tile_fname1, tile_fname2))
        # match the features of overlapping tiles
        # (skipped if the output already exists -> resumable runs)
        if not os.path.exists(match_json):
            match_single_sift_features_and_filter(args.tiles_fname, all_features[imageUrl1], all_features[imageUrl2], match_json, index_pair, conf_fname=args.conf_file_name)
        all_matched_features.append(match_json)
print 'features matching took {0:1.4f} seconds'.format(time.time() - start_time)
# Create a single file that lists all tilespecs and a single file that lists all pmcc matches (the os doesn't support a very long list)
matches_list_file = os.path.join(args.workspace_dir, "all_matched_sifts_files.txt")
write_list_to_file(matches_list_file, all_matched_features)
# optimize the 2d layer montage
# (skipped entirely if the output file already exists)
if not os.path.exists(args.output_file_name):
    print "Optimizing section in tilespec: {}".format(args.tiles_fname)
    start_time = time.time()
    optimize_2d_mfovs(args.tiles_fname, matches_list_file, args.output_file_name, args.conf_file_name)
    print '2D Optimization took {0:1.4f} seconds'.format(time.time() - start_time)
| {
"repo_name": "Rhoana/rh_aligner",
"path": "old/2d_align_affine_driver.py",
"copies": "1",
"size": "5463",
"license": "mit",
"hash": 7823267391598086000,
"line_mean": 40.3863636364,
"line_max": 173,
"alpha_frac": 0.6979681494,
"autogenerated": false,
"ratio": 3.4597846738442053,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9599076089619037,
"avg_score": 0.011735346725033489,
"num_lines": 132
} |
# A driver for running 3D alignment using the FijiBento alignment project
# The input is two tile spec files with their 2d alignment
# and each file has also a z axis (layer) index, and the output is a tile spec after 3D alignment
# Activates ComputeLayerSIFTFeaturs -> MatchLayersSIFTFeatures -> FilterRansac -> OptimizeLayersAffine
# and the result can then be rendered if needed
#
# requires:
# - java (executed from the command line)
# -
import sys
import os
import argparse
import json
import glob
#from filter_tiles import filter_tiles
#from create_sift_features import create_sift_features
from create_meshes import create_meshes
from create_layer_sift_features import create_layer_sift_features
from match_layers_sift_features import match_layers_sift_features
from filter_ransac import filter_ransac
from optimize_layers_affine import optimize_layers_affine
from utils import path2url, write_list_to_file, create_dir, read_layer_from_file, parse_range, read_conf_args
from bounding_box import BoundingBox
# Command line parser
# NOTE: Python 2 script (print statements, sys.maxint).
parser = argparse.ArgumentParser(description='A driver that does a 3D affine alignment of images.')
parser.add_argument('input_dir', metavar='input_dir', type=str,
                    help='a directory that contains all the tile_spec files of all sections (each section already aligned and in a single tile_spec file) json format')
parser.add_argument('-w', '--workspace_dir', type=str,
                    help='a directory where the output files of the different stages will be kept (default: ./work_dir)',
                    default='./work_dir')
parser.add_argument('-o', '--output_dir', type=str,
                    help='the directory where the output to be rendered in json format files will be stored (default: ./output)',
                    default='./output')
parser.add_argument('-j', '--jar_file', type=str,
                    help='the jar file that includes the render (default: ../target/render-0.0.1-SNAPSHOT.jar)',
                    default='../target/render-0.0.1-SNAPSHOT.jar')
# the default bounding box is as big as the image can be
parser.add_argument('-b', '--bounding_box', type=str,
                    help='the bounding box of the part of image that needs to be aligned format: "from_x to_x from_y to_y" (default: all tiles)',
                    default='{0} {1} {2} {3}'.format((-sys.maxint - 1), sys.maxint, (-sys.maxint - 1), sys.maxint))
parser.add_argument('-d', '--max_layer_distance', type=int,
                    help='the largest distance between two layers to be matched (default: 1)',
                    default=1)
parser.add_argument('-c', '--conf_file_name', type=str,
                    help='the configuration file with the parameters for each step of the alignment process in json format (uses default parameters, if )',
                    default=None)
parser.add_argument('--auto_add_model', action="store_true",
                    help='automatically add the identity model, if a model is not found')
parser.add_argument('--from_layer', type=int,
                    help='the layer to start from (inclusive, default: the first layer in the data)',
                    default=-1)
parser.add_argument('--to_layer', type=int,
                    help='the last layer to render (inclusive, default: the last layer in the data)',
                    default=-1)
parser.add_argument('-s', '--skip_layers', type=str,
                    help='the range of layers (sections) that will not be processed e.g., "2,3,9-11,18" (default: no skipped sections)',
                    default=None)
parser.add_argument('-M', '--manual_match', type=str, nargs="*",
                    help='pairs of layers (sections) that will need to be manually aligned (not part of the max_layer_distance) e.g., "2:10,7:21" (default: none)',
                    default=None)
args = parser.parse_args()
print args
# create a workspace directory if not found
create_dir(args.workspace_dir)
# Optional per-step configuration file (None -> library defaults).
# Direct assignment replaces the original redundant and non-idiomatic
# `if not args.conf_file_name is None:` conditional (PEP 8 / E714);
# behavior is identical since the argparse default is already None.
conf = args.conf_file_name
#after_bbox_dir = os.path.join(args.workspace_dir, "after_bbox")
#create_dir(after_bbox_dir)
# Per-stage output directories inside the workspace; each stage's outputs
# are reused on re-runs (existence checks in the loops below).
sifts_dir = os.path.join(args.workspace_dir, "sifts")
create_dir(sifts_dir)
matched_sifts_dir = os.path.join(args.workspace_dir, "matched_sifts")
create_dir(matched_sifts_dir)
after_ransac_dir = os.path.join(args.workspace_dir, "after_ransac")
create_dir(after_ransac_dir)
# Bookkeeping: layer number -> produced artifacts.
all_layers = []
layer_to_sifts = {}
layer_to_ts_json = {}
layer_to_json_prefix = {}
layer_meshes_dir = {}
# Layers (section numbers) the user asked to skip, e.g. "2,3,9-11".
skipped_layers = parse_range(args.skip_layers)
bbox_suffix = "_bbox"
# Scan every section tilespec, filter by layer range / skip list, and make
# sure its layer-level SIFT features exist.
for tiles_fname in glob.glob(os.path.join(args.input_dir, '*.json')):
    tiles_fname_prefix = os.path.splitext(os.path.basename(tiles_fname))[0]
    # read the layer from the file
    layer = read_layer_from_file(tiles_fname)
    # -1 means "no bound" for both --from_layer and --to_layer.
    if args.from_layer != -1:
        if layer < args.from_layer:
            continue
    if args.to_layer != -1:
        if layer > args.to_layer:
            continue
    if layer in skipped_layers:
        continue
    all_layers.append(layer)
    # update the bbox of each section
    #after_bbox_json = os.path.join(after_bbox_dir, "{0}{1}.json".format(tiles_fname_prefix, bbox_suffix))
    #if not os.path.exists(after_bbox_json):
    #    print "Updating bounding box of {0}".format(tiles_fname_prefix)
    #    update_bbox(args.jar_file, tiles_fname, out_dir=after_bbox_dir, out_suffix=bbox_suffix)
    #bbox = read_bbox(after_bbox_json)
    # create the sift features of these tiles
    # (existence check makes this resumable)
    print "Computing sift features of {0}".format(tiles_fname_prefix)
    sifts_json = os.path.join(sifts_dir, "{0}_sifts.json".format(tiles_fname_prefix))
    if not os.path.exists(sifts_json):
        #create_layer_sift_features(after_bbox_json, sifts_json, args.jar_file, conf)
        create_layer_sift_features(tiles_fname, sifts_json, args.jar_file, conf=conf)
    layer_to_sifts[layer] = sifts_json
    layer_to_json_prefix[layer] = tiles_fname_prefix
    #layer_to_ts_json[layer] = after_bbox_json
    layer_to_ts_json[layer] = tiles_fname
# Verify that all the layers are there and that there are no holes
all_layers.sort()
for i in range(len(all_layers) - 1):
    if all_layers[i + 1] - all_layers[i] != 1:
        # A gap is only an error if the missing layer was not explicitly
        # skipped by the user.
        for l in range(all_layers[i] + 1, all_layers[i + 1]):
            if l not in skipped_layers:
                print "Error missing layer {} between: {} and {}".format(l, all_layers[i], all_layers[i + 1])
                sys.exit(1)
print "Found the following layers: {0}".format(all_layers)
print "All json files prefix are: {0}".format(layer_to_json_prefix)
# Set the middle layer as a fixed layer
# (the optimizer anchors this layer and moves all others relative to it)
fixed_layers = [ all_layers[len(all_layers)//2] ]
# Handle manual matches
# manual_matches = {}
# if args.manual_match is not None:
#     for match in args.manual_match:
#         # parse the manual match string
#         match_layers = [int(l) for l in match.split(':')]
#         # add a manual match between the lower layer and the higher layer
#         if min(match_layers) not in manual_matches.keys():
#             manual_matches[min(match_layers)] = []
#         manual_matches[min(match_layers)].append(max(match_layers))
# Match and optimize each two layers in the required distance
all_matched_sifts_files = []
all_model_files = []
# For each layer i, match it against the next `max_layer_distance`
# non-skipped layers (i+1, i+2, ...).
# NOTE(review): `ei` is the index into all_layers while `i` is the layer
# number; the bound check uses `ei + j` but the dict lookups use `i + j`.
# These only agree when the surviving layers are consecutive -- verify
# behavior when --from_layer/--to_layer or skips create numbering gaps.
for ei, i in enumerate(all_layers):
#    layers_to_process = min(i + args.max_layer_distance + 1, all_layers[-1] + 1) - i
#    to_range = range(1, layers_to_process)
#    # add manual matches
#    if i in manual_matches.keys():
#        for second_layer in manual_matches[i]:
#            diff_layers = second_layer - i
#            if diff_layers not in to_range:
#                to_range.append(diff_layers)
# Process all matched layers
#    print "layers_to_process {0}".format(to_range[-1])
    matched_after_layers = 0
    j = 1
    while matched_after_layers < args.max_layer_distance:
        if ei + j >= len(all_layers):
            break
        # Skipped layers do not count toward the matched-layer quota.
        if i in skipped_layers or (i+j) in skipped_layers:
            print "Skipping matching of layers {} and {}, because at least one of them should be skipped".format(i, i+j)
            j += 1
            continue
        fname1_prefix = layer_to_json_prefix[i]
        fname2_prefix = layer_to_json_prefix[i + j]
        # match the features of neighboring tiles
        # (existence checks make both steps resumable)
        match_json = os.path.join(matched_sifts_dir, "{0}_{1}_sift_matches.json".format(fname1_prefix, fname2_prefix))
        if not os.path.exists(match_json):
            print "Matching layers' sifts: {0} and {1}".format(i, i + j)
            match_layers_sift_features(layer_to_ts_json[i], layer_to_sifts[i], \
                layer_to_ts_json[i + j], layer_to_sifts[i + j], match_json, args.jar_file, conf)
        all_matched_sifts_files.append(match_json)
        # filter and ransac the matched points
        ransac_fname = os.path.join(after_ransac_dir, "{0}_{1}_filter_ransac.json".format(fname1_prefix, fname2_prefix))
        if not os.path.exists(ransac_fname):
            print "Filter-and-Ransac of layers: {0} and {1}".format(i, i + j)
            filter_ransac(match_json, path2url(layer_to_ts_json[i]), ransac_fname, args.jar_file, conf)
        all_model_files.append(ransac_fname)
        j += 1
        matched_after_layers += 1
# Optimize all layers to a single 3d image
all_ts_files = layer_to_ts_json.values()
create_dir(args.output_dir)
# Write the artifact lists to files (the OS can't handle a very long
# argument list, so the optimizer is given list files instead).
ts_list_file = os.path.join(args.workspace_dir, "all_ts_files.txt")
write_list_to_file(ts_list_file, all_ts_files)
matched_sifts_list_file = os.path.join(args.workspace_dir, "all_matched_sifts_files.txt")
write_list_to_file(matched_sifts_list_file, all_matched_sifts_files)
model_list_file = os.path.join(args.workspace_dir, "all_model_files.txt")
write_list_to_file(model_list_file, all_model_files)
optimize_layers_affine([ ts_list_file ], [ matched_sifts_list_file ], [ model_list_file ],
        fixed_layers, args.output_dir, args.max_layer_distance,
        args.jar_file, conf, args.skip_layers, manual_matches=args.manual_match)
| {
"repo_name": "Rhoana/rh_aligner",
"path": "old/3d_align_affine_driver.py",
"copies": "1",
"size": "10045",
"license": "mit",
"hash": 1061204726325815600,
"line_mean": 42.864628821,
"line_max": 167,
"alpha_frac": 0.6549527128,
"autogenerated": false,
"ratio": 3.3394281914893615,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9445305692754149,
"avg_score": 0.009815042307042351,
"num_lines": 229
} |
"""A dropdown completer widget for the qtconsole."""
from qtconsole.qt import QtCore, QtGui
class CompletionWidget(QtGui.QListWidget):
    """ A widget for GUI tab completion.
    """
    #--------------------------------------------------------------------------
    # 'QObject' interface
    #--------------------------------------------------------------------------
    def __init__(self, console_widget):
        """ Create a completion widget that is attached to the specified Qt
            text edit widget.
        """
        # The console widget's underlying text control hosts the completions.
        text_edit = console_widget._control
        assert isinstance(text_edit, (QtGui.QTextEdit, QtGui.QPlainTextEdit))
        super(CompletionWidget, self).__init__()
        self._text_edit = text_edit
        self.setEditTriggers(QtGui.QAbstractItemView.NoEditTriggers)
        self.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
        self.setSelectionBehavior(QtGui.QAbstractItemView.SelectRows)
        self.setSelectionMode(QtGui.QAbstractItemView.SingleSelection)
        # We need Popup style to ensure correct mouse interaction
        # (dialog would dissappear on mouse click with ToolTip style)
        self.setWindowFlags(QtCore.Qt.Popup)
        self.setAttribute(QtCore.Qt.WA_StaticContents)
        # Save and restore the text edit's focus policy around setting ours;
        # presumably setting NoFocus here can side-effect the proxy's policy
        # in some bindings -- TODO confirm.
        original_policy = text_edit.focusPolicy()
        self.setFocusPolicy(QtCore.Qt.NoFocus)
        text_edit.setFocusPolicy(original_policy)
        # Ensure that the text edit keeps focus when widget is displayed.
        self.setFocusProxy(self._text_edit)
        self.setFrameShadow(QtGui.QFrame.Plain)
        self.setFrameShape(QtGui.QFrame.StyledPanel)
        # Activating (clicking / pressing return on) an item inserts it.
        self.itemActivated.connect(self._complete_current)
    def eventFilter(self, obj, event):
        """ Reimplemented to handle mouse input and to auto-hide when the
            text edit loses focus.
        """
        if obj is self:
            if event.type() == QtCore.QEvent.MouseButtonPress:
                pos = self.mapToGlobal(event.pos())
                target = QtGui.QApplication.widgetAt(pos)
                # A click inside the popup is handled normally; a click
                # anywhere else cancels the completion.
                if (target and self.isAncestorOf(target) or target is self):
                    return False
                else:
                    self.cancel_completion()
        return super(CompletionWidget, self).eventFilter(obj, event)
    def keyPressEvent(self, event):
        key = event.key()
        if key in (QtCore.Qt.Key_Return, QtCore.Qt.Key_Enter,
                   QtCore.Qt.Key_Tab):
            self._complete_current()
        elif key == QtCore.Qt.Key_Escape:
            self.hide()
        elif key in (QtCore.Qt.Key_Up, QtCore.Qt.Key_Down,
                     QtCore.Qt.Key_PageUp, QtCore.Qt.Key_PageDown,
                     QtCore.Qt.Key_Home, QtCore.Qt.Key_End):
            return super(CompletionWidget, self).keyPressEvent(event)
        else:
            # All other keys are forwarded to the text edit so typing
            # continues uninterrupted while the popup is open.
            QtGui.QApplication.sendEvent(self._text_edit, event)
    #--------------------------------------------------------------------------
    # 'QWidget' interface
    #--------------------------------------------------------------------------
    def hideEvent(self, event):
        """ Reimplemented to disconnect signal handlers and event filter.
        """
        super(CompletionWidget, self).hideEvent(event)
        self._text_edit.cursorPositionChanged.disconnect(self._update_current)
        self.removeEventFilter(self)
    def showEvent(self, event):
        """ Reimplemented to connect signal handlers and event filter.
        """
        super(CompletionWidget, self).showEvent(event)
        self._text_edit.cursorPositionChanged.connect(self._update_current)
        self.installEventFilter(self)
    #--------------------------------------------------------------------------
    # 'CompletionWidget' interface
    #--------------------------------------------------------------------------
    def show_items(self, cursor, items):
        """ Shows the completion widget with 'items' at the position specified
            by 'cursor'.
        """
        text_edit = self._text_edit
        point = text_edit.cursorRect(cursor).bottomRight()
        point = text_edit.mapToGlobal(point)
        self.clear()
        self.addItems(items)
        height = self.sizeHint().height()
        screen_rect = QtGui.QApplication.desktop().availableGeometry(self)
        # If the popup would run off the bottom of the screen, place it
        # above the cursor line instead.
        if (screen_rect.size().height() + screen_rect.y() -
                point.y() - height < 0):
            point = text_edit.mapToGlobal(text_edit.cursorRect().topRight())
            point.setY(point.y() - height)
        w = (self.sizeHintForColumn(0) +
             self.verticalScrollBar().sizeHint().width())
        self.setGeometry(point.x(), point.y(), w, height)
        # Remember where completion started so the typed prefix can be
        # recovered later in _current_text_cursor().
        self._start_position = cursor.position()
        self.setCurrentRow(0)
        self.raise_()
        self.show()
    #--------------------------------------------------------------------------
    # Protected interface
    #--------------------------------------------------------------------------
    def _complete_current(self):
        """ Perform the completion with the currently selected item.
        """
        self._current_text_cursor().insertText(self.currentItem().text())
        self.hide()
    def _current_text_cursor(self):
        """ Returns a cursor with text between the start position and the
            current position selected.
        """
        cursor = self._text_edit.textCursor()
        if cursor.position() >= self._start_position:
            cursor.setPosition(self._start_position,
                               QtGui.QTextCursor.KeepAnchor)
        return cursor
    def _update_current(self):
        """ Updates the current item based on the current text.
        """
        # The prefix is everything the user typed since completion started.
        prefix = self._current_text_cursor().selection().toPlainText()
        if prefix:
            items = self.findItems(prefix, (QtCore.Qt.MatchStartsWith |
                                            QtCore.Qt.MatchCaseSensitive))
            if items:
                self.setCurrentItem(items[0])
            else:
                # No candidate matches the typed text any more.
                self.hide()
        else:
            self.hide()
    def cancel_completion(self):
        # Dismiss the popup without inserting anything.
        self.hide()
| {
"repo_name": "nitin-cherian/LifeLongLearning",
"path": "Python/PythonProgrammingLanguage/Encapsulation/encap_env/lib/python3.5/site-packages/qtconsole/completion_widget.py",
"copies": "10",
"size": "6165",
"license": "mit",
"hash": 2845136489873942500,
"line_mean": 38.7741935484,
"line_max": 79,
"alpha_frac": 0.5492295215,
"autogenerated": false,
"ratio": 4.649321266968326,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0006451612903225806,
"num_lines": 155
} |
"""A dropdown completer widget for the qtconsole."""
import os
import sys
from qtpy import QtCore, QtGui, QtWidgets
class CompletionWidget(QtWidgets.QListWidget):
    """ A widget for GUI tab completion.
    """
    #--------------------------------------------------------------------------
    # 'QObject' interface
    #--------------------------------------------------------------------------
    def __init__(self, console_widget):
        """ Create a completion widget that is attached to the specified Qt
            text edit widget.
        """
        # The console widget's underlying text control hosts the completions.
        text_edit = console_widget._control
        assert isinstance(text_edit, (QtWidgets.QTextEdit, QtWidgets.QPlainTextEdit))
        super(CompletionWidget, self).__init__(parent=console_widget)
        self._text_edit = text_edit
        self.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)
        self.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectRows)
        self.setSelectionMode(QtWidgets.QAbstractItemView.SingleSelection)
        # We need Popup style to ensure correct mouse interaction
        # (dialog would dissappear on mouse click with ToolTip style)
        self.setWindowFlags(QtCore.Qt.Popup)
        self.setAttribute(QtCore.Qt.WA_StaticContents)
        # Save and restore the text edit's focus policy around setting ours.
        original_policy = text_edit.focusPolicy()
        self.setFocusPolicy(QtCore.Qt.NoFocus)
        text_edit.setFocusPolicy(original_policy)
        # Ensure that the text edit keeps focus when widget is displayed.
        self.setFocusProxy(self._text_edit)
        self.setFrameShadow(QtWidgets.QFrame.Plain)
        self.setFrameShape(QtWidgets.QFrame.StyledPanel)
        # Activating (clicking / pressing return on) an item inserts it.
        self.itemActivated.connect(self._complete_current)
    def eventFilter(self, obj, event):
        """ Reimplemented to handle mouse input and to auto-hide when the
            text edit loses focus.
        """
        if obj is self:
            if event.type() == QtCore.QEvent.MouseButtonPress:
                pos = self.mapToGlobal(event.pos())
                target = QtWidgets.QApplication.widgetAt(pos)
                # A click inside the popup is handled normally; a click
                # anywhere else cancels the completion.
                if (target and self.isAncestorOf(target) or target is self):
                    return False
                else:
                    self.cancel_completion()
        return super(CompletionWidget, self).eventFilter(obj, event)
    def keyPressEvent(self, event):
        key = event.key()
        if key in (QtCore.Qt.Key_Return, QtCore.Qt.Key_Enter,
                   QtCore.Qt.Key_Tab):
            self._complete_current()
        elif key == QtCore.Qt.Key_Escape:
            self.hide()
        elif key in (QtCore.Qt.Key_Up, QtCore.Qt.Key_Down,
                     QtCore.Qt.Key_PageUp, QtCore.Qt.Key_PageDown,
                     QtCore.Qt.Key_Home, QtCore.Qt.Key_End):
            return super(CompletionWidget, self).keyPressEvent(event)
        else:
            # All other keys are forwarded to the text edit so typing
            # continues uninterrupted while the popup is open.
            QtWidgets.QApplication.sendEvent(self._text_edit, event)
    #--------------------------------------------------------------------------
    # 'QWidget' interface
    #--------------------------------------------------------------------------
    def hideEvent(self, event):
        """ Reimplemented to disconnect signal handlers and event filter.
        """
        super(CompletionWidget, self).hideEvent(event)
        try:
            self._text_edit.cursorPositionChanged.disconnect(self._update_current)
        except TypeError:
            # Disconnecting a signal that is not connected raises TypeError
            # in PyQt -- safe to ignore here.
            pass
        self.removeEventFilter(self)
    def showEvent(self, event):
        """ Reimplemented to connect signal handlers and event filter.
        """
        super(CompletionWidget, self).showEvent(event)
        self._text_edit.cursorPositionChanged.connect(self._update_current)
        self.installEventFilter(self)
    #--------------------------------------------------------------------------
    # 'CompletionWidget' interface
    #--------------------------------------------------------------------------
    def show_items(self, cursor, items, prefix_length=0):
        """ Shows the completion widget with 'items' at the position specified
            by 'cursor'.
        """
        text_edit = self._text_edit
        point = self._get_top_left_position(cursor)
        self.clear()
        # Split the candidates: filesystem paths are displayed relative to
        # their common directory prefix, other completions show only the
        # last dotted component.  The full insertion text is stored in the
        # item's UserRole data either way.
        path_items = []
        for item in items:
            # Check if the item could refer to a file or dir. The replacing
            # of '"' is needed for items on Windows
            if (os.path.isfile(os.path.abspath(item.replace("\"", ""))) or
                    os.path.isdir(os.path.abspath(item.replace("\"", "")))):
                path_items.append(item.replace("\"", ""))
            else:
                list_item = QtWidgets.QListWidgetItem()
                list_item.setData(QtCore.Qt.UserRole, item)
                # Need to split to only show last element of a dot completion
                list_item.setText(item.split(".")[-1])
                self.addItem(list_item)
        common_prefix = os.path.dirname(os.path.commonprefix(path_items))
        for path_item in path_items:
            list_item = QtWidgets.QListWidgetItem()
            list_item.setData(QtCore.Qt.UserRole, path_item)
            if common_prefix:
                text = path_item.split(common_prefix)[-1]
            else:
                text = path_item
            list_item.setText(text)
            self.addItem(list_item)
        height = self.sizeHint().height()
        screen_rect = QtWidgets.QApplication.desktop().availableGeometry(self)
        # If the popup would run off the bottom of the screen, place it
        # above the cursor line instead.
        if (screen_rect.size().height() + screen_rect.y() -
                point.y() - height < 0):
            point = text_edit.mapToGlobal(text_edit.cursorRect().topRight())
            point.setY(point.y() - height)
        w = (self.sizeHintForColumn(0) +
             self.verticalScrollBar().sizeHint().width() +
             2 * self.frameWidth())
        self.setGeometry(point.x(), point.y(), w, height)
        # Move cursor to start of the prefix to replace it
        # when a item is selected
        cursor.movePosition(QtGui.QTextCursor.Left, n=prefix_length)
        self._start_position = cursor.position()
        self.setCurrentRow(0)
        self.raise_()
        self.show()
    #--------------------------------------------------------------------------
    # Protected interface
    #--------------------------------------------------------------------------
    def _get_top_left_position(self, cursor):
        """ Get top left position for this widget.
        """
        point = self._text_edit.cursorRect(cursor).center()
        # Vertical offset heuristic based on the font point size.
        # NOTE(review): the 'darwin' and generic branches compute the same
        # delta -- possibly a leftover from platform-specific tuning.
        point_size = self._text_edit.font().pointSize()
        if sys.platform == 'darwin':
            delta = int((point_size * 1.20) ** 0.98)
        elif os.name == 'nt':
            delta = int((point_size * 1.20) ** 1.05)
        else:
            delta = int((point_size * 1.20) ** 0.98)
        y = delta - (point_size / 2)
        point.setY(point.y() + y)
        point = self._text_edit.mapToGlobal(point)
        return point
    def _complete_current(self):
        """ Perform the completion with the currently selected item.
        """
        # Insert the full completion text stored in UserRole, not the
        # (possibly shortened) display text.
        text = self.currentItem().data(QtCore.Qt.UserRole)
        self._current_text_cursor().insertText(text)
        self.hide()
    def _current_text_cursor(self):
        """ Returns a cursor with text between the start position and the
            current position selected.
        """
        cursor = self._text_edit.textCursor()
        if cursor.position() >= self._start_position:
            cursor.setPosition(self._start_position,
                               QtGui.QTextCursor.KeepAnchor)
        return cursor
    def _update_current(self):
        """ Updates the current item based on the current text and the
            position of the widget.
        """
        # Update widget position
        cursor = self._text_edit.textCursor()
        point = self._get_top_left_position(cursor)
        self.move(point)
        # Update current item
        prefix = self._current_text_cursor().selection().toPlainText()
        if prefix:
            items = self.findItems(prefix, (QtCore.Qt.MatchStartsWith |
                                            QtCore.Qt.MatchCaseSensitive))
            if items:
                self.setCurrentItem(items[0])
            else:
                # No candidate matches the typed text any more.
                self.hide()
        else:
            self.hide()
    def cancel_completion(self):
        # Dismiss the popup without inserting anything.
        self.hide()
| {
"repo_name": "sserrot/champion_relationships",
"path": "venv/Lib/site-packages/qtconsole/completion_widget.py",
"copies": "1",
"size": "8391",
"license": "mit",
"hash": 1386138399002421500,
"line_mean": 38.2102803738,
"line_max": 85,
"alpha_frac": 0.5477297104,
"autogenerated": false,
"ratio": 4.479978643886812,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5527708354286812,
"avg_score": null,
"num_lines": null
} |
"""A dropdown completer widget for the qtconsole."""
# System library imports
from IPython.external.qt import QtCore, QtGui
class CompletionWidget(QtGui.QListWidget):
    """ A widget for GUI tab completion.
    """
    #--------------------------------------------------------------------------
    # 'QObject' interface
    #--------------------------------------------------------------------------
    def __init__(self, console_widget):
        """ Create a completion widget that is attached to the specified Qt
            text edit widget.
        """
        # The console widget's underlying text control hosts the completions.
        text_edit = console_widget._control
        assert isinstance(text_edit, (QtGui.QTextEdit, QtGui.QPlainTextEdit))
        super(CompletionWidget, self).__init__()
        self._text_edit = text_edit
        self.setAttribute(QtCore.Qt.WA_StaticContents)
        # ToolTip-style window: key handling is done via an event filter on
        # the text edit (installed in showEvent) rather than on this widget.
        self.setWindowFlags(QtCore.Qt.ToolTip | QtCore.Qt.WindowStaysOnTopHint)
        # Ensure that the text edit keeps focus when widget is displayed.
        self.setFocusProxy(self._text_edit)
        self.setFrameShadow(QtGui.QFrame.Plain)
        self.setFrameShape(QtGui.QFrame.StyledPanel)
        # Activating (clicking / pressing return on) an item inserts it.
        self.itemActivated.connect(self._complete_current)
    def eventFilter(self, obj, event):
        """ Reimplemented to handle keyboard input and to auto-hide when the
            text edit loses focus.
        """
        # Note: this filter watches the *text edit*, not this widget.
        if obj == self._text_edit:
            etype = event.type()
            if etype == QtCore.QEvent.KeyPress:
                key, text = event.key(), event.text()
                if key in (QtCore.Qt.Key_Return, QtCore.Qt.Key_Enter,
                           QtCore.Qt.Key_Tab):
                    self._complete_current()
                    return True
                elif key == QtCore.Qt.Key_Escape:
                    self.hide()
                    return True
                elif key in (QtCore.Qt.Key_Up, QtCore.Qt.Key_Down,
                             QtCore.Qt.Key_PageUp, QtCore.Qt.Key_PageDown,
                             QtCore.Qt.Key_Home, QtCore.Qt.Key_End):
                    # Navigation keys operate on the completion list.
                    self.keyPressEvent(event)
                    return True
            elif etype == QtCore.QEvent.FocusOut:
                self.hide()
        return super(CompletionWidget, self).eventFilter(obj, event)
    #--------------------------------------------------------------------------
    # 'QWidget' interface
    #--------------------------------------------------------------------------
    def hideEvent(self, event):
        """ Reimplemented to disconnect signal handlers and event filter.
        """
        super(CompletionWidget, self).hideEvent(event)
        self._text_edit.cursorPositionChanged.disconnect(self._update_current)
        self._text_edit.removeEventFilter(self)
    def showEvent(self, event):
        """ Reimplemented to connect signal handlers and event filter.
        """
        super(CompletionWidget, self).showEvent(event)
        self._text_edit.cursorPositionChanged.connect(self._update_current)
        self._text_edit.installEventFilter(self)
    #--------------------------------------------------------------------------
    # 'CompletionWidget' interface
    #--------------------------------------------------------------------------
    def show_items(self, cursor, items):
        """ Shows the completion widget with 'items' at the position specified
            by 'cursor'.
        """
        text_edit = self._text_edit
        point = text_edit.cursorRect(cursor).bottomRight()
        point = text_edit.mapToGlobal(point)
        height = self.sizeHint().height()
        screen_rect = QtGui.QApplication.desktop().availableGeometry(self)
        # If the popup would run off the bottom of the screen, place it
        # above the cursor line instead.
        if screen_rect.size().height() - point.y() - height < 0:
            point = text_edit.mapToGlobal(text_edit.cursorRect().topRight())
            point.setY(point.y() - height)
        self.move(point)
        # Remember where completion started so the typed prefix can be
        # recovered later in _current_text_cursor().
        self._start_position = cursor.position()
        self.clear()
        self.addItems(items)
        self.setCurrentRow(0)
        self.show()
    #--------------------------------------------------------------------------
    # Protected interface
    #--------------------------------------------------------------------------
    def _complete_current(self):
        """ Perform the completion with the currently selected item.
        """
        self._current_text_cursor().insertText(self.currentItem().text())
        self.hide()
    def _current_text_cursor(self):
        """ Returns a cursor with text between the start position and the
            current position selected.
        """
        cursor = self._text_edit.textCursor()
        if cursor.position() >= self._start_position:
            cursor.setPosition(self._start_position,
                               QtGui.QTextCursor.KeepAnchor)
        return cursor
    def _update_current(self):
        """ Updates the current item based on the current text.
        """
        # The prefix is everything the user typed since completion started.
        prefix = self._current_text_cursor().selection().toPlainText()
        if prefix:
            items = self.findItems(prefix, (QtCore.Qt.MatchStartsWith |
                                            QtCore.Qt.MatchCaseSensitive))
            if items:
                self.setCurrentItem(items[0])
            else:
                # No candidate matches the typed text any more.
                self.hide()
        else:
            self.hide()
    def cancel_completion(self):
        # Dismiss the popup without inserting anything.
        self.hide()
| {
"repo_name": "mattvonrocketstein/smash",
"path": "smashlib/ipy3x/qt/console/completion_widget.py",
"copies": "1",
"size": "5371",
"license": "mit",
"hash": 3849019426117285000,
"line_mean": 37.3642857143,
"line_max": 79,
"alpha_frac": 0.5187115993,
"autogenerated": false,
"ratio": 4.851851851851852,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5870563451151852,
"avg_score": null,
"num_lines": null
} |
"""A drop-in replacement for tempfile that adds the errors argument to
NamedTemporary and TemporaryFile.
"""
import os
import io
import tempfile
from tempfile import * # pylint: disable=wildcard-import, ungrouped-imports
__all__ = tempfile.__all__
def _patch_encoding(ctor, mode, **kwargs):
"Wrap the resulting instance if the errors argument is provided"
# The strategy is to create the underlying instance in binary mode and
# wrap the result in a TextIOWrapper with the appropriate encoding/errors
binary = 'b' in mode
# If the <errors> argument was not passed or if the mode is not binary and
# 'strict' was specifed, the default errors mode, then the default
# implementation can be used
errors = kwargs.pop('errors', None)
if errors is None or not binary and errors == 'strict':
return ctor(mode=mode, **kwargs)
# Encoding/errors are only valid for text mode
if binary:
raise ValueError('binary mode doesn\'t take an errors argument')
# Determine how the buffering should be handled
buffering = kwargs.pop('buffering', -1)
if buffering == 0:
# A <buffering> of 0 is binary only
raise ValueError('can\'t have unbuffered text I/O')
if buffering == 1:
# A <buffering> of 1 is line buffering - the binary instance will have
# no buffering specified and the TextIOWrapper will have line buffering
# enabled
buffering = -1
line_buffering = True
else:
# The <buffering> argument is not 0 or 1 so it will be passed directly
# to the binary instance and the TextIOWrapper will have no line
# buffering
line_buffering = False
encoding = kwargs.pop('encoding', None)
newline = kwargs.pop('newline', None)
fobj = ctor(mode=mode.replace('t', '') + 'b', buffering=buffering,
encoding=None, newline=None, **kwargs)
try:
return io.TextIOWrapper(fobj, encoding=encoding, errors=errors,
newline=newline, line_buffering=line_buffering)
except:
fobj.close()
# Attempt to clean up on exception if the object does not delete itself
if not getattr(fobj, 'delete', True):
os.unlink(fobj.name)
raise
def TemporaryFile(mode='w+b', **kwargs):  # pylint: disable=invalid-name, function-redefined
    """Drop-in ``tempfile.TemporaryFile`` that also accepts ``errors``."""
    factory = tempfile.TemporaryFile
    return _patch_encoding(factory, mode, **kwargs)
def NamedTemporaryFile(mode='w+b', **kwargs):  # pylint: disable=invalid-name, function-redefined
    """Drop-in ``tempfile.NamedTemporaryFile`` that also accepts ``errors``."""
    factory = tempfile.NamedTemporaryFile
    return _patch_encoding(factory, mode, **kwargs)
| {
"repo_name": "nxdevel/nx_tempfile",
"path": "nx_tempfile/__init__.py",
"copies": "1",
"size": "2737",
"license": "mit",
"hash": 605129901143096200,
"line_mean": 35.9864864865,
"line_max": 96,
"alpha_frac": 0.6737303617,
"autogenerated": false,
"ratio": 4.330696202531645,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5504426564231645,
"avg_score": null,
"num_lines": null
} |
ADS_ATTR_CLEAR = ( 1 )
ADS_ATTR_UPDATE = ( 2 )
ADS_ATTR_APPEND = ( 3 )
ADS_ATTR_DELETE = ( 4 )
ADS_EXT_MINEXTDISPID = ( 1 )
ADS_EXT_MAXEXTDISPID = ( 16777215 )
ADS_EXT_INITCREDENTIALS = ( 1 )
ADS_EXT_INITIALIZE_COMPLETE = ( 2 )
ADS_SEARCHPREF_ASYNCHRONOUS = 0
ADS_SEARCHPREF_DEREF_ALIASES = 1
ADS_SEARCHPREF_SIZE_LIMIT = 2
ADS_SEARCHPREF_TIME_LIMIT = 3
ADS_SEARCHPREF_ATTRIBTYPES_ONLY = 4
ADS_SEARCHPREF_SEARCH_SCOPE = 5
ADS_SEARCHPREF_TIMEOUT = 6
ADS_SEARCHPREF_PAGESIZE = 7
ADS_SEARCHPREF_PAGED_TIME_LIMIT = 8
ADS_SEARCHPREF_CHASE_REFERRALS = 9
ADS_SEARCHPREF_SORT_ON = 10
ADS_SEARCHPREF_CACHE_RESULTS = 11
ADS_SEARCHPREF_DIRSYNC = 12
ADS_SEARCHPREF_TOMBSTONE = 13
ADS_SCOPE_BASE = 0
ADS_SCOPE_ONELEVEL = 1
ADS_SCOPE_SUBTREE = 2
ADS_SECURE_AUTHENTICATION = 0x1
ADS_USE_ENCRYPTION = 0x2
ADS_USE_SSL = 0x2
ADS_READONLY_SERVER = 0x4
ADS_PROMPT_CREDENTIALS = 0x8
ADS_NO_AUTHENTICATION = 0x10
ADS_FAST_BIND = 0x20
ADS_USE_SIGNING = 0x40
ADS_USE_SEALING = 0x80
ADS_USE_DELEGATION = 0x100
ADS_SERVER_BIND = 0x200
ADSTYPE_INVALID = 0
ADSTYPE_DN_STRING = ADSTYPE_INVALID + 1
ADSTYPE_CASE_EXACT_STRING = ADSTYPE_DN_STRING + 1
ADSTYPE_CASE_IGNORE_STRING = ADSTYPE_CASE_EXACT_STRING + 1
ADSTYPE_PRINTABLE_STRING = ADSTYPE_CASE_IGNORE_STRING + 1
ADSTYPE_NUMERIC_STRING = ADSTYPE_PRINTABLE_STRING + 1
ADSTYPE_BOOLEAN = ADSTYPE_NUMERIC_STRING + 1
ADSTYPE_INTEGER = ADSTYPE_BOOLEAN + 1
ADSTYPE_OCTET_STRING = ADSTYPE_INTEGER + 1
ADSTYPE_UTC_TIME = ADSTYPE_OCTET_STRING + 1
ADSTYPE_LARGE_INTEGER = ADSTYPE_UTC_TIME + 1
ADSTYPE_PROV_SPECIFIC = ADSTYPE_LARGE_INTEGER + 1
ADSTYPE_OBJECT_CLASS = ADSTYPE_PROV_SPECIFIC + 1
ADSTYPE_CASEIGNORE_LIST = ADSTYPE_OBJECT_CLASS + 1
ADSTYPE_OCTET_LIST = ADSTYPE_CASEIGNORE_LIST + 1
ADSTYPE_PATH = ADSTYPE_OCTET_LIST + 1
ADSTYPE_POSTALADDRESS = ADSTYPE_PATH + 1
ADSTYPE_TIMESTAMP = ADSTYPE_POSTALADDRESS + 1
ADSTYPE_BACKLINK = ADSTYPE_TIMESTAMP + 1
ADSTYPE_TYPEDNAME = ADSTYPE_BACKLINK + 1
ADSTYPE_HOLD = ADSTYPE_TYPEDNAME + 1
ADSTYPE_NETADDRESS = ADSTYPE_HOLD + 1
ADSTYPE_REPLICAPOINTER = ADSTYPE_NETADDRESS + 1
ADSTYPE_FAXNUMBER = ADSTYPE_REPLICAPOINTER + 1
ADSTYPE_EMAIL = ADSTYPE_FAXNUMBER + 1
ADSTYPE_NT_SECURITY_DESCRIPTOR = ADSTYPE_EMAIL + 1
ADSTYPE_UNKNOWN = ADSTYPE_NT_SECURITY_DESCRIPTOR + 1
ADSTYPE_DN_WITH_BINARY = ADSTYPE_UNKNOWN + 1
ADSTYPE_DN_WITH_STRING = ADSTYPE_DN_WITH_BINARY + 1
ADS_PROPERTY_CLEAR = 1
ADS_PROPERTY_UPDATE = 2
ADS_PROPERTY_APPEND = 3
ADS_PROPERTY_DELETE = 4
ADS_SYSTEMFLAG_DISALLOW_DELETE = -2147483648
ADS_SYSTEMFLAG_CONFIG_ALLOW_RENAME = 0x40000000
ADS_SYSTEMFLAG_CONFIG_ALLOW_MOVE = 0x20000000
ADS_SYSTEMFLAG_CONFIG_ALLOW_LIMITED_MOVE = 0x10000000
ADS_SYSTEMFLAG_DOMAIN_DISALLOW_RENAME = -2147483648
ADS_SYSTEMFLAG_DOMAIN_DISALLOW_MOVE = 0x4000000
ADS_SYSTEMFLAG_CR_NTDS_NC = 0x1
ADS_SYSTEMFLAG_CR_NTDS_DOMAIN = 0x2
ADS_SYSTEMFLAG_ATTR_NOT_REPLICATED = 0x1
ADS_SYSTEMFLAG_ATTR_IS_CONSTRUCTED = 0x4
ADS_GROUP_TYPE_GLOBAL_GROUP = 0x2
ADS_GROUP_TYPE_DOMAIN_LOCAL_GROUP = 0x4
ADS_GROUP_TYPE_LOCAL_GROUP = 0x4
ADS_GROUP_TYPE_UNIVERSAL_GROUP = 0x8
ADS_GROUP_TYPE_SECURITY_ENABLED = -2147483648
ADS_UF_SCRIPT = 0x1
ADS_UF_ACCOUNTDISABLE = 0x2
ADS_UF_HOMEDIR_REQUIRED = 0x8
ADS_UF_LOCKOUT = 0x10
ADS_UF_PASSWD_NOTREQD = 0x20
ADS_UF_PASSWD_CANT_CHANGE = 0x40
ADS_UF_ENCRYPTED_TEXT_PASSWORD_ALLOWED = 0x80
ADS_UF_TEMP_DUPLICATE_ACCOUNT = 0x100
ADS_UF_NORMAL_ACCOUNT = 0x200
ADS_UF_INTERDOMAIN_TRUST_ACCOUNT = 0x800
ADS_UF_WORKSTATION_TRUST_ACCOUNT = 0x1000
ADS_UF_SERVER_TRUST_ACCOUNT = 0x2000
ADS_UF_DONT_EXPIRE_PASSWD = 0x10000
ADS_UF_MNS_LOGON_ACCOUNT = 0x20000
ADS_UF_SMARTCARD_REQUIRED = 0x40000
ADS_UF_TRUSTED_FOR_DELEGATION = 0x80000
ADS_UF_NOT_DELEGATED = 0x100000
ADS_UF_USE_DES_KEY_ONLY = 0x200000
ADS_UF_DONT_REQUIRE_PREAUTH = 0x400000
ADS_UF_PASSWORD_EXPIRED = 0x800000
ADS_UF_TRUSTED_TO_AUTHENTICATE_FOR_DELEGATION = 0x1000000
ADS_RIGHT_DELETE = 0x10000
ADS_RIGHT_READ_CONTROL = 0x20000
ADS_RIGHT_WRITE_DAC = 0x40000
ADS_RIGHT_WRITE_OWNER = 0x80000
ADS_RIGHT_SYNCHRONIZE = 0x100000
ADS_RIGHT_ACCESS_SYSTEM_SECURITY = 0x1000000
ADS_RIGHT_GENERIC_READ = -2147483648
ADS_RIGHT_GENERIC_WRITE = 0x40000000
ADS_RIGHT_GENERIC_EXECUTE = 0x20000000
ADS_RIGHT_GENERIC_ALL = 0x10000000
ADS_RIGHT_DS_CREATE_CHILD = 0x1
ADS_RIGHT_DS_DELETE_CHILD = 0x2
ADS_RIGHT_ACTRL_DS_LIST = 0x4
ADS_RIGHT_DS_SELF = 0x8
ADS_RIGHT_DS_READ_PROP = 0x10
ADS_RIGHT_DS_WRITE_PROP = 0x20
ADS_RIGHT_DS_DELETE_TREE = 0x40
ADS_RIGHT_DS_LIST_OBJECT = 0x80
ADS_RIGHT_DS_CONTROL_ACCESS = 0x100
ADS_ACETYPE_ACCESS_ALLOWED = 0
ADS_ACETYPE_ACCESS_DENIED = 0x1
ADS_ACETYPE_SYSTEM_AUDIT = 0x2
ADS_ACETYPE_ACCESS_ALLOWED_OBJECT = 0x5
ADS_ACETYPE_ACCESS_DENIED_OBJECT = 0x6
ADS_ACETYPE_SYSTEM_AUDIT_OBJECT = 0x7
ADS_ACETYPE_SYSTEM_ALARM_OBJECT = 0x8
ADS_ACETYPE_ACCESS_ALLOWED_CALLBACK = 0x9
ADS_ACETYPE_ACCESS_DENIED_CALLBACK = 0xa
ADS_ACETYPE_ACCESS_ALLOWED_CALLBACK_OBJECT = 0xb
ADS_ACETYPE_ACCESS_DENIED_CALLBACK_OBJECT = 0xc
ADS_ACETYPE_SYSTEM_AUDIT_CALLBACK = 0xd
ADS_ACETYPE_SYSTEM_ALARM_CALLBACK = 0xe
ADS_ACETYPE_SYSTEM_AUDIT_CALLBACK_OBJECT = 0xf
ADS_ACETYPE_SYSTEM_ALARM_CALLBACK_OBJECT = 0x10
ADS_ACEFLAG_INHERIT_ACE = 0x2
ADS_ACEFLAG_NO_PROPAGATE_INHERIT_ACE = 0x4
ADS_ACEFLAG_INHERIT_ONLY_ACE = 0x8
ADS_ACEFLAG_INHERITED_ACE = 0x10
ADS_ACEFLAG_VALID_INHERIT_FLAGS = 0x1f
ADS_ACEFLAG_SUCCESSFUL_ACCESS = 0x40
ADS_ACEFLAG_FAILED_ACCESS = 0x80
ADS_FLAG_OBJECT_TYPE_PRESENT = 0x1
ADS_FLAG_INHERITED_OBJECT_TYPE_PRESENT = 0x2
ADS_SD_CONTROL_SE_OWNER_DEFAULTED = 0x1
ADS_SD_CONTROL_SE_GROUP_DEFAULTED = 0x2
ADS_SD_CONTROL_SE_DACL_PRESENT = 0x4
ADS_SD_CONTROL_SE_DACL_DEFAULTED = 0x8
ADS_SD_CONTROL_SE_SACL_PRESENT = 0x10
ADS_SD_CONTROL_SE_SACL_DEFAULTED = 0x20
ADS_SD_CONTROL_SE_DACL_AUTO_INHERIT_REQ = 0x100
ADS_SD_CONTROL_SE_SACL_AUTO_INHERIT_REQ = 0x200
ADS_SD_CONTROL_SE_DACL_AUTO_INHERITED = 0x400
ADS_SD_CONTROL_SE_SACL_AUTO_INHERITED = 0x800
ADS_SD_CONTROL_SE_DACL_PROTECTED = 0x1000
ADS_SD_CONTROL_SE_SACL_PROTECTED = 0x2000
ADS_SD_CONTROL_SE_SELF_RELATIVE = 0x8000
ADS_SD_REVISION_DS = 4
ADS_NAME_TYPE_1779 = 1
ADS_NAME_TYPE_CANONICAL = 2
ADS_NAME_TYPE_NT4 = 3
ADS_NAME_TYPE_DISPLAY = 4
ADS_NAME_TYPE_DOMAIN_SIMPLE = 5
ADS_NAME_TYPE_ENTERPRISE_SIMPLE = 6
ADS_NAME_TYPE_GUID = 7
ADS_NAME_TYPE_UNKNOWN = 8
ADS_NAME_TYPE_USER_PRINCIPAL_NAME = 9
ADS_NAME_TYPE_CANONICAL_EX = 10
ADS_NAME_TYPE_SERVICE_PRINCIPAL_NAME = 11
ADS_NAME_TYPE_SID_OR_SID_HISTORY_NAME = 12
ADS_NAME_INITTYPE_DOMAIN = 1
ADS_NAME_INITTYPE_SERVER = 2
ADS_NAME_INITTYPE_GC = 3
ADS_OPTION_SERVERNAME = 0
ADS_OPTION_REFERRALS = ADS_OPTION_SERVERNAME + 1
ADS_OPTION_PAGE_SIZE = ADS_OPTION_REFERRALS + 1
ADS_OPTION_SECURITY_MASK = ADS_OPTION_PAGE_SIZE + 1
ADS_OPTION_MUTUAL_AUTH_STATUS = ADS_OPTION_SECURITY_MASK + 1
ADS_OPTION_QUOTA = ADS_OPTION_MUTUAL_AUTH_STATUS + 1
ADS_OPTION_PASSWORD_PORTNUMBER = ADS_OPTION_QUOTA + 1
ADS_OPTION_PASSWORD_METHOD = ADS_OPTION_PASSWORD_PORTNUMBER + 1
ADS_SECURITY_INFO_OWNER = 0x1
ADS_SECURITY_INFO_GROUP = 0x2
ADS_SECURITY_INFO_DACL = 0x4
ADS_SECURITY_INFO_SACL = 0x8
ADS_SETTYPE_FULL = 1
ADS_SETTYPE_PROVIDER = 2
ADS_SETTYPE_SERVER = 3
ADS_SETTYPE_DN = 4
ADS_FORMAT_WINDOWS = 1
ADS_FORMAT_WINDOWS_NO_SERVER = 2
ADS_FORMAT_WINDOWS_DN = 3
ADS_FORMAT_WINDOWS_PARENT = 4
ADS_FORMAT_X500 = 5
ADS_FORMAT_X500_NO_SERVER = 6
ADS_FORMAT_X500_DN = 7
ADS_FORMAT_X500_PARENT = 8
ADS_FORMAT_SERVER = 9
ADS_FORMAT_PROVIDER = 10
ADS_FORMAT_LEAF = 11
ADS_DISPLAY_FULL = 1
ADS_DISPLAY_VALUE_ONLY = 2
ADS_ESCAPEDMODE_DEFAULT = 1
ADS_ESCAPEDMODE_ON = 2
ADS_ESCAPEDMODE_OFF = 3
ADS_ESCAPEDMODE_OFF_EX = 4
ADS_PATH_FILE = 1
ADS_PATH_FILESHARE = 2
ADS_PATH_REGISTRY = 3
ADS_SD_FORMAT_IID = 1
ADS_SD_FORMAT_RAW = 2
ADS_SD_FORMAT_HEXSTRING = 3
# Generated by h2py from AdsErr.h
def _HRESULT_TYPEDEF_(_sc): return _sc
E_ADS_BAD_PATHNAME = _HRESULT_TYPEDEF_((-2147463168))
E_ADS_INVALID_DOMAIN_OBJECT = _HRESULT_TYPEDEF_((-2147463167))
E_ADS_INVALID_USER_OBJECT = _HRESULT_TYPEDEF_((-2147463166))
E_ADS_INVALID_COMPUTER_OBJECT = _HRESULT_TYPEDEF_((-2147463165))
E_ADS_UNKNOWN_OBJECT = _HRESULT_TYPEDEF_((-2147463164))
E_ADS_PROPERTY_NOT_SET = _HRESULT_TYPEDEF_((-2147463163))
E_ADS_PROPERTY_NOT_SUPPORTED = _HRESULT_TYPEDEF_((-2147463162))
E_ADS_PROPERTY_INVALID = _HRESULT_TYPEDEF_((-2147463161))
E_ADS_BAD_PARAMETER = _HRESULT_TYPEDEF_((-2147463160))
E_ADS_OBJECT_UNBOUND = _HRESULT_TYPEDEF_((-2147463159))
E_ADS_PROPERTY_NOT_MODIFIED = _HRESULT_TYPEDEF_((-2147463158))
E_ADS_PROPERTY_MODIFIED = _HRESULT_TYPEDEF_((-2147463157))
E_ADS_CANT_CONVERT_DATATYPE = _HRESULT_TYPEDEF_((-2147463156))
E_ADS_PROPERTY_NOT_FOUND = _HRESULT_TYPEDEF_((-2147463155))
E_ADS_OBJECT_EXISTS = _HRESULT_TYPEDEF_((-2147463154))
E_ADS_SCHEMA_VIOLATION = _HRESULT_TYPEDEF_((-2147463153))
E_ADS_COLUMN_NOT_SET = _HRESULT_TYPEDEF_((-2147463152))
S_ADS_ERRORSOCCURRED = _HRESULT_TYPEDEF_(0x00005011L)
S_ADS_NOMORE_ROWS = _HRESULT_TYPEDEF_(0x00005012L)
S_ADS_NOMORE_COLUMNS = _HRESULT_TYPEDEF_(0x00005013L)
E_ADS_INVALID_FILTER = _HRESULT_TYPEDEF_((-2147463148))
# ADS_DEREFENUM enum
ADS_DEREF_NEVER = 0
ADS_DEREF_SEARCHING = 1
ADS_DEREF_FINDING = 2
ADS_DEREF_ALWAYS = 3
# ADS_PREFERENCES_ENUM
ADSIPROP_ASYNCHRONOUS = 0
ADSIPROP_DEREF_ALIASES = 0x1
ADSIPROP_SIZE_LIMIT = 0x2
ADSIPROP_TIME_LIMIT = 0x3
ADSIPROP_ATTRIBTYPES_ONLY = 0x4
ADSIPROP_SEARCH_SCOPE = 0x5
ADSIPROP_TIMEOUT = 0x6
ADSIPROP_PAGESIZE = 0x7
ADSIPROP_PAGED_TIME_LIMIT = 0x8
ADSIPROP_CHASE_REFERRALS = 0x9
ADSIPROP_SORT_ON = 0xa
ADSIPROP_CACHE_RESULTS = 0xb
ADSIPROP_ADSIFLAG = 0xc
# ADSI_DIALECT_ENUM
ADSI_DIALECT_LDAP = 0
ADSI_DIALECT_SQL = 0x1
# ADS_CHASE_REFERRALS_ENUM
ADS_CHASE_REFERRALS_NEVER = 0
ADS_CHASE_REFERRALS_SUBORDINATE = 0x20
ADS_CHASE_REFERRALS_EXTERNAL = 0x40
ADS_CHASE_REFERRALS_ALWAYS = ADS_CHASE_REFERRALS_SUBORDINATE | ADS_CHASE_REFERRALS_EXTERNAL
# Generated by h2py from ObjSel.h
DSOP_SCOPE_TYPE_TARGET_COMPUTER = 0x00000001
DSOP_SCOPE_TYPE_UPLEVEL_JOINED_DOMAIN = 0x00000002
DSOP_SCOPE_TYPE_DOWNLEVEL_JOINED_DOMAIN = 0x00000004
DSOP_SCOPE_TYPE_ENTERPRISE_DOMAIN = 0x00000008
DSOP_SCOPE_TYPE_GLOBAL_CATALOG = 0x00000010
DSOP_SCOPE_TYPE_EXTERNAL_UPLEVEL_DOMAIN = 0x00000020
DSOP_SCOPE_TYPE_EXTERNAL_DOWNLEVEL_DOMAIN = 0x00000040
DSOP_SCOPE_TYPE_WORKGROUP = 0x00000080
DSOP_SCOPE_TYPE_USER_ENTERED_UPLEVEL_SCOPE = 0x00000100
DSOP_SCOPE_TYPE_USER_ENTERED_DOWNLEVEL_SCOPE = 0x00000200
DSOP_SCOPE_FLAG_STARTING_SCOPE = 0x00000001
DSOP_SCOPE_FLAG_WANT_PROVIDER_WINNT = 0x00000002
DSOP_SCOPE_FLAG_WANT_PROVIDER_LDAP = 0x00000004
DSOP_SCOPE_FLAG_WANT_PROVIDER_GC = 0x00000008
DSOP_SCOPE_FLAG_WANT_SID_PATH = 0x00000010
DSOP_SCOPE_FLAG_WANT_DOWNLEVEL_BUILTIN_PATH = 0x00000020
DSOP_SCOPE_FLAG_DEFAULT_FILTER_USERS = 0x00000040
DSOP_SCOPE_FLAG_DEFAULT_FILTER_GROUPS = 0x00000080
DSOP_SCOPE_FLAG_DEFAULT_FILTER_COMPUTERS = 0x00000100
DSOP_SCOPE_FLAG_DEFAULT_FILTER_CONTACTS = 0x00000200
DSOP_FILTER_INCLUDE_ADVANCED_VIEW = 0x00000001
DSOP_FILTER_USERS = 0x00000002
DSOP_FILTER_BUILTIN_GROUPS = 0x00000004
DSOP_FILTER_WELL_KNOWN_PRINCIPALS = 0x00000008
DSOP_FILTER_UNIVERSAL_GROUPS_DL = 0x00000010
DSOP_FILTER_UNIVERSAL_GROUPS_SE = 0x00000020
DSOP_FILTER_GLOBAL_GROUPS_DL = 0x00000040
DSOP_FILTER_GLOBAL_GROUPS_SE = 0x00000080
DSOP_FILTER_DOMAIN_LOCAL_GROUPS_DL = 0x00000100
DSOP_FILTER_DOMAIN_LOCAL_GROUPS_SE = 0x00000200
DSOP_FILTER_CONTACTS = 0x00000400
DSOP_FILTER_COMPUTERS = 0x00000800
DSOP_DOWNLEVEL_FILTER_USERS = (-2147483647)
DSOP_DOWNLEVEL_FILTER_LOCAL_GROUPS = (-2147483646)
DSOP_DOWNLEVEL_FILTER_GLOBAL_GROUPS = (-2147483644)
DSOP_DOWNLEVEL_FILTER_COMPUTERS = (-2147483640)
DSOP_DOWNLEVEL_FILTER_WORLD = (-2147483632)
DSOP_DOWNLEVEL_FILTER_AUTHENTICATED_USER = (-2147483616)
DSOP_DOWNLEVEL_FILTER_ANONYMOUS = (-2147483584)
DSOP_DOWNLEVEL_FILTER_BATCH = (-2147483520)
DSOP_DOWNLEVEL_FILTER_CREATOR_OWNER = (-2147483392)
DSOP_DOWNLEVEL_FILTER_CREATOR_GROUP = (-2147483136)
DSOP_DOWNLEVEL_FILTER_DIALUP = (-2147482624)
DSOP_DOWNLEVEL_FILTER_INTERACTIVE = (-2147481600)
DSOP_DOWNLEVEL_FILTER_NETWORK = (-2147479552)
DSOP_DOWNLEVEL_FILTER_SERVICE = (-2147475456)
DSOP_DOWNLEVEL_FILTER_SYSTEM = (-2147467264)
DSOP_DOWNLEVEL_FILTER_EXCLUDE_BUILTIN_GROUPS = (-2147450880)
DSOP_DOWNLEVEL_FILTER_TERMINAL_SERVER = (-2147418112)
DSOP_DOWNLEVEL_FILTER_ALL_WELLKNOWN_SIDS = (-2147352576)
DSOP_DOWNLEVEL_FILTER_LOCAL_SERVICE = (-2147221504)
DSOP_DOWNLEVEL_FILTER_NETWORK_SERVICE = (-2146959360)
DSOP_DOWNLEVEL_FILTER_REMOTE_LOGON = (-2146435072)
DSOP_FLAG_MULTISELECT = 0x00000001
DSOP_FLAG_SKIP_TARGET_COMPUTER_DC_CHECK = 0x00000002
CFSTR_DSOP_DS_SELECTION_LIST = "CFSTR_DSOP_DS_SELECTION_LIST"
| {
"repo_name": "kkdd/arangodb",
"path": "3rdParty/V8-4.3.61/third_party/python_26/Lib/site-packages/win32comext/adsi/adsicon.py",
"copies": "17",
"size": "12544",
"license": "apache-2.0",
"hash": -3502810082107859000,
"line_mean": 36.3333333333,
"line_max": 91,
"alpha_frac": 0.7641103316,
"autogenerated": false,
"ratio": 2.290304911447873,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.02260908295610515,
"num_lines": 336
} |
ADS_ATTR_CLEAR = ( 1 )
ADS_ATTR_UPDATE = ( 2 )
ADS_ATTR_APPEND = ( 3 )
ADS_ATTR_DELETE = ( 4 )
ADS_EXT_MINEXTDISPID = ( 1 )
ADS_EXT_MAXEXTDISPID = ( 16777215 )
ADS_EXT_INITCREDENTIALS = ( 1 )
ADS_EXT_INITIALIZE_COMPLETE = ( 2 )
ADS_SEARCHPREF_ASYNCHRONOUS = 0
ADS_SEARCHPREF_DEREF_ALIASES = 1
ADS_SEARCHPREF_SIZE_LIMIT = 2
ADS_SEARCHPREF_TIME_LIMIT = 3
ADS_SEARCHPREF_ATTRIBTYPES_ONLY = 4
ADS_SEARCHPREF_SEARCH_SCOPE = 5
ADS_SEARCHPREF_TIMEOUT = 6
ADS_SEARCHPREF_PAGESIZE = 7
ADS_SEARCHPREF_PAGED_TIME_LIMIT = 8
ADS_SEARCHPREF_CHASE_REFERRALS = 9
ADS_SEARCHPREF_SORT_ON = 10
ADS_SEARCHPREF_CACHE_RESULTS = 11
ADS_SEARCHPREF_DIRSYNC = 12
ADS_SEARCHPREF_TOMBSTONE = 13
ADS_SCOPE_BASE = 0
ADS_SCOPE_ONELEVEL = 1
ADS_SCOPE_SUBTREE = 2
ADS_SECURE_AUTHENTICATION = 0x1
ADS_USE_ENCRYPTION = 0x2
ADS_USE_SSL = 0x2
ADS_READONLY_SERVER = 0x4
ADS_PROMPT_CREDENTIALS = 0x8
ADS_NO_AUTHENTICATION = 0x10
ADS_FAST_BIND = 0x20
ADS_USE_SIGNING = 0x40
ADS_USE_SEALING = 0x80
ADS_USE_DELEGATION = 0x100
ADS_SERVER_BIND = 0x200
ADSTYPE_INVALID = 0
ADSTYPE_DN_STRING = ADSTYPE_INVALID + 1
ADSTYPE_CASE_EXACT_STRING = ADSTYPE_DN_STRING + 1
ADSTYPE_CASE_IGNORE_STRING = ADSTYPE_CASE_EXACT_STRING + 1
ADSTYPE_PRINTABLE_STRING = ADSTYPE_CASE_IGNORE_STRING + 1
ADSTYPE_NUMERIC_STRING = ADSTYPE_PRINTABLE_STRING + 1
ADSTYPE_BOOLEAN = ADSTYPE_NUMERIC_STRING + 1
ADSTYPE_INTEGER = ADSTYPE_BOOLEAN + 1
ADSTYPE_OCTET_STRING = ADSTYPE_INTEGER + 1
ADSTYPE_UTC_TIME = ADSTYPE_OCTET_STRING + 1
ADSTYPE_LARGE_INTEGER = ADSTYPE_UTC_TIME + 1
ADSTYPE_PROV_SPECIFIC = ADSTYPE_LARGE_INTEGER + 1
ADSTYPE_OBJECT_CLASS = ADSTYPE_PROV_SPECIFIC + 1
ADSTYPE_CASEIGNORE_LIST = ADSTYPE_OBJECT_CLASS + 1
ADSTYPE_OCTET_LIST = ADSTYPE_CASEIGNORE_LIST + 1
ADSTYPE_PATH = ADSTYPE_OCTET_LIST + 1
ADSTYPE_POSTALADDRESS = ADSTYPE_PATH + 1
ADSTYPE_TIMESTAMP = ADSTYPE_POSTALADDRESS + 1
ADSTYPE_BACKLINK = ADSTYPE_TIMESTAMP + 1
ADSTYPE_TYPEDNAME = ADSTYPE_BACKLINK + 1
ADSTYPE_HOLD = ADSTYPE_TYPEDNAME + 1
ADSTYPE_NETADDRESS = ADSTYPE_HOLD + 1
ADSTYPE_REPLICAPOINTER = ADSTYPE_NETADDRESS + 1
ADSTYPE_FAXNUMBER = ADSTYPE_REPLICAPOINTER + 1
ADSTYPE_EMAIL = ADSTYPE_FAXNUMBER + 1
ADSTYPE_NT_SECURITY_DESCRIPTOR = ADSTYPE_EMAIL + 1
ADSTYPE_UNKNOWN = ADSTYPE_NT_SECURITY_DESCRIPTOR + 1
ADSTYPE_DN_WITH_BINARY = ADSTYPE_UNKNOWN + 1
ADSTYPE_DN_WITH_STRING = ADSTYPE_DN_WITH_BINARY + 1
ADS_PROPERTY_CLEAR = 1
ADS_PROPERTY_UPDATE = 2
ADS_PROPERTY_APPEND = 3
ADS_PROPERTY_DELETE = 4
ADS_SYSTEMFLAG_DISALLOW_DELETE = -2147483648
ADS_SYSTEMFLAG_CONFIG_ALLOW_RENAME = 0x40000000
ADS_SYSTEMFLAG_CONFIG_ALLOW_MOVE = 0x20000000
ADS_SYSTEMFLAG_CONFIG_ALLOW_LIMITED_MOVE = 0x10000000
ADS_SYSTEMFLAG_DOMAIN_DISALLOW_RENAME = -2147483648
ADS_SYSTEMFLAG_DOMAIN_DISALLOW_MOVE = 0x4000000
ADS_SYSTEMFLAG_CR_NTDS_NC = 0x1
ADS_SYSTEMFLAG_CR_NTDS_DOMAIN = 0x2
ADS_SYSTEMFLAG_ATTR_NOT_REPLICATED = 0x1
ADS_SYSTEMFLAG_ATTR_IS_CONSTRUCTED = 0x4
ADS_GROUP_TYPE_GLOBAL_GROUP = 0x2
ADS_GROUP_TYPE_DOMAIN_LOCAL_GROUP = 0x4
ADS_GROUP_TYPE_LOCAL_GROUP = 0x4
ADS_GROUP_TYPE_UNIVERSAL_GROUP = 0x8
ADS_GROUP_TYPE_SECURITY_ENABLED = -2147483648
ADS_UF_SCRIPT = 0x1
ADS_UF_ACCOUNTDISABLE = 0x2
ADS_UF_HOMEDIR_REQUIRED = 0x8
ADS_UF_LOCKOUT = 0x10
ADS_UF_PASSWD_NOTREQD = 0x20
ADS_UF_PASSWD_CANT_CHANGE = 0x40
ADS_UF_ENCRYPTED_TEXT_PASSWORD_ALLOWED = 0x80
ADS_UF_TEMP_DUPLICATE_ACCOUNT = 0x100
ADS_UF_NORMAL_ACCOUNT = 0x200
ADS_UF_INTERDOMAIN_TRUST_ACCOUNT = 0x800
ADS_UF_WORKSTATION_TRUST_ACCOUNT = 0x1000
ADS_UF_SERVER_TRUST_ACCOUNT = 0x2000
ADS_UF_DONT_EXPIRE_PASSWD = 0x10000
ADS_UF_MNS_LOGON_ACCOUNT = 0x20000
ADS_UF_SMARTCARD_REQUIRED = 0x40000
ADS_UF_TRUSTED_FOR_DELEGATION = 0x80000
ADS_UF_NOT_DELEGATED = 0x100000
ADS_UF_USE_DES_KEY_ONLY = 0x200000
ADS_UF_DONT_REQUIRE_PREAUTH = 0x400000
ADS_UF_PASSWORD_EXPIRED = 0x800000
ADS_UF_TRUSTED_TO_AUTHENTICATE_FOR_DELEGATION = 0x1000000
ADS_RIGHT_DELETE = 0x10000
ADS_RIGHT_READ_CONTROL = 0x20000
ADS_RIGHT_WRITE_DAC = 0x40000
ADS_RIGHT_WRITE_OWNER = 0x80000
ADS_RIGHT_SYNCHRONIZE = 0x100000
ADS_RIGHT_ACCESS_SYSTEM_SECURITY = 0x1000000
ADS_RIGHT_GENERIC_READ = -2147483648
ADS_RIGHT_GENERIC_WRITE = 0x40000000
ADS_RIGHT_GENERIC_EXECUTE = 0x20000000
ADS_RIGHT_GENERIC_ALL = 0x10000000
ADS_RIGHT_DS_CREATE_CHILD = 0x1
ADS_RIGHT_DS_DELETE_CHILD = 0x2
ADS_RIGHT_ACTRL_DS_LIST = 0x4
ADS_RIGHT_DS_SELF = 0x8
ADS_RIGHT_DS_READ_PROP = 0x10
ADS_RIGHT_DS_WRITE_PROP = 0x20
ADS_RIGHT_DS_DELETE_TREE = 0x40
ADS_RIGHT_DS_LIST_OBJECT = 0x80
ADS_RIGHT_DS_CONTROL_ACCESS = 0x100
ADS_ACETYPE_ACCESS_ALLOWED = 0
ADS_ACETYPE_ACCESS_DENIED = 0x1
ADS_ACETYPE_SYSTEM_AUDIT = 0x2
ADS_ACETYPE_ACCESS_ALLOWED_OBJECT = 0x5
ADS_ACETYPE_ACCESS_DENIED_OBJECT = 0x6
ADS_ACETYPE_SYSTEM_AUDIT_OBJECT = 0x7
ADS_ACETYPE_SYSTEM_ALARM_OBJECT = 0x8
ADS_ACETYPE_ACCESS_ALLOWED_CALLBACK = 0x9
ADS_ACETYPE_ACCESS_DENIED_CALLBACK = 0xa
ADS_ACETYPE_ACCESS_ALLOWED_CALLBACK_OBJECT = 0xb
ADS_ACETYPE_ACCESS_DENIED_CALLBACK_OBJECT = 0xc
ADS_ACETYPE_SYSTEM_AUDIT_CALLBACK = 0xd
ADS_ACETYPE_SYSTEM_ALARM_CALLBACK = 0xe
ADS_ACETYPE_SYSTEM_AUDIT_CALLBACK_OBJECT = 0xf
ADS_ACETYPE_SYSTEM_ALARM_CALLBACK_OBJECT = 0x10
ADS_ACEFLAG_INHERIT_ACE = 0x2
ADS_ACEFLAG_NO_PROPAGATE_INHERIT_ACE = 0x4
ADS_ACEFLAG_INHERIT_ONLY_ACE = 0x8
ADS_ACEFLAG_INHERITED_ACE = 0x10
ADS_ACEFLAG_VALID_INHERIT_FLAGS = 0x1f
ADS_ACEFLAG_SUCCESSFUL_ACCESS = 0x40
ADS_ACEFLAG_FAILED_ACCESS = 0x80
ADS_FLAG_OBJECT_TYPE_PRESENT = 0x1
ADS_FLAG_INHERITED_OBJECT_TYPE_PRESENT = 0x2
ADS_SD_CONTROL_SE_OWNER_DEFAULTED = 0x1
ADS_SD_CONTROL_SE_GROUP_DEFAULTED = 0x2
ADS_SD_CONTROL_SE_DACL_PRESENT = 0x4
ADS_SD_CONTROL_SE_DACL_DEFAULTED = 0x8
ADS_SD_CONTROL_SE_SACL_PRESENT = 0x10
ADS_SD_CONTROL_SE_SACL_DEFAULTED = 0x20
ADS_SD_CONTROL_SE_DACL_AUTO_INHERIT_REQ = 0x100
ADS_SD_CONTROL_SE_SACL_AUTO_INHERIT_REQ = 0x200
ADS_SD_CONTROL_SE_DACL_AUTO_INHERITED = 0x400
ADS_SD_CONTROL_SE_SACL_AUTO_INHERITED = 0x800
ADS_SD_CONTROL_SE_DACL_PROTECTED = 0x1000
ADS_SD_CONTROL_SE_SACL_PROTECTED = 0x2000
ADS_SD_CONTROL_SE_SELF_RELATIVE = 0x8000
ADS_SD_REVISION_DS = 4
ADS_NAME_TYPE_1779 = 1
ADS_NAME_TYPE_CANONICAL = 2
ADS_NAME_TYPE_NT4 = 3
ADS_NAME_TYPE_DISPLAY = 4
ADS_NAME_TYPE_DOMAIN_SIMPLE = 5
ADS_NAME_TYPE_ENTERPRISE_SIMPLE = 6
ADS_NAME_TYPE_GUID = 7
ADS_NAME_TYPE_UNKNOWN = 8
ADS_NAME_TYPE_USER_PRINCIPAL_NAME = 9
ADS_NAME_TYPE_CANONICAL_EX = 10
ADS_NAME_TYPE_SERVICE_PRINCIPAL_NAME = 11
ADS_NAME_TYPE_SID_OR_SID_HISTORY_NAME = 12
ADS_NAME_INITTYPE_DOMAIN = 1
ADS_NAME_INITTYPE_SERVER = 2
ADS_NAME_INITTYPE_GC = 3
ADS_OPTION_SERVERNAME = 0
ADS_OPTION_REFERRALS = ADS_OPTION_SERVERNAME + 1
ADS_OPTION_PAGE_SIZE = ADS_OPTION_REFERRALS + 1
ADS_OPTION_SECURITY_MASK = ADS_OPTION_PAGE_SIZE + 1
ADS_OPTION_MUTUAL_AUTH_STATUS = ADS_OPTION_SECURITY_MASK + 1
ADS_OPTION_QUOTA = ADS_OPTION_MUTUAL_AUTH_STATUS + 1
ADS_OPTION_PASSWORD_PORTNUMBER = ADS_OPTION_QUOTA + 1
ADS_OPTION_PASSWORD_METHOD = ADS_OPTION_PASSWORD_PORTNUMBER + 1
ADS_SECURITY_INFO_OWNER = 0x1
ADS_SECURITY_INFO_GROUP = 0x2
ADS_SECURITY_INFO_DACL = 0x4
ADS_SECURITY_INFO_SACL = 0x8
ADS_SETTYPE_FULL = 1
ADS_SETTYPE_PROVIDER = 2
ADS_SETTYPE_SERVER = 3
ADS_SETTYPE_DN = 4
ADS_FORMAT_WINDOWS = 1
ADS_FORMAT_WINDOWS_NO_SERVER = 2
ADS_FORMAT_WINDOWS_DN = 3
ADS_FORMAT_WINDOWS_PARENT = 4
ADS_FORMAT_X500 = 5
ADS_FORMAT_X500_NO_SERVER = 6
ADS_FORMAT_X500_DN = 7
ADS_FORMAT_X500_PARENT = 8
ADS_FORMAT_SERVER = 9
ADS_FORMAT_PROVIDER = 10
ADS_FORMAT_LEAF = 11
ADS_DISPLAY_FULL = 1
ADS_DISPLAY_VALUE_ONLY = 2
ADS_ESCAPEDMODE_DEFAULT = 1
ADS_ESCAPEDMODE_ON = 2
ADS_ESCAPEDMODE_OFF = 3
ADS_ESCAPEDMODE_OFF_EX = 4
ADS_PATH_FILE = 1
ADS_PATH_FILESHARE = 2
ADS_PATH_REGISTRY = 3
ADS_SD_FORMAT_IID = 1
ADS_SD_FORMAT_RAW = 2
ADS_SD_FORMAT_HEXSTRING = 3
# Generated by h2py from AdsErr.h
def _HRESULT_TYPEDEF_(_sc): return _sc
E_ADS_BAD_PATHNAME = _HRESULT_TYPEDEF_((-2147463168))
E_ADS_INVALID_DOMAIN_OBJECT = _HRESULT_TYPEDEF_((-2147463167))
E_ADS_INVALID_USER_OBJECT = _HRESULT_TYPEDEF_((-2147463166))
E_ADS_INVALID_COMPUTER_OBJECT = _HRESULT_TYPEDEF_((-2147463165))
E_ADS_UNKNOWN_OBJECT = _HRESULT_TYPEDEF_((-2147463164))
E_ADS_PROPERTY_NOT_SET = _HRESULT_TYPEDEF_((-2147463163))
E_ADS_PROPERTY_NOT_SUPPORTED = _HRESULT_TYPEDEF_((-2147463162))
E_ADS_PROPERTY_INVALID = _HRESULT_TYPEDEF_((-2147463161))
E_ADS_BAD_PARAMETER = _HRESULT_TYPEDEF_((-2147463160))
E_ADS_OBJECT_UNBOUND = _HRESULT_TYPEDEF_((-2147463159))
E_ADS_PROPERTY_NOT_MODIFIED = _HRESULT_TYPEDEF_((-2147463158))
E_ADS_PROPERTY_MODIFIED = _HRESULT_TYPEDEF_((-2147463157))
E_ADS_CANT_CONVERT_DATATYPE = _HRESULT_TYPEDEF_((-2147463156))
E_ADS_PROPERTY_NOT_FOUND = _HRESULT_TYPEDEF_((-2147463155))
E_ADS_OBJECT_EXISTS = _HRESULT_TYPEDEF_((-2147463154))
E_ADS_SCHEMA_VIOLATION = _HRESULT_TYPEDEF_((-2147463153))
E_ADS_COLUMN_NOT_SET = _HRESULT_TYPEDEF_((-2147463152))
S_ADS_ERRORSOCCURRED = _HRESULT_TYPEDEF_(0x00005011L)
S_ADS_NOMORE_ROWS = _HRESULT_TYPEDEF_(0x00005012L)
S_ADS_NOMORE_COLUMNS = _HRESULT_TYPEDEF_(0x00005013L)
E_ADS_INVALID_FILTER = _HRESULT_TYPEDEF_((-2147463148))
# ADS_DEREFENUM enum
ADS_DEREF_NEVER = 0
ADS_DEREF_SEARCHING = 1
ADS_DEREF_FINDING = 2
ADS_DEREF_ALWAYS = 3
# ADS_PREFERENCES_ENUM
ADSIPROP_ASYNCHRONOUS = 0
ADSIPROP_DEREF_ALIASES = 0x1
ADSIPROP_SIZE_LIMIT = 0x2
ADSIPROP_TIME_LIMIT = 0x3
ADSIPROP_ATTRIBTYPES_ONLY = 0x4
ADSIPROP_SEARCH_SCOPE = 0x5
ADSIPROP_TIMEOUT = 0x6
ADSIPROP_PAGESIZE = 0x7
ADSIPROP_PAGED_TIME_LIMIT = 0x8
ADSIPROP_CHASE_REFERRALS = 0x9
ADSIPROP_SORT_ON = 0xa
ADSIPROP_CACHE_RESULTS = 0xb
ADSIPROP_ADSIFLAG = 0xc
# ADSI_DIALECT_ENUM
ADSI_DIALECT_LDAP = 0
ADSI_DIALECT_SQL = 0x1
# ADS_CHASE_REFERRALS_ENUM
ADS_CHASE_REFERRALS_NEVER = 0
ADS_CHASE_REFERRALS_SUBORDINATE = 0x20
ADS_CHASE_REFERRALS_EXTERNAL = 0x40
ADS_CHASE_REFERRALS_ALWAYS = ADS_CHASE_REFERRALS_SUBORDINATE | ADS_CHASE_REFERRALS_EXTERNAL
# Generated by h2py from ObjSel.h
DSOP_SCOPE_TYPE_TARGET_COMPUTER = 0x00000001
DSOP_SCOPE_TYPE_UPLEVEL_JOINED_DOMAIN = 0x00000002
DSOP_SCOPE_TYPE_DOWNLEVEL_JOINED_DOMAIN = 0x00000004
DSOP_SCOPE_TYPE_ENTERPRISE_DOMAIN = 0x00000008
DSOP_SCOPE_TYPE_GLOBAL_CATALOG = 0x00000010
DSOP_SCOPE_TYPE_EXTERNAL_UPLEVEL_DOMAIN = 0x00000020
DSOP_SCOPE_TYPE_EXTERNAL_DOWNLEVEL_DOMAIN = 0x00000040
DSOP_SCOPE_TYPE_WORKGROUP = 0x00000080
DSOP_SCOPE_TYPE_USER_ENTERED_UPLEVEL_SCOPE = 0x00000100
DSOP_SCOPE_TYPE_USER_ENTERED_DOWNLEVEL_SCOPE = 0x00000200
DSOP_SCOPE_FLAG_STARTING_SCOPE = 0x00000001
DSOP_SCOPE_FLAG_WANT_PROVIDER_WINNT = 0x00000002
DSOP_SCOPE_FLAG_WANT_PROVIDER_LDAP = 0x00000004
DSOP_SCOPE_FLAG_WANT_PROVIDER_GC = 0x00000008
DSOP_SCOPE_FLAG_WANT_SID_PATH = 0x00000010
DSOP_SCOPE_FLAG_WANT_DOWNLEVEL_BUILTIN_PATH = 0x00000020
DSOP_SCOPE_FLAG_DEFAULT_FILTER_USERS = 0x00000040
DSOP_SCOPE_FLAG_DEFAULT_FILTER_GROUPS = 0x00000080
DSOP_SCOPE_FLAG_DEFAULT_FILTER_COMPUTERS = 0x00000100
DSOP_SCOPE_FLAG_DEFAULT_FILTER_CONTACTS = 0x00000200
DSOP_FILTER_INCLUDE_ADVANCED_VIEW = 0x00000001
DSOP_FILTER_USERS = 0x00000002
DSOP_FILTER_BUILTIN_GROUPS = 0x00000004
DSOP_FILTER_WELL_KNOWN_PRINCIPALS = 0x00000008
DSOP_FILTER_UNIVERSAL_GROUPS_DL = 0x00000010
DSOP_FILTER_UNIVERSAL_GROUPS_SE = 0x00000020
DSOP_FILTER_GLOBAL_GROUPS_DL = 0x00000040
DSOP_FILTER_GLOBAL_GROUPS_SE = 0x00000080
DSOP_FILTER_DOMAIN_LOCAL_GROUPS_DL = 0x00000100
DSOP_FILTER_DOMAIN_LOCAL_GROUPS_SE = 0x00000200
DSOP_FILTER_CONTACTS = 0x00000400
DSOP_FILTER_COMPUTERS = 0x00000800
DSOP_DOWNLEVEL_FILTER_USERS = (-2147483647)
DSOP_DOWNLEVEL_FILTER_LOCAL_GROUPS = (-2147483646)
DSOP_DOWNLEVEL_FILTER_GLOBAL_GROUPS = (-2147483644)
DSOP_DOWNLEVEL_FILTER_COMPUTERS = (-2147483640)
DSOP_DOWNLEVEL_FILTER_WORLD = (-2147483632)
DSOP_DOWNLEVEL_FILTER_AUTHENTICATED_USER = (-2147483616)
DSOP_DOWNLEVEL_FILTER_ANONYMOUS = (-2147483584)
DSOP_DOWNLEVEL_FILTER_BATCH = (-2147483520)
DSOP_DOWNLEVEL_FILTER_CREATOR_OWNER = (-2147483392)
DSOP_DOWNLEVEL_FILTER_CREATOR_GROUP = (-2147483136)
DSOP_DOWNLEVEL_FILTER_DIALUP = (-2147482624)
DSOP_DOWNLEVEL_FILTER_INTERACTIVE = (-2147481600)
DSOP_DOWNLEVEL_FILTER_NETWORK = (-2147479552)
DSOP_DOWNLEVEL_FILTER_SERVICE = (-2147475456)
DSOP_DOWNLEVEL_FILTER_SYSTEM = (-2147467264)
DSOP_DOWNLEVEL_FILTER_EXCLUDE_BUILTIN_GROUPS = (-2147450880)
DSOP_DOWNLEVEL_FILTER_TERMINAL_SERVER = (-2147418112)
DSOP_DOWNLEVEL_FILTER_ALL_WELLKNOWN_SIDS = (-2147352576)
DSOP_DOWNLEVEL_FILTER_LOCAL_SERVICE = (-2147221504)
DSOP_DOWNLEVEL_FILTER_NETWORK_SERVICE = (-2146959360)
DSOP_DOWNLEVEL_FILTER_REMOTE_LOGON = (-2146435072)
DSOP_FLAG_MULTISELECT = 0x00000001
DSOP_FLAG_SKIP_TARGET_COMPUTER_DC_CHECK = 0x00000002
CFSTR_DSOP_DS_SELECTION_LIST = "CFSTR_DSOP_DS_SELECTION_LIST"
| {
"repo_name": "Southpaw-TACTIC/Team",
"path": "src/python/Lib/site-packages/win32comext/adsi/adsicon.py",
"copies": "1",
"size": "12880",
"license": "epl-1.0",
"hash": 6591334290745903000,
"line_mean": 36.3333333333,
"line_max": 91,
"alpha_frac": 0.7441770186,
"autogenerated": false,
"ratio": 2.225678244340764,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8360431609672279,
"avg_score": 0.021884730653696952,
"num_lines": 336
} |
""" ADT stands for Algebraic data type
"""
from rhetoric.exceptions import ConfigurationError
class ADTConfiguratorMixin(object):
    """Configurator mixin maintaining a registry of ADT definitions in
    ``self.adt`` and validating it for exhaustiveness.
    """

    def update_adt_registry(self, adt_meta):
        """Register the ADT described by ``adt_meta`` under its type.

        :type adt_meta: dict
        """
        self.adt[adt_meta['type']] = adt_meta

    def check_adt_consistency(self):
        """Fail if any registered case leaves a variant unimplemented.

        Raises ``ConfigurationError`` on the first variant whose
        implementation is ``None``; on success the registry is discarded.
        """
        for _obj_id, adt_meta in self.adt.items():
            for case_name, case_meta in adt_meta['cases'].items():
                for variant, implementation in case_meta.items():
                    if implementation is not None:
                        continue
                    raise ConfigurationError(
                        'Case {case_name} of {type} is not exhaustive. '
                        'Here is the variant that is not matched: {variant} '
                        .format(
                            case_name=case_name,
                            type=str(adt_meta['type']),
                            variant=variant
                        )
                    )
        # All good. We no longer need the adt meta.
        delattr(self, 'adt')
| {
"repo_name": "avanov/Rhetoric",
"path": "rhetoric/config/adt.py",
"copies": "1",
"size": "1118",
"license": "mit",
"hash": 8713968545831401000,
"line_mean": 35.064516129,
"line_max": 81,
"alpha_frac": 0.4821109123,
"autogenerated": false,
"ratio": 4.4189723320158105,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.540108324431581,
"avg_score": null,
"num_lines": null
} |
""" A dual network policy gradient RL architecture: Model network learns a representation of the environment on the
basis of observations it receives from the interactions between the PolicyNet - encoding the agent - and the true
environment. PolicyNet learns its optimal policy by learning from the simulated data provided by the ModelNet only.
The so defined procedure accelerates the training process for the agent. """
import numpy as np
import tensorflow as tf
from tensorflow.contrib.layers import xavier_initializer as xi
import gym
env = gym.make('CartPole-v0')
# Declare hyperparameters for the agent network and helper functions
# Current parameters work sufficiently well depending on network initialization
# PolicyNet (agent) hidden layer widths.
AGENT_HIDDEN_1 = 64
AGENT_HIDDEN_2 = 128
AGENT_HIDDEN_3 = 32
# ModelNet (environment model) hidden layer widths.
MODEL_HIDDEN_1 = 256
MODEL_HIDDEN_2 = 512
MODEL_HIDDEN_3 = 128
# Dropout keep probabilities for the two networks.
KEEP_PROB_MODEL = 0.5
KEEP_PROB_AGENT = 0.5
LR = 1e-2  # learning rate (passed to the Adam optimizer in PolicyNet)
GAMMA = 0.99  # presumably the reward discount factor -- confirm against usage
INPUT_DIMS = 4  # CartPole-v0 observation vector size
STATE_DIMS = 5  # assumes observation (4) plus one extra value -- TODO confirm
NUM_ACTIONS = 2  # CartPole action space size
# Define training parameters
TOTAL_EPS = 5000  # total training episodes
MAX_STEPS = 300  # maximum steps per episode
REAL_BSIZE = 3  # NOTE(review): batch sizes for real vs. model data -- confirm
MODEL_BSIZE = 3
BINARY_OBJECTIVE = True  # select the sigmoid (binary) objective in PolicyNet
class PolicyNet(object):
    """ Policy net encoding the agent and learning the optimal policy through interaction with the model net.
    Two objective functions are implemented: binary_objective=True, for when the agent has to decide between two actions
    and binary_objective=False, for when the action space is larger. The latter option also works for binary decision,
    yet the former offers more reliable convergence. """
    def __init__(self, input_dims, hidden_1, hidden_2, hidden_3, num_actions, learning_rate, binary_objective=True):
        """Build the TF1 policy-gradient graph: a 4-layer MLP with a sigmoid
        (binary) or softmax (categorical) head, a REINFORCE-style loss, and
        per-variable gradient placeholders for batched updates.
        """
        self.input_dims = input_dims
        self.hidden_1 = hidden_1
        self.hidden_2 = hidden_2
        self.hidden_3 = hidden_3
        self.learning_rate = learning_rate
        self.dtype = tf.float32
        self.binary = binary_objective
        # A sigmoid head needs only one output unit for a two-action choice.
        if self.binary:
            self.num_actions = num_actions - 1
        else:
            self.num_actions = num_actions
        self.state = tf.placeholder(shape=[None, self.input_dims], dtype=self.dtype, name='current_state')
        # Binary actions are fed as floats (0./1.); categorical actions as int indices.
        if self.binary:
            self.action_holder = tf.placeholder(shape=[None, 1], dtype=self.dtype, name='actions')
        else:
            self.action_holder = tf.placeholder(shape=[None, 1], dtype=tf.int32, name='actions')
        self.reward_holder = tf.placeholder(dtype=self.dtype, name='rewards')
        self.keep_prob = tf.placeholder(dtype=self.dtype, name='keep_prob')
        # Four weight-only (bias-free) layers; dropout after the first two.
        with tf.variable_scope('layer_1'):
            w1 = tf.get_variable(name='weight', shape=[self.input_dims, self.hidden_1], dtype=self.dtype,
                                 initializer=xi())
            o1 = tf.nn.relu(tf.matmul(self.state, w1), name='output')
            d1 = tf.nn.dropout(o1, self.keep_prob)
        with tf.variable_scope('layer_2'):
            w2 = tf.get_variable(name='weight', shape=[self.hidden_1, self.hidden_2], dtype=self.dtype,
                                 initializer=xi())
            o2 = tf.nn.relu(tf.matmul(d1, w2), name='output')
            d2 = tf.nn.dropout(o2, self.keep_prob)
        with tf.variable_scope('layer_3'):
            w3 = tf.get_variable(name='weight', shape=[self.hidden_2, self.hidden_3], dtype=self.dtype,
                                 initializer=xi())
            o3 = tf.nn.relu(tf.matmul(d2, w3), name='hidden_1')
        with tf.variable_scope('layer_4'):
            w4 = tf.get_variable(name='weight', shape=[self.hidden_3, self.num_actions], dtype=self.dtype,
                                 initializer=xi())
            score = tf.matmul(o3, w4, name='score')
        if self.binary:
            self.probability = tf.nn.sigmoid(score, name='action_probability')
        else:
            self.probability = tf.nn.softmax(score, name='action_probabilities')
        self.t_vars = tf.trainable_variables()
        with tf.variable_scope('loss'):
            optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate)
            # One placeholder per trainable variable; gradients are accumulated
            # outside the graph and applied in batches via batch_update.
            self.gradient_holders = list()
            for _idx, var in enumerate(self.t_vars):
                placeholder = tf.placeholder(dtype=tf.float32, name=str(_idx) + '_holder')
                self.gradient_holders.append(placeholder)
            if self.binary:
                # NOTE(review): this rebinds self.action_holder to a derived
                # tensor. Callers that later feed agent.action_holder feed this
                # tensor directly, bypassing the abs(x - 1) flip — confirm
                # this is intended.
                self.action_holder = tf.abs(self.action_holder - 1)
                # log-likelihood of the taken action under the sigmoid policy;
                # the expression selects probability (action==0 branch) or
                # 1-probability-like term depending on the fed action value.
                log_lh = tf.log(
                    self.action_holder * (self.action_holder - self.probability) + (1 - self.action_holder) *
                    (self.action_holder + self.probability))
                self.loss = - tf.reduce_mean(log_lh * self.reward_holder)
            else:
                # Flatten the [batch, num_actions] probabilities and select the
                # probability of each chosen action by linear index.
                indices = tf.range(0, tf.shape(self.probability)[0]) * tf.shape(self.probability)[1] + \
                          self.action_holder
                responsible_outputs = tf.gather(tf.reshape(self.probability, [-1]), indices)
                self.loss = - tf.reduce_mean(tf.multiply(tf.log(responsible_outputs), self.reward_holder), name='loss')
            self.get_gradients = tf.gradients(self.loss, self.t_vars)
            self.batch_update = optimizer.apply_gradients(zip(self.gradient_holders, self.t_vars))
class ModelNet(object):
    """ Network predicting environment data based on previous observations. """
    def __init__(self, hidden_1, hidden_2, hidden_3, input_dims, state_dims, learning_rate):
        """Build the TF1 dynamics-model graph: a 3-layer dropout MLP over the
        concatenated (observation, action) input with three linear heads that
        predict the next observation, the reward, and a done-probability.
        """
        self.hidden_1 = hidden_1
        self.hidden_2 = hidden_2
        self.hidden_3 = hidden_3
        self.input_dims = input_dims
        self.state_dims = state_dims
        self.learning_rate = learning_rate
        self.dtype = tf.float32
        # Model input is the previous observation plus the taken action
        # (state_dims = input_dims + 1 in the surrounding script).
        self.previous_state = tf.placeholder(shape=[None, self.state_dims], dtype=self.dtype, name='model_input')
        self.true_observation = tf.placeholder(shape=[None, self.input_dims], dtype=self.dtype, name='true_obs')
        self.true_reward = tf.placeholder(shape=[None, 1], dtype=self.dtype, name='true_reward')
        self.true_done = tf.placeholder(shape=[None, 1], dtype=self.dtype, name='true_done')
        self.keep_prob = tf.placeholder(dtype=self.dtype, name='keep_prob')
        # Define layers
        with tf.variable_scope('layer_1'):
            w_1 = tf.get_variable(name='weights', shape=[self.state_dims, self.hidden_1], dtype=self.dtype,
                                  initializer=xi())
            b_1 = tf.get_variable(name='biases', shape=[self.hidden_1], dtype=self.dtype,
                                  initializer=tf.constant_initializer(0.0))
            o_1 = tf.nn.relu(tf.nn.xw_plus_b(self.previous_state, w_1, b_1), name='output')
            d_1 = tf.nn.dropout(o_1, keep_prob=self.keep_prob)
        with tf.variable_scope('layer_2'):
            w_2 = tf.get_variable(name='weights', shape=[self.hidden_1, self.hidden_2], dtype=self.dtype,
                                  initializer=xi())
            b_2 = tf.get_variable(name='biases', shape=[self.hidden_2], dtype=self.dtype,
                                  initializer=tf.constant_initializer(0.0))
            o_2 = tf.nn.relu(tf.nn.xw_plus_b(d_1, w_2, b_2), name='output')
            d_2 = tf.nn.dropout(o_2, self.keep_prob)
        with tf.variable_scope('layer_3'):
            w_3 = tf.get_variable(name='weights', shape=[self.hidden_2, self.hidden_3], dtype=self.dtype,
                                  initializer=xi())
            b_3 = tf.get_variable(name='biases', shape=[self.hidden_3], dtype=self.dtype,
                                  initializer=tf.constant_initializer(0.0))
            o_3 = tf.nn.relu(tf.nn.xw_plus_b(d_2, w_3, b_3), name='output')
        # Three separate linear heads share the trunk output o_3.
        with tf.variable_scope('prediction_layer'):
            w_obs = tf.get_variable(name='state_weight', shape=[self.hidden_3, self.input_dims], dtype=self.dtype,
                                    initializer=xi())
            b_obs = tf.get_variable(name='state_bias', shape=[self.input_dims], dtype=self.dtype,
                                    initializer=tf.constant_initializer(0.0))
            w_reward = tf.get_variable(name='reward_weight', shape=[self.hidden_3, 1], dtype=self.dtype,
                                       initializer=xi())
            b_reward = tf.get_variable(name='reward_bias', shape=[1], dtype=self.dtype,
                                       initializer=tf.constant_initializer(0.0))
            w_done = tf.get_variable(name='done_weight', shape=[self.hidden_3, 1], dtype=self.dtype,
                                     initializer=xi())
            # NOTE(review): unlike the other biases this one starts at 1.0 —
            # confirm intent (it skews initial done-probabilities upward).
            b_done = tf.get_variable(name='done_bias', shape=[1], dtype=self.dtype,
                                     initializer=tf.constant_initializer(1.0))
            predicted_observation = tf.nn.xw_plus_b(o_3, w_obs, b_obs, name='observation_prediction')
            predicted_reward = tf.nn.xw_plus_b(o_3, w_reward, b_reward, name='reward_prediction')
            predicted_done = tf.nn.sigmoid(tf.nn.xw_plus_b(o_3, w_done, b_done, name='done_prediction'))
        # Concatenated prediction layout: [observation (input_dims) | reward (1) | done (1)].
        self.predicted_state = tf.concat(values=[predicted_observation, predicted_reward, predicted_done],
                                         axis=1, name='state_prediction')
        # Get losses
        with tf.variable_scope('loss'):
            observation_loss = tf.square(tf.subtract(self.true_observation, predicted_observation),
                                         name='observation_loss')
            reward_loss = tf.square(tf.subtract(self.true_reward, predicted_reward), name='reward_loss')
            # Cross-entropy due to one-hot nature of the done-vector (1 if match, 0 otherwise)
            # done_loss first computes the probability assigned to the correct
            # outcome, then takes its negative log.
            done_loss = tf.multiply(self.true_done, predicted_done) + tf.multiply(1 - self.true_done,
                                                                                 1 - predicted_done)
            done_loss = - tf.log(done_loss)
            # Weighted sum: the done-signal is weighted twice as heavily.
            self.loss = tf.reduce_mean(1.0 * observation_loss + 1.0 * reward_loss + 2.0 * done_loss,
                                       name='combined_loss')
            optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate)
            self.update_model = optimizer.minimize(loss=self.loss)
# Declare any helper functions
def reset_grad_buff(grad_buff):
    """Zero out every accumulated gradient in the buffer (in place) and
    return the same buffer object."""
    # Multiplying by zero preserves each entry's shape and dtype.
    grad_buff[:] = [entry * 0 for entry in grad_buff]
    return grad_buff
def discount_rewards(reward_vector):
    """ Produces a discounted rewards 1D vector from the rewards collected over the duration of an episode.

    Rewards are accumulated back-to-front with decay GAMMA and then
    standardized (zero mean, unit variance) to reduce gradient variance.

    Parameters
    ----------
    reward_vector : np.ndarray
        Per-step rewards of one episode (indexed 0..size-1 along axis 0).

    Returns
    -------
    np.ndarray
        Discounted, standardized rewards (float64) with the input's shape.
    """
    # Accumulate in float64 so integer/object reward arrays are not silently
    # truncated by the in-place mean subtraction and std division below
    # (np.zeros_like without dtype would inherit the input dtype).
    discounted = np.zeros_like(reward_vector, dtype=np.float64)
    running_add = 0.0
    for i in reversed(range(reward_vector.size)):
        running_add = running_add * GAMMA + reward_vector[i]
        discounted[i] = running_add
    discounted -= np.mean(discounted)
    std = np.std(discounted)
    if std > 0:  # guard against division by zero on constant-reward episodes
        discounted /= std
    return discounted
def model_step_function(this_sess, model, checked_state, checked_action, current_step):
    """ Performs a single training step using the model network.

    Runs the learned dynamics model once on (state, action) and returns the
    predicted (next_observation, reward, done) triple, mimicking env.step().
    """
    # Model input is the observation concatenated with the chosen action.
    feed_input = np.hstack([checked_state, np.reshape(np.array(checked_action), [-1, 1])])
    # Obtain prediction
    msf_prediction = this_sess.run(model.predicted_state, feed_dict={model.previous_state: feed_input,
                                                                     model.keep_prob: 1.0})
    # Prediction layout: columns 0-3 next observation, 4 reward, 5 done-probability.
    next_observation = msf_prediction[:, 0:4]
    next_reward = msf_prediction[:, 4]
    # Clip values
    # Keep predictions inside CartPole's valid cart-position / pole-angle ranges.
    next_observation[:, 0] = np.clip(next_observation[:, 0], - 2.4, 2.4)
    next_observation[:, 2] = np.clip(next_observation[:, 2], - 0.4, 0.4)
    done_prob = np.clip(msf_prediction[:, 5], 0, 1)
    # Check if episode done or maximum number of steps is exceeded
    # NOTE(review): done_prob is a length-1 array here; the truthiness of the
    # comparison relies on a batch size of 1.
    if done_prob > 0.1 or current_step > MAX_STEPS:
        next_done = True
    else:
        next_done = False
    return next_observation, next_reward, next_done
# Book-keeping for the alternating real-env / model-env training loop.
total_reward = list()
episodic_reward = 0
episode_history = list()
episode_number = 0
real_episodes = 0
batch_size = REAL_BSIZE
solved = False
# Phase switches: start by acting in the real env and training only the model.
draw_from_model = False
train_the_model = True
train_the_policy = False
agent = PolicyNet(INPUT_DIMS, AGENT_HIDDEN_1, AGENT_HIDDEN_2, AGENT_HIDDEN_3, NUM_ACTIONS, LR)
env_model = ModelNet(MODEL_HIDDEN_1, MODEL_HIDDEN_2, MODEL_HIDDEN_3, INPUT_DIMS, STATE_DIMS, LR)
# Launch the graph
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # Gradient accumulation buffer, one slot per trainable policy variable.
    grad_buffer = sess.run(agent.t_vars)
    grad_buffer = reset_grad_buff(grad_buffer)
    new_state = env.reset()
    state = new_state
    while episode_number <= TOTAL_EPS:
        state = np.reshape(new_state, [1, 4])
        # Sample an action from the current policy (dropout disabled).
        if BINARY_OBJECTIVE:
            action_prob = sess.run(agent.probability, feed_dict={agent.state: state, agent.keep_prob: 1.0})
            action = 1 if np.random.uniform() < action_prob else 0
        else:
            action_distribution = sess.run(agent.probability, feed_dict={agent.state: state, agent.keep_prob: 1.0})
            # Sample a probability value, then recover its action index.
            action_value = np.random.choice(action_distribution[0], p=action_distribution[0])
            match = np.square(action_distribution - action_value)
            action = np.argmin(match)
        # Perform a single step either within the model or the real environment to obtain new measurements
        if draw_from_model:
            new_state, reward, done = model_step_function(sess, env_model, state, action, len(episode_history))
        else:
            new_state, reward, done, info = env.step(action)
        episode_history.append([state, action, reward, done, new_state])
        episodic_reward += reward
        if done:
            if not draw_from_model:
                real_episodes += 1
            total_reward.append(episodic_reward)
            episode_number += 1
            episodic_reward = 0
            episode_history = np.array(episode_history)
            # Unravel the history
            episode_state = np.vstack(episode_history[:, 0])
            episode_action = np.reshape(episode_history[:, 1], [-1, 1])
            episode_reward = np.reshape(episode_history[:, 2], [-1, 1])
            episode_done = np.reshape(episode_history[:, 3], [-1, 1])
            episode_next = np.vstack(episode_history[:, 4])
            # episode_check = np.reshape(episode_history[:, 5], [-1, 1])
            episode_history = list()
            # Train each of the networks when specified
            if train_the_model:
                # Supervised update: predict (next_obs, reward, done) from (state, action).
                state_plus_action = np.hstack([episode_state, episode_action])
                # NOTE(review): episode_all is assembled but never used below.
                episode_all = np.hstack([episode_next, episode_reward, episode_done])
                feed_dict = {env_model.previous_state: state_plus_action,
                             env_model.true_observation: episode_next,
                             env_model.true_done: episode_done,
                             env_model.true_reward: episode_reward,
                             env_model.keep_prob: KEEP_PROB_MODEL}
                loss, state_prediction, _ = sess.run([env_model.loss, env_model.predicted_state,
                                                      env_model.update_model], feed_dict=feed_dict)
            if train_the_policy:
                # Accumulate policy gradients; they are applied in batches below.
                discounted_reward = discount_rewards(episode_reward).astype('float32')
                feed_dict = {agent.state: episode_state,
                             agent.action_holder: episode_action,
                             agent.reward_holder: discounted_reward,
                             agent.keep_prob: KEEP_PROB_AGENT}
                agent_gradients = sess.run(agent.get_gradients, feed_dict=feed_dict)
                # Break if gradients become too large
                # (x == x is False for NaN, so this detects an all-NaN gradient).
                if np.sum(agent_gradients[0] == agent_gradients[0]) == 0:
                    break
                for idx, grad in enumerate(agent_gradients):
                    grad_buffer[idx] += grad
            # Apply accumulated policy gradients once per batch, but only after
            # the model has seen at least 100 real episodes.
            if episode_number % batch_size == 0 and real_episodes >= 100:
                if train_the_policy:
                    _ = sess.run(agent.batch_update, feed_dict=dict(zip(agent.gradient_holders, grad_buffer)))
                    grad_buffer = reset_grad_buff(grad_buffer)
                if not draw_from_model:
                    batch_reward = np.mean(total_reward[- REAL_BSIZE:])
                    mean_total = np.mean(total_reward[- REAL_BSIZE * 100:])
                    print('Acting in env. | Episode: %d | Batch reward %.4f | Action: %.4f | Mean reward: %.4f'
                          % (real_episodes, batch_reward, action, mean_total))
                    if batch_reward >= 200:
                        solved = True
            # Once the model has been trained on 100 episodes, we start alternating between training the policy
            # from the model and training the model from the real environment.
            if episode_number > 100:
                draw_from_model = not draw_from_model
                train_the_model = not train_the_model
                train_the_policy = not train_the_policy
            if draw_from_model:
                new_state = np.random.uniform(-0.1, 0.1, [4])  # Generate reasonable starting point
                batch_size = MODEL_BSIZE
            else:
                new_state = env.reset()
                batch_size = REAL_BSIZE
            if episode_number % 1000 == 0:
                # NOTE(review): this halves the Python-level constant only; the
                # Adam optimizers were built with the original LR and are
                # unaffected by this rebinding — confirm intended.
                LR /= 2
        if solved:
            print('Found a solution!')
            break
    print('Agent has experienced %d real episodes.' % real_episodes)
| {
"repo_name": "demelin/learning_reinforcement_learning",
"path": "model_based_rl.py",
"copies": "1",
"size": "17484",
"license": "mit",
"hash": 1691261135482080800,
"line_mean": 49.386167147,
"line_max": 120,
"alpha_frac": 0.5899107756,
"autogenerated": false,
"ratio": 3.7803243243243245,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48702350999243244,
"avg_score": null,
"num_lines": null
} |
#adults dataset
def load_adult():
    """loads adult dataset

    Python 2 only: relies on list-returning dict.keys()/.values(), has_key()
    and list-returning map(). Returns (rows, labels): categorical fields are
    mapped to integer codes, numeric fields to floats, and the label is
    0 for '<=50K' and 1 otherwise.
    """
    remove_sp = lambda n: n.replace(' ', '')
    # NOTE(review): last_column is defined but never used.
    last_column = lambda i: i.pop(-1)
    binary_= lambda u: 0 if u == '<=50K' else 1
    # Per-column spec: None means numeric; a list enumerates categorical values.
    defs_ = [
        {'age': None},
        {'workclass': ['Private', '?', 'Self-emp-not-inc', 'Self-emp-inc', 'Federal-gov', 'Local-gov', 'State-gov', 'Without-pay', 'Never-worked']},
        {'fnlwgt': None},
        {'education': ['Bachelors', '?', ' Some-college', ' 11th', ' HS-grad', ' Prof-school', ' Assoc-acdm', ' Assoc-voc', ' 9th', ' 7th-8th', ' 12th', ' Masters', ' 1st-4th', ' 10th', ' Doctorate', ' 5th-6th', ' Preschool']},
        {'education-num': None},
        {'marital-status': ['Married-civ-spouse', '?', 'Divorced', 'Never-married', 'Separated', 'Widowed', ' Married-spouse-absent', ' Married-AF-spouse']},
        {'occupation': ['Tech-support', ' Craft-repair', '?', ' Other-service', ' Sales', ' Exec-managerial', ' Prof-specialty', ' Handlers-cleaners', ' Machine-op-inspct', 'Adm-clerical', ' Farming-fishing', ' Transport-moving', ' Priv-house-serv', ' Protective-serv', ' Armed-Forces']},
        {'relationship': ['Wife', ' Own-child', ' Husband', '?', ' Not-in-family', ' Other-relative', ' Unmarried']},
        {'race': ['White', ' Asian-Pac-Islander', '?', ' Amer-Indian-Eskimo', ' Other', ' Black']},
        {'sex': ['Female', ' Male', '?']},
        {'capital-gain': None},
        {'capital-loss': None},
        {'hours-per-week': None},
        {'native-country': ['United-States', '?', ' Cambodia', ' England', ' Puerto-Rico', ' Canada', ' Germany', ' Outlying-US(Guam-USVI-etc)', ' India', ' Japan', ' Greece', ' South', ' China', ' Cuba', ' Iran', ' Honduras', ' Philippines', ' Italy', ' Poland', ' Jamaica', ' Vietnam', ' Mexico', ' Portugal', ' Ireland', ' France', ' Dominican-Republic', ' Laos', ' Ecuador', ' Taiwan', ' Haiti', ' Columbia', ' Hungary', ' Guatemala', ' Nicaragua', ' Scotland', ' Thailand', ' Yugoslavia', ' El-Salvador', ' Trinadad&Tobago', ' Peru', ' Hong', ' Holand-Netherlands']}
    ]
    v =-1
    # Re-key every spec by its positional index (v tracks i) and turn each
    # category list into a value -> integer-code dict.
    for i,a in enumerate(defs_):
        current_col = a
        v += 1
        key_ = current_col.keys()[0]
        if current_col[key_]:
            defs_[i][key_] = dict([(b.strip(' '), i_) for b, i_ in zip(current_col[key_], range(0, len(current_col[key_])))])
        defs_[i][v] = defs_[i].pop(key_)
    # Slurp the raw file and split into comma-separated rows.
    y = ''
    f = open("datasets_/adults.txt", 'rb')
    for a in f:
        y += a
    y = y.split('\n')
    y.pop(-1)
    labels_ = []
    for n, j in enumerate(y):
        y[n] = y[n].split(',')
        current_ = map(remove_sp, y[n])
        # Last field is the income label.
        indicator_ = current_.pop(-1)
        labels_.append(indicator_)
        for i, a in enumerate(current_):
            column_ = defs_[i]
            if column_.values()[0] == None:
                current_[i] = float(current_[i])
            elif column_.values()[0].has_key(current_[i]):
                current_[i] = column_.values()[0][current_[i]]
        y[n] = current_
    return y, map(binary_, labels_)
#wines dataset
def load_wines():
    """loads the wines dataset: the first comma-separated field of each row is
    the class label, the remaining fields are continuous features."""
    raw = ''
    handle = open('datasets_/wines.txt', 'rb')
    for chunk in handle:
        raw += chunk
    rows = raw.split('\n')
    targets = []
    for row_idx in range(len(rows)):
        fields = rows[row_idx].split(',')
        # First field is the class label.
        targets.append(fields.pop(0))
        rows[row_idx] = [float(field) for field in fields]
    return rows, [float(label) for label in targets]
#car dataset
#http://archive.ics.uci.edu/ml/machine-learning-databases/car/
def load_cars():
    """loads the car evaluation dataset; ordinal string columns are mapped to
    integer codes and numeric columns to floats."""
    def normalize_count(token):
        # 'more' / '5more' both stand for "5 or more"
        return 5 if token in ['more', '5more'] else token
    column_specs = [
        {'buying': {'vhigh': 4, 'high': 3, 'med': 2, 'low': 1}},
        {'maint': {'vhigh': 4, 'high': 3, 'med': 2, 'low': 1}},
        {'doors': None},
        {'persons': None},
        {'lug_boot': {'small': 1, 'med': 2, 'big': 3}},
        {'safety': {'low': 1, 'med': 2, 'high': 3}},
    ]
    # Re-key each single-entry spec dict by its positional column index.
    for col_idx, spec in enumerate(column_specs):
        name = list(spec)[0]
        spec[col_idx] = spec.pop(name)
    raw = ''
    handle = open('datasets_/cars.txt', 'rb')
    for chunk in handle:
        raw += chunk
    rows = raw.split('\n')
    targets = []
    for row_idx in range(len(rows)):
        fields = rows[row_idx].split(',')
        # Last field is the acceptability label.
        targets.append(fields.pop(-1))
        converted = [normalize_count(field) for field in fields]
        for col_idx, value in enumerate(converted):
            mapping = list(column_specs[col_idx].values())[0]
            if mapping is None:
                converted[col_idx] = float(converted[col_idx])
            elif value in mapping:
                converted[col_idx] = mapping[value]
        rows[row_idx] = converted
    return rows, targets
#yeasts dataset (all continuous)
#http://archive.ics.uci.edu/ml/machine-learning-databases/yeast/yeast.data
def load_yeast():
    """loads the yeast dataset (all continuous); the trailing field of each
    space-separated row is the class label, the leading sequence name is
    discarded."""
    # Schema reference (kept for documentation, mirrors the data layout).
    field_types = {'sequence_name': str,
                   'mcg': float,
                   'gvh': float,
                   'alm': float,
                   'mit': float,
                   'erl': float,
                   'pox': float,
                   'vac': float,
                   'nuc': float,
                   'class': str
                   }
    handle = open('datasets_/yeast.txt', 'rb')
    raw = ''
    for chunk in handle:
        raw += chunk
    rows = raw.split('\n')
    targets = []
    for row_idx in range(len(rows)):
        fields = rows[row_idx].split(' ')
        targets.append(fields.pop(-1))
        fields.pop(0)  # discard the sequence name
        rows[row_idx] = [float(field) for field in fields if len(field) > 0]
    return rows, targets
#wine quality dataset (all continuous)
#http://archive.ics.uci.edu/ml/machine-learning-databases/wine-quality/winequality.names
def load_wine_quality():
    """loads the wine quality dataset (all continuous); the last tab-separated
    field of each row is the integer quality label."""
    # Schema reference (kept for documentation, mirrors the data layout).
    field_types = {
        'fixed acidity': float,
        'volatile acidity': float,
        'citric acid': float,
        'residual sugar': float,
        'chlorides': float,
        'free sulfur dioxide': float,
        'total sulfur dioxide': float,
        'density': float,
        'pH': float,
        'sulphates': float,
        'alcohol': float,
        'quality': int
    }
    handle = open('datasets_/wine_quality.txt', 'rb')
    raw = ''
    for chunk in handle:
        raw += chunk
    rows = raw.split('\n')
    rows.pop(-1)  # drop the trailing empty line
    targets = []
    for row_idx in range(len(rows)):
        fields = [field for field in rows[row_idx].split('\t') if len(field) > 0]
        targets.append(int(fields.pop(-1)))
        rows[row_idx] = [float(field) for field in fields]
    return rows, targets
#seeds dataset (all continuous)
#https://archive.ics.uci.edu/ml/machine-learning-databases/00236/seeds_dataset.txt
def load_seeds():
    """loads the seeds dataset (all continuous); the last tab-separated field
    of each row is the integer seed-type label."""
    # Schema reference (kept for documentation, mirrors the data layout).
    field_types = {
        'area': float,
        'perimeter': float,
        'compactness': float,
        'width of kernel': float,
        'asymmetry coefficient': float,
        'length of kernel groove': float,
        'seed type': int
    }
    handle = open('datasets_/seeds.txt', 'rb')
    raw = ''
    for chunk in handle:
        raw += chunk
    rows = raw.split('\n')
    targets = []
    for row_idx in range(len(rows)):
        fields = [field for field in rows[row_idx].split('\t') if len(field) > 0]
        targets.append(int(fields.pop(-1)))
        rows[row_idx] = [float(field) for field in fields]
    return rows, targets
| {
"repo_name": "saifuddin778/pwperceptrons",
"path": "datasets.py",
"copies": "1",
"size": "7411",
"license": "mit",
"hash": -538180132751841340,
"line_mean": 31.9377777778,
"line_max": 569,
"alpha_frac": 0.4805019566,
"autogenerated": false,
"ratio": 3.106035205364627,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4086537161964627,
"avg_score": null,
"num_lines": null
} |
"""A dumb and slow but simple dbm clone.
For database spam, spam.dir contains the index (a text file),
spam.bak *may* contain a backup of the index (also a text file),
while spam.dat contains the data (a binary file).
XXX TO DO:
- seems to contain a bug when updating...
- reclaim free space (currently, space once occupied by deleted or expanded
items is never reused)
- support concurrent access (currently, if two processes take turns making
updates, they can mess up the index)
- support efficient access to large databases (currently, the whole index
is read when the database is opened, and some updates rewrite the whole index)
- support opening for read-only (flag = 'm')
"""
from __future__ import division, print_function, absolute_import
_os = __import__('os')
from scipy.lib.six import builtins
from scipy.lib.six import string_types
_open = builtins.open
_BLOCKSIZE = 512
error = IOError # For anydbm
class _Database(object):
    """Dumb dbm database object: a str -> str mapping persisted across a text
    index file (<file>.dir), its backup (<file>.bak) and a block-aligned
    binary data file (<file>.dat)."""
    def __init__(self, file):
        # Derived on-disk file names.
        self._dirfile = file + '.dir'
        self._datfile = file + '.dat'
        self._bakfile = file + '.bak'
        # Mod by Jack: create data file if needed
        try:
            f = _open(self._datfile, 'r')
        except IOError:
            f = _open(self._datfile, 'w')
        f.close()
        self._update()
    def _update(self):
        """Load the on-disk index into the in-memory dict self._index."""
        import string
        self._index = {}
        try:
            f = _open(self._dirfile)
        except IOError:
            pass
        else:
            while 1:
                # NOTE(review): string.rstrip() was removed in Python 3; this
                # module is effectively Python 2 only.
                line = string.rstrip(f.readline())
                if not line:
                    break
                # NOTE(review): eval() here fully trusts the index file's
                # contents.
                key, (pos, siz) = eval(line)
                self._index[key] = (pos, siz)
            f.close()
    def _commit(self):
        """Rewrite the index file from self._index; the previous index file is
        kept as the .bak backup."""
        try:
            _os.unlink(self._bakfile)
        except _os.error:
            pass
        try:
            _os.rename(self._dirfile, self._bakfile)
        except _os.error:
            pass
        f = _open(self._dirfile, 'w')
        for key, (pos, siz) in self._index.items():
            f.write("%s, (%s, %s)\n" % (repr(key), repr(pos), repr(siz)))
        f.close()
    def __getitem__(self, key):
        """Read and return the value stored under key (raises KeyError)."""
        pos, siz = self._index[key]  # may raise KeyError
        f = _open(self._datfile, 'rb')
        f.seek(pos)
        dat = f.read(siz)
        f.close()
        return dat
    def __contains__(self, key):
        return key in self._index
    def _addval(self, val):
        """Append val to the data file at a fresh _BLOCKSIZE-aligned offset;
        return (pos, len(val))."""
        f = _open(self._datfile, 'rb+')
        f.seek(0, 2)
        pos = f.tell()
## Does not work under MW compiler
##              pos = ((pos + _BLOCKSIZE - 1) // _BLOCKSIZE) * _BLOCKSIZE
##              f.seek(pos)
        npos = ((pos + _BLOCKSIZE - 1) // _BLOCKSIZE) * _BLOCKSIZE
        # NOTE(review): writes str to a binary-mode file — TypeError on
        # Python 3, fine on Python 2 where str is bytes.
        f.write('\0'*(npos-pos))
        pos = npos
        f.write(val)
        f.close()
        return (pos, len(val))
    def _setval(self, pos, val):
        """Overwrite the value at offset pos in place; the caller guarantees
        there is enough room."""
        f = _open(self._datfile, 'rb+')
        f.seek(pos)
        f.write(val)
        f.close()
        return (pos, len(val))
    def _addkey(self, key, pos_and_siz):
        """Record a brand-new key in the in-memory index and append the entry
        to the index file."""
        (pos, siz) = pos_and_siz
        self._index[key] = (pos, siz)
        f = _open(self._dirfile, 'a')
        f.write("%s, (%s, %s)\n" % (repr(key), repr(pos), repr(siz)))
        f.close()
    def __setitem__(self, key, val):
        """Store val under key, reusing the existing blocks when the new value
        fits; otherwise the old blocks are leaked (space is never reclaimed)."""
        if not isinstance(key, string_types) or not isinstance(val, string_types):
            raise TypeError("keys and values must be strings")
        if key not in self._index:
            (pos, siz) = self._addval(val)
            self._addkey(key, (pos, siz))
        else:
            pos, siz = self._index[key]
            oldblocks = (siz + _BLOCKSIZE - 1) // _BLOCKSIZE
            newblocks = (len(val) + _BLOCKSIZE - 1) // _BLOCKSIZE
            if newblocks <= oldblocks:
                pos, siz = self._setval(pos, val)
                self._index[key] = pos, siz
            else:
                pos, siz = self._addval(val)
                self._index[key] = pos, siz
                self._addkey(key, (pos, siz))
    def __delitem__(self, key):
        """Drop key from the index (its data blocks are leaked) and commit."""
        del self._index[key]
        self._commit()
    def keys(self):
        return list(self._index.keys())
    def has_key(self, key):
        return key in self._index
    def __len__(self):
        return len(self._index)
    def close(self):
        """Invalidate the object. NOTE: this does not commit pending index
        changes made by __setitem__."""
        self._index = None
        self._datfile = self._dirfile = self._bakfile = None
def open(file, flag=None, mode=None):
    """Open (or create) the database rooted at path `file` and return a
    _Database instance. `flag` and `mode` exist only for anydbm interface
    compatibility."""
    # flag, mode arguments are currently ignored
    return _Database(file)
| {
"repo_name": "jsilter/scipy",
"path": "scipy/weave/_dumbdbm_patched.py",
"copies": "15",
"size": "4514",
"license": "bsd-3-clause",
"hash": -2137187660849858600,
"line_mean": 27.3899371069,
"line_max": 82,
"alpha_frac": 0.5350022153,
"autogenerated": false,
"ratio": 3.5265625,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
""" A dumb and slow but simple dbm clone.
For database spam, spam.dir contains the index (a text file),
spam.bak *may* contain a backup of the index (also a text file),
while spam.dat contains the data (a binary file).
XXX TO DO:
- seems to contain a bug when updating...
- reclaim free space (currently, space once occupied by deleted or expanded
items is never reused)
- support concurrent access (currently, if two processes take turns making
updates, they can mess up the index)
- support efficient access to large databases (currently, the whole index
is read when the database is opened, and some updates rewrite the whole index)
- support opening for read-only (flag = 'm')
"""
_os = __import__('os')
import builtins
_open = builtins.open
_BLOCKSIZE = 512
error = IOError # For anydbm
class _Database:
    """Dumb dbm-style str -> str mapping persisted across a text index file
    (<file>.dir), its backup (<file>.bak) and a block-aligned binary data
    file (<file>.dat).

    Fixes relative to the original half-finished 2to3 port:
    - floor division (//) when computing block-aligned offsets and block
      counts (true division produced floats on Python 3, breaking
      ``'\\0' * (npos - pos)``),
    - ``str.rstrip()`` instead of the removed ``string.rstrip()``,
    - ``ast.literal_eval()`` instead of ``eval()`` when parsing the index,
    - explicit latin-1 encode/decode at the binary data-file boundary so the
      str interface keeps working on Python 3.
    """
    def __init__(self, file):
        """Open (creating the data file if needed) the database at `file`."""
        self._dirfile = file + '.dir'
        self._datfile = file + '.dat'
        self._bakfile = file + '.bak'
        # Mod by Jack: create data file if needed
        try:
            f = _open(self._datfile, 'r')
        except IOError:
            f = _open(self._datfile, 'w')
        f.close()
        self._update()
    def _update(self):
        """Load the on-disk index into the in-memory dict self._index."""
        import ast
        self._index = {}
        try:
            f = _open(self._dirfile)
        except IOError:
            pass
        else:
            for line in f:
                line = line.rstrip()
                if not line:
                    break
                # Index lines look like: 'key', (pos, siz)
                key, (pos, siz) = ast.literal_eval(line)
                self._index[key] = (pos, siz)
            f.close()
    def _commit(self):
        """Rewrite the index file from self._index; the previous index file is
        kept as the .bak backup."""
        try:
            _os.unlink(self._bakfile)
        except _os.error:
            pass
        try:
            _os.rename(self._dirfile, self._bakfile)
        except _os.error:
            pass
        f = _open(self._dirfile, 'w')
        for key, (pos, siz) in self._index.items():
            f.write("%s, (%s, %s)\n" % (repr(key), repr(pos), repr(siz)))
        f.close()
    def __getitem__(self, key):
        """Return the string value stored under key (raises KeyError)."""
        pos, siz = self._index[key]  # may raise KeyError
        f = _open(self._datfile, 'rb')
        f.seek(pos)
        dat = f.read(siz)
        f.close()
        # Values are stored as latin-1 bytes; decode back to str.
        return dat.decode('latin-1')
    def _addval(self, val):
        """Append val to the data file at a fresh _BLOCKSIZE-aligned offset;
        return (pos, len(val))."""
        f = _open(self._datfile, 'rb+')
        f.seek(0, 2)
        pos = f.tell()
        # Floor division keeps offsets integral (bug fix: '/' gave floats).
        npos = ((pos + _BLOCKSIZE - 1) // _BLOCKSIZE) * _BLOCKSIZE
        f.write(b'\0' * (npos - pos))
        pos = npos
        f.write(val.encode('latin-1'))
        f.close()
        return (pos, len(val))
    def _setval(self, pos, val):
        """Overwrite the value at offset pos in place; the caller guarantees
        there is enough room. Return (pos, len(val))."""
        f = _open(self._datfile, 'rb+')
        f.seek(pos)
        f.write(val.encode('latin-1'))
        f.close()
        return (pos, len(val))
    def _addkey(self, key, pos_and_siz):
        """Record a brand-new key in the in-memory index and append the entry
        to the index file."""
        (pos, siz) = pos_and_siz
        self._index[key] = (pos, siz)
        f = _open(self._dirfile, 'a')
        f.write("%s, (%s, %s)\n" % (repr(key), repr(pos), repr(siz)))
        f.close()
    def __setitem__(self, key, val):
        """Store val under key, reusing the existing blocks when the new value
        fits; otherwise the old blocks are leaked (space is never reclaimed)."""
        if not (isinstance(key, str) and isinstance(val, str)):
            raise TypeError("keys and values must be strings")
        if key not in self._index:
            (pos, siz) = self._addval(val)
            self._addkey(key, (pos, siz))
        else:
            pos, siz = self._index[key]
            # Block counts via floor division (bug fix: '/' gave floats).
            oldblocks = (siz + _BLOCKSIZE - 1) // _BLOCKSIZE
            newblocks = (len(val) + _BLOCKSIZE - 1) // _BLOCKSIZE
            if newblocks <= oldblocks:
                pos, siz = self._setval(pos, val)
                self._index[key] = pos, siz
            else:
                pos, siz = self._addval(val)
                self._index[key] = pos, siz
                self._addkey(key, (pos, siz))
    def __delitem__(self, key):
        """Drop key from the index (its data blocks are leaked) and commit."""
        del self._index[key]
        self._commit()
    def keys(self):
        return list(self._index.keys())
    def has_key(self, key):
        return key in self._index
    def __len__(self):
        return len(self._index)
    def close(self):
        """Invalidate the object. NOTE: this does not commit pending index
        changes made by __setitem__."""
        self._index = None
        self._datfile = self._dirfile = self._bakfile = None
def open(file, flag = None, mode = None):
    """Open (or create) the database rooted at path `file` and return a
    _Database instance. `flag` and `mode` exist only for anydbm interface
    compatibility."""
    # flag, mode arguments are currently ignored
    return _Database(file)
| {
"repo_name": "macronucleus/chromagnon",
"path": "Chromagnon/Priithon/plt/dumbdbm_patched.py",
"copies": "1",
"size": "4286",
"license": "mit",
"hash": 4994364099795589000,
"line_mean": 28.156462585,
"line_max": 78,
"alpha_frac": 0.5258982734,
"autogenerated": false,
"ratio": 3.507364975450082,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4533263248850082,
"avg_score": null,
"num_lines": null
} |
"""A dumb and slow but simple dbm clone.
For database spam, spam.dir contains the index (a text file),
spam.bak *may* contain a backup of the index (also a text file),
while spam.dat contains the data (a binary file).
XXX TO DO:
- seems to contain a bug when updating...
- reclaim free space (currently, space once occupied by deleted or expanded
items is never reused)
- support concurrent access (currently, if two processes take turns making
updates, they can mess up the index)
- support efficient access to large databases (currently, the whole index
is read when the database is opened, and some updates rewrite the whole index)
- support opening for read-only (flag = 'm')
"""
import ast as _ast
import os as _os
import __builtin__
import UserDict
_open = __builtin__.open
_BLOCKSIZE = 512
error = IOError # For anydbm
class _Database(UserDict.DictMixin):
# The on-disk directory and data files can remain in mutually
# inconsistent states for an arbitrarily long time (see comments
# at the end of __setitem__). This is only repaired when _commit()
# gets called. One place _commit() gets called is from __del__(),
# and if that occurs at program shutdown time, module globals may
# already have gotten rebound to None. Since it's crucial that
# _commit() finish successfully, we can't ignore shutdown races
# here, and _commit() must not reference any globals.
_os = _os # for _commit()
_open = _open # for _commit()
def __init__(self, filebasename, mode):
self._mode = mode
# The directory file is a text file. Each line looks like
# "%r, (%d, %d)\n" % (key, pos, siz)
# where key is the string key, pos is the offset into the dat
# file of the associated value's first byte, and siz is the number
# of bytes in the associated value.
self._dirfile = filebasename + _os.extsep + 'dir'
# The data file is a binary file pointed into by the directory
# file, and holds the values associated with keys. Each value
# begins at a _BLOCKSIZE-aligned byte offset, and is a raw
# binary 8-bit string value.
self._datfile = filebasename + _os.extsep + 'dat'
self._bakfile = filebasename + _os.extsep + 'bak'
# The index is an in-memory dict, mirroring the directory file.
self._index = None # maps keys to (pos, siz) pairs
# Mod by Jack: create data file if needed
try:
f = _open(self._datfile, 'r')
except IOError:
with _open(self._datfile, 'w') as f:
self._chmod(self._datfile)
else:
f.close()
self._update()
# Read directory file into the in-memory index dict.
def _update(self):
self._index = {}
try:
f = _open(self._dirfile)
except IOError:
pass
else:
with f:
for line in f:
line = line.rstrip()
key, pos_and_siz_pair = _ast.literal_eval(line)
self._index[key] = pos_and_siz_pair
# Write the index dict to the directory file. The original directory
# file (if any) is renamed with a .bak extension first. If a .bak
# file currently exists, it's deleted.
def _commit(self):
# CAUTION: It's vital that _commit() succeed, and _commit() can
# be called from __del__(). Therefore we must never reference a
# global in this routine.
if self._index is None:
return # nothing to do
try:
self._os.unlink(self._bakfile)
except self._os.error:
pass
try:
self._os.rename(self._dirfile, self._bakfile)
except self._os.error:
pass
with self._open(self._dirfile, 'w') as f:
self._chmod(self._dirfile)
for key, pos_and_siz_pair in self._index.iteritems():
f.write("%r, %r\n" % (key, pos_and_siz_pair))
sync = _commit
def __getitem__(self, key):
pos, siz = self._index[key] # may raise KeyError
with _open(self._datfile, 'rb') as f:
f.seek(pos)
dat = f.read(siz)
return dat
# Append val to the data file, starting at a _BLOCKSIZE-aligned
# offset. The data file is first padded with NUL bytes (if needed)
# to get to an aligned offset. Return pair
# (starting offset of val, len(val))
def _addval(self, val):
with _open(self._datfile, 'rb+') as f:
f.seek(0, 2)
pos = int(f.tell())
npos = ((pos + _BLOCKSIZE - 1) // _BLOCKSIZE) * _BLOCKSIZE
f.write('\0'*(npos-pos))
pos = npos
f.write(val)
return (pos, len(val))
# Write val to the data file, starting at offset pos. The caller
# is responsible for ensuring that there's enough room starting at
# pos to hold val, without overwriting some other value. Return
# pair (pos, len(val)).
def _setval(self, pos, val):
with _open(self._datfile, 'rb+') as f:
f.seek(pos)
f.write(val)
return (pos, len(val))
# key is a new key whose associated value starts in the data file
# at offset pos and with length siz. Add an index record to
# the in-memory index dict, and append one to the directory file.
def _addkey(self, key, pos_and_siz_pair):
self._index[key] = pos_and_siz_pair
with _open(self._dirfile, 'a') as f:
self._chmod(self._dirfile)
f.write("%r, %r\n" % (key, pos_and_siz_pair))
    def __setitem__(self, key, val):
        # Both key and value must be (byte) strings: the on-disk format
        # writes them via repr() and raw file writes.
        if not type(key) == type('') == type(val):
            raise TypeError, "keys and values must be strings"
        if key not in self._index:
            # New key: append the value to the data file and record it in
            # both the in-memory index and the directory file.
            self._addkey(key, self._addval(val))
        else:
            # See whether the new value is small enough to fit in the
            # (padded) space currently occupied by the old value.
            pos, siz = self._index[key]
            oldblocks = (siz + _BLOCKSIZE - 1) // _BLOCKSIZE
            newblocks = (len(val) + _BLOCKSIZE - 1) // _BLOCKSIZE
            if newblocks <= oldblocks:
                self._index[key] = self._setval(pos, val)
            else:
                # The new value doesn't fit in the (padded) space used
                # by the old value. The blocks used by the old value are
                # forever lost.
                self._index[key] = self._addval(val)
            # Note that _index may be out of synch with the directory
            # file now: _setval() and _addval() don't update the directory
            # file. This also means that the on-disk directory and data
            # files are in a mutually inconsistent state, and they'll
            # remain that way until _commit() is called. Note that this
            # is a disaster (for the database) if the program crashes
            # (so that _commit() never gets called).
def __delitem__(self, key):
# The blocks used by the associated value are lost.
del self._index[key]
# XXX It's unclear why we do a _commit() here (the code always
# XXX has, so I'm not changing it). _setitem__ doesn't try to
# XXX keep the directory file in synch. Why should we? Or
# XXX why shouldn't __setitem__?
self._commit()
def keys(self):
return self._index.keys()
def has_key(self, key):
return key in self._index
def __contains__(self, key):
return key in self._index
    def iterkeys(self):
        # Py2-style lazy key iterator (dict.iterkeys does not exist on Py3).
        return self._index.iterkeys()
    # Iterating the database iterates its keys, like a dict.
    __iter__ = iterkeys
def __len__(self):
return len(self._index)
def close(self):
try:
self._commit()
finally:
self._index = self._datfile = self._dirfile = self._bakfile = None
__del__ = close
def _chmod (self, file):
if hasattr(self._os, 'chmod'):
self._os.chmod(file, self._mode)
def open(file, flag=None, mode=0666):
    """Open the database file, filename, and return corresponding object.
    The flag argument, used to control how the database is opened in the
    other DBM implementations, is ignored in the dumbdbm module; the
    database is always opened for update, and will be created if it does
    not exist.
    The optional mode argument is the UNIX mode of the file, used only when
    the database has to be created. It defaults to octal code 0666 (and
    will be modified by the prevailing umask).
    """
    # NOTE: this intentionally shadows the builtin open() at module level,
    # matching the dbm-style module API.
    # flag argument is currently ignored
    # Modify mode depending on the umask
    try:
        # Read the current umask without changing it: set it to 0, then
        # immediately restore the value we got back.
        um = _os.umask(0)
        _os.umask(um)
    except AttributeError:
        # Platform without umask(); use the mode as given.
        pass
    else:
        # Turn off any bits that are set in the umask
        mode = mode & (~um)
    return _Database(file, mode)
| {
"repo_name": "nmercier/linux-cross-gcc",
"path": "win32/bin/Lib/dumbdbm.py",
"copies": "2",
"size": "9187",
"license": "bsd-3-clause",
"hash": -4357775524957528000,
"line_mean": 34.8955823293,
"line_max": 78,
"alpha_frac": 0.5720039186,
"autogenerated": false,
"ratio": 3.992611907866145,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00037953771902725613,
"num_lines": 249
} |
"""A dummy audio actor for use in tests.
This class implements the audio API in the simplest way possible. It is used in
tests of the core and backends.
"""
from __future__ import absolute_import, unicode_literals
from mopidy import audio
import pykka
def create_proxy(config=None, mixer=None):
    """Start a DummyAudio actor and return an actor proxy for it."""
    actor_ref = DummyAudio.start(config, mixer)
    return actor_ref.proxy()
# TODO: reset position on track change?
class DummyAudio(pykka.ThreadingActor):
    """In-memory stand-in for mopidy's audio actor.
    Mirrors the audio API and emits the same AudioListener events, so core
    and backend tests can run without GStreamer. The exact order of events
    sent from _change_state() is part of the contract tests observe.
    """
    def __init__(self, config=None, mixer=None):
        super(DummyAudio, self).__init__()
        self.state = audio.PlaybackState.STOPPED
        self._volume = 0
        self._position = 0
        self._callback = None  # about-to-finish callback, if registered
        self._uri = None  # URI staged by set_uri(); cleared by prepare_change()
        self._stream_changed = False  # a 'stream_changed' event is pending
        self._tags = {}  # tags of the current stream
        self._bad_uris = set()  # URIs registered to fail playback
    def set_uri(self, uri):
        # prepare_change() must have cleared the previous URI first.
        assert self._uri is None, "prepare change not called before set"
        self._tags = {}
        self._uri = uri
        self._stream_changed = True
    def set_appsrc(self, *args, **kwargs):
        # No-op: the dummy has no GStreamer appsrc.
        pass
    def emit_data(self, buffer_):
        pass
    def emit_end_of_stream(self):
        pass
    def get_position(self):
        return self._position
    def set_position(self, position):
        self._position = position
        audio.AudioListener.send("position_changed", position=position)
        return True
    def start_playback(self):
        return self._change_state(audio.PlaybackState.PLAYING)
    def pause_playback(self):
        return self._change_state(audio.PlaybackState.PAUSED)
    def prepare_change(self):
        # Clears the staged URI so the next set_uri() assertion holds.
        self._uri = None
        return True
    def stop_playback(self):
        return self._change_state(audio.PlaybackState.STOPPED)
    def get_volume(self):
        return self._volume
    def set_volume(self, volume):
        self._volume = volume
        return True
    def set_metadata(self, track):
        pass
    def get_current_tags(self):
        return self._tags
    def set_about_to_finish_callback(self, callback):
        self._callback = callback
    def enable_sync_handler(self):
        pass
    def wait_for_state_change(self):
        pass
    def _change_state(self, new_state):
        # No staged URI: the transition fails.
        if not self._uri:
            return False
        # Stopping drops the current stream.
        if new_state == audio.PlaybackState.STOPPED and self._uri:
            self._stream_changed = True
            self._uri = None
        if self._uri is not None:
            audio.AudioListener.send("position_changed", position=0)
        if self._stream_changed:
            self._stream_changed = False
            audio.AudioListener.send("stream_changed", uri=self._uri)
        old_state, self.state = self.state, new_state
        audio.AudioListener.send(
            "state_changed", old_state=old_state, new_state=new_state, target_state=None
        )
        if new_state == audio.PlaybackState.PLAYING:
            # Fake a codec tag so tag plumbing can be exercised.
            self._tags["audio-codec"] = ["fake info..."]
            audio.AudioListener.send("tags_changed", tags=["audio-codec"])
        # Playback "succeeds" unless the URI was registered as bad.
        return self._uri not in self._bad_uris
    def trigger_fake_playback_failure(self, uri):
        # Make _change_state() report failure for this uri.
        self._bad_uris.add(uri)
    def trigger_fake_tags_changed(self, tags):
        self._tags.update(tags)
        audio.AudioListener.send("tags_changed", tags=self._tags.keys())
    def get_about_to_finish_callback(self):
        # This needs to be called from outside the actor or we lock up.
        def wrapper():
            if self._callback:
                self.prepare_change()
                self._callback()
            if not self._uri or not self._callback:
                self._tags = {}
                audio.AudioListener.send("reached_end_of_stream")
            else:
                audio.AudioListener.send("position_changed", position=0)
                audio.AudioListener.send("stream_changed", uri=self._uri)
        return wrapper
| {
"repo_name": "rectalogic/mopidy-pandora",
"path": "tests/dummy_audio.py",
"copies": "2",
"size": "3848",
"license": "apache-2.0",
"hash": 2276735564828996000,
"line_mean": 27.2941176471,
"line_max": 88,
"alpha_frac": 0.6068087318,
"autogenerated": false,
"ratio": 3.9629248197734293,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5569733551573429,
"avg_score": null,
"num_lines": null
} |
"""A dummy audio actor for use in tests.
This class implements the audio API in the simplest way possible. It is used in
tests of the core and backends.
"""
from __future__ import unicode_literals
import pykka
from .constants import PlaybackState
from .listener import AudioListener
class DummyAudio(pykka.ThreadingActor):
    """In-memory stand-in for mopidy's audio actor (no GStreamer).
    Emits the same AudioListener events as the real actor; the exact
    event order in _change_state() is part of the observed contract.
    """
    def __init__(self, config=None, mixer=None):
        super(DummyAudio, self).__init__()
        self.state = PlaybackState.STOPPED
        self._volume = 0
        self._position = 0
        self._callback = None  # about-to-finish callback, if registered
        self._uri = None  # URI staged by set_uri(); cleared by prepare_change()
        self._state_change_result = True  # value _change_state() reports
    def set_uri(self, uri):
        # prepare_change() must have cleared the previous URI first.
        assert self._uri is None, 'prepare change not called before set'
        self._uri = uri
    def set_appsrc(self, *args, **kwargs):
        # No-op: the dummy has no GStreamer appsrc.
        pass
    def emit_data(self, buffer_):
        pass
    def emit_end_of_stream(self):
        pass
    def get_position(self):
        return self._position
    def set_position(self, position):
        self._position = position
        AudioListener.send('position_changed', position=position)
        return True
    def start_playback(self):
        return self._change_state(PlaybackState.PLAYING)
    def pause_playback(self):
        return self._change_state(PlaybackState.PAUSED)
    def prepare_change(self):
        # Clears the staged URI so the next set_uri() assertion holds.
        self._uri = None
        return True
    def stop_playback(self):
        return self._change_state(PlaybackState.STOPPED)
    def get_volume(self):
        return self._volume
    def set_volume(self, volume):
        self._volume = volume
        return True
    def set_metadata(self, track):
        pass
    def set_about_to_finish_callback(self, callback):
        self._callback = callback
    def enable_sync_handler(self):
        pass
    def wait_for_state_change(self):
        pass
    def _change_state(self, new_state):
        # No staged URI: the transition fails.
        if not self._uri:
            return False
        # Leaving STOPPED announces the new stream.
        if self.state == PlaybackState.STOPPED and self._uri:
            AudioListener.send('position_changed', position=0)
            AudioListener.send('stream_changed', uri=self._uri)
        if new_state == PlaybackState.STOPPED:
            self._uri = None
            AudioListener.send('stream_changed', uri=self._uri)
        old_state, self.state = self.state, new_state
        AudioListener.send('state_changed', old_state=old_state,
                           new_state=new_state, target_state=None)
        # Tests flip this via trigger_fake_playback_failure().
        return self._state_change_result
    def trigger_fake_playback_failure(self):
        # Make every subsequent state change report failure.
        self._state_change_result = False
    def get_about_to_finish_callback(self):
        # This needs to be called from outside the actor or we lock up.
        def wrapper():
            if self._callback:
                self.prepare_change()
                self._callback()
            if not self._uri or not self._callback:
                AudioListener.send('reached_end_of_stream')
            else:
                AudioListener.send('position_changed', position=0)
                AudioListener.send('stream_changed', uri=self._uri)
        return wrapper
| {
"repo_name": "woutervanwijk/mopidy",
"path": "mopidy/audio/dummy.py",
"copies": "1",
"size": "3097",
"license": "apache-2.0",
"hash": 3544604332561993700,
"line_mean": 26.6517857143,
"line_max": 79,
"alpha_frac": 0.6131740394,
"autogenerated": false,
"ratio": 4.080368906455863,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5193542945855864,
"avg_score": null,
"num_lines": null
} |
"""A dummy audio actor for use in tests.
This class implements the audio API in the simplest way possible. It is used in
tests of the core and backends.
"""
import pykka
from mopidy import audio
def create_proxy(config=None, mixer=None):
    """Start a DummyAudio actor and return an actor proxy for it."""
    actor_ref = DummyAudio.start(config, mixer)
    return actor_ref.proxy()
# TODO: reset position on track change?
class DummyAudio(pykka.ThreadingActor):
    """In-memory stand-in for mopidy's audio actor (no GStreamer).
    Emits the same AudioListener events as the real actor; the exact
    event order in _change_state() is part of the observed contract.
    """
    def __init__(self, config=None, mixer=None):
        super().__init__()
        self.state = audio.PlaybackState.STOPPED
        self._volume = 0
        self._position = 0
        self._callback = None  # about-to-finish callback, if registered
        self._uri = None  # URI staged by set_uri(); cleared by prepare_change()
        self._stream_changed = False  # a 'stream_changed' event is pending
        self._live_stream = False  # live_stream flag from the last set_uri()
        self._tags = {}  # tags of the current stream
        self._bad_uris = set()  # URIs registered to fail playback
    def set_uri(self, uri, live_stream=False, download=False):
        # download is accepted for API parity but unused by the dummy.
        assert self._uri is None, "prepare change not called before set"
        self._position = 0  # a new track starts at the beginning
        self._uri = uri
        self._stream_changed = True
        self._live_stream = live_stream
        self._tags = {}
    def set_appsrc(self, *args, **kwargs):
        # No-op: the dummy has no GStreamer appsrc.
        pass
    def emit_data(self, buffer_):
        pass
    def get_position(self):
        return self._position
    def set_position(self, position):
        self._position = position
        audio.AudioListener.send("position_changed", position=position)
        return True
    def start_playback(self):
        return self._change_state(audio.PlaybackState.PLAYING)
    def pause_playback(self):
        return self._change_state(audio.PlaybackState.PAUSED)
    def prepare_change(self):
        # Clears the staged URI so the next set_uri() assertion holds.
        self._uri = None
        return True
    def stop_playback(self):
        return self._change_state(audio.PlaybackState.STOPPED)
    def get_volume(self):
        return self._volume
    def set_volume(self, volume):
        self._volume = volume
        return True
    def set_metadata(self, track):
        pass
    def get_current_tags(self):
        return self._tags
    def set_about_to_finish_callback(self, callback):
        self._callback = callback
    def enable_sync_handler(self):
        pass
    def wait_for_state_change(self):
        pass
    def _change_state(self, new_state):
        # No staged URI: the transition fails.
        if not self._uri:
            return False
        # Stopping drops the current stream.
        if new_state == audio.PlaybackState.STOPPED and self._uri:
            self._stream_changed = True
            self._uri = None
        if self._uri is not None:
            audio.AudioListener.send("position_changed", position=0)
        if self._stream_changed:
            self._stream_changed = False
            audio.AudioListener.send("stream_changed", uri=self._uri)
        old_state, self.state = self.state, new_state
        audio.AudioListener.send(
            "state_changed",
            old_state=old_state,
            new_state=new_state,
            target_state=None,
        )
        if new_state == audio.PlaybackState.PLAYING:
            # Fake a codec tag so tag plumbing can be exercised.
            self._tags["audio-codec"] = ["fake info..."]
            audio.AudioListener.send("tags_changed", tags=["audio-codec"])
        # Playback "succeeds" unless the URI was registered as bad.
        return self._uri not in self._bad_uris
    def trigger_fake_playback_failure(self, uri):
        # Make _change_state() report failure for this uri.
        self._bad_uris.add(uri)
    def trigger_fake_tags_changed(self, tags):
        self._tags.update(tags)
        audio.AudioListener.send("tags_changed", tags=self._tags.keys())
    def get_about_to_finish_callback(self):
        # This needs to be called from outside the actor or we lock up.
        def wrapper():
            if self._callback:
                self.prepare_change()
                self._callback()
            if not self._uri or not self._callback:
                self._tags = {}
                audio.AudioListener.send("reached_end_of_stream")
            else:
                audio.AudioListener.send("position_changed", position=0)
                audio.AudioListener.send("stream_changed", uri=self._uri)
        return wrapper
| {
"repo_name": "jodal/mopidy",
"path": "tests/dummy_audio.py",
"copies": "3",
"size": "3900",
"license": "apache-2.0",
"hash": -6468374728811022000,
"line_mean": 27.2608695652,
"line_max": 79,
"alpha_frac": 0.5984615385,
"autogenerated": false,
"ratio": 3.9959016393442623,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 138
} |
"""A dummy backend for use in tests.
This backend implements the backend API in the simplest way possible. It is
used in tests of the frontends.
"""
from __future__ import absolute_import, unicode_literals
import pykka
from mopidy import backend
from mopidy.models import Playlist, Ref, SearchResult
def create_dummy_backend_proxy(config=None, audio=None):
    """Start a DummyBackend actor and return an actor proxy for it."""
    actor_ref = DummyBackend.start(config=config, audio=audio)
    return actor_ref.proxy()
class DummyBackend(pykka.ThreadingActor, backend.Backend):
    """Backend stub wiring dummy library, playback, and playlists providers
    for the 'dummy' URI scheme."""

    def __init__(self, config, audio):
        super(DummyBackend, self).__init__()
        self.library = DummyLibraryProvider(backend=self)
        self.playback = DummyPlaybackProvider(audio=audio, backend=self)
        self.playlists = DummyPlaylistsProvider(backend=self)
        self.uri_schemes = ['dummy']
class DummyLibraryProvider(backend.LibraryProvider):
    """Library provider returning canned results seeded by tests via the
    dummy_* attributes."""

    root_directory = Ref.directory(uri='dummy:/', name='dummy')

    def __init__(self, *args, **kwargs):
        super(DummyLibraryProvider, self).__init__(*args, **kwargs)
        self.dummy_library = []
        self.dummy_browse_result = {}
        self.dummy_find_exact_result = SearchResult()
        self.dummy_search_result = SearchResult()

    def browse(self, path):
        """Canned browse result for this path, or an empty list."""
        return self.dummy_browse_result.get(path, [])

    def find_exact(self, **query):
        """Canned exact-search result; the query is ignored."""
        return self.dummy_find_exact_result

    def lookup(self, uri):
        """All tracks in the dummy library whose URI matches exactly."""
        return [track for track in self.dummy_library if track.uri == uri]

    def refresh(self, uri=None):
        pass

    def search(self, **query):
        """Canned search result; the query is ignored."""
        return self.dummy_search_result
class DummyPlaybackProvider(backend.PlaybackProvider):
    """Playback provider that succeeds for every track except the
    sentinel URI 'dummy:error'."""

    def __init__(self, *args, **kwargs):
        super(DummyPlaybackProvider, self).__init__(*args, **kwargs)
        self._time_position = 0

    def pause(self):
        return True

    def play(self, track):
        """Pass a track with URI 'dummy:error' to force failure"""
        self._time_position = 0
        return track.uri != 'dummy:error'

    def resume(self):
        return True

    def seek(self, time_position):
        """Remember the position so get_time_position() reports it back."""
        self._time_position = time_position
        return True

    def stop(self):
        return True

    def get_time_position(self):
        return self._time_position
class DummyPlaylistsProvider(backend.PlaylistsProvider):
    """Playlists provider backed by the in-memory self._playlists list."""

    def create(self, name):
        """Create, store, and return a playlist with URI 'dummy:<name>'."""
        created = Playlist(name=name, uri='dummy:%s' % name)
        self._playlists.append(created)
        return created

    def delete(self, uri):
        """Remove the playlist with this URI, if one exists."""
        found = self.lookup(uri)
        if found:
            self._playlists.remove(found)

    def lookup(self, uri):
        """First stored playlist with a matching URI, or None."""
        return next(
            (pl for pl in self._playlists if pl.uri == uri), None)

    def refresh(self):
        pass

    def save(self, playlist):
        """Replace the stored playlist with the same URI, or append it."""
        existing = self.lookup(playlist.uri)
        if existing is None:
            self._playlists.append(playlist)
        else:
            self._playlists[self._playlists.index(existing)] = playlist
        return playlist
| {
"repo_name": "priestd09/mopidy",
"path": "mopidy/backend/dummy.py",
"copies": "2",
"size": "3068",
"license": "apache-2.0",
"hash": 8353517146123160000,
"line_mean": 26.6396396396,
"line_max": 76,
"alpha_frac": 0.6398305085,
"autogenerated": false,
"ratio": 3.9536082474226806,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5593438755922681,
"avg_score": null,
"num_lines": null
} |
"""A dummy backend for use in tests.
This backend implements the backend API in the simplest way possible. It is
used in tests of the frontends.
"""
from __future__ import unicode_literals
import pykka
from mopidy import backend
from mopidy.models import Playlist, Ref, SearchResult
def create_dummy_backend_proxy(config=None, audio=None):
    """Start a DummyBackend actor and return an actor proxy for it."""
    actor_ref = DummyBackend.start(config=config, audio=audio)
    return actor_ref.proxy()
class DummyBackend(pykka.ThreadingActor, backend.Backend):
    """Backend stub wiring dummy library, playback, and playlists providers
    for the 'dummy' URI scheme."""

    def __init__(self, config, audio):
        super(DummyBackend, self).__init__()
        self.library = DummyLibraryProvider(backend=self)
        self.playback = DummyPlaybackProvider(audio=audio, backend=self)
        self.playlists = DummyPlaylistsProvider(backend=self)
        self.uri_schemes = ['dummy']
class DummyLibraryProvider(backend.LibraryProvider):
    """Library provider returning canned results seeded by tests via the
    dummy_* attributes."""

    root_directory = Ref.directory(uri='dummy:/', name='dummy')

    def __init__(self, *args, **kwargs):
        super(DummyLibraryProvider, self).__init__(*args, **kwargs)
        self.dummy_library = []
        self.dummy_browse_result = {}
        self.dummy_find_exact_result = SearchResult()
        self.dummy_search_result = SearchResult()

    def browse(self, path):
        """Canned browse result for this path, or an empty list."""
        return self.dummy_browse_result.get(path, [])

    def find_exact(self, **query):
        """Canned exact-search result; the query is ignored."""
        return self.dummy_find_exact_result

    def lookup(self, uri):
        """Tracks whose URI matches (a list under Py2's filter)."""
        return filter(lambda track: track.uri == uri, self.dummy_library)

    def refresh(self, uri=None):
        pass

    def search(self, **query):
        """Canned search result; the query is ignored."""
        return self.dummy_search_result
class DummyPlaybackProvider(backend.PlaybackProvider):
    """Playback provider that succeeds for every track except the
    sentinel URI 'dummy:error'."""

    def __init__(self, *args, **kwargs):
        super(DummyPlaybackProvider, self).__init__(*args, **kwargs)
        self._time_position = 0

    def pause(self):
        return True

    def play(self, track):
        """Pass a track with URI 'dummy:error' to force failure"""
        self._time_position = 0
        return track.uri != 'dummy:error'

    def resume(self):
        return True

    def seek(self, time_position):
        """Remember the position so get_time_position() reports it back."""
        self._time_position = time_position
        return True

    def stop(self):
        return True

    def get_time_position(self):
        return self._time_position
class DummyPlaylistsProvider(backend.PlaylistsProvider):
    """Playlists provider backed by the in-memory self._playlists list."""

    def create(self, name):
        """Create, store, and return a playlist with URI 'dummy:<name>'."""
        created = Playlist(name=name, uri='dummy:%s' % name)
        self._playlists.append(created)
        return created

    def delete(self, uri):
        """Remove the playlist with this URI, if one exists."""
        found = self.lookup(uri)
        if found:
            self._playlists.remove(found)

    def lookup(self, uri):
        """First stored playlist with a matching URI, or None."""
        return next(
            (pl for pl in self._playlists if pl.uri == uri), None)

    def refresh(self):
        pass

    def save(self, playlist):
        """Replace the stored playlist with the same URI, or append it."""
        existing = self.lookup(playlist.uri)
        if existing is None:
            self._playlists.append(playlist)
        else:
            self._playlists[self._playlists.index(existing)] = playlist
        return playlist
| {
"repo_name": "woutervanwijk/mopidy",
"path": "mopidy/backend/dummy.py",
"copies": "3",
"size": "3054",
"license": "apache-2.0",
"hash": 2854898147553960000,
"line_mean": 26.5135135135,
"line_max": 76,
"alpha_frac": 0.6394891945,
"autogenerated": false,
"ratio": 3.9559585492227978,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6095447743722798,
"avg_score": null,
"num_lines": null
} |
"""A dummy backend for use in tests.
This backend implements the backend API in the simplest way possible. It is
used in tests of the frontends.
"""
import pykka
from mopidy import backend
from mopidy.models import Playlist, Ref, SearchResult
def create_proxy(config=None, audio=None):
    """Start a DummyBackend actor and return an actor proxy for it."""
    actor_ref = DummyBackend.start(config=config, audio=audio)
    return actor_ref.proxy()
class DummyBackend(pykka.ThreadingActor, backend.Backend):
    """Backend stub for the 'dummy' URI scheme.

    Uses the real PlaybackProvider when an audio actor is supplied,
    otherwise the dummy one.
    """

    def __init__(self, config, audio):
        super().__init__()
        self.library = DummyLibraryProvider(backend=self)
        playback_cls = backend.PlaybackProvider if audio else DummyPlaybackProvider
        self.playback = playback_cls(audio=audio, backend=self)
        self.playlists = DummyPlaylistsProvider(backend=self)
        self.uri_schemes = ["dummy"]
class DummyLibraryProvider(backend.LibraryProvider):
    """Library provider returning canned results seeded by tests via the
    dummy_* attributes."""

    root_directory = Ref.directory(uri="dummy:/", name="dummy")

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.dummy_library = []
        self.dummy_get_distinct_result = {}
        self.dummy_browse_result = {}
        self.dummy_find_exact_result = SearchResult()
        self.dummy_search_result = SearchResult()

    def browse(self, path):
        """Canned browse result for this path, or an empty list."""
        return self.dummy_browse_result.get(path, [])

    def get_distinct(self, field, query=None):
        """Canned distinct values for the field, or an empty set."""
        return self.dummy_get_distinct_result.get(field, set())

    def lookup(self, uri):
        """Tracks whose URI matches the normalized track URI."""
        wanted = Ref.track(uri=uri).uri
        return [track for track in self.dummy_library if track.uri == wanted]

    def refresh(self, uri=None):
        pass

    def search(self, query=None, uris=None, exact=False):
        """Canned search result; the query is ignored."""
        if exact:  # TODO: remove uses of dummy_find_exact_result
            return self.dummy_find_exact_result
        return self.dummy_search_result
class DummyPlaybackProvider(backend.PlaybackProvider):
    """Playback provider that fails only for the sentinel URI 'dummy:error'."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._uri = None
        self._time_position = 0

    def pause(self):
        return True

    def play(self):
        # Falsy when no track is loaded; False for the sentinel error URI.
        return self._uri and self._uri != "dummy:error"

    def change_track(self, track):
        """Pass a track with URI 'dummy:error' to force failure"""
        self._uri = track.uri
        self._time_position = 0
        return True

    def prepare_change(self):
        pass

    def resume(self):
        return True

    def seek(self, time_position):
        """Remember the position so get_time_position() reports it back."""
        self._time_position = time_position
        return True

    def stop(self):
        self._uri = None
        return True

    def get_time_position(self):
        return self._time_position
class DummyPlaylistsProvider(backend.PlaylistsProvider):
    """In-memory playlists provider with test hooks for seeding playlists
    and for disabling save()."""

    def __init__(self, backend):
        super().__init__(backend)
        self._playlists = []
        self._allow_save = True

    def set_dummy_playlists(self, playlists):
        """For tests using the dummy provider through an actor proxy."""
        self._playlists = playlists

    def set_allow_save(self, enabled):
        self._allow_save = enabled

    def as_list(self):
        """Playlist refs for every stored playlist."""
        return [
            Ref.playlist(uri=pl.uri, name=pl.name) for pl in self._playlists
        ]

    def get_items(self, uri):
        """Track refs for the playlist, or None when the URI is unknown."""
        playlist = self.lookup(uri)
        if playlist is not None:
            return [Ref.track(uri=t.uri, name=t.name) for t in playlist.tracks]
        return None

    def lookup(self, uri):
        """First stored playlist matching the normalized URI, or None."""
        wanted = Ref.playlist(uri=uri).uri
        return next(
            (pl for pl in self._playlists if pl.uri == wanted), None)

    def refresh(self):
        pass

    def create(self, name):
        """Create, store, and return a playlist with URI 'dummy:<name>'."""
        created = Playlist(name=name, uri=f"dummy:{name}")
        self._playlists.append(created)
        return created

    def delete(self, uri):
        """Remove the playlist with this URI, if one exists."""
        found = self.lookup(uri)
        if found:
            self._playlists.remove(found)

    def save(self, playlist):
        """Replace or append the playlist; returns None when saving is
        disabled via set_allow_save(False)."""
        if not self._allow_save:
            return None
        existing = self.lookup(playlist.uri)
        if existing is None:
            self._playlists.append(playlist)
        else:
            self._playlists[self._playlists.index(existing)] = playlist
        return playlist
| {
"repo_name": "adamcik/mopidy",
"path": "tests/dummy_backend.py",
"copies": "4",
"size": "4258",
"license": "apache-2.0",
"hash": 8571851900242502000,
"line_mean": 26.8300653595,
"line_max": 79,
"alpha_frac": 0.613903241,
"autogenerated": false,
"ratio": 3.9280442804428044,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 153
} |
"""A dummy module for testing purposes."""
import logging
import os
import uuid
import lambdautils.state as state
# Root logger at INFO so the filter/mapper log lines below are emitted.
logger = logging.getLogger()
logger.setLevel(logging.INFO)
def partition_key(event):
    """Partition key for the event: its client_id, or a random UUID string."""
    fallback = str(uuid.uuid4())
    return event.get("client_id", fallback)
def input_filter(event, *args, **kwargs):
    """Mark the event and admit it only the first time its id is seen.

    Requires the 'mydummyvar' environment variable; uses the state table
    for exactly-once semantics keyed on event['id'].
    """
    if os.environ.get("mydummyvar") != "mydummyval":
        raise ValueError("Unable to retrieve 'mydummyvar' from environment")
    event["input_filter"] = True
    val = state.get_state(event["id"])
    if not val:
        # First sighting: remember the id and let the event through.
        logger.info("State key '{}' not found".format(event["id"]))
        state.set_state(event["id"], "hello there")
        return True
    logger.info("Retrieved state key '{}': '{}'".format(event["id"], val))
    return False
def output_filter_1(event, *args, **kwargs):
    """Mark the event as seen by output filter 1 and always admit it."""
    event.update(output_filter_1=True)
    return True
def output_mapper_1(event, *args, **kwargs):
    """Tag the event as processed by output mapper 1; returns it mutated."""
    event.update(output_mapper_1=True)
    return event
def output_mapper_2(event, *args, **kwargs):
    """Tag the event as processed by output mapper 2; returns it mutated."""
    event.update(output_mapper_2=True)
    return event
def output_mapper_2b(event, *args, **kwargs):
    """Tag the event as processed by output mapper 2b; returns it mutated."""
    event.update(output_mapper_2b=True)
    return event
def output_filter_2b(event, *args, **kwargs):
    """Pass-through filter: admits every event unchanged."""
    return True
def batch_mapper(events, *args, **kwargs):
    """Mark every event in the batch as batch-mapped; returns the batch."""
    for event in events:
        event.update(batch_mapped=True)
    return events
| {
"repo_name": "humilis/humilis-kinesis-mapper",
"path": "tests/integration/mycode/mypkg/__init__.py",
"copies": "2",
"size": "1346",
"license": "mit",
"hash": -6640724321236282000,
"line_mean": 21.813559322,
"line_max": 78,
"alpha_frac": 0.6433878158,
"autogenerated": false,
"ratio": 3.4690721649484537,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5112459980748454,
"avg_score": null,
"num_lines": null
} |
# A dummy service that implements the mettle protocol for one pipeline, called
# "bar". The "bar" pipeline will make targets of "tmp/<target_time>/[0-9].txt".
import os
import json
import socket
import time
import random
import sys
from datetime import timedelta
import pika
import isodate
import utc
import yaml
import mettle_protocol as mp
class PizzaPipeline(mp.Pipeline):
    """Base demo pipeline: each target becomes a file under
    tmp/<subclass name>/<target_time ISO>/<target>.txt."""
    def get_expire_time(self, target_time, target, start_time):
        """
        Given a target, and a UTC execution start time, return a UTC datetime
        for when the system should consider the job to have failed.
        """
        # We just hardcode a 1 minute expiration time.
        return start_time + timedelta(minutes=1)
    def make_target(self, target_time, target, target_parameters):
        """Create the target's file; returns True on success, False on error."""
        self.log("Making target %s." % target)
        try:
            if self._target_exists(target_time, target):
                self.log("%s already exists." % target)
            else:
                self.log("%s does not exist. Creating." % target)
                # Let's just randomly fail 10% of the time.
                if random.random() < .1:
                    raise Exception("No one expects the Spanish Inquisition!")
                filename = self._target_to_filename(target_time, target)
                dirname = os.path.dirname(filename)
                if not os.path.isdir(dirname):
                    os.makedirs(dirname)
                with open(filename, 'w') as f:
                    # sleep some random amount of time from 1 to 5 seconds
                    # to simulate real work.
                    time.sleep(random.randint(1, 5))
                    f.write(target)
            return True
        except Exception as e:
            # Failures (including the simulated ones) are logged, not raised.
            self.log("Error making target %s: %s" % (target, e))
            return False
    def _get_dir(self, target_time):
        # Per-pipeline, per-execution-time output directory.
        return os.path.join('tmp', type(self).__name__, target_time.isoformat())
    def _target_exists(self, target_time, target):
        # Returns True when the file exists; implicitly None otherwise.
        filename = self._target_to_filename(target_time, target)
        if os.path.isfile(filename):
            return True
    def _target_to_filename(self, target_time, target):
        dirname = self._get_dir(target_time)
        return os.path.join(dirname, '%s.txt' % target)
class PepperoniPipeline(PizzaPipeline):
    """Demo pipeline with a fixed DAG of pizza-making steps."""
    # Class-level DAG: key = target name, value = list of its dependencies.
    targets = {
        "flour": [],
        "water": [],
        "yeast": [],
        "sugar": [],
        "salt": [],
        "olive oil": [],
        "mix": ["flour", "water", "yeast", "sugar", "salt", "olive oil"],
        "raise": ["mix"],
        "roll": ["raise"],
        "sauce": ["roll"],
        "cheese": ["sauce"],
        "pepperoni": ["cheese"],
        "green peppers": ["cheese"],
        "mushrooms": ["cheese"],
        "bake": ["pepperoni", "green peppers", "mushrooms"],
        "box": ["bake"],
        "deliver": ["box"],
        "eat": ["deliver"]
    }
    def get_targets(self, target_time):
        # The get_targets function must return a dictionary where all the keys
        # are strings representing the targets to be created, and the values are
        # lists of targets on which a target depends.
        # Rules:
        # - all targets must be strings
        # - any dependency listed must itself be a target in the dict
        # - cyclic dependencies are not allowed
        return self.targets
    def get_target_parameters(self, target_time):
        # Extra per-target parameters passed to make_target().
        return {
            "flour": {"foo": "bar"},
        }
class HawaiianPipeline(PizzaPipeline):
    """Demo pipeline that nacks any run scheduled less than 4 days ago."""
    def get_targets(self, target_time):
        # The HawaiianPipeline is in no hurry. If you call get_targets with a
        # target_time that's too recent, it will nack and make you wait.
        now = utc.now()
        wait_until = target_time + timedelta(days=4)
        if now < wait_until:
            # PipelineNack tells the scheduler when to try again.
            raise mp.PipelineNack("What's the rush, man?", wait_until)
        return {
            "flour": [],
            "water": [],
            "yeast": [],
            "sugar": [],
            "salt": [],
            "olive oil": [],
            "mix": ["flour", "water", "yeast", "sugar", "salt", "olive oil"],
            "raise": ["mix"],
            "roll": ["raise"],
            "sauce": ["roll"],
            "cheese": ["sauce"],
            "ham": ["cheese"],
            "pineapple": ["cheese"],
            "bake": ["ham", "pineapple"],
            "box": ["bake"],
            "deliver": ["box"],
            "eat": ["deliver"]
        }
def _get_queue_name(service_name):
    # Helper specifically for this demo script: use argv[1] as the queue
    # name when supplied, else fall back to the service's default queue.
    if len(sys.argv) > 1:
        return sys.argv[1]
    return mp.service_queue_name(service_name)
def main():
    """Load YAML settings and run the pizza pipelines against RabbitMQ."""
    with open(os.environ['APP_SETTINGS_YAML'], 'rb') as f:
        settings = yaml.safe_load(f)
    default_url = 'amqp://guest:guest@127.0.0.1:5672/%2f'
    rabbit_url = settings.get('rabbit_url', default_url)
    service_name = 'pizza'
    pipelines = {
        'pepperoni': PepperoniPipeline,
        'hawaiian': HawaiianPipeline,
    }
    mp.run_pipelines(service_name, rabbit_url, pipelines,
                     _get_queue_name(service_name))
# Allow running the demo directly: python pizza_service.py [queue_name]
if __name__ == '__main__':
    main()
| {
"repo_name": "yougov/mettle",
"path": "scripts/pizza_service.py",
"copies": "1",
"size": "5305",
"license": "mit",
"hash": 197432441424808160,
"line_mean": 31.950310559,
"line_max": 80,
"alpha_frac": 0.5379830349,
"autogenerated": false,
"ratio": 3.880760790051207,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4918743824951207,
"avg_score": null,
"num_lines": null
} |
#Advanced Encryption Standard
from random import SystemRandom
# Cryptographically secure RNG; not used by the transforms in this chunk —
# presumably for key generation elsewhere in the module (TODO confirm).
rand = SystemRandom()
# Python 2 compatibility shim: prefer the lazy xrange where it exists.
try:
    range = xrange
except Exception:
    # Python 3: xrange is gone and builtin range is already lazy.
    pass
def xtime(x):
    """Multiply x by 2 in GF(2^8) modulo the AES polynomial (0x11b).

    Same computation as the original lambda; PEP 8 (E731) prefers a def
    over a name-bound lambda, and a def gets a proper name in tracebacks.
    """
    return ((x << 1) ^ 0x1b) & 0xff if x & 0x80 else x << 1
# AES forward S-box, indexed SBox[high_nibble][low_nibble] (FIPS-197, Fig. 7).
SBox = [[0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5, 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76],
        [0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0, 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0],
        [0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc, 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15],
        [0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a, 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75],
        [0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0, 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84],
        [0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b, 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf],
        [0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85, 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8],
        [0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5, 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2],
        [0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17, 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73],
        [0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88, 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb],
        [0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c, 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79],
        [0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9, 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08],
        [0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6, 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a],
        [0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e, 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e],
        [0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94, 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf],
        [0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68, 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16]]
# AES inverse S-box (inverse of SBox), indexed by high/low nibble.
invSBox = [[0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38, 0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb],
           [0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87, 0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb],
           [0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d, 0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e],
           [0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2, 0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25],
           [0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16, 0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92],
           [0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda, 0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84],
           [0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a, 0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06],
           [0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02, 0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b],
           [0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea, 0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73],
           [0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85, 0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e],
           [0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89, 0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b],
           [0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20, 0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4],
           [0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31, 0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f],
           [0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d, 0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef],
           [0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0, 0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61],
           [0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26, 0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d]]
# Key-expansion round constants; rcon[0] is a placeholder (indexing starts at 1).
rcon = [0x00, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36]
def SubBytes(a):
    """Apply the AES S-box to every byte of the state, in place; returns `a`."""
    for idx, byte in enumerate(a):
        a[idx] = SBox[byte >> 4][byte & 0xf]
    return a
def InvSubBytes(a):
    """Apply the inverse AES S-box to every byte of the state, in place; returns `a`."""
    for idx, byte in enumerate(a):
        a[idx] = invSBox[byte >> 4][byte & 0xf]
    return a
def ShiftRows(a):
    """AES ShiftRows on a column-major 16-byte state: row r of the 4x4
    state is rotated left by r positions. Returns a new list."""
    order = (0, 5, 10, 15, 4, 9, 14, 3, 8, 13, 2, 7, 12, 1, 6, 11)
    return [a[i] for i in order]
def InvShiftRows(a):
    """Inverse of ShiftRows: row r of the 4x4 column-major state is
    rotated right by r positions. Returns a new list."""
    order = (0, 13, 10, 7, 4, 1, 14, 11, 8, 5, 2, 15, 12, 9, 6, 3)
    return [a[i] for i in order]
def MixColumns(a):
    """AES MixColumns: mix each 4-byte column of the state (FIPS-197 §5.1.3).

    The state is stored column-major, so each slice of four consecutive
    bytes is one column.  Uses the standard xtime-based formulation;
    `xtime` is defined elsewhere in this file -- presumably multiplication
    by x in GF(2^8), TODO confirm.  Mutates `a` in place and returns it.
    """
    for i in range(4):
        b = a[0 + (i * 4):4 + (i * 4)]   # extract one column
        t = b[0] ^ b[1] ^ b[2] ^ b[3]    # XOR of the whole column
        u = b[0]                         # saved: b[0] is overwritten before b[3] needs it
        b[0] ^= t ^ xtime(b[0] ^ b[1])
        b[1] ^= t ^ xtime(b[1] ^ b[2])
        b[2] ^= t ^ xtime(b[2] ^ b[3])
        b[3] ^= t ^ xtime(b[3] ^ u)
        a[0 + (i * 4):4 + (i * 4)] = b   # write the column back
    return a
def InvMixColumns(a):
    """AES InvMixColumns, implemented as a preprocessing pass followed by
    a call to MixColumns (the standard decomposition of the inverse
    mix-columns matrix).  Mutates `a` in place.
    """
    for i in range(4):
        b = a[0 + (i * 4):4 + (i * 4)]
        # xtime(xtime(.)) corresponds to multiplication by {04} in GF(2^8),
        # assuming xtime (defined elsewhere) is multiply-by-x -- TODO confirm.
        u = xtime(xtime(b[0] ^ b[2]))
        v = xtime(xtime(b[1] ^ b[3]))
        b[0] ^= u
        b[1] ^= v
        b[2] ^= u
        b[3] ^= v
        a[0 + (i * 4):4 + (i * 4)] = b
    return MixColumns(a)
def AddRoundKey(a, b):
    """XOR the 16-byte state `a` (in place) with the four 32-bit round-key
    words in `b`; returns `a` for convenience."""
    for word_index in range(4):
        word = b[word_index]
        for byte_index in range(4):
            # Big-endian byte extraction from the 32-bit key word.
            shift = 8 * (3 - byte_index)
            a[(word_index * 4) + byte_index] ^= (word >> shift) & 0xff
    return a
def SubWord(a):
    """Apply the AES S-box to each of the four bytes of a 32-bit word."""
    result = 0
    for shift in (24, 16, 8, 0):
        byte = (a >> shift) & 0xff
        result |= SBox[byte >> 4][byte & 0xf] << shift
    return result
def RotWord(a):
    """Rotate a 32-bit word left by one byte."""
    top_byte = a >> 24
    return ((a & 0x00ffffff) << 8) | top_byte
def AESencryptblock(keys, numRounds, inbits):
    """Encrypt one 128-bit block.

    keys      -- expanded key schedule (list of 32-bit words from getKey)
    numRounds -- 10/12/14 depending on key size
    inbits    -- plaintext block as a 128-bit integer
    Returns the ciphertext block as a 128-bit integer.
    """
    # Unpack the integer into 16 bytes, most significant byte first.
    state = []
    for i in range(16):
        state += [(inbits >> (120 - (8 * i))) & 0xff]
    # Initial whitening with the first four key words.
    state = AddRoundKey(state, keys[0:4])
    # numRounds - 1 full rounds.
    for i in range(numRounds - 1):
        state = SubBytes(state)
        state = ShiftRows(state)
        state = MixColumns(state)
        state = AddRoundKey(state, keys[(i * 4) + 4:(i * 4) + 8])
    # Final round omits MixColumns.
    state = SubBytes(state)
    state = ShiftRows(state)
    state = AddRoundKey(state, keys[numRounds * 4:(numRounds * 4) + 4])
    # Repack the 16 state bytes into one integer.
    out = 0
    for i in state:
        out <<= 8
        out += i
    return out
def AESdecryptblock(keys, numRounds, inbits):
    """Decrypt one 128-bit block (inverse of AESencryptblock).

    keys      -- expanded key schedule (list of 32-bit words from getKey)
    numRounds -- 10/12/14 depending on key size
    inbits    -- ciphertext block as a 128-bit integer
    Returns the plaintext block as a 128-bit integer.
    """
    # Unpack the integer into 16 bytes, most significant byte first.
    state = []
    for i in range(16):
        state += [(inbits >> (120 - (8 * i))) & 0xff]
    # Undo the final encryption round (which had no MixColumns).
    state = AddRoundKey(state, keys[numRounds * 4:(numRounds * 4) + 4])
    state = InvShiftRows(state)
    state = InvSubBytes(state)
    # Undo the full rounds in reverse key order.
    for i in range(numRounds - 2, -1, -1):
        state = AddRoundKey(state, keys[(i * 4) + 4:(i * 4) + 8])
        state = InvMixColumns(state)
        state = InvShiftRows(state)
        state = InvSubBytes(state)
    # Undo the initial whitening.
    state = AddRoundKey(state, keys[0:4])
    # Repack the 16 state bytes into one integer.
    out = 0
    for i in state:
        out <<= 8
        out += i
    return out
def AESencrypt(length, keys, inbits, lead=0):
    """Encrypt an arbitrary-length bit string (held as an int) in CBC mode.

    length -- key size in bits (128/192/256), selects the round count
    keys   -- expanded key schedule from getKey()
    inbits -- plaintext as an integer
    lead   -- number of leading zero bits not representable by the int

    Returns one big integer whose first 128 bits are a random IV; each
    subsequent block is XORed with the previous ciphertext block before
    encryption (CBC chaining, see the loop below).
    """
    refrounds = {128:10,192:12,256:14}
    # NOTE(review): the Exception() default is never *raised* -- an invalid
    # length just yields a bogus numRounds that fails later; an explicit
    # check like the one in getKey() would be clearer.
    numRounds = refrounds.get(length, Exception())
    # Bit padding: append copies of the complement of the last plaintext
    # bit up to the next byte boundary, so decryption can strip it again.
    bytecomplete = (inbits & 1) ^ 1
    bits = 8 - ((inbits.bit_length() + lead) % 8)
    for i in range(bits):
        inbits <<= 1
        inbits += bytecomplete
    # Byte padding up to a whole 16-byte block; the final byte records the
    # pad length (skipped when a full 16-byte pad block was added).
    bytelength = (inbits.bit_length() + lead) // 8
    pad = 16 - (bytelength % 16)
    for i in range(pad):
        inbits <<= 8
    if pad != 16:
        inbits += pad
    # Random 128-bit IV. NOTE(review): `rand` must be bound elsewhere in
    # this file (presumably the random module -- TODO confirm); `random`
    # is not cryptographically secure, `secrets` would be preferable here.
    out = rand.randint(0,(2 ** 128) - 1)
    for i in range((inbits.bit_length() + lead) // 128):
        out <<= 128
        # (out >> 128) is the previous ciphertext block (or the IV).
        out += AESencryptblock(keys, numRounds, ((inbits >> ((inbits.bit_length() + lead) - (128 * (i + 1)))) ^ (out >> 128)) & 0xffffffffffffffffffffffffffffffff)
    return out
def AESdecrypt(length, keys, inbits):
    """Invert AESencrypt: undo CBC chaining and strip both padding layers.

    length -- key size in bits (128/192/256)
    keys   -- expanded key schedule from getKey()
    inbits -- IV || ciphertext as one integer (output of AESencrypt)

    Returns (plaintext_int, lead) where lead is the number of leading zero
    bits the integer form cannot represent.
    """
    refrounds = {128:10,192:12,256:14}
    # NOTE(review): same unraised Exception() sentinel as in AESencrypt.
    numRounds = refrounds.get(length, Exception())
    # Recover the IV's leading zero bits: round the length up to 128.
    lead = 128 - (inbits.bit_length() % 128) if inbits.bit_length() % 128 != 0 else 0
    out = 0
    # CBC decryption: each decrypted block is XORed with the preceding
    # ciphertext block (block i+2 decrypted, XORed with block i+1; the
    # first iteration uses the IV).
    for i in range((inbits.bit_length() + lead - 128) // 128):
        out <<= 128
        out += AESdecryptblock(keys, numRounds, (inbits >> ((inbits.bit_length() + lead) - (128 * (i + 2)))) & 0xffffffffffffffffffffffffffffffff) ^ ((inbits >> ((inbits.bit_length() + lead) - (128 * (i + 1)))) & 0xffffffffffffffffffffffffffffffff)
    # Leading zero bits of the recovered plaintext (before padding removal).
    lead = (-128 * (-out.bit_length() // 128)) - out.bit_length()
    # Strip the byte padding: the last byte holds the pad length, 0 meaning
    # a full extra 16-byte pad block was appended.
    if out & 0xff == 0:
        out >>= 128
    else:
        out >>= 8 * (out & 0xff)
    # Strip the bit padding: drop the trailing run of identical bits that
    # AESencrypt appended (they all differ from the last real data bit).
    remove = out & 1
    while out & 1 == remove:
        out >>= 1
    return out, lead
def getKey(length, key):
    """Expand an AES cipher key into the full round-key schedule.

    length -- key size in bits: 128, 192 or 256
    key    -- the cipher key as an integer
    Returns a list of 32-bit words: 44, 52 or 60 respectively
    (four words per round key, numRounds + 1 round keys).
    Raises Exception for any other key length.

    The three previously duplicated per-key-size branches are unified into
    the single Nk-parameterized loop of FIPS-197 §5.2; behaviour is
    unchanged for all three key sizes.
    """
    if length not in (128, 192, 256):
        raise Exception("invalid key length")
    Nk = length // 32                  # key length in 32-bit words: 4, 6 or 8
    total = 4 * (Nk + 7)               # words needed: 4 * (numRounds + 1)
    # Split the key integer into its Nk initial words, most significant first.
    w = [(key >> (32 * (Nk - 1 - i))) & 0xffffffff for i in range(Nk)]
    while len(w) < total:
        temp = w[-1]
        if len(w) % Nk == 0:
            # Start of a new Nk-word group: rotate, substitute, add round constant.
            temp = SubWord(RotWord(temp)) ^ (rcon[len(w) // Nk] << 24)
        elif Nk > 6 and len(w) % Nk == 4:
            # 256-bit keys apply an extra SubWord mid-group (FIPS-197 §5.2).
            temp = SubWord(temp)
        w += [temp ^ w[-Nk]]
    return w
| {
"repo_name": "Fitzgibbons/Cryptograpy",
"path": "AES.py",
"copies": "1",
"size": "9274",
"license": "mit",
"hash": -8497146949694690000,
"line_mean": 46.8041237113,
"line_max": 248,
"alpha_frac": 0.5258788009,
"autogenerated": false,
"ratio": 2.173933427097984,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8161862709431211,
"avg_score": 0.007589903713354525,
"num_lines": 194
} |
"""Advanced examples."""
import logging
import os
from multiprocessing import Process
from time import sleep
from phial import Message, Phial, Response, Schedule, command
# Create the bot with the token from the environment ("NONE" if unset).
slackbot = Phial(os.getenv("SLACK_API_TOKEN", "NONE"))
# Channel id the scheduled job posts to -- replace with a real channel id.
SCHEDULED_CHANNEL = "channel-id"
@slackbot.command("cent(er|re)")
def regex_in_command() -> Response:
    """Command that uses regex to define structure."""
    first_word = command.text.split(" ")[0]
    if slackbot.config["prefix"]:
        # Drop the leading prefix character before comparing.
        first_word = first_word[1:]
    if first_word == "center":
        reply = "Yeehaw! You're a Yank"
    elif first_word == "centre":
        reply = "I say! You appear to be a Brit"
    else:
        reply = "Well this is awkward... this isn't meant to \
happen"
    return Response(text=reply, channel=command.channel)
@slackbot.command("colo[u]?r <arg>")
def regex_in_command_with_arg(arg: str) -> Response:
    """Command that uses regex to define structure with an arg."""
    # The matched spelling ("color"/"colour") is echoed back in the reply.
    matched_spelling = command.text.split(" ")[0]
    reply = "My favourite {0} is {1}".format(matched_spelling, arg)
    return Response(text=reply, channel=command.channel)
def fire_and_forget(channel: str) -> None:
    """
    Example function used by background_processing().

    Sends a message outside of a command context.
    """
    # Simulate slow work before posting back to the originating channel.
    sleep(3)
    slackbot.send_message(Response(text="Background Process Message", channel=channel))
@slackbot.command("background")
def background_processing() -> str:
    """Command that starts a process to allow a non blocking sleep."""
    # Daemonize so the worker cannot outlive the bot process.
    worker = Process(
        target=fire_and_forget,
        args=(command.channel,),
        daemon=True,
    )
    worker.start()
    return "Foreground message"
@slackbot.middleware()
def log_message(message: Message) -> Message:
    """Middleware that logs a message."""
    # Record every incoming message at INFO level, then pass it through
    # unchanged so later middleware/commands still see it.
    logging.info(message)
    return message
@slackbot.scheduled(Schedule().seconds(30))
def scheduled_function() -> None:
    """Send a message to SCHEDULED_CHANNEL every 30 seconds.

    Renamed from the misspelled ``shceduled_function``; nothing else in
    this example referenced the old name.
    """
    slackbot.send_message(Response(text="Hey! Hey Listen!", channel=SCHEDULED_CHANNEL))
@slackbot.command("messageWithAttachment")
def get_message_with_attachment() -> Response:
    """A command that posts a message with a Slack attachment.

    Fixes the user-facing grammar typo "it's body" -> "its body" in both
    attachment field values; structure is otherwise unchanged.
    """
    return Response(
        channel=command.channel,
        attachments=[
            {
                "title": "Here's a message, it has 2 attachment fields",
                "title_link": "https://api.slack.com/docs/message-attachments",
                "text": "This message has some text!",
                "fields": [
                    {
                        "title": "Here's the first attachment field",
                        "value": "And here's its body",
                        "short": True,
                    },
                    {
                        "title": "...And here's the second",
                        "value": "And here's its body",
                        "short": True,
                    },
                ],
            }
        ],
    )
if __name__ == "__main__":
    # Configure timestamped stdout logging before starting the bot loop.
    FORMAT = "%(asctime)-15s %(message)s"
    logging.basicConfig(format=FORMAT, level=logging.INFO)
    slackbot.run()
| {
"repo_name": "sedders123/phial",
"path": "examples/advanced.py",
"copies": "1",
"size": "3247",
"license": "mit",
"hash": 678805643877652400,
"line_mean": 30.8333333333,
"line_max": 87,
"alpha_frac": 0.5897751771,
"autogenerated": false,
"ratio": 4.038557213930348,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00033422459893048126,
"num_lines": 102
} |
"""Advanced example using other configuration options."""
from apscheduler.jobstores.sqlalchemy import SQLAlchemyJobStore
from flask import Flask
from flask_apscheduler import APScheduler
class Config:
    """App configuration."""

    # Jobs registered at startup: run advanced:job1 with args (1, 2)
    # every 10 seconds.
    JOBS = [
        {
            "id": "job1",
            "func": "advanced:job1",
            "args": (1, 2),
            "trigger": "interval",
            "seconds": 10,
        }
    ]

    # Store jobs in SQLite via SQLAlchemy ("sqlite://" is an in-memory DB).
    SCHEDULER_JOBSTORES = {"default": SQLAlchemyJobStore(url="sqlite://")}

    # Execute jobs on a thread pool of up to 20 workers.
    SCHEDULER_EXECUTORS = {"default": {"type": "threadpool", "max_workers": 20}}

    # Do not coalesce missed runs; allow up to 3 concurrent instances per job.
    SCHEDULER_JOB_DEFAULTS = {"coalesce": False, "max_instances": 3}

    # Expose the Flask-APScheduler REST API endpoints.
    SCHEDULER_API_ENABLED = True
def job1(var_one, var_two):
    """Demo job function: print both arguments separated by a space.

    :param var_one: first value to print
    :param var_two: second value to print
    """
    print(str(var_one) + " " + str(var_two))
if __name__ == "__main__":
    app = Flask(__name__)
    # Load the scheduler settings from the Config class above.
    app.config.from_object(Config())

    scheduler = APScheduler()
    scheduler.init_app(app)
    scheduler.start()

    app.run()
| {
"repo_name": "viniciuschiele/flask-apscheduler",
"path": "examples/advanced.py",
"copies": "1",
"size": "1028",
"license": "apache-2.0",
"hash": 8727885259553075000,
"line_mean": 20.4166666667,
"line_max": 80,
"alpha_frac": 0.5836575875,
"autogenerated": false,
"ratio": 3.496598639455782,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4580256226955782,
"avg_score": null,
"num_lines": null
} |
#advanced feature
# Build the list of odd numbers 1..99 with a while loop.
L=[]
n=1
while n<=99:
    L.append(n)
    n+=2
print(L)
# Indexing and slicing demos.
L=['Michael', 'Sarah', 'Tracy', 'Bob', 'Jack']
print(L[1])
print(L[2])
print(L[3])
print(L[0:3])
# Manual copy of the first k elements (equivalent to L[:k]).
r = []
k = 3
for i in range(k):
    r.append(L[i])
print(r)
# Negative indices count from the end.
print(L[-1])
print(L[-2])
print(L[-2:-1])
print('key value------')
# Iterating a dict yields its keys; .values() yields the values.
d = {'a': 1, 'b': 2, 'c': 3}
for key in d:
    print(key)
for key in d.values():
    print(key)
print('key value------')
# Strings are iterable character by character.
for ch in 'ABCD':
    print(ch)
# `Iterable` lives in collections.abc; importing it from `collections`
# was deprecated and removed in Python 3.10.
from collections.abc import Iterable
print(isinstance('abc',Iterable))
# enumerate() pairs each element with its index.
for i, value in enumerate(['A','B','C']):
    print(i,value)
# list generate
print('---------------------------generate')
L=[]
L= list(range(1,11))
print(L)
L= list(range(10))
print(L)
# List comprehension: squares of 1..10.
L=[x*x for x in range(1,11)]
print(L)
import os
L=[d for d in os.listdir('.')]
print(L)
d = {'x': 'A', 'y': 'B', 'z': 'C' }
for k,v in d.items():
    print(k,'=',v)
# Comprehension over dict items, building "key=value" strings.
L=[k+'='+v for k,v in d.items()]
print(L)
L = ['Hello', 'World', 'IBM', 'Apple']
L= [s.lower() for s in L]
print(L)
# Filter out non-strings before calling .lower().
L = ['Hello', 'World', 18, 'Apple', None]
L=[s.lower() for s in L if isinstance(s,str)]
print(L)
# Generator expression: values are produced lazily via next()/iteration.
g=(x*x for x in range(10))
print(g)
print(next(g))
print(next(g))
print(next(g))
# Iteration continues from where next() left off (9, 16, ..., 81).
for n in g:
    print(n)
def fib(max):
    """Print the first `max` Fibonacci numbers and return 'done'."""
    a, b = 0, 1
    for _ in range(max):
        print(b)
        a, b = b, a + b
    return 'done'
# fib() prints the sequence itself; this prints its return value 'done'.
print(fib(6))
def fib_gen(max):
    """Generator form of fib(): yield the first `max` Fibonacci numbers.

    The 'done' return value becomes StopIteration.value for callers that
    drive the generator manually.
    """
    a, b = 0, 1
    for _ in range(max):
        yield b
        a, b = b, a + b
    return 'done'
# fib_gen() is a generator function; printing it shows the generator object.
print(fib_gen(6))
def odd():
    """Generator demo: each next() runs up to (and including) one yield."""
    steps = ('step1', 'step 2', 'step 3')
    values = (1, 3, 5)
    for label, value in zip(steps, values):
        print(label)
        yield value
# Drive the odd() generator step by step; each next() prints one step label.
o=odd()
print(next(o))
print(next(o))
print(next(o))
# for n in fib(10):
#     print(n)
print("-------------------")
for n in fib_gen(10):
    print(n)
print("-------------------")
# Drive the generator manually to capture the StopIteration return value.
g=fib_gen(6)
while True:
    try:
        x=next(g)
        print('g:',x)
    except StopIteration as e:
        print('Generator return vaule:',e.value)
        break
# -*- coding: utf-8 -*-
print('triangles')
# Earlier (broken) recursive attempt, kept commented out by the author:
# def tlist(n):
#     if n==1:
#         return [1]
#     else:
#         t=tlist[n-1]
#         L=[]
#         for k in range(n):
#             if k==0 or k==n-1:
#                 L.append(1)
#             else:
#                 L.append(t[k]+t[k-1])
#         return L
# def triangles(n):
#     for n in range(n):
#         print(tlist(n))
# triangles(2)
def triangles(n):
    """Yield the first n rows of Pascal's triangle, each as a fresh list.

    Bug fix: the original yielded its working list and then mutated it
    (appending a trailing 0 used by its update trick), so every row a
    consumer kept was corrupted -- e.g. list(triangles(3)) came back as
    [[1, 0], [1, 1, 0], [1, 2, 1, 0]].  Rows are now never touched after
    being yielded; printed output during iteration is unchanged.
    """
    row = [1]
    for _ in range(n):
        yield row
        # Next row: 1, the pairwise sums of neighbours, 1.
        row = [1] + [row[i] + row[i + 1] for i in range(len(row) - 1)] + [1]
for i in triangles(10):
    print(i)
# Printing the generator object itself, not its rows.
g=triangles(9)
print(g)
# Slicing and sum() demos.
L=[1,2,3,4,5,6]
print(L)
print(L[2:])
print(L[1:3])
print(sum(L[1:3]))
# Iterators and iterable objects: drain an iterator manually with next().
it = iter([1,2,2,4,45])
while True:
    try:
        x= next(it)
        print(x)
    except StopIteration:
        break
| {
"repo_name": "CrazyBBer/Python-Learn-Sample",
"path": "Function/advanced.py",
"copies": "1",
"size": "2583",
"license": "mit",
"hash": -4487050427185922000,
"line_mean": 11.2822966507,
"line_max": 46,
"alpha_frac": 0.542267238,
"autogenerated": false,
"ratio": 2.0819140308191404,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7605253065189652,
"avg_score": 0.10378564072589745,
"num_lines": 209
} |
# Advanced Frame Differencing Example
#
# Note: You will need an SD card to run this example.
#
# This example demonstrates using frame differencing with your OpenMV Cam. This
# example is advanced because it performs a background update to deal with the
# background image changing over time.

import sensor, image, pyb, os, time

TRIGGER_THRESHOLD = 5   # Histogram-percentile spread that counts as "motion".

BG_UPDATE_FRAMES = 50 # How many frames before blending.
BG_UPDATE_BLEND = 128 # How much to blend by... ([0-256]==[0.0-1.0]).

sensor.reset() # Initialize the camera sensor.
sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565
sensor.set_framesize(sensor.QVGA) # or sensor.QQVGA (or others)
sensor.skip_frames(time = 2000) # Let new settings take effect.
clock = time.clock() # Tracks FPS.

if not "temp" in os.listdir(): os.mkdir("temp") # Make a temp directory

print("About to save background image...")
sensor.skip_frames(time = 2000) # Give the user time to get ready.
sensor.snapshot().save("temp/bg.bmp")
print("Saved background image - Now frame differencing!")

triggered = False

frame_count = 0
while(True):
    clock.tick() # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot() # Take a picture and return the image.

    frame_count += 1
    if (frame_count > BG_UPDATE_FRAMES):
        frame_count = 0
        # Blend in new frame. We're doing 256-alpha here because we want to
        # blend the new frame into the background. Not the background into the
        # new frame which would be just alpha. Blend replaces each pixel by
        # ((NEW*(alpha))+(OLD*(256-alpha)))/256. So, a low alpha results in
        # low blending of the new image while a high alpha results in high
        # blending of the new image. We need to reverse that for this update.
        img.blend("temp/bg.bmp", alpha=(256-BG_UPDATE_BLEND))
        img.save("temp/bg.bmp")

    # Replace the image with the "abs(NEW-OLD)" frame difference.
    img.difference("temp/bg.bmp")

    hist = img.get_histogram()
    # This code below works by comparing the 99th percentile value (e.g. the
    # non-outlier max value) against the 90th percentile value (e.g. a non-max
    # value). The difference between the two values will grow as the difference
    # image sees more pixels change.
    diff = hist.get_percentile(0.99).l_value() - hist.get_percentile(0.90).l_value()
    triggered = diff > TRIGGER_THRESHOLD

    print(clock.fps(), triggered) # Note: Your OpenMV Cam runs about half as fast while
    # connected to your computer. The FPS should increase once disconnected.
| {
"repo_name": "openmv/openmv",
"path": "scripts/examples/Arduino/Portenta-H7/20-Frame-Differencing/on_disk_advanced_frame_differencing.py",
"copies": "2",
"size": "2549",
"license": "mit",
"hash": 9050809899523862000,
"line_mean": 41.4833333333,
"line_max": 87,
"alpha_frac": 0.702236171,
"autogenerated": false,
"ratio": 3.6104815864022664,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5312717757402267,
"avg_score": null,
"num_lines": null
} |
# Advanced Frame Differencing Example
#
# This example demonstrates using frame differencing with your OpenMV Cam. This
# example is advanced because it performs a background update to deal with the
# background image changing over time.

import sensor, image, pyb, os, time

TRIGGER_THRESHOLD = 5   # Histogram-percentile spread that counts as "motion".

BG_UPDATE_FRAMES = 50 # How many frames before blending.
BG_UPDATE_BLEND = 128 # How much to blend by... ([0-256]==[0.0-1.0]).

sensor.reset() # Initialize the camera sensor.
sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565
sensor.set_framesize(sensor.QVGA) # or sensor.QQVGA (or others)
sensor.skip_frames(time = 2000) # Let new settings take effect.
clock = time.clock() # Tracks FPS.

# Take from the main frame buffer's RAM to allocate a second frame buffer.
# There's a lot more RAM in the frame buffer than in the MicroPython heap.
# However, after doing this you have a lot less RAM for some algorithms...
# So, be aware that it's a lot easier to get out of RAM issues now. However,
# frame differencing doesn't use a lot of the extra space in the frame buffer.
# But, things like AprilTags do and won't work if you do this...
extra_fb = sensor.alloc_extra_fb(sensor.width(), sensor.height(), sensor.GRAYSCALE)

print("About to save background image...")
sensor.skip_frames(time = 2000) # Give the user time to get ready.
extra_fb.replace(sensor.snapshot())
print("Saved background image - Now frame differencing!")

triggered = False

frame_count = 0
while(True):
    clock.tick() # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot() # Take a picture and return the image.

    frame_count += 1
    if (frame_count > BG_UPDATE_FRAMES):
        frame_count = 0
        # Blend in new frame. We're doing 256-alpha here because we want to
        # blend the new frame into the background. Not the background into the
        # new frame which would be just alpha. Blend replaces each pixel by
        # ((NEW*(alpha))+(OLD*(256-alpha)))/256. So, a low alpha results in
        # low blending of the new image while a high alpha results in high
        # blending of the new image. We need to reverse that for this update.
        img.blend(extra_fb, alpha=(256-BG_UPDATE_BLEND))
        extra_fb.replace(img)

    # Replace the image with the "abs(NEW-OLD)" frame difference.
    img.difference(extra_fb)

    hist = img.get_histogram()
    # This code below works by comparing the 99th percentile value (e.g. the
    # non-outlier max value) against the 90th percentile value (e.g. a non-max
    # value). The difference between the two values will grow as the difference
    # image sees more pixels change.
    diff = hist.get_percentile(0.99).l_value() - hist.get_percentile(0.90).l_value()
    triggered = diff > TRIGGER_THRESHOLD

    print(clock.fps(), triggered) # Note: Your OpenMV Cam runs about half as fast while
    # connected to your computer. The FPS should increase once disconnected.
| {
"repo_name": "openmv/openmv",
"path": "scripts/examples/Arduino/Portenta-H7/20-Frame-Differencing/in_memory_advanced_frame_differencing.py",
"copies": "2",
"size": "2937",
"license": "mit",
"hash": -8568993232279012000,
"line_mean": 44.890625,
"line_max": 87,
"alpha_frac": 0.7136533878,
"autogenerated": false,
"ratio": 3.680451127819549,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5394104515619549,
"avg_score": null,
"num_lines": null
} |
#advanced functions library
from bas_lib import *
from mac_lib import *
import random
import time
import datetime
import cus_lib
# Custom command handlers, kept reloadable via ricarica() below.
cus_funct=cus_lib.cus_funct
#tim_funct=cus_lib.tim_funct # timed functions (disabled)
def esegui (utente,comando,destinatario,testo):
    """Dispatch one parsed IRC event (user, command, target, text) to the
    registered handlers."""
    ambiente_attivo=cus_lib.ambiente_attivo
    # Split the text into the bot command and its parameters.
    comandobot=parametri=''
    if testo.find(' ')!=-1:
        parametri=testo[testo.index(' ')+1:].strip()
    try:
        # `trigger` is defined elsewhere (via the star imports above) --
        # presumably the bot's command prefix character; TODO confirm.
        if testo[0:2]==':'+trigger and testo[2:].split()[0] in e_privmsg:
            comandobot=testo[2:].split()[0]
    except:
        pass
    # Example PING:
    # utente  = PING
    # comando = :12345
    # full line = PING :12345
    if utente == 'PING':
        e_ping(comando)
    # Example PRIVMSG:
    # utente       = someone
    # comando      = PRIVMSG
    # destinatario = a nick or #channel
    # comandobot   = +command
    # parametri    = param1 param2
    # full line    = <someone> PRIVMSG nick :+command param1 param2
    elif comando == 'PRIVMSG':
        if ambiente_attivo == '':
            if comandobot in e_privmsg:
                e_privmsg[comandobot](utente,destinatario,parametri)
        else:
            # An "environment" is active: explicit commands still win,
            # otherwise all text goes to the active environment handler.
            if comandobot in e_privmsg:
                e_privmsg[comandobot](utente,destinatario,parametri)
            else:
                e_privmsg[ambiente_attivo](utente,destinatario,testo[1:])
    # Example JOIN:
    # utente       = someone
    # comando      = JOIN
    # destinatario = #channel
    elif comando == 'JOIN':
        if utente != nomebot:
            e_join(utente,destinatario)# destinatario starts with ':'
def analisi (data): # this could surely be done with a regex in about 2 lines
    """Parse raw IRC data line by line and hand each event to esegui()."""
    riga=data.split("\n")
    for i in range(len(riga)-1):
        stampa = utente = comando = destinatario = testo = ''
        parola=riga[i].split()
        # parola[0] is the prefix: strip the leading ':' and any '!user@host'.
        if parola[0][0]==':':
            if '!' in parola[0]:
                utente = parola[0][1:parola[0].index('!')]
            else:
                utente = parola[0][1:len(parola[0])]
        else:
            utente = parola[0][0:len(parola[0])]
        try:
            comando = parola[1]
            destinatario = parola[2]
            # Everything from the fourth word onwards is the message text.
            testo = riga[i][riga[i].index(parola[3]):]
        except:
            pass
        stampa = datetime.datetime.fromtimestamp(time.time()).strftime('%H-%M-%S')+\
            ' <'+utente+'> '+comando+' '+destinatario+' '+testo # final log format
        # stampa = '<'+utente+'> _utente_ '+comando+' _comando_ '+destinatario+' _destinatario_ '+testo+' _testo_' # debug
        if utente!=nomebot:
            print stampa
            esegui (utente,comando,destinatario,testo)
def ricarica (utente, destinatario, parametri) :
    """Reload the custom-function module at runtime (owner only)."""
    if utente == owner:
        reload(cus_lib)
        # NOTE(review): this rebinds a *local* cus_funct, not the
        # module-level one; the .update() call below is what actually
        # refreshes the dispatch table.
        cus_funct=cus_lib.cus_funct
        e_privmsg.update(cus_funct)
        #tim_funct=cus_lib.tim_funct
        #e_tempo.update(tim_funct)
        print 'funzioni ricaricate'
# Built-in command names mapped to their handlers (from the star imports).
# NOTE(review): 'quit' maps to `cuit` -- presumably named to avoid clashing
# with the quit() builtin; confirm it is not a typo.
adv_funct={'messaggio':messaggio,\
           'query':query,\
           'join':join,\
           'part':part,\
           'quit':cuit,\
           'notice':notice,\
           'ricarica':ricarica}
# Full PRIVMSG dispatch table: built-ins merged with custom handlers.
e_privmsg=dict(adv_funct,**cus_funct)
#e_tempo=tim_funct
"repo_name": "izabera/izabot",
"path": "adv_lib.py",
"copies": "1",
"size": "2835",
"license": "mit",
"hash": -4665285421567215000,
"line_mean": 27.36,
"line_max": 116,
"alpha_frac": 0.6373897707,
"autogenerated": false,
"ratio": 2.6372093023255814,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8550517166629954,
"avg_score": 0.04481638127912553,
"num_lines": 100
} |
# ADVANCED MATH CALCULATOR v1.4
# by Raphael Gutierrez (fb.com/raphael.gutierrez.17)
# Licensed under MIT (https://github.com/ralphgutz/Advanced-Python-Calculator/blob/master/LICENSE)
# I wrote the codes using my basic Python knowledge to easily understand the codes.
import math
def basic():
    """Interactive menu for the four basic arithmetic operations (+, -, *, /).

    Reads the operation choice and two numbers from stdin and prints the
    result; an unknown choice prints an error message.
    """
    print("*" * 40)
    print("\nWELCOME TO BASIC MATH MENU!")
    print("You have four operations in this menu:\n")
    print(" 1 - ADDITION\n 2 - SUBTRACTION\n 3 - MULTIPLICATION\n 4 - DIVISION")
    oper_input = input("\n What operation do you want to use? ")
    if oper_input == "1":
        print("*" * 40)
        print("\nYou've chosen the ADDITION operation!\n")
        input_num1 = float(input(" Enter the first number: "))
        input_num2 = float(input(" Enter the second number: "))
        print("\n Answer: ", input_num1 + input_num2)
    elif oper_input == "2":
        print("*" * 40)
        print("\nYou've chosen the SUBTRACTION operation!\n")
        input_num1 = float(input(" Enter the first number: "))
        input_num2 = float(input(" Enter the second number: "))
        print("\n Answer: ", input_num1 - input_num2)
    elif oper_input == "3":
        print("*" * 40)
        print("\nYou've chosen the MULTIPLICATION operation!\n")
        input_num1 = float(input(" Enter the first number: "))
        input_num2 = float(input(" Enter the second number: "))
        print("\n Answer: ", input_num1 * input_num2)
    elif oper_input == "4":
        print("*" * 40)
        print("\nYou've chosen the DIVISION operation!\n")
        input_num1 = float(input(" Enter the first number: "))
        input_num2 = float(input(" Enter the second number: "))
        # NOTE(review): dividing by zero is unhandled here and will raise
        # ZeroDivisionError.
        print("\n Answer: ", input_num1 / input_num2)
    else:
        print("\n>>> ERROR! Please enter a valid number.")
def theoric():
    """Interactive menu for the math module's number-theoretic and
    representation functions (ceil, copysign, fabs, factorial, ...).

    Bug fix: option 4 previously converted the input with float() and fed
    it to math.factorial(), which only accepts integers on modern Python
    (passing floats was deprecated in 3.9 and removed in 3.12); it now
    reads the value with int(), matching the gcd option.
    """
    print("*" * 40)
    print("\nWELCOME TO NUMERIC-THEORIC MENU!")
    print("You have 14 operations in this menu:\n")
    print(" 1 - CEILING\n 2 - COPYSIGN\n 3 - FABS\n 4 - FACTORIAL\n 5 - FLOOR\n 6 - FMOD\n 7 - FREXP\n 8 - GCD\n 9 - IS FINITE\n 10 - IS INFINITE\n 11 - IS NaN\n 12 - LDEXP\n 13 - MODF\n 14 - TRUNC")
    oper_input = input("\n What operation do you want to use? ")
    if oper_input == "1":
        print("*" * 40)
        print("\nYou've chosen the MATH.CEIL!\n")
        input_num1 = float(input(" Enter a number: "))
        print("\n Answer: ", math.ceil(input_num1))
    elif oper_input == "2":
        print("*" * 40)
        print("\nYou've chosen the MATH.COPYSIGN!\n")
        input_num1 = float(input(" Enter the first number: "))
        input_num2 = float(input(" Enter the second number: "))
        print("\n Answer: ", math.copysign(input_num1, input_num2))
    elif oper_input == "3":
        print("*" * 40)
        print("\nYou've chosen the MATH.FABS!\n")
        input_num1 = float(input(" Enter a number: "))
        print("\n Answer: ", math.fabs(input_num1))
    elif oper_input == "4":
        print("*" * 40)
        print("\nYou've chosen the MATH.FACTORIAL!\n")
        # math.factorial only accepts integers (floats rejected in 3.12+).
        input_num1 = int(input(" Enter a number: "))
        print("\n Answer: ", math.factorial(input_num1))
    elif oper_input == "5":
        print("*" * 40)
        print("\nYou've chosen the MATH.FLOOR!\n")
        input_num1 = float(input(" Enter a number: "))
        print("\n Answer: ", math.floor(input_num1))
    elif oper_input == "6":
        print("*" * 40)
        print("\nYou've chosen the MATH.FMOD!\n")
        input_num1 = float(input(" Enter the first number: "))
        input_num2 = float(input(" Enter the second number: "))
        print("\n Answer: ", math.fmod(input_num1, input_num2))
    elif oper_input == "7":
        print("*" * 40)
        print("\nYou've chosen the MATH.FREXP!\n")
        input_num1 = float(input(" Enter a number: "))
        print("\n Answer: ", math.frexp(input_num1))
    elif oper_input == "8":
        print("*" * 40)
        print("\nYou've chosen the MATH.GCD!\n")
        input_num1 = int(input(" Enter the first number: "))
        input_num2 = int(input(" Enter the second number: "))
        print("\n Answer: ", math.gcd(input_num1, input_num2))
    elif oper_input == "9":
        print("*" * 40)
        print("\nYou've chosen the MATH.ISFINITE!\n")
        input_num1 = float(input(" Enter a number: "))
        print("\n Answer: ", math.isfinite(input_num1))
    elif oper_input == "10":
        print("*" * 40)
        print("\nYou've chosen the MATH.ISINF!\n")
        input_num1 = float(input(" Enter a number: "))
        print("\n Answer: ", math.isinf(input_num1))
    elif oper_input == "11":
        print("*" * 40)
        print("\nYou've chosen the MATH.ISNAN!\n")
        input_num1 = float(input(" Enter a number: "))
        print("\n Answer: ", math.isnan(input_num1))
    elif oper_input == "12":
        print("*" * 40)
        print("\nYou've chosen the MATH.LDEXP!\n")
        input_num1 = float(input(" Enter the first number: "))
        input_num2 = int(input(" Enter the second number: "))
        print("\n Answer: ", math.ldexp(input_num1, input_num2))
    elif oper_input == "13":
        print("*" * 40)
        print("\nYou've chosen the MATH.MODF!\n")
        input_num1 = float(input(" Enter a number: "))
        print("\n Answer: ", math.modf(input_num1))
    elif oper_input == "14":
        print("*" * 40)
        print("\nYou've chosen the MATH.TRUNC!\n")
        input_num1 = float(input(" Enter a number: "))
        print("\n Answer: ", math.trunc(input_num1))
    else:
        print("\n>>> ERROR! Please enter a valid number.")
def logarithm():
    """Interactive menu for power and logarithmic functions (exp, log,
    pow, sqrt, ...).

    NOTE(review): nonpositive arguments to the log functions and negative
    arguments to sqrt are unhandled and will raise ValueError.
    """
    print("*" * 40)
    print("\nWELCOME TO POWER-LOGARITHMIC MENU!")
    print("You have eight operations in this menu:\n")
    print(" 1 - EXP\n 2 - EXPM1\n 3 - LOGARITHM\n 4 - LOGARITHM OF 1+x\n 5 - LOGARITHM Base-2\n 6 - LOGARITHM Base-10\n 7 - POWER\n 8 - SQUARE ROOT")
    oper_input = input("\n What operation do you want to use? ")
    if oper_input == "1":
        print("*" * 40)
        print("\nYou've chosen the MATH.EXP!\n")
        input_num1 = float(input(" Enter a number: "))
        print("\n Answer: ", math.exp(input_num1))
    elif oper_input == "2":
        print("*" * 40)
        print("\nYou've chosen the MATH.EXPM1!\n")
        input_num1 = float(input(" Enter a number: "))
        print("\n Answer: ", math.expm1(input_num1))
    elif oper_input == "3":
        print("*" * 40)
        print("\nYou've chosen the LOGARITHMIC opeartion!\n")
        # First number is the value, second is the logarithm base.
        input_num1 = float(input(" Enter the first number: "))
        input_num2 = float(input(" Enter the second number: "))
        print("\n Answer: ", math.log(input_num1, input_num2))
    elif oper_input == "4":
        print("*" * 40)
        print("\nYou've chosen the LOGARITHM of 1+x!\n")
        input_num1 = float(input(" Enter a number: "))
        print("\n Answer: ", math.log1p(input_num1))
    elif oper_input == "5":
        print("*" * 40)
        print("\nYou've chosen the LOGARITHIM Base-2!\n")
        input_num1 = float(input(" Enter a number: "))
        print("\n Answer: ", math.log2(input_num1))
    elif oper_input == "6":
        print("*" * 40)
        print("\nYou've chosen the LOGARITHM Base-10!\n")
        input_num1 = float(input(" Enter a number: "))
        print("\n Answer: ", math.log10(input_num1))
    elif oper_input == "7":
        print("*" * 40)
        print("\nYou've chosen the POWER operation!\n")
        input_num1 = float(input(" Enter the first number: "))
        input_num2 = float(input(" Enter the second number: "))
        print("\n Answer: ", math.pow(input_num1, input_num2))
    elif oper_input == "8":
        print("*" * 40)
        print("\nYou've chosen the SQUARE ROOT operation!!\n")
        input_num1 = float(input(" Enter a number: "))
        print("\n Answer: ", math.sqrt(input_num1))
    else:
        print("\n>>> ERROR! Please enter a valid number.")
def trigonometry():
    """Interactive menu for trigonometric functions (angles in radians).

    NOTE(review): acos/asin inputs outside [-1, 1] are unhandled and will
    raise ValueError.
    """
    print("*" * 40)
    print("\nWELCOME TO TRIGONOMETRY MENU!")
    print("You have eight operations in this menu:\n")
    print(" 1 - ARC COSINE\n 2 - ARC SINE\n 3 - ARC TANGENT\n 4 - ARC TANGENT2\n 5 - COSINE\n 6 - HYPOTHENUS\n 7 - SINE\n 8 - TANGENT")
    oper_input = input("\n What operation do you want to use? ")
    if oper_input == "1":
        print("*" * 40)
        print("\nYou've chosen the ARC COSINE!\n")
        input_num1 = float(input(" Enter a number: "))
        print("\n Answer: ", math.acos(input_num1))
    elif oper_input == "2":
        print("*" * 40)
        print("\nYou've chosen the ARC SINE!\n")
        input_num1 = float(input(" Enter a number: "))
        print("\n Answer: ", math.asin(input_num1))
    elif oper_input == "3":
        print("*" * 40)
        print("\nYou've chosen the ARC TANGENT!\n")
        input_num1 = float(input(" Enter a number: "))
        print("\n Answer: ", math.atan(input_num1))
    elif oper_input == "4":
        print("*" * 40)
        print("\nYou've chosen the ARC TANGENT2!\n")
        # atan2(y, x): the second prompt is passed as y, the first as x.
        input_num1 = float(input(" Enter the first number: "))
        input_num2 = float(input(" Enter the second number: "))
        print("\n Answer: ", math.atan2(input_num2,input_num1))
    elif oper_input == "5":
        print("*" * 40)
        print("\nYou've chosen the COSINE!\n")
        input_num1 = float(input(" Enter a number: "))
        print("\n Answer: ", math.cos(input_num1))
    elif oper_input == "6":
        print("*" * 40)
        print("\nYou've chosen the HYPOTHENUS!\n")
        input_num1 = float(input(" Enter the first number: "))
        input_num2 = float(input(" Enter the second number: "))
        print("\n Answer: ", math.hypot(input_num1, input_num2))
    elif oper_input == "7":
        print("*" * 40)
        print("\nYou've chosen the SINE!\n")
        input_num1 = float(input(" Enter a number: "))
        print("\n Answer: ", math.sin(input_num1))
    elif oper_input == "8":
        print("*" * 40)
        print("\nYou've chosen the TANGENT!\n")
        input_num1 = float(input(" Enter a number: "))
        print("\n Answer: ", math.tan(input_num1))
    else:
        print("\n>>> ERROR! Please enter a valid number.")
def angular():
    """Interactive menu for angular conversions (radians <-> degrees)."""
    print("*" * 40)
    print("\nWELCOME TO ANGULAR MENU!")
    print("You have two operations in this menu:\n")
    print(" 1 - DEGREES\n 2 - RADIANS")
    oper_input = input("\n What operation do you want to use? ")
    if oper_input == "1":
        print("*" * 40)
        print("\nYou've chosen the DEGREES!\n")
        input_num1 = float(input(" Enter a number: "))
        print("\n Answer: ", math.degrees(input_num1))
    elif oper_input == "2":
        print("*" * 40)
        print("\nYou've chosen the RADIANS!\n")
        input_num1 = float(input(" Enter a number: "))
        print("\n Answer: ", math.radians(input_num1))
    else:
        print("\n>>> ERROR! Please enter a valid number.")
def hyperbole():
    """Interactive menu for hyperbolic functions and their inverses."""
    print("*" * 40)
    print("\nWELCOME TO HYPERBOLE MENU!")
    print("You have six operations in this menu:\n")
    print(" 1 - INV HYPERBOLIC COSINE\n 2 - INV HYPERBOLIC SINE\n 3 - INV HYPERBOLIC TANGENT\n 4 - HYPERBOLIC COSINE\n 5 - HYPERBOLIC SINE\n 6 - HYPERBOLIC TANGENT")
    oper_input = input("\n What operation do you want to use? ")
    if oper_input == "1":
        print("*" * 40)
        print("\nYou've chosen the INV HYPERBOLIC COSINE!\n")
        input_num1 = float(input(" Enter a number: "))
        print("\n Answer: ", math.acosh(input_num1))
    elif oper_input == "2":
        print("*" * 40)
        print("\nYou've chosen the INV HYPERBOLIC SINE!\n")
        input_num1 = float(input(" Enter a number: "))
        print("\n Answer: ", math.asinh(input_num1))
    elif oper_input == "3":
        print("*" * 40)
        print("\nYou've chosen the INV HYPERBOLIC TANGENT!\n")
        input_num1 = float(input(" Enter a number: "))
        print("\n Answer: ", math.atanh(input_num1))
    elif oper_input == "4":
        print("*" * 40)
        print("\nYou've chosen the HYPERBOLIC COSINE!\n")
        input_num1 = float(input(" Enter a number: "))
        print("\n Answer: ", math.cosh(input_num1))
    elif oper_input == "5":
        print("*" * 40)
        print("\nYou've chosen the HYPERBOLIC SINE!\n")
        input_num1 = float(input(" Enter a number: "))
        print("\n Answer: ", math.sinh(input_num1))
    elif oper_input == "6":
        print("*" * 40)
        print("\nYou've chosen the HYPERBOLIC TANGENT!\n")
        input_num1 = float(input(" Enter a number: "))
        print("\n Answer: ", math.tanh(input_num1))
    else:
        print("\n>>> ERROR! Please enter a valid number.")
def special():
    """Show the special-functions menu and evaluate the chosen operation."""
    print("*" * 40)
    print("\nWELCOME TO SPECIAL FUNCTIONS MENU!")
    print("You have four operations in this menu:\n")
    print(" 1 - ERROR FUNCTION\n 2 - COMPLEMENTARY ERROR FUNC.\n 3 - GAMMA FUNCTION\n 4 - LOGARITHM OF GAMMA FUNC.")
    choice = input("\n What operation do you want to use? ")
    # Menu key -> (label shown to the user, math function applied).
    operations = {
        "1": ("ERROR FUNCTION", math.erf),
        "2": ("COMPLEMENTARY ERROR FUNC.", math.erfc),
        "3": ("GAMMA FUNCTION", math.gamma),
        "4": ("LOGARITHM OF GAMMA FUNC.", math.lgamma),
    }
    if choice in operations:
        label, func = operations[choice]
        print("*" * 40)
        print("\nYou've chosen the %s!\n" % label)
        value = float(input(" Enter a number: "))
        print("\n Answer: ", func(value))
    else:
        print("\n>>> ERROR! Please enter a valid number.")
def constants():
    """Show the constants menu and print the selected math constant."""
    print("*" * 40)
    print("\nWELCOME TO CONSTANTS MENU!")
    print("You have four constants in this menu:\n")
    print(" 1 - PI\n 2 - EULER'S NUMBER (e)\n 3 - INFINITY\n 4 - NOT A NUMBER")
    choice = input("\n What constant do you want to use? ")
    # Menu key -> (label shown to the user, constant value printed).
    lookup = {
        "1": ("PI NUMBER", math.pi),
        "2": ("EULER'S NUMBER", math.e),
        "3": ("INFINITY CONSTANT", math.inf),
        "4": ("NOT A NUMBER CONSTANT", math.nan),
    }
    if choice in lookup:
        label, value = lookup[choice]
        print("*" * 40)
        print("\nYou've chosen the %s!\n" % label)
        print(" Answer: ", value)
    else:
        print("\n>>> ERROR! Please enter a valid number.")
def _run_and_prompt(menu_func):
    """Run one sub-menu, then ask whether to restart the calculator or exit.

    Pressing ENTER restarts via welcome(); 'E'/'e' exits; anything else
    prints an error and returns (matching the original per-branch behavior,
    which was copy-pasted eight times).
    """
    menu_func()
    repeat_exit = input("\nPress 'ENTER' to repeat the program, 'E' to exit. ")
    if repeat_exit == "":
        welcome()
    elif repeat_exit in ("E", "e"):
        exit()
    else:
        print("\n>>> ERROR! Please enter a valid character.")
def welcome():
    """Top-level menu: dispatch to one of the calculator sub-menus."""
    print("*" * 40)
    print("\nWELCOME TO PYTHON CALCULATOR!")
    print("You have nine options to choose.\n")
    print(" 1 - BASIC OPERATIONS\n 2 - THEORIC\n 3 - LOGARITHM\n 4 - TRIGONOMETRY\n 5 - ANGULAR FUNCTIONS\n 6 - HYPERBOLIC FUNCTIONS\n 7 - SPECIAL OPERATIONS\n 8 - CONSTANTS\n 9 - EXIT")
    oper_input = input("\n What operation do you want to use? ")
    # NOTE(review): option 4 is labelled TRIGONOMETRY but was dispatched to
    # theoric(), same as option 2 — this looks like a copy-paste bug in the
    # original; confirm the intended trigonometry function before retargeting.
    menus = {
        "1": basic,
        "2": theoric,
        "3": logarithm,
        "4": theoric,
        "5": angular,
        "6": hyperbole,
        "7": special,
        "8": constants,
    }
    if oper_input in menus:
        _run_and_prompt(menus[oper_input])
    elif oper_input == "9":
        exit()
    else:
        print("\n>>> ERROR! Please enter a valid number.")
welcome()
# Press 'UP' key to repeat the program after using it.
| {
"repo_name": "ralphgutz/Advanced-Python-Calculator",
"path": "Calculator.py",
"copies": "1",
"size": "16952",
"license": "mit",
"hash": -7667261597707341000,
"line_mean": 30.8527131783,
"line_max": 238,
"alpha_frac": 0.5910217084,
"autogenerated": false,
"ratio": 2.8639972968406826,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.39550190052406825,
"avg_score": null,
"num_lines": null
} |
#This program reads a XML and writes it into a CSV
#For each of those tasks there is a seperate function written.
#The filename to read has to be the first argument from the command line.
#The filename to write into has to be the second arguments from command line.
import sys
from bs4 import BeautifulSoup as Soup
import csv
###############################################################################
def readXML(filename):
    """Parse a gas-day XML file into a list of [date, hour, power] rows.

    The file is parsed with BeautifulSoup's lxml parser, so all tag names
    are lower-cased (XML case is not significant here).  Dates come out as
    YYYY-MM-DD strings, hours as zero-padded two-digit strings, and the
    power value is returned without its unit.
    """
    rows = []
    with open(filename, 'r') as handle:
        soup = Soup(handle, 'lxml')
        # Walk the nesting gasday -> boundarynode -> time -> amountofpower,
        # collecting one row per amountofpower tag found.
        for day in soup.findAll('gasday'):
            date = day.attrs['date']
            for node in day.findAll('boundarynode'):
                for moment in node.findAll('time'):
                    hour = moment.attrs['hour'].zfill(2)
                    for power in moment.findAll('amountofpower'):
                        rows.append([date, hour, power.attrs['value']])
    return rows
###############################################################################
def writeCSV(fileName, data):
    """Write rows of ``data`` to ``fileName`` as comma-separated values.

    Parameters
    ----------
    fileName : str
        Path of the CSV file to create (overwritten if it exists).
    data : iterable of sequences
        One sequence per output row.
    """
    # newline='' is required by the csv module: without it the writer's own
    # '\r\n' row terminators are translated again by the text layer on
    # Windows, producing a blank line after every row.
    with open(fileName, 'w', newline='') as fileHandler:
        writer = csv.writer(fileHandler, delimiter=',')
        # writerows replaces the manual per-row loop.
        writer.writerows(data)
###############################################################################
# Script entry point.
# Usage: python ex2.py <input.xml> <output.csv>
inputName = sys.argv[1]
outputName = sys.argv[2]
# Parse the XML measurements, then dump them as CSV rows.
measurements = readXML(inputName)
writeCSV(outputName, measurements)
| {
"repo_name": "frodo4fingers/appfs",
"path": "Jeney/02_Exercise/ex2.py",
"copies": "3",
"size": "2929",
"license": "mit",
"hash": 299876927021330940,
"line_mean": 31.9101123596,
"line_max": 79,
"alpha_frac": 0.5821099351,
"autogenerated": false,
"ratio": 4.656597774244833,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6738707709344833,
"avg_score": null,
"num_lines": null
} |
# advanced_search.py
import wx
from pubsub import pub
class AdvancedSearch(wx.Panel):
    """Panel holding the advanced-search form for the image search API."""
    def __init__(self, parent):
        super().__init__(parent)
        self.main_sizer = wx.BoxSizer(wx.VERTICAL)
        # Each entry creates one labelled text field; the loop preserves the
        # original creation order, which determines the on-screen layout.
        fields = [
            ('free_text', 'Free text search:'),
            ('nasa_center', 'NASA Center:'),
            ('description', 'Description:'),
            ('description_508', 'Description 508:'),
            ('keywords', 'Keywords (separate with commas):'),
            ('location', 'Location:'),
            ('nasa_id', 'NASA ID:'),
            ('photographer', 'Photographer:'),
            ('secondary_creator', 'Secondary photographer:'),
            ('title', 'Title:'),
        ]
        for attr, label in fields:
            ctrl = wx.TextCtrl(self)
            setattr(self, attr, ctrl)
            self.ui_helper(label, ctrl)
        search = wx.Button(self, label='Search')
        search.Bind(wx.EVT_BUTTON, self.on_search)
        self.main_sizer.Add(search, 0, wx.ALL | wx.CENTER, 5)
        self.SetSizer(self.main_sizer)
    def ui_helper(self, label, textctrl):
        """Add one 'label: text control' row to the panel's main sizer."""
        row = wx.BoxSizer()
        row.Add(wx.StaticText(self, label=label, size=(150, -1)), 0, wx.ALL, 5)
        row.Add(textctrl, 1, wx.ALL | wx.EXPAND, 5)
        self.main_sizer.Add(row, 0, wx.EXPAND)
    def on_search(self, event):
        """Collect the form values and publish them as a search query."""
        query = {
            'q': self.free_text.GetValue(),
            'media_type': 'image',
            'center': self.nasa_center.GetValue(),
            'description': self.description.GetValue(),
            'description_508': self.description_508.GetValue(),
            'keywords': self.keywords.GetValue(),
            'location': self.location.GetValue(),
            'nasa_id': self.nasa_id.GetValue(),
            'photographer': self.photographer.GetValue(),
            'secondary_creator': self.secondary_creator.GetValue(),
            'title': self.title.GetValue(),
        }
        pub.sendMessage('update_ui')
        pub.sendMessage('search_results', query=query)
"repo_name": "slogan621/tscharts",
"path": "apps/xrayuploader/advanced_search.py",
"copies": "1",
"size": "2491",
"license": "apache-2.0",
"hash": -8101417628134089000,
"line_mean": 39.1935483871,
"line_max": 73,
"alpha_frac": 0.5953432356,
"autogenerated": false,
"ratio": 3.6524926686217007,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47478359042217005,
"avg_score": null,
"num_lines": null
} |
"""Advanced Settings Class."""
from fmcapi.api_objects.apiclasstemplate import APIClassTemplate
from .ftds2svpns import FTDS2SVPNs
import logging
class AdvancedSettings(APIClassTemplate):
    """The AdvancedSettings Object in the FMC."""
    VALID_JSON_DATA = [
        "id",
        "name",
        "type",
        "advancedIkeSetting",
        "advancedTunnelSetting",
        "advancedIpsecSetting",
        "version",
    ]
    VALID_FOR_KWARGS = VALID_JSON_DATA + []
    FIRST_SUPPORTED_FMC_VERSION = "6.3"
    PREFIX_URL = "/policy/ftds2svpns"
    REQUIRED_FOR_POST = ["vpn_id"]
    def __init__(self, fmc, **kwargs):
        """
        Initialize AdvancedSettings object.
        :param fmc: (object) FMC object
        :param **kwargs: Set initial variables during instantiation of AdvancedSettings object.
        :return: None
        """
        super().__init__(fmc, **kwargs)
        logging.debug("In __init__() for AdvancedSettings class.")
        self.parse_kwargs(**kwargs)
        self.type = "AdvancedSettings"
    def vpn_policy(self, pol_name):
        """
        Associate a Policy with this VPN.
        :param pol_name: (str) Name of policy.
        :return: None
        """
        logging.debug("In vpn_policy() for AdvancedSettings class.")
        policy = FTDS2SVPNs(fmc=self.fmc)
        policy.get(name=pol_name)
        # Guard clause: bail out if the lookup did not resolve an id.
        if "id" not in policy.__dict__:
            logging.warning(
                f'FTD S2S VPN Policy "{pol_name}" not found. Cannot set up AdvancedSettings for FTDS2SVPNs Policy.'
            )
            return
        self.vpn_id = policy.id
        self.URL = f"{self.fmc.configuration_url}{self.PREFIX_URL}/{self.vpn_id}/advancedsettings"
        self.vpn_added_to_url = True
| {
"repo_name": "daxm/fmcapi",
"path": "fmcapi/api_objects/policy_services/advancedsettings.py",
"copies": "1",
"size": "1750",
"license": "bsd-3-clause",
"hash": 765436822783422800,
"line_mean": 30.8181818182,
"line_max": 116,
"alpha_frac": 0.5942857143,
"autogenerated": false,
"ratio": 3.593429158110883,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4687714872410883,
"avg_score": null,
"num_lines": null
} |
""" Advanced signal (e.g. ctrl+C) handling for IPython
So far, this only ignores ctrl + C in IPython file a subprocess
is executing, to get closer to how a "proper" shell behaves.
Other signal processing may be implemented later on.
If _ip.options.verbose is true, show exit status if nonzero
"""
import signal,os,sys
from IPython.core import ipapi
import subprocess
ip = ipapi.get()
def new_ipsystem_posix(cmd):
    """ ctrl+c ignoring replacement for system() command in iplib.
    Ignore ctrl + c in IPython process during the command execution.
    The subprocess will still get the ctrl + c signal.
    posix implementation
    """
    # Launch through the shell so `cmd` keeps shell semantics (pipes etc.).
    p = subprocess.Popen(cmd, shell = True)
    # Ignore SIGINT in this (parent) process while the child runs; the child
    # still receives Ctrl+C via the terminal's foreground process group.
    old_handler = signal.signal(signal.SIGINT, signal.SIG_IGN)
    pid,status = os.waitpid(p.pid,0)
    # Restore whatever SIGINT handler was installed before.
    signal.signal(signal.SIGINT, old_handler)
    # NOTE: Python 2 print statement — this module predates Python 3.
    if status and ip.options.verbose:
        print "[exit status: %d]" % status
def new_ipsystem_win32(cmd):
    """ ctrl+c ignoring replacement for system() command in iplib.
    Ignore ctrl + c in IPython process during the command execution.
    The subprocess will still get the ctrl + c signal.
    win32 implementation
    """
    # On Windows os.system() is used directly; SIGINT is ignored in the
    # parent for the duration of the call, then the old handler restored.
    old_handler = signal.signal(signal.SIGINT, signal.SIG_IGN)
    status = os.system(cmd)
    signal.signal(signal.SIGINT, old_handler)
    # NOTE: Python 2 print statement — this module predates Python 3.
    if status and ip.options.verbose:
        print "[exit status: %d]" % status
def init():
    """Install the ctrl+c-ignoring system() replacement into IPython."""
    o = ip.options
    # Ensure an `options.verbose` attribute exists; older configurations may
    # not define it, and allow_new_attr must be enabled before adding it.
    try:
        o.verbose
    except AttributeError:
        o.allow_new_attr (True )
        o.verbose = 0
    # Pick the platform-specific implementation.  The `and ... or ...` idiom
    # predates conditional expressions (Python 2.4-era code).
    ip.system = (sys.platform == 'win32' and new_ipsystem_win32 or
                 new_ipsystem_posix)
# Activate on import.
init()
| {
"repo_name": "sodafree/backend",
"path": "build/ipython/IPython/quarantine/ipy_signals.py",
"copies": "1",
"size": "1652",
"license": "bsd-3-clause",
"hash": -6988002960970420000,
"line_mean": 26.0819672131,
"line_max": 68,
"alpha_frac": 0.6761501211,
"autogenerated": false,
"ratio": 3.729119638826185,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9859285613673525,
"avg_score": 0.009196829250531906,
"num_lines": 61
} |
# Sparta node types, keyed as (submodule, name), that are only shown at the
# "advanced" hive level (level 2; see get_level below).  The commented-out
# entries document the remaining catalogue of basic nodes that stay at
# level 1 — kept here as a reference list, not dead code to delete.
advanced_sparta = (
    # ("sensors", "keyboard"),
    #("sensors", "mouse"),
    #("sensors", "collision"),
    #("sensors", "near"),
    #("sensors", "message"),
    #("sensors", "random_"),
    #("processors", "trigger"),
    #("processors", "toggle"),
    #("processors", "switch"),
    #("processors", "if_"),
    #("processors", "python"),
    ("processors", "advanced_python"),
    ("processors", "pull_buffer"),
    ("processors", "transistor"),
    ("processors", "push_buffer"),
    #("assessors", "not_"),
    #("assessors", "any_"),
    #("assessors", "all_"),
    #("assessors", "compare"),
    #("assessors", "between"),
    #("assessors", "variable"),
    #("assessors", "get_property"),
    #("assessors", "view"),
    ("assessors", "game_object"),
    #("assessors", "python"),
    #("rerouters", "hop_in"),
    #("rerouters", "hop_out"),
    #("rerouters", "splitter"),
    #("triggers", "start"),
    #("triggers", "always"),
    #("triggers", "if_"),
    #("triggers", "change"),
    #("triggers", "delay"),
    ("triggers", "stop"),
    ("triggers", "state_activate"),
    ("triggers", "state_deactivate"),
    #("actuators", "object"),
    #("actuators", "motion"),
    ("actuators", "view"),
    #("actuators", "launch"),
    #("actuators", "kill"),
    #("actuators", "pause"),
    #("actuators", "resume"),
    #("actuators", "stop"),
    #("actuators", "set_property"),
    #("actuators", "message"),
    #("actuators", "action"),
    #("actuators", "parent"),
    ("actuators", "statemachine"),
    ("actuators", "state"),
)
def get_level(path):
    """Return the minimum hive level at which the node at ``path`` is shown.

    ``path`` is expected to be a tuple like ``(module, submodule, name)``;
    ``None`` (no selection) maps to level 0.  Unknown modules fall off the
    end and return None, as in the original.
    """
    if path is None:
        return 0
    module = path[0]
    if module == "sparta":
        # NOTE: this membership test relies on `path` being a tuple, since
        # advanced_sparta stores (submodule, name) tuples.
        return 2 if path[1:3] in advanced_sparta else 1
    if module in ("segments", "spyderbees"):
        return 0  # visibility depends on workergui/spydergui
    if module in ("hivemaps", "workers"):
        return 1
    if module in ("dragonfly", "spydermaps"):
        return 3
    if module == "bees":
        if len(path) == 1:
            return 0
        # "parameter"/"io" bees appear at level 3; every other bee submodule
        # at level 6.  (The original had a redundant branch that tested for
        # "attribute"/"pyattribute"/"wasp"/"part" but returned 6 either way.)
        if path[1] in ("parameter", "io"):
            return 3
        return 6
def minlevel(context, level):
    """Return True when the scene's hive level is at least ``level``.

    A malformed context (missing scene/attribute, or a hive_level string
    that is not an integer) counts as "not enough level" instead of raising.
    """
    try:
        current_level = int(context.scene.hive_level)
    except (TypeError, ValueError, AttributeError):
        # ValueError added: int() on a non-numeric hive_level string used to
        # escape this handler and propagate out of the UI callback.
        return False
    return current_level >= level
def active(context, path):
    """True when the node at ``path`` is visible at the current hive level."""
    return minlevel(context, get_level(path))
def active_workergui(context):
    """True when the worker GUI (a level-4 feature) should be shown."""
    return minlevel(context, 4)
def active_spydergui(context):
    """True when the spyder GUI (a level-5 feature) should be shown."""
    return minlevel(context, 5)
| {
"repo_name": "agoose77/hivesystem",
"path": "hiveguilib/HBlender/level.py",
"copies": "1",
"size": "2668",
"license": "bsd-2-clause",
"hash": -9034565752096989000,
"line_mean": 23.2545454545,
"line_max": 70,
"alpha_frac": 0.535982009,
"autogenerated": false,
"ratio": 3.469440832249675,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4505422841249675,
"avg_score": null,
"num_lines": null
} |
"""Advanced timeout handling.
Set of helper classes to handle timeouts of tasks with advanced options
like zones and freezing of timeouts.
"""
from __future__ import annotations
import asyncio
import enum
from types import TracebackType
from typing import Any, Dict, List, Optional, Type, Union
from .async_ import run_callback_threadsafe
ZONE_GLOBAL = "global"
class _State(str, enum.Enum):
    """States of a task."""
    INIT = "INIT"  # created but context not yet entered
    ACTIVE = "ACTIVE"  # context entered; timer may be running or frozen
    TIMEOUT = "TIMEOUT"  # timer fired; the tracked task is being cancelled
    EXIT = "EXIT"  # context exited normally
class _GlobalFreezeContext:
    """Context manager that freezes the global timeout.

    Usable both as an async context manager (from coroutines on the event
    loop) and as a plain one (from other threads, where the enter/exit work
    is marshalled onto the loop with call_soon_threadsafe).
    """
    def __init__(self, manager: TimeoutManager) -> None:
        """Initialize internal timeout context manager."""
        self._loop: asyncio.AbstractEventLoop = asyncio.get_running_loop()
        self._manager: TimeoutManager = manager
    async def __aenter__(self) -> _GlobalFreezeContext:
        self._enter()
        return self
    async def __aexit__(
        self,
        exc_type: Type[BaseException],
        exc_val: BaseException,
        exc_tb: TracebackType,
    ) -> Optional[bool]:
        self._exit()
        return None
    def __enter__(self) -> _GlobalFreezeContext:
        # Sync form: hop onto the event loop thread.
        self._loop.call_soon_threadsafe(self._enter)
        return self
    def __exit__(
        self,
        exc_type: Type[BaseException],
        exc_val: BaseException,
        exc_tb: TracebackType,
    ) -> Optional[bool]:
        # NOTE(review): returning True suppresses exceptions raised in the
        # sync with-body — preserved from the original; confirm intended.
        self._loop.call_soon_threadsafe(self._exit)
        return True
    def _enter(self) -> None:
        """Run freeze."""
        # BUGFIX: a freeze must register itself unconditionally.  The old
        # code returned early when another freeze was already active,
        # skipping the append, so the matching _exit() crashed with
        # ValueError on global_freezes.remove(self) for nested freezes.
        if self._manager.freezes_done:
            # First active freeze: pause all running global timers ...
            for task in self._manager.global_tasks:
                task.pause()
            # ... and every zone that is not itself frozen.
            for zone in self._manager.zones.values():
                if not zone.freezes_done:
                    continue
                zone.pause()
        self._manager.global_freezes.append(self)
    def _exit(self) -> None:
        """Finish freeze."""
        self._manager.global_freezes.remove(self)
        if not self._manager.freezes_done:
            return
        # Last freeze released: restart global and zone timers.
        for task in self._manager.global_tasks:
            task.reset()
        for zone in self._manager.zones.values():
            if not zone.freezes_done:
                continue
            zone.reset()
class _ZoneFreezeContext:
    """Context manager that freezes a zone timeout."""
    def __init__(self, zone: _ZoneTimeoutManager) -> None:
        """Initialize internal timeout context manager."""
        self._loop: asyncio.AbstractEventLoop = asyncio.get_running_loop()
        self._zone: _ZoneTimeoutManager = zone
    async def __aenter__(self) -> _ZoneFreezeContext:
        # Async form: already on the event loop, enter directly.
        self._enter()
        return self
    async def __aexit__(
        self,
        exc_type: Type[BaseException],
        exc_val: BaseException,
        exc_tb: TracebackType,
    ) -> Optional[bool]:
        self._exit()
        return None
    def __enter__(self) -> _ZoneFreezeContext:
        # Sync form (other threads): marshal onto the event loop.
        self._loop.call_soon_threadsafe(self._enter)
        return self
    def __exit__(
        self,
        exc_type: Type[BaseException],
        exc_val: BaseException,
        exc_tb: TracebackType,
    ) -> Optional[bool]:
        # NOTE(review): returning True suppresses exceptions raised in the
        # sync with-body — confirm this is intended.
        self._loop.call_soon_threadsafe(self._exit)
        return True
    def _enter(self) -> None:
        """Run freeze."""
        # Only pause timers on the first freeze; later freezes just register.
        if self._zone.freezes_done:
            self._zone.pause()
        self._zone.enter_freeze(self)
    def _exit(self) -> None:
        """Finish freeze."""
        self._zone.exit_freeze(self)
        if not self._zone.freezes_done:
            return
        # Last freeze in this zone released: resume its timers.
        self._zone.reset()
class _GlobalTaskContext:
    """Context manager that tracks a global task."""
    def __init__(
        self,
        manager: TimeoutManager,
        task: asyncio.Task[Any],
        timeout: float,
        cool_down: float,
    ) -> None:
        """Initialize internal timeout context manager."""
        self._loop: asyncio.AbstractEventLoop = asyncio.get_running_loop()
        self._manager: TimeoutManager = manager
        self._task: asyncio.Task[Any] = task
        # Remaining time; kept up to date across pause()/reset() cycles.
        self._time_left: float = timeout
        self._expiration_time: Optional[float] = None
        self._timeout_handler: Optional[asyncio.Handle] = None
        # Set when all zones finish or the context exits; see _on_wait().
        self._wait_zone: asyncio.Event = asyncio.Event()
        self._state: _State = _State.INIT
        # Grace period after zones finish before the task is cancelled.
        self._cool_down: float = cool_down
    async def __aenter__(self) -> _GlobalTaskContext:
        self._manager.global_tasks.append(self)
        self._start_timer()
        self._state = _State.ACTIVE
        return self
    async def __aexit__(
        self,
        exc_type: Type[BaseException],
        exc_val: BaseException,
        exc_tb: TracebackType,
    ) -> Optional[bool]:
        self._stop_timer()
        self._manager.global_tasks.remove(self)
        # Timeout on exit
        # Translate the cancellation this context caused into
        # asyncio.TimeoutError so callers see a timeout, not CancelledError.
        if exc_type is asyncio.CancelledError and self.state == _State.TIMEOUT:
            raise asyncio.TimeoutError
        self._state = _State.EXIT
        self._wait_zone.set()
        return None
    @property
    def state(self) -> _State:
        """Return state of the Global task."""
        return self._state
    def zones_done_signal(self) -> None:
        """Signal that all zones are done."""
        self._wait_zone.set()
    def _start_timer(self) -> None:
        """Start timeout handler."""
        if self._timeout_handler:
            return
        self._expiration_time = self._loop.time() + self._time_left
        self._timeout_handler = self._loop.call_at(
            self._expiration_time, self._on_timeout
        )
    def _stop_timer(self) -> None:
        """Stop zone timer."""
        if self._timeout_handler is None:
            return
        self._timeout_handler.cancel()
        self._timeout_handler = None
        # Calculate new timeout
        # Record the remaining time so a later reset() (after a freeze)
        # resumes counting here instead of restarting the full timeout.
        assert self._expiration_time
        self._time_left = self._expiration_time - self._loop.time()
    def _on_timeout(self) -> None:
        """Process timeout."""
        self._state = _State.TIMEOUT
        self._timeout_handler = None
        # Reset timer if zones are running
        # Active zones get to finish first; the cancellation then happens
        # in _on_wait() once they signal completion.
        if not self._manager.zones_done:
            asyncio.create_task(self._on_wait())
        else:
            self._cancel_task()
    def _cancel_task(self) -> None:
        """Cancel own task."""
        if self._task.done():
            return
        self._task.cancel()
    def pause(self) -> None:
        """Pause timers while it freeze."""
        self._stop_timer()
    def reset(self) -> None:
        """Reset timer after freeze."""
        self._start_timer()
    async def _on_wait(self) -> None:
        """Wait until zones are done."""
        await self._wait_zone.wait()
        await asyncio.sleep(self._cool_down)  # Allow context switch
        # The task may have exited normally while we waited for zones.
        if not self.state == _State.TIMEOUT:
            return
        self._cancel_task()
class _ZoneTaskContext:
    """Context manager that tracks an active task for a zone."""
    def __init__(
        self,
        zone: _ZoneTimeoutManager,
        task: asyncio.Task[Any],
        timeout: float,
    ) -> None:
        """Initialize internal timeout context manager."""
        self._loop: asyncio.AbstractEventLoop = asyncio.get_running_loop()
        self._zone: _ZoneTimeoutManager = zone
        self._task: asyncio.Task[Any] = task
        self._state: _State = _State.INIT
        # Remaining time; updated by _stop_timer() so freezes resume
        # counting where they left off.
        self._time_left: float = timeout
        self._expiration_time: Optional[float] = None
        self._timeout_handler: Optional[asyncio.Handle] = None
    @property
    def state(self) -> _State:
        """Return state of the Zone task."""
        return self._state
    async def __aenter__(self) -> _ZoneTaskContext:
        self._zone.enter_task(self)
        self._state = _State.ACTIVE
        # Zone is on freeze
        # Only start counting down when the zone is not currently frozen.
        if self._zone.freezes_done:
            self._start_timer()
        return self
    async def __aexit__(
        self,
        exc_type: Type[BaseException],
        exc_val: BaseException,
        exc_tb: TracebackType,
    ) -> Optional[bool]:
        self._zone.exit_task(self)
        self._stop_timer()
        # Timeout on exit
        # Translate our own cancellation into asyncio.TimeoutError.
        if exc_type is asyncio.CancelledError and self.state == _State.TIMEOUT:
            raise asyncio.TimeoutError
        self._state = _State.EXIT
        return None
    def _start_timer(self) -> None:
        """Start timeout handler."""
        if self._timeout_handler:
            return
        self._expiration_time = self._loop.time() + self._time_left
        self._timeout_handler = self._loop.call_at(
            self._expiration_time, self._on_timeout
        )
    def _stop_timer(self) -> None:
        """Stop zone timer."""
        if self._timeout_handler is None:
            return
        self._timeout_handler.cancel()
        self._timeout_handler = None
        # Calculate new timeout
        # Keep the remaining time for a later reset() after a freeze.
        assert self._expiration_time
        self._time_left = self._expiration_time - self._loop.time()
    def _on_timeout(self) -> None:
        """Process timeout."""
        self._state = _State.TIMEOUT
        self._timeout_handler = None
        # Timeout
        if self._task.done():
            return
        self._task.cancel()
    def pause(self) -> None:
        """Pause timers while it freeze."""
        self._stop_timer()
    def reset(self) -> None:
        """Reset timer after freeze."""
        self._start_timer()
class _ZoneTimeoutManager:
    """Manage the timeouts for a zone."""
    def __init__(self, manager: TimeoutManager, zone: str) -> None:
        """Initialize internal timeout context manager."""
        self._manager: TimeoutManager = manager
        self._zone: str = zone
        self._tasks: List[_ZoneTaskContext] = []
        self._freezes: List[_ZoneFreezeContext] = []
    @property
    def name(self) -> str:
        """Return Zone name."""
        return self._zone
    @property
    def active(self) -> bool:
        """Return True if zone is active."""
        return len(self._tasks) > 0 or len(self._freezes) > 0
    @property
    def freezes_done(self) -> bool:
        """Return True if all freeze are done."""
        # A global freeze also freezes every zone.
        return len(self._freezes) == 0 and self._manager.freezes_done
    def enter_task(self, task: _ZoneTaskContext) -> None:
        """Start into new Task."""
        self._tasks.append(task)
    def exit_task(self, task: _ZoneTaskContext) -> None:
        """Exit a running Task."""
        self._tasks.remove(task)
        # On latest listener
        # Last task/freeze gone: the manager forgets this zone entirely.
        if not self.active:
            self._manager.drop_zone(self.name)
    def enter_freeze(self, freeze: _ZoneFreezeContext) -> None:
        """Start into new freeze."""
        self._freezes.append(freeze)
    def exit_freeze(self, freeze: _ZoneFreezeContext) -> None:
        """Exit a running Freeze."""
        self._freezes.remove(freeze)
        # On latest listener
        if not self.active:
            self._manager.drop_zone(self.name)
    def pause(self) -> None:
        """Stop timers while it freeze."""
        if not self.active:
            return
        # Forward pause
        for task in self._tasks:
            task.pause()
    def reset(self) -> None:
        """Reset timer after freeze."""
        if not self.active:
            return
        # Forward reset
        for task in self._tasks:
            task.reset()
class TimeoutManager:
    """Class to manage timeouts over different zones.
    Manages both global and zone based timeouts.
    """
    def __init__(self) -> None:
        """Initialize TimeoutManager."""
        self._loop: asyncio.AbstractEventLoop = asyncio.get_running_loop()
        self._zones: Dict[str, _ZoneTimeoutManager] = {}
        self._globals: List[_GlobalTaskContext] = []
        self._freezes: List[_GlobalFreezeContext] = []
    @property
    def zones_done(self) -> bool:
        """Return True if all zones are finished."""
        return not bool(self._zones)
    @property
    def freezes_done(self) -> bool:
        """Return True if all freezes are finished."""
        return not self._freezes
    @property
    def zones(self) -> Dict[str, _ZoneTimeoutManager]:
        """Return all Zones."""
        return self._zones
    @property
    def global_tasks(self) -> List[_GlobalTaskContext]:
        """Return all global Tasks."""
        return self._globals
    @property
    def global_freezes(self) -> List[_GlobalFreezeContext]:
        """Return all global Freezes."""
        return self._freezes
    def drop_zone(self, zone_name: str) -> None:
        """Drop a zone out of scope."""
        self._zones.pop(zone_name, None)
        if self._zones:
            return
        # Signal Global task, all zones are done
        # Global tasks that already timed out are waiting on this signal
        # before cancelling their task (see _GlobalTaskContext._on_wait).
        for task in self._globals:
            task.zones_done_signal()
    def async_timeout(
        self, timeout: float, zone_name: str = ZONE_GLOBAL, cool_down: float = 0
    ) -> Union[_ZoneTaskContext, _GlobalTaskContext]:
        """Timeout based on a zone.
        For using as Async Context Manager.
        """
        current_task: Optional[asyncio.Task[Any]] = asyncio.current_task()
        assert current_task
        # Global Zone
        if zone_name == ZONE_GLOBAL:
            task = _GlobalTaskContext(self, current_task, timeout, cool_down)
            return task
        # Zone Handling
        # Reuse an active zone manager or lazily create one.
        if zone_name in self.zones:
            zone: _ZoneTimeoutManager = self.zones[zone_name]
        else:
            self.zones[zone_name] = zone = _ZoneTimeoutManager(self, zone_name)
        # Create Task
        return _ZoneTaskContext(zone, current_task, timeout)
    def async_freeze(
        self, zone_name: str = ZONE_GLOBAL
    ) -> Union[_ZoneFreezeContext, _GlobalFreezeContext]:
        """Freeze all timer until job is done.
        For using as Async Context Manager.
        """
        # Global Freeze
        if zone_name == ZONE_GLOBAL:
            return _GlobalFreezeContext(self)
        # Zone Freeze
        if zone_name in self.zones:
            zone: _ZoneTimeoutManager = self.zones[zone_name]
        else:
            self.zones[zone_name] = zone = _ZoneTimeoutManager(self, zone_name)
        return _ZoneFreezeContext(zone)
    def freeze(
        self, zone_name: str = ZONE_GLOBAL
    ) -> Union[_ZoneFreezeContext, _GlobalFreezeContext]:
        """Freeze all timer until job is done.
        For using as Context Manager.
        """
        # Thread-safe entry point: create the freeze on the event loop and
        # wait for the result from the calling thread.
        return run_callback_threadsafe(
            self._loop, self.async_freeze, zone_name
        ).result()
| {
"repo_name": "GenericStudent/home-assistant",
"path": "homeassistant/util/timeout.py",
"copies": "6",
"size": "14604",
"license": "apache-2.0",
"hash": 2547671032862168600,
"line_mean": 27.7480314961,
"line_max": 80,
"alpha_frac": 0.5780608053,
"autogenerated": false,
"ratio": 4.106861642294713,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00002430251774083795,
"num_lines": 508
} |
"""Advanced tools for dense recursive polynomials in ``K[x]`` or ``K[X]``."""
from .densearith import (dmp_add, dmp_add_term, dmp_div, dmp_exquo_ground,
dmp_mul, dmp_mul_ground, dmp_neg, dmp_sub, dup_add,
dup_mul)
from .densebasic import (dmp_convert, dmp_degree_in, dmp_from_dict, dmp_ground,
dmp_ground_LC, dmp_LC, dmp_strip, dmp_TC, dmp_to_dict,
dmp_zero, dmp_zero_p)
from .polyerrors import DomainError
def dmp_diff_in(f, m, j, u, K):
    """
    ``m``-th order derivative in ``x_j`` of a polynomial in ``K[X]``.
    Examples
    ========
    >>> R, x, y = ring('x y', ZZ)
    >>> f = x*y**2 + 2*x*y + 3*x + 2*y**2 + 3*y + 1
    >>> f.diff()
    y**2 + 2*y + 3
    >>> f.diff(y)
    2*x*y + 2*x + 4*y + 3
    """
    # Round-trip through a throwaway polynomial ring with u + 1 generators
    # and delegate the differentiation to the ring element.
    gens = ['_%d' % i for i in range(u + 1)]
    tmp_ring = K.poly_ring(*gens)
    poly = tmp_ring.from_dense(f)
    return tmp_ring.to_dense(poly.diff(x=j, m=m))
def dmp_eval_in(f, a, j, u, K):
    """
    Evaluate a polynomial at ``x_j = a`` in ``K[X]`` using the Horner scheme.
    Examples
    ========
    >>> R, x, y = ring('x y', ZZ)
    >>> f = 2*x*y + 3*x + y + 2
    >>> R.dmp_eval_in(f, 2, 0)
    5*y + 8
    >>> R.dmp_eval_in(f, 2, 1)
    7*x + 4
    """
    if j < 0 or j > u:
        raise IndexError(f'0 <= j <= {u} expected, got {j}')
    if not j:
        # Evaluating in the outermost variable: plain Horner scheme.
        if not a:
            # At zero only the trailing coefficient survives.
            return dmp_TC(f, K)
        result, v = dmp_LC(f, K), u - 1
        if u:
            # Multivariate: the coefficients are polynomials in u-1 vars.
            for coeff in f[1:]:
                result = dmp_mul_ground(result, a, v, K)
                result = dmp_add(result, coeff, v, K)
        else:
            # Univariate: coefficients live in the ground domain.
            for coeff in f[1:]:
                result *= a
                result += coeff
        return result
    def eval_in(g, a, v, i, j, K):
        # Recurse through the levels until level j, then evaluate there.
        if i == j:
            return dmp_eval_in(g, a, 0, v, K)
        v, i = v - 1, i + 1
        return dmp_strip([eval_in(c, a, v, i, j, K) for c in g], v)
    return eval_in(f, a, u, 0, j, K)
def dmp_eval_tail(f, A, u, K):
    """
    Evaluate a polynomial at ``x_j = a_j, ...`` in ``K[X]``.
    Examples
    ========
    >>> R, x, y = ring('x y', ZZ)
    >>> f = 2*x*y + 3*x + y + 2
    >>> R.dmp_eval_tail(f, [2])
    7*x + 4
    >>> R.dmp_eval_tail(f, [2, 2])
    18
    """
    # Nothing to substitute.
    if not A:
        return f
    # The zero polynomial stays zero, just with fewer variables.
    if dmp_zero_p(f, u):
        return dmp_zero(u - len(A))
    def eval_tail(g, i, A, u, K):
        # Innermost level: evaluate directly at the last tail value.
        if i == u:
            return dmp_eval_in(g, A[-1], 0, 0, K)
        else:
            # Evaluate all coefficients one level down first ...
            h = [eval_tail(c, i + 1, A, u, K) for c in g]
            # ... then evaluate this level too if it is part of the tail.
            if i < u - len(A) + 1:
                return h
            else:
                return dmp_eval_in(h, A[-u + i - 1], 0, 0, K)
    e = eval_tail(f, 0, A, u, K)
    # Fully evaluated down to a ground element vs. still a polynomial.
    if u == len(A) - 1:
        return e
    else:
        return dmp_strip(e, u - len(A))
def dmp_diff_eval_in(f, m, a, j, u, K):
    """
    Differentiate and evaluate a polynomial in ``x_j`` at ``a`` in ``K[X]``.
    Examples
    ========
    >>> R, x, y = ring('x y', ZZ)
    >>> f = x*y**2 + 2*x*y + 3*x + 2*y**2 + 3*y + 1
    >>> R.dmp_diff_eval_in(f, 1, 2, 0)
    y**2 + 2*y + 3
    >>> R.dmp_diff_eval_in(f, 1, 2, 1)
    6*x + 11
    """
    # Validate the variable index the same way dmp_eval_in does.  The
    # original only rejected j > u (with a misleading message) and let a
    # negative j fall through into the recursion.
    if j < 0 or j > u:
        raise IndexError(f'0 <= j <= {u} expected, got {j}')
    if not j:
        # Outermost variable: differentiate, then evaluate — no recursion.
        return dmp_eval_in(dmp_diff_in(f, m, 0, u, K), a, 0, u, K)
    def diff_eval(g, m, a, v, i, j, K):
        # Descend through the levels to j, then diff + eval there.
        if i == j:
            return dmp_eval_in(dmp_diff_in(g, m, 0, v, K), a, 0, v, K)
        v, i = v - 1, i + 1
        return dmp_strip([diff_eval(c, m, a, v, i, j, K) for c in g], v)
    return diff_eval(f, m, a, u, 0, j, K)
def dup_trunc(f, p, K):
    """
    Reduce a ``K[x]`` polynomial modulo a constant ``p`` in ``K``.
    Examples
    ========
    >>> R, x = ring('x', ZZ)
    >>> R.dmp_ground_trunc(2*x**3 + 3*x**2 + 5*x + 7, ZZ(3))
    -x**3 - x + 1
    """
    from ..ntheory.modular import symmetric_residue
    if K.is_IntegerRing:
        # Over the integers use symmetric (balanced) residues in (-p/2, p/2].
        reduced = [symmetric_residue(c % p, p) for c in f]
    else:
        reduced = [c % p for c in f]
    return dmp_strip(reduced, 0)
def dmp_ground_trunc(f, p, u, K):
"""
Reduce a ``K[X]`` polynomial modulo a constant ``p`` in ``K``.
Examples
========
>>> R, x, y = ring('x y', ZZ)
>>> f = 3*x**2*y + 8*x**2 + 5*x*y + 6*x + 2*y + 3
>>> R.dmp_ground_trunc(f, ZZ(3))
-x**2 - x*y - y
"""
if not u:
return dup_trunc(f, p, K)
v = u - 1
return dmp_strip([dmp_ground_trunc(c, p, v, K) for c in f], u)
def dmp_ground_monic(f, u, K):
"""
Divide all coefficients by ``LC(f)`` in ``K[X]``.
Examples
========
>>> R, x, y = ring('x y', ZZ)
>>> f = 3*x**2*y + 6*x**2 + 3*x*y + 9*y + 3
>>> R.dmp_ground_monic(f)
x**2*y + 2*x**2 + x*y + 3*y + 1
>>> R, x, y = ring('x y', QQ)
>>> f = 3*x**2*y + 8*x**2 + 5*x*y + 6*x + 2*y + 3
>>> R.dmp_ground_monic(f)
x**2*y + 8/3*x**2 + 5/3*x*y + 2*x + 2/3*y + 1
"""
if dmp_zero_p(f, u):
return f
lc = dmp_ground_LC(f, u, K)
if lc == K.one:
return f
else:
return dmp_exquo_ground(f, lc, u, K)
def dmp_ground_content(f, u, K):
"""
Compute the GCD of coefficients of ``f`` in ``K[X]``.
Examples
========
>>> R, x, y = ring('x y', ZZ)
>>> f = 2*x*y + 6*x + 4*y + 12
>>> f.content()
2
>>> R, x, y = ring('x y', QQ)
>>> f = 2*x*y + 6*x + 4*y + 12
>>> f.content()
2
"""
ring = K.poly_ring(*[f'_{i}' for i in range(u + 1)])
f = ring.from_dense(f)
return f.content()
def dmp_ground_primitive(f, u, K):
"""
Compute content and the primitive form of ``f`` in ``K[X]``.
Examples
========
>>> R, x, y = ring('x y', ZZ)
>>> f = 2*x*y + 6*x + 4*y + 12
>>> f.primitive()
(2, x*y + 3*x + 2*y + 6)
>>> R, x, y = ring('x y', QQ)
>>> f = 2*x*y + 6*x + 4*y + 12
>>> f.primitive()
(2, x*y + 3*x + 2*y + 6)
"""
ring = K.poly_ring(*[f'_{i}' for i in range(u + 1)])
f = ring.from_dense(f)
cont, p = f.primitive()
return cont, ring.to_dense(p)
def dup_real_imag(f, K):
    """
    Return bivariate polynomials ``f1`` and ``f2``, such that ``f = f1 + f2*I``.
    Examples
    ========
    >>> R, x, y = ring('x y', ZZ)
    >>> R.dup_real_imag(x**3 + x**2 + x + 1)
    (x**3 + x**2 - 3*x*y**2 + x - y**2 + 1, 3*x**2*y + 2*x*y - y**3 + y)
    >>> R, x, y = ring('x y', QQ.algebraic_field(I))
    >>> R.dup_real_imag(x**2 + I*x - 1)
    (x**2 - y**2 - y - 1, 2*x*y + x)
    """
    if K.is_ComplexAlgebraicField:
        # Split the coefficients into real and imaginary parts, recurse on
        # each, and recombine: (r1 + i1*I) + (r2 + i2*I)*I = (r1 - i2) + (i1 + r2)*I.
        K0 = K.domain
        r1, i1 = dup_real_imag([_.real for _ in f], K0)
        r2, i2 = dup_real_imag([_.imag for _ in f], K0)
        return dmp_add(r1, dmp_neg(i2, 1, K0), 1, K0), dmp_add(r2, i1, 1, K0)
    elif not K.is_IntegerRing and not K.is_RationalField and not K.is_RealAlgebraicField:
        raise DomainError(f'computing real and imaginary parts is not supported over {K}')
    f1 = dmp_zero(1)
    f2 = dmp_zero(1)
    if not f:
        return f1, f2
    # Evaluate f at x + I*y by Horner's scheme; ``g`` encodes x + I*y as a
    # trivariate polynomial whose outermost variable stands for I.
    g = [[[K.one, K.zero]], [[K.one], []]]
    h = dmp_ground(f[0], 2)
    for c in f[1:]:
        h = dmp_mul(h, g, 2, K)
        h = dmp_add_term(h, dmp_ground(c, 1), 0, 2, K)
    # Reduce the powers of I modulo I**2 == -1: I**k depends only on k % 4.
    H = dmp_to_dict(h, 0)
    for (k,), h in H.items():
        m = k % 4
        if not m:
            f1 = dmp_add(f1, h, 1, K)  # I**k == 1
        elif m == 1:
            f2 = dmp_add(f2, h, 1, K)  # I**k == I
        elif m == 2:
            f1 = dmp_sub(f1, h, 1, K)  # I**k == -1
        else:
            f2 = dmp_sub(f2, h, 1, K)  # I**k == -I
    return f1, f2
def dup_mirror(f, K):
    """
    Evaluate efficiently the composition ``f(-x)`` in ``K[x]``.
    Examples
    ========
    >>> R, x = ring('x', ZZ)
    >>> R.dup_mirror(x**3 + 2*x**2 - 4*x + 2)
    -x**3 + 2*x**2 + 4*x + 2
    """
    # Substituting -x for x negates exactly the odd-degree coefficients.
    # In the dense representation f[k] carries x**(deg - k), so the parity
    # of (deg - k) decides the sign.
    deg = len(f) - 1
    return [-c if (deg - k) % 2 else c for k, c in enumerate(f)]
def dup_scale(f, a, K):
    """
    Evaluate efficiently composition ``f(a*x)`` in ``K[x]``.
    Examples
    ========
    >>> R, x = ring('x', ZZ)
    >>> R.dup_scale(x**2 - 2*x + 1, ZZ(2))
    4*x**2 - 4*x + 1
    """
    # The coefficient of x**k picks up a factor a**k.  Walk from the
    # coefficient of x**1 upwards, accumulating the powers of ``a``.
    g = list(f)
    factor = a
    for idx in reversed(range(len(g) - 1)):
        g[idx] = factor*g[idx]
        factor = factor*a
    return g
def dup_shift(f, a, K):
    """
    Evaluate efficiently Taylor shift ``f(x + a)`` in ``K[x]``.
    Examples
    ========
    >>> R, x = ring('x', ZZ)
    >>> R.dup_shift(x**2 - 2*x + 1, ZZ(2))
    x**2 + 2*x + 1
    """
    # Classical synthetic-division (Horner) scheme for the Taylor shift:
    # repeatedly fold ``a`` into the lower coefficients.
    coeffs = list(f)
    for stop in range(len(coeffs) - 1, 0, -1):
        for k in range(1, stop + 1):
            coeffs[k] = coeffs[k] + a*coeffs[k - 1]
    return coeffs
def dup_transform(f, p, q, K):
"""
Evaluate functional transformation ``q**n * f(p/q)`` in ``K[x]``.
Examples
========
>>> R, x = ring('x', ZZ)
>>> R.dup_transform(x**2 - 2*x + 1, x**2 + 1, x - 1)
x**4 - 2*x**3 + 5*x**2 - 4*x + 4
"""
if not f:
return []
n = len(f) - 1
h, Q = [f[0]], [[K.one]]
for i in range(n):
Q.append(dup_mul(Q[-1], q, K))
for c, q in zip(f[1:], Q[1:]):
h = dup_mul(h, p, K)
q = dmp_mul_ground(q, c, 0, K)
h = dup_add(h, q, K)
return h
def dmp_compose(f, g, u, K):
"""
Evaluate functional composition ``f(g)`` in ``K[X]``.
Examples
========
>>> R, x, y = ring('x y', ZZ)
>>> R.dmp_compose(x*y + 2*x + y, y)
y**2 + 3*y
"""
if dmp_zero_p(f, u):
return f
h = [f[0]]
for c in f[1:]:
h = dmp_mul(h, g, u, K)
h = dmp_add_term(h, c, 0, u, K)
return h
def _dup_right_decompose(f, s, K):
    # Compute a monic right factor h of degree s for a functional
    # decomposition f = g o h (Kozen-Landau).  The dict ``g`` below actually
    # holds h: starting from the leading term x**s, each lower coefficient
    # is determined from the top coefficients of f by a linear recurrence.
    n = len(f) - 1
    lc = dmp_LC(f, K)
    f = dmp_to_dict(f, 0)
    g = {(s,): K.one}  # h is monic: start with x**s
    r = n // s  # degree of the (unknown) left factor
    for i in range(1, s):
        coeff = K.zero
        for j in range(i):
            # Missing keys correspond to zero coefficients of f.
            if not (n + j - i,) in f:
                continue
            assert (s - j,) in g
            fc, gc = f[(n + j - i,)], g[(s - j,)]
            coeff += (i - r*j)*fc*gc
        g[(s - i,)] = K.quo(coeff, i*r*lc)
    return dmp_from_dict(g, 0, K)
def _dup_left_decompose(f, h, K):
    # Recover the left factor g with f = g(h) by expanding f in the h-adic
    # form f = sum(c_i * h**i).  The expansion exists iff every remainder in
    # the repeated division is a ground element; otherwise return None
    # (implicitly) to signal that h is not a right factor of f.
    g, i = {}, 0
    while f:
        q, r = dmp_div(f, h, 0, K)
        if dmp_degree_in(r, 0, 0) > 0:
            return
        else:
            g[(i,)] = dmp_LC(r, K)
            f, i = q, i + 1
    return dmp_from_dict(g, 0, K)
def _dup_decompose(f, K):
    # Try every candidate degree s for a right factor (s must properly
    # divide deg(f)); return the first split (g, h) found, or None
    # (implicitly) if no nontrivial decomposition exists.
    df = len(f) - 1
    for s in range(2, df):
        if df % s != 0:
            continue
        h = _dup_right_decompose(f, s, K)
        g = _dup_left_decompose(f, h, K)
        if g is not None:
            return g, h
def dup_decompose(f, K):
"""
Compute functional decomposition of ``f`` in ``K[x]``.
Given a univariate polynomial ``f`` with coefficients in a field of
characteristic zero, returns list ``[f_1, f_2, ..., f_n]``, where::
f = f_1 o f_2 o ... f_n = f_1(f_2(... f_n))
and ``f_2, ..., f_n`` are monic and homogeneous polynomials of at
least second degree.
Unlike factorization, complete functional decompositions of
polynomials are not unique, consider examples:
1. ``f o g = f(x + b) o (g - b)``
2. ``x**n o x**m = x**m o x**n``
3. ``T_n o T_m = T_m o T_n``
where ``T_n`` and ``T_m`` are Chebyshev polynomials.
Examples
========
>>> R, x = ring('x', ZZ)
>>> (x**4 - 2*x**3 + x**2).decompose()
[x**2, x**2 - x]
References
==========
* :cite:`Kozen1989decomposition`
"""
F = []
while True:
result = _dup_decompose(f, K)
if result is not None:
f, h = result
F = [h] + F
else:
break
return [f] + F
def dmp_clear_denoms(f, u, K0, K1=None, convert=False):
    """
    Clear denominators, i.e. transform ``K_0`` to ``K_1``.
    Examples
    ========
    >>> R, x, y = ring('x y', QQ)
    >>> f = x/2 + y/3 + 1
    >>> R.dmp_clear_denoms(f, convert=False)
    (6, 3*x + 2*y + 6)
    >>> R.dmp_clear_denoms(f, convert=True)
    (6, 3*x + 2*y + 6)
    """
    # Default target domain: the ring associated with K0 (e.g. ZZ for QQ),
    # falling back to K0 itself when no such ring exists.
    if K1 is None:
        if K0.has_assoc_Ring:
            K1 = K0.ring
        else:
            K1 = K0
    def clear_denoms(g, v, K0, K1):
        # Recursively compute the LCM of all coefficient denominators.
        common = K1.one
        if not v:
            for c in g:
                common = K1.lcm(common, c.denominator)
        else:
            w = v - 1
            for c in g:
                common = K1.lcm(common, clear_denoms(c, w, K0, K1))
        return common
    common = clear_denoms(f, u, K0, K1)
    # Multiplying by the common denominator makes every coefficient integral.
    f = dmp_mul_ground(f, common, u, K0)
    if not convert:
        return common, f
    else:
        # Also move the result into the target domain K1.
        return common, dmp_convert(f, u, K0, K1)
| {
"repo_name": "skirpichev/omg",
"path": "diofant/polys/densetools.py",
"copies": "1",
"size": "12628",
"license": "bsd-3-clause",
"hash": -2260708377628337700,
"line_mean": 19.6339869281,
"line_max": 90,
"alpha_frac": 0.4302343997,
"autogenerated": false,
"ratio": 2.6496013428451533,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8579554322601424,
"avg_score": 0.00005628398874586239,
"num_lines": 612
} |
"""Advanced tools for dense recursive polynomials in ``K[x]`` or ``K[X]``. """
from __future__ import print_function, division
from sympy.core.compatibility import range
from sympy.polys.densearith import (
dup_add_term, dmp_add_term,
dup_lshift,
dup_add, dmp_add,
dup_sub, dmp_sub,
dup_mul, dmp_mul,
dup_sqr,
dup_div,
dup_rem, dmp_rem,
dmp_expand,
dup_mul_ground, dmp_mul_ground,
dup_quo_ground, dmp_quo_ground,
dup_exquo_ground, dmp_exquo_ground,
)
from sympy.polys.densebasic import (
dup_strip, dmp_strip,
dup_convert, dmp_convert,
dup_degree, dmp_degree,
dmp_to_dict,
dmp_from_dict,
dup_LC, dmp_LC, dmp_ground_LC,
dup_TC, dmp_TC,
dmp_zero, dmp_ground,
dmp_zero_p,
dup_to_raw_dict, dup_from_raw_dict,
dmp_zeros
)
from sympy.polys.polyerrors import (
MultivariatePolynomialError,
DomainError
)
from sympy.utilities import variations
from math import ceil as _ceil, log as _log
def dup_integrate(f, m, K):
"""
Computes the indefinite integral of ``f`` in ``K[x]``.
Examples
========
>>> from sympy.polys import ring, QQ
>>> R, x = ring("x", QQ)
>>> R.dup_integrate(x**2 + 2*x, 1)
1/3*x**3 + x**2
>>> R.dup_integrate(x**2 + 2*x, 2)
1/12*x**4 + 1/3*x**3
"""
if m <= 0 or not f:
return f
g = [K.zero]*m
for i, c in enumerate(reversed(f)):
n = i + 1
for j in range(1, m):
n *= i + j + 1
g.insert(0, K.exquo(c, K(n)))
return g
def dmp_integrate(f, m, u, K):
"""
Computes the indefinite integral of ``f`` in ``x_0`` in ``K[X]``.
Examples
========
>>> from sympy.polys import ring, QQ
>>> R, x,y = ring("x,y", QQ)
>>> R.dmp_integrate(x + 2*y, 1)
1/2*x**2 + 2*x*y
>>> R.dmp_integrate(x + 2*y, 2)
1/6*x**3 + x**2*y
"""
if not u:
return dup_integrate(f, m, K)
if m <= 0 or dmp_zero_p(f, u):
return f
g, v = dmp_zeros(m, u - 1, K), u - 1
for i, c in enumerate(reversed(f)):
n = i + 1
for j in range(1, m):
n *= i + j + 1
g.insert(0, dmp_quo_ground(c, K(n), v, K))
return g
def _rec_integrate_in(g, m, v, i, j, K):
"""Recursive helper for :func:`dmp_integrate_in`."""
if i == j:
return dmp_integrate(g, m, v, K)
w, i = v - 1, i + 1
return dmp_strip([ _rec_integrate_in(c, m, w, i, j, K) for c in g ], v)
def dmp_integrate_in(f, m, j, u, K):
"""
Computes the indefinite integral of ``f`` in ``x_j`` in ``K[X]``.
Examples
========
>>> from sympy.polys import ring, QQ
>>> R, x,y = ring("x,y", QQ)
>>> R.dmp_integrate_in(x + 2*y, 1, 0)
1/2*x**2 + 2*x*y
>>> R.dmp_integrate_in(x + 2*y, 1, 1)
x*y + y**2
"""
if j < 0 or j > u:
raise IndexError("0 <= j <= u expected, got u = %d, j = %d" % (u, j))
return _rec_integrate_in(f, m, u, 0, j, K)
def dup_diff(f, m, K):
"""
``m``-th order derivative of a polynomial in ``K[x]``.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x = ring("x", ZZ)
>>> R.dup_diff(x**3 + 2*x**2 + 3*x + 4, 1)
3*x**2 + 4*x + 3
>>> R.dup_diff(x**3 + 2*x**2 + 3*x + 4, 2)
6*x + 4
"""
if m <= 0:
return f
n = dup_degree(f)
if n < m:
return []
deriv = []
if m == 1:
for coeff in f[:-m]:
deriv.append(K(n)*coeff)
n -= 1
else:
for coeff in f[:-m]:
k = n
for i in range(n - 1, n - m, -1):
k *= i
deriv.append(K(k)*coeff)
n -= 1
return dup_strip(deriv)
def dmp_diff(f, m, u, K):
"""
``m``-th order derivative in ``x_0`` of a polynomial in ``K[X]``.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x,y = ring("x,y", ZZ)
>>> f = x*y**2 + 2*x*y + 3*x + 2*y**2 + 3*y + 1
>>> R.dmp_diff(f, 1)
y**2 + 2*y + 3
>>> R.dmp_diff(f, 2)
0
"""
if not u:
return dup_diff(f, m, K)
if m <= 0:
return f
n = dmp_degree(f, u)
if n < m:
return dmp_zero(u)
deriv, v = [], u - 1
if m == 1:
for coeff in f[:-m]:
deriv.append(dmp_mul_ground(coeff, K(n), v, K))
n -= 1
else:
for coeff in f[:-m]:
k = n
for i in range(n - 1, n - m, -1):
k *= i
deriv.append(dmp_mul_ground(coeff, K(k), v, K))
n -= 1
return dmp_strip(deriv, u)
def _rec_diff_in(g, m, v, i, j, K):
"""Recursive helper for :func:`dmp_diff_in`."""
if i == j:
return dmp_diff(g, m, v, K)
w, i = v - 1, i + 1
return dmp_strip([ _rec_diff_in(c, m, w, i, j, K) for c in g ], v)
def dmp_diff_in(f, m, j, u, K):
"""
``m``-th order derivative in ``x_j`` of a polynomial in ``K[X]``.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x,y = ring("x,y", ZZ)
>>> f = x*y**2 + 2*x*y + 3*x + 2*y**2 + 3*y + 1
>>> R.dmp_diff_in(f, 1, 0)
y**2 + 2*y + 3
>>> R.dmp_diff_in(f, 1, 1)
2*x*y + 2*x + 4*y + 3
"""
if j < 0 or j > u:
raise IndexError("0 <= j <= %s expected, got %s" % (u, j))
return _rec_diff_in(f, m, u, 0, j, K)
def dup_eval(f, a, K):
    """
    Evaluate a polynomial at ``x = a`` in ``K[x]`` using Horner scheme.
    Examples
    ========
    >>> from sympy.polys import ring, ZZ
    >>> R, x = ring("x", ZZ)
    >>> R.dup_eval(x**2 + 2*x + 3, 2)
    11
    """
    # f(0) is simply the trailing coefficient.
    if not a:
        return dup_TC(f, K)
    # Horner's scheme: fold the coefficients from the leading one downwards.
    acc = K.zero
    for coeff in f:
        acc = acc*a + coeff
    return acc
def dmp_eval(f, a, u, K):
"""
Evaluate a polynomial at ``x_0 = a`` in ``K[X]`` using the Horner scheme.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x,y = ring("x,y", ZZ)
>>> R.dmp_eval(2*x*y + 3*x + y + 2, 2)
5*y + 8
"""
if not u:
return dup_eval(f, a, K)
if not a:
return dmp_TC(f, K)
result, v = dmp_LC(f, K), u - 1
for coeff in f[1:]:
result = dmp_mul_ground(result, a, v, K)
result = dmp_add(result, coeff, v, K)
return result
def _rec_eval_in(g, a, v, i, j, K):
"""Recursive helper for :func:`dmp_eval_in`."""
if i == j:
return dmp_eval(g, a, v, K)
v, i = v - 1, i + 1
return dmp_strip([ _rec_eval_in(c, a, v, i, j, K) for c in g ], v)
def dmp_eval_in(f, a, j, u, K):
"""
Evaluate a polynomial at ``x_j = a`` in ``K[X]`` using the Horner scheme.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x,y = ring("x,y", ZZ)
>>> f = 2*x*y + 3*x + y + 2
>>> R.dmp_eval_in(f, 2, 0)
5*y + 8
>>> R.dmp_eval_in(f, 2, 1)
7*x + 4
"""
if j < 0 or j > u:
raise IndexError("0 <= j <= %s expected, got %s" % (u, j))
return _rec_eval_in(f, a, u, 0, j, K)
def _rec_eval_tail(g, i, A, u, K):
"""Recursive helper for :func:`dmp_eval_tail`."""
if i == u:
return dup_eval(g, A[-1], K)
else:
h = [ _rec_eval_tail(c, i + 1, A, u, K) for c in g ]
if i < u - len(A) + 1:
return h
else:
return dup_eval(h, A[-u + i - 1], K)
def dmp_eval_tail(f, A, u, K):
"""
Evaluate a polynomial at ``x_j = a_j, ...`` in ``K[X]``.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x,y = ring("x,y", ZZ)
>>> f = 2*x*y + 3*x + y + 2
>>> R.dmp_eval_tail(f, [2])
7*x + 4
>>> R.dmp_eval_tail(f, [2, 2])
18
"""
if not A:
return f
if dmp_zero_p(f, u):
return dmp_zero(u - len(A))
e = _rec_eval_tail(f, 0, A, u, K)
if u == len(A) - 1:
return e
else:
return dmp_strip(e, u - len(A))
def _rec_diff_eval(g, m, a, v, i, j, K):
"""Recursive helper for :func:`dmp_diff_eval`."""
if i == j:
return dmp_eval(dmp_diff(g, m, v, K), a, v, K)
v, i = v - 1, i + 1
return dmp_strip([ _rec_diff_eval(c, m, a, v, i, j, K) for c in g ], v)
def dmp_diff_eval_in(f, m, a, j, u, K):
"""
Differentiate and evaluate a polynomial in ``x_j`` at ``a`` in ``K[X]``.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x,y = ring("x,y", ZZ)
>>> f = x*y**2 + 2*x*y + 3*x + 2*y**2 + 3*y + 1
>>> R.dmp_diff_eval_in(f, 1, 2, 0)
y**2 + 2*y + 3
>>> R.dmp_diff_eval_in(f, 1, 2, 1)
6*x + 11
"""
if j > u:
raise IndexError("-%s <= j < %s expected, got %s" % (u, u, j))
if not j:
return dmp_eval(dmp_diff(f, m, u, K), a, u, K)
return _rec_diff_eval(f, m, a, u, 0, j, K)
def dup_trunc(f, p, K):
"""
Reduce a ``K[x]`` polynomial modulo a constant ``p`` in ``K``.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x = ring("x", ZZ)
>>> R.dup_trunc(2*x**3 + 3*x**2 + 5*x + 7, ZZ(3))
-x**3 - x + 1
"""
if K.is_ZZ:
g = []
for c in f:
c = c % p
if c > p // 2:
g.append(c - p)
else:
g.append(c)
else:
g = [ c % p for c in f ]
return dup_strip(g)
def dmp_trunc(f, p, u, K):
"""
Reduce a ``K[X]`` polynomial modulo a polynomial ``p`` in ``K[Y]``.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x,y = ring("x,y", ZZ)
>>> f = 3*x**2*y + 8*x**2 + 5*x*y + 6*x + 2*y + 3
>>> g = (y - 1).drop(x)
>>> R.dmp_trunc(f, g)
11*x**2 + 11*x + 5
"""
return dmp_strip([ dmp_rem(c, p, u - 1, K) for c in f ], u)
def dmp_ground_trunc(f, p, u, K):
"""
Reduce a ``K[X]`` polynomial modulo a constant ``p`` in ``K``.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x,y = ring("x,y", ZZ)
>>> f = 3*x**2*y + 8*x**2 + 5*x*y + 6*x + 2*y + 3
>>> R.dmp_ground_trunc(f, ZZ(3))
-x**2 - x*y - y
"""
if not u:
return dup_trunc(f, p, K)
v = u - 1
return dmp_strip([ dmp_ground_trunc(c, p, v, K) for c in f ], u)
def dup_monic(f, K):
"""
Divide all coefficients by ``LC(f)`` in ``K[x]``.
Examples
========
>>> from sympy.polys import ring, ZZ, QQ
>>> R, x = ring("x", ZZ)
>>> R.dup_monic(3*x**2 + 6*x + 9)
x**2 + 2*x + 3
>>> R, x = ring("x", QQ)
>>> R.dup_monic(3*x**2 + 4*x + 2)
x**2 + 4/3*x + 2/3
"""
if not f:
return f
lc = dup_LC(f, K)
if K.is_one(lc):
return f
else:
return dup_exquo_ground(f, lc, K)
def dmp_ground_monic(f, u, K):
"""
Divide all coefficients by ``LC(f)`` in ``K[X]``.
Examples
========
>>> from sympy.polys import ring, ZZ, QQ
>>> R, x,y = ring("x,y", ZZ)
>>> f = 3*x**2*y + 6*x**2 + 3*x*y + 9*y + 3
>>> R.dmp_ground_monic(f)
x**2*y + 2*x**2 + x*y + 3*y + 1
>>> R, x,y = ring("x,y", QQ)
>>> f = 3*x**2*y + 8*x**2 + 5*x*y + 6*x + 2*y + 3
>>> R.dmp_ground_monic(f)
x**2*y + 8/3*x**2 + 5/3*x*y + 2*x + 2/3*y + 1
"""
if not u:
return dup_monic(f, K)
if dmp_zero_p(f, u):
return f
lc = dmp_ground_LC(f, u, K)
if K.is_one(lc):
return f
else:
return dmp_exquo_ground(f, lc, u, K)
def dup_content(f, K):
"""
Compute the GCD of coefficients of ``f`` in ``K[x]``.
Examples
========
>>> from sympy.polys import ring, ZZ, QQ
>>> R, x = ring("x", ZZ)
>>> f = 6*x**2 + 8*x + 12
>>> R.dup_content(f)
2
>>> R, x = ring("x", QQ)
>>> f = 6*x**2 + 8*x + 12
>>> R.dup_content(f)
2
"""
from sympy.polys.domains import QQ
if not f:
return K.zero
cont = K.zero
if K == QQ:
for c in f:
cont = K.gcd(cont, c)
else:
for c in f:
cont = K.gcd(cont, c)
if K.is_one(cont):
break
return cont
def dmp_ground_content(f, u, K):
"""
Compute the GCD of coefficients of ``f`` in ``K[X]``.
Examples
========
>>> from sympy.polys import ring, ZZ, QQ
>>> R, x,y = ring("x,y", ZZ)
>>> f = 2*x*y + 6*x + 4*y + 12
>>> R.dmp_ground_content(f)
2
>>> R, x,y = ring("x,y", QQ)
>>> f = 2*x*y + 6*x + 4*y + 12
>>> R.dmp_ground_content(f)
2
"""
from sympy.polys.domains import QQ
if not u:
return dup_content(f, K)
if dmp_zero_p(f, u):
return K.zero
cont, v = K.zero, u - 1
if K == QQ:
for c in f:
cont = K.gcd(cont, dmp_ground_content(c, v, K))
else:
for c in f:
cont = K.gcd(cont, dmp_ground_content(c, v, K))
if K.is_one(cont):
break
return cont
def dup_primitive(f, K):
"""
Compute content and the primitive form of ``f`` in ``K[x]``.
Examples
========
>>> from sympy.polys import ring, ZZ, QQ
>>> R, x = ring("x", ZZ)
>>> f = 6*x**2 + 8*x + 12
>>> R.dup_primitive(f)
(2, 3*x**2 + 4*x + 6)
>>> R, x = ring("x", QQ)
>>> f = 6*x**2 + 8*x + 12
>>> R.dup_primitive(f)
(2, 3*x**2 + 4*x + 6)
"""
if not f:
return K.zero, f
cont = dup_content(f, K)
if K.is_one(cont):
return cont, f
else:
return cont, dup_quo_ground(f, cont, K)
def dmp_ground_primitive(f, u, K):
"""
Compute content and the primitive form of ``f`` in ``K[X]``.
Examples
========
>>> from sympy.polys import ring, ZZ, QQ
>>> R, x,y = ring("x,y", ZZ)
>>> f = 2*x*y + 6*x + 4*y + 12
>>> R.dmp_ground_primitive(f)
(2, x*y + 3*x + 2*y + 6)
>>> R, x,y = ring("x,y", QQ)
>>> f = 2*x*y + 6*x + 4*y + 12
>>> R.dmp_ground_primitive(f)
(2, x*y + 3*x + 2*y + 6)
"""
if not u:
return dup_primitive(f, K)
if dmp_zero_p(f, u):
return K.zero, f
cont = dmp_ground_content(f, u, K)
if K.is_one(cont):
return cont, f
else:
return cont, dmp_quo_ground(f, cont, u, K)
def dup_extract(f, g, K):
"""
Extract common content from a pair of polynomials in ``K[x]``.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x = ring("x", ZZ)
>>> R.dup_extract(6*x**2 + 12*x + 18, 4*x**2 + 8*x + 12)
(2, 3*x**2 + 6*x + 9, 2*x**2 + 4*x + 6)
"""
fc = dup_content(f, K)
gc = dup_content(g, K)
gcd = K.gcd(fc, gc)
if not K.is_one(gcd):
f = dup_quo_ground(f, gcd, K)
g = dup_quo_ground(g, gcd, K)
return gcd, f, g
def dmp_ground_extract(f, g, u, K):
"""
Extract common content from a pair of polynomials in ``K[X]``.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x,y = ring("x,y", ZZ)
>>> R.dmp_ground_extract(6*x*y + 12*x + 18, 4*x*y + 8*x + 12)
(2, 3*x*y + 6*x + 9, 2*x*y + 4*x + 6)
"""
fc = dmp_ground_content(f, u, K)
gc = dmp_ground_content(g, u, K)
gcd = K.gcd(fc, gc)
if not K.is_one(gcd):
f = dmp_quo_ground(f, gcd, u, K)
g = dmp_quo_ground(g, gcd, u, K)
return gcd, f, g
def dup_real_imag(f, K):
"""
Return bivariate polynomials ``f1`` and ``f2``, such that ``f = f1 + f2*I``.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x,y = ring("x,y", ZZ)
>>> R.dup_real_imag(x**3 + x**2 + x + 1)
(x**3 + x**2 - 3*x*y**2 + x - y**2 + 1, 3*x**2*y + 2*x*y - y**3 + y)
"""
if not K.is_ZZ and not K.is_QQ:
raise DomainError("computing real and imaginary parts is not supported over %s" % K)
f1 = dmp_zero(1)
f2 = dmp_zero(1)
if not f:
return f1, f2
g = [[[K.one, K.zero]], [[K.one], []]]
h = dmp_ground(f[0], 2)
for c in f[1:]:
h = dmp_mul(h, g, 2, K)
h = dmp_add_term(h, dmp_ground(c, 1), 0, 2, K)
H = dup_to_raw_dict(h)
for k, h in H.items():
m = k % 4
if not m:
f1 = dmp_add(f1, h, 1, K)
elif m == 1:
f2 = dmp_add(f2, h, 1, K)
elif m == 2:
f1 = dmp_sub(f1, h, 1, K)
else:
f2 = dmp_sub(f2, h, 1, K)
return f1, f2
def dup_mirror(f, K):
    """
    Evaluate efficiently the composition ``f(-x)`` in ``K[x]``.
    Examples
    ========
    >>> from sympy.polys import ring, ZZ
    >>> R, x = ring("x", ZZ)
    >>> R.dup_mirror(x**3 + 2*x**2 - 4*x + 2)
    -x**3 + 2*x**2 + 4*x + 2
    """
    # Substituting -x for x negates exactly the odd-degree coefficients.
    # f[k] carries x**(deg - k), so the parity of (deg - k) decides the sign.
    deg = len(f) - 1
    return [-c if (deg - k) % 2 else c for k, c in enumerate(f)]
def dup_scale(f, a, K):
    """
    Evaluate efficiently composition ``f(a*x)`` in ``K[x]``.
    Examples
    ========
    >>> from sympy.polys import ring, ZZ
    >>> R, x = ring("x", ZZ)
    >>> R.dup_scale(x**2 - 2*x + 1, ZZ(2))
    4*x**2 - 4*x + 1
    """
    # The coefficient of x**k picks up a factor a**k.  Walk from the
    # coefficient of x**1 upwards, accumulating the powers of ``a``.
    g = list(f)
    factor = a
    for idx in reversed(range(len(g) - 1)):
        g[idx] = factor*g[idx]
        factor = factor*a
    return g
def dup_shift(f, a, K):
    """
    Evaluate efficiently Taylor shift ``f(x + a)`` in ``K[x]``.
    Examples
    ========
    >>> from sympy.polys import ring, ZZ
    >>> R, x = ring("x", ZZ)
    >>> R.dup_shift(x**2 - 2*x + 1, ZZ(2))
    x**2 + 2*x + 1
    """
    # Classical synthetic-division (Horner) scheme for the Taylor shift:
    # repeatedly fold ``a`` into the lower coefficients.
    coeffs = list(f)
    for stop in range(len(coeffs) - 1, 0, -1):
        for k in range(1, stop + 1):
            coeffs[k] = coeffs[k] + a*coeffs[k - 1]
    return coeffs
def dup_transform(f, p, q, K):
"""
Evaluate functional transformation ``q**n * f(p/q)`` in ``K[x]``.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x = ring("x", ZZ)
>>> R.dup_transform(x**2 - 2*x + 1, x**2 + 1, x - 1)
x**4 - 2*x**3 + 5*x**2 - 4*x + 4
"""
if not f:
return []
n = len(f) - 1
h, Q = [f[0]], [[K.one]]
for i in range(0, n):
Q.append(dup_mul(Q[-1], q, K))
for c, q in zip(f[1:], Q[1:]):
h = dup_mul(h, p, K)
q = dup_mul_ground(q, c, K)
h = dup_add(h, q, K)
return h
def dup_compose(f, g, K):
"""
Evaluate functional composition ``f(g)`` in ``K[x]``.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x = ring("x", ZZ)
>>> R.dup_compose(x**2 + x, x - 1)
x**2 - x
"""
if len(g) <= 1:
return dup_strip([dup_eval(f, dup_LC(g, K), K)])
if not f:
return []
h = [f[0]]
for c in f[1:]:
h = dup_mul(h, g, K)
h = dup_add_term(h, c, 0, K)
return h
def dmp_compose(f, g, u, K):
"""
Evaluate functional composition ``f(g)`` in ``K[X]``.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x,y = ring("x,y", ZZ)
>>> R.dmp_compose(x*y + 2*x + y, y)
y**2 + 3*y
"""
if not u:
return dup_compose(f, g, K)
if dmp_zero_p(f, u):
return f
h = [f[0]]
for c in f[1:]:
h = dmp_mul(h, g, u, K)
h = dmp_add_term(h, c, 0, u, K)
return h
def _dup_right_decompose(f, s, K):
"""Helper function for :func:`_dup_decompose`."""
n = len(f) - 1
lc = dup_LC(f, K)
f = dup_to_raw_dict(f)
g = { s: K.one }
r = n // s
for i in range(1, s):
coeff = K.zero
for j in range(0, i):
if not n + j - i in f:
continue
if not s - j in g:
continue
fc, gc = f[n + j - i], g[s - j]
coeff += (i - r*j)*fc*gc
g[s - i] = K.quo(coeff, i*r*lc)
return dup_from_raw_dict(g, K)
def _dup_left_decompose(f, h, K):
"""Helper function for :func:`_dup_decompose`."""
g, i = {}, 0
while f:
q, r = dup_div(f, h, K)
if dup_degree(r) > 0:
return None
else:
g[i] = dup_LC(r, K)
f, i = q, i + 1
return dup_from_raw_dict(g, K)
def _dup_decompose(f, K):
"""Helper function for :func:`dup_decompose`."""
df = len(f) - 1
for s in range(2, df):
if df % s != 0:
continue
h = _dup_right_decompose(f, s, K)
if h is not None:
g = _dup_left_decompose(f, h, K)
if g is not None:
return g, h
return None
def dup_decompose(f, K):
"""
Computes functional decomposition of ``f`` in ``K[x]``.
Given a univariate polynomial ``f`` with coefficients in a field of
characteristic zero, returns list ``[f_1, f_2, ..., f_n]``, where::
f = f_1 o f_2 o ... f_n = f_1(f_2(... f_n))
and ``f_2, ..., f_n`` are monic and homogeneous polynomials of at
least second degree.
Unlike factorization, complete functional decompositions of
polynomials are not unique, consider examples:
1. ``f o g = f(x + b) o (g - b)``
2. ``x**n o x**m = x**m o x**n``
3. ``T_n o T_m = T_m o T_n``
where ``T_n`` and ``T_m`` are Chebyshev polynomials.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x = ring("x", ZZ)
>>> R.dup_decompose(x**4 - 2*x**3 + x**2)
[x**2, x**2 - x]
References
==========
.. [1] [Kozen89]_
"""
F = []
while True:
result = _dup_decompose(f, K)
if result is not None:
f, h = result
F = [h] + F
else:
break
return [f] + F
def dmp_lift(f, u, K):
    """
    Convert algebraic coefficients to integers in ``K[X]``.
    Examples
    ========
    >>> from sympy.polys import ring, QQ
    >>> from sympy import I
    >>> K = QQ.algebraic_field(I)
    >>> R, x = ring("x", K)
    >>> f = x**2 + K([QQ(1), QQ(0)])*x + K([QQ(2), QQ(0)])
    >>> R.dmp_lift(f)
    x**8 + 2*x**6 + 9*x**4 - 8*x**2 + 16
    """
    if not K.is_Algebraic:
        raise DomainError(
            'computation can be done only in an algebraic domain')
    F, monoms, polys = dmp_to_dict(f, u), [], []
    # Collect the monomials whose coefficients are genuinely algebraic
    # (non-ground); only those get their signs varied below.
    for monom, coeff in F.items():
        if not coeff.is_ground:
            monoms.append(monom)
    # Build every sign-variant of f over the collected monomials and expand
    # their product; the expanded product has ground coefficients (see the
    # doctest) and is converted down to the base domain K.dom.
    # NOTE(review): degree grows as deg(f) * 2**len(monoms) -- exponential
    # in the number of algebraic coefficients.
    perms = variations([-1, 1], len(monoms), repetition=True)
    for perm in perms:
        G = dict(F)
        for sign, monom in zip(perm, monoms):
            if sign == -1:
                G[monom] = -G[monom]
        polys.append(dmp_from_dict(G, u, K))
    return dmp_convert(dmp_expand(polys, u, K), u, K, K.dom)
def dup_sign_variations(f, K):
    """
    Compute the number of sign variations of ``f`` in ``K[x]``.
    Examples
    ========
    >>> from sympy.polys import ring, ZZ
    >>> R, x = ring("x", ZZ)
    >>> R.dup_sign_variations(x**4 - x**2 - x + 1)
    2
    """
    # Count sign changes between consecutive nonzero coefficients: the
    # product of the current coefficient with the last nonzero one is
    # negative exactly when their signs differ.  Zeros are skipped.
    count, last = 0, K.zero
    for c in f:
        if K.is_negative(c*last):
            count += 1
        if c:
            last = c
    return count
def dup_clear_denoms(f, K0, K1=None, convert=False):
"""
Clear denominators, i.e. transform ``K_0`` to ``K_1``.
Examples
========
>>> from sympy.polys import ring, QQ
>>> R, x = ring("x", QQ)
>>> f = QQ(1,2)*x + QQ(1,3)
>>> R.dup_clear_denoms(f, convert=False)
(6, 3*x + 2)
>>> R.dup_clear_denoms(f, convert=True)
(6, 3*x + 2)
"""
if K1 is None:
if K0.has_assoc_Ring:
K1 = K0.get_ring()
else:
K1 = K0
common = K1.one
for c in f:
common = K1.lcm(common, K0.denom(c))
if not K1.is_one(common):
f = dup_mul_ground(f, common, K0)
if not convert:
return common, f
else:
return common, dup_convert(f, K0, K1)
def _rec_clear_denoms(g, v, K0, K1):
"""Recursive helper for :func:`dmp_clear_denoms`."""
common = K1.one
if not v:
for c in g:
common = K1.lcm(common, K0.denom(c))
else:
w = v - 1
for c in g:
common = K1.lcm(common, _rec_clear_denoms(c, w, K0, K1))
return common
def dmp_clear_denoms(f, u, K0, K1=None, convert=False):
"""
Clear denominators, i.e. transform ``K_0`` to ``K_1``.
Examples
========
>>> from sympy.polys import ring, QQ
>>> R, x,y = ring("x,y", QQ)
>>> f = QQ(1,2)*x + QQ(1,3)*y + 1
>>> R.dmp_clear_denoms(f, convert=False)
(6, 3*x + 2*y + 6)
>>> R.dmp_clear_denoms(f, convert=True)
(6, 3*x + 2*y + 6)
"""
if not u:
return dup_clear_denoms(f, K0, K1, convert=convert)
if K1 is None:
if K0.has_assoc_Ring:
K1 = K0.get_ring()
else:
K1 = K0
common = _rec_clear_denoms(f, u, K0, K1)
if not K1.is_one(common):
f = dmp_mul_ground(f, common, u, K0)
if not convert:
return common, f
else:
return common, dmp_convert(f, u, K0, K1)
def dup_revert(f, n, K):
    """
    Compute ``f**(-1)`` mod ``x**n`` using Newton iteration.
    This function computes first ``2**n`` terms of a polynomial that
    is a result of inversion of a polynomial modulo ``x**n``. This is
    useful to efficiently compute series expansion of ``1/f``.
    Examples
    ========
    >>> from sympy.polys import ring, QQ
    >>> R, x = ring("x", QQ)
    >>> f = -QQ(1,720)*x**6 + QQ(1,24)*x**4 - QQ(1,2)*x**2 + 1
    >>> R.dup_revert(f, 8)
    61/720*x**6 + 5/24*x**4 + 1/2*x**2 + 1
    """
    # Seed: the inverse of the constant term, i.e. g = 1/f(0).
    g = [K.revert(dup_TC(f, K))]
    # h is the truncation modulus, initially x**2; its degree doubles each
    # iteration (via the lshift below), matching the quadratic convergence.
    h = [K.one, K.zero, K.zero]
    N = int(_ceil(_log(n, 2)))
    for i in range(1, N + 1):
        # Newton step: g <- 2*g - f*g**2  (mod h).
        a = dup_mul_ground(g, K(2), K)
        b = dup_mul(f, dup_sqr(g, K), K)
        g = dup_rem(dup_sub(a, b, K), h, K)
        h = dup_lshift(h, dup_degree(h), K)
    return g
def dmp_revert(f, g, u, K):
"""
Compute ``f**(-1)`` mod ``x**n`` using Newton iteration.
Examples
========
>>> from sympy.polys import ring, QQ
>>> R, x,y = ring("x,y", QQ)
"""
if not u:
return dup_revert(f, g, K)
else:
raise MultivariatePolynomialError(f, g)
| {
"repo_name": "kaushik94/sympy",
"path": "sympy/polys/densetools.py",
"copies": "6",
"size": "25867",
"license": "bsd-3-clause",
"hash": -2728441805223602000,
"line_mean": 18.8062787136,
"line_max": 92,
"alpha_frac": 0.4559477326,
"autogenerated": false,
"ratio": 2.7384077916578446,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0003190266526427957,
"num_lines": 1306
} |
"""Advanced tools for dense recursive polynomials in ``K[x]`` or ``K[X]``. """
from __future__ import print_function, division
from sympy.polys.densebasic import (
dup_strip, dmp_strip,
dup_convert, dmp_convert,
dup_degree, dmp_degree,
dmp_to_dict,
dmp_from_dict,
dup_LC, dmp_LC, dmp_ground_LC,
dup_TC, dmp_TC,
dmp_zero, dmp_ground,
dmp_zero_p,
dup_to_raw_dict, dup_from_raw_dict,
dmp_zeros
)
from sympy.polys.densearith import (
dup_add_term, dmp_add_term,
dup_lshift,
dup_add, dmp_add,
dup_sub, dmp_sub,
dup_mul, dmp_mul,
dup_sqr,
dup_div,
dup_rem, dmp_rem,
dmp_expand,
dup_mul_ground, dmp_mul_ground,
dup_quo_ground, dmp_quo_ground,
dup_exquo_ground, dmp_exquo_ground,
)
from sympy.polys.polyerrors import (
MultivariatePolynomialError,
DomainError
)
from sympy.utilities import variations
from math import ceil as _ceil, log as _log
from sympy.core.compatibility import range
def dup_integrate(f, m, K):
"""
Computes the indefinite integral of ``f`` in ``K[x]``.
Examples
========
>>> from sympy.polys import ring, QQ
>>> R, x = ring("x", QQ)
>>> R.dup_integrate(x**2 + 2*x, 1)
1/3*x**3 + x**2
>>> R.dup_integrate(x**2 + 2*x, 2)
1/12*x**4 + 1/3*x**3
"""
if m <= 0 or not f:
return f
g = [K.zero]*m
for i, c in enumerate(reversed(f)):
n = i + 1
for j in range(1, m):
n *= i + j + 1
g.insert(0, K.exquo(c, K(n)))
return g
def dmp_integrate(f, m, u, K):
"""
Computes the indefinite integral of ``f`` in ``x_0`` in ``K[X]``.
Examples
========
>>> from sympy.polys import ring, QQ
>>> R, x,y = ring("x,y", QQ)
>>> R.dmp_integrate(x + 2*y, 1)
1/2*x**2 + 2*x*y
>>> R.dmp_integrate(x + 2*y, 2)
1/6*x**3 + x**2*y
"""
if not u:
return dup_integrate(f, m, K)
if m <= 0 or dmp_zero_p(f, u):
return f
g, v = dmp_zeros(m, u - 1, K), u - 1
for i, c in enumerate(reversed(f)):
n = i + 1
for j in range(1, m):
n *= i + j + 1
g.insert(0, dmp_quo_ground(c, K(n), v, K))
return g
def _rec_integrate_in(g, m, v, i, j, K):
"""Recursive helper for :func:`dmp_integrate_in`."""
if i == j:
return dmp_integrate(g, m, v, K)
w, i = v - 1, i + 1
return dmp_strip([ _rec_integrate_in(c, m, w, i, j, K) for c in g ], v)
def dmp_integrate_in(f, m, j, u, K):
    """
    Computes the indefinite integral of ``f`` in ``x_j`` in ``K[X]``.
    Examples
    ========
    >>> from sympy.polys import ring, QQ
    >>> R, x,y = ring("x,y", QQ)
    >>> R.dmp_integrate_in(x + 2*y, 1, 0)
    1/2*x**2 + 2*x*y
    >>> R.dmp_integrate_in(x + 2*y, 1, 1)
    x*y + y**2
    """
    if j < 0 or j > u:
        # Bug fix: the message had a single ``%s`` placeholder but two
        # arguments, so the ``raise`` itself failed with a TypeError
        # instead of reporting the IndexError to the caller.
        raise IndexError("0 <= j <= %s expected, got %s" % (u, j))
    return _rec_integrate_in(f, m, u, 0, j, K)
def dup_diff(f, m, K):
"""
``m``-th order derivative of a polynomial in ``K[x]``.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x = ring("x", ZZ)
>>> R.dup_diff(x**3 + 2*x**2 + 3*x + 4, 1)
3*x**2 + 4*x + 3
>>> R.dup_diff(x**3 + 2*x**2 + 3*x + 4, 2)
6*x + 4
"""
if m <= 0:
return f
n = dup_degree(f)
if n < m:
return []
deriv = []
if m == 1:
for coeff in f[:-m]:
deriv.append(K(n)*coeff)
n -= 1
else:
for coeff in f[:-m]:
k = n
for i in range(n - 1, n - m, -1):
k *= i
deriv.append(K(k)*coeff)
n -= 1
return dup_strip(deriv)
def dmp_diff(f, m, u, K):
    """
    ``m``-th order derivative in ``x_0`` of a polynomial in ``K[X]``.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x,y = ring("x,y", ZZ)
    >>> f = x*y**2 + 2*x*y + 3*x + 2*y**2 + 3*y + 1

    >>> R.dmp_diff(f, 1)
    y**2 + 2*y + 3
    >>> R.dmp_diff(f, 2)
    0
    """
    if not u:
        return dup_diff(f, m, K)
    if m <= 0:
        return f
    n = dmp_degree(f, u)
    if n < m:
        # Derivative order exceeds the degree in x_0.
        return dmp_zero(u)
    v = u - 1
    result = []
    if m == 1:
        for coeff in f[:-1]:
            result.append(dmp_mul_ground(coeff, K(n), v, K))
            n -= 1
    else:
        # Multiply each coefficient by the falling factorial of its degree.
        for coeff in f[:-m]:
            factor = n
            for step in range(n - 1, n - m, -1):
                factor *= step
            result.append(dmp_mul_ground(coeff, K(factor), v, K))
            n -= 1
    return dmp_strip(result, u)
def _rec_diff_in(g, m, v, i, j, K):
    """Recursive helper for :func:`dmp_diff_in`."""
    # Differentiate at the target level ``j``; differentiation preserves the
    # variable count, so children live at ``v - 1`` and we strip at ``v``.
    if i == j:
        return dmp_diff(g, m, v, K)
    return dmp_strip([_rec_diff_in(c, m, v - 1, i + 1, j, K) for c in g], v)
def dmp_diff_in(f, m, j, u, K):
    """
    ``m``-th order derivative in ``x_j`` of a polynomial in ``K[X]``.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x,y = ring("x,y", ZZ)
    >>> f = x*y**2 + 2*x*y + 3*x + 2*y**2 + 3*y + 1

    >>> R.dmp_diff_in(f, 1, 0)
    y**2 + 2*y + 3
    >>> R.dmp_diff_in(f, 1, 1)
    2*x*y + 2*x + 4*y + 3
    """
    # Validate the variable index before recursing.
    if not 0 <= j <= u:
        raise IndexError("0 <= j <= %s expected, got %s" % (u, j))
    return _rec_diff_in(f, m, u, 0, j, K)
def dup_eval(f, a, K):
    """
    Evaluate a polynomial at ``x = a`` in ``K[x]`` using Horner scheme.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x = ring("x", ZZ)

    >>> R.dup_eval(x**2 + 2*x + 3, 2)
    11
    """
    # At a == 0 the value is just the trailing coefficient.
    if not a:
        return dup_TC(f, K)
    acc = K.zero
    for coeff in f:
        acc = acc*a + coeff
    return acc
def dmp_eval(f, a, u, K):
    """
    Evaluate a polynomial at ``x_0 = a`` in ``K[X]`` using the Horner scheme.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x,y = ring("x,y", ZZ)

    >>> R.dmp_eval(2*x*y + 3*x + y + 2, 2)
    5*y + 8
    """
    if not u:
        return dup_eval(f, a, K)
    # At a == 0 only the trailing coefficient survives.
    if not a:
        return dmp_TC(f, K)
    v = u - 1
    acc = dmp_LC(f, K)
    # Horner scheme over the outermost variable.
    for coeff in f[1:]:
        acc = dmp_add(dmp_mul_ground(acc, a, v, K), coeff, v, K)
    return acc
def _rec_eval_in(g, a, v, i, j, K):
    """Recursive helper for :func:`dmp_eval_in`."""
    if i == j:
        return dmp_eval(g, a, v, K)
    w = v - 1
    # Evaluation removes one variable, so the result is stripped at ``w``.
    return dmp_strip([_rec_eval_in(c, a, w, i + 1, j, K) for c in g], w)
def dmp_eval_in(f, a, j, u, K):
    """
    Evaluate a polynomial at ``x_j = a`` in ``K[X]`` using the Horner scheme.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x,y = ring("x,y", ZZ)
    >>> f = 2*x*y + 3*x + y + 2

    >>> R.dmp_eval_in(f, 2, 0)
    5*y + 8
    >>> R.dmp_eval_in(f, 2, 1)
    7*x + 4
    """
    # Reject out-of-range variable indices up front.
    if not 0 <= j <= u:
        raise IndexError("0 <= j <= %s expected, got %s" % (u, j))
    return _rec_eval_in(f, a, u, 0, j, K)
def _rec_eval_tail(g, i, A, u, K):
    """Recursive helper for :func:`dmp_eval_tail`."""
    # At the innermost level, evaluate directly at the last point.
    if i == u:
        return dup_eval(g, A[-1], K)
    h = [_rec_eval_tail(c, i + 1, A, u, K) for c in g]
    if i < u - len(A) + 1:
        # This level is not covered by A: keep it as a coefficient list.
        return h
    # Otherwise evaluate this level at its matching point from A.
    return dup_eval(h, A[-u + i - 1], K)
def dmp_eval_tail(f, A, u, K):
    """
    Evaluate a polynomial at ``x_j = a_j, ...`` in ``K[X]``.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x,y = ring("x,y", ZZ)
    >>> f = 2*x*y + 3*x + y + 2

    >>> R.dmp_eval_tail(f, [2])
    7*x + 4
    >>> R.dmp_eval_tail(f, [2, 2])
    18
    """
    if not A:
        return f
    if dmp_zero_p(f, u):
        return dmp_zero(u - len(A))
    result = _rec_eval_tail(f, 0, A, u, K)
    # When every variable was evaluated the result is a ground element;
    # otherwise strip it at the reduced level.
    if u != len(A) - 1:
        result = dmp_strip(result, u - len(A))
    return result
def _rec_diff_eval(g, m, a, v, i, j, K):
    """Recursive helper for :func:`dmp_diff_eval`."""
    if i == j:
        return dmp_eval(dmp_diff(g, m, v, K), a, v, K)
    w = v - 1
    # The evaluation step drops one variable, hence strip at ``w``.
    return dmp_strip([_rec_diff_eval(c, m, a, w, i + 1, j, K) for c in g], w)
def dmp_diff_eval_in(f, m, a, j, u, K):
    """
    Differentiate and evaluate a polynomial in ``x_j`` at ``a`` in ``K[X]``.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x,y = ring("x,y", ZZ)
    >>> f = x*y**2 + 2*x*y + 3*x + 2*y**2 + 3*y + 1

    >>> R.dmp_diff_eval_in(f, 1, 2, 0)
    y**2 + 2*y + 3
    >>> R.dmp_diff_eval_in(f, 1, 2, 1)
    6*x + 11
    """
    # FIX: also reject negative j (previously only j > u was checked, so a
    # negative index fell through into the recursive helper and misbehaved).
    # Message made consistent with dmp_diff_in/dmp_eval_in.
    if j < 0 or j > u:
        raise IndexError("0 <= j <= %s expected, got %s" % (u, j))
    if not j:
        # Fast path: differentiate/evaluate directly in the main variable.
        return dmp_eval(dmp_diff(f, m, u, K), a, u, K)
    return _rec_diff_eval(f, m, a, u, 0, j, K)
def dup_trunc(f, p, K):
    """
    Reduce a ``K[x]`` polynomial modulo a constant ``p`` in ``K``.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x = ring("x", ZZ)

    >>> R.dup_trunc(2*x**3 + 3*x**2 + 5*x + 7, ZZ(3))
    -x**3 - x + 1
    """
    if K.is_ZZ:
        # Over ZZ use symmetric (balanced) residues in (-p/2, p/2].
        g = []
        for coeff in f:
            r = coeff % p
            g.append(r - p if r > p // 2 else r)
    else:
        g = [coeff % p for coeff in f]
    return dup_strip(g)
def dmp_trunc(f, p, u, K):
    """
    Reduce a ``K[X]`` polynomial modulo a polynomial ``p`` in ``K[Y]``.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x,y = ring("x,y", ZZ)
    >>> f = 3*x**2*y + 8*x**2 + 5*x*y + 6*x + 2*y + 3
    >>> g = (y - 1).drop(x)

    >>> R.dmp_trunc(f, g)
    11*x**2 + 11*x + 5
    """
    # Reduce every coefficient (itself a polynomial in the tail variables)
    # modulo p, then normalize.
    v = u - 1
    return dmp_strip([dmp_rem(coeff, p, v, K) for coeff in f], u)
def dmp_ground_trunc(f, p, u, K):
    """
    Reduce a ``K[X]`` polynomial modulo a constant ``p`` in ``K``.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x,y = ring("x,y", ZZ)
    >>> f = 3*x**2*y + 8*x**2 + 5*x*y + 6*x + 2*y + 3

    >>> R.dmp_ground_trunc(f, ZZ(3))
    -x**2 - x*y - y
    """
    if not u:
        return dup_trunc(f, p, K)
    # Recurse into each coefficient one level down.
    w = u - 1
    return dmp_strip([dmp_ground_trunc(coeff, p, w, K) for coeff in f], u)
def dup_monic(f, K):
    """
    Divide all coefficients by ``LC(f)`` in ``K[x]``.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ, QQ
    >>> R, x = ring("x", ZZ)
    >>> R.dup_monic(3*x**2 + 6*x + 9)
    x**2 + 2*x + 3

    >>> R, x = ring("x", QQ)
    >>> R.dup_monic(3*x**2 + 4*x + 2)
    x**2 + 4/3*x + 2/3
    """
    if not f:
        return f
    lc = dup_LC(f, K)
    # Already monic: return the input unchanged.
    if K.is_one(lc):
        return f
    return dup_exquo_ground(f, lc, K)
def dmp_ground_monic(f, u, K):
    """
    Divide all coefficients by ``LC(f)`` in ``K[X]``.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ, QQ
    >>> R, x,y = ring("x,y", ZZ)
    >>> f = 3*x**2*y + 6*x**2 + 3*x*y + 9*y + 3
    >>> R.dmp_ground_monic(f)
    x**2*y + 2*x**2 + x*y + 3*y + 1

    >>> R, x,y = ring("x,y", QQ)
    >>> f = 3*x**2*y + 8*x**2 + 5*x*y + 6*x + 2*y + 3
    >>> R.dmp_ground_monic(f)
    x**2*y + 8/3*x**2 + 5/3*x*y + 2*x + 2/3*y + 1
    """
    if not u:
        return dup_monic(f, K)
    if dmp_zero_p(f, u):
        return f
    lc = dmp_ground_LC(f, u, K)
    # Leading coefficient of one means the polynomial is already monic.
    if K.is_one(lc):
        return f
    return dmp_exquo_ground(f, lc, u, K)
def dup_content(f, K):
    """
    Compute the GCD of coefficients of ``f`` in ``K[x]``.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ, QQ
    >>> R, x = ring("x", ZZ)
    >>> f = 6*x**2 + 8*x + 12

    >>> R.dup_content(f)
    2

    >>> R, x = ring("x", QQ)
    >>> f = 6*x**2 + 8*x + 12

    >>> R.dup_content(f)
    2
    """
    from sympy.polys.domains import QQ

    if not f:
        return K.zero

    cont = K.zero

    if K == QQ:
        # Over QQ the content keeps accumulating; reaching one is not a
        # valid early-exit condition, so fold over all coefficients.
        for coeff in f:
            cont = K.gcd(cont, coeff)
        return cont

    for coeff in f:
        cont = K.gcd(cont, coeff)
        if K.is_one(cont):
            # GCD can never shrink below one: stop early.
            break
    return cont
def dmp_ground_content(f, u, K):
    """
    Compute the GCD of coefficients of ``f`` in ``K[X]``.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ, QQ
    >>> R, x,y = ring("x,y", ZZ)
    >>> f = 2*x*y + 6*x + 4*y + 12

    >>> R.dmp_ground_content(f)
    2

    >>> R, x,y = ring("x,y", QQ)
    >>> f = 2*x*y + 6*x + 4*y + 12

    >>> R.dmp_ground_content(f)
    2
    """
    from sympy.polys.domains import QQ

    if not u:
        return dup_content(f, K)

    if dmp_zero_p(f, u):
        return K.zero

    cont, v = K.zero, u - 1

    if K == QQ:
        # Over QQ the accumulated content may keep changing: no early exit.
        for coeff in f:
            cont = K.gcd(cont, dmp_ground_content(coeff, v, K))
        return cont

    for coeff in f:
        cont = K.gcd(cont, dmp_ground_content(coeff, v, K))
        if K.is_one(cont):
            # Content one is minimal: stop scanning.
            break
    return cont
def dup_primitive(f, K):
    """
    Compute content and the primitive form of ``f`` in ``K[x]``.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ, QQ
    >>> R, x = ring("x", ZZ)
    >>> f = 6*x**2 + 8*x + 12

    >>> R.dup_primitive(f)
    (2, 3*x**2 + 4*x + 6)

    >>> R, x = ring("x", QQ)
    >>> f = 6*x**2 + 8*x + 12

    >>> R.dup_primitive(f)
    (2, 3*x**2 + 4*x + 6)
    """
    if not f:
        return K.zero, f
    cont = dup_content(f, K)
    # Content one means f is already primitive.
    if K.is_one(cont):
        return cont, f
    return cont, dup_quo_ground(f, cont, K)
def dmp_ground_primitive(f, u, K):
    """
    Compute content and the primitive form of ``f`` in ``K[X]``.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ, QQ
    >>> R, x,y = ring("x,y", ZZ)
    >>> f = 2*x*y + 6*x + 4*y + 12

    >>> R.dmp_ground_primitive(f)
    (2, x*y + 3*x + 2*y + 6)

    >>> R, x,y = ring("x,y", QQ)
    >>> f = 2*x*y + 6*x + 4*y + 12

    >>> R.dmp_ground_primitive(f)
    (2, x*y + 3*x + 2*y + 6)
    """
    if not u:
        return dup_primitive(f, K)
    if dmp_zero_p(f, u):
        return K.zero, f
    cont = dmp_ground_content(f, u, K)
    # Nothing to extract when the content is one.
    if K.is_one(cont):
        return cont, f
    return cont, dmp_quo_ground(f, cont, u, K)
def dup_extract(f, g, K):
    """
    Extract common content from a pair of polynomials in ``K[x]``.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x = ring("x", ZZ)

    >>> R.dup_extract(6*x**2 + 12*x + 18, 4*x**2 + 8*x + 12)
    (2, 3*x**2 + 6*x + 9, 2*x**2 + 4*x + 6)
    """
    # GCD of the two contents is the largest common ground factor.
    common = K.gcd(dup_content(f, K), dup_content(g, K))
    if not K.is_one(common):
        f = dup_quo_ground(f, common, K)
        g = dup_quo_ground(g, common, K)
    return common, f, g
def dmp_ground_extract(f, g, u, K):
    """
    Extract common content from a pair of polynomials in ``K[X]``.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x,y = ring("x,y", ZZ)

    >>> R.dmp_ground_extract(6*x*y + 12*x + 18, 4*x*y + 8*x + 12)
    (2, 3*x*y + 6*x + 9, 2*x*y + 4*x + 6)
    """
    # Largest ground factor shared by both polynomials.
    common = K.gcd(dmp_ground_content(f, u, K), dmp_ground_content(g, u, K))
    if not K.is_one(common):
        f = dmp_quo_ground(f, common, u, K)
        g = dmp_quo_ground(g, common, u, K)
    return common, f, g
def dup_real_imag(f, K):
    """
    Return bivariate polynomials ``f1`` and ``f2``, such that ``f = f1 + f2*I``.

    Examples
    ========
    >>> from sympy.polys import ring, ZZ
    >>> R, x,y = ring("x,y", ZZ)
    >>> R.dup_real_imag(x**3 + x**2 + x + 1)
    (x**3 + x**2 - 3*x*y**2 + x - y**2 + 1, 3*x**2*y + 2*x*y - y**3 + y)
    """
    # Only exact rational/integer ground domains are supported.
    if not K.is_ZZ and not K.is_QQ:
        raise DomainError("computing real and imaginary parts is not supported over %s" % K)
    f1 = dmp_zero(1)
    f2 = dmp_zero(1)
    if not f:
        return f1, f2
    # g encodes the substitution x -> x + I*y as a trivariate polynomial,
    # with the extra (outermost-raw-dict) variable standing for I.
    g = [[[K.one, K.zero]], [[K.one], []]]
    # Horner evaluation of f at (x + I*y) in three variables.
    h = dmp_ground(f[0], 2)
    for c in f[1:]:
        h = dmp_mul(h, g, 2, K)
        h = dmp_add_term(h, dmp_ground(c, 1), 0, 2, K)
    H = dup_to_raw_dict(h)
    # Reduce powers of I via I**4 == 1: exponent k mod 4 selects the part
    # (0 -> +real, 1 -> +imag, 2 -> -real, 3 -> -imag).
    for k, h in H.items():
        m = k % 4
        if not m:
            f1 = dmp_add(f1, h, 1, K)
        elif m == 1:
            f2 = dmp_add(f2, h, 1, K)
        elif m == 2:
            f1 = dmp_sub(f1, h, 1, K)
        else:
            f2 = dmp_sub(f2, h, 1, K)
    return f1, f2
def dup_mirror(f, K):
    """
    Evaluate efficiently the composition ``f(-x)`` in ``K[x]``.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x = ring("x", ZZ)

    >>> R.dup_mirror(x**3 + 2*x**2 - 4*x + 2)
    -x**3 + 2*x**2 + 4*x + 2
    """
    # Substituting -x negates exactly the odd-degree coefficients; the
    # coefficient at index i has degree len(f) - 1 - i.
    n = len(f) - 1
    return [-c if (n - i) % 2 else c for i, c in enumerate(f)]
def dup_scale(f, a, K):
    """
    Evaluate efficiently composition ``f(a*x)`` in ``K[x]``.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x = ring("x", ZZ)

    >>> R.dup_scale(x**2 - 2*x + 1, ZZ(2))
    4*x**2 - 4*x + 1
    """
    g = list(f)
    n = len(g) - 1
    # Multiply the coefficient of x**k by a**k, accumulating the power while
    # walking from degree 1 up to degree n.
    power = a
    for i in range(n - 1, -1, -1):
        g[i] = power*g[i]
        power = power*a
    return g
def dup_shift(f, a, K):
    """
    Evaluate efficiently Taylor shift ``f(x + a)`` in ``K[x]``.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x = ring("x", ZZ)

    >>> R.dup_shift(x**2 - 2*x + 1, ZZ(2))
    x**2 + 2*x + 1
    """
    g = list(f)
    n = len(g) - 1
    # Repeated synthetic (Horner/Ruffini-style) folding of ``a`` into the
    # lower coefficients performs the Taylor shift in place.
    for width in range(n, 0, -1):
        for pos in range(width):
            g[pos + 1] += a*g[pos]
    return g
def dup_transform(f, p, q, K):
    """
    Evaluate functional transformation ``q**n * f(p/q)`` in ``K[x]``.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x = ring("x", ZZ)

    >>> R.dup_transform(x**2 - 2*x + 1, x**2 + 1, x - 1)
    x**4 - 2*x**3 + 5*x**2 - 4*x + 4
    """
    if not f:
        return []
    n = len(f) - 1
    # Precompute q**0 .. q**n once.
    powers = [[K.one]]
    for _ in range(n):
        powers.append(dup_mul(powers[-1], q, K))
    # Horner-style accumulation: h <- h*p + c*q**k for each coefficient.
    h = [f[0]]
    for coeff, qk in zip(f[1:], powers[1:]):
        h = dup_mul(h, p, K)
        h = dup_add(h, dup_mul_ground(qk, coeff, K), K)
    return h
def dup_compose(f, g, K):
    """
    Evaluate functional composition ``f(g)`` in ``K[x]``.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x = ring("x", ZZ)

    >>> R.dup_compose(x**2 + x, x - 1)
    x**2 - x
    """
    # A constant (or zero) inner polynomial reduces composition to evaluation.
    if len(g) <= 1:
        return dup_strip([dup_eval(f, dup_LC(g, K), K)])
    if not f:
        return []
    # Horner scheme in g.
    result = [f[0]]
    for coeff in f[1:]:
        result = dup_add_term(dup_mul(result, g, K), coeff, 0, K)
    return result
def dmp_compose(f, g, u, K):
    """
    Evaluate functional composition ``f(g)`` in ``K[X]``.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x,y = ring("x,y", ZZ)

    >>> R.dmp_compose(x*y + 2*x + y, y)
    y**2 + 3*y
    """
    if not u:
        return dup_compose(f, g, K)
    if dmp_zero_p(f, u):
        return f
    # Horner scheme in g over the multivariate representation.
    result = [f[0]]
    for coeff in f[1:]:
        result = dmp_add_term(dmp_mul(result, g, u, K), coeff, 0, u, K)
    return result
def _dup_right_decompose(f, s, K):
    """Helper function for :func:`_dup_decompose`.

    Construct a candidate monic right composition factor ``h`` of degree
    ``s`` (so that ``f = g(h)`` for some left factor ``g`` of degree
    ``n // s``), solving for the coefficients of ``h`` one by one.
    Presumably this follows the Kozen--Landau decomposition scheme cited in
    :func:`dup_decompose` -- confirm against [Kozen89]_.
    """
    n = len(f) - 1
    lc = dup_LC(f, K)
    f = dup_to_raw_dict(f)
    g = { s: K.one }  # h is monic of degree s
    r = n // s        # degree of the prospective left factor
    for i in range(1, s):
        coeff = K.zero
        for j in range(0, i):
            # Terms absent from the sparse dictionaries contribute nothing.
            if not n + j - i in f:
                continue
            if not s - j in g:
                continue
            fc, gc = f[n + j - i], g[s - j]
            coeff += (i - r*j)*fc*gc
        # Each new coefficient of h is determined by the previous ones.
        g[s - i] = K.quo(coeff, i*r*lc)
    return dup_from_raw_dict(g, K)
def _dup_left_decompose(f, h, K):
    """Helper function for :func:`_dup_decompose`."""
    g = {}
    i = 0
    # Repeated division by h writes f in base h; the digits must all be
    # constants, otherwise f is not a polynomial in h.
    while f:
        q, r = dup_div(f, h, K)
        if dup_degree(r) > 0:
            return None
        g[i] = dup_LC(r, K)
        f = q
        i += 1
    return dup_from_raw_dict(g, K)
def _dup_decompose(f, K):
    """Helper function for :func:`_dup_decompose`'s caller :func:`dup_decompose`."""
    df = len(f) - 1
    # Try every proper divisor s of the degree as the degree of a right factor.
    for s in range(2, df):
        if df % s:
            continue
        h = _dup_right_decompose(f, s, K)
        if h is None:
            continue
        g = _dup_left_decompose(f, h, K)
        if g is not None:
            return g, h
    return None
def dup_decompose(f, K):
    """
    Computes functional decomposition of ``f`` in ``K[x]``.

    Given a univariate polynomial ``f`` with coefficients in a field of
    characteristic zero, returns list ``[f_1, f_2, ..., f_n]``, where::

        f = f_1 o f_2 o ... f_n = f_1(f_2(... f_n))

    and ``f_2, ..., f_n`` are monic and homogeneous polynomials of at
    least second degree.

    Unlike factorization, complete functional decompositions of
    polynomials are not unique, consider examples:

    1. ``f o g = f(x + b) o (g - b)``
    2. ``x**n o x**m = x**m o x**n``
    3. ``T_n o T_m = T_m o T_n``

    where ``T_n`` and ``T_m`` are Chebyshev polynomials.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x = ring("x", ZZ)

    >>> R.dup_decompose(x**4 - 2*x**3 + x**2)
    [x**2, x**2 - x]

    References
    ==========

    1. [Kozen89]_
    """
    # Peel off right factors until the remaining left part is indecomposable.
    factors = []
    while True:
        step = _dup_decompose(f, K)
        if step is None:
            break
        f, h = step
        factors.insert(0, h)
    return [f] + factors
def dmp_lift(f, u, K):
    """
    Convert algebraic coefficients to integers in ``K[X]``.

    Examples
    ========
    >>> from sympy.polys import ring, QQ
    >>> from sympy import I
    >>> K = QQ.algebraic_field(I)
    >>> R, x = ring("x", K)
    >>> f = x**2 + K([QQ(1), QQ(0)])*x + K([QQ(2), QQ(0)])
    >>> R.dmp_lift(f)
    x**8 + 2*x**6 + 9*x**4 - 8*x**2 + 16
    """
    if not K.is_Algebraic:
        raise DomainError(
            'computation can be done only in an algebraic domain')
    F, monoms, polys = dmp_to_dict(f, u), [], []
    # Collect the monomials whose coefficients are genuinely algebraic
    # (non-ground); only those coefficients are sign-varied below.
    for monom, coeff in F.items():
        if not coeff.is_ground:
            monoms.append(monom)
    # Build every sign-variant of f over the non-ground monomials and
    # multiply them together; the expanded product has coefficients in the
    # base domain K.dom, to which the result is finally converted.
    perms = variations([-1, 1], len(monoms), repetition=True)
    for perm in perms:
        G = dict(F)
        for sign, monom in zip(perm, monoms):
            if sign == -1:
                G[monom] = -G[monom]
        polys.append(dmp_from_dict(G, u, K))
    return dmp_convert(dmp_expand(polys, u, K), u, K, K.dom)
def dup_sign_variations(f, K):
    """
    Compute the number of sign variations of ``f`` in ``K[x]``.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x = ring("x", ZZ)

    >>> R.dup_sign_variations(x**4 - x**2 - x + 1)
    2
    """
    count = 0
    last = K.zero
    for coeff in f:
        # A variation happens when consecutive nonzero coefficients have
        # opposite signs; zero coefficients are skipped by keeping ``last``.
        if K.is_negative(coeff*last):
            count += 1
        if coeff:
            last = coeff
    return count
def dup_clear_denoms(f, K0, K1=None, convert=False):
    """
    Clear denominators, i.e. transform ``K_0`` to ``K_1``.

    Examples
    ========

    >>> from sympy.polys import ring, QQ
    >>> R, x = ring("x", QQ)
    >>> f = QQ(1,2)*x + QQ(1,3)

    >>> R.dup_clear_denoms(f, convert=False)
    (6, 3*x + 2)
    >>> R.dup_clear_denoms(f, convert=True)
    (6, 3*x + 2)
    """
    # Default target domain: the associated ring of K0 when it exists.
    if K1 is None:
        K1 = K0.get_ring() if K0.has_assoc_Ring else K0
    # LCM of all coefficient denominators is the common multiplier.
    common = K1.one
    for coeff in f:
        common = K1.lcm(common, K0.denom(coeff))
    if not K1.is_one(common):
        f = dup_mul_ground(f, common, K0)
    if convert:
        return common, dup_convert(f, K0, K1)
    return common, f
def _rec_clear_denoms(g, v, K0, K1):
    """Recursive helper for :func:`dmp_clear_denoms`."""
    common = K1.one
    if not v:
        # Base level: fold in the denominator of every ground coefficient.
        for coeff in g:
            common = K1.lcm(common, K0.denom(coeff))
        return common
    w = v - 1
    for coeff in g:
        common = K1.lcm(common, _rec_clear_denoms(coeff, w, K0, K1))
    return common
def dmp_clear_denoms(f, u, K0, K1=None, convert=False):
    """
    Clear denominators, i.e. transform ``K_0`` to ``K_1``.

    Examples
    ========

    >>> from sympy.polys import ring, QQ
    >>> R, x,y = ring("x,y", QQ)
    >>> f = QQ(1,2)*x + QQ(1,3)*y + 1

    >>> R.dmp_clear_denoms(f, convert=False)
    (6, 3*x + 2*y + 6)
    >>> R.dmp_clear_denoms(f, convert=True)
    (6, 3*x + 2*y + 6)
    """
    if not u:
        return dup_clear_denoms(f, K0, K1, convert=convert)
    # Default target domain: the associated ring of K0 when it exists.
    if K1 is None:
        K1 = K0.get_ring() if K0.has_assoc_Ring else K0
    common = _rec_clear_denoms(f, u, K0, K1)
    if not K1.is_one(common):
        f = dmp_mul_ground(f, common, u, K0)
    if convert:
        return common, dmp_convert(f, u, K0, K1)
    return common, f
def dup_revert(f, n, K):
    """
    Compute ``f**(-1)`` mod ``x**n`` using Newton iteration.

    This function computes first ``2**n`` terms of a polynomial that
    is a result of inversion of a polynomial modulo ``x**n``. This is
    useful to efficiently compute series expansion of ``1/f``.

    Examples
    ========

    >>> from sympy.polys import ring, QQ
    >>> R, x = ring("x", QQ)
    >>> f = -QQ(1,720)*x**6 + QQ(1,24)*x**4 - QQ(1,2)*x**2 + 1

    >>> R.dup_revert(f, 8)
    61/720*x**6 + 5/24*x**4 + 1/2*x**2 + 1
    """
    # Start from the inverse of the constant term.
    g = [K.revert(dup_TC(f, K))]
    h = [K.one, K.zero, K.zero]
    # Newton iteration g <- 2*g - f*g**2 doubles the number of correct
    # terms per round, so ceil(log2(n)) rounds suffice.
    rounds = int(_ceil(_log(n, 2)))
    for _ in range(rounds):
        two_g = dup_mul_ground(g, K(2), K)
        fgg = dup_mul(f, dup_sqr(g, K), K)
        g = dup_rem(dup_sub(two_g, fgg, K), h, K)
        # Double the truncation modulus x**k for the next round.
        h = dup_lshift(h, dup_degree(h), K)
    return g
def dmp_revert(f, g, u, K):
    """
    Compute ``f**(-1)`` mod ``x**n`` using Newton iteration.

    Examples
    ========

    >>> from sympy.polys import ring, QQ
    >>> R, x,y = ring("x,y", QQ)
    """
    # Only the univariate case is implemented.
    if u:
        raise MultivariatePolynomialError(f, g)
    return dup_revert(f, g, K)
| {
"repo_name": "emon10005/sympy",
"path": "sympy/polys/densetools.py",
"copies": "52",
"size": "25854",
"license": "bsd-3-clause",
"hash": -8522646158824810000,
"line_mean": 18.7509549274,
"line_max": 92,
"alpha_frac": 0.4560609577,
"autogenerated": false,
"ratio": 2.738771186440678,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00031829549912260594,
"num_lines": 1309
} |
"""Advanced tools for dense recursive polynomials in ``K[x]`` or ``K[X]``. """
from sympy.polys.densebasic import (
dup_strip, dmp_strip,
dup_convert, dmp_convert,
dup_degree, dmp_degree, dmp_degree_in,
dup_to_dict, dmp_to_dict,
dup_from_dict, dmp_from_dict,
dup_LC, dmp_LC, dmp_ground_LC,
dup_TC, dmp_TC, dmp_ground_TC,
dmp_zero, dmp_one, dmp_ground,
dmp_zero_p, dmp_one_p,
dmp_multi_deflate, dmp_inflate,
dup_to_raw_dict, dup_from_raw_dict,
dmp_raise, dmp_apply_pairs,
dmp_inject, dmp_zeros,
dup_terms_gcd
)
from sympy.polys.densearith import (
dup_add_term, dmp_add_term,
dup_mul_term, dmp_mul_term,
dup_lshift, dup_rshift,
dup_neg, dmp_neg,
dup_add, dmp_add,
dup_sub, dmp_sub,
dup_mul, dmp_mul,
dup_sqr, dmp_sqr,
dup_pow, dmp_pow,
dup_div, dmp_div,
dup_rem, dmp_rem,
dup_quo, dmp_quo,
dup_exquo, dmp_exquo,
dup_prem, dmp_prem,
dup_expand, dmp_expand,
dup_add_mul, dup_sub_mul,
dup_mul_ground, dmp_mul_ground,
dup_quo_ground, dmp_quo_ground,
dup_exquo_ground, dmp_exquo_ground,
dup_max_norm, dmp_max_norm
)
from sympy.polys.polyerrors import (
MultivariatePolynomialError,
HeuristicGCDFailed,
HomomorphismFailed,
RefinementFailed,
NotInvertible,
DomainError
)
from sympy.utilities import (
cythonized, variations
)
from math import ceil, log
@cythonized("m,n,i,j")
def dup_integrate(f, m, K):
    """
    Computes indefinite integral of ``f`` in ``K[x]``.
    **Examples**
    >>> from sympy.polys.domains import QQ
    >>> from sympy.polys.densetools import dup_integrate
    >>> dup_integrate([QQ(1), QQ(2), QQ(0)], 1, QQ)
    [1/3, 1/1, 0/1, 0/1]
    >>> dup_integrate([QQ(1), QQ(2), QQ(0)], 2, QQ)
    [1/12, 1/3, 0/1, 0/1, 0/1]
    """
    if m <= 0 or not f:
        return f
    # Seed with m zeros: integrating m times raises every degree by m.
    g = [K.zero]*m
    for i, c in enumerate(reversed(f)):
        n = i+1
        # n becomes (i+1)*(i+2)*...*(i+m), the divisor for the x**i term.
        for j in xrange(1, m):
            n *= i+j+1
        g.insert(0, K.exquo(c, K(n)))
    return g
@cythonized("m,u,v,n,i,j")
def dmp_integrate(f, m, u, K):
    """
    Computes indefinite integral of ``f`` in ``x_0`` in ``K[X]``.
    **Examples**
    >>> from sympy.polys.domains import QQ
    >>> from sympy.polys.densetools import dmp_integrate
    >>> dmp_integrate([[QQ(1)], [QQ(2), QQ(0)]], 1, 1, QQ)
    [[1/2], [2/1, 0/1], []]
    >>> dmp_integrate([[QQ(1)], [QQ(2), QQ(0)]], 2, 1, QQ)
    [[1/6], [1/1, 0/1], [], []]
    """
    # Univariate input is delegated to dup_integrate.
    if not u:
        return dup_integrate(f, m, K)
    if m <= 0 or dmp_zero_p(f, u):
        return f
    # m zero coefficients prepended; each coefficient lives one level down (v).
    g, v = dmp_zeros(m, u-1, K), u-1
    for i, c in enumerate(reversed(f)):
        n = i+1
        # n = (i+1)*(i+2)*...*(i+m), the integration divisor for x_0**i.
        for j in xrange(1, m):
            n *= i+j+1
        g.insert(0, dmp_quo_ground(c, K(n), v, K))
    return g
@cythonized("m,v,w,i,j")
def _rec_integrate_in(g, m, v, i, j, K):
    """Recursive helper for :func:`dmp_integrate_in`."""
    # Integrate once the target level j is reached; otherwise recurse deeper.
    if i == j:
        return dmp_integrate(g, m, v, K)
    w, i = v-1, i+1
    # Integration preserves the variable count, so strip at the current level v.
    return dmp_strip([ _rec_integrate_in(c, m, w, i, j, K) for c in g ], v)
@cythonized("m,j,u")
def dmp_integrate_in(f, m, j, u, K):
    """
    Computes indefinite integral of ``f`` in ``x_j`` in ``K[X]``.
    **Examples**
    >>> from sympy.polys.domains import QQ
    >>> from sympy.polys.densetools import dmp_integrate_in
    >>> dmp_integrate_in([[QQ(1)], [QQ(2), QQ(0)]], 1, 0, 1, QQ)
    [[1/2], [2/1, 0/1], []]
    >>> dmp_integrate_in([[QQ(1)], [QQ(2), QQ(0)]], 1, 1, 1, QQ)
    [[1/1, 0/1], [1/1, 0/1, 0/1]]
    """
    # Reject out-of-range variable indices before recursing.
    if j < 0 or j > u:
        raise IndexError("-%s <= j < %s expected, got %s" % (u, u, j))
    return _rec_integrate_in(f, m, u, 0, j, K)
@cythonized("m,n,k,i")
def dup_diff(f, m, K):
    """
    ``m``-th order derivative of a polynomial in ``K[x]``.
    **Examples**
    >>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.densetools import dup_diff
    >>> dup_diff([ZZ(1), ZZ(2), ZZ(3), ZZ(4)], 1, ZZ)
    [3, 4, 3]
    >>> dup_diff([ZZ(1), ZZ(2), ZZ(3), ZZ(4)], 2, ZZ)
    [6, 4]
    """
    if m <= 0:
        return f
    n = dup_degree(f)
    # Differentiating past the degree gives the zero polynomial.
    if n < m:
        return []
    deriv = []
    if m == 1:
        for coeff in f[:-m]:
            deriv.append(K(n)*coeff)
            n -= 1
    else:
        # Multiply each coefficient by the falling factorial n*(n-1)*...*(n-m+1).
        for coeff in f[:-m]:
            k = n
            for i in xrange(n-1, n-m, -1):
                k *= i
            deriv.append(K(k)*coeff)
            n -= 1
    return dup_strip(deriv)
@cythonized("u,v,m,n,k,i")
def dmp_diff(f, m, u, K):
    """
    ``m``-th order derivative in ``x_0`` of a polynomial in ``K[X]``.
    **Examples**
    >>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.densetools import dmp_diff
    >>> f = ZZ.map([[1, 2, 3], [2, 3, 1]])
    >>> dmp_diff(f, 1, 1, ZZ)
    [[1, 2, 3]]
    >>> dmp_diff(f, 2, 1, ZZ)
    [[]]
    """
    if not u:
        return dup_diff(f, m, K)
    if m <= 0:
        return f
    n = dmp_degree(f, u)
    # Derivative order past the degree in x_0 annihilates the polynomial.
    if n < m:
        return dmp_zero(u)
    deriv, v = [], u-1
    if m == 1:
        for coeff in f[:-m]:
            deriv.append(dmp_mul_ground(coeff, K(n), v, K))
            n -= 1
    else:
        # Multiply by the falling factorial of the degree of each term.
        for coeff in f[:-m]:
            k = n
            for i in xrange(n-1, n-m, -1):
                k *= i
            deriv.append(dmp_mul_ground(coeff, K(k), v, K))
            n -= 1
    return dmp_strip(deriv, u)
@cythonized("m,v,w,i,j")
def _rec_diff_in(g, m, v, i, j, K):
    """Recursive helper for :func:`dmp_diff_in`."""
    # Differentiate at the target level j; otherwise recurse one level down.
    if i == j:
        return dmp_diff(g, m, v, K)
    w, i = v-1, i+1
    # Differentiation keeps the variable count, so strip at level v.
    return dmp_strip([ _rec_diff_in(c, m, w, i, j, K) for c in g ], v)
@cythonized("m,j,u")
def dmp_diff_in(f, m, j, u, K):
    """
    ``m``-th order derivative in ``x_j`` of a polynomial in ``K[X]``.
    **Examples**
    >>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.densetools import dmp_diff_in
    >>> f = ZZ.map([[1, 2, 3], [2, 3, 1]])
    >>> dmp_diff_in(f, 1, 0, 1, ZZ)
    [[1, 2, 3]]
    >>> dmp_diff_in(f, 1, 1, 1, ZZ)
    [[2, 2], [4, 3]]
    """
    # Validate the variable index before recursing.
    if j < 0 or j > u:
        raise IndexError("-%s <= j < %s expected, got %s" % (u, u, j))
    return _rec_diff_in(f, m, u, 0, j, K)
def dup_eval(f, a, K):
    """
    Evaluate a polynomial at ``x = a`` in ``K[x]`` using Horner scheme.
    **Examples**
    >>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.densetools import dup_eval
    >>> dup_eval([ZZ(1), ZZ(2), ZZ(3)], 2, ZZ)
    11
    """
    # At a == 0 only the trailing coefficient survives.
    if not a:
        return dup_TC(f, K)
    result = K.zero
    # Horner scheme: result = (...(c_n*a + c_{n-1})*a + ...)*a + c_0.
    for c in f:
        result *= a
        result += c
    return result
@cythonized("u,v")
def dmp_eval(f, a, u, K):
    """
    Evaluate a polynomial at ``x_0 = a`` in ``K[X]`` using the Horner scheme.
    **Examples**
    >>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.densetools import dmp_eval
    >>> f = ZZ.map([[2, 3], [1, 2]])
    >>> dmp_eval(f, 2, 1, ZZ)
    [5, 8]
    """
    if not u:
        return dup_eval(f, a, K)
    # At a == 0 the value reduces to the trailing coefficient.
    if not a:
        return dmp_TC(f, K)
    result, v = dmp_LC(f, K), u-1
    # Horner scheme over the outermost variable.
    for coeff in f[1:]:
        result = dmp_mul_ground(result, a, v, K)
        result = dmp_add(result, coeff, v, K)
    return result
@cythonized("v,i,j")
def _rec_eval_in(g, a, v, i, j, K):
    """Recursive helper for :func:`dmp_eval_in`."""
    if i == j:
        return dmp_eval(g, a, v, K)
    v, i = v-1, i+1
    # Evaluation removes one variable, so strip at the decremented level.
    return dmp_strip([ _rec_eval_in(c, a, v, i, j, K) for c in g ], v)
@cythonized("u")
def dmp_eval_in(f, a, j, u, K):
    """
    Evaluate a polynomial at ``x_j = a`` in ``K[X]`` using the Horner scheme.
    **Examples**
    >>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.densetools import dmp_eval_in
    >>> f = ZZ.map([[2, 3], [1, 2]])
    >>> dmp_eval_in(f, 2, 0, 1, ZZ)
    [5, 8]
    >>> dmp_eval_in(f, 2, 1, 1, ZZ)
    [7, 4]
    """
    # Guard the variable index before recursing.
    if j < 0 or j > u:
        raise IndexError("-%s <= j < %s expected, got %s" % (u, u, j))
    return _rec_eval_in(f, a, u, 0, j, K)
@cythonized("i,u")
def _rec_eval_tail(g, i, A, u, K):
    """Recursive helper for :func:`dmp_eval_tail`."""
    # At the innermost level, evaluate at the last point of A.
    if i == u:
        return dup_eval(g, A[-1], K)
    else:
        h = [ _rec_eval_tail(c, i+1, A, u, K) for c in g ]
        if i < u - len(A) + 1:
            # This level is not covered by A: keep its coefficient list.
            return h
        else:
            # Evaluate this level at its matching point from A.
            return dup_eval(h, A[-u+i-1], K)
@cythonized("u")
def dmp_eval_tail(f, A, u, K):
    """
    Evaluate a polynomial at ``x_j = a_j, ...`` in ``K[X]``.
    **Examples**
    >>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.densetools import dmp_eval_tail
    >>> f = ZZ.map([[2, 3], [1, 2]])
    >>> dmp_eval_tail(f, (2, 2), 1, ZZ)
    18
    >>> dmp_eval_tail(f, (2,), 1, ZZ)
    [7, 4]
    """
    if not A:
        return f
    if dmp_zero_p(f, u):
        return dmp_zero(u - len(A))
    e = _rec_eval_tail(f, 0, A, u, K)
    # All variables evaluated: the result is a ground element.
    if u == len(A)-1:
        return e
    else:
        return dmp_strip(e, u - len(A))
@cythonized("m,v,i,j")
def _rec_diff_eval(g, m, a, v, i, j, K):
    """Recursive helper for :func:`dmp_diff_eval`."""
    if i == j:
        return dmp_eval(dmp_diff(g, m, v, K), a, v, K)
    v, i = v-1, i+1
    # The evaluation step drops one variable, hence strip at the new level.
    return dmp_strip([ _rec_diff_eval(c, m, a, v, i, j, K) for c in g ], v)
@cythonized("m,j,u")
def dmp_diff_eval_in(f, m, a, j, u, K):
    """
    Differentiate and evaluate a polynomial in ``x_j`` at ``a`` in ``K[X]``.
    **Examples**
    >>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.densetools import dmp_diff_eval_in
    >>> f = ZZ.map([[1, 2, 3], [2, 3, 1]])
    >>> dmp_diff_eval_in(f, 1, 2, 0, 1, ZZ)
    [1, 2, 3]
    >>> dmp_diff_eval_in(f, 1, 2, 1, 1, ZZ)
    [6, 11]
    """
    # NOTE(review): only j > u is rejected here; negative j is not
    # validated, unlike dmp_diff_in/dmp_eval_in above -- worth confirming.
    if j > u:
        raise IndexError("-%s <= j < %s expected, got %s" % (u, u, j))
    # Fast path: differentiate and evaluate directly in the main variable.
    if not j:
        return dmp_eval(dmp_diff(f, m, u, K), a, u, K)
    return _rec_diff_eval(f, m, a, u, 0, j, K)
def dup_trunc(f, p, K):
    """
    Reduce ``K[x]`` polynomial modulo a constant ``p`` in ``K``.
    **Examples**
    >>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.densetools import dup_trunc
    >>> f = ZZ.map([2, 3, 5, 7])
    >>> dup_trunc(f, ZZ(3), ZZ)
    [-1, 0, -1, 1]
    """
    if K.is_ZZ:
        # Over ZZ use symmetric (balanced) residues in (-p/2, p/2].
        g = []
        for c in f:
            c = c % p
            if c > p // 2:
                g.append(c - p)
            else:
                g.append(c)
    else:
        g = [ c % p for c in f ]
    return dup_strip(g)
@cythonized("u")
def dmp_trunc(f, p, u, K):
    """
    Reduce ``K[X]`` polynomial modulo a polynomial ``p`` in ``K[Y]``.
    **Examples**
    >>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.densetools import dmp_trunc
    >>> f = ZZ.map([[3, 8], [5, 6], [2, 3]])
    >>> g = ZZ.map([1, -1])
    >>> dmp_trunc(f, g, 1, ZZ)
    [[11], [11], [5]]
    """
    # Reduce each coefficient (a polynomial in the tail variables) modulo p.
    return dmp_strip([ dmp_rem(c, p, u-1, K) for c in f ], u)
@cythonized("u,v")
def dmp_ground_trunc(f, p, u, K):
    """
    Reduce ``K[X]`` polynomial modulo a constant ``p`` in ``K``.
    **Examples**
    >>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.densetools import dmp_ground_trunc
    >>> f = ZZ.map([[3, 8], [5, 6], [2, 3]])
    >>> dmp_ground_trunc(f, ZZ(3), 1, ZZ)
    [[-1], [-1, 0], [-1, 0]]
    """
    if not u:
        return dup_trunc(f, p, K)
    v = u-1
    # Recurse into every coefficient one level down, then normalize.
    return dmp_strip([ dmp_ground_trunc(c, p, v, K) for c in f ], u)
def dup_monic(f, K):
    """
    Divides all coefficients by ``LC(f)`` in ``K[x]``.
    **Examples**
    >>> from sympy.polys.domains import ZZ, QQ
    >>> from sympy.polys.densetools import dup_monic
    >>> dup_monic([ZZ(3), ZZ(6), ZZ(9)], ZZ)
    [1, 2, 3]
    >>> dup_monic([QQ(3), QQ(4), QQ(2)], QQ)
    [1/1, 4/3, 2/3]
    """
    if not f:
        return f
    lc = dup_LC(f, K)
    # Already monic: nothing to do.
    if K.is_one(lc):
        return f
    else:
        return dup_quo_ground(f, lc, K)
@cythonized("u")
def dmp_ground_monic(f, u, K):
    """
    Divides all coefficients by ``LC(f)`` in ``K[X]``.
    **Examples**
    >>> from sympy.polys.domains import ZZ, QQ
    >>> from sympy.polys.densetools import dmp_ground_monic
    >>> f = ZZ.map([[3, 6], [3, 0], [9, 3]])
    >>> g = QQ.map([[3, 8], [5, 6], [2, 3]])
    >>> dmp_ground_monic(f, 1, ZZ)
    [[1, 2], [1, 0], [3, 1]]
    >>> dmp_ground_monic(g, 1, QQ)
    [[1/1, 8/3], [5/3, 2/1], [2/3, 1/1]]
    """
    if not u:
        return dup_monic(f, K)
    if dmp_zero_p(f, u):
        return f
    lc = dmp_ground_LC(f, u, K)
    # Leading coefficient one means f is already monic.
    if K.is_one(lc):
        return f
    else:
        return dmp_quo_ground(f, lc, u, K)
def dup_content(f, K):
    """
    Compute the GCD of coefficients of ``f`` in ``K[x]``.
    **Examples**
    >>> from sympy.polys.domains import ZZ, QQ
    >>> from sympy.polys.densetools import dup_content
    >>> f = ZZ.map([6, 8, 12])
    >>> g = QQ.map([6, 8, 12])
    >>> dup_content(f, ZZ)
    2
    >>> dup_content(g, QQ)
    1/1
    """
    if not f:
        return K.zero
    cont = K.zero
    for c in f:
        cont = K.gcd(cont, c)
        # GCD can never shrink below one: stop early.
        if K.is_one(cont):
            break
    return cont
@cythonized("u,v")
def dmp_ground_content(f, u, K):
    """
    Compute the GCD of coefficients of ``f`` in ``K[X]``.
    **Examples**
    >>> from sympy.polys.domains import ZZ, QQ
    >>> from sympy.polys.densetools import dmp_ground_content
    >>> f = ZZ.map([[2, 6], [4, 12]])
    >>> g = QQ.map([[2, 6], [4, 12]])
    >>> dmp_ground_content(f, 1, ZZ)
    2
    >>> dmp_ground_content(g, 1, QQ)
    1/1
    """
    if not u:
        return dup_content(f, K)
    if dmp_zero_p(f, u):
        return K.zero
    cont, v = K.zero, u-1
    for c in f:
        # Fold in the content of each coefficient one level down.
        cont = K.gcd(cont, dmp_ground_content(c, v, K))
        if K.is_one(cont):
            break
    return cont
def dup_primitive(f, K):
    """
    Compute content and the primitive form of ``f`` in ``K[x]``.
    **Examples**
    >>> from sympy.polys.domains import ZZ, QQ
    >>> from sympy.polys.densetools import dup_primitive
    >>> f = ZZ.map([6, 8, 12])
    >>> g = QQ.map([6, 8, 12])
    >>> dup_primitive(f, ZZ)
    (2, [3, 4, 6])
    >>> dup_primitive(g, QQ)
    (1/1, [6/1, 8/1, 12/1])
    """
    if not f:
        return K.zero, f
    cont = dup_content(f, K)
    # Content one means f is already primitive.
    if K.is_one(cont):
        return cont, f
    else:
        return cont, dup_exquo_ground(f, cont, K)
@cythonized("u")
def dmp_ground_primitive(f, u, K):
    """
    Compute content and the primitive form of ``f`` in ``K[X]``.
    **Examples**
    >>> from sympy.polys.domains import ZZ, QQ
    >>> from sympy.polys.densetools import dmp_ground_primitive
    >>> f = ZZ.map([[2, 6], [4, 12]])
    >>> g = QQ.map([[2, 6], [4, 12]])
    >>> dmp_ground_primitive(f, 1, ZZ)
    (2, [[1, 3], [2, 6]])
    >>> dmp_ground_primitive(g, 1, QQ)
    (1/1, [[2/1, 6/1], [4/1, 12/1]])
    """
    if not u:
        return dup_primitive(f, K)
    if dmp_zero_p(f, u):
        return K.zero, f
    cont = dmp_ground_content(f, u, K)
    # Nothing to extract when the content is one.
    if K.is_one(cont):
        return cont, f
    else:
        return cont, dmp_exquo_ground(f, cont, u, K)
def dup_extract(f, g, K):
    """
    Extract common content from a pair of polynomials in ``K[x]``.
    **Examples**
    >>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.densetools import dup_extract
    >>> f = ZZ.map([6, 12, 18])
    >>> g = ZZ.map([4, 8, 12])
    >>> dup_extract(f, g, ZZ)
    (2, [3, 6, 9], [2, 4, 6])
    """
    fc = dup_content(f, K)
    gc = dup_content(g, K)
    # The GCD of the two contents is the largest common ground factor.
    gcd = K.gcd(fc, gc)
    if not K.is_one(gcd):
        f = dup_exquo_ground(f, gcd, K)
        g = dup_exquo_ground(g, gcd, K)
    return gcd, f, g
@cythonized("u")
def dmp_ground_extract(f, g, u, K):
    """
    Extract common content from a pair of polynomials in ``K[X]``.
    **Examples**
    >>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.densetools import dmp_ground_extract
    >>> f = ZZ.map([[6, 12], [18]])
    >>> g = ZZ.map([[4, 8], [12]])
    >>> dmp_ground_extract(f, g, 1, ZZ)
    (2, [[3, 6], [9]], [[2, 4], [6]])
    """
    fc = dmp_ground_content(f, u, K)
    gc = dmp_ground_content(g, u, K)
    # Largest ground factor shared by both polynomials.
    gcd = K.gcd(fc, gc)
    if not K.is_one(gcd):
        f = dmp_exquo_ground(f, gcd, u, K)
        g = dmp_exquo_ground(g, gcd, u, K)
    return gcd, f, g
def dup_real_imag(f, K):
    """
    Return bivariate polynomials ``f1`` and ``f2``, such that ``f = f1 + f2*I``.
    **Examples**
    >>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.densetools import dup_real_imag
    >>> dup_real_imag([ZZ(1), ZZ(1), ZZ(1), ZZ(1)], ZZ)
    ([[1], [1], [-3, 0, 1], [-1, 0, 1]], [[3, 0], [2, 0], [-1, 0, 1, 0]])
    """
    if not K.is_ZZ and not K.is_QQ:
        raise DomainError("computing real and imaginary parts is not supported over %s" % K)
    # Accumulators for the real (f1) and imaginary (f2) parts, both
    # bivariate (level 1).
    f1 = dmp_zero(1)
    f2 = dmp_zero(1)
    if not f:
        return f1, f2
    # ``g`` encodes ``x + y*I`` as a trivariate dense polynomial; the Horner
    # loop below computes ``f(x + y*I)`` at level 2.
    g = [[[K.one, K.zero]], [[K.one], []]]
    h = dmp_ground(f[0], 2)
    for c in f[1:]:
        h = dmp_mul(h, g, 2, K)
        h = dmp_add_term(h, dmp_ground(c, 1), 0, 2, K)
    # Split by the power of I: I**k cycles with period 4
    # (1, I, -1, -I), so terms go to f1 or f2 with alternating sign.
    H = dup_to_raw_dict(h)
    for k, h in H.iteritems():  # NOTE: Python 2 dict API
        m = k % 4
        if not m:
            f1 = dmp_add(f1, h, 1, K)
        elif m == 1:
            f2 = dmp_add(f2, h, 1, K)
        elif m == 2:
            f1 = dmp_sub(f1, h, 1, K)
        else:
            f2 = dmp_sub(f2, h, 1, K)
    return f1, f2
@cythonized('i,n')
def dup_mirror(f, K):
    """
    Evaluate efficiently the composition ``f(-x)`` in ``K[x]``.

    >>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.densetools import dup_mirror
    >>> dup_mirror([ZZ(1), ZZ(2), -ZZ(4), ZZ(2)], ZZ)
    [-1, 2, 4, 2]
    """
    result, sign = list(f), -K.one
    # Negate every other coefficient, starting just below the leading one.
    for i in xrange(dup_degree(result)-1, -1, -1):
        result[i], sign = sign*result[i], -sign
    return result
@cythonized('i,n')
def dup_scale(f, a, K):
    """
    Evaluate efficiently the composition ``f(a*x)`` in ``K[x]``.

    >>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.densetools import dup_scale
    >>> dup_scale([ZZ(1), -ZZ(2), ZZ(1)], ZZ(2), ZZ)
    [4, -4, 1]
    """
    result = list(f)
    power = a
    # Multiply the coefficient of x**k by a**k, walking up from x**1.
    for i in xrange(dup_degree(result)-1, -1, -1):
        result[i] = power*result[i]
        power = power*a
    return result
@cythonized('i,j,n')
def dup_shift(f, a, K):
    """
    Evaluate efficiently the Taylor shift ``f(x + a)`` in ``K[x]``.

    >>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.densetools import dup_shift
    >>> dup_shift([ZZ(1), -ZZ(2), ZZ(1)], ZZ(2), ZZ)
    [1, 2, 1]
    """
    result = list(f)
    degree = dup_degree(result)
    # Repeated synthetic division by (x + a), accumulating in place.
    for i in xrange(degree, 0, -1):
        for j in xrange(0, i):
            result[j+1] += a*result[j]
    return result
@cythonized('i,n')
def dup_transform(f, p, q, K):
    """
    Evaluate functional transformation ``q**n * f(p/q)`` in ``K[x]``.
    **Examples**
    >>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.densetools import dup_transform
    >>> f = ZZ.map([1, -2, 1])
    >>> p = ZZ.map([1, 0, 1])
    >>> q = ZZ.map([1, -1])
    >>> dup_transform(f, p, q, ZZ)
    [1, -2, 5, -4, 4]
    """
    if not f:
        return []
    n = dup_degree(f)
    # Q[i] holds q**i; precompute all powers up to q**n.
    h, Q = [f[0]], [[K.one]]
    for i in xrange(0, n):
        Q.append(dup_mul(Q[-1], q, K))
    # Horner scheme in p, with each coefficient homogenized by the matching
    # power of q.  NOTE: the loop variable deliberately shadows the parameter
    # ``q``, which is no longer needed once the powers Q have been built.
    for c, q in zip(f[1:], Q[1:]):
        h = dup_mul(h, p, K)
        q = dup_mul_ground(q, c, K)
        h = dup_add(h, q, K)
    return h
def dup_compose(f, g, K):
    """
    Evaluate functional composition ``f(g)`` in ``K[x]``.

    >>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.densetools import dup_compose
    >>> dup_compose(ZZ.map([1, 1, 0]), ZZ.map([1, -1]), ZZ)
    [1, -1, 0]
    """
    if len(g) <= 1:
        # g is constant, so f(g) collapses to a (stripped) ground value.
        return dup_strip([dup_eval(f, dup_LC(g, K), K)])
    if not f:
        return []
    # Horner scheme with g as the "variable".
    result = [f[0]]
    for coeff in f[1:]:
        result = dup_mul(result, g, K)
        result = dup_add_term(result, coeff, 0, K)
    return result
@cythonized("u")
def dmp_compose(f, g, u, K):
"""
Evaluate functional composition ``f(g)`` in ``K[X]``.
**Examples**
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densetools import dmp_compose
>>> f = ZZ.map([[1, 2], [1, 0]])
>>> g = ZZ.map([[1, 0]])
>>> dmp_compose(f, g, 1, ZZ)
[[1, 3, 0]]
"""
if not u:
return dup_compose(f, g, K)
if dmp_zero_p(f, u):
return f
h = [f[0]]
for c in f[1:]:
h = dmp_mul(h, g, u, K)
h = dmp_add_term(h, c, 0, u, K)
return h
@cythonized("s,n,r,i,j")
def _dup_right_decompose(f, s, K):
"""Helper function for :func:`_dup_decompose`."""
n = dup_degree(f)
lc = dup_LC(f, K)
f = dup_to_raw_dict(f)
g = { s : K.one }
r = n // s
for i in xrange(1, s):
coeff = K.zero
for j in xrange(0, i):
if not n+j-i in f:
continue
if not s-j in g:
continue
fc, gc = f[n+j-i], g[s-j]
coeff += (i - r*j)*fc*gc
g[s-i] = K.exquo(coeff, i*r*lc)
return dup_from_raw_dict(g, K)
@cythonized("i")
def _dup_left_decompose(f, h, K):
"""Helper function for :func:`_dup_decompose`."""
g, i = {}, 0
while f:
q, r = dup_div(f, h, K)
if dup_degree(r) > 0:
return None
else:
g[i] = dup_LC(r, K)
f, i = q, i + 1
return dup_from_raw_dict(g, K)
@cythonized("df,s")
def _dup_decompose(f, K):
"""Helper function for :func:`dup_decompose`."""
df = dup_degree(f)
for s in xrange(2, df):
if df % s != 0:
continue
h = _dup_right_decompose(f, s, K)
if h is not None:
g = _dup_left_decompose(f, h, K)
if g is not None:
return g, h
return None
def dup_decompose(f, K):
    """
    Computes functional decomposition of ``f`` in ``K[x]``.

    Given an univariate polynomial ``f`` with coefficients in a field of
    characteristic zero, returns a list ``[f_1, f_2, ..., f_n]`` where::

        f = f_1 o f_2 o ... f_n = f_1(f_2(... f_n))

    and ``f_2, ..., f_n`` are monic and homogeneous polynomials of at
    least second degree.

    Unlike factorization, complete functional decompositions of
    polynomials are not unique, consider examples:

    1. ``f o g = f(x + b) o (g - b)``
    2. ``x**n o x**m = x**m o x**n``
    3. ``T_n o T_m = T_m o T_n``

    where ``T_n`` and ``T_m`` are Chebyshev polynomials.

    >>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.densetools import dup_decompose
    >>> dup_decompose(ZZ.map([1, -2, 1, 0, 0]), ZZ)
    [[1, 0, 0], [1, -1, 0]]

    **References** 1. [Kozen89]_
    """
    components = []
    # Peel off right factors until f is indecomposable.
    while True:
        step = _dup_decompose(f, K)
        if step is None:
            break
        f, h = step
        components.insert(0, h)
    return [f] + components
@cythonized("u")
def dmp_lift(f, u, K):
"""
Convert algebraic coefficients to integers in ``K[X]``.
**Examples**
>>> from sympy import I
>>> from sympy.polys.domains import QQ
>>> from sympy.polys.densetools import dmp_lift
>>> K = QQ.algebraic_field(I)
>>> f = [K(1), K([QQ(1), QQ(0)]), K([QQ(2), QQ(0)])]
>>> dmp_lift(f, 0, K)
[1/1, 0/1, 2/1, 0/1, 9/1, 0/1, -8/1, 0/1, 16/1]
"""
if not K.is_Algebraic:
raise DomainError('computation can be done only in an algebraic domain')
F, monoms, polys = dmp_to_dict(f, u), [], []
for monom, coeff in F.iteritems():
if not coeff.is_ground:
monoms.append(monom)
perms = variations([-1, 1], len(monoms), repetition=True)
for perm in perms:
G = dict(F)
for sign, monom in zip(perm, monoms):
if sign == -1:
G[monom] = -G[monom]
polys.append(dmp_from_dict(G, u, K))
return dmp_convert(dmp_expand(polys, u, K), u, K, K.dom)
def dup_sign_variations(f, K):
    """
    Compute the number of sign variations of ``f`` in ``K[x]``.

    >>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.densetools import dup_sign_variations
    >>> dup_sign_variations(ZZ.map([1, 0, -1, -1, 1]), ZZ)
    2
    """
    count, last = 0, K.zero
    for c in f:
        # A variation occurs when c has the opposite sign of the last
        # nonzero coefficient seen so far; zeros are skipped.
        if c*last < 0:
            count += 1
        if c:
            last = c
    return count
def dup_clear_denoms(f, K0, K1=None, convert=False):
    """
    Clear denominators, i.e. transform ``K_0`` to ``K_1``.

    >>> from sympy.polys.domains import QQ, ZZ
    >>> from sympy.polys.densetools import dup_clear_denoms
    >>> dup_clear_denoms([QQ(1,2), QQ(1,3)], QQ, convert=False)
    (6, [3/1, 2/1])
    >>> dup_clear_denoms([QQ(1,2), QQ(1,3)], QQ, convert=True)
    (6, [3, 2])
    """
    if K1 is None:
        K1 = K0.get_ring()
    # lcm of all coefficient denominators.
    common = K1.one
    for c in f:
        common = K1.lcm(common, K0.denom(c))
    if not K1.is_one(common):
        f = dup_mul_ground(f, common, K0)
    if convert:
        return common, dup_convert(f, K0, K1)
    return common, f
@cythonized("v,w")
def _rec_clear_denoms(g, v, K0, K1):
"""Recursive helper for :func:`dmp_clear_denoms`."""
common = K1.one
if not v:
for c in g:
common = K1.lcm(common, K0.denom(c))
else:
w = v-1
for c in g:
common = K1.lcm(common, _rec_clear_denoms(c, w, K0, K1))
return common
@cythonized("u")
def dmp_clear_denoms(f, u, K0, K1=None, convert=False):
"""
Clear denominators, i.e. transform ``K_0`` to ``K_1``.
**Examples**
>>> from sympy.polys.domains import QQ, ZZ
>>> from sympy.polys.densetools import dmp_clear_denoms
>>> f = [[QQ(1,2)], [QQ(1,3), QQ(1)]]
>>> dmp_clear_denoms(f, 1, QQ, convert=False)
(6, [[3/1], [2/1, 6/1]])
>>> f = [[QQ(1,2)], [QQ(1,3), QQ(1)]]
>>> dmp_clear_denoms(f, 1, QQ, convert=True)
(6, [[3], [2, 6]])
"""
if not u:
return dup_clear_denoms(f, K0, K1, convert=convert)
if K1 is None:
K1 = K0.get_ring()
common = _rec_clear_denoms(f, u, K0, K1)
if not K1.is_one(common):
f = dmp_mul_ground(f, common, u, K0)
if not convert:
return common, f
else:
return common, dmp_convert(f, u, K0, K1)
@cythonized('i,n')
def dup_revert(f, n, K):
    """
    Compute ``f**(-1)`` mod ``x**n`` using Newton iteration.
    This function computes first ``2**n`` terms of a polynomial that
    is a result of inversion of a polynomial modulo ``x**n``. This is
    useful to efficiently compute series expansion of ``1/f``.
    **Examples**
    >>> from sympy.polys.domains import QQ
    >>> from sympy.polys.densetools import dup_revert
    >>> f = [-QQ(1,720), QQ(0), QQ(1,24), QQ(0), -QQ(1,2), QQ(0), QQ(1)]
    >>> dup_revert(f, 8, QQ)
    [61/720, 0/1, 5/24, 0/1, 1/2, 0/1, 1/1]
    """
    # Initial approximation: the inverse of the trailing coefficient
    # (which must be a unit in K).
    g = [K.revert(dup_TC(f, K))]
    h = [K.one, K.zero, K.zero]
    # Number of Newton steps needed to reach precision n.
    # NOTE(review): ``ceil``/``log`` are assumed to be imported at module
    # level (outside this chunk) -- confirm against the full file.
    N = int(ceil(log(n, 2)))
    for i in xrange(1, N + 1):
        # Newton step: g <- 2*g - f*g**2, reduced mod the current power of x.
        a = dup_mul_ground(g, K(2), K)
        b = dup_mul(f, dup_sqr(g, K), K)
        g = dup_rem(dup_sub(a, b, K), h, K)
        # Double the truncation order for the next iteration.
        h = dup_lshift(h, dup_degree(h), K)
    return g
def dmp_revert(f, g, u, K):
    """
    Compute ``f**(-1)`` mod ``x**n`` using Newton iteration.

    >>> from sympy.polys.domains import QQ
    >>> from sympy.polys.densetools import dmp_revert
    """
    if not u:
        return dup_revert(f, g, K)
    # Multivariate series reversion is not implemented.
    raise MultivariatePolynomialError(f, g)
| {
"repo_name": "pernici/sympy",
"path": "sympy/polys/densetools.py",
"copies": "1",
"size": "27597",
"license": "bsd-3-clause",
"hash": 9168373856174546000,
"line_mean": 20.7985781991,
"line_max": 92,
"alpha_frac": 0.4964670073,
"autogenerated": false,
"ratio": 2.764676417551593,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8750725592160458,
"avg_score": 0.0020835665382270877,
"num_lines": 1266
} |
"""Advanced tools for dense recursive polynomials in `K[x]` or `K[X]`. """
from sympy.polys.densebasic import (
dup_strip, dmp_strip,
dup_reverse,
dup_convert, dmp_convert,
dup_degree, dmp_degree, dmp_degree_in,
dup_to_dict, dmp_to_dict,
dup_from_dict, dmp_from_dict,
dup_LC, dmp_LC, dmp_ground_LC,
dup_TC, dmp_TC, dmp_ground_TC,
dmp_zero, dmp_one, dmp_ground,
dmp_zero_p, dmp_one_p,
dmp_multi_deflate, dmp_inflate,
dup_to_raw_dict, dup_from_raw_dict,
dmp_raise, dmp_apply_pairs,
dmp_inject, dmp_zeros
)
from sympy.polys.densearith import (
dup_add_term, dmp_add_term,
dup_mul_term, dmp_mul_term,
dup_lshift, dup_rshift,
dup_neg, dmp_neg,
dup_add, dmp_add,
dup_sub, dmp_sub,
dup_mul, dmp_mul,
dup_pow, dmp_pow,
dup_div, dmp_div,
dup_rem, dmp_rem,
dup_quo, dmp_quo,
dup_exquo, dmp_exquo,
dup_prem, dmp_prem,
dup_expand, dmp_expand,
dup_add_mul, dup_sub_mul,
dup_mul_ground, dmp_mul_ground,
dup_quo_ground, dmp_quo_ground,
dup_exquo_ground, dmp_exquo_ground,
dup_max_norm, dmp_max_norm
)
from sympy.polys.galoistools import (
gf_int, gf_crt
)
from sympy.polys.polyerrors import (
HeuristicGCDFailed,
HomomorphismFailed,
RefinementFailed,
NotInvertible,
DomainError
)
from sympy.ntheory import nextprime
from sympy.utilities import (
cythonized, variations
)
from random import random as randfloat
def dup_ground_to_ring(f, K0, K1=None, **args):
    """Clear denominators, i.e. transform `K_0` to `K_1`. """
    if K1 is None:
        K1 = K0.get_ring()
    # lcm of all coefficient denominators.
    common = K1.one
    for c in f:
        common = K1.lcm(common, K0.denom(c))
    if not K1.is_one(common):
        f = dup_mul_ground(f, common, K0)
    if args.get('convert'):
        return common, dup_convert(f, K0, K1)
    return common, f
@cythonized("v,w")
def _rec_ground_to_ring(g, v, K0, K1):
"""XXX"""
common = K1.one
if not v:
for c in g:
common = K1.lcm(common, K0.denom(c))
else:
w = v-1
for c in g:
common = K1.lcm(common, _rec_ground_to_ring(c, w, K0, K1))
return common
@cythonized("u")
def dmp_ground_to_ring(f, u, K0, K1=None, **args):
"""Clear denominators, i.e. transform `K_0` to `K_1`. """
if not u:
return dup_ground_to_ring(f, K0, K1)
if K1 is None:
K1 = K0.get_ring()
common = _rec_ground_to_ring(f, u, K0, K1)
if not K1.is_one(common):
f = dmp_mul_ground(f, common, u, K0)
if not args.get('convert'):
return common, f
else:
return common, dmp_convert(f, u, K0, K1)
@cythonized("m,n,i,j")
def dup_integrate(f, m, K):
"""Computes indefinite integral of `f` in `K[x]`. """
if m <= 0 or not f:
return f
g = [K.zero]*m
for i, c in enumerate(reversed(f)):
n = i+1
for j in xrange(1, m):
n *= i+j+1
g.insert(0, K.quo(c, K(n)))
return g
@cythonized("m,u,v,n,i,j")
def dmp_integrate(f, m, u, K):
"""Computes indefinite integral of `f` in `x_0` in `K[X]`. """
if not u:
return dup_integrate(f, m, K)
if m <= 0 or dmp_zero_p(f, u):
return f
g, v = dmp_zeros(m, u-1, K), u-1
for i, c in enumerate(reversed(f)):
n = i+1
for j in xrange(1, m):
n *= i+j+1
g.insert(0, dmp_quo_ground(c, K(n), v, K))
return g
@cythonized("m,v,w,i,j")
def _rec_integrate_in(g, m, v, i, j, K):
"""XXX"""
if i == j:
return dmp_integrate(g, m, v, K)
w, i = v-1, i+1
return dmp_strip([ _rec_integrate_in(c, m, w, i, j, K) for c in g ], v)
@cythonized("m,j,u")
def dmp_integrate_in(f, m, j, u, K):
"""Computes indefinite integral of `f` in `x_j` in `K[X]`. """
if j < 0 or j > u:
raise IndexError("-%s <= j < %s expected, got %s" % (u, u, j))
return _rec_integrate_in(f, m, u, 0, j, K)
@cythonized("m,n,i")
def dup_diff(f, m, K):
"""m-th order derivative of a polynomial in `K[x]`. """
if m <= 0:
return f
n = dup_degree(f)
if n < m:
return []
deriv, c = [], K.one
for i in xrange(0, m):
c, n = c*K(n), n-1
for coeff in f[:-m]:
deriv.append(coeff*c)
c, n = K(n)*K.exquo(c, K(n+m)), n-1
return deriv
@cythonized("u,v,m,n,i")
def dmp_diff(f, m, u, K):
"""m-th order derivative in `x_0` of a polynomial in `K[X]`. """
if not u:
return dup_diff(f, m, K)
if m <= 0:
return f
n = dmp_degree(f, u)
if n < m:
return dmp_zero(u)
deriv, c, v = [], K.one, u-1
for i in xrange(0, m):
c, n = c*K(n), n-1
for coeff in f[:-m]:
h = dmp_mul_ground(coeff, c, v, K)
c, n = K(n)*K.exquo(c, K(n+m)), n-1
deriv.append(h)
return deriv
@cythonized("m,v,w,i,j")
def _rec_diff_in(g, m, v, i, j, K):
"""XXX"""
if i == j:
return dmp_diff(g, m, v, K)
w, i = v-1, i+1
return dmp_strip([ _rec_diff_in(c, m, w, i, j, K) for c in g ], v)
@cythonized("m,j,u")
def dmp_diff_in(f, m, j, u, K):
"""m-th order derivative in `x_j` of a polynomial in `K[X]`. """
if j < 0 or j > u:
raise IndexError("-%s <= j < %s expected, got %s" % (u, u, j))
return _rec_diff_in(f, m, u, 0, j, K)
def dup_eval(f, a, K):
    """Evaluate a polynomial at `x = a` in `K[x]` using the Horner scheme. """
    if not a:
        # At zero only the trailing coefficient survives.
        return dup_TC(f, K)
    result = K.zero
    for c in f:
        result = result*a + c
    return result
@cythonized("u,v")
def dmp_eval(f, a, u, K):
"""Evaluate a polynomial at `x_0 = a` in `K[X]` using Horner scheme. """
if not u:
return dup_eval(f, a, K)
if not a:
return dmp_TC(f, K)
result, v = dmp_LC(f, K), u-1
for coeff in f[1:]:
result = dmp_mul_ground(result, a, v, K)
result = dmp_add(result, coeff, v, K)
return result
@cythonized("v,i,j")
def _rec_eval_in(g, a, v, i, j, K):
"""XXX"""
if i == j:
return dmp_eval(g, a, v, K)
v, i = v-1, i+1
return dmp_strip([ _rec_eval_in(c, a, v, i, j, K) for c in g ], v)
@cythonized("u")
def dmp_eval_in(f, a, j, u, K):
"""Evaluate a polynomial at `x_j = a` in `K[X]` using Horner scheme. """
if j < 0 or j > u:
raise IndexError("-%s <= j < %s expected, got %s" % (u, u, j))
return _rec_eval_in(f, a, u, 0, j, K)
@cythonized("i,u")
def _rec_eval_tail(g, i, A, u, K):
"""XXX"""
if i == u:
return dup_eval(g, A[-1], K)
else:
h = [ _rec_eval_tail(c, i+1, A, u, K) for c in g ]
if i < u - len(A) + 1:
return h
else:
return dup_eval(h, A[-u+i-1], K)
@cythonized("u")
def dmp_eval_tail(f, A, u, K):
"""Evaluate a polynomial at `x_j = a_j, ...` in `K[X]`. """
if not A:
return f
if dmp_zero_p(f, u):
return dmp_zero(u - len(A))
e = _rec_eval_tail(f, 0, A, u, K)
if u == len(A)-1:
return e
else:
return dmp_strip(e, u - len(A))
@cythonized("m,v,i,j")
def _rec_diff_eval(g, m, a, v, i, j, K):
"""XXX"""
if i == j:
return dmp_eval(dmp_diff(g, m, v, K), a, v, K)
v, i = v-1, i+1
return dmp_strip([ _rec_diff_eval(c, m, a, v, i, j, K) for c in g ], v)
@cythonized("m,j,u")
def dmp_diff_eval_in(f, m, a, j, u, K):
"""Differentiate and evaluate a polynomial in `x_j` at `a` in `K[X]`. """
if j > u:
raise IndexError("-%s <= j < %s expected, got %s" % (u, u, j))
if not j:
return dmp_eval(dmp_diff(f, m, u, K), a, u, K)
return _rec_diff_eval(f, m, a, u, 0, j, K)
def dup_half_gcdex(f, g, K):
    """Half extended Euclidean algorithm in `F[x]`.

    Returns ``(s, h)`` such that ``h = gcd(f, g)`` (monic) and
    ``s*f = h (mod g)``.
    """
    if not K.has_Field:
        raise DomainError('computation can be done only in a field')
    # a tracks the Bezout coefficient of f through the remainder sequence.
    a, b = [K.one], []
    while g:
        q, r = dup_div(f, g, K)
        f, g = g, r
        a, b = b, dup_sub_mul(a, q, b, K)
    # Normalize so that the returned gcd is monic.
    a = dup_quo_ground(a, dup_LC(f, K), K)
    f = dup_monic(f, K)
    return a, f
def dup_gcdex(f, g, K):
    """Extended Euclidean algorithm in `F[x]`. """
    s, h = dup_half_gcdex(f, g, K)
    # Recover t from s*f + t*g = h, i.e. t = (h - s*f)/g (exact division).
    t = dup_exquo(dup_sub_mul(h, s, f, K), g, K)
    return s, t, h
def dup_invert(f, g, K):
    """Compute multiplicative inverse of `f` in `F[x]/(g(x))`. """
    s, h = dup_half_gcdex(f, g, K)
    if h != [K.one]:
        # f and g share a nontrivial factor, so f is not invertible mod g.
        raise NotInvertible("zero divisor")
    return dup_rem(s, g, K)
@cythonized("n,m,d,k")
def dup_inner_subresultants(f, g, K):
"""Subresultant PRS algorithm in `K[x]`. """
n = dup_degree(f)
m = dup_degree(g)
if n < m:
f, g = g, f
n, m = m, n
R = [f, g]
d = n - m
b = (-K.one)**(d+1)
c = -K.one
B, D = [b], [d]
if not f or not g:
return R, B, D
h = dup_prem(f, g, K)
h = dup_mul_ground(h, b, K)
while h:
k = dup_degree(h)
R.append(h)
lc = dup_LC(g, K)
if not d:
q = c
else:
q = c**(d-1)
c = K.exquo((-lc)**d, q)
b = -lc * c**(m-k)
f, g, m, d = g, h, k, m-k
B.append(b)
D.append(d)
h = dup_prem(f, g, K)
h = dup_exquo_ground(h, b, K)
return R, B, D
def dup_subresultants(f, g, K):
    """Computes subresultant PRS of two polynomials in `K[x]`. """
    R, _, _ = dup_inner_subresultants(f, g, K)
    return R
@cythonized("s,i,du,dv,dw")
def dup_prs_resultant(f, g, K):
"""Resultant algorithm in `K[x]` using subresultant PRS. """
if not f or not g:
return (K.zero, [])
R, B, D = dup_inner_subresultants(f, g, K)
if dup_degree(R[-1]) > 0:
return (K.zero, R)
if R[-2] == [K.one]:
return (dup_LC(R[-1], K), R)
s, i = 1, 1
p, q = K.one, K.one
for b, d in zip(B, D)[:-1]:
du = dup_degree(R[i-1])
dv = dup_degree(R[i ])
dw = dup_degree(R[i+1])
if du % 2 and dv % 2:
s = -s
lc, i = dup_LC(R[i], K), i+1
p *= b**dv * lc**(du-dw)
q *= lc**(dv*(1+d))
if s < 0:
p = -p
i = dup_degree(R[-2])
res = dup_LC(R[-1], K)**i
res = K.quo(res*p, q)
return res, R
def dup_resultant(f, g, K):
    """Computes resultant of two polynomials in `K[x]`. """
    res, _ = dup_prs_resultant(f, g, K)
    return res
@cythonized("u,v,n,m,d,k")
def dmp_inner_subresultants(f, g, u, K):
"""Subresultant PRS algorithm in `K[X]`. """
if not u:
return dup_inner_subresultants(f, g, K)
n = dmp_degree(f, u)
m = dmp_degree(g, u)
if n < m:
f, g = g, f
n, m = m, n
R = [f, g]
d = n - m
v = u - 1
b = dmp_pow(dmp_ground(-K.one, v), d+1, v, K)
c = dmp_ground(-K.one, v)
B, D = [b], [d]
if dmp_zero_p(f, u) or dmp_zero_p(g, u):
return R, B, D
h = dmp_prem(f, g, u, K)
h = dmp_mul_term(h, b, 0, u, K)
while not dmp_zero_p(h, u):
k = dmp_degree(h, u)
R.append(h)
lc = dmp_LC(g, K)
p = dmp_pow(dmp_neg(lc, v, K), d, v, K)
if not d:
q = c
else:
q = dmp_pow(c, d-1, v, K)
c = dmp_exquo(p, q, v, K)
b = dmp_mul(dmp_neg(lc, v, K),
dmp_pow(c, m-k, v, K), v, K)
f, g, m, d = g, h, k, m-k
B.append(b)
D.append(d)
h = dmp_prem(f, g, u, K)
h = [ dmp_exquo(ch, b, v, K) for ch in h ]
return R, B, D
@cythonized("u")
def dmp_subresultants(f, g, u, K):
"""Computes subresultant PRS of two polynomials in `K[X]`. """
return dmp_inner_subresultants(f, g, u, K)[0]
@cythonized("u,v,s,i,d,du,dv,dw")
def dmp_prs_resultant(f, g, u, K):
"""Resultant algorithm in `K[X]` using subresultant PRS. """
if not u:
return dup_prs_resultant(f, g, K)
if dmp_zero_p(f, u) or dmp_zero_p(g, u):
return (dmp_zero(u-1), [])
R, B, D = dmp_inner_subresultants(f, g, u, K)
if dmp_degree(R[-1], u) > 0:
return (dmp_zero(u-1), R)
if dmp_one_p(R[-2], u, K):
return (dmp_LC(R[-1], K), R)
s, i, v = 1, 1, u-1
p = dmp_one(v, K)
q = dmp_one(v, K)
for b, d in zip(B, D)[:-1]:
du = dmp_degree(R[i-1], u)
dv = dmp_degree(R[i ], u)
dw = dmp_degree(R[i+1], u)
if du % 2 and dv % 2:
s = -s
lc, i = dmp_LC(R[i], K), i+1
p = dmp_mul(dmp_mul(p, dmp_pow(b, dv, v, K), v, K),
dmp_pow(lc, du-dw, v, K), v, K)
q = dmp_mul(q, dmp_pow(lc, dv*(1+d), v, K), v, K)
_, p, q = dmp_inner_gcd(p, q, v, K)
if s < 0:
p = dmp_neg(p, v, K)
i = dmp_degree(R[-2], u)
res = dmp_pow(dmp_LC(R[-1], K), i, v, K)
res = dmp_quo(dmp_mul(res, p, v, K), q, v, K)
return res, R
@cythonized("u,v,n,m,N,M,B")
def dmp_zz_modular_resultant(f, g, p, u, K):
"""Compute resultant of `f` and `g` modulo a prime `p`. """
if not u:
return gf_int(dup_prs_resultant(f, g, K)[0] % p, p)
v = u - 1
n = dmp_degree(f, u)
m = dmp_degree(g, u)
N = dmp_degree_in(f, 1, u)
M = dmp_degree_in(g, 1, u)
B = n*M + m*N
D, a = [K.one], -K.one
r = dmp_zero(v)
while dup_degree(D) <= B:
while True:
a += K.one
if a == p:
raise HomomorphismFailed('no luck')
F = dmp_eval_in(f, gf_int(a, p), 1, u, K)
if dmp_degree(F, v) == n:
G = dmp_eval_in(g, gf_int(a, p), 1, u, K)
if dmp_degree(G, v) == m:
break
R = dmp_zz_modular_resultant(F, G, p, v, K)
e = dmp_eval(r, a, v, K)
if not v:
R = dup_strip([R])
e = dup_strip([e])
else:
R = [R]
e = [e]
d = K.invert(dup_eval(D, a, K), p)
d = dup_mul_ground(D, d, K)
d = dmp_raise(d, v, 0, K)
c = dmp_mul(d, dmp_sub(R, e, v, K), v, K)
r = dmp_add(r, c, v, K)
r = dmp_ground_trunc(r, p, v, K)
D = dup_mul(D, [K.one, -a], K)
D = dup_trunc(D, p, K)
return r
def _collins_crt(r, R, P, p, K):
    """Chinese-remainder step for Collins's resultant algorithm: combine
    residues ``r`` mod ``P`` and ``R`` mod ``p`` into a symmetric residue
    mod ``P*p``."""
    combined = gf_crt([r, R], [P, p], K)
    return gf_int(combined, P*p)
@cythonized("u,v,n,m")
def dmp_zz_collins_resultant(f, g, u, K):
"""Collins's modular resultant algorithm in `Z[X]`. """
n = dmp_degree(f, u)
m = dmp_degree(g, u)
if n < 0 or m < 0:
return dmp_zero(u-1)
A = dmp_max_norm(f, u, K)
B = dmp_max_norm(g, u, K)
a = dmp_ground_LC(f, u, K)
b = dmp_ground_LC(g, u, K)
v = u - 1
B = K(2)*K.factorial(n+m)*A**m*B**n
r, p, P = dmp_zero(v), K.one, K.one
while P <= B:
p = K(nextprime(p))
while not (a % p) or not (b % p):
p = K(nextprime(p))
F = dmp_ground_trunc(f, p, u, K)
G = dmp_ground_trunc(g, p, u, K)
try:
R = dmp_zz_modular_resultant(F, G, p, u, K)
except HomomorphismFailed:
continue
if K.is_one(P):
r = R
else:
r = dmp_apply_pairs(r, R, _collins_crt, (P, p, K), v, K)
P *= p
return r
@cythonized("u,n,m")
def dmp_qq_collins_resultant(f, g, u, K0):
"""Collins's modular resultant algorithm in `Q[X]`. """
n = dmp_degree(f, u)
m = dmp_degree(g, u)
if n < 0 or m < 0:
return dmp_zero(u-1)
K1 = K0.get_ring()
cf, f = dmp_ground_to_ring(f, u, K0, K1)
cg, g = dmp_ground_to_ring(g, u, K0, K1)
f = dmp_convert(f, u, K0, K1)
g = dmp_convert(g, u, K0, K1)
r = dmp_zz_collins_resultant(f, g, u, K1)
r = dmp_convert(r, u-1, K1, K0)
c = K0.convert(cf**m * cg**n, K1)
return dmp_exquo_ground(r, c, u-1, K0)
USE_COLLINS_RESULTANT = 0
@cythonized("u")
def dmp_resultant(f, g, u, K):
"""Computes resultant of two polynomials in `K[X]`. """
if not u:
return dup_resultant(f, g, K)
if K.has_Field:
if USE_COLLINS_RESULTANT and K.is_QQ:
return dmp_qq_collins_resultant(f, g, u, K)
else:
if USE_COLLINS_RESULTANT and K.is_ZZ:
return dmp_zz_collins_resultant(f, g, u, K)
return dmp_prs_resultant(f, g, u, K)[0]
@cythonized("d,s")
def dup_discriminant(f, K):
"""Computes discriminant of a polynomial in `K[x]`. """
d = dup_degree(f)
if d <= 0:
return K.zero
else:
s = (-1)**((d*(d-1)) // 2)
c = dup_LC(f, K)
r = dup_resultant(f, dup_diff(f, 1, K), K)
return K.quo(r, c*K(s))
@cythonized("u,v,d,s")
def dmp_discriminant(f, u, K):
"""Computes discriminant of a polynomial in `K[X]`. """
if not u:
return dup_discriminant(f, K)
d, v = dmp_degree(f, u), u-1
if d <= 0:
return dmp_zero(v)
else:
s = (-1)**((d*(d-1)) // 2)
c = dmp_LC(f, K)
r = dmp_resultant(f, dmp_diff(f, 1, u, K), u, K)
c = dmp_mul_ground(c, K(s), v, K)
return dmp_quo(r, c, v, K)
def _dup_rr_trivial_gcd(f, g, K):
    """Handle trivial cases in GCD algorithm over a ring. """
    if not f and not g:
        return [], [], []
    if not f:
        # gcd(0, g) is g, normalized to a nonnegative leading coefficient.
        if K.is_nonnegative(dup_LC(g, K)):
            return g, [], [K.one]
        return dup_neg(g, K), [], [-K.one]
    if not g:
        if K.is_nonnegative(dup_LC(f, K)):
            return f, [K.one], []
        return dup_neg(f, K), [-K.one], []
    return None
def _dup_ff_trivial_gcd(f, g, K):
    """Handle trivial cases in GCD algorithm over a field. """
    if not f and not g:
        return [], [], []
    if not f:
        # gcd(0, g) is the monic version of g; the cofactor is its lc.
        return dup_monic(g, K), [], [dup_LC(g, K)]
    if not g:
        return dup_monic(f, K), [dup_LC(f, K)], []
    return None
USE_DMP_SIMPLIFY_GCD = 1
@cythonized("u")
def _dmp_rr_trivial_gcd(f, g, u, K):
"""Handle trivial cases in GCD algorithm over a ring. """
zero_f = dmp_zero_p(f, u)
zero_g = dmp_zero_p(g, u)
if zero_f and zero_g:
return tuple(dmp_zeros(3, u, K))
elif zero_f:
if K.is_nonnegative(dmp_ground_LC(g, u, K)):
return g, dmp_zero(u), dmp_one(u, K)
else:
return dmp_neg(g, u, K), dmp_zero(u), dmp_ground(-K.one, u)
elif zero_g:
if K.is_nonnegative(dmp_ground_LC(f, u, K)):
return f, dmp_one(u, K), dmp_zero(u)
else:
return dmp_neg(f, u, K), dmp_ground(-K.one, u), dmp_zero(u)
elif USE_DMP_SIMPLIFY_GCD:
return _dmp_simplify_gcd(f, g, u, K)
else:
return None
@cythonized("u")
def _dmp_ff_trivial_gcd(f, g, u, K):
"""Handle trivial cases in GCD algorithm over a field. """
zero_f = dmp_zero_p(f, u)
zero_g = dmp_zero_p(g, u)
if zero_f and zero_g:
return tuple(dmp_zeros(3, u, K))
elif zero_f:
return (dmp_ground_monic(g, u, K),
dmp_zero(u),
dmp_ground(dmp_ground_LC(g, u, K), u))
elif zero_g:
return (dmp_ground_monic(f, u, K),
dmp_ground(dmp_ground_LC(f, u, K), u),
dmp_zero(u))
elif USE_DMP_SIMPLIFY_GCD:
return _dmp_simplify_gcd(f, g, u, K)
else:
return None
@cythonized("u,v,df,dg")
def _dmp_simplify_gcd(f, g, u, K):
"""Try to eliminate `x_0` from GCD computation in `K[X]`. """
df = dmp_degree(f, u)
dg = dmp_degree(g, u)
if df > 0 and dg > 0:
return None
if not (df or dg):
F = dmp_LC(f, K)
G = dmp_LC(g, K)
else:
if not df:
F = dmp_LC(f, K)
G = dmp_content(g, u, K)
else:
F = dmp_content(f, u, K)
G = dmp_LC(g, K)
v = u - 1
h = dmp_gcd(F, G, v, K)
cff = [ dmp_exquo(cf, h, v, K) for cf in f ]
cfg = [ dmp_exquo(cg, h, v, K) for cg in g ]
return [h], cff, cfg
def dup_rr_prs_gcd(f, g, K):
    """Computes polynomial GCD using subresultants over a ring.

    Returns ``(h, cff, cfg)`` with ``h = gcd(f, g)``, ``cff = f/h`` and
    ``cfg = g/h``.
    """
    result = _dup_rr_trivial_gcd(f, g, K)
    if result is not None:
        return result
    # gcd = content part * primitive part.
    fc, F = dup_primitive(f, K)
    gc, G = dup_primitive(g, K)
    c = K.gcd(fc, gc)
    # The last element of the subresultant PRS is an (unnormalized) gcd.
    h = dup_subresultants(F, G, K)[-1]
    _, h = dup_primitive(h, K)
    # Fold the sign of h into the content so h*c has a canonical sign.
    if K.is_negative(dup_LC(h, K)):
        c = -c
    h = dup_mul_ground(h, c, K)
    cff = dup_exquo(f, h, K)
    cfg = dup_exquo(g, h, K)
    return h, cff, cfg
def dup_ff_prs_gcd(f, g, K):
    """Computes polynomial GCD using subresultants over a field. """
    trivial = _dup_ff_trivial_gcd(f, g, K)
    if trivial is not None:
        return trivial
    # The last element of the subresultant PRS, made monic, is the gcd.
    h = dup_monic(dup_subresultants(f, g, K)[-1], K)
    return h, dup_exquo(f, h, K), dup_exquo(g, h, K)
@cythonized("u")
def dmp_rr_prs_gcd(f, g, u, K):
"""Computes polynomial GCD using subresultants over a ring. """
if not u:
return dup_rr_prs_gcd(f, g, K)
result = _dmp_rr_trivial_gcd(f, g, u, K)
if result is not None:
return result
fc, F = dmp_primitive(f, u, K)
gc, G = dmp_primitive(g, u, K)
h = dmp_subresultants(F, G, u, K)[-1]
c, _, _ = dmp_rr_prs_gcd(fc, gc, u-1, K)
if K.is_negative(dmp_ground_LC(h, u, K)):
h = dmp_neg(h, u, K)
_, h = dmp_primitive(h, u, K)
h = dmp_mul_term(h, c, 0, u, K)
cff = dmp_exquo(f, h, u, K)
cfg = dmp_exquo(g, h, u, K)
return h, cff, cfg
@cythonized("u")
def dmp_ff_prs_gcd(f, g, u, K):
"""Computes polynomial GCD using subresultants over a field. """
if not u:
return dup_ff_prs_gcd(f, g, K)
result = _dmp_ff_trivial_gcd(f, g, u, K)
if result is not None:
return result
fc, f = dmp_primitive(f, u, K)
gc, g = dmp_primitive(g, u, K)
h = dmp_subresultants(f, g, u, K)[-1]
c, _, _ = dmp_ff_prs_gcd(fc, gc, u-1, K)
_, h = dmp_primitive(h, u, K)
h = dmp_mul_term(h, c, 0, u, K)
h = dmp_ground_monic(h, u, K)
cff = dmp_exquo(f, h, u, K)
cfg = dmp_exquo(g, h, u, K)
return h, cff, cfg
HEU_GCD_MAX = 6
def _dup_zz_gcd_interpolate(h, x, K):
"""Interpolate polynomial GCD from integer GCD. """
f = []
while h:
g = h % x
if g > x // 2:
g -= x
f.insert(0, g)
h = (h-g) // x
return f
@cythonized("i,df,dg")
def dup_zz_heu_gcd(f, g, K):
"""Heuristic polynomial GCD in `Z[x]`.
Given univariate polynomials `f` and `g` in `Z[x]`, returns their GCD
and cofactors, i.e. polynomials `h`, `cff` and `cfg` such that::
h = gcd(f, g), cff = quo(f, h) and cfg = quo(g, h)
The algorithm is purely heuristic which means it may fail to compute
the GCD. This will be signaled by raising an exception. In this case
you will need to switch to another GCD method.
The algorithm computes the polynomial GCD by evaluating polynomials
f and g at certain points and computing (fast) integer GCD of those
evaluations. The polynomial GCD is recovered from the integer image
by interpolation. The final step is to verify if the result is the
correct GCD. This gives cofactors as a side effect.
References
==========
.. [Liao95] Hsin-Chao Liao, R. Fateman, Evaluation of the heuristic
polynomial GCD, International Symposium on Symbolic and Algebraic
Computation (ISSAC), ACM Press, Montreal, Quebec, Canada, 1995,
pp. 240--247
"""
result = _dup_rr_trivial_gcd(f, g, K)
if result is not None:
return result
df = dup_degree(f)
dg = dup_degree(g)
gcd, f, g = dup_extract(f, g, K)
if df == 0 or dg == 0:
return [gcd], f, g
f_norm = dup_max_norm(f, K)
g_norm = dup_max_norm(g, K)
B = 2*min(f_norm, g_norm) + 29
x = max(min(B, 99*K.sqrt(B)),
2*min(f_norm // abs(dup_LC(f, K)),
g_norm // abs(dup_LC(g, K))) + 2)
for i in xrange(0, HEU_GCD_MAX):
ff = dup_eval(f, x, K)
gg = dup_eval(g, x, K)
if ff and gg:
h = K.gcd(ff, gg)
cff = ff // h
cfg = gg // h
h = _dup_zz_gcd_interpolate(h, x, K)
h = dup_primitive(h, K)[1]
cff_, r = dup_div(f, h, K)
if not r:
cfg_, r = dup_div(g, h, K)
if not r:
h = dup_mul_ground(h, gcd, K)
return h, cff_, cfg_
cff = _dup_zz_gcd_interpolate(cff, x, K)
h, r = dup_div(f, cff, K)
if not r:
cfg_, r = dup_div(g, h, K)
if not r:
h = dup_mul_ground(h, gcd, K)
return h, cff, cfg_
cfg = _dup_zz_gcd_interpolate(cfg, x, K)
h, r = dup_div(g, cfg, K)
if not r:
cff_, r = dup_div(f, h, K)
if not r:
h = dup_mul_ground(h, gcd, K)
return h, cff, cfg
x = 73794*x * K.sqrt(K.sqrt(x)) // 27011
raise HeuristicGCDFailed('no luck')
@cythonized("v")
def _dmp_zz_gcd_interpolate(h, x, v, K):
"""Interpolate polynomial GCD from integer GCD. """
f = []
while not dmp_zero_p(h, v):
g = dmp_ground_trunc(h, x, v, K)
f.insert(0, g)
h = dmp_sub(h, g, v, K)
h = dmp_exquo_ground(h, x, v, K)
if K.is_negative(dmp_ground_LC(f, v+1, K)):
return dmp_neg(f, v+1, K)
else:
return f
@cythonized("u,v,i,dg,df")
def dmp_zz_heu_gcd(f, g, u, K):
    """Heuristic polynomial GCD in `Z[X]`.

    Given multivariate polynomials `f` and `g` in `Z[X]`, returns their
    GCD and cofactors, i.e. polynomials `h`, `cff` and `cfg` such that::

        h = gcd(f, g), cff = quo(f, h) and cfg = quo(g, h)

    The algorithm is purely heuristic which means it may fail to compute
    the GCD. This will be signaled by raising an exception. In this case
    you will need to switch to another GCD method.

    The algorithm computes the polynomial GCD by evaluating polynomials
    f and g at certain points and computing (fast) integer GCD of those
    evaluations. The polynomial GCD is recovered from the integer image
    by interpolation. The evaluation process reduces f and g variable by
    variable into a large integer. The final step is to verify if the
    interpolated polynomial is the correct GCD. This gives cofactors of
    the input polynomials as a side effect.

    References
    ==========

    .. [Liao95] Hsin-Chao Liao, R. Fateman, Evaluation of the heuristic
        polynomial GCD, International Symposium on Symbolic and Algebraic
        Computation (ISSAC), ACM Press, Montreal, Quebec, Canada, 1995,
        pp. 240--247
    """
    if not u:
        return dup_zz_heu_gcd(f, g, K)
    # Handle zero/constant inputs cheaply.
    result = _dmp_rr_trivial_gcd(f, g, u, K)
    if result is not None:
        return result
    # NOTE(review): df/dg are currently unused here, unlike in
    # dup_zz_heu_gcd where they trigger an early return.
    df = dmp_degree(f, u)
    dg = dmp_degree(g, u)
    # Factor out the common ground-domain content first.
    gcd, f, g = dmp_ground_extract(f, g, u, K)
    f_norm = dmp_max_norm(f, u, K)
    g_norm = dmp_max_norm(g, u, K)
    # Evaluation point heuristics, cf. [Liao95].
    B = 2*min(f_norm, g_norm) + 29
    x = max(min(B, 99*K.sqrt(B)),
            2*min(f_norm // abs(dmp_ground_LC(f, u, K)),
                  g_norm // abs(dmp_ground_LC(g, u, K))) + 2)
    for i in xrange(0, HEU_GCD_MAX):
        # Evaluate the outermost variable at x and recurse one level down.
        ff = dmp_eval(f, x, u, K)
        gg = dmp_eval(g, x, u, K)
        v = u - 1
        if not (dmp_zero_p(ff, v) or dmp_zero_p(gg, v)):
            h, cff, cfg = dmp_zz_heu_gcd(ff, gg, v, K)
            # Candidate 1: interpolate the GCD image and verify by division.
            h = _dmp_zz_gcd_interpolate(h, x, v, K)
            h = dmp_ground_primitive(h, u, K)[1]
            cff_, r = dmp_div(f, h, u, K)
            if dmp_zero_p(r, u):
                cfg_, r = dmp_div(g, h, u, K)
                if dmp_zero_p(r, u):
                    h = dmp_mul_ground(h, gcd, u, K)
                    return h, cff_, cfg_
            # Candidate 2: interpolate the cofactor of f instead.
            cff = _dmp_zz_gcd_interpolate(cff, x, v, K)
            h, r = dmp_div(f, cff, u, K)
            if dmp_zero_p(r, u):
                cfg_, r = dmp_div(g, h, u, K)
                if dmp_zero_p(r, u):
                    h = dmp_mul_ground(h, gcd, u, K)
                    return h, cff, cfg_
            # Candidate 3: interpolate the cofactor of g.
            cfg = _dmp_zz_gcd_interpolate(cfg, x, v, K)
            h, r = dmp_div(g, cfg, u, K)
            if dmp_zero_p(r, u):
                cff_, r = dmp_div(f, h, u, K)
                if dmp_zero_p(r, u):
                    h = dmp_mul_ground(h, gcd, u, K)
                    return h, cff_, cfg
        # All candidates failed: move on to the next evaluation point.
        x = 73794*x * K.sqrt(K.sqrt(x)) // 27011
    raise HeuristicGCDFailed('no luck')
def dup_qq_heu_gcd(f, g, K0):
    """Heuristic polynomial GCD in `Q[x]`.

    Clears denominators, runs the integer heuristic GCD over the
    associated ring, and converts the (monic) result back to `K0`.
    """
    trivial = _dup_ff_trivial_gcd(f, g, K0)
    if trivial is not None:
        return trivial
    K1 = K0.get_ring()
    cf, f = dup_ground_to_ring(f, K0, K1)
    cg, g = dup_ground_to_ring(g, K0, K1)
    h, cff, cfg = dup_zz_heu_gcd(
        dup_convert(f, K0, K1),
        dup_convert(g, K0, K1), K1)
    h = dup_convert(h, K1, K0)
    c = dup_LC(h, K0)
    h = dup_monic(h, K0)
    # Rescale cofactors to compensate for making h monic and for the
    # denominators cleared from f and g.
    cff = dup_mul_ground(dup_convert(cff, K1, K0), K0.quo(c, cf), K0)
    cfg = dup_mul_ground(dup_convert(cfg, K1, K0), K0.quo(c, cg), K0)
    return h, cff, cfg
@cythonized("u")
def dmp_qq_heu_gcd(f, g, u, K0):
    """Heuristic polynomial GCD in `Q[X]`.

    Multivariate counterpart of `dup_qq_heu_gcd`: clears denominators,
    runs the integer heuristic GCD, converts the (monic) result back.
    """
    trivial = _dmp_ff_trivial_gcd(f, g, u, K0)
    if trivial is not None:
        return trivial
    K1 = K0.get_ring()
    cf, f = dmp_ground_to_ring(f, u, K0, K1)
    cg, g = dmp_ground_to_ring(g, u, K0, K1)
    h, cff, cfg = dmp_zz_heu_gcd(
        dmp_convert(f, u, K0, K1),
        dmp_convert(g, u, K0, K1), u, K1)
    h = dmp_convert(h, u, K1, K0)
    c = dmp_ground_LC(h, u, K0)
    h = dmp_ground_monic(h, u, K0)
    # Rescale cofactors to compensate for making h monic and for the
    # denominators cleared from f and g.
    cff = dmp_mul_ground(dmp_convert(cff, u, K1, K0), K0.quo(c, cf), u, K0)
    cfg = dmp_mul_ground(dmp_convert(cfg, u, K1, K0), K0.quo(c, cg), u, K0)
    return h, cff, cfg
# Flags enabling the heuristic GCD algorithms over ZZ/QQ; when a
# heuristic attempt fails the code falls back to subresultant PRS.
USE_DUP_HEU_GCD = 1
USE_DMP_HEU_GCD = 1
def dup_inner_gcd(f, g, K):
    """Computes polynomial GCD and cofactors of `f` and `g` in `K[x]`. """
    if K.has_Field or not K.is_Exact:
        # Field case: try the heuristic algorithm over QQ first.
        if USE_DUP_HEU_GCD and K.is_QQ:
            try:
                return dup_qq_heu_gcd(f, g, K)
            except HeuristicGCDFailed:
                pass
        return dup_ff_prs_gcd(f, g, K)
    # Ring case: try the heuristic algorithm over ZZ first.
    if USE_DUP_HEU_GCD and K.is_ZZ:
        try:
            return dup_zz_heu_gcd(f, g, K)
        except HeuristicGCDFailed:
            pass
    return dup_rr_prs_gcd(f, g, K)
@cythonized("u")
def _dmp_inner_gcd(f, g, u, K):
    """Helper function for `dmp_inner_gcd()`. """
    if K.has_Field or not K.is_Exact:
        # Field case: try the heuristic algorithm over QQ first.
        if USE_DMP_HEU_GCD and K.is_QQ:
            try:
                return dmp_qq_heu_gcd(f, g, u, K)
            except HeuristicGCDFailed:
                pass
        return dmp_ff_prs_gcd(f, g, u, K)
    # Ring case: try the heuristic algorithm over ZZ first.
    if USE_DMP_HEU_GCD and K.is_ZZ:
        try:
            return dmp_zz_heu_gcd(f, g, u, K)
        except HeuristicGCDFailed:
            pass
    return dmp_rr_prs_gcd(f, g, u, K)
@cythonized("u")
def dmp_inner_gcd(f, g, u, K):
    """Computes polynomial GCD and cofactors of `f` and `g` in `K[X]`. """
    if not u:
        return dup_inner_gcd(f, g, K)
    # Deflate common exponent structure first, inflate the results back.
    J, (f, g) = dmp_multi_deflate((f, g), u, K)
    h, cff, cfg = _dmp_inner_gcd(f, g, u, K)
    h = dmp_inflate(h, J, u, K)
    cff = dmp_inflate(cff, J, u, K)
    cfg = dmp_inflate(cfg, J, u, K)
    return h, cff, cfg
def dup_gcd(f, g, K):
    """Computes polynomial GCD of `f` and `g` in `K[x]`. """
    h, _, _ = dup_inner_gcd(f, g, K)
    return h
@cythonized("u")
def dmp_gcd(f, g, u, K):
    """Computes polynomial GCD of `f` and `g` in `K[X]`. """
    h, _, _ = dmp_inner_gcd(f, g, u, K)
    return h
def dup_rr_lcm(f, g, K):
    """Computes polynomial LCM over a ring in `K[x]`. """
    fc, f = dup_primitive(f, K)
    gc, g = dup_primitive(g, K)
    c = K.lcm(fc, gc)
    # lcm(f, g) = f*g / gcd(f, g), rescaled by the lcm of the contents.
    product = dup_mul(f, g, K)
    common = dup_gcd(f, g, K)
    return dup_mul_ground(dup_exquo(product, common, K), c, K)
def dup_ff_lcm(f, g, K):
    """Computes polynomial LCM over a field in `K[x]`. """
    # lcm(f, g) = f*g / gcd(f, g), normalized to a monic polynomial.
    product = dup_mul(f, g, K)
    common = dup_gcd(f, g, K)
    return dup_monic(dup_exquo(product, common, K), K)
def dup_lcm(f, g, K):
    """Computes polynomial LCM of `f` and `g` in `K[x]`. """
    if K.has_Field or not K.is_Exact:
        return dup_ff_lcm(f, g, K)
    return dup_rr_lcm(f, g, K)
@cythonized("u")
def dmp_rr_lcm(f, g, u, K):
    """Computes polynomial LCM over a ring in `K[X]`. """
    fc, f = dmp_ground_primitive(f, u, K)
    gc, g = dmp_ground_primitive(g, u, K)
    c = K.lcm(fc, gc)
    # lcm(f, g) = f*g / gcd(f, g), rescaled by the lcm of the contents.
    product = dmp_mul(f, g, u, K)
    common = dmp_gcd(f, g, u, K)
    return dmp_mul_ground(dmp_exquo(product, common, u, K), c, u, K)
@cythonized("u")
def dmp_ff_lcm(f, g, u, K):
    """Computes polynomial LCM over a field in `K[X]`. """
    # lcm(f, g) = f*g / gcd(f, g), normalized to a monic polynomial.
    product = dmp_mul(f, g, u, K)
    common = dmp_gcd(f, g, u, K)
    return dmp_ground_monic(dmp_exquo(product, common, u, K), u, K)
@cythonized("u")
def dmp_lcm(f, g, u, K):
    """Computes polynomial LCM of `f` and `g` in `K[X]`. """
    if not u:
        return dup_lcm(f, g, K)
    if K.has_Field or not K.is_Exact:
        return dmp_ff_lcm(f, g, u, K)
    return dmp_rr_lcm(f, g, u, K)
def dup_trunc(f, p, K):
    """Reduce `K[x]` polynomial modulo a constant `p` in `K`. """
    if K.is_ZZ:
        # Use symmetric residues in (-p/2, p/2] for integer coefficients.
        g = []
        for c in f:
            r = c % p
            g.append(r - p if r > p // 2 else r)
    else:
        g = [ c % p for c in f ]
    return dup_strip(g)
@cythonized("u")
def dmp_trunc(f, p, u, K):
    """Reduce `K[X]` polynomial modulo a polynomial `p` in `K[Y]`. """
    v = u - 1
    # Reduce every coefficient (a polynomial in one fewer variable) mod p.
    reduced = [ dmp_rem(c, p, v, K) for c in f ]
    return dmp_strip(reduced, u)
@cythonized("u,v")
def dmp_ground_trunc(f, p, u, K):
    """Reduce `K[X]` polynomial modulo a constant `p` in `K`. """
    if not u:
        return dup_trunc(f, p, K)
    v = u - 1
    # Recurse into each coefficient.
    reduced = []
    for c in f:
        reduced.append(dmp_ground_trunc(c, p, v, K))
    return dmp_strip(reduced, u)
def dup_monic(f, K):
    """Divides all coefficients by `LC(f)` in `K[x]`. """
    if not f:
        return f
    lc = dup_LC(f, K)
    # Already monic: return unchanged.
    return f if K.is_one(lc) else dup_quo_ground(f, lc, K)
@cythonized("u")
def dmp_ground_monic(f, u, K):
    """Divides all coefficients by `LC(f)` in `K[X]`. """
    if not u:
        return dup_monic(f, K)
    if dmp_zero_p(f, u):
        return f
    lc = dmp_ground_LC(f, u, K)
    # Already monic: return unchanged.
    return f if K.is_one(lc) else dmp_quo_ground(f, lc, u, K)
def dup_rr_content(f, K):
    """Returns GCD of coefficients over a ring. """
    result = K.zero
    for coeff in f:
        result = K.gcd(result, coeff)
        if K.is_one(result):
            break  # the content cannot get any smaller
    return result
def dup_ff_content(f, K):
    """Returns GCD of coefficients over a field.

    Over a field every non-zero polynomial has unit content.
    """
    return K.zero if not f else K.one
def dup_content(f, K):
    """Returns GCD of coefficients in `K[x]`. """
    if K.has_Field or not K.is_Exact:
        return dup_ff_content(f, K)
    return dup_rr_content(f, K)
@cythonized("u,v")
def dmp_content(f, u, K):
    """Returns GCD of multivariate coefficients.

    The content is the GCD, taken in `K[X]` with one fewer variable, of
    the coefficients of `f`, normalized to a non-negative leading
    ground coefficient.
    """
    cont, v = dmp_LC(f, K), u-1
    if dmp_zero_p(f, u):
        return cont
    for c in f[1:]:
        cont = dmp_gcd(cont, c, v, K)
        if dmp_one_p(cont, v, K):
            break  # the content cannot get any smaller than one
    # Normalize the sign of the content.
    if K.is_negative(dmp_ground_LC(cont, v, K)):
        return dmp_neg(cont, v, K)
    else:
        return cont
@cythonized("u,v")
def dmp_rr_ground_content(f, u, K):
    """Returns GCD of coefficients over a ring. """
    if not u:
        return dup_rr_content(f, K)
    v = u - 1
    result = K.zero
    for c in f:
        # Fold in the ground content of each coefficient recursively.
        result = K.gcd(result, dmp_rr_ground_content(c, v, K))
        if K.is_one(result):
            break
    return result
@cythonized("u")
def dmp_ff_ground_content(f, u, K):
    """Returns GCD of coefficients over a field.

    Over a field every non-zero polynomial has unit ground content.
    """
    return K.zero if not f else K.one
@cythonized("u")
def dmp_ground_content(f, u, K):
    """Returns GCD of coefficients in `K[X]`. """
    if not u:
        return dup_content(f, K)
    if K.has_Field or not K.is_Exact:
        return dmp_ff_ground_content(f, u, K)
    return dmp_rr_ground_content(f, u, K)
def dup_rr_primitive(f, K):
    """Returns content and a primitive polynomial over a ring. """
    cont = dup_content(f, K)
    # Only divide when there is something to divide out.
    if f and not K.is_one(cont):
        return cont, dup_exquo_ground(f, cont, K)
    return cont, f
def dup_ff_primitive(f, K):
    """Returns content and a primitive polynomial over a field.

    Over a field every polynomial is primitive, so the content is one
    and `f` is returned unchanged.
    """
    return K.one, f
def dup_primitive(f, K):
    """Returns content and a primitive polynomial in `K[x]`. """
    if K.has_Field or not K.is_Exact:
        return dup_ff_primitive(f, K)
    return dup_rr_primitive(f, K)
@cythonized("u,v")
def dmp_primitive(f, u, K):
    """Returns multivariate content and a primitive polynomial.

    The content is a polynomial in one fewer variable; every
    coefficient of `f` is divided by it exactly.
    """
    cont, v = dmp_content(f, u, K), u-1
    if dmp_zero_p(f, u) or dmp_one_p(cont, v, K):
        return cont, f
    else:
        return cont, [ dmp_exquo(c, cont, v, K) for c in f ]
@cythonized("u")
def dmp_rr_ground_primitive(f, u, K):
    """Returns content and a primitive polynomial over a ring. """
    cont = dmp_ground_content(f, u, K)
    if not K.is_one(cont):
        return cont, dmp_exquo_ground(f, cont, u, K)
    return cont, f
@cythonized("u")
def dmp_ff_ground_primitive(f, u, K):
    """Returns content and a primitive polynomial over a field.

    Over a field every non-zero polynomial is primitive.
    """
    if dmp_zero_p(f, u):
        return K.zero, f
    else:
        return K.one, f
@cythonized("u")
def dmp_ground_primitive(f, u, K):
    """Returns content and a primitive polynomial in `K[X]`. """
    if not u:
        return dup_primitive(f, K)
    if dmp_zero_p(f, u):
        return K.zero, f
    if K.has_Field or not K.is_Exact:
        return dmp_ff_ground_primitive(f, u, K)
    return dmp_rr_ground_primitive(f, u, K)
def dup_sqf_p(f, K):
    """Returns `True` if `f` is a square-free polynomial in `K[x]`. """
    if not f:
        return True
    # f is square-free iff gcd(f, f') is a (non-zero) constant.
    return dup_degree(dup_gcd(f, dup_diff(f, 1, K), K)) == 0
@cythonized("u")
def dmp_sqf_p(f, u, K):
    """Returns `True` if `f` is a square-free polynomial in `K[X]`. """
    if dmp_zero_p(f, u):
        return True
    # f is square-free iff gcd(f, f') has degree zero in the main variable.
    return dmp_degree(dmp_gcd(f, dmp_diff(f, 1, u, K), u, K), u) == 0
@cythonized("s")
def dup_sqf_norm(f, K):
    """Square-free norm of `f` in `K[x]`, useful over algebraic domains.

    Returns `s`, `f` and `r` such that, after shifting `f` by `s`
    multiples of the extension element, the norm `r` (a resultant with
    the minimal polynomial) is square-free over the base domain.
    """
    if not K.is_Algebraic:
        raise DomainError("ground domain must be algebraic")
    # g is the minimal polynomial of the extension, lifted one level.
    s, g = 0, dmp_raise(K.mod.rep, 1, 0, K.dom)
    while True:
        h, _ = dmp_inject(f, 0, K, front=True)
        # Norm(f) computed as a resultant with the minimal polynomial.
        r = dmp_resultant(g, h, 1, K.dom)
        if dup_sqf_p(r, K.dom):
            break
        else:
            # Shift f by the extension element and try again.
            f, s = dup_taylor(f, -K.unit, K), s+1
    return s, f, r
@cythonized("s,u")
def dmp_sqf_norm(f, u, K):
    """Square-free norm of `f` in `K[X]`, useful over algebraic domains.

    Multivariate counterpart of :func:`dup_sqf_norm`: shifts `f` by the
    extension element until the norm (a resultant with the minimal
    polynomial) becomes square-free over the base domain.
    """
    if not u:
        return dup_sqf_norm(f, K)
    if not K.is_Algebraic:
        raise DomainError("ground domain must be algebraic")
    # g is the minimal polynomial of the extension, lifted u+1 levels.
    g = dmp_raise(K.mod.rep, u+1, 0, K.dom)
    # F represents `x - a`, used to shift the main variable by the
    # extension element on every failed attempt.
    F = dmp_raise([K.one,-K.unit], u, 0, K)
    s = 0
    while True:
        h, _ = dmp_inject(f, u, K, front=True)
        r = dmp_resultant(g, h, u+1, K.dom)
        if dmp_sqf_p(r, u, K.dom):
            break
        else:
            f, s = dmp_compose(f, F, u, K), s+1
    return s, f, r
def dup_sqf_part(f, K):
    """Returns square-free part of a polynomial in `K[x]`. """
    if not f:
        return f
    if K.is_negative(dup_LC(f, K)):
        f = dup_neg(f, K)
    # Dividing out gcd(f, f') removes all repeated factors.
    sqf = dup_exquo(f, dup_gcd(f, dup_diff(f, 1, K), K), K)
    if K.has_Field or not K.is_Exact:
        return dup_monic(sqf, K)
    return dup_primitive(sqf, K)[1]
@cythonized("u")
def dmp_sqf_part(f, u, K):
    """Returns square-free part of a polynomial in `K[X]`. """
    if dmp_zero_p(f, u):
        return f
    if K.is_negative(dmp_ground_LC(f, u, K)):
        f = dmp_neg(f, u, K)
    # Dividing out gcd(f, f') removes all repeated factors.
    sqf = dmp_exquo(f, dmp_gcd(f, dmp_diff(f, 1, u, K), u, K), u, K)
    if K.has_Field or not K.is_Exact:
        return dmp_ground_monic(sqf, u, K)
    return dmp_ground_primitive(sqf, u, K)[1]
@cythonized("i")
def dup_sqf_list(f, K, **args):
    """Returns square-free decomposition of a polynomial in `K[x]`.

    Yun-style iteration over gcd/cofactor triples.  Keyword arguments:
    ``all=True`` keeps zero-degree factors in the result;
    ``include=True`` folds the extracted leading coefficient/content
    into the first factor instead of returning it separately.
    """
    if K.has_Field or not K.is_Exact:
        coeff = dup_LC(f, K)
        f = dup_monic(f, K)
    else:
        coeff, f = dup_primitive(f, K)
        # Normalize so the leading coefficient is positive.
        if K.is_negative(dup_LC(f, K)):
            f = dup_neg(f, K)
            coeff = -coeff
    if dup_degree(f) <= 0:
        if args.get('include', False):
            return f
        else:
            return coeff, []
    result, i = [], 1
    h = dup_diff(f, 1, K)
    g, p, q = dup_inner_gcd(f, h, K)
    # Renamed from `all` to avoid shadowing the builtin of the same name.
    all_factors = args.get('all', False)
    while True:
        d = dup_diff(p, 1, K)
        h = dup_sub(q, d, K)
        if not h:
            result.append((p, i))
            break
        g, p, q = dup_inner_gcd(p, h, K)
        if all_factors or dup_degree(g) > 0:
            result.append((g, i))
        i += 1
    if not args.get('include', False):
        return coeff, result
    else:
        # Fold the coefficient into the first factor.
        (g, i), rest = result[0], result[1:]
        g = dup_mul_ground(g, coeff, K)
        return [(g, i)] + rest
@cythonized("u,i")
def dmp_sqf_list(f, u, K, **args):
    """Returns square-free decomposition of a polynomial in `K[X]`.

    Multivariate counterpart of :func:`dup_sqf_list`; see there for the
    meaning of the ``all`` and ``include`` keyword arguments.
    """
    if not u:
        return dup_sqf_list(f, K, **args)
    if K.has_Field or not K.is_Exact:
        coeff = dmp_ground_LC(f, u, K)
        f = dmp_ground_monic(f, u, K)
    else:
        coeff, f = dmp_ground_primitive(f, u, K)
        # Normalize so the leading ground coefficient is positive.
        if K.is_negative(dmp_ground_LC(f, u, K)):
            f = dmp_neg(f, u, K)
            coeff = -coeff
    if dmp_degree(f, u) <= 0:
        if args.get('include', False):
            return f
        else:
            return coeff, []
    result, i = [], 1
    h = dmp_diff(f, 1, u, K)
    g, p, q = dmp_inner_gcd(f, h, u, K)
    # Renamed from `all` to avoid shadowing the builtin of the same name.
    all_factors = args.get('all', False)
    while True:
        d = dmp_diff(p, 1, u, K)
        h = dmp_sub(q, d, u, K)
        if dmp_zero_p(h, u):
            result.append((p, i))
            break
        g, p, q = dmp_inner_gcd(p, h, u, K)
        if all_factors or dmp_degree(g, u) > 0:
            result.append((g, i))
        i += 1
    if not args.get('include', False):
        return coeff, result
    else:
        # Fold the coefficient into the first factor.  BUG FIX: the
        # factor is multivariate, so use dmp_mul_ground (the original
        # called dup_mul_ground on a multivariate polynomial).
        (g, i), rest = result[0], result[1:]
        g = dmp_mul_ground(g, coeff, u, K)
        return [(g, i)] + rest
def dup_extract(f, g, K):
    """Extracts common content from a pair of polynomials in `K[x]`. """
    common = K.gcd(dup_content(f, K), dup_content(g, K))
    if not K.is_one(common):
        f = dup_exquo_ground(f, common, K)
        g = dup_exquo_ground(g, common, K)
    return common, f, g
@cythonized("u")
def dmp_ground_extract(f, g, u, K):
    """Extracts common content from a pair of polynomials in `K[X]`. """
    common = K.gcd(dmp_ground_content(f, u, K), dmp_ground_content(g, u, K))
    if not K.is_one(common):
        f = dmp_exquo_ground(f, common, u, K)
        g = dmp_exquo_ground(g, common, u, K)
    return common, f, g
def dup_mirror(f, K):
    """Evaluate efficiently composition `f(-x)` in `K[x]`. """
    f = list(f)
    n = dup_degree(f)
    sign = -K.one
    # Negate every coefficient at odd distance from the constant term.
    for i in xrange(n - 1, -1, -1):
        f[i] = sign*f[i]
        sign = -sign
    return f
def dup_scale(f, a, K):
    """Evaluate efficiently composition `f(a*x)` in `K[x]`. """
    f = list(f)
    n = dup_degree(f)
    mult = a
    # Multiply the coefficient of x**k by a**k (increasing powers of a
    # as we walk from the subleading towards the constant coefficient).
    for i in xrange(n - 1, -1, -1):
        f[i] = mult*f[i]
        mult = mult*a
    return f
def dup_taylor(f, a, K):
    """Evaluate efficiently Taylor shift `f(x + a)` in `K[x]`.

    Uses Horner-style synthetic division in place: `n` rounds of
    accumulation produce the coefficients of the shifted polynomial.
    """
    f, n = list(f), dup_degree(f)
    for i in xrange(n, 0, -1):
        for j in xrange(0, i):
            f[j+1] += a*f[j]
    return f
def dup_transform(f, p, q, K):
    """Evaluate functional transformation `q**n * f(p/q)` in `K[x]`.

    `n` is the degree of `f`; used e.g. to apply a Mobius transform to
    a polynomial while keeping the coefficients polynomial.
    """
    if not f:
        return []
    # Precompute powers q**0, q**1, ..., q**n.
    h, Q = [f[0]], [[K.one]]
    for i in xrange(0, dup_degree(f)):
        Q.append(dup_mul(Q[-1], q, K))
    # Horner evaluation of f at p/q, homogenized by powers of q.
    # (Loop variable renamed from `q` to avoid shadowing the parameter.)
    for c, q_pow in zip(f[1:], Q[1:]):
        h = dup_mul(h, p, K)
        h = dup_add(h, dup_mul_ground(q_pow, c, K), K)
    return h
def dup_compose(f, g, K):
    """Evaluate functional composition `f(g)` in `K[x]`. """
    if len(g) <= 1:
        # g is a constant: simply evaluate f at it.
        return dup_strip([dup_eval(f, dup_LC(g, K), K)])
    if not f:
        return []
    # Horner scheme: fold the coefficients of f over powers of g.
    result = [f[0]]
    for coeff in f[1:]:
        result = dup_add_term(dup_mul(result, g, K), coeff, 0, K)
    return result
@cythonized("u")
def dmp_compose(f, g, u, K):
    """Evaluate functional composition `f(g)` in `K[X]`. """
    if not u:
        return dup_compose(f, g, K)
    if dmp_zero_p(f, u):
        return f
    # Horner scheme: fold the coefficients of f over powers of g.
    result = [f[0]]
    for coeff in f[1:]:
        result = dmp_add_term(dmp_mul(result, g, u, K), coeff, 0, u, K)
    return result
@cythonized("s,n,r,i,j")
def _dup_right_decompose(f, s, K):
    """Compute a candidate right factor of degree `s` for `f = h(g)`.

    Solves coefficient-by-coefficient for a monic `g` of degree `s`
    (cf. Kozen-Landau decomposition); the left factor, if it exists, is
    recovered separately by `_dup_left_decompose`.
    """
    n = dup_degree(f)
    lc = dup_LC(f, K)
    f = dup_to_raw_dict(f)
    g = { s : K.one }
    r = n // s
    for i in xrange(1, s):
        coeff = K.zero
        for j in xrange(0, i):
            # Skip absent terms (sparse raw-dict representation).
            if n+j-i not in f:
                continue
            if s-j not in g:
                continue
            fc, gc = f[n+j-i], g[s-j]
            coeff += (i - r*j)*fc*gc
        g[s-i] = K.exquo(coeff, i*r*lc)
    return dup_from_raw_dict(g, K)
@cythonized("i")
def _dup_left_decompose(f, h, K):
    """Compute `g` such that `f = g(h)`, or return `None`. """
    g, i = {}, 0
    # Repeatedly divide by h; the constant remainders are g's coefficients.
    while f:
        q, r = dup_div(f, h, K)
        if dup_degree(r) > 0:
            # Non-constant remainder: no exact decomposition exists.
            return None
        g[i] = dup_LC(r, K)
        f, i = q, i + 1
    return dup_from_raw_dict(g, K)
@cythonized("df,s")
def _dup_decompose(f, K):
    """Attempt one split `f = g(h)`; return `(g, h)` or `None`. """
    df = dup_degree(f)
    # Try every proper divisor of the degree as the right factor degree.
    for s in xrange(2, df):
        if df % s:
            continue
        h = _dup_right_decompose(f, s, K)
        if h is None:
            continue
        g = _dup_left_decompose(f, h, K)
        if g is not None:
            return g, h
    return None
def dup_decompose(f, K):
    """Computes functional decomposition of `f` in `K[x]`.

    Given an univariate polynomial `f` with coefficients in a field of
    characteristic zero, returns tuple `(f_1, f_2, ..., f_n)`, where::

        f = f_1 o f_2 o ... f_n = f_1(f_2(... f_n))

    and `f_2, ..., f_n` are monic and homogeneous polynomials of at
    least second degree.  Complete functional decompositions are not
    unique, e.g.::

        1. `f o g = f(x + b) o (g - b)`
        2. `x**n o x**m = x**m o x**n`
        3. `T_n o T_m = T_m o T_n`   (Chebyshev polynomials)

    References
    ==========

    .. [Kozen89] D. Kozen, S. Landau, Polynomial decomposition algorithms,
        Journal of Symbolic Computation 7 (1989), pp. 445-456
    """
    F = []
    # Peel off right factors until no further split is possible.
    result = _dup_decompose(f, K)
    while result is not None:
        f, h = result
        F.insert(0, h)
        result = _dup_decompose(f, K)
    return [f] + F
def dup_sturm(f, K):
    """Computes the Sturm sequence of `f` in `F[x]`.

    Given an univariate, square-free polynomial `f(x)` returns the
    associated Sturm sequence `f_0(x), ..., f_n(x)` defined by::

       f_0(x), f_1(x) = f(x), f'(x)
       f_n = -rem(f_{n-2}(x), f_{n-1}(x))

    References
    ==========

    .. [Davenport88] J.H. Davenport, Y. Siret, E. Tournier, Computer
        Algebra Systems and Algorithms for Algebraic Computation,
        Academic Press, London, 1988, pp. 124-128
    """
    if not K.has_Field:
        raise DomainError('computation can be done only in a field')
    f = dup_sqf_part(f, K)
    seq = [f, dup_diff(f, 1, K)]
    # Extend with negated remainders until a zero polynomial appears.
    while seq[-1]:
        seq.append(dup_neg(dup_rem(seq[-2], seq[-1], K), K))
    return seq[:-1]
@cythonized("u")
def dmp_lift(f, u, K):
    """Convert algebraic coefficients to integers in `K[X]`.

    Multiplies together all sign-variants of `f` (flipping the signs of
    monomials with non-rational coefficients in every combination);
    the product has coefficients in the base domain `K.dom`.
    """
    if not K.is_Algebraic:
        raise DomainError('computation can be done only in an algebraic domain')
    F, monoms, polys = dmp_to_dict(f, u), [], []
    # Collect monomials whose coefficients are genuinely algebraic.
    for monom, coeff in F.iteritems():
        if not coeff.is_ground:
            monoms.append(monom)
    # All +/-1 sign patterns over those monomials.
    perms = variations([-1, 1], len(monoms), repetition=True)
    for perm in perms:
        G = dict(F)
        for sign, monom in zip(perm, monoms):
            if sign == -1:
                G[monom] = -G[monom]
        polys.append(dmp_from_dict(G, u, K))
    # Expand the product and convert down to the base domain.
    return dmp_convert(dmp_expand(polys, u, K), u, K, K.dom)
def dup_sign_variations(f, K):
    """Compute the number of sign variations of `f` in `K[x]`. """
    count = 0
    last = K.zero
    for coeff in f:
        # A variation occurs when the sign flips relative to the last
        # non-zero coefficient seen so far.
        if coeff*last < 0:
            count += 1
        if coeff:
            last = coeff
    return count
def dup_root_upper_bound(f, K):
    """Compute LMQ upper bound for `f`'s positive roots.

    Local-max quadratic (LMQ) bound: each negative coefficient is
    paired with the positive coefficients that precede it (in degree
    order) and the bound is derived from base-2 logarithms of the
    coefficient quotients.  Returns ``None`` when `f` has no negative
    coefficients (hence no positive roots to bound).
    """
    n, t, P = len(f), K.one, []
    if dup_LC(f, K) < 0:
        f = dup_neg(f, K)
    # Work from the constant coefficient upwards.
    f = list(reversed(f))
    for i in xrange(0, n):
        if f[i] >= 0:
            continue
        # Pair the negative coefficient f[i] with each later positive one.
        a, Q = K.log(-f[i], 2), []
        for j in xrange(i+1, n):
            if f[j] <= 0:
                continue
            q = t + a - K.log(f[j], 2)
            Q.append(q // (j - i))
            # t counts how many times each positive coefficient was used.
            t += 1
        if not Q:
            continue
        P.append(min(Q))
    if not P:
        return None
    else:
        return 2.0**(max(P)+1)
def dup_root_lower_bound(f, K):
    """Compute LMQ lower bound for `f`'s positive roots. """
    # The roots of the reversed polynomial are the reciprocals of f's,
    # so a lower bound is the reciprocal of its upper bound.
    bound = dup_root_upper_bound(dup_reverse(f), K)
    if bound is None:
        return None
    return 1.0 / bound
def dup_inner_refine_real_root(f, (a, b, c, d), cond, fast, K):
    """Refine a positive root of `f` given a Mobius transform.

    The isolating interval is represented by the transform
    x -> (a*x + b)/(c*x + d); continued-fraction steps shrink it until
    `cond(a, b, c, d, i, F)` is satisfied (`i` is the step count, `F`
    the fraction field of `K`).
    """
    F, i = K.get_field(), 0
    while not c or not cond(a, b, c, d, i, F):
        # Lower bound on the remaining positive roots of f.
        A = dup_root_lower_bound(f, K)
        if A is not None:
            A = K(int(A))
        else:
            A = K.zero
        if fast and A > 16:
            # Large bound: rescale once instead of shifting many times.
            f = dup_scale(f, A, K)
            a, c, A = A*a, A*c, K.one
        if A >= K.one:
            # Shift past the bound and update the Mobius transform.
            f = dup_taylor(f, A, K)
            b, d = A*a + b, A*c + d
            if not dup_eval(f, K.zero, K):
                # Landed exactly on the root: return a point interval.
                return F(b, d), F(b, d)
        # Split at 1: examine the (1, oo) part first.
        f, g = dup_taylor(f, K.one, K), f
        a1, b1, c1, d1 = a, a+b, c, c+d
        if not dup_eval(f, K.zero, K):
            return F(b1, d1), F(b1, d1)
        k = dup_sign_variations(f, K)
        if k == 1:
            # Root lies in (1, oo) of the current frame.
            a, b, c, d = a1, b1, c1, d1
        else:
            # Root lies in (0, 1): invert the variable and shift.
            f = dup_taylor(dup_reverse(g), K.one, K)
            if not dup_eval(f, K.zero, K):
                f = dup_rshift(f, 1, K)
            a, b, c, d = b, a+b, d, c+d
        i += 1
    # Convert the final Mobius endpoints back to an ordered interval.
    s, t = F(a, c), F(b, d)
    if s <= t:
        return (s, t)
    else:
        return (t, s)
def dup_outer_refine_real_root(f, s, t, cond, fast, K):
    """Refine a positive root of `f` given an interval `(s, t)`. """
    if s == t:
        return (s, t)
    F = K.get_field()
    a, c = F.numer(s), F.denom(s)
    b, d = F.numer(t), F.denom(t)
    # Map (s, t) onto (0, oo) via the Mobius transform (a*x + b)/(c*x + d).
    f = dup_transform(f, dup_strip([a, b]),
                         dup_strip([c, d]), K)
    # Exactly one sign variation must remain, otherwise (s, t) does not
    # isolate a single root.
    if dup_sign_variations(f, K) != 1:
        raise RefinementFailed("there should be exactly one root on (%s, %s)" % (s, t))
    return dup_inner_refine_real_root(f, (a, b, c, d), cond, fast, K)
def dup_refine_real_root(f, s, t, n, K, **args):
    """Refine real root's approximating interval to the given precision.

    `n` is either a step count (int) or a desired interval width;
    negative intervals are handled by mirroring `f`.
    """
    if K.is_QQ:
        # Work over the integers: clear denominators first.
        (_, f), K = dup_ground_to_ring(f, K, convert=True), K.get_ring()
    elif not K.is_ZZ:
        raise DomainError("real root refinement not supported over %s" % K)
    if s == t:
        return (s, t)
    if s > t:
        s, t = t, s
    negative = False
    if s < 0:
        if t <= 0:
            # Refine the mirrored positive root; flip back at the end.
            f, s, t, negative = dup_mirror(f, K), -t, -s, True
        else:
            raise ValueError("can't refine a real root on (%s, %s)" % (s, t))
    fast = args.get('fast')
    if type(n) is not int:
        # Interpret n as a desired interval width.
        cond = lambda a, b, c, d, i, F: abs(F(a, c) - F(b, d)) < n
    else:
        # Interpret n as a number of refinement steps.
        cond = lambda a, b, c, d, i, F: i >= n
    s, t = dup_outer_refine_real_root(f, s, t, cond, fast, K)
    if negative:
        return (-t, -s)
    else:
        return ( s, t)
def dup_inner_isolate_real_roots(f, cond, fast, K):
    """Iteratively compute disjoint positive root isolation intervals.

    Continued-fraction method: intervals are represented by Mobius
    transforms `(a, b, c, d)` and repeatedly split at 1, guided by the
    number of sign variations (Descartes' rule), until each interval
    carries exactly one variation.
    """
    a, b, c, d = K.one, K.zero, K.zero, K.one
    k = dup_sign_variations(f, K)
    if k == 0:
        # No sign variations: no positive roots at all.
        return []
    if k == 1:
        roots = [dup_inner_refine_real_root(
            f, (a, b, c, d), cond, fast, K)]
    else:
        roots, stack = [], [(a, b, c, d, f, k)]
        F = K.get_field()
        while stack:
            a, b, c, d, f, k = stack.pop()
            # Shift past the lower bound of the remaining positive roots.
            A = dup_root_lower_bound(f, K)
            if A is not None:
                A = K(int(A))
            else:
                A = K.zero
            if fast and A > 16:
                # Large bound: rescale once instead of shifting many times.
                f = dup_scale(f, A, K)
                a, c, A = A*a, A*c, K.one
            if A >= K.one:
                f = dup_taylor(f, A, K)
                b, d = A*a + b, A*c + d
                if not dup_eval(f, K.zero, K):
                    # Exact rational root found at the shift point.
                    roots.append((F(b, d), F(b, d)))
                    f = dup_rshift(f, 1, K)
                k = dup_sign_variations(f, K)
                if k == 0:
                    continue
                if k == 1:
                    roots.append(dup_inner_refine_real_root(
                        f, (a, b, c, d), cond, fast, K))
                    continue
            # Split at 1: f1 covers (1, oo), f2 (computed below) covers (0, 1).
            f1 = dup_taylor(f, K.one, K)
            a1, b1, c1, d1, r = a, a+b, c, c+d, 0
            if not dup_eval(f1, K.zero, K):
                # x = 1 is itself a root.
                roots.append((F(b1, d1), F(b1, d1)))
                f1, r = dup_rshift(f1, 1, K), 1
            k1 = dup_sign_variations(f1, K)
            k2 = k - k1 - r
            a2, b2, c2, d2 = b, a+b, d, c+d
            if k2 > 1 or (k1 > 0 and k2 == 1):
                # Compute the (0, 1) image explicitly to get the exact count.
                # NOTE(review): if this branch is not taken but k1 < k2
                # below, f2 may be referenced before assignment -- verify
                # that this combination is actually unreachable.
                f2 = dup_taylor(dup_reverse(f), K.one, K)
                if not dup_eval(f2, K.zero, K):
                    f2 = dup_rshift(f2, 1, K)
                k2 = dup_sign_variations(f2, K)
            if k1 < k2:
                # Process the richer subinterval first.
                a1, a2, b1, b2 = a2, a1, b2, b1
                c1, c2, d1, d2 = c2, c1, d2, d1
                f1, f2, k1, k2 = f2, f1, k2, k1
            if k1 == 0:
                continue
            if k1 == 1:
                roots.append(dup_inner_refine_real_root(
                    f1, (a1, b1, c1, d1), cond, fast, K))
            else:
                stack.append((a1, b1, c1, d1, f1, k1))
            if k2 == 0:
                continue
            if k2 == 1:
                roots.append(dup_inner_refine_real_root(
                    f2, (a2, b2, c2, d2), cond, fast, K))
            else:
                stack.append((a2, b2, c2, d2, f2, k2))
    return sorted(roots)
def dup_isolate_real_roots(f, K, **args):
    """Isolate real roots using continued fractions approach.

    Keyword arguments: ``eps`` (refine intervals below this width),
    ``fast`` (use the scaling speed-up), ``sqf=True`` (caller asserts
    `f` is square-free; multiplicities are then omitted).
    """
    if K.is_QQ:
        # Work over the integers: clear denominators first.
        (_, f), K = dup_ground_to_ring(f, K, convert=True), K.get_ring()
    elif not K.is_ZZ:
        raise DomainError("isolation of real roots not supported over %s" % K)
    if dup_degree(f) <= 0:
        return []
    eps, fast = args.get('eps'), args.get('fast')
    if eps is not None:
        cond = lambda a, b, c, d, i, F: abs(F(a, c) - F(b, d)) < eps
    else:
        cond = lambda a, b, c, d, i, F: True
    if args.get('sqf', False):
        # Square-free input: isolate positive roots, then the mirrored
        # negative ones, and merge.
        I_pos = dup_inner_isolate_real_roots(f, cond, fast, K)
        f = dup_mirror(f, K)
        I_neg = dup_inner_isolate_real_roots(f, cond, fast, K)
        return sorted([ (-v, -u) for (u, v) in I_neg ] + I_pos)
    _, factors = dup_sqf_list(f, K)
    if len(factors) == 1:
        ((f, k),) = factors
        I_pos = dup_inner_isolate_real_roots(f, cond, fast, K)
        f = dup_mirror(f, K)
        I_neg = dup_inner_isolate_real_roots(f, cond, fast, K)
        return sorted([ ((-v, -u), k) for (u, v) in I_neg ] + \
                      [ (( u, v), k) for (u, v) in I_pos ])
    I_pos, I_neg = [], []
    F_pos, F_neg = {}, {}
    # Isolate the roots of each square-free factor separately, keeping
    # track of which factor an interval belongs to (for refinement).
    for f, k in factors:
        for u, v in dup_inner_isolate_real_roots(f, cond, fast, K):
            I_pos.append((u, v, k))
        g = dup_mirror(f, K)
        for s, t in dup_inner_isolate_real_roots(g, cond, fast, K):
            I_neg.append((s, t, k))
        F_pos[k], F_neg[k] = f, g
    step = lambda a, b, c, d, i, F: i >= 1
    # Intervals from different factors may overlap: refine pairwise
    # (one step at a time) until all intervals are disjoint.
    for i, (u, v, k) in enumerate(I_pos):
        for j, (s, t, m) in enumerate(I_pos[i+1:]):
            while not (s >= v or t <= u):
                u, v = dup_outer_refine_real_root(F_pos[k], u, v, step, fast, K)
                s, t = dup_outer_refine_real_root(F_pos[m], s, t, step, fast, K)
            I_pos[i+j+1] = (s, t, m)
        I_pos[i] = (u, v, k)
    for i, (u, v, k) in enumerate(I_neg):
        for j, (s, t, m) in enumerate(I_neg[i+1:]):
            while not (s >= v or t <= u):
                u, v = dup_outer_refine_real_root(F_neg[k], u, v, step, fast, K)
                s, t = dup_outer_refine_real_root(F_neg[m], s, t, step, fast, K)
            I_neg[i+j+1] = (s, t, m)
        I_neg[i] = (u, v, k)
    return sorted([ ((-v, -u), k) for (u, v, k) in I_neg ] + \
                  [ (( u, v), k) for (u, v, k) in I_pos ])
def _dup_inner_sturm(f, p, q, x, y, K):
    """Compute Sturm sequence at x+I*y in p+I*q direction. """
    C = K.complex_domain()
    a, b = C(p, q), C(x, y)
    # Restrict f to the ray b + a*t and split into real/imaginary parts.
    f = dup_convert(f, K, C)
    f = dup_taylor(f, b, C)
    f = dup_scale(f, a, C)
    u = dup_strip([ C.real(c) for c in f ])
    v = dup_strip([ C.imag(c) for c in f ])
    seq = [u, v]
    # Standard negated-remainder Sturm chain over the real domain K
    # (the real/imaginary parts have coefficients in K).
    while seq[-1]:
        s = dup_rem(seq[-2], seq[-1], K)
        seq.append(dup_neg(s, K))
    return seq[:-1]
def _dup_sturm_shift(F, c, K):
    """Shift origin of a Sturm sequence by a real number `c`. """
    # Taylor-shift every member of the sequence.
    return [ dup_taylor(poly, c, K) for poly in F ]
def _dup_sturm_mirror(F, K):
    """Flip the direction of a Sturm sequence at its origin. """
    # Mirror (x -> -x) every member of the sequence.
    return [ dup_mirror(poly, K) for poly in F ]
def _dup_inner_zeros(F1, F2, F3, F4, hx, hy, K):
    """Return the exact number of zeros in the given rectangle.

    F1..F4 are Sturm sequences along the four sides of the rectangle;
    half the total drop in sign variations from each side's origin to
    its far end counts the enclosed zeros (Wilf's bisection method).
    """
    # Sign variations at the far end of each side.
    V1 = [
        dup_sign_variations([ dup_eval(f, hx, K) for f in F1 ], K),
        dup_sign_variations([ dup_eval(f, hy, K) for f in F2 ], K),
        dup_sign_variations([ dup_eval(f, hx, K) for f in F3 ], K),
        dup_sign_variations([ dup_eval(f, hy, K) for f in F4 ], K),
    ]
    # Sign variations at the origin of each side.
    V0 = [
        dup_sign_variations([ dup_eval(f, K.zero, K) for f in F1 ], K),
        dup_sign_variations([ dup_eval(f, K.zero, K) for f in F2 ], K),
        dup_sign_variations([ dup_eval(f, K.zero, K) for f in F3 ], K),
        dup_sign_variations([ dup_eval(f, K.zero, K) for f in F4 ], K),
    ]
    return sum(v1 - v0 for v1, v0 in zip(V1, V0)) // 2
def dup_inner_refine_complex_root(f, x, y, dx, dy, F, K):
    """One bisection step of complex root refinement algorithm.

    Splits the rectangle (x, y, dx, dy) into four quadrants, counts the
    zeros in each using the side Sturm sequences `F = (F1, F2, F3, F4)`,
    and returns the quadrant that holds the single root together with
    its new boundary sequences.
    """
    hx, hy = dx/2, dy/2
    cx, cy = x + hx, y + hy
    F1, F2, F3, F4 = F
    # Fresh Sturm sequences along the new central cut lines.
    Fx = _dup_inner_sturm(f, K.one, K.zero, cx, cy, K)
    Fy = _dup_inner_sturm(f, K.zero, K.one, cx, cy, K)
    # Quadrant #1: ++
    F11 = Fx
    F12 = _dup_sturm_shift(F2, hx, K)
    F13 = F3
    F14 = _dup_sturm_mirror(_dup_sturm_shift(Fy, hy, K), K)
    k1 = _dup_inner_zeros(F11, F12, F13, F14, hx, hy, K)
    if k1 == 1:
        return (cx, cy, hx, hy, (F11, F12, F13, F14))
    # Quadrant #2: -+
    F21 = _dup_sturm_shift(Fx,-hx, K)
    F22 = Fy
    F23 = _dup_sturm_shift(F3, hx, K)
    F24 = F4
    k2 = _dup_inner_zeros(F21, F22, F23, F24, hx, hy, K)
    if k2 == 1:
        return (x, cy, hx, hy, (F21, F22, F23, F24))
    # Quadrant #3: --
    F31 = F1
    F32 = _dup_sturm_shift(Fy,-hy, K)
    F33 = _dup_sturm_mirror(Fx, K)
    F34 = _dup_sturm_shift(F4, hy, K)
    k3 = _dup_inner_zeros(F31, F32, F33, F34, hx, hy, K)
    if k3 == 1:
        return (x, y, hx, hy, (F31, F32, F33, F34))
    # Quadrant #4: +-
    F41 = _dup_sturm_shift(F1, hx, K)
    F42 = F2
    F43 = _dup_sturm_mirror(_dup_sturm_shift(Fx, hx, K), K)
    F44 = _dup_sturm_mirror(Fy, K)
    k4 = _dup_inner_zeros(F41, F42, F43, F44, hx, hy, K)
    if k4 == 1:
        return (cx, y, hx, hy, (F41, F42, F43, F44))
    # No quadrant holds exactly one root: the input rectangle was bad.
    raise RefinementFailed("no roots in (%s, %s) x (%s, %s) rectangle" % (x, y, x+dx, y+dy))
def dup_outer_refine_complex_root(f, x, y, dx, dy, F, eps, K):
    """Refine a complex root until the desired precision is reached. """
    # Keep bisecting while either rectangle dimension is at least eps.
    while dx >= eps and dy >= eps:
        x, y, dx, dy, F = dup_inner_refine_complex_root(f, x, y, dx, dy, F, K)
    return x, y, dx, dy, F
def dup_refine_complex_root(f, x, y, dx, dy, eps, K):
    """Refine a complex root using Wilf's global bisection algorithm. """
    if K.is_ZZ or K.is_QQ:
        # Work with floating-point coefficients.
        K0, K = K, K.float_domain()
        f = dup_convert(f, K0, K)
    else:
        raise DomainError("isolation of complex roots not supported over %s" % K)
    # Sturm sequences along the four sides of the initial rectangle
    # (counter-clockwise directions).
    F1 = _dup_inner_sturm(f, K.one, K.zero, x, y, K)
    F2 = _dup_inner_sturm(f, K.zero, K.one, x+dx, y, K)
    F3 = _dup_inner_sturm(f,-K.one, K.zero, x+dx, y+dy, K)
    F4 = _dup_inner_sturm(f, K.zero,-K.one, x, y+dy, K)
    F = (F1, F2, F3, F4)
    x, y, dx, dy, _ = dup_outer_refine_complex_root(f, x, y, dx, dy, F, eps, K)
    return x, y, dx, dy
def dup_inner_isolate_complex_roots(f, K, **args):
    """Compute disjoint complex root isolating rectangles for all quadrants.

    Starts from a randomly perturbed rectangle enclosing all roots,
    then subdivides quadrant by quadrant until each rectangle contains
    exactly one root.  Retries with a new perturbation whenever the
    side counts are inconsistent (e.g. a root on a rectangle edge).
    """
    n, lc = dup_degree(f), abs(dup_LC(f, K))
    # Cauchy-style bound on the magnitude of the roots.
    B = 2*max(abs(c)/lc for c in f)
    # Random perturbation, presumably to keep roots off the rectangle
    # edges; the retry below handles unlucky draws.
    while True:
        r = randfloat()
        if r < 0.5:
            break
    x, y, dx, dy = -B+r, -B-r, 2*B+r, 2*B+r
    roots, stack = [], []
    F1 = _dup_inner_sturm(f, K.one, K.zero, x, y, K)
    F2 = _dup_inner_sturm(f, K.zero, K.one, x+dx, y, K)
    F3 = _dup_inner_sturm(f,-K.one, K.zero, x+dx, y+dy, K)
    F4 = _dup_inner_sturm(f, K.zero,-K.one, x, y+dy, K)
    k = _dup_inner_zeros(F1, F2, F3, F4, dx, dy, K)
    if k != n:
        # Inconsistent count: retry.  BUG FIX: forward **args so that
        # options such as `eps` are not silently dropped on retry.
        return dup_inner_isolate_complex_roots(f, K, **args)
    if k == 1:
        roots.append((x, y, dx, dy, (F1, F2, F3, F4)))
    elif k > 1:
        stack.append((x, y, dx, dy, k, F1, F2, F3, F4))
    while stack:
        x, y, dx, dy, k, F1, F2, F3, F4 = stack.pop()
        hx, hy = dx/2, dy/2
        cx, cy = x + hx, y + hy
        # Sturm sequences along the new central cut lines.
        Fx = _dup_inner_sturm(f, K.one, K.zero, cx, cy, K)
        Fy = _dup_inner_sturm(f, K.zero, K.one, cx, cy, K)
        # Quadrant #1: ++
        F11 = Fx
        F12 = _dup_sturm_shift(F2, hx, K)
        F13 = F3
        F14 = _dup_sturm_mirror(_dup_sturm_shift(Fy, hy, K), K)
        k1 = _dup_inner_zeros(F11, F12, F13, F14, hx, hy, K)
        if k1 == 1:
            roots.append((cx, cy, hx, hy, (F11, F12, F13, F14)))
        elif k1 > 1:
            stack.append((cx, cy, hx, hy, k1, F11, F12, F13, F14))
        # Quadrant #2: -+
        F21 = _dup_sturm_shift(Fx,-hx, K)
        F22 = Fy
        F23 = _dup_sturm_shift(F3, hx, K)
        F24 = F4
        k2 = _dup_inner_zeros(F21, F22, F23, F24, hx, hy, K)
        if k2 == 1:
            roots.append((x, cy, hx, hy, (F21, F22, F23, F24)))
        elif k2 > 1:
            stack.append((x, cy, hx, hy, k2, F21, F22, F23, F24))
        # Quadrant #3: --
        F31 = F1
        F32 = _dup_sturm_shift(Fy,-hy, K)
        F33 = _dup_sturm_mirror(Fx, K)
        F34 = _dup_sturm_shift(F4, hy, K)
        k3 = _dup_inner_zeros(F31, F32, F33, F34, hx, hy, K)
        if k3 == 1:
            roots.append((x, y, hx, hy, (F31, F32, F33, F34)))
        elif k3 > 1:
            stack.append((x, y, hx, hy, k3, F31, F32, F33, F34))
        # Quadrant #4: +-
        F41 = _dup_sturm_shift(F1, hx, K)
        F42 = F2
        F43 = _dup_sturm_mirror(_dup_sturm_shift(Fx, hx, K), K)
        F44 = _dup_sturm_mirror(Fy, K)
        k4 = _dup_inner_zeros(F41, F42, F43, F44, hx, hy, K)
        if k4 == 1:
            roots.append((cx, y, hx, hy, (F41, F42, F43, F44)))
        elif k4 > 1:
            stack.append((cx, y, hx, hy, k4, F41, F42, F43, F44))
    if len(roots) == n:
        eps = args.get('eps')
        if eps is not None:
            # Optionally refine every rectangle down to the given size.
            for i, (x, y, dx, dy, F) in enumerate(roots):
                roots[i] = dup_outer_refine_complex_root(f, x, y, dx, dy, F, eps, K)
        return roots
    else:
        # A root was lost or duplicated during subdivision: start over.
        # BUG FIX: forward **args here as well.
        return dup_inner_isolate_complex_roots(f, K, **args)
def dup_isolate_complex_roots(f, K, **args):
    """
    Isolate complex roots of ``f`` using Wilf's global bisection algorithm.

    Parameters
    ----------
    f : list
        Dense polynomial representation over the domain ``K``.
    K : Domain
        Coefficient domain; must be ``ZZ`` or ``QQ`` (the actual isolation
        is performed over the associated floating-point domain).
    sqf : bool, optional keyword (default False)
        If True, ``f`` is assumed square-free and the square-free
        factorization plus multiplicity bookkeeping is skipped.
    eps : optional keyword
        Forwarded to the inner isolation routine for rectangle refinement.

    Returns
    -------
    (upper, lower) : pair of lists
        Isolating rectangles ``(x, y, dx, dy)`` for roots in the upper and
        lower half-planes, each sorted by real part.  When ``sqf`` is False
        every entry is paired with its multiplicity.

    Raises
    ------
    DomainError
        If ``K`` is neither ``ZZ`` nor ``QQ``.
    """
    if K.is_ZZ or K.is_QQ:
        F = K.float_domain()
    else:
        raise DomainError("isolation of complex roots not supported over %s" % K)

    squarefree = args.get('sqf', False)

    if squarefree:
        roots = dup_inner_isolate_complex_roots(dup_convert(f, K, F), F, **args)
    else:
        roots = []

        _, factors = dup_sqf_list(f, K)

        # Isolate the roots of each square-free factor separately, tagging
        # every rectangle with its factor and multiplicity.
        for g, k in factors:
            g = dup_convert(g, K, F)

            for r in dup_inner_isolate_complex_roots(g, F, **args):
                roots.append((g, r, k))

        if len(factors) > 1:
            # Rectangles obtained from different factors may overlap; shrink
            # every overlapping pair until all rectangles are disjoint.
            for i, (f1, r1, k1) in enumerate(roots):
                x1, y1, dx1, dy1, F1 = r1

                for j, (f2, r2, k2) in enumerate(roots[i+1:]):
                    x2, y2, dx2, dy2, F2 = r2

                    while not ((x2 >= x1+dx1 or x2+dx2 <= x1) and (y2 >= y1+dy1 or y2+dy2 <= y1)):
                        x1, y1, dx1, dy1, F1 = dup_inner_refine_complex_root(f1, x1, y1, dx1, dy1, F1, K)
                        # BUG FIX: the second root must be refined with its
                        # own rectangle (x2, y2, dx2, dy2), not the first
                        # root's coordinates as the original code did.
                        x2, y2, dx2, dy2, F2 = dup_inner_refine_complex_root(f2, x2, y2, dx2, dy2, F2, K)

                    roots[i+j+1] = (f2, (x2, y2, dx2, dy2, F2), k2)

                roots[i] = (f1, (x1, y1, dx1, dy1, F1), k1)

        multiplicity = {}

        # Keep only the rectangle coordinates, remembering multiplicities.
        for (_, (x, y, dx, dy, _), k) in roots:
            multiplicity[(x, y, dx, dy)] = k

        roots = multiplicity.keys()

    groups = {}

    # Group rectangles by real coordinate: conjugate pairs share ``x``
    # after isolation, so each group holds vertically stacked rectangles.
    for (x, y, dx, dy) in roots:
        if x in groups:
            groups[x].append((x, y, dx, dy))
        else:
            groups[x] = [(x, y, dx, dy)]

    upper, lower = [], []

    # Split each group into upper/lower half-plane roots by repeatedly
    # peeling off the rectangles with maximal and minimal imaginary part.
    for group in groups.values():
        while len(group) > 1:
            _max = max([ r[1] for r in group ])

            for i, (x, y, dx, dy) in enumerate(group):
                if y == _max:
                    upper.append((x, y, dx, dy))
                    del group[i]
                    break

            _min = min([ r[1] for r in group ])

            for i, (x, y, dx, dy) in enumerate(group):
                if y == _min:
                    lower.append((x, y, dx, dy))
                    del group[i]
                    break

    upper = sorted(upper, key=lambda r: r[0])
    lower = sorted(lower, key=lambda r: r[0])

    if not squarefree:
        # Attach the multiplicity recorded for each rectangle.
        for i, r in enumerate(upper):
            upper[i] = (r, multiplicity[r])

        for i, r in enumerate(lower):
            lower[i] = (r, multiplicity[r])

    return upper, lower
| {
"repo_name": "tovrstra/sympy",
"path": "sympy/polys/densetools.py",
"copies": "3",
"size": "67487",
"license": "bsd-3-clause",
"hash": -9049831501624728000,
"line_mean": 24.3044619423,
"line_max": 105,
"alpha_frac": 0.4921540445,
"autogenerated": false,
"ratio": 2.6799698197124933,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9656382023179708,
"avg_score": 0.0031483682065572166,
"num_lines": 2667
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.