repository_name
stringclasses 316
values | func_path_in_repository
stringlengths 6
223
| func_name
stringlengths 1
134
| language
stringclasses 1
value | func_code_string
stringlengths 57
65.5k
| func_documentation_string
stringlengths 1
46.3k
| split_name
stringclasses 1
value | func_code_url
stringlengths 91
315
| called_functions
listlengths 1
156
⌀ | enclosing_scope
stringlengths 2
1.48M
|
|---|---|---|---|---|---|---|---|---|---|
jpype-project/jpype
|
setupext/build_ext.py
|
BuildExtCommand.initialize_options
|
python
|
def initialize_options(self, *args):
    """Scrub C-only warning flags from the distutils compiler configuration.

    -Wstrict-prototypes and -Wimplicit-function-declaration are valid for C
    but not for C++, so remove them from every string config var before the
    normal build_ext initialization runs.
    """
    import distutils.sysconfig
    cfg_vars = distutils.sysconfig.get_config_vars()
    # BUG FIX: the original tested `if v.find(flag)`, but str.find() returns
    # -1 (truthy) when the flag is absent and 0 (falsy) when it is at the
    # start of the string — the opposite of the intent.  Use membership.
    for flag in ('-Wstrict-prototypes', '-Wimplicit-function-declaration'):
        for key, value in cfg_vars.items():
            if isinstance(value, str) and flag in value:
                cfg_vars[key] = value.replace(flag, '')
    build_ext.initialize_options(self)
|
omit -Wstrict-prototypes from CFLAGS since its only valid for C code.
|
train
|
https://github.com/jpype-project/jpype/blob/3ce953ae7b35244077249ce650b9acd0a7010d17/setupext/build_ext.py#L37-L51
| null |
class BuildExtCommand(build_ext):
    """
    Override some behavior in extension building:

    1. Numpy:
       If not opted out, try to use NumPy and define macro 'HAVE_NUMPY', so
       arrays returned from Java can be wrapped efficiently in a ndarray.
    2. Handle compiler flags for different compilers via a dictionary.
    3. Disable warning flags that are valid for C/ObjC but not for C++.
    """
    # extra compile args, keyed by compiler type
    copt = {'msvc': ['/EHsc'],
            'unix': ['-ggdb'],
            'mingw32': [],
            }
    # extra link args, keyed by compiler type
    lopt = {
        'msvc': [],
        'unix': [],
        'mingw32': [],
    }

    def initialize_options(self, *args):
        """omit -Wstrict-prototypes from CFLAGS since its only valid for C code."""
        import distutils.sysconfig
        cfg_vars = distutils.sysconfig.get_config_vars()
        # BUG FIX: str.find() returns -1 (truthy) when the flag is absent,
        # so the original `if v.find(flag)` condition was inverted in
        # effect.  A membership test expresses the real intent.
        for flag in ('-Wstrict-prototypes', '-Wimplicit-function-declaration'):
            for key, value in cfg_vars.items():
                if isinstance(value, str) and flag in value:
                    cfg_vars[key] = value.replace(flag, '')
        build_ext.initialize_options(self)

    def _set_cflags(self):
        # Apply per-compiler extra compile/link args to every extension.
        c = self.compiler.compiler_type
        if c in self.copt:
            for e in self.extensions:
                e.extra_compile_args = self.copt[c]
        if c in self.lopt:
            for e in self.extensions:
                e.extra_link_args = self.lopt[c]

    def build_extensions(self):
        """Generate thunks, set flags, optionally enable NumPy, then build."""
        # We need to create the thunk code first
        self.run_command("build_java")
        self.run_command("build_thunk")
        jpypeLib = self.extensions[0]
        disable_numpy = self.distribution.disable_numpy
        self._set_cflags()
        # handle numpy: best-effort — silently skip if numpy is absent
        if not disable_numpy:
            try:
                import numpy
                jpypeLib.include_dirs.append(numpy.get_include())
                jpypeLib.define_macros.append(('HAVE_NUMPY', 1))
                warnings.warn("Turned ON Numpy support for fast Java array access",
                              FeatureNotice)
            except ImportError:
                pass
        else:
            warnings.warn("Turned OFF Numpy support for fast Java array access",
                          FeatureNotice)
        # has to be last call
        build_ext.build_extensions(self)
|
jpype-project/jpype
|
jpype/_cygwin.py
|
WindowsJVMFinder._get_from_registry
|
python
|
def _get_from_registry(self):
    """Look up the default JVM through Cygwin's /proc/registry view of the
    Windows registry.

    :return: The runtime library path found in the registry, or None
    """
    from ._windows import reg_keys
    for key in reg_keys:
        base = "/proc/registry/HKEY_LOCAL_MACHINE/{}".format(
            key.replace('\\', '/'))
        try:
            # Registry values are exposed as NUL-terminated file contents.
            with open(base + "/CurrentVersion") as fh:
                version = fh.read().split('\x00')[0]
            with open(base + "/" + version + "/RunTimeLib") as fh:
                return fh.read().split('\x00')[0]
        except OSError:
            # Key missing under this location — try the next one.
            pass
    return None
|
Retrieves the path to the default Java installation stored in the
Windows registry
:return: The path found in the registry, or None
|
train
|
https://github.com/jpype-project/jpype/blob/3ce953ae7b35244077249ce650b9acd0a7010d17/jpype/_cygwin.py#L44-L67
| null |
class WindowsJVMFinder(_jvmfinder.JVMFinder):
    """
    Windows JVM library finder class
    """
    def __init__(self):
        """
        Sets up members
        """
        # Parent constructor establishes the default search state.
        _jvmfinder.JVMFinder.__init__(self)
        # On Windows the JVM ships as jvm.dll rather than libjvm.so.
        self._libfile = "jvm.dll"
        # Try JAVA_HOME first, then fall back to the registry.
        self._methods = (self._get_from_java_home, self._get_from_registry)

    def check(self, jvm):
        """Validate the candidate JVM's architecture for this interpreter."""
        from ._windows import _checkJVMArch
        _checkJVMArch(jvm)
|
jpype-project/jpype
|
jpype/_classpath.py
|
addClassPath
|
python
|
def addClassPath(path1):
    """Add a path to the java class path"""
    global _CLASSPATHS
    resolved = _os.path.abspath(path1)
    # A cygwin JVM is a Windows program, so feed it Windows-style paths.
    if _sys.platform == 'cygwin':
        resolved = _posix2win(resolved)
    _CLASSPATHS.add(str(resolved))
|
Add a path to the java class path
|
train
|
https://github.com/jpype-project/jpype/blob/3ce953ae7b35244077249ce650b9acd0a7010d17/jpype/_classpath.py#L57-L63
| null |
import os as _os
import sys as _sys
import glob as _glob
# Public API of this module.
__all__ = ['addClassPath', 'getClassPath']

# Classpath entries added by the user (also seeded from $CLASSPATH).
_CLASSPATHS = set()

# Separator between classpath entries: a cygwin JVM is a Windows
# program and therefore expects ';' rather than the posix separator.
_SEP = ';' if _sys.platform == 'cygwin' else _os.path.pathsep
def _init():
    """Seed _CLASSPATHS from the CLASSPATH environment variable."""
    global _CLASSPATHS
    global _SEP
    env_cp = _os.environ.get("CLASSPATH")
    if env_cp:
        _CLASSPATHS.update(env_cp.split(_SEP))
_init()
# Cygwin needs to convert to windows paths
if _sys.platform=='cygwin':
    # Cached Windows path of the cygwin root ("/"); filled lazily.
    _root=None
    def _get_root():
        """Return (and memoize) the Windows path of the cygwin root."""
        global _root
        if _root!=None:
            return _root
        import subprocess
        # shell=True so `cygpath` is resolved via PATH; stderr folded into
        # stdout so a failure message never blocks the pipe.
        proc = subprocess.Popen("cygpath -wa /", shell=True,
                stdout=subprocess.PIPE,
                stderr=subprocess.STDOUT, close_fds=True)
        _root=proc.stdout.read().strip().decode('utf-8')
        return _root
    def _splitpath(path):
        """Split an absolute path into its individual components."""
        parts=[]
        (path, tail)=_os.path.split( path)
        while path and tail:
            parts.insert(0,tail)
            (path,tail)=_os.path.split(path)
        return parts
    def _posix2win(directory):
        """Convert a posix path into the equivalent Windows path."""
        root=_get_root()
        directory=_os.path.abspath(directory)
        paths=_splitpath(directory)
        # /cygdrive/<letter>/... maps directly onto <letter>:\...
        if paths[0]=="cygdrive":
            paths.pop(0)
            drive=paths.pop(0)
            paths.insert(0, "%s:"%drive)
            return '\\'.join(paths)
        # Any other path lives under the cygwin installation root.
        paths.insert(0,root)
        return '\\'.join(paths)
    # needed for testing
    __all__.append("_posix2win")
def getClassPath():
    """ Get the full java class path.

    Includes user added paths and the environment CLASSPATH.
    """
    global _CLASSPATHS
    global _SEP
    out = []
    for path in _CLASSPATHS:
        if path == '':
            continue
        if path.endswith('*'):
            # Expand wildcard entries into the matching jar files.
            expanded = _glob.glob(path + ".jar")
            # BUG FIX: the original tested len(path) (the pattern string,
            # never empty here) instead of the glob result; skip patterns
            # that matched nothing.
            if len(expanded) == 0:
                continue
            out.extend(expanded)
        else:
            out.append(path)
    return _SEP.join(out)
#print(getClassPath())
|
jpype-project/jpype
|
jpype/_classpath.py
|
getClassPath
|
python
|
def getClassPath():
    """Return the full java class path.

    Includes user added paths and the environment CLASSPATH.
    """
    global _CLASSPATHS
    global _SEP
    out = []
    for path in _CLASSPATHS:
        if path == '':
            continue
        if path.endswith('*'):
            # Expand wildcard entries into the matching jar files.
            expanded = _glob.glob(path + ".jar")
            # BUG FIX: the original tested len(path) (the pattern string,
            # never empty here) instead of the glob result; skip patterns
            # that matched nothing.
            if len(expanded) == 0:
                continue
            out.extend(expanded)
        else:
            out.append(path)
    return _SEP.join(out)
|
Get the full java class path.
Includes user added paths and the environment CLASSPATH.
|
train
|
https://github.com/jpype-project/jpype/blob/3ce953ae7b35244077249ce650b9acd0a7010d17/jpype/_classpath.py#L65-L83
| null |
import os as _os
import sys as _sys
import glob as _glob
# Public API of this module.
__all__ = ['addClassPath', 'getClassPath']

# Classpath entries added by the user (also seeded from $CLASSPATH).
_CLASSPATHS = set()

# Separator between classpath entries: a cygwin JVM is a Windows
# program and therefore expects ';' rather than the posix separator.
_SEP = ';' if _sys.platform == 'cygwin' else _os.path.pathsep
def _init():
    """Seed _CLASSPATHS from the CLASSPATH environment variable."""
    global _CLASSPATHS
    global _SEP
    env_cp = _os.environ.get("CLASSPATH")
    if env_cp:
        _CLASSPATHS.update(env_cp.split(_SEP))
_init()
# Cygwin needs to convert to windows paths
if _sys.platform=='cygwin':
    # Cached Windows path of the cygwin root ("/"); filled lazily.
    _root=None
    def _get_root():
        """Return (and memoize) the Windows path of the cygwin root."""
        global _root
        if _root!=None:
            return _root
        import subprocess
        # shell=True so `cygpath` is resolved via PATH; stderr folded into
        # stdout so a failure message never blocks the pipe.
        proc = subprocess.Popen("cygpath -wa /", shell=True,
                stdout=subprocess.PIPE,
                stderr=subprocess.STDOUT, close_fds=True)
        _root=proc.stdout.read().strip().decode('utf-8')
        return _root
    def _splitpath(path):
        """Split an absolute path into its individual components."""
        parts=[]
        (path, tail)=_os.path.split( path)
        while path and tail:
            parts.insert(0,tail)
            (path,tail)=_os.path.split(path)
        return parts
    def _posix2win(directory):
        """Convert a posix path into the equivalent Windows path."""
        root=_get_root()
        directory=_os.path.abspath(directory)
        paths=_splitpath(directory)
        # /cygdrive/<letter>/... maps directly onto <letter>:\...
        if paths[0]=="cygdrive":
            paths.pop(0)
            drive=paths.pop(0)
            paths.insert(0, "%s:"%drive)
            return '\\'.join(paths)
        # Any other path lives under the cygwin installation root.
        paths.insert(0,root)
        return '\\'.join(paths)
    # needed for testing
    __all__.append("_posix2win")
def addClassPath(path1):
    """Add a path to the java class path."""
    global _CLASSPATHS
    resolved = _os.path.abspath(path1)
    # A cygwin JVM is a Windows program, so feed it Windows-style paths.
    if _sys.platform == 'cygwin':
        resolved = _posix2win(resolved)
    _CLASSPATHS.add(str(resolved))
#print(getClassPath())
|
jpype-project/jpype
|
jpype/_core.py
|
startJVM
|
python
|
def startJVM(jvm=None, *args, **kwargs):
    """Start a Java Virtual Machine.

    Without options it starts the JVM with the default classpath
    (jpype.getClassPath()) and the default JVM (jpype.getDefaultJVMPath()).

    Args:
      jvm (str): Path to the jvm library file (libjvm.so, jvm.dll, ...).
        default=None will use jpype.getDefaultJVMPath()
      *args (str[]): Arguments to give to the JVM.
      classpath (Optional[str]): set the classpath for the jvm; overrides
        any classpath supplied in the arguments list.
      ignoreUnrecognized (Optional[bool]): tell the jvm to ignore
        invalid jvm arguments. (Default False)
    """
    if jvm is None:
        jvm = get_default_jvm_path()
    # Use the default classpath when the caller supplied neither a
    # -Djava.class.path argument nor a classpath keyword.
    if not _hasClassPath(args) and 'classpath' not in kwargs:
        kwargs['classpath'] = _classpath.getClassPath()
    if 'ignoreUnrecognized' not in kwargs:
        kwargs['ignoreUnrecognized'] = False
    # Classpath handling: translate the keyword into a JVM argument.
    args = list(args)
    # FIX: use 'is not None' (identity) rather than '!= None'; also removed
    # the leftover debug print() calls that polluted stdout.
    if 'classpath' in kwargs and kwargs['classpath'] is not None:
        args.append('-Djava.class.path=%s' % (kwargs['classpath']))
    _jpype.startup(jvm, tuple(args), kwargs['ignoreUnrecognized'])
    _initialize()
    # start the reference daemon thread
    if _usePythonThreadForDaemon:
        _refdaemon.startPython()
    else:
        _refdaemon.startJava()
|
Starts a Java Virtual Machine. Without options it will start
the JVM with the default classpath and jvm. The default classpath
will be determined by jpype.getClassPath(). The default JVM is
determined by jpype.getDefaultJVMPath().
Args:
jvm (str): Path to the jvm library file (libjvm.so, jvm.dll, ...)
default=None will use jpype.getDefaultJVMPath()
*args (str[]): Arguments to give to the JVM
classpath (Optional[string]): set the classpath for the jvm.
This will override any classpath supplied in the arguments
list.
ignoreUnrecognized (Optional[bool]): option to jvm to ignore
invalid jvm arguments. (Default False)
|
train
|
https://github.com/jpype-project/jpype/blob/3ce953ae7b35244077249ce650b9acd0a7010d17/jpype/_core.py#L70-L112
|
[
"def getClassPath():\n \"\"\" Get the full java class path.\n\n Includes user added paths and the environment CLASSPATH.\n \"\"\"\n global _CLASSPATHS\n global _SEP\n out=[]\n for path in _CLASSPATHS:\n if path=='':\n continue\n if path.endswith('*'):\n paths=_glob.glob(path+\".jar\")\n if len(path)==0:\n continue \n out.extend(paths)\n else:\n out.append(path)\n return _SEP.join(out)\n",
"def _initialize():\n _properties._initialize()\n _jclass._initialize()\n _jarray._initialize()\n _jwrapper._initialize()\n _jproxy._initialize()\n _jexception._initialize()\n _jcollection._initialize()\n _jobject._initialize()\n nio._initialize()\n reflect._initialize()\n for func in _initializers:\n func()\n",
"def _hasClassPath(args):\n for i in args:\n if i.startswith('-Djava.class.path'):\n return True\n return False\n",
"def get_default_jvm_path():\n \"\"\"\n Retrieves the path to the default or first found JVM library\n\n :return: The path to the JVM shared library file\n :raise ValueError: No JVM library found\n \"\"\"\n if sys.platform == \"cygwin\":\n # Cygwin\n from ._cygwin import WindowsJVMFinder\n finder = WindowsJVMFinder()\n elif sys.platform == \"win32\":\n # Windows\n from ._windows import WindowsJVMFinder\n finder = WindowsJVMFinder()\n elif sys.platform == \"darwin\":\n # Mac OS X\n from ._darwin import DarwinJVMFinder\n finder = DarwinJVMFinder()\n else:\n # Use the Linux way for other systems\n from ._linux import LinuxJVMFinder\n finder = LinuxJVMFinder()\n\n return finder.get_jvm_path()\n"
] |
#*****************************************************************************
# Copyright 2004-2008 Steve Menard
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#*****************************************************************************
import sys
import _jpype
from . import _jclass
from . import _jarray
from . import _jwrapper
from . import _jproxy
from . import _jexception
from . import _jcollection
from . import _jobject
from . import _jio
from . import _properties
from . import nio
from . import reflect
from . import _refdaemon
from . import _classpath
# When True the reference daemon runs on a Python thread rather than a
# Java thread.
_usePythonThreadForDaemon = False

def setUsePythonThreadForDeamon(v):
    """Select whether the reference daemon should use a Python thread.

    (The historical 'Deamon' spelling is kept for API compatibility.)
    """
    global _usePythonThreadForDaemon
    _usePythonThreadForDaemon = v

# Callbacks to run once the JVM has been started.
_initializers = []

def registerJVMInitializer(func):
    """Register a function to be called after jvm is started"""
    _initializers.append(func)
def _initialize():
    """Initialize every sub-module wrapper, then run registered JVM hooks."""
    for _mod in (_properties, _jclass, _jarray, _jwrapper, _jproxy,
                 _jexception, _jcollection, _jobject, nio, reflect):
        _mod._initialize()
    for func in _initializers:
        func()
def isJVMStarted():
    """Return whether the JVM has been started."""
    return _jpype.isStarted()

def _hasClassPath(args):
    """Return True when an argument already supplies -Djava.class.path."""
    return any(arg.startswith('-Djava.class.path') for arg in args)
def attachToJVM(jvm):
    """Attach this process to an already-running JVM and initialize wrappers."""
    _jpype.attach(jvm)
    _initialize()

def shutdownJVM():
    """Shut down the running JVM."""
    _jpype.shutdown()

def isThreadAttachedToJVM():
    """Return whether the current thread is attached to the JVM."""
    return _jpype.isThreadAttachedToJVM()

def attachThreadToJVM():
    """Attach the current thread to the JVM."""
    _jpype.attachThreadToJVM()

def detachThreadFromJVM():
    """Detach the current thread from the JVM."""
    _jpype.detachThreadFromJVM()
def get_default_jvm_path():
    """
    Retrieves the path to the default or first found JVM library

    :return: The path to the JVM shared library file
    :raise ValueError: No JVM library found
    """
    platform = sys.platform
    if platform == "cygwin":
        # Cygwin drives a Windows JVM through /proc/registry
        from ._cygwin import WindowsJVMFinder
        finder = WindowsJVMFinder()
    elif platform == "win32":
        # Native Windows
        from ._windows import WindowsJVMFinder
        finder = WindowsJVMFinder()
    elif platform == "darwin":
        # Mac OS X
        from ._darwin import DarwinJVMFinder
        finder = DarwinJVMFinder()
    else:
        # Everything else is treated like Linux
        from ._linux import LinuxJVMFinder
        finder = LinuxJVMFinder()
    return finder.get_jvm_path()

# Naming compatibility
getDefaultJVMPath = get_default_jvm_path
class ConversionConfigClass(object):
    """Holds the string-conversion configuration for the Java bridge.

    The ``string`` property mirrors its value into the native module via
    ``_jpype.setConvertStringObjects`` whenever it is assigned.
    """
    def __init__(self):
        # 1 = automatically convert Java strings to Python strings.
        self._convertString = 1

    def _getConvertString(self):
        return self._convertString

    def _setConvertString(self, value):
        # Normalize any truthy value to the 1/0 flag the native side expects.
        self._convertString = 1 if value else 0
        _jpype.setConvertStringObjects(self._convertString)

    string = property(_getConvertString, _setConvertString, None)

ConversionConfig = ConversionConfigClass()
|
jpype-project/jpype
|
jpype/_core.py
|
get_default_jvm_path
|
python
|
def get_default_jvm_path():
    """Return the path to the default or first found JVM library.

    :return: The path to the JVM shared library file
    :raise ValueError: No JVM library found
    """
    platform = sys.platform
    if platform == "cygwin":
        # Cygwin drives a Windows JVM through /proc/registry
        from ._cygwin import WindowsJVMFinder
        finder = WindowsJVMFinder()
    elif platform == "win32":
        # Native Windows
        from ._windows import WindowsJVMFinder
        finder = WindowsJVMFinder()
    elif platform == "darwin":
        # Mac OS X
        from ._darwin import DarwinJVMFinder
        finder = DarwinJVMFinder()
    else:
        # Everything else is treated like Linux
        from ._linux import LinuxJVMFinder
        finder = LinuxJVMFinder()
    return finder.get_jvm_path()
|
Retrieves the path to the default or first found JVM library
:return: The path to the JVM shared library file
:raise ValueError: No JVM library found
|
train
|
https://github.com/jpype-project/jpype/blob/3ce953ae7b35244077249ce650b9acd0a7010d17/jpype/_core.py#L131-L155
|
[
"def get_jvm_path(self):\n \"\"\"\n Retrieves the path to the default or first found JVM library\n\n :return: The path to the JVM shared library file\n :raise ValueError: No JVM library found\n \"\"\"\n for method in self._methods:\n try:\n jvm = method()\n\n # If found check the architecture \n if jvm:\n self.check(jvm)\n except NotImplementedError:\n # Ignore missing implementations\n pass\n except JVMNotFoundException:\n # Ignore not successful methods\n pass\n except JVMNotSupportedException:\n pass\n\n else:\n if jvm is not None:\n return jvm\n"
] |
#*****************************************************************************
# Copyright 2004-2008 Steve Menard
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#*****************************************************************************
import sys
import _jpype
from . import _jclass
from . import _jarray
from . import _jwrapper
from . import _jproxy
from . import _jexception
from . import _jcollection
from . import _jobject
from . import _jio
from . import _properties
from . import nio
from . import reflect
from . import _refdaemon
from . import _classpath
# When True the reference daemon runs on a Python thread rather than a
# Java thread.
_usePythonThreadForDaemon = False

def setUsePythonThreadForDeamon(v):
    """Select whether the reference daemon should use a Python thread.

    (The historical 'Deamon' spelling is kept for API compatibility.)
    """
    global _usePythonThreadForDaemon
    _usePythonThreadForDaemon = v

# Callbacks to run once the JVM has been started.
_initializers = []

def registerJVMInitializer(func):
    """Register a function to be called after jvm is started"""
    _initializers.append(func)
def _initialize():
    """Initialize every sub-module wrapper, then run registered JVM hooks."""
    for _mod in (_properties, _jclass, _jarray, _jwrapper, _jproxy,
                 _jexception, _jcollection, _jobject, nio, reflect):
        _mod._initialize()
    for func in _initializers:
        func()
def isJVMStarted():
    """Return whether the JVM has been started."""
    return _jpype.isStarted()

def _hasClassPath(args):
    """Return True when an argument already supplies -Djava.class.path."""
    return any(arg.startswith('-Djava.class.path') for arg in args)
def startJVM(jvm=None, *args, **kwargs):
    """
    Starts a Java Virtual Machine.  Without options it will start
    the JVM with the default classpath and jvm.  The default classpath
    will be determined by jpype.getClassPath().  The default JVM is
    determined by jpype.getDefaultJVMPath().

    Args:
      jvm (str):  Path to the jvm library file (libjvm.so, jvm.dll, ...)
        default=None will use jpype.getDefaultJVMPath()
      *args (str[]): Arguments to give to the JVM
      classpath (Optional[string]): set the classpath for the jvm.
        This will override any classpath supplied in the arguments
        list.
      ignoreUnrecognized (Optional[bool]): option to jvm to ignore
        invalid jvm arguments. (Default False)
    """
    if jvm is None:
        jvm = get_default_jvm_path()
    # Use the default classpath when the caller supplied neither a
    # -Djava.class.path argument nor a classpath keyword.
    if not _hasClassPath(args) and 'classpath' not in kwargs:
        kwargs['classpath'] = _classpath.getClassPath()
    if 'ignoreUnrecognized' not in kwargs:
        kwargs['ignoreUnrecognized'] = False
    # Classpath handling: translate the keyword into a JVM argument.
    args = list(args)
    # FIX: use 'is not None' (identity) rather than '!= None'; also removed
    # the leftover debug print() calls that polluted stdout.
    if 'classpath' in kwargs and kwargs['classpath'] is not None:
        args.append('-Djava.class.path=%s' % (kwargs['classpath']))
    _jpype.startup(jvm, tuple(args), kwargs['ignoreUnrecognized'])
    _initialize()
    # start the reference daemon thread
    if _usePythonThreadForDaemon:
        _refdaemon.startPython()
    else:
        _refdaemon.startJava()
def attachToJVM(jvm):
    """Attach this process to an already-running JVM and initialize wrappers."""
    _jpype.attach(jvm)
    _initialize()

def shutdownJVM():
    """Shut down the running JVM."""
    _jpype.shutdown()

def isThreadAttachedToJVM():
    """Return whether the current thread is attached to the JVM."""
    return _jpype.isThreadAttachedToJVM()

def attachThreadToJVM():
    """Attach the current thread to the JVM."""
    _jpype.attachThreadToJVM()

def detachThreadFromJVM():
    """Detach the current thread from the JVM."""
    _jpype.detachThreadFromJVM()

# Naming compatibility
getDefaultJVMPath = get_default_jvm_path
class ConversionConfigClass(object):
    """Holds the string-conversion configuration for the Java bridge.

    The ``string`` property mirrors its value into the native module via
    ``_jpype.setConvertStringObjects`` whenever it is assigned.
    """
    def __init__(self):
        # 1 = automatically convert Java strings to Python strings.
        self._convertString = 1

    def _getConvertString(self):
        return self._convertString

    def _setConvertString(self, value):
        # Normalize any truthy value to the 1/0 flag the native side expects.
        self._convertString = 1 if value else 0
        _jpype.setConvertStringObjects(self._convertString)

    string = property(_getConvertString, _setConvertString, None)

ConversionConfig = ConversionConfigClass()
|
jpype-project/jpype
|
jpype/_jvmfinder.py
|
JVMFinder.find_libjvm
|
python
|
def find_libjvm(self, java_home):
    """Recursively look for the JVM shared library under *java_home*.

    :param java_home: A Java home folder
    :return: The first found library path
    :raise JVMNotSupportedException: only known-broken JVMs were found
    :raise JVMNotFoundException: no JVM library exists under java_home
    """
    # FIX: removed the unused 'found_jamvm' local from the original.
    non_supported_jvm = ('cacao', 'jamvm')
    found_non_supported_jvm = False
    candidate = None
    # Look for the file
    for root, _, names in os.walk(java_home):
        if self._libfile in names:
            # Found it, but skip known-broken JVM implementations
            candidate = os.path.split(root)[1]
            if candidate in non_supported_jvm:
                found_non_supported_jvm = True
                continue  # maybe we will find another one?
            return os.path.join(root, self._libfile)
    if found_non_supported_jvm:
        raise JVMNotSupportedException("Sorry '{0}' is known to be "
                                       "broken. Please ensure your "
                                       "JAVA_HOME contains at least "
                                       "another JVM implementation "
                                       "(eg. server)"
                                       .format(candidate))
    # File not found
    raise JVMNotFoundException("Sorry no JVM could be found. "
                               "Please ensure your JAVA_HOME "
                               "environment variable is pointing "
                               "to correct installation.")
|
Recursively looks for the given file
:param java_home: A Java home folder
:param filename: Name of the file to find
:return: The first found file path, or None
|
train
|
https://github.com/jpype-project/jpype/blob/3ce953ae7b35244077249ce650b9acd0a7010d17/jpype/_jvmfinder.py#L48-L82
| null |
class JVMFinder(object):
    """
    JVM library finder base class

    Subclasses override the library file name, candidate locations and the
    tuple of search methods for their platform.
    """
    def __init__(self):
        """
        Sets up members
        """
        # Library file name
        self._libfile = "libjvm.so"
        # Predefined locations
        self._locations = ("/usr/lib/jvm", "/usr/java")
        # Search methods, tried in order by get_jvm_path()
        self._methods = (self._get_from_java_home,
                         self._get_from_known_locations)
    def find_possible_homes(self, parents):
        """
        Generator that looks for the first-level children folders that could be
        Java installations, according to their name

        :param parents: A list of parent directories
        :return: The possible JVM installation folders
        """
        homes = []
        java_names = ('jre', 'jdk', 'java')
        for parent in parents:
            for childname in sorted(os.listdir(parent)):
                # Compute the real path
                path = os.path.realpath(os.path.join(parent, childname))
                if path in homes or not os.path.isdir(path):
                    # Already known path, or not a directory -> ignore
                    continue
                # Check if the path seems OK
                real_name = os.path.basename(path).lower()
                for java_name in java_names:
                    if java_name in real_name:
                        # Correct JVM folder name
                        homes.append(path)
                        yield path
                        break
    def check(self, jvm):
        """
        Check if the jvm is valid for this architecture.

        This method should be overriden for each architecture.

        :raise JVMNotSupportedException: If the jvm is not supported.
        """
        pass
    def get_jvm_path(self):
        """
        Retrieves the path to the default or first found JVM library

        :return: The path to the JVM shared library file
        :raise JVMNotFoundException: when no search method finds a usable JVM
        """
        for method in self._methods:
            try:
                jvm = method()
                # If found check the architecture
                if jvm:
                    self.check(jvm)
            except NotImplementedError:
                # Ignore missing implementations
                pass
            except JVMNotFoundException:
                # Ignore not successful methods
                pass
            except JVMNotSupportedException:
                # Candidate exists but cannot run this interpreter
                pass
            else:
                # No exception: accept the candidate if one was returned
                if jvm is not None:
                    return jvm
        else:
            # for/else: every search method was tried without success
            raise JVMNotFoundException("No JVM shared library file ({0}) "
                                       "found. Try setting up the JAVA_HOME "
                                       "environment variable properly."
                                       .format(self._libfile))
    def _get_from_java_home(self):
        """
        Retrieves the Java library path according to the JAVA_HOME environment
        variable

        :return: The path to the JVM library, or None
        """
        # Get the environment variable
        java_home = os.getenv("JAVA_HOME")
        if java_home and os.path.exists(java_home):
            # Get the real installation path
            java_home = os.path.realpath(java_home)
            # Cygwin has a bug in realpath
            if not os.path.exists(java_home):
                java_home = os.getenv("JAVA_HOME")
            # Look for the library file
            return self.find_libjvm(java_home)
    def _get_from_known_locations(self):
        """
        Retrieves the first existing Java library path in the predefined known
        locations

        :return: The path to the JVM library, or None
        """
        for home in self.find_possible_homes(self._locations):
            jvm = self.find_libjvm(home)
            if jvm is not None:
                return jvm
|
jpype-project/jpype
|
jpype/_jvmfinder.py
|
JVMFinder.find_possible_homes
|
python
|
def find_possible_homes(self, parents):
    """Yield first-level child directories of *parents* whose names look
    like Java installations (contain 'jre', 'jdk' or 'java').

    :param parents: A list of parent directories
    :return: Generator of candidate JVM installation folders
    """
    seen = []
    markers = ('jre', 'jdk', 'java')
    for parent in parents:
        for entry in sorted(os.listdir(parent)):
            candidate = os.path.realpath(os.path.join(parent, entry))
            # Skip duplicates (after symlink resolution) and non-dirs.
            if candidate in seen or not os.path.isdir(candidate):
                continue
            name = os.path.basename(candidate).lower()
            if any(marker in name for marker in markers):
                seen.append(candidate)
                yield candidate
|
Generator that looks for the first-level children folders that could be
Java installations, according to their name
:param parents: A list of parent directories
:return: The possible JVM installation folders
|
train
|
https://github.com/jpype-project/jpype/blob/3ce953ae7b35244077249ce650b9acd0a7010d17/jpype/_jvmfinder.py#L85-L111
| null |
class JVMFinder(object):
    """
    JVM library finder base class

    Tries each platform-specific search method in order until one of them
    produces a JVM shared library path.
    """
    def __init__(self):
        """Sets up members."""
        # Library file name
        self._libfile = "libjvm.so"
        # Predefined locations
        self._locations = ("/usr/lib/jvm", "/usr/java")
        # Search methods, tried in order by get_jvm_path()
        self._methods = (self._get_from_java_home,
                         self._get_from_known_locations)

    def find_libjvm(self, java_home):
        """Recursively look for the JVM shared library under *java_home*.

        :param java_home: A Java home folder
        :return: The first found library path
        :raise JVMNotSupportedException: only known-broken JVMs were found
        :raise JVMNotFoundException: no JVM library exists under java_home
        """
        # FIX: removed the unused 'found_jamvm' local from the original.
        non_supported_jvm = ('cacao', 'jamvm')
        found_non_supported_jvm = False
        candidate = None
        # Look for the file
        for root, _, names in os.walk(java_home):
            if self._libfile in names:
                # Found it, but skip known-broken JVM implementations
                candidate = os.path.split(root)[1]
                if candidate in non_supported_jvm:
                    found_non_supported_jvm = True
                    continue  # maybe we will find another one?
                return os.path.join(root, self._libfile)
        if found_non_supported_jvm:
            raise JVMNotSupportedException("Sorry '{0}' is known to be "
                                           "broken. Please ensure your "
                                           "JAVA_HOME contains at least "
                                           "another JVM implementation "
                                           "(eg. server)"
                                           .format(candidate))
        # File not found
        raise JVMNotFoundException("Sorry no JVM could be found. "
                                   "Please ensure your JAVA_HOME "
                                   "environment variable is pointing "
                                   "to correct installation.")

    def check(self, jvm):
        """
        Check if the jvm is valid for this architecture.

        This method should be overriden for each architecture.

        :raise JVMNotSupportedException: If the jvm is not supported.
        """
        pass

    def get_jvm_path(self):
        """
        Retrieves the path to the default or first found JVM library

        :return: The path to the JVM shared library file
        :raise JVMNotFoundException: when no search method finds a usable JVM
        """
        for method in self._methods:
            try:
                jvm = method()
                # If found check the architecture
                if jvm:
                    self.check(jvm)
            except NotImplementedError:
                # Ignore missing implementations
                pass
            except JVMNotFoundException:
                # Ignore not successful methods
                pass
            except JVMNotSupportedException:
                # Candidate exists but cannot run this interpreter
                pass
            else:
                # No exception: accept the candidate if one was returned
                if jvm is not None:
                    return jvm
        else:
            # for/else: every search method was tried without success
            raise JVMNotFoundException("No JVM shared library file ({0}) "
                                       "found. Try setting up the JAVA_HOME "
                                       "environment variable properly."
                                       .format(self._libfile))

    def _get_from_java_home(self):
        """
        Retrieves the Java library path according to the JAVA_HOME environment
        variable

        :return: The path to the JVM library, or None
        """
        # Get the environment variable
        java_home = os.getenv("JAVA_HOME")
        if java_home and os.path.exists(java_home):
            # Get the real installation path
            java_home = os.path.realpath(java_home)
            # Cygwin has a bug in realpath
            if not os.path.exists(java_home):
                java_home = os.getenv("JAVA_HOME")
            # Look for the library file
            return self.find_libjvm(java_home)

    def _get_from_known_locations(self):
        """
        Retrieves the first existing Java library path in the predefined known
        locations

        :return: The path to the JVM library, or None
        """
        for home in self.find_possible_homes(self._locations):
            jvm = self.find_libjvm(home)
            if jvm is not None:
                return jvm
|
jpype-project/jpype
|
jpype/_jvmfinder.py
|
JVMFinder.get_jvm_path
|
python
|
def get_jvm_path(self):
    """
    Retrieves the path to the default or first found JVM library.

    Tries each configured search method in order; the first one that yields
    a JVM path passing the architecture check wins.

    :return: The path to the JVM shared library file
    :raise JVMNotFoundException: when no search method finds a usable JVM
    """
    for method in self._methods:
        try:
            jvm = method()
            # If found check the architecture
            if jvm:
                self.check(jvm)
        except NotImplementedError:
            # Ignore missing implementations
            pass
        except JVMNotFoundException:
            # Ignore not successful methods
            pass
        except JVMNotSupportedException:
            # Candidate exists but cannot run this interpreter
            pass
        else:
            # No exception raised: accept the candidate if one was returned
            if jvm is not None:
                return jvm
    else:
        # for/else: every search method was tried without success
        raise JVMNotFoundException("No JVM shared library file ({0}) "
                                   "found. Try setting up the JAVA_HOME "
                                   "environment variable properly."
                                   .format(self._libfile))
|
Retrieves the path to the default or first found JVM library
:return: The path to the JVM shared library file
:raise ValueError: No JVM library found
|
train
|
https://github.com/jpype-project/jpype/blob/3ce953ae7b35244077249ce650b9acd0a7010d17/jpype/_jvmfinder.py#L122-L153
|
[
"def check(self, jvm):\n \"\"\"\n Check if the jvm is valid for this architecture.\n\n This method should be overriden for each architecture.\n :raise JVMNotSupportedException: If the jvm is not supported.\n \"\"\"\n pass\n",
"def _get_from_java_home(self):\n \"\"\"\n Retrieves the Java library path according to the JAVA_HOME environment\n variable\n\n :return: The path to the JVM library, or None\n \"\"\"\n # Get the environment variable\n java_home = os.getenv(\"JAVA_HOME\")\n if java_home and os.path.exists(java_home):\n # Get the real installation path\n java_home = os.path.realpath(java_home)\n\n # Cygwin has a bug in realpath\n if not os.path.exists(java_home):\n java_home = os.getenv(\"JAVA_HOME\")\n\n # Look for the library file\n return self.find_libjvm(java_home)\n",
"def _get_from_known_locations(self):\n \"\"\"\n Retrieves the first existing Java library path in the predefined known\n locations\n\n :return: The path to the JVM library, or None\n \"\"\"\n for home in self.find_possible_homes(self._locations):\n jvm = self.find_libjvm(home)\n if jvm is not None:\n return jvm\n"
] |
class JVMFinder(object):
    """
    JVM library finder base class

    Tries each platform-specific search method in order until one of them
    produces a JVM shared library path.
    """
    def __init__(self):
        """Sets up members."""
        # Library file name
        self._libfile = "libjvm.so"
        # Predefined locations
        self._locations = ("/usr/lib/jvm", "/usr/java")
        # Search methods, tried in order
        self._methods = (self._get_from_java_home,
                         self._get_from_known_locations)

    def find_libjvm(self, java_home):
        """Recursively look for the JVM shared library under *java_home*.

        :param java_home: A Java home folder
        :return: The first found library path
        :raise JVMNotSupportedException: only known-broken JVMs were found
        :raise JVMNotFoundException: no JVM library exists under java_home
        """
        # FIX: removed the unused 'found_jamvm' local from the original.
        non_supported_jvm = ('cacao', 'jamvm')
        found_non_supported_jvm = False
        candidate = None
        # Look for the file
        for root, _, names in os.walk(java_home):
            if self._libfile in names:
                # Found it, but skip known-broken JVM implementations
                candidate = os.path.split(root)[1]
                if candidate in non_supported_jvm:
                    found_non_supported_jvm = True
                    continue  # maybe we will find another one?
                return os.path.join(root, self._libfile)
        if found_non_supported_jvm:
            raise JVMNotSupportedException("Sorry '{0}' is known to be "
                                           "broken. Please ensure your "
                                           "JAVA_HOME contains at least "
                                           "another JVM implementation "
                                           "(eg. server)"
                                           .format(candidate))
        # File not found
        raise JVMNotFoundException("Sorry no JVM could be found. "
                                   "Please ensure your JAVA_HOME "
                                   "environment variable is pointing "
                                   "to correct installation.")

    def find_possible_homes(self, parents):
        """
        Generator that looks for the first-level children folders that could
        be Java installations, according to their name.

        :param parents: A list of parent directories
        :return: The possible JVM installation folders
        """
        homes = []
        java_names = ('jre', 'jdk', 'java')
        for parent in parents:
            for childname in sorted(os.listdir(parent)):
                # Compute the real path (resolving symlinks)
                path = os.path.realpath(os.path.join(parent, childname))
                if path in homes or not os.path.isdir(path):
                    # Already known path, or not a directory -> ignore
                    continue
                # Check if the directory name looks like a Java install
                real_name = os.path.basename(path).lower()
                for java_name in java_names:
                    if java_name in real_name:
                        homes.append(path)
                        yield path
                        break

    def check(self, jvm):
        """
        Check if the jvm is valid for this architecture.

        This method should be overriden for each architecture.

        :raise JVMNotSupportedException: If the jvm is not supported.
        """
        pass

    def _get_from_java_home(self):
        """
        Retrieves the Java library path according to the JAVA_HOME environment
        variable

        :return: The path to the JVM library, or None
        """
        # Get the environment variable
        java_home = os.getenv("JAVA_HOME")
        if java_home and os.path.exists(java_home):
            # Get the real installation path
            java_home = os.path.realpath(java_home)
            # Cygwin has a bug in realpath
            if not os.path.exists(java_home):
                java_home = os.getenv("JAVA_HOME")
            # Look for the library file
            return self.find_libjvm(java_home)

    def _get_from_known_locations(self):
        """
        Retrieves the first existing Java library path in the predefined known
        locations

        :return: The path to the JVM library, or None
        """
        for home in self.find_possible_homes(self._locations):
            jvm = self.find_libjvm(home)
            if jvm is not None:
                return jvm
|
jpype-project/jpype
|
jpype/_jvmfinder.py
|
JVMFinder._get_from_java_home
|
python
|
def _get_from_java_home(self):
# Get the environment variable
java_home = os.getenv("JAVA_HOME")
if java_home and os.path.exists(java_home):
# Get the real installation path
java_home = os.path.realpath(java_home)
# Cygwin has a bug in realpath
if not os.path.exists(java_home):
java_home = os.getenv("JAVA_HOME")
# Look for the library file
return self.find_libjvm(java_home)
|
Retrieves the Java library path according to the JAVA_HOME environment
variable
:return: The path to the JVM library, or None
|
train
|
https://github.com/jpype-project/jpype/blob/3ce953ae7b35244077249ce650b9acd0a7010d17/jpype/_jvmfinder.py#L156-L174
|
[
"def find_libjvm(self, java_home):\n \"\"\"\n Recursively looks for the given file\n\n :param java_home: A Java home folder\n :param filename: Name of the file to find\n :return: The first found file path, or None\n \"\"\"\n found_jamvm = False\n non_supported_jvm = ('cacao', 'jamvm')\n found_non_supported_jvm = False\n\n # Look for the file\n for root, _, names in os.walk(java_home):\n if self._libfile in names:\n # Found it, but check for non supported jvms\n candidate = os.path.split(root)[1]\n if candidate in non_supported_jvm:\n found_non_supported_jvm = True\n continue # maybe we will find another one?\n return os.path.join(root, self._libfile)\n"
] |
class JVMFinder(object):
"""
JVM library finder base class
"""
def __init__(self):
"""
Sets up members
"""
# Library file name
self._libfile = "libjvm.so"
# Predefined locations
self._locations = ("/usr/lib/jvm", "/usr/java")
# Search methods
self._methods = (self._get_from_java_home,
self._get_from_known_locations)
def find_libjvm(self, java_home):
"""
Recursively looks for the given file
:param java_home: A Java home folder
:param filename: Name of the file to find
:return: The first found file path, or None
"""
found_jamvm = False
non_supported_jvm = ('cacao', 'jamvm')
found_non_supported_jvm = False
# Look for the file
for root, _, names in os.walk(java_home):
if self._libfile in names:
# Found it, but check for non supported jvms
candidate = os.path.split(root)[1]
if candidate in non_supported_jvm:
found_non_supported_jvm = True
continue # maybe we will find another one?
return os.path.join(root, self._libfile)
else:
if found_non_supported_jvm:
raise JVMNotSupportedException("Sorry '{0}' is known to be "
"broken. Please ensure your "
"JAVA_HOME contains at least "
"another JVM implementation "
"(eg. server)"
.format(candidate))
# File not found
raise JVMNotFoundException("Sorry no JVM could be found. "
"Please ensure your JAVA_HOME "
"environment variable is pointing "
"to correct installation.")
def find_possible_homes(self, parents):
"""
Generator that looks for the first-level children folders that could be
Java installations, according to their name
:param parents: A list of parent directories
:return: The possible JVM installation folders
"""
homes = []
java_names = ('jre', 'jdk', 'java')
for parent in parents:
for childname in sorted(os.listdir(parent)):
# Compute the real path
path = os.path.realpath(os.path.join(parent, childname))
if path in homes or not os.path.isdir(path):
# Already known path, or not a directory -> ignore
continue
# Check if the path seems OK
real_name = os.path.basename(path).lower()
for java_name in java_names:
if java_name in real_name:
# Correct JVM folder name
homes.append(path)
yield path
break
def check(self, jvm):
"""
Check if the jvm is valid for this architecture.
This method should be overriden for each architecture.
:raise JVMNotSupportedException: If the jvm is not supported.
"""
pass
def get_jvm_path(self):
"""
Retrieves the path to the default or first found JVM library
:return: The path to the JVM shared library file
:raise ValueError: No JVM library found
"""
for method in self._methods:
try:
jvm = method()
# If found check the architecture
if jvm:
self.check(jvm)
except NotImplementedError:
# Ignore missing implementations
pass
except JVMNotFoundException:
# Ignore not successful methods
pass
except JVMNotSupportedException:
pass
else:
if jvm is not None:
return jvm
else:
raise JVMNotFoundException("No JVM shared library file ({0}) "
"found. Try setting up the JAVA_HOME "
"environment variable properly."
.format(self._libfile))
def _get_from_known_locations(self):
"""
Retrieves the first existing Java library path in the predefined known
locations
:return: The path to the JVM library, or None
"""
for home in self.find_possible_homes(self._locations):
jvm = self.find_libjvm(home)
if jvm is not None:
return jvm
|
jpype-project/jpype
|
jpype/_jvmfinder.py
|
JVMFinder._get_from_known_locations
|
python
|
def _get_from_known_locations(self):
for home in self.find_possible_homes(self._locations):
jvm = self.find_libjvm(home)
if jvm is not None:
return jvm
|
Retrieves the first existing Java library path in the predefined known
locations
:return: The path to the JVM library, or None
|
train
|
https://github.com/jpype-project/jpype/blob/3ce953ae7b35244077249ce650b9acd0a7010d17/jpype/_jvmfinder.py#L177-L187
|
[
"def find_libjvm(self, java_home):\n \"\"\"\n Recursively looks for the given file\n\n :param java_home: A Java home folder\n :param filename: Name of the file to find\n :return: The first found file path, or None\n \"\"\"\n found_jamvm = False\n non_supported_jvm = ('cacao', 'jamvm')\n found_non_supported_jvm = False\n\n # Look for the file\n for root, _, names in os.walk(java_home):\n if self._libfile in names:\n # Found it, but check for non supported jvms\n candidate = os.path.split(root)[1]\n if candidate in non_supported_jvm:\n found_non_supported_jvm = True\n continue # maybe we will find another one?\n return os.path.join(root, self._libfile)\n",
"def find_possible_homes(self, parents):\n \"\"\"\n Generator that looks for the first-level children folders that could be\n Java installations, according to their name\n\n :param parents: A list of parent directories\n :return: The possible JVM installation folders\n \"\"\"\n homes = []\n java_names = ('jre', 'jdk', 'java')\n\n for parent in parents:\n for childname in sorted(os.listdir(parent)):\n # Compute the real path\n path = os.path.realpath(os.path.join(parent, childname))\n if path in homes or not os.path.isdir(path):\n # Already known path, or not a directory -> ignore\n continue\n\n # Check if the path seems OK\n real_name = os.path.basename(path).lower()\n for java_name in java_names:\n if java_name in real_name:\n # Correct JVM folder name\n homes.append(path)\n yield path\n break\n"
] |
class JVMFinder(object):
"""
JVM library finder base class
"""
def __init__(self):
"""
Sets up members
"""
# Library file name
self._libfile = "libjvm.so"
# Predefined locations
self._locations = ("/usr/lib/jvm", "/usr/java")
# Search methods
self._methods = (self._get_from_java_home,
self._get_from_known_locations)
def find_libjvm(self, java_home):
"""
Recursively looks for the given file
:param java_home: A Java home folder
:param filename: Name of the file to find
:return: The first found file path, or None
"""
found_jamvm = False
non_supported_jvm = ('cacao', 'jamvm')
found_non_supported_jvm = False
# Look for the file
for root, _, names in os.walk(java_home):
if self._libfile in names:
# Found it, but check for non supported jvms
candidate = os.path.split(root)[1]
if candidate in non_supported_jvm:
found_non_supported_jvm = True
continue # maybe we will find another one?
return os.path.join(root, self._libfile)
else:
if found_non_supported_jvm:
raise JVMNotSupportedException("Sorry '{0}' is known to be "
"broken. Please ensure your "
"JAVA_HOME contains at least "
"another JVM implementation "
"(eg. server)"
.format(candidate))
# File not found
raise JVMNotFoundException("Sorry no JVM could be found. "
"Please ensure your JAVA_HOME "
"environment variable is pointing "
"to correct installation.")
def find_possible_homes(self, parents):
"""
Generator that looks for the first-level children folders that could be
Java installations, according to their name
:param parents: A list of parent directories
:return: The possible JVM installation folders
"""
homes = []
java_names = ('jre', 'jdk', 'java')
for parent in parents:
for childname in sorted(os.listdir(parent)):
# Compute the real path
path = os.path.realpath(os.path.join(parent, childname))
if path in homes or not os.path.isdir(path):
# Already known path, or not a directory -> ignore
continue
# Check if the path seems OK
real_name = os.path.basename(path).lower()
for java_name in java_names:
if java_name in real_name:
# Correct JVM folder name
homes.append(path)
yield path
break
def check(self, jvm):
"""
Check if the jvm is valid for this architecture.
This method should be overriden for each architecture.
:raise JVMNotSupportedException: If the jvm is not supported.
"""
pass
def get_jvm_path(self):
"""
Retrieves the path to the default or first found JVM library
:return: The path to the JVM shared library file
:raise ValueError: No JVM library found
"""
for method in self._methods:
try:
jvm = method()
# If found check the architecture
if jvm:
self.check(jvm)
except NotImplementedError:
# Ignore missing implementations
pass
except JVMNotFoundException:
# Ignore not successful methods
pass
except JVMNotSupportedException:
pass
else:
if jvm is not None:
return jvm
else:
raise JVMNotFoundException("No JVM shared library file ({0}) "
"found. Try setting up the JAVA_HOME "
"environment variable properly."
.format(self._libfile))
def _get_from_java_home(self):
"""
Retrieves the Java library path according to the JAVA_HOME environment
variable
:return: The path to the JVM library, or None
"""
# Get the environment variable
java_home = os.getenv("JAVA_HOME")
if java_home and os.path.exists(java_home):
# Get the real installation path
java_home = os.path.realpath(java_home)
# Cygwin has a bug in realpath
if not os.path.exists(java_home):
java_home = os.getenv("JAVA_HOME")
# Look for the library file
return self.find_libjvm(java_home)
|
jpype-project/jpype
|
setupext/build_java.py
|
BuildJavaCommand.run
|
python
|
def run(self):
buildDir = os.path.join("..","build","lib")
buildXmlFile = os.path.join("native","build.xml")
command = [self.distribution.ant, '-Dbuild=%s'%buildDir, '-f', buildXmlFile]
cmdStr= ' '.join(command)
self.announce(" %s"%cmdStr, level=distutils.log.INFO)
try:
subprocess.check_call(command)
except subprocess.CalledProcessError as exc:
distutils.log.error(exc.output)
raise DistutilsPlatformError("Error executing {}".format(exc.cmd))
|
Run command.
|
train
|
https://github.com/jpype-project/jpype/blob/3ce953ae7b35244077249ce650b9acd0a7010d17/setupext/build_java.py#L22-L33
| null |
class BuildJavaCommand(distutils.cmd.Command):
"""A custom command to create jar file during build."""
description = 'run ant to make a jar'
user_options = []
def initialize_options(self):
"""Set default values for options."""
pass
def finalize_options(self):
"""Post-process options."""
pass
|
apacha/OMR-Datasets
|
omrdatasettools/converters/ImageMover.py
|
ImageMover.move_images
|
python
|
def move_images(self, image_directory):
image_paths = glob(image_directory + "/**/*.png", recursive=True)
for image_path in image_paths:
destination = image_path.replace("\\image\\", "\\")
shutil.move(image_path, destination)
image_folders = glob(image_directory + "/**/image", recursive=True)
for image_folder in image_folders:
os.removedirs(image_folder)
|
Moves png-files one directory up from path/image/*.png -> path/*.png
|
train
|
https://github.com/apacha/OMR-Datasets/blob/d0a22a03ae35caeef211729efa340e1ec0e01ea5/omrdatasettools/converters/ImageMover.py#L9-L18
| null |
class ImageMover():
|
apacha/OMR-Datasets
|
omrdatasettools/image_generators/HomusImageGenerator.py
|
HomusImageGenerator.create_images
|
python
|
def create_images(raw_data_directory: str,
destination_directory: str,
stroke_thicknesses: List[int],
canvas_width: int = None,
canvas_height: int = None,
staff_line_spacing: int = 14,
staff_line_vertical_offsets: List[int] = None,
random_position_on_canvas: bool = False) -> dict:
all_symbol_files = [y for x in os.walk(raw_data_directory) for y in glob(os.path.join(x[0], '*.txt'))]
staff_line_multiplier = 1
if staff_line_vertical_offsets is not None and staff_line_vertical_offsets:
staff_line_multiplier = len(staff_line_vertical_offsets)
total_number_of_symbols = len(all_symbol_files) * len(stroke_thicknesses) * staff_line_multiplier
output = "Generating {0} images with {1} symbols in {2} different stroke thicknesses ({3})".format(
total_number_of_symbols, len(all_symbol_files), len(stroke_thicknesses), stroke_thicknesses)
if staff_line_vertical_offsets is not None:
output += " and with staff-lines with {0} different offsets from the top ({1})".format(
staff_line_multiplier, staff_line_vertical_offsets)
if canvas_width is not None and canvas_height is not None:
if random_position_on_canvas is False:
output += "\nRandomly drawn on a fixed canvas of size {0}x{1} (Width x Height)".format(canvas_width,
canvas_height)
else:
output += "\nCentrally drawn on a fixed canvas of size {0}x{1} (Width x Height)".format(canvas_width,
canvas_height)
print(output)
print("In directory {0}".format(os.path.abspath(destination_directory)), flush=True)
bounding_boxes = dict()
progress_bar = tqdm(total=total_number_of_symbols, mininterval=0.25)
for symbol_file in all_symbol_files:
with open(symbol_file) as file:
content = file.read()
symbol = HomusSymbol.initialize_from_string(content)
target_directory = os.path.join(destination_directory, symbol.symbol_class)
os.makedirs(target_directory, exist_ok=True)
raw_file_name_without_extension = os.path.splitext(os.path.basename(symbol_file))[0]
for stroke_thickness in stroke_thicknesses:
export_path = ExportPath(destination_directory, symbol.symbol_class, raw_file_name_without_extension,
'png', stroke_thickness)
if canvas_width is None and canvas_height is None:
symbol.draw_into_bitmap(export_path, stroke_thickness, margin=2)
else:
symbol.draw_onto_canvas(export_path, stroke_thickness, 0, canvas_width,
canvas_height, staff_line_spacing, staff_line_vertical_offsets,
bounding_boxes, random_position_on_canvas)
progress_bar.update(1 * staff_line_multiplier)
progress_bar.close()
return bounding_boxes
|
Creates a visual representation of the Homus Dataset by parsing all text-files and the symbols as specified
by the parameters by drawing lines that connect the points from each stroke of each symbol.
Each symbol will be drawn in the center of a fixed canvas, specified by width and height.
:param raw_data_directory: The directory, that contains the text-files that contain the textual representation
of the music symbols
:param destination_directory: The directory, in which the symbols should be generated into. One sub-folder per
symbol category will be generated automatically
:param stroke_thicknesses: The thickness of the pen, used for drawing the lines in pixels. If multiple are
specified, multiple images will be generated that have a different suffix, e.g.
1-16-3.png for the 3-px version and 1-16-2.png for the 2-px version of the image 1-16
:param canvas_width: The width of the canvas, that each image will be drawn upon, regardless of the original size of
the symbol. Larger symbols will be cropped. If the original size of the symbol should be used,
provided None here.
:param canvas_height: The height of the canvas, that each image will be drawn upon, regardless of the original size of
the symbol. Larger symbols will be cropped. If the original size of the symbol should be used,
provided None here
:param staff_line_spacing: Number of pixels spacing between each of the five staff-lines
:param staff_line_vertical_offsets: List of vertical offsets, where the staff-lines will be superimposed over
the drawn images. If None is provided, no staff-lines will be superimposed.
If multiple values are provided, multiple versions of each symbol will be
generated with the appropriate staff-lines, e.g. 1-5_3_offset_70.png and
1-5_3_offset_77.png for two versions of the symbol 1-5 with stroke thickness
3 and staff-line offsets 70 and 77 pixels from the top.
:param random_position_on_canvas: True, if the symbols should be randomly placed on the fixed canvas.
False, if the symbols should be centered in the fixed canvas.
Note that this flag only has an effect, if fixed canvas sizes are used.
:return: A dictionary that contains the file-names of all generated symbols and the respective bounding-boxes
of each symbol.
|
train
|
https://github.com/apacha/OMR-Datasets/blob/d0a22a03ae35caeef211729efa340e1ec0e01ea5/omrdatasettools/image_generators/HomusImageGenerator.py#L13-L105
|
[
"def initialize_from_string(content: str) -> 'HomusSymbol':\n \"\"\"\n Create and initializes a new symbol from a string\n\n :param content: The content of a symbol as read from the text-file\n :return: The initialized symbol\n :rtype: HomusSymbol\n \"\"\"\n\n if content is None or content is \"\":\n return None\n\n lines = content.splitlines()\n min_x = sys.maxsize\n max_x = 0\n min_y = sys.maxsize\n max_y = 0\n\n symbol_name = lines[0]\n strokes = []\n\n for stroke_string in lines[1:]:\n stroke = []\n\n for point_string in stroke_string.split(\";\"):\n if point_string is \"\":\n continue # Skip the last element, that is due to a trailing ; in each line\n\n point_x, point_y = point_string.split(\",\")\n x = int(point_x)\n y = int(point_y)\n stroke.append(Point2D(x, y))\n\n max_x = max(max_x, x)\n min_x = min(min_x, x)\n max_y = max(max_y, y)\n min_y = min(min_y, y)\n\n strokes.append(stroke)\n\n dimensions = Rectangle(Point2D(min_x, min_y), max_x - min_x + 1, max_y - min_y + 1)\n return HomusSymbol(content, strokes, symbol_name, dimensions)\n"
] |
class HomusImageGenerator:
@staticmethod
@staticmethod
def add_arguments_for_homus_image_generator(parser: argparse.ArgumentParser):
parser.add_argument("-s", "--stroke_thicknesses", dest="stroke_thicknesses", default="3",
help="Stroke thicknesses for drawing the generated bitmaps. May define comma-separated list"
" of multiple stroke thicknesses, e.g. '1,2,3'")
parser.add_argument("--staff_line_spacing", default="14", type=int,
help="Spacing between two staff-lines in pixel")
parser.add_argument("-offsets", "--staff_line_vertical_offsets", dest="offsets", default="",
help="Optional vertical offsets in pixel for drawing the symbols with superimposed "
"staff-lines starting at this pixel-offset from the top. Multiple offsets possible, "
"e.g. '81,88,95'")
parser.add_argument("--disable_fixed_canvas_size", dest="use_fixed_canvas",
action="store_false",
help="True, if the images should be drawn on a fixed canvas with the specified width and height."
"False to draw the symbols with their original sizes (each symbol might be different)")
parser.set_defaults(use_fixed_canvas=True)
parser.add_argument("--random_position_on_canvas", dest="random_position_on_canvas", action="store_true",
help="Provide this flag, if the symbols should be randomly placed on the fixed canvas."
"Omit this flag, if the symbols should be centered in the fixed canvas (default)."
"Note, that this flag only has an effect, if a fixed canvas size is used which gets "
"disabled by the --disable_fixed_canvas_size flag.")
parser.set_defaults(random_position_on_canvas=False)
|
apacha/OMR-Datasets
|
omrdatasettools/downloaders/MuscimaPlusPlusDatasetDownloader.py
|
MuscimaPlusPlusDatasetDownloader.download_and_extract_dataset
|
python
|
def download_and_extract_dataset(self, destination_directory: str):
if not os.path.exists(self.get_dataset_filename()):
print("Downloading MUSCIMA++ Dataset...")
self.download_file(self.get_dataset_download_url(), self.get_dataset_filename())
if not os.path.exists(self.get_imageset_filename()):
print("Downloading MUSCIMA++ Images...")
self.download_file(self.get_images_download_url(), self.get_imageset_filename())
print("Extracting MUSCIMA++ Dataset...")
self.extract_dataset(os.path.abspath(destination_directory))
absolute_path_to_temp_folder = os.path.abspath('MuscimaPpImages')
self.extract_dataset(absolute_path_to_temp_folder, self.get_imageset_filename())
DatasetDownloader.copytree(os.path.join(absolute_path_to_temp_folder, "fulls"),
os.path.join(os.path.abspath(destination_directory), self.dataset_version(), "data",
"images"))
self.clean_up_temp_directory(absolute_path_to_temp_folder)
|
Downloads and extracts the MUSCIMA++ dataset along with the images from the CVC-MUSCIMA dataset
that were manually annotated (140 out of 1000 images).
|
train
|
https://github.com/apacha/OMR-Datasets/blob/d0a22a03ae35caeef211729efa340e1ec0e01ea5/omrdatasettools/downloaders/MuscimaPlusPlusDatasetDownloader.py#L33-L54
|
[
"def copytree(src, dst):\n if not os.path.exists(dst):\n os.makedirs(dst)\n for item in os.listdir(src):\n s = os.path.join(src, item)\n d = os.path.join(dst, item)\n if os.path.isdir(s):\n DatasetDownloader.copytree(s, d)\n else:\n if not os.path.exists(d) or os.stat(s).st_mtime - os.stat(d).st_mtime > 1:\n shutil.copy2(s, d)\n",
"def get_dataset_filename(self) -> str:\n return \"MUSCIMA-pp_v1.0.zip\"\n"
] |
class MuscimaPlusPlusDatasetDownloader(DatasetDownloader):
""" Downloads the Muscima++ dataset
https://ufal.mff.cuni.cz/muscima
Copyright 2017 Jan Hajic jr. under CC-BY-NC-SA 4.0 license
"""
def get_dataset_download_url(self) -> str:
# Official URL: "https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11372/LRT-2372/MUSCIMA-pp_v1.0.zip?sequence=1&isAllowed=y"
return "https://github.com/apacha/OMR-Datasets/releases/download/datasets/MUSCIMA-pp_v1.0.zip"
def get_dataset_filename(self) -> str:
return "MUSCIMA-pp_v1.0.zip"
def get_images_download_url(self) -> str:
# This URL contains the images of the CVC-MUSCIMA dataset, that were annotated in the MUSCIMA++ dataset
return "https://github.com/apacha/OMR-Datasets/releases/download/datasets/CVC_MUSCIMA_PP_Annotated-Images.zip"
def get_imageset_filename(self) -> str:
return "CVC_MUSCIMA_PP_Annotated-Images.zip"
def get_measure_annotation_download_url(self):
return "https://github.com/apacha/OMR-Datasets/releases/download/datasets/MUSCIMA-pp_v1.0-measure-annotations.zip"
def get_measure_annotation_filename(self):
return "MUSCIMA-pp_v1.0-measure-annotations.zip"
def download_and_extract_measure_annotations(self, destination_directory: str):
"""
Downloads the annotations only of stave-measures, system-measures and staves that were extracted
from the MUSCIMA++ dataset via the :class:`omrdatasettools.converters.MuscimaPlusPlusAnnotationConverter`.
The annotations from that extraction are provided in a simple json format with one annotation
file per image and in the COCO format, where all annotations are stored in a single file.
"""
if not os.path.exists(self.get_measure_annotation_filename()):
print("Downloading MUSCIMA++ Measure Annotations...")
self.download_file(self.get_measure_annotation_download_url(), self.get_measure_annotation_filename())
print("Extracting MUSCIMA++ Annotations...")
absolute_path_to_temp_folder = os.path.abspath('MuscimaPpMeasureAnnotations')
self.extract_dataset(absolute_path_to_temp_folder, self.get_measure_annotation_filename())
DatasetDownloader.copytree(os.path.join(absolute_path_to_temp_folder, "coco"),
os.path.join(os.path.abspath(destination_directory), self.dataset_version(), "data",
"coco"))
DatasetDownloader.copytree(os.path.join(absolute_path_to_temp_folder, "json"),
os.path.join(os.path.abspath(destination_directory), self.dataset_version(), "data",
"json"))
self.clean_up_temp_directory(absolute_path_to_temp_folder)
def dataset_version(self):
return "v1.0"
|
apacha/OMR-Datasets
|
omrdatasettools/downloaders/MuscimaPlusPlusDatasetDownloader.py
|
MuscimaPlusPlusDatasetDownloader.download_and_extract_measure_annotations
|
python
|
def download_and_extract_measure_annotations(self, destination_directory: str):
if not os.path.exists(self.get_measure_annotation_filename()):
print("Downloading MUSCIMA++ Measure Annotations...")
self.download_file(self.get_measure_annotation_download_url(), self.get_measure_annotation_filename())
print("Extracting MUSCIMA++ Annotations...")
absolute_path_to_temp_folder = os.path.abspath('MuscimaPpMeasureAnnotations')
self.extract_dataset(absolute_path_to_temp_folder, self.get_measure_annotation_filename())
DatasetDownloader.copytree(os.path.join(absolute_path_to_temp_folder, "coco"),
os.path.join(os.path.abspath(destination_directory), self.dataset_version(), "data",
"coco"))
DatasetDownloader.copytree(os.path.join(absolute_path_to_temp_folder, "json"),
os.path.join(os.path.abspath(destination_directory), self.dataset_version(), "data",
"json"))
self.clean_up_temp_directory(absolute_path_to_temp_folder)
|
Downloads the annotations only of stave-measures, system-measures and staves that were extracted
from the MUSCIMA++ dataset via the :class:`omrdatasettools.converters.MuscimaPlusPlusAnnotationConverter`.
The annotations from that extraction are provided in a simple json format with one annotation
file per image and in the COCO format, where all annotations are stored in a single file.
|
train
|
https://github.com/apacha/OMR-Datasets/blob/d0a22a03ae35caeef211729efa340e1ec0e01ea5/omrdatasettools/downloaders/MuscimaPlusPlusDatasetDownloader.py#L56-L77
|
[
"def copytree(src, dst):\n if not os.path.exists(dst):\n os.makedirs(dst)\n for item in os.listdir(src):\n s = os.path.join(src, item)\n d = os.path.join(dst, item)\n if os.path.isdir(s):\n DatasetDownloader.copytree(s, d)\n else:\n if not os.path.exists(d) or os.stat(s).st_mtime - os.stat(d).st_mtime > 1:\n shutil.copy2(s, d)\n",
"def get_measure_annotation_filename(self):\n return \"MUSCIMA-pp_v1.0-measure-annotations.zip\"\n"
] |
class MuscimaPlusPlusDatasetDownloader(DatasetDownloader):
""" Downloads the Muscima++ dataset
https://ufal.mff.cuni.cz/muscima
Copyright 2017 Jan Hajic jr. under CC-BY-NC-SA 4.0 license
"""
def get_dataset_download_url(self) -> str:
# Official URL: "https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11372/LRT-2372/MUSCIMA-pp_v1.0.zip?sequence=1&isAllowed=y"
return "https://github.com/apacha/OMR-Datasets/releases/download/datasets/MUSCIMA-pp_v1.0.zip"
def get_dataset_filename(self) -> str:
return "MUSCIMA-pp_v1.0.zip"
def get_images_download_url(self) -> str:
# This URL contains the images of the CVC-MUSCIMA dataset, that were annotated in the MUSCIMA++ dataset
return "https://github.com/apacha/OMR-Datasets/releases/download/datasets/CVC_MUSCIMA_PP_Annotated-Images.zip"
def get_imageset_filename(self) -> str:
return "CVC_MUSCIMA_PP_Annotated-Images.zip"
def get_measure_annotation_download_url(self):
return "https://github.com/apacha/OMR-Datasets/releases/download/datasets/MUSCIMA-pp_v1.0-measure-annotations.zip"
def get_measure_annotation_filename(self):
return "MUSCIMA-pp_v1.0-measure-annotations.zip"
def download_and_extract_dataset(self, destination_directory: str):
"""
Downloads and extracts the MUSCIMA++ dataset along with the images from the CVC-MUSCIMA dataset
that were manually annotated (140 out of 1000 images).
"""
if not os.path.exists(self.get_dataset_filename()):
print("Downloading MUSCIMA++ Dataset...")
self.download_file(self.get_dataset_download_url(), self.get_dataset_filename())
if not os.path.exists(self.get_imageset_filename()):
print("Downloading MUSCIMA++ Images...")
self.download_file(self.get_images_download_url(), self.get_imageset_filename())
print("Extracting MUSCIMA++ Dataset...")
self.extract_dataset(os.path.abspath(destination_directory))
absolute_path_to_temp_folder = os.path.abspath('MuscimaPpImages')
self.extract_dataset(absolute_path_to_temp_folder, self.get_imageset_filename())
DatasetDownloader.copytree(os.path.join(absolute_path_to_temp_folder, "fulls"),
os.path.join(os.path.abspath(destination_directory), self.dataset_version(), "data",
"images"))
self.clean_up_temp_directory(absolute_path_to_temp_folder)
def dataset_version(self):
return "v1.0"
|
apacha/OMR-Datasets
|
omrdatasettools/image_generators/MuscimaPlusPlusImageGenerator.py
|
MuscimaPlusPlusImageGenerator.extract_and_render_all_symbol_masks
|
python
|
def extract_and_render_all_symbol_masks(self, raw_data_directory: str, destination_directory: str):
print("Extracting Symbols from Muscima++ Dataset...")
xml_files = self.get_all_xml_file_paths(raw_data_directory)
crop_objects = self.load_crop_objects_from_xml_files(xml_files)
self.render_masks_of_crop_objects_into_image(crop_objects, destination_directory)
|
Extracts all symbols from the raw XML documents and generates individual symbols from the masks
:param raw_data_directory: The directory, that contains the xml-files and matching images
:param destination_directory: The directory, in which the symbols should be generated into. One sub-folder per
symbol category will be generated automatically
|
train
|
https://github.com/apacha/OMR-Datasets/blob/d0a22a03ae35caeef211729efa340e1ec0e01ea5/omrdatasettools/image_generators/MuscimaPlusPlusImageGenerator.py#L23-L35
|
[
"def get_all_xml_file_paths(self, raw_data_directory: str) -> List[str]:\n \"\"\" Loads all XML-files that are located in the folder.\n :param raw_data_directory: Path to the raw directory, where the MUSCIMA++ dataset was extracted to\n \"\"\"\n raw_data_directory = os.path.join(raw_data_directory, \"v1.0\", \"data\", \"cropobjects_manual\")\n xml_files = [y for x in os.walk(raw_data_directory) for y in glob(os.path.join(x[0], '*.xml'))]\n return xml_files\n",
"def load_crop_objects_from_xml_files(self, xml_files: List[str]) -> List[CropObject]:\n crop_objects = []\n for xml_file in tqdm(xml_files, desc=\"Loading crop-objects from xml-files\", smoothing=0.1):\n crop_objects.extend(self.get_crop_objects_from_xml_file(xml_file))\n\n for crop_object in crop_objects:\n # Some classes have special characters in their class name that we have to remove\n crop_object.clsname = crop_object.clsname.replace('\"', '').replace('/', '').replace('.', '')\n\n print(\"Loaded {0} crop-objects\".format(len(crop_objects)))\n return crop_objects\n",
"def render_masks_of_crop_objects_into_image(self, crop_objects: List[CropObject], destination_directory: str):\n for crop_object in tqdm(crop_objects, desc=\"Generating images from crop-object masks\", smoothing=0.1):\n symbol_class = crop_object.clsname\n # Make a copy of the mask to not temper with the original data\n mask = crop_object.mask.copy()\n # We want to draw black symbols on white canvas. The mask encodes foreground pixels\n # that we are interested in with a 1 and background pixels with a 0 and stores those values in\n # an uint8 numpy array. To use Image.fromarray, we have to generate a greyscale mask, where\n # white pixels have the value 255 and black pixels have the value 0. To achieve this, we simply\n # subtract one from each uint, and by exploiting the underflow of the uint we get the following mapping:\n # 0 (background) => 255 (white) and 1 (foreground) => 0 (black) which is exactly what we wanted.\n mask -= 1\n image = Image.fromarray(mask, mode=\"L\")\n\n target_directory = os.path.join(destination_directory, symbol_class)\n os.makedirs(target_directory, exist_ok=True)\n\n export_path = ExportPath(destination_directory, symbol_class, crop_object.uid)\n image.save(export_path.get_full_path())\n"
] |
class MuscimaPlusPlusImageGenerator:
def __init__(self) -> None:
super().__init__()
self.path_of_this_file = os.path.dirname(os.path.realpath(__file__))
def get_all_xml_file_paths(self, raw_data_directory: str) -> List[str]:
""" Loads all XML-files that are located in the folder.
:param raw_data_directory: Path to the raw directory, where the MUSCIMA++ dataset was extracted to
"""
raw_data_directory = os.path.join(raw_data_directory, "v1.0", "data", "cropobjects_manual")
xml_files = [y for x in os.walk(raw_data_directory) for y in glob(os.path.join(x[0], '*.xml'))]
return xml_files
def load_crop_objects_from_xml_file(self, xml_file: str) -> List[CropObject]:
crop_objects = []
crop_objects.extend(self.get_crop_objects_from_xml_file(xml_file))
for crop_object in crop_objects:
# Some classes have special characters in their class name that we have to remove
crop_object.clsname = crop_object.clsname.replace('"', '').replace('/', '').replace('.', '')
#print("Loaded {0} crop-objects from {1}".format(len(crop_objects), xml_file))
return crop_objects
def load_crop_objects_from_xml_files(self, xml_files: List[str]) -> List[CropObject]:
crop_objects = []
for xml_file in tqdm(xml_files, desc="Loading crop-objects from xml-files", smoothing=0.1):
crop_objects.extend(self.get_crop_objects_from_xml_file(xml_file))
for crop_object in crop_objects:
# Some classes have special characters in their class name that we have to remove
crop_object.clsname = crop_object.clsname.replace('"', '').replace('/', '').replace('.', '')
print("Loaded {0} crop-objects".format(len(crop_objects)))
return crop_objects
def get_crop_objects_from_xml_file(self, xml_file: str) -> List[CropObject]:
# e.g., xml_file = 'data/muscima_pp/v0.9/data/cropobjects/CVC-MUSCIMA_W-01_N-10_D-ideal.xml'
crop_objects = parse_cropobject_list(xml_file)
return crop_objects
def render_masks_of_crop_objects_into_image(self, crop_objects: List[CropObject], destination_directory: str):
for crop_object in tqdm(crop_objects, desc="Generating images from crop-object masks", smoothing=0.1):
symbol_class = crop_object.clsname
# Make a copy of the mask to not temper with the original data
mask = crop_object.mask.copy()
# We want to draw black symbols on white canvas. The mask encodes foreground pixels
# that we are interested in with a 1 and background pixels with a 0 and stores those values in
# an uint8 numpy array. To use Image.fromarray, we have to generate a greyscale mask, where
# white pixels have the value 255 and black pixels have the value 0. To achieve this, we simply
# subtract one from each uint, and by exploiting the underflow of the uint we get the following mapping:
# 0 (background) => 255 (white) and 1 (foreground) => 0 (black) which is exactly what we wanted.
mask -= 1
image = Image.fromarray(mask, mode="L")
target_directory = os.path.join(destination_directory, symbol_class)
os.makedirs(target_directory, exist_ok=True)
export_path = ExportPath(destination_directory, symbol_class, crop_object.uid)
image.save(export_path.get_full_path())
|
apacha/OMR-Datasets
|
omrdatasettools/image_generators/MuscimaPlusPlusImageGenerator.py
|
MuscimaPlusPlusImageGenerator.get_all_xml_file_paths
|
python
|
def get_all_xml_file_paths(self, raw_data_directory: str) -> List[str]:
raw_data_directory = os.path.join(raw_data_directory, "v1.0", "data", "cropobjects_manual")
xml_files = [y for x in os.walk(raw_data_directory) for y in glob(os.path.join(x[0], '*.xml'))]
return xml_files
|
Loads all XML-files that are located in the folder.
:param raw_data_directory: Path to the raw directory, where the MUSCIMA++ dataset was extracted to
|
train
|
https://github.com/apacha/OMR-Datasets/blob/d0a22a03ae35caeef211729efa340e1ec0e01ea5/omrdatasettools/image_generators/MuscimaPlusPlusImageGenerator.py#L37-L43
| null |
class MuscimaPlusPlusImageGenerator:
def __init__(self) -> None:
super().__init__()
self.path_of_this_file = os.path.dirname(os.path.realpath(__file__))
def extract_and_render_all_symbol_masks(self, raw_data_directory: str, destination_directory: str):
"""
Extracts all symbols from the raw XML documents and generates individual symbols from the masks
:param raw_data_directory: The directory, that contains the xml-files and matching images
:param destination_directory: The directory, in which the symbols should be generated into. One sub-folder per
symbol category will be generated automatically
"""
print("Extracting Symbols from Muscima++ Dataset...")
xml_files = self.get_all_xml_file_paths(raw_data_directory)
crop_objects = self.load_crop_objects_from_xml_files(xml_files)
self.render_masks_of_crop_objects_into_image(crop_objects, destination_directory)
def load_crop_objects_from_xml_file(self, xml_file: str) -> List[CropObject]:
crop_objects = []
crop_objects.extend(self.get_crop_objects_from_xml_file(xml_file))
for crop_object in crop_objects:
# Some classes have special characters in their class name that we have to remove
crop_object.clsname = crop_object.clsname.replace('"', '').replace('/', '').replace('.', '')
#print("Loaded {0} crop-objects from {1}".format(len(crop_objects), xml_file))
return crop_objects
def load_crop_objects_from_xml_files(self, xml_files: List[str]) -> List[CropObject]:
crop_objects = []
for xml_file in tqdm(xml_files, desc="Loading crop-objects from xml-files", smoothing=0.1):
crop_objects.extend(self.get_crop_objects_from_xml_file(xml_file))
for crop_object in crop_objects:
# Some classes have special characters in their class name that we have to remove
crop_object.clsname = crop_object.clsname.replace('"', '').replace('/', '').replace('.', '')
print("Loaded {0} crop-objects".format(len(crop_objects)))
return crop_objects
def get_crop_objects_from_xml_file(self, xml_file: str) -> List[CropObject]:
# e.g., xml_file = 'data/muscima_pp/v0.9/data/cropobjects/CVC-MUSCIMA_W-01_N-10_D-ideal.xml'
crop_objects = parse_cropobject_list(xml_file)
return crop_objects
def render_masks_of_crop_objects_into_image(self, crop_objects: List[CropObject], destination_directory: str):
for crop_object in tqdm(crop_objects, desc="Generating images from crop-object masks", smoothing=0.1):
symbol_class = crop_object.clsname
# Make a copy of the mask to not temper with the original data
mask = crop_object.mask.copy()
# We want to draw black symbols on white canvas. The mask encodes foreground pixels
# that we are interested in with a 1 and background pixels with a 0 and stores those values in
# an uint8 numpy array. To use Image.fromarray, we have to generate a greyscale mask, where
# white pixels have the value 255 and black pixels have the value 0. To achieve this, we simply
# subtract one from each uint, and by exploiting the underflow of the uint we get the following mapping:
# 0 (background) => 255 (white) and 1 (foreground) => 0 (black) which is exactly what we wanted.
mask -= 1
image = Image.fromarray(mask, mode="L")
target_directory = os.path.join(destination_directory, symbol_class)
os.makedirs(target_directory, exist_ok=True)
export_path = ExportPath(destination_directory, symbol_class, crop_object.uid)
image.save(export_path.get_full_path())
|
apacha/OMR-Datasets
|
omrdatasettools/converters/ImageColorInverter.py
|
ImageColorInverter.invert_images
|
python
|
def invert_images(self, image_directory: str, image_file_ending: str = "*.bmp"):
image_paths = [y for x in os.walk(image_directory) for y in glob(os.path.join(x[0], image_file_ending))]
for image_path in tqdm(image_paths, desc="Inverting all images in directory {0}".format(image_directory)):
white_on_black_image = Image.open(image_path).convert("L")
black_on_white_image = ImageOps.invert(white_on_black_image)
black_on_white_image.save(os.path.splitext(image_path)[0] + ".png")
|
In-situ converts the white on black images of a directory to black on white images
:param image_directory: The directory, that contains the images
:param image_file_ending: The pattern for finding files in the image_directory
|
train
|
https://github.com/apacha/OMR-Datasets/blob/d0a22a03ae35caeef211729efa340e1ec0e01ea5/omrdatasettools/converters/ImageColorInverter.py#L15-L26
| null |
class ImageColorInverter:
""" Class for inverting white-on-black images to black-on-white images """
def __init__(self) -> None:
super().__init__()
|
apacha/OMR-Datasets
|
omrdatasettools/image_generators/CapitanImageGenerator.py
|
CapitanImageGenerator.create_capitan_images
|
python
|
def create_capitan_images(self, raw_data_directory: str,
destination_directory: str,
stroke_thicknesses: List[int]) -> None:
symbols = self.load_capitan_symbols(raw_data_directory)
self.draw_capitan_stroke_images(symbols, destination_directory, stroke_thicknesses)
self.draw_capitan_score_images(symbols, destination_directory)
|
Creates a visual representation of the Capitan strokes by parsing all text-files and the symbols as specified
by the parameters by drawing lines that connect the points from each stroke of each symbol.
:param raw_data_directory: The directory, that contains the raw capitan dataset
:param destination_directory: The directory, in which the symbols should be generated into. One sub-folder per
symbol category will be generated automatically
:param stroke_thicknesses: The thickness of the pen, used for drawing the lines in pixels. If multiple are
specified, multiple images will be generated that have a different suffix, e.g.
1-16-3.png for the 3-px version and 1-16-2.png for the 2-px version of the image 1-16
|
train
|
https://github.com/apacha/OMR-Datasets/blob/d0a22a03ae35caeef211729efa340e1ec0e01ea5/omrdatasettools/image_generators/CapitanImageGenerator.py#L13-L29
|
[
"def load_capitan_symbols(self, raw_data_directory: str) -> List[CapitanSymbol]:\n data_path = os.path.join(raw_data_directory, \"BimodalHandwrittenSymbols\", \"data\")\n with open(data_path) as file:\n data = file.read()\n\n symbol_strings = data.splitlines()\n symbols = []\n for symbol_string in tqdm(symbol_strings, desc=\"Loading symbols from strings\"):\n symbol = CapitanSymbol.initialize_from_string(symbol_string)\n symbols.append(symbol)\n\n return symbols\n",
"def draw_capitan_stroke_images(self, symbols: List[CapitanSymbol],\n destination_directory: str,\n stroke_thicknesses: List[int]) -> None:\n \"\"\"\n Creates a visual representation of the Capitan strokes by drawing lines that connect the points\n from each stroke of each symbol.\n\n :param symbols: The list of parsed Capitan-symbols\n :param destination_directory: The directory, in which the symbols should be generated into. One sub-folder per\n symbol category will be generated automatically\n :param stroke_thicknesses: The thickness of the pen, used for drawing the lines in pixels. If multiple are\n specified, multiple images will be generated that have a different suffix, e.g.\n 1-16-3.png for the 3-px version and 1-16-2.png for the 2-px version of the image 1-16\n \"\"\"\n\n total_number_of_symbols = len(symbols) * len(stroke_thicknesses)\n output = \"Generating {0} images with {1} symbols in {2} different stroke thicknesses ({3})\".format(\n total_number_of_symbols, len(symbols), len(stroke_thicknesses), stroke_thicknesses)\n\n print(output)\n print(\"In directory {0}\".format(os.path.abspath(destination_directory)), flush=True)\n\n progress_bar = tqdm(total=total_number_of_symbols, mininterval=0.25, desc=\"Rendering strokes\")\n capitan_file_name_counter = 0\n for symbol in symbols:\n capitan_file_name_counter += 1\n target_directory = os.path.join(destination_directory, symbol.symbol_class)\n os.makedirs(target_directory, exist_ok=True)\n\n raw_file_name_without_extension = \"capitan-{0}-{1}-stroke\".format(symbol.symbol_class,\n capitan_file_name_counter)\n\n for stroke_thickness in stroke_thicknesses:\n export_path = ExportPath(destination_directory, symbol.symbol_class, raw_file_name_without_extension,\n 'png', stroke_thickness)\n symbol.draw_capitan_stroke_onto_canvas(export_path, stroke_thickness, 0)\n progress_bar.update(1)\n\n progress_bar.close()\n",
"def draw_capitan_score_images(self, symbols: List[CapitanSymbol],\n destination_directory: str) -> None:\n \"\"\"\n Draws the image data contained in each symbol\n\n :param symbols: The list of parsed Capitan-symbols\n :param destination_directory: The directory, in which the symbols should be generated into. One sub-folder per\n symbol category will be generated automatically\n :param stroke_thicknesses: The thickness of the pen, used for drawing the lines in pixels. If multiple are\n specified, multiple images will be generated that have a different suffix, e.g.\n 1-16-3.png for the 3-px version and 1-16-2.png for the 2-px version of the image 1-16\n \"\"\"\n\n total_number_of_symbols = len(symbols)\n output = \"Generating {0} images from Capitan symbols\".format(len(symbols))\n\n print(output)\n print(\"In directory {0}\".format(os.path.abspath(destination_directory)), flush=True)\n\n progress_bar = tqdm(total=total_number_of_symbols, mininterval=0.25, desc=\"Rendering images\")\n capitan_file_name_counter = 0\n for symbol in symbols:\n capitan_file_name_counter += 1\n target_directory = os.path.join(destination_directory, symbol.symbol_class)\n os.makedirs(target_directory, exist_ok=True)\n\n raw_file_name_without_extension = \"capitan-{0}-{1}-score\".format(symbol.symbol_class,\n capitan_file_name_counter)\n\n export_path = ExportPath(destination_directory, symbol.symbol_class, raw_file_name_without_extension, 'png')\n symbol.draw_capitan_score_bitmap(export_path)\n progress_bar.update(1)\n\n progress_bar.close()\n"
] |
class CapitanImageGenerator:
def load_capitan_symbols(self, raw_data_directory: str) -> List[CapitanSymbol]:
data_path = os.path.join(raw_data_directory, "BimodalHandwrittenSymbols", "data")
with open(data_path) as file:
data = file.read()
symbol_strings = data.splitlines()
symbols = []
for symbol_string in tqdm(symbol_strings, desc="Loading symbols from strings"):
symbol = CapitanSymbol.initialize_from_string(symbol_string)
symbols.append(symbol)
return symbols
def draw_capitan_stroke_images(self, symbols: List[CapitanSymbol],
destination_directory: str,
stroke_thicknesses: List[int]) -> None:
"""
Creates a visual representation of the Capitan strokes by drawing lines that connect the points
from each stroke of each symbol.
:param symbols: The list of parsed Capitan-symbols
:param destination_directory: The directory, in which the symbols should be generated into. One sub-folder per
symbol category will be generated automatically
:param stroke_thicknesses: The thickness of the pen, used for drawing the lines in pixels. If multiple are
specified, multiple images will be generated that have a different suffix, e.g.
1-16-3.png for the 3-px version and 1-16-2.png for the 2-px version of the image 1-16
"""
total_number_of_symbols = len(symbols) * len(stroke_thicknesses)
output = "Generating {0} images with {1} symbols in {2} different stroke thicknesses ({3})".format(
total_number_of_symbols, len(symbols), len(stroke_thicknesses), stroke_thicknesses)
print(output)
print("In directory {0}".format(os.path.abspath(destination_directory)), flush=True)
progress_bar = tqdm(total=total_number_of_symbols, mininterval=0.25, desc="Rendering strokes")
capitan_file_name_counter = 0
for symbol in symbols:
capitan_file_name_counter += 1
target_directory = os.path.join(destination_directory, symbol.symbol_class)
os.makedirs(target_directory, exist_ok=True)
raw_file_name_without_extension = "capitan-{0}-{1}-stroke".format(symbol.symbol_class,
capitan_file_name_counter)
for stroke_thickness in stroke_thicknesses:
export_path = ExportPath(destination_directory, symbol.symbol_class, raw_file_name_without_extension,
'png', stroke_thickness)
symbol.draw_capitan_stroke_onto_canvas(export_path, stroke_thickness, 0)
progress_bar.update(1)
progress_bar.close()
def draw_capitan_score_images(self, symbols: List[CapitanSymbol],
destination_directory: str) -> None:
"""
Draws the image data contained in each symbol
:param symbols: The list of parsed Capitan-symbols
:param destination_directory: The directory, in which the symbols should be generated into. One sub-folder per
symbol category will be generated automatically
:param stroke_thicknesses: The thickness of the pen, used for drawing the lines in pixels. If multiple are
specified, multiple images will be generated that have a different suffix, e.g.
1-16-3.png for the 3-px version and 1-16-2.png for the 2-px version of the image 1-16
"""
total_number_of_symbols = len(symbols)
output = "Generating {0} images from Capitan symbols".format(len(symbols))
print(output)
print("In directory {0}".format(os.path.abspath(destination_directory)), flush=True)
progress_bar = tqdm(total=total_number_of_symbols, mininterval=0.25, desc="Rendering images")
capitan_file_name_counter = 0
for symbol in symbols:
capitan_file_name_counter += 1
target_directory = os.path.join(destination_directory, symbol.symbol_class)
os.makedirs(target_directory, exist_ok=True)
raw_file_name_without_extension = "capitan-{0}-{1}-score".format(symbol.symbol_class,
capitan_file_name_counter)
export_path = ExportPath(destination_directory, symbol.symbol_class, raw_file_name_without_extension, 'png')
symbol.draw_capitan_score_bitmap(export_path)
progress_bar.update(1)
progress_bar.close()
@staticmethod
def add_arguments_for_homus_image_generator(parser: argparse.ArgumentParser):
parser.add_argument("-s", "--stroke_thicknesses", dest="stroke_thicknesses", default="3",
help="Stroke thicknesses for drawing the generated bitmaps. May define comma-separated list"
" of multiple stroke thicknesses, e.g. '1,2,3'")
|
apacha/OMR-Datasets
|
omrdatasettools/image_generators/CapitanImageGenerator.py
|
CapitanImageGenerator.draw_capitan_stroke_images
|
python
|
def draw_capitan_stroke_images(self, symbols: List[CapitanSymbol],
destination_directory: str,
stroke_thicknesses: List[int]) -> None:
total_number_of_symbols = len(symbols) * len(stroke_thicknesses)
output = "Generating {0} images with {1} symbols in {2} different stroke thicknesses ({3})".format(
total_number_of_symbols, len(symbols), len(stroke_thicknesses), stroke_thicknesses)
print(output)
print("In directory {0}".format(os.path.abspath(destination_directory)), flush=True)
progress_bar = tqdm(total=total_number_of_symbols, mininterval=0.25, desc="Rendering strokes")
capitan_file_name_counter = 0
for symbol in symbols:
capitan_file_name_counter += 1
target_directory = os.path.join(destination_directory, symbol.symbol_class)
os.makedirs(target_directory, exist_ok=True)
raw_file_name_without_extension = "capitan-{0}-{1}-stroke".format(symbol.symbol_class,
capitan_file_name_counter)
for stroke_thickness in stroke_thicknesses:
export_path = ExportPath(destination_directory, symbol.symbol_class, raw_file_name_without_extension,
'png', stroke_thickness)
symbol.draw_capitan_stroke_onto_canvas(export_path, stroke_thickness, 0)
progress_bar.update(1)
progress_bar.close()
|
Creates a visual representation of the Capitan strokes by drawing lines that connect the points
from each stroke of each symbol.
:param symbols: The list of parsed Capitan-symbols
:param destination_directory: The directory, in which the symbols should be generated into. One sub-folder per
symbol category will be generated automatically
:param stroke_thicknesses: The thickness of the pen, used for drawing the lines in pixels. If multiple are
specified, multiple images will be generated that have a different suffix, e.g.
1-16-3.png for the 3-px version and 1-16-2.png for the 2-px version of the image 1-16
|
train
|
https://github.com/apacha/OMR-Datasets/blob/d0a22a03ae35caeef211729efa340e1ec0e01ea5/omrdatasettools/image_generators/CapitanImageGenerator.py#L44-L82
| null |
class CapitanImageGenerator:
def create_capitan_images(self, raw_data_directory: str,
destination_directory: str,
stroke_thicknesses: List[int]) -> None:
"""
Creates a visual representation of the Capitan strokes by parsing all text-files and the symbols as specified
by the parameters by drawing lines that connect the points from each stroke of each symbol.
:param raw_data_directory: The directory, that contains the raw capitan dataset
:param destination_directory: The directory, in which the symbols should be generated into. One sub-folder per
symbol category will be generated automatically
:param stroke_thicknesses: The thickness of the pen, used for drawing the lines in pixels. If multiple are
specified, multiple images will be generated that have a different suffix, e.g.
1-16-3.png for the 3-px version and 1-16-2.png for the 2-px version of the image 1-16
"""
symbols = self.load_capitan_symbols(raw_data_directory)
self.draw_capitan_stroke_images(symbols, destination_directory, stroke_thicknesses)
self.draw_capitan_score_images(symbols, destination_directory)
def load_capitan_symbols(self, raw_data_directory: str) -> List[CapitanSymbol]:
data_path = os.path.join(raw_data_directory, "BimodalHandwrittenSymbols", "data")
with open(data_path) as file:
data = file.read()
symbol_strings = data.splitlines()
symbols = []
for symbol_string in tqdm(symbol_strings, desc="Loading symbols from strings"):
symbol = CapitanSymbol.initialize_from_string(symbol_string)
symbols.append(symbol)
return symbols
def draw_capitan_score_images(self, symbols: List[CapitanSymbol],
destination_directory: str) -> None:
"""
Draws the image data contained in each symbol
:param symbols: The list of parsed Capitan-symbols
:param destination_directory: The directory, in which the symbols should be generated into. One sub-folder per
symbol category will be generated automatically
:param stroke_thicknesses: The thickness of the pen, used for drawing the lines in pixels. If multiple are
specified, multiple images will be generated that have a different suffix, e.g.
1-16-3.png for the 3-px version and 1-16-2.png for the 2-px version of the image 1-16
"""
total_number_of_symbols = len(symbols)
output = "Generating {0} images from Capitan symbols".format(len(symbols))
print(output)
print("In directory {0}".format(os.path.abspath(destination_directory)), flush=True)
progress_bar = tqdm(total=total_number_of_symbols, mininterval=0.25, desc="Rendering images")
capitan_file_name_counter = 0
for symbol in symbols:
capitan_file_name_counter += 1
target_directory = os.path.join(destination_directory, symbol.symbol_class)
os.makedirs(target_directory, exist_ok=True)
raw_file_name_without_extension = "capitan-{0}-{1}-score".format(symbol.symbol_class,
capitan_file_name_counter)
export_path = ExportPath(destination_directory, symbol.symbol_class, raw_file_name_without_extension, 'png')
symbol.draw_capitan_score_bitmap(export_path)
progress_bar.update(1)
progress_bar.close()
@staticmethod
def add_arguments_for_homus_image_generator(parser: argparse.ArgumentParser):
parser.add_argument("-s", "--stroke_thicknesses", dest="stroke_thicknesses", default="3",
help="Stroke thicknesses for drawing the generated bitmaps. May define comma-separated list"
" of multiple stroke thicknesses, e.g. '1,2,3'")
|
apacha/OMR-Datasets
|
omrdatasettools/image_generators/CapitanSymbol.py
|
CapitanSymbol.initialize_from_string
|
python
|
def initialize_from_string(content: str) -> 'CapitanSymbol':
if content is None or content is "":
return None
parts = content.split(":")
min_x = 100000
max_x = 0
min_y = 100000
max_y = 0
symbol_name = parts[0]
sequence = parts[1]
image_numbers = parts[2].split(',')
image_data = numpy.asarray(image_numbers, numpy.uint8).reshape((30, 30))
stroke = []
for point_string in sequence.split(";"):
if point_string is "":
continue # Skip the last element, that is due to a trailing ; in each line
point_x, point_y = point_string.split(",")
x = float(point_x)
y = float(point_y)
stroke.append(SimplePoint2D(x, y))
max_x = max(max_x, x)
min_x = min(min_x, x)
max_y = max(max_y, y)
min_y = min(min_y, y)
dimensions = Rectangle(Point2D(min_x, min_y), int(max_x - min_x + 1), int(max_y - min_y + 1))
return CapitanSymbol(content, stroke, image_data, symbol_name, dimensions)
|
Create and initializes a new symbol from a string
:param content: The content of a symbol as read from the text-file in the form <label>:<sequence>:<image>
:return: The initialized symbol
:rtype: CapitanSymbol
|
train
|
https://github.com/apacha/OMR-Datasets/blob/d0a22a03ae35caeef211729efa340e1ec0e01ea5/omrdatasettools/image_generators/CapitanSymbol.py#L30-L70
| null |
class CapitanSymbol:
def __init__(self, content: str, stroke: List[SimplePoint2D], image_data: numpy.ndarray, symbol_class: str,
dimensions: Rectangle) -> None:
super().__init__()
self.dimensions = dimensions
self.symbol_class = symbol_class
self.content = content
self.stroke = stroke
self.image_data = image_data
@staticmethod
def draw_capitan_score_bitmap(self, export_path: ExportPath) -> None:
"""
Draws the 30x30 symbol into the given file
:param export_path: The path, where the symbols should be created on disk
"""
with Image.fromarray(self.image_data, mode='L') as image:
image.save(export_path.get_full_path())
def draw_capitan_stroke_onto_canvas(self, export_path: ExportPath, stroke_thickness: int, margin: int):
"""
Draws the symbol strokes onto a canvas
:param export_path: The path, where the symbols should be created on disk
:param stroke_thickness:
:param margin:
"""
width = int(self.dimensions.width + 2 * margin)
height = int(self.dimensions.height + 2 * margin)
offset = Point2D(self.dimensions.origin.x - margin, self.dimensions.origin.y - margin)
image = Image.new('RGB', (width, height), "white") # create a new white image
draw = ImageDraw.Draw(image)
black = (0, 0, 0)
for i in range(0, len(self.stroke) - 1):
start_point = self.__subtract_offset(self.stroke[i], offset)
end_point = self.__subtract_offset(self.stroke[i + 1], offset)
distance = self.__euclidean_distance(start_point, end_point)
if distance > 1600: # User moved more than 40 pixels - probably we should not draw a line here
continue
draw.line((start_point.x, start_point.y, end_point.x, end_point.y), black, stroke_thickness)
del draw
image.save(export_path.get_full_path())
image.close()
@staticmethod
def __euclidean_distance(a: SimplePoint2D, b: SimplePoint2D) -> float:
return (a.x - b.x) * (a.x - b.x) + abs(a.y - b.y) * abs(a.y - b.y)
@staticmethod
def __manhatten_distance(a: SimplePoint2D, b: SimplePoint2D) -> float:
return abs(a.x - b.x) + abs(a.y - b.y)
@staticmethod
def __subtract_offset(a: SimplePoint2D, b: SimplePoint2D) -> SimplePoint2D:
return SimplePoint2D(a.x - b.x, a.y - b.y)
|
apacha/OMR-Datasets
|
omrdatasettools/image_generators/CapitanSymbol.py
|
CapitanSymbol.draw_capitan_score_bitmap
|
python
|
def draw_capitan_score_bitmap(self, export_path: ExportPath) -> None:
with Image.fromarray(self.image_data, mode='L') as image:
image.save(export_path.get_full_path())
|
Draws the 30x30 symbol into the given file
:param export_path: The path, where the symbols should be created on disk
|
train
|
https://github.com/apacha/OMR-Datasets/blob/d0a22a03ae35caeef211729efa340e1ec0e01ea5/omrdatasettools/image_generators/CapitanSymbol.py#L72-L78
|
[
"def get_full_path(self, offset: int = None):\n \"\"\"\n :return: Returns the full path that will join all fields according to the following format if no offset if provided:\n 'destination_directory'/'symbol_class'/'raw_file_name_without_extension'_'stroke_thickness'.'extension',\n e.g.: data/images/3-4-Time/1-13_3.png\n\n or with an additional offset-appendix if an offset is provided\n 'destination_directory'/'symbol_class'/'raw_file_name_without_extension'_'stroke_thickness'_offset_'offset'.'extension',\n e.g.: data/images/3-4-Time/1-13_3_offset_74.png\n \"\"\"\n stroke_thickness = \"\"\n if self.stroke_thickness is not None:\n stroke_thickness = \"_{0}\".format(self.stroke_thickness)\n\n staffline_offset = \"\"\n if offset is not None:\n staffline_offset = \"_offset_{0}\".format(offset)\n\n return os.path.join(self.destination_directory, self.symbol_class,\n \"{0}{1}{2}.{3}\".format(self.raw_file_name_without_extension,\n stroke_thickness, staffline_offset, self.extension))\n"
] |
class CapitanSymbol:
def __init__(self, content: str, stroke: List[SimplePoint2D], image_data: numpy.ndarray, symbol_class: str,
dimensions: Rectangle) -> None:
super().__init__()
self.dimensions = dimensions
self.symbol_class = symbol_class
self.content = content
self.stroke = stroke
self.image_data = image_data
@staticmethod
def initialize_from_string(content: str) -> 'CapitanSymbol':
"""
Create and initializes a new symbol from a string
:param content: The content of a symbol as read from the text-file in the form <label>:<sequence>:<image>
:return: The initialized symbol
:rtype: CapitanSymbol
"""
if content is None or content is "":
return None
parts = content.split(":")
min_x = 100000
max_x = 0
min_y = 100000
max_y = 0
symbol_name = parts[0]
sequence = parts[1]
image_numbers = parts[2].split(',')
image_data = numpy.asarray(image_numbers, numpy.uint8).reshape((30, 30))
stroke = []
for point_string in sequence.split(";"):
if point_string is "":
continue # Skip the last element, that is due to a trailing ; in each line
point_x, point_y = point_string.split(",")
x = float(point_x)
y = float(point_y)
stroke.append(SimplePoint2D(x, y))
max_x = max(max_x, x)
min_x = min(min_x, x)
max_y = max(max_y, y)
min_y = min(min_y, y)
dimensions = Rectangle(Point2D(min_x, min_y), int(max_x - min_x + 1), int(max_y - min_y + 1))
return CapitanSymbol(content, stroke, image_data, symbol_name, dimensions)
def draw_capitan_stroke_onto_canvas(self, export_path: ExportPath, stroke_thickness: int, margin: int):
"""
Draws the symbol strokes onto a canvas
:param export_path: The path, where the symbols should be created on disk
:param stroke_thickness:
:param margin:
"""
width = int(self.dimensions.width + 2 * margin)
height = int(self.dimensions.height + 2 * margin)
offset = Point2D(self.dimensions.origin.x - margin, self.dimensions.origin.y - margin)
image = Image.new('RGB', (width, height), "white") # create a new white image
draw = ImageDraw.Draw(image)
black = (0, 0, 0)
for i in range(0, len(self.stroke) - 1):
start_point = self.__subtract_offset(self.stroke[i], offset)
end_point = self.__subtract_offset(self.stroke[i + 1], offset)
distance = self.__euclidean_distance(start_point, end_point)
if distance > 1600: # User moved more than 40 pixels - probably we should not draw a line here
continue
draw.line((start_point.x, start_point.y, end_point.x, end_point.y), black, stroke_thickness)
del draw
image.save(export_path.get_full_path())
image.close()
@staticmethod
def __euclidean_distance(a: SimplePoint2D, b: SimplePoint2D) -> float:
return (a.x - b.x) * (a.x - b.x) + abs(a.y - b.y) * abs(a.y - b.y)
@staticmethod
def __manhatten_distance(a: SimplePoint2D, b: SimplePoint2D) -> float:
return abs(a.x - b.x) + abs(a.y - b.y)
@staticmethod
def __subtract_offset(a: SimplePoint2D, b: SimplePoint2D) -> SimplePoint2D:
return SimplePoint2D(a.x - b.x, a.y - b.y)
|
apacha/OMR-Datasets
|
omrdatasettools/image_generators/CapitanSymbol.py
|
CapitanSymbol.draw_capitan_stroke_onto_canvas
|
python
|
def draw_capitan_stroke_onto_canvas(self, export_path: ExportPath, stroke_thickness: int, margin: int):
width = int(self.dimensions.width + 2 * margin)
height = int(self.dimensions.height + 2 * margin)
offset = Point2D(self.dimensions.origin.x - margin, self.dimensions.origin.y - margin)
image = Image.new('RGB', (width, height), "white") # create a new white image
draw = ImageDraw.Draw(image)
black = (0, 0, 0)
for i in range(0, len(self.stroke) - 1):
start_point = self.__subtract_offset(self.stroke[i], offset)
end_point = self.__subtract_offset(self.stroke[i + 1], offset)
distance = self.__euclidean_distance(start_point, end_point)
if distance > 1600: # User moved more than 40 pixels - probably we should not draw a line here
continue
draw.line((start_point.x, start_point.y, end_point.x, end_point.y), black, stroke_thickness)
del draw
image.save(export_path.get_full_path())
image.close()
|
Draws the symbol strokes onto a canvas
:param export_path: The path, where the symbols should be created on disk
:param stroke_thickness:
:param margin:
|
train
|
https://github.com/apacha/OMR-Datasets/blob/d0a22a03ae35caeef211729efa340e1ec0e01ea5/omrdatasettools/image_generators/CapitanSymbol.py#L80-L106
|
[
"def __euclidean_distance(a: SimplePoint2D, b: SimplePoint2D) -> float:\n return (a.x - b.x) * (a.x - b.x) + abs(a.y - b.y) * abs(a.y - b.y)\n",
"def __subtract_offset(a: SimplePoint2D, b: SimplePoint2D) -> SimplePoint2D:\n return SimplePoint2D(a.x - b.x, a.y - b.y)\n",
"def get_full_path(self, offset: int = None):\n \"\"\"\n :return: Returns the full path that will join all fields according to the following format if no offset if provided:\n 'destination_directory'/'symbol_class'/'raw_file_name_without_extension'_'stroke_thickness'.'extension',\n e.g.: data/images/3-4-Time/1-13_3.png\n\n or with an additional offset-appendix if an offset is provided\n 'destination_directory'/'symbol_class'/'raw_file_name_without_extension'_'stroke_thickness'_offset_'offset'.'extension',\n e.g.: data/images/3-4-Time/1-13_3_offset_74.png\n \"\"\"\n stroke_thickness = \"\"\n if self.stroke_thickness is not None:\n stroke_thickness = \"_{0}\".format(self.stroke_thickness)\n\n staffline_offset = \"\"\n if offset is not None:\n staffline_offset = \"_offset_{0}\".format(offset)\n\n return os.path.join(self.destination_directory, self.symbol_class,\n \"{0}{1}{2}.{3}\".format(self.raw_file_name_without_extension,\n stroke_thickness, staffline_offset, self.extension))\n"
] |
class CapitanSymbol:
def __init__(self, content: str, stroke: List[SimplePoint2D], image_data: numpy.ndarray, symbol_class: str,
dimensions: Rectangle) -> None:
super().__init__()
self.dimensions = dimensions
self.symbol_class = symbol_class
self.content = content
self.stroke = stroke
self.image_data = image_data
@staticmethod
def initialize_from_string(content: str) -> 'CapitanSymbol':
"""
Create and initializes a new symbol from a string
:param content: The content of a symbol as read from the text-file in the form <label>:<sequence>:<image>
:return: The initialized symbol
:rtype: CapitanSymbol
"""
if content is None or content is "":
return None
parts = content.split(":")
min_x = 100000
max_x = 0
min_y = 100000
max_y = 0
symbol_name = parts[0]
sequence = parts[1]
image_numbers = parts[2].split(',')
image_data = numpy.asarray(image_numbers, numpy.uint8).reshape((30, 30))
stroke = []
for point_string in sequence.split(";"):
if point_string is "":
continue # Skip the last element, that is due to a trailing ; in each line
point_x, point_y = point_string.split(",")
x = float(point_x)
y = float(point_y)
stroke.append(SimplePoint2D(x, y))
max_x = max(max_x, x)
min_x = min(min_x, x)
max_y = max(max_y, y)
min_y = min(min_y, y)
dimensions = Rectangle(Point2D(min_x, min_y), int(max_x - min_x + 1), int(max_y - min_y + 1))
return CapitanSymbol(content, stroke, image_data, symbol_name, dimensions)
def draw_capitan_score_bitmap(self, export_path: ExportPath) -> None:
"""
Draws the 30x30 symbol into the given file
:param export_path: The path, where the symbols should be created on disk
"""
with Image.fromarray(self.image_data, mode='L') as image:
image.save(export_path.get_full_path())
@staticmethod
def __euclidean_distance(a: SimplePoint2D, b: SimplePoint2D) -> float:
return (a.x - b.x) * (a.x - b.x) + abs(a.y - b.y) * abs(a.y - b.y)
@staticmethod
def __manhatten_distance(a: SimplePoint2D, b: SimplePoint2D) -> float:
return abs(a.x - b.x) + abs(a.y - b.y)
@staticmethod
def __subtract_offset(a: SimplePoint2D, b: SimplePoint2D) -> SimplePoint2D:
return SimplePoint2D(a.x - b.x, a.y - b.y)
|
apacha/OMR-Datasets
|
omrdatasettools/image_generators/Rectangle.py
|
Rectangle.overlap
|
python
|
def overlap(r1: 'Rectangle', r2: 'Rectangle'):
h_overlaps = (r1.left <= r2.right) and (r1.right >= r2.left)
v_overlaps = (r1.bottom >= r2.top) and (r1.top <= r2.bottom)
return h_overlaps and v_overlaps
|
Overlapping rectangles overlap both horizontally & vertically
|
train
|
https://github.com/apacha/OMR-Datasets/blob/d0a22a03ae35caeef211729efa340e1ec0e01ea5/omrdatasettools/image_generators/Rectangle.py#L18-L24
| null |
class Rectangle:
def __init__(self, origin: Point2D, width: int, height: int):
super().__init__()
self.height = height
self.width = width
self.origin = origin # Resembles the left top point
self.left = origin.x
self.top = origin.y
self.right = self.left + self.width
self.bottom = self.top + self.height
@staticmethod
@staticmethod
def merge(r1: 'Rectangle', r2: 'Rectangle') -> 'Rectangle':
left = min(r1.left, r2.left)
top = min(r1.top, r2.top)
right = max(r1.right, r2.right)
bottom = max(r1.bottom, r2.bottom)
width = right - left
height = bottom - top
return Rectangle(Point2D(left, top), width, height)
def as_bounding_box_with_margin(self, margin: int = 1) -> Tuple[int, int, int, int]:
bounding_box_with_margin = (self.left - margin,
self.top - margin,
self.left + self.width + 2 * margin,
self.top + self.height + 2 * margin)
return bounding_box_with_margin
def __eq__(self, o: object) -> bool:
are_equal = self.width == o.width and self.height == o.height and self.origin == o.origin
return are_equal
def __str__(self) -> str:
return "Rectangle[Origin:{0},{1}, Width:{2}, Height:{3}]".format(self.origin.x, self.origin.y, self.width,
self.height)
|
apacha/OMR-Datasets
|
omrdatasettools/image_generators/AudiverisOmrImageGenerator.py
|
AudiverisOmrImageGenerator.extract_symbols
|
python
|
def extract_symbols(self, raw_data_directory: str, destination_directory: str):
print("Extracting Symbols from Audiveris OMR Dataset...")
all_xml_files = [y for x in os.walk(raw_data_directory) for y in glob(os.path.join(x[0], '*.xml'))]
all_image_files = [y for x in os.walk(raw_data_directory) for y in glob(os.path.join(x[0], '*.png'))]
data_pairs = []
for i in range(len(all_xml_files)):
data_pairs.append((all_xml_files[i], all_image_files[i]))
for data_pair in data_pairs:
self.__extract_symbols(data_pair[0], data_pair[1], destination_directory)
|
Extracts the symbols from the raw XML documents and matching images of the Audiveris OMR dataset into
individual symbols
:param raw_data_directory: The directory, that contains the xml-files and matching images
:param destination_directory: The directory, in which the symbols should be generated into. One sub-folder per
symbol category will be generated automatically
|
train
|
https://github.com/apacha/OMR-Datasets/blob/d0a22a03ae35caeef211729efa340e1ec0e01ea5/omrdatasettools/image_generators/AudiverisOmrImageGenerator.py#L16-L35
|
[
"def __extract_symbols(self, xml_file: str, image_file: str, destination_directory: str):\n # xml_file, image_file = 'data/audiveris_omr_raw\\\\IMSLP06053p1.xml', 'data/audiveris_omr_raw\\\\IMSLP06053p1.png'\n # xml_file, image_file = 'data/audiveris_omr_raw\\\\mops-1.xml', 'data/audiveris_omr_raw\\\\mops-1.png'\n # xml_file, image_file = 'data/audiveris_omr_raw\\\\mtest1-1.xml', 'data/audiveris_omr_raw\\\\mtest1-1.png'\n # xml_file, image_file = 'data/audiveris_omr_raw\\\\mtest2-1.xml', 'data/audiveris_omr_raw\\\\mtest2-1.png'\n image = Image.open(image_file)\n annotations = ElementTree.parse(xml_file).getroot()\n xml_symbols = annotations.findall(\"Symbol\")\n\n file_name_without_extension = os.path.splitext(os.path.basename(xml_file))[0]\n symbols = []\n\n for xml_symbol in xml_symbols:\n symbol_class = xml_symbol.get(\"shape\")\n\n bounds = xml_symbol.find(\"Bounds\")\n x, y, width, height = bounds.get(\"x\"), bounds.get(\"y\"), bounds.get(\"w\"), bounds.get(\"h\")\n x, y, width, height = int(float(x)), int(float(y)), int(float(width)), int(float(height))\n\n symbol = AudiverisOmrSymbol(symbol_class, x, y, width, height)\n symbols.append(symbol)\n\n symbol_number = 0\n for symbol in symbols:\n symbol_class = symbol.symbol_class\n bounding_box_with_one_pixel_margin = symbol.as_bounding_box_with_margin(1)\n symbol_image = image.crop(bounding_box_with_one_pixel_margin)\n\n target_directory = os.path.join(destination_directory, symbol_class)\n os.makedirs(target_directory, exist_ok=True)\n\n export_path = ExportPath(destination_directory, symbol_class,\n file_name_without_extension + str(symbol_number))\n symbol_image.save(export_path.get_full_path())\n symbol_number += 1\n"
] |
class AudiverisOmrImageGenerator:
def __init__(self) -> None:
super().__init__()
def __extract_symbols(self, xml_file: str, image_file: str, destination_directory: str):
# xml_file, image_file = 'data/audiveris_omr_raw\\IMSLP06053p1.xml', 'data/audiveris_omr_raw\\IMSLP06053p1.png'
# xml_file, image_file = 'data/audiveris_omr_raw\\mops-1.xml', 'data/audiveris_omr_raw\\mops-1.png'
# xml_file, image_file = 'data/audiveris_omr_raw\\mtest1-1.xml', 'data/audiveris_omr_raw\\mtest1-1.png'
# xml_file, image_file = 'data/audiveris_omr_raw\\mtest2-1.xml', 'data/audiveris_omr_raw\\mtest2-1.png'
image = Image.open(image_file)
annotations = ElementTree.parse(xml_file).getroot()
xml_symbols = annotations.findall("Symbol")
file_name_without_extension = os.path.splitext(os.path.basename(xml_file))[0]
symbols = []
for xml_symbol in xml_symbols:
symbol_class = xml_symbol.get("shape")
bounds = xml_symbol.find("Bounds")
x, y, width, height = bounds.get("x"), bounds.get("y"), bounds.get("w"), bounds.get("h")
x, y, width, height = int(float(x)), int(float(y)), int(float(width)), int(float(height))
symbol = AudiverisOmrSymbol(symbol_class, x, y, width, height)
symbols.append(symbol)
symbol_number = 0
for symbol in symbols:
symbol_class = symbol.symbol_class
bounding_box_with_one_pixel_margin = symbol.as_bounding_box_with_margin(1)
symbol_image = image.crop(bounding_box_with_one_pixel_margin)
target_directory = os.path.join(destination_directory, symbol_class)
os.makedirs(target_directory, exist_ok=True)
export_path = ExportPath(destination_directory, symbol_class,
file_name_without_extension + str(symbol_number))
symbol_image.save(export_path.get_full_path())
symbol_number += 1
|
apacha/OMR-Datasets
|
omrdatasettools/converters/csv_to_crop_object_conversion.py
|
convert_csv_annotations_to_cropobject
|
python
|
def convert_csv_annotations_to_cropobject(annotations_path: str, image_path: str) -> List[CropObject]:
annotations = pd.read_csv(annotations_path)
image = Image.open(image_path) # type: Image.Image
crop_objects = []
node_id = 0
for index, annotation in annotations.iterrows():
# Annotation example:
# image_name,top,left,bottom,right,class_name,confidence
# CVC-MUSCIMA_W-01_N-10_D-ideal_1.png,138.93,2286.36,185.20,2316.52,8th_flag,1.00
image_name = annotation["image_name"]
class_name = annotation["class_name"]
top = round(annotation["top"])
left = round(annotation["left"])
width = round(annotation["right"] - annotation["left"])
heigth = round(annotation["bottom"] - annotation["top"])
crop_object = CropObject(node_id, class_name, top, left, width, heigth)
crop_object.set_doc(image_name)
crop_image = image.crop((left, top, crop_object.right, crop_object.bottom)).convert("1")
# noinspection PyTypeChecker
cropped_image_mask = np.array(crop_image)
crop_object.set_mask(cropped_image_mask)
crop_objects.append(crop_object)
node_id += 1
return crop_objects
|
Converts a normalized dataset of objects into crop-objects.
:param annotations_path: Path to the csv-file that contains bounding boxes in the following
format for a single image:
image_name,top,left,bottom,right,class_name,confidence
CVC-MUSCIMA_W-01_N-10_D-ideal_1.png,138.93,2286.36,185.20,2316.52,8th_flag,1.00
:param image_path: Image that is being described by the file given under the annotations_path
:return: A list of CropObjects as being used by the MUSCIMA++ dataset including the binary image-masks
|
train
|
https://github.com/apacha/OMR-Datasets/blob/d0a22a03ae35caeef211729efa340e1ec0e01ea5/omrdatasettools/converters/csv_to_crop_object_conversion.py#L14-L48
| null |
import argparse
import os
from glob import glob
from typing import List
import pandas as pd
from PIL import Image
from muscima.cropobject import CropObject
import numpy as np
from muscima.io import export_cropobject_list
from tqdm import tqdm
def write_crop_objects_to_disk(crop_objects: List[CropObject], output_directory: str, output_filename: str) -> None:
os.makedirs(output_directory, exist_ok=True)
cropobject_xml_string = export_cropobject_list(crop_objects)
with open(os.path.join(output_directory, output_filename), "w") as file:
file.write(cropobject_xml_string)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Downloads a dataset from the Edirom system')
parser.add_argument('--annotations_directory', type=str, required=True,
help='Path to the annotation csv-files directory')
parser.add_argument('--image_directory', type=str, required=True,
help='Path to the images from which the mask should be extracted')
parser.add_argument("--output_directory", type=str, default="../converted_crop_objects",
help="The directory, where the converted CropObjects will be copied to")
flags, unparsed = parser.parse_known_args()
annotation_paths = glob(flags.annotations_directory + "/**/*.csv", recursive=True)
image_paths = glob(flags.image_directory + "/**/*.png", recursive=True)
for annotation_path, image_path in tqdm(zip(annotation_paths, image_paths), desc="Converting annotations", total=len(image_paths)):
crop_objects = convert_csv_annotations_to_cropobject(annotation_path, image_path)
output_filename = os.path.basename(image_path).replace('png', 'xml')
write_crop_objects_to_disk(crop_objects, flags.output_directory, output_filename)
|
apacha/OMR-Datasets
|
omrdatasettools/image_generators/ExportPath.py
|
ExportPath.get_full_path
|
python
|
def get_full_path(self, offset: int = None):
stroke_thickness = ""
if self.stroke_thickness is not None:
stroke_thickness = "_{0}".format(self.stroke_thickness)
staffline_offset = ""
if offset is not None:
staffline_offset = "_offset_{0}".format(offset)
return os.path.join(self.destination_directory, self.symbol_class,
"{0}{1}{2}.{3}".format(self.raw_file_name_without_extension,
stroke_thickness, staffline_offset, self.extension))
|
:return: Returns the full path that will join all fields according to the following format if no offset if provided:
'destination_directory'/'symbol_class'/'raw_file_name_without_extension'_'stroke_thickness'.'extension',
e.g.: data/images/3-4-Time/1-13_3.png
or with an additional offset-appendix if an offset is provided
'destination_directory'/'symbol_class'/'raw_file_name_without_extension'_'stroke_thickness'_offset_'offset'.'extension',
e.g.: data/images/3-4-Time/1-13_3_offset_74.png
|
train
|
https://github.com/apacha/OMR-Datasets/blob/d0a22a03ae35caeef211729efa340e1ec0e01ea5/omrdatasettools/image_generators/ExportPath.py#L14-L34
| null |
class ExportPath:
def __init__(self, destination_directory: str, symbol_class: str, raw_file_name_without_extension: str,
extension: str = "png", stroke_thickness: int = None) -> None:
super().__init__()
self.stroke_thickness = stroke_thickness
self.extension = extension
self.raw_file_name_without_extension = raw_file_name_without_extension
self.symbol_class = symbol_class
self.destination_directory = destination_directory
def get_class_name_and_file_path(self, offset: int = None):
staffline_offset = ""
if offset is not None:
staffline_offset = "_offset_{0}".format(offset)
return os.path.join(self.symbol_class, "{0}_{1}{2}.{3}".format(self.raw_file_name_without_extension,
self.stroke_thickness, staffline_offset,
self.extension))
|
apacha/OMR-Datasets
|
omrdatasettools/image_generators/HomusSymbol.py
|
HomusSymbol.initialize_from_string
|
python
|
def initialize_from_string(content: str) -> 'HomusSymbol':
if content is None or content is "":
return None
lines = content.splitlines()
min_x = sys.maxsize
max_x = 0
min_y = sys.maxsize
max_y = 0
symbol_name = lines[0]
strokes = []
for stroke_string in lines[1:]:
stroke = []
for point_string in stroke_string.split(";"):
if point_string is "":
continue # Skip the last element, that is due to a trailing ; in each line
point_x, point_y = point_string.split(",")
x = int(point_x)
y = int(point_y)
stroke.append(Point2D(x, y))
max_x = max(max_x, x)
min_x = min(min_x, x)
max_y = max(max_y, y)
min_y = min(min_y, y)
strokes.append(stroke)
dimensions = Rectangle(Point2D(min_x, min_y), max_x - min_x + 1, max_y - min_y + 1)
return HomusSymbol(content, strokes, symbol_name, dimensions)
|
Create and initializes a new symbol from a string
:param content: The content of a symbol as read from the text-file
:return: The initialized symbol
:rtype: HomusSymbol
|
train
|
https://github.com/apacha/OMR-Datasets/blob/d0a22a03ae35caeef211729efa340e1ec0e01ea5/omrdatasettools/image_generators/HomusSymbol.py#L21-L62
| null |
class HomusSymbol:
def __init__(self, content: str, strokes: List[List[Point2D]], symbol_class: str, dimensions: Rectangle) -> None:
super().__init__()
self.dimensions = dimensions
self.symbol_class = symbol_class
self.content = content
self.strokes = strokes
@staticmethod
def draw_into_bitmap(self, export_path: ExportPath, stroke_thickness: int, margin: int = 0) -> None:
"""
Draws the symbol in the original size that it has plus an optional margin
:param export_path: The path, where the symbols should be created on disk
:param stroke_thickness: Pen-thickness for drawing the symbol in pixels
:param margin: An optional margin for each symbol
"""
self.draw_onto_canvas(export_path,
stroke_thickness,
margin,
self.dimensions.width + 2 * margin,
self.dimensions.height + 2 * margin)
def draw_onto_canvas(self, export_path: ExportPath, stroke_thickness: int, margin: int, destination_width: int,
destination_height: int, staff_line_spacing: int = 14,
staff_line_vertical_offsets: List[int] = None,
bounding_boxes: dict = None, random_position_on_canvas: bool = False) -> None:
"""
Draws the symbol onto a canvas with a fixed size
:param bounding_boxes: The dictionary into which the bounding-boxes will be added of each generated image
:param export_path: The path, where the symbols should be created on disk
:param stroke_thickness:
:param margin:
:param destination_width:
:param destination_height:
:param staff_line_spacing:
:param staff_line_vertical_offsets: Offsets used for drawing staff-lines. If None provided, no staff-lines will be drawn if multiple integers are provided, multiple images will be generated
"""
width = self.dimensions.width + 2 * margin
height = self.dimensions.height + 2 * margin
if random_position_on_canvas:
# max is required for elements that are larger than the canvas,
# where the possible range for the random value would be negative
random_horizontal_offset = random.randint(0, max(0, destination_width - width))
random_vertical_offset = random.randint(0, max(0, destination_height - height))
offset = Point2D(self.dimensions.origin.x - margin - random_horizontal_offset,
self.dimensions.origin.y - margin - random_vertical_offset)
else:
width_offset_for_centering = (destination_width - width) / 2
height_offset_for_centering = (destination_height - height) / 2
offset = Point2D(self.dimensions.origin.x - margin - width_offset_for_centering,
self.dimensions.origin.y - margin - height_offset_for_centering)
image_without_staff_lines = Image.new('RGB', (destination_width, destination_height),
"white") # create a new white image
draw = ImageDraw.Draw(image_without_staff_lines)
black = (0, 0, 0)
for stroke in self.strokes:
for i in range(0, len(stroke) - 1):
start_point = self.__subtract_offset(stroke[i], offset)
end_point = self.__subtract_offset(stroke[i + 1], offset)
draw.line((start_point.x, start_point.y, end_point.x, end_point.y), black, stroke_thickness)
location = self.__subtract_offset(self.dimensions.origin, offset)
bounding_box_in_image = Rectangle(location, self.dimensions.width, self.dimensions.height)
# self.draw_bounding_box(draw, location)
del draw
if staff_line_vertical_offsets is not None and staff_line_vertical_offsets:
for staff_line_vertical_offset in staff_line_vertical_offsets:
image_with_staff_lines = image_without_staff_lines.copy()
self.__draw_staff_lines_into_image(image_with_staff_lines, stroke_thickness,
staff_line_spacing, staff_line_vertical_offset)
file_name_with_offset = export_path.get_full_path(staff_line_vertical_offset)
image_with_staff_lines.save(file_name_with_offset)
image_with_staff_lines.close()
if bounding_boxes is not None:
# Note that the ImageDatasetGenerator does not yield the full path, but only the class_name and
# the file_name, e.g. '3-4-Time\\1-13_3_offset_74.png', so we store only that part in the dictionary
class_and_file_name = export_path.get_class_name_and_file_path(staff_line_vertical_offset)
bounding_boxes[class_and_file_name] = bounding_box_in_image
else:
image_without_staff_lines.save(export_path.get_full_path())
if bounding_boxes is not None:
# Note that the ImageDatasetGenerator does not yield the full path, but only the class_name and
# the file_name, e.g. '3-4-Time\\1-13_3_offset_74.png', so we store only that part in the dictionary
class_and_file_name = export_path.get_class_name_and_file_path()
bounding_boxes[class_and_file_name] = bounding_box_in_image
image_without_staff_lines.close()
def draw_bounding_box(self, draw, location):
red = (255, 0, 0)
draw.rectangle(
(location.x, location.y, location.x + self.dimensions.width, location.y + self.dimensions.height),
fill=None, outline=red)
@staticmethod
def __draw_staff_lines_into_image(image: Image,
stroke_thickness: int,
staff_line_spacing: int = 14,
vertical_offset=88):
black = (0, 0, 0)
width = image.width
draw = ImageDraw.Draw(image)
for i in range(5):
y = vertical_offset + i * staff_line_spacing
draw.line((0, y, width, y), black, stroke_thickness)
del draw
@staticmethod
def __subtract_offset(a: Point2D, b: Point2D) -> Point2D:
return Point2D(a.x - b.x, a.y - b.y)
|
apacha/OMR-Datasets
|
omrdatasettools/image_generators/HomusSymbol.py
|
HomusSymbol.draw_into_bitmap
|
python
|
def draw_into_bitmap(self, export_path: ExportPath, stroke_thickness: int, margin: int = 0) -> None:
self.draw_onto_canvas(export_path,
stroke_thickness,
margin,
self.dimensions.width + 2 * margin,
self.dimensions.height + 2 * margin)
|
Draws the symbol in the original size that it has plus an optional margin
:param export_path: The path, where the symbols should be created on disk
:param stroke_thickness: Pen-thickness for drawing the symbol in pixels
:param margin: An optional margin for each symbol
|
train
|
https://github.com/apacha/OMR-Datasets/blob/d0a22a03ae35caeef211729efa340e1ec0e01ea5/omrdatasettools/image_generators/HomusSymbol.py#L64-L76
|
[
"def draw_onto_canvas(self, export_path: ExportPath, stroke_thickness: int, margin: int, destination_width: int,\n destination_height: int, staff_line_spacing: int = 14,\n staff_line_vertical_offsets: List[int] = None,\n bounding_boxes: dict = None, random_position_on_canvas: bool = False) -> None:\n \"\"\"\n Draws the symbol onto a canvas with a fixed size\n\n :param bounding_boxes: The dictionary into which the bounding-boxes will be added of each generated image\n :param export_path: The path, where the symbols should be created on disk\n :param stroke_thickness:\n :param margin:\n :param destination_width:\n :param destination_height:\n :param staff_line_spacing:\n :param staff_line_vertical_offsets: Offsets used for drawing staff-lines. If None provided, no staff-lines will be drawn if multiple integers are provided, multiple images will be generated\n \"\"\"\n width = self.dimensions.width + 2 * margin\n height = self.dimensions.height + 2 * margin\n if random_position_on_canvas:\n # max is required for elements that are larger than the canvas,\n # where the possible range for the random value would be negative\n random_horizontal_offset = random.randint(0, max(0, destination_width - width))\n random_vertical_offset = random.randint(0, max(0, destination_height - height))\n offset = Point2D(self.dimensions.origin.x - margin - random_horizontal_offset,\n self.dimensions.origin.y - margin - random_vertical_offset)\n else:\n width_offset_for_centering = (destination_width - width) / 2\n height_offset_for_centering = (destination_height - height) / 2\n offset = Point2D(self.dimensions.origin.x - margin - width_offset_for_centering,\n self.dimensions.origin.y - margin - height_offset_for_centering)\n\n image_without_staff_lines = Image.new('RGB', (destination_width, destination_height),\n \"white\") # create a new white image\n draw = ImageDraw.Draw(image_without_staff_lines)\n black = (0, 0, 0)\n\n for stroke in self.strokes:\n for i in range(0, len(stroke) - 
1):\n start_point = self.__subtract_offset(stroke[i], offset)\n end_point = self.__subtract_offset(stroke[i + 1], offset)\n draw.line((start_point.x, start_point.y, end_point.x, end_point.y), black, stroke_thickness)\n\n location = self.__subtract_offset(self.dimensions.origin, offset)\n bounding_box_in_image = Rectangle(location, self.dimensions.width, self.dimensions.height)\n # self.draw_bounding_box(draw, location)\n\n del draw\n\n if staff_line_vertical_offsets is not None and staff_line_vertical_offsets:\n for staff_line_vertical_offset in staff_line_vertical_offsets:\n image_with_staff_lines = image_without_staff_lines.copy()\n self.__draw_staff_lines_into_image(image_with_staff_lines, stroke_thickness,\n staff_line_spacing, staff_line_vertical_offset)\n file_name_with_offset = export_path.get_full_path(staff_line_vertical_offset)\n image_with_staff_lines.save(file_name_with_offset)\n image_with_staff_lines.close()\n\n if bounding_boxes is not None:\n # Note that the ImageDatasetGenerator does not yield the full path, but only the class_name and\n # the file_name, e.g. '3-4-Time\\\\1-13_3_offset_74.png', so we store only that part in the dictionary\n class_and_file_name = export_path.get_class_name_and_file_path(staff_line_vertical_offset)\n bounding_boxes[class_and_file_name] = bounding_box_in_image\n else:\n image_without_staff_lines.save(export_path.get_full_path())\n if bounding_boxes is not None:\n # Note that the ImageDatasetGenerator does not yield the full path, but only the class_name and\n # the file_name, e.g. '3-4-Time\\\\1-13_3_offset_74.png', so we store only that part in the dictionary\n class_and_file_name = export_path.get_class_name_and_file_path()\n bounding_boxes[class_and_file_name] = bounding_box_in_image\n\n image_without_staff_lines.close()\n"
] |
class HomusSymbol:
def __init__(self, content: str, strokes: List[List[Point2D]], symbol_class: str, dimensions: Rectangle) -> None:
super().__init__()
self.dimensions = dimensions
self.symbol_class = symbol_class
self.content = content
self.strokes = strokes
@staticmethod
def initialize_from_string(content: str) -> 'HomusSymbol':
"""
Create and initializes a new symbol from a string
:param content: The content of a symbol as read from the text-file
:return: The initialized symbol
:rtype: HomusSymbol
"""
if content is None or content is "":
return None
lines = content.splitlines()
min_x = sys.maxsize
max_x = 0
min_y = sys.maxsize
max_y = 0
symbol_name = lines[0]
strokes = []
for stroke_string in lines[1:]:
stroke = []
for point_string in stroke_string.split(";"):
if point_string is "":
continue # Skip the last element, that is due to a trailing ; in each line
point_x, point_y = point_string.split(",")
x = int(point_x)
y = int(point_y)
stroke.append(Point2D(x, y))
max_x = max(max_x, x)
min_x = min(min_x, x)
max_y = max(max_y, y)
min_y = min(min_y, y)
strokes.append(stroke)
dimensions = Rectangle(Point2D(min_x, min_y), max_x - min_x + 1, max_y - min_y + 1)
return HomusSymbol(content, strokes, symbol_name, dimensions)
def draw_onto_canvas(self, export_path: ExportPath, stroke_thickness: int, margin: int, destination_width: int,
destination_height: int, staff_line_spacing: int = 14,
staff_line_vertical_offsets: List[int] = None,
bounding_boxes: dict = None, random_position_on_canvas: bool = False) -> None:
"""
Draws the symbol onto a canvas with a fixed size
:param bounding_boxes: The dictionary into which the bounding-boxes will be added of each generated image
:param export_path: The path, where the symbols should be created on disk
:param stroke_thickness:
:param margin:
:param destination_width:
:param destination_height:
:param staff_line_spacing:
:param staff_line_vertical_offsets: Offsets used for drawing staff-lines. If None provided, no staff-lines will be drawn if multiple integers are provided, multiple images will be generated
"""
width = self.dimensions.width + 2 * margin
height = self.dimensions.height + 2 * margin
if random_position_on_canvas:
# max is required for elements that are larger than the canvas,
# where the possible range for the random value would be negative
random_horizontal_offset = random.randint(0, max(0, destination_width - width))
random_vertical_offset = random.randint(0, max(0, destination_height - height))
offset = Point2D(self.dimensions.origin.x - margin - random_horizontal_offset,
self.dimensions.origin.y - margin - random_vertical_offset)
else:
width_offset_for_centering = (destination_width - width) / 2
height_offset_for_centering = (destination_height - height) / 2
offset = Point2D(self.dimensions.origin.x - margin - width_offset_for_centering,
self.dimensions.origin.y - margin - height_offset_for_centering)
image_without_staff_lines = Image.new('RGB', (destination_width, destination_height),
"white") # create a new white image
draw = ImageDraw.Draw(image_without_staff_lines)
black = (0, 0, 0)
for stroke in self.strokes:
for i in range(0, len(stroke) - 1):
start_point = self.__subtract_offset(stroke[i], offset)
end_point = self.__subtract_offset(stroke[i + 1], offset)
draw.line((start_point.x, start_point.y, end_point.x, end_point.y), black, stroke_thickness)
location = self.__subtract_offset(self.dimensions.origin, offset)
bounding_box_in_image = Rectangle(location, self.dimensions.width, self.dimensions.height)
# self.draw_bounding_box(draw, location)
del draw
if staff_line_vertical_offsets is not None and staff_line_vertical_offsets:
for staff_line_vertical_offset in staff_line_vertical_offsets:
image_with_staff_lines = image_without_staff_lines.copy()
self.__draw_staff_lines_into_image(image_with_staff_lines, stroke_thickness,
staff_line_spacing, staff_line_vertical_offset)
file_name_with_offset = export_path.get_full_path(staff_line_vertical_offset)
image_with_staff_lines.save(file_name_with_offset)
image_with_staff_lines.close()
if bounding_boxes is not None:
# Note that the ImageDatasetGenerator does not yield the full path, but only the class_name and
# the file_name, e.g. '3-4-Time\\1-13_3_offset_74.png', so we store only that part in the dictionary
class_and_file_name = export_path.get_class_name_and_file_path(staff_line_vertical_offset)
bounding_boxes[class_and_file_name] = bounding_box_in_image
else:
image_without_staff_lines.save(export_path.get_full_path())
if bounding_boxes is not None:
# Note that the ImageDatasetGenerator does not yield the full path, but only the class_name and
# the file_name, e.g. '3-4-Time\\1-13_3_offset_74.png', so we store only that part in the dictionary
class_and_file_name = export_path.get_class_name_and_file_path()
bounding_boxes[class_and_file_name] = bounding_box_in_image
image_without_staff_lines.close()
def draw_bounding_box(self, draw, location):
red = (255, 0, 0)
draw.rectangle(
(location.x, location.y, location.x + self.dimensions.width, location.y + self.dimensions.height),
fill=None, outline=red)
@staticmethod
def __draw_staff_lines_into_image(image: Image,
stroke_thickness: int,
staff_line_spacing: int = 14,
vertical_offset=88):
black = (0, 0, 0)
width = image.width
draw = ImageDraw.Draw(image)
for i in range(5):
y = vertical_offset + i * staff_line_spacing
draw.line((0, y, width, y), black, stroke_thickness)
del draw
@staticmethod
def __subtract_offset(a: Point2D, b: Point2D) -> Point2D:
return Point2D(a.x - b.x, a.y - b.y)
|
apacha/OMR-Datasets
|
omrdatasettools/image_generators/HomusSymbol.py
|
HomusSymbol.draw_onto_canvas
|
python
|
def draw_onto_canvas(self, export_path: ExportPath, stroke_thickness: int, margin: int, destination_width: int,
destination_height: int, staff_line_spacing: int = 14,
staff_line_vertical_offsets: List[int] = None,
bounding_boxes: dict = None, random_position_on_canvas: bool = False) -> None:
width = self.dimensions.width + 2 * margin
height = self.dimensions.height + 2 * margin
if random_position_on_canvas:
# max is required for elements that are larger than the canvas,
# where the possible range for the random value would be negative
random_horizontal_offset = random.randint(0, max(0, destination_width - width))
random_vertical_offset = random.randint(0, max(0, destination_height - height))
offset = Point2D(self.dimensions.origin.x - margin - random_horizontal_offset,
self.dimensions.origin.y - margin - random_vertical_offset)
else:
width_offset_for_centering = (destination_width - width) / 2
height_offset_for_centering = (destination_height - height) / 2
offset = Point2D(self.dimensions.origin.x - margin - width_offset_for_centering,
self.dimensions.origin.y - margin - height_offset_for_centering)
image_without_staff_lines = Image.new('RGB', (destination_width, destination_height),
"white") # create a new white image
draw = ImageDraw.Draw(image_without_staff_lines)
black = (0, 0, 0)
for stroke in self.strokes:
for i in range(0, len(stroke) - 1):
start_point = self.__subtract_offset(stroke[i], offset)
end_point = self.__subtract_offset(stroke[i + 1], offset)
draw.line((start_point.x, start_point.y, end_point.x, end_point.y), black, stroke_thickness)
location = self.__subtract_offset(self.dimensions.origin, offset)
bounding_box_in_image = Rectangle(location, self.dimensions.width, self.dimensions.height)
# self.draw_bounding_box(draw, location)
del draw
if staff_line_vertical_offsets is not None and staff_line_vertical_offsets:
for staff_line_vertical_offset in staff_line_vertical_offsets:
image_with_staff_lines = image_without_staff_lines.copy()
self.__draw_staff_lines_into_image(image_with_staff_lines, stroke_thickness,
staff_line_spacing, staff_line_vertical_offset)
file_name_with_offset = export_path.get_full_path(staff_line_vertical_offset)
image_with_staff_lines.save(file_name_with_offset)
image_with_staff_lines.close()
if bounding_boxes is not None:
# Note that the ImageDatasetGenerator does not yield the full path, but only the class_name and
# the file_name, e.g. '3-4-Time\\1-13_3_offset_74.png', so we store only that part in the dictionary
class_and_file_name = export_path.get_class_name_and_file_path(staff_line_vertical_offset)
bounding_boxes[class_and_file_name] = bounding_box_in_image
else:
image_without_staff_lines.save(export_path.get_full_path())
if bounding_boxes is not None:
# Note that the ImageDatasetGenerator does not yield the full path, but only the class_name and
# the file_name, e.g. '3-4-Time\\1-13_3_offset_74.png', so we store only that part in the dictionary
class_and_file_name = export_path.get_class_name_and_file_path()
bounding_boxes[class_and_file_name] = bounding_box_in_image
image_without_staff_lines.close()
|
Draws the symbol onto a canvas with a fixed size
:param bounding_boxes: The dictionary into which the bounding-boxes will be added of each generated image
:param export_path: The path, where the symbols should be created on disk
:param stroke_thickness:
:param margin:
:param destination_width:
:param destination_height:
:param staff_line_spacing:
:param staff_line_vertical_offsets: Offsets used for drawing staff-lines. If None is provided, no staff-lines will be drawn; if multiple integers are provided, multiple images will be generated
|
train
|
https://github.com/apacha/OMR-Datasets/blob/d0a22a03ae35caeef211729efa340e1ec0e01ea5/omrdatasettools/image_generators/HomusSymbol.py#L78-L148
|
[
"def get_full_path(self, offset: int = None):\n \"\"\"\n :return: Returns the full path that will join all fields according to the following format if no offset if provided:\n 'destination_directory'/'symbol_class'/'raw_file_name_without_extension'_'stroke_thickness'.'extension',\n e.g.: data/images/3-4-Time/1-13_3.png\n\n or with an additional offset-appendix if an offset is provided\n 'destination_directory'/'symbol_class'/'raw_file_name_without_extension'_'stroke_thickness'_offset_'offset'.'extension',\n e.g.: data/images/3-4-Time/1-13_3_offset_74.png\n \"\"\"\n stroke_thickness = \"\"\n if self.stroke_thickness is not None:\n stroke_thickness = \"_{0}\".format(self.stroke_thickness)\n\n staffline_offset = \"\"\n if offset is not None:\n staffline_offset = \"_offset_{0}\".format(offset)\n\n return os.path.join(self.destination_directory, self.symbol_class,\n \"{0}{1}{2}.{3}\".format(self.raw_file_name_without_extension,\n stroke_thickness, staffline_offset, self.extension))\n",
"def get_class_name_and_file_path(self, offset: int = None):\n\n staffline_offset = \"\"\n if offset is not None:\n staffline_offset = \"_offset_{0}\".format(offset)\n\n return os.path.join(self.symbol_class, \"{0}_{1}{2}.{3}\".format(self.raw_file_name_without_extension,\n self.stroke_thickness, staffline_offset,\n self.extension))\n",
"def __draw_staff_lines_into_image(image: Image,\n stroke_thickness: int,\n staff_line_spacing: int = 14,\n vertical_offset=88):\n black = (0, 0, 0)\n width = image.width\n draw = ImageDraw.Draw(image)\n\n for i in range(5):\n y = vertical_offset + i * staff_line_spacing\n draw.line((0, y, width, y), black, stroke_thickness)\n del draw\n",
"def __subtract_offset(a: Point2D, b: Point2D) -> Point2D:\n return Point2D(a.x - b.x, a.y - b.y)\n"
] |
class HomusSymbol:
    """A single HOMUS music symbol: raw text content, pen strokes, class label and bounding box."""

    def __init__(self, content: str, strokes: List[List[Point2D]], symbol_class: str, dimensions: Rectangle) -> None:
        super().__init__()
        self.dimensions = dimensions   # bounding Rectangle of all strokes
        self.symbol_class = symbol_class
        self.content = content         # original textual representation
        self.strokes = strokes         # list of strokes, each a list of Point2D

    @staticmethod
    def initialize_from_string(content: str) -> 'HomusSymbol':
        """
        Create and initializes a new symbol from a string

        :param content: The content of a symbol as read from the text-file
        :return: The initialized symbol, or None for empty/missing content
        :rtype: HomusSymbol
        """
        # Fix: the original compared strings with `is` (`content is ""`), which tests
        # object identity, is unreliable, and raises a SyntaxWarning on modern CPython.
        if not content:
            return None

        lines = content.splitlines()

        min_x = sys.maxsize
        max_x = 0
        min_y = sys.maxsize
        max_y = 0

        symbol_name = lines[0]
        strokes = []

        for stroke_string in lines[1:]:
            stroke = []

            for point_string in stroke_string.split(";"):
                if not point_string:
                    continue  # Skip the last element, that is due to a trailing ; in each line

                point_x, point_y = point_string.split(",")
                x = int(point_x)
                y = int(point_y)
                stroke.append(Point2D(x, y))

                # Track the extremes to compute the bounding box below.
                max_x = max(max_x, x)
                min_x = min(min_x, x)
                max_y = max(max_y, y)
                min_y = min(min_y, y)

            strokes.append(stroke)

        dimensions = Rectangle(Point2D(min_x, min_y), max_x - min_x + 1, max_y - min_y + 1)
        return HomusSymbol(content, strokes, symbol_name, dimensions)

    def draw_into_bitmap(self, export_path: ExportPath, stroke_thickness: int, margin: int = 0) -> None:
        """
        Draws the symbol in the original size that it has plus an optional margin

        :param export_path: The path, where the symbols should be created on disk
        :param stroke_thickness: Pen-thickness for drawing the symbol in pixels
        :param margin: An optional margin for each symbol
        """
        self.draw_onto_canvas(export_path,
                              stroke_thickness,
                              margin,
                              self.dimensions.width + 2 * margin,
                              self.dimensions.height + 2 * margin)

    def draw_bounding_box(self, draw, location):
        """Outline this symbol's bounding rectangle in red on *draw* (debugging aid)."""
        red = (255, 0, 0)
        draw.rectangle(
            (location.x, location.y, location.x + self.dimensions.width, location.y + self.dimensions.height),
            fill=None, outline=red)

    @staticmethod
    def __draw_staff_lines_into_image(image: Image,
                                      stroke_thickness: int,
                                      staff_line_spacing: int = 14,
                                      vertical_offset=88):
        """Render the five horizontal lines of a musical staff across the full image width."""
        black = (0, 0, 0)
        width = image.width
        draw = ImageDraw.Draw(image)

        for i in range(5):
            y = vertical_offset + i * staff_line_spacing
            draw.line((0, y, width, y), black, stroke_thickness)
        del draw

    @staticmethod
    def __subtract_offset(a: Point2D, b: Point2D) -> Point2D:
        """Return a new point equal to *a* translated by the negative of *b*."""
        return Point2D(a.x - b.x, a.y - b.y)
|
thoth-station/solver
|
thoth/solver/python/base.py
|
compare_version
|
python
|
def compare_version(a, b):  # Ignore PyDocStyleBear
    """Compare two version strings.

    :param a: str
    :param b: str
    :return: -1 if a < b, 0 if equal, 1 if a > b
    """
    def _as_int_fields(version):
        """Split a version string into integer fields; non-numeric fields map to -1.

        Non-numeric annotations (rc*, alpha, beta, ...) thus sort below their
        plain-release counterparts.
        """
        fields = []
        for token in version.replace("-", ".").split("."):
            try:
                fields.append(int(token))
            except ValueError:
                fields.append(-1)
        return fields

    left = _as_int_fields(a)
    right = _as_int_fields(b)

    # Zero-pad the shorter field list so both have the same cardinality.
    if len(left) < len(right):
        left += [0] * (len(right) - len(left))
    elif len(right) < len(left):
        right += [0] * (len(left) - len(right))

    # Standard cmp idiom: True/False subtraction yields -1 / 0 / 1.
    return (left > right) - (left < right)
|
Compare two version strings.
:param a: str
:param b: str
:return: -1 / 0 / 1
|
train
|
https://github.com/thoth-station/solver/blob/de9bd6e744cb4d5f70320ba77d6875ccb8b876c4/thoth/solver/python/base.py#L40-L94
|
[
"def _range(q):\n \"\"\"Convert a version string to array of integers.\n\n \"1.2.3\" -> [1, 2, 3]\n\n :param q: str\n :return: List[int]\n \"\"\"\n r = []\n for n in q.replace(\"-\", \".\").split(\".\"):\n try:\n r.append(int(n))\n except ValueError:\n # sort rc*, alpha, beta etc. lower than their non-annotated counterparts\n r.append(-1)\n return r\n",
"def _cardinal(x, y):\n \"\"\"Make both input lists be of same cardinality.\n\n :param x: List[int]\n :param y: List[int]\n :return: List[int]\n \"\"\"\n lx, ly = len(x), len(y)\n if lx == ly:\n return x, y\n elif lx > ly:\n return x, _append_zeros(y, lx - ly)\n else:\n return _append_zeros(x, ly - lx), y\n"
] |
#!/usr/bin/env python3
# thoth-solver
# Copyright(C) 2018, 2019 Fridolin Pokorny
#
# This program is free software: you can redistribute it and / or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Classes for resolving dependencies as specified in each ecosystem."""
from functools import cmp_to_key
import logging
from thoth.python import Source
_LOGGER = logging.getLogger(__name__)
class SolverException(Exception):
"""Exception to be raised in Solver."""
class Tokens(object):
"""Comparison token representation."""
operators = [">=", "<=", "==", ">", "<", "=", "!="]
(GTE, LTE, EQ1, GT, LT, EQ2, NEQ) = range(len(operators))
class ReleasesFetcher(object):
"""Base class for fetching releases."""
def fetch_releases(self, package):
"""Abstract method for getting list of releases versions."""
raise NotImplementedError
@property
def index_url(self):
"""Get URL to index from where releases are fetched."""
raise NotImplementedError
class Dependency(object):
"""A Dependency consists of (package) name and version spec."""
def __init__(self, name, spec):
"""Initialize instance."""
self._name = name
# spec is a list where each item is either 2-tuple (operator, version) or list of these
# example: [[('>=', '0.6.0'), ('<', '0.7.0')], ('>', '1.0.0')] means:
# (>=0.6.0 and <0.7.0) or >1.0.0
self._spec = spec
@property
def name(self):
"""Get name property."""
return self._name
@property
def spec(self):
"""Get version spec property."""
return self._spec
def __contains__(self, item):
"""Implement 'in' operator."""
return self.check(item[0])
def __repr__(self):
"""Return string representation of this instance."""
return "{} {}".format(self.name, self.spec)
def __eq__(self, other):
"""Implement '==' operator."""
return self.name == other.name and self.spec == other.spec
def check(self, version): # Ignore PyDocStyleBear
"""Check if `version` fits into our dependency specification.
:param version: str
:return: bool
"""
def _compare_spec(spec):
if len(spec) == 1:
spec = ("=", spec[0])
token = Tokens.operators.index(spec[0])
comparison = compare_version(version, spec[1])
if token in [Tokens.EQ1, Tokens.EQ2]:
return comparison == 0
elif token == Tokens.GT:
return comparison == 1
elif token == Tokens.LT:
return comparison == -1
elif token == Tokens.GTE:
return comparison >= 0
elif token == Tokens.LTE:
return comparison <= 0
elif token == Tokens.NEQ:
return comparison != 0
else:
raise ValueError("Invalid comparison token")
results, intermediaries = False, False
for spec in self.spec:
if isinstance(spec, list):
intermediary = True
for sub in spec:
intermediary &= _compare_spec(sub)
intermediaries |= intermediary
elif isinstance(spec, tuple):
results |= _compare_spec(spec)
return results or intermediaries
class DependencyParser(object):
"""Base class for Dependency parsing."""
def __init__(self, **parser_kwargs):
"""Construct dependency parser."""
if parser_kwargs:
raise NotImplementedError
def parse(self, specs):
"""Abstract method for Dependency parsing."""
pass
@staticmethod
def compose_sep(deps, separator):
"""Opposite of parse().
:param deps: list of Dependency()
:param separator: when joining dependencies, use this separator
:return: dict of {name: version spec}
"""
result = {}
for dep in deps:
if dep.name not in result:
result[dep.name] = separator.join([op + ver for op, ver in dep.spec])
else:
result[dep.name] += separator + separator.join([op + ver for op, ver in dep.spec])
return result
class NoOpDependencyParser(DependencyParser):
"""Dummy dependency parser for ecosystems that don't support version ranges."""
def parse(self, specs):
"""Transform list of dependencies (strings) to list of Dependency."""
return [Dependency(*x.split(" ")) for x in specs]
@staticmethod
def compose(deps):
"""Opposite of parse()."""
return DependencyParser.compose_sep(deps, " ")
@staticmethod
def restrict_versions(deps):
"""Not implemented."""
return deps
class Solver(object):
"""Base class for resolving dependencies."""
def __init__(self, dep_parser=None, fetcher=None, highest_dependency_version=True):
"""Initialize instance."""
self._dependency_parser = dep_parser
self._release_fetcher = fetcher
self._highest_dependency_version = highest_dependency_version
@property
def dependency_parser(self):
"""Return DependencyParser instance used by this solver."""
return self._dependency_parser
@property
def release_fetcher(self):
"""Return ReleasesFetcher instance used by this solver."""
return self._release_fetcher
def solve(self, dependencies, graceful=True, all_versions=False): # Ignore PyDocStyleBear
"""Solve `dependencies` against upstream repository.
:param dependencies: List, List of dependencies in native format
:param graceful: bool, Print info output to stdout
:param all_versions: bool, Return all matched versions instead of the latest
:return: Dict[str, str], Matched versions
"""
def _compare_version_index_url(v1, v2):
"""Get a wrapper around compare version to omit index url when sorting."""
return compare_version(v1[0], v2[0])
solved = {}
for dep in self.dependency_parser.parse(dependencies):
_LOGGER.debug("Fetching releases for: {}".format(dep))
name, releases = self.release_fetcher.fetch_releases(dep.name)
if name in solved:
raise SolverException("Dependency: {} is listed multiple times".format(name))
if not releases:
if graceful:
_LOGGER.info("No releases found for package %s", dep.name)
else:
raise SolverException("No releases found for package {}".format(dep.name))
releases = [release for release in releases if release in dep]
matching = sorted(releases, key=cmp_to_key(_compare_version_index_url))
_LOGGER.debug(" matching: %s", matching)
if all_versions:
solved[name] = matching
else:
if not matching:
solved[name] = None
else:
if self._highest_dependency_version:
solved[name] = matching[-1]
else:
solved[name] = matching[0]
return solved
def get_ecosystem_solver(ecosystem_name, parser_kwargs=None, fetcher_kwargs=None):
    """Get Solver subclass instance for particular ecosystem.

    :param ecosystem_name: name of ecosystem for which solver should be get
    :param parser_kwargs: parser key-value arguments for constructor
    :param fetcher_kwargs: fetcher key-value arguments for constructor
    :return: Solver
    """
    from .python import PythonSolver

    # Guard clause: only the PyPI ecosystem is currently supported.
    if ecosystem_name.lower() != "pypi":
        raise NotImplementedError("Unknown ecosystem: {}".format(ecosystem_name))

    # NOTE(review): the fetcher_kwargs argument is ignored here and a fixed
    # PyPI Source is always used — same as the original behavior.
    source = Source(url="https://pypi.org/simple", warehouse_api_url="https://pypi.org/pypi", warehouse=True)
    return PythonSolver(parser_kwargs, fetcher_kwargs={"source": source})
|
thoth-station/solver
|
thoth/solver/python/base.py
|
get_ecosystem_solver
|
python
|
def get_ecosystem_solver(ecosystem_name, parser_kwargs=None, fetcher_kwargs=None):
from .python import PythonSolver
if ecosystem_name.lower() == "pypi":
source = Source(url="https://pypi.org/simple", warehouse_api_url="https://pypi.org/pypi", warehouse=True)
return PythonSolver(parser_kwargs, fetcher_kwargs={"source": source})
raise NotImplementedError("Unknown ecosystem: {}".format(ecosystem_name))
|
Get Solver subclass instance for particular ecosystem.
:param ecosystem_name: name of ecosystem for which solver should be get
:param parser_kwargs: parser key-value arguments for constructor
:param fetcher_kwargs: fetcher key-value arguments for constructor
:return: Solver
|
train
|
https://github.com/thoth-station/solver/blob/de9bd6e744cb4d5f70320ba77d6875ccb8b876c4/thoth/solver/python/base.py#L297-L311
| null |
#!/usr/bin/env python3
# thoth-solver
# Copyright(C) 2018, 2019 Fridolin Pokorny
#
# This program is free software: you can redistribute it and / or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Classes for resolving dependencies as specified in each ecosystem."""
from functools import cmp_to_key
import logging
from thoth.python import Source
_LOGGER = logging.getLogger(__name__)
class SolverException(Exception):
"""Exception to be raised in Solver."""
class Tokens(object):
"""Comparison token representation."""
operators = [">=", "<=", "==", ">", "<", "=", "!="]
(GTE, LTE, EQ1, GT, LT, EQ2, NEQ) = range(len(operators))
def compare_version(a, b): # Ignore PyDocStyleBear
"""Compare two version strings.
:param a: str
:param b: str
:return: -1 / 0 / 1
"""
def _range(q):
"""Convert a version string to array of integers.
"1.2.3" -> [1, 2, 3]
:param q: str
:return: List[int]
"""
r = []
for n in q.replace("-", ".").split("."):
try:
r.append(int(n))
except ValueError:
# sort rc*, alpha, beta etc. lower than their non-annotated counterparts
r.append(-1)
return r
def _append_zeros(x, num_zeros):
"""Append `num_zeros` zeros to a copy of `x` and return it.
:param x: List[int]
:param num_zeros: int
:return: List[int]
"""
nx = list(x)
for _ in range(num_zeros):
nx.append(0)
return nx
def _cardinal(x, y):
"""Make both input lists be of same cardinality.
:param x: List[int]
:param y: List[int]
:return: List[int]
"""
lx, ly = len(x), len(y)
if lx == ly:
return x, y
elif lx > ly:
return x, _append_zeros(y, lx - ly)
else:
return _append_zeros(x, ly - lx), y
left, right = _cardinal(_range(a), _range(b))
return (left > right) - (left < right)
class ReleasesFetcher(object):
"""Base class for fetching releases."""
def fetch_releases(self, package):
"""Abstract method for getting list of releases versions."""
raise NotImplementedError
@property
def index_url(self):
"""Get URL to index from where releases are fetched."""
raise NotImplementedError
class Dependency(object):
"""A Dependency consists of (package) name and version spec."""
def __init__(self, name, spec):
"""Initialize instance."""
self._name = name
# spec is a list where each item is either 2-tuple (operator, version) or list of these
# example: [[('>=', '0.6.0'), ('<', '0.7.0')], ('>', '1.0.0')] means:
# (>=0.6.0 and <0.7.0) or >1.0.0
self._spec = spec
@property
def name(self):
"""Get name property."""
return self._name
@property
def spec(self):
"""Get version spec property."""
return self._spec
def __contains__(self, item):
"""Implement 'in' operator."""
return self.check(item[0])
def __repr__(self):
"""Return string representation of this instance."""
return "{} {}".format(self.name, self.spec)
def __eq__(self, other):
"""Implement '==' operator."""
return self.name == other.name and self.spec == other.spec
def check(self, version): # Ignore PyDocStyleBear
"""Check if `version` fits into our dependency specification.
:param version: str
:return: bool
"""
def _compare_spec(spec):
if len(spec) == 1:
spec = ("=", spec[0])
token = Tokens.operators.index(spec[0])
comparison = compare_version(version, spec[1])
if token in [Tokens.EQ1, Tokens.EQ2]:
return comparison == 0
elif token == Tokens.GT:
return comparison == 1
elif token == Tokens.LT:
return comparison == -1
elif token == Tokens.GTE:
return comparison >= 0
elif token == Tokens.LTE:
return comparison <= 0
elif token == Tokens.NEQ:
return comparison != 0
else:
raise ValueError("Invalid comparison token")
results, intermediaries = False, False
for spec in self.spec:
if isinstance(spec, list):
intermediary = True
for sub in spec:
intermediary &= _compare_spec(sub)
intermediaries |= intermediary
elif isinstance(spec, tuple):
results |= _compare_spec(spec)
return results or intermediaries
class DependencyParser(object):
    """Base class for Dependency parsing."""

    def __init__(self, **parser_kwargs):
        """Construct dependency parser; subclasses must handle any keyword arguments."""
        if parser_kwargs:
            raise NotImplementedError

    def parse(self, specs):
        """Abstract method for Dependency parsing."""
        pass

    @staticmethod
    def compose_sep(deps, separator):
        """Opposite of parse().

        :param deps: list of Dependency()
        :param separator: when joining dependencies, use this separator
        :return: dict of {name: version spec}
        """
        composed = {}
        for dependency in deps:
            fragment = separator.join(op + ver for op, ver in dependency.spec)
            if dependency.name in composed:
                # Same package listed again: append its spec to the existing entry.
                composed[dependency.name] += separator + fragment
            else:
                composed[dependency.name] = fragment
        return composed
class NoOpDependencyParser(DependencyParser):
"""Dummy dependency parser for ecosystems that don't support version ranges."""
def parse(self, specs):
"""Transform list of dependencies (strings) to list of Dependency."""
return [Dependency(*x.split(" ")) for x in specs]
@staticmethod
def compose(deps):
"""Opposite of parse()."""
return DependencyParser.compose_sep(deps, " ")
@staticmethod
def restrict_versions(deps):
"""Not implemented."""
return deps
class Solver(object):
"""Base class for resolving dependencies."""
def __init__(self, dep_parser=None, fetcher=None, highest_dependency_version=True):
"""Initialize instance."""
self._dependency_parser = dep_parser
self._release_fetcher = fetcher
self._highest_dependency_version = highest_dependency_version
@property
def dependency_parser(self):
"""Return DependencyParser instance used by this solver."""
return self._dependency_parser
@property
def release_fetcher(self):
"""Return ReleasesFetcher instance used by this solver."""
return self._release_fetcher
def solve(self, dependencies, graceful=True, all_versions=False): # Ignore PyDocStyleBear
"""Solve `dependencies` against upstream repository.
:param dependencies: List, List of dependencies in native format
:param graceful: bool, Print info output to stdout
:param all_versions: bool, Return all matched versions instead of the latest
:return: Dict[str, str], Matched versions
"""
def _compare_version_index_url(v1, v2):
"""Get a wrapper around compare version to omit index url when sorting."""
return compare_version(v1[0], v2[0])
solved = {}
for dep in self.dependency_parser.parse(dependencies):
_LOGGER.debug("Fetching releases for: {}".format(dep))
name, releases = self.release_fetcher.fetch_releases(dep.name)
if name in solved:
raise SolverException("Dependency: {} is listed multiple times".format(name))
if not releases:
if graceful:
_LOGGER.info("No releases found for package %s", dep.name)
else:
raise SolverException("No releases found for package {}".format(dep.name))
releases = [release for release in releases if release in dep]
matching = sorted(releases, key=cmp_to_key(_compare_version_index_url))
_LOGGER.debug(" matching: %s", matching)
if all_versions:
solved[name] = matching
else:
if not matching:
solved[name] = None
else:
if self._highest_dependency_version:
solved[name] = matching[-1]
else:
solved[name] = matching[0]
return solved
|
thoth-station/solver
|
thoth/solver/python/base.py
|
Dependency.check
|
python
|
def check(self, version): # Ignore PyDocStyleBear
def _compare_spec(spec):
if len(spec) == 1:
spec = ("=", spec[0])
token = Tokens.operators.index(spec[0])
comparison = compare_version(version, spec[1])
if token in [Tokens.EQ1, Tokens.EQ2]:
return comparison == 0
elif token == Tokens.GT:
return comparison == 1
elif token == Tokens.LT:
return comparison == -1
elif token == Tokens.GTE:
return comparison >= 0
elif token == Tokens.LTE:
return comparison <= 0
elif token == Tokens.NEQ:
return comparison != 0
else:
raise ValueError("Invalid comparison token")
results, intermediaries = False, False
for spec in self.spec:
if isinstance(spec, list):
intermediary = True
for sub in spec:
intermediary &= _compare_spec(sub)
intermediaries |= intermediary
elif isinstance(spec, tuple):
results |= _compare_spec(spec)
return results or intermediaries
|
Check if `version` fits into our dependency specification.
:param version: str
:return: bool
|
train
|
https://github.com/thoth-station/solver/blob/de9bd6e744cb4d5f70320ba77d6875ccb8b876c4/thoth/solver/python/base.py#L143-L181
|
[
"def _compare_spec(spec):\n if len(spec) == 1:\n spec = (\"=\", spec[0])\n\n token = Tokens.operators.index(spec[0])\n comparison = compare_version(version, spec[1])\n if token in [Tokens.EQ1, Tokens.EQ2]:\n return comparison == 0\n elif token == Tokens.GT:\n return comparison == 1\n elif token == Tokens.LT:\n return comparison == -1\n elif token == Tokens.GTE:\n return comparison >= 0\n elif token == Tokens.LTE:\n return comparison <= 0\n elif token == Tokens.NEQ:\n return comparison != 0\n else:\n raise ValueError(\"Invalid comparison token\")\n"
] |
class Dependency(object):
    """A Dependency consists of (package) name and version spec."""

    def __init__(self, name, spec):
        """Store the package name and its version specification.

        spec is a list where each item is either a 2-tuple (operator, version)
        or a list of such tuples; example:
            [[('>=', '0.6.0'), ('<', '0.7.0')], ('>', '1.0.0')]
        means: (>=0.6.0 and <0.7.0) or >1.0.0
        """
        self._name = name
        self._spec = spec

    @property
    def name(self):
        """Return the package name."""
        return self._name

    @property
    def spec(self):
        """Return the version specification."""
        return self._spec

    def __contains__(self, item):
        """Implement 'in' operator."""
        return self.check(item[0])

    def __repr__(self):
        """Return string representation of this instance."""
        return "{} {}".format(self.name, self.spec)

    def __eq__(self, other):
        """Implement '==' operator via name/spec comparison."""
        return (self.name, self.spec) == (other.name, other.spec)
|
thoth-station/solver
|
thoth/solver/python/base.py
|
DependencyParser.compose_sep
|
python
|
def compose_sep(deps, separator):
result = {}
for dep in deps:
if dep.name not in result:
result[dep.name] = separator.join([op + ver for op, ver in dep.spec])
else:
result[dep.name] += separator + separator.join([op + ver for op, ver in dep.spec])
return result
|
Opposite of parse().
:param deps: list of Dependency()
:param separator: when joining dependencies, use this separator
:return: dict of {name: version spec}
|
train
|
https://github.com/thoth-station/solver/blob/de9bd6e744cb4d5f70320ba77d6875ccb8b876c4/thoth/solver/python/base.py#L197-L210
| null |
class DependencyParser(object):
"""Base class for Dependency parsing."""
def __init__(self, **parser_kwargs):
"""Construct dependency parser."""
if parser_kwargs:
raise NotImplementedError
def parse(self, specs):
"""Abstract method for Dependency parsing."""
pass
@staticmethod
|
thoth-station/solver
|
thoth/solver/python/base.py
|
Solver.solve
|
python
|
def solve(self, dependencies, graceful=True, all_versions=False): # Ignore PyDocStyleBear
def _compare_version_index_url(v1, v2):
"""Get a wrapper around compare version to omit index url when sorting."""
return compare_version(v1[0], v2[0])
solved = {}
for dep in self.dependency_parser.parse(dependencies):
_LOGGER.debug("Fetching releases for: {}".format(dep))
name, releases = self.release_fetcher.fetch_releases(dep.name)
if name in solved:
raise SolverException("Dependency: {} is listed multiple times".format(name))
if not releases:
if graceful:
_LOGGER.info("No releases found for package %s", dep.name)
else:
raise SolverException("No releases found for package {}".format(dep.name))
releases = [release for release in releases if release in dep]
matching = sorted(releases, key=cmp_to_key(_compare_version_index_url))
_LOGGER.debug(" matching: %s", matching)
if all_versions:
solved[name] = matching
else:
if not matching:
solved[name] = None
else:
if self._highest_dependency_version:
solved[name] = matching[-1]
else:
solved[name] = matching[0]
return solved
|
Solve `dependencies` against upstream repository.
:param dependencies: List, List of dependencies in native format
:param graceful: bool, Print info output to stdout
:param all_versions: bool, Return all matched versions instead of the latest
:return: Dict[str, str], Matched versions
|
train
|
https://github.com/thoth-station/solver/blob/de9bd6e744cb4d5f70320ba77d6875ccb8b876c4/thoth/solver/python/base.py#L250-L294
| null |
class Solver(object):
"""Base class for resolving dependencies."""
def __init__(self, dep_parser=None, fetcher=None, highest_dependency_version=True):
"""Initialize instance."""
self._dependency_parser = dep_parser
self._release_fetcher = fetcher
self._highest_dependency_version = highest_dependency_version
@property
def dependency_parser(self):
"""Return DependencyParser instance used by this solver."""
return self._dependency_parser
@property
def release_fetcher(self):
"""Return ReleasesFetcher instance used by this solver."""
return self._release_fetcher
|
thoth-station/solver
|
thoth/solver/compile.py
|
pip_compile
|
python
|
def pip_compile(*packages: str):
    """Run pip-compile to pin down packages, also resolve their transitive dependencies.

    :param packages: requirement lines to resolve
    :return: the pinned requirements as text
    :raises ThothPipCompileError: if pip-compile fails or exits non-zero
    """
    requirements_content = "\n".join(packages)
    result = None

    # Work inside a throw-away directory so pip-compile's files don't pollute cwd.
    with tempfile.TemporaryDirectory() as tmp_dirname, cwd(tmp_dirname):
        with open("requirements.in", "w") as requirements_file:
            requirements_file.write(requirements_content)

        runner = CliRunner()
        try:
            result = runner.invoke(cli, ["requirements.in"], catch_exceptions=False)
        except Exception as exc:
            raise ThothPipCompileError(str(exc)) from exc

    if result.exit_code != 0:
        error_msg = (
            f"pip-compile returned non-zero ({result.exit_code:d}) " f"output: {result.output_bytes.decode():s}"
        )
        raise ThothPipCompileError(error_msg)

    return result.output_bytes.decode()
|
Run pip-compile to pin down packages, also resolve their transitive dependencies.
|
train
|
https://github.com/thoth-station/solver/blob/de9bd6e744cb4d5f70320ba77d6875ccb8b876c4/thoth/solver/compile.py#L30-L52
| null |
#!/usr/bin/env python3
# thoth-solver
# Copyright(C) 2018, 2019 Fridolin Pokorny
#
# This program is free software: you can redistribute it and / or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Run pip-compile and verify result."""
import tempfile
from piptools.scripts.compile import cli
from click.testing import CliRunner
from thoth.common import cwd
from .exceptions import ThothPipCompileError
|
thoth-station/solver
|
thoth/solver/cli.py
|
_print_version
|
python
|
def _print_version(ctx, _, value):
if not value or ctx.resilient_parsing:
return
click.echo(analyzer_version)
ctx.exit()
|
Print solver version and exit.
|
train
|
https://github.com/thoth-station/solver/blob/de9bd6e744cb4d5f70320ba77d6875ccb8b876c4/thoth/solver/cli.py#L37-L42
| null |
#!/usr/bin/env python3
# thoth-solver
# Copyright(C) 2018, 2019 Fridolin Pokorny
#
# This program is free software: you can redistribute it and / or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Thoth-solver CLI."""
import sys
import click
import logging
from thoth.analyzer import print_command_result
from thoth.common import init_logging
from thoth.solver import __title__ as analyzer_name
from thoth.solver import __version__ as analyzer_version
from thoth.solver.python import resolve as resolve_python
init_logging()
_LOG = logging.getLogger(__name__)
@click.group()
@click.pass_context
@click.option("-v", "--verbose", is_flag=True, envvar="THOTH_SOLVER_DEBUG", help="Be verbose about what's going on.")
@click.option(
"--version",
is_flag=True,
is_eager=True,
callback=_print_version,
expose_value=False,
help="Print solver version and exit.",
)
def cli(ctx=None, verbose=0):
"""Thoth solver command line interface."""
if ctx:
ctx.auto_envvar_prefix = "THOTH_SOLVER"
if verbose:
_LOG.setLevel(logging.DEBUG)
_LOG.debug("Debug mode is on")
@cli.command()
@click.pass_context
@click.option(
"--requirements", "-r", type=str, envvar="THOTH_SOLVER_PACKAGES", required=True, help="Requirements to be solved."
)
@click.option(
"--index",
"-i",
type=str,
envvar="THOTH_SOLVER_INDEXES",
show_default=True,
default="https://pypi.org/simple",
help="A comma separated list of Python indexes to be used when resolving version ranges.",
)
@click.option(
"--output",
"-o",
type=str,
envvar="THOTH_SOLVER_OUTPUT",
default="-",
help="Output file or remote API to print results to, in case of URL a POST request is issued.",
)
@click.option("--no-pretty", "-P", is_flag=True, help="Do not print results nicely.")
@click.option(
"--exclude-packages",
"-e",
type=str,
metavar="PKG1,PKG2",
help="A comma separated list of packages that should be excluded from the final listing.",
)
@click.option(
"--no-transitive",
"-T",
is_flag=True,
envvar="THOTH_SOLVER_NO_TRANSITIVE",
help="Do not check transitive dependencies, run only on provided requirements.",
)
@click.option(
"--subgraph-check-api",
type=str,
envvar="THOTH_SOLVER_SUBGRAPH_CHECK_API",
help="An API to be queried to retrieve information whether the given subgraph should be resolved.",
)
def pypi(
click_ctx,
requirements,
index=None,
python_version=3,
exclude_packages=None,
output=None,
subgraph_check_api=None,
no_transitive=True,
no_pretty=False,
):
"""Manipulate with dependency requirements using PyPI."""
requirements = [requirement.strip() for requirement in requirements.split("\\n") if requirement]
if not requirements:
_LOG.error("No requirements specified, exiting")
sys.exit(1)
if not subgraph_check_api:
_LOG.info(
"No subgraph check API provided, no queries will be done for dependency subgraphs that should be avoided"
) # Ignore PycodestyleBear (E501)
result = resolve_python(
requirements,
index_urls=index.split(",") if index else ("https://pypi.org/simple",),
python_version=int(python_version),
transitive=not no_transitive,
exclude_packages=set(map(str.strip, (exclude_packages or "").split(","))),
subgraph_check_api=subgraph_check_api,
)
print_command_result(
click_ctx,
result,
analyzer=analyzer_name,
analyzer_version=analyzer_version,
output=output or "-",
pretty=not no_pretty,
)
if __name__ == "__main__":
cli()
|
thoth-station/solver
|
thoth/solver/cli.py
|
cli
|
python
|
def cli(ctx=None, verbose=0):
if ctx:
ctx.auto_envvar_prefix = "THOTH_SOLVER"
if verbose:
_LOG.setLevel(logging.DEBUG)
_LOG.debug("Debug mode is on")
|
Thoth solver command line interface.
|
train
|
https://github.com/thoth-station/solver/blob/de9bd6e744cb4d5f70320ba77d6875ccb8b876c4/thoth/solver/cli.py#L56-L64
| null |
#!/usr/bin/env python3
# thoth-solver
# Copyright(C) 2018, 2019 Fridolin Pokorny
#
# This program is free software: you can redistribute it and / or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Thoth-solver CLI."""
import sys
import click
import logging
from thoth.analyzer import print_command_result
from thoth.common import init_logging
from thoth.solver import __title__ as analyzer_name
from thoth.solver import __version__ as analyzer_version
from thoth.solver.python import resolve as resolve_python
init_logging()
_LOG = logging.getLogger(__name__)
def _print_version(ctx, _, value):
"""Print solver version and exit."""
if not value or ctx.resilient_parsing:
return
click.echo(analyzer_version)
ctx.exit()
@click.group()
@click.pass_context
@click.option("-v", "--verbose", is_flag=True, envvar="THOTH_SOLVER_DEBUG", help="Be verbose about what's going on.")
@click.option(
"--version",
is_flag=True,
is_eager=True,
callback=_print_version,
expose_value=False,
help="Print solver version and exit.",
)
@cli.command()
@click.pass_context
@click.option(
"--requirements", "-r", type=str, envvar="THOTH_SOLVER_PACKAGES", required=True, help="Requirements to be solved."
)
@click.option(
"--index",
"-i",
type=str,
envvar="THOTH_SOLVER_INDEXES",
show_default=True,
default="https://pypi.org/simple",
help="A comma separated list of Python indexes to be used when resolving version ranges.",
)
@click.option(
"--output",
"-o",
type=str,
envvar="THOTH_SOLVER_OUTPUT",
default="-",
help="Output file or remote API to print results to, in case of URL a POST request is issued.",
)
@click.option("--no-pretty", "-P", is_flag=True, help="Do not print results nicely.")
@click.option(
"--exclude-packages",
"-e",
type=str,
metavar="PKG1,PKG2",
help="A comma separated list of packages that should be excluded from the final listing.",
)
@click.option(
"--no-transitive",
"-T",
is_flag=True,
envvar="THOTH_SOLVER_NO_TRANSITIVE",
help="Do not check transitive dependencies, run only on provided requirements.",
)
@click.option(
"--subgraph-check-api",
type=str,
envvar="THOTH_SOLVER_SUBGRAPH_CHECK_API",
help="An API to be queried to retrieve information whether the given subgraph should be resolved.",
)
def pypi(
click_ctx,
requirements,
index=None,
python_version=3,
exclude_packages=None,
output=None,
subgraph_check_api=None,
no_transitive=True,
no_pretty=False,
):
"""Manipulate with dependency requirements using PyPI."""
requirements = [requirement.strip() for requirement in requirements.split("\\n") if requirement]
if not requirements:
_LOG.error("No requirements specified, exiting")
sys.exit(1)
if not subgraph_check_api:
_LOG.info(
"No subgraph check API provided, no queries will be done for dependency subgraphs that should be avoided"
) # Ignore PycodestyleBear (E501)
result = resolve_python(
requirements,
index_urls=index.split(",") if index else ("https://pypi.org/simple",),
python_version=int(python_version),
transitive=not no_transitive,
exclude_packages=set(map(str.strip, (exclude_packages or "").split(","))),
subgraph_check_api=subgraph_check_api,
)
print_command_result(
click_ctx,
result,
analyzer=analyzer_name,
analyzer_version=analyzer_version,
output=output or "-",
pretty=not no_pretty,
)
if __name__ == "__main__":
cli()
|
thoth-station/solver
|
thoth/solver/cli.py
|
pypi
|
python
|
def pypi(
click_ctx,
requirements,
index=None,
python_version=3,
exclude_packages=None,
output=None,
subgraph_check_api=None,
no_transitive=True,
no_pretty=False,
):
requirements = [requirement.strip() for requirement in requirements.split("\\n") if requirement]
if not requirements:
_LOG.error("No requirements specified, exiting")
sys.exit(1)
if not subgraph_check_api:
_LOG.info(
"No subgraph check API provided, no queries will be done for dependency subgraphs that should be avoided"
) # Ignore PycodestyleBear (E501)
result = resolve_python(
requirements,
index_urls=index.split(",") if index else ("https://pypi.org/simple",),
python_version=int(python_version),
transitive=not no_transitive,
exclude_packages=set(map(str.strip, (exclude_packages or "").split(","))),
subgraph_check_api=subgraph_check_api,
)
print_command_result(
click_ctx,
result,
analyzer=analyzer_name,
analyzer_version=analyzer_version,
output=output or "-",
pretty=not no_pretty,
)
|
Manipulate with dependency requirements using PyPI.
|
train
|
https://github.com/thoth-station/solver/blob/de9bd6e744cb4d5f70320ba77d6875ccb8b876c4/thoth/solver/cli.py#L110-L149
| null |
#!/usr/bin/env python3
# thoth-solver
# Copyright(C) 2018, 2019 Fridolin Pokorny
#
# This program is free software: you can redistribute it and / or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Thoth-solver CLI."""
import sys
import click
import logging
from thoth.analyzer import print_command_result
from thoth.common import init_logging
from thoth.solver import __title__ as analyzer_name
from thoth.solver import __version__ as analyzer_version
from thoth.solver.python import resolve as resolve_python
init_logging()
_LOG = logging.getLogger(__name__)
def _print_version(ctx, _, value):
"""Print solver version and exit."""
if not value or ctx.resilient_parsing:
return
click.echo(analyzer_version)
ctx.exit()
@click.group()
@click.pass_context
@click.option("-v", "--verbose", is_flag=True, envvar="THOTH_SOLVER_DEBUG", help="Be verbose about what's going on.")
@click.option(
"--version",
is_flag=True,
is_eager=True,
callback=_print_version,
expose_value=False,
help="Print solver version and exit.",
)
def cli(ctx=None, verbose=0):
"""Thoth solver command line interface."""
if ctx:
ctx.auto_envvar_prefix = "THOTH_SOLVER"
if verbose:
_LOG.setLevel(logging.DEBUG)
_LOG.debug("Debug mode is on")
@cli.command()
@click.pass_context
@click.option(
"--requirements", "-r", type=str, envvar="THOTH_SOLVER_PACKAGES", required=True, help="Requirements to be solved."
)
@click.option(
"--index",
"-i",
type=str,
envvar="THOTH_SOLVER_INDEXES",
show_default=True,
default="https://pypi.org/simple",
help="A comma separated list of Python indexes to be used when resolving version ranges.",
)
@click.option(
"--output",
"-o",
type=str,
envvar="THOTH_SOLVER_OUTPUT",
default="-",
help="Output file or remote API to print results to, in case of URL a POST request is issued.",
)
@click.option("--no-pretty", "-P", is_flag=True, help="Do not print results nicely.")
@click.option(
"--exclude-packages",
"-e",
type=str,
metavar="PKG1,PKG2",
help="A comma separated list of packages that should be excluded from the final listing.",
)
@click.option(
"--no-transitive",
"-T",
is_flag=True,
envvar="THOTH_SOLVER_NO_TRANSITIVE",
help="Do not check transitive dependencies, run only on provided requirements.",
)
@click.option(
"--subgraph-check-api",
type=str,
envvar="THOTH_SOLVER_SUBGRAPH_CHECK_API",
help="An API to be queried to retrieve information whether the given subgraph should be resolved.",
)
if __name__ == "__main__":
cli()
|
thoth-station/solver
|
thoth/solver/python/python.py
|
_create_entry
|
python
|
def _create_entry(entry: dict, source: Source = None) -> dict:
entry["package_name"] = entry["package"].pop("package_name")
entry["package_version"] = entry["package"].pop("installed_version")
if source:
entry["index_url"] = source.url
entry["sha256"] = []
for item in source.get_package_hashes(entry["package_name"], entry["package_version"]):
entry["sha256"].append(item["sha256"])
entry.pop("package")
for dependency in entry["dependencies"]:
dependency.pop("key", None)
dependency.pop("installed_version", None)
return entry
|
Filter and normalize the output of pipdeptree entry.
|
train
|
https://github.com/thoth-station/solver/blob/de9bd6e744cb4d5f70320ba77d6875ccb8b876c4/thoth/solver/python/python.py#L40-L56
| null |
#!/usr/bin/env python3
# thoth-solver
# Copyright(C) 2018, 2019 Fridolin Pokorny
#
# This program is free software: you can redistribute it and / or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Dependency requirements solving for Python ecosystem."""
import sys
from collections import deque
from contextlib import contextmanager
import logging
import typing
from shlex import quote
from urllib.parse import urlparse
import requests
from thoth.analyzer import CommandError
from thoth.analyzer import run_command
from thoth.python import Source
from thoth.python.exceptions import NotFound
from .python_solver import PythonDependencyParser
from .python_solver import PythonSolver
_LOGGER = logging.getLogger(__name__)
def _get_environment_details(python_bin: str) -> list:
"""Get information about packages in environment where packages get installed."""
cmd = "{} -m pipdeptree --json".format(python_bin)
output = run_command(cmd, is_json=True).stdout
return [_create_entry(entry) for entry in output]
def _should_resolve_subgraph(subgraph_check_api: str, package_name: str, package_version: str, index_url: str) -> bool:
"""Ask the given subgraph check API if the given package in the given version should be included in the resolution.
This subgraph resolving avoidence serves two purposes - we don't need to
resolve dependency subgraphs that were already analyzed and we also avoid
analyzing of "core" packages (like setuptools) where not needed as they
can break installation environment.
"""
_LOGGER.info(
"Checking if the given dependency subgraph for package %r in version %r from index %r should be resolved",
package_name,
package_version,
index_url,
)
response = requests.get(
subgraph_check_api,
params={"package_name": package_name, "package_version": package_version, "index_url": index_url},
)
if response.status_code == 200:
return True
elif response.status_code == 208:
# This is probably not the correct HTTP status code to be used here, but which one should be used?
return False
response.raise_for_status()
raise ValueError(
"Unreachable code - subgraph check API responded with unknown HTTP status "
"code %s for package %r in version %r from index %r",
package_name,
package_version,
index_url,
)
@contextmanager
def _install_requirement(
python_bin: str, package: str, version: str = None, index_url: str = None, clean: bool = True
) -> None:
"""Install requirements specified using suggested pip binary."""
previous_version = _pipdeptree(python_bin, package)
try:
cmd = "{} -m pip install --force-reinstall --no-cache-dir --no-deps {}".format(python_bin, quote(package))
if version:
cmd += "=={}".format(quote(version))
if index_url:
cmd += ' --index-url "{}" '.format(quote(index_url))
# Supply trusted host by default so we do not get errors - it safe to
# do it here as package indexes are managed by Thoth.
trusted_host = urlparse(index_url).netloc
cmd += " --trusted-host {}".format(trusted_host)
_LOGGER.debug("Installing requirement %r in version %r", package, version)
run_command(cmd)
yield
finally:
if clean:
_LOGGER.debug("Removing installed package %r", package)
cmd = "{} -m pip uninstall --yes {}".format(python_bin, quote(package))
result = run_command(cmd, raise_on_error=False)
if result.return_code != 0:
_LOGGER.warning(
"Failed to restore previous environment by removing package %r (installed version %r), "
"the error is not fatal but can affect future actions: %s",
package,
version,
result.stderr,
)
_LOGGER.debug(
"Restoring previous environment setup after installation of %r (%s)", package, previous_version
)
if previous_version:
cmd = "{} -m pip install --force-reinstall --no-cache-dir --no-deps {}=={}".format(
python_bin, quote(package), quote(previous_version["package"]["installed_version"])
)
result = run_command(cmd, raise_on_error=False)
if result.return_code != 0:
_LOGGER.warning(
"Failed to restore previous environment for package %r (installed version %r), "
", the error is not fatal but can affect future actions (previous version: %r): %s",
package,
version,
previous_version,
result.stderr,
)
def _pipdeptree(python_bin, package_name: str = None, warn: bool = False) -> typing.Optional[dict]:
"""Get pip dependency tree by executing pipdeptree tool."""
cmd = "{} -m pipdeptree --json".format(python_bin)
_LOGGER.debug("Obtaining pip dependency tree using: %r", cmd)
output = run_command(cmd, is_json=True).stdout
if not package_name:
return output
for entry in output:
# In some versions pipdeptree does not work with --packages flag, do the logic on out own.
# TODO: we should probably do difference of reference this output and original environment
if entry["package"]["key"].lower() == package_name.lower():
return entry
# The given package was not found.
if warn:
_LOGGER.warning("Package %r was not found in pipdeptree output %r", package_name, output)
return None
def _get_dependency_specification(dep_spec: typing.List[tuple]) -> str:
"""Get string representation of dependency specification as provided by PythonDependencyParser."""
return ",".join(dep_range[0] + dep_range[1] for dep_range in dep_spec)
def _resolve_versions(solver: PythonSolver, source: Source, package_name: str, version_spec: str) -> typing.List[str]:
try:
resolved_versions = solver.solve([package_name + (version_spec or "")], all_versions=True)
except NotFound:
_LOGGER.info(
"No versions were resovled for %r with version specification %r for package index %r",
package_name,
version_spec,
source.url,
)
return []
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Failed to resolve versions for %r with version spec %r", package_name, version_spec)
return []
assert len(resolved_versions.keys()) == 1, "Resolution of one package version ended with multiple packages."
result = []
for item in list(resolved_versions.values())[0]:
result.append(item[0]) # We remove information about indexes.
return result
def _do_resolve_index(
python_bin: str,
solver: PythonSolver,
*,
all_solvers: typing.List[PythonSolver],
requirements: typing.List[str],
exclude_packages: set = None,
transitive: bool = True,
subgraph_check_api: str = None,
) -> dict:
"""Perform resolution of requirements against the given solver."""
index_url = solver.release_fetcher.index_url
source = solver.release_fetcher.source
packages_seen = set()
packages = []
errors = []
unresolved = []
unparsed = []
exclude_packages = exclude_packages or {}
queue = deque()
for requirement in requirements:
_LOGGER.debug("Parsing requirement %r", requirement)
try:
dependency = PythonDependencyParser.parse_python(requirement)
except Exception as exc:
unparsed.append({"requirement": requirement, "details": str(exc)})
continue
if dependency.name in exclude_packages:
continue
version_spec = _get_dependency_specification(dependency.spec)
resolved_versions = _resolve_versions(solver, source, dependency.name, version_spec)
if not resolved_versions:
_LOGGER.warning("No versions were resolved for dependency %r in version %r", dependency.name, version_spec)
unresolved.append({"package_name": dependency.name, "version_spec": version_spec, "index": index_url})
else:
for version in resolved_versions:
entry = (dependency.name, version)
packages_seen.add(entry)
queue.append(entry)
while queue:
package_name, package_version = queue.pop()
_LOGGER.info("Using index %r to discover package %r in version %r", index_url, package_name, package_version)
try:
with _install_requirement(python_bin, package_name, package_version, index_url):
package_info = _pipdeptree(python_bin, package_name, warn=True)
except CommandError as exc:
_LOGGER.debug(
"There was an error during package %r in version %r discovery from %r: %s",
package_name,
package_version,
index_url,
exc,
)
errors.append(
{
"package_name": package_name,
"index": index_url,
"version": package_version,
"type": "command_error",
"details": exc.to_dict(),
}
)
continue
if package_info is None:
errors.append(
{
"package_name": package_name,
"index": index_url,
"version": package_version,
"type": "not_site_package",
"details": {
"message": "Failed to get information about installed package, probably not site package"
},
}
)
continue
if package_info["package"]["installed_version"] != package_version:
_LOGGER.warning(
"Requested to install version %r of package %r, but installed version is %r, error is not fatal",
package_version,
package_name,
package_info["package"]["installed_version"],
)
if package_info["package"]["package_name"] != package_name:
_LOGGER.warning(
"Requested to install package %r, but installed package name is %r, error is not fatal",
package_name,
package_info["package"]["package_name"],
)
entry = _create_entry(package_info, source)
packages.append(entry)
for dependency in entry["dependencies"]:
dependency_name, dependency_range = dependency["package_name"], dependency["required_version"]
dependency["resolved_versions"] = []
for dep_solver in all_solvers:
_LOGGER.info(
"Resolving dependency versions for %r with range %r from %r",
dependency_name,
dependency_range,
dep_solver.release_fetcher.index_url,
)
resolved_versions = _resolve_versions(
dep_solver, dep_solver.release_fetcher.source, dependency_name, dependency_range
)
_LOGGER.debug(
"Resolved versions for package %r with range specifier %r: %s",
dependency_name,
dependency_range,
resolved_versions,
)
dependency["resolved_versions"].append(
{"versions": resolved_versions, "index": dep_solver.release_fetcher.index_url}
)
if not transitive:
continue
for version in resolved_versions:
# Did we check this package already - do not check indexes, we manually insert them.
seen_entry = (dependency_name, version)
if seen_entry not in packages_seen and (
not subgraph_check_api
or (
subgraph_check_api
and _should_resolve_subgraph(subgraph_check_api, dependency_name, version, index_url)
)
):
_LOGGER.debug(
"Adding package %r in version %r for next resolution round", dependency_name, version
)
packages_seen.add(seen_entry)
queue.append((dependency_name, version))
return {"tree": packages, "errors": errors, "unparsed": unparsed, "unresolved": unresolved}
def resolve(
requirements: typing.List[str],
index_urls: list = None,
python_version: int = 3,
exclude_packages: set = None,
transitive: bool = True,
subgraph_check_api: str = None,
) -> dict:
"""Resolve given requirements for the given Python version."""
assert python_version in (2, 3), "Unknown Python version"
if subgraph_check_api and not transitive:
_LOGGER.error("The check against subgraph API cannot be done if no transitive dependencies are resolved")
sys.exit(2)
python_bin = "python3" if python_version == 3 else "python2"
run_command("virtualenv -p python3 venv")
python_bin = "venv/bin/" + python_bin
run_command("{} -m pip install pipdeptree".format(python_bin))
environment_details = _get_environment_details(python_bin)
result = {"tree": [], "errors": [], "unparsed": [], "unresolved": [], "environment": environment_details}
all_solvers = []
for index_url in index_urls:
source = Source(index_url)
all_solvers.append(PythonSolver(fetcher_kwargs={"source": source}))
for solver in all_solvers:
solver_result = _do_resolve_index(
python_bin=python_bin,
solver=solver,
all_solvers=all_solvers,
requirements=requirements,
exclude_packages=exclude_packages,
transitive=transitive,
subgraph_check_api=subgraph_check_api,
)
result["tree"].extend(solver_result["tree"])
result["errors"].extend(solver_result["errors"])
result["unparsed"].extend(solver_result["unparsed"])
result["unresolved"].extend(solver_result["unresolved"])
return result
|
thoth-station/solver
|
thoth/solver/python/python.py
|
_get_environment_details
|
python
|
def _get_environment_details(python_bin: str) -> list:
cmd = "{} -m pipdeptree --json".format(python_bin)
output = run_command(cmd, is_json=True).stdout
return [_create_entry(entry) for entry in output]
|
Get information about packages in environment where packages get installed.
|
train
|
https://github.com/thoth-station/solver/blob/de9bd6e744cb4d5f70320ba77d6875ccb8b876c4/thoth/solver/python/python.py#L59-L63
| null |
#!/usr/bin/env python3
# thoth-solver
# Copyright(C) 2018, 2019 Fridolin Pokorny
#
# This program is free software: you can redistribute it and / or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Dependency requirements solving for Python ecosystem."""
import sys
from collections import deque
from contextlib import contextmanager
import logging
import typing
from shlex import quote
from urllib.parse import urlparse
import requests
from thoth.analyzer import CommandError
from thoth.analyzer import run_command
from thoth.python import Source
from thoth.python.exceptions import NotFound
from .python_solver import PythonDependencyParser
from .python_solver import PythonSolver
_LOGGER = logging.getLogger(__name__)
def _create_entry(entry: dict, source: Source = None) -> dict:
"""Filter and normalize the output of pipdeptree entry."""
entry["package_name"] = entry["package"].pop("package_name")
entry["package_version"] = entry["package"].pop("installed_version")
if source:
entry["index_url"] = source.url
entry["sha256"] = []
for item in source.get_package_hashes(entry["package_name"], entry["package_version"]):
entry["sha256"].append(item["sha256"])
entry.pop("package")
for dependency in entry["dependencies"]:
dependency.pop("key", None)
dependency.pop("installed_version", None)
return entry
def _should_resolve_subgraph(subgraph_check_api: str, package_name: str, package_version: str, index_url: str) -> bool:
"""Ask the given subgraph check API if the given package in the given version should be included in the resolution.
This subgraph resolving avoidence serves two purposes - we don't need to
resolve dependency subgraphs that were already analyzed and we also avoid
analyzing of "core" packages (like setuptools) where not needed as they
can break installation environment.
"""
_LOGGER.info(
"Checking if the given dependency subgraph for package %r in version %r from index %r should be resolved",
package_name,
package_version,
index_url,
)
response = requests.get(
subgraph_check_api,
params={"package_name": package_name, "package_version": package_version, "index_url": index_url},
)
if response.status_code == 200:
return True
elif response.status_code == 208:
# This is probably not the correct HTTP status code to be used here, but which one should be used?
return False
response.raise_for_status()
raise ValueError(
"Unreachable code - subgraph check API responded with unknown HTTP status "
"code %s for package %r in version %r from index %r",
package_name,
package_version,
index_url,
)
@contextmanager
def _install_requirement(
python_bin: str, package: str, version: str = None, index_url: str = None, clean: bool = True
) -> None:
"""Install requirements specified using suggested pip binary."""
previous_version = _pipdeptree(python_bin, package)
try:
cmd = "{} -m pip install --force-reinstall --no-cache-dir --no-deps {}".format(python_bin, quote(package))
if version:
cmd += "=={}".format(quote(version))
if index_url:
cmd += ' --index-url "{}" '.format(quote(index_url))
# Supply trusted host by default so we do not get errors - it safe to
# do it here as package indexes are managed by Thoth.
trusted_host = urlparse(index_url).netloc
cmd += " --trusted-host {}".format(trusted_host)
_LOGGER.debug("Installing requirement %r in version %r", package, version)
run_command(cmd)
yield
finally:
if clean:
_LOGGER.debug("Removing installed package %r", package)
cmd = "{} -m pip uninstall --yes {}".format(python_bin, quote(package))
result = run_command(cmd, raise_on_error=False)
if result.return_code != 0:
_LOGGER.warning(
"Failed to restore previous environment by removing package %r (installed version %r), "
"the error is not fatal but can affect future actions: %s",
package,
version,
result.stderr,
)
_LOGGER.debug(
"Restoring previous environment setup after installation of %r (%s)", package, previous_version
)
if previous_version:
cmd = "{} -m pip install --force-reinstall --no-cache-dir --no-deps {}=={}".format(
python_bin, quote(package), quote(previous_version["package"]["installed_version"])
)
result = run_command(cmd, raise_on_error=False)
if result.return_code != 0:
_LOGGER.warning(
"Failed to restore previous environment for package %r (installed version %r), "
", the error is not fatal but can affect future actions (previous version: %r): %s",
package,
version,
previous_version,
result.stderr,
)
def _pipdeptree(python_bin, package_name: str = None, warn: bool = False) -> typing.Optional[dict]:
"""Get pip dependency tree by executing pipdeptree tool."""
cmd = "{} -m pipdeptree --json".format(python_bin)
_LOGGER.debug("Obtaining pip dependency tree using: %r", cmd)
output = run_command(cmd, is_json=True).stdout
if not package_name:
return output
for entry in output:
# In some versions pipdeptree does not work with --packages flag, do the logic on out own.
# TODO: we should probably do difference of reference this output and original environment
if entry["package"]["key"].lower() == package_name.lower():
return entry
# The given package was not found.
if warn:
_LOGGER.warning("Package %r was not found in pipdeptree output %r", package_name, output)
return None
def _get_dependency_specification(dep_spec: typing.List[tuple]) -> str:
"""Get string representation of dependency specification as provided by PythonDependencyParser."""
return ",".join(dep_range[0] + dep_range[1] for dep_range in dep_spec)
def _resolve_versions(solver: PythonSolver, source: Source, package_name: str, version_spec: str) -> typing.List[str]:
try:
resolved_versions = solver.solve([package_name + (version_spec or "")], all_versions=True)
except NotFound:
_LOGGER.info(
"No versions were resovled for %r with version specification %r for package index %r",
package_name,
version_spec,
source.url,
)
return []
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Failed to resolve versions for %r with version spec %r", package_name, version_spec)
return []
assert len(resolved_versions.keys()) == 1, "Resolution of one package version ended with multiple packages."
result = []
for item in list(resolved_versions.values())[0]:
result.append(item[0]) # We remove information about indexes.
return result
def _do_resolve_index(
python_bin: str,
solver: PythonSolver,
*,
all_solvers: typing.List[PythonSolver],
requirements: typing.List[str],
exclude_packages: set = None,
transitive: bool = True,
subgraph_check_api: str = None,
) -> dict:
"""Perform resolution of requirements against the given solver."""
index_url = solver.release_fetcher.index_url
source = solver.release_fetcher.source
packages_seen = set()
packages = []
errors = []
unresolved = []
unparsed = []
exclude_packages = exclude_packages or {}
queue = deque()
for requirement in requirements:
_LOGGER.debug("Parsing requirement %r", requirement)
try:
dependency = PythonDependencyParser.parse_python(requirement)
except Exception as exc:
unparsed.append({"requirement": requirement, "details": str(exc)})
continue
if dependency.name in exclude_packages:
continue
version_spec = _get_dependency_specification(dependency.spec)
resolved_versions = _resolve_versions(solver, source, dependency.name, version_spec)
if not resolved_versions:
_LOGGER.warning("No versions were resolved for dependency %r in version %r", dependency.name, version_spec)
unresolved.append({"package_name": dependency.name, "version_spec": version_spec, "index": index_url})
else:
for version in resolved_versions:
entry = (dependency.name, version)
packages_seen.add(entry)
queue.append(entry)
while queue:
package_name, package_version = queue.pop()
_LOGGER.info("Using index %r to discover package %r in version %r", index_url, package_name, package_version)
try:
with _install_requirement(python_bin, package_name, package_version, index_url):
package_info = _pipdeptree(python_bin, package_name, warn=True)
except CommandError as exc:
_LOGGER.debug(
"There was an error during package %r in version %r discovery from %r: %s",
package_name,
package_version,
index_url,
exc,
)
errors.append(
{
"package_name": package_name,
"index": index_url,
"version": package_version,
"type": "command_error",
"details": exc.to_dict(),
}
)
continue
if package_info is None:
errors.append(
{
"package_name": package_name,
"index": index_url,
"version": package_version,
"type": "not_site_package",
"details": {
"message": "Failed to get information about installed package, probably not site package"
},
}
)
continue
if package_info["package"]["installed_version"] != package_version:
_LOGGER.warning(
"Requested to install version %r of package %r, but installed version is %r, error is not fatal",
package_version,
package_name,
package_info["package"]["installed_version"],
)
if package_info["package"]["package_name"] != package_name:
_LOGGER.warning(
"Requested to install package %r, but installed package name is %r, error is not fatal",
package_name,
package_info["package"]["package_name"],
)
entry = _create_entry(package_info, source)
packages.append(entry)
for dependency in entry["dependencies"]:
dependency_name, dependency_range = dependency["package_name"], dependency["required_version"]
dependency["resolved_versions"] = []
for dep_solver in all_solvers:
_LOGGER.info(
"Resolving dependency versions for %r with range %r from %r",
dependency_name,
dependency_range,
dep_solver.release_fetcher.index_url,
)
resolved_versions = _resolve_versions(
dep_solver, dep_solver.release_fetcher.source, dependency_name, dependency_range
)
_LOGGER.debug(
"Resolved versions for package %r with range specifier %r: %s",
dependency_name,
dependency_range,
resolved_versions,
)
dependency["resolved_versions"].append(
{"versions": resolved_versions, "index": dep_solver.release_fetcher.index_url}
)
if not transitive:
continue
for version in resolved_versions:
# Did we check this package already - do not check indexes, we manually insert them.
seen_entry = (dependency_name, version)
if seen_entry not in packages_seen and (
not subgraph_check_api
or (
subgraph_check_api
and _should_resolve_subgraph(subgraph_check_api, dependency_name, version, index_url)
)
):
_LOGGER.debug(
"Adding package %r in version %r for next resolution round", dependency_name, version
)
packages_seen.add(seen_entry)
queue.append((dependency_name, version))
return {"tree": packages, "errors": errors, "unparsed": unparsed, "unresolved": unresolved}
def resolve(
requirements: typing.List[str],
index_urls: list = None,
python_version: int = 3,
exclude_packages: set = None,
transitive: bool = True,
subgraph_check_api: str = None,
) -> dict:
"""Resolve given requirements for the given Python version."""
assert python_version in (2, 3), "Unknown Python version"
if subgraph_check_api and not transitive:
_LOGGER.error("The check against subgraph API cannot be done if no transitive dependencies are resolved")
sys.exit(2)
python_bin = "python3" if python_version == 3 else "python2"
run_command("virtualenv -p python3 venv")
python_bin = "venv/bin/" + python_bin
run_command("{} -m pip install pipdeptree".format(python_bin))
environment_details = _get_environment_details(python_bin)
result = {"tree": [], "errors": [], "unparsed": [], "unresolved": [], "environment": environment_details}
all_solvers = []
for index_url in index_urls:
source = Source(index_url)
all_solvers.append(PythonSolver(fetcher_kwargs={"source": source}))
for solver in all_solvers:
solver_result = _do_resolve_index(
python_bin=python_bin,
solver=solver,
all_solvers=all_solvers,
requirements=requirements,
exclude_packages=exclude_packages,
transitive=transitive,
subgraph_check_api=subgraph_check_api,
)
result["tree"].extend(solver_result["tree"])
result["errors"].extend(solver_result["errors"])
result["unparsed"].extend(solver_result["unparsed"])
result["unresolved"].extend(solver_result["unresolved"])
return result
|
thoth-station/solver
|
thoth/solver/python/python.py
|
_should_resolve_subgraph
|
python
|
def _should_resolve_subgraph(subgraph_check_api: str, package_name: str, package_version: str, index_url: str) -> bool:
_LOGGER.info(
"Checking if the given dependency subgraph for package %r in version %r from index %r should be resolved",
package_name,
package_version,
index_url,
)
response = requests.get(
subgraph_check_api,
params={"package_name": package_name, "package_version": package_version, "index_url": index_url},
)
if response.status_code == 200:
return True
elif response.status_code == 208:
# This is probably not the correct HTTP status code to be used here, but which one should be used?
return False
response.raise_for_status()
raise ValueError(
"Unreachable code - subgraph check API responded with unknown HTTP status "
"code %s for package %r in version %r from index %r",
package_name,
package_version,
index_url,
)
|
Ask the given subgraph check API if the given package in the given version should be included in the resolution.
This subgraph resolving avoidence serves two purposes - we don't need to
resolve dependency subgraphs that were already analyzed and we also avoid
analyzing of "core" packages (like setuptools) where not needed as they
can break installation environment.
|
train
|
https://github.com/thoth-station/solver/blob/de9bd6e744cb4d5f70320ba77d6875ccb8b876c4/thoth/solver/python/python.py#L66-L99
| null |
#!/usr/bin/env python3
# thoth-solver
# Copyright(C) 2018, 2019 Fridolin Pokorny
#
# This program is free software: you can redistribute it and / or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Dependency requirements solving for Python ecosystem."""
import sys
from collections import deque
from contextlib import contextmanager
import logging
import typing
from shlex import quote
from urllib.parse import urlparse
import requests
from thoth.analyzer import CommandError
from thoth.analyzer import run_command
from thoth.python import Source
from thoth.python.exceptions import NotFound
from .python_solver import PythonDependencyParser
from .python_solver import PythonSolver
_LOGGER = logging.getLogger(__name__)
def _create_entry(entry: dict, source: Source = None) -> dict:
"""Filter and normalize the output of pipdeptree entry."""
entry["package_name"] = entry["package"].pop("package_name")
entry["package_version"] = entry["package"].pop("installed_version")
if source:
entry["index_url"] = source.url
entry["sha256"] = []
for item in source.get_package_hashes(entry["package_name"], entry["package_version"]):
entry["sha256"].append(item["sha256"])
entry.pop("package")
for dependency in entry["dependencies"]:
dependency.pop("key", None)
dependency.pop("installed_version", None)
return entry
def _get_environment_details(python_bin: str) -> list:
"""Get information about packages in environment where packages get installed."""
cmd = "{} -m pipdeptree --json".format(python_bin)
output = run_command(cmd, is_json=True).stdout
return [_create_entry(entry) for entry in output]
@contextmanager
def _install_requirement(
python_bin: str, package: str, version: str = None, index_url: str = None, clean: bool = True
) -> None:
"""Install requirements specified using suggested pip binary."""
previous_version = _pipdeptree(python_bin, package)
try:
cmd = "{} -m pip install --force-reinstall --no-cache-dir --no-deps {}".format(python_bin, quote(package))
if version:
cmd += "=={}".format(quote(version))
if index_url:
cmd += ' --index-url "{}" '.format(quote(index_url))
# Supply trusted host by default so we do not get errors - it safe to
# do it here as package indexes are managed by Thoth.
trusted_host = urlparse(index_url).netloc
cmd += " --trusted-host {}".format(trusted_host)
_LOGGER.debug("Installing requirement %r in version %r", package, version)
run_command(cmd)
yield
finally:
if clean:
_LOGGER.debug("Removing installed package %r", package)
cmd = "{} -m pip uninstall --yes {}".format(python_bin, quote(package))
result = run_command(cmd, raise_on_error=False)
if result.return_code != 0:
_LOGGER.warning(
"Failed to restore previous environment by removing package %r (installed version %r), "
"the error is not fatal but can affect future actions: %s",
package,
version,
result.stderr,
)
_LOGGER.debug(
"Restoring previous environment setup after installation of %r (%s)", package, previous_version
)
if previous_version:
cmd = "{} -m pip install --force-reinstall --no-cache-dir --no-deps {}=={}".format(
python_bin, quote(package), quote(previous_version["package"]["installed_version"])
)
result = run_command(cmd, raise_on_error=False)
if result.return_code != 0:
_LOGGER.warning(
"Failed to restore previous environment for package %r (installed version %r), "
", the error is not fatal but can affect future actions (previous version: %r): %s",
package,
version,
previous_version,
result.stderr,
)
def _pipdeptree(python_bin, package_name: str = None, warn: bool = False) -> typing.Optional[dict]:
"""Get pip dependency tree by executing pipdeptree tool."""
cmd = "{} -m pipdeptree --json".format(python_bin)
_LOGGER.debug("Obtaining pip dependency tree using: %r", cmd)
output = run_command(cmd, is_json=True).stdout
if not package_name:
return output
for entry in output:
# In some versions pipdeptree does not work with --packages flag, do the logic on out own.
# TODO: we should probably do difference of reference this output and original environment
if entry["package"]["key"].lower() == package_name.lower():
return entry
# The given package was not found.
if warn:
_LOGGER.warning("Package %r was not found in pipdeptree output %r", package_name, output)
return None
def _get_dependency_specification(dep_spec: typing.List[tuple]) -> str:
"""Get string representation of dependency specification as provided by PythonDependencyParser."""
return ",".join(dep_range[0] + dep_range[1] for dep_range in dep_spec)
def _resolve_versions(solver: PythonSolver, source: Source, package_name: str, version_spec: str) -> typing.List[str]:
try:
resolved_versions = solver.solve([package_name + (version_spec or "")], all_versions=True)
except NotFound:
_LOGGER.info(
"No versions were resovled for %r with version specification %r for package index %r",
package_name,
version_spec,
source.url,
)
return []
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Failed to resolve versions for %r with version spec %r", package_name, version_spec)
return []
assert len(resolved_versions.keys()) == 1, "Resolution of one package version ended with multiple packages."
result = []
for item in list(resolved_versions.values())[0]:
result.append(item[0]) # We remove information about indexes.
return result
def _do_resolve_index(
python_bin: str,
solver: PythonSolver,
*,
all_solvers: typing.List[PythonSolver],
requirements: typing.List[str],
exclude_packages: set = None,
transitive: bool = True,
subgraph_check_api: str = None,
) -> dict:
"""Perform resolution of requirements against the given solver."""
index_url = solver.release_fetcher.index_url
source = solver.release_fetcher.source
packages_seen = set()
packages = []
errors = []
unresolved = []
unparsed = []
exclude_packages = exclude_packages or {}
queue = deque()
for requirement in requirements:
_LOGGER.debug("Parsing requirement %r", requirement)
try:
dependency = PythonDependencyParser.parse_python(requirement)
except Exception as exc:
unparsed.append({"requirement": requirement, "details": str(exc)})
continue
if dependency.name in exclude_packages:
continue
version_spec = _get_dependency_specification(dependency.spec)
resolved_versions = _resolve_versions(solver, source, dependency.name, version_spec)
if not resolved_versions:
_LOGGER.warning("No versions were resolved for dependency %r in version %r", dependency.name, version_spec)
unresolved.append({"package_name": dependency.name, "version_spec": version_spec, "index": index_url})
else:
for version in resolved_versions:
entry = (dependency.name, version)
packages_seen.add(entry)
queue.append(entry)
while queue:
package_name, package_version = queue.pop()
_LOGGER.info("Using index %r to discover package %r in version %r", index_url, package_name, package_version)
try:
with _install_requirement(python_bin, package_name, package_version, index_url):
package_info = _pipdeptree(python_bin, package_name, warn=True)
except CommandError as exc:
_LOGGER.debug(
"There was an error during package %r in version %r discovery from %r: %s",
package_name,
package_version,
index_url,
exc,
)
errors.append(
{
"package_name": package_name,
"index": index_url,
"version": package_version,
"type": "command_error",
"details": exc.to_dict(),
}
)
continue
if package_info is None:
errors.append(
{
"package_name": package_name,
"index": index_url,
"version": package_version,
"type": "not_site_package",
"details": {
"message": "Failed to get information about installed package, probably not site package"
},
}
)
continue
if package_info["package"]["installed_version"] != package_version:
_LOGGER.warning(
"Requested to install version %r of package %r, but installed version is %r, error is not fatal",
package_version,
package_name,
package_info["package"]["installed_version"],
)
if package_info["package"]["package_name"] != package_name:
_LOGGER.warning(
"Requested to install package %r, but installed package name is %r, error is not fatal",
package_name,
package_info["package"]["package_name"],
)
entry = _create_entry(package_info, source)
packages.append(entry)
for dependency in entry["dependencies"]:
dependency_name, dependency_range = dependency["package_name"], dependency["required_version"]
dependency["resolved_versions"] = []
for dep_solver in all_solvers:
_LOGGER.info(
"Resolving dependency versions for %r with range %r from %r",
dependency_name,
dependency_range,
dep_solver.release_fetcher.index_url,
)
resolved_versions = _resolve_versions(
dep_solver, dep_solver.release_fetcher.source, dependency_name, dependency_range
)
_LOGGER.debug(
"Resolved versions for package %r with range specifier %r: %s",
dependency_name,
dependency_range,
resolved_versions,
)
dependency["resolved_versions"].append(
{"versions": resolved_versions, "index": dep_solver.release_fetcher.index_url}
)
if not transitive:
continue
for version in resolved_versions:
# Did we check this package already - do not check indexes, we manually insert them.
seen_entry = (dependency_name, version)
if seen_entry not in packages_seen and (
not subgraph_check_api
or (
subgraph_check_api
and _should_resolve_subgraph(subgraph_check_api, dependency_name, version, index_url)
)
):
_LOGGER.debug(
"Adding package %r in version %r for next resolution round", dependency_name, version
)
packages_seen.add(seen_entry)
queue.append((dependency_name, version))
return {"tree": packages, "errors": errors, "unparsed": unparsed, "unresolved": unresolved}
def resolve(
requirements: typing.List[str],
index_urls: list = None,
python_version: int = 3,
exclude_packages: set = None,
transitive: bool = True,
subgraph_check_api: str = None,
) -> dict:
"""Resolve given requirements for the given Python version."""
assert python_version in (2, 3), "Unknown Python version"
if subgraph_check_api and not transitive:
_LOGGER.error("The check against subgraph API cannot be done if no transitive dependencies are resolved")
sys.exit(2)
python_bin = "python3" if python_version == 3 else "python2"
run_command("virtualenv -p python3 venv")
python_bin = "venv/bin/" + python_bin
run_command("{} -m pip install pipdeptree".format(python_bin))
environment_details = _get_environment_details(python_bin)
result = {"tree": [], "errors": [], "unparsed": [], "unresolved": [], "environment": environment_details}
all_solvers = []
for index_url in index_urls:
source = Source(index_url)
all_solvers.append(PythonSolver(fetcher_kwargs={"source": source}))
for solver in all_solvers:
solver_result = _do_resolve_index(
python_bin=python_bin,
solver=solver,
all_solvers=all_solvers,
requirements=requirements,
exclude_packages=exclude_packages,
transitive=transitive,
subgraph_check_api=subgraph_check_api,
)
result["tree"].extend(solver_result["tree"])
result["errors"].extend(solver_result["errors"])
result["unparsed"].extend(solver_result["unparsed"])
result["unresolved"].extend(solver_result["unresolved"])
return result
|
thoth-station/solver
|
thoth/solver/python/python.py
|
_install_requirement
|
python
|
def _install_requirement(
python_bin: str, package: str, version: str = None, index_url: str = None, clean: bool = True
) -> None:
previous_version = _pipdeptree(python_bin, package)
try:
cmd = "{} -m pip install --force-reinstall --no-cache-dir --no-deps {}".format(python_bin, quote(package))
if version:
cmd += "=={}".format(quote(version))
if index_url:
cmd += ' --index-url "{}" '.format(quote(index_url))
# Supply trusted host by default so we do not get errors - it safe to
# do it here as package indexes are managed by Thoth.
trusted_host = urlparse(index_url).netloc
cmd += " --trusted-host {}".format(trusted_host)
_LOGGER.debug("Installing requirement %r in version %r", package, version)
run_command(cmd)
yield
finally:
if clean:
_LOGGER.debug("Removing installed package %r", package)
cmd = "{} -m pip uninstall --yes {}".format(python_bin, quote(package))
result = run_command(cmd, raise_on_error=False)
if result.return_code != 0:
_LOGGER.warning(
"Failed to restore previous environment by removing package %r (installed version %r), "
"the error is not fatal but can affect future actions: %s",
package,
version,
result.stderr,
)
_LOGGER.debug(
"Restoring previous environment setup after installation of %r (%s)", package, previous_version
)
if previous_version:
cmd = "{} -m pip install --force-reinstall --no-cache-dir --no-deps {}=={}".format(
python_bin, quote(package), quote(previous_version["package"]["installed_version"])
)
result = run_command(cmd, raise_on_error=False)
if result.return_code != 0:
_LOGGER.warning(
"Failed to restore previous environment for package %r (installed version %r), "
", the error is not fatal but can affect future actions (previous version: %r): %s",
package,
version,
previous_version,
result.stderr,
)
|
Install requirements specified using suggested pip binary.
|
train
|
https://github.com/thoth-station/solver/blob/de9bd6e744cb4d5f70320ba77d6875ccb8b876c4/thoth/solver/python/python.py#L103-L155
| null |
#!/usr/bin/env python3
# thoth-solver
# Copyright(C) 2018, 2019 Fridolin Pokorny
#
# This program is free software: you can redistribute it and / or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Dependency requirements solving for Python ecosystem."""
import sys
from collections import deque
from contextlib import contextmanager
import logging
import typing
from shlex import quote
from urllib.parse import urlparse
import requests
from thoth.analyzer import CommandError
from thoth.analyzer import run_command
from thoth.python import Source
from thoth.python.exceptions import NotFound
from .python_solver import PythonDependencyParser
from .python_solver import PythonSolver
_LOGGER = logging.getLogger(__name__)
def _create_entry(entry: dict, source: Source = None) -> dict:
"""Filter and normalize the output of pipdeptree entry."""
entry["package_name"] = entry["package"].pop("package_name")
entry["package_version"] = entry["package"].pop("installed_version")
if source:
entry["index_url"] = source.url
entry["sha256"] = []
for item in source.get_package_hashes(entry["package_name"], entry["package_version"]):
entry["sha256"].append(item["sha256"])
entry.pop("package")
for dependency in entry["dependencies"]:
dependency.pop("key", None)
dependency.pop("installed_version", None)
return entry
def _get_environment_details(python_bin: str) -> list:
"""Get information about packages in environment where packages get installed."""
cmd = "{} -m pipdeptree --json".format(python_bin)
output = run_command(cmd, is_json=True).stdout
return [_create_entry(entry) for entry in output]
def _should_resolve_subgraph(subgraph_check_api: str, package_name: str, package_version: str, index_url: str) -> bool:
"""Ask the given subgraph check API if the given package in the given version should be included in the resolution.
This subgraph resolving avoidence serves two purposes - we don't need to
resolve dependency subgraphs that were already analyzed and we also avoid
analyzing of "core" packages (like setuptools) where not needed as they
can break installation environment.
"""
_LOGGER.info(
"Checking if the given dependency subgraph for package %r in version %r from index %r should be resolved",
package_name,
package_version,
index_url,
)
response = requests.get(
subgraph_check_api,
params={"package_name": package_name, "package_version": package_version, "index_url": index_url},
)
if response.status_code == 200:
return True
elif response.status_code == 208:
# This is probably not the correct HTTP status code to be used here, but which one should be used?
return False
response.raise_for_status()
raise ValueError(
"Unreachable code - subgraph check API responded with unknown HTTP status "
"code %s for package %r in version %r from index %r",
package_name,
package_version,
index_url,
)
@contextmanager
def _pipdeptree(python_bin, package_name: str = None, warn: bool = False) -> typing.Optional[dict]:
"""Get pip dependency tree by executing pipdeptree tool."""
cmd = "{} -m pipdeptree --json".format(python_bin)
_LOGGER.debug("Obtaining pip dependency tree using: %r", cmd)
output = run_command(cmd, is_json=True).stdout
if not package_name:
return output
for entry in output:
# In some versions pipdeptree does not work with --packages flag, do the logic on out own.
# TODO: we should probably do difference of reference this output and original environment
if entry["package"]["key"].lower() == package_name.lower():
return entry
# The given package was not found.
if warn:
_LOGGER.warning("Package %r was not found in pipdeptree output %r", package_name, output)
return None
def _get_dependency_specification(dep_spec: typing.List[tuple]) -> str:
"""Get string representation of dependency specification as provided by PythonDependencyParser."""
return ",".join(dep_range[0] + dep_range[1] for dep_range in dep_spec)
def _resolve_versions(solver: PythonSolver, source: Source, package_name: str, version_spec: str) -> typing.List[str]:
try:
resolved_versions = solver.solve([package_name + (version_spec or "")], all_versions=True)
except NotFound:
_LOGGER.info(
"No versions were resovled for %r with version specification %r for package index %r",
package_name,
version_spec,
source.url,
)
return []
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Failed to resolve versions for %r with version spec %r", package_name, version_spec)
return []
assert len(resolved_versions.keys()) == 1, "Resolution of one package version ended with multiple packages."
result = []
for item in list(resolved_versions.values())[0]:
result.append(item[0]) # We remove information about indexes.
return result
def _do_resolve_index(
python_bin: str,
solver: PythonSolver,
*,
all_solvers: typing.List[PythonSolver],
requirements: typing.List[str],
exclude_packages: set = None,
transitive: bool = True,
subgraph_check_api: str = None,
) -> dict:
"""Perform resolution of requirements against the given solver."""
index_url = solver.release_fetcher.index_url
source = solver.release_fetcher.source
packages_seen = set()
packages = []
errors = []
unresolved = []
unparsed = []
exclude_packages = exclude_packages or {}
queue = deque()
for requirement in requirements:
_LOGGER.debug("Parsing requirement %r", requirement)
try:
dependency = PythonDependencyParser.parse_python(requirement)
except Exception as exc:
unparsed.append({"requirement": requirement, "details": str(exc)})
continue
if dependency.name in exclude_packages:
continue
version_spec = _get_dependency_specification(dependency.spec)
resolved_versions = _resolve_versions(solver, source, dependency.name, version_spec)
if not resolved_versions:
_LOGGER.warning("No versions were resolved for dependency %r in version %r", dependency.name, version_spec)
unresolved.append({"package_name": dependency.name, "version_spec": version_spec, "index": index_url})
else:
for version in resolved_versions:
entry = (dependency.name, version)
packages_seen.add(entry)
queue.append(entry)
while queue:
package_name, package_version = queue.pop()
_LOGGER.info("Using index %r to discover package %r in version %r", index_url, package_name, package_version)
try:
with _install_requirement(python_bin, package_name, package_version, index_url):
package_info = _pipdeptree(python_bin, package_name, warn=True)
except CommandError as exc:
_LOGGER.debug(
"There was an error during package %r in version %r discovery from %r: %s",
package_name,
package_version,
index_url,
exc,
)
errors.append(
{
"package_name": package_name,
"index": index_url,
"version": package_version,
"type": "command_error",
"details": exc.to_dict(),
}
)
continue
if package_info is None:
errors.append(
{
"package_name": package_name,
"index": index_url,
"version": package_version,
"type": "not_site_package",
"details": {
"message": "Failed to get information about installed package, probably not site package"
},
}
)
continue
if package_info["package"]["installed_version"] != package_version:
_LOGGER.warning(
"Requested to install version %r of package %r, but installed version is %r, error is not fatal",
package_version,
package_name,
package_info["package"]["installed_version"],
)
if package_info["package"]["package_name"] != package_name:
_LOGGER.warning(
"Requested to install package %r, but installed package name is %r, error is not fatal",
package_name,
package_info["package"]["package_name"],
)
entry = _create_entry(package_info, source)
packages.append(entry)
for dependency in entry["dependencies"]:
dependency_name, dependency_range = dependency["package_name"], dependency["required_version"]
dependency["resolved_versions"] = []
for dep_solver in all_solvers:
_LOGGER.info(
"Resolving dependency versions for %r with range %r from %r",
dependency_name,
dependency_range,
dep_solver.release_fetcher.index_url,
)
resolved_versions = _resolve_versions(
dep_solver, dep_solver.release_fetcher.source, dependency_name, dependency_range
)
_LOGGER.debug(
"Resolved versions for package %r with range specifier %r: %s",
dependency_name,
dependency_range,
resolved_versions,
)
dependency["resolved_versions"].append(
{"versions": resolved_versions, "index": dep_solver.release_fetcher.index_url}
)
if not transitive:
continue
for version in resolved_versions:
# Did we check this package already - do not check indexes, we manually insert them.
seen_entry = (dependency_name, version)
if seen_entry not in packages_seen and (
not subgraph_check_api
or (
subgraph_check_api
and _should_resolve_subgraph(subgraph_check_api, dependency_name, version, index_url)
)
):
_LOGGER.debug(
"Adding package %r in version %r for next resolution round", dependency_name, version
)
packages_seen.add(seen_entry)
queue.append((dependency_name, version))
return {"tree": packages, "errors": errors, "unparsed": unparsed, "unresolved": unresolved}
def resolve(
requirements: typing.List[str],
index_urls: list = None,
python_version: int = 3,
exclude_packages: set = None,
transitive: bool = True,
subgraph_check_api: str = None,
) -> dict:
"""Resolve given requirements for the given Python version."""
assert python_version in (2, 3), "Unknown Python version"
if subgraph_check_api and not transitive:
_LOGGER.error("The check against subgraph API cannot be done if no transitive dependencies are resolved")
sys.exit(2)
python_bin = "python3" if python_version == 3 else "python2"
run_command("virtualenv -p python3 venv")
python_bin = "venv/bin/" + python_bin
run_command("{} -m pip install pipdeptree".format(python_bin))
environment_details = _get_environment_details(python_bin)
result = {"tree": [], "errors": [], "unparsed": [], "unresolved": [], "environment": environment_details}
all_solvers = []
for index_url in index_urls:
source = Source(index_url)
all_solvers.append(PythonSolver(fetcher_kwargs={"source": source}))
for solver in all_solvers:
solver_result = _do_resolve_index(
python_bin=python_bin,
solver=solver,
all_solvers=all_solvers,
requirements=requirements,
exclude_packages=exclude_packages,
transitive=transitive,
subgraph_check_api=subgraph_check_api,
)
result["tree"].extend(solver_result["tree"])
result["errors"].extend(solver_result["errors"])
result["unparsed"].extend(solver_result["unparsed"])
result["unresolved"].extend(solver_result["unresolved"])
return result
|
thoth-station/solver
|
thoth/solver/python/python.py
|
_pipdeptree
|
python
|
def _pipdeptree(python_bin, package_name: str = None, warn: bool = False) -> typing.Optional[dict]:
cmd = "{} -m pipdeptree --json".format(python_bin)
_LOGGER.debug("Obtaining pip dependency tree using: %r", cmd)
output = run_command(cmd, is_json=True).stdout
if not package_name:
return output
for entry in output:
# In some versions pipdeptree does not work with --packages flag, do the logic on out own.
# TODO: we should probably do difference of reference this output and original environment
if entry["package"]["key"].lower() == package_name.lower():
return entry
# The given package was not found.
if warn:
_LOGGER.warning("Package %r was not found in pipdeptree output %r", package_name, output)
return None
|
Get pip dependency tree by executing pipdeptree tool.
|
train
|
https://github.com/thoth-station/solver/blob/de9bd6e744cb4d5f70320ba77d6875ccb8b876c4/thoth/solver/python/python.py#L158-L177
| null |
#!/usr/bin/env python3
# thoth-solver
# Copyright(C) 2018, 2019 Fridolin Pokorny
#
# This program is free software: you can redistribute it and / or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Dependency requirements solving for Python ecosystem."""
import sys
from collections import deque
from contextlib import contextmanager
import logging
import typing
from shlex import quote
from urllib.parse import urlparse
import requests
from thoth.analyzer import CommandError
from thoth.analyzer import run_command
from thoth.python import Source
from thoth.python.exceptions import NotFound
from .python_solver import PythonDependencyParser
from .python_solver import PythonSolver
_LOGGER = logging.getLogger(__name__)
def _create_entry(entry: dict, source: Source = None) -> dict:
"""Filter and normalize the output of pipdeptree entry."""
entry["package_name"] = entry["package"].pop("package_name")
entry["package_version"] = entry["package"].pop("installed_version")
if source:
entry["index_url"] = source.url
entry["sha256"] = []
for item in source.get_package_hashes(entry["package_name"], entry["package_version"]):
entry["sha256"].append(item["sha256"])
entry.pop("package")
for dependency in entry["dependencies"]:
dependency.pop("key", None)
dependency.pop("installed_version", None)
return entry
def _get_environment_details(python_bin: str) -> list:
"""Get information about packages in environment where packages get installed."""
cmd = "{} -m pipdeptree --json".format(python_bin)
output = run_command(cmd, is_json=True).stdout
return [_create_entry(entry) for entry in output]
def _should_resolve_subgraph(subgraph_check_api: str, package_name: str, package_version: str, index_url: str) -> bool:
"""Ask the given subgraph check API if the given package in the given version should be included in the resolution.
This subgraph resolving avoidence serves two purposes - we don't need to
resolve dependency subgraphs that were already analyzed and we also avoid
analyzing of "core" packages (like setuptools) where not needed as they
can break installation environment.
"""
_LOGGER.info(
"Checking if the given dependency subgraph for package %r in version %r from index %r should be resolved",
package_name,
package_version,
index_url,
)
response = requests.get(
subgraph_check_api,
params={"package_name": package_name, "package_version": package_version, "index_url": index_url},
)
if response.status_code == 200:
return True
elif response.status_code == 208:
# This is probably not the correct HTTP status code to be used here, but which one should be used?
return False
response.raise_for_status()
raise ValueError(
"Unreachable code - subgraph check API responded with unknown HTTP status "
"code %s for package %r in version %r from index %r",
package_name,
package_version,
index_url,
)
@contextmanager
def _install_requirement(
python_bin: str, package: str, version: str = None, index_url: str = None, clean: bool = True
) -> None:
"""Install requirements specified using suggested pip binary."""
previous_version = _pipdeptree(python_bin, package)
try:
cmd = "{} -m pip install --force-reinstall --no-cache-dir --no-deps {}".format(python_bin, quote(package))
if version:
cmd += "=={}".format(quote(version))
if index_url:
cmd += ' --index-url "{}" '.format(quote(index_url))
# Supply trusted host by default so we do not get errors - it safe to
# do it here as package indexes are managed by Thoth.
trusted_host = urlparse(index_url).netloc
cmd += " --trusted-host {}".format(trusted_host)
_LOGGER.debug("Installing requirement %r in version %r", package, version)
run_command(cmd)
yield
finally:
if clean:
_LOGGER.debug("Removing installed package %r", package)
cmd = "{} -m pip uninstall --yes {}".format(python_bin, quote(package))
result = run_command(cmd, raise_on_error=False)
if result.return_code != 0:
_LOGGER.warning(
"Failed to restore previous environment by removing package %r (installed version %r), "
"the error is not fatal but can affect future actions: %s",
package,
version,
result.stderr,
)
_LOGGER.debug(
"Restoring previous environment setup after installation of %r (%s)", package, previous_version
)
if previous_version:
cmd = "{} -m pip install --force-reinstall --no-cache-dir --no-deps {}=={}".format(
python_bin, quote(package), quote(previous_version["package"]["installed_version"])
)
result = run_command(cmd, raise_on_error=False)
if result.return_code != 0:
_LOGGER.warning(
"Failed to restore previous environment for package %r (installed version %r), "
", the error is not fatal but can affect future actions (previous version: %r): %s",
package,
version,
previous_version,
result.stderr,
)
def _get_dependency_specification(dep_spec: typing.List[tuple]) -> str:
"""Get string representation of dependency specification as provided by PythonDependencyParser."""
return ",".join(dep_range[0] + dep_range[1] for dep_range in dep_spec)
def _resolve_versions(solver: PythonSolver, source: Source, package_name: str, version_spec: str) -> typing.List[str]:
try:
resolved_versions = solver.solve([package_name + (version_spec or "")], all_versions=True)
except NotFound:
_LOGGER.info(
"No versions were resovled for %r with version specification %r for package index %r",
package_name,
version_spec,
source.url,
)
return []
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Failed to resolve versions for %r with version spec %r", package_name, version_spec)
return []
assert len(resolved_versions.keys()) == 1, "Resolution of one package version ended with multiple packages."
result = []
for item in list(resolved_versions.values())[0]:
result.append(item[0]) # We remove information about indexes.
return result
def _do_resolve_index(
python_bin: str,
solver: PythonSolver,
*,
all_solvers: typing.List[PythonSolver],
requirements: typing.List[str],
exclude_packages: set = None,
transitive: bool = True,
subgraph_check_api: str = None,
) -> dict:
"""Perform resolution of requirements against the given solver."""
index_url = solver.release_fetcher.index_url
source = solver.release_fetcher.source
packages_seen = set()
packages = []
errors = []
unresolved = []
unparsed = []
exclude_packages = exclude_packages or {}
queue = deque()
for requirement in requirements:
_LOGGER.debug("Parsing requirement %r", requirement)
try:
dependency = PythonDependencyParser.parse_python(requirement)
except Exception as exc:
unparsed.append({"requirement": requirement, "details": str(exc)})
continue
if dependency.name in exclude_packages:
continue
version_spec = _get_dependency_specification(dependency.spec)
resolved_versions = _resolve_versions(solver, source, dependency.name, version_spec)
if not resolved_versions:
_LOGGER.warning("No versions were resolved for dependency %r in version %r", dependency.name, version_spec)
unresolved.append({"package_name": dependency.name, "version_spec": version_spec, "index": index_url})
else:
for version in resolved_versions:
entry = (dependency.name, version)
packages_seen.add(entry)
queue.append(entry)
while queue:
package_name, package_version = queue.pop()
_LOGGER.info("Using index %r to discover package %r in version %r", index_url, package_name, package_version)
try:
with _install_requirement(python_bin, package_name, package_version, index_url):
package_info = _pipdeptree(python_bin, package_name, warn=True)
except CommandError as exc:
_LOGGER.debug(
"There was an error during package %r in version %r discovery from %r: %s",
package_name,
package_version,
index_url,
exc,
)
errors.append(
{
"package_name": package_name,
"index": index_url,
"version": package_version,
"type": "command_error",
"details": exc.to_dict(),
}
)
continue
if package_info is None:
errors.append(
{
"package_name": package_name,
"index": index_url,
"version": package_version,
"type": "not_site_package",
"details": {
"message": "Failed to get information about installed package, probably not site package"
},
}
)
continue
if package_info["package"]["installed_version"] != package_version:
_LOGGER.warning(
"Requested to install version %r of package %r, but installed version is %r, error is not fatal",
package_version,
package_name,
package_info["package"]["installed_version"],
)
if package_info["package"]["package_name"] != package_name:
_LOGGER.warning(
"Requested to install package %r, but installed package name is %r, error is not fatal",
package_name,
package_info["package"]["package_name"],
)
entry = _create_entry(package_info, source)
packages.append(entry)
for dependency in entry["dependencies"]:
dependency_name, dependency_range = dependency["package_name"], dependency["required_version"]
dependency["resolved_versions"] = []
for dep_solver in all_solvers:
_LOGGER.info(
"Resolving dependency versions for %r with range %r from %r",
dependency_name,
dependency_range,
dep_solver.release_fetcher.index_url,
)
resolved_versions = _resolve_versions(
dep_solver, dep_solver.release_fetcher.source, dependency_name, dependency_range
)
_LOGGER.debug(
"Resolved versions for package %r with range specifier %r: %s",
dependency_name,
dependency_range,
resolved_versions,
)
dependency["resolved_versions"].append(
{"versions": resolved_versions, "index": dep_solver.release_fetcher.index_url}
)
if not transitive:
continue
for version in resolved_versions:
# Did we check this package already - do not check indexes, we manually insert them.
seen_entry = (dependency_name, version)
if seen_entry not in packages_seen and (
not subgraph_check_api
or (
subgraph_check_api
and _should_resolve_subgraph(subgraph_check_api, dependency_name, version, index_url)
)
):
_LOGGER.debug(
"Adding package %r in version %r for next resolution round", dependency_name, version
)
packages_seen.add(seen_entry)
queue.append((dependency_name, version))
return {"tree": packages, "errors": errors, "unparsed": unparsed, "unresolved": unresolved}
def resolve(
requirements: typing.List[str],
index_urls: list = None,
python_version: int = 3,
exclude_packages: set = None,
transitive: bool = True,
subgraph_check_api: str = None,
) -> dict:
"""Resolve given requirements for the given Python version."""
assert python_version in (2, 3), "Unknown Python version"
if subgraph_check_api and not transitive:
_LOGGER.error("The check against subgraph API cannot be done if no transitive dependencies are resolved")
sys.exit(2)
python_bin = "python3" if python_version == 3 else "python2"
run_command("virtualenv -p python3 venv")
python_bin = "venv/bin/" + python_bin
run_command("{} -m pip install pipdeptree".format(python_bin))
environment_details = _get_environment_details(python_bin)
result = {"tree": [], "errors": [], "unparsed": [], "unresolved": [], "environment": environment_details}
all_solvers = []
for index_url in index_urls:
source = Source(index_url)
all_solvers.append(PythonSolver(fetcher_kwargs={"source": source}))
for solver in all_solvers:
solver_result = _do_resolve_index(
python_bin=python_bin,
solver=solver,
all_solvers=all_solvers,
requirements=requirements,
exclude_packages=exclude_packages,
transitive=transitive,
subgraph_check_api=subgraph_check_api,
)
result["tree"].extend(solver_result["tree"])
result["errors"].extend(solver_result["errors"])
result["unparsed"].extend(solver_result["unparsed"])
result["unresolved"].extend(solver_result["unresolved"])
return result
|
thoth-station/solver
|
thoth/solver/python/python.py
|
_get_dependency_specification
|
python
|
def _get_dependency_specification(dep_spec: typing.List[tuple]) -> str:
return ",".join(dep_range[0] + dep_range[1] for dep_range in dep_spec)
|
Get string representation of dependency specification as provided by PythonDependencyParser.
|
train
|
https://github.com/thoth-station/solver/blob/de9bd6e744cb4d5f70320ba77d6875ccb8b876c4/thoth/solver/python/python.py#L180-L182
| null |
#!/usr/bin/env python3
# thoth-solver
# Copyright(C) 2018, 2019 Fridolin Pokorny
#
# This program is free software: you can redistribute it and / or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Dependency requirements solving for Python ecosystem."""
import sys
from collections import deque
from contextlib import contextmanager
import logging
import typing
from shlex import quote
from urllib.parse import urlparse
import requests
from thoth.analyzer import CommandError
from thoth.analyzer import run_command
from thoth.python import Source
from thoth.python.exceptions import NotFound
from .python_solver import PythonDependencyParser
from .python_solver import PythonSolver
_LOGGER = logging.getLogger(__name__)
def _create_entry(entry: dict, source: Source = None) -> dict:
"""Filter and normalize the output of pipdeptree entry."""
entry["package_name"] = entry["package"].pop("package_name")
entry["package_version"] = entry["package"].pop("installed_version")
if source:
entry["index_url"] = source.url
entry["sha256"] = []
for item in source.get_package_hashes(entry["package_name"], entry["package_version"]):
entry["sha256"].append(item["sha256"])
entry.pop("package")
for dependency in entry["dependencies"]:
dependency.pop("key", None)
dependency.pop("installed_version", None)
return entry
def _get_environment_details(python_bin: str) -> list:
"""Get information about packages in environment where packages get installed."""
cmd = "{} -m pipdeptree --json".format(python_bin)
output = run_command(cmd, is_json=True).stdout
return [_create_entry(entry) for entry in output]
def _should_resolve_subgraph(subgraph_check_api: str, package_name: str, package_version: str, index_url: str) -> bool:
"""Ask the given subgraph check API if the given package in the given version should be included in the resolution.
This subgraph resolving avoidence serves two purposes - we don't need to
resolve dependency subgraphs that were already analyzed and we also avoid
analyzing of "core" packages (like setuptools) where not needed as they
can break installation environment.
"""
_LOGGER.info(
"Checking if the given dependency subgraph for package %r in version %r from index %r should be resolved",
package_name,
package_version,
index_url,
)
response = requests.get(
subgraph_check_api,
params={"package_name": package_name, "package_version": package_version, "index_url": index_url},
)
if response.status_code == 200:
return True
elif response.status_code == 208:
# This is probably not the correct HTTP status code to be used here, but which one should be used?
return False
response.raise_for_status()
raise ValueError(
"Unreachable code - subgraph check API responded with unknown HTTP status "
"code %s for package %r in version %r from index %r",
package_name,
package_version,
index_url,
)
@contextmanager
def _install_requirement(
python_bin: str, package: str, version: str = None, index_url: str = None, clean: bool = True
) -> None:
"""Install requirements specified using suggested pip binary."""
previous_version = _pipdeptree(python_bin, package)
try:
cmd = "{} -m pip install --force-reinstall --no-cache-dir --no-deps {}".format(python_bin, quote(package))
if version:
cmd += "=={}".format(quote(version))
if index_url:
cmd += ' --index-url "{}" '.format(quote(index_url))
# Supply trusted host by default so we do not get errors - it safe to
# do it here as package indexes are managed by Thoth.
trusted_host = urlparse(index_url).netloc
cmd += " --trusted-host {}".format(trusted_host)
_LOGGER.debug("Installing requirement %r in version %r", package, version)
run_command(cmd)
yield
finally:
if clean:
_LOGGER.debug("Removing installed package %r", package)
cmd = "{} -m pip uninstall --yes {}".format(python_bin, quote(package))
result = run_command(cmd, raise_on_error=False)
if result.return_code != 0:
_LOGGER.warning(
"Failed to restore previous environment by removing package %r (installed version %r), "
"the error is not fatal but can affect future actions: %s",
package,
version,
result.stderr,
)
_LOGGER.debug(
"Restoring previous environment setup after installation of %r (%s)", package, previous_version
)
if previous_version:
cmd = "{} -m pip install --force-reinstall --no-cache-dir --no-deps {}=={}".format(
python_bin, quote(package), quote(previous_version["package"]["installed_version"])
)
result = run_command(cmd, raise_on_error=False)
if result.return_code != 0:
_LOGGER.warning(
"Failed to restore previous environment for package %r (installed version %r), "
", the error is not fatal but can affect future actions (previous version: %r): %s",
package,
version,
previous_version,
result.stderr,
)
def _pipdeptree(python_bin, package_name: str = None, warn: bool = False) -> typing.Optional[dict]:
"""Get pip dependency tree by executing pipdeptree tool."""
cmd = "{} -m pipdeptree --json".format(python_bin)
_LOGGER.debug("Obtaining pip dependency tree using: %r", cmd)
output = run_command(cmd, is_json=True).stdout
if not package_name:
return output
for entry in output:
# In some versions pipdeptree does not work with --packages flag, do the logic on out own.
# TODO: we should probably do difference of reference this output and original environment
if entry["package"]["key"].lower() == package_name.lower():
return entry
# The given package was not found.
if warn:
_LOGGER.warning("Package %r was not found in pipdeptree output %r", package_name, output)
return None
def _resolve_versions(solver: PythonSolver, source: Source, package_name: str, version_spec: str) -> typing.List[str]:
try:
resolved_versions = solver.solve([package_name + (version_spec or "")], all_versions=True)
except NotFound:
_LOGGER.info(
"No versions were resovled for %r with version specification %r for package index %r",
package_name,
version_spec,
source.url,
)
return []
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Failed to resolve versions for %r with version spec %r", package_name, version_spec)
return []
assert len(resolved_versions.keys()) == 1, "Resolution of one package version ended with multiple packages."
result = []
for item in list(resolved_versions.values())[0]:
result.append(item[0]) # We remove information about indexes.
return result
def _do_resolve_index(
python_bin: str,
solver: PythonSolver,
*,
all_solvers: typing.List[PythonSolver],
requirements: typing.List[str],
exclude_packages: set = None,
transitive: bool = True,
subgraph_check_api: str = None,
) -> dict:
"""Perform resolution of requirements against the given solver."""
index_url = solver.release_fetcher.index_url
source = solver.release_fetcher.source
packages_seen = set()
packages = []
errors = []
unresolved = []
unparsed = []
exclude_packages = exclude_packages or {}
queue = deque()
for requirement in requirements:
_LOGGER.debug("Parsing requirement %r", requirement)
try:
dependency = PythonDependencyParser.parse_python(requirement)
except Exception as exc:
unparsed.append({"requirement": requirement, "details": str(exc)})
continue
if dependency.name in exclude_packages:
continue
version_spec = _get_dependency_specification(dependency.spec)
resolved_versions = _resolve_versions(solver, source, dependency.name, version_spec)
if not resolved_versions:
_LOGGER.warning("No versions were resolved for dependency %r in version %r", dependency.name, version_spec)
unresolved.append({"package_name": dependency.name, "version_spec": version_spec, "index": index_url})
else:
for version in resolved_versions:
entry = (dependency.name, version)
packages_seen.add(entry)
queue.append(entry)
while queue:
package_name, package_version = queue.pop()
_LOGGER.info("Using index %r to discover package %r in version %r", index_url, package_name, package_version)
try:
with _install_requirement(python_bin, package_name, package_version, index_url):
package_info = _pipdeptree(python_bin, package_name, warn=True)
except CommandError as exc:
_LOGGER.debug(
"There was an error during package %r in version %r discovery from %r: %s",
package_name,
package_version,
index_url,
exc,
)
errors.append(
{
"package_name": package_name,
"index": index_url,
"version": package_version,
"type": "command_error",
"details": exc.to_dict(),
}
)
continue
if package_info is None:
errors.append(
{
"package_name": package_name,
"index": index_url,
"version": package_version,
"type": "not_site_package",
"details": {
"message": "Failed to get information about installed package, probably not site package"
},
}
)
continue
if package_info["package"]["installed_version"] != package_version:
_LOGGER.warning(
"Requested to install version %r of package %r, but installed version is %r, error is not fatal",
package_version,
package_name,
package_info["package"]["installed_version"],
)
if package_info["package"]["package_name"] != package_name:
_LOGGER.warning(
"Requested to install package %r, but installed package name is %r, error is not fatal",
package_name,
package_info["package"]["package_name"],
)
entry = _create_entry(package_info, source)
packages.append(entry)
for dependency in entry["dependencies"]:
dependency_name, dependency_range = dependency["package_name"], dependency["required_version"]
dependency["resolved_versions"] = []
for dep_solver in all_solvers:
_LOGGER.info(
"Resolving dependency versions for %r with range %r from %r",
dependency_name,
dependency_range,
dep_solver.release_fetcher.index_url,
)
resolved_versions = _resolve_versions(
dep_solver, dep_solver.release_fetcher.source, dependency_name, dependency_range
)
_LOGGER.debug(
"Resolved versions for package %r with range specifier %r: %s",
dependency_name,
dependency_range,
resolved_versions,
)
dependency["resolved_versions"].append(
{"versions": resolved_versions, "index": dep_solver.release_fetcher.index_url}
)
if not transitive:
continue
for version in resolved_versions:
# Did we check this package already - do not check indexes, we manually insert them.
seen_entry = (dependency_name, version)
if seen_entry not in packages_seen and (
not subgraph_check_api
or (
subgraph_check_api
and _should_resolve_subgraph(subgraph_check_api, dependency_name, version, index_url)
)
):
_LOGGER.debug(
"Adding package %r in version %r for next resolution round", dependency_name, version
)
packages_seen.add(seen_entry)
queue.append((dependency_name, version))
return {"tree": packages, "errors": errors, "unparsed": unparsed, "unresolved": unresolved}
def resolve(
requirements: typing.List[str],
index_urls: list = None,
python_version: int = 3,
exclude_packages: set = None,
transitive: bool = True,
subgraph_check_api: str = None,
) -> dict:
"""Resolve given requirements for the given Python version."""
assert python_version in (2, 3), "Unknown Python version"
if subgraph_check_api and not transitive:
_LOGGER.error("The check against subgraph API cannot be done if no transitive dependencies are resolved")
sys.exit(2)
python_bin = "python3" if python_version == 3 else "python2"
run_command("virtualenv -p python3 venv")
python_bin = "venv/bin/" + python_bin
run_command("{} -m pip install pipdeptree".format(python_bin))
environment_details = _get_environment_details(python_bin)
result = {"tree": [], "errors": [], "unparsed": [], "unresolved": [], "environment": environment_details}
all_solvers = []
for index_url in index_urls:
source = Source(index_url)
all_solvers.append(PythonSolver(fetcher_kwargs={"source": source}))
for solver in all_solvers:
solver_result = _do_resolve_index(
python_bin=python_bin,
solver=solver,
all_solvers=all_solvers,
requirements=requirements,
exclude_packages=exclude_packages,
transitive=transitive,
subgraph_check_api=subgraph_check_api,
)
result["tree"].extend(solver_result["tree"])
result["errors"].extend(solver_result["errors"])
result["unparsed"].extend(solver_result["unparsed"])
result["unresolved"].extend(solver_result["unresolved"])
return result
|
thoth-station/solver
|
thoth/solver/python/python.py
|
_do_resolve_index
|
python
|
def _do_resolve_index(
python_bin: str,
solver: PythonSolver,
*,
all_solvers: typing.List[PythonSolver],
requirements: typing.List[str],
exclude_packages: set = None,
transitive: bool = True,
subgraph_check_api: str = None,
) -> dict:
index_url = solver.release_fetcher.index_url
source = solver.release_fetcher.source
packages_seen = set()
packages = []
errors = []
unresolved = []
unparsed = []
exclude_packages = exclude_packages or {}
queue = deque()
for requirement in requirements:
_LOGGER.debug("Parsing requirement %r", requirement)
try:
dependency = PythonDependencyParser.parse_python(requirement)
except Exception as exc:
unparsed.append({"requirement": requirement, "details": str(exc)})
continue
if dependency.name in exclude_packages:
continue
version_spec = _get_dependency_specification(dependency.spec)
resolved_versions = _resolve_versions(solver, source, dependency.name, version_spec)
if not resolved_versions:
_LOGGER.warning("No versions were resolved for dependency %r in version %r", dependency.name, version_spec)
unresolved.append({"package_name": dependency.name, "version_spec": version_spec, "index": index_url})
else:
for version in resolved_versions:
entry = (dependency.name, version)
packages_seen.add(entry)
queue.append(entry)
while queue:
package_name, package_version = queue.pop()
_LOGGER.info("Using index %r to discover package %r in version %r", index_url, package_name, package_version)
try:
with _install_requirement(python_bin, package_name, package_version, index_url):
package_info = _pipdeptree(python_bin, package_name, warn=True)
except CommandError as exc:
_LOGGER.debug(
"There was an error during package %r in version %r discovery from %r: %s",
package_name,
package_version,
index_url,
exc,
)
errors.append(
{
"package_name": package_name,
"index": index_url,
"version": package_version,
"type": "command_error",
"details": exc.to_dict(),
}
)
continue
if package_info is None:
errors.append(
{
"package_name": package_name,
"index": index_url,
"version": package_version,
"type": "not_site_package",
"details": {
"message": "Failed to get information about installed package, probably not site package"
},
}
)
continue
if package_info["package"]["installed_version"] != package_version:
_LOGGER.warning(
"Requested to install version %r of package %r, but installed version is %r, error is not fatal",
package_version,
package_name,
package_info["package"]["installed_version"],
)
if package_info["package"]["package_name"] != package_name:
_LOGGER.warning(
"Requested to install package %r, but installed package name is %r, error is not fatal",
package_name,
package_info["package"]["package_name"],
)
entry = _create_entry(package_info, source)
packages.append(entry)
for dependency in entry["dependencies"]:
dependency_name, dependency_range = dependency["package_name"], dependency["required_version"]
dependency["resolved_versions"] = []
for dep_solver in all_solvers:
_LOGGER.info(
"Resolving dependency versions for %r with range %r from %r",
dependency_name,
dependency_range,
dep_solver.release_fetcher.index_url,
)
resolved_versions = _resolve_versions(
dep_solver, dep_solver.release_fetcher.source, dependency_name, dependency_range
)
_LOGGER.debug(
"Resolved versions for package %r with range specifier %r: %s",
dependency_name,
dependency_range,
resolved_versions,
)
dependency["resolved_versions"].append(
{"versions": resolved_versions, "index": dep_solver.release_fetcher.index_url}
)
if not transitive:
continue
for version in resolved_versions:
# Did we check this package already - do not check indexes, we manually insert them.
seen_entry = (dependency_name, version)
if seen_entry not in packages_seen and (
not subgraph_check_api
or (
subgraph_check_api
and _should_resolve_subgraph(subgraph_check_api, dependency_name, version, index_url)
)
):
_LOGGER.debug(
"Adding package %r in version %r for next resolution round", dependency_name, version
)
packages_seen.add(seen_entry)
queue.append((dependency_name, version))
return {"tree": packages, "errors": errors, "unparsed": unparsed, "unresolved": unresolved}
|
Perform resolution of requirements against the given solver.
|
train
|
https://github.com/thoth-station/solver/blob/de9bd6e744cb4d5f70320ba77d6875ccb8b876c4/thoth/solver/python/python.py#L209-L353
|
[
"def _create_entry(entry: dict, source: Source = None) -> dict:\n \"\"\"Filter and normalize the output of pipdeptree entry.\"\"\"\n entry[\"package_name\"] = entry[\"package\"].pop(\"package_name\")\n entry[\"package_version\"] = entry[\"package\"].pop(\"installed_version\")\n\n if source:\n entry[\"index_url\"] = source.url\n entry[\"sha256\"] = []\n for item in source.get_package_hashes(entry[\"package_name\"], entry[\"package_version\"]):\n entry[\"sha256\"].append(item[\"sha256\"])\n\n entry.pop(\"package\")\n for dependency in entry[\"dependencies\"]:\n dependency.pop(\"key\", None)\n dependency.pop(\"installed_version\", None)\n\n return entry\n",
"def _should_resolve_subgraph(subgraph_check_api: str, package_name: str, package_version: str, index_url: str) -> bool:\n \"\"\"Ask the given subgraph check API if the given package in the given version should be included in the resolution.\n\n This subgraph resolving avoidence serves two purposes - we don't need to\n resolve dependency subgraphs that were already analyzed and we also avoid\n analyzing of \"core\" packages (like setuptools) where not needed as they\n can break installation environment.\n \"\"\"\n _LOGGER.info(\n \"Checking if the given dependency subgraph for package %r in version %r from index %r should be resolved\",\n package_name,\n package_version,\n index_url,\n )\n\n response = requests.get(\n subgraph_check_api,\n params={\"package_name\": package_name, \"package_version\": package_version, \"index_url\": index_url},\n )\n\n if response.status_code == 200:\n return True\n elif response.status_code == 208:\n # This is probably not the correct HTTP status code to be used here, but which one should be used?\n return False\n\n response.raise_for_status()\n raise ValueError(\n \"Unreachable code - subgraph check API responded with unknown HTTP status \"\n \"code %s for package %r in version %r from index %r\",\n package_name,\n package_version,\n index_url,\n )\n",
"def _pipdeptree(python_bin, package_name: str = None, warn: bool = False) -> typing.Optional[dict]:\n \"\"\"Get pip dependency tree by executing pipdeptree tool.\"\"\"\n cmd = \"{} -m pipdeptree --json\".format(python_bin)\n\n _LOGGER.debug(\"Obtaining pip dependency tree using: %r\", cmd)\n output = run_command(cmd, is_json=True).stdout\n\n if not package_name:\n return output\n\n for entry in output:\n # In some versions pipdeptree does not work with --packages flag, do the logic on out own.\n # TODO: we should probably do difference of reference this output and original environment\n if entry[\"package\"][\"key\"].lower() == package_name.lower():\n return entry\n\n # The given package was not found.\n if warn:\n _LOGGER.warning(\"Package %r was not found in pipdeptree output %r\", package_name, output)\n return None\n",
"def _get_dependency_specification(dep_spec: typing.List[tuple]) -> str:\n \"\"\"Get string representation of dependency specification as provided by PythonDependencyParser.\"\"\"\n return \",\".join(dep_range[0] + dep_range[1] for dep_range in dep_spec)\n",
"def _resolve_versions(solver: PythonSolver, source: Source, package_name: str, version_spec: str) -> typing.List[str]:\n try:\n resolved_versions = solver.solve([package_name + (version_spec or \"\")], all_versions=True)\n except NotFound:\n _LOGGER.info(\n \"No versions were resovled for %r with version specification %r for package index %r\",\n package_name,\n version_spec,\n source.url,\n )\n return []\n except Exception: # pylint: disable=broad-except\n _LOGGER.exception(\"Failed to resolve versions for %r with version spec %r\", package_name, version_spec)\n return []\n\n assert len(resolved_versions.keys()) == 1, \"Resolution of one package version ended with multiple packages.\"\n\n result = []\n for item in list(resolved_versions.values())[0]:\n result.append(item[0]) # We remove information about indexes.\n\n return result\n",
"def parse_python(spec): # Ignore PyDocStyleBear\n \"\"\"Parse PyPI specification of a single dependency.\n\n :param spec: str, for example \"Django>=1.5,<1.8\"\n :return: [Django [[('>=', '1.5'), ('<', '1.8')]]]\n \"\"\"\n\n def _extract_op_version(spec):\n # https://www.python.org/dev/peps/pep-0440/#compatible-release\n if spec.operator == \"~=\":\n version = spec.version.split(\".\")\n if len(version) in {2, 3, 4}:\n if len(version) in {3, 4}:\n del version[-1] # will increase the last but one in next line\n version[-1] = str(int(version[-1]) + 1)\n else:\n raise ValueError(\"%r must not be used with %r\" % (spec.operator, spec.version))\n return [(\">=\", spec.version), (\"<\", \".\".join(version))]\n # Trailing .* is permitted per\n # https://www.python.org/dev/peps/pep-0440/#version-matching\n elif spec.operator == \"==\" and spec.version.endswith(\".*\"):\n try:\n result = check_output([\"/usr/bin/semver-ranger\", spec.version], universal_newlines=True).strip()\n gte, lt = result.split()\n return [(\">=\", gte.lstrip(\">=\")), (\"<\", lt.lstrip(\"<\"))]\n except ValueError:\n _LOGGER.warning(\"couldn't resolve ==%s\", spec.version)\n return spec.operator, spec.version\n # https://www.python.org/dev/peps/pep-0440/#arbitrary-equality\n # Use of this operator is heavily discouraged, so just convert it to 'Version matching'\n elif spec.operator == \"===\":\n return \"==\", spec.version\n else:\n return spec.operator, spec.version\n\n def _get_pip_spec(requirements):\n \"\"\"There is no `specs` field In Pip 8+, take info from `specifier` field.\"\"\"\n if hasattr(requirements, \"specs\"):\n return requirements.specs\n elif hasattr(requirements, \"specifier\"):\n specs = [_extract_op_version(spec) for spec in requirements.specifier]\n if len(specs) == 0:\n # TODO: I'm not sure with this one\n # we should probably return None instead and let pip deal with this\n specs = [(\">=\", \"0.0.0\")]\n return specs\n\n _LOGGER.info(\"Parsing dependency %r\", spec)\n # 
create a temporary file and store the spec there since\n # `parse_requirements` requires a file\n with NamedTemporaryFile(mode=\"w+\", suffix=\"pysolve\") as f:\n f.write(spec)\n f.flush()\n parsed = parse_requirements(f.name, session=f.name)\n dependency = [Dependency(x.name, _get_pip_spec(x.req)) for x in parsed].pop()\n\n return dependency\n"
] |
#!/usr/bin/env python3
# thoth-solver
# Copyright(C) 2018, 2019 Fridolin Pokorny
#
# This program is free software: you can redistribute it and / or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Dependency requirements solving for Python ecosystem."""
import sys
from collections import deque
from contextlib import contextmanager
import logging
import typing
from shlex import quote
from urllib.parse import urlparse
import requests
from thoth.analyzer import CommandError
from thoth.analyzer import run_command
from thoth.python import Source
from thoth.python.exceptions import NotFound
from .python_solver import PythonDependencyParser
from .python_solver import PythonSolver
_LOGGER = logging.getLogger(__name__)
def _create_entry(entry: dict, source: Source = None) -> dict:
"""Filter and normalize the output of pipdeptree entry."""
entry["package_name"] = entry["package"].pop("package_name")
entry["package_version"] = entry["package"].pop("installed_version")
if source:
entry["index_url"] = source.url
entry["sha256"] = []
for item in source.get_package_hashes(entry["package_name"], entry["package_version"]):
entry["sha256"].append(item["sha256"])
entry.pop("package")
for dependency in entry["dependencies"]:
dependency.pop("key", None)
dependency.pop("installed_version", None)
return entry
def _get_environment_details(python_bin: str) -> list:
"""Get information about packages in environment where packages get installed."""
cmd = "{} -m pipdeptree --json".format(python_bin)
output = run_command(cmd, is_json=True).stdout
return [_create_entry(entry) for entry in output]
def _should_resolve_subgraph(subgraph_check_api: str, package_name: str, package_version: str, index_url: str) -> bool:
"""Ask the given subgraph check API if the given package in the given version should be included in the resolution.
This subgraph resolving avoidence serves two purposes - we don't need to
resolve dependency subgraphs that were already analyzed and we also avoid
analyzing of "core" packages (like setuptools) where not needed as they
can break installation environment.
"""
_LOGGER.info(
"Checking if the given dependency subgraph for package %r in version %r from index %r should be resolved",
package_name,
package_version,
index_url,
)
response = requests.get(
subgraph_check_api,
params={"package_name": package_name, "package_version": package_version, "index_url": index_url},
)
if response.status_code == 200:
return True
elif response.status_code == 208:
# This is probably not the correct HTTP status code to be used here, but which one should be used?
return False
response.raise_for_status()
raise ValueError(
"Unreachable code - subgraph check API responded with unknown HTTP status "
"code %s for package %r in version %r from index %r",
package_name,
package_version,
index_url,
)
@contextmanager
def _install_requirement(
python_bin: str, package: str, version: str = None, index_url: str = None, clean: bool = True
) -> None:
"""Install requirements specified using suggested pip binary."""
previous_version = _pipdeptree(python_bin, package)
try:
cmd = "{} -m pip install --force-reinstall --no-cache-dir --no-deps {}".format(python_bin, quote(package))
if version:
cmd += "=={}".format(quote(version))
if index_url:
cmd += ' --index-url "{}" '.format(quote(index_url))
# Supply trusted host by default so we do not get errors - it safe to
# do it here as package indexes are managed by Thoth.
trusted_host = urlparse(index_url).netloc
cmd += " --trusted-host {}".format(trusted_host)
_LOGGER.debug("Installing requirement %r in version %r", package, version)
run_command(cmd)
yield
finally:
if clean:
_LOGGER.debug("Removing installed package %r", package)
cmd = "{} -m pip uninstall --yes {}".format(python_bin, quote(package))
result = run_command(cmd, raise_on_error=False)
if result.return_code != 0:
_LOGGER.warning(
"Failed to restore previous environment by removing package %r (installed version %r), "
"the error is not fatal but can affect future actions: %s",
package,
version,
result.stderr,
)
_LOGGER.debug(
"Restoring previous environment setup after installation of %r (%s)", package, previous_version
)
if previous_version:
cmd = "{} -m pip install --force-reinstall --no-cache-dir --no-deps {}=={}".format(
python_bin, quote(package), quote(previous_version["package"]["installed_version"])
)
result = run_command(cmd, raise_on_error=False)
if result.return_code != 0:
_LOGGER.warning(
"Failed to restore previous environment for package %r (installed version %r), "
", the error is not fatal but can affect future actions (previous version: %r): %s",
package,
version,
previous_version,
result.stderr,
)
def _pipdeptree(python_bin, package_name: str = None, warn: bool = False) -> typing.Optional[dict]:
"""Get pip dependency tree by executing pipdeptree tool."""
cmd = "{} -m pipdeptree --json".format(python_bin)
_LOGGER.debug("Obtaining pip dependency tree using: %r", cmd)
output = run_command(cmd, is_json=True).stdout
if not package_name:
return output
for entry in output:
# In some versions pipdeptree does not work with --packages flag, do the logic on out own.
# TODO: we should probably do difference of reference this output and original environment
if entry["package"]["key"].lower() == package_name.lower():
return entry
# The given package was not found.
if warn:
_LOGGER.warning("Package %r was not found in pipdeptree output %r", package_name, output)
return None
def _get_dependency_specification(dep_spec: typing.List[tuple]) -> str:
"""Get string representation of dependency specification as provided by PythonDependencyParser."""
return ",".join(dep_range[0] + dep_range[1] for dep_range in dep_spec)
def _resolve_versions(solver: PythonSolver, source: Source, package_name: str, version_spec: str) -> typing.List[str]:
try:
resolved_versions = solver.solve([package_name + (version_spec or "")], all_versions=True)
except NotFound:
_LOGGER.info(
"No versions were resovled for %r with version specification %r for package index %r",
package_name,
version_spec,
source.url,
)
return []
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Failed to resolve versions for %r with version spec %r", package_name, version_spec)
return []
assert len(resolved_versions.keys()) == 1, "Resolution of one package version ended with multiple packages."
result = []
for item in list(resolved_versions.values())[0]:
result.append(item[0]) # We remove information about indexes.
return result
def resolve(
requirements: typing.List[str],
index_urls: list = None,
python_version: int = 3,
exclude_packages: set = None,
transitive: bool = True,
subgraph_check_api: str = None,
) -> dict:
"""Resolve given requirements for the given Python version."""
assert python_version in (2, 3), "Unknown Python version"
if subgraph_check_api and not transitive:
_LOGGER.error("The check against subgraph API cannot be done if no transitive dependencies are resolved")
sys.exit(2)
python_bin = "python3" if python_version == 3 else "python2"
run_command("virtualenv -p python3 venv")
python_bin = "venv/bin/" + python_bin
run_command("{} -m pip install pipdeptree".format(python_bin))
environment_details = _get_environment_details(python_bin)
result = {"tree": [], "errors": [], "unparsed": [], "unresolved": [], "environment": environment_details}
all_solvers = []
for index_url in index_urls:
source = Source(index_url)
all_solvers.append(PythonSolver(fetcher_kwargs={"source": source}))
for solver in all_solvers:
solver_result = _do_resolve_index(
python_bin=python_bin,
solver=solver,
all_solvers=all_solvers,
requirements=requirements,
exclude_packages=exclude_packages,
transitive=transitive,
subgraph_check_api=subgraph_check_api,
)
result["tree"].extend(solver_result["tree"])
result["errors"].extend(solver_result["errors"])
result["unparsed"].extend(solver_result["unparsed"])
result["unresolved"].extend(solver_result["unresolved"])
return result
|
thoth-station/solver
|
thoth/solver/python/python.py
|
resolve
|
python
|
def resolve(
requirements: typing.List[str],
index_urls: list = None,
python_version: int = 3,
exclude_packages: set = None,
transitive: bool = True,
subgraph_check_api: str = None,
) -> dict:
assert python_version in (2, 3), "Unknown Python version"
if subgraph_check_api and not transitive:
_LOGGER.error("The check against subgraph API cannot be done if no transitive dependencies are resolved")
sys.exit(2)
python_bin = "python3" if python_version == 3 else "python2"
run_command("virtualenv -p python3 venv")
python_bin = "venv/bin/" + python_bin
run_command("{} -m pip install pipdeptree".format(python_bin))
environment_details = _get_environment_details(python_bin)
result = {"tree": [], "errors": [], "unparsed": [], "unresolved": [], "environment": environment_details}
all_solvers = []
for index_url in index_urls:
source = Source(index_url)
all_solvers.append(PythonSolver(fetcher_kwargs={"source": source}))
for solver in all_solvers:
solver_result = _do_resolve_index(
python_bin=python_bin,
solver=solver,
all_solvers=all_solvers,
requirements=requirements,
exclude_packages=exclude_packages,
transitive=transitive,
subgraph_check_api=subgraph_check_api,
)
result["tree"].extend(solver_result["tree"])
result["errors"].extend(solver_result["errors"])
result["unparsed"].extend(solver_result["unparsed"])
result["unresolved"].extend(solver_result["unresolved"])
return result
|
Resolve given requirements for the given Python version.
|
train
|
https://github.com/thoth-station/solver/blob/de9bd6e744cb4d5f70320ba77d6875ccb8b876c4/thoth/solver/python/python.py#L356-L401
|
[
"def _get_environment_details(python_bin: str) -> list:\n \"\"\"Get information about packages in environment where packages get installed.\"\"\"\n cmd = \"{} -m pipdeptree --json\".format(python_bin)\n output = run_command(cmd, is_json=True).stdout\n return [_create_entry(entry) for entry in output]\n",
"def _do_resolve_index(\n python_bin: str,\n solver: PythonSolver,\n *,\n all_solvers: typing.List[PythonSolver],\n requirements: typing.List[str],\n exclude_packages: set = None,\n transitive: bool = True,\n subgraph_check_api: str = None,\n) -> dict:\n \"\"\"Perform resolution of requirements against the given solver.\"\"\"\n index_url = solver.release_fetcher.index_url\n source = solver.release_fetcher.source\n\n packages_seen = set()\n packages = []\n errors = []\n unresolved = []\n unparsed = []\n exclude_packages = exclude_packages or {}\n queue = deque()\n\n for requirement in requirements:\n _LOGGER.debug(\"Parsing requirement %r\", requirement)\n try:\n dependency = PythonDependencyParser.parse_python(requirement)\n except Exception as exc:\n unparsed.append({\"requirement\": requirement, \"details\": str(exc)})\n continue\n\n if dependency.name in exclude_packages:\n continue\n\n version_spec = _get_dependency_specification(dependency.spec)\n resolved_versions = _resolve_versions(solver, source, dependency.name, version_spec)\n if not resolved_versions:\n _LOGGER.warning(\"No versions were resolved for dependency %r in version %r\", dependency.name, version_spec)\n unresolved.append({\"package_name\": dependency.name, \"version_spec\": version_spec, \"index\": index_url})\n else:\n for version in resolved_versions:\n entry = (dependency.name, version)\n packages_seen.add(entry)\n queue.append(entry)\n\n while queue:\n package_name, package_version = queue.pop()\n _LOGGER.info(\"Using index %r to discover package %r in version %r\", index_url, package_name, package_version)\n try:\n with _install_requirement(python_bin, package_name, package_version, index_url):\n package_info = _pipdeptree(python_bin, package_name, warn=True)\n except CommandError as exc:\n _LOGGER.debug(\n \"There was an error during package %r in version %r discovery from %r: %s\",\n package_name,\n package_version,\n index_url,\n exc,\n )\n errors.append(\n {\n \"package_name\": 
package_name,\n \"index\": index_url,\n \"version\": package_version,\n \"type\": \"command_error\",\n \"details\": exc.to_dict(),\n }\n )\n continue\n\n if package_info is None:\n errors.append(\n {\n \"package_name\": package_name,\n \"index\": index_url,\n \"version\": package_version,\n \"type\": \"not_site_package\",\n \"details\": {\n \"message\": \"Failed to get information about installed package, probably not site package\"\n },\n }\n )\n continue\n\n if package_info[\"package\"][\"installed_version\"] != package_version:\n _LOGGER.warning(\n \"Requested to install version %r of package %r, but installed version is %r, error is not fatal\",\n package_version,\n package_name,\n package_info[\"package\"][\"installed_version\"],\n )\n\n if package_info[\"package\"][\"package_name\"] != package_name:\n _LOGGER.warning(\n \"Requested to install package %r, but installed package name is %r, error is not fatal\",\n package_name,\n package_info[\"package\"][\"package_name\"],\n )\n\n entry = _create_entry(package_info, source)\n packages.append(entry)\n\n for dependency in entry[\"dependencies\"]:\n dependency_name, dependency_range = dependency[\"package_name\"], dependency[\"required_version\"]\n dependency[\"resolved_versions\"] = []\n\n for dep_solver in all_solvers:\n _LOGGER.info(\n \"Resolving dependency versions for %r with range %r from %r\",\n dependency_name,\n dependency_range,\n dep_solver.release_fetcher.index_url,\n )\n resolved_versions = _resolve_versions(\n dep_solver, dep_solver.release_fetcher.source, dependency_name, dependency_range\n )\n _LOGGER.debug(\n \"Resolved versions for package %r with range specifier %r: %s\",\n dependency_name,\n dependency_range,\n resolved_versions,\n )\n dependency[\"resolved_versions\"].append(\n {\"versions\": resolved_versions, \"index\": dep_solver.release_fetcher.index_url}\n )\n\n if not transitive:\n continue\n\n for version in resolved_versions:\n # Did we check this package already - do not check 
indexes, we manually insert them.\n seen_entry = (dependency_name, version)\n if seen_entry not in packages_seen and (\n not subgraph_check_api\n or (\n subgraph_check_api\n and _should_resolve_subgraph(subgraph_check_api, dependency_name, version, index_url)\n )\n ):\n _LOGGER.debug(\n \"Adding package %r in version %r for next resolution round\", dependency_name, version\n )\n packages_seen.add(seen_entry)\n queue.append((dependency_name, version))\n\n return {\"tree\": packages, \"errors\": errors, \"unparsed\": unparsed, \"unresolved\": unresolved}\n"
] |
#!/usr/bin/env python3
# thoth-solver
# Copyright(C) 2018, 2019 Fridolin Pokorny
#
# This program is free software: you can redistribute it and / or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Dependency requirements solving for Python ecosystem."""
import sys
from collections import deque
from contextlib import contextmanager
import logging
import typing
from shlex import quote
from urllib.parse import urlparse
import requests
from thoth.analyzer import CommandError
from thoth.analyzer import run_command
from thoth.python import Source
from thoth.python.exceptions import NotFound
from .python_solver import PythonDependencyParser
from .python_solver import PythonSolver
_LOGGER = logging.getLogger(__name__)
def _create_entry(entry: dict, source: Source = None) -> dict:
"""Filter and normalize the output of pipdeptree entry."""
entry["package_name"] = entry["package"].pop("package_name")
entry["package_version"] = entry["package"].pop("installed_version")
if source:
entry["index_url"] = source.url
entry["sha256"] = []
for item in source.get_package_hashes(entry["package_name"], entry["package_version"]):
entry["sha256"].append(item["sha256"])
entry.pop("package")
for dependency in entry["dependencies"]:
dependency.pop("key", None)
dependency.pop("installed_version", None)
return entry
def _get_environment_details(python_bin: str) -> list:
"""Get information about packages in environment where packages get installed."""
cmd = "{} -m pipdeptree --json".format(python_bin)
output = run_command(cmd, is_json=True).stdout
return [_create_entry(entry) for entry in output]
def _should_resolve_subgraph(subgraph_check_api: str, package_name: str, package_version: str, index_url: str) -> bool:
"""Ask the given subgraph check API if the given package in the given version should be included in the resolution.
This subgraph resolving avoidence serves two purposes - we don't need to
resolve dependency subgraphs that were already analyzed and we also avoid
analyzing of "core" packages (like setuptools) where not needed as they
can break installation environment.
"""
_LOGGER.info(
"Checking if the given dependency subgraph for package %r in version %r from index %r should be resolved",
package_name,
package_version,
index_url,
)
response = requests.get(
subgraph_check_api,
params={"package_name": package_name, "package_version": package_version, "index_url": index_url},
)
if response.status_code == 200:
return True
elif response.status_code == 208:
# This is probably not the correct HTTP status code to be used here, but which one should be used?
return False
response.raise_for_status()
raise ValueError(
"Unreachable code - subgraph check API responded with unknown HTTP status "
"code %s for package %r in version %r from index %r",
package_name,
package_version,
index_url,
)
@contextmanager
def _install_requirement(
python_bin: str, package: str, version: str = None, index_url: str = None, clean: bool = True
) -> None:
"""Install requirements specified using suggested pip binary."""
previous_version = _pipdeptree(python_bin, package)
try:
cmd = "{} -m pip install --force-reinstall --no-cache-dir --no-deps {}".format(python_bin, quote(package))
if version:
cmd += "=={}".format(quote(version))
if index_url:
cmd += ' --index-url "{}" '.format(quote(index_url))
# Supply trusted host by default so we do not get errors - it safe to
# do it here as package indexes are managed by Thoth.
trusted_host = urlparse(index_url).netloc
cmd += " --trusted-host {}".format(trusted_host)
_LOGGER.debug("Installing requirement %r in version %r", package, version)
run_command(cmd)
yield
finally:
if clean:
_LOGGER.debug("Removing installed package %r", package)
cmd = "{} -m pip uninstall --yes {}".format(python_bin, quote(package))
result = run_command(cmd, raise_on_error=False)
if result.return_code != 0:
_LOGGER.warning(
"Failed to restore previous environment by removing package %r (installed version %r), "
"the error is not fatal but can affect future actions: %s",
package,
version,
result.stderr,
)
_LOGGER.debug(
"Restoring previous environment setup after installation of %r (%s)", package, previous_version
)
if previous_version:
cmd = "{} -m pip install --force-reinstall --no-cache-dir --no-deps {}=={}".format(
python_bin, quote(package), quote(previous_version["package"]["installed_version"])
)
result = run_command(cmd, raise_on_error=False)
if result.return_code != 0:
_LOGGER.warning(
"Failed to restore previous environment for package %r (installed version %r), "
", the error is not fatal but can affect future actions (previous version: %r): %s",
package,
version,
previous_version,
result.stderr,
)
def _pipdeptree(python_bin, package_name: str = None, warn: bool = False) -> typing.Optional[dict]:
"""Get pip dependency tree by executing pipdeptree tool."""
cmd = "{} -m pipdeptree --json".format(python_bin)
_LOGGER.debug("Obtaining pip dependency tree using: %r", cmd)
output = run_command(cmd, is_json=True).stdout
if not package_name:
return output
for entry in output:
# In some versions pipdeptree does not work with --packages flag, do the logic on out own.
# TODO: we should probably do difference of reference this output and original environment
if entry["package"]["key"].lower() == package_name.lower():
return entry
# The given package was not found.
if warn:
_LOGGER.warning("Package %r was not found in pipdeptree output %r", package_name, output)
return None
def _get_dependency_specification(dep_spec: typing.List[tuple]) -> str:
"""Get string representation of dependency specification as provided by PythonDependencyParser."""
return ",".join(dep_range[0] + dep_range[1] for dep_range in dep_spec)
def _resolve_versions(solver: PythonSolver, source: Source, package_name: str, version_spec: str) -> typing.List[str]:
try:
resolved_versions = solver.solve([package_name + (version_spec or "")], all_versions=True)
except NotFound:
_LOGGER.info(
"No versions were resovled for %r with version specification %r for package index %r",
package_name,
version_spec,
source.url,
)
return []
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Failed to resolve versions for %r with version spec %r", package_name, version_spec)
return []
assert len(resolved_versions.keys()) == 1, "Resolution of one package version ended with multiple packages."
result = []
for item in list(resolved_versions.values())[0]:
result.append(item[0]) # We remove information about indexes.
return result
def _do_resolve_index(
python_bin: str,
solver: PythonSolver,
*,
all_solvers: typing.List[PythonSolver],
requirements: typing.List[str],
exclude_packages: set = None,
transitive: bool = True,
subgraph_check_api: str = None,
) -> dict:
"""Perform resolution of requirements against the given solver."""
index_url = solver.release_fetcher.index_url
source = solver.release_fetcher.source
packages_seen = set()
packages = []
errors = []
unresolved = []
unparsed = []
exclude_packages = exclude_packages or {}
queue = deque()
for requirement in requirements:
_LOGGER.debug("Parsing requirement %r", requirement)
try:
dependency = PythonDependencyParser.parse_python(requirement)
except Exception as exc:
unparsed.append({"requirement": requirement, "details": str(exc)})
continue
if dependency.name in exclude_packages:
continue
version_spec = _get_dependency_specification(dependency.spec)
resolved_versions = _resolve_versions(solver, source, dependency.name, version_spec)
if not resolved_versions:
_LOGGER.warning("No versions were resolved for dependency %r in version %r", dependency.name, version_spec)
unresolved.append({"package_name": dependency.name, "version_spec": version_spec, "index": index_url})
else:
for version in resolved_versions:
entry = (dependency.name, version)
packages_seen.add(entry)
queue.append(entry)
while queue:
package_name, package_version = queue.pop()
_LOGGER.info("Using index %r to discover package %r in version %r", index_url, package_name, package_version)
try:
with _install_requirement(python_bin, package_name, package_version, index_url):
package_info = _pipdeptree(python_bin, package_name, warn=True)
except CommandError as exc:
_LOGGER.debug(
"There was an error during package %r in version %r discovery from %r: %s",
package_name,
package_version,
index_url,
exc,
)
errors.append(
{
"package_name": package_name,
"index": index_url,
"version": package_version,
"type": "command_error",
"details": exc.to_dict(),
}
)
continue
if package_info is None:
errors.append(
{
"package_name": package_name,
"index": index_url,
"version": package_version,
"type": "not_site_package",
"details": {
"message": "Failed to get information about installed package, probably not site package"
},
}
)
continue
if package_info["package"]["installed_version"] != package_version:
_LOGGER.warning(
"Requested to install version %r of package %r, but installed version is %r, error is not fatal",
package_version,
package_name,
package_info["package"]["installed_version"],
)
if package_info["package"]["package_name"] != package_name:
_LOGGER.warning(
"Requested to install package %r, but installed package name is %r, error is not fatal",
package_name,
package_info["package"]["package_name"],
)
entry = _create_entry(package_info, source)
packages.append(entry)
for dependency in entry["dependencies"]:
dependency_name, dependency_range = dependency["package_name"], dependency["required_version"]
dependency["resolved_versions"] = []
for dep_solver in all_solvers:
_LOGGER.info(
"Resolving dependency versions for %r with range %r from %r",
dependency_name,
dependency_range,
dep_solver.release_fetcher.index_url,
)
resolved_versions = _resolve_versions(
dep_solver, dep_solver.release_fetcher.source, dependency_name, dependency_range
)
_LOGGER.debug(
"Resolved versions for package %r with range specifier %r: %s",
dependency_name,
dependency_range,
resolved_versions,
)
dependency["resolved_versions"].append(
{"versions": resolved_versions, "index": dep_solver.release_fetcher.index_url}
)
if not transitive:
continue
for version in resolved_versions:
# Did we check this package already - do not check indexes, we manually insert them.
seen_entry = (dependency_name, version)
if seen_entry not in packages_seen and (
not subgraph_check_api
or (
subgraph_check_api
and _should_resolve_subgraph(subgraph_check_api, dependency_name, version, index_url)
)
):
_LOGGER.debug(
"Adding package %r in version %r for next resolution round", dependency_name, version
)
packages_seen.add(seen_entry)
queue.append((dependency_name, version))
return {"tree": packages, "errors": errors, "unparsed": unparsed, "unresolved": unresolved}
|
thoth-station/solver
|
thoth/solver/python/python_solver.py
|
PythonReleasesFetcher.fetch_releases
|
python
|
def fetch_releases(self, package_name):
package_name = self.source.normalize_package_name(package_name)
releases = self.source.get_package_versions(package_name)
releases_with_index_url = [(item, self.index_url) for item in releases]
return package_name, releases_with_index_url
|
Fetch package and index_url for a package_name.
|
train
|
https://github.com/thoth-station/solver/blob/de9bd6e744cb4d5f70320ba77d6875ccb8b876c4/thoth/solver/python/python_solver.py#L49-L54
| null |
class PythonReleasesFetcher(ReleasesFetcher):
"""A releases fetcher based on PEP compatible simple API (also supporting Warehouse API)."""
def __init__(self, source: Source):
"""Initialize an instance of this class."""
self.source = source
@property
def index_url(self):
"""Get URL to package source index from where releases are fetched."""
return self.source.url
|
thoth-station/solver
|
thoth/solver/python/python_solver.py
|
PythonDependencyParser.parse_python
|
python
|
def parse_python(spec): # Ignore PyDocStyleBear
def _extract_op_version(spec):
# https://www.python.org/dev/peps/pep-0440/#compatible-release
if spec.operator == "~=":
version = spec.version.split(".")
if len(version) in {2, 3, 4}:
if len(version) in {3, 4}:
del version[-1] # will increase the last but one in next line
version[-1] = str(int(version[-1]) + 1)
else:
raise ValueError("%r must not be used with %r" % (spec.operator, spec.version))
return [(">=", spec.version), ("<", ".".join(version))]
# Trailing .* is permitted per
# https://www.python.org/dev/peps/pep-0440/#version-matching
elif spec.operator == "==" and spec.version.endswith(".*"):
try:
result = check_output(["/usr/bin/semver-ranger", spec.version], universal_newlines=True).strip()
gte, lt = result.split()
return [(">=", gte.lstrip(">=")), ("<", lt.lstrip("<"))]
except ValueError:
_LOGGER.warning("couldn't resolve ==%s", spec.version)
return spec.operator, spec.version
# https://www.python.org/dev/peps/pep-0440/#arbitrary-equality
# Use of this operator is heavily discouraged, so just convert it to 'Version matching'
elif spec.operator == "===":
return "==", spec.version
else:
return spec.operator, spec.version
def _get_pip_spec(requirements):
"""There is no `specs` field In Pip 8+, take info from `specifier` field."""
if hasattr(requirements, "specs"):
return requirements.specs
elif hasattr(requirements, "specifier"):
specs = [_extract_op_version(spec) for spec in requirements.specifier]
if len(specs) == 0:
# TODO: I'm not sure with this one
# we should probably return None instead and let pip deal with this
specs = [(">=", "0.0.0")]
return specs
_LOGGER.info("Parsing dependency %r", spec)
# create a temporary file and store the spec there since
# `parse_requirements` requires a file
with NamedTemporaryFile(mode="w+", suffix="pysolve") as f:
f.write(spec)
f.flush()
parsed = parse_requirements(f.name, session=f.name)
dependency = [Dependency(x.name, _get_pip_spec(x.req)) for x in parsed].pop()
return dependency
|
Parse PyPI specification of a single dependency.
:param spec: str, for example "Django>=1.5,<1.8"
:return: [Django [[('>=', '1.5'), ('<', '1.8')]]]
|
train
|
https://github.com/thoth-station/solver/blob/de9bd6e744cb4d5f70320ba77d6875ccb8b876c4/thoth/solver/python/python_solver.py#L66-L122
| null |
class PythonDependencyParser(DependencyParser):
"""Python Dependency parsing."""
@staticmethod
def parse(self, specs):
"""Parse specs."""
return [self.parse_python(s) for s in specs]
@staticmethod
def compose(deps):
"""Compose deps."""
return DependencyParser.compose_sep(deps, ",")
@staticmethod
def restrict_versions(deps):
"""Not implemented."""
return deps # TODO
|
pysal/mapclassify
|
mapclassify/classifiers.py
|
headTail_breaks
|
python
|
def headTail_breaks(values, cuts):
values = np.array(values)
mean = np.mean(values)
cuts.append(mean)
if len(values) > 1:
return headTail_breaks(values[values >= mean], cuts)
return cuts
|
head tail breaks helper function
|
train
|
https://github.com/pysal/mapclassify/blob/5b22ec33f5802becf40557614d90cd38efa1676e/mapclassify/classifiers.py#L35-L44
|
[
"def headTail_breaks(values, cuts):\n \"\"\"\n head tail breaks helper function\n \"\"\"\n values = np.array(values)\n mean = np.mean(values)\n cuts.append(mean)\n if len(values) > 1:\n return headTail_breaks(values[values >= mean], cuts)\n return cuts\n"
] |
"""
A module of classification schemes for choropleth mapping.
"""
__author__ = "Sergio J. Rey"
__all__ = [
'Map_Classifier', 'quantile', 'Box_Plot', 'Equal_Interval', 'Fisher_Jenks',
'Fisher_Jenks_Sampled', 'Jenks_Caspall', 'Jenks_Caspall_Forced',
'Jenks_Caspall_Sampled', 'Max_P_Classifier', 'Maximum_Breaks',
'Natural_Breaks', 'Quantiles', 'Percentiles', 'Std_Mean', 'User_Defined',
'gadf', 'K_classifiers', 'HeadTail_Breaks', 'CLASSIFIERS'
]
CLASSIFIERS = ('Box_Plot', 'Equal_Interval', 'Fisher_Jenks',
'Fisher_Jenks_Sampled', 'HeadTail_Breaks', 'Jenks_Caspall',
'Jenks_Caspall_Forced', 'Jenks_Caspall_Sampled',
'Max_P_Classifier', 'Maximum_Breaks', 'Natural_Breaks',
'Quantiles', 'Percentiles', 'Std_Mean', 'User_Defined')
K = 5 # default number of classes in any map scheme with this as an argument
import numpy as np
import scipy.stats as stats
import scipy as sp
import copy
from scipy.cluster.vq import kmeans as KMEANS
from warnings import warn as Warn
try:
from numba import jit
except ImportError:
def jit(func):
return func
def quantile(y, k=4):
"""
Calculates the quantiles for an array
Parameters
----------
y : array
(n,1), values to classify
k : int
number of quantiles
Returns
-------
q : array
(n,1), quantile values
Examples
--------
>>> import numpy as np
>>> import mapclassify as mc
>>> x = np.arange(1000)
>>> mc.classifiers.quantile(x)
array([249.75, 499.5 , 749.25, 999. ])
>>> mc.classifiers.quantile(x, k = 3)
array([333., 666., 999.])
Note that if there are enough ties that the quantile values repeat, we
collapse to pseudo quantiles in which case the number of classes will be
less than k
>>> x = [1.0] * 100
>>> x.extend([3.0] * 40)
>>> len(x)
140
>>> y = np.array(x)
>>> mc.classifiers.quantile(y)
array([1., 3.])
"""
w = 100. / k
p = np.arange(w, 100 + w, w)
if p[-1] > 100.0:
p[-1] = 100.0
q = np.array([stats.scoreatpercentile(y, pct) for pct in p])
q = np.unique(q)
k_q = len(q)
if k_q < k:
Warn('Warning: Not enough unique values in array to form k classes',
UserWarning)
Warn('Warning: setting k to %d' % k_q, UserWarning)
return q
def binC(y, bins):
"""
Bin categorical/qualitative data
Parameters
----------
y : array
(n,q), categorical values
bins : array
(k,1), unique values associated with each bin
Return
------
b : array
(n,q), bin membership, values between 0 and k-1
Examples
--------
>>> import numpy as np
>>> import mapclassify as mc
>>> np.random.seed(1)
>>> x = np.random.randint(2, 8, (10, 3))
>>> bins = list(range(2, 8))
>>> x
array([[7, 5, 6],
[2, 3, 5],
[7, 2, 2],
[3, 6, 7],
[6, 3, 4],
[6, 7, 4],
[6, 5, 6],
[4, 6, 7],
[4, 6, 3],
[3, 2, 7]])
>>> y = mc.classifiers.binC(x, bins)
>>> y
array([[5, 3, 4],
[0, 1, 3],
[5, 0, 0],
[1, 4, 5],
[4, 1, 2],
[4, 5, 2],
[4, 3, 4],
[2, 4, 5],
[2, 4, 1],
[1, 0, 5]])
"""
if np.ndim(y) == 1:
k = 1
n = np.shape(y)[0]
else:
n, k = np.shape(y)
b = np.zeros((n, k), dtype='int')
for i, bin in enumerate(bins):
b[np.nonzero(y == bin)] = i
# check for non-binned items and warn if needed
vals = set(y.flatten())
for val in vals:
if val not in bins:
Warn('value not in bin: {}'.format(val), UserWarning)
Warn('bins: {}'.format(bins), UserWarning)
return b
def bin(y, bins):
"""
bin interval/ratio data
Parameters
----------
y : array
(n,q), values to bin
bins : array
(k,1), upper bounds of each bin (monotonic)
Returns
-------
b : array
(n,q), values of values between 0 and k-1
Examples
--------
>>> import numpy as np
>>> import mapclassify as mc
>>> np.random.seed(1)
>>> x = np.random.randint(2, 20, (10, 3))
>>> bins = [10, 15, 20]
>>> b = mc.classifiers.bin(x, bins)
>>> x
array([[ 7, 13, 14],
[10, 11, 13],
[ 7, 17, 2],
[18, 3, 14],
[ 9, 15, 8],
[ 7, 13, 12],
[16, 6, 11],
[19, 2, 15],
[11, 11, 9],
[ 3, 2, 19]])
>>> b
array([[0, 1, 1],
[0, 1, 1],
[0, 2, 0],
[2, 0, 1],
[0, 1, 0],
[0, 1, 1],
[2, 0, 1],
[2, 0, 1],
[1, 1, 0],
[0, 0, 2]])
"""
if np.ndim(y) == 1:
k = 1
n = np.shape(y)[0]
else:
n, k = np.shape(y)
b = np.zeros((n, k), dtype='int')
i = len(bins)
if type(bins) != list:
bins = bins.tolist()
binsc = copy.copy(bins)
while binsc:
i -= 1
c = binsc.pop(-1)
b[np.nonzero(y <= c)] = i
return b
def bin1d(x, bins):
"""
Place values of a 1-d array into bins and determine counts of values in
each bin
Parameters
----------
x : array
(n, 1), values to bin
bins : array
(k,1), upper bounds of each bin (monotonic)
Returns
-------
binIds : array
1-d array of integer bin Ids
counts : int
number of elements of x falling in each bin
Examples
--------
>>> import numpy as np
>>> import mapclassify as mc
>>> x = np.arange(100, dtype = 'float')
>>> bins = [25, 74, 100]
>>> binIds, counts = mc.classifiers.bin1d(x, bins)
>>> binIds
array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2])
>>> counts
array([26, 49, 25])
"""
left = [-float("inf")]
left.extend(bins[0:-1])
right = bins
cuts = list(zip(left, right))
k = len(bins)
binIds = np.zeros(x.shape, dtype='int')
while cuts:
k -= 1
l, r = cuts.pop(-1)
binIds += (x > l) * (x <= r) * k
counts = np.bincount(binIds, minlength=len(bins))
return (binIds, counts)
def load_example():
"""
Helper function for doc tests
"""
from .datasets import calemp
return calemp.load()
def _kmeans(y, k=5):
"""
Helper function to do kmeans in one dimension
"""
y = y * 1. # KMEANS needs float or double dtype
centroids = KMEANS(y, k)[0]
centroids.sort()
try:
class_ids = np.abs(y - centroids).argmin(axis=1)
except:
class_ids = np.abs(y[:, np.newaxis] - centroids).argmin(axis=1)
uc = np.unique(class_ids)
cuts = np.array([y[class_ids == c].max() for c in uc])
y_cent = np.zeros_like(y)
for c in uc:
y_cent[class_ids == c] = centroids[c]
diffs = y - y_cent
diffs *= diffs
return class_ids, cuts, diffs.sum(), centroids
def natural_breaks(values, k=5):
"""
natural breaks helper function
Jenks natural breaks is kmeans in one dimension
"""
values = np.array(values)
uv = np.unique(values)
uvk = len(uv)
if uvk < k:
Warn('Warning: Not enough unique values in array to form k classes',
UserWarning)
Warn('Warning: setting k to %d' % uvk, UserWarning)
k = uvk
kres = _kmeans(values, k)
sids = kres[-1] # centroids
fit = kres[-2]
class_ids = kres[0]
cuts = kres[1]
return (sids, class_ids, fit, cuts)
@jit
def _fisher_jenks_means(values, classes=5, sort=True):
"""
Jenks Optimal (Natural Breaks) algorithm implemented in Python.
Notes
-----
The original Python code comes from here:
http://danieljlewis.org/2010/06/07/jenks-natural-breaks-algorithm-in-python/
and is based on a JAVA and Fortran code available here:
https://stat.ethz.ch/pipermail/r-sig-geo/2006-March/000811.html
Returns class breaks such that classes are internally homogeneous while
assuring heterogeneity among classes.
"""
if sort:
values.sort()
n_data = len(values)
mat1 = np.zeros((n_data + 1, classes + 1), dtype=np.int32)
mat2 = np.zeros((n_data + 1, classes + 1), dtype=np.float32)
mat1[1, 1:] = 1
mat2[2:, 1:] = np.inf
v = np.float32(0)
for l in range(2, len(values) + 1):
s1 = np.float32(0)
s2 = np.float32(0)
w = np.float32(0)
for m in range(1, l + 1):
i3 = l - m + 1
val = np.float32(values[i3 - 1])
s2 += val * val
s1 += val
w += np.float32(1)
v = s2 - (s1 * s1) / w
i4 = i3 - 1
if i4 != 0:
for j in range(2, classes + 1):
if mat2[l, j] >= (v + mat2[i4, j - 1]):
mat1[l, j] = i3
mat2[l, j] = v + mat2[i4, j - 1]
mat1[l, 1] = 1
mat2[l, 1] = v
k = len(values)
kclass = np.zeros(classes + 1, dtype=values.dtype)
kclass[classes] = values[len(values) - 1]
kclass[0] = values[0]
for countNum in range(classes, 1, -1):
pivot = mat1[k, countNum]
id = int(pivot - 2)
kclass[countNum - 1] = values[id]
k = int(pivot - 1)
return kclass
class Map_Classifier(object):
"""
Abstract class for all map classifications :cite:`Slocum_2009`
For an array :math:`y` of :math:`n` values, a map classifier places each
value :math:`y_i` into one of :math:`k` mutually exclusive and exhaustive
classes. Each classifer defines the classes based on different criteria,
but in all cases the following hold for the classifiers in PySAL:
.. math:: C_j^l < y_i \le C_j^u \ \forall i \in C_j
where :math:`C_j` denotes class :math:`j` which has lower bound
:math:`C_j^l` and upper bound :math:`C_j^u`.
Map Classifiers Supported
* :class:`mapclassify.classifiers.Box_Plot`
* :class:`mapclassify.classifiers.Equal_Interval`
* :class:`mapclassify.classifiers.Fisher_Jenks`
* :class:`mapclassify.classifiers.Fisher_Jenks_Sampled`
* :class:`mapclassify.classifiers.HeadTail_Breaks`
* :class:`mapclassify.classifiers.Jenks_Caspall`
* :class:`mapclassify.classifiers.Jenks_Caspall_Forced`
* :class:`mapclassify.classifiers.Jenks_Caspall_Sampled`
* :class:`mapclassify.classifiers.Max_P_Classifier`
* :class:`mapclassify.classifiers.Maximum_Breaks`
* :class:`mapclassify.classifiers.Natural_Breaks`
* :class:`mapclassify.classifiers.Quantiles`
* :class:`mapclassify.classifiers.Percentiles`
* :class:`mapclassify.classifiers.Std_Mean`
* :class:`mapclassify.classifiers.User_Defined`
Utilities:
In addition to the classifiers, there are several utility functions that
can be used to evaluate the properties of a specific classifier,
or for automatic selection of a classifier and
number of classes.
* :func:`mapclassify.classifiers.gadf`
* :class:`mapclassify.classifiers.K_classifiers`
"""
def __init__(self, y):
y = np.asarray(y).flatten()
self.name = 'Map Classifier'
self.y = y
self._classify()
self._summary()
def _summary(self):
yb = self.yb
self.classes = [np.nonzero(yb == c)[0].tolist() for c in range(self.k)]
self.tss = self.get_tss()
self.adcm = self.get_adcm()
self.gadf = self.get_gadf()
def _classify(self):
self._set_bins()
self.yb, self.counts = bin1d(self.y, self.bins)
def _update(self, data, *args, **kwargs):
"""
The only thing that *should* happen in this function is
1. input sanitization for pandas
2. classification/reclassification.
Using their __init__ methods, all classifiers can re-classify given
different input parameters or additional data.
If you've got a cleverer updating equation than the intial estimation
equation, remove the call to self.__init__ below and replace it with
the updating function.
"""
if data is not None:
data = np.asarray(data).flatten()
data = np.append(data.flatten(), self.y)
else:
data = self.y
self.__init__(data, *args, **kwargs)
@classmethod
def make(cls, *args, **kwargs):
"""
Configure and create a classifier that will consume data and produce
classifications, given the configuration options specified by this
function.
Note that this like a *partial application* of the relevant class
constructor. `make` creates a function that returns classifications; it
does not actually do the classification.
If you want to classify data directly, use the appropriate class
constructor, like Quantiles, Max_Breaks, etc.
If you *have* a classifier object, but want to find which bins new data
falls into, use find_bin.
Parameters
----------
*args : required positional arguments
all positional arguments required by the classifier,
excluding the input data.
rolling : bool
a boolean configuring the outputted classifier to use
a rolling classifier rather than a new classifier for
each input. If rolling, this adds the current data to
all of the previous data in the classifier, and
rebalances the bins, like a running median
computation.
return_object : bool
a boolean configuring the outputted classifier to
return the classifier object or not
return_bins : bool
a boolean configuring the outputted classifier to
return the bins/breaks or not
return_counts : bool
a boolean configuring the outputted classifier to
return the histogram of objects falling into each bin
or not
Returns
-------
A function that consumes data and returns their bins (and object,
bins/breaks, or counts, if requested).
Note
----
This is most useful when you want to run a classifier many times
with a given configuration, such as when classifying many columns of an
array or dataframe using the same configuration.
Examples
--------
>>> import libpysal as ps
>>> import mapclassify as mc
>>> import geopandas as gpd
>>> df = gpd.read_file(ps.examples.get_path('columbus.dbf'))
>>> classifier = mc.Quantiles.make(k=9)
>>> cl = df[['HOVAL', 'CRIME', 'INC']].apply(classifier)
>>> cl["HOVAL"].values[:10]
array([8, 7, 2, 4, 1, 3, 8, 5, 7, 8])
>>> cl["CRIME"].values[:10]
array([0, 1, 3, 4, 6, 2, 0, 5, 3, 4])
>>> cl["INC"].values[:10]
array([7, 8, 5, 0, 3, 5, 0, 3, 6, 4])
>>> import pandas as pd; from numpy import linspace as lsp
>>> data = [lsp(3,8,num=10), lsp(10, 0, num=10), lsp(-5, 15, num=10)]
>>> data = pd.DataFrame(data).T
>>> data
0 1 2
0 3.000000 10.000000 -5.000000
1 3.555556 8.888889 -2.777778
2 4.111111 7.777778 -0.555556
3 4.666667 6.666667 1.666667
4 5.222222 5.555556 3.888889
5 5.777778 4.444444 6.111111
6 6.333333 3.333333 8.333333
7 6.888889 2.222222 10.555556
8 7.444444 1.111111 12.777778
9 8.000000 0.000000 15.000000
>>> data.apply(mc.Quantiles.make(rolling=True))
0 1 2
0 0 4 0
1 0 4 0
2 1 4 0
3 1 3 0
4 2 2 1
5 2 1 2
6 3 0 4
7 3 0 4
8 4 0 4
9 4 0 4
>>> dbf = ps.io.open(ps.examples.get_path('baltim.dbf'))
>>> data = dbf.by_col_array('PRICE', 'LOTSZ', 'SQFT')
>>> my_bins = [1, 10, 20, 40, 80]
>>> cl = [mc.User_Defined.make(bins=my_bins)(a) for a in data.T]
>>> len(cl)
3
>>> cl[0][:10]
array([4, 5, 5, 5, 4, 4, 5, 4, 4, 5])
"""
# only flag overrides return flag
to_annotate = copy.deepcopy(kwargs)
return_object = kwargs.pop('return_object', False)
return_bins = kwargs.pop('return_bins', False)
return_counts = kwargs.pop('return_counts', False)
rolling = kwargs.pop('rolling', False)
if rolling:
# just initialize a fake classifier
data = list(range(10))
cls_instance = cls(data, *args, **kwargs)
# and empty it, since we'll be using the update
cls_instance.y = np.array([])
else:
cls_instance = None
# wrap init in a closure to make a consumer.
# Qc Na: "Objects/Closures are poor man's Closures/Objects"
def classifier(data, cls_instance=cls_instance):
if rolling:
cls_instance.update(data, inplace=True, **kwargs)
yb = cls_instance.find_bin(data)
else:
cls_instance = cls(data, *args, **kwargs)
yb = cls_instance.yb
outs = [yb, None, None, None]
outs[1] = cls_instance if return_object else None
outs[2] = cls_instance.bins if return_bins else None
outs[3] = cls_instance.counts if return_counts else None
outs = [a for a in outs if a is not None]
if len(outs) == 1:
return outs[0]
else:
return outs
# for debugging/jic, keep around the kwargs.
# in future, we might want to make this a thin class, so that we can
# set a custom repr. Call the class `Binner` or something, that's a
# pre-configured Classifier that just consumes data, bins it, &
# possibly updates the bins.
classifier._options = to_annotate
return classifier
def update(self, y=None, inplace=False, **kwargs):
"""
Add data or change classification parameters.
Parameters
----------
y : array
(n,1) array of data to classify
inplace : bool
whether to conduct the update in place or to return a copy
estimated from the additional specifications.
Additional parameters provided in **kwargs are passed to the init
function of the class. For documentation, check the class constructor.
"""
kwargs.update({'k': kwargs.pop('k', self.k)})
if inplace:
self._update(y, **kwargs)
else:
new = copy.deepcopy(self)
new._update(y, **kwargs)
return new
def __str__(self):
st = self._table_string()
return st
def __repr__(self):
return self._table_string()
def __call__(self, *args, **kwargs):
"""
This will allow the classifier to be called like it's a function.
Whether or not we want to make this be "find_bin" or "update" is a
design decision.
I like this as find_bin, since a classifier's job should be to classify
the data given to it using the rules estimated from the `_classify()`
function.
"""
return self.find_bin(*args)
def get_tss(self):
"""
Total sum of squares around class means
Returns sum of squares over all class means
"""
tss = 0
for class_def in self.classes:
if len(class_def) > 0:
yc = self.y[class_def]
css = yc - yc.mean()
css *= css
tss += sum(css)
return tss
def _set_bins(self):
pass
def get_adcm(self):
"""
Absolute deviation around class median (ADCM).
Calculates the absolute deviations of each observation about its class
median as a measure of fit for the classification method.
Returns sum of ADCM over all classes
"""
adcm = 0
for class_def in self.classes:
if len(class_def) > 0:
yc = self.y[class_def]
yc_med = np.median(yc)
ycd = np.abs(yc - yc_med)
adcm += sum(ycd)
return adcm
def get_gadf(self):
"""
Goodness of absolute deviation of fit
"""
adam = (np.abs(self.y - np.median(self.y))).sum()
gadf = 1 - self.adcm / adam
return gadf
def _table_string(self, width=12, decimal=3):
fmt = ".%df" % decimal
fmt = "%" + fmt
largest = max([len(fmt % i) for i in self.bins])
width = largest
fmt = "%d.%df" % (width, decimal)
fmt = "%" + fmt
h1 = "Lower"
h1 = h1.center(largest)
h2 = " "
h2 = h2.center(10)
h3 = "Upper"
h3 = h3.center(largest + 1)
largest = "%d" % max(self.counts)
largest = len(largest) + 15
h4 = "Count"
h4 = h4.rjust(largest)
table = []
header = h1 + h2 + h3 + h4
table.append(header)
table.append("=" * len(header))
for i, up in enumerate(self.bins):
if i == 0:
left = " " * width
left += " x[i] <= "
else:
left = fmt % self.bins[i - 1]
left += " < x[i] <= "
right = fmt % self.bins[i]
row = left + right
cnt = "%d" % self.counts[i]
cnt = cnt.rjust(largest)
row += cnt
table.append(row)
name = self.name
top = name.center(len(row))
table.insert(0, top)
table.insert(1, " ")
table = "\n".join(table)
return table
def find_bin(self, x):
"""
Sort input or inputs according to the current bin estimate
Parameters
----------
x : array or numeric
a value or array of values to fit within the estimated
bins
Returns
-------
a bin index or array of bin indices that classify the input into one of
the classifiers' bins.
Note that this differs from similar functionality in
numpy.digitize(x, classi.bins, right=True).
This will always provide the closest bin, so data "outside" the classifier,
above and below the max/min breaks, will be classified into the nearest bin.
numpy.digitize returns k+1 for data greater than the greatest bin, but retains 0
for data below the lowest bin.
"""
x = np.asarray(x).flatten()
right = np.digitize(x, self.bins, right=True)
if right.max() == len(self.bins):
right[right == len(self.bins)] = len(self.bins) - 1
return right
class HeadTail_Breaks(Map_Classifier):
"""
Head/tail Breaks Map Classification for Heavy-tailed Distributions
Parameters
----------
y : array
(n,1), values to classify
Attributes
----------
yb : array
(n,1), bin ids for observations,
bins : array
(k,1), the upper bounds of each class
k : int
the number of classes
counts : array
(k,1), the number of observations falling in each class
Examples
--------
>>> import numpy as np
>>> import mapclassify as mc
>>> np.random.seed(10)
>>> cal = mc.load_example()
>>> htb = mc.HeadTail_Breaks(cal)
>>> htb.k
3
>>> htb.counts
array([50, 7, 1])
>>> htb.bins
array([ 125.92810345, 811.26 , 4111.45 ])
>>> np.random.seed(123456)
>>> x = np.random.lognormal(3, 1, 1000)
>>> htb = mc.HeadTail_Breaks(x)
>>> htb.bins
array([ 32.26204423, 72.50205622, 128.07150107, 190.2899093 ,
264.82847377, 457.88157946, 576.76046949])
>>> htb.counts
array([695, 209, 62, 22, 10, 1, 1])
Notes
-----
Head/tail Breaks is a relatively new classification method developed
for data with a heavy-tailed distribution.
Implementation based on contributions by Alessandra Sozzi <alessandra.sozzi@gmail.com>.
For theoretical details see :cite:`Jiang_2013`.
"""
def __init__(self, y):
Map_Classifier.__init__(self, y)
self.name = 'HeadTail_Breaks'
def _set_bins(self):
x = self.y.copy()
bins = []
bins = headTail_breaks(x, bins)
self.bins = np.array(bins)
self.k = len(self.bins)
class Equal_Interval(Map_Classifier):
"""
Equal Interval Classification
Parameters
----------
y : array
(n,1), values to classify
k : int
number of classes required
Attributes
----------
yb : array
(n,1), bin ids for observations,
each value is the id of the class the observation belongs to
yb[i] = j for j>=1 if bins[j-1] < y[i] <= bins[j], yb[i] = 0
otherwise
bins : array
(k,1), the upper bounds of each class
k : int
the number of classes
counts : array
(k,1), the number of observations falling in each class
Examples
--------
>>> import mapclassify as mc
>>> cal = mc.load_example()
>>> ei = mc.Equal_Interval(cal, k = 5)
>>> ei.k
5
>>> ei.counts
array([57, 0, 0, 0, 1])
>>> ei.bins
array([ 822.394, 1644.658, 2466.922, 3289.186, 4111.45 ])
Notes
-----
Intervals defined to have equal width:
.. math::
bins_j = min(y)+w*(j+1)
with :math:`w=\\frac{max(y)-min(j)}{k}`
"""
def __init__(self, y, k=K):
"""
see class docstring
"""
self.k = k
Map_Classifier.__init__(self, y)
self.name = 'Equal Interval'
def _set_bins(self):
y = self.y
k = self.k
max_y = max(y)
min_y = min(y)
rg = max_y - min_y
width = rg * 1. / k
cuts = np.arange(min_y + width, max_y + width, width)
if len(cuts) > self.k: # handle overshooting
cuts = cuts[0:k]
cuts[-1] = max_y
bins = cuts.copy()
self.bins = bins
class Percentiles(Map_Classifier):
"""
Percentiles Map Classification
Parameters
----------
y : array
attribute to classify
pct : array
percentiles default=[1,10,50,90,99,100]
Attributes
----------
yb : array
bin ids for observations (numpy array n x 1)
bins : array
the upper bounds of each class (numpy array k x 1)
k : int
the number of classes
counts : int
the number of observations falling in each class
(numpy array k x 1)
Examples
--------
>>> import mapclassify as mc
>>> cal = mc.load_example()
>>> p = mc.Percentiles(cal)
>>> p.bins
array([1.357000e-01, 5.530000e-01, 9.365000e+00, 2.139140e+02,
2.179948e+03, 4.111450e+03])
>>> p.counts
array([ 1, 5, 23, 23, 5, 1])
>>> p2 = mc.Percentiles(cal, pct = [50, 100])
>>> p2.bins
array([ 9.365, 4111.45 ])
>>> p2.counts
array([29, 29])
>>> p2.k
2
"""
def __init__(self, y, pct=[1, 10, 50, 90, 99, 100]):
self.pct = pct
Map_Classifier.__init__(self, y)
self.name = 'Percentiles'
def _set_bins(self):
y = self.y
pct = self.pct
self.bins = np.array([stats.scoreatpercentile(y, p) for p in pct])
self.k = len(self.bins)
def update(self, y=None, inplace=False, **kwargs):
"""
Add data or change classification parameters.
Parameters
----------
y : array
(n,1) array of data to classify
inplace : bool
whether to conduct the update in place or to return a copy
estimated from the additional specifications.
Additional parameters provided in **kwargs are passed to the init
function of the class. For documentation, check the class constructor.
"""
kwargs.update({'pct': kwargs.pop('pct', self.pct)})
if inplace:
self._update(y, **kwargs)
else:
new = copy.deepcopy(self)
new._update(y, **kwargs)
return new
class Box_Plot(Map_Classifier):
"""
Box_Plot Map Classification
Parameters
----------
y : array
attribute to classify
hinge : float
multiplier for IQR
Attributes
----------
yb : array
(n,1), bin ids for observations
bins : array
(n,1), the upper bounds of each class (monotonic)
k : int
the number of classes
counts : array
(k,1), the number of observations falling in each class
low_outlier_ids : array
indices of observations that are low outliers
high_outlier_ids : array
indices of observations that are high outliers
Notes
-----
The bins are set as follows::
bins[0] = q[0]-hinge*IQR
bins[1] = q[0]
bins[2] = q[1]
bins[3] = q[2]
bins[4] = q[2]+hinge*IQR
bins[5] = inf (see Notes)
where q is an array of the first three quartiles of y and
IQR=q[2]-q[0]
If q[2]+hinge*IQR > max(y) there will only be 5 classes and no high
outliers, otherwise, there will be 6 classes and at least one high
outlier.
Examples
--------
>>> import mapclassify as mc
>>> cal = mc.load_example()
>>> bp = mc.Box_Plot(cal)
>>> bp.bins
array([-5.287625e+01, 2.567500e+00, 9.365000e+00, 3.953000e+01,
9.497375e+01, 4.111450e+03])
>>> bp.counts
array([ 0, 15, 14, 14, 6, 9])
>>> bp.high_outlier_ids
array([ 0, 6, 18, 29, 33, 36, 37, 40, 42])
>>> cal[bp.high_outlier_ids].values
array([ 329.92, 181.27, 370.5 , 722.85, 192.05, 110.74, 4111.45,
317.11, 264.93])
>>> bx = mc.Box_Plot(np.arange(100))
>>> bx.bins
array([-49.5 , 24.75, 49.5 , 74.25, 148.5 ])
"""
def __init__(self, y, hinge=1.5):
"""
Parameters
----------
y : array (n,1)
attribute to classify
hinge : float
multiple of inter-quartile range (default=1.5)
"""
self.hinge = hinge
Map_Classifier.__init__(self, y)
self.name = 'Box Plot'
def _set_bins(self):
y = self.y
pct = [25, 50, 75, 100]
bins = [stats.scoreatpercentile(y, p) for p in pct]
iqr = bins[-2] - bins[0]
self.iqr = iqr
pivot = self.hinge * iqr
left_fence = bins[0] - pivot
right_fence = bins[-2] + pivot
if right_fence < bins[-1]:
bins.insert(-1, right_fence)
else:
bins[-1] = right_fence
bins.insert(0, left_fence)
self.bins = np.array(bins)
self.k = len(bins)
def _classify(self):
Map_Classifier._classify(self)
self.low_outlier_ids = np.nonzero(self.yb == 0)[0]
self.high_outlier_ids = np.nonzero(self.yb == 5)[0]
def update(self, y=None, inplace=False, **kwargs):
"""
Add data or change classification parameters.
Parameters
----------
y : array
(n,1) array of data to classify
inplace : bool
whether to conduct the update in place or to return a
copy estimated from the additional specifications.
Additional parameters provided in **kwargs are passed to the init
function of the class. For documentation, check the class constructor.
"""
kwargs.update({'hinge': kwargs.pop('hinge', self.hinge)})
if inplace:
self._update(y, **kwargs)
else:
new = copy.deepcopy(self)
new._update(y, **kwargs)
return new
class Quantiles(Map_Classifier):
"""
Quantile Map Classification
Parameters
----------
y : array
(n,1), values to classify
k : int
number of classes required
Attributes
----------
yb : array
(n,1), bin ids for observations,
each value is the id of the class the observation belongs to
yb[i] = j for j>=1 if bins[j-1] < y[i] <= bins[j], yb[i] = 0
otherwise
bins : array
(k,1), the upper bounds of each class
k : int
the number of classes
counts : array
(k,1), the number of observations falling in each class
Examples
--------
>>> import mapclassify as mc
>>> cal = mc.load_example()
>>> q = mc.Quantiles(cal, k = 5)
>>> q.bins
array([1.46400e+00, 5.79800e+00, 1.32780e+01, 5.46160e+01, 4.11145e+03])
>>> q.counts
array([12, 11, 12, 11, 12])
"""
def __init__(self, y, k=K):
self.k = k
Map_Classifier.__init__(self, y)
self.name = 'Quantiles'
def _set_bins(self):
y = self.y
k = self.k
self.bins = quantile(y, k=k)
class Std_Mean(Map_Classifier):
"""
Standard Deviation and Mean Map Classification
Parameters
----------
y : array
(n,1), values to classify
multiples : array
the multiples of the standard deviation to add/subtract from
the sample mean to define the bins, default=[-2,-1,1,2]
Attributes
----------
yb : array
(n,1), bin ids for observations,
bins : array
(k,1), the upper bounds of each class
k : int
the number of classes
counts : array
(k,1), the number of observations falling in each class
Examples
--------
>>> import mapclassify as mc
>>> cal = mc.load_example()
>>> st = mc.Std_Mean(cal)
>>> st.k
5
>>> st.bins
array([-967.36235382, -420.71712519, 672.57333208, 1219.21856072,
4111.45 ])
>>> st.counts
array([ 0, 0, 56, 1, 1])
>>>
>>> st3 = mc.Std_Mean(cal, multiples = [-3, -1.5, 1.5, 3])
>>> st3.bins
array([-1514.00758246, -694.03973951, 945.8959464 , 1765.86378936,
4111.45 ])
>>> st3.counts
array([ 0, 0, 57, 0, 1])
"""
def __init__(self, y, multiples=[-2, -1, 1, 2]):
self.multiples = multiples
Map_Classifier.__init__(self, y)
self.name = 'Std_Mean'
def _set_bins(self):
y = self.y
s = y.std(ddof=1)
m = y.mean()
cuts = [m + s * w for w in self.multiples]
y_max = y.max()
if cuts[-1] < y_max:
cuts.append(y_max)
self.bins = np.array(cuts)
self.k = len(cuts)
def update(self, y=None, inplace=False, **kwargs):
"""
Add data or change classification parameters.
Parameters
----------
y : array
(n,1) array of data to classify
inplace : bool
whether to conduct the update in place or to return a copy
estimated from the additional specifications.
Additional parameters provided in **kwargs are passed to the init
function of the class. For documentation, check the class constructor.
"""
kwargs.update({'multiples': kwargs.pop('multiples', self.multiples)})
if inplace:
self._update(y, **kwargs)
else:
new = copy.deepcopy(self)
new._update(y, **kwargs)
return new
class Maximum_Breaks(Map_Classifier):
    """
    Maximum Breaks Map Classification

    Parameters
    ----------
    y : array
        (n, 1), values to classify
    k : int
        number of classes required
    mindiff : float
        The minimum difference between class breaks

    Attributes
    ----------
    yb : array
        (n, 1), bin ids for observations
    bins : array
        (k, 1), the upper bounds of each class
    k : int
        the number of classes
    counts : array
        (k, 1), the number of observations falling in each class (numpy
        array k x 1)

    Examples
    --------
    >>> import mapclassify as mc
    >>> cal = mc.load_example()
    >>> mb = mc.Maximum_Breaks(cal, k = 5)
    >>> mb.k
    5
    >>> mb.bins
    array([ 146.005,  228.49 ,  546.675, 2417.15 , 4111.45 ])
    >>> mb.counts
    array([50,  2,  4,  1,  1])
    """

    def __init__(self, y, k=5, mindiff=0):
        self.k = k
        self.mindiff = mindiff
        Map_Classifier.__init__(self, y)
        self.name = 'Maximum_Breaks'

    def _set_bins(self):
        # Break classes at the k-1 widest gaps between consecutive sorted
        # values; each break is placed at the midpoint of a gap.
        xs = self.y.copy()
        k = self.k
        xs.sort()
        min_diff = self.mindiff
        # consecutive differences; only gaps wider than mindiff qualify
        d = xs[1:] - xs[:-1]
        diffs = d[np.nonzero(d > min_diff)]
        # NOTE(review): sp.unique is a long-deprecated scipy alias of
        # np.unique — confirm the pinned scipy still exposes it.
        diffs = sp.unique(diffs)
        k1 = k - 1
        if len(diffs) > k1:
            # keep only the k-1 largest distinct gap widths
            diffs = diffs[-k1:]
        mp = []
        # cids records the left-endpoint index of every chosen gap
        self.cids = []
        for diff in diffs:
            ids = np.nonzero(d == diff)
            for id in ids:
                self.cids.append(id[0])
                # cut point is the midpoint of the gap
                cp = ((xs[id] + xs[id + 1]) / 2.)
                mp.append(cp[0])
        # close the top class with the maximum observed value
        mp.append(xs[-1])
        mp.sort()
        self.bins = np.array(mp)

    def update(self, y=None, inplace=False, **kwargs):
        """
        Add data or change classification parameters.

        Parameters
        ----------
        y : array
            (n,1) array of data to classify
        inplace : bool
            whether to conduct the update in place or to return a copy
            estimated from the additional specifications.

        Additional parameters provided in **kwargs are passed to the init
        function of the class. For documentation, check the class constructor.
        """
        # preserve the current k / mindiff unless explicitly overridden
        kwargs.update({'k': kwargs.pop('k', self.k)})
        kwargs.update({'mindiff': kwargs.pop('mindiff', self.mindiff)})
        if inplace:
            self._update(y, **kwargs)
        else:
            new = copy.deepcopy(self)
            new._update(y, **kwargs)
            return new
class Natural_Breaks(Map_Classifier):
    """
    Natural Breaks Map Classification

    Parameters
    ----------
    y : array
        (n,1), values to classify
    k : int
        number of classes required
    initial : int
        number of initial solutions to generate, (default=100)

    Attributes
    ----------
    yb : array
        (n,1), bin ids for observations,
    bins : array
        (k,1), the upper bounds of each class
    k : int
        the number of classes
    counts : array
        (k,1), the number of observations falling in each class

    Examples
    --------
    >>> import numpy as np
    >>> import mapclassify as mc
    >>> np.random.seed(123456)
    >>> cal = mc.load_example()
    >>> nb = mc.Natural_Breaks(cal, k=5)
    >>> nb.k
    5
    >>> nb.counts
    array([41,  9,  6,  1,  1])
    >>> nb.bins
    array([  29.82,  110.74,  370.5 ,  722.85, 4111.45])
    >>> x = np.array([1] * 50)
    >>> x[-1] = 20
    >>> nb = mc.Natural_Breaks(x, k = 5, initial = 0)
    Warning: Not enough unique values in array to form k classes
    Warning: setting k to 2
    >>> nb.bins
    array([ 1, 20])
    >>> nb.counts
    array([49,  1])

    Notes
    -----
    There is a tradeoff here between speed and consistency of the
    classification If you want more speed, set initial to a smaller value (0
    would result in the best speed, if you want more consistent classes in
    multiple runs of Natural_Breaks on the same data, set initial to a higher
    value.
    """

    def __init__(self, y, k=K, initial=100):
        self.k = k
        self.initial = initial
        Map_Classifier.__init__(self, y)
        self.name = 'Natural_Breaks'

    def _set_bins(self):
        x = self.y.copy()
        k = self.k
        values = np.array(x)
        uv = np.unique(values)
        uvk = len(uv)
        if uvk < k:
            # fewer unique values than requested classes: degenerate case
            ms = 'Warning: Not enough unique values in array to form k classes'
            Warn(ms, UserWarning)
            Warn("Warning: setting k to %d" % uvk, UserWarning)
            k = uvk
            uv.sort()
            # we set the bins equal to the sorted unique values and ramp k
            # downwards. no need to call kmeans.
            self.bins = uv
            self.k = k
        else:
            # find an initial solution and then try to find an improvement
            # (natural_breaks / 1-d kmeans is stochastic, so restart
            # `initial` times and keep the best fit)
            res0 = natural_breaks(x, k)
            fit = res0[2]
            for i in list(range(self.initial)):
                res = natural_breaks(x, k)
                fit_i = res[2]
                if fit_i < fit:
                    res0 = res
            self.bins = np.array(res0[-1])
            self.k = len(self.bins)

    def update(self, y=None, inplace=False, **kwargs):
        """
        Add data or change classification parameters.

        Parameters
        ----------
        y : array
            (n,1) array of data to classify
        inplace : bool
            whether to conduct the update in place or to return a
            copy estimated from the additional specifications.

        Additional parameters provided in **kwargs are passed to the init
        function of the class. For documentation, check the class constructor.
        """
        # preserve current parameters unless explicitly overridden
        kwargs.update({'k': kwargs.pop('k', self.k)})
        kwargs.update({'initial': kwargs.pop('initial', self.initial)})
        if inplace:
            self._update(y, **kwargs)
        else:
            new = copy.deepcopy(self)
            new._update(y, **kwargs)
            return new
class Fisher_Jenks(Map_Classifier):
    """
    Fisher Jenks optimal classifier - mean based

    Parameters
    ----------
    y : array
        (n,1), values to classify
    k : int
        number of classes required

    Attributes
    ----------
    yb : array
        (n,1), bin ids for observations
    bins : array
        (k,1), the upper bounds of each class
    k : int
        the number of classes
    counts : array
        (k,1), the number of observations falling in each class

    Examples
    --------
    >>> import mapclassify as mc
    >>> cal = mc.load_example()
    >>> fj = mc.Fisher_Jenks(cal)
    >>> fj.adcm
    799.24
    >>> fj.bins
    array([  75.29,  192.05,  370.5 ,  722.85, 4111.45])
    >>> fj.counts
    array([49,  3,  4,  1,  1])
    >>>
    """

    def __init__(self, y, k=K):
        # the exact DP solver needs at least k distinct values
        nu = len(np.unique(y))
        if nu < k:
            raise ValueError("Fewer unique values than specified classes.")
        self.k = k
        Map_Classifier.__init__(self, y)
        self.name = "Fisher_Jenks"

    def _set_bins(self):
        x = self.y.copy()
        # _fisher_jenks_means returns k+1 break values; the first is the
        # minimum, so drop it to keep only the k upper bounds
        self.bins = np.array(_fisher_jenks_means(x, classes=self.k)[1:])
class Fisher_Jenks_Sampled(Map_Classifier):
    """
    Fisher Jenks optimal classifier - mean based using random sample

    Parameters
    ----------
    y : array
        (n,1), values to classify
    k : int
        number of classes required
    pct : float
        The percentage of n that should form the sample
        If pct is specified such that n*pct > 1000, then
        pct = 1000./n, unless truncate is False
    truncate : boolean
        truncate pct in cases where pct * n > 1000., (Default True)

    Attributes
    ----------
    yb : array
        (n,1), bin ids for observations
    bins : array
        (k,1), the upper bounds of each class
    k : int
        the number of classes
    counts : array
        (k,1), the number of observations falling in each class

    Examples
    --------

    (Turned off due to timing being different across hardware)

    For theoretical details see :cite:`Rey_2016`.
    """

    def __init__(self, y, k=K, pct=0.10, truncate=True):
        self.k = k
        n = y.size
        if (pct * n > 1000) and truncate:
            # cap the sample size at 1000 observations for tractability
            pct = 1000. / n
        # FIX: np.random.random_integers is deprecated (and removed in
        # recent NumPy); randint(0, n) draws the same inclusive [0, n-1]
        # range.
        ids = np.random.randint(0, n, int(n * pct))
        yr = y[ids]
        yr[-1] = max(y)  # make sure we have the upper bound
        yr[0] = min(y)  # make sure we have the min
        self.original_y = y
        self.pct = pct
        self._truncated = truncate
        self.yr = yr
        self.yr_n = yr.size
        # classify the sample, then bin the full data on the sample's bins
        Map_Classifier.__init__(self, yr)
        self.yb, self.counts = bin1d(y, self.bins)
        self.name = "Fisher_Jenks_Sampled"
        self.y = y
        self._summary()  # have to recalculate summary stats

    def _set_bins(self):
        fj = Fisher_Jenks(self.y, self.k)
        self.bins = fj.bins

    def update(self, y=None, inplace=False, **kwargs):
        """
        Add data or change classification parameters.

        Parameters
        ----------
        y : array
            (n,1) array of data to classify
        inplace : bool
            whether to conduct the update in place or to return a
            copy estimated from the additional specifications.

        Additional parameters provided in **kwargs are passed to the init
        function of the class. For documentation, check the class constructor.
        """
        # preserve current parameters unless explicitly overridden
        kwargs.update({'k': kwargs.pop('k', self.k)})
        kwargs.update({'pct': kwargs.pop('pct', self.pct)})
        kwargs.update({'truncate': kwargs.pop('truncate', self._truncated)})
        if inplace:
            self._update(y, **kwargs)
        else:
            new = copy.deepcopy(self)
            new._update(y, **kwargs)
            return new
class Jenks_Caspall(Map_Classifier):
    """
    Jenks Caspall  Map Classification

    Parameters
    ----------
    y : array
        (n,1), values to classify
    k : int
        number of classes required

    Attributes
    ----------
    yb : array
        (n,1), bin ids for observations,
    bins : array
        (k,1), the upper bounds of each class
    k : int
        the number of classes
    counts : array
        (k,1), the number of observations falling in each class

    Examples
    --------
    >>> import mapclassify as mc
    >>> cal = mc.load_example()
    >>> jc = mc.Jenks_Caspall(cal, k = 5)
    >>> jc.bins
    array([1.81000e+00, 7.60000e+00, 2.98200e+01, 1.81270e+02, 4.11145e+03])
    >>> jc.counts
    array([14, 13, 14, 10,  7])
    """

    def __init__(self, y, k=K):
        self.k = k
        Map_Classifier.__init__(self, y)
        self.name = "Jenks_Caspall"

    def _set_bins(self):
        # Iterative median-based reassignment: seed with quantile bins,
        # then repeatedly reassign each value to its nearest class median
        # until membership stabilises.
        x = self.y.copy()
        k = self.k
        # start with quantiles
        q = quantile(x, k)
        solving = True
        xb, cnts = bin1d(x, q)
        # class means
        if x.ndim == 1:
            # reshape to a column so abs(x - q) broadcasts to (n, k)
            x.shape = (x.size, 1)
        n, k = x.shape
        xm = [np.median(x[xb == i]) for i in np.unique(xb)]
        xb0 = xb.copy()
        q = xm
        it = 0
        rk = list(range(self.k))
        while solving:
            xb = np.zeros(xb0.shape, int)
            # distance of each observation to each class median
            d = abs(x - q)
            xb = d.argmin(axis=1)
            if (xb0 == xb).all():
                # fixed point reached — memberships unchanged
                solving = False
            else:
                xb0 = xb
            it += 1
            # recompute medians for the new memberships
            q = np.array([np.median(x[xb == i]) for i in rk])
        # upper bound of each class is its largest member
        cuts = np.array([max(x[xb == i]) for i in sp.unique(xb)])
        cuts.shape = (len(cuts), )
        self.bins = cuts
        self.iterations = it
class Jenks_Caspall_Sampled(Map_Classifier):
    """
    Jenks Caspall Map Classification using a random sample

    Parameters
    ----------
    y : array
        (n,1), values to classify
    k : int
        number of classes required
    pct : float
        The percentage of n that should form the sample
        If pct is specified such that n*pct > 1000, then pct = 1000./n

    Attributes
    ----------
    yb : array
        (n,1), bin ids for observations,
    bins : array
        (k,1), the upper bounds of each class
    k : int
        the number of classes
    counts : array
        (k,1), the number of observations falling in each class

    Examples
    --------
    >>> import mapclassify as mc
    >>> cal = mc.load_example()
    >>> x = np.random.random(100000)
    >>> jc = mc.Jenks_Caspall(x)
    >>> jcs = mc.Jenks_Caspall_Sampled(x)

    Notes
    -----
    This is intended for large n problems. The logic is to apply
    Jenks_Caspall to a random subset of the y space and then bin the
    complete vector y on the bins obtained from the subset. This would
    trade off some "accuracy" for a gain in speed.
    """

    def __init__(self, y, k=K, pct=0.10):
        self.k = k
        n = y.size
        if pct * n > 1000:
            # cap the sample size at 1000 observations for tractability
            pct = 1000. / n
        # FIX: np.random.random_integers is deprecated (and removed in
        # recent NumPy); randint(0, n) draws the same inclusive [0, n-1]
        # range.
        ids = np.random.randint(0, n, int(n * pct))
        yr = y[ids]
        # NOTE(review): only the maximum is pinned here and it overwrites
        # sample element 0, whereas Fisher_Jenks_Sampled pins max at [-1]
        # and min at [0] — confirm this asymmetry is intended.
        yr[0] = max(y)  # make sure we have the upper bound
        self.original_y = y
        self.pct = pct
        self.yr = yr
        self.yr_n = yr.size
        # classify the sample, then bin the full data on the sample's bins
        Map_Classifier.__init__(self, yr)
        self.yb, self.counts = bin1d(y, self.bins)
        self.name = "Jenks_Caspall_Sampled"
        self.y = y
        self._summary()  # have to recalculate summary stats

    def _set_bins(self):
        jc = Jenks_Caspall(self.y, self.k)
        self.bins = jc.bins
        self.iterations = jc.iterations

    def update(self, y=None, inplace=False, **kwargs):
        """
        Add data or change classification parameters.

        Parameters
        ----------
        y : array
            (n,1) array of data to classify
        inplace : bool
            whether to conduct the update in place or to return a
            copy estimated from the additional specifications.

        Additional parameters provided in **kwargs are passed to the init
        function of the class. For documentation, check the class constructor.
        """
        # preserve current parameters unless explicitly overridden
        kwargs.update({'k': kwargs.pop('k', self.k)})
        kwargs.update({'pct': kwargs.pop('pct', self.pct)})
        if inplace:
            self._update(y, **kwargs)
        else:
            new = copy.deepcopy(self)
            new._update(y, **kwargs)
            return new
class Jenks_Caspall_Forced(Map_Classifier):
    """
    Jenks Caspall  Map Classification with forced movements

    Parameters
    ----------
    y : array
        (n,1), values to classify
    k : int
        number of classes required

    Attributes
    ----------
    yb : array
        (n,1), bin ids for observations
    bins : array
        (k,1), the upper bounds of each class
    k : int
        the number of classes
    counts : array
        (k,1), the number of observations falling in each class

    Examples
    --------
    >>> import mapclassify as mc
    >>> cal = mc.load_example()
    >>> jcf = mc.Jenks_Caspall_Forced(cal, k = 5)
    >>> jcf.k
    5
    >>> jcf.bins
    array([[1.34000e+00],
           [5.90000e+00],
           [1.67000e+01],
           [5.06500e+01],
           [4.11145e+03]])
    >>> jcf.counts
    array([12, 12, 13,  9, 12])
    >>> jcf4 = mc.Jenks_Caspall_Forced(cal, k = 4)
    >>> jcf4.k
    4
    >>> jcf4.bins
    array([[2.51000e+00],
           [8.70000e+00],
           [3.66800e+01],
           [4.11145e+03]])
    >>> jcf4.counts
    array([15, 14, 14, 15])
    """

    def __init__(self, y, k=K):
        self.k = k
        Map_Classifier.__init__(self, y)
        self.name = "Jenks_Caspall_Forced"

    def _set_bins(self):
        # Hill-climbing refinement of quantile seeds: repeatedly force the
        # boundary observation of a class into the neighbouring class
        # (upward then downward) whenever doing so lowers the total sum of
        # squared deviations from class means (ss).
        x = self.y.copy()
        k = self.k
        q = quantile(x, k)
        solving = True
        xb, cnt = bin1d(x, q)
        # class means
        if x.ndim == 1:
            # reshape to a column so the deviation arrays are (n, 1)
            x.shape = (x.size, 1)
        n, tmp = x.shape
        xm = [x[xb == i].mean() for i in np.unique(xb)]
        q = xm
        xbar = np.array([xm[xbi] for xbi in xb])
        xbar.shape = (n, 1)
        # current objective: total squared deviation from class means
        ss = x - xbar
        ss *= ss
        ss = sum(ss)
        down_moves = up_moves = 0
        solving = True
        it = 0
        while solving:
            # try upward moves first
            moving_up = True
            while moving_up:
                class_ids = sp.unique(xb)
                nk = [sum(xb == j) for j in class_ids]
                # a class must keep at least one member, so the top class
                # never donates upward
                candidates = nk[:-1]
                i = 0
                up_moves = 0
                while candidates:
                    nki = candidates.pop(0)
                    if nki > 1:
                        ids = np.nonzero(xb == class_ids[i])
                        # largest member of the class is the upward mover
                        mover = max(ids[0])
                        tmp = xb.copy()
                        tmp[mover] = xb[mover] + 1
                        # evaluate the objective for the trial assignment
                        tm = [x[tmp == j].mean() for j in sp.unique(tmp)]
                        txbar = np.array([tm[xbi] for xbi in tmp])
                        txbar.shape = (n, 1)
                        tss = x - txbar
                        tss *= tss
                        tss = sum(tss)
                        if tss < ss:
                            # improvement: accept and restart the scan
                            xb = tmp
                            ss = tss
                            candidates = []
                            up_moves += 1
                    i += 1
                if not up_moves:
                    moving_up = False
            moving_down = True
            while moving_down:
                class_ids = sp.unique(xb)
                nk = [sum(xb == j) for j in class_ids]
                # bottom class never donates downward
                candidates = nk[1:]
                i = 1
                down_moves = 0
                while candidates:
                    nki = candidates.pop(0)
                    if nki > 1:
                        ids = np.nonzero(xb == class_ids[i])
                        # smallest member of the class is the downward mover
                        mover = min(ids[0])
                        mover_class = xb[mover]
                        target_class = mover_class - 1
                        tmp = xb.copy()
                        tmp[mover] = target_class
                        tm = [x[tmp == j].mean() for j in sp.unique(tmp)]
                        txbar = np.array([tm[xbi] for xbi in tmp])
                        txbar.shape = (n, 1)
                        tss = x - txbar
                        tss *= tss
                        tss = sum(tss)
                        if tss < ss:
                            xb = tmp
                            ss = tss
                            candidates = []
                            down_moves += 1
                    i += 1
                if not down_moves:
                    moving_down = False
            # converged when a full up+down sweep produces no moves
            if not up_moves and not down_moves:
                solving = False
            it += 1
        cuts = [max(x[xb == c]) for c in sp.unique(xb)]
        self.bins = np.array(cuts)
        self.iterations = it
class User_Defined(Map_Classifier):
    """
    User Specified Binning

    Parameters
    ----------
    y : array
        (n,1), values to classify
    bins : array
        (k,1), upper bounds of classes (have to be monotically increasing)

    Attributes
    ----------
    yb : array
        (n,1), bin ids for observations,
    bins : array
        (k,1), the upper bounds of each class
    k : int
        the number of classes
    counts : array
        (k,1), the number of observations falling in each class

    Examples
    --------
    >>> import mapclassify as mc
    >>> cal = mc.load_example()
    >>> bins = [20, max(cal)]
    >>> bins
    [20, 4111.45]
    >>> ud = mc.User_Defined(cal, bins)
    >>> ud.bins
    array([  20.  , 4111.45])
    >>> ud.counts
    array([37, 21])
    >>> bins = [20, 30]
    >>> ud = mc.User_Defined(cal, bins)
    >>> ud.bins
    array([  20.  ,   30.  , 4111.45])
    >>> ud.counts
    array([37,  4, 17])

    Notes
    -----
    If upper bound of user bins does not exceed max(y) we append an
    additional bin.
    """

    def __init__(self, y, bins):
        # FIX: work on a fresh list copy. The original appended directly to
        # the caller's `bins`, mutating it, and crashed with AttributeError
        # when _update handed self.bins (an ndarray, which has no .append)
        # back into __init__.
        bins = list(bins)
        if bins[-1] < max(y):
            # ensure the top class covers the maximum observation
            bins.append(max(y))
        self.k = len(bins)
        self.bins = np.array(bins)
        self.y = y
        Map_Classifier.__init__(self, y)
        self.name = 'User Defined'

    def _set_bins(self):
        # bins are fixed by the user in __init__; nothing to estimate
        pass

    def _update(self, y=None, bins=None):
        # pool any new observations with the existing data, then rebuild
        if y is not None:
            if hasattr(y, 'values'):
                y = y.values
            y = np.append(y.flatten(), self.y)
        else:
            y = self.y
        if bins is None:
            bins = self.bins
        self.__init__(y, bins)

    def update(self, y=None, inplace=False, **kwargs):
        """
        Add data or change classification parameters.

        Parameters
        ----------
        y : array
            (n,1) array of data to classify
        inplace : bool
            whether to conduct the update in place or to return a
            copy estimated from the additional specifications.

        Additional parameters provided in **kwargs are passed to the init
        function of the class. For documentation, check the class constructor.
        """
        bins = kwargs.pop('bins', self.bins)
        if inplace:
            self._update(y=y, bins=bins, **kwargs)
        else:
            new = copy.deepcopy(self)
            new._update(y, bins, **kwargs)
            return new
class Max_P_Classifier(Map_Classifier):
    """
    Max_P Map Classification

    Based on Max_p regionalization algorithm

    Parameters
    ----------
    y : array
        (n,1), values to classify
    k : int
        number of classes required
    initial : int
        number of initial solutions to use prior to swapping

    Attributes
    ----------
    yb : array
        (n,1), bin ids for observations,
    bins : array
        (k,1), the upper bounds of each class
    k : int
        the number of classes
    counts : array
        (k,1), the number of observations falling in each class

    Examples
    --------
    >>> import mapclassify as mc
    >>> cal = mc.load_example()
    >>> mp = mc.Max_P_Classifier(cal)
    >>> mp.bins
    array([   8.7 ,   20.47,   36.68,  110.74, 4111.45])
    >>> mp.counts
    array([29,  9,  5,  7,  8])
    """

    def __init__(self, y, k=K, initial=1000):
        self.k = k
        self.initial = initial
        Map_Classifier.__init__(self, y)
        self.name = "Max_P"

    def _set_bins(self):
        # Greedy region-growing from quantile seeds, repeated `initial`
        # times with random seed orderings; the best (lowest tss) solution
        # is then refined by swapping boundary members between adjacent
        # classes while the objective improves.
        x = self.y.copy()
        k = self.k
        q = quantile(x, k)
        if x.ndim == 1:
            x.shape = (x.size, 1)
        n, tmp = x.shape
        x.sort(axis=0)
        # find best of initial solutions
        solution = 0
        best_tss = x.var() * x.shape[0]
        tss_all = np.zeros((self.initial, 1))
        while solution < self.initial:
            remaining = list(range(n))
            # one seed per class: index of the value nearest each quantile
            seeds = [
                np.nonzero(di == min(di))[0][0]
                for di in [np.abs(x - qi) for qi in q]
            ]
            rseeds = np.random.permutation(list(range(k))).tolist()
            [remaining.remove(seed) for seed in seeds]
            self.classes = classes = []
            [classes.append([seed]) for seed in seeds]
            # grow each class outward over contiguous sorted indices
            while rseeds:
                seed_id = rseeds.pop()
                current = classes[seed_id]
                growing = True
                while growing:
                    current = classes[seed_id]
                    low = current[0]
                    high = current[-1]
                    left = low - 1
                    right = high + 1
                    move_made = False
                    if left in remaining:
                        current.insert(0, left)
                        remaining.remove(left)
                        move_made = True
                    if right in remaining:
                        current.append(right)
                        remaining.remove(right)
                        move_made = True
                    if move_made:
                        classes[seed_id] = current
                    else:
                        growing = False
            tss = _fit(self.y, classes)
            tss_all[solution] = tss
            if tss < best_tss:
                best_solution = classes
                best_it = solution
                best_tss = tss
            solution += 1
        classes = best_solution
        self.best_it = best_it
        self.tss = best_tss
        # a2c maps each observation index to its class id
        self.a2c = a2c = {}
        self.tss_all = tss_all
        for r, cl in enumerate(classes):
            for a in cl:
                a2c[a] = r
        # local refinement: shift boundary members between neighbouring
        # classes while any sweep lowers the objective
        swapping = True
        while swapping:
            rseeds = np.random.permutation(list(range(k))).tolist()
            total_moves = 0
            while rseeds:
                id = rseeds.pop()
                growing = True
                total_moves = 0
                while growing:
                    target = classes[id]
                    left = target[0] - 1
                    right = target[-1] + 1
                    n_moves = 0
                    if left in a2c:
                        left_class = classes[a2c[left]]
                        if len(left_class) > 1:
                            a = left_class[-1]
                            if self._swap(left_class, target, a):
                                target.insert(0, a)
                                left_class.remove(a)
                                a2c[a] = id
                                n_moves += 1
                    if right in a2c:
                        right_class = classes[a2c[right]]
                        if len(right_class) > 1:
                            a = right_class[0]
                            if self._swap(right_class, target, a):
                                target.append(a)
                                right_class.remove(a)
                                n_moves += 1
                                a2c[a] = id
                    if not n_moves:
                        growing = False
                    total_moves += n_moves
            if not total_moves:
                swapping = False
        xs = self.y.copy()
        xs.sort()
        # upper bound of each class is its largest member's value
        self.bins = np.array([xs[cl][-1] for cl in classes])

    def _ss(self, class_def):
        """calculates sum of squares for a class"""
        yc = self.y[class_def]
        css = yc - yc.mean()
        css *= css
        return sum(css)

    def _swap(self, class1, class2, a):
        """evaluate cost of moving a from class1 to class2"""
        ss1 = self._ss(class1)
        ss2 = self._ss(class2)
        tss1 = ss1 + ss2
        class1c = copy.copy(class1)
        class2c = copy.copy(class2)
        class1c.remove(a)
        class2c.append(a)
        ss1 = self._ss(class1c)
        ss2 = self._ss(class2c)
        tss2 = ss1 + ss2
        if tss1 < tss2:
            return False
        else:
            return True

    def update(self, y=None, inplace=False, **kwargs):
        """
        Add data or change classification parameters.

        Parameters
        ----------
        y : array
            (n,1) array of data to classify
        inplace : bool
            whether to conduct the update in place or to return a
            copy estimated from the additional specifications.

        Additional parameters provided in **kwargs are passed to the init
        function of the class. For documentation, check the class constructor.
        """
        # FIX: the original passed an undefined name `bins` to _update,
        # raising NameError on every call; it also dropped the current k.
        # Preserve k and initial unless explicitly overridden, matching the
        # other classifiers' update() implementations.
        kwargs.update({'k': kwargs.pop('k', self.k)})
        kwargs.update({'initial': kwargs.pop('initial', self.initial)})
        if inplace:
            self._update(y, **kwargs)
        else:
            new = copy.deepcopy(self)
            new._update(y, **kwargs)
            return new
def _fit(y, classes):
"""Calculate the total sum of squares for a vector y classified into
classes
Parameters
----------
y : array
(n,1), variable to be classified
classes : array
(k,1), integer values denoting class membership
"""
tss = 0
for class_def in classes:
yc = y[class_def]
css = yc - yc.mean()
css *= css
tss += sum(css)
return tss
# Registry of k-parameterised classifiers evaluated by gadf() and
# K_classifiers; keys are the names accepted by gadf's `method` argument.
kmethods = {}
kmethods["Quantiles"] = Quantiles
kmethods["Fisher_Jenks"] = Fisher_Jenks
kmethods['Natural_Breaks'] = Natural_Breaks
kmethods['Maximum_Breaks'] = Maximum_Breaks
def gadf(y, method="Quantiles", maxk=15, pct=0.8):
    """
    Evaluate the Goodness of Absolute Deviation Fit of a Classifier
    Finds the minimum value of k for which gadf>pct

    Parameters
    ----------
    y : array
        (n, 1) values to be classified
    method : {'Quantiles, 'Fisher_Jenks', 'Maximum_Breaks', 'Natrual_Breaks'}
    maxk : int
        maximum value of k to evaluate
    pct : float
        The percentage of GADF to exceed

    Returns
    -------
    k : int
        number of classes
    cl : object
        instance of the classifier at k
    gadf : float
        goodness of absolute deviation fit

    Examples
    --------
    >>> import mapclassify as mc
    >>> cal = mc.load_example()
    >>> qgadf = mc.classifiers.gadf(cal)
    >>> qgadf[0]
    15
    >>> qgadf[-1]
    0.3740257590909283

    Quantiles fail to exceed 0.80 before 15 classes. If we lower the bar to
    0.2 we see quintiles as a result

    >>> qgadf2 = mc.classifiers.gadf(cal, pct = 0.2)
    >>> qgadf2[0]
    5
    >>> qgadf2[-1]
    0.21710231966462412
    >>>

    Notes
    -----

    The GADF is defined as:

        .. math::

            GADF = 1 - \sum_c \sum_{i \in c}
                   |y_i - y_{c,med}|  / \sum_i |y_i - y_{med}|

    where :math:`y_{med}` is the global median and :math:`y_{c,med}` is
    the median for class :math:`c`.

    See Also
    --------
    K_classifiers
    """
    y = np.array(y)
    # total absolute deviation around the global median (GADF denominator)
    adam = (np.abs(y - np.median(y))).sum()
    for k in range(2, maxk + 1):
        cl = kmethods[method](y, k)
        gadf = 1 - cl.adcm / adam
        if gadf > pct:
            # smallest k whose fit exceeds the threshold
            break
    return (k, cl, gadf)
class K_classifiers(object):
    """
    Evaluate all k-classifers and pick optimal based on k and GADF

    Parameters
    ----------
    y : array
        (n,1), values to be classified
    pct : float
        The percentage of GADF to exceed

    Attributes
    ----------
    best : object
        instance of the optimal Map_Classifier
    results : dictionary
        keys are classifier names, values are the Map_Classifier
        instances with the best pct for each classifer

    Examples
    --------
    >>> import mapclassify as mc
    >>> cal = mc.load_example()
    >>> ks = mc.classifiers.K_classifiers(cal)
    >>> ks.best.name
    'Fisher_Jenks'
    >>> ks.best.k
    4
    >>> ks.best.gadf
    0.8481032719908105

    Notes
    -----
    This can be used to suggest a classification scheme.

    See Also
    --------
    gadf
    """

    def __init__(self, y, pct=0.8):
        results = {}
        best = gadf(y, "Fisher_Jenks", maxk=len(y) - 1, pct=pct)
        # NOTE(review): gadf returns (k, classifier, gadf); the names below
        # appear swapped — pct0 holds k and k0 holds the gadf value — so the
        # comparison further down mixes class counts with fit values.
        # Left as-is because the doctested results depend on it; confirm
        # intent before changing.
        pct0 = best[0]
        k0 = best[-1]
        keys = list(kmethods.keys())
        keys.remove("Fisher_Jenks")
        results["Fisher_Jenks"] = best
        for method in keys:
            results[method] = gadf(y, method, maxk=len(y) - 1, pct=pct)
            k1 = results[method][0]
            pct1 = results[method][-1]
            if (k1 < k0) or (k1 == k0 and pct0 < pct1):
                best = results[method]
                k0 = k1
                pct0 = pct1
        self.results = results
        # best[1] is the classifier instance from the winning gadf tuple
        self.best = best[1]
|
pysal/mapclassify
|
mapclassify/classifiers.py
|
quantile
|
python
|
def quantile(y, k=4):
    """
    Calculates the quantiles for an array

    Parameters
    ----------
    y : array
        (n,1), values to classify
    k : int
        number of quantiles

    Returns
    -------
    q : array
        (n,1), quantile values; fewer than k values are returned (with a
        warning) when ties collapse distinct quantiles together
    """
    step = 100. / k
    cutoffs = np.arange(step, 100 + step, step)
    if cutoffs[-1] > 100.0:
        # float accumulation can overshoot 100 on the last percentile
        cutoffs[-1] = 100.0
    q = np.unique([stats.scoreatpercentile(y, pct) for pct in cutoffs])
    if len(q) < k:
        Warn('Warning: Not enough unique values in array to form k classes',
             UserWarning)
        Warn('Warning: setting k to %d' % len(q), UserWarning)
    return q
|
Calculates the quantiles for an array
Parameters
----------
y : array
(n,1), values to classify
k : int
number of quantiles
Returns
-------
q : array
(n,1), quantile values
Examples
--------
>>> import numpy as np
>>> import mapclassify as mc
>>> x = np.arange(1000)
>>> mc.classifiers.quantile(x)
array([249.75, 499.5 , 749.25, 999. ])
>>> mc.classifiers.quantile(x, k = 3)
array([333., 666., 999.])
Note that if there are enough ties that the quantile values repeat, we
collapse to pseudo quantiles in which case the number of classes will be
less than k
>>> x = [1.0] * 100
>>> x.extend([3.0] * 40)
>>> len(x)
140
>>> y = np.array(x)
>>> mc.classifiers.quantile(y)
array([1., 3.])
|
train
|
https://github.com/pysal/mapclassify/blob/5b22ec33f5802becf40557614d90cd38efa1676e/mapclassify/classifiers.py#L47-L97
| null |
"""
A module of classification schemes for choropleth mapping.
"""
__author__ = "Sergio J. Rey"
__all__ = [
'Map_Classifier', 'quantile', 'Box_Plot', 'Equal_Interval', 'Fisher_Jenks',
'Fisher_Jenks_Sampled', 'Jenks_Caspall', 'Jenks_Caspall_Forced',
'Jenks_Caspall_Sampled', 'Max_P_Classifier', 'Maximum_Breaks',
'Natural_Breaks', 'Quantiles', 'Percentiles', 'Std_Mean', 'User_Defined',
'gadf', 'K_classifiers', 'HeadTail_Breaks', 'CLASSIFIERS'
]
CLASSIFIERS = ('Box_Plot', 'Equal_Interval', 'Fisher_Jenks',
'Fisher_Jenks_Sampled', 'HeadTail_Breaks', 'Jenks_Caspall',
'Jenks_Caspall_Forced', 'Jenks_Caspall_Sampled',
'Max_P_Classifier', 'Maximum_Breaks', 'Natural_Breaks',
'Quantiles', 'Percentiles', 'Std_Mean', 'User_Defined')
K = 5 # default number of classes in any map scheme with this as an argument
import numpy as np
import scipy.stats as stats
import scipy as sp
import copy
from scipy.cluster.vq import kmeans as KMEANS
from warnings import warn as Warn
try:
    from numba import jit
except ImportError:
    # Fallback: identity decorator so @jit-decorated functions still run
    # (as plain Python, without compilation) when numba is not installed.
    def jit(func):
        return func
def headTail_breaks(values, cuts):
    """
    head tail breaks helper function

    Appends the mean of `values` to `cuts`, then recurses on the "head"
    (values at or above the mean) until a single value remains. Returns the
    accumulated list of cut values.
    """
    arr = np.array(values)
    avg = arr.mean()
    cuts.append(avg)
    if len(arr) < 2:
        return cuts
    return headTail_breaks(arr[arr >= avg], cuts)
def binC(y, bins):
    """
    Bin categorical/qualitative data

    Parameters
    ----------
    y : array
        (n,q), categorical values
    bins : array
        (k,1), unique values associated with each bin

    Return
    ------
    b : array
        (n,q), bin membership, values between 0 and k-1

    Notes
    -----
    A warning is emitted for any value of y that matches no bin; such
    entries are left at bin id 0.
    """
    if np.ndim(y) == 1:
        n, k = np.shape(y)[0], 1
    else:
        n, k = np.shape(y)
    b = np.zeros((n, k), dtype='int')
    for idx, value in enumerate(bins):
        b[np.nonzero(y == value)] = idx
    # check for non-binned items and warn if needed
    for val in set(y.flatten()):
        if val not in bins:
            Warn('value not in bin: {}'.format(val), UserWarning)
            Warn('bins: {}'.format(bins), UserWarning)
    return b
def bin(y, bins):
    """
    bin interval/ratio data

    Parameters
    ----------
    y : array
        (n,q), values to bin
    bins : array
        (k,1), upper bounds of each bin (monotonic)

    Returns
    -------
    b : array
        (n,q), values of values between 0 and k-1

    Notes
    -----
    Values above the last bound keep the top bin id; values at or below a
    bound take the id of the lowest bound that covers them.
    """
    if np.ndim(y) == 1:
        n, k = np.shape(y)[0], 1
    else:
        n, k = np.shape(y)
    b = np.zeros((n, k), dtype='int')
    upper = list(bins)
    # assign from the largest bound downwards so smaller bounds overwrite
    for i in range(len(upper) - 1, -1, -1):
        b[np.nonzero(y <= upper[i])] = i
    return b
def bin1d(x, bins):
    """
    Place values of a 1-d array into bins and determine counts of values in
    each bin

    Parameters
    ----------
    x : array
        (n, 1), values to bin
    bins : array
        (k,1), upper bounds of each bin (monotonic)

    Returns
    -------
    binIds : array
        1-d array of integer bin Ids
    counts : int
        number of elements of x falling in each bin
    """
    # half-open intervals (lo, hi]; the first interval is open below
    lowers = [-float("inf")] + list(bins[:-1])
    binIds = np.zeros(x.shape, dtype='int')
    for j, (lo, hi) in enumerate(zip(lowers, bins)):
        binIds += (x > lo) * (x <= hi) * j
    counts = np.bincount(binIds, minlength=len(bins))
    return (binIds, counts)
def load_example():
    """
    Helper function for doc tests
    """
    # deferred import keeps the dataset optional at module import time
    from .datasets import calemp
    return calemp.load()
def _kmeans(y, k=5):
    """
    Helper function to do kmeans in one dimension

    Returns
    -------
    class_ids : array
        cluster membership for each observation
    cuts : array
        maximum value in each cluster (class upper bounds)
    fit : float
        total squared deviation of observations from their centroids
    centroids : array
        sorted cluster centroids
    """
    y = y * 1.  # KMEANS needs float or double dtype
    centroids = KMEANS(y, k)[0]
    centroids.sort()
    try:
        # works when y is already a column vector broadcasting against
        # the centroid row
        class_ids = np.abs(y - centroids).argmin(axis=1)
    except:
        # 1-d y: add an axis so the distance matrix is (n, k)
        class_ids = np.abs(y[:, np.newaxis] - centroids).argmin(axis=1)
    uc = np.unique(class_ids)
    cuts = np.array([y[class_ids == c].max() for c in uc])
    y_cent = np.zeros_like(y)
    for c in uc:
        y_cent[class_ids == c] = centroids[c]
    diffs = y - y_cent
    diffs *= diffs
    return class_ids, cuts, diffs.sum(), centroids
def natural_breaks(values, k=5):
    """
    natural breaks helper function

    Jenks natural breaks is kmeans in one dimension

    Returns a tuple (centroids, class_ids, fit, cuts); k is reduced (with a
    warning) when the data has fewer than k unique values.
    """
    values = np.array(values)
    uv = np.unique(values)
    uvk = len(uv)
    if uvk < k:
        Warn('Warning: Not enough unique values in array to form k classes',
             UserWarning)
        Warn('Warning: setting k to %d' % uvk, UserWarning)
        k = uvk
    kres = _kmeans(values, k)
    sids = kres[-1]  # centroids
    fit = kres[-2]
    class_ids = kres[0]
    cuts = kres[1]
    return (sids, class_ids, fit, cuts)
@jit
def _fisher_jenks_means(values, classes=5, sort=True):
    """
    Jenks Optimal (Natural Breaks) algorithm implemented in Python.

    Notes
    -----
    The original Python code comes from here:
    http://danieljlewis.org/2010/06/07/jenks-natural-breaks-algorithm-in-python/
    and is based on a JAVA and Fortran code available here:
    https://stat.ethz.ch/pipermail/r-sig-geo/2006-March/000811.html

    Returns class breaks such that classes are internally homogeneous while
    assuring heterogeneity among classes.
    """
    if sort:
        # NOTE: sorts the caller's array in place
        values.sort()
    n_data = len(values)
    # mat1[l, j]: optimal start index for class j ending at value l
    # mat2[l, j]: minimal within-class variance for that configuration
    mat1 = np.zeros((n_data + 1, classes + 1), dtype=np.int32)
    mat2 = np.zeros((n_data + 1, classes + 1), dtype=np.float32)
    mat1[1, 1:] = 1
    mat2[2:, 1:] = np.inf
    v = np.float32(0)
    for l in range(2, len(values) + 1):
        # running sums over candidate last-class windows ending at l
        s1 = np.float32(0)
        s2 = np.float32(0)
        w = np.float32(0)
        for m in range(1, l + 1):
            i3 = l - m + 1
            val = np.float32(values[i3 - 1])
            s2 += val * val
            s1 += val
            w += np.float32(1)
            # within-class sum of squared deviations for values[i3-1:l]
            v = s2 - (s1 * s1) / w
            i4 = i3 - 1
            if i4 != 0:
                for j in range(2, classes + 1):
                    # extend an optimal (j-1)-class partition of the prefix
                    if mat2[l, j] >= (v + mat2[i4, j - 1]):
                        mat1[l, j] = i3
                        mat2[l, j] = v + mat2[i4, j - 1]
        mat1[l, 1] = 1
        mat2[l, 1] = v
    # backtrack through mat1 to recover the optimal break values
    k = len(values)
    kclass = np.zeros(classes + 1, dtype=values.dtype)
    kclass[classes] = values[len(values) - 1]
    kclass[0] = values[0]
    for countNum in range(classes, 1, -1):
        pivot = mat1[k, countNum]
        id = int(pivot - 2)
        kclass[countNum - 1] = values[id]
        k = int(pivot - 1)
    return kclass
class Map_Classifier(object):
"""
Abstract class for all map classifications :cite:`Slocum_2009`
For an array :math:`y` of :math:`n` values, a map classifier places each
value :math:`y_i` into one of :math:`k` mutually exclusive and exhaustive
classes. Each classifer defines the classes based on different criteria,
but in all cases the following hold for the classifiers in PySAL:
.. math:: C_j^l < y_i \le C_j^u \ \forall i \in C_j
where :math:`C_j` denotes class :math:`j` which has lower bound
:math:`C_j^l` and upper bound :math:`C_j^u`.
Map Classifiers Supported
* :class:`mapclassify.classifiers.Box_Plot`
* :class:`mapclassify.classifiers.Equal_Interval`
* :class:`mapclassify.classifiers.Fisher_Jenks`
* :class:`mapclassify.classifiers.Fisher_Jenks_Sampled`
* :class:`mapclassify.classifiers.HeadTail_Breaks`
* :class:`mapclassify.classifiers.Jenks_Caspall`
* :class:`mapclassify.classifiers.Jenks_Caspall_Forced`
* :class:`mapclassify.classifiers.Jenks_Caspall_Sampled`
* :class:`mapclassify.classifiers.Max_P_Classifier`
* :class:`mapclassify.classifiers.Maximum_Breaks`
* :class:`mapclassify.classifiers.Natural_Breaks`
* :class:`mapclassify.classifiers.Quantiles`
* :class:`mapclassify.classifiers.Percentiles`
* :class:`mapclassify.classifiers.Std_Mean`
* :class:`mapclassify.classifiers.User_Defined`
Utilities:
In addition to the classifiers, there are several utility functions that
can be used to evaluate the properties of a specific classifier,
or for automatic selection of a classifier and
number of classes.
* :func:`mapclassify.classifiers.gadf`
* :class:`mapclassify.classifiers.K_classifiers`
"""
    def __init__(self, y):
        # Flatten the input (handles pandas Series / 2-d arrays) before
        # classifying; subclasses set parameters before calling this.
        y = np.asarray(y).flatten()
        self.name = 'Map Classifier'
        self.y = y
        self._classify()
        self._summary()
    def _summary(self):
        # Derive per-class member index lists and fit statistics from the
        # current bin assignments (self.yb).
        yb = self.yb
        self.classes = [np.nonzero(yb == c)[0].tolist() for c in range(self.k)]
        self.tss = self.get_tss()
        self.adcm = self.get_adcm()
        self.gadf = self.get_gadf()
def _classify(self):
self._set_bins()
self.yb, self.counts = bin1d(self.y, self.bins)
def _update(self, data, *args, **kwargs):
"""
The only thing that *should* happen in this function is
1. input sanitization for pandas
2. classification/reclassification.
Using their __init__ methods, all classifiers can re-classify given
different input parameters or additional data.
If you've got a cleverer updating equation than the intial estimation
equation, remove the call to self.__init__ below and replace it with
the updating function.
"""
if data is not None:
data = np.asarray(data).flatten()
data = np.append(data.flatten(), self.y)
else:
data = self.y
self.__init__(data, *args, **kwargs)
@classmethod
def make(cls, *args, **kwargs):
"""
Configure and create a classifier that will consume data and produce
classifications, given the configuration options specified by this
function.
Note that this like a *partial application* of the relevant class
constructor. `make` creates a function that returns classifications; it
does not actually do the classification.
If you want to classify data directly, use the appropriate class
constructor, like Quantiles, Max_Breaks, etc.
If you *have* a classifier object, but want to find which bins new data
falls into, use find_bin.
Parameters
----------
*args : required positional arguments
all positional arguments required by the classifier,
excluding the input data.
rolling : bool
a boolean configuring the outputted classifier to use
a rolling classifier rather than a new classifier for
each input. If rolling, this adds the current data to
all of the previous data in the classifier, and
rebalances the bins, like a running median
computation.
return_object : bool
a boolean configuring the outputted classifier to
return the classifier object or not
return_bins : bool
a boolean configuring the outputted classifier to
return the bins/breaks or not
return_counts : bool
a boolean configuring the outputted classifier to
return the histogram of objects falling into each bin
or not
Returns
-------
A function that consumes data and returns their bins (and object,
bins/breaks, or counts, if requested).
Note
----
This is most useful when you want to run a classifier many times
with a given configuration, such as when classifying many columns of an
array or dataframe using the same configuration.
Examples
--------
>>> import libpysal as ps
>>> import mapclassify as mc
>>> import geopandas as gpd
>>> df = gpd.read_file(ps.examples.get_path('columbus.dbf'))
>>> classifier = mc.Quantiles.make(k=9)
>>> cl = df[['HOVAL', 'CRIME', 'INC']].apply(classifier)
>>> cl["HOVAL"].values[:10]
array([8, 7, 2, 4, 1, 3, 8, 5, 7, 8])
>>> cl["CRIME"].values[:10]
array([0, 1, 3, 4, 6, 2, 0, 5, 3, 4])
>>> cl["INC"].values[:10]
array([7, 8, 5, 0, 3, 5, 0, 3, 6, 4])
>>> import pandas as pd; from numpy import linspace as lsp
>>> data = [lsp(3,8,num=10), lsp(10, 0, num=10), lsp(-5, 15, num=10)]
>>> data = pd.DataFrame(data).T
>>> data
0 1 2
0 3.000000 10.000000 -5.000000
1 3.555556 8.888889 -2.777778
2 4.111111 7.777778 -0.555556
3 4.666667 6.666667 1.666667
4 5.222222 5.555556 3.888889
5 5.777778 4.444444 6.111111
6 6.333333 3.333333 8.333333
7 6.888889 2.222222 10.555556
8 7.444444 1.111111 12.777778
9 8.000000 0.000000 15.000000
>>> data.apply(mc.Quantiles.make(rolling=True))
0 1 2
0 0 4 0
1 0 4 0
2 1 4 0
3 1 3 0
4 2 2 1
5 2 1 2
6 3 0 4
7 3 0 4
8 4 0 4
9 4 0 4
>>> dbf = ps.io.open(ps.examples.get_path('baltim.dbf'))
>>> data = dbf.by_col_array('PRICE', 'LOTSZ', 'SQFT')
>>> my_bins = [1, 10, 20, 40, 80]
>>> cl = [mc.User_Defined.make(bins=my_bins)(a) for a in data.T]
>>> len(cl)
3
>>> cl[0][:10]
array([4, 5, 5, 5, 4, 4, 5, 4, 4, 5])
"""
# only flag overrides return flag
to_annotate = copy.deepcopy(kwargs)
return_object = kwargs.pop('return_object', False)
return_bins = kwargs.pop('return_bins', False)
return_counts = kwargs.pop('return_counts', False)
rolling = kwargs.pop('rolling', False)
if rolling:
# just initialize a fake classifier
data = list(range(10))
cls_instance = cls(data, *args, **kwargs)
# and empty it, since we'll be using the update
cls_instance.y = np.array([])
else:
cls_instance = None
# wrap init in a closure to make a consumer.
# Qc Na: "Objects/Closures are poor man's Closures/Objects"
def classifier(data, cls_instance=cls_instance):
if rolling:
cls_instance.update(data, inplace=True, **kwargs)
yb = cls_instance.find_bin(data)
else:
cls_instance = cls(data, *args, **kwargs)
yb = cls_instance.yb
outs = [yb, None, None, None]
outs[1] = cls_instance if return_object else None
outs[2] = cls_instance.bins if return_bins else None
outs[3] = cls_instance.counts if return_counts else None
outs = [a for a in outs if a is not None]
if len(outs) == 1:
return outs[0]
else:
return outs
# for debugging/jic, keep around the kwargs.
# in future, we might want to make this a thin class, so that we can
# set a custom repr. Call the class `Binner` or something, that's a
# pre-configured Classifier that just consumes data, bins it, &
# possibly updates the bins.
classifier._options = to_annotate
return classifier
def update(self, y=None, inplace=False, **kwargs):
"""
Add data or change classification parameters.
Parameters
----------
y : array
(n,1) array of data to classify
inplace : bool
whether to conduct the update in place or to return a copy
estimated from the additional specifications.
Additional parameters provided in **kwargs are passed to the init
function of the class. For documentation, check the class constructor.
"""
kwargs.update({'k': kwargs.pop('k', self.k)})
if inplace:
self._update(y, **kwargs)
else:
new = copy.deepcopy(self)
new._update(y, **kwargs)
return new
def __str__(self):
st = self._table_string()
return st
def __repr__(self):
return self._table_string()
def __call__(self, *args, **kwargs):
"""
This will allow the classifier to be called like it's a function.
Whether or not we want to make this be "find_bin" or "update" is a
design decision.
I like this as find_bin, since a classifier's job should be to classify
the data given to it using the rules estimated from the `_classify()`
function.
"""
return self.find_bin(*args)
def get_tss(self):
"""
Total sum of squares around class means
Returns sum of squares over all class means
"""
tss = 0
for class_def in self.classes:
if len(class_def) > 0:
yc = self.y[class_def]
css = yc - yc.mean()
css *= css
tss += sum(css)
return tss
def _set_bins(self):
pass
def get_adcm(self):
"""
Absolute deviation around class median (ADCM).
Calculates the absolute deviations of each observation about its class
median as a measure of fit for the classification method.
Returns sum of ADCM over all classes
"""
adcm = 0
for class_def in self.classes:
if len(class_def) > 0:
yc = self.y[class_def]
yc_med = np.median(yc)
ycd = np.abs(yc - yc_med)
adcm += sum(ycd)
return adcm
def get_gadf(self):
"""
Goodness of absolute deviation of fit
"""
adam = (np.abs(self.y - np.median(self.y))).sum()
gadf = 1 - self.adcm / adam
return gadf
def _table_string(self, width=12, decimal=3):
fmt = ".%df" % decimal
fmt = "%" + fmt
largest = max([len(fmt % i) for i in self.bins])
width = largest
fmt = "%d.%df" % (width, decimal)
fmt = "%" + fmt
h1 = "Lower"
h1 = h1.center(largest)
h2 = " "
h2 = h2.center(10)
h3 = "Upper"
h3 = h3.center(largest + 1)
largest = "%d" % max(self.counts)
largest = len(largest) + 15
h4 = "Count"
h4 = h4.rjust(largest)
table = []
header = h1 + h2 + h3 + h4
table.append(header)
table.append("=" * len(header))
for i, up in enumerate(self.bins):
if i == 0:
left = " " * width
left += " x[i] <= "
else:
left = fmt % self.bins[i - 1]
left += " < x[i] <= "
right = fmt % self.bins[i]
row = left + right
cnt = "%d" % self.counts[i]
cnt = cnt.rjust(largest)
row += cnt
table.append(row)
name = self.name
top = name.center(len(row))
table.insert(0, top)
table.insert(1, " ")
table = "\n".join(table)
return table
def find_bin(self, x):
"""
Sort input or inputs according to the current bin estimate
Parameters
----------
x : array or numeric
a value or array of values to fit within the estimated
bins
Returns
-------
a bin index or array of bin indices that classify the input into one of
the classifiers' bins.
Note that this differs from similar functionality in
numpy.digitize(x, classi.bins, right=True).
This will always provide the closest bin, so data "outside" the classifier,
above and below the max/min breaks, will be classified into the nearest bin.
numpy.digitize returns k+1 for data greater than the greatest bin, but retains 0
for data below the lowest bin.
"""
x = np.asarray(x).flatten()
right = np.digitize(x, self.bins, right=True)
if right.max() == len(self.bins):
right[right == len(self.bins)] = len(self.bins) - 1
return right
class HeadTail_Breaks(Map_Classifier):
    """
    Head/tail Breaks Map Classification for Heavy-tailed Distributions
    Parameters
    ----------
    y       : array
              (n,1), values to classify
    Attributes
    ----------
    yb      : array
              (n,1), bin ids for observations,
    bins    : array
              (k,1), the upper bounds of each class
    k       : int
              the number of classes
    counts  : array
              (k,1), the number of observations falling in each class
    Examples
    --------
    >>> import numpy as np
    >>> import mapclassify as mc
    >>> np.random.seed(10)
    >>> cal = mc.load_example()
    >>> htb = mc.HeadTail_Breaks(cal)
    >>> htb.k
    3
    >>> htb.counts
    array([50,  7,  1])
    >>> htb.bins
    array([ 125.92810345,  811.26      , 4111.45      ])
    >>> np.random.seed(123456)
    >>> x = np.random.lognormal(3, 1, 1000)
    >>> htb = mc.HeadTail_Breaks(x)
    >>> htb.bins
    array([ 32.26204423,  72.50205622, 128.07150107, 190.2899093 ,
           264.82847377, 457.88157946, 576.76046949])
    >>> htb.counts
    array([695, 209,  62,  22,  10,   1,   1])
    Notes
    -----
    Head/tail Breaks is a relatively new classification method developed
    for data with a heavy-tailed distribution.
    Implementation based on contributions by Alessandra Sozzi <alessandra.sozzi@gmail.com>.
    For theoretical details see :cite:`Jiang_2013`.
    """

    def __init__(self, y):
        Map_Classifier.__init__(self, y)
        self.name = 'HeadTail_Breaks'

    def _set_bins(self):
        # delegate the recursive head/tail partitioning to the module helper
        breaks = headTail_breaks(self.y.copy(), [])
        self.bins = np.array(breaks)
        self.k = len(self.bins)
class Equal_Interval(Map_Classifier):
    """
    Equal Interval Classification
    Parameters
    ----------
    y : array
        (n,1), values to classify
    k : int
        number of classes required
    Attributes
    ----------
    yb      : array
              (n,1), bin ids for observations,
              each value is the id of the class the observation belongs to
              yb[i] = j  for j>=1 if bins[j-1] < y[i] <= bins[j], yb[i] = 0
              otherwise
    bins    : array
              (k,1), the upper bounds of each class
    k       : int
              the number of classes
    counts  : array
              (k,1), the number of observations falling in each class
    Examples
    --------
    >>> import mapclassify as mc
    >>> cal = mc.load_example()
    >>> ei = mc.Equal_Interval(cal, k = 5)
    >>> ei.k
    5
    >>> ei.counts
    array([57,  0,  0,  0,  1])
    >>> ei.bins
    array([ 822.394, 1644.658, 2466.922, 3289.186, 4111.45 ])
    Notes
    -----
    Intervals defined to have equal width:
    .. math::
        bins_j = min(y)+w*(j+1)
    with :math:`w=\\frac{max(y)-min(j)}{k}`
    """

    def __init__(self, y, k=K):
        """
        see class docstring
        """
        self.k = k
        Map_Classifier.__init__(self, y)
        self.name = 'Equal Interval'

    def _set_bins(self):
        # cut the data range into k equally wide intervals
        lo, hi = min(self.y), max(self.y)
        step = (hi - lo) * 1. / self.k
        cuts = np.arange(lo + step, hi + step, step)
        if len(cuts) > self.k:
            # floating-point rounding can produce one cut too many;
            # truncate and pin the last break to the maximum value
            cuts = cuts[0:self.k]
            cuts[-1] = hi
        self.bins = cuts.copy()
class Percentiles(Map_Classifier):
    """
    Percentiles Map Classification
    Parameters
    ----------
    y    : array
           attribute to classify
    pct  : array
           percentiles default=[1,10,50,90,99,100]
    Attributes
    ----------
    yb     : array
             bin ids for observations (numpy array n x 1)
    bins   : array
             the upper bounds of each class (numpy array k x 1)
    k      : int
             the number of classes
    counts : int
             the number of observations falling in each class
             (numpy array k x 1)
    Examples
    --------
    >>> import mapclassify as mc
    >>> cal = mc.load_example()
    >>> p = mc.Percentiles(cal)
    >>> p.bins
    array([1.357000e-01, 5.530000e-01, 9.365000e+00, 2.139140e+02,
           2.179948e+03, 4.111450e+03])
    >>> p.counts
    array([ 1,  5, 23, 23,  5,  1])
    >>> p2 = mc.Percentiles(cal, pct = [50, 100])
    >>> p2.bins
    array([   9.365, 4111.45 ])
    >>> p2.counts
    array([29, 29])
    >>> p2.k
    2
    """

    def __init__(self, y, pct=None):
        # FIX: the original signature used a mutable default (pct=[1,10,...]);
        # a None sentinel plus a defensive copy avoids the shared-mutable-default
        # pitfall while keeping the same effective default.
        self.pct = list(pct) if pct is not None else [1, 10, 50, 90, 99, 100]
        Map_Classifier.__init__(self, y)
        self.name = 'Percentiles'

    def _set_bins(self):
        # class breaks are the requested percentiles of the data
        y = self.y
        pct = self.pct
        self.bins = np.array([stats.scoreatpercentile(y, p) for p in pct])
        self.k = len(self.bins)

    def update(self, y=None, inplace=False, **kwargs):
        """
        Add data or change classification parameters.
        Parameters
        ----------
        y : array
              (n,1) array of data to classify
        inplace : bool
              whether to conduct the update in place or to return a copy
              estimated from the additional specifications.
        Additional parameters provided in **kwargs are passed to the init
        function of the class. For documentation, check the class constructor.
        """
        # carry the current percentile list forward unless overridden
        kwargs.update({'pct': kwargs.pop('pct', self.pct)})
        if inplace:
            self._update(y, **kwargs)
        else:
            new = copy.deepcopy(self)
            new._update(y, **kwargs)
            return new
class Box_Plot(Map_Classifier):
    """
    Box_Plot Map Classification
    Parameters
    ----------
    y     : array
            attribute to classify
    hinge : float
            multiplier for IQR
    Attributes
    ----------
    yb : array
        (n,1), bin ids for observations
    bins : array
          (n,1), the upper bounds of each class  (monotonic)
    k : int
        the number of classes
    counts : array
             (k,1), the number of observations falling in each class
    low_outlier_ids : array
        indices of observations that are low outliers
    high_outlier_ids : array
        indices of observations that are high outliers
    Notes
    -----
    The bins are set as follows::
        bins[0] = q[0]-hinge*IQR
        bins[1] = q[0]
        bins[2] = q[1]
        bins[3] = q[2]
        bins[4] = q[2]+hinge*IQR
        bins[5] = inf  (see Notes)
    where q is an array of the first three quartiles of y and
    IQR=q[2]-q[0]
    If q[2]+hinge*IQR > max(y) there will only be 5 classes and no high
    outliers, otherwise, there will be 6 classes and at least one high
    outlier.
    Examples
    --------
    >>> import mapclassify as mc
    >>> cal = mc.load_example()
    >>> bp = mc.Box_Plot(cal)
    >>> bp.bins
    array([-5.287625e+01,  2.567500e+00,  9.365000e+00,  3.953000e+01,
            9.497375e+01,  4.111450e+03])
    >>> bp.counts
    array([ 0, 15, 14, 14,  6,  9])
    >>> bp.high_outlier_ids
    array([ 0,  6, 18, 29, 33, 36, 37, 40, 42])
    >>> cal[bp.high_outlier_ids].values
    array([ 329.92,  181.27,  370.5 ,  722.85,  192.05,  110.74, 4111.45,
            317.11,  264.93])
    >>> bx = mc.Box_Plot(np.arange(100))
    >>> bx.bins
    array([-49.5 ,  24.75,  49.5 ,  74.25, 148.5 ])
    """

    def __init__(self, y, hinge=1.5):
        """
        Parameters
        ----------
        y : array (n,1)
            attribute to classify
        hinge : float
            multiple of inter-quartile range (default=1.5)
        """
        self.hinge = hinge
        Map_Classifier.__init__(self, y)
        self.name = 'Box Plot'

    def _set_bins(self):
        # quartiles plus the maximum form the initial break points
        quartiles = [stats.scoreatpercentile(self.y, p) for p in (25, 50, 75, 100)]
        self.iqr = quartiles[-2] - quartiles[0]
        fence = self.hinge * self.iqr
        lower_fence = quartiles[0] - fence
        upper_fence = quartiles[-2] + fence
        if upper_fence < quartiles[-1]:
            # high outliers exist: keep max as a separate top class
            quartiles.insert(-1, upper_fence)
        else:
            # no high outliers: the upper fence caps the top class
            quartiles[-1] = upper_fence
        quartiles.insert(0, lower_fence)
        self.bins = np.array(quartiles)
        self.k = len(quartiles)

    def _classify(self):
        Map_Classifier._classify(self)
        # class 0 holds low outliers; class 5 (when present) holds high ones
        self.low_outlier_ids = np.nonzero(self.yb == 0)[0]
        self.high_outlier_ids = np.nonzero(self.yb == 5)[0]

    def update(self, y=None, inplace=False, **kwargs):
        """
        Add data or change classification parameters.
        Parameters
        ----------
        y : array
              (n,1) array of data to classify
        inplace : bool
              whether to conduct the update in place or to return a
              copy estimated from the additional specifications.
        Additional parameters provided in **kwargs are passed to the init
        function of the class. For documentation, check the class constructor.
        """
        kwargs.update({'hinge': kwargs.pop('hinge', self.hinge)})
        if inplace:
            self._update(y, **kwargs)
            return None
        copied = copy.deepcopy(self)
        copied._update(y, **kwargs)
        return copied
class Quantiles(Map_Classifier):
    """
    Quantile Map Classification
    Parameters
    ----------
    y : array
        (n,1), values to classify
    k : int
        number of classes required
    Attributes
    ----------
    yb      : array
              (n,1), bin ids for observations,
              each value is the id of the class the observation belongs to
              yb[i] = j  for j>=1 if bins[j-1] < y[i] <= bins[j], yb[i] = 0
              otherwise
    bins    : array
              (k,1), the upper bounds of each class
    k       : int
              the number of classes
    counts  : array
              (k,1), the number of observations falling in each class
    Examples
    --------
    >>> import mapclassify as mc
    >>> cal = mc.load_example()
    >>> q = mc.Quantiles(cal, k = 5)
    >>> q.bins
    array([1.46400e+00, 5.79800e+00, 1.32780e+01, 5.46160e+01, 4.11145e+03])
    >>> q.counts
    array([12, 11, 12, 11, 12])
    """

    def __init__(self, y, k=K):
        self.k = k
        Map_Classifier.__init__(self, y)
        self.name = 'Quantiles'

    def _set_bins(self):
        # breaks are the k-quantiles of the data
        self.bins = quantile(self.y, k=self.k)
class Std_Mean(Map_Classifier):
    """
    Standard Deviation and Mean Map Classification
    Parameters
    ----------
    y         : array
                (n,1), values to classify
    multiples : array
                the multiples of the standard deviation to add/subtract from
                the sample mean to define the bins, default=[-2,-1,1,2]
    Attributes
    ----------
    yb      : array
              (n,1), bin ids for observations,
    bins    : array
              (k,1), the upper bounds of each class
    k       : int
              the number of classes
    counts  : array
              (k,1), the number of observations falling in each class
    Examples
    --------
    >>> import mapclassify as mc
    >>> cal = mc.load_example()
    >>> st = mc.Std_Mean(cal)
    >>> st.k
    5
    >>> st.bins
    array([-967.36235382, -420.71712519,  672.57333208, 1219.21856072,
           4111.45      ])
    >>> st.counts
    array([ 0,  0, 56,  1,  1])
    >>>
    >>> st3 = mc.Std_Mean(cal, multiples = [-3, -1.5, 1.5, 3])
    >>> st3.bins
    array([-1514.00758246,  -694.03973951,   945.8959464 ,  1765.86378936,
            4111.45      ])
    >>> st3.counts
    array([ 0,  0, 57,  0,  1])
    """

    def __init__(self, y, multiples=None):
        # FIX: the original signature used a mutable default
        # (multiples=[-2, -1, 1, 2]); a None sentinel plus a defensive copy
        # avoids the shared-mutable-default pitfall with the same behavior.
        self.multiples = list(multiples) if multiples is not None else [-2, -1, 1, 2]
        Map_Classifier.__init__(self, y)
        self.name = 'Std_Mean'

    def _set_bins(self):
        # breaks at mean + m*std for each requested multiple; the data
        # maximum is appended as the final break when needed
        y = self.y
        s = y.std(ddof=1)
        m = y.mean()
        cuts = [m + s * w for w in self.multiples]
        y_max = y.max()
        if cuts[-1] < y_max:
            cuts.append(y_max)
        self.bins = np.array(cuts)
        self.k = len(cuts)

    def update(self, y=None, inplace=False, **kwargs):
        """
        Add data or change classification parameters.
        Parameters
        ----------
        y : array
              (n,1) array of data to classify
        inplace : bool
              whether to conduct the update in place or to return a copy
              estimated from the additional specifications.
        Additional parameters provided in **kwargs are passed to the init
        function of the class. For documentation, check the class constructor.
        """
        kwargs.update({'multiples': kwargs.pop('multiples', self.multiples)})
        if inplace:
            self._update(y, **kwargs)
        else:
            new = copy.deepcopy(self)
            new._update(y, **kwargs)
            return new
class Maximum_Breaks(Map_Classifier):
    """
    Maximum Breaks Map Classification
    Parameters
    ----------
    y  : array
         (n, 1), values to classify
    k  : int
         number of classes required
    mindiff : float
              The minimum difference between class breaks
    Attributes
    ----------
    yb : array
         (n, 1), bin ids for observations
    bins : array
           (k, 1), the upper bounds of each class
    k    : int
           the number of classes
    counts : array
             (k, 1), the number of observations falling in each class (numpy
             array k x 1)
    Examples
    --------
    >>> import mapclassify as mc
    >>> cal = mc.load_example()
    >>> mb = mc.Maximum_Breaks(cal, k = 5)
    >>> mb.k
    5
    >>> mb.bins
    array([ 146.005,  228.49 ,  546.675, 2417.15 , 4111.45 ])
    >>> mb.counts
    array([50,  2,  4,  1,  1])
    """

    def __init__(self, y, k=5, mindiff=0):
        self.k = k
        self.mindiff = mindiff
        Map_Classifier.__init__(self, y)
        self.name = 'Maximum_Breaks'

    def _set_bins(self):
        # place breaks at the midpoints of the k-1 largest gaps between
        # consecutive sorted values (gaps must exceed mindiff)
        xs = self.y.copy()
        k = self.k
        xs.sort()
        min_diff = self.mindiff
        d = xs[1:] - xs[:-1]
        diffs = d[np.nonzero(d > min_diff)]
        # FIX: sp.unique relied on scipy's removed numpy alias; use np.unique
        diffs = np.unique(diffs)
        k1 = k - 1
        if len(diffs) > k1:
            diffs = diffs[-k1:]
        mp = []
        self.cids = []
        for diff in diffs:
            ids = np.nonzero(d == diff)
            for id in ids:
                self.cids.append(id[0])
                # midpoint of the gap becomes the class break
                cp = ((xs[id] + xs[id + 1]) / 2.)
                mp.append(cp[0])
        mp.append(xs[-1])
        mp.sort()
        self.bins = np.array(mp)

    def update(self, y=None, inplace=False, **kwargs):
        """
        Add data or change classification parameters.
        Parameters
        ----------
        y : array
              (n,1) array of data to classify
        inplace : bool
              whether to conduct the update in place or to return a copy
              estimated from the additional specifications.
        Additional parameters provided in **kwargs are passed to the init
        function of the class. For documentation, check the class constructor.
        """
        kwargs.update({'k': kwargs.pop('k', self.k)})
        kwargs.update({'mindiff': kwargs.pop('mindiff', self.mindiff)})
        if inplace:
            self._update(y, **kwargs)
        else:
            new = copy.deepcopy(self)
            new._update(y, **kwargs)
            return new
class Natural_Breaks(Map_Classifier):
    """
    Natural Breaks Map Classification
    Parameters
    ----------
    y       : array
              (n,1), values to classify
    k       : int
              number of classes required
    initial : int
              number of initial solutions to generate, (default=100)
    Attributes
    ----------
    yb      : array
              (n,1), bin ids for observations,
    bins    : array
              (k,1), the upper bounds of each class
    k       : int
              the number of classes
    counts  : array
              (k,1), the number of observations falling in each class
    Examples
    --------
    >>> import numpy as np
    >>> import mapclassify as mc
    >>> np.random.seed(123456)
    >>> cal = mc.load_example()
    >>> nb = mc.Natural_Breaks(cal, k=5)
    >>> nb.k
    5
    >>> nb.counts
    array([41,  9,  6,  1,  1])
    >>> nb.bins
    array([  29.82,  110.74,  370.5 ,  722.85, 4111.45])
    >>> x = np.array([1] * 50)
    >>> x[-1] = 20
    >>> nb = mc.Natural_Breaks(x, k = 5, initial = 0)
    Warning: Not enough unique values in array to form k classes
    Warning: setting k to 2
    >>> nb.bins
    array([ 1, 20])
    >>> nb.counts
    array([49,  1])
    Notes
    -----
    There is a tradeoff here between speed and consistency of the
    classification If you want more speed, set initial to a smaller value (0
    would result in the best speed, if you want more consistent classes in
    multiple runs of Natural_Breaks on the same data, set initial to a higher
    value.
    """

    def __init__(self, y, k=K, initial=100):
        self.k = k
        self.initial = initial
        Map_Classifier.__init__(self, y)
        self.name = 'Natural_Breaks'

    def _set_bins(self):
        x = self.y.copy()
        k = self.k
        values = np.array(x)
        uv = np.unique(values)
        uvk = len(uv)
        if uvk < k:
            # not enough distinct values to form k classes: each unique value
            # becomes its own class, no k-means needed
            ms = 'Warning: Not enough unique values in array to form k classes'
            Warn(ms, UserWarning)
            Warn("Warning: setting k to %d" % uvk, UserWarning)
            k = uvk
            uv.sort()
            # we set the bins equal to the sorted unique values and ramp k
            # downwards. no need to call kmeans.
            self.bins = uv
            self.k = k
        else:
            # generate an initial solution, then keep the best of `initial`
            # additional random restarts
            res0 = natural_breaks(x, k)
            fit = res0[2]
            for i in list(range(self.initial)):
                res = natural_breaks(x, k)
                fit_i = res[2]
                if fit_i < fit:
                    res0 = res
                    # BUG FIX: the incumbent fit must track the incumbent
                    # solution; previously every trial was compared against the
                    # *initial* fit, so the last improving trial won instead of
                    # the best one.
                    fit = fit_i
            self.bins = np.array(res0[-1])
            self.k = len(self.bins)

    def update(self, y=None, inplace=False, **kwargs):
        """
        Add data or change classification parameters.
        Parameters
        ----------
        y : array
              (n,1) array of data to classify
        inplace : bool
              whether to conduct the update in place or to return a
              copy estimated from the additional specifications.
        Additional parameters provided in **kwargs are passed to the init
        function of the class. For documentation, check the class constructor.
        """
        kwargs.update({'k': kwargs.pop('k', self.k)})
        kwargs.update({'initial': kwargs.pop('initial', self.initial)})
        if inplace:
            self._update(y, **kwargs)
        else:
            new = copy.deepcopy(self)
            new._update(y, **kwargs)
            return new
class Fisher_Jenks(Map_Classifier):
    """
    Fisher Jenks optimal classifier - mean based
    Parameters
    ----------
    y : array
        (n,1), values to classify
    k : int
        number of classes required
    Attributes
    ----------
    yb      : array
              (n,1), bin ids for observations
    bins    : array
              (k,1), the upper bounds of each class
    k       : int
              the number of classes
    counts  : array
              (k,1), the number of observations falling in each class
    Examples
    --------
    >>> import mapclassify as mc
    >>> cal = mc.load_example()
    >>> fj = mc.Fisher_Jenks(cal)
    >>> fj.adcm
    799.24
    >>> fj.bins
    array([  75.29,  192.05,  370.5 ,  722.85, 4111.45])
    >>> fj.counts
    array([49,  3,  4,  1,  1])
    >>>
    """

    def __init__(self, y, k=K):
        # the optimal partition requires at least k distinct values
        if len(np.unique(y)) < k:
            raise ValueError("Fewer unique values than specified classes.")
        self.k = k
        Map_Classifier.__init__(self, y)
        self.name = "Fisher_Jenks"

    def _set_bins(self):
        # _fisher_jenks_means returns [min, break_1, ..., break_k]; drop the min
        self.bins = np.array(_fisher_jenks_means(self.y.copy(), classes=self.k)[1:])
class Fisher_Jenks_Sampled(Map_Classifier):
    """
    Fisher Jenks optimal classifier - mean based using random sample
    Parameters
    ----------
    y        : array
               (n,1), values to classify
    k        : int
               number of classes required
    pct      : float
               The percentage of n that should form the sample
               If pct is specified such that n*pct > 1000, then
               pct = 1000./n, unless truncate is False
    truncate : boolean
               truncate pct in cases where pct * n > 1000., (Default True)
    Attributes
    ----------
    yb      : array
              (n,1), bin ids for observations
    bins    : array
              (k,1), the upper bounds of each class
    k       : int
              the number of classes
    counts  : array
              (k,1), the number of observations falling in each class
    Examples
    --------
    (Turned off due to timing being different across hardware)
    For theoretical details see :cite:`Rey_2016`.
    """

    def __init__(self, y, k=K, pct=0.10, truncate=True):
        self.k = k
        n = y.size
        if (pct * n > 1000) and truncate:
            pct = 1000. / n
        # FIX: np.random.random_integers(0, n-1, size) is deprecated/removed
        # in modern NumPy; randint(0, n, size) draws the same 0..n-1 range.
        ids = np.random.randint(0, n, int(n * pct))
        yr = y[ids]
        yr[-1] = max(y)  # make sure we have the upper bound
        yr[0] = min(y)  # make sure we have the min
        self.original_y = y
        self.pct = pct
        self._truncated = truncate
        self.yr = yr
        self.yr_n = yr.size
        # classify the sample, then bin the full data on the sample's breaks
        Map_Classifier.__init__(self, yr)
        self.yb, self.counts = bin1d(y, self.bins)
        self.name = "Fisher_Jenks_Sampled"
        self.y = y
        self._summary()  # have to recalculate summary stats

    def _set_bins(self):
        fj = Fisher_Jenks(self.y, self.k)
        self.bins = fj.bins

    def update(self, y=None, inplace=False, **kwargs):
        """
        Add data or change classification parameters.
        Parameters
        ----------
        y : array
              (n,1) array of data to classify
        inplace : bool
              whether to conduct the update in place or to return a
              copy estimated from the additional specifications.
        Additional parameters provided in **kwargs are passed to the init
        function of the class. For documentation, check the class constructor.
        """
        kwargs.update({'k': kwargs.pop('k', self.k)})
        kwargs.update({'pct': kwargs.pop('pct', self.pct)})
        kwargs.update({'truncate': kwargs.pop('truncate', self._truncated)})
        if inplace:
            self._update(y, **kwargs)
        else:
            new = copy.deepcopy(self)
            new._update(y, **kwargs)
            return new
class Jenks_Caspall(Map_Classifier):
    """
    Jenks Caspall  Map Classification
    Parameters
    ----------
    y : array
        (n,1), values to classify
    k : int
        number of classes required
    Attributes
    ----------
    yb      : array
              (n,1), bin ids for observations,
    bins    : array
              (k,1), the upper bounds of each class
    k       : int
              the number of classes
    counts  : array
              (k,1), the number of observations falling in each class
    Examples
    --------
    >>> import mapclassify as mc
    >>> cal = mc.load_example()
    >>> jc = mc.Jenks_Caspall(cal, k = 5)
    >>> jc.bins
    array([1.81000e+00, 7.60000e+00, 2.98200e+01, 1.81270e+02, 4.11145e+03])
    >>> jc.counts
    array([14, 13, 14, 10,  7])
    """

    def __init__(self, y, k=K):
        self.k = k
        Map_Classifier.__init__(self, y)
        self.name = "Jenks_Caspall"

    def _set_bins(self):
        x = self.y.copy()
        k = self.k
        # start with quantiles
        q = quantile(x, k)
        solving = True
        xb, cnts = bin1d(x, q)
        # class means
        if x.ndim == 1:
            x.shape = (x.size, 1)
        # FIX: the second unpacked value previously shadowed local `k`
        n, _ = x.shape
        xm = [np.median(x[xb == i]) for i in np.unique(xb)]
        xb0 = xb.copy()
        q = xm
        it = 0
        rk = list(range(self.k))
        # iterate: reassign each value to its nearest class median until
        # the assignment is stable
        while solving:
            xb = np.zeros(xb0.shape, int)
            d = abs(x - q)
            xb = d.argmin(axis=1)
            if (xb0 == xb).all():
                solving = False
            else:
                xb0 = xb
            it += 1
            q = np.array([np.median(x[xb == i]) for i in rk])
        # FIX: sp.unique relied on scipy's removed numpy alias; use np.unique
        cuts = np.array([max(x[xb == i]) for i in np.unique(xb)])
        cuts.shape = (len(cuts), )
        self.bins = cuts
        self.iterations = it
class Jenks_Caspall_Sampled(Map_Classifier):
    """
    Jenks Caspall Map Classification using a random sample
    Parameters
    ----------
    y       : array
              (n,1), values to classify
    k       : int
              number of classes required
    pct     : float
              The percentage of n that should form the sample
              If pct is specified such that n*pct > 1000, then pct = 1000./n
    Attributes
    ----------
    yb      : array
              (n,1), bin ids for observations,
    bins    : array
              (k,1), the upper bounds of each class
    k       : int
              the number of classes
    counts  : array
              (k,1), the number of observations falling in each class
    Examples
    --------
    >>> import mapclassify as mc
    >>> cal = mc.load_example()
    >>> x = np.random.random(100000)
    >>> jc = mc.Jenks_Caspall(x)
    >>> jcs = mc.Jenks_Caspall_Sampled(x)
    >>> jc.bins
    array([0.1988721 , 0.39624334, 0.59441487, 0.79624357, 0.99999251])
    >>> jcs.bins
    array([0.20998558, 0.42112792, 0.62752937, 0.80543819, 0.99999251])
    >>> jc.counts
    array([19943, 19510, 19547, 20297, 20703])
    >>> jcs.counts
    array([21039, 20908, 20425, 17813, 19815])
    # not for testing since we get different times on different hardware
    # just included for documentation of likely speed gains
    #>>> t1 = time.time(); jc = Jenks_Caspall(x); t2 = time.time()
    #>>> t1s = time.time(); jcs = Jenks_Caspall_Sampled(x); t2s = time.time()
    #>>> t2 - t1; t2s - t1s
    #1.8292930126190186
    #0.061631917953491211
    Notes
    -----
    This is intended for large n problems. The logic is to apply
    Jenks_Caspall to a random subset of the y space and then bin the
    complete vector y on the bins obtained from the subset. This would
    trade off some "accuracy" for a gain in speed.
    """

    def __init__(self, y, k=K, pct=0.10):
        self.k = k
        n = y.size
        if pct * n > 1000:
            pct = 1000. / n
        # FIX: np.random.random_integers(0, n-1, size) is deprecated/removed
        # in modern NumPy; randint(0, n, size) draws the same 0..n-1 range.
        ids = np.random.randint(0, n, int(n * pct))
        yr = y[ids]
        yr[0] = max(y)  # make sure we have the upper bound
        self.original_y = y
        self.pct = pct
        self.yr = yr
        self.yr_n = yr.size
        # classify the sample, then bin the full data on the sample's breaks
        Map_Classifier.__init__(self, yr)
        self.yb, self.counts = bin1d(y, self.bins)
        self.name = "Jenks_Caspall_Sampled"
        self.y = y
        self._summary()  # have to recalculate summary stats

    def _set_bins(self):
        jc = Jenks_Caspall(self.y, self.k)
        self.bins = jc.bins
        self.iterations = jc.iterations

    def update(self, y=None, inplace=False, **kwargs):
        """
        Add data or change classification parameters.
        Parameters
        ----------
        y : array
              (n,1) array of data to classify
        inplace : bool
              whether to conduct the update in place or to return a
              copy estimated from the additional specifications.
        Additional parameters provided in **kwargs are passed to the init
        function of the class. For documentation, check the class constructor.
        """
        kwargs.update({'k': kwargs.pop('k', self.k)})
        kwargs.update({'pct': kwargs.pop('pct', self.pct)})
        if inplace:
            self._update(y, **kwargs)
        else:
            new = copy.deepcopy(self)
            new._update(y, **kwargs)
            return new
class Jenks_Caspall_Forced(Map_Classifier):
    """
    Jenks Caspall  Map Classification with forced movements
    Parameters
    ----------
    y : array
        (n,1), values to classify
    k : int
        number of classes required
    Attributes
    ----------
    yb      : array
              (n,1), bin ids for observations
    bins    : array
              (k,1), the upper bounds of each class
    k       : int
              the number of classes
    counts  : array
              (k,1), the number of observations falling in each class
    Examples
    --------
    >>> import mapclassify as mc
    >>> cal = mc.load_example()
    >>> jcf = mc.Jenks_Caspall_Forced(cal, k = 5)
    >>> jcf.k
    5
    >>> jcf.bins
    array([[1.34000e+00],
           [5.90000e+00],
           [1.67000e+01],
           [5.06500e+01],
           [4.11145e+03]])
    >>> jcf.counts
    array([12, 12, 13,  9, 12])
    >>> jcf4 = mc.Jenks_Caspall_Forced(cal, k = 4)
    >>> jcf4.k
    4
    >>> jcf4.bins
    array([[2.51000e+00],
           [8.70000e+00],
           [3.66800e+01],
           [4.11145e+03]])
    >>> jcf4.counts
    array([15, 14, 14, 15])
    """

    def __init__(self, y, k=K):
        self.k = k
        Map_Classifier.__init__(self, y)
        self.name = "Jenks_Caspall_Forced"

    def _set_bins(self):
        # Hill-climb from a quantile seed: alternately force boundary
        # observations up/down one class and keep any move that lowers the
        # total within-class sum of squares (ss).
        # FIX throughout: sp.unique relied on scipy's removed numpy alias;
        # replaced with np.unique (identical behavior).
        x = self.y.copy()
        k = self.k
        q = quantile(x, k)
        solving = True
        xb, cnt = bin1d(x, q)
        # class means
        if x.ndim == 1:
            x.shape = (x.size, 1)
        n, tmp = x.shape
        xm = [x[xb == i].mean() for i in np.unique(xb)]
        q = xm
        xbar = np.array([xm[xbi] for xbi in xb])
        xbar.shape = (n, 1)
        ss = x - xbar
        ss *= ss
        ss = sum(ss)
        down_moves = up_moves = 0
        solving = True
        it = 0
        while solving:
            # try upward moves first
            moving_up = True
            while moving_up:
                class_ids = np.unique(xb)
                nk = [sum(xb == j) for j in class_ids]
                candidates = nk[:-1]
                i = 0
                up_moves = 0
                while candidates:
                    nki = candidates.pop(0)
                    if nki > 1:
                        # move the largest member of class i up one class
                        ids = np.nonzero(xb == class_ids[i])
                        mover = max(ids[0])
                        tmp = xb.copy()
                        tmp[mover] = xb[mover] + 1
                        tm = [x[tmp == j].mean() for j in np.unique(tmp)]
                        txbar = np.array([tm[xbi] for xbi in tmp])
                        txbar.shape = (n, 1)
                        tss = x - txbar
                        tss *= tss
                        tss = sum(tss)
                        if tss < ss:
                            # accept the move and restart the scan
                            xb = tmp
                            ss = tss
                            candidates = []
                            up_moves += 1
                    i += 1
                if not up_moves:
                    moving_up = False
            moving_down = True
            while moving_down:
                class_ids = np.unique(xb)
                nk = [sum(xb == j) for j in class_ids]
                candidates = nk[1:]
                i = 1
                down_moves = 0
                while candidates:
                    nki = candidates.pop(0)
                    if nki > 1:
                        # move the smallest member of class i down one class
                        ids = np.nonzero(xb == class_ids[i])
                        mover = min(ids[0])
                        mover_class = xb[mover]
                        target_class = mover_class - 1
                        tmp = xb.copy()
                        tmp[mover] = target_class
                        tm = [x[tmp == j].mean() for j in np.unique(tmp)]
                        txbar = np.array([tm[xbi] for xbi in tmp])
                        txbar.shape = (n, 1)
                        tss = x - txbar
                        tss *= tss
                        tss = sum(tss)
                        if tss < ss:
                            # accept the move and restart the scan
                            xb = tmp
                            ss = tss
                            candidates = []
                            down_moves += 1
                    i += 1
                if not down_moves:
                    moving_down = False
            if not up_moves and not down_moves:
                solving = False
            it += 1
        cuts = [max(x[xb == c]) for c in np.unique(xb)]
        self.bins = np.array(cuts)
        self.iterations = it
class User_Defined(Map_Classifier):
    """
    User Specified Binning

    Parameters
    ----------
    y : array
        (n,1), values to classify
    bins : array
        (k,1), upper bounds of classes (have to be monotically increasing)

    Attributes
    ----------
    yb : array
        (n,1), bin ids for observations,
    bins : array
        (k,1), the upper bounds of each class
    k : int
        the number of classes
    counts : array
        (k,1), the number of observations falling in each class

    Notes
    -----
    If the upper bound of the user bins does not exceed max(y) an
    additional bin is appended.  The caller's ``bins`` sequence is never
    modified, and array-like bins (e.g. ``self.bins`` passed back through
    ``update``) are accepted.
    """

    def __init__(self, y, bins):
        # Copy to a fresh list: the original implementation appended to the
        # caller's list in place (mutating their data) and crashed with
        # AttributeError when bins was an ndarray.
        bins = list(bins)
        if bins[-1] < max(y):
            bins.append(max(y))
        self.k = len(bins)
        self.bins = np.array(bins)
        self.y = y
        Map_Classifier.__init__(self, y)
        self.name = 'User Defined'

    def _set_bins(self):
        # bins were fixed by the user in __init__
        pass

    def _update(self, y=None, bins=None):
        # merge new data with existing data (pandas-aware), then re-init
        if y is not None:
            if hasattr(y, 'values'):
                y = y.values
            y = np.append(y.flatten(), self.y)
        else:
            y = self.y
        if bins is None:
            bins = self.bins
        self.__init__(y, bins)

    def update(self, y=None, inplace=False, **kwargs):
        """
        Add data or change classification parameters.

        Parameters
        ----------
        y : array
            (n,1) array of data to classify
        inplace : bool
            whether to conduct the update in place or to return a
            copy estimated from the additional specifications.

        Additional parameters provided in **kwargs are passed to the init
        function of the class. For documentation, check the class constructor.
        """
        bins = kwargs.pop('bins', self.bins)
        if inplace:
            self._update(y=y, bins=bins, **kwargs)
        else:
            new = copy.deepcopy(self)
            new._update(y, bins, **kwargs)
            return new
class Max_P_Classifier(Map_Classifier):
    """
    Max_P Map Classification

    Based on Max_p regionalization algorithm

    Parameters
    ----------
    y : array
        (n,1), values to classify
    k : int
        number of classes required
    initial : int
        number of initial solutions to use prior to swapping

    Attributes
    ----------
    yb : array
        (n,1), bin ids for observations,
    bins : array
        (k,1), the upper bounds of each class
    k : int
        the number of classes
    counts : array
        (k,1), the number of observations falling in each class
    """

    def __init__(self, y, k=K, initial=1000):
        self.k = k
        self.initial = initial
        Map_Classifier.__init__(self, y)
        self.name = "Max_P"

    def _set_bins(self):
        # Phase 1: grow `initial` random contiguous partitions from quantile
        # seeds and keep the one with the lowest total sum of squares (tss).
        # Phase 2: greedily swap boundary observations between adjacent
        # classes while that lowers tss.
        x = self.y.copy()
        k = self.k
        q = quantile(x, k)
        if x.ndim == 1:
            x.shape = (x.size, 1)
        n, tmp = x.shape
        x.sort(axis=0)
        # find best of initial solutions
        solution = 0
        best_tss = x.var() * x.shape[0]
        # guard: with constant input every tss equals best_tss, so the
        # original code left best_solution unbound (NameError)
        best_solution = None
        best_it = -1
        tss_all = np.zeros((self.initial, 1))
        while solution < self.initial:
            remaining = list(range(n))
            seeds = [
                np.nonzero(di == min(di))[0][0]
                for di in [np.abs(x - qi) for qi in q]
            ]
            rseeds = np.random.permutation(list(range(k))).tolist()
            [remaining.remove(seed) for seed in seeds]
            self.classes = classes = []
            [classes.append([seed]) for seed in seeds]
            while rseeds:
                seed_id = rseeds.pop()
                current = classes[seed_id]
                growing = True
                while growing:
                    current = classes[seed_id]
                    low = current[0]
                    high = current[-1]
                    left = low - 1
                    right = high + 1
                    move_made = False
                    if left in remaining:
                        current.insert(0, left)
                        remaining.remove(left)
                        move_made = True
                    if right in remaining:
                        current.append(right)
                        remaining.remove(right)
                        move_made = True
                    if move_made:
                        classes[seed_id] = current
                    else:
                        growing = False
            tss = _fit(self.y, classes)
            tss_all[solution] = tss
            if best_solution is None or tss < best_tss:
                best_solution = classes
                best_it = solution
                best_tss = tss
            solution += 1
        classes = best_solution
        self.best_it = best_it
        self.tss = best_tss
        self.a2c = a2c = {}  # observation index -> class id
        self.tss_all = tss_all
        for r, cl in enumerate(classes):
            for a in cl:
                a2c[a] = r
        swapping = True
        while swapping:
            rseeds = np.random.permutation(list(range(k))).tolist()
            total_moves = 0
            while rseeds:
                cid = rseeds.pop()  # renamed from `id` (shadowed builtin)
                growing = True
                total_moves = 0
                while growing:
                    target = classes[cid]
                    left = target[0] - 1
                    right = target[-1] + 1
                    n_moves = 0
                    if left in a2c:
                        left_class = classes[a2c[left]]
                        if len(left_class) > 1:
                            a = left_class[-1]
                            if self._swap(left_class, target, a):
                                target.insert(0, a)
                                left_class.remove(a)
                                a2c[a] = cid
                                n_moves += 1
                    if right in a2c:
                        right_class = classes[a2c[right]]
                        if len(right_class) > 1:
                            a = right_class[0]
                            if self._swap(right_class, target, a):
                                target.append(a)
                                right_class.remove(a)
                                n_moves += 1
                                a2c[a] = cid
                    if not n_moves:
                        growing = False
                    total_moves += n_moves
            if not total_moves:
                swapping = False
        xs = self.y.copy()
        xs.sort()
        self.bins = np.array([xs[cl][-1] for cl in classes])

    def _ss(self, class_def):
        """calculates sum of squares for a class"""
        yc = self.y[class_def]
        css = yc - yc.mean()
        css *= css
        return sum(css)

    def _swap(self, class1, class2, a):
        """evaluate cost of moving a from class1 to class2"""
        ss1 = self._ss(class1)
        ss2 = self._ss(class2)
        tss1 = ss1 + ss2
        class1c = copy.copy(class1)
        class2c = copy.copy(class2)
        class1c.remove(a)
        class2c.append(a)
        ss1 = self._ss(class1c)
        ss2 = self._ss(class2c)
        tss2 = ss1 + ss2
        if tss1 < tss2:
            return False
        else:
            return True

    def update(self, y=None, inplace=False, **kwargs):
        """
        Add data or change classification parameters.

        Parameters
        ----------
        y : array
            (n,1) array of data to classify
        inplace : bool
            whether to conduct the update in place or to return a
            copy estimated from the additional specifications.

        Additional parameters provided in **kwargs are passed to the init
        function of the class. For documentation, check the class constructor.
        """
        # BUG FIX: the original passed an undefined name `bins` to _update,
        # raising NameError on every call; also carry k like sibling classes.
        kwargs.update({'k': kwargs.pop('k', self.k)})
        kwargs.update({'initial': kwargs.pop('initial', self.initial)})
        if inplace:
            self._update(y, **kwargs)
        else:
            new = copy.deepcopy(self)
            new._update(y, **kwargs)
            return new
def _fit(y, classes):
"""Calculate the total sum of squares for a vector y classified into
classes
Parameters
----------
y : array
(n,1), variable to be classified
classes : array
(k,1), integer values denoting class membership
"""
tss = 0
for class_def in classes:
yc = y[class_def]
css = yc - yc.mean()
css *= css
tss += sum(css)
return tss
# registry of k-classifiers evaluated by gadf() / K_classifiers
kmethods = {
    "Quantiles": Quantiles,
    "Fisher_Jenks": Fisher_Jenks,
    "Natural_Breaks": Natural_Breaks,
    "Maximum_Breaks": Maximum_Breaks,
}
def gadf(y, method="Quantiles", maxk=15, pct=0.8):
    """
    Evaluate the Goodness of Absolute Deviation Fit of a Classifier.

    Finds the minimum value of k for which gadf > pct.

    Parameters
    ----------
    y : array
        (n, 1) values to be classified
    method : {'Quantiles', 'Fisher_Jenks', 'Maximum_Breaks', 'Natural_Breaks'}
    maxk : int
        maximum value of k to evaluate
    pct : float
        The percentage of GADF to exceed

    Returns
    -------
    k : int
        number of classes
    cl : object
        instance of the classifier at k
    gadf : float
        goodness of absolute deviation fit

    Notes
    -----
    The GADF is defined as:

    .. math::

        GADF = 1 - \\sum_c \\sum_{i \\in c}
               |y_i - y_{c,med}|  / \\sum_i |y_i - y_{med}|

    where :math:`y_{med}` is the global median and :math:`y_{c,med}` is
    the median for class :math:`c`.

    See Also
    --------
    K_classifiers
    """
    y = np.array(y)
    # total absolute deviation about the global median
    adam = np.abs(y - np.median(y)).sum()
    for k in range(2, maxk + 1):
        classifier = kmethods[method](y, k)
        fit = 1 - classifier.adcm / adam
        if fit > pct:
            break
    return (k, classifier, fit)
class K_classifiers(object):
    """
    Evaluate all k-classifers and pick optimal based on k and GADF.

    Parameters
    ----------
    y : array
        (n,1), values to be classified
    pct : float
        The percentage of GADF to exceed

    Attributes
    ----------
    best : object
        instance of the optimal Map_Classifier
    results : dictionary
        keys are classifier names, values are the (k, classifier, gadf)
        tuples with the best pct for each classifer

    Notes
    -----
    This can be used to suggest a classification scheme.

    See Also
    --------
    gadf
    """

    def __init__(self, y, pct=0.8):
        results = {}
        best = gadf(y, "Fisher_Jenks", maxk=len(y) - 1, pct=pct)
        # gadf returns (k, classifier, gadf).  BUG FIX: the original code
        # swapped these two assignments (pct0 = best[0]; k0 = best[-1]),
        # which made the comparison below compare class counts against
        # gadf fractions, so Fisher_Jenks always won.
        k0 = best[0]
        pct0 = best[-1]
        keys = list(kmethods.keys())
        keys.remove("Fisher_Jenks")
        results["Fisher_Jenks"] = best
        for method in keys:
            results[method] = gadf(y, method, maxk=len(y) - 1, pct=pct)
            k1 = results[method][0]
            pct1 = results[method][-1]
            # prefer fewer classes; break ties on higher gadf
            if (k1 < k0) or (k1 == k0 and pct0 < pct1):
                best = results[method]
                k0 = k1
                pct0 = pct1
        self.results = results
        self.best = best[1]
|
pysal/mapclassify
|
mapclassify/classifiers.py
|
binC
|
python
|
def binC(y, bins):
    """
    Bin categorical/qualitative data.

    Parameters
    ----------
    y : array
        (n,q), categorical values
    bins : array
        (k,1), unique values associated with each bin

    Returns
    -------
    b : array
        (n,q), bin membership, values between 0 and k-1
    """
    shape = np.shape(y)
    n = shape[0]
    k = 1 if np.ndim(y) == 1 else shape[1]
    b = np.zeros((n, k), dtype='int')
    for index, bound in enumerate(bins):
        b[np.nonzero(y == bound)] = index
    # warn about any values that did not match a bin
    for val in set(y.flatten()):
        if val not in bins:
            Warn('value not in bin: {}'.format(val), UserWarning)
            Warn('bins: {}'.format(bins), UserWarning)
    return b
|
Bin categorical/qualitative data
Parameters
----------
y : array
(n,q), categorical values
bins : array
(k,1), unique values associated with each bin
Return
------
b : array
(n,q), bin membership, values between 0 and k-1
Examples
--------
>>> import numpy as np
>>> import mapclassify as mc
>>> np.random.seed(1)
>>> x = np.random.randint(2, 8, (10, 3))
>>> bins = list(range(2, 8))
>>> x
array([[7, 5, 6],
[2, 3, 5],
[7, 2, 2],
[3, 6, 7],
[6, 3, 4],
[6, 7, 4],
[6, 5, 6],
[4, 6, 7],
[4, 6, 3],
[3, 2, 7]])
>>> y = mc.classifiers.binC(x, bins)
>>> y
array([[5, 3, 4],
[0, 1, 3],
[5, 0, 0],
[1, 4, 5],
[4, 1, 2],
[4, 5, 2],
[4, 3, 4],
[2, 4, 5],
[2, 4, 1],
[1, 0, 5]])
|
train
|
https://github.com/pysal/mapclassify/blob/5b22ec33f5802becf40557614d90cd38efa1676e/mapclassify/classifiers.py#L100-L164
| null |
"""
A module of classification schemes for choropleth mapping.
"""
__author__ = "Sergio J. Rey"
__all__ = [
'Map_Classifier', 'quantile', 'Box_Plot', 'Equal_Interval', 'Fisher_Jenks',
'Fisher_Jenks_Sampled', 'Jenks_Caspall', 'Jenks_Caspall_Forced',
'Jenks_Caspall_Sampled', 'Max_P_Classifier', 'Maximum_Breaks',
'Natural_Breaks', 'Quantiles', 'Percentiles', 'Std_Mean', 'User_Defined',
'gadf', 'K_classifiers', 'HeadTail_Breaks', 'CLASSIFIERS'
]
CLASSIFIERS = ('Box_Plot', 'Equal_Interval', 'Fisher_Jenks',
'Fisher_Jenks_Sampled', 'HeadTail_Breaks', 'Jenks_Caspall',
'Jenks_Caspall_Forced', 'Jenks_Caspall_Sampled',
'Max_P_Classifier', 'Maximum_Breaks', 'Natural_Breaks',
'Quantiles', 'Percentiles', 'Std_Mean', 'User_Defined')
K = 5 # default number of classes in any map scheme with this as an argument
import numpy as np
import scipy.stats as stats
import scipy as sp
import copy
from scipy.cluster.vq import kmeans as KMEANS
from warnings import warn as Warn
try:
    from numba import jit
except ImportError:
    # numba is an optional speedup; without it, fall back to a no-op
    # decorator so @jit-decorated functions run as plain Python.
    def jit(func):
        return func
def headTail_breaks(values, cuts):
    """
    Head/tail breaks helper function.

    Recursively appends the mean of the remaining values to ``cuts`` and
    recurses on the "head" (values at or above the mean).

    Parameters
    ----------
    values : array-like
        values to split
    cuts : list
        accumulator of break points; mutated in place and returned
    """
    values = np.array(values)
    mean = np.mean(values)
    cuts.append(mean)
    head = values[values >= mean]
    # BUG FIX: recurse only while the head actually shrinks; the original
    # recursed whenever len(values) > 1, which never terminated when all
    # remaining values were equal (head == values forever).
    if len(values) > 1 and len(head) < len(values):
        return headTail_breaks(head, cuts)
    return cuts
def quantile(y, k=4):
    """
    Calculate the quantiles for an array.

    Parameters
    ----------
    y : array
        (n,1), values to classify
    k : int
        number of quantiles

    Returns
    -------
    q : array
        quantile values; if ties cause repeated quantile values the result
        collapses to pseudo quantiles with fewer than k entries and a
        warning is issued
    """
    step = 100. / k
    percentiles = np.arange(step, 100 + step, step)
    if percentiles[-1] > 100.0:
        # guard against floating point overshoot past 100
        percentiles[-1] = 100.0
    breaks = np.unique(
        np.array([stats.scoreatpercentile(y, p) for p in percentiles]))
    n_breaks = len(breaks)
    if n_breaks < k:
        Warn('Warning: Not enough unique values in array to form k classes',
             UserWarning)
        Warn('Warning: setting k to %d' % n_breaks, UserWarning)
    return breaks
def bin(y, bins):
    """
    Bin interval/ratio data.

    Parameters
    ----------
    y : array
        (n,q), values to bin
    bins : array
        (k,1), upper bounds of each bin (monotonic)

    Returns
    -------
    b : array
        (n,q), bin ids between 0 and k-1; values above the last bound
        remain 0
    """
    if np.ndim(y) == 1:
        n, q = np.shape(y)[0], 1
    else:
        n, q = np.shape(y)
    b = np.zeros((n, q), dtype='int')
    # assign from the highest cut downwards so lower bins overwrite
    for i in range(len(bins) - 1, -1, -1):
        b[np.nonzero(y <= bins[i])] = i
    return b
def bin1d(x, bins):
    """
    Place values of a 1-d array into bins and determine counts of values in
    each bin.

    Parameters
    ----------
    x : array
        (n, 1), values to bin
    bins : array
        (k,1), upper bounds of each bin (monotonic)

    Returns
    -------
    binIds : array
        1-d array of integer bin Ids
    counts : array
        number of elements of x falling in each bin
    """
    # left edges: -inf, then all but the last upper bound
    lowers = [-float("inf")] + list(bins[:-1])
    binIds = np.zeros(x.shape, dtype='int')
    for i, (lo, hi) in enumerate(zip(lowers, bins)):
        binIds[(x > lo) & (x <= hi)] = i
    counts = np.bincount(binIds, minlength=len(bins))
    return (binIds, counts)
def load_example():
    """
    Helper function for doc tests
    """
    # deferred import keeps the bundled example dataset out of module load
    from .datasets import calemp
    return calemp.load()
def _kmeans(y, k=5):
"""
Helper function to do kmeans in one dimension
"""
y = y * 1. # KMEANS needs float or double dtype
centroids = KMEANS(y, k)[0]
centroids.sort()
try:
class_ids = np.abs(y - centroids).argmin(axis=1)
except:
class_ids = np.abs(y[:, np.newaxis] - centroids).argmin(axis=1)
uc = np.unique(class_ids)
cuts = np.array([y[class_ids == c].max() for c in uc])
y_cent = np.zeros_like(y)
for c in uc:
y_cent[class_ids == c] = centroids[c]
diffs = y - y_cent
diffs *= diffs
return class_ids, cuts, diffs.sum(), centroids
def natural_breaks(values, k=5):
    """
    Natural breaks helper function.

    Jenks natural breaks is kmeans in one dimension.

    Returns
    -------
    tuple
        (centroids, class_ids, fit, cuts)
    """
    values = np.array(values)
    n_unique = len(np.unique(values))
    if n_unique < k:
        Warn('Warning: Not enough unique values in array to form k classes',
             UserWarning)
        Warn('Warning: setting k to %d' % n_unique, UserWarning)
        k = n_unique
    class_ids, cuts, fit, centroids = _kmeans(values, k)
    return (centroids, class_ids, fit, cuts)
@jit
def _fisher_jenks_means(values, classes=5, sort=True):
    """
    Jenks Optimal (Natural Breaks) algorithm implemented in Python.

    Notes
    -----
    The original Python code comes from here:
    http://danieljlewis.org/2010/06/07/jenks-natural-breaks-algorithm-in-python/
    and is based on a JAVA and Fortran code available here:
    https://stat.ethz.ch/pipermail/r-sig-geo/2006-March/000811.html

    Returns class breaks such that classes are internally homogeneous while
    assuring heterogeneity among classes.
    """
    # NOTE: `values` is sorted in place when sort=True.
    if sort:
        values.sort()
    n_data = len(values)
    # Dynamic-programming tables:
    # mat1[l, j] -- start index of the optimal last class when the first l
    #               values are split into j classes;
    # mat2[l, j] -- the matching minimal total within-class variance.
    mat1 = np.zeros((n_data + 1, classes + 1), dtype=np.int32)
    mat2 = np.zeros((n_data + 1, classes + 1), dtype=np.float32)
    mat1[1, 1:] = 1
    mat2[2:, 1:] = np.inf
    v = np.float32(0)
    for l in range(2, len(values) + 1):
        s1 = np.float32(0)  # running sum of the candidate last class
        s2 = np.float32(0)  # running sum of squares
        w = np.float32(0)   # running count
        for m in range(1, l + 1):
            i3 = l - m + 1
            val = np.float32(values[i3 - 1])
            s2 += val * val
            s1 += val
            w += np.float32(1)
            # v is w times the variance of values[i3-1 : l]
            v = s2 - (s1 * s1) / w
            i4 = i3 - 1
            if i4 != 0:
                for j in range(2, classes + 1):
                    if mat2[l, j] >= (v + mat2[i4, j - 1]):
                        mat1[l, j] = i3
                        mat2[l, j] = v + mat2[i4, j - 1]
        mat1[l, 1] = 1
        mat2[l, 1] = v
    # backtrack through mat1 to recover the class break values
    k = len(values)
    kclass = np.zeros(classes + 1, dtype=values.dtype)
    kclass[classes] = values[len(values) - 1]
    kclass[0] = values[0]
    for countNum in range(classes, 1, -1):
        pivot = mat1[k, countNum]
        id = int(pivot - 2)
        kclass[countNum - 1] = values[id]
        k = int(pivot - 1)
    return kclass
class Map_Classifier(object):
    """
    Abstract class for all map classifications :cite:`Slocum_2009`

    For an array :math:`y` of :math:`n` values, a map classifier places each
    value :math:`y_i` into one of :math:`k` mutually exclusive and exhaustive
    classes.  Each classifer defines the classes based on different criteria,
    but in all cases the following hold for the classifiers in PySAL:

    .. math:: C_j^l < y_i \le C_j^u \ \forall  i \in C_j

    where :math:`C_j` denotes class :math:`j` which has lower bound
    :math:`C_j^l` and upper bound :math:`C_j^u`.

    Map Classifiers Supported

    * :class:`mapclassify.classifiers.Box_Plot`
    * :class:`mapclassify.classifiers.Equal_Interval`
    * :class:`mapclassify.classifiers.Fisher_Jenks`
    * :class:`mapclassify.classifiers.Fisher_Jenks_Sampled`
    * :class:`mapclassify.classifiers.HeadTail_Breaks`
    * :class:`mapclassify.classifiers.Jenks_Caspall`
    * :class:`mapclassify.classifiers.Jenks_Caspall_Forced`
    * :class:`mapclassify.classifiers.Jenks_Caspall_Sampled`
    * :class:`mapclassify.classifiers.Max_P_Classifier`
    * :class:`mapclassify.classifiers.Maximum_Breaks`
    * :class:`mapclassify.classifiers.Natural_Breaks`
    * :class:`mapclassify.classifiers.Quantiles`
    * :class:`mapclassify.classifiers.Percentiles`
    * :class:`mapclassify.classifiers.Std_Mean`
    * :class:`mapclassify.classifiers.User_Defined`

    Utilities:

    In addition to the classifiers, there are several utility functions that
    can be used to evaluate the properties of a specific classifier,
    or for automatic selection of a classifier and
    number of classes.

    * :func:`mapclassify.classifiers.gadf`
    * :class:`mapclassify.classifiers.K_classifiers`
    """

    def __init__(self, y):
        y = np.asarray(y).flatten()
        self.name = 'Map Classifier'
        self.y = y
        self._classify()
        self._summary()

    def _summary(self):
        # cache per-class membership lists and fit statistics
        yb = self.yb
        self.classes = [np.nonzero(yb == c)[0].tolist() for c in range(self.k)]
        self.tss = self.get_tss()
        self.adcm = self.get_adcm()
        self.gadf = self.get_gadf()

    def _classify(self):
        # subclasses define _set_bins; bin1d maps each value to a bin id
        self._set_bins()
        self.yb, self.counts = bin1d(self.y, self.bins)

    def _update(self, data, *args, **kwargs):
        """
        The only thing that *should* happen in this function is
        1. input sanitization for pandas
        2. classification/reclassification.

        Using their __init__ methods, all classifiers can re-classify given
        different input parameters or additional data.

        If you've got a cleverer updating equation than the intial estimation
        equation, remove the call to self.__init__ below and replace it with
        the updating function.
        """
        if data is not None:
            data = np.asarray(data).flatten()
            data = np.append(data.flatten(), self.y)
        else:
            data = self.y
        self.__init__(data, *args, **kwargs)

    @classmethod
    def make(cls, *args, **kwargs):
        """
        Configure and create a classifier that will consume data and produce
        classifications, given the configuration options specified by this
        function.

        Note that this like a *partial application* of the relevant class
        constructor. `make` creates a function that returns classifications; it
        does not actually do the classification.

        If you want to classify data directly, use the appropriate class
        constructor, like Quantiles, Max_Breaks, etc.

        If you *have* a classifier object, but want to find which bins new data
        falls into, use find_bin.

        Parameters
        ----------
        *args           : required positional arguments
                          all positional arguments required by the classifier,
                          excluding the input data.
        rolling         : bool
                          a boolean configuring the outputted classifier to use
                          a rolling classifier rather than a new classifier for
                          each input. If rolling, this adds the current data to
                          all of the previous data in the classifier, and
                          rebalances the bins, like a running median
                          computation.
        return_object   : bool
                          a boolean configuring the outputted classifier to
                          return the classifier object or not
        return_bins     : bool
                          a boolean configuring the outputted classifier to
                          return the bins/breaks or not
        return_counts   : bool
                          a boolean configuring the outputted classifier to
                          return the histogram of objects falling into each bin
                          or not

        Returns
        -------
        A function that consumes data and returns their bins (and object,
        bins/breaks, or counts, if requested).

        Note
        ----
        This is most useful when you want to run a classifier many times
        with a given configuration, such as when classifying many columns of an
        array or dataframe using the same configuration.

        Examples
        --------
        >>> import libpysal as ps
        >>> import mapclassify as mc
        >>> import geopandas as gpd
        >>> df = gpd.read_file(ps.examples.get_path('columbus.dbf'))
        >>> classifier = mc.Quantiles.make(k=9)
        >>> cl = df[['HOVAL', 'CRIME', 'INC']].apply(classifier)
        >>> cl["HOVAL"].values[:10]
        array([8, 7, 2, 4, 1, 3, 8, 5, 7, 8])
        >>> cl["CRIME"].values[:10]
        array([0, 1, 3, 4, 6, 2, 0, 5, 3, 4])
        >>> cl["INC"].values[:10]
        array([7, 8, 5, 0, 3, 5, 0, 3, 6, 4])
        >>> import pandas as pd; from numpy import linspace as lsp
        >>> data = [lsp(3,8,num=10), lsp(10, 0, num=10), lsp(-5, 15, num=10)]
        >>> data = pd.DataFrame(data).T
        >>> data
                  0          1          2
        0  3.000000  10.000000  -5.000000
        1  3.555556   8.888889  -2.777778
        2  4.111111   7.777778  -0.555556
        3  4.666667   6.666667   1.666667
        4  5.222222   5.555556   3.888889
        5  5.777778   4.444444   6.111111
        6  6.333333   3.333333   8.333333
        7  6.888889   2.222222  10.555556
        8  7.444444   1.111111  12.777778
        9  8.000000   0.000000  15.000000
        >>> data.apply(mc.Quantiles.make(rolling=True))
           0  1  2
        0  0  4  0
        1  0  4  0
        2  1  4  0
        3  1  3  0
        4  2  2  1
        5  2  1  2
        6  3  0  4
        7  3  0  4
        8  4  0  4
        9  4  0  4
        >>> dbf = ps.io.open(ps.examples.get_path('baltim.dbf'))
        >>> data = dbf.by_col_array('PRICE', 'LOTSZ', 'SQFT')
        >>> my_bins = [1, 10, 20, 40, 80]
        >>> cl = [mc.User_Defined.make(bins=my_bins)(a) for a in data.T]
        >>> len(cl)
        3
        >>> cl[0][:10]
        array([4, 5, 5, 5, 4, 4, 5, 4, 4, 5])
        """
        # only flag overrides return flag
        to_annotate = copy.deepcopy(kwargs)
        return_object = kwargs.pop('return_object', False)
        return_bins = kwargs.pop('return_bins', False)
        return_counts = kwargs.pop('return_counts', False)
        rolling = kwargs.pop('rolling', False)
        if rolling:
            # just initialize a fake classifier
            data = list(range(10))
            cls_instance = cls(data, *args, **kwargs)
            # and empty it, since we'll be using the update
            cls_instance.y = np.array([])
        else:
            cls_instance = None

        # wrap init in a closure to make a consumer.
        # Qc Na: "Objects/Closures are poor man's Closures/Objects"
        def classifier(data, cls_instance=cls_instance):
            if rolling:
                cls_instance.update(data, inplace=True, **kwargs)
                yb = cls_instance.find_bin(data)
            else:
                cls_instance = cls(data, *args, **kwargs)
                yb = cls_instance.yb
            outs = [yb, None, None, None]
            outs[1] = cls_instance if return_object else None
            outs[2] = cls_instance.bins if return_bins else None
            outs[3] = cls_instance.counts if return_counts else None
            outs = [a for a in outs if a is not None]
            if len(outs) == 1:
                return outs[0]
            else:
                return outs

        # for debugging/jic, keep around the kwargs.
        # in future, we might want to make this a thin class, so that we can
        # set a custom repr. Call the class `Binner` or something, that's a
        # pre-configured Classifier that just consumes data, bins it, &
        # possibly updates the bins.
        classifier._options = to_annotate
        return classifier

    def update(self, y=None, inplace=False, **kwargs):
        """
        Add data or change classification parameters.

        Parameters
        ----------
        y : array
            (n,1) array of data to classify
        inplace : bool
            whether to conduct the update in place or to return a copy
            estimated from the additional specifications.

        Additional parameters provided in **kwargs are passed to the init
        function of the class. For documentation, check the class constructor.
        """
        kwargs.update({'k': kwargs.pop('k', self.k)})
        if inplace:
            self._update(y, **kwargs)
        else:
            new = copy.deepcopy(self)
            new._update(y, **kwargs)
            return new

    def __str__(self):
        st = self._table_string()
        return st

    def __repr__(self):
        return self._table_string()

    def __call__(self, *args, **kwargs):
        """
        This will allow the classifier to be called like it's a function.

        Whether or not we want to make this be "find_bin" or "update" is a
        design decision.

        I like this as find_bin, since a classifier's job should be to classify
        the data given to it using the rules estimated from the `_classify()`
        function.
        """
        return self.find_bin(*args)

    def get_tss(self):
        """
        Total sum of squares around class means

        Returns sum of squares over all class means
        """
        tss = 0
        for class_def in self.classes:
            if len(class_def) > 0:
                yc = self.y[class_def]
                css = yc - yc.mean()
                css *= css
                tss += sum(css)
        return tss

    def _set_bins(self):
        # hook: subclasses compute self.bins (and usually self.k) here
        pass

    def get_adcm(self):
        """
        Absolute deviation around class median (ADCM).

        Calculates the absolute deviations of each observation about its class
        median as a measure of fit for the classification method.

        Returns sum of ADCM over all classes
        """
        adcm = 0
        for class_def in self.classes:
            if len(class_def) > 0:
                yc = self.y[class_def]
                yc_med = np.median(yc)
                ycd = np.abs(yc - yc_med)
                adcm += sum(ycd)
        return adcm

    def get_gadf(self):
        """
        Goodness of absolute deviation of fit
        """
        adam = (np.abs(self.y - np.median(self.y))).sum()
        gadf = 1 - self.adcm / adam
        return gadf

    def _table_string(self, width=12, decimal=3):
        # build a plain-text summary table: one row per class with its
        # lower/upper bound and observation count
        fmt = ".%df" % decimal
        fmt = "%" + fmt
        largest = max([len(fmt % i) for i in self.bins])
        width = largest
        fmt = "%d.%df" % (width, decimal)
        fmt = "%" + fmt
        h1 = "Lower"
        h1 = h1.center(largest)
        h2 = " "
        h2 = h2.center(10)
        h3 = "Upper"
        h3 = h3.center(largest + 1)
        largest = "%d" % max(self.counts)
        largest = len(largest) + 15
        h4 = "Count"
        h4 = h4.rjust(largest)
        table = []
        header = h1 + h2 + h3 + h4
        table.append(header)
        table.append("=" * len(header))
        for i, up in enumerate(self.bins):
            if i == 0:
                # first class has no lower bound to display
                left = " " * width
                left += " x[i] <= "
            else:
                left = fmt % self.bins[i - 1]
                left += " < x[i] <= "
            right = fmt % self.bins[i]
            row = left + right
            cnt = "%d" % self.counts[i]
            cnt = cnt.rjust(largest)
            row += cnt
            table.append(row)
        name = self.name
        top = name.center(len(row))
        table.insert(0, top)
        table.insert(1, " ")
        table = "\n".join(table)
        return table

    def find_bin(self, x):
        """
        Sort input or inputs according to the current bin estimate

        Parameters
        ----------
        x : array or numeric
            a value or array of values to fit within the estimated
            bins

        Returns
        -------
        a bin index or array of bin indices that classify the input into one of
        the classifiers' bins.

        Note that this differs from similar functionality in
        numpy.digitize(x, classi.bins, right=True).

        This will always provide the closest bin, so data "outside" the classifier,
        above and below the max/min breaks, will be classified into the nearest bin.

        numpy.digitize returns k+1 for data greater than the greatest bin, but retains 0
        for data below the lowest bin.
        """
        x = np.asarray(x).flatten()
        right = np.digitize(x, self.bins, right=True)
        # clamp values above the top break into the last bin
        if right.max() == len(self.bins):
            right[right == len(self.bins)] = len(self.bins) - 1
        return right
class HeadTail_Breaks(Map_Classifier):
    """
    Head/tail Breaks Map Classification for Heavy-tailed Distributions.

    Parameters
    ----------
    y : array
        (n,1), values to classify

    Attributes
    ----------
    yb : array
        (n,1), bin ids for observations
    bins : array
        (k,1), the upper bounds of each class
    k : int
        the number of classes
    counts : array
        (k,1), the number of observations falling in each class

    Notes
    -----
    Head/tail Breaks is a relatively new classification method developed
    for data with a heavy-tailed distribution.

    Implementation based on contributions by Alessandra Sozzi
    <alessandra.sozzi@gmail.com>.

    For theoretical details see :cite:`Jiang_2013`.
    """

    def __init__(self, y):
        Map_Classifier.__init__(self, y)
        self.name = 'HeadTail_Breaks'

    def _set_bins(self):
        # recursively split at the mean of the remaining "head"
        cuts = headTail_breaks(self.y.copy(), [])
        self.bins = np.array(cuts)
        self.k = len(self.bins)
class Equal_Interval(Map_Classifier):
    """
    Equal Interval Classification.

    Parameters
    ----------
    y : array
        (n,1), values to classify
    k : int
        number of classes required

    Attributes
    ----------
    yb : array
        (n,1), bin ids for observations; yb[i] = j for j>=1 if
        bins[j-1] < y[i] <= bins[j], otherwise yb[i] = 0
    bins : array
        (k,1), the upper bounds of each class
    k : int
        the number of classes
    counts : array
        (k,1), the number of observations falling in each class

    Notes
    -----
    Intervals are defined to have equal width:

    .. math::

        bins_j = min(y) + w * (j + 1)

    with :math:`w = \\frac{max(y) - min(y)}{k}`
    """

    def __init__(self, y, k=K):
        """see class docstring"""
        self.k = k
        Map_Classifier.__init__(self, y)
        self.name = 'Equal Interval'

    def _set_bins(self):
        lo = min(self.y)
        hi = max(self.y)
        width = (hi - lo) * 1. / self.k
        cuts = np.arange(lo + width, hi + width, width)
        if len(cuts) > self.k:
            # floating point overshoot can produce a spurious extra cut
            cuts = cuts[0:self.k]
        cuts[-1] = hi
        self.bins = cuts.copy()
class Percentiles(Map_Classifier):
    """
    Percentiles Map Classification

    Class upper bounds are the requested percentiles of ``y``.

    Parameters
    ----------
    y : array
        attribute to classify
    pct : sequence
        percentiles, default (1, 10, 50, 90, 99, 100)

    Attributes
    ----------
    yb : array
        bin ids for observations (numpy array n x 1)
    bins : array
        the upper bounds of each class (numpy array k x 1)
    k : int
        the number of classes
    counts : array
        the number of observations falling in each class (k x 1)

    Examples
    --------
    >>> import mapclassify as mc
    >>> cal = mc.load_example()
    >>> p2 = mc.Percentiles(cal, pct = [50, 100])
    >>> p2.bins
    array([   9.365, 4111.45 ])
    """

    def __init__(self, y, pct=(1, 10, 50, 90, 99, 100)):
        # tuple default copied per instance: fixes the shared
        # mutable-default-argument pitfall of the original list default
        self.pct = list(pct)
        Map_Classifier.__init__(self, y)
        self.name = 'Percentiles'

    def _set_bins(self):
        """Score each requested percentile of y as a class bound."""
        y = self.y
        pct = self.pct
        self.bins = np.array([stats.scoreatpercentile(y, p) for p in pct])
        self.k = len(self.bins)

    def update(self, y=None, inplace=False, **kwargs):
        """
        Add data or change classification parameters.

        Parameters
        ----------
        y : array
            (n,1) array of data to classify
        inplace : bool
            whether to conduct the update in place or to return a copy
            estimated from the additional specifications.

        Additional parameters provided in **kwargs are passed to the init
        function of the class. For documentation, check the class constructor.
        """
        kwargs.update({'pct': kwargs.pop('pct', self.pct)})
        if inplace:
            self._update(y, **kwargs)
        else:
            new = copy.deepcopy(self)
            new._update(y, **kwargs)
            return new
class Box_Plot(Map_Classifier):
    """
    Box_Plot Map Classification

    Classes correspond to the regions of a box plot: low outliers, the
    quartile ranges, and high outliers, with fences at
    ``hinge * IQR`` beyond the first/third quartiles.

    Parameters
    ----------
    y : array
        attribute to classify
    hinge : float
        multiplier for IQR (default 1.5)

    Attributes
    ----------
    yb : array
        (n,1), bin ids for observations
    bins : array
        (n,1), the upper bounds of each class (monotonic)
    k : int
        the number of classes
    counts : array
        (k,1), the number of observations falling in each class
    low_outlier_ids : array
        indices of observations that are low outliers
    high_outlier_ids : array
        indices of observations that are high outliers

    Notes
    -----
    If ``q[2] + hinge * IQR > max(y)`` there are only 5 classes and no
    high outliers; otherwise there are 6 classes and at least one high
    outlier.
    """

    def __init__(self, y, hinge=1.5):
        """Store the IQR multiplier and fit the classification."""
        self.hinge = hinge
        Map_Classifier.__init__(self, y)
        self.name = 'Box Plot'

    def _set_bins(self):
        """Compute box-plot fences and quartile bounds as bins."""
        edges = [stats.scoreatpercentile(self.y, p)
                 for p in (25, 50, 75, 100)]
        self.iqr = edges[-2] - edges[0]
        fence = self.hinge * self.iqr
        low_fence = edges[0] - fence
        high_fence = edges[-2] + fence
        if high_fence < edges[-1]:
            # high outliers exist: the fence becomes an interior bound
            edges.insert(-1, high_fence)
        else:
            edges[-1] = high_fence
        edges.insert(0, low_fence)
        self.bins = np.array(edges)
        self.k = len(edges)

    def _classify(self):
        """Classify, then record the indices of the outlier classes."""
        Map_Classifier._classify(self)
        self.low_outlier_ids = np.nonzero(self.yb == 0)[0]
        self.high_outlier_ids = np.nonzero(self.yb == 5)[0]

    def update(self, y=None, inplace=False, **kwargs):
        """
        Add data or change classification parameters.

        Parameters
        ----------
        y : array
            (n,1) array of data to classify
        inplace : bool
            whether to conduct the update in place or to return a
            copy estimated from the additional specifications.

        Additional parameters provided in **kwargs are passed to the init
        function of the class. For documentation, check the class constructor.
        """
        kwargs.update({'hinge': kwargs.pop('hinge', self.hinge)})
        if inplace:
            self._update(y, **kwargs)
        else:
            updated = copy.deepcopy(self)
            updated._update(y, **kwargs)
            return updated
class Quantiles(Map_Classifier):
    """
    Quantile Map Classification

    Class upper bounds are the k-quantiles of ``y``.

    Parameters
    ----------
    y : array
        (n,1), values to classify
    k : int
        number of classes required

    Attributes
    ----------
    yb : array
        (n,1), bin ids for observations
    bins : array
        (k,1), the upper bounds of each class
    k : int
        the number of classes
    counts : array
        (k,1), the number of observations falling in each class

    Examples
    --------
    >>> import mapclassify as mc
    >>> cal = mc.load_example()
    >>> q = mc.Quantiles(cal, k = 5)
    >>> q.bins
    array([1.46400e+00, 5.79800e+00, 1.32780e+01, 5.46160e+01, 4.11145e+03])
    """

    def __init__(self, y, k=K):
        """Store k and fit the quantile classification."""
        self.k = k
        Map_Classifier.__init__(self, y)
        self.name = 'Quantiles'

    def _set_bins(self):
        """Delegate bound computation to the module-level quantile()."""
        self.bins = quantile(self.y, k=self.k)
class Std_Mean(Map_Classifier):
    """
    Standard Deviation and Mean Map Classification

    Break points are the sample mean plus the supplied multiples of the
    sample standard deviation; max(y) is appended when it exceeds the
    top break.

    Parameters
    ----------
    y : array
        (n,1), values to classify
    multiples : sequence
        the multiples of the standard deviation to add/subtract from
        the sample mean to define the bins, default (-2, -1, 1, 2)

    Attributes
    ----------
    yb : array
        (n,1), bin ids for observations
    bins : array
        (k,1), the upper bounds of each class
    k : int
        the number of classes
    counts : array
        (k,1), the number of observations falling in each class
    """

    def __init__(self, y, multiples=(-2, -1, 1, 2)):
        # tuple default copied per instance: fixes the shared
        # mutable-default-argument pitfall of the original list default
        self.multiples = list(multiples)
        Map_Classifier.__init__(self, y)
        self.name = 'Std_Mean'

    def _set_bins(self):
        """Place bounds at mean + m*std for each multiple m."""
        y = self.y
        s = y.std(ddof=1)  # sample (n-1) standard deviation
        m = y.mean()
        cuts = [m + s * w for w in self.multiples]
        y_max = y.max()
        if cuts[-1] < y_max:
            cuts.append(y_max)
        self.bins = np.array(cuts)
        self.k = len(cuts)

    def update(self, y=None, inplace=False, **kwargs):
        """
        Add data or change classification parameters.

        Parameters
        ----------
        y : array
            (n,1) array of data to classify
        inplace : bool
            whether to conduct the update in place or to return a copy
            estimated from the additional specifications.

        Additional parameters provided in **kwargs are passed to the init
        function of the class. For documentation, check the class constructor.
        """
        kwargs.update({'multiples': kwargs.pop('multiples', self.multiples)})
        if inplace:
            self._update(y, **kwargs)
        else:
            new = copy.deepcopy(self)
            new._update(y, **kwargs)
            return new
class Maximum_Breaks(Map_Classifier):
    """
    Maximum Breaks Map Classification

    Places class boundaries at the midpoints of the ``k - 1`` largest
    gaps between sorted values.

    Parameters
    ----------
    y : array
        (n, 1), values to classify
    k : int
        number of classes required
    mindiff : float
        The minimum difference between class breaks

    Attributes
    ----------
    yb : array
        (n, 1), bin ids for observations
    bins : array
        (k, 1), the upper bounds of each class
    k : int
        the number of classes
    counts : array
        (k, 1), the number of observations falling in each class

    Examples
    --------
    >>> import mapclassify as mc
    >>> cal = mc.load_example()
    >>> mb = mc.Maximum_Breaks(cal, k = 5)
    >>> mb.bins
    array([ 146.005,  228.49 ,  546.675, 2417.15 , 4111.45 ])
    """

    def __init__(self, y, k=5, mindiff=0):
        self.k = k
        self.mindiff = mindiff
        Map_Classifier.__init__(self, y)
        self.name = 'Maximum_Breaks'

    def _set_bins(self):
        """Find the largest inter-value gaps and cut at their midpoints."""
        xs = self.y.copy()
        k = self.k
        xs.sort()
        min_diff = self.mindiff
        d = xs[1:] - xs[:-1]
        diffs = d[np.nonzero(d > min_diff)]
        # np.unique replaces sp.unique: the top-level NumPy aliases were
        # removed from SciPy
        diffs = np.unique(diffs)
        k1 = k - 1
        if len(diffs) > k1:
            diffs = diffs[-k1:]  # keep only the k-1 widest gaps
        mp = []
        self.cids = []
        for diff in diffs:
            ids = np.nonzero(d == diff)
            for id in ids:
                self.cids.append(id[0])
                # midpoint of the gap becomes a class boundary
                cp = ((xs[id] + xs[id + 1]) / 2.)
                mp.append(cp[0])
        mp.append(xs[-1])  # top bin always covers max(y)
        mp.sort()
        self.bins = np.array(mp)

    def update(self, y=None, inplace=False, **kwargs):
        """
        Add data or change classification parameters.

        Parameters
        ----------
        y : array
            (n,1) array of data to classify
        inplace : bool
            whether to conduct the update in place or to return a copy
            estimated from the additional specifications.

        Additional parameters provided in **kwargs are passed to the init
        function of the class. For documentation, check the class constructor.
        """
        kwargs.update({'k': kwargs.pop('k', self.k)})
        kwargs.update({'mindiff': kwargs.pop('mindiff', self.mindiff)})
        if inplace:
            self._update(y, **kwargs)
        else:
            new = copy.deepcopy(self)
            new._update(y, **kwargs)
            return new
class Natural_Breaks(Map_Classifier):
    """
    Natural Breaks Map Classification

    Keeps the best of ``1 + initial`` random-restart natural-breaks
    (k-means style) solutions.

    Parameters
    ----------
    y : array
        (n,1), values to classify
    k : int
        number of classes required
    initial : int
        number of initial solutions to generate, (default=100)

    Attributes
    ----------
    yb : array
        (n,1), bin ids for observations
    bins : array
        (k,1), the upper bounds of each class
    k : int
        the number of classes
    counts : array
        (k,1), the number of observations falling in each class

    Notes
    -----
    There is a tradeoff here between speed and consistency of the
    classification. For more speed set ``initial`` to a smaller value; for
    more consistent classes across runs on the same data set it higher.
    """

    def __init__(self, y, k=K, initial=100):
        self.k = k
        self.initial = initial
        Map_Classifier.__init__(self, y)
        self.name = 'Natural_Breaks'

    def _set_bins(self):
        """Fit natural breaks, retaining the best random restart."""
        x = self.y.copy()
        k = self.k
        uv = np.unique(np.array(x))
        uvk = len(uv)
        if uvk < k:
            ms = 'Warning: Not enough unique values in array to form k classes'
            Warn(ms, UserWarning)
            Warn("Warning: setting k to %d" % uvk, UserWarning)
            # the sorted unique values themselves are the bins; no need
            # to run kmeans
            uv.sort()
            self.bins = uv
            self.k = uvk
        else:
            best = natural_breaks(x, k)
            best_fit = best[2]
            for _ in range(self.initial):
                candidate = natural_breaks(x, k)
                if candidate[2] < best_fit:
                    # BUGFIX: the incumbent fitness must be updated along
                    # with the solution; previously only the solution was
                    # replaced, so the *last* restart that beat the first
                    # one was kept instead of the best overall
                    best = candidate
                    best_fit = candidate[2]
            self.bins = np.array(best[-1])
            self.k = len(self.bins)

    def update(self, y=None, inplace=False, **kwargs):
        """
        Add data or change classification parameters.

        Parameters
        ----------
        y : array
            (n,1) array of data to classify
        inplace : bool
            whether to conduct the update in place or to return a
            copy estimated from the additional specifications.

        Additional parameters provided in **kwargs are passed to the init
        function of the class. For documentation, check the class constructor.
        """
        kwargs.update({'k': kwargs.pop('k', self.k)})
        kwargs.update({'initial': kwargs.pop('initial', self.initial)})
        if inplace:
            self._update(y, **kwargs)
        else:
            new = copy.deepcopy(self)
            new._update(y, **kwargs)
            return new
class Fisher_Jenks(Map_Classifier):
    """
    Fisher Jenks optimal classifier - mean based

    Parameters
    ----------
    y : array
        (n,1), values to classify
    k : int
        number of classes required

    Attributes
    ----------
    yb : array
        (n,1), bin ids for observations
    bins : array
        (k,1), the upper bounds of each class
    k : int
        the number of classes
    counts : array
        (k,1), the number of observations falling in each class

    Raises
    ------
    ValueError
        when y contains fewer unique values than k.
    """

    def __init__(self, y, k=K):
        """Validate uniqueness, then fit the optimal partition."""
        if len(np.unique(y)) < k:
            raise ValueError("Fewer unique values than specified classes.")
        self.k = k
        Map_Classifier.__init__(self, y)
        self.name = "Fisher_Jenks"

    def _set_bins(self):
        """Compute optimal mean-based breaks via the Fisher-Jenks DP."""
        means = _fisher_jenks_means(self.y.copy(), classes=self.k)
        self.bins = np.array(means[1:])
class Fisher_Jenks_Sampled(Map_Classifier):
    """
    Fisher Jenks optimal classifier - mean based using random sample

    Fits Fisher-Jenks on a random subsample of ``y`` (augmented with
    min(y) and max(y)), then bins the complete vector on those breaks.

    Parameters
    ----------
    y : array
        (n,1), values to classify
    k : int
        number of classes required
    pct : float
        The percentage of n that should form the sample
        If pct is specified such that n*pct > 1000, then
        pct = 1000./n, unless truncate is False
    truncate : boolean
        truncate pct in cases where pct * n > 1000., (Default True)

    Attributes
    ----------
    yb : array
        (n,1), bin ids for observations
    bins : array
        (k,1), the upper bounds of each class
    k : int
        the number of classes
    counts : array
        (k,1), the number of observations falling in each class

    Notes
    -----
    For theoretical details see :cite:`Rey_2016`.
    """

    def __init__(self, y, k=K, pct=0.10, truncate=True):
        self.k = k
        n = y.size
        if (pct * n > 1000) and truncate:
            pct = 1000. / n
        # np.random.randint(0, n) draws from [0, n-1]; replaces
        # np.random.random_integers(0, n - 1), which was removed from NumPy
        ids = np.random.randint(0, n, int(n * pct))
        yr = y[ids]
        yr[-1] = max(y)  # make sure we have the upper bound
        yr[0] = min(y)  # make sure we have the min
        self.original_y = y
        self.pct = pct
        self._truncated = truncate
        self.yr = yr
        self.yr_n = yr.size
        Map_Classifier.__init__(self, yr)
        # rebin the full vector on the sample-derived breaks
        self.yb, self.counts = bin1d(y, self.bins)
        self.name = "Fisher_Jenks_Sampled"
        self.y = y
        self._summary()  # have to recalculate summary stats

    def _set_bins(self):
        """Fit Fisher-Jenks on the (sampled) data held in self.y."""
        fj = Fisher_Jenks(self.y, self.k)
        self.bins = fj.bins

    def update(self, y=None, inplace=False, **kwargs):
        """
        Add data or change classification parameters.

        Parameters
        ----------
        y : array
            (n,1) array of data to classify
        inplace : bool
            whether to conduct the update in place or to return a
            copy estimated from the additional specifications.

        Additional parameters provided in **kwargs are passed to the init
        function of the class. For documentation, check the class constructor.
        """
        kwargs.update({'k': kwargs.pop('k', self.k)})
        kwargs.update({'pct': kwargs.pop('pct', self.pct)})
        kwargs.update({'truncate': kwargs.pop('truncate', self._truncated)})
        if inplace:
            self._update(y, **kwargs)
        else:
            new = copy.deepcopy(self)
            new._update(y, **kwargs)
            return new
class Jenks_Caspall(Map_Classifier):
    """
    Jenks Caspall Map Classification

    Iteratively reassigns each observation to the class with the
    nearest class median until the assignment is stable.

    Parameters
    ----------
    y : array
        (n,1), values to classify
    k : int
        number of classes required

    Attributes
    ----------
    yb : array
        (n,1), bin ids for observations
    bins : array
        (k,1), the upper bounds of each class
    k : int
        the number of classes
    counts : array
        (k,1), the number of observations falling in each class
    iterations : int
        number of reassignment passes performed
    """

    def __init__(self, y, k=K):
        self.k = k
        Map_Classifier.__init__(self, y)
        self.name = "Jenks_Caspall"

    def _set_bins(self):
        """Median-based iterative refinement starting from quantiles."""
        x = self.y.copy()
        k = self.k
        # start with quantiles
        q = quantile(x, k)
        solving = True
        xb, cnts = bin1d(x, q)
        # class medians as the initial attractors
        if x.ndim == 1:
            x.shape = (x.size, 1)
        n, k = x.shape
        xm = [np.median(x[xb == i]) for i in np.unique(xb)]
        xb0 = xb.copy()
        q = xm
        it = 0
        rk = list(range(self.k))
        while solving:
            xb = np.zeros(xb0.shape, int)
            # distance of every value to every class median (broadcast)
            d = abs(x - q)
            xb = d.argmin(axis=1)
            if (xb0 == xb).all():
                solving = False
            else:
                xb0 = xb
            it += 1
            q = np.array([np.median(x[xb == i]) for i in rk])
        # np.unique replaces sp.unique: the top-level NumPy aliases were
        # removed from SciPy
        cuts = np.array([max(x[xb == i]) for i in np.unique(xb)])
        cuts.shape = (len(cuts), )
        self.bins = cuts
        self.iterations = it
class Jenks_Caspall_Sampled(Map_Classifier):
    """
    Jenks Caspall Map Classification using a random sample

    Parameters
    ----------
    y : array
        (n,1), values to classify
    k : int
        number of classes required
    pct : float
        The percentage of n that should form the sample
        If pct is specified such that n*pct > 1000, then pct = 1000./n

    Attributes
    ----------
    yb : array
        (n,1), bin ids for observations
    bins : array
        (k,1), the upper bounds of each class
    k : int
        the number of classes
    counts : array
        (k,1), the number of observations falling in each class

    Notes
    -----
    This is intended for large n problems. The logic is to apply
    Jenks_Caspall to a random subset of the y space and then bin the
    complete vector y on the bins obtained from the subset. This would
    trade off some "accuracy" for a gain in speed.
    """

    def __init__(self, y, k=K, pct=0.10):
        self.k = k
        n = y.size
        if pct * n > 1000:
            pct = 1000. / n
        # np.random.randint(0, n) draws from [0, n-1]; replaces
        # np.random.random_integers(0, n - 1), which was removed from NumPy
        ids = np.random.randint(0, n, int(n * pct))
        yr = y[ids]
        yr[0] = max(y)  # make sure we have the upper bound
        self.original_y = y
        self.pct = pct
        self.yr = yr
        self.yr_n = yr.size
        Map_Classifier.__init__(self, yr)
        # rebin the full vector on the sample-derived breaks
        self.yb, self.counts = bin1d(y, self.bins)
        self.name = "Jenks_Caspall_Sampled"
        self.y = y
        self._summary()  # have to recalculate summary stats

    def _set_bins(self):
        """Fit Jenks-Caspall on the (sampled) data held in self.y."""
        jc = Jenks_Caspall(self.y, self.k)
        self.bins = jc.bins
        self.iterations = jc.iterations

    def update(self, y=None, inplace=False, **kwargs):
        """
        Add data or change classification parameters.

        Parameters
        ----------
        y : array
            (n,1) array of data to classify
        inplace : bool
            whether to conduct the update in place or to return a
            copy estimated from the additional specifications.

        Additional parameters provided in **kwargs are passed to the init
        function of the class. For documentation, check the class constructor.
        """
        kwargs.update({'k': kwargs.pop('k', self.k)})
        kwargs.update({'pct': kwargs.pop('pct', self.pct)})
        if inplace:
            self._update(y, **kwargs)
        else:
            new = copy.deepcopy(self)
            new._update(y, **kwargs)
            return new
class Jenks_Caspall_Forced(Map_Classifier):
    """
    Jenks Caspall Map Classification with forced movements

    Starting from quantile classes, single observations are moved
    across adjacent class boundaries (upward, then downward) whenever
    the move lowers the within-class sum of squares.

    Parameters
    ----------
    y : array
        (n,1), values to classify
    k : int
        number of classes required

    Attributes
    ----------
    yb : array
        (n,1), bin ids for observations
    bins : array
        (k,1), the upper bounds of each class
    k : int
        the number of classes
    counts : array
        (k,1), the number of observations falling in each class
    iterations : int
        number of up/down sweeps performed
    """

    def __init__(self, y, k=K):
        self.k = k
        Map_Classifier.__init__(self, y)
        self.name = "Jenks_Caspall_Forced"

    def _set_bins(self):
        # NOTE: np.unique replaces sp.unique throughout; the top-level
        # NumPy aliases were removed from SciPy. Logic is otherwise
        # unchanged.
        x = self.y.copy()
        k = self.k
        q = quantile(x, k)
        solving = True
        xb, cnt = bin1d(x, q)
        # class means
        if x.ndim == 1:
            x.shape = (x.size, 1)
        n, tmp = x.shape
        xm = [x[xb == i].mean() for i in np.unique(xb)]
        q = xm
        xbar = np.array([xm[xbi] for xbi in xb])
        xbar.shape = (n, 1)
        ss = x - xbar
        ss *= ss
        ss = sum(ss)
        down_moves = up_moves = 0
        solving = True
        it = 0
        while solving:
            # try upward moves first
            moving_up = True
            while moving_up:
                class_ids = np.unique(xb)
                nk = [sum(xb == j) for j in class_ids]
                candidates = nk[:-1]
                i = 0
                up_moves = 0
                while candidates:
                    nki = candidates.pop(0)
                    if nki > 1:
                        # move the largest member of class i up one class
                        ids = np.nonzero(xb == class_ids[i])
                        mover = max(ids[0])
                        tmp = xb.copy()
                        tmp[mover] = xb[mover] + 1
                        tm = [x[tmp == j].mean() for j in np.unique(tmp)]
                        txbar = np.array([tm[xbi] for xbi in tmp])
                        txbar.shape = (n, 1)
                        tss = x - txbar
                        tss *= tss
                        tss = sum(tss)
                        if tss < ss:
                            # accept the move and restart the sweep
                            xb = tmp
                            ss = tss
                            candidates = []
                            up_moves += 1
                    i += 1
                if not up_moves:
                    moving_up = False
            moving_down = True
            while moving_down:
                class_ids = np.unique(xb)
                nk = [sum(xb == j) for j in class_ids]
                candidates = nk[1:]
                i = 1
                down_moves = 0
                while candidates:
                    nki = candidates.pop(0)
                    if nki > 1:
                        # move the smallest member of class i down one class
                        ids = np.nonzero(xb == class_ids[i])
                        mover = min(ids[0])
                        mover_class = xb[mover]
                        target_class = mover_class - 1
                        tmp = xb.copy()
                        tmp[mover] = target_class
                        tm = [x[tmp == j].mean() for j in np.unique(tmp)]
                        txbar = np.array([tm[xbi] for xbi in tmp])
                        txbar.shape = (n, 1)
                        tss = x - txbar
                        tss *= tss
                        tss = sum(tss)
                        if tss < ss:
                            # accept the move and restart the sweep
                            xb = tmp
                            ss = tss
                            candidates = []
                            down_moves += 1
                    i += 1
                if not down_moves:
                    moving_down = False
            if not up_moves and not down_moves:
                solving = False
            it += 1
        cuts = [max(x[xb == c]) for c in np.unique(xb)]
        self.bins = np.array(cuts)
        self.iterations = it
class User_Defined(Map_Classifier):
    """
    User Specified Binning

    Parameters
    ----------
    y : array
        (n,1), values to classify
    bins : sequence
        (k,1), upper bounds of classes (have to be monotonically
        increasing)

    Attributes
    ----------
    yb : array
        (n,1), bin ids for observations
    bins : array
        (k,1), the upper bounds of each class
    k : int
        the number of classes
    counts : array
        (k,1), the number of observations falling in each class

    Notes
    -----
    If the upper bound of the user bins does not exceed max(y) an
    additional bin is appended.
    """

    def __init__(self, y, bins):
        # BUGFIX: copy to a fresh list first. The original appended to
        # the caller's list in place (surprising side effect) and
        # crashed during _update, which passes self.bins (an ndarray
        # with no .append) back into __init__.
        bins = list(bins)
        if bins[-1] < max(y):
            bins.append(max(y))
        self.k = len(bins)
        self.bins = np.array(bins)
        self.y = y
        Map_Classifier.__init__(self, y)
        self.name = 'User Defined'

    def _set_bins(self):
        # bins are supplied directly by the user in __init__
        pass

    def _update(self, y=None, bins=None):
        """Refit with additional data and/or replacement bins."""
        if y is not None:
            if hasattr(y, 'values'):
                y = y.values
            y = np.append(y.flatten(), self.y)
        else:
            y = self.y
        if bins is None:
            bins = self.bins
        self.__init__(y, bins)

    def update(self, y=None, inplace=False, **kwargs):
        """
        Add data or change classification parameters.

        Parameters
        ----------
        y : array
            (n,1) array of data to classify
        inplace : bool
            whether to conduct the update in place or to return a
            copy estimated from the additional specifications.

        Additional parameters provided in **kwargs are passed to the init
        function of the class. For documentation, check the class constructor.
        """
        bins = kwargs.pop('bins', self.bins)
        if inplace:
            self._update(y=y, bins=bins, **kwargs)
        else:
            new = copy.deepcopy(self)
            new._update(y, bins, **kwargs)
            return new
class Max_P_Classifier(Map_Classifier):
    """
    Max_P Map Classification

    Based on the Max_p regionalization algorithm: classes contiguous in
    sorted-value order are grown from quantile seeds, the best of
    ``initial`` random solutions is kept, and boundary swaps that lower
    the within-class sum of squares are then applied.

    Parameters
    ----------
    y : array
        (n,1), values to classify
    k : int
        number of classes required
    initial : int
        number of initial solutions to use prior to swapping

    Attributes
    ----------
    yb : array
        (n,1), bin ids for observations
    bins : array
        (k,1), the upper bounds of each class
    k : int
        the number of classes
    counts : array
        (k,1), the number of observations falling in each class
    """

    def __init__(self, y, k=K, initial=1000):
        self.k = k
        self.initial = initial
        Map_Classifier.__init__(self, y)
        self.name = "Max_P"

    def _set_bins(self):
        x = self.y.copy()
        k = self.k
        q = quantile(x, k)
        if x.ndim == 1:
            x.shape = (x.size, 1)
        n, tmp = x.shape
        x.sort(axis=0)
        # find best of initial solutions
        solution = 0
        # BUGFIX: best_solution could remain unbound when no candidate
        # beat the variance-based threshold; the first candidate is now
        # always accepted as the incumbent
        best_solution = None
        best_tss = x.var() * x.shape[0]
        tss_all = np.zeros((self.initial, 1))
        while solution < self.initial:
            remaining = list(range(n))
            # seed each class with the observation nearest its quantile
            seeds = [
                np.nonzero(di == min(di))[0][0]
                for di in [np.abs(x - qi) for qi in q]
            ]
            rseeds = np.random.permutation(list(range(k))).tolist()
            [remaining.remove(seed) for seed in seeds]
            self.classes = classes = []
            [classes.append([seed]) for seed in seeds]
            while rseeds:
                seed_id = rseeds.pop()
                current = classes[seed_id]
                growing = True
                while growing:
                    # grow the class over adjacent unassigned positions
                    current = classes[seed_id]
                    low = current[0]
                    high = current[-1]
                    left = low - 1
                    right = high + 1
                    move_made = False
                    if left in remaining:
                        current.insert(0, left)
                        remaining.remove(left)
                        move_made = True
                    if right in remaining:
                        current.append(right)
                        remaining.remove(right)
                        move_made = True
                    if move_made:
                        classes[seed_id] = current
                    else:
                        growing = False
            tss = _fit(self.y, classes)
            tss_all[solution] = tss
            if best_solution is None or tss < best_tss:
                best_solution = classes
                best_it = solution
                best_tss = tss
            solution += 1
        classes = best_solution
        self.best_it = best_it
        self.tss = best_tss
        self.a2c = a2c = {}
        self.tss_all = tss_all
        for r, cl in enumerate(classes):
            for a in cl:
                a2c[a] = r
        # local improvement: swap boundary members between neighbors
        swapping = True
        while swapping:
            rseeds = np.random.permutation(list(range(k))).tolist()
            total_moves = 0
            while rseeds:
                id = rseeds.pop()
                growing = True
                total_moves = 0
                while growing:
                    target = classes[id]
                    left = target[0] - 1
                    right = target[-1] + 1
                    n_moves = 0
                    if left in a2c:
                        left_class = classes[a2c[left]]
                        if len(left_class) > 1:
                            a = left_class[-1]
                            if self._swap(left_class, target, a):
                                target.insert(0, a)
                                left_class.remove(a)
                                a2c[a] = id
                                n_moves += 1
                    if right in a2c:
                        right_class = classes[a2c[right]]
                        if len(right_class) > 1:
                            a = right_class[0]
                            if self._swap(right_class, target, a):
                                target.append(a)
                                right_class.remove(a)
                                n_moves += 1
                                a2c[a] = id
                    if not n_moves:
                        growing = False
                    total_moves += n_moves
            if not total_moves:
                swapping = False
        xs = self.y.copy()
        xs.sort()
        self.bins = np.array([xs[cl][-1] for cl in classes])

    def _ss(self, class_def):
        """calculates sum of squares for a class"""
        yc = self.y[class_def]
        css = yc - yc.mean()
        css *= css
        return sum(css)

    def _swap(self, class1, class2, a):
        """evaluate cost of moving a from class1 to class2"""
        ss1 = self._ss(class1)
        ss2 = self._ss(class2)
        tss1 = ss1 + ss2
        class1c = copy.copy(class1)
        class2c = copy.copy(class2)
        class1c.remove(a)
        class2c.append(a)
        ss1 = self._ss(class1c)
        ss2 = self._ss(class2c)
        tss2 = ss1 + ss2
        if tss1 < tss2:
            return False
        else:
            return True

    def update(self, y=None, inplace=False, **kwargs):
        """
        Add data or change classification parameters.

        Parameters
        ----------
        y : array
            (n,1) array of data to classify
        inplace : bool
            whether to conduct the update in place or to return a
            copy estimated from the additional specifications.

        Additional parameters provided in **kwargs are passed to the init
        function of the class. For documentation, check the class constructor.
        """
        kwargs.update({'initial': kwargs.pop('initial', self.initial)})
        # BUGFIX: the original passed an undefined name ``bins`` to
        # _update here, raising NameError whenever update() was called
        if inplace:
            self._update(y, **kwargs)
        else:
            new = copy.deepcopy(self)
            new._update(y, **kwargs)
            return new
def _fit(y, classes):
"""Calculate the total sum of squares for a vector y classified into
classes
Parameters
----------
y : array
(n,1), variable to be classified
classes : array
(k,1), integer values denoting class membership
"""
tss = 0
for class_def in classes:
yc = y[class_def]
css = yc - yc.mean()
css *= css
tss += sum(css)
return tss
# Registry mapping method names to k-parameterised classifier
# constructors; consumed by gadf() and K_classifiers below.
kmethods = {}
kmethods["Quantiles"] = Quantiles
kmethods["Fisher_Jenks"] = Fisher_Jenks
kmethods['Natural_Breaks'] = Natural_Breaks
kmethods['Maximum_Breaks'] = Maximum_Breaks
def gadf(y, method="Quantiles", maxk=15, pct=0.8):
    r"""
    Evaluate the Goodness of Absolute Deviation Fit of a Classifier

    Finds the minimum value of k for which gadf > pct.

    Parameters
    ----------
    y : array
        (n, 1) values to be classified
    method : {'Quantiles', 'Fisher_Jenks', 'Maximum_Breaks', 'Natural_Breaks'}
    maxk : int
        maximum value of k to evaluate
    pct : float
        The percentage of GADF to exceed

    Returns
    -------
    k : int
        number of classes
    cl : object
        instance of the classifier at k
    gadf : float
        goodness of absolute deviation fit

    Notes
    -----
    The GADF is defined as:

    .. math::

        GADF = 1 - \sum_c \sum_{i \in c}
               |y_i - y_{c,med}| / \sum_i |y_i - y_{med}|

    where :math:`y_{med}` is the global median and :math:`y_{c,med}` is
    the median for class :math:`c`.

    See Also
    --------
    K_classifiers
    """
    # NOTE: the docstring is a raw string (r""") so the LaTeX \sum
    # escapes do not trigger invalid-escape-sequence warnings on modern
    # Pythons; 'Natrual_Breaks' typo in the method list also fixed.
    y = np.array(y)
    adam = (np.abs(y - np.median(y))).sum()
    for k in range(2, maxk + 1):
        cl = kmethods[method](y, k)
        gadf = 1 - cl.adcm / adam
        if gadf > pct:
            break
    return (k, cl, gadf)
class K_classifiers(object):
    """
    Evaluate all k-classifiers and pick optimal based on k and GADF

    Parameters
    ----------
    y : array
        (n,1), values to be classified
    pct : float
        The percentage of GADF to exceed

    Attributes
    ----------
    best : object
        instance of the optimal Map_Classifier
    results : dictionary
        keys are classifier names, values are the (k, classifier, gadf)
        tuples with the best pct for each classifier

    Notes
    -----
    This can be used to suggest a classification scheme.

    See Also
    --------
    gadf
    """

    def __init__(self, y, pct=0.8):
        results = {}
        best = gadf(y, "Fisher_Jenks", maxk=len(y) - 1, pct=pct)
        # BUGFIX: these two assignments were swapped in the original
        # (pct0 received k and k0 received the gadf value), which
        # corrupted the model-selection comparison below
        k0 = best[0]
        pct0 = best[-1]
        keys = list(kmethods.keys())
        keys.remove("Fisher_Jenks")
        results["Fisher_Jenks"] = best
        for method in keys:
            results[method] = gadf(y, method, maxk=len(y) - 1, pct=pct)
            k1 = results[method][0]
            pct1 = results[method][-1]
            # prefer fewer classes; break ties on a higher gadf
            if (k1 < k0) or (k1 == k0 and pct0 < pct1):
                best = results[method]
                k0 = k1
                pct0 = pct1
        self.results = results
        self.best = best[1]
|
pysal/mapclassify
|
mapclassify/classifiers.py
|
bin
|
python
|
def bin(y, bins):
    """
    bin interval/ratio data

    Parameters
    ----------
    y : array
        (n,q), values to bin
    bins : sequence
        (k,1), upper bounds of each bin (monotonic); list, tuple, or
        array accepted

    Returns
    -------
    b : array
        (n,q), bin ids, with values between 0 and k-1

    Notes
    -----
    The name intentionally matches the original public API even though
    it shadows the ``bin`` builtin inside this module.
    """
    if np.ndim(y) == 1:
        k = 1
        n = np.shape(y)[0]
    else:
        n, k = np.shape(y)
    b = np.zeros((n, k), dtype='int')
    # normalize to a list so pop() works; list() (instead of the
    # original ndarray-only .tolist() path) also accepts tuples
    if not isinstance(bins, list):
        bins = list(bins)
    i = len(bins)
    binsc = copy.copy(bins)
    # sweep bounds from largest to smallest; each pass overwrites the
    # entries that also fit under the smaller bound
    while binsc:
        i -= 1
        c = binsc.pop(-1)
        b[np.nonzero(y <= c)] = i
    return b
|
bin interval/ratio data
Parameters
----------
y : array
(n,q), values to bin
bins : array
(k,1), upper bounds of each bin (monotonic)
Returns
-------
b : array
(n,q), bin membership ids, with values between 0 and k-1
Examples
--------
>>> import numpy as np
>>> import mapclassify as mc
>>> np.random.seed(1)
>>> x = np.random.randint(2, 20, (10, 3))
>>> bins = [10, 15, 20]
>>> b = mc.classifiers.bin(x, bins)
>>> x
array([[ 7, 13, 14],
[10, 11, 13],
[ 7, 17, 2],
[18, 3, 14],
[ 9, 15, 8],
[ 7, 13, 12],
[16, 6, 11],
[19, 2, 15],
[11, 11, 9],
[ 3, 2, 19]])
>>> b
array([[0, 1, 1],
[0, 1, 1],
[0, 2, 0],
[2, 0, 1],
[0, 1, 0],
[0, 1, 1],
[2, 0, 1],
[2, 0, 1],
[1, 1, 0],
[0, 0, 2]])
|
train
|
https://github.com/pysal/mapclassify/blob/5b22ec33f5802becf40557614d90cd38efa1676e/mapclassify/classifiers.py#L167-L228
| null |
"""
A module of classification schemes for choropleth mapping.
"""
__author__ = "Sergio J. Rey"

# Public API exported by the module
__all__ = [
    'Map_Classifier', 'quantile', 'Box_Plot', 'Equal_Interval', 'Fisher_Jenks',
    'Fisher_Jenks_Sampled', 'Jenks_Caspall', 'Jenks_Caspall_Forced',
    'Jenks_Caspall_Sampled', 'Max_P_Classifier', 'Maximum_Breaks',
    'Natural_Breaks', 'Quantiles', 'Percentiles', 'Std_Mean', 'User_Defined',
    'gadf', 'K_classifiers', 'HeadTail_Breaks', 'CLASSIFIERS'
]

# Names of every classifier class, useful for introspection/dispatch
CLASSIFIERS = ('Box_Plot', 'Equal_Interval', 'Fisher_Jenks',
               'Fisher_Jenks_Sampled', 'HeadTail_Breaks', 'Jenks_Caspall',
               'Jenks_Caspall_Forced', 'Jenks_Caspall_Sampled',
               'Max_P_Classifier', 'Maximum_Breaks', 'Natural_Breaks',
               'Quantiles', 'Percentiles', 'Std_Mean', 'User_Defined')

K = 5  # default number of classes in any map scheme with this as an argument
import numpy as np
import scipy.stats as stats
import scipy as sp
import copy
from scipy.cluster.vq import kmeans as KMEANS
from warnings import warn as Warn
try:
from numba import jit
except ImportError:
def jit(func):
return func
def headTail_breaks(values, cuts):
    """Head/tail breaks helper: recursively append the mean of the remaining
    head of the distribution to ``cuts``.

    Parameters
    ----------
    values : array-like
        values remaining to be partitioned
    cuts : list
        accumulator of break values; mutated in place and returned

    Returns
    -------
    cuts : list
        the accumulated break values
    """
    values = np.array(values)
    mean = np.mean(values)
    cuts.append(mean)
    head = values[values >= mean]
    # Recurse only while the head keeps shrinking.  When every remaining
    # value is identical the head equals the input, and the original
    # unconditional recursion would never terminate (RecursionError).
    if len(values) > 1 and len(head) < len(values):
        return headTail_breaks(head, cuts)
    return cuts
def quantile(y, k=4):
    """Compute the k quantile break values for an array.

    Parameters
    ----------
    y : array
        (n,1), values to classify
    k : int
        number of quantiles

    Returns
    -------
    q : array
        unique quantile values; if ties collapse some quantiles the result
        has fewer than k entries and a warning is issued
    """
    step = 100. / k
    pcts = np.arange(step, 100 + step, step)
    # float accumulation can push the final percentile past 100; clamp it
    if pcts[-1] > 100.0:
        pcts[-1] = 100.0
    cutoffs = [stats.scoreatpercentile(y, pct) for pct in pcts]
    q = np.unique(np.array(cutoffs))
    if len(q) < k:
        Warn('Warning: Not enough unique values in array to form k classes',
             UserWarning)
        Warn('Warning: setting k to %d' % len(q), UserWarning)
    return q
def binC(y, bins):
    """Bin categorical/qualitative data.

    Parameters
    ----------
    y : array
        (n,q), categorical values
    bins : array or list
        (k,1), unique values associated with each bin

    Returns
    -------
    b : array
        (n,q), bin membership, values between 0 and k-1

    Notes
    -----
    Values of ``y`` that match no entry of ``bins`` keep bin id 0 and a
    warning is issued for each such value.
    """
    if np.ndim(y) == 1:
        n, k = np.shape(y)[0], 1
    else:
        n, k = np.shape(y)
    out = np.zeros((n, k), dtype='int')
    for idx, category in enumerate(bins):
        out[np.nonzero(y == category)] = idx
    # flag any value that matched no category
    for val in set(y.flatten()):
        if val not in bins:
            Warn('value not in bin: {}'.format(val), UserWarning)
            Warn('bins: {}'.format(bins), UserWarning)
    return out
def bin1d(x, bins):
    """Place values of a 1-d array into (lo, hi] bins and count per bin.

    Parameters
    ----------
    x : array
        (n, 1), values to bin
    bins : array
        (k,1), upper bounds of each bin (monotonic)

    Returns
    -------
    binIds : array
        1-d array of integer bin ids
    counts : array
        number of elements of x falling in each bin
    """
    # lower edges: -inf followed by all but the last upper bound
    lowers = [-float("inf")] + list(bins[0:-1])
    binIds = np.zeros(x.shape, dtype='int')
    # each element satisfies exactly one (lo, hi] interval, so the sum of
    # masked contributions yields its bin index
    for j, (lo, hi) in enumerate(zip(lowers, bins)):
        binIds += (x > lo) * (x <= hi) * j
    counts = np.bincount(binIds, minlength=len(bins))
    return (binIds, counts)
def load_example():
    """Load the California employment example data used in the doctests."""
    from .datasets import calemp as _calemp
    return _calemp.load()
def _kmeans(y, k=5):
"""
Helper function to do kmeans in one dimension
"""
y = y * 1. # KMEANS needs float or double dtype
centroids = KMEANS(y, k)[0]
centroids.sort()
try:
class_ids = np.abs(y - centroids).argmin(axis=1)
except:
class_ids = np.abs(y[:, np.newaxis] - centroids).argmin(axis=1)
uc = np.unique(class_ids)
cuts = np.array([y[class_ids == c].max() for c in uc])
y_cent = np.zeros_like(y)
for c in uc:
y_cent[class_ids == c] = centroids[c]
diffs = y - y_cent
diffs *= diffs
return class_ids, cuts, diffs.sum(), centroids
def natural_breaks(values, k=5):
    """Helper for Jenks natural breaks, i.e. k-means in one dimension.

    Returns
    -------
    tuple
        (centroids, class_ids, fit, cuts); ``k`` is reduced (with a warning)
        when the data has fewer than k unique values.
    """
    values = np.array(values)
    n_unique = len(np.unique(values))
    if n_unique < k:
        Warn('Warning: Not enough unique values in array to form k classes',
             UserWarning)
        Warn('Warning: setting k to %d' % n_unique, UserWarning)
        k = n_unique
    # _kmeans returns (class_ids, cuts, fit, centroids)
    class_ids, cuts, fit, centroids = _kmeans(values, k)
    return (centroids, class_ids, fit, cuts)
@jit
def _fisher_jenks_means(values, classes=5, sort=True):
    """
    Jenks Optimal (Natural Breaks) algorithm implemented in Python.
    Notes
    -----
    The original Python code comes from here:
    http://danieljlewis.org/2010/06/07/jenks-natural-breaks-algorithm-in-python/
    and is based on a JAVA and Fortran code available here:
    https://stat.ethz.ch/pipermail/r-sig-geo/2006-March/000811.html
    Returns class breaks such that classes are internally homogeneous while
    assuring heterogeneity among classes.
    """
    # NOTE: ``values`` is sorted in place when sort=True
    if sort:
        values.sort()
    n_data = len(values)
    # dynamic-programming tables over (number of values, number of classes):
    # mat1[l, j] holds the start index of the last class in the best split of
    # the first l values into j classes; mat2[l, j] the accumulated cost
    mat1 = np.zeros((n_data + 1, classes + 1), dtype=np.int32)
    mat2 = np.zeros((n_data + 1, classes + 1), dtype=np.float32)
    mat1[1, 1:] = 1
    mat2[2:, 1:] = np.inf
    v = np.float32(0)
    for l in range(2, len(values) + 1):
        # running sums over a trailing window of the first l values
        s1 = np.float32(0)  # sum
        s2 = np.float32(0)  # sum of squares
        w = np.float32(0)   # window size
        for m in range(1, l + 1):
            i3 = l - m + 1
            val = np.float32(values[i3 - 1])
            s2 += val * val
            s1 += val
            w += np.float32(1)
            # within-class sum of squared deviations of values[i3-1:l]
            v = s2 - (s1 * s1) / w
            i4 = i3 - 1
            if i4 != 0:
                for j in range(2, classes + 1):
                    # keep the split minimizing total cost (>= keeps the
                    # later, i.e. smaller, start index on ties)
                    if mat2[l, j] >= (v + mat2[i4, j - 1]):
                        mat1[l, j] = i3
                        mat2[l, j] = v + mat2[i4, j - 1]
        mat1[l, 1] = 1
        mat2[l, 1] = v
    k = len(values)
    kclass = np.zeros(classes + 1, dtype=values.dtype)
    kclass[classes] = values[len(values) - 1]
    kclass[0] = values[0]
    # backtrack through mat1 to recover the break values
    for countNum in range(classes, 1, -1):
        pivot = mat1[k, countNum]
        id = int(pivot - 2)
        kclass[countNum - 1] = values[id]
        k = int(pivot - 1)
    return kclass
class Map_Classifier(object):
    """
    Abstract class for all map classifications :cite:`Slocum_2009`
    For an array :math:`y` of :math:`n` values, a map classifier places each
    value :math:`y_i` into one of :math:`k` mutually exclusive and exhaustive
    classes. Each classifer defines the classes based on different criteria,
    but in all cases the following hold for the classifiers in PySAL:
    .. math:: C_j^l < y_i \le C_j^u \ \forall i \in C_j
    where :math:`C_j` denotes class :math:`j` which has lower bound
    :math:`C_j^l` and upper bound :math:`C_j^u`.
    Map Classifiers Supported
    * :class:`mapclassify.classifiers.Box_Plot`
    * :class:`mapclassify.classifiers.Equal_Interval`
    * :class:`mapclassify.classifiers.Fisher_Jenks`
    * :class:`mapclassify.classifiers.Fisher_Jenks_Sampled`
    * :class:`mapclassify.classifiers.HeadTail_Breaks`
    * :class:`mapclassify.classifiers.Jenks_Caspall`
    * :class:`mapclassify.classifiers.Jenks_Caspall_Forced`
    * :class:`mapclassify.classifiers.Jenks_Caspall_Sampled`
    * :class:`mapclassify.classifiers.Max_P_Classifier`
    * :class:`mapclassify.classifiers.Maximum_Breaks`
    * :class:`mapclassify.classifiers.Natural_Breaks`
    * :class:`mapclassify.classifiers.Quantiles`
    * :class:`mapclassify.classifiers.Percentiles`
    * :class:`mapclassify.classifiers.Std_Mean`
    * :class:`mapclassify.classifiers.User_Defined`
    Utilities:
    In addition to the classifiers, there are several utility functions that
    can be used to evaluate the properties of a specific classifier,
    or for automatic selection of a classifier and
    number of classes.
    * :func:`mapclassify.classifiers.gadf`
    * :class:`mapclassify.classifiers.K_classifiers`
    """
    def __init__(self, y):
        # classification always operates on a flat 1-d vector
        y = np.asarray(y).flatten()
        self.name = 'Map Classifier'
        self.y = y
        self._classify()
        self._summary()
    def _summary(self):
        # per-class index lists plus fit statistics derived from the binning
        yb = self.yb
        self.classes = [np.nonzero(yb == c)[0].tolist() for c in range(self.k)]
        self.tss = self.get_tss()
        self.adcm = self.get_adcm()
        self.gadf = self.get_gadf()
    def _classify(self):
        # _set_bins is the subclass hook that populates self.bins / self.k
        self._set_bins()
        self.yb, self.counts = bin1d(self.y, self.bins)
    def _update(self, data, *args, **kwargs):
        """
        The only thing that *should* happen in this function is
        1. input sanitization for pandas
        2. classification/reclassification.
        Using their __init__ methods, all classifiers can re-classify given
        different input parameters or additional data.
        If you've got a cleverer updating equation than the intial estimation
        equation, remove the call to self.__init__ below and replace it with
        the updating function.
        """
        if data is not None:
            # rolling update: fold the new observations into the data
            # already held by the classifier before re-estimating
            data = np.asarray(data).flatten()
            data = np.append(data.flatten(), self.y)
        else:
            data = self.y
        self.__init__(data, *args, **kwargs)
    @classmethod
    def make(cls, *args, **kwargs):
        """
        Configure and create a classifier that will consume data and produce
        classifications, given the configuration options specified by this
        function.
        Note that this like a *partial application* of the relevant class
        constructor. `make` creates a function that returns classifications; it
        does not actually do the classification.
        If you want to classify data directly, use the appropriate class
        constructor, like Quantiles, Max_Breaks, etc.
        If you *have* a classifier object, but want to find which bins new data
        falls into, use find_bin.
        Parameters
        ----------
        *args : required positional arguments
            all positional arguments required by the classifier,
            excluding the input data.
        rolling : bool
            a boolean configuring the outputted classifier to use
            a rolling classifier rather than a new classifier for
            each input. If rolling, this adds the current data to
            all of the previous data in the classifier, and
            rebalances the bins, like a running median
            computation.
        return_object : bool
            a boolean configuring the outputted classifier to
            return the classifier object or not
        return_bins : bool
            a boolean configuring the outputted classifier to
            return the bins/breaks or not
        return_counts : bool
            a boolean configuring the outputted classifier to
            return the histogram of objects falling into each bin
            or not
        Returns
        -------
        A function that consumes data and returns their bins (and object,
        bins/breaks, or counts, if requested).
        Note
        ----
        This is most useful when you want to run a classifier many times
        with a given configuration, such as when classifying many columns of an
        array or dataframe using the same configuration.
        Examples
        --------
        >>> import libpysal as ps
        >>> import mapclassify as mc
        >>> import geopandas as gpd
        >>> df = gpd.read_file(ps.examples.get_path('columbus.dbf'))
        >>> classifier = mc.Quantiles.make(k=9)
        >>> cl = df[['HOVAL', 'CRIME', 'INC']].apply(classifier)
        >>> cl["HOVAL"].values[:10]
        array([8, 7, 2, 4, 1, 3, 8, 5, 7, 8])
        >>> cl["CRIME"].values[:10]
        array([0, 1, 3, 4, 6, 2, 0, 5, 3, 4])
        >>> cl["INC"].values[:10]
        array([7, 8, 5, 0, 3, 5, 0, 3, 6, 4])
        >>> import pandas as pd; from numpy import linspace as lsp
        >>> data = [lsp(3,8,num=10), lsp(10, 0, num=10), lsp(-5, 15, num=10)]
        >>> data = pd.DataFrame(data).T
        >>> data
                  0          1          2
        0  3.000000  10.000000  -5.000000
        1  3.555556   8.888889  -2.777778
        2  4.111111   7.777778  -0.555556
        3  4.666667   6.666667   1.666667
        4  5.222222   5.555556   3.888889
        5  5.777778   4.444444   6.111111
        6  6.333333   3.333333   8.333333
        7  6.888889   2.222222  10.555556
        8  7.444444   1.111111  12.777778
        9  8.000000   0.000000  15.000000
        >>> data.apply(mc.Quantiles.make(rolling=True))
           0  1  2
        0  0  4  0
        1  0  4  0
        2  1  4  0
        3  1  3  0
        4  2  2  1
        5  2  1  2
        6  3  0  4
        7  3  0  4
        8  4  0  4
        9  4  0  4
        >>> dbf = ps.io.open(ps.examples.get_path('baltim.dbf'))
        >>> data = dbf.by_col_array('PRICE', 'LOTSZ', 'SQFT')
        >>> my_bins = [1, 10, 20, 40, 80]
        >>> cl = [mc.User_Defined.make(bins=my_bins)(a) for a in data.T]
        >>> len(cl)
        3
        >>> cl[0][:10]
        array([4, 5, 5, 5, 4, 4, 5, 4, 4, 5])
        """
        # only flag overrides return flag
        to_annotate = copy.deepcopy(kwargs)
        return_object = kwargs.pop('return_object', False)
        return_bins = kwargs.pop('return_bins', False)
        return_counts = kwargs.pop('return_counts', False)
        rolling = kwargs.pop('rolling', False)
        if rolling:
            # just initialize a fake classifier
            data = list(range(10))
            cls_instance = cls(data, *args, **kwargs)
            # and empty it, since we'll be using the update
            cls_instance.y = np.array([])
        else:
            cls_instance = None
        # wrap init in a closure to make a consumer.
        # Qc Na: "Objects/Closures are poor man's Closures/Objects"
        def classifier(data, cls_instance=cls_instance):
            if rolling:
                # rolling mode re-uses (and grows) the shared instance
                cls_instance.update(data, inplace=True, **kwargs)
                yb = cls_instance.find_bin(data)
            else:
                cls_instance = cls(data, *args, **kwargs)
                yb = cls_instance.yb
            outs = [yb, None, None, None]
            outs[1] = cls_instance if return_object else None
            outs[2] = cls_instance.bins if return_bins else None
            outs[3] = cls_instance.counts if return_counts else None
            outs = [a for a in outs if a is not None]
            if len(outs) == 1:
                return outs[0]
            else:
                return outs
        # for debugging/jic, keep around the kwargs.
        # in future, we might want to make this a thin class, so that we can
        # set a custom repr. Call the class `Binner` or something, that's a
        # pre-configured Classifier that just consumes data, bins it, &
        # possibly updates the bins.
        classifier._options = to_annotate
        return classifier
    def update(self, y=None, inplace=False, **kwargs):
        """
        Add data or change classification parameters.
        Parameters
        ----------
        y : array
              (n,1) array of data to classify
        inplace : bool
              whether to conduct the update in place or to return a copy
              estimated from the additional specifications.
        Additional parameters provided in **kwargs are passed to the init
        function of the class. For documentation, check the class constructor.
        """
        # preserve the current k unless the caller overrides it
        kwargs.update({'k': kwargs.pop('k', self.k)})
        if inplace:
            self._update(y, **kwargs)
        else:
            new = copy.deepcopy(self)
            new._update(y, **kwargs)
            return new
    def __str__(self):
        st = self._table_string()
        return st
    def __repr__(self):
        return self._table_string()
    def __call__(self, *args, **kwargs):
        """
        This will allow the classifier to be called like it's a function.
        Whether or not we want to make this be "find_bin" or "update" is a
        design decision.
        I like this as find_bin, since a classifier's job should be to classify
        the data given to it using the rules estimated from the `_classify()`
        function.
        """
        return self.find_bin(*args)
    def get_tss(self):
        """
        Total sum of squares around class means
        Returns sum of squares over all class means
        """
        tss = 0
        for class_def in self.classes:
            if len(class_def) > 0:
                yc = self.y[class_def]
                css = yc - yc.mean()
                css *= css
                tss += sum(css)
        return tss
    def _set_bins(self):
        # subclass hook: populate self.bins (and possibly self.k); no-op here
        pass
    def get_adcm(self):
        """
        Absolute deviation around class median (ADCM).
        Calculates the absolute deviations of each observation about its class
        median as a measure of fit for the classification method.
        Returns sum of ADCM over all classes
        """
        adcm = 0
        for class_def in self.classes:
            if len(class_def) > 0:
                yc = self.y[class_def]
                yc_med = np.median(yc)
                ycd = np.abs(yc - yc_med)
                adcm += sum(ycd)
        return adcm
    def get_gadf(self):
        """
        Goodness of absolute deviation of fit
        """
        # 1 minus the ratio of within-class deviation to overall deviation
        adam = (np.abs(self.y - np.median(self.y))).sum()
        gadf = 1 - self.adcm / adam
        return gadf
    def _table_string(self, width=12, decimal=3):
        # size the columns from the widest formatted bin value
        fmt = ".%df" % decimal
        fmt = "%" + fmt
        largest = max([len(fmt % i) for i in self.bins])
        width = largest
        fmt = "%d.%df" % (width, decimal)
        fmt = "%" + fmt
        h1 = "Lower"
        h1 = h1.center(largest)
        h2 = " "
        h2 = h2.center(10)
        h3 = "Upper"
        h3 = h3.center(largest + 1)
        largest = "%d" % max(self.counts)
        largest = len(largest) + 15
        h4 = "Count"
        h4 = h4.rjust(largest)
        table = []
        header = h1 + h2 + h3 + h4
        table.append(header)
        table.append("=" * len(header))
        for i, up in enumerate(self.bins):
            if i == 0:
                left = " " * width
                left += "   x[i] <= "
            else:
                left = fmt % self.bins[i - 1]
                left += " < x[i] <= "
            right = fmt % self.bins[i]
            row = left + right
            cnt = "%d" % self.counts[i]
            cnt = cnt.rjust(largest)
            row += cnt
            table.append(row)
        name = self.name
        top = name.center(len(row))
        table.insert(0, top)
        table.insert(1, " ")
        table = "\n".join(table)
        return table
    def find_bin(self, x):
        """
        Sort input or inputs according to the current bin estimate
        Parameters
        ----------
        x : array or numeric
            a value or array of values to fit within the estimated
            bins
        Returns
        -------
        a bin index or array of bin indices that classify the input into one of
        the classifiers' bins.
        Note that this differs from similar functionality in
        numpy.digitize(x, classi.bins, right=True).
        This will always provide the closest bin, so data "outside" the classifier,
        above and below the max/min breaks, will be classified into the nearest bin.
        numpy.digitize returns k+1 for data greater than the greatest bin, but retains 0
        for data below the lowest bin.
        """
        x = np.asarray(x).flatten()
        right = np.digitize(x, self.bins, right=True)
        # clamp values above the top break into the highest bin
        if right.max() == len(self.bins):
            right[right == len(self.bins)] = len(self.bins) - 1
        return right
class HeadTail_Breaks(Map_Classifier):
    """
    Head/tail Breaks Map Classification for Heavy-tailed Distributions
    Parameters
    ----------
    y : array
        (n,1), values to classify
    Attributes
    ----------
    yb : array
        (n,1), bin ids for observations
    bins : array
        (k,1), the upper bounds of each class
    k : int
        the number of classes (determined by the data, not supplied)
    counts : array
        (k,1), the number of observations falling in each class
    Notes
    -----
    Recursively partitions the data at the mean of the remaining head,
    which suits heavy-tailed distributions.
    Implementation based on contributions by Alessandra Sozzi <alessandra.sozzi@gmail.com>.
    For theoretical details see :cite:`Jiang_2013`.
    """
    def __init__(self, y):
        Map_Classifier.__init__(self, y)
        self.name = 'HeadTail_Breaks'
    def _set_bins(self):
        # the recursion determines both the breaks and the class count
        cuts = headTail_breaks(self.y.copy(), [])
        self.bins = np.array(cuts)
        self.k = len(self.bins)
class Equal_Interval(Map_Classifier):
    """
    Equal Interval Classification
    Parameters
    ----------
    y : array
        (n,1), values to classify
    k : int
        number of classes required
    Attributes
    ----------
    yb : array
        (n,1), bin ids for observations; yb[i] = j for j>=1 if
        bins[j-1] < y[i] <= bins[j], yb[i] = 0 otherwise
    bins : array
        (k,1), the upper bounds of each class
    k : int
        the number of classes
    counts : array
        (k,1), the number of observations falling in each class
    Notes
    -----
    Class widths are constant: bins_j = min(y) + w*(j+1)
    with :math:`w=\\frac{max(y)-min(y)}{k}`
    """
    def __init__(self, y, k=K):
        self.k = k
        Map_Classifier.__init__(self, y)
        self.name = 'Equal Interval'
    def _set_bins(self):
        lo, hi = min(self.y), max(self.y)
        width = (hi - lo) * 1. / self.k
        cuts = np.arange(lo + width, hi + width, width)
        # floating-point stepping can produce one cut too many;
        # trim and pin the top break to the maximum
        if len(cuts) > self.k:
            cuts = cuts[0:self.k]
            cuts[-1] = hi
        self.bins = cuts.copy()
class Percentiles(Map_Classifier):
    """
    Percentiles Map Classification
    Parameters
    ----------
    y : array
        attribute to classify
    pct : array
        percentiles, default=[1, 10, 50, 90, 99, 100]
    Attributes
    ----------
    yb : array
        bin ids for observations (numpy array n x 1)
    bins : array
        the upper bounds of each class (numpy array k x 1)
    k : int
        the number of classes
    counts : int
        the number of observations falling in each class
        (numpy array k x 1)
    """
    def __init__(self, y, pct=None):
        # use None as the default sentinel to avoid sharing a mutable
        # default list across instances
        if pct is None:
            pct = [1, 10, 50, 90, 99, 100]
        self.pct = pct
        Map_Classifier.__init__(self, y)
        self.name = 'Percentiles'
    def _set_bins(self):
        y = self.y
        pct = self.pct
        self.bins = np.array([stats.scoreatpercentile(y, p) for p in pct])
        self.k = len(self.bins)
    def update(self, y=None, inplace=False, **kwargs):
        """
        Add data or change classification parameters.
        Parameters
        ----------
        y : array
            (n,1) array of data to classify
        inplace : bool
            whether to conduct the update in place or to return a copy
            estimated from the additional specifications.
        Additional parameters provided in **kwargs are passed to the init
        function of the class. For documentation, check the class constructor.
        """
        # keep the current percentiles unless explicitly overridden
        kwargs.update({'pct': kwargs.pop('pct', self.pct)})
        if inplace:
            self._update(y, **kwargs)
        else:
            new = copy.deepcopy(self)
            new._update(y, **kwargs)
            return new
class Box_Plot(Map_Classifier):
    """
    Box_Plot Map Classification
    Parameters
    ----------
    y : array
        attribute to classify
    hinge : float
        multiplier for IQR (default=1.5)
    Attributes
    ----------
    yb : array
        (n,1), bin ids for observations
    bins : array
        (n,1), the upper bounds of each class (monotonic)
    k : int
        the number of classes
    counts : array
        (k,1), the number of observations falling in each class
    low_outlier_ids : array
        indices of observations that are low outliers
    high_outlier_ids : array
        indices of observations that are high outliers
    Notes
    -----
    Breaks are the quartiles fenced by hinge*IQR::
        bins = [q1 - hinge*IQR, q1, q2, q3, q3 + hinge*IQR, max(y)]
    The final (max) break is present only when the upper fence falls below
    the maximum, i.e. only when high outliers exist; otherwise there are
    five classes and no high outliers.
    """
    def __init__(self, y, hinge=1.5):
        self.hinge = hinge
        Map_Classifier.__init__(self, y)
        self.name = 'Box Plot'
    def _set_bins(self):
        q1, q2, q3, top = [stats.scoreatpercentile(self.y, p)
                           for p in (25, 50, 75, 100)]
        self.iqr = q3 - q1
        whisker = self.hinge * self.iqr
        low_fence = q1 - whisker
        high_fence = q3 + whisker
        if high_fence < top:
            # high outliers exist: six classes, the last one above the fence
            edges = [low_fence, q1, q2, q3, high_fence, top]
        else:
            # no high outliers: five classes ending at the upper fence
            edges = [low_fence, q1, q2, q3, high_fence]
        self.bins = np.array(edges)
        self.k = len(edges)
    def _classify(self):
        Map_Classifier._classify(self)
        # class 0 holds low outliers; class 5 (when present) high outliers
        self.low_outlier_ids = np.nonzero(self.yb == 0)[0]
        self.high_outlier_ids = np.nonzero(self.yb == 5)[0]
    def update(self, y=None, inplace=False, **kwargs):
        """
        Add data or change classification parameters.
        Parameters
        ----------
        y : array
            (n,1) array of data to classify
        inplace : bool
            whether to conduct the update in place or to return a
            copy estimated from the additional specifications.
        Additional parameters provided in **kwargs are passed to the init
        function of the class. For documentation, check the class constructor.
        """
        # carry the configured hinge forward unless the caller overrides it
        kwargs.setdefault('hinge', self.hinge)
        if inplace:
            self._update(y, **kwargs)
        else:
            updated = copy.deepcopy(self)
            updated._update(y, **kwargs)
            return updated
class Quantiles(Map_Classifier):
    """
    Quantile Map Classification
    Parameters
    ----------
    y : array
        (n,1), values to classify
    k : int
        number of classes required
    Attributes
    ----------
    yb : array
        (n,1), bin ids for observations; yb[i] = j for j>=1 if
        bins[j-1] < y[i] <= bins[j], yb[i] = 0 otherwise
    bins : array
        (k,1), the upper bounds of each class
    k : int
        the number of classes
    counts : array
        (k,1), the number of observations falling in each class
    """
    def __init__(self, y, k=K):
        self.k = k
        Map_Classifier.__init__(self, y)
        self.name = 'Quantiles'
    def _set_bins(self):
        # delegate to the module-level quantile helper
        self.bins = quantile(self.y, k=self.k)
class Std_Mean(Map_Classifier):
    """
    Standard Deviation and Mean Map Classification
    Parameters
    ----------
    y : array
        (n,1), values to classify
    multiples : array
        the multiples of the standard deviation to add/subtract from
        the sample mean to define the bins, default=[-2,-1,1,2]
    Attributes
    ----------
    yb : array
        (n,1), bin ids for observations
    bins : array
        (k,1), the upper bounds of each class
    k : int
        the number of classes
    counts : array
        (k,1), the number of observations falling in each class
    """
    def __init__(self, y, multiples=None):
        # use None as the default sentinel to avoid sharing a mutable
        # default list across instances
        if multiples is None:
            multiples = [-2, -1, 1, 2]
        self.multiples = multiples
        Map_Classifier.__init__(self, y)
        self.name = 'Std_Mean'
    def _set_bins(self):
        y = self.y
        s = y.std(ddof=1)  # sample standard deviation
        m = y.mean()
        cuts = [m + s * w for w in self.multiples]
        y_max = y.max()
        # guarantee the top class covers the maximum observation
        if cuts[-1] < y_max:
            cuts.append(y_max)
        self.bins = np.array(cuts)
        self.k = len(cuts)
    def update(self, y=None, inplace=False, **kwargs):
        """
        Add data or change classification parameters.
        Parameters
        ----------
        y : array
            (n,1) array of data to classify
        inplace : bool
            whether to conduct the update in place or to return a copy
            estimated from the additional specifications.
        Additional parameters provided in **kwargs are passed to the init
        function of the class. For documentation, check the class constructor.
        """
        # keep the current multiples unless explicitly overridden
        kwargs.update({'multiples': kwargs.pop('multiples', self.multiples)})
        if inplace:
            self._update(y, **kwargs)
        else:
            new = copy.deepcopy(self)
            new._update(y, **kwargs)
            return new
class Maximum_Breaks(Map_Classifier):
    """
    Maximum Breaks Map Classification
    Parameters
    ----------
    y : array
        (n, 1), values to classify
    k : int
        number of classes required
    mindiff : float
        The minimum difference between class breaks
    Attributes
    ----------
    yb : array
        (n, 1), bin ids for observations
    bins : array
        (k, 1), the upper bounds of each class
    k : int
        the number of classes
    counts : array
        (k, 1), the number of observations falling in each class
    cids : list
        indices (into the sorted values) where the breaks were placed
    """
    def __init__(self, y, k=5, mindiff=0):
        self.k = k
        self.mindiff = mindiff
        Map_Classifier.__init__(self, y)
        self.name = 'Maximum_Breaks'
    def _set_bins(self):
        xs = self.y.copy()
        k = self.k
        xs.sort()
        min_diff = self.mindiff
        # gaps between consecutive sorted values
        d = xs[1:] - xs[:-1]
        diffs = d[np.nonzero(d > min_diff)]
        # np.unique replaces the removed scipy alias sp.unique; it also
        # sorts, so the k-1 largest gaps are the tail of the array
        diffs = np.unique(diffs)
        k1 = k - 1
        if len(diffs) > k1:
            diffs = diffs[-k1:]
        mp = []
        self.cids = []
        for diff in diffs:
            ids = np.nonzero(d == diff)
            for pos in ids:  # renamed from ``id`` (shadowed the builtin)
                self.cids.append(pos[0])
                # place the break midway across the gap
                cp = ((xs[pos] + xs[pos + 1]) / 2.)
                mp.append(cp[0])
        mp.append(xs[-1])
        mp.sort()
        self.bins = np.array(mp)
    def update(self, y=None, inplace=False, **kwargs):
        """
        Add data or change classification parameters.
        Parameters
        ----------
        y : array
            (n,1) array of data to classify
        inplace : bool
            whether to conduct the update in place or to return a copy
            estimated from the additional specifications.
        Additional parameters provided in **kwargs are passed to the init
        function of the class. For documentation, check the class constructor.
        """
        kwargs.update({'k': kwargs.pop('k', self.k)})
        kwargs.update({'mindiff': kwargs.pop('mindiff', self.mindiff)})
        if inplace:
            self._update(y, **kwargs)
        else:
            new = copy.deepcopy(self)
            new._update(y, **kwargs)
            return new
class Natural_Breaks(Map_Classifier):
    """
    Natural Breaks Map Classification
    Parameters
    ----------
    y : array
        (n,1), values to classify
    k : int
        number of classes required
    initial : int
        number of initial solutions to generate, (default=100)
    Attributes
    ----------
    yb : array
        (n,1), bin ids for observations
    bins : array
        (k,1), the upper bounds of each class
    k : int
        the number of classes
    counts : array
        (k,1), the number of observations falling in each class
    Notes
    -----
    There is a tradeoff here between speed and consistency of the
    classification. If you want more speed, set initial to a smaller value
    (0 would result in the best speed); if you want more consistent classes
    in multiple runs of Natural_Breaks on the same data, set initial to a
    higher value.
    """
    def __init__(self, y, k=K, initial=100):
        self.k = k
        self.initial = initial
        Map_Classifier.__init__(self, y)
        self.name = 'Natural_Breaks'
    def _set_bins(self):
        x = self.y.copy()
        k = self.k
        values = np.array(x)
        uv = np.unique(values)
        uvk = len(uv)
        if uvk < k:
            ms = 'Warning: Not enough unique values in array to form k classes'
            Warn(ms, UserWarning)
            Warn("Warning: setting k to %d" % uvk, UserWarning)
            k = uvk
            uv.sort()
            # we set the bins equal to the sorted unique values and ramp k
            # downwards. no need to call kmeans.
            self.bins = uv
            self.k = k
        else:
            # find an initial solution and then try to find an improvement;
            # track the best fit seen so far.  (The original compared every
            # candidate against the *first* solution's fit only, so a later,
            # worse candidate could displace a better earlier one.)
            best = natural_breaks(x, k)
            for _ in range(self.initial):
                cand = natural_breaks(x, k)
                if cand[2] < best[2]:
                    best = cand
            self.bins = np.array(best[-1])
            self.k = len(self.bins)
    def update(self, y=None, inplace=False, **kwargs):
        """
        Add data or change classification parameters.
        Parameters
        ----------
        y : array
            (n,1) array of data to classify
        inplace : bool
            whether to conduct the update in place or to return a
            copy estimated from the additional specifications.
        Additional parameters provided in **kwargs are passed to the init
        function of the class. For documentation, check the class constructor.
        """
        kwargs.update({'k': kwargs.pop('k', self.k)})
        kwargs.update({'initial': kwargs.pop('initial', self.initial)})
        if inplace:
            self._update(y, **kwargs)
        else:
            new = copy.deepcopy(self)
            new._update(y, **kwargs)
            return new
class Fisher_Jenks(Map_Classifier):
    """
    Fisher Jenks optimal classifier - mean based
    Parameters
    ----------
    y : array
        (n,1), values to classify
    k : int
        number of classes required
    Attributes
    ----------
    yb : array
        (n,1), bin ids for observations
    bins : array
        (k,1), the upper bounds of each class
    k : int
        the number of classes
    counts : array
        (k,1), the number of observations falling in each class
    Raises
    ------
    ValueError
        when y has fewer unique values than the requested k
    """
    def __init__(self, y, k=K):
        if len(np.unique(y)) < k:
            raise ValueError("Fewer unique values than specified classes.")
        self.k = k
        Map_Classifier.__init__(self, y)
        self.name = "Fisher_Jenks"
    def _set_bins(self):
        # drop the leading element (the minimum) so only upper bounds remain
        breaks = _fisher_jenks_means(self.y.copy(), classes=self.k)
        self.bins = np.array(breaks[1:])
class Fisher_Jenks_Sampled(Map_Classifier):
    """
    Fisher Jenks optimal classifier - mean based using random sample

    Parameters
    ----------
    y : array
        (n,1), values to classify
    k : int
        number of classes required
    pct : float
        The percentage of n that should form the sample
        If pct is specified such that n*pct > 1000, then
        pct = 1000./n, unless truncate is False
    truncate : boolean
        truncate pct in cases where pct * n > 1000., (Default True)

    Attributes
    ----------
    yb : array
        (n,1), bin ids for observations
    bins : array
        (k,1), the upper bounds of each class
    k : int
        the number of classes
    counts : array
        (k,1), the number of observations falling in each class

    Examples
    --------
    (Turned off due to timing being different across hardware)

    For theoretical details see :cite:`Rey_2016`.
    """

    def __init__(self, y, k=K, pct=0.10, truncate=True):
        self.k = k
        n = y.size
        if (pct * n > 1000) and truncate:
            pct = 1000. / n
        # np.random.random_integers is deprecated/removed in modern NumPy;
        # randint(0, n, m) draws the same inclusive range [0, n-1]
        ids = np.random.randint(0, n, int(n * pct))
        yr = y[ids]
        yr[-1] = max(y)  # make sure we have the upper bound
        yr[0] = min(y)  # make sure we have the min
        self.original_y = y
        self.pct = pct
        self._truncated = truncate
        self.yr = yr
        self.yr_n = yr.size
        # classify on the sample, then bin the full data on those breaks
        Map_Classifier.__init__(self, yr)
        self.yb, self.counts = bin1d(y, self.bins)
        self.name = "Fisher_Jenks_Sampled"
        self.y = y
        self._summary()  # have to recalculate summary stats

    def _set_bins(self):
        fj = Fisher_Jenks(self.y, self.k)
        self.bins = fj.bins

    def update(self, y=None, inplace=False, **kwargs):
        """
        Add data or change classification parameters.

        Parameters
        ----------
        y : array
            (n,1) array of data to classify
        inplace : bool
            whether to conduct the update in place or to return a
            copy estimated from the additional specifications.

        Additional parameters provided in **kwargs are passed to the init
        function of the class. For documentation, check the class constructor.
        """
        kwargs.update({'k': kwargs.pop('k', self.k)})
        kwargs.update({'pct': kwargs.pop('pct', self.pct)})
        kwargs.update({'truncate': kwargs.pop('truncate', self._truncated)})
        if inplace:
            self._update(y, **kwargs)
        else:
            new = copy.deepcopy(self)
            new._update(y, **kwargs)
            return new
class Jenks_Caspall(Map_Classifier):
    """
    Jenks Caspall Map Classification

    Parameters
    ----------
    y : array
        (n,1), values to classify
    k : int
        number of classes required

    Attributes
    ----------
    yb : array
        (n,1), bin ids for observations,
    bins : array
        (k,1), the upper bounds of each class
    k : int
        the number of classes
    counts : array
        (k,1), the number of observations falling in each class

    Examples
    --------
    >>> import mapclassify as mc
    >>> cal = mc.load_example()
    >>> jc = mc.Jenks_Caspall(cal, k = 5)
    >>> jc.bins
    array([1.81000e+00, 7.60000e+00, 2.98200e+01, 1.81270e+02, 4.11145e+03])
    >>> jc.counts
    array([14, 13, 14, 10,  7])
    """

    def __init__(self, y, k=K):
        self.k = k
        Map_Classifier.__init__(self, y)
        self.name = "Jenks_Caspall"

    def _set_bins(self):
        """Iteratively reassign observations to the nearest class median
        until membership stabilizes (1-d k-medians)."""
        x = self.y.copy()
        k = self.k
        # start with quantiles
        q = quantile(x, k)
        solving = True
        xb, cnts = bin1d(x, q)
        # class means
        if x.ndim == 1:
            x.shape = (x.size, 1)
        n, k = x.shape
        xm = [np.median(x[xb == i]) for i in np.unique(xb)]
        xb0 = xb.copy()
        q = xm
        it = 0
        rk = list(range(self.k))
        while solving:
            xb = np.zeros(xb0.shape, int)
            # (n,1) minus k medians broadcasts to an (n,k) distance matrix
            d = abs(x - q)
            xb = d.argmin(axis=1)
            if (xb0 == xb).all():
                solving = False
            else:
                xb0 = xb
            it += 1
            q = np.array([np.median(x[xb == i]) for i in rk])
        # sp.unique was a deprecated/removed scipy alias; use np.unique
        cuts = np.array([max(x[xb == i]) for i in np.unique(xb)])
        cuts.shape = (len(cuts), )
        self.bins = cuts
        self.iterations = it
class Jenks_Caspall_Sampled(Map_Classifier):
    """
    Jenks Caspall Map Classification using a random sample

    Parameters
    ----------
    y : array
        (n,1), values to classify
    k : int
        number of classes required
    pct : float
        The percentage of n that should form the sample
        If pct is specified such that n*pct > 1000, then pct = 1000./n

    Attributes
    ----------
    yb : array
        (n,1), bin ids for observations,
    bins : array
        (k,1), the upper bounds of each class
    k : int
        the number of classes
    counts : array
        (k,1), the number of observations falling in each class

    Notes
    -----
    This is intended for large n problems. The logic is to apply
    Jenks_Caspall to a random subset of the y space and then bin the
    complete vector y on the bins obtained from the subset. This would
    trade off some "accuracy" for a gain in speed.
    """

    def __init__(self, y, k=K, pct=0.10):
        self.k = k
        n = y.size
        if pct * n > 1000:
            pct = 1000. / n
        # np.random.random_integers is deprecated/removed in modern NumPy;
        # randint(0, n, m) draws the same inclusive range [0, n-1]
        ids = np.random.randint(0, n, int(n * pct))
        yr = y[ids]
        yr[0] = max(y)  # make sure we have the upper bound
        self.original_y = y
        self.pct = pct
        self.yr = yr
        self.yr_n = yr.size
        # classify on the sample, then bin the full data on those breaks
        Map_Classifier.__init__(self, yr)
        self.yb, self.counts = bin1d(y, self.bins)
        self.name = "Jenks_Caspall_Sampled"
        self.y = y
        self._summary()  # have to recalculate summary stats

    def _set_bins(self):
        jc = Jenks_Caspall(self.y, self.k)
        self.bins = jc.bins
        self.iterations = jc.iterations

    def update(self, y=None, inplace=False, **kwargs):
        """
        Add data or change classification parameters.

        Parameters
        ----------
        y : array
            (n,1) array of data to classify
        inplace : bool
            whether to conduct the update in place or to return a
            copy estimated from the additional specifications.

        Additional parameters provided in **kwargs are passed to the init
        function of the class. For documentation, check the class constructor.
        """
        kwargs.update({'k': kwargs.pop('k', self.k)})
        kwargs.update({'pct': kwargs.pop('pct', self.pct)})
        if inplace:
            self._update(y, **kwargs)
        else:
            new = copy.deepcopy(self)
            new._update(y, **kwargs)
            return new
class Jenks_Caspall_Forced(Map_Classifier):
    """
    Jenks Caspall Map Classification with forced movements

    Parameters
    ----------
    y : array
        (n,1), values to classify
    k : int
        number of classes required

    Attributes
    ----------
    yb : array
        (n,1), bin ids for observations
    bins : array
        (k,1), the upper bounds of each class
    k : int
        the number of classes
    counts : array
        (k,1), the number of observations falling in each class

    Examples
    --------
    >>> import mapclassify as mc
    >>> cal = mc.load_example()
    >>> jcf = mc.Jenks_Caspall_Forced(cal, k = 5)
    >>> jcf.k
    5
    >>> jcf.bins
    array([[1.34000e+00],
           [5.90000e+00],
           [1.67000e+01],
           [5.06500e+01],
           [4.11145e+03]])
    >>> jcf.counts
    array([12, 12, 13,  9, 12])
    >>> jcf4 = mc.Jenks_Caspall_Forced(cal, k = 4)
    >>> jcf4.k
    4
    >>> jcf4.bins
    array([[2.51000e+00],
           [8.70000e+00],
           [3.66800e+01],
           [4.11145e+03]])
    >>> jcf4.counts
    array([15, 14, 14, 15])
    """

    def __init__(self, y, k=K):
        self.k = k
        Map_Classifier.__init__(self, y)
        self.name = "Jenks_Caspall_Forced"

    def _set_bins(self):
        """Start from quantile bins, then force boundary observations up or
        down a class whenever the move lowers the total sum of squares.

        NOTE: the original code called ``sp.unique``; that top-level scipy
        alias is deprecated/removed, so ``np.unique`` (identical behavior)
        is used throughout.
        """
        x = self.y.copy()
        k = self.k
        q = quantile(x, k)
        solving = True
        xb, cnt = bin1d(x, q)
        # class means
        if x.ndim == 1:
            x.shape = (x.size, 1)
        n, tmp = x.shape
        xm = [x[xb == i].mean() for i in np.unique(xb)]
        q = xm
        xbar = np.array([xm[xbi] for xbi in xb])
        xbar.shape = (n, 1)
        ss = x - xbar
        ss *= ss
        ss = sum(ss)
        down_moves = up_moves = 0
        solving = True
        it = 0
        while solving:
            # try upward moves first
            moving_up = True
            while moving_up:
                class_ids = np.unique(xb)
                nk = [sum(xb == j) for j in class_ids]
                candidates = nk[:-1]
                i = 0
                up_moves = 0
                while candidates:
                    nki = candidates.pop(0)
                    if nki > 1:
                        # move the largest member of class i into class i+1
                        ids = np.nonzero(xb == class_ids[i])
                        mover = max(ids[0])
                        tmp = xb.copy()
                        tmp[mover] = xb[mover] + 1
                        tm = [x[tmp == j].mean() for j in np.unique(tmp)]
                        txbar = np.array([tm[xbi] for xbi in tmp])
                        txbar.shape = (n, 1)
                        tss = x - txbar
                        tss *= tss
                        tss = sum(tss)
                        if tss < ss:
                            xb = tmp
                            ss = tss
                            candidates = []
                            up_moves += 1
                    i += 1
                if not up_moves:
                    moving_up = False
            moving_down = True
            while moving_down:
                class_ids = np.unique(xb)
                nk = [sum(xb == j) for j in class_ids]
                candidates = nk[1:]
                i = 1
                down_moves = 0
                while candidates:
                    nki = candidates.pop(0)
                    if nki > 1:
                        # move the smallest member of class i into class i-1
                        ids = np.nonzero(xb == class_ids[i])
                        mover = min(ids[0])
                        mover_class = xb[mover]
                        target_class = mover_class - 1
                        tmp = xb.copy()
                        tmp[mover] = target_class
                        tm = [x[tmp == j].mean() for j in np.unique(tmp)]
                        txbar = np.array([tm[xbi] for xbi in tmp])
                        txbar.shape = (n, 1)
                        tss = x - txbar
                        tss *= tss
                        tss = sum(tss)
                        if tss < ss:
                            xb = tmp
                            ss = tss
                            candidates = []
                            down_moves += 1
                    i += 1
                if not down_moves:
                    moving_down = False
            if not up_moves and not down_moves:
                solving = False
            it += 1
        # x is (n,1), so these cuts keep a trailing axis (see doctest above)
        cuts = [max(x[xb == c]) for c in np.unique(xb)]
        self.bins = np.array(cuts)
        self.iterations = it
class User_Defined(Map_Classifier):
    """
    User Specified Binning

    Parameters
    ----------
    y : array
        (n,1), values to classify
    bins : array
        (k,1), upper bounds of classes (have to be monotically increasing)

    Attributes
    ----------
    yb : array
        (n,1), bin ids for observations,
    bins : array
        (k,1), the upper bounds of each class
    k : int
        the number of classes
    counts : array
        (k,1), the number of observations falling in each class

    Examples
    --------
    >>> import mapclassify as mc
    >>> cal = mc.load_example()
    >>> bins = [20, max(cal)]
    >>> bins
    [20, 4111.45]
    >>> ud = mc.User_Defined(cal, bins)
    >>> ud.bins
    array([  20.  , 4111.45])
    >>> ud.counts
    array([37, 21])
    >>> bins = [20, 30]
    >>> ud = mc.User_Defined(cal, bins)
    >>> ud.bins
    array([  20.  ,   30.  , 4111.45])
    >>> ud.counts
    array([37,  4, 17])

    Notes
    -----
    If upper bound of user bins does not exceed max(y) we append an
    additional bin.
    """

    def __init__(self, y, bins):
        # Work on a copy: the original code appended to the caller's list
        # in place, silently mutating it. Copying also accepts tuples and
        # ndarrays (e.g. self.bins passed back in by _update), which have
        # no .append().
        bins = list(bins)
        if bins[-1] < max(y):
            bins.append(max(y))
        self.k = len(bins)
        self.bins = np.array(bins)
        self.y = y
        Map_Classifier.__init__(self, y)
        self.name = 'User Defined'

    def _set_bins(self):
        # bins are fixed by the user in __init__; nothing to estimate
        pass

    def _update(self, y=None, bins=None):
        # merge any new data with the existing data and re-classify
        if y is not None:
            if hasattr(y, 'values'):
                y = y.values
            y = np.append(y.flatten(), self.y)
        else:
            y = self.y
        if bins is None:
            bins = self.bins
        self.__init__(y, bins)

    def update(self, y=None, inplace=False, **kwargs):
        """
        Add data or change classification parameters.

        Parameters
        ----------
        y : array
            (n,1) array of data to classify
        inplace : bool
            whether to conduct the update in place or to return a
            copy estimated from the additional specifications.

        Additional parameters provided in **kwargs are passed to the init
        function of the class. For documentation, check the class constructor.
        """
        bins = kwargs.pop('bins', self.bins)
        if inplace:
            self._update(y=y, bins=bins, **kwargs)
        else:
            new = copy.deepcopy(self)
            new._update(y, bins, **kwargs)
            return new
class Max_P_Classifier(Map_Classifier):
    """
    Max_P Map Classification

    Based on Max_p regionalization algorithm

    Parameters
    ----------
    y : array
        (n,1), values to classify
    k : int
        number of classes required
    initial : int
        number of initial solutions to use prior to swapping

    Attributes
    ----------
    yb : array
        (n,1), bin ids for observations,
    bins : array
        (k,1), the upper bounds of each class
    k : int
        the number of classes
    counts : array
        (k,1), the number of observations falling in each class

    Examples
    --------
    >>> import mapclassify as mc
    >>> cal = mc.load_example()
    >>> mp = mc.Max_P_Classifier(cal)
    >>> mp.bins
    array([   8.7 ,   20.47,   36.68,  110.74, 4111.45])
    >>> mp.counts
    array([29,  9,  5,  7,  8])
    """

    def __init__(self, y, k=K, initial=1000):
        self.k = k
        self.initial = initial
        Map_Classifier.__init__(self, y)
        self.name = "Max_P"

    def _set_bins(self):
        """Grow `initial` random contiguous solutions on the sorted data,
        keep the best, then greedily swap boundary members between
        neighboring classes while total sum of squares improves."""
        x = self.y.copy()
        k = self.k
        q = quantile(x, k)
        if x.ndim == 1:
            x.shape = (x.size, 1)
        n, tmp = x.shape
        x.sort(axis=0)
        # find best of initial solutions
        solution = 0
        best_tss = x.var() * x.shape[0]
        tss_all = np.zeros((self.initial, 1))
        while solution < self.initial:
            remaining = list(range(n))
            # seed each class with the observation closest to each quantile
            seeds = [
                np.nonzero(di == min(di))[0][0]
                for di in [np.abs(x - qi) for qi in q]
            ]
            rseeds = np.random.permutation(list(range(k))).tolist()
            [remaining.remove(seed) for seed in seeds]
            self.classes = classes = []
            [classes.append([seed]) for seed in seeds]
            while rseeds:
                seed_id = rseeds.pop()
                current = classes[seed_id]
                growing = True
                while growing:
                    # expand to adjacent unassigned observations on both sides
                    current = classes[seed_id]
                    low = current[0]
                    high = current[-1]
                    left = low - 1
                    right = high + 1
                    move_made = False
                    if left in remaining:
                        current.insert(0, left)
                        remaining.remove(left)
                        move_made = True
                    if right in remaining:
                        current.append(right)
                        remaining.remove(right)
                        move_made = True
                    if move_made:
                        classes[seed_id] = current
                    else:
                        growing = False
            tss = _fit(self.y, classes)
            tss_all[solution] = tss
            if tss < best_tss:
                best_solution = classes
                best_it = solution
                best_tss = tss
            solution += 1
        classes = best_solution
        self.best_it = best_it
        self.tss = best_tss
        self.a2c = a2c = {}
        self.tss_all = tss_all
        for r, cl in enumerate(classes):
            for a in cl:
                a2c[a] = r
        # local improvement: swap boundary members while tss decreases
        swapping = True
        while swapping:
            rseeds = np.random.permutation(list(range(k))).tolist()
            total_moves = 0
            while rseeds:
                id = rseeds.pop()
                growing = True
                total_moves = 0
                while growing:
                    target = classes[id]
                    left = target[0] - 1
                    right = target[-1] + 1
                    n_moves = 0
                    if left in a2c:
                        left_class = classes[a2c[left]]
                        if len(left_class) > 1:
                            a = left_class[-1]
                            if self._swap(left_class, target, a):
                                target.insert(0, a)
                                left_class.remove(a)
                                a2c[a] = id
                                n_moves += 1
                    if right in a2c:
                        right_class = classes[a2c[right]]
                        if len(right_class) > 1:
                            a = right_class[0]
                            if self._swap(right_class, target, a):
                                target.append(a)
                                right_class.remove(a)
                                n_moves += 1
                                a2c[a] = id
                    if not n_moves:
                        growing = False
                    total_moves += n_moves
            if not total_moves:
                swapping = False
        xs = self.y.copy()
        xs.sort()
        self.bins = np.array([xs[cl][-1] for cl in classes])

    def _ss(self, class_def):
        """calculates sum of squares for a class"""
        yc = self.y[class_def]
        css = yc - yc.mean()
        css *= css
        return sum(css)

    def _swap(self, class1, class2, a):
        """evaluate cost of moving a from class1 to class2"""
        ss1 = self._ss(class1)
        ss2 = self._ss(class2)
        tss1 = ss1 + ss2
        class1c = copy.copy(class1)
        class2c = copy.copy(class2)
        class1c.remove(a)
        class2c.append(a)
        ss1 = self._ss(class1c)
        ss2 = self._ss(class2c)
        tss2 = ss1 + ss2
        if tss1 < tss2:
            return False
        else:
            return True

    def update(self, y=None, inplace=False, **kwargs):
        """
        Add data or change classification parameters.

        Parameters
        ----------
        y : array
            (n,1) array of data to classify
        inplace : bool
            whether to conduct the update in place or to return a
            copy estimated from the additional specifications.

        Additional parameters provided in **kwargs are passed to the init
        function of the class. For documentation, check the class constructor.
        """
        # BUGFIX: the original passed an undefined name `bins` to _update,
        # raising NameError on every call; it also failed to carry `k`
        # forward (siblings do), silently resetting k to the default.
        kwargs.update({'initial': kwargs.pop('initial', self.initial)})
        kwargs.update({'k': kwargs.pop('k', self.k)})
        if inplace:
            self._update(y, **kwargs)
        else:
            new = copy.deepcopy(self)
            new._update(y, **kwargs)
            return new
def _fit(y, classes):
"""Calculate the total sum of squares for a vector y classified into
classes
Parameters
----------
y : array
(n,1), variable to be classified
classes : array
(k,1), integer values denoting class membership
"""
tss = 0
for class_def in classes:
yc = y[class_def]
css = yc - yc.mean()
css *= css
tss += sum(css)
return tss
# Registry of k-parameterized classifiers searched by gadf() and
# K_classifiers; keys are the names accepted by gadf(method=...).
kmethods = {}
kmethods["Quantiles"] = Quantiles
kmethods["Fisher_Jenks"] = Fisher_Jenks
kmethods['Natural_Breaks'] = Natural_Breaks
kmethods['Maximum_Breaks'] = Maximum_Breaks
def gadf(y, method="Quantiles", maxk=15, pct=0.8):
    r"""
    Evaluate the Goodness of Absolute Deviation Fit of a Classifier
    Finds the minimum value of k for which gadf>pct

    Parameters
    ----------
    y : array
        (n, 1) values to be classified
    method : {'Quantiles', 'Fisher_Jenks', 'Maximum_Breaks', 'Natural_Breaks'}
    maxk : int
        maximum value of k to evaluate
    pct : float
        The percentage of GADF to exceed

    Returns
    -------
    k : int
        number of classes
    cl : object
        instance of the classifier at k
    gadf : float
        goodness of absolute deviation fit

    Examples
    --------
    >>> import mapclassify as mc
    >>> cal = mc.load_example()
    >>> qgadf = mc.classifiers.gadf(cal)
    >>> qgadf[0]
    15
    >>> qgadf[-1]
    0.3740257590909283

    Quantiles fail to exceed 0.80 before 15 classes. If we lower the bar to
    0.2 we see quintiles as a result

    >>> qgadf2 = mc.classifiers.gadf(cal, pct = 0.2)
    >>> qgadf2[0]
    5
    >>> qgadf2[-1]
    0.21710231966462412

    Notes
    -----
    The GADF is defined as:

    .. math::

        GADF = 1 - \sum_c \sum_{i \in c}
               |y_i - y_{c,med}| / \sum_i |y_i - y_{med}|

    where :math:`y_{med}` is the global median and :math:`y_{c,med}` is
    the median for class :math:`c`.

    See Also
    --------
    K_classifiers
    """
    y = np.array(y)
    # total absolute deviation around the global median
    adam = np.abs(y - np.median(y)).sum()
    k = 1
    while k < maxk:
        k += 1
        cl = kmethods[method](y, k)
        gadf = 1 - cl.adcm / adam
        if gadf > pct:
            break
    return (k, cl, gadf)
class K_classifiers(object):
    """
    Evaluate all k-classifers and pick optimal based on k and GADF

    Parameters
    ----------
    y : array
        (n,1), values to be classified
    pct : float
        The percentage of GADF to exceed

    Attributes
    ----------
    best : object
        instance of the optimal Map_Classifier
    results : dictionary
        keys are classifier names, values are the Map_Classifier
        instances with the best pct for each classifier

    Examples
    --------
    >>> import mapclassify as mc
    >>> cal = mc.load_example()
    >>> ks = mc.classifiers.K_classifiers(cal)
    >>> ks.best.name
    'Fisher_Jenks'
    >>> ks.best.k
    4
    >>> ks.best.gadf
    0.8481032719908105

    Notes
    -----
    This can be used to suggest a classification scheme.

    See Also
    --------
    gadf
    """

    def __init__(self, y, pct=0.8):
        results = {}
        best = gadf(y, "Fisher_Jenks", maxk=len(y) - 1, pct=pct)
        # gadf returns (k, classifier, gadf). BUGFIX: the original code
        # assigned these to swapped names (pct0 <- k, k0 <- gadf), so the
        # comparison below mixed class counts with fit percentages.
        k0 = best[0]
        pct0 = best[-1]
        keys = list(kmethods.keys())
        keys.remove("Fisher_Jenks")
        results["Fisher_Jenks"] = best
        for method in keys:
            results[method] = gadf(y, method, maxk=len(y) - 1, pct=pct)
            k1 = results[method][0]
            pct1 = results[method][-1]
            # prefer fewer classes; break ties on the better fit
            if (k1 < k0) or (k1 == k0 and pct0 < pct1):
                best = results[method]
                k0 = k1
                pct0 = pct1
        self.results = results
        self.best = best[1]
|
pysal/mapclassify
|
mapclassify/classifiers.py
|
bin1d
|
python
|
def bin1d(x, bins):
    """Assign each value of a 1-d array to a bin and count bin occupancy.

    ``bins`` holds the monotonically increasing upper bounds (inclusive)
    of each bin; the first bin is open below.

    Returns
    -------
    (binIds, counts) : bin index per observation, and the number of
    observations per bin.
    """
    lowers = [-float("inf")] + list(bins[:-1])
    binIds = np.zeros(x.shape, dtype='int')
    for idx, (lo, hi) in enumerate(zip(lowers, bins)):
        # boolean mask times the bin index accumulates each id exactly once
        binIds += ((x > lo) & (x <= hi)) * idx
    counts = np.bincount(binIds, minlength=len(bins))
    return (binIds, counts)
|
Place values of a 1-d array into bins and determine counts of values in
each bin
Parameters
----------
x : array
(n, 1), values to bin
bins : array
(k,1), upper bounds of each bin (monotonic)
Returns
-------
binIds : array
1-d array of integer bin Ids
counts : int
number of elements of x falling in each bin
Examples
--------
>>> import numpy as np
>>> import mapclassify as mc
>>> x = np.arange(100, dtype = 'float')
>>> bins = [25, 74, 100]
>>> binIds, counts = mc.classifiers.bin1d(x, bins)
>>> binIds
array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2])
>>> counts
array([26, 49, 25])
|
train
|
https://github.com/pysal/mapclassify/blob/5b22ec33f5802becf40557614d90cd38efa1676e/mapclassify/classifiers.py#L231-L278
| null |
"""
A module of classification schemes for choropleth mapping.
"""
__author__ = "Sergio J. Rey"
__all__ = [
'Map_Classifier', 'quantile', 'Box_Plot', 'Equal_Interval', 'Fisher_Jenks',
'Fisher_Jenks_Sampled', 'Jenks_Caspall', 'Jenks_Caspall_Forced',
'Jenks_Caspall_Sampled', 'Max_P_Classifier', 'Maximum_Breaks',
'Natural_Breaks', 'Quantiles', 'Percentiles', 'Std_Mean', 'User_Defined',
'gadf', 'K_classifiers', 'HeadTail_Breaks', 'CLASSIFIERS'
]
CLASSIFIERS = ('Box_Plot', 'Equal_Interval', 'Fisher_Jenks',
'Fisher_Jenks_Sampled', 'HeadTail_Breaks', 'Jenks_Caspall',
'Jenks_Caspall_Forced', 'Jenks_Caspall_Sampled',
'Max_P_Classifier', 'Maximum_Breaks', 'Natural_Breaks',
'Quantiles', 'Percentiles', 'Std_Mean', 'User_Defined')
K = 5 # default number of classes in any map scheme with this as an argument
import numpy as np
import scipy.stats as stats
import scipy as sp
import copy
from scipy.cluster.vq import kmeans as KMEANS
from warnings import warn as Warn
try:
    from numba import jit
except ImportError:
    # numba is optional: fall back to a no-op decorator so @jit-decorated
    # functions still run as plain (uncompiled) Python
    def jit(func):
        return func
def headTail_breaks(values, cuts):
    """
    head tail breaks helper function

    Appends the mean of ``values`` to ``cuts`` and recurses on the head
    (values >= mean).

    BUGFIX: recurse only while the head is strictly smaller than the
    current array. For a constant array the head equals the whole array
    (every value >= mean), so the original version recursed forever.
    """
    values = np.array(values)
    mean = np.mean(values)
    cuts.append(mean)
    if len(values) > 1:
        head = values[values >= mean]
        if len(head) < len(values):
            return headTail_breaks(head, cuts)
    return cuts
def quantile(y, k=4):
    """
    Calculates the quantiles for an array

    Parameters
    ----------
    y : array
        (n,1), values to classify
    k : int
        number of quantiles

    Returns
    -------
    q : array
        (n,1), quantile values

    Examples
    --------
    >>> import numpy as np
    >>> import mapclassify as mc
    >>> x = np.arange(1000)
    >>> mc.classifiers.quantile(x)
    array([249.75, 499.5 , 749.25, 999.  ])
    >>> mc.classifiers.quantile(x, k = 3)
    array([333., 666., 999.])

    Note that if there are enough ties that the quantile values repeat, we
    collapse to pseudo quantiles in which case the number of classes will be
    less than k

    >>> x = [1.0] * 100
    >>> x.extend([3.0] * 40)
    >>> len(x)
    140
    >>> y = np.array(x)
    >>> mc.classifiers.quantile(y)
    array([1., 3.])
    """
    w = 100. / k
    p = np.arange(w, 100 + w, w)
    # float accumulation can push the last percentile past 100
    p[-1] = min(p[-1], 100.0)
    q = np.unique([stats.scoreatpercentile(y, pct) for pct in p])
    if len(q) < k:
        Warn('Warning: Not enough unique values in array to form k classes',
             UserWarning)
        Warn('Warning: setting k to %d' % len(q), UserWarning)
    return q
def binC(y, bins):
    """
    Bin categorical/qualitative data

    Parameters
    ----------
    y : array
        (n,q), categorical values
    bins : array
        (k,1), unique values associated with each bin

    Return
    ------
    b : array
        (n,q), bin membership, values between 0 and k-1

    Examples
    --------
    >>> import numpy as np
    >>> import mapclassify as mc
    >>> np.random.seed(1)
    >>> x = np.random.randint(2, 8, (10, 3))
    >>> bins = list(range(2, 8))
    >>> y = mc.classifiers.binC(x, bins)
    >>> y
    array([[5, 3, 4],
           [0, 1, 3],
           [5, 0, 0],
           [1, 4, 5],
           [4, 1, 2],
           [4, 5, 2],
           [4, 3, 4],
           [2, 4, 5],
           [2, 4, 1],
           [1, 0, 5]])
    """
    shape = np.shape(y)
    if np.ndim(y) == 1:
        n, k = shape[0], 1
    else:
        n, k = shape
    b = np.zeros((n, k), dtype='int')
    for label, category in enumerate(bins):
        b[np.nonzero(y == category)] = label
    # warn about any observed value that has no matching bin
    for val in set(y.flatten()):
        if val not in bins:
            Warn('value not in bin: {}'.format(val), UserWarning)
            Warn('bins: {}'.format(bins), UserWarning)
    return b
def bin(y, bins):
    """
    bin interval/ratio data

    Parameters
    ----------
    y : array
        (n,q), values to bin
    bins : array
        (k,1), upper bounds of each bin (monotonic)

    Returns
    -------
    b : array
        (n,q), values of values between 0 and k-1

    Examples
    --------
    >>> import numpy as np
    >>> import mapclassify as mc
    >>> np.random.seed(1)
    >>> x = np.random.randint(2, 20, (10, 3))
    >>> bins = [10, 15, 20]
    >>> b = mc.classifiers.bin(x, bins)
    >>> b
    array([[0, 1, 1],
           [0, 1, 1],
           [0, 2, 0],
           [2, 0, 1],
           [0, 1, 0],
           [0, 1, 1],
           [2, 0, 1],
           [2, 0, 1],
           [1, 1, 0],
           [0, 0, 2]])
    """
    shape = np.shape(y)
    if np.ndim(y) == 1:
        n, k = shape[0], 1
    else:
        n, k = shape
    b = np.zeros((n, k), dtype='int')
    if type(bins) != list:
        bins = bins.tolist()
    # sweep cutoffs from largest to smallest so each cell ends up with the
    # index of the smallest bound it does not exceed
    for idx in range(len(bins) - 1, -1, -1):
        b[np.nonzero(y <= bins[idx])] = idx
    return b
def load_example():
    """
    Helper function for doc tests

    Returns the bundled California employment example dataset via the
    package-local ``datasets.calemp`` loader.
    """
    from .datasets import calemp
    return calemp.load()
def _kmeans(y, k=5):
"""
Helper function to do kmeans in one dimension
"""
y = y * 1. # KMEANS needs float or double dtype
centroids = KMEANS(y, k)[0]
centroids.sort()
try:
class_ids = np.abs(y - centroids).argmin(axis=1)
except:
class_ids = np.abs(y[:, np.newaxis] - centroids).argmin(axis=1)
uc = np.unique(class_ids)
cuts = np.array([y[class_ids == c].max() for c in uc])
y_cent = np.zeros_like(y)
for c in uc:
y_cent[class_ids == c] = centroids[c]
diffs = y - y_cent
diffs *= diffs
return class_ids, cuts, diffs.sum(), centroids
def natural_breaks(values, k=5):
    """
    natural breaks helper function

    Jenks natural breaks is kmeans in one dimension; returns
    (centroids, class_ids, fit, cuts).
    """
    values = np.array(values)
    n_unique = len(np.unique(values))
    if n_unique < k:
        Warn('Warning: Not enough unique values in array to form k classes',
             UserWarning)
        Warn('Warning: setting k to %d' % n_unique, UserWarning)
        k = n_unique
    class_ids, cuts, fit, centroids = _kmeans(values, k)
    return (centroids, class_ids, fit, cuts)
@jit
def _fisher_jenks_means(values, classes=5, sort=True):
    """
    Jenks Optimal (Natural Breaks) algorithm implemented in Python.

    Notes
    -----
    The original Python code comes from here:
    http://danieljlewis.org/2010/06/07/jenks-natural-breaks-algorithm-in-python/
    and is based on a JAVA and Fortran code available here:
    https://stat.ethz.ch/pipermail/r-sig-geo/2006-March/000811.html

    Returns class breaks such that classes are internally homogeneous while
    assuring heterogeneity among classes.

    NOTE: ``values`` is sorted in place when ``sort=True`` — callers pass a
    copy when the original order matters.
    """
    if sort:
        values.sort()
    n_data = len(values)
    # mat1: index of the optimal lower class limit; mat2: the minimal
    # within-class variance achievable (dynamic-programming tables)
    mat1 = np.zeros((n_data + 1, classes + 1), dtype=np.int32)
    mat2 = np.zeros((n_data + 1, classes + 1), dtype=np.float32)
    mat1[1, 1:] = 1
    mat2[2:, 1:] = np.inf
    v = np.float32(0)
    for l in range(2, len(values) + 1):
        s1 = np.float32(0)
        s2 = np.float32(0)
        w = np.float32(0)
        for m in range(1, l + 1):
            # grow the trailing class backwards from element l, keeping
            # running sums so each variance update is O(1)
            i3 = l - m + 1
            val = np.float32(values[i3 - 1])
            s2 += val * val
            s1 += val
            w += np.float32(1)
            v = s2 - (s1 * s1) / w  # within-class sum of squares
            i4 = i3 - 1
            if i4 != 0:
                for j in range(2, classes + 1):
                    if mat2[l, j] >= (v + mat2[i4, j - 1]):
                        mat1[l, j] = i3
                        mat2[l, j] = v + mat2[i4, j - 1]
        mat1[l, 1] = 1
        mat2[l, 1] = v
    # backtrack through mat1 to recover the optimal break values
    k = len(values)
    kclass = np.zeros(classes + 1, dtype=values.dtype)
    kclass[classes] = values[len(values) - 1]
    kclass[0] = values[0]
    for countNum in range(classes, 1, -1):
        pivot = mat1[k, countNum]
        id = int(pivot - 2)
        kclass[countNum - 1] = values[id]
        k = int(pivot - 1)
    return kclass
class Map_Classifier(object):
"""
Abstract class for all map classifications :cite:`Slocum_2009`
For an array :math:`y` of :math:`n` values, a map classifier places each
value :math:`y_i` into one of :math:`k` mutually exclusive and exhaustive
classes. Each classifier defines the classes based on different criteria,
but in all cases the following hold for the classifiers in PySAL:
.. math:: C_j^l < y_i \le C_j^u \ \forall i \in C_j
where :math:`C_j` denotes class :math:`j` which has lower bound
:math:`C_j^l` and upper bound :math:`C_j^u`.
Map Classifiers Supported
* :class:`mapclassify.classifiers.Box_Plot`
* :class:`mapclassify.classifiers.Equal_Interval`
* :class:`mapclassify.classifiers.Fisher_Jenks`
* :class:`mapclassify.classifiers.Fisher_Jenks_Sampled`
* :class:`mapclassify.classifiers.HeadTail_Breaks`
* :class:`mapclassify.classifiers.Jenks_Caspall`
* :class:`mapclassify.classifiers.Jenks_Caspall_Forced`
* :class:`mapclassify.classifiers.Jenks_Caspall_Sampled`
* :class:`mapclassify.classifiers.Max_P_Classifier`
* :class:`mapclassify.classifiers.Maximum_Breaks`
* :class:`mapclassify.classifiers.Natural_Breaks`
* :class:`mapclassify.classifiers.Quantiles`
* :class:`mapclassify.classifiers.Percentiles`
* :class:`mapclassify.classifiers.Std_Mean`
* :class:`mapclassify.classifiers.User_Defined`
Utilities:
In addition to the classifiers, there are several utility functions that
can be used to evaluate the properties of a specific classifier,
or for automatic selection of a classifier and
number of classes.
* :func:`mapclassify.classifiers.gadf`
* :class:`mapclassify.classifiers.K_classifiers`
"""
def __init__(self, y):
y = np.asarray(y).flatten()
self.name = 'Map Classifier'
self.y = y
self._classify()
self._summary()
def _summary(self):
yb = self.yb
self.classes = [np.nonzero(yb == c)[0].tolist() for c in range(self.k)]
self.tss = self.get_tss()
self.adcm = self.get_adcm()
self.gadf = self.get_gadf()
def _classify(self):
self._set_bins()
self.yb, self.counts = bin1d(self.y, self.bins)
def _update(self, data, *args, **kwargs):
"""
The only thing that *should* happen in this function is
1. input sanitization for pandas
2. classification/reclassification.
Using their __init__ methods, all classifiers can re-classify given
different input parameters or additional data.
If you've got a cleverer updating equation than the intial estimation
equation, remove the call to self.__init__ below and replace it with
the updating function.
"""
if data is not None:
data = np.asarray(data).flatten()
data = np.append(data.flatten(), self.y)
else:
data = self.y
self.__init__(data, *args, **kwargs)
    @classmethod
    def make(cls, *args, **kwargs):
        """
        Configure and create a classifier that will consume data and produce
        classifications, given the configuration options specified by this
        function.

        Note that this like a *partial application* of the relevant class
        constructor. `make` creates a function that returns classifications; it
        does not actually do the classification.

        If you want to classify data directly, use the appropriate class
        constructor, like Quantiles, Max_Breaks, etc.

        If you *have* a classifier object, but want to find which bins new data
        falls into, use find_bin.

        Parameters
        ----------
        *args           : required positional arguments
                          all positional arguments required by the classifier,
                          excluding the input data.
        rolling         : bool
                          a boolean configuring the outputted classifier to use
                          a rolling classifier rather than a new classifier for
                          each input. If rolling, this adds the current data to
                          all of the previous data in the classifier, and
                          rebalances the bins, like a running median
                          computation.
        return_object   : bool
                          a boolean configuring the outputted classifier to
                          return the classifier object or not
        return_bins     : bool
                          a boolean configuring the outputted classifier to
                          return the bins/breaks or not
        return_counts   : bool
                          a boolean configuring the outputted classifier to
                          return the histogram of objects falling into each bin
                          or not

        Returns
        -------
        A function that consumes data and returns their bins (and object,
        bins/breaks, or counts, if requested).

        Note
        ----
        This is most useful when you want to run a classifier many times
        with a given configuration, such as when classifying many columns of an
        array or dataframe using the same configuration.

        Examples
        --------
        >>> import libpysal as ps
        >>> import mapclassify as mc
        >>> import geopandas as gpd
        >>> df = gpd.read_file(ps.examples.get_path('columbus.dbf'))
        >>> classifier = mc.Quantiles.make(k=9)
        >>> cl = df[['HOVAL', 'CRIME', 'INC']].apply(classifier)
        >>> cl["HOVAL"].values[:10]
        array([8, 7, 2, 4, 1, 3, 8, 5, 7, 8])
        >>> cl["CRIME"].values[:10]
        array([0, 1, 3, 4, 6, 2, 0, 5, 3, 4])
        >>> cl["INC"].values[:10]
        array([7, 8, 5, 0, 3, 5, 0, 3, 6, 4])
        >>> import pandas as pd; from numpy import linspace as lsp
        >>> data = [lsp(3,8,num=10), lsp(10, 0, num=10), lsp(-5, 15, num=10)]
        >>> data = pd.DataFrame(data).T
        >>> data
                  0          1          2
        0  3.000000  10.000000  -5.000000
        1  3.555556   8.888889  -2.777778
        2  4.111111   7.777778  -0.555556
        3  4.666667   6.666667   1.666667
        4  5.222222   5.555556   3.888889
        5  5.777778   4.444444   6.111111
        6  6.333333   3.333333   8.333333
        7  6.888889   2.222222  10.555556
        8  7.444444   1.111111  12.777778
        9  8.000000   0.000000  15.000000
        >>> data.apply(mc.Quantiles.make(rolling=True))
           0  1  2
        0  0  4  0
        1  0  4  0
        2  1  4  0
        3  1  3  0
        4  2  2  1
        5  2  1  2
        6  3  0  4
        7  3  0  4
        8  4  0  4
        9  4  0  4
        >>> dbf = ps.io.open(ps.examples.get_path('baltim.dbf'))
        >>> data = dbf.by_col_array('PRICE', 'LOTSZ', 'SQFT')
        >>> my_bins = [1, 10, 20, 40, 80]
        >>> cl = [mc.User_Defined.make(bins=my_bins)(a) for a in data.T]
        >>> len(cl)
        3
        >>> cl[0][:10]
        array([4, 5, 5, 5, 4, 4, 5, 4, 4, 5])
        """
        # only flag overrides return flag
        to_annotate = copy.deepcopy(kwargs)
        return_object = kwargs.pop('return_object', False)
        return_bins = kwargs.pop('return_bins', False)
        return_counts = kwargs.pop('return_counts', False)
        rolling = kwargs.pop('rolling', False)
        if rolling:
            # just initialize a fake classifier
            data = list(range(10))
            cls_instance = cls(data, *args, **kwargs)
            # and empty it, since we'll be using the update
            cls_instance.y = np.array([])
        else:
            cls_instance = None

        # wrap init in a closure to make a consumer.
        # Qc Na: "Objects/Closures are poor man's Closures/Objects"
        def classifier(data, cls_instance=cls_instance):
            # rolling: fold new data into the shared instance; otherwise a
            # fresh classifier is built per call
            if rolling:
                cls_instance.update(data, inplace=True, **kwargs)
                yb = cls_instance.find_bin(data)
            else:
                cls_instance = cls(data, *args, **kwargs)
                yb = cls_instance.yb
            outs = [yb, None, None, None]
            outs[1] = cls_instance if return_object else None
            outs[2] = cls_instance.bins if return_bins else None
            outs[3] = cls_instance.counts if return_counts else None
            outs = [a for a in outs if a is not None]
            if len(outs) == 1:
                return outs[0]
            else:
                return outs

        # for debugging/jic, keep around the kwargs.
        # in future, we might want to make this a thin class, so that we can
        # set a custom repr. Call the class `Binner` or something, that's a
        # pre-configured Classifier that just consumes data, bins it, &
        # possibly updates the bins.
        classifier._options = to_annotate
        return classifier
def update(self, y=None, inplace=False, **kwargs):
    """
    Add data or change classification parameters.

    Parameters
    ----------
    y : array
        (n,1) array of data to classify
    inplace : bool
        whether to conduct the update in place or to return a copy
        estimated from the additional specifications.

    Additional parameters provided in **kwargs are passed to the init
    function of the class. For documentation, check the class constructor.
    """
    # keep the current number of classes unless the caller overrides it
    kwargs.setdefault('k', self.k)
    if inplace:
        self._update(y, **kwargs)
        return None
    fresh = copy.deepcopy(self)
    fresh._update(y, **kwargs)
    return fresh
def __str__(self):
    # delegate to the shared tabular rendering used by __repr__ as well
    return self._table_string()
def __repr__(self):
    # repr and str intentionally share the same table representation
    rendered = self._table_string()
    return rendered
def __call__(self, *args, **kwargs):
    """
    This will allow the classifier to be called like it's a function.

    Whether or not we want to make this be "find_bin" or "update" is a
    design decision.

    I like this as find_bin, since a classifier's job should be to classify
    the data given to it using the rules estimated from the `_classify()`
    function.

    NOTE: keyword arguments are accepted but silently ignored — only the
    positional arguments are forwarded to ``find_bin``.
    """
    return self.find_bin(*args)
def get_tss(self):
"""
Total sum of squares around class means
Returns sum of squares over all class means
"""
tss = 0
for class_def in self.classes:
if len(class_def) > 0:
yc = self.y[class_def]
css = yc - yc.mean()
css *= css
tss += sum(css)
return tss
def _set_bins(self):
    # Hook for subclasses: concrete classifiers override this to populate
    # self.bins; the base class performs no binning itself.
    pass
def get_adcm(self):
"""
Absolute deviation around class median (ADCM).
Calculates the absolute deviations of each observation about its class
median as a measure of fit for the classification method.
Returns sum of ADCM over all classes
"""
adcm = 0
for class_def in self.classes:
if len(class_def) > 0:
yc = self.y[class_def]
yc_med = np.median(yc)
ycd = np.abs(yc - yc_med)
adcm += sum(ycd)
return adcm
def get_gadf(self):
"""
Goodness of absolute deviation of fit
"""
adam = (np.abs(self.y - np.median(self.y))).sum()
gadf = 1 - self.adcm / adam
return gadf
def _table_string(self, width=12, decimal=3):
    """Render the classification as a fixed-width text table.

    Used by both ``__str__`` and ``__repr__``. The `width` parameter is
    overwritten below by the widest formatted bin value; `decimal` sets
    the number of decimal places for the bin bounds.
    """
    fmt = ".%df" % decimal
    fmt = "%" + fmt
    # widest formatted bin value determines the column width
    largest = max([len(fmt % i) for i in self.bins])
    width = largest
    fmt = "%d.%df" % (width, decimal)
    fmt = "%" + fmt
    h1 = "Lower"
    h1 = h1.center(largest)
    h2 = " "
    h2 = h2.center(10)
    h3 = "Upper"
    h3 = h3.center(largest + 1)
    # `largest` is reused: now the width of the Count column
    largest = "%d" % max(self.counts)
    largest = len(largest) + 15
    h4 = "Count"
    h4 = h4.rjust(largest)
    table = []
    header = h1 + h2 + h3 + h4
    table.append(header)
    table.append("=" * len(header))
    for i, up in enumerate(self.bins):
        if i == 0:
            # first row has no lower bound — blank-pad instead
            left = " " * width
            left += " x[i] <= "
        else:
            left = fmt % self.bins[i - 1]
            left += " < x[i] <= "
        right = fmt % self.bins[i]
        row = left + right
        cnt = "%d" % self.counts[i]
        cnt = cnt.rjust(largest)
        row += cnt
        table.append(row)
    name = self.name
    # center the classifier name over the last (widest) row
    top = name.center(len(row))
    table.insert(0, top)
    table.insert(1, " ")
    table = "\n".join(table)
    return table
def find_bin(self, x):
"""
Sort input or inputs according to the current bin estimate
Parameters
----------
x : array or numeric
a value or array of values to fit within the estimated
bins
Returns
-------
a bin index or array of bin indices that classify the input into one of
the classifiers' bins.
Note that this differs from similar functionality in
numpy.digitize(x, classi.bins, right=True).
This will always provide the closest bin, so data "outside" the classifier,
above and below the max/min breaks, will be classified into the nearest bin.
numpy.digitize returns k+1 for data greater than the greatest bin, but retains 0
for data below the lowest bin.
"""
x = np.asarray(x).flatten()
right = np.digitize(x, self.bins, right=True)
if right.max() == len(self.bins):
right[right == len(self.bins)] = len(self.bins) - 1
return right
class HeadTail_Breaks(Map_Classifier):
    """
    Head/tail Breaks Map Classification for Heavy-tailed Distributions

    Parameters
    ----------
    y : array
        (n,1), values to classify

    Attributes
    ----------
    yb : array
        (n,1), bin ids for observations,
    bins : array
        (k,1), the upper bounds of each class
    k : int
        the number of classes
    counts : array
        (k,1), the number of observations falling in each class

    Examples
    --------
    >>> import numpy as np
    >>> import mapclassify as mc
    >>> np.random.seed(10)
    >>> cal = mc.load_example()
    >>> htb = mc.HeadTail_Breaks(cal)
    >>> htb.k
    3
    >>> htb.counts
    array([50,  7,  1])
    >>> htb.bins
    array([ 125.92810345,  811.26      , 4111.45      ])
    >>> np.random.seed(123456)
    >>> x = np.random.lognormal(3, 1, 1000)
    >>> htb = mc.HeadTail_Breaks(x)
    >>> htb.bins
    array([ 32.26204423,  72.50205622, 128.07150107, 190.2899093 ,
           264.82847377, 457.88157946, 576.76046949])
    >>> htb.counts
    array([695, 209,  62,  22,  10,   1,   1])

    Notes
    -----
    Head/tail Breaks is a relatively new classification method developed
    for data with a heavy-tailed distribution.

    Implementation based on contributions by Alessandra Sozzi <alessandra.sozzi@gmail.com>.

    For theoretical details see :cite:`Jiang_2013`.
    """

    def __init__(self, y):
        Map_Classifier.__init__(self, y)
        self.name = 'HeadTail_Breaks'

    def _set_bins(self):
        # headTail_breaks recursively splits the data at the mean,
        # accumulating one break per "head" of the distribution
        breaks = headTail_breaks(self.y.copy(), [])
        self.bins = np.array(breaks)
        self.k = len(self.bins)
class Equal_Interval(Map_Classifier):
    """
    Equal Interval Classification

    Parameters
    ----------
    y : array
        (n,1), values to classify
    k : int
        number of classes required

    Attributes
    ----------
    yb : array
        (n,1), bin ids for observations,
        each value is the id of the class the observation belongs to
        yb[i] = j for j>=1 if bins[j-1] < y[i] <= bins[j], yb[i] = 0
        otherwise
    bins : array
        (k,1), the upper bounds of each class
    k : int
        the number of classes
    counts : array
        (k,1), the number of observations falling in each class

    Examples
    --------
    >>> import mapclassify as mc
    >>> cal = mc.load_example()
    >>> ei = mc.Equal_Interval(cal, k = 5)
    >>> ei.k
    5
    >>> ei.counts
    array([57,  0,  0,  0,  1])
    >>> ei.bins
    array([ 822.394, 1644.658, 2466.922, 3289.186, 4111.45 ])

    Notes
    -----
    Intervals defined to have equal width:

    .. math::

        bins_j = min(y)+w*(j+1)

    with :math:`w=\\frac{max(y)-min(j)}{k}`
    """

    def __init__(self, y, k=K):
        """
        see class docstring
        """
        self.k = k
        Map_Classifier.__init__(self, y)
        self.name = 'Equal Interval'

    def _set_bins(self):
        data = self.y
        k = self.k
        lo = min(data)
        hi = max(data)
        # equal-width cut points built by accumulation from the minimum
        step = (hi - lo) * 1. / k
        cuts = np.arange(lo + step, hi + step, step)
        if len(cuts) > k:  # float accumulation can overshoot by one cut
            cuts = cuts[0:k]
        cuts[-1] = hi  # pin the top break to the data maximum
        self.bins = cuts.copy()
class Percentiles(Map_Classifier):
    """
    Percentiles Map Classification

    Parameters
    ----------
    y : array
        attribute to classify
    pct : array
        percentiles default=[1,10,50,90,99,100]

    Attributes
    ----------
    yb : array
        bin ids for observations (numpy array n x 1)
    bins : array
        the upper bounds of each class (numpy array k x 1)
    k : int
        the number of classes
    counts : int
        the number of observations falling in each class
        (numpy array k x 1)

    Examples
    --------
    >>> import mapclassify as mc
    >>> cal = mc.load_example()
    >>> p = mc.Percentiles(cal)
    >>> p.bins
    array([1.357000e-01, 5.530000e-01, 9.365000e+00, 2.139140e+02,
           2.179948e+03, 4.111450e+03])
    >>> p.counts
    array([ 1,  5, 23, 23,  5,  1])
    >>> p2 = mc.Percentiles(cal, pct = [50, 100])
    >>> p2.bins
    array([   9.365, 4111.45 ])
    >>> p2.counts
    array([29, 29])
    >>> p2.k
    2
    """

    def __init__(self, y, pct=[1, 10, 50, 90, 99, 100]):
        self.pct = pct
        Map_Classifier.__init__(self, y)
        self.name = 'Percentiles'

    def _set_bins(self):
        # one break per requested percentile of the data
        scores = [stats.scoreatpercentile(self.y, p) for p in self.pct]
        self.bins = np.array(scores)
        self.k = len(self.bins)

    def update(self, y=None, inplace=False, **kwargs):
        """
        Add data or change classification parameters.

        Parameters
        ----------
        y : array
            (n,1) array of data to classify
        inplace : bool
            whether to conduct the update in place or to return a copy
            estimated from the additional specifications.

        Additional parameters provided in **kwargs are passed to the init
        function of the class. For documentation, check the class constructor.
        """
        # keep current percentiles unless the caller overrides them
        kwargs.setdefault('pct', self.pct)
        if inplace:
            self._update(y, **kwargs)
            return None
        fresh = copy.deepcopy(self)
        fresh._update(y, **kwargs)
        return fresh
class Box_Plot(Map_Classifier):
    """
    Box_Plot Map Classification

    Parameters
    ----------
    y : array
        attribute to classify
    hinge : float
        multiplier for IQR

    Attributes
    ----------
    yb : array
        (n,1), bin ids for observations
    bins : array
        (n,1), the upper bounds of each class (monotonic)
    k : int
        the number of classes
    counts : array
        (k,1), the number of observations falling in each class
    low_outlier_ids : array
        indices of observations that are low outliers
    high_outlier_ids : array
        indices of observations that are high outliers

    Notes
    -----
    The bins are set as follows::

        bins[0] = q[0]-hinge*IQR
        bins[1] = q[0]
        bins[2] = q[1]
        bins[3] = q[2]
        bins[4] = q[2]+hinge*IQR
        bins[5] = inf  (see Notes)

    where q is an array of the first three quartiles of y and
    IQR=q[2]-q[0]

    If q[2]+hinge*IQR > max(y) there will only be 5 classes and no high
    outliers, otherwise, there will be 6 classes and at least one high
    outlier.

    Examples
    --------
    >>> import mapclassify as mc
    >>> cal = mc.load_example()
    >>> bp = mc.Box_Plot(cal)
    >>> bp.bins
    array([-5.287625e+01,  2.567500e+00,  9.365000e+00,  3.953000e+01,
            9.497375e+01,  4.111450e+03])
    >>> bp.counts
    array([ 0, 15, 14, 14,  6,  9])
    >>> bp.high_outlier_ids
    array([ 0,  6, 18, 29, 33, 36, 37, 40, 42])
    >>> cal[bp.high_outlier_ids].values
    array([ 329.92,  181.27,  370.5 ,  722.85,  192.05,  110.74, 4111.45,
            317.11,  264.93])
    >>> bx = mc.Box_Plot(np.arange(100))
    >>> bx.bins
    array([-49.5 ,  24.75,  49.5 ,  74.25, 148.5 ])
    """

    def __init__(self, y, hinge=1.5):
        """
        Parameters
        ----------
        y : array (n,1)
            attribute to classify
        hinge : float
            multiple of inter-quartile range (default=1.5)
        """
        self.hinge = hinge
        Map_Classifier.__init__(self, y)
        self.name = 'Box Plot'

    def _set_bins(self):
        y = self.y
        # quartiles plus the maximum as the provisional top break
        pct = [25, 50, 75, 100]
        bins = [stats.scoreatpercentile(y, p) for p in pct]
        # inter-quartile range: Q3 - Q1
        iqr = bins[-2] - bins[0]
        self.iqr = iqr
        pivot = self.hinge * iqr
        left_fence = bins[0] - pivot
        right_fence = bins[-2] + pivot
        if right_fence < bins[-1]:
            # high outliers exist: keep max(y) as a 6th break above the fence
            bins.insert(-1, right_fence)
        else:
            # no high outliers: the fence becomes the top break (5 classes)
            bins[-1] = right_fence
        bins.insert(0, left_fence)
        self.bins = np.array(bins)
        self.k = len(bins)

    def _classify(self):
        # classify as usual, then record the outlier observations:
        # class 0 is below the low fence, class 5 (if present) above the high fence
        Map_Classifier._classify(self)
        self.low_outlier_ids = np.nonzero(self.yb == 0)[0]
        self.high_outlier_ids = np.nonzero(self.yb == 5)[0]

    def update(self, y=None, inplace=False, **kwargs):
        """
        Add data or change classification parameters.

        Parameters
        ----------
        y : array
            (n,1) array of data to classify
        inplace : bool
            whether to conduct the update in place or to return a
            copy estimated from the additional specifications.

        Additional parameters provided in **kwargs are passed to the init
        function of the class. For documentation, check the class constructor.
        """
        # carry the current hinge forward unless explicitly overridden
        kwargs.update({'hinge': kwargs.pop('hinge', self.hinge)})
        if inplace:
            self._update(y, **kwargs)
        else:
            new = copy.deepcopy(self)
            new._update(y, **kwargs)
            return new
class Quantiles(Map_Classifier):
    """
    Quantile Map Classification

    Parameters
    ----------
    y : array
        (n,1), values to classify
    k : int
        number of classes required

    Attributes
    ----------
    yb : array
        (n,1), bin ids for observations,
        each value is the id of the class the observation belongs to
        yb[i] = j for j>=1 if bins[j-1] < y[i] <= bins[j], yb[i] = 0
        otherwise
    bins : array
        (k,1), the upper bounds of each class
    k : int
        the number of classes
    counts : array
        (k,1), the number of observations falling in each class

    Examples
    --------
    >>> import mapclassify as mc
    >>> cal = mc.load_example()
    >>> q = mc.Quantiles(cal, k = 5)
    >>> q.bins
    array([1.46400e+00, 5.79800e+00, 1.32780e+01, 5.46160e+01, 4.11145e+03])
    >>> q.counts
    array([12, 11, 12, 11, 12])
    """

    def __init__(self, y, k=K):
        self.k = k
        Map_Classifier.__init__(self, y)
        self.name = 'Quantiles'

    def _set_bins(self):
        # delegate directly to the module-level quantile helper
        self.bins = quantile(self.y, k=self.k)
class Std_Mean(Map_Classifier):
    """
    Standard Deviation and Mean Map Classification

    Parameters
    ----------
    y : array
        (n,1), values to classify
    multiples : array
        the multiples of the standard deviation to add/subtract from
        the sample mean to define the bins, default=[-2,-1,1,2]

    Attributes
    ----------
    yb : array
        (n,1), bin ids for observations,
    bins : array
        (k,1), the upper bounds of each class
    k : int
        the number of classes
    counts : array
        (k,1), the number of observations falling in each class

    Examples
    --------
    >>> import mapclassify as mc
    >>> cal = mc.load_example()
    >>> st = mc.Std_Mean(cal)
    >>> st.k
    5
    >>> st.bins
    array([-967.36235382, -420.71712519,  672.57333208, 1219.21856072,
           4111.45      ])
    >>> st.counts
    array([ 0,  0, 56,  1,  1])
    >>>
    >>> st3 = mc.Std_Mean(cal, multiples = [-3, -1.5, 1.5, 3])
    >>> st3.bins
    array([-1514.00758246,  -694.03973951,   945.8959464 ,  1765.86378936,
            4111.45      ])
    >>> st3.counts
    array([ 0,  0, 57,  0,  1])
    """

    def __init__(self, y, multiples=[-2, -1, 1, 2]):
        self.multiples = multiples
        Map_Classifier.__init__(self, y)
        self.name = 'Std_Mean'

    def _set_bins(self):
        # breaks at mean + m*std for each requested multiple (sample std)
        std = self.y.std(ddof=1)
        mean = self.y.mean()
        cuts = [mean + std * m for m in self.multiples]
        top = self.y.max()
        # make sure the data maximum is always covered by the last bin
        if cuts[-1] < top:
            cuts.append(top)
        self.bins = np.array(cuts)
        self.k = len(cuts)

    def update(self, y=None, inplace=False, **kwargs):
        """
        Add data or change classification parameters.

        Parameters
        ----------
        y : array
            (n,1) array of data to classify
        inplace : bool
            whether to conduct the update in place or to return a copy
            estimated from the additional specifications.

        Additional parameters provided in **kwargs are passed to the init
        function of the class. For documentation, check the class constructor.
        """
        # keep the current multiples unless explicitly overridden
        kwargs.setdefault('multiples', self.multiples)
        if inplace:
            self._update(y, **kwargs)
            return None
        fresh = copy.deepcopy(self)
        fresh._update(y, **kwargs)
        return fresh
class Maximum_Breaks(Map_Classifier):
    """
    Maximum Breaks Map Classification

    Parameters
    ----------
    y : array
        (n, 1), values to classify
    k : int
        number of classes required
    mindiff : float
        The minimum difference between class breaks

    Attributes
    ----------
    yb : array
        (n, 1), bin ids for observations
    bins : array
        (k, 1), the upper bounds of each class
    k : int
        the number of classes
    counts : array
        (k, 1), the number of observations falling in each class (numpy
        array k x 1)

    Examples
    --------
    >>> import mapclassify as mc
    >>> cal = mc.load_example()
    >>> mb = mc.Maximum_Breaks(cal, k = 5)
    >>> mb.k
    5
    >>> mb.bins
    array([ 146.005,  228.49 ,  546.675, 2417.15 , 4111.45 ])
    >>> mb.counts
    array([50,  2,  4,  1,  1])
    """

    def __init__(self, y, k=5, mindiff=0):
        self.k = k
        self.mindiff = mindiff
        Map_Classifier.__init__(self, y)
        self.name = 'Maximum_Breaks'

    def _set_bins(self):
        xs = self.y.copy()
        k = self.k
        xs.sort()
        min_diff = self.mindiff
        # gaps between consecutive sorted values
        d = xs[1:] - xs[:-1]
        # keep only gaps above the threshold, de-duplicated and sorted
        diffs = d[np.nonzero(d > min_diff)]
        diffs = sp.unique(diffs)
        # k classes need k-1 interior breaks: take the k-1 largest gaps
        k1 = k - 1
        if len(diffs) > k1:
            diffs = diffs[-k1:]
        mp = []
        self.cids = []
        for diff in diffs:
            # all positions where this gap occurs; break at the gap midpoint
            ids = np.nonzero(d == diff)
            for id in ids:
                self.cids.append(id[0])
                cp = ((xs[id] + xs[id + 1]) / 2.)
                mp.append(cp[0])
        # the data maximum closes the top class
        mp.append(xs[-1])
        mp.sort()
        self.bins = np.array(mp)

    def update(self, y=None, inplace=False, **kwargs):
        """
        Add data or change classification parameters.

        Parameters
        ----------
        y : array
            (n,1) array of data to classify
        inplace : bool
            whether to conduct the update in place or to return a copy
            estimated from the additional specifications.

        Additional parameters provided in **kwargs are passed to the init
        function of the class. For documentation, check the class constructor.
        """
        # carry current k and mindiff forward unless overridden
        kwargs.update({'k': kwargs.pop('k', self.k)})
        kwargs.update({'mindiff': kwargs.pop('mindiff', self.mindiff)})
        if inplace:
            self._update(y, **kwargs)
        else:
            new = copy.deepcopy(self)
            new._update(y, **kwargs)
            return new
class Natural_Breaks(Map_Classifier):
    """
    Natural Breaks Map Classification

    Parameters
    ----------
    y : array
        (n,1), values to classify
    k : int
        number of classes required
    initial : int
        number of initial solutions to generate, (default=100)

    Attributes
    ----------
    yb : array
        (n,1), bin ids for observations,
    bins : array
        (k,1), the upper bounds of each class
    k : int
        the number of classes
    counts : array
        (k,1), the number of observations falling in each class

    Examples
    --------
    >>> import numpy as np
    >>> import mapclassify as mc
    >>> np.random.seed(123456)
    >>> cal = mc.load_example()
    >>> nb = mc.Natural_Breaks(cal, k=5)
    >>> nb.k
    5
    >>> nb.counts
    array([41,  9,  6,  1,  1])
    >>> nb.bins
    array([  29.82,  110.74,  370.5 ,  722.85, 4111.45])
    >>> x = np.array([1] * 50)
    >>> x[-1] = 20
    >>> nb = mc.Natural_Breaks(x, k = 5, initial = 0)
    Warning: Not enough unique values in array to form k classes
    Warning: setting k to 2
    >>> nb.bins
    array([ 1, 20])
    >>> nb.counts
    array([49,  1])

    Notes
    -----
    There is a tradeoff here between speed and consistency of the
    classification If you want more speed, set initial to a smaller value (0
    would result in the best speed, if you want more consistent classes in
    multiple runs of Natural_Breaks on the same data, set initial to a higher
    value.
    """

    def __init__(self, y, k=K, initial=100):
        self.k = k
        self.initial = initial
        Map_Classifier.__init__(self, y)
        self.name = 'Natural_Breaks'

    def _set_bins(self):
        x = self.y.copy()
        k = self.k
        values = np.array(x)
        uv = np.unique(values)
        uvk = len(uv)
        if uvk < k:
            # fewer distinct values than classes: fall back to one class
            # per unique value and warn the user
            ms = 'Warning: Not enough unique values in array to form k classes'
            Warn(ms, UserWarning)
            Warn("Warning: setting k to %d" % uvk, UserWarning)
            k = uvk
            uv.sort()
            # we set the bins equal to the sorted unique values and ramp k
            # downwards. no need to call kmeans.
            self.bins = uv
            self.k = k
        else:
            # random-restart search: keep the best (lowest-fit) solution
            # found across self.initial + 1 runs of natural_breaks
            best = natural_breaks(x, k)
            best_fit = best[2]
            for _ in range(self.initial):
                candidate = natural_breaks(x, k)
                fit_i = candidate[2]
                if fit_i < best_fit:
                    best = candidate
                    # BUG FIX: the best fit must track the best solution;
                    # previously it was never updated, so a later solution
                    # merely better than the *first* could overwrite the
                    # true best found so far.
                    best_fit = fit_i
            self.bins = np.array(best[-1])
            self.k = len(self.bins)

    def update(self, y=None, inplace=False, **kwargs):
        """
        Add data or change classification parameters.

        Parameters
        ----------
        y : array
            (n,1) array of data to classify
        inplace : bool
            whether to conduct the update in place or to return a
            copy estimated from the additional specifications.

        Additional parameters provided in **kwargs are passed to the init
        function of the class. For documentation, check the class constructor.
        """
        kwargs.update({'k': kwargs.pop('k', self.k)})
        kwargs.update({'initial': kwargs.pop('initial', self.initial)})
        if inplace:
            self._update(y, **kwargs)
        else:
            new = copy.deepcopy(self)
            new._update(y, **kwargs)
            return new
class Fisher_Jenks(Map_Classifier):
    """
    Fisher Jenks optimal classifier - mean based

    Parameters
    ----------
    y : array
        (n,1), values to classify
    k : int
        number of classes required

    Attributes
    ----------
    yb : array
        (n,1), bin ids for observations
    bins : array
        (k,1), the upper bounds of each class
    k : int
        the number of classes
    counts : array
        (k,1), the number of observations falling in each class

    Examples
    --------
    >>> import mapclassify as mc
    >>> cal = mc.load_example()
    >>> fj = mc.Fisher_Jenks(cal)
    >>> fj.adcm
    799.24
    >>> fj.bins
    array([  75.29,  192.05,  370.5 ,  722.85, 4111.45])
    >>> fj.counts
    array([49,  3,  4,  1,  1])
    >>>
    """

    def __init__(self, y, k=K):
        # the optimal partition needs at least k distinct values
        if len(np.unique(y)) < k:
            raise ValueError("Fewer unique values than specified classes.")
        self.k = k
        Map_Classifier.__init__(self, y)
        self.name = "Fisher_Jenks"

    def _set_bins(self):
        # _fisher_jenks_means returns the class minimum first; the
        # remaining entries are the k upper bounds
        breaks = _fisher_jenks_means(self.y.copy(), classes=self.k)
        self.bins = np.array(breaks[1:])
class Fisher_Jenks_Sampled(Map_Classifier):
    """
    Fisher Jenks optimal classifier - mean based using random sample

    Parameters
    ----------
    y : array
        (n,1), values to classify
    k : int
        number of classes required
    pct : float
        The percentage of n that should form the sample
        If pct is specified such that n*pct > 1000, then
        pct = 1000./n, unless truncate is False
    truncate : boolean
        truncate pct in cases where pct * n > 1000., (Default True)

    Attributes
    ----------
    yb : array
        (n,1), bin ids for observations
    bins : array
        (k,1), the upper bounds of each class
    k : int
        the number of classes
    counts : array
        (k,1), the number of observations falling in each class

    Examples
    --------
    (Turned off due to timing being different across hardware)

    For theoretical details see :cite:`Rey_2016`.
    """

    def __init__(self, y, k=K, pct=0.10, truncate=True):
        self.k = k
        n = y.size
        # cap the sample at ~1000 observations unless the caller opts out
        if (pct * n > 1000) and truncate:
            pct = 1000. / n
        # FIX: np.random.random_integers is deprecated/removed in modern
        # NumPy; randint(0, n, size) draws from the same [0, n-1] range.
        ids = np.random.randint(0, n, int(n * pct))
        yr = y[ids]
        yr[-1] = max(y)  # make sure we have the upper bound
        yr[0] = min(y)  # make sure we have the min
        self.original_y = y
        self.pct = pct
        self._truncated = truncate
        self.yr = yr
        self.yr_n = yr.size
        # classify on the sample, then bin the full data on those breaks
        Map_Classifier.__init__(self, yr)
        self.yb, self.counts = bin1d(y, self.bins)
        self.name = "Fisher_Jenks_Sampled"
        self.y = y
        self._summary()  # have to recalculate summary stats

    def _set_bins(self):
        # self.y is the sample here (set by Map_Classifier.__init__)
        fj = Fisher_Jenks(self.y, self.k)
        self.bins = fj.bins

    def update(self, y=None, inplace=False, **kwargs):
        """
        Add data or change classification parameters.

        Parameters
        ----------
        y : array
            (n,1) array of data to classify
        inplace : bool
            whether to conduct the update in place or to return a
            copy estimated from the additional specifications.

        Additional parameters provided in **kwargs are passed to the init
        function of the class. For documentation, check the class constructor.
        """
        # carry current parameters forward unless overridden
        kwargs.update({'k': kwargs.pop('k', self.k)})
        kwargs.update({'pct': kwargs.pop('pct', self.pct)})
        kwargs.update({'truncate': kwargs.pop('truncate', self._truncated)})
        if inplace:
            self._update(y, **kwargs)
        else:
            new = copy.deepcopy(self)
            new._update(y, **kwargs)
            return new
class Jenks_Caspall(Map_Classifier):
    """
    Jenks Caspall Map Classification

    Parameters
    ----------
    y : array
        (n,1), values to classify
    k : int
        number of classes required

    Attributes
    ----------
    yb : array
        (n,1), bin ids for observations,
    bins : array
        (k,1), the upper bounds of each class
    k : int
        the number of classes
    counts : array
        (k,1), the number of observations falling in each class

    Examples
    --------
    >>> import mapclassify as mc
    >>> cal = mc.load_example()
    >>> jc = mc.Jenks_Caspall(cal, k = 5)
    >>> jc.bins
    array([1.81000e+00, 7.60000e+00, 2.98200e+01, 1.81270e+02, 4.11145e+03])
    >>> jc.counts
    array([14, 13, 14, 10,  7])
    """

    def __init__(self, y, k=K):
        self.k = k
        Map_Classifier.__init__(self, y)
        self.name = "Jenks_Caspall"

    def _set_bins(self):
        """Iteratively reassign observations to the nearest class median
        until class membership stops changing."""
        x = self.y.copy()
        k = self.k
        # start with quantiles
        q = quantile(x, k)
        solving = True
        xb, cnts = bin1d(x, q)
        # reshape to a column vector so (x - q) broadcasts to (n, k)
        if x.ndim == 1:
            x.shape = (x.size, 1)
        # FIX: previously `n, k = x.shape` silently rebound k to 1 (the
        # column count), shadowing the class count; k is unused after,
        # but the clobber was misleading — discard the column dim instead.
        n, _ = x.shape
        # class medians seed the iteration
        xm = [np.median(x[xb == i]) for i in np.unique(xb)]
        xb0 = xb.copy()
        q = xm
        it = 0
        rk = list(range(self.k))
        while solving:
            xb = np.zeros(xb0.shape, int)
            # assign each observation to its nearest class median
            d = abs(x - q)
            xb = d.argmin(axis=1)
            if (xb0 == xb).all():
                solving = False
            else:
                xb0 = xb
            it += 1
            # recompute medians for the next pass
            q = np.array([np.median(x[xb == i]) for i in rk])
        # upper bound of each class = the largest member value
        cuts = np.array([max(x[xb == i]) for i in sp.unique(xb)])
        cuts.shape = (len(cuts), )
        self.bins = cuts
        self.iterations = it
class Jenks_Caspall_Sampled(Map_Classifier):
    """
    Jenks Caspall Map Classification using a random sample

    Parameters
    ----------
    y : array
        (n,1), values to classify
    k : int
        number of classes required
    pct : float
        The percentage of n that should form the sample
        If pct is specified such that n*pct > 1000, then pct = 1000./n

    Attributes
    ----------
    yb : array
        (n,1), bin ids for observations,
    bins : array
        (k,1), the upper bounds of each class
    k : int
        the number of classes
    counts : array
        (k,1), the number of observations falling in each class

    Examples
    --------
    >>> import mapclassify as mc
    >>> cal = mc.load_example()
    >>> x = np.random.random(100000)
    >>> jc = mc.Jenks_Caspall(x)
    >>> jcs = mc.Jenks_Caspall_Sampled(x)
    >>> jc.bins
    array([0.1988721 , 0.39624334, 0.59441487, 0.79624357, 0.99999251])
    >>> jcs.bins
    array([0.20998558, 0.42112792, 0.62752937, 0.80543819, 0.99999251])
    >>> jc.counts
    array([19943, 19510, 19547, 20297, 20703])
    >>> jcs.counts
    array([21039, 20908, 20425, 17813, 19815])

    # not for testing since we get different times on different hardware
    # just included for documentation of likely speed gains
    #>>> t1 = time.time(); jc = Jenks_Caspall(x); t2 = time.time()
    #>>> t1s = time.time(); jcs = Jenks_Caspall_Sampled(x); t2s = time.time()
    #>>> t2 - t1; t2s - t1s
    #1.8292930126190186
    #0.061631917953491211

    Notes
    -----
    This is intended for large n problems. The logic is to apply
    Jenks_Caspall to a random subset of the y space and then bin the
    complete vector y on the bins obtained from the subset. This would
    trade off some "accuracy" for a gain in speed.
    """

    def __init__(self, y, k=K, pct=0.10):
        self.k = k
        n = y.size
        # cap the sample at ~1000 observations
        if pct * n > 1000:
            pct = 1000. / n
        # FIX: np.random.random_integers is deprecated/removed in modern
        # NumPy; randint(0, n, size) draws from the same [0, n-1] range.
        ids = np.random.randint(0, n, int(n * pct))
        yr = y[ids]
        # NOTE(review): unlike Fisher_Jenks_Sampled this overwrites the
        # *first* sample element with max(y) and never injects min(y);
        # preserved as-is — confirm against upstream intent.
        yr[0] = max(y)  # make sure we have the upper bound
        self.original_y = y
        self.pct = pct
        self.yr = yr
        self.yr_n = yr.size
        # classify on the sample, then bin the full data on those breaks
        Map_Classifier.__init__(self, yr)
        self.yb, self.counts = bin1d(y, self.bins)
        self.name = "Jenks_Caspall_Sampled"
        self.y = y
        self._summary()  # have to recalculate summary stats

    def _set_bins(self):
        # self.y is the sample here (set by Map_Classifier.__init__)
        jc = Jenks_Caspall(self.y, self.k)
        self.bins = jc.bins
        self.iterations = jc.iterations

    def update(self, y=None, inplace=False, **kwargs):
        """
        Add data or change classification parameters.

        Parameters
        ----------
        y : array
            (n,1) array of data to classify
        inplace : bool
            whether to conduct the update in place or to return a
            copy estimated from the additional specifications.

        Additional parameters provided in **kwargs are passed to the init
        function of the class. For documentation, check the class constructor.
        """
        # carry current parameters forward unless overridden
        kwargs.update({'k': kwargs.pop('k', self.k)})
        kwargs.update({'pct': kwargs.pop('pct', self.pct)})
        if inplace:
            self._update(y, **kwargs)
        else:
            new = copy.deepcopy(self)
            new._update(y, **kwargs)
            return new
class Jenks_Caspall_Forced(Map_Classifier):
    """
    Jenks Caspall  Map Classification with forced movements

    Parameters
    ----------
    y : array
        (n,1), values to classify
    k : int
        number of classes required

    Attributes
    ----------
    yb : array
        (n,1), bin ids for observations
    bins : array
        (k,1), the upper bounds of each class
    k : int
        the number of classes
    counts : array
        (k,1), the number of observations falling in each class

    Examples
    --------
    >>> import mapclassify as mc
    >>> cal = mc.load_example()
    >>> jcf = mc.Jenks_Caspall_Forced(cal, k = 5)
    >>> jcf.k
    5
    >>> jcf.bins
    array([[1.34000e+00],
           [5.90000e+00],
           [1.67000e+01],
           [5.06500e+01],
           [4.11145e+03]])
    >>> jcf.counts
    array([12, 12, 13,  9, 12])
    >>> jcf4 = mc.Jenks_Caspall_Forced(cal, k = 4)
    >>> jcf4.k
    4
    >>> jcf4.bins
    array([[2.51000e+00],
           [8.70000e+00],
           [3.66800e+01],
           [4.11145e+03]])
    >>> jcf4.counts
    array([15, 14, 14, 15])
    """

    def __init__(self, y, k=K):
        self.k = k
        Map_Classifier.__init__(self, y)
        self.name = "Jenks_Caspall_Forced"

    def _set_bins(self):
        # Hill-climbing on total within-class sum of squares (ss): start
        # from quantile classes, then repeatedly force boundary
        # observations into the neighboring class whenever the move
        # lowers ss, alternating upward and downward passes until neither
        # direction improves.
        x = self.y.copy()
        k = self.k
        q = quantile(x, k)
        solving = True
        xb, cnt = bin1d(x, q)
        # class means
        if x.ndim == 1:
            x.shape = (x.size, 1)
        n, tmp = x.shape
        xm = [x[xb == i].mean() for i in np.unique(xb)]
        q = xm
        xbar = np.array([xm[xbi] for xbi in xb])
        xbar.shape = (n, 1)
        # current objective: total squared deviation from class means
        ss = x - xbar
        ss *= ss
        ss = sum(ss)
        down_moves = up_moves = 0
        solving = True
        it = 0
        while solving:
            # try upward moves first
            moving_up = True
            while moving_up:
                class_ids = sp.unique(xb)
                nk = [sum(xb == j) for j in class_ids]
                # a class must keep at least one member, so only classes
                # with >1 members are move candidates; last class has no
                # upward neighbor
                candidates = nk[:-1]
                i = 0
                up_moves = 0
                while candidates:
                    nki = candidates.pop(0)
                    if nki > 1:
                        ids = np.nonzero(xb == class_ids[i])
                        # largest member of class i is the boundary obs
                        mover = max(ids[0])
                        tmp = xb.copy()
                        tmp[mover] = xb[mover] + 1
                        # recompute the objective for the trial assignment
                        tm = [x[tmp == j].mean() for j in sp.unique(tmp)]
                        txbar = np.array([tm[xbi] for xbi in tmp])
                        txbar.shape = (n, 1)
                        tss = x - txbar
                        tss *= tss
                        tss = sum(tss)
                        if tss < ss:
                            # accept the move and restart the upward scan
                            xb = tmp
                            ss = tss
                            candidates = []
                            up_moves += 1
                    i += 1
                if not up_moves:
                    moving_up = False
            moving_down = True
            while moving_down:
                class_ids = sp.unique(xb)
                nk = [sum(xb == j) for j in class_ids]
                # first class has no downward neighbor
                candidates = nk[1:]
                i = 1
                down_moves = 0
                while candidates:
                    nki = candidates.pop(0)
                    if nki > 1:
                        ids = np.nonzero(xb == class_ids[i])
                        # smallest member of class i is the boundary obs
                        mover = min(ids[0])
                        mover_class = xb[mover]
                        target_class = mover_class - 1
                        tmp = xb.copy()
                        tmp[mover] = target_class
                        tm = [x[tmp == j].mean() for j in sp.unique(tmp)]
                        txbar = np.array([tm[xbi] for xbi in tmp])
                        txbar.shape = (n, 1)
                        tss = x - txbar
                        tss *= tss
                        tss = sum(tss)
                        if tss < ss:
                            # accept the move and restart the downward scan
                            xb = tmp
                            ss = tss
                            candidates = []
                            down_moves += 1
                    i += 1
                if not down_moves:
                    moving_down = False
            # converged when a full up+down sweep makes no move
            if not up_moves and not down_moves:
                solving = False
            it += 1
        # class upper bounds are the largest member of each class
        cuts = [max(x[xb == c]) for c in sp.unique(xb)]
        self.bins = np.array(cuts)
        self.iterations = it
class User_Defined(Map_Classifier):
    """
    User Specified Binning

    Parameters
    ----------
    y : array
        (n,1), values to classify
    bins : array
        (k,1), upper bounds of classes (have to be monotically increasing)

    Attributes
    ----------
    yb : array
        (n,1), bin ids for observations,
    bins : array
        (k,1), the upper bounds of each class
    k : int
        the number of classes
    counts : array
        (k,1), the number of observations falling in each class

    Examples
    --------
    >>> import mapclassify as mc
    >>> cal = mc.load_example()
    >>> bins = [20, max(cal)]
    >>> bins
    [20, 4111.45]
    >>> ud = mc.User_Defined(cal, bins)
    >>> ud.bins
    array([  20.  , 4111.45])
    >>> ud.counts
    array([37, 21])
    >>> bins = [20, 30]
    >>> ud = mc.User_Defined(cal, bins)
    >>> ud.bins
    array([  20.  ,   30.  , 4111.45])
    >>> ud.counts
    array([37,  4, 17])

    Notes
    -----
    If upper bound of user bins does not exceed max(y) we append an
    additional bin.
    """

    def __init__(self, y, bins):
        # FIX: work on a copy so the caller's bins sequence is never
        # mutated by the append below; list() also accepts array-likes
        # (e.g. the ndarray self.bins passed back in by _update).
        bins = list(bins)
        if bins[-1] < max(y):
            # extend with a top bin so every observation is covered
            bins.append(max(y))
        self.k = len(bins)
        self.bins = np.array(bins)
        self.y = y
        Map_Classifier.__init__(self, y)
        self.name = 'User Defined'

    def _set_bins(self):
        # bins are supplied by the user in __init__; nothing to estimate
        pass

    def _update(self, y=None, bins=None):
        # merge new data (if any) with the existing data, keep bins unless
        # new ones are provided, and re-run the constructor
        if y is not None:
            if hasattr(y, 'values'):
                y = y.values
            y = np.append(y.flatten(), self.y)
        else:
            y = self.y
        if bins is None:
            bins = self.bins
        self.__init__(y, bins)

    def update(self, y=None, inplace=False, **kwargs):
        """
        Add data or change classification parameters.

        Parameters
        ----------
        y : array
            (n,1) array of data to classify
        inplace : bool
            whether to conduct the update in place or to return a
            copy estimated from the additional specifications.

        Additional parameters provided in **kwargs are passed to the init
        function of the class. For documentation, check the class constructor.
        """
        bins = kwargs.pop('bins', self.bins)
        if inplace:
            self._update(y=y, bins=bins, **kwargs)
        else:
            new = copy.deepcopy(self)
            new._update(y, bins, **kwargs)
            return new
class Max_P_Classifier(Map_Classifier):
    """
    Max_P Map Classification

    Based on Max_p regionalization algorithm

    Parameters
    ----------
    y : array
        (n,1), values to classify
    k : int
        number of classes required
    initial : int
        number of initial solutions to use prior to swapping

    Attributes
    ----------
    yb : array
        (n,1), bin ids for observations,
    bins : array
        (k,1), the upper bounds of each class
    k : int
        the number of classes
    counts : array
        (k,1), the number of observations falling in each class

    Examples
    --------
    >>> import mapclassify as mc
    >>> cal = mc.load_example()
    >>> mp = mc.Max_P_Classifier(cal)
    >>> mp.bins
    array([   8.7 ,   20.47,   36.68,  110.74, 4111.45])
    >>> mp.counts
    array([29,  9,  5,  7,  8])
    """

    def __init__(self, y, k=K, initial=1000):
        self.k = k
        self.initial = initial
        Map_Classifier.__init__(self, y)
        self.name = "Max_P"

    def _set_bins(self):
        # Greedy region-growing from k quantile seeds, repeated `initial`
        # times with random seed orderings; the best (lowest-TSS) solution
        # is then refined by boundary swaps until no move improves it.
        x = self.y.copy()
        k = self.k
        q = quantile(x, k)
        if x.ndim == 1:
            x.shape = (x.size, 1)
        n, tmp = x.shape
        x.sort(axis=0)
        # find best of initial solutions
        solution = 0
        best_tss = x.var() * x.shape[0]
        tss_all = np.zeros((self.initial, 1))
        while solution < self.initial:
            remaining = list(range(n))
            # seed each class at the observation closest to its quantile
            seeds = [
                np.nonzero(di == min(di))[0][0]
                for di in [np.abs(x - qi) for qi in q]
            ]
            rseeds = np.random.permutation(list(range(k))).tolist()
            [remaining.remove(seed) for seed in seeds]
            self.classes = classes = []
            [classes.append([seed]) for seed in seeds]
            while rseeds:
                seed_id = rseeds.pop()
                current = classes[seed_id]
                growing = True
                while growing:
                    current = classes[seed_id]
                    low = current[0]
                    high = current[-1]
                    left = low - 1
                    right = high + 1
                    move_made = False
                    if left in remaining:
                        current.insert(0, left)
                        remaining.remove(left)
                        move_made = True
                    if right in remaining:
                        current.append(right)
                        remaining.remove(right)
                        move_made = True
                    if move_made:
                        classes[seed_id] = current
                    else:
                        growing = False
            tss = _fit(self.y, classes)
            tss_all[solution] = tss
            if tss < best_tss:
                best_solution = classes
                best_it = solution
                best_tss = tss
            solution += 1
        classes = best_solution
        self.best_it = best_it
        self.tss = best_tss
        self.a2c = a2c = {}
        self.tss_all = tss_all
        for r, cl in enumerate(classes):
            for a in cl:
                a2c[a] = r
        # local refinement: swap boundary members between adjacent classes
        # whenever the swap lowers the combined within-class sum of squares
        swapping = True
        while swapping:
            rseeds = np.random.permutation(list(range(k))).tolist()
            total_moves = 0
            while rseeds:
                id = rseeds.pop()
                growing = True
                total_moves = 0
                while growing:
                    target = classes[id]
                    left = target[0] - 1
                    right = target[-1] + 1
                    n_moves = 0
                    if left in a2c:
                        left_class = classes[a2c[left]]
                        if len(left_class) > 1:
                            a = left_class[-1]
                            if self._swap(left_class, target, a):
                                target.insert(0, a)
                                left_class.remove(a)
                                a2c[a] = id
                                n_moves += 1
                    if right in a2c:
                        right_class = classes[a2c[right]]
                        if len(right_class) > 1:
                            a = right_class[0]
                            if self._swap(right_class, target, a):
                                target.append(a)
                                right_class.remove(a)
                                n_moves += 1
                                a2c[a] = id
                    if not n_moves:
                        growing = False
                    total_moves += n_moves
            if not total_moves:
                swapping = False
        xs = self.y.copy()
        xs.sort()
        self.bins = np.array([xs[cl][-1] for cl in classes])

    def _ss(self, class_def):
        """calculates sum of squares for a class"""
        yc = self.y[class_def]
        css = yc - yc.mean()
        css *= css
        return sum(css)

    def _swap(self, class1, class2, a):
        """evaluate cost of moving a from class1 to class2"""
        ss1 = self._ss(class1)
        ss2 = self._ss(class2)
        tss1 = ss1 + ss2
        class1c = copy.copy(class1)
        class2c = copy.copy(class2)
        class1c.remove(a)
        class2c.append(a)
        ss1 = self._ss(class1c)
        ss2 = self._ss(class2c)
        tss2 = ss1 + ss2
        if tss1 < tss2:
            return False
        else:
            return True

    def update(self, y=None, inplace=False, **kwargs):
        """
        Add data or change classification parameters.

        Parameters
        ----------
        y : array
            (n,1) array of data to classify
        inplace : bool
            whether to conduct the update in place or to return a
            copy estimated from the additional specifications.

        Additional parameters provided in **kwargs are passed to the init
        function of the class. For documentation, check the class constructor.
        """
        # BUGFIX: the original body passed an undefined local name `bins`
        # to _update (NameError on every call); Max_P takes no bins argument.
        # Also preserve k, matching Map_Classifier.update, so an update does
        # not silently reset the number of classes to the module default.
        kwargs.update({'k': kwargs.pop('k', self.k)})
        kwargs.update({'initial': kwargs.pop('initial', self.initial)})
        if inplace:
            self._update(y, **kwargs)
        else:
            new = copy.deepcopy(self)
            new._update(y, **kwargs)
            return new
def _fit(y, classes):
"""Calculate the total sum of squares for a vector y classified into
classes
Parameters
----------
y : array
(n,1), variable to be classified
classes : array
(k,1), integer values denoting class membership
"""
tss = 0
for class_def in classes:
yc = y[class_def]
css = yc - yc.mean()
css *= css
tss += sum(css)
return tss
# Registry of the k-parameterised classifiers that gadf() and K_classifiers
# are able to evaluate, keyed by classifier name.
kmethods = {}
kmethods["Quantiles"] = Quantiles
kmethods["Fisher_Jenks"] = Fisher_Jenks
kmethods['Natural_Breaks'] = Natural_Breaks
kmethods['Maximum_Breaks'] = Maximum_Breaks
def gadf(y, method="Quantiles", maxk=15, pct=0.8):
    """
    Evaluate the Goodness of Absolute Deviation Fit of a Classifier
    Finds the minimum value of k for which gadf > pct

    Parameters
    ----------
    y : array
        (n, 1) values to be classified
    method : {'Quantiles', 'Fisher_Jenks', 'Maximum_Breaks', 'Natural_Breaks'}
    maxk : int
        maximum value of k to evaluate
    pct : float
        The percentage of GADF to exceed

    Returns
    -------
    k : int
        number of classes
    cl : object
        instance of the classifier at k
    gadf : float
        goodness of absolute deviation fit

    Notes
    -----
    The GADF is defined as:

    .. math::

        GADF = 1 - \\sum_c \\sum_{i \\in c}
               |y_i - y_{c,med}|  / \\sum_i |y_i - y_{med}|

    where :math:`y_{med}` is the global median and :math:`y_{c,med}` is
    the median for class :math:`c`.

    See Also
    --------
    K_classifiers
    """
    y = np.array(y)
    # denominator: absolute deviations about the global median
    total_dev = np.abs(y - np.median(y)).sum()
    for k in range(2, maxk + 1):
        cl = kmethods[method](y, k)
        gadf = 1 - cl.adcm / total_dev
        if gadf > pct:
            break
    # if no k reached pct, the classifier at maxk is returned
    return (k, cl, gadf)
class K_classifiers(object):
    """
    Evaluate all k-classifers and pick optimal based on k and GADF

    Parameters
    ----------
    y : array
        (n,1), values to be classified
    pct : float
        The percentage of GADF to exceed

    Attributes
    ----------
    best : object
        instance of the optimal Map_Classifier
    results : dictionary
        keys are classifier names, values are the Map_Classifier
        instances with the best pct for each classifer

    Notes
    -----
    This can be used to suggest a classification scheme.

    See Also
    --------
    gadf
    """

    def __init__(self, y, pct=0.8):
        results = {}
        best = gadf(y, "Fisher_Jenks", maxk=len(y) - 1, pct=pct)
        # gadf returns (k, classifier, gadf_value).
        # BUGFIX: the original code read the tuple in swapped order
        # (pct0 = best[0]; k0 = best[-1]), so the k/gadf comparison below
        # compared the wrong quantities.
        k0 = best[0]
        pct0 = best[-1]
        keys = list(kmethods.keys())
        keys.remove("Fisher_Jenks")
        results["Fisher_Jenks"] = best
        for method in keys:
            results[method] = gadf(y, method, maxk=len(y) - 1, pct=pct)
            k1 = results[method][0]
            pct1 = results[method][-1]
            # prefer fewer classes; break ties on higher goodness of fit
            if (k1 < k0) or (k1 == k0 and pct0 < pct1):
                best = results[method]
                k0 = k1
                pct0 = pct1
        self.results = results
        self.best = best[1]
|
pysal/mapclassify
|
mapclassify/classifiers.py
|
_kmeans
|
python
|
def _kmeans(y, k=5):
y = y * 1. # KMEANS needs float or double dtype
centroids = KMEANS(y, k)[0]
centroids.sort()
try:
class_ids = np.abs(y - centroids).argmin(axis=1)
except:
class_ids = np.abs(y[:, np.newaxis] - centroids).argmin(axis=1)
uc = np.unique(class_ids)
cuts = np.array([y[class_ids == c].max() for c in uc])
y_cent = np.zeros_like(y)
for c in uc:
y_cent[class_ids == c] = centroids[c]
diffs = y - y_cent
diffs *= diffs
return class_ids, cuts, diffs.sum(), centroids
|
Helper function to do kmeans in one dimension
|
train
|
https://github.com/pysal/mapclassify/blob/5b22ec33f5802becf40557614d90cd38efa1676e/mapclassify/classifiers.py#L289-L310
| null |
"""
A module of classification schemes for choropleth mapping.
"""
__author__ = "Sergio J. Rey"
__all__ = [
'Map_Classifier', 'quantile', 'Box_Plot', 'Equal_Interval', 'Fisher_Jenks',
'Fisher_Jenks_Sampled', 'Jenks_Caspall', 'Jenks_Caspall_Forced',
'Jenks_Caspall_Sampled', 'Max_P_Classifier', 'Maximum_Breaks',
'Natural_Breaks', 'Quantiles', 'Percentiles', 'Std_Mean', 'User_Defined',
'gadf', 'K_classifiers', 'HeadTail_Breaks', 'CLASSIFIERS'
]
CLASSIFIERS = ('Box_Plot', 'Equal_Interval', 'Fisher_Jenks',
'Fisher_Jenks_Sampled', 'HeadTail_Breaks', 'Jenks_Caspall',
'Jenks_Caspall_Forced', 'Jenks_Caspall_Sampled',
'Max_P_Classifier', 'Maximum_Breaks', 'Natural_Breaks',
'Quantiles', 'Percentiles', 'Std_Mean', 'User_Defined')
K = 5 # default number of classes in any map scheme with this as an argument
import numpy as np
import scipy.stats as stats
import scipy as sp
import copy
from scipy.cluster.vq import kmeans as KMEANS
from warnings import warn as Warn
# numba is an optional dependency: when it is missing, fall back to a no-op
# decorator so @jit-decorated functions still run as plain Python.
try:
    from numba import jit
except ImportError:
    def jit(func):
        return func
def headTail_breaks(values, cuts):
    """
    head tail breaks helper function

    Recursively split *values* at the mean, keep the "head" (values at or
    above the mean), and append each mean to *cuts*.

    Parameters
    ----------
    values : array-like
        values to split
    cuts : list
        accumulator for the break points; mutated in place and returned
    """
    values = np.array(values)
    mean = np.mean(values)
    cuts.append(mean)
    head = values[values >= mean]
    # BUGFIX: recurse only while the head actually shrinks. The original
    # condition (len(values) > 1 alone) recursed forever when all remaining
    # values were equal, since then head == values on every call.
    if len(values) > 1 and len(head) < len(values):
        return headTail_breaks(head, cuts)
    return cuts
def quantile(y, k=4):
"""
Calculates the quantiles for an array
Parameters
----------
y : array
(n,1), values to classify
k : int
number of quantiles
Returns
-------
q : array
(n,1), quantile values
Examples
--------
>>> import numpy as np
>>> import mapclassify as mc
>>> x = np.arange(1000)
>>> mc.classifiers.quantile(x)
array([249.75, 499.5 , 749.25, 999. ])
>>> mc.classifiers.quantile(x, k = 3)
array([333., 666., 999.])
Note that if there are enough ties that the quantile values repeat, we
collapse to pseudo quantiles in which case the number of classes will be
less than k
>>> x = [1.0] * 100
>>> x.extend([3.0] * 40)
>>> len(x)
140
>>> y = np.array(x)
>>> mc.classifiers.quantile(y)
array([1., 3.])
"""
w = 100. / k
p = np.arange(w, 100 + w, w)
if p[-1] > 100.0:
p[-1] = 100.0
q = np.array([stats.scoreatpercentile(y, pct) for pct in p])
q = np.unique(q)
k_q = len(q)
if k_q < k:
Warn('Warning: Not enough unique values in array to form k classes',
UserWarning)
Warn('Warning: setting k to %d' % k_q, UserWarning)
return q
def binC(y, bins):
    """
    Bin categorical/qualitative data

    Parameters
    ----------
    y : array
        (n,q), categorical values
    bins : array
        (k,1), unique values associated with each bin

    Return
    ------
    b : array
        (n,q), bin membership, values between 0 and k-1

    Notes
    -----
    Values in *y* that match no entry of *bins* keep label 0 and trigger a
    warning.
    """
    if np.ndim(y) == 1:
        n, k = np.shape(y)[0], 1
    else:
        n, k = np.shape(y)
    b = np.zeros((n, k), dtype='int')
    for label, category in enumerate(bins):
        b[np.nonzero(y == category)] = label
    # check for non-binned items and warn if needed
    for val in set(y.flatten()):
        if val not in bins:
            Warn('value not in bin: {}'.format(val), UserWarning)
            Warn('bins: {}'.format(bins), UserWarning)
    return b
def bin(y, bins):
    """
    bin interval/ratio data

    Parameters
    ----------
    y : array
        (n,q), values to bin
    bins : array
        (k,1), upper bounds of each bin (monotonic)

    Returns
    -------
    b : array
        (n,q), bin labels between 0 and k-1
    """
    if np.ndim(y) == 1:
        n, k = np.shape(y)[0], 1
    else:
        n, k = np.shape(y)
    labels = np.zeros((n, k), dtype='int')
    cutoffs = bins if type(bins) == list else bins.tolist()
    # sweep from the largest cutoff down so that the smallest applicable
    # upper bound ends up labelling each value
    for idx in range(len(cutoffs) - 1, -1, -1):
        labels[np.nonzero(y <= cutoffs[idx])] = idx
    return labels
def bin1d(x, bins):
    """
    Place values of a 1-d array into bins and determine counts of values in
    each bin

    Parameters
    ----------
    x : array
        (n, 1), values to bin
    bins : array
        (k,1), upper bounds of each bin (monotonic)

    Returns
    -------
    binIds : array
        1-d array of integer bin Ids
    counts : int
        number of elements of x falling in each bin
    """
    # pair each upper bound with the previous bound (-inf for the first)
    lowers = [-float("inf")] + list(bins[0:-1])
    binIds = np.zeros(x.shape, dtype='int')
    for idx, (lo, hi) in enumerate(zip(lowers, bins)):
        # boolean mask times idx accumulates the label of the matching bin
        binIds += (x > lo) * (x <= hi) * idx
    counts = np.bincount(binIds, minlength=len(bins))
    return (binIds, counts)
def load_example():
    """
    Helper function for doc tests
    """
    # Deferred import so the example dataset is only touched when requested.
    from .datasets import calemp
    return calemp.load()
def natural_breaks(values, k=5):
    """
    natural breaks helper function

    Jenks natural breaks is kmeans in one dimension
    """
    values = np.array(values)
    n_unique = len(np.unique(values))
    if n_unique < k:
        # cannot form more classes than there are distinct values
        Warn('Warning: Not enough unique values in array to form k classes',
             UserWarning)
        Warn('Warning: setting k to %d' % n_unique, UserWarning)
        k = n_unique
    class_ids, cuts, fit, sids = _kmeans(values, k)
    return (sids, class_ids, fit, cuts)
@jit
def _fisher_jenks_means(values, classes=5, sort=True):
    """
    Jenks Optimal (Natural Breaks) algorithm implemented in Python.

    Notes
    -----
    The original Python code comes from here:
    http://danieljlewis.org/2010/06/07/jenks-natural-breaks-algorithm-in-python/
    and is based on a JAVA and Fortran code available here:
    https://stat.ethz.ch/pipermail/r-sig-geo/2006-March/000811.html

    Returns class breaks such that classes are internally homogeneous while
    assuring heterogeneity among classes.

    NOTE(review): *values* is sorted in place when sort=True — callers see
    the mutation.
    """
    if sort:
        values.sort()
    n_data = len(values)
    # mat1: optimal lower class limits; mat2: minimal variance combinations
    mat1 = np.zeros((n_data + 1, classes + 1), dtype=np.int32)
    mat2 = np.zeros((n_data + 1, classes + 1), dtype=np.float32)
    mat1[1, 1:] = 1
    mat2[2:, 1:] = np.inf
    v = np.float32(0)
    for l in range(2, len(values) + 1):
        s1 = np.float32(0)
        s2 = np.float32(0)
        w = np.float32(0)
        for m in range(1, l + 1):
            i3 = l - m + 1
            val = np.float32(values[i3 - 1])
            s2 += val * val
            s1 += val
            w += np.float32(1)
            # running within-class variance of values[i3-1 : l]
            v = s2 - (s1 * s1) / w
            i4 = i3 - 1
            if i4 != 0:
                for j in range(2, classes + 1):
                    if mat2[l, j] >= (v + mat2[i4, j - 1]):
                        mat1[l, j] = i3
                        mat2[l, j] = v + mat2[i4, j - 1]
        mat1[l, 1] = 1
        mat2[l, 1] = v
    # backtrack through mat1 to recover the class break values
    k = len(values)
    kclass = np.zeros(classes + 1, dtype=values.dtype)
    kclass[classes] = values[len(values) - 1]
    kclass[0] = values[0]
    for countNum in range(classes, 1, -1):
        pivot = mat1[k, countNum]
        id = int(pivot - 2)
        kclass[countNum - 1] = values[id]
        k = int(pivot - 1)
    return kclass
class Map_Classifier(object):
    """
    Abstract class for all map classifications :cite:`Slocum_2009`

    For an array :math:`y` of :math:`n` values, a map classifier places each
    value :math:`y_i` into one of :math:`k` mutually exclusive and exhaustive
    classes.  Each classifer defines the classes based on different criteria,
    but in all cases the following hold for the classifiers in PySAL:

    .. math:: C_j^l < y_i \\le C_j^u \\ \\forall  i \\in C_j

    where :math:`C_j` denotes class :math:`j` which has lower bound
    :math:`C_j^l` and upper bound :math:`C_j^u`.

    Map Classifiers Supported

    * :class:`mapclassify.classifiers.Box_Plot`
    * :class:`mapclassify.classifiers.Equal_Interval`
    * :class:`mapclassify.classifiers.Fisher_Jenks`
    * :class:`mapclassify.classifiers.Fisher_Jenks_Sampled`
    * :class:`mapclassify.classifiers.HeadTail_Breaks`
    * :class:`mapclassify.classifiers.Jenks_Caspall`
    * :class:`mapclassify.classifiers.Jenks_Caspall_Forced`
    * :class:`mapclassify.classifiers.Jenks_Caspall_Sampled`
    * :class:`mapclassify.classifiers.Max_P_Classifier`
    * :class:`mapclassify.classifiers.Maximum_Breaks`
    * :class:`mapclassify.classifiers.Natural_Breaks`
    * :class:`mapclassify.classifiers.Quantiles`
    * :class:`mapclassify.classifiers.Percentiles`
    * :class:`mapclassify.classifiers.Std_Mean`
    * :class:`mapclassify.classifiers.User_Defined`

    Utilities:

    In addition to the classifiers, there are several utility functions that
    can be used to evaluate the properties of a specific classifier,
    or for automatic selection of a classifier and
    number of classes.

    * :func:`mapclassify.classifiers.gadf`
    * :class:`mapclassify.classifiers.K_classifiers`
    """

    def __init__(self, y):
        # subclasses set self.k and any scheme parameters before calling this
        y = np.asarray(y).flatten()
        self.name = 'Map Classifier'
        self.y = y
        self._classify()
        self._summary()

    def _summary(self):
        # derive per-class member lists and the three fit statistics
        yb = self.yb
        self.classes = [np.nonzero(yb == c)[0].tolist() for c in range(self.k)]
        self.tss = self.get_tss()
        self.adcm = self.get_adcm()
        self.gadf = self.get_gadf()

    def _classify(self):
        # template method: subclasses provide _set_bins(); binning is shared
        self._set_bins()
        self.yb, self.counts = bin1d(self.y, self.bins)

    def _update(self, data, *args, **kwargs):
        """
        The only thing that *should* happen in this function is
        1. input sanitization for pandas
        2. classification/reclassification.

        Using their __init__ methods, all classifiers can re-classify given
        different input parameters or additional data.

        If you've got a cleverer updating equation than the intial estimation
        equation, remove the call to self.__init__ below and replace it with
        the updating function.
        """
        if data is not None:
            data = np.asarray(data).flatten()
            data = np.append(data.flatten(), self.y)
        else:
            data = self.y
        self.__init__(data, *args, **kwargs)

    @classmethod
    def make(cls, *args, **kwargs):
        """
        Configure and create a classifier that will consume data and produce
        classifications, given the configuration options specified by this
        function.

        Note that this like a *partial application* of the relevant class
        constructor. `make` creates a function that returns classifications; it
        does not actually do the classification.

        If you want to classify data directly, use the appropriate class
        constructor, like Quantiles, Max_Breaks, etc.

        If you *have* a classifier object, but want to find which bins new data
        falls into, use find_bin.

        Parameters
        ----------
        *args           : required positional arguments
                          all positional arguments required by the classifier,
                          excluding the input data.
        rolling         : bool
                          a boolean configuring the outputted classifier to use
                          a rolling classifier rather than a new classifier for
                          each input. If rolling, this adds the current data to
                          all of the previous data in the classifier, and
                          rebalances the bins, like a running median
                          computation.
        return_object   : bool
                          a boolean configuring the outputted classifier to
                          return the classifier object or not
        return_bins     : bool
                          a boolean configuring the outputted classifier to
                          return the bins/breaks or not
        return_counts   : bool
                          a boolean configuring the outputted classifier to
                          return the histogram of objects falling into each bin
                          or not

        Returns
        -------
        A function that consumes data and returns their bins (and object,
        bins/breaks, or counts, if requested).

        Note
        ----
        This is most useful when you want to run a classifier many times
        with a given configuration, such as when classifying many columns of an
        array or dataframe using the same configuration.
        """
        # only flag overrides return flag
        to_annotate = copy.deepcopy(kwargs)
        return_object = kwargs.pop('return_object', False)
        return_bins = kwargs.pop('return_bins', False)
        return_counts = kwargs.pop('return_counts', False)
        rolling = kwargs.pop('rolling', False)
        if rolling:
            # just initialize a fake classifier
            data = list(range(10))
            cls_instance = cls(data, *args, **kwargs)
            # and empty it, since we'll be using the update
            cls_instance.y = np.array([])
        else:
            cls_instance = None

        # wrap init in a closure to make a consumer.
        # Qc Na: "Objects/Closures are poor man's Closures/Objects"
        def classifier(data, cls_instance=cls_instance):
            if rolling:
                cls_instance.update(data, inplace=True, **kwargs)
                yb = cls_instance.find_bin(data)
            else:
                cls_instance = cls(data, *args, **kwargs)
                yb = cls_instance.yb
            outs = [yb, None, None, None]
            outs[1] = cls_instance if return_object else None
            outs[2] = cls_instance.bins if return_bins else None
            outs[3] = cls_instance.counts if return_counts else None
            outs = [a for a in outs if a is not None]
            if len(outs) == 1:
                return outs[0]
            else:
                return outs

        # for debugging/jic, keep around the kwargs.
        # in future, we might want to make this a thin class, so that we can
        # set a custom repr. Call the class `Binner` or something, that's a
        # pre-configured Classifier that just consumes data, bins it, &
        # possibly updates the bins.
        classifier._options = to_annotate
        return classifier

    def update(self, y=None, inplace=False, **kwargs):
        """
        Add data or change classification parameters.

        Parameters
        ----------
        y : array
            (n,1) array of data to classify
        inplace : bool
            whether to conduct the update in place or to return a copy
            estimated from the additional specifications.

        Additional parameters provided in **kwargs are passed to the init
        function of the class. For documentation, check the class constructor.
        """
        kwargs.update({'k': kwargs.pop('k', self.k)})
        if inplace:
            self._update(y, **kwargs)
        else:
            new = copy.deepcopy(self)
            new._update(y, **kwargs)
            return new

    def __str__(self):
        st = self._table_string()
        return st

    def __repr__(self):
        return self._table_string()

    def __call__(self, *args, **kwargs):
        """
        This will allow the classifier to be called like it's a function.

        Whether or not we want to make this be "find_bin" or "update" is a
        design decision.

        I like this as find_bin, since a classifier's job should be to classify
        the data given to it using the rules estimated from the `_classify()`
        function.
        """
        return self.find_bin(*args)

    def get_tss(self):
        """
        Total sum of squares around class means

        Returns sum of squares over all class means
        """
        tss = 0
        for class_def in self.classes:
            if len(class_def) > 0:
                yc = self.y[class_def]
                css = yc - yc.mean()
                css *= css
                tss += sum(css)
        return tss

    def _set_bins(self):
        # abstract hook: concrete classifiers compute self.bins (and self.k)
        pass

    def get_adcm(self):
        """
        Absolute deviation around class median (ADCM).

        Calculates the absolute deviations of each observation about its class
        median as a measure of fit for the classification method.

        Returns sum of ADCM over all classes
        """
        adcm = 0
        for class_def in self.classes:
            if len(class_def) > 0:
                yc = self.y[class_def]
                yc_med = np.median(yc)
                ycd = np.abs(yc - yc_med)
                adcm += sum(ycd)
        return adcm

    def get_gadf(self):
        """
        Goodness of absolute deviation of fit
        """
        adam = (np.abs(self.y - np.median(self.y))).sum()
        gadf = 1 - self.adcm / adam
        return gadf

    def _table_string(self, width=12, decimal=3):
        # build a fixed-width text table of class intervals and counts;
        # `width` is recomputed from the widest formatted bin value
        fmt = ".%df" % decimal
        fmt = "%" + fmt
        largest = max([len(fmt % i) for i in self.bins])
        width = largest
        fmt = "%d.%df" % (width, decimal)
        fmt = "%" + fmt
        h1 = "Lower"
        h1 = h1.center(largest)
        h2 = " "
        h2 = h2.center(10)
        h3 = "Upper"
        h3 = h3.center(largest + 1)
        largest = "%d" % max(self.counts)
        largest = len(largest) + 15
        h4 = "Count"
        h4 = h4.rjust(largest)
        table = []
        header = h1 + h2 + h3 + h4
        table.append(header)
        table.append("=" * len(header))
        for i, up in enumerate(self.bins):
            if i == 0:
                left = " " * width
                left += "   x[i] <= "
            else:
                left = fmt % self.bins[i - 1]
                left += " < x[i] <= "
            right = fmt % self.bins[i]
            row = left + right
            cnt = "%d" % self.counts[i]
            cnt = cnt.rjust(largest)
            row += cnt
            table.append(row)
        name = self.name
        top = name.center(len(row))
        table.insert(0, top)
        table.insert(1, " ")
        table = "\n".join(table)
        return table

    def find_bin(self, x):
        """
        Sort input or inputs according to the current bin estimate

        Parameters
        ----------
        x : array or numeric
            a value or array of values to fit within the estimated
            bins

        Returns
        -------
        a bin index or array of bin indices that classify the input into one of
        the classifiers' bins.

        Note that this differs from similar functionality in
        numpy.digitize(x, classi.bins, right=True).

        This will always provide the closest bin, so data "outside" the
        classifier, above and below the max/min breaks, will be classified
        into the nearest bin.

        numpy.digitize returns k+1 for data greater than the greatest bin, but
        retains 0 for data below the lowest bin.
        """
        x = np.asarray(x).flatten()
        right = np.digitize(x, self.bins, right=True)
        if right.max() == len(self.bins):
            # clamp values above the top break into the last bin
            right[right == len(self.bins)] = len(self.bins) - 1
        return right
class HeadTail_Breaks(Map_Classifier):
    """
    Head/tail Breaks Map Classification for Heavy-tailed Distributions

    Parameters
    ----------
    y : array
        (n,1), values to classify

    Attributes
    ----------
    yb : array
        (n,1), bin ids for observations,
    bins : array
        (k,1), the upper bounds of each class
    k : int
        the number of classes
    counts : array
        (k,1), the number of observations falling in each class

    Notes
    -----
    Head/tail Breaks is a relatively new classification method developed
    for data with a heavy-tailed distribution. The number of classes is
    determined by the data rather than supplied by the caller.

    Implementation based on contributions by Alessandra Sozzi
    <alessandra.sozzi@gmail.com>.

    For theoretical details see :cite:`Jiang_2013`.
    """

    def __init__(self, y):
        Map_Classifier.__init__(self, y)
        self.name = 'HeadTail_Breaks'

    def _set_bins(self):
        # the recursive helper accumulates the break points for us
        cut_points = headTail_breaks(self.y.copy(), [])
        self.bins = np.array(cut_points)
        self.k = len(self.bins)
class Equal_Interval(Map_Classifier):
    """
    Equal Interval Classification

    Parameters
    ----------
    y : array
        (n,1), values to classify
    k : int
        number of classes required

    Attributes
    ----------
    yb : array
        (n,1), bin ids for observations,
        each value is the id of the class the observation belongs to
        yb[i] = j  for j>=1 if bins[j-1] < y[i] <= bins[j], yb[i] = 0
        otherwise
    bins : array
        (k,1), the upper bounds of each class
    k : int
        the number of classes
    counts : array
        (k,1), the number of observations falling in each class

    Notes
    -----
    Intervals defined to have equal width:

    .. math::

        bins_j = min(y)+w*(j+1)

    with :math:`w=\\frac{max(y)-min(j)}{k}`
    """

    def __init__(self, y, k=K):
        """
        see class docstring
        """
        self.k = k
        Map_Classifier.__init__(self, y)
        self.name = 'Equal Interval'

    def _set_bins(self):
        lo = min(self.y)
        hi = max(self.y)
        width = (hi - lo) * 1. / self.k
        cuts = np.arange(lo + width, hi + width, width)
        if len(cuts) > self.k:
            # floating-point overshoot can yield one cut too many
            cuts = cuts[0:self.k]
            cuts[-1] = hi
        self.bins = cuts.copy()
class Percentiles(Map_Classifier):
    """
    Percentiles Map Classification

    Parameters
    ----------
    y : array
        attribute to classify
    pct : array
        percentiles, default=[1, 10, 50, 90, 99, 100]

    Attributes
    ----------
    yb : array
        bin ids for observations (numpy array n x 1)
    bins : array
        the upper bounds of each class (numpy array k x 1)
    k : int
        the number of classes
    counts : int
        the number of observations falling in each class
        (numpy array k x 1)
    """

    def __init__(self, y, pct=None):
        # BUGFIX: the original signature used a mutable default list
        # (pct=[1, 10, 50, 90, 99, 100]); a None sentinel is equivalent for
        # all callers but cannot be shared/mutated across instances.
        if pct is None:
            pct = [1, 10, 50, 90, 99, 100]
        self.pct = pct
        Map_Classifier.__init__(self, y)
        self.name = 'Percentiles'

    def _set_bins(self):
        y = self.y
        pct = self.pct
        self.bins = np.array([stats.scoreatpercentile(y, p) for p in pct])
        self.k = len(self.bins)

    def update(self, y=None, inplace=False, **kwargs):
        """
        Add data or change classification parameters.

        Parameters
        ----------
        y : array
            (n,1) array of data to classify
        inplace : bool
            whether to conduct the update in place or to return a copy
            estimated from the additional specifications.

        Additional parameters provided in **kwargs are passed to the init
        function of the class. For documentation, check the class constructor.
        """
        kwargs.update({'pct': kwargs.pop('pct', self.pct)})
        if inplace:
            self._update(y, **kwargs)
        else:
            new = copy.deepcopy(self)
            new._update(y, **kwargs)
            return new
class Box_Plot(Map_Classifier):
    """
    Box_Plot Map Classification

    Parameters
    ----------
    y : array
        attribute to classify
    hinge : float
        multiplier for IQR

    Attributes
    ----------
    yb : array
        (n,1), bin ids for observations
    bins : array
        (n,1), the upper bounds of each class (monotonic)
    k : int
        the number of classes
    counts : array
        (k,1), the number of observations falling in each class
    low_outlier_ids : array
        indices of observations that are low outliers
    high_outlier_ids : array
        indices of observations that are high outliers

    Notes
    -----
    The bins are set as follows::

        bins[0] = q[0]-hinge*IQR
        bins[1] = q[0]
        bins[2] = q[1]
        bins[3] = q[2]
        bins[4] = q[2]+hinge*IQR
        bins[5] = inf  (see Notes)

    where q is an array of the first three quartiles of y and
    IQR=q[2]-q[0]

    If q[2]+hinge*IQR > max(y) there will only be 5 classes and no high
    outliers, otherwise, there will be 6 classes and at least one high
    outlier.
    """

    def __init__(self, y, hinge=1.5):
        """
        Parameters
        ----------
        y : array (n,1)
            attribute to classify
        hinge : float
            multiple of inter-quartile range (default=1.5)
        """
        self.hinge = hinge
        Map_Classifier.__init__(self, y)
        self.name = 'Box Plot'

    def _set_bins(self):
        y = self.y
        pct = [25, 50, 75, 100]
        bins = [stats.scoreatpercentile(y, p) for p in pct]
        # inter-quartile range: 75th minus 25th percentile
        iqr = bins[-2] - bins[0]
        self.iqr = iqr
        pivot = self.hinge * iqr
        left_fence = bins[0] - pivot
        right_fence = bins[-2] + pivot
        if right_fence < bins[-1]:
            # high outliers exist: keep max(y) as the final (6th) bound
            bins.insert(-1, right_fence)
        else:
            # no high outliers: the fence itself caps the last (5th) bin
            bins[-1] = right_fence
        bins.insert(0, left_fence)
        self.bins = np.array(bins)
        self.k = len(bins)

    def _classify(self):
        Map_Classifier._classify(self)
        # bin 0 is below the low fence; bin 5 (if present) above the high one
        self.low_outlier_ids = np.nonzero(self.yb == 0)[0]
        self.high_outlier_ids = np.nonzero(self.yb == 5)[0]

    def update(self, y=None, inplace=False, **kwargs):
        """
        Add data or change classification parameters.

        Parameters
        ----------
        y : array
            (n,1) array of data to classify
        inplace : bool
            whether to conduct the update in place or to return a
            copy estimated from the additional specifications.

        Additional parameters provided in **kwargs are passed to the init
        function of the class. For documentation, check the class constructor.
        """
        kwargs.update({'hinge': kwargs.pop('hinge', self.hinge)})
        if inplace:
            self._update(y, **kwargs)
        else:
            new = copy.deepcopy(self)
            new._update(y, **kwargs)
            return new
class Quantiles(Map_Classifier):
    """
    Quantile Map Classification

    Each class holds (approximately) the same number of observations.

    Parameters
    ----------
    y : array
        (n,1), values to classify
    k : int
        number of classes required

    Attributes
    ----------
    yb : array
        (n,1), bin ids for observations; yb[i] = j for j>=1 if
        bins[j-1] < y[i] <= bins[j], yb[i] = 0 otherwise
    bins : array
        (k,1), the upper bounds of each class
    k : int
        the number of classes
    counts : array
        (k,1), the number of observations falling in each class

    Examples
    --------
    >>> import mapclassify as mc
    >>> cal = mc.load_example()
    >>> q = mc.Quantiles(cal, k = 5)
    >>> q.bins
    array([1.46400e+00, 5.79800e+00, 1.32780e+01, 5.46160e+01, 4.11145e+03])
    >>> q.counts
    array([12, 11, 12, 11, 12])
    """

    def __init__(self, y, k=K):
        self.k = k
        Map_Classifier.__init__(self, y)
        self.name = 'Quantiles'

    def _set_bins(self):
        # Delegate directly to the module-level quantile helper.
        self.bins = quantile(self.y, k=self.k)
class Std_Mean(Map_Classifier):
    """
    Standard Deviation and Mean Map Classification

    Class breaks are placed at the sample mean plus/minus the requested
    multiples of the sample standard deviation.

    Parameters
    ----------
    y : array
        (n,1), values to classify
    multiples : array
        the multiples of the standard deviation to add/subtract from
        the sample mean to define the bins, default=[-2,-1,1,2]

    Attributes
    ----------
    yb : array
        (n,1), bin ids for observations
    bins : array
        (k,1), the upper bounds of each class
    k : int
        the number of classes
    counts : array
        (k,1), the number of observations falling in each class

    Examples
    --------
    >>> import mapclassify as mc
    >>> cal = mc.load_example()
    >>> st = mc.Std_Mean(cal)
    >>> st.k
    5
    >>> st.bins
    array([-967.36235382, -420.71712519,  672.57333208, 1219.21856072,
           4111.45      ])
    >>> st.counts
    array([ 0,  0, 56,  1,  1])
    >>>
    >>> st3 = mc.Std_Mean(cal, multiples = [-3, -1.5, 1.5, 3])
    >>> st3.bins
    array([-1514.00758246,  -694.03973951,   945.8959464 ,  1765.86378936,
            4111.45      ])
    >>> st3.counts
    array([ 0,  0, 57,  0,  1])
    """

    def __init__(self, y, multiples=[-2, -1, 1, 2]):
        self.multiples = multiples
        Map_Classifier.__init__(self, y)
        self.name = 'Std_Mean'

    def _set_bins(self):
        data = self.y
        sigma = data.std(ddof=1)
        mu = data.mean()
        cutoffs = [mu + sigma * m for m in self.multiples]
        top = data.max()
        # Ensure the final class covers the maximum of the data.
        if cutoffs[-1] < top:
            cutoffs.append(top)
        self.bins = np.array(cutoffs)
        self.k = len(cutoffs)

    def update(self, y=None, inplace=False, **kwargs):
        """
        Add data or change classification parameters.

        Parameters
        ----------
        y : array
            (n,1) array of data to classify
        inplace : bool
            whether to conduct the update in place or to return a copy
            estimated from the additional specifications.

        Additional parameters provided in **kwargs are passed to the init
        function of the class. For documentation, check the class constructor.
        """
        kwargs.setdefault('multiples', self.multiples)
        if inplace:
            self._update(y, **kwargs)
            return None
        clone = copy.deepcopy(self)
        clone._update(y, **kwargs)
        return clone
class Maximum_Breaks(Map_Classifier):
    """
    Maximum Breaks Map Classification

    Class boundaries are the midpoints of the k-1 largest gaps between
    adjacent sorted values.

    Parameters
    ----------
    y : array
        (n, 1), values to classify
    k : int
        number of classes required
    mindiff : float
        The minimum difference between class breaks

    Attributes
    ----------
    yb : array
        (n, 1), bin ids for observations
    bins : array
        (k, 1), the upper bounds of each class
    k : int
        the number of classes
    counts : array
        (k, 1), the number of observations falling in each class (numpy
        array k x 1)

    Examples
    --------
    >>> import mapclassify as mc
    >>> cal = mc.load_example()
    >>> mb = mc.Maximum_Breaks(cal, k = 5)
    >>> mb.k
    5
    >>> mb.bins
    array([ 146.005,  228.49 ,  546.675, 2417.15 , 4111.45 ])
    >>> mb.counts
    array([50,  2,  4,  1,  1])
    """

    def __init__(self, y, k=5, mindiff=0):
        self.k = k
        self.mindiff = mindiff
        Map_Classifier.__init__(self, y)
        self.name = 'Maximum_Breaks'

    def _set_bins(self):
        # Sort the data, take the k-1 largest adjacent gaps exceeding
        # mindiff, and place a boundary at the midpoint of each gap.
        xs = self.y.copy()
        k = self.k
        xs.sort()
        min_diff = self.mindiff
        d = xs[1:] - xs[:-1]
        diffs = d[np.nonzero(d > min_diff)]
        # Fix: sp.unique was a deprecated alias removed from modern SciPy;
        # np.unique is the function it forwarded to.
        diffs = np.unique(diffs)
        k1 = k - 1
        if len(diffs) > k1:
            diffs = diffs[-k1:]
        mp = []
        self.cids = []
        for diff in diffs:
            ids = np.nonzero(d == diff)
            for idx in ids:  # renamed from `id` (shadowed the builtin)
                self.cids.append(idx[0])
                cp = ((xs[idx] + xs[idx + 1]) / 2.)
                mp.append(cp[0])
        mp.append(xs[-1])
        mp.sort()
        self.bins = np.array(mp)

    def update(self, y=None, inplace=False, **kwargs):
        """
        Add data or change classification parameters.

        Parameters
        ----------
        y : array
            (n,1) array of data to classify
        inplace : bool
            whether to conduct the update in place or to return a copy
            estimated from the additional specifications.

        Additional parameters provided in **kwargs are passed to the init
        function of the class. For documentation, check the class constructor.
        """
        kwargs.update({'k': kwargs.pop('k', self.k)})
        kwargs.update({'mindiff': kwargs.pop('mindiff', self.mindiff)})
        if inplace:
            self._update(y, **kwargs)
        else:
            new = copy.deepcopy(self)
            new._update(y, **kwargs)
            return new
class Natural_Breaks(Map_Classifier):
    """
    Natural Breaks Map Classification

    One-dimensional k-means clustering of the attribute values.

    Parameters
    ----------
    y : array
        (n,1), values to classify
    k : int
        number of classes required
    initial : int
        number of initial solutions to generate, (default=100)

    Attributes
    ----------
    yb : array
        (n,1), bin ids for observations
    bins : array
        (k,1), the upper bounds of each class
    k : int
        the number of classes
    counts : array
        (k,1), the number of observations falling in each class

    Examples
    --------
    >>> import numpy as np
    >>> import mapclassify as mc
    >>> np.random.seed(123456)
    >>> cal = mc.load_example()
    >>> nb = mc.Natural_Breaks(cal, k=5)
    >>> nb.k
    5
    >>> nb.counts
    array([41,  9,  6,  1,  1])
    >>> nb.bins
    array([  29.82,  110.74,  370.5 ,  722.85, 4111.45])

    Notes
    -----
    There is a tradeoff here between speed and consistency of the
    classification. If you want more speed, set initial to a smaller value;
    if you want more consistent classes in multiple runs on the same data,
    set initial to a higher value.
    """

    def __init__(self, y, k=K, initial=100):
        self.k = k
        self.initial = initial
        Map_Classifier.__init__(self, y)
        self.name = 'Natural_Breaks'

    def _set_bins(self):
        data = self.y.copy()
        target_k = self.k
        unique_vals = np.unique(np.array(data))
        n_unique = len(unique_vals)
        if n_unique < target_k:
            Warn('Warning: Not enough unique values in array to form k classes',
                 UserWarning)
            Warn("Warning: setting k to %d" % n_unique, UserWarning)
            # Fewer unique values than classes: each unique value is its own
            # bin (np.unique already returns them sorted); no clustering run.
            self.bins = unique_vals
            self.k = n_unique
        else:
            # Take one initial solution, then `initial` further restarts.
            best = natural_breaks(data, target_k)
            baseline_fit = best[2]
            # NOTE(review): as in the original, the comparison baseline is
            # the *first* solution's fit and is never tightened, so `best`
            # ends up as the last restart that beat the baseline -- confirm
            # whether a running best was intended.
            for _ in range(self.initial):
                candidate = natural_breaks(data, target_k)
                if candidate[2] < baseline_fit:
                    best = candidate
            self.bins = np.array(best[-1])
            self.k = len(self.bins)

    def update(self, y=None, inplace=False, **kwargs):
        """
        Add data or change classification parameters.

        Parameters
        ----------
        y : array
            (n,1) array of data to classify
        inplace : bool
            whether to conduct the update in place or to return a
            copy estimated from the additional specifications.

        Additional parameters provided in **kwargs are passed to the init
        function of the class. For documentation, check the class constructor.
        """
        kwargs.setdefault('k', self.k)
        kwargs.setdefault('initial', self.initial)
        if inplace:
            self._update(y, **kwargs)
            return None
    
        fresh = copy.deepcopy(self)
        fresh._update(y, **kwargs)
        return fresh
class Fisher_Jenks(Map_Classifier):
    """
    Fisher Jenks optimal classifier - mean based

    Parameters
    ----------
    y : array
        (n,1), values to classify
    k : int
        number of classes required

    Attributes
    ----------
    yb : array
        (n,1), bin ids for observations
    bins : array
        (k,1), the upper bounds of each class
    k : int
        the number of classes
    counts : array
        (k,1), the number of observations falling in each class

    Examples
    --------
    >>> import mapclassify as mc
    >>> cal = mc.load_example()
    >>> fj = mc.Fisher_Jenks(cal)
    >>> fj.adcm
    799.24
    >>> fj.bins
    array([  75.29,  192.05,  370.5 ,  722.85, 4111.45])
    >>> fj.counts
    array([49,  3,  4,  1,  1])
    """

    def __init__(self, y, k=K):
        # The optimizer cannot form k classes from fewer than k values.
        if len(np.unique(y)) < k:
            raise ValueError("Fewer unique values than specified classes.")
        self.k = k
        Map_Classifier.__init__(self, y)
        self.name = "Fisher_Jenks"

    def _set_bins(self):
        # Skip the first returned value, keeping the k class bounds.
        means = _fisher_jenks_means(self.y.copy(), classes=self.k)
        self.bins = np.array(means[1:])
class Fisher_Jenks_Sampled(Map_Classifier):
    """
    Fisher Jenks optimal classifier - mean based using random sample

    Parameters
    ----------
    y : array
        (n,1), values to classify
    k : int
        number of classes required
    pct : float
        The percentage of n that should form the sample
        If pct is specified such that n*pct > 1000, then
        pct = 1000./n, unless truncate is False
    truncate : boolean
        truncate pct in cases where pct * n > 1000., (Default True)

    Attributes
    ----------
    yb : array
        (n,1), bin ids for observations
    bins : array
        (k,1), the upper bounds of each class
    k : int
        the number of classes
    counts : array
        (k,1), the number of observations falling in each class

    Examples
    --------
    (Turned off due to timing being different across hardware)

    For theoretical details see :cite:`Rey_2016`.
    """

    def __init__(self, y, k=K, pct=0.10, truncate=True):
        self.k = k
        n = y.size
        if (pct * n > 1000) and truncate:
            pct = 1000. / n
        # Fix: np.random.random_integers was deprecated and removed from
        # NumPy; randint draws from [low, high), matching the old
        # random_integers(0, n - 1, size) call exactly.
        ids = np.random.randint(0, n, int(n * pct))
        yr = y[ids]
        yr[-1] = max(y)  # make sure we have the upper bound
        yr[0] = min(y)  # make sure we have the min
        self.original_y = y
        self.pct = pct
        self._truncated = truncate
        self.yr = yr
        self.yr_n = yr.size
        Map_Classifier.__init__(self, yr)
        # Re-bin the full data on the bins fitted to the sample.
        self.yb, self.counts = bin1d(y, self.bins)
        self.name = "Fisher_Jenks_Sampled"
        self.y = y
        self._summary()  # have to recalculate summary stats

    def _set_bins(self):
        fj = Fisher_Jenks(self.y, self.k)
        self.bins = fj.bins

    def update(self, y=None, inplace=False, **kwargs):
        """
        Add data or change classification parameters.

        Parameters
        ----------
        y : array
            (n,1) array of data to classify
        inplace : bool
            whether to conduct the update in place or to return a
            copy estimated from the additional specifications.

        Additional parameters provided in **kwargs are passed to the init
        function of the class. For documentation, check the class constructor.
        """
        kwargs.update({'k': kwargs.pop('k', self.k)})
        kwargs.update({'pct': kwargs.pop('pct', self.pct)})
        kwargs.update({'truncate': kwargs.pop('truncate', self._truncated)})
        if inplace:
            self._update(y, **kwargs)
        else:
            new = copy.deepcopy(self)
            new._update(y, **kwargs)
            return new
class Jenks_Caspall(Map_Classifier):
    """
    Jenks Caspall Map Classification

    Iteratively reassigns observations to the class whose median is
    nearest, starting from a quantile classification.

    Parameters
    ----------
    y : array
        (n,1), values to classify
    k : int
        number of classes required

    Attributes
    ----------
    yb : array
        (n,1), bin ids for observations
    bins : array
        (k,1), the upper bounds of each class
    k : int
        the number of classes
    counts : array
        (k,1), the number of observations falling in each class

    Examples
    --------
    >>> import mapclassify as mc
    >>> cal = mc.load_example()
    >>> jc = mc.Jenks_Caspall(cal, k = 5)
    >>> jc.bins
    array([1.81000e+00, 7.60000e+00, 2.98200e+01, 1.81270e+02, 4.11145e+03])
    >>> jc.counts
    array([14, 13, 14, 10,  7])
    """

    def __init__(self, y, k=K):
        self.k = k
        Map_Classifier.__init__(self, y)
        self.name = "Jenks_Caspall"

    def _set_bins(self):
        x = self.y.copy()
        k = self.k
        # start with quantiles
        q = quantile(x, k)
        solving = True
        xb, cnts = bin1d(x, q)
        # class medians seed the iteration
        if x.ndim == 1:
            x.shape = (x.size, 1)
        n, k = x.shape
        xm = [np.median(x[xb == i]) for i in np.unique(xb)]
        xb0 = xb.copy()
        q = xm
        it = 0
        rk = list(range(self.k))
        while solving:
            xb = np.zeros(xb0.shape, int)
            # distance of every observation to every class median
            d = abs(x - q)
            xb = d.argmin(axis=1)
            if (xb0 == xb).all():
                solving = False
            else:
                xb0 = xb
            it += 1
            q = np.array([np.median(x[xb == i]) for i in rk])
        # Fix: sp.unique was a deprecated NumPy alias removed from modern
        # SciPy; np.unique is the function it forwarded to.
        cuts = np.array([max(x[xb == i]) for i in np.unique(xb)])
        cuts.shape = (len(cuts), )
        self.bins = cuts
        self.iterations = it
class Jenks_Caspall_Sampled(Map_Classifier):
    """
    Jenks Caspall Map Classification using a random sample

    Parameters
    ----------
    y : array
        (n,1), values to classify
    k : int
        number of classes required
    pct : float
        The percentage of n that should form the sample
        If pct is specified such that n*pct > 1000, then pct = 1000./n

    Attributes
    ----------
    yb : array
        (n,1), bin ids for observations
    bins : array
        (k,1), the upper bounds of each class
    k : int
        the number of classes
    counts : array
        (k,1), the number of observations falling in each class

    Notes
    -----
    This is intended for large n problems. The logic is to apply
    Jenks_Caspall to a random subset of the y space and then bin the
    complete vector y on the bins obtained from the subset. This would
    trade off some "accuracy" for a gain in speed.
    """

    def __init__(self, y, k=K, pct=0.10):
        self.k = k
        n = y.size
        if pct * n > 1000:
            pct = 1000. / n
        # Fix: np.random.random_integers was deprecated and removed from
        # NumPy; randint draws from [low, high), matching the old
        # random_integers(0, n - 1, size) call exactly.
        ids = np.random.randint(0, n, int(n * pct))
        yr = y[ids]
        # NOTE(review): this overwrites the first *sampled* value with the
        # global maximum (per the original comment "upper bound"); the
        # minimum is not forced into the sample -- confirm intent.
        yr[0] = max(y)  # make sure we have the upper bound
        self.original_y = y
        self.pct = pct
        self.yr = yr
        self.yr_n = yr.size
        Map_Classifier.__init__(self, yr)
        # Re-bin the full data on the bins fitted to the sample.
        self.yb, self.counts = bin1d(y, self.bins)
        self.name = "Jenks_Caspall_Sampled"
        self.y = y
        self._summary()  # have to recalculate summary stats

    def _set_bins(self):
        jc = Jenks_Caspall(self.y, self.k)
        self.bins = jc.bins
        self.iterations = jc.iterations

    def update(self, y=None, inplace=False, **kwargs):
        """
        Add data or change classification parameters.

        Parameters
        ----------
        y : array
            (n,1) array of data to classify
        inplace : bool
            whether to conduct the update in place or to return a
            copy estimated from the additional specifications.

        Additional parameters provided in **kwargs are passed to the init
        function of the class. For documentation, check the class constructor.
        """
        kwargs.update({'k': kwargs.pop('k', self.k)})
        kwargs.update({'pct': kwargs.pop('pct', self.pct)})
        if inplace:
            self._update(y, **kwargs)
        else:
            new = copy.deepcopy(self)
            new._update(y, **kwargs)
            return new
class Jenks_Caspall_Forced(Map_Classifier):
    """
    Jenks Caspall Map Classification with forced movements

    Starting from a quantile classification, observations at class edges
    are moved to a neighboring class whenever doing so lowers the total
    sum of squared deviations from class means.

    Parameters
    ----------
    y : array
        (n,1), values to classify
    k : int
        number of classes required

    Attributes
    ----------
    yb : array
        (n,1), bin ids for observations
    bins : array
        (k,1), the upper bounds of each class
    k : int
        the number of classes
    counts : array
        (k,1), the number of observations falling in each class

    Examples
    --------
    >>> import mapclassify as mc
    >>> cal = mc.load_example()
    >>> jcf = mc.Jenks_Caspall_Forced(cal, k = 5)
    >>> jcf.k
    5
    >>> jcf.counts
    array([12, 12, 13,  9, 12])
    >>> jcf4 = mc.Jenks_Caspall_Forced(cal, k = 4)
    >>> jcf4.k
    4
    >>> jcf4.counts
    array([15, 14, 14, 15])
    """

    def __init__(self, y, k=K):
        self.k = k
        Map_Classifier.__init__(self, y)
        self.name = "Jenks_Caspall_Forced"

    def _set_bins(self):
        # Note: the only functional change relative to the previous version
        # is np.unique replacing sp.unique (a deprecated NumPy alias that
        # modern SciPy no longer provides).
        x = self.y.copy()
        k = self.k
        q = quantile(x, k)
        solving = True
        xb, cnt = bin1d(x, q)
        # class means
        if x.ndim == 1:
            x.shape = (x.size, 1)
        n, tmp = x.shape
        xm = [x[xb == i].mean() for i in np.unique(xb)]
        q = xm
        xbar = np.array([xm[xbi] for xbi in xb])
        xbar.shape = (n, 1)
        ss = x - xbar
        ss *= ss
        ss = sum(ss)
        down_moves = up_moves = 0
        solving = True
        it = 0
        while solving:
            # try upward moves first: shift the largest member of a class
            # into the class above when that lowers the total SS
            moving_up = True
            while moving_up:
                class_ids = np.unique(xb)
                nk = [sum(xb == j) for j in class_ids]
                candidates = nk[:-1]
                i = 0
                up_moves = 0
                while candidates:
                    nki = candidates.pop(0)
                    if nki > 1:
                        ids = np.nonzero(xb == class_ids[i])
                        mover = max(ids[0])
                        tmp = xb.copy()
                        tmp[mover] = xb[mover] + 1
                        tm = [x[tmp == j].mean() for j in np.unique(tmp)]
                        txbar = np.array([tm[xbi] for xbi in tmp])
                        txbar.shape = (n, 1)
                        tss = x - txbar
                        tss *= tss
                        tss = sum(tss)
                        if tss < ss:
                            xb = tmp
                            ss = tss
                            candidates = []
                            up_moves += 1
                    i += 1
                if not up_moves:
                    moving_up = False
            # then try downward moves symmetrically
            moving_down = True
            while moving_down:
                class_ids = np.unique(xb)
                nk = [sum(xb == j) for j in class_ids]
                candidates = nk[1:]
                i = 1
                down_moves = 0
                while candidates:
                    nki = candidates.pop(0)
                    if nki > 1:
                        ids = np.nonzero(xb == class_ids[i])
                        mover = min(ids[0])
                        mover_class = xb[mover]
                        target_class = mover_class - 1
                        tmp = xb.copy()
                        tmp[mover] = target_class
                        tm = [x[tmp == j].mean() for j in np.unique(tmp)]
                        txbar = np.array([tm[xbi] for xbi in tmp])
                        txbar.shape = (n, 1)
                        tss = x - txbar
                        tss *= tss
                        tss = sum(tss)
                        if tss < ss:
                            xb = tmp
                            ss = tss
                            candidates = []
                            down_moves += 1
                    i += 1
                if not down_moves:
                    moving_down = False
            # stop when a full pass makes no move in either direction
            if not up_moves and not down_moves:
                solving = False
            it += 1
        cuts = [max(x[xb == c]) for c in np.unique(xb)]
        self.bins = np.array(cuts)
        self.iterations = it
class User_Defined(Map_Classifier):
    """
    User Specified Binning

    Parameters
    ----------
    y : array
        (n,1), values to classify
    bins : array
        (k,1), upper bounds of classes (have to be monotically increasing)

    Attributes
    ----------
    yb : array
        (n,1), bin ids for observations
    bins : array
        (k,1), the upper bounds of each class
    k : int
        the number of classes
    counts : array
        (k,1), the number of observations falling in each class

    Examples
    --------
    >>> import mapclassify as mc
    >>> cal = mc.load_example()
    >>> bins = [20, max(cal)]
    >>> bins
    [20, 4111.45]
    >>> ud = mc.User_Defined(cal, bins)
    >>> ud.bins
    array([  20.  , 4111.45])
    >>> ud.counts
    array([37, 21])
    >>> bins = [20, 30]
    >>> ud = mc.User_Defined(cal, bins)
    >>> ud.bins
    array([  20.  ,   30.  , 4111.45])
    >>> ud.counts
    array([37,  4, 17])

    Notes
    -----
    If upper bound of user bins does not exceed max(y) we append an
    additional bin.
    """

    def __init__(self, y, bins):
        # Fix: work on a copy so the caller's list is never mutated, and so
        # array-like bins (e.g. self.bins, an ndarray, passed back in by
        # _update) are supported -- ndarray has no append().
        bins = list(bins)
        if bins[-1] < max(y):
            bins.append(max(y))
        self.k = len(bins)
        self.bins = np.array(bins)
        self.y = y
        Map_Classifier.__init__(self, y)
        self.name = 'User Defined'

    def _set_bins(self):
        # bins are supplied by the user in __init__; nothing to compute
        pass

    def _update(self, y=None, bins=None):
        if y is not None:
            if hasattr(y, 'values'):
                y = y.values
            y = np.append(y.flatten(), self.y)
        else:
            y = self.y
        if bins is None:
            bins = self.bins
        self.__init__(y, bins)

    def update(self, y=None, inplace=False, **kwargs):
        """
        Add data or change classification parameters.

        Parameters
        ----------
        y : array
            (n,1) array of data to classify
        inplace : bool
            whether to conduct the update in place or to return a
            copy estimated from the additional specifications.

        Additional parameters provided in **kwargs are passed to the init
        function of the class. For documentation, check the class constructor.
        """
        bins = kwargs.pop('bins', self.bins)
        if inplace:
            self._update(y=y, bins=bins, **kwargs)
        else:
            new = copy.deepcopy(self)
            new._update(y, bins, **kwargs)
            return new
class Max_P_Classifier(Map_Classifier):
    """
    Max_P Map Classification

    Based on Max_p regionalization algorithm

    Parameters
    ----------
    y : array
        (n,1), values to classify
    k : int
        number of classes required
    initial : int
        number of initial solutions to use prior to swapping

    Attributes
    ----------
    yb : array
        (n,1), bin ids for observations
    bins : array
        (k,1), the upper bounds of each class
    k : int
        the number of classes
    counts : array
        (k,1), the number of observations falling in each class

    Examples
    --------
    >>> import mapclassify as mc
    >>> cal = mc.load_example()
    >>> mp = mc.Max_P_Classifier(cal)
    >>> mp.bins
    array([   8.7 ,   20.47,   36.68,  110.74, 4111.45])
    >>> mp.counts
    array([29,  9,  5,  7,  8])
    """

    def __init__(self, y, k=K, initial=1000):
        self.k = k
        self.initial = initial
        Map_Classifier.__init__(self, y)
        self.name = "Max_P"

    def _set_bins(self):
        x = self.y.copy()
        k = self.k
        q = quantile(x, k)
        if x.ndim == 1:
            x.shape = (x.size, 1)
        n, tmp = x.shape
        x.sort(axis=0)
        # find best of initial solutions
        solution = 0
        best_tss = x.var() * x.shape[0]
        tss_all = np.zeros((self.initial, 1))
        while solution < self.initial:
            remaining = list(range(n))
            # seed each class with the observation closest to each quantile
            seeds = [
                np.nonzero(di == min(di))[0][0]
                for di in [np.abs(x - qi) for qi in q]
            ]
            rseeds = np.random.permutation(list(range(k))).tolist()
            [remaining.remove(seed) for seed in seeds]
            self.classes = classes = []
            [classes.append([seed]) for seed in seeds]
            # grow each class outward over contiguous (sorted) neighbors
            while rseeds:
                seed_id = rseeds.pop()
                current = classes[seed_id]
                growing = True
                while growing:
                    current = classes[seed_id]
                    low = current[0]
                    high = current[-1]
                    left = low - 1
                    right = high + 1
                    move_made = False
                    if left in remaining:
                        current.insert(0, left)
                        remaining.remove(left)
                        move_made = True
                    if right in remaining:
                        current.append(right)
                        remaining.remove(right)
                        move_made = True
                    if move_made:
                        classes[seed_id] = current
                    else:
                        growing = False
            tss = _fit(self.y, classes)
            tss_all[solution] = tss
            if tss < best_tss:
                best_solution = classes
                best_it = solution
                best_tss = tss
            solution += 1
        classes = best_solution
        self.best_it = best_it
        self.tss = best_tss
        self.a2c = a2c = {}
        self.tss_all = tss_all
        for r, cl in enumerate(classes):
            for a in cl:
                a2c[a] = r
        # local improvement: swap edge members between neighboring classes
        # while any swap lowers the total sum of squares
        swapping = True
        while swapping:
            rseeds = np.random.permutation(list(range(k))).tolist()
            total_moves = 0
            while rseeds:
                cid = rseeds.pop()  # renamed from `id` (shadowed the builtin)
                growing = True
                # NOTE(review): total_moves is reset here inside the loop,
                # so only the last seed's moves decide the outer stopping
                # test -- preserved as-is; confirm intent before changing.
                total_moves = 0
                while growing:
                    target = classes[cid]
                    left = target[0] - 1
                    right = target[-1] + 1
                    n_moves = 0
                    if left in a2c:
                        left_class = classes[a2c[left]]
                        if len(left_class) > 1:
                            a = left_class[-1]
                            if self._swap(left_class, target, a):
                                target.insert(0, a)
                                left_class.remove(a)
                                a2c[a] = cid
                                n_moves += 1
                    if right in a2c:
                        right_class = classes[a2c[right]]
                        if len(right_class) > 1:
                            a = right_class[0]
                            if self._swap(right_class, target, a):
                                target.append(a)
                                right_class.remove(a)
                                n_moves += 1
                                a2c[a] = cid
                    if not n_moves:
                        growing = False
                    total_moves += n_moves
            if not total_moves:
                swapping = False
        xs = self.y.copy()
        xs.sort()
        self.bins = np.array([xs[cl][-1] for cl in classes])

    def _ss(self, class_def):
        """Calculate the sum of squared deviations for one class."""
        yc = self.y[class_def]
        css = yc - yc.mean()
        css *= css
        return sum(css)

    def _swap(self, class1, class2, a):
        """Evaluate moving observation `a` from class1 to class2.

        Returns True when the move does not increase the combined sum of
        squares of the two classes.
        """
        ss1 = self._ss(class1)
        ss2 = self._ss(class2)
        tss1 = ss1 + ss2
        class1c = copy.copy(class1)
        class2c = copy.copy(class2)
        class1c.remove(a)
        class2c.append(a)
        ss1 = self._ss(class1c)
        ss2 = self._ss(class2c)
        tss2 = ss1 + ss2
        if tss1 < tss2:
            return False
        else:
            return True

    def update(self, y=None, inplace=False, **kwargs):
        """
        Add data or change classification parameters.

        Parameters
        ----------
        y : array
            (n,1) array of data to classify
        inplace : bool
            whether to conduct the update in place or to return a
            copy estimated from the additional specifications.

        Additional parameters provided in **kwargs are passed to the init
        function of the class. For documentation, check the class constructor.
        """
        kwargs.update({'initial': kwargs.pop('initial', self.initial)})
        # Fix: the previous implementation passed an undefined name `bins`
        # to _update here, raising NameError whenever update() was called.
        if inplace:
            self._update(y, **kwargs)
        else:
            new = copy.deepcopy(self)
            new._update(y, **kwargs)
            return new
def _fit(y, classes):
"""Calculate the total sum of squares for a vector y classified into
classes
Parameters
----------
y : array
(n,1), variable to be classified
classes : array
(k,1), integer values denoting class membership
"""
tss = 0
for class_def in classes:
yc = y[class_def]
css = yc - yc.mean()
css *= css
tss += sum(css)
return tss
# Registry of k-classifiers evaluated by gadf() and K_classifiers; keys are
# the names accepted via their `method` argument.
kmethods = {}
kmethods["Quantiles"] = Quantiles
kmethods["Fisher_Jenks"] = Fisher_Jenks
kmethods['Natural_Breaks'] = Natural_Breaks
kmethods['Maximum_Breaks'] = Maximum_Breaks
def gadf(y, method="Quantiles", maxk=15, pct=0.8):
    r"""
    Evaluate the Goodness of Absolute Deviation Fit of a Classifier
    Finds the minimum value of k for which gadf>pct

    Parameters
    ----------
    y : array
        (n, 1) values to be classified
    method : {'Quantiles, 'Fisher_Jenks', 'Maximum_Breaks', 'Natrual_Breaks'}
    maxk : int
        maximum value of k to evaluate
    pct : float
        The percentage of GADF to exceed

    Returns
    -------
    k : int
        number of classes
    cl : object
        instance of the classifier at k
    gadf : float
        goodness of absolute deviation fit

    Examples
    --------
    >>> import mapclassify as mc
    >>> cal = mc.load_example()
    >>> qgadf = mc.classifiers.gadf(cal)
    >>> qgadf[0]
    15
    >>> qgadf[-1]
    0.3740257590909283

    Quantiles fail to exceed 0.80 before 15 classes. If we lower the bar to
    0.2 we see quintiles as a result

    >>> qgadf2 = mc.classifiers.gadf(cal, pct = 0.2)
    >>> qgadf2[0]
    5
    >>> qgadf2[-1]
    0.21710231966462412

    Notes
    -----
    The GADF is defined as:

    .. math::

        GADF = 1 - \sum_c \sum_{i \in c}
               |y_i - y_{c,med}|  / \sum_i |y_i - y_{med}|

    where :math:`y_{med}` is the global median and :math:`y_{c,med}` is
    the median for class :math:`c`.

    See Also
    --------
    K_classifiers
    """
    # Fix: the docstring is now a raw string; \sum / \in were invalid escape
    # sequences in a normal string literal (SyntaxWarning on Python 3.12+).
    y = np.array(y)
    # total absolute deviation about the global median
    adam = (np.abs(y - np.median(y))).sum()
    for k in range(2, maxk + 1):
        cl = kmethods[method](y, k)
        gadf = 1 - cl.adcm / adam
        if gadf > pct:
            break
    return (k, cl, gadf)
class K_classifiers(object):
    """
    Evaluate all k-classifers and pick optimal based on k and GADF

    Parameters
    ----------
    y : array
        (n,1), values to be classified
    pct : float
        The percentage of GADF to exceed

    Attributes
    ----------
    best : object
        instance of the optimal Map_Classifier
    results : dictionary
        keys are classifier names, values are the Map_Classifier
        instances with the best pct for each classifer

    Examples
    --------
    >>> import mapclassify as mc
    >>> cal = mc.load_example()
    >>> ks = mc.classifiers.K_classifiers(cal)
    >>> ks.best.name
    'Fisher_Jenks'
    >>> ks.best.k
    4
    >>> ks.best.gadf
    0.8481032719908105

    Notes
    -----
    This can be used to suggest a classification scheme.

    See Also
    --------
    gadf
    """
    def __init__(self, y, pct=0.8):
        results = {}
        # gadf() returns (k, classifier, gadf)
        best = gadf(y, "Fisher_Jenks", maxk=len(y) - 1, pct=pct)
        # NOTE(review): best[0] is k and best[-1] is the gadf value, so the
        # names pct0/k0 below appear swapped; the comparison further down
        # therefore mixes k with gadf. Preserved as-is -- confirm intent.
        pct0 = best[0]
        k0 = best[-1]
        keys = list(kmethods.keys())
        keys.remove("Fisher_Jenks")
        results["Fisher_Jenks"] = best
        for method in keys:
            results[method] = gadf(y, method, maxk=len(y) - 1, pct=pct)
            k1 = results[method][0]
            pct1 = results[method][-1]
            if (k1 < k0) or (k1 == k0 and pct0 < pct1):
                best = results[method]
                k0 = k1
                pct0 = pct1
        self.results = results
        # expose the winning classifier instance from the (k, cl, gadf) tuple
        self.best = best[1]
|
pysal/mapclassify
|
mapclassify/classifiers.py
|
natural_breaks
|
python
|
def natural_breaks(values, k=5):
values = np.array(values)
uv = np.unique(values)
uvk = len(uv)
if uvk < k:
Warn('Warning: Not enough unique values in array to form k classes',
UserWarning)
Warn('Warning: setting k to %d' % uvk, UserWarning)
k = uvk
kres = _kmeans(values, k)
sids = kres[-1] # centroids
fit = kres[-2]
class_ids = kres[0]
cuts = kres[1]
return (sids, class_ids, fit, cuts)
|
natural breaks helper function
Jenks natural breaks is kmeans in one dimension
|
train
|
https://github.com/pysal/mapclassify/blob/5b22ec33f5802becf40557614d90cd38efa1676e/mapclassify/classifiers.py#L313-L332
|
[
"def _kmeans(y, k=5):\n \"\"\"\n Helper function to do kmeans in one dimension\n \"\"\"\n\n y = y * 1. # KMEANS needs float or double dtype\n centroids = KMEANS(y, k)[0]\n centroids.sort()\n try:\n class_ids = np.abs(y - centroids).argmin(axis=1)\n except:\n class_ids = np.abs(y[:, np.newaxis] - centroids).argmin(axis=1)\n\n uc = np.unique(class_ids)\n cuts = np.array([y[class_ids == c].max() for c in uc])\n y_cent = np.zeros_like(y)\n for c in uc:\n y_cent[class_ids == c] = centroids[c]\n diffs = y - y_cent\n diffs *= diffs\n\n return class_ids, cuts, diffs.sum(), centroids\n"
] |
"""
A module of classification schemes for choropleth mapping.
"""
__author__ = "Sergio J. Rey"

# Public API of the module; CLASSIFIERS enumerates the concrete scheme names.
__all__ = [
    'Map_Classifier', 'quantile', 'Box_Plot', 'Equal_Interval', 'Fisher_Jenks',
    'Fisher_Jenks_Sampled', 'Jenks_Caspall', 'Jenks_Caspall_Forced',
    'Jenks_Caspall_Sampled', 'Max_P_Classifier', 'Maximum_Breaks',
    'Natural_Breaks', 'Quantiles', 'Percentiles', 'Std_Mean', 'User_Defined',
    'gadf', 'K_classifiers', 'HeadTail_Breaks', 'CLASSIFIERS'
]

CLASSIFIERS = ('Box_Plot', 'Equal_Interval', 'Fisher_Jenks',
               'Fisher_Jenks_Sampled', 'HeadTail_Breaks', 'Jenks_Caspall',
               'Jenks_Caspall_Forced', 'Jenks_Caspall_Sampled',
               'Max_P_Classifier', 'Maximum_Breaks', 'Natural_Breaks',
               'Quantiles', 'Percentiles', 'Std_Mean', 'User_Defined')

K = 5  # default number of classes in any map scheme with this as an argument
import numpy as np
import scipy.stats as stats
import scipy as sp
import copy
from scipy.cluster.vq import kmeans as KMEANS
from warnings import warn as Warn
try:
    from numba import jit
except ImportError:
    # numba is optional: fall back to an identity decorator so @jit-decorated
    # functions still run (uncompiled) when numba is not installed.
    def jit(func):
        return func
def headTail_breaks(values, cuts):
    """
    head tail breaks helper function

    Recursively splits at the mean, keeping the "head" (values >= mean)
    and recording each mean as a break point.

    Parameters
    ----------
    values : array-like
        values to classify
    cuts : list
        accumulator for the break points (mutated in place)

    Returns
    -------
    cuts : list
        the accumulated break points
    """
    values = np.array(values)
    mean = np.mean(values)
    cuts.append(mean)
    head = values[values >= mean]
    # Fix: recurse only while the head keeps shrinking; previously an array
    # of identical values never shrank and recursed until RecursionError.
    if len(values) > 1 and len(head) < len(values):
        return headTail_breaks(head, cuts)
    return cuts
def quantile(y, k=4):
    """
    Calculates the quantiles for an array

    Parameters
    ----------
    y : array
        (n,1), values to classify
    k : int
        number of quantiles

    Returns
    -------
    q : array
        (n,1), quantile values

    Examples
    --------
    >>> import numpy as np
    >>> import mapclassify as mc
    >>> x = np.arange(1000)
    >>> mc.classifiers.quantile(x)
    array([249.75, 499.5 , 749.25, 999.  ])
    >>> mc.classifiers.quantile(x, k = 3)
    array([333., 666., 999.])

    Note that if there are enough ties that the quantile values repeat, we
    collapse to pseudo quantiles in which case the number of classes will be
    less than k

    >>> x = [1.0] * 100
    >>> x.extend([3.0] * 40)
    >>> len(x)
    140
    >>> y = np.array(x)
    >>> mc.classifiers.quantile(y)
    array([1., 3.])
    """
    width = 100. / k
    cut_points = np.arange(width, 100 + width, width)
    # guard against floating point creep past 100
    if cut_points[-1] > 100.0:
        cut_points[-1] = 100.0
    breaks = np.unique(
        np.array([stats.scoreatpercentile(y, pt) for pt in cut_points]))
    if len(breaks) < k:
        # duplicated score values collapsed: fewer classes than requested
        Warn('Warning: Not enough unique values in array to form k classes',
             UserWarning)
        Warn('Warning: setting k to %d' % len(breaks), UserWarning)
    return breaks
def binC(y, bins):
    """
    Bin categorical/qualitative data

    Parameters
    ----------
    y : array
        (n,q), categorical values
    bins : array
        (k,1), unique values associated with each bin

    Return
    ------
    b : array
        (n,q), bin membership, values between 0 and k-1

    Examples
    --------
    >>> import numpy as np
    >>> import mapclassify as mc
    >>> np.random.seed(1)
    >>> x = np.random.randint(2, 8, (10, 3))
    >>> bins = list(range(2, 8))
    >>> y = mc.classifiers.binC(x, bins)
    >>> y
    array([[5, 3, 4],
           [0, 1, 3],
           [5, 0, 0],
           [1, 4, 5],
           [4, 1, 2],
           [4, 5, 2],
           [4, 3, 4],
           [2, 4, 5],
           [2, 4, 1],
           [1, 0, 5]])
    """
    # 1-d input is promoted to an (n, 1) result
    if np.ndim(y) == 1:
        out = np.zeros((np.shape(y)[0], 1), dtype='int')
    else:
        out = np.zeros(np.shape(y), dtype='int')
    for label, category in enumerate(bins):
        out[y == category] = label
    # warn about any values that matched no bin (they remain 0)
    for observed in set(y.flatten()):
        if observed not in bins:
            Warn('value not in bin: {}'.format(observed), UserWarning)
            Warn('bins: {}'.format(bins), UserWarning)
    return out
def bin(y, bins):
    """
    bin interval/ratio data

    Parameters
    ----------
    y : array
        (n,q), values to bin
    bins : array
        (k,1), upper bounds of each bin (monotonic)

    Returns
    -------
    b : array
        (n,q), values of values between 0 and k-1

    Examples
    --------
    >>> import numpy as np
    >>> import mapclassify as mc
    >>> np.random.seed(1)
    >>> x = np.random.randint(2, 20, (10, 3))
    >>> bins = [10, 15, 20]
    >>> b = mc.classifiers.bin(x, bins)
    >>> b
    array([[0, 1, 1],
           [0, 1, 1],
           [0, 2, 0],
           [2, 0, 1],
           [0, 1, 0],
           [0, 1, 1],
           [2, 0, 1],
           [2, 0, 1],
           [1, 1, 0],
           [0, 0, 2]])
    """
    # 1-d input is promoted to an (n, 1) result
    if np.ndim(y) == 1:
        out = np.zeros((np.shape(y)[0], 1), dtype='int')
    else:
        out = np.zeros(np.shape(y), dtype='int')
    # Assign from the widest bound inward so smaller applicable bins win.
    # Values above the last bound are never assigned and stay 0, matching
    # the original behavior.
    for idx in range(len(bins) - 1, -1, -1):
        out[y <= bins[idx]] = idx
    return out
def bin1d(x, bins):
    """
    Place values of a 1-d array into bins and determine counts of values in
    each bin

    Parameters
    ----------
    x : array
        (n, 1), values to bin
    bins : array
        (k,1), upper bounds of each bin (monotonic)

    Returns
    -------
    binIds : array
        1-d array of integer bin Ids
    counts : int
        number of elements of x falling in each bin

    Examples
    --------
    >>> import numpy as np
    >>> import mapclassify as mc
    >>> x = np.arange(100, dtype = 'float')
    >>> bins = [25, 74, 100]
    >>> binIds, counts = mc.classifiers.bin1d(x, bins)
    >>> counts
    array([26, 49, 25])
    """
    # Each interval is (lower, upper]; the first interval is open at -inf.
    lowers = [-float("inf")] + list(bins[0:-1])
    binIds = np.zeros(x.shape, dtype='int')
    for idx, (lo, hi) in enumerate(zip(lowers, bins)):
        binIds[(x > lo) & (x <= hi)] = idx
    # minlength guarantees one count per bin even if trailing bins are empty.
    counts = np.bincount(binIds, minlength=len(bins))
    return (binIds, counts)
def load_example():
    """
    Helper function for doc tests.

    Returns the California employment example dataset via the package's
    bundled ``datasets.calemp`` loader.
    """
    # Imported lazily so importing this module does not pull in the dataset.
    from .datasets import calemp
    return calemp.load()
def _kmeans(y, k=5):
    """
    Helper function to do kmeans in one dimension.

    Parameters
    ----------
    y : array
        values to cluster; 1-d or column vector
    k : int
        number of clusters (default 5)

    Returns
    -------
    tuple
        (class_ids, cuts, total within-class sum of squared deviations,
        sorted centroids)
    """
    y = y * 1.  # KMEANS needs float or double dtype
    centroids = KMEANS(y, k)[0]
    centroids.sort()
    # Choose the broadcasting form explicitly instead of the original bare
    # ``except:`` fallback, which could mask unrelated errors.
    if np.ndim(y) > 1:
        class_ids = np.abs(y - centroids).argmin(axis=1)
    else:
        class_ids = np.abs(y[:, np.newaxis] - centroids).argmin(axis=1)
    uc = np.unique(class_ids)
    # Upper cut of each class is the largest member value.
    cuts = np.array([y[class_ids == c].max() for c in uc])
    y_cent = np.zeros_like(y)
    for c in uc:
        y_cent[class_ids == c] = centroids[c]
    diffs = y - y_cent
    diffs *= diffs
    return class_ids, cuts, diffs.sum(), centroids
@jit
def _fisher_jenks_means(values, classes=5, sort=True):
    """
    Jenks Optimal (Natural Breaks) algorithm implemented in Python.

    Notes
    -----
    The original Python code comes from here:
    http://danieljlewis.org/2010/06/07/jenks-natural-breaks-algorithm-in-python/
    and is based on a JAVA and Fortran code available here:
    https://stat.ethz.ch/pipermail/r-sig-geo/2006-March/000811.html

    Returns class breaks such that classes are internally homogeneous while
    assuring heterogeneity among classes.

    NOTE(review): ``values`` is sorted in place when ``sort`` is True, so the
    caller's array is mutated.
    """
    if sort:
        values.sort()
    n_data = len(values)
    # mat1[l, j]: index of the optimal lower limit of class j when the first
    # l values are split into j classes.
    # mat2[l, j]: the minimal total within-class sum of squared deviations
    # for that partition.
    mat1 = np.zeros((n_data + 1, classes + 1), dtype=np.int32)
    mat2 = np.zeros((n_data + 1, classes + 1), dtype=np.float32)
    mat1[1, 1:] = 1
    mat2[2:, 1:] = np.inf
    v = np.float32(0)
    for l in range(2, len(values) + 1):
        # Running sums s1, s2 and count w give the sum of squared deviations
        # of the trailing window values[i3-1:l] in O(1) per extension.
        s1 = np.float32(0)
        s2 = np.float32(0)
        w = np.float32(0)
        for m in range(1, l + 1):
            i3 = l - m + 1
            val = np.float32(values[i3 - 1])
            s2 += val * val
            s1 += val
            w += np.float32(1)
            v = s2 - (s1 * s1) / w  # SSD of the current trailing window
            i4 = i3 - 1
            if i4 != 0:
                for j in range(2, classes + 1):
                    # Take this split if it does not worsen the best j-class
                    # partition of the first l values.
                    if mat2[l, j] >= (v + mat2[i4, j - 1]):
                        mat1[l, j] = i3
                        mat2[l, j] = v + mat2[i4, j - 1]
        mat1[l, 1] = 1
        mat2[l, 1] = v
    k = len(values)
    # Backtrack through mat1 to recover the break values for each class.
    kclass = np.zeros(classes + 1, dtype=values.dtype)
    kclass[classes] = values[len(values) - 1]
    kclass[0] = values[0]
    for countNum in range(classes, 1, -1):
        pivot = mat1[k, countNum]
        id = int(pivot - 2)
        kclass[countNum - 1] = values[id]
        k = int(pivot - 1)
    return kclass
class Map_Classifier(object):
    """
    Abstract class for all map classifications :cite:`Slocum_2009`

    For an array :math:`y` of :math:`n` values, a map classifier places each
    value :math:`y_i` into one of :math:`k` mutually exclusive and exhaustive
    classes. Each classifier defines the classes based on different criteria,
    but in all cases the following hold for the classifiers in PySAL:

    .. math:: C_j^l < y_i \le C_j^u \ \forall i \in C_j

    where :math:`C_j` denotes class :math:`j` which has lower bound
    :math:`C_j^l` and upper bound :math:`C_j^u`.

    Map Classifiers Supported

    * :class:`mapclassify.classifiers.Box_Plot`
    * :class:`mapclassify.classifiers.Equal_Interval`
    * :class:`mapclassify.classifiers.Fisher_Jenks`
    * :class:`mapclassify.classifiers.Fisher_Jenks_Sampled`
    * :class:`mapclassify.classifiers.HeadTail_Breaks`
    * :class:`mapclassify.classifiers.Jenks_Caspall`
    * :class:`mapclassify.classifiers.Jenks_Caspall_Forced`
    * :class:`mapclassify.classifiers.Jenks_Caspall_Sampled`
    * :class:`mapclassify.classifiers.Max_P_Classifier`
    * :class:`mapclassify.classifiers.Maximum_Breaks`
    * :class:`mapclassify.classifiers.Natural_Breaks`
    * :class:`mapclassify.classifiers.Quantiles`
    * :class:`mapclassify.classifiers.Percentiles`
    * :class:`mapclassify.classifiers.Std_Mean`
    * :class:`mapclassify.classifiers.User_Defined`

    Utilities:

    In addition to the classifiers, there are several utility functions that
    can be used to evaluate the properties of a specific classifier,
    or for automatic selection of a classifier and
    number of classes.

    * :func:`mapclassify.classifiers.gadf`
    * :class:`mapclassify.classifiers.K_classifiers`
    """

    def __init__(self, y):
        # Flatten so every subclass sees a 1-d array regardless of input shape.
        y = np.asarray(y).flatten()
        self.name = 'Map Classifier'
        self.y = y
        self._classify()
        self._summary()

    def _summary(self):
        # Derive per-class membership lists and the fit statistics used by
        # gadf/K_classifiers; subclasses that rebuild bins must call this again.
        yb = self.yb
        self.classes = [np.nonzero(yb == c)[0].tolist() for c in range(self.k)]
        self.tss = self.get_tss()
        self.adcm = self.get_adcm()
        self.gadf = self.get_gadf()

    def _classify(self):
        # Template method: subclasses supply _set_bins; binning is shared.
        self._set_bins()
        self.yb, self.counts = bin1d(self.y, self.bins)

    def _update(self, data, *args, **kwargs):
        """
        The only thing that *should* happen in this function is
        1. input sanitization for pandas
        2. classification/reclassification.

        Using their __init__ methods, all classifiers can re-classify given
        different input parameters or additional data.

        If you've got a cleverer updating equation than the intial estimation
        equation, remove the call to self.__init__ below and replace it with
        the updating function.
        """
        if data is not None:
            data = np.asarray(data).flatten()
            # New observations are pooled with the existing ones.
            data = np.append(data.flatten(), self.y)
        else:
            data = self.y
        self.__init__(data, *args, **kwargs)

    @classmethod
    def make(cls, *args, **kwargs):
        """
        Configure and create a classifier that will consume data and produce
        classifications, given the configuration options specified by this
        function.

        Note that this like a *partial application* of the relevant class
        constructor. `make` creates a function that returns classifications; it
        does not actually do the classification.

        If you want to classify data directly, use the appropriate class
        constructor, like Quantiles, Max_Breaks, etc.

        If you *have* a classifier object, but want to find which bins new data
        falls into, use find_bin.

        Parameters
        ----------
        *args : required positional arguments
            all positional arguments required by the classifier,
            excluding the input data.
        rolling : bool
            a boolean configuring the outputted classifier to use
            a rolling classifier rather than a new classifier for
            each input. If rolling, this adds the current data to
            all of the previous data in the classifier, and
            rebalances the bins, like a running median
            computation.
        return_object : bool
            a boolean configuring the outputted classifier to
            return the classifier object or not
        return_bins : bool
            a boolean configuring the outputted classifier to
            return the bins/breaks or not
        return_counts : bool
            a boolean configuring the outputted classifier to
            return the histogram of objects falling into each bin
            or not

        Returns
        -------
        A function that consumes data and returns their bins (and object,
        bins/breaks, or counts, if requested).

        Note
        ----
        This is most useful when you want to run a classifier many times
        with a given configuration, such as when classifying many columns of an
        array or dataframe using the same configuration.

        Examples
        --------
        >>> import libpysal as ps
        >>> import mapclassify as mc
        >>> import geopandas as gpd
        >>> df = gpd.read_file(ps.examples.get_path('columbus.dbf'))
        >>> classifier = mc.Quantiles.make(k=9)
        >>> cl = df[['HOVAL', 'CRIME', 'INC']].apply(classifier)
        >>> cl["HOVAL"].values[:10]
        array([8, 7, 2, 4, 1, 3, 8, 5, 7, 8])
        >>> cl["CRIME"].values[:10]
        array([0, 1, 3, 4, 6, 2, 0, 5, 3, 4])
        >>> cl["INC"].values[:10]
        array([7, 8, 5, 0, 3, 5, 0, 3, 6, 4])
        >>> import pandas as pd; from numpy import linspace as lsp
        >>> data = [lsp(3,8,num=10), lsp(10, 0, num=10), lsp(-5, 15, num=10)]
        >>> data = pd.DataFrame(data).T
        >>> data
                  0          1          2
        0  3.000000  10.000000  -5.000000
        1  3.555556   8.888889  -2.777778
        2  4.111111   7.777778  -0.555556
        3  4.666667   6.666667   1.666667
        4  5.222222   5.555556   3.888889
        5  5.777778   4.444444   6.111111
        6  6.333333   3.333333   8.333333
        7  6.888889   2.222222  10.555556
        8  7.444444   1.111111  12.777778
        9  8.000000   0.000000  15.000000
        >>> data.apply(mc.Quantiles.make(rolling=True))
           0  1  2
        0  0  4  0
        1  0  4  0
        2  1  4  0
        3  1  3  0
        4  2  2  1
        5  2  1  2
        6  3  0  4
        7  3  0  4
        8  4  0  4
        9  4  0  4
        >>> dbf = ps.io.open(ps.examples.get_path('baltim.dbf'))
        >>> data = dbf.by_col_array('PRICE', 'LOTSZ', 'SQFT')
        >>> my_bins = [1, 10, 20, 40, 80]
        >>> cl = [mc.User_Defined.make(bins=my_bins)(a) for a in data.T]
        >>> len(cl)
        3
        >>> cl[0][:10]
        array([4, 5, 5, 5, 4, 4, 5, 4, 4, 5])
        """
        # only flag overrides return flag
        to_annotate = copy.deepcopy(kwargs)
        return_object = kwargs.pop('return_object', False)
        return_bins = kwargs.pop('return_bins', False)
        return_counts = kwargs.pop('return_counts', False)
        rolling = kwargs.pop('rolling', False)
        if rolling:
            # just initialize a fake classifier
            data = list(range(10))
            cls_instance = cls(data, *args, **kwargs)
            # and empty it, since we'll be using the update
            cls_instance.y = np.array([])
        else:
            cls_instance = None

        # wrap init in a closure to make a consumer.
        # Qc Na: "Objects/Closures are poor man's Closures/Objects"
        def classifier(data, cls_instance=cls_instance):
            if rolling:
                cls_instance.update(data, inplace=True, **kwargs)
                yb = cls_instance.find_bin(data)
            else:
                cls_instance = cls(data, *args, **kwargs)
                yb = cls_instance.yb
            outs = [yb, None, None, None]
            outs[1] = cls_instance if return_object else None
            outs[2] = cls_instance.bins if return_bins else None
            outs[3] = cls_instance.counts if return_counts else None
            outs = [a for a in outs if a is not None]
            if len(outs) == 1:
                return outs[0]
            else:
                return outs

        # for debugging/jic, keep around the kwargs.
        # in future, we might want to make this a thin class, so that we can
        # set a custom repr. Call the class `Binner` or something, that's a
        # pre-configured Classifier that just consumes data, bins it, &
        # possibly updates the bins.
        classifier._options = to_annotate
        return classifier

    def update(self, y=None, inplace=False, **kwargs):
        """
        Add data or change classification parameters.

        Parameters
        ----------
        y : array
            (n,1) array of data to classify
        inplace : bool
            whether to conduct the update in place or to return a copy
            estimated from the additional specifications.

        Additional parameters provided in **kwargs are passed to the init
        function of the class. For documentation, check the class constructor.
        """
        kwargs.update({'k': kwargs.pop('k', self.k)})
        if inplace:
            self._update(y, **kwargs)
        else:
            new = copy.deepcopy(self)
            new._update(y, **kwargs)
            return new

    def __str__(self):
        st = self._table_string()
        return st

    def __repr__(self):
        return self._table_string()

    def __call__(self, *args, **kwargs):
        """
        This will allow the classifier to be called like it's a function.

        Whether or not we want to make this be "find_bin" or "update" is a
        design decision.

        I like this as find_bin, since a classifier's job should be to classify
        the data given to it using the rules estimated from the `_classify()`
        function.
        """
        return self.find_bin(*args)

    def get_tss(self):
        """
        Total sum of squares around class means

        Returns sum of squares over all class means
        """
        tss = 0
        for class_def in self.classes:
            if len(class_def) > 0:
                yc = self.y[class_def]
                css = yc - yc.mean()
                css *= css
                tss += sum(css)
        return tss

    def _set_bins(self):
        # Hook: concrete classifiers override this to populate self.bins.
        pass

    def get_adcm(self):
        """
        Absolute deviation around class median (ADCM).

        Calculates the absolute deviations of each observation about its class
        median as a measure of fit for the classification method.

        Returns sum of ADCM over all classes
        """
        adcm = 0
        for class_def in self.classes:
            if len(class_def) > 0:
                yc = self.y[class_def]
                yc_med = np.median(yc)
                ycd = np.abs(yc - yc_med)
                adcm += sum(ycd)
        return adcm

    def get_gadf(self):
        """
        Goodness of absolute deviation of fit
        """
        adam = (np.abs(self.y - np.median(self.y))).sum()
        gadf = 1 - self.adcm / adam
        return gadf

    def _table_string(self, width=12, decimal=3):
        """Render the classification as a fixed-width text table."""
        # Width is driven by the widest formatted bin value, not the default.
        fmt = ".%df" % decimal
        fmt = "%" + fmt
        largest = max([len(fmt % i) for i in self.bins])
        width = largest
        fmt = "%d.%df" % (width, decimal)
        fmt = "%" + fmt
        h1 = "Lower"
        h1 = h1.center(largest)
        h2 = " "
        h2 = h2.center(10)
        h3 = "Upper"
        h3 = h3.center(largest + 1)
        # NOTE: 'largest' is reused below for the count-column width.
        largest = "%d" % max(self.counts)
        largest = len(largest) + 15
        h4 = "Count"
        h4 = h4.rjust(largest)
        table = []
        header = h1 + h2 + h3 + h4
        table.append(header)
        table.append("=" * len(header))
        for i, up in enumerate(self.bins):
            if i == 0:
                # First row has no lower bound to print.
                left = " " * width
                left += " x[i] <= "
            else:
                left = fmt % self.bins[i - 1]
                left += " < x[i] <= "
            right = fmt % self.bins[i]
            row = left + right
            cnt = "%d" % self.counts[i]
            cnt = cnt.rjust(largest)
            row += cnt
            table.append(row)
        name = self.name
        top = name.center(len(row))
        table.insert(0, top)
        table.insert(1, " ")
        table = "\n".join(table)
        return table

    def find_bin(self, x):
        """
        Sort input or inputs according to the current bin estimate

        Parameters
        ----------
        x : array or numeric
            a value or array of values to fit within the estimated
            bins

        Returns
        -------
        a bin index or array of bin indices that classify the input into one of
        the classifiers' bins.

        Note that this differs from similar functionality in
        numpy.digitize(x, classi.bins, right=True).

        This will always provide the closest bin, so data "outside" the classifier,
        above and below the max/min breaks, will be classified into the nearest bin.

        numpy.digitize returns k+1 for data greater than the greatest bin, but retains 0
        for data below the lowest bin.
        """
        x = np.asarray(x).flatten()
        right = np.digitize(x, self.bins, right=True)
        # Clamp values above the top break into the last bin.
        if right.max() == len(self.bins):
            right[right == len(self.bins)] = len(self.bins) - 1
        return right
class HeadTail_Breaks(Map_Classifier):
    """
    Head/tail Breaks Map Classification for Heavy-tailed Distributions

    Parameters
    ----------
    y : array
        (n,1), values to classify

    Attributes
    ----------
    yb : array
        (n,1), bin ids for observations,
    bins : array
        (k,1), the upper bounds of each class
    k : int
        the number of classes
    counts : array
        (k,1), the number of observations falling in each class

    Examples
    --------
    >>> import numpy as np
    >>> import mapclassify as mc
    >>> np.random.seed(10)
    >>> cal = mc.load_example()
    >>> htb = mc.HeadTail_Breaks(cal)
    >>> htb.k
    3
    >>> htb.counts
    array([50,  7,  1])
    >>> htb.bins
    array([ 125.92810345,  811.26      , 4111.45      ])

    Notes
    -----
    Head/tail Breaks is a relatively new classification method developed
    for data with a heavy-tailed distribution.

    Implementation based on contributions by Alessandra Sozzi
    <alessandra.sozzi@gmail.com>.

    For theoretical details see :cite:`Jiang_2013`.
    """

    def __init__(self, y):
        Map_Classifier.__init__(self, y)
        self.name = 'HeadTail_Breaks'

    def _set_bins(self):
        # The recursive head/tail partitioning lives in the module-level
        # helper; the number of classes falls out of the data itself.
        breaks = headTail_breaks(self.y.copy(), [])
        self.bins = np.array(breaks)
        self.k = len(self.bins)
class Equal_Interval(Map_Classifier):
    """
    Equal Interval Classification

    Parameters
    ----------
    y : array
        (n,1), values to classify
    k : int
        number of classes required

    Attributes
    ----------
    yb : array
        (n,1), bin ids for observations,
        each value is the id of the class the observation belongs to
        yb[i] = j for j>=1 if bins[j-1] < y[i] <= bins[j], yb[i] = 0
        otherwise
    bins : array
        (k,1), the upper bounds of each class
    k : int
        the number of classes
    counts : array
        (k,1), the number of observations falling in each class

    Examples
    --------
    >>> import mapclassify as mc
    >>> cal = mc.load_example()
    >>> ei = mc.Equal_Interval(cal, k = 5)
    >>> ei.k
    5
    >>> ei.counts
    array([57,  0,  0,  0,  1])
    >>> ei.bins
    array([ 822.394, 1644.658, 2466.922, 3289.186, 4111.45 ])

    Notes
    -----
    Intervals defined to have equal width:

    .. math::

        bins_j = min(y)+w*(j+1)

    with :math:`w=\\frac{max(y)-min(j)}{k}`
    """

    def __init__(self, y, k=K):
        """
        see class docstring
        """
        self.k = k
        Map_Classifier.__init__(self, y)
        self.name = 'Equal Interval'

    def _set_bins(self):
        lo = min(self.y)
        hi = max(self.y)
        step = (hi - lo) * 1. / self.k
        edges = np.arange(lo + step, hi + step, step)
        # Floating-point drift can produce one edge too many; trim, then pin
        # the final break exactly at the maximum.
        if len(edges) > self.k:
            edges = edges[0:self.k]
        edges[-1] = hi
        self.bins = edges.copy()
class Percentiles(Map_Classifier):
    """
    Percentiles Map Classification

    Parameters
    ----------
    y : array
        attribute to classify
    pct : array
        percentiles, default=[1, 10, 50, 90, 99, 100]

    Attributes
    ----------
    yb : array
        bin ids for observations (numpy array n x 1)
    bins : array
        the upper bounds of each class (numpy array k x 1)
    k : int
        the number of classes
    counts : int
        the number of observations falling in each class
        (numpy array k x 1)

    Examples
    --------
    >>> import mapclassify as mc
    >>> cal = mc.load_example()
    >>> p = mc.Percentiles(cal)
    >>> p.bins
    array([1.357000e-01, 5.530000e-01, 9.365000e+00, 2.139140e+02,
           2.179948e+03, 4.111450e+03])
    >>> p.counts
    array([ 1,  5, 23, 23,  5,  1])
    >>> p2 = mc.Percentiles(cal, pct = [50, 100])
    >>> p2.bins
    array([   9.365, 4111.45 ])
    >>> p2.counts
    array([29, 29])
    >>> p2.k
    2
    """

    def __init__(self, y, pct=None):
        # A mutable default list would be shared across all calls (and stored
        # on every instance); use a None sentinel and build a fresh default.
        if pct is None:
            pct = [1, 10, 50, 90, 99, 100]
        self.pct = pct
        Map_Classifier.__init__(self, y)
        self.name = 'Percentiles'

    def _set_bins(self):
        # Each class's upper bound is the score at the requested percentile.
        y = self.y
        pct = self.pct
        self.bins = np.array([stats.scoreatpercentile(y, p) for p in pct])
        self.k = len(self.bins)

    def update(self, y=None, inplace=False, **kwargs):
        """
        Add data or change classification parameters.

        Parameters
        ----------
        y : array
            (n,1) array of data to classify
        inplace : bool
            whether to conduct the update in place or to return a copy
            estimated from the additional specifications.

        Additional parameters provided in **kwargs are passed to the init
        function of the class. For documentation, check the class constructor.
        """
        kwargs.update({'pct': kwargs.pop('pct', self.pct)})
        if inplace:
            self._update(y, **kwargs)
        else:
            new = copy.deepcopy(self)
            new._update(y, **kwargs)
            return new
class Box_Plot(Map_Classifier):
    """
    Box_Plot Map Classification

    Parameters
    ----------
    y : array
        attribute to classify
    hinge : float
        multiplier for IQR

    Attributes
    ----------
    yb : array
        (n,1), bin ids for observations
    bins : array
        (n,1), the upper bounds of each class (monotonic)
    k : int
        the number of classes
    counts : array
        (k,1), the number of observations falling in each class
    low_outlier_ids : array
        indices of observations that are low outliers
    high_outlier_ids : array
        indices of observations that are high outliers

    Notes
    -----
    The bins are set as follows::

        bins[0] = q[0]-hinge*IQR
        bins[1] = q[0]
        bins[2] = q[1]
        bins[3] = q[2]
        bins[4] = q[2]+hinge*IQR
        bins[5] = inf  (see Notes)

    where q is an array of the first three quartiles of y and
    IQR=q[2]-q[0].

    If q[2]+hinge*IQR > max(y) there will only be 5 classes and no high
    outliers, otherwise, there will be 6 classes and at least one high
    outlier.

    Examples
    --------
    >>> import mapclassify as mc
    >>> cal = mc.load_example()
    >>> bp = mc.Box_Plot(cal)
    >>> bp.bins
    array([-5.287625e+01,  2.567500e+00,  9.365000e+00,  3.953000e+01,
            9.497375e+01,  4.111450e+03])
    >>> bp.counts
    array([ 0, 15, 14, 14,  6,  9])
    >>> bp.high_outlier_ids
    array([ 0,  6, 18, 29, 33, 36, 37, 40, 42])
    """

    def __init__(self, y, hinge=1.5):
        """
        Parameters
        ----------
        y : array (n,1)
            attribute to classify
        hinge : float
            multiple of inter-quartile range (default=1.5)
        """
        self.hinge = hinge
        Map_Classifier.__init__(self, y)
        self.name = 'Box Plot'

    def _set_bins(self):
        # Quartiles plus the maximum give the interior breaks.
        quartiles = [stats.scoreatpercentile(self.y, p) for p in [25, 50, 75, 100]]
        self.iqr = quartiles[-2] - quartiles[0]
        whisker = self.hinge * self.iqr
        low_fence = quartiles[0] - whisker
        high_fence = quartiles[-2] + whisker
        # Only keep a separate high-outlier class when the fence actually
        # falls below the maximum; otherwise the top break is the fence.
        if high_fence < quartiles[-1]:
            quartiles.insert(-1, high_fence)
        else:
            quartiles[-1] = high_fence
        quartiles.insert(0, low_fence)
        self.bins = np.array(quartiles)
        self.k = len(quartiles)

    def _classify(self):
        Map_Classifier._classify(self)
        # Outliers occupy the fence bins: 0 below the low fence, 5 above the
        # high fence (bin 5 exists only when high outliers are present).
        self.low_outlier_ids = np.nonzero(self.yb == 0)[0]
        self.high_outlier_ids = np.nonzero(self.yb == 5)[0]

    def update(self, y=None, inplace=False, **kwargs):
        """
        Add data or change classification parameters.

        Parameters
        ----------
        y : array
            (n,1) array of data to classify
        inplace : bool
            whether to conduct the update in place or to return a
            copy estimated from the additional specifications.

        Additional parameters provided in **kwargs are passed to the init
        function of the class. For documentation, check the class constructor.
        """
        kwargs.update({'hinge': kwargs.pop('hinge', self.hinge)})
        if inplace:
            self._update(y, **kwargs)
        else:
            fresh = copy.deepcopy(self)
            fresh._update(y, **kwargs)
            return fresh
class Quantiles(Map_Classifier):
    """
    Quantile Map Classification

    Parameters
    ----------
    y : array
        (n,1), values to classify
    k : int
        number of classes required

    Attributes
    ----------
    yb : array
        (n,1), bin ids for observations,
        each value is the id of the class the observation belongs to
        yb[i] = j for j>=1 if bins[j-1] < y[i] <= bins[j], yb[i] = 0
        otherwise
    bins : array
        (k,1), the upper bounds of each class
    k : int
        the number of classes
    counts : array
        (k,1), the number of observations falling in each class

    Examples
    --------
    >>> import mapclassify as mc
    >>> cal = mc.load_example()
    >>> q = mc.Quantiles(cal, k = 5)
    >>> q.bins
    array([1.46400e+00, 5.79800e+00, 1.32780e+01, 5.46160e+01, 4.11145e+03])
    >>> q.counts
    array([12, 11, 12, 11, 12])
    """

    def __init__(self, y, k=K):
        self.k = k
        Map_Classifier.__init__(self, y)
        self.name = 'Quantiles'

    def _set_bins(self):
        # Class breaks are simply the k-quantiles of the data.
        self.bins = quantile(self.y, k=self.k)
class Std_Mean(Map_Classifier):
    """
    Standard Deviation and Mean Map Classification

    Parameters
    ----------
    y : array
        (n,1), values to classify
    multiples : array
        the multiples of the standard deviation to add/subtract from
        the sample mean to define the bins, default=[-2,-1,1,2]

    Attributes
    ----------
    yb : array
        (n,1), bin ids for observations,
    bins : array
        (k,1), the upper bounds of each class
    k : int
        the number of classes
    counts : array
        (k,1), the number of observations falling in each class

    Examples
    --------
    >>> import mapclassify as mc
    >>> cal = mc.load_example()
    >>> st = mc.Std_Mean(cal)
    >>> st.k
    5
    >>> st.bins
    array([-967.36235382, -420.71712519,  672.57333208, 1219.21856072,
           4111.45      ])
    >>> st.counts
    array([ 0,  0, 56,  1,  1])
    >>> st3 = mc.Std_Mean(cal, multiples = [-3, -1.5, 1.5, 3])
    >>> st3.bins
    array([-1514.00758246,  -694.03973951,   945.8959464 ,  1765.86378936,
            4111.45      ])
    >>> st3.counts
    array([ 0,  0, 57,  0,  1])
    """

    def __init__(self, y, multiples=None):
        # A mutable default list would be shared across all calls (and stored
        # on every instance); use a None sentinel and build a fresh default.
        if multiples is None:
            multiples = [-2, -1, 1, 2]
        self.multiples = multiples
        Map_Classifier.__init__(self, y)
        self.name = 'Std_Mean'

    def _set_bins(self):
        y = self.y
        s = y.std(ddof=1)  # sample standard deviation
        m = y.mean()
        cuts = [m + s * w for w in self.multiples]
        y_max = y.max()
        # Ensure the top break covers the maximum observation.
        if cuts[-1] < y_max:
            cuts.append(y_max)
        self.bins = np.array(cuts)
        self.k = len(cuts)

    def update(self, y=None, inplace=False, **kwargs):
        """
        Add data or change classification parameters.

        Parameters
        ----------
        y : array
            (n,1) array of data to classify
        inplace : bool
            whether to conduct the update in place or to return a copy
            estimated from the additional specifications.

        Additional parameters provided in **kwargs are passed to the init
        function of the class. For documentation, check the class constructor.
        """
        kwargs.update({'multiples': kwargs.pop('multiples', self.multiples)})
        if inplace:
            self._update(y, **kwargs)
        else:
            new = copy.deepcopy(self)
            new._update(y, **kwargs)
            return new
class Maximum_Breaks(Map_Classifier):
    """
    Maximum Breaks Map Classification

    Parameters
    ----------
    y : array
        (n, 1), values to classify
    k : int
        number of classes required
    mindiff : float
        The minimum difference between class breaks

    Attributes
    ----------
    yb : array
        (n, 1), bin ids for observations
    bins : array
        (k, 1), the upper bounds of each class
    k : int
        the number of classes
    counts : array
        (k, 1), the number of observations falling in each class (numpy
        array k x 1)

    Examples
    --------
    >>> import mapclassify as mc
    >>> cal = mc.load_example()
    >>> mb = mc.Maximum_Breaks(cal, k = 5)
    >>> mb.k
    5
    >>> mb.bins
    array([ 146.005,  228.49 ,  546.675, 2417.15 , 4111.45 ])
    >>> mb.counts
    array([50,  2,  4,  1,  1])
    """

    def __init__(self, y, k=5, mindiff=0):
        self.k = k
        self.mindiff = mindiff
        Map_Classifier.__init__(self, y)
        self.name = 'Maximum_Breaks'

    def _set_bins(self):
        xs = self.y.copy()
        k = self.k
        xs.sort()
        min_diff = self.mindiff
        # Gaps between consecutive sorted values; breaks are placed at the
        # k-1 largest gaps exceeding mindiff.
        d = xs[1:] - xs[:-1]
        diffs = d[np.nonzero(d > min_diff)]
        # np.unique: scipy's top-level alias sp.unique was deprecated and
        # removed from modern SciPy releases.
        diffs = np.unique(diffs)
        k1 = k - 1
        if len(diffs) > k1:
            diffs = diffs[-k1:]
        mp = []
        self.cids = []
        for diff in diffs:
            ids = np.nonzero(d == diff)
            for id in ids:
                self.cids.append(id[0])
                # Break at the midpoint of the gap.
                cp = ((xs[id] + xs[id + 1]) / 2.)
                mp.append(cp[0])
        mp.append(xs[-1])
        mp.sort()
        self.bins = np.array(mp)

    def update(self, y=None, inplace=False, **kwargs):
        """
        Add data or change classification parameters.

        Parameters
        ----------
        y : array
            (n,1) array of data to classify
        inplace : bool
            whether to conduct the update in place or to return a copy
            estimated from the additional specifications.

        Additional parameters provided in **kwargs are passed to the init
        function of the class. For documentation, check the class constructor.
        """
        kwargs.update({'k': kwargs.pop('k', self.k)})
        kwargs.update({'mindiff': kwargs.pop('mindiff', self.mindiff)})
        if inplace:
            self._update(y, **kwargs)
        else:
            new = copy.deepcopy(self)
            new._update(y, **kwargs)
            return new
class Natural_Breaks(Map_Classifier):
    """
    Natural Breaks Map Classification

    Parameters
    ----------
    y : array
        (n,1), values to classify
    k : int
        number of classes required
    initial : int
        number of initial solutions to generate, (default=100)

    Attributes
    ----------
    yb : array
        (n,1), bin ids for observations,
    bins : array
        (k,1), the upper bounds of each class
    k : int
        the number of classes
    counts : array
        (k,1), the number of observations falling in each class

    Examples
    --------
    >>> import numpy as np
    >>> import mapclassify as mc
    >>> np.random.seed(123456)
    >>> cal = mc.load_example()
    >>> nb = mc.Natural_Breaks(cal, k=5)
    >>> nb.k
    5
    >>> nb.counts
    array([41,  9,  6,  1,  1])
    >>> nb.bins
    array([  29.82,  110.74,  370.5 ,  722.85, 4111.45])
    >>> x = np.array([1] * 50)
    >>> x[-1] = 20
    >>> nb = mc.Natural_Breaks(x, k = 5, initial = 0)
    Warning: Not enough unique values in array to form k classes
    Warning: setting k to 2
    >>> nb.bins
    array([ 1, 20])
    >>> nb.counts
    array([49,  1])

    Notes
    -----
    There is a tradeoff here between speed and consistency of the
    classification If you want more speed, set initial to a smaller value (0
    would result in the best speed, if you want more consistent classes in
    multiple runs of Natural_Breaks on the same data, set initial to a higher
    value.
    """

    def __init__(self, y, k=K, initial=100):
        self.k = k
        self.initial = initial
        Map_Classifier.__init__(self, y)
        self.name = 'Natural_Breaks'

    def _set_bins(self):
        x = self.y.copy()
        k = self.k
        values = np.array(x)
        uv = np.unique(values)
        uvk = len(uv)
        if uvk < k:
            ms = 'Warning: Not enough unique values in array to form k classes'
            Warn(ms, UserWarning)
            Warn("Warning: setting k to %d" % uvk, UserWarning)
            k = uvk
            uv.sort()
            # we set the bins equal to the sorted unique values and ramp k
            # downwards. no need to call kmeans.
            self.bins = uv
            self.k = k
        else:
            # find an initial solution and then try to find an improvement
            res0 = natural_breaks(x, k)
            fit = res0[2]
            for i in range(self.initial):
                res = natural_breaks(x, k)
                fit_i = res[2]
                if fit_i < fit:
                    res0 = res
                    # BUGFIX: track the running best fit; previously `fit`
                    # was never updated, so the *last* candidate better than
                    # the initial solution won rather than the best overall.
                    fit = fit_i
            self.bins = np.array(res0[-1])
            self.k = len(self.bins)

    def update(self, y=None, inplace=False, **kwargs):
        """
        Add data or change classification parameters.

        Parameters
        ----------
        y : array
            (n,1) array of data to classify
        inplace : bool
            whether to conduct the update in place or to return a
            copy estimated from the additional specifications.

        Additional parameters provided in **kwargs are passed to the init
        function of the class. For documentation, check the class constructor.
        """
        kwargs.update({'k': kwargs.pop('k', self.k)})
        kwargs.update({'initial': kwargs.pop('initial', self.initial)})
        if inplace:
            self._update(y, **kwargs)
        else:
            new = copy.deepcopy(self)
            new._update(y, **kwargs)
            return new
class Fisher_Jenks(Map_Classifier):
    """
    Fisher Jenks optimal classifier - mean based

    Parameters
    ----------
    y : array
        (n,1), values to classify
    k : int
        number of classes required

    Attributes
    ----------
    yb : array
        (n,1), bin ids for observations
    bins : array
        (k,1), the upper bounds of each class
    k : int
        the number of classes
    counts : array
        (k,1), the number of observations falling in each class

    Examples
    --------
    >>> import mapclassify as mc
    >>> cal = mc.load_example()
    >>> fj = mc.Fisher_Jenks(cal)
    >>> fj.adcm
    799.24
    >>> fj.bins
    array([  75.29,  192.05,  370.5 ,  722.85, 4111.45])
    >>> fj.counts
    array([49,  3,  4,  1,  1])
    """

    def __init__(self, y, k=K):
        # The exact algorithm cannot produce more classes than there are
        # distinct values.
        if len(np.unique(y)) < k:
            raise ValueError("Fewer unique values than specified classes.")
        self.k = k
        Map_Classifier.__init__(self, y)
        self.name = "Fisher_Jenks"

    def _set_bins(self):
        # The helper returns [minimum, break_1, ..., break_k]; drop the
        # leading minimum so only upper bounds remain.
        breaks = _fisher_jenks_means(self.y.copy(), classes=self.k)
        self.bins = np.array(breaks[1:])
class Fisher_Jenks_Sampled(Map_Classifier):
    """
    Fisher Jenks optimal classifier - mean based using random sample

    Parameters
    ----------
    y : array
        (n,1), values to classify
    k : int
        number of classes required
    pct : float
        The percentage of n that should form the sample
        If pct is specified such that n*pct > 1000, then
        pct = 1000./n, unless truncate is False
    truncate : boolean
        truncate pct in cases where pct * n > 1000., (Default True)

    Attributes
    ----------
    yb : array
        (n,1), bin ids for observations
    bins : array
        (k,1), the upper bounds of each class
    k : int
        the number of classes
    counts : array
        (k,1), the number of observations falling in each class

    Examples
    --------
    (Turned off due to timing being different across hardware)

    For theoretical details see :cite:`Rey_2016`.
    """

    def __init__(self, y, k=K, pct=0.10, truncate=True):
        self.k = k
        n = y.size
        # Cap the sample at 1000 observations unless truncation is disabled.
        if (pct * n > 1000) and truncate:
            pct = 1000. / n
        # np.random.random_integers(0, n-1, size) was deprecated in NumPy
        # 1.11 and removed in 1.25; randint(0, n, size) draws the same
        # inclusive integer range [0, n-1].
        ids = np.random.randint(0, n, int(n * pct))
        yr = y[ids]
        yr[-1] = max(y)  # make sure we have the upper bound
        yr[0] = min(y)  # make sure we have the min
        self.original_y = y
        self.pct = pct
        self._truncated = truncate
        self.yr = yr
        self.yr_n = yr.size
        Map_Classifier.__init__(self, yr)
        # Re-bin the *full* data with the sample-derived breaks.
        self.yb, self.counts = bin1d(y, self.bins)
        self.name = "Fisher_Jenks_Sampled"
        self.y = y
        self._summary()  # have to recalculate summary stats

    def _set_bins(self):
        fj = Fisher_Jenks(self.y, self.k)
        self.bins = fj.bins

    def update(self, y=None, inplace=False, **kwargs):
        """
        Add data or change classification parameters.

        Parameters
        ----------
        y : array
            (n,1) array of data to classify
        inplace : bool
            whether to conduct the update in place or to return a
            copy estimated from the additional specifications.

        Additional parameters provided in **kwargs are passed to the init
        function of the class. For documentation, check the class constructor.
        """
        kwargs.update({'k': kwargs.pop('k', self.k)})
        kwargs.update({'pct': kwargs.pop('pct', self.pct)})
        kwargs.update({'truncate': kwargs.pop('truncate', self._truncated)})
        if inplace:
            self._update(y, **kwargs)
        else:
            new = copy.deepcopy(self)
            new._update(y, **kwargs)
            return new
class Jenks_Caspall(Map_Classifier):
    """
    Jenks Caspall Map Classification

    Parameters
    ----------
    y : array
        (n,1), values to classify
    k : int
        number of classes required

    Attributes
    ----------
    yb : array
        (n,1), bin ids for observations,
    bins : array
        (k,1), the upper bounds of each class
    k : int
        the number of classes
    counts : array
        (k,1), the number of observations falling in each class

    Examples
    --------
    >>> import mapclassify as mc
    >>> cal = mc.load_example()
    >>> jc = mc.Jenks_Caspall(cal, k = 5)
    >>> jc.bins
    array([1.81000e+00, 7.60000e+00, 2.98200e+01, 1.81270e+02, 4.11145e+03])
    >>> jc.counts
    array([14, 13, 14, 10,  7])
    """

    def __init__(self, y, k=K):
        self.k = k
        Map_Classifier.__init__(self, y)
        self.name = "Jenks_Caspall"

    def _set_bins(self):
        x = self.y.copy()
        k = self.k
        # start with quantiles
        q = quantile(x, k)
        solving = True
        xb, cnts = bin1d(x, q)
        # class medians seed the iterative reassignment
        if x.ndim == 1:
            x.shape = (x.size, 1)
        n, k = x.shape
        xm = [np.median(x[xb == i]) for i in np.unique(xb)]
        xb0 = xb.copy()
        q = xm
        it = 0
        rk = list(range(self.k))
        while solving:
            # Reassign every value to its nearest class median; iterate
            # until the assignment stops changing.
            d = abs(x - q)
            xb = d.argmin(axis=1)
            if (xb0 == xb).all():
                solving = False
            else:
                xb0 = xb
                it += 1
                q = np.array([np.median(x[xb == i]) for i in rk])
        # np.unique: scipy's top-level alias sp.unique was deprecated and
        # removed from modern SciPy releases.
        cuts = np.array([max(x[xb == i]) for i in np.unique(xb)])
        cuts.shape = (len(cuts), )
        self.bins = cuts
        self.iterations = it
class Jenks_Caspall_Sampled(Map_Classifier):
    """
    Jenks Caspall Map Classification using a random sample
    Parameters
    ----------
    y : array
        (n,1), values to classify
    k : int
        number of classes required
    pct : float
        The percentage of n that should form the sample
        If pct is specified such that n*pct > 1000, then pct = 1000./n
    Attributes
    ----------
    yb : array
        (n,1), bin ids for observations,
    bins : array
        (k,1), the upper bounds of each class
    k : int
        the number of classes
    counts : array
        (k,1), the number of observations falling in each class
    Examples
    --------
    >>> import mapclassify as mc
    >>> cal = mc.load_example()
    >>> x = np.random.random(100000)
    >>> jc = mc.Jenks_Caspall(x)
    >>> jcs = mc.Jenks_Caspall_Sampled(x)
    >>> jc.bins
    array([0.1988721 , 0.39624334, 0.59441487, 0.79624357, 0.99999251])
    >>> jcs.bins
    array([0.20998558, 0.42112792, 0.62752937, 0.80543819, 0.99999251])
    >>> jc.counts
    array([19943, 19510, 19547, 20297, 20703])
    >>> jcs.counts
    array([21039, 20908, 20425, 17813, 19815])
    # not for testing since we get different times on different hardware
    # just included for documentation of likely speed gains
    #>>> t1 = time.time(); jc = Jenks_Caspall(x); t2 = time.time()
    #>>> t1s = time.time(); jcs = Jenks_Caspall_Sampled(x); t2s = time.time()
    #>>> t2 - t1; t2s - t1s
    #1.8292930126190186
    #0.061631917953491211
    Notes
    -----
    This is intended for large n problems. The logic is to apply
    Jenks_Caspall to a random subset of the y space and then bin the
    complete vector y on the bins obtained from the subset. This would
    trade off some "accuracy" for a gain in speed.
    """

    def __init__(self, y, k=K, pct=0.10):
        self.k = k
        n = y.size
        if pct * n > 1000:
            pct = 1000. / n
        # np.random.randint(0, n) draws from [0, n-1]; it replaces the
        # deprecated (and removed in recent NumPy) random_integers(0, n - 1).
        ids = np.random.randint(0, n, int(n * pct))
        yr = y[ids]
        yr[0] = max(y)  # make sure we have the upper bound
        self.original_y = y
        self.pct = pct
        self.yr = yr
        self.yr_n = yr.size
        Map_Classifier.__init__(self, yr)
        # bin the complete data set using breaks learned from the sample
        self.yb, self.counts = bin1d(y, self.bins)
        self.name = "Jenks_Caspall_Sampled"
        self.y = y
        self._summary()  # have to recalculate summary stats

    def _set_bins(self):
        # run the full algorithm on the sample stored in self.y
        jc = Jenks_Caspall(self.y, self.k)
        self.bins = jc.bins
        self.iterations = jc.iterations

    def update(self, y=None, inplace=False, **kwargs):
        """
        Add data or change classification parameters.
        Parameters
        ----------
        y : array
            (n,1) array of data to classify
        inplace : bool
            whether to conduct the update in place or to return a
            copy estimated from the additional specifications.
        Additional parameters provided in **kwargs are passed to the init
        function of the class. For documentation, check the class constructor.
        """
        # carry the current settings forward unless the caller overrides them
        kwargs.update({'k': kwargs.pop('k', self.k)})
        kwargs.update({'pct': kwargs.pop('pct', self.pct)})
        if inplace:
            self._update(y, **kwargs)
        else:
            new = copy.deepcopy(self)
            new._update(y, **kwargs)
            return new
class Jenks_Caspall_Forced(Map_Classifier):
    """
    Jenks Caspall Map Classification with forced movements
    Parameters
    ----------
    y : array
        (n,1), values to classify
    k : int
        number of classes required
    Attributes
    ----------
    yb : array
        (n,1), bin ids for observations
    bins : array
        (k,1), the upper bounds of each class
    k : int
        the number of classes
    counts : array
        (k,1), the number of observations falling in each class
    Examples
    --------
    >>> import mapclassify as mc
    >>> cal = mc.load_example()
    >>> jcf = mc.Jenks_Caspall_Forced(cal, k = 5)
    >>> jcf.k
    5
    >>> jcf.bins
    array([[1.34000e+00],
           [5.90000e+00],
           [1.67000e+01],
           [5.06500e+01],
           [4.11145e+03]])
    >>> jcf.counts
    array([12, 12, 13, 9, 12])
    >>> jcf4 = mc.Jenks_Caspall_Forced(cal, k = 4)
    >>> jcf4.k
    4
    >>> jcf4.bins
    array([[2.51000e+00],
           [8.70000e+00],
           [3.66800e+01],
           [4.11145e+03]])
    >>> jcf4.counts
    array([15, 14, 14, 15])
    """

    def __init__(self, y, k=K):
        self.k = k
        Map_Classifier.__init__(self, y)
        self.name = "Jenks_Caspall_Forced"

    def _set_bins(self):
        x = self.y.copy()
        k = self.k
        # quantiles seed the initial partition
        q = quantile(x, k)
        solving = True
        xb, cnt = bin1d(x, q)
        # class means
        if x.ndim == 1:
            x.shape = (x.size, 1)
        n, tmp = x.shape
        # np.unique replaces the removed scipy alias sp.unique (throughout)
        xm = [x[xb == i].mean() for i in np.unique(xb)]
        q = xm
        xbar = np.array([xm[xbi] for xbi in xb])
        xbar.shape = (n, 1)
        ss = x - xbar
        ss *= ss
        ss = sum(ss)
        down_moves = up_moves = 0
        solving = True
        it = 0
        while solving:
            # try upward moves first
            moving_up = True
            while moving_up:
                class_ids = np.unique(xb)
                nk = [sum(xb == j) for j in class_ids]
                candidates = nk[:-1]
                i = 0
                up_moves = 0
                while candidates:
                    nki = candidates.pop(0)
                    if nki > 1:
                        # move the largest member of class i up one class
                        ids = np.nonzero(xb == class_ids[i])
                        mover = max(ids[0])
                        tmp = xb.copy()
                        tmp[mover] = xb[mover] + 1
                        tm = [x[tmp == j].mean() for j in np.unique(tmp)]
                        txbar = np.array([tm[xbi] for xbi in tmp])
                        txbar.shape = (n, 1)
                        tss = x - txbar
                        tss *= tss
                        tss = sum(tss)
                        if tss < ss:
                            # keep the move only if it lowers the TSS
                            xb = tmp
                            ss = tss
                            candidates = []
                            up_moves += 1
                    i += 1
                if not up_moves:
                    moving_up = False
            moving_down = True
            while moving_down:
                class_ids = np.unique(xb)
                nk = [sum(xb == j) for j in class_ids]
                candidates = nk[1:]
                i = 1
                down_moves = 0
                while candidates:
                    nki = candidates.pop(0)
                    if nki > 1:
                        # move the smallest member of class i down one class
                        ids = np.nonzero(xb == class_ids[i])
                        mover = min(ids[0])
                        mover_class = xb[mover]
                        target_class = mover_class - 1
                        tmp = xb.copy()
                        tmp[mover] = target_class
                        tm = [x[tmp == j].mean() for j in np.unique(tmp)]
                        txbar = np.array([tm[xbi] for xbi in tmp])
                        txbar.shape = (n, 1)
                        tss = x - txbar
                        tss *= tss
                        tss = sum(tss)
                        if tss < ss:
                            xb = tmp
                            ss = tss
                            candidates = []
                            down_moves += 1
                    i += 1
                if not down_moves:
                    moving_down = False
            if not up_moves and not down_moves:
                # no improving move in either direction: done
                solving = False
            it += 1
        cuts = [max(x[xb == c]) for c in np.unique(xb)]
        self.bins = np.array(cuts)
        self.iterations = it
class User_Defined(Map_Classifier):
    """
    User Specified Binning
    Parameters
    ----------
    y : array
        (n,1), values to classify
    bins : array
        (k,1), upper bounds of classes (have to be monotically increasing)
    Attributes
    ----------
    yb : array
        (n,1), bin ids for observations,
    bins : array
        (k,1), the upper bounds of each class
    k : int
        the number of classes
    counts : array
        (k,1), the number of observations falling in each class
    Examples
    --------
    >>> import mapclassify as mc
    >>> cal = mc.load_example()
    >>> bins = [20, max(cal)]
    >>> bins
    [20, 4111.45]
    >>> ud = mc.User_Defined(cal, bins)
    >>> ud.bins
    array([  20.  , 4111.45])
    >>> ud.counts
    array([37, 21])
    >>> bins = [20, 30]
    >>> ud = mc.User_Defined(cal, bins)
    >>> ud.bins
    array([  20.  ,   30.  , 4111.45])
    >>> ud.counts
    array([37, 4, 17])
    Notes
    -----
    If upper bound of user bins does not exceed max(y) we append an
    additional bin.
    """

    def __init__(self, y, bins):
        # Work on a copy: the caller's list is never mutated, and an
        # ndarray (e.g. self.bins during an update) gains list semantics
        # so the conditional append below cannot fail.
        bins = list(bins)
        if bins[-1] < max(y):
            bins.append(max(y))
        self.k = len(bins)
        self.bins = np.array(bins)
        self.y = y
        Map_Classifier.__init__(self, y)
        self.name = 'User Defined'

    def _set_bins(self):
        # bins were supplied by the user; nothing to estimate
        pass

    def _update(self, y=None, bins=None):
        # merge new observations with the existing ones, then re-classify
        if y is not None:
            if hasattr(y, 'values'):
                y = y.values
            y = np.append(y.flatten(), self.y)
        else:
            y = self.y
        if bins is None:
            bins = self.bins
        self.__init__(y, bins)

    def update(self, y=None, inplace=False, **kwargs):
        """
        Add data or change classification parameters.
        Parameters
        ----------
        y : array
            (n,1) array of data to classify
        inplace : bool
            whether to conduct the update in place or to return a
            copy estimated from the additional specifications.
        Additional parameters provided in **kwargs are passed to the init
        function of the class. For documentation, check the class constructor.
        """
        bins = kwargs.pop('bins', self.bins)
        if inplace:
            self._update(y=y, bins=bins, **kwargs)
        else:
            new = copy.deepcopy(self)
            new._update(y, bins, **kwargs)
            return new
class Max_P_Classifier(Map_Classifier):
    """
    Max_P Map Classification
    Based on Max_p regionalization algorithm
    Parameters
    ----------
    y : array
        (n,1), values to classify
    k : int
        number of classes required
    initial : int
        number of initial solutions to use prior to swapping
    Attributes
    ----------
    yb : array
        (n,1), bin ids for observations,
    bins : array
        (k,1), the upper bounds of each class
    k : int
        the number of classes
    counts : array
        (k,1), the number of observations falling in each class
    Examples
    --------
    >>> import mapclassify as mc
    >>> cal = mc.load_example()
    >>> mp = mc.Max_P_Classifier(cal)
    >>> mp.bins
    array([   8.7 ,   20.47,   36.68,  110.74, 4111.45])
    >>> mp.counts
    array([29, 9, 5, 7, 8])
    """

    def __init__(self, y, k=K, initial=1000):
        self.k = k
        self.initial = initial
        Map_Classifier.__init__(self, y)
        self.name = "Max_P"

    def _set_bins(self):
        x = self.y.copy()
        k = self.k
        q = quantile(x, k)
        if x.ndim == 1:
            x.shape = (x.size, 1)
        n, tmp = x.shape
        x.sort(axis=0)
        # find best of initial solutions
        solution = 0
        best_tss = x.var() * x.shape[0]
        tss_all = np.zeros((self.initial, 1))
        while solution < self.initial:
            remaining = list(range(n))
            # seed each class with the observation closest to a quantile
            seeds = [
                np.nonzero(di == min(di))[0][0]
                for di in [np.abs(x - qi) for qi in q]
            ]
            rseeds = np.random.permutation(list(range(k))).tolist()
            [remaining.remove(seed) for seed in seeds]
            self.classes = classes = []
            [classes.append([seed]) for seed in seeds]
            while rseeds:
                # grow each class greedily over contiguous sorted positions
                seed_id = rseeds.pop()
                current = classes[seed_id]
                growing = True
                while growing:
                    current = classes[seed_id]
                    low = current[0]
                    high = current[-1]
                    left = low - 1
                    right = high + 1
                    move_made = False
                    if left in remaining:
                        current.insert(0, left)
                        remaining.remove(left)
                        move_made = True
                    if right in remaining:
                        current.append(right)
                        remaining.remove(right)
                        move_made = True
                    if move_made:
                        classes[seed_id] = current
                    else:
                        growing = False
            tss = _fit(self.y, classes)
            tss_all[solution] = tss
            if tss < best_tss:
                best_solution = classes
                best_it = solution
                best_tss = tss
            solution += 1
        classes = best_solution
        self.best_it = best_it
        self.tss = best_tss
        self.a2c = a2c = {}
        self.tss_all = tss_all
        for r, cl in enumerate(classes):
            for a in cl:
                a2c[a] = r
        # refine the best initial solution with boundary swaps
        swapping = True
        while swapping:
            rseeds = np.random.permutation(list(range(k))).tolist()
            total_moves = 0
            while rseeds:
                id = rseeds.pop()
                growing = True
                total_moves = 0
                while growing:
                    target = classes[id]
                    left = target[0] - 1
                    right = target[-1] + 1
                    n_moves = 0
                    if left in a2c:
                        left_class = classes[a2c[left]]
                        if len(left_class) > 1:
                            a = left_class[-1]
                            if self._swap(left_class, target, a):
                                target.insert(0, a)
                                left_class.remove(a)
                                a2c[a] = id
                                n_moves += 1
                    if right in a2c:
                        right_class = classes[a2c[right]]
                        if len(right_class) > 1:
                            a = right_class[0]
                            if self._swap(right_class, target, a):
                                target.append(a)
                                right_class.remove(a)
                                n_moves += 1
                                a2c[a] = id
                    if not n_moves:
                        growing = False
                    total_moves += n_moves
            if not total_moves:
                swapping = False
        xs = self.y.copy()
        xs.sort()
        self.bins = np.array([xs[cl][-1] for cl in classes])

    def _ss(self, class_def):
        """calculates sum of squares for a class"""
        yc = self.y[class_def]
        css = yc - yc.mean()
        css *= css
        return sum(css)

    def _swap(self, class1, class2, a):
        """evaluate cost of moving a from class1 to class2"""
        ss1 = self._ss(class1)
        ss2 = self._ss(class2)
        tss1 = ss1 + ss2
        class1c = copy.copy(class1)
        class2c = copy.copy(class2)
        class1c.remove(a)
        class2c.append(a)
        ss1 = self._ss(class1c)
        ss2 = self._ss(class2c)
        tss2 = ss1 + ss2
        # accept the move only if it does not increase the total SS
        if tss1 < tss2:
            return False
        else:
            return True

    def update(self, y=None, inplace=False, **kwargs):
        """
        Add data or change classification parameters.
        Parameters
        ----------
        y : array
            (n,1) array of data to classify
        inplace : bool
            whether to conduct the update in place or to return a
            copy estimated from the additional specifications.
        Additional parameters provided in **kwargs are passed to the init
        function of the class. For documentation, check the class constructor.
        """
        # carry the current settings forward unless the caller overrides
        # them (consistent with the other classifiers' update methods)
        kwargs.update({'k': kwargs.pop('k', self.k)})
        kwargs.update({'initial': kwargs.pop('initial', self.initial)})
        if inplace:
            # fixed: the original passed an undefined name `bins` here,
            # raising NameError on every call
            self._update(y, **kwargs)
        else:
            new = copy.deepcopy(self)
            new._update(y, **kwargs)
            return new
def _fit(y, classes):
"""Calculate the total sum of squares for a vector y classified into
classes
Parameters
----------
y : array
(n,1), variable to be classified
classes : array
(k,1), integer values denoting class membership
"""
tss = 0
for class_def in classes:
yc = y[class_def]
css = yc - yc.mean()
css *= css
tss += sum(css)
return tss
# Registry of the k-parameterised classifiers that gadf/K_classifiers
# evaluate; keys are the user-facing method names.
kmethods = {}
kmethods["Quantiles"] = Quantiles
kmethods["Fisher_Jenks"] = Fisher_Jenks
kmethods['Natural_Breaks'] = Natural_Breaks
kmethods['Maximum_Breaks'] = Maximum_Breaks
def gadf(y, method="Quantiles", maxk=15, pct=0.8):
    """
    Evaluate the Goodness of Absolute Deviation Fit of a Classifier
    Finds the minimum value of k for which gadf>pct
    Parameters
    ----------
    y : array
        (n, 1) values to be classified
    method : {'Quantiles', 'Fisher_Jenks', 'Maximum_Breaks', 'Natural_Breaks'}
    maxk : int
        maximum value of k to evaluate
    pct : float
        The percentage of GADF to exceed
    Returns
    -------
    k : int
        number of classes
    cl : object
        instance of the classifier at k
    gadf : float
        goodness of absolute deviation fit
    Examples
    --------
    >>> import mapclassify as mc
    >>> cal = mc.load_example()
    >>> qgadf = mc.classifiers.gadf(cal)
    >>> qgadf[0]
    15
    >>> qgadf[-1]
    0.3740257590909283
    Quantiles fail to exceed 0.80 before 15 classes. If we lower the bar to
    0.2 we see quintiles as a result
    >>> qgadf2 = mc.classifiers.gadf(cal, pct = 0.2)
    >>> qgadf2[0]
    5
    >>> qgadf2[-1]
    0.21710231966462412
    >>>
    Notes
    -----
    The GADF is defined as:
    .. math::
        GADF = 1 - \sum_c \sum_{i \in c}
               |y_i - y_{c,med}| / \sum_i |y_i - y_{med}|
    where :math:`y_{med}` is the global median and :math:`y_{c,med}` is
    the median for class :math:`c`.
    See Also
    --------
    K_classifiers
    """
    y = np.array(y)
    # total absolute deviation from the global median (GADF denominator)
    adam = (np.abs(y - np.median(y))).sum()
    for k in range(2, maxk + 1):
        cl = kmethods[method](y, k)
        gadf = 1 - cl.adcm / adam
        # stop at the smallest k whose fit exceeds the target percentage
        if gadf > pct:
            break
    return (k, cl, gadf)
class K_classifiers(object):
    """
    Evaluate all k-classifers and pick optimal based on k and GADF
    Parameters
    ----------
    y : array
        (n,1), values to be classified
    pct : float
        The percentage of GADF to exceed
    Attributes
    ----------
    best : object
        instance of the optimal Map_Classifier
    results : dictionary
        keys are classifier names, values are the Map_Classifier
        instances with the best pct for each classifer
    Examples
    --------
    >>> import mapclassify as mc
    >>> cal = mc.load_example()
    >>> ks = mc.classifiers.K_classifiers(cal)
    >>> ks.best.name
    'Fisher_Jenks'
    >>> ks.best.k
    4
    >>> ks.best.gadf
    0.8481032719908105
    Notes
    -----
    This can be used to suggest a classification scheme.
    See Also
    --------
    gadf
    """

    def __init__(self, y, pct=0.8):
        results = {}
        best = gadf(y, "Fisher_Jenks", maxk=len(y) - 1, pct=pct)
        # NOTE(review): gadf returns (k, classifier, gadf).  The two
        # assignments below read best[0] (k) into pct0 and best[-1]
        # (the gadf value) into k0 -- the names look swapped, so the
        # comparison further down mixes k against gadf.  Preserved
        # as-is to avoid changing documented results; confirm upstream.
        pct0 = best[0]
        k0 = best[-1]
        keys = list(kmethods.keys())
        keys.remove("Fisher_Jenks")
        results["Fisher_Jenks"] = best
        for method in keys:
            results[method] = gadf(y, method, maxk=len(y) - 1, pct=pct)
            k1 = results[method][0]
            pct1 = results[method][-1]
            # prefer fewer classes; break ties on the better fit
            if (k1 < k0) or (k1 == k0 and pct0 < pct1):
                best = results[method]
                k0 = k1
                pct0 = pct1
        self.results = results
        self.best = best[1]
|
pysal/mapclassify
|
mapclassify/classifiers.py
|
_fisher_jenks_means
|
python
|
def _fisher_jenks_means(values, classes=5, sort=True):
    """Jenks Optimal (Natural Breaks) via dynamic programming.

    Returns an array of ``classes + 1`` break values (including the data
    minimum at index 0 and maximum at index ``classes``) chosen to
    minimise within-class variance.  ``values`` is sorted in place when
    ``sort`` is True.
    """
    if sort:
        values.sort()
    n_data = len(values)
    # mat1[l, j]: index of the optimal lower boundary for class j using
    # the first l values; mat2[l, j]: the corresponding variance cost
    mat1 = np.zeros((n_data + 1, classes + 1), dtype=np.int32)
    mat2 = np.zeros((n_data + 1, classes + 1), dtype=np.float32)
    mat1[1, 1:] = 1
    mat2[2:, 1:] = np.inf
    v = np.float32(0)
    for l in range(2, len(values) + 1):
        # running sums give the variance of values[i3-1:l] incrementally
        s1 = np.float32(0)
        s2 = np.float32(0)
        w = np.float32(0)
        for m in range(1, l + 1):
            i3 = l - m + 1
            val = np.float32(values[i3 - 1])
            s2 += val * val
            s1 += val
            w += np.float32(1)
            v = s2 - (s1 * s1) / w
            i4 = i3 - 1
            if i4 != 0:
                for j in range(2, classes + 1):
                    # keep the split if it does not worsen the cost
                    if mat2[l, j] >= (v + mat2[i4, j - 1]):
                        mat1[l, j] = i3
                        mat2[l, j] = v + mat2[i4, j - 1]
        mat1[l, 1] = 1
        mat2[l, 1] = v
    # backtrack through mat1 to recover the class break values
    k = len(values)
    kclass = np.zeros(classes + 1, dtype=values.dtype)
    kclass[classes] = values[len(values) - 1]
    kclass[0] = values[0]
    for countNum in range(classes, 1, -1):
        pivot = mat1[k, countNum]
        id = int(pivot - 2)
        kclass[countNum - 1] = values[id]
        k = int(pivot - 1)
    return kclass
|
Jenks Optimal (Natural Breaks) algorithm implemented in Python.
Notes
-----
The original Python code comes from here:
http://danieljlewis.org/2010/06/07/jenks-natural-breaks-algorithm-in-python/
and is based on a JAVA and Fortran code available here:
https://stat.ethz.ch/pipermail/r-sig-geo/2006-March/000811.html
Returns class breaks such that classes are internally homogeneous while
assuring heterogeneity among classes.
|
train
|
https://github.com/pysal/mapclassify/blob/5b22ec33f5802becf40557614d90cd38efa1676e/mapclassify/classifiers.py#L336-L390
| null |
"""
A module of classification schemes for choropleth mapping.
"""
__author__ = "Sergio J. Rey"
# public API exported by `from mapclassify.classifiers import *`
__all__ = [
    'Map_Classifier', 'quantile', 'Box_Plot', 'Equal_Interval', 'Fisher_Jenks',
    'Fisher_Jenks_Sampled', 'Jenks_Caspall', 'Jenks_Caspall_Forced',
    'Jenks_Caspall_Sampled', 'Max_P_Classifier', 'Maximum_Breaks',
    'Natural_Breaks', 'Quantiles', 'Percentiles', 'Std_Mean', 'User_Defined',
    'gadf', 'K_classifiers', 'HeadTail_Breaks', 'CLASSIFIERS'
]
# names of all supported classifier classes
CLASSIFIERS = ('Box_Plot', 'Equal_Interval', 'Fisher_Jenks',
               'Fisher_Jenks_Sampled', 'HeadTail_Breaks', 'Jenks_Caspall',
               'Jenks_Caspall_Forced', 'Jenks_Caspall_Sampled',
               'Max_P_Classifier', 'Maximum_Breaks', 'Natural_Breaks',
               'Quantiles', 'Percentiles', 'Std_Mean', 'User_Defined')
K = 5  # default number of classes in any map scheme with this as an argument
import numpy as np
import scipy.stats as stats
import scipy as sp
import copy
from scipy.cluster.vq import kmeans as KMEANS
from warnings import warn as Warn
# numba is an optional dependency: fall back to a no-op decorator when it
# is unavailable so decorated callables still work (just uncompiled).
try:
    from numba import jit
except ImportError:
    def jit(func):
        # identity decorator standing in for numba.jit
        return func
def headTail_breaks(values, cuts):
    """
    head tail breaks helper function

    Recursively splits `values` at its mean, appending each mean to `cuts`
    and recursing on the "head" (values >= mean).

    Parameters
    ----------
    values : array-like
        values to split
    cuts : list
        accumulator for the break values (mutated and returned)

    Returns
    -------
    cuts : list of successive head means
    """
    values = np.array(values)
    mean = np.mean(values)
    cuts.append(mean)
    if len(values) > 1:
        head = values[values >= mean]
        # Guard against infinite recursion: when all remaining values are
        # equal, the filter keeps every element, so recursing would never
        # terminate. Recurse only while the head actually shrinks.
        if len(head) < len(values):
            return headTail_breaks(head, cuts)
    return cuts
def quantile(y, k=4):
    """
    Calculates the quantiles for an array
    Parameters
    ----------
    y : array
        (n,1), values to classify
    k : int
        number of quantiles
    Returns
    -------
    q : array
        (n,1), quantile values
    Notes
    -----
    If ties collapse two or more quantile cut points into a single value,
    the duplicates are dropped: the effective number of classes shrinks
    below k and a warning is issued.
    Examples
    --------
    >>> import numpy as np
    >>> import mapclassify as mc
    >>> x = np.arange(1000)
    >>> mc.classifiers.quantile(x)
    array([249.75, 499.5 , 749.25, 999.  ])
    >>> mc.classifiers.quantile(x, k = 3)
    array([333., 666., 999.])
    >>> x = [1.0] * 100
    >>> x.extend([3.0] * 40)
    >>> len(x)
    140
    >>> y = np.array(x)
    >>> mc.classifiers.quantile(y)
    array([1., 3.])
    """
    step = 100. / k
    cut_points = np.arange(step, 100 + step, step)
    # float round-off can push the final percentile past 100; clamp it
    if cut_points[-1] > 100.0:
        cut_points[-1] = 100.0
    q = np.unique([stats.scoreatpercentile(y, pct) for pct in cut_points])
    n_unique = len(q)
    if n_unique < k:
        Warn('Warning: Not enough unique values in array to form k classes',
             UserWarning)
        Warn('Warning: setting k to %d' % n_unique, UserWarning)
    return q
def binC(y, bins):
    """
    Bin categorical/qualitative data
    Parameters
    ----------
    y : array
        (n,q), categorical values
    bins : array
        (k,1), unique values associated with each bin
    Return
    ------
    b : array
        (n,q), bin membership, values between 0 and k-1
    Examples
    --------
    >>> import numpy as np
    >>> import mapclassify as mc
    >>> np.random.seed(1)
    >>> x = np.random.randint(2, 8, (10, 3))
    >>> bins = list(range(2, 8))
    >>> x
    array([[7, 5, 6],
           [2, 3, 5],
           [7, 2, 2],
           [3, 6, 7],
           [6, 3, 4],
           [6, 7, 4],
           [6, 5, 6],
           [4, 6, 7],
           [4, 6, 3],
           [3, 2, 7]])
    >>> y = mc.classifiers.binC(x, bins)
    >>> y
    array([[5, 3, 4],
           [0, 1, 3],
           [5, 0, 0],
           [1, 4, 5],
           [4, 1, 2],
           [4, 5, 2],
           [4, 3, 4],
           [2, 4, 5],
           [2, 4, 1],
           [1, 0, 5]])
    """
    if np.ndim(y) == 1:
        n, k = np.shape(y)[0], 1
    else:
        n, k = np.shape(y)
    b = np.zeros((n, k), dtype='int')
    for idx, category in enumerate(bins):
        b[np.nonzero(y == category)] = idx
    # warn about any observed value that has no matching bin
    for val in set(y.flatten()) - set(bins):
        Warn('value not in bin: {}'.format(val), UserWarning)
        Warn('bins: {}'.format(bins), UserWarning)
    return b
def bin(y, bins):
    """
    bin interval/ratio data
    Parameters
    ----------
    y : array
        (n,q), values to bin
    bins : array
        (k,1), upper bounds of each bin (monotonic)
    Returns
    -------
    b : array
        (n,q), values of values between 0 and k-1
    Examples
    --------
    >>> import numpy as np
    >>> import mapclassify as mc
    >>> np.random.seed(1)
    >>> x = np.random.randint(2, 20, (10, 3))
    >>> bins = [10, 15, 20]
    >>> b = mc.classifiers.bin(x, bins)
    >>> x
    array([[ 7, 13, 14],
           [10, 11, 13],
           [ 7, 17,  2],
           [18,  3, 14],
           [ 9, 15,  8],
           [ 7, 13, 12],
           [16,  6, 11],
           [19,  2, 15],
           [11, 11,  9],
           [ 3,  2, 19]])
    >>> b
    array([[0, 1, 1],
           [0, 1, 1],
           [0, 2, 0],
           [2, 0, 1],
           [0, 1, 0],
           [0, 1, 1],
           [2, 0, 1],
           [2, 0, 1],
           [1, 1, 0],
           [0, 0, 2]])
    """
    if np.ndim(y) == 1:
        n, k = np.shape(y)[0], 1
    else:
        n, k = np.shape(y)
    b = np.zeros((n, k), dtype='int')
    # walk upper bounds from largest to smallest so that later (smaller)
    # thresholds overwrite earlier ones for values below them
    uppers = list(bins)
    for idx in range(len(uppers) - 1, -1, -1):
        b[np.nonzero(y <= uppers[idx])] = idx
    return b
def bin1d(x, bins):
    """
    Place values of a 1-d array into bins and determine counts of values in
    each bin
    Parameters
    ----------
    x : array
        (n, 1), values to bin
    bins : array
        (k,1), upper bounds of each bin (monotonic)
    Returns
    -------
    binIds : array
        1-d array of integer bin Ids
    counts : int
        number of elements of x falling in each bin
    Examples
    --------
    >>> import numpy as np
    >>> import mapclassify as mc
    >>> x = np.arange(100, dtype = 'float')
    >>> bins = [25, 74, 100]
    >>> binIds, counts = mc.classifiers.bin1d(x, bins)
    >>> counts
    array([26, 49, 25])
    """
    # pair each upper bound with the previous bound as its (open) lower edge
    lowers = [-float("inf")]
    lowers.extend(bins[0:-1])
    binIds = np.zeros(x.shape, dtype='int')
    for pos, (lo, hi) in enumerate(zip(lowers, bins)):
        # each x contributes its bin index exactly once (bins are disjoint)
        binIds += (x > lo) * (x <= hi) * pos
    counts = np.bincount(binIds, minlength=len(bins))
    return (binIds, counts)
def load_example():
    """
    Helper function for doc tests

    Returns the California employment example data set shipped with the
    package (mapclassify.datasets.calemp).
    """
    from .datasets import calemp
    return calemp.load()
def _kmeans(y, k=5):
    """
    Helper function to do kmeans in one dimension

    Returns (class_ids, cuts, fit, centroids) where fit is the sum of
    squared deviations of each value from its assigned centroid.
    """
    y = y * 1.  # KMEANS needs float or double dtype
    centroids = KMEANS(y, k)[0]
    centroids.sort()
    # NOTE(review): the bare except below silently falls back to the
    # broadcast form when the first argmin fails (presumably for 1-d y);
    # narrowing it to the specific exception would be safer — confirm.
    try:
        class_ids = np.abs(y - centroids).argmin(axis=1)
    except:
        class_ids = np.abs(y[:, np.newaxis] - centroids).argmin(axis=1)
    uc = np.unique(class_ids)
    # upper bound of each occupied class becomes a cut value
    cuts = np.array([y[class_ids == c].max() for c in uc])
    y_cent = np.zeros_like(y)
    for c in uc:
        y_cent[class_ids == c] = centroids[c]
    # within-class squared deviation as the fit measure
    diffs = y - y_cent
    diffs *= diffs
    return class_ids, cuts, diffs.sum(), centroids
def natural_breaks(values, k=5):
    """
    natural breaks helper function
    Jenks natural breaks is kmeans in one dimension

    Returns (centroids, class_ids, fit, cuts) from a 1-d k-means run;
    k is reduced (with a warning) when there are fewer unique values.
    """
    values = np.array(values)
    uv = np.unique(values)
    uvk = len(uv)
    if uvk < k:
        # cannot form more classes than there are distinct values
        Warn('Warning: Not enough unique values in array to form k classes',
             UserWarning)
        Warn('Warning: setting k to %d' % uvk, UserWarning)
        k = uvk
    kres = _kmeans(values, k)
    sids = kres[-1]  # centroids
    fit = kres[-2]
    class_ids = kres[0]
    cuts = kres[1]
    return (sids, class_ids, fit, cuts)
@jit
class Map_Classifier(object):
"""
Abstract class for all map classifications :cite:`Slocum_2009`
For an array :math:`y` of :math:`n` values, a map classifier places each
value :math:`y_i` into one of :math:`k` mutually exclusive and exhaustive
classes. Each classifer defines the classes based on different criteria,
but in all cases the following hold for the classifiers in PySAL:
.. math:: C_j^l < y_i \le C_j^u \ \forall i \in C_j
where :math:`C_j` denotes class :math:`j` which has lower bound
:math:`C_j^l` and upper bound :math:`C_j^u`.
Map Classifiers Supported
* :class:`mapclassify.classifiers.Box_Plot`
* :class:`mapclassify.classifiers.Equal_Interval`
* :class:`mapclassify.classifiers.Fisher_Jenks`
* :class:`mapclassify.classifiers.Fisher_Jenks_Sampled`
* :class:`mapclassify.classifiers.HeadTail_Breaks`
* :class:`mapclassify.classifiers.Jenks_Caspall`
* :class:`mapclassify.classifiers.Jenks_Caspall_Forced`
* :class:`mapclassify.classifiers.Jenks_Caspall_Sampled`
* :class:`mapclassify.classifiers.Max_P_Classifier`
* :class:`mapclassify.classifiers.Maximum_Breaks`
* :class:`mapclassify.classifiers.Natural_Breaks`
* :class:`mapclassify.classifiers.Quantiles`
* :class:`mapclassify.classifiers.Percentiles`
* :class:`mapclassify.classifiers.Std_Mean`
* :class:`mapclassify.classifiers.User_Defined`
Utilities:
In addition to the classifiers, there are several utility functions that
can be used to evaluate the properties of a specific classifier,
or for automatic selection of a classifier and
number of classes.
* :func:`mapclassify.classifiers.gadf`
* :class:`mapclassify.classifiers.K_classifiers`
"""
    def __init__(self, y):
        """Store the data, estimate the breaks and compute fit summaries."""
        # flatten so downstream binning logic can assume a 1-d vector
        y = np.asarray(y).flatten()
        self.name = 'Map Classifier'
        self.y = y
        self._classify()
        self._summary()

    def _summary(self):
        # per-class membership lists plus goodness-of-fit statistics
        yb = self.yb
        self.classes = [np.nonzero(yb == c)[0].tolist() for c in range(self.k)]
        self.tss = self.get_tss()
        self.adcm = self.get_adcm()
        self.gadf = self.get_gadf()

    def _classify(self):
        # subclasses supply _set_bins; the binning step itself is shared
        self._set_bins()
        self.yb, self.counts = bin1d(self.y, self.bins)
    def _update(self, data, *args, **kwargs):
        """
        The only thing that *should* happen in this function is
        1. input sanitization for pandas
        2. classification/reclassification.
        Using their __init__ methods, all classifiers can re-classify given
        different input parameters or additional data.
        If you've got a cleverer updating equation than the initial estimation
        equation, remove the call to self.__init__ below and replace it with
        the updating function.
        """
        if data is not None:
            data = np.asarray(data).flatten()
            # merge the new observations with the existing ones
            data = np.append(data.flatten(), self.y)
        else:
            data = self.y
        # re-estimate from scratch on the combined data
        self.__init__(data, *args, **kwargs)
    @classmethod
    def make(cls, *args, **kwargs):
        """
        Configure and create a classifier that will consume data and produce
        classifications, given the configuration options specified by this
        function.
        Note that this like a *partial application* of the relevant class
        constructor. `make` creates a function that returns classifications; it
        does not actually do the classification.
        If you want to classify data directly, use the appropriate class
        constructor, like Quantiles, Max_Breaks, etc.
        If you *have* a classifier object, but want to find which bins new data
        falls into, use find_bin.
        Parameters
        ----------
        *args           : required positional arguments
                          all positional arguments required by the classifier,
                          excluding the input data.
        rolling         : bool
                          a boolean configuring the outputted classifier to use
                          a rolling classifier rather than a new classifier for
                          each input. If rolling, this adds the current data to
                          all of the previous data in the classifier, and
                          rebalances the bins, like a running median
                          computation.
        return_object   : bool
                          a boolean configuring the outputted classifier to
                          return the classifier object or not
        return_bins     : bool
                          a boolean configuring the outputted classifier to
                          return the bins/breaks or not
        return_counts   : bool
                          a boolean configuring the outputted classifier to
                          return the histogram of objects falling into each bin
                          or not
        Returns
        -------
        A function that consumes data and returns their bins (and object,
        bins/breaks, or counts, if requested).
        Note
        ----
        This is most useful when you want to run a classifier many times
        with a given configuration, such as when classifying many columns of an
        array or dataframe using the same configuration.
        Examples
        --------
        >>> import libpysal as ps
        >>> import mapclassify as mc
        >>> import geopandas as gpd
        >>> df = gpd.read_file(ps.examples.get_path('columbus.dbf'))
        >>> classifier = mc.Quantiles.make(k=9)
        >>> cl = df[['HOVAL', 'CRIME', 'INC']].apply(classifier)
        >>> cl["HOVAL"].values[:10]
        array([8, 7, 2, 4, 1, 3, 8, 5, 7, 8])
        >>> cl["CRIME"].values[:10]
        array([0, 1, 3, 4, 6, 2, 0, 5, 3, 4])
        >>> cl["INC"].values[:10]
        array([7, 8, 5, 0, 3, 5, 0, 3, 6, 4])
        >>> import pandas as pd; from numpy import linspace as lsp
        >>> data = [lsp(3,8,num=10), lsp(10, 0, num=10), lsp(-5, 15, num=10)]
        >>> data = pd.DataFrame(data).T
        >>> data.apply(mc.Quantiles.make(rolling=True))
           0  1  2
        0  0  4  0
        1  0  4  0
        2  1  4  0
        3  1  3  0
        4  2  2  1
        5  2  1  2
        6  3  0  4
        7  3  0  4
        8  4  0  4
        9  4  0  4
        >>> dbf = ps.io.open(ps.examples.get_path('baltim.dbf'))
        >>> data = dbf.by_col_array('PRICE', 'LOTSZ', 'SQFT')
        >>> my_bins = [1, 10, 20, 40, 80]
        >>> cl = [mc.User_Defined.make(bins=my_bins)(a) for a in data.T]
        >>> len(cl)
        3
        >>> cl[0][:10]
        array([4, 5, 5, 5, 4, 4, 5, 4, 4, 5])
        """
        # only flag overrides return flag
        to_annotate = copy.deepcopy(kwargs)
        return_object = kwargs.pop('return_object', False)
        return_bins = kwargs.pop('return_bins', False)
        return_counts = kwargs.pop('return_counts', False)
        rolling = kwargs.pop('rolling', False)
        if rolling:
            # just initialize a fake classifier
            data = list(range(10))
            cls_instance = cls(data, *args, **kwargs)
            # and empty it, since we'll be using the update
            cls_instance.y = np.array([])
        else:
            cls_instance = None
        # wrap init in a closure to make a consumer.
        # Qc Na: "Objects/Closures are poor man's Closures/Objects"
        def classifier(data, cls_instance=cls_instance):
            if rolling:
                # accumulate into the shared instance and re-bin the input
                cls_instance.update(data, inplace=True, **kwargs)
                yb = cls_instance.find_bin(data)
            else:
                # fresh classifier per call
                cls_instance = cls(data, *args, **kwargs)
                yb = cls_instance.yb
            # assemble the requested outputs, dropping the unrequested ones
            outs = [yb, None, None, None]
            outs[1] = cls_instance if return_object else None
            outs[2] = cls_instance.bins if return_bins else None
            outs[3] = cls_instance.counts if return_counts else None
            outs = [a for a in outs if a is not None]
            if len(outs) == 1:
                return outs[0]
            else:
                return outs
        # for debugging/jic, keep around the kwargs.
        # in future, we might want to make this a thin class, so that we can
        # set a custom repr. Call the class `Binner` or something, that's a
        # pre-configured Classifier that just consumes data, bins it, &
        # possibly updates the bins.
        classifier._options = to_annotate
        return classifier
    def update(self, y=None, inplace=False, **kwargs):
        """
        Add data or change classification parameters.
        Parameters
        ----------
        y : array
            (n,1) array of data to classify
        inplace : bool
            whether to conduct the update in place or to return a copy
            estimated from the additional specifications.
        Additional parameters provided in **kwargs are passed to the init
        function of the class. For documentation, check the class constructor.
        """
        # keep the current k unless the caller overrides it
        kwargs.update({'k': kwargs.pop('k', self.k)})
        if inplace:
            self._update(y, **kwargs)
        else:
            # re-estimate on a deep copy so this instance stays untouched
            new = copy.deepcopy(self)
            new._update(y, **kwargs)
            return new
    def __str__(self):
        # human-readable classification table
        st = self._table_string()
        return st

    def __repr__(self):
        # same table form as __str__
        return self._table_string()

    def __call__(self, *args, **kwargs):
        """
        This will allow the classifier to be called like it's a function.
        Whether or not we want to make this be "find_bin" or "update" is a
        design decision.
        I like this as find_bin, since a classifier's job should be to classify
        the data given to it using the rules estimated from the `_classify()`
        function.
        """
        # NOTE(review): **kwargs is accepted but not forwarded to find_bin
        return self.find_bin(*args)
    def get_tss(self):
        """
        Total sum of squares around class means
        Returns sum of squares over all class means
        """
        tss = 0
        for class_def in self.classes:
            # empty classes contribute nothing
            if len(class_def) > 0:
                yc = self.y[class_def]
                css = yc - yc.mean()
                css *= css
                tss += sum(css)
        return tss

    def _set_bins(self):
        """Hook for subclasses to estimate self.bins; base class is a no-op."""
        pass
def get_adcm(self):
    """
    Absolute deviation around class median (ADCM).

    Calculates the absolute deviations of each observation about its class
    median as a measure of fit for the classification method.

    Returns sum of ADCM over all classes
    """
    total = 0
    for members in self.classes:
        if len(members) > 0:
            vals = self.y[members]
            total += sum(np.abs(vals - np.median(vals)))
    return total
def get_gadf(self):
    """
    Goodness of absolute deviation of fit
    """
    # total absolute deviation about the global median
    adam = np.abs(self.y - np.median(self.y)).sum()
    return 1 - self.adcm / adam
def _table_string(self, width=12, decimal=3):
    """Build the fixed-width text table shown by __str__/__repr__."""
    # first pass: find the widest formatted bin value to size the columns
    fmt = ".%df" % decimal
    fmt = "%" + fmt
    largest = max([len(fmt % i) for i in self.bins])
    width = largest  # note: the width parameter is overridden here
    fmt = "%d.%df" % (width, decimal)
    fmt = "%" + fmt
    # column headers: Lower / (spacer) / Upper / Count
    h1 = "Lower"
    h1 = h1.center(largest)
    h2 = " "
    h2 = h2.center(10)
    h3 = "Upper"
    h3 = h3.center(largest + 1)
    # size the Count column from the largest count, plus padding
    largest = "%d" % max(self.counts)
    largest = len(largest) + 15
    h4 = "Count"
    h4 = h4.rjust(largest)
    table = []
    header = h1 + h2 + h3 + h4
    table.append(header)
    table.append("=" * len(header))
    # one row per class: "lower < x[i] <= upper   count"
    for i, up in enumerate(self.bins):
        if i == 0:
            # first class has no lower bound, pad with blanks
            left = " " * width
            left += " x[i] <= "
        else:
            left = fmt % self.bins[i - 1]
            left += " < x[i] <= "
        right = fmt % self.bins[i]
        row = left + right
        cnt = "%d" % self.counts[i]
        cnt = cnt.rjust(largest)
        row += cnt
        table.append(row)
    # title: classifier name centered over the last (widest) row
    name = self.name
    top = name.center(len(row))
    table.insert(0, top)
    table.insert(1, " ")
    table = "\n".join(table)
    return table
def find_bin(self, x):
    """
    Sort input or inputs according to the current bin estimate

    Parameters
    ----------
    x : array or numeric
        a value or array of values to fit within the estimated
        bins

    Returns
    -------
    a bin index or array of bin indices that classify the input into one of
    the classifiers' bins.

    Note that this differs from numpy.digitize(x, self.bins, right=True):
    values above the top break are clamped into the last bin instead of
    being assigned index k+1 (values below the lowest break still map to 0).
    """
    flat = np.asarray(x).flatten()
    idx = np.digitize(flat, self.bins, right=True)
    top = len(self.bins)
    # clamp out-of-range values into the highest class
    if idx.max() == top:
        idx[idx == top] = top - 1
    return idx
class HeadTail_Breaks(Map_Classifier):
    """
    Head/tail Breaks Map Classification for Heavy-tailed Distributions

    Parameters
    ----------
    y : array
        (n,1), values to classify

    Attributes
    ----------
    yb : array
        (n,1), bin ids for observations
    bins : array
        (k,1), the upper bounds of each class
    k : int
        the number of classes
    counts : array
        (k,1), the number of observations falling in each class

    Notes
    -----
    Head/tail Breaks is a classification method developed for data with a
    heavy-tailed distribution. Implementation based on contributions by
    Alessandra Sozzi <alessandra.sozzi@gmail.com>.
    For theoretical details see :cite:`Jiang_2013`.
    """

    def __init__(self, y):
        Map_Classifier.__init__(self, y)
        self.name = 'HeadTail_Breaks'

    def _set_bins(self):
        # the recursive helper splits on the mean until the head thins out
        cuts = headTail_breaks(self.y.copy(), [])
        self.bins = np.array(cuts)
        self.k = len(self.bins)
class Equal_Interval(Map_Classifier):
    """
    Equal Interval Classification

    Parameters
    ----------
    y : array
        (n,1), values to classify
    k : int
        number of classes required

    Attributes
    ----------
    yb : array
        (n,1), bin ids for observations; yb[i] = j for j>=1 if
        bins[j-1] < y[i] <= bins[j], yb[i] = 0 otherwise
    bins : array
        (k,1), the upper bounds of each class
    k : int
        the number of classes
    counts : array
        (k,1), the number of observations falling in each class

    Notes
    -----
    Intervals defined to have equal width:

    .. math::

        bins_j = min(y)+w*(j+1)

    with :math:`w=\\frac{max(y)-min(j)}{k}`
    """

    def __init__(self, y, k=K):
        """see class docstring"""
        self.k = k
        Map_Classifier.__init__(self, y)
        self.name = 'Equal Interval'

    def _set_bins(self):
        lo = min(self.y)
        hi = max(self.y)
        step = (hi - lo) * 1. / self.k
        cuts = np.arange(lo + step, hi + step, step)
        # floating point can produce one extra cut; trim and pin the top
        if len(cuts) > self.k:
            cuts = cuts[0:self.k]
        cuts[-1] = hi
        self.bins = cuts.copy()
class Percentiles(Map_Classifier):
    """
    Percentiles Map Classification

    Parameters
    ----------
    y : array
        attribute to classify
    pct : array
        percentiles, default=[1, 10, 50, 90, 99, 100]

    Attributes
    ----------
    yb : array
        bin ids for observations (numpy array n x 1)
    bins : array
        the upper bounds of each class (numpy array k x 1)
    k : int
        the number of classes
    counts : int
        the number of observations falling in each class
        (numpy array k x 1)
    """

    def __init__(self, y, pct=[1, 10, 50, 90, 99, 100]):
        # mutable default kept for interface compatibility; never mutated here
        self.pct = pct
        Map_Classifier.__init__(self, y)
        self.name = 'Percentiles'

    def _set_bins(self):
        scores = [stats.scoreatpercentile(self.y, p) for p in self.pct]
        self.bins = np.array(scores)
        self.k = len(self.bins)

    def update(self, y=None, inplace=False, **kwargs):
        """
        Add data or change classification parameters.

        Parameters
        ----------
        y : array
            (n,1) array of data to classify
        inplace : bool
            whether to conduct the update in place or to return a copy
            estimated from the additional specifications.

        Additional parameters provided in **kwargs are passed to the init
        function of the class. For documentation, check the class constructor.
        """
        kwargs.setdefault('pct', self.pct)
        if not inplace:
            clone = copy.deepcopy(self)
            clone._update(y, **kwargs)
            return clone
        self._update(y, **kwargs)
class Box_Plot(Map_Classifier):
    """
    Box_Plot Map Classification

    Parameters
    ----------
    y : array
        attribute to classify
    hinge : float
        multiplier for IQR

    Attributes
    ----------
    yb : array
        (n,1), bin ids for observations
    bins : array
        (n,1), the upper bounds of each class (monotonic)
    k : int
        the number of classes
    counts : array
        (k,1), the number of observations falling in each class
    low_outlier_ids : array
        indices of observations that are low outliers
    high_outlier_ids : array
        indices of observations that are high outliers

    Notes
    -----
    The bins are set as follows::

        bins[0] = q[0]-hinge*IQR
        bins[1] = q[0]
        bins[2] = q[1]
        bins[3] = q[2]
        bins[4] = q[2]+hinge*IQR
        bins[5] = inf (see Notes)

    where q is an array of the first three quartiles of y and
    IQR=q[2]-q[0].

    If q[2]+hinge*IQR > max(y) there will only be 5 classes and no high
    outliers, otherwise, there will be 6 classes and at least one high
    outlier.
    """

    def __init__(self, y, hinge=1.5):
        """
        Parameters
        ----------
        y : array (n,1)
            attribute to classify
        hinge : float
            multiple of inter-quartile range (default=1.5)
        """
        self.hinge = hinge
        Map_Classifier.__init__(self, y)
        self.name = 'Box Plot'

    def _set_bins(self):
        y = self.y
        cuts = [stats.scoreatpercentile(y, p) for p in (25, 50, 75, 100)]
        # IQR spans the 25th to the 75th percentile
        self.iqr = cuts[-2] - cuts[0]
        whisker = self.hinge * self.iqr
        low_fence = cuts[0] - whisker
        high_fence = cuts[-2] + whisker
        if high_fence < cuts[-1]:
            # data exceed the upper fence: add a sixth (outlier) class
            cuts.insert(-1, high_fence)
        else:
            # no high outliers: the upper fence caps the last class
            cuts[-1] = high_fence
        cuts.insert(0, low_fence)
        self.bins = np.array(cuts)
        self.k = len(cuts)

    def _classify(self):
        Map_Classifier._classify(self)
        # record which observations fall outside the fences
        self.low_outlier_ids = np.nonzero(self.yb == 0)[0]
        self.high_outlier_ids = np.nonzero(self.yb == 5)[0]

    def update(self, y=None, inplace=False, **kwargs):
        """
        Add data or change classification parameters.

        Parameters
        ----------
        y : array
            (n,1) array of data to classify
        inplace : bool
            whether to conduct the update in place or to return a
            copy estimated from the additional specifications.

        Additional parameters provided in **kwargs are passed to the init
        function of the class. For documentation, check the class constructor.
        """
        kwargs.setdefault('hinge', self.hinge)
        if not inplace:
            clone = copy.deepcopy(self)
            clone._update(y, **kwargs)
            return clone
        self._update(y, **kwargs)
class Quantiles(Map_Classifier):
    """
    Quantile Map Classification

    Each class contains (approximately) the same number of observations.

    Parameters
    ----------
    y : array
        (n,1), values to classify
    k : int
        number of classes required

    Attributes
    ----------
    yb : array
        (n,1), bin ids for observations; yb[i] = j for j>=1 if
        bins[j-1] < y[i] <= bins[j], yb[i] = 0 otherwise
    bins : array
        (k,1), the upper bounds of each class
    k : int
        the number of classes
    counts : array
        (k,1), the number of observations falling in each class
    """

    def __init__(self, y, k=K):
        self.k = k
        Map_Classifier.__init__(self, y)
        self.name = 'Quantiles'

    def _set_bins(self):
        # delegate to the module-level quantile helper
        self.bins = quantile(self.y, k=self.k)
class Std_Mean(Map_Classifier):
    """
    Standard Deviation and Mean Map Classification

    Breaks are placed at the sample mean plus/minus multiples of the
    sample standard deviation.

    Parameters
    ----------
    y : array
        (n,1), values to classify
    multiples : array
        the multiples of the standard deviation to add/subtract from
        the sample mean to define the bins, default=[-2,-1,1,2]

    Attributes
    ----------
    yb : array
        (n,1), bin ids for observations
    bins : array
        (k,1), the upper bounds of each class
    k : int
        the number of classes
    counts : array
        (k,1), the number of observations falling in each class
    """

    def __init__(self, y, multiples=[-2, -1, 1, 2]):
        # mutable default kept for interface compatibility; never mutated
        self.multiples = multiples
        Map_Classifier.__init__(self, y)
        self.name = 'Std_Mean'

    def _set_bins(self):
        center = self.y.mean()
        spread = self.y.std(ddof=1)  # sample standard deviation
        cuts = [center + spread * m for m in self.multiples]
        # guarantee the final break covers the observed maximum
        if cuts[-1] < self.y.max():
            cuts.append(self.y.max())
        self.bins = np.array(cuts)
        self.k = len(cuts)

    def update(self, y=None, inplace=False, **kwargs):
        """
        Add data or change classification parameters.

        Parameters
        ----------
        y : array
            (n,1) array of data to classify
        inplace : bool
            whether to conduct the update in place or to return a copy
            estimated from the additional specifications.

        Additional parameters provided in **kwargs are passed to the init
        function of the class. For documentation, check the class constructor.
        """
        kwargs.setdefault('multiples', self.multiples)
        if not inplace:
            clone = copy.deepcopy(self)
            clone._update(y, **kwargs)
            return clone
        self._update(y, **kwargs)
class Maximum_Breaks(Map_Classifier):
    """
    Maximum Breaks Map Classification

    Class boundaries are placed at the midpoints of the k-1 largest gaps
    between sorted values.

    Parameters
    ----------
    y : array
        (n, 1), values to classify
    k : int
        number of classes required
    mindiff : float
        The minimum difference between class breaks

    Attributes
    ----------
    yb : array
        (n, 1), bin ids for observations
    bins : array
        (k, 1), the upper bounds of each class
    k : int
        the number of classes
    counts : array
        (k, 1), the number of observations falling in each class
    """

    def __init__(self, y, k=5, mindiff=0):
        self.k = k
        self.mindiff = mindiff
        Map_Classifier.__init__(self, y)
        self.name = 'Maximum_Breaks'

    def _set_bins(self):
        xs = self.y.copy()
        k = self.k
        xs.sort()
        # gaps between consecutive sorted values, filtered by mindiff
        d = xs[1:] - xs[:-1]
        diffs = d[np.nonzero(d > self.mindiff)]
        # np.unique replaces the deprecated (and now removed) sp.unique
        # alias; both return the sorted distinct gap sizes
        diffs = np.unique(diffs)
        k1 = k - 1
        if len(diffs) > k1:
            # keep only the k-1 largest distinct gaps
            diffs = diffs[-k1:]
        mp = []
        self.cids = []
        for diff in diffs:
            # positions where this gap occurs (np.nonzero returns a tuple)
            positions = np.nonzero(d == diff)
            for pos in positions:
                self.cids.append(pos[0])
                # break at the midpoint of the first occurrence of the gap
                midpoint = (xs[pos] + xs[pos + 1]) / 2.
                mp.append(midpoint[0])
        mp.append(xs[-1])
        mp.sort()
        self.bins = np.array(mp)

    def update(self, y=None, inplace=False, **kwargs):
        """
        Add data or change classification parameters.

        Parameters
        ----------
        y : array
            (n,1) array of data to classify
        inplace : bool
            whether to conduct the update in place or to return a copy
            estimated from the additional specifications.

        Additional parameters provided in **kwargs are passed to the init
        function of the class. For documentation, check the class constructor.
        """
        kwargs.setdefault('k', self.k)
        kwargs.setdefault('mindiff', self.mindiff)
        if inplace:
            self._update(y, **kwargs)
        else:
            new = copy.deepcopy(self)
            new._update(y, **kwargs)
            return new
class Natural_Breaks(Map_Classifier):
    """
    Natural Breaks Map Classification

    Parameters
    ----------
    y : array
        (n,1), values to classify
    k : int
        number of classes required
    initial : int
        number of initial solutions to generate, (default=100)

    Attributes
    ----------
    yb : array
        (n,1), bin ids for observations
    bins : array
        (k,1), the upper bounds of each class
    k : int
        the number of classes
    counts : array
        (k,1), the number of observations falling in each class

    Notes
    -----
    There is a tradeoff here between speed and consistency of the
    classification. For more speed set initial to a smaller value (0 is
    fastest); for more consistent classes across repeated runs on the
    same data, set initial to a higher value.
    """

    def __init__(self, y, k=K, initial=100):
        self.k = k
        self.initial = initial
        Map_Classifier.__init__(self, y)
        self.name = 'Natural_Breaks'

    def _set_bins(self):
        x = self.y.copy()
        k = self.k
        uv = np.unique(np.array(x))
        uvk = len(uv)
        if uvk < k:
            Warn('Warning: Not enough unique values in array to form k classes',
                 UserWarning)
            Warn("Warning: setting k to %d" % uvk, UserWarning)
            # with fewer unique values than classes, the distinct values
            # themselves serve as bins (np.unique output is already sorted);
            # no need to call kmeans
            self.bins = uv
            self.k = uvk
        else:
            # find an initial solution, then keep the best of `initial` restarts
            best = natural_breaks(x, k)
            for _ in range(self.initial):
                candidate = natural_breaks(x, k)
                if candidate[2] < best[2]:
                    best = candidate
            self.bins = np.array(best[-1])
            self.k = len(self.bins)

    def update(self, y=None, inplace=False, **kwargs):
        """
        Add data or change classification parameters.

        Parameters
        ----------
        y : array
            (n,1) array of data to classify
        inplace : bool
            whether to conduct the update in place or to return a
            copy estimated from the additional specifications.

        Additional parameters provided in **kwargs are passed to the init
        function of the class. For documentation, check the class constructor.
        """
        kwargs.setdefault('k', self.k)
        kwargs.setdefault('initial', self.initial)
        if not inplace:
            clone = copy.deepcopy(self)
            clone._update(y, **kwargs)
            return clone
        self._update(y, **kwargs)
class Fisher_Jenks(Map_Classifier):
    """
    Fisher Jenks optimal classifier - mean based

    Parameters
    ----------
    y : array
        (n,1), values to classify
    k : int
        number of classes required

    Attributes
    ----------
    yb : array
        (n,1), bin ids for observations
    bins : array
        (k,1), the upper bounds of each class
    k : int
        the number of classes
    counts : array
        (k,1), the number of observations falling in each class

    Raises
    ------
    ValueError
        if y has fewer unique values than the requested k
    """

    def __init__(self, y, k=K):
        if len(np.unique(y)) < k:
            raise ValueError("Fewer unique values than specified classes.")
        self.k = k
        Map_Classifier.__init__(self, y)
        self.name = "Fisher_Jenks"

    def _set_bins(self):
        # _fisher_jenks_means returns the class minima; drop the first
        # element to obtain the k upper bounds
        breaks = _fisher_jenks_means(self.y.copy(), classes=self.k)
        self.bins = np.array(breaks[1:])
class Fisher_Jenks_Sampled(Map_Classifier):
    """
    Fisher Jenks optimal classifier - mean based using random sample

    Parameters
    ----------
    y : array
        (n,1), values to classify
    k : int
        number of classes required
    pct : float
        The percentage of n that should form the sample
        If pct is specified such that n*pct > 1000, then
        pct = 1000./n, unless truncate is False
    truncate : boolean
        truncate pct in cases where pct * n > 1000., (Default True)

    Attributes
    ----------
    yb : array
        (n,1), bin ids for observations
    bins : array
        (k,1), the upper bounds of each class
    k : int
        the number of classes
    counts : array
        (k,1), the number of observations falling in each class

    Notes
    -----
    For theoretical details see :cite:`Rey_2016`.
    """

    def __init__(self, y, k=K, pct=0.10, truncate=True):
        self.k = k
        n = y.size
        if (pct * n > 1000) and truncate:
            pct = 1000. / n
        # np.random.random_integers was deprecated and removed from NumPy;
        # randint with an exclusive upper bound draws the same inclusive
        # range [0, n-1]
        ids = np.random.randint(0, n, int(n * pct))
        yr = y[ids]
        yr[-1] = max(y)  # make sure we have the upper bound
        yr[0] = min(y)  # make sure we have the min
        self.original_y = y
        self.pct = pct
        self._truncated = truncate
        self.yr = yr
        self.yr_n = yr.size
        # classify the sample, then bin the full data on the sample's bins
        Map_Classifier.__init__(self, yr)
        self.yb, self.counts = bin1d(y, self.bins)
        self.name = "Fisher_Jenks_Sampled"
        self.y = y
        self._summary()  # have to recalculate summary stats

    def _set_bins(self):
        fj = Fisher_Jenks(self.y, self.k)
        self.bins = fj.bins

    def update(self, y=None, inplace=False, **kwargs):
        """
        Add data or change classification parameters.

        Parameters
        ----------
        y : array
            (n,1) array of data to classify
        inplace : bool
            whether to conduct the update in place or to return a
            copy estimated from the additional specifications.

        Additional parameters provided in **kwargs are passed to the init
        function of the class. For documentation, check the class constructor.
        """
        kwargs.setdefault('k', self.k)
        kwargs.setdefault('pct', self.pct)
        kwargs.setdefault('truncate', self._truncated)
        if inplace:
            self._update(y, **kwargs)
        else:
            new = copy.deepcopy(self)
            new._update(y, **kwargs)
            return new
class Jenks_Caspall(Map_Classifier):
    """
    Jenks Caspall Map Classification

    Iteratively reassigns observations to the class with the nearest
    median, starting from quantile breaks, until assignments stabilize.

    Parameters
    ----------
    y : array
        (n,1), values to classify
    k : int
        number of classes required

    Attributes
    ----------
    yb : array
        (n,1), bin ids for observations
    bins : array
        (k,1), the upper bounds of each class
    k : int
        the number of classes
    counts : array
        (k,1), the number of observations falling in each class
    iterations : int
        number of reassignment passes until convergence
    """

    def __init__(self, y, k=K):
        self.k = k
        Map_Classifier.__init__(self, y)
        self.name = "Jenks_Caspall"

    def _set_bins(self):
        x = self.y.copy()
        k = self.k
        # start with quantile breaks
        q = quantile(x, k)
        xb, cnts = bin1d(x, q)
        if x.ndim == 1:
            x.shape = (x.size, 1)
        # only n is needed; avoid rebinding k to the (always 1) column count
        n, _ = x.shape
        # class medians seed the iterative reassignment
        xm = [np.median(x[xb == i]) for i in np.unique(xb)]
        xb0 = xb.copy()
        q = xm
        it = 0
        rk = list(range(self.k))
        solving = True
        while solving:
            xb = np.zeros(xb0.shape, int)
            # assign every observation to the class with the nearest median
            d = abs(x - q)
            xb = d.argmin(axis=1)
            if (xb0 == xb).all():
                solving = False
            else:
                xb0 = xb
            it += 1
            q = np.array([np.median(x[xb == i]) for i in rk])
        # np.unique replaces the removed sp.unique alias (same semantics)
        cuts = np.array([max(x[xb == i]) for i in np.unique(xb)])
        cuts.shape = (len(cuts), )
        self.bins = cuts
        self.iterations = it
class Jenks_Caspall_Sampled(Map_Classifier):
    """
    Jenks Caspall Map Classification using a random sample

    Parameters
    ----------
    y : array
        (n,1), values to classify
    k : int
        number of classes required
    pct : float
        The percentage of n that should form the sample
        If pct is specified such that n*pct > 1000, then pct = 1000./n

    Attributes
    ----------
    yb : array
        (n,1), bin ids for observations
    bins : array
        (k,1), the upper bounds of each class
    k : int
        the number of classes
    counts : array
        (k,1), the number of observations falling in each class

    Notes
    -----
    This is intended for large n problems. The logic is to apply
    Jenks_Caspall to a random subset of the y space and then bin the
    complete vector y on the bins obtained from the subset. This would
    trade off some "accuracy" for a gain in speed.
    """

    def __init__(self, y, k=K, pct=0.10):
        self.k = k
        n = y.size
        if pct * n > 1000:
            pct = 1000. / n
        # np.random.random_integers was deprecated and removed from NumPy;
        # randint with an exclusive upper bound draws the same inclusive
        # range [0, n-1]
        ids = np.random.randint(0, n, int(n * pct))
        yr = y[ids]
        # NOTE(review): only the maximum is injected into the sample here
        # (compare Fisher_Jenks_Sampled, which pins both extremes); kept
        # as-is to preserve existing behavior.
        yr[0] = max(y)  # make sure we have the upper bound
        self.original_y = y
        self.pct = pct
        self.yr = yr
        self.yr_n = yr.size
        # classify the sample, then bin the full data on the sample's bins
        Map_Classifier.__init__(self, yr)
        self.yb, self.counts = bin1d(y, self.bins)
        self.name = "Jenks_Caspall_Sampled"
        self.y = y
        self._summary()  # have to recalculate summary stats

    def _set_bins(self):
        jc = Jenks_Caspall(self.y, self.k)
        self.bins = jc.bins
        self.iterations = jc.iterations

    def update(self, y=None, inplace=False, **kwargs):
        """
        Add data or change classification parameters.

        Parameters
        ----------
        y : array
            (n,1) array of data to classify
        inplace : bool
            whether to conduct the update in place or to return a
            copy estimated from the additional specifications.

        Additional parameters provided in **kwargs are passed to the init
        function of the class. For documentation, check the class constructor.
        """
        kwargs.setdefault('k', self.k)
        kwargs.setdefault('pct', self.pct)
        if inplace:
            self._update(y, **kwargs)
        else:
            new = copy.deepcopy(self)
            new._update(y, **kwargs)
            return new
class Jenks_Caspall_Forced(Map_Classifier):
    """
    Jenks Caspall Map Classification with forced movements

    Starting from quantile breaks, observations are moved one at a time
    across adjacent class boundaries (upward then downward sweeps)
    whenever the move lowers the total within-class sum of squares.

    Parameters
    ----------
    y : array
        (n,1), values to classify
    k : int
        number of classes required

    Attributes
    ----------
    yb : array
        (n,1), bin ids for observations
    bins : array
        (k,1), the upper bounds of each class
    k : int
        the number of classes
    counts : array
        (k,1), the number of observations falling in each class
    iterations : int
        number of up/down sweep cycles performed
    """

    def __init__(self, y, k=K):
        self.k = k
        Map_Classifier.__init__(self, y)
        self.name = "Jenks_Caspall_Forced"

    def _set_bins(self):
        # NOTE: the only functional change in this method is replacing the
        # removed sp.unique alias with np.unique (identical semantics);
        # the move/evaluate logic is preserved verbatim.
        x = self.y.copy()
        k = self.k
        q = quantile(x, k)
        solving = True
        xb, cnt = bin1d(x, q)
        # class means
        if x.ndim == 1:
            x.shape = (x.size, 1)
        n, tmp = x.shape
        xm = [x[xb == i].mean() for i in np.unique(xb)]
        q = xm
        xbar = np.array([xm[xbi] for xbi in xb])
        xbar.shape = (n, 1)
        # total within-class sum of squares for the current assignment
        ss = x - xbar
        ss *= ss
        ss = sum(ss)
        down_moves = up_moves = 0
        solving = True
        it = 0
        while solving:
            # try upward moves first
            moving_up = True
            while moving_up:
                class_ids = np.unique(xb)
                nk = [sum(xb == j) for j in class_ids]
                candidates = nk[:-1]
                i = 0
                up_moves = 0
                while candidates:
                    nki = candidates.pop(0)
                    if nki > 1:
                        # move the top member of class i into class i+1 and
                        # keep the move only if it lowers the sum of squares
                        ids = np.nonzero(xb == class_ids[i])
                        mover = max(ids[0])
                        tmp = xb.copy()
                        tmp[mover] = xb[mover] + 1
                        tm = [x[tmp == j].mean() for j in np.unique(tmp)]
                        txbar = np.array([tm[xbi] for xbi in tmp])
                        txbar.shape = (n, 1)
                        tss = x - txbar
                        tss *= tss
                        tss = sum(tss)
                        if tss < ss:
                            xb = tmp
                            ss = tss
                            candidates = []
                            up_moves += 1
                    i += 1
                if not up_moves:
                    moving_up = False
            moving_down = True
            while moving_down:
                class_ids = np.unique(xb)
                nk = [sum(xb == j) for j in class_ids]
                candidates = nk[1:]
                i = 1
                down_moves = 0
                while candidates:
                    nki = candidates.pop(0)
                    if nki > 1:
                        # move the bottom member of class i into class i-1
                        ids = np.nonzero(xb == class_ids[i])
                        mover = min(ids[0])
                        mover_class = xb[mover]
                        target_class = mover_class - 1
                        tmp = xb.copy()
                        tmp[mover] = target_class
                        tm = [x[tmp == j].mean() for j in np.unique(tmp)]
                        txbar = np.array([tm[xbi] for xbi in tmp])
                        txbar.shape = (n, 1)
                        tss = x - txbar
                        tss *= tss
                        tss = sum(tss)
                        if tss < ss:
                            xb = tmp
                            ss = tss
                            candidates = []
                            down_moves += 1
                    i += 1
                if not down_moves:
                    moving_down = False
            if not up_moves and not down_moves:
                solving = False
            it += 1
        cuts = [max(x[xb == c]) for c in np.unique(xb)]
        self.bins = np.array(cuts)
        self.iterations = it
class User_Defined(Map_Classifier):
    """
    User Specified Binning

    Parameters
    ----------
    y : array
        (n,1), values to classify
    bins : array
        (k,1), upper bounds of classes (have to be monotically increasing)

    Attributes
    ----------
    yb : array
        (n,1), bin ids for observations
    bins : array
        (k,1), the upper bounds of each class
    k : int
        the number of classes
    counts : array
        (k,1), the number of observations falling in each class

    Notes
    -----
    If the upper bound of the user supplied bins does not exceed max(y),
    an additional bin is appended. The supplied ``bins`` sequence is
    copied and never mutated.
    """

    def __init__(self, y, bins):
        # defensive copy: never mutate the caller's list, and accept any
        # sequence (e.g. the ndarray self.bins that _update passes back,
        # which has no .append and previously raised AttributeError when
        # new data exceeded the top bin)
        bins = list(bins)
        if bins[-1] < max(y):
            bins.append(max(y))
        self.k = len(bins)
        self.bins = np.array(bins)
        self.y = y
        Map_Classifier.__init__(self, y)
        self.name = 'User Defined'

    def _set_bins(self):
        # bins were supplied directly by the user in __init__
        pass

    def _update(self, y=None, bins=None):
        # merge new data (if any) with the existing data and re-initialize
        if y is not None:
            if hasattr(y, 'values'):
                y = y.values
            y = np.append(y.flatten(), self.y)
        else:
            y = self.y
        if bins is None:
            bins = self.bins
        self.__init__(y, bins)

    def update(self, y=None, inplace=False, **kwargs):
        """
        Add data or change classification parameters.

        Parameters
        ----------
        y : array
            (n,1) array of data to classify
        inplace : bool
            whether to conduct the update in place or to return a
            copy estimated from the additional specifications.

        Additional parameters provided in **kwargs are passed to the init
        function of the class. For documentation, check the class constructor.
        """
        bins = kwargs.pop('bins', self.bins)
        if inplace:
            self._update(y=y, bins=bins, **kwargs)
        else:
            new = copy.deepcopy(self)
            new._update(y, bins, **kwargs)
            return new
class Max_P_Classifier(Map_Classifier):
"""
Max_P Map Classification
Based on Max_p regionalization algorithm
Parameters
----------
y : array
(n,1), values to classify
k : int
number of classes required
initial : int
number of initial solutions to use prior to swapping
Attributes
----------
yb : array
(n,1), bin ids for observations,
bins : array
(k,1), the upper bounds of each class
k : int
the number of classes
counts : array
(k,1), the number of observations falling in each class
Examples
--------
>>> import mapclassify as mc
>>> cal = mc.load_example()
>>> mp = mc.Max_P_Classifier(cal)
>>> mp.bins
array([ 8.7 , 20.47, 36.68, 110.74, 4111.45])
>>> mp.counts
array([29, 9, 5, 7, 8])
"""
def __init__(self, y, k=K, initial=1000):
self.k = k
self.initial = initial
Map_Classifier.__init__(self, y)
self.name = "Max_P"
def _set_bins(self):
x = self.y.copy()
k = self.k
q = quantile(x, k)
if x.ndim == 1:
x.shape = (x.size, 1)
n, tmp = x.shape
x.sort(axis=0)
# find best of initial solutions
solution = 0
best_tss = x.var() * x.shape[0]
tss_all = np.zeros((self.initial, 1))
while solution < self.initial:
remaining = list(range(n))
seeds = [
np.nonzero(di == min(di))[0][0]
for di in [np.abs(x - qi) for qi in q]
]
rseeds = np.random.permutation(list(range(k))).tolist()
[remaining.remove(seed) for seed in seeds]
self.classes = classes = []
[classes.append([seed]) for seed in seeds]
while rseeds:
seed_id = rseeds.pop()
current = classes[seed_id]
growing = True
while growing:
current = classes[seed_id]
low = current[0]
high = current[-1]
left = low - 1
right = high + 1
move_made = False
if left in remaining:
current.insert(0, left)
remaining.remove(left)
move_made = True
if right in remaining:
current.append(right)
remaining.remove(right)
move_made = True
if move_made:
classes[seed_id] = current
else:
growing = False
tss = _fit(self.y, classes)
tss_all[solution] = tss
if tss < best_tss:
best_solution = classes
best_it = solution
best_tss = tss
solution += 1
classes = best_solution
self.best_it = best_it
self.tss = best_tss
self.a2c = a2c = {}
self.tss_all = tss_all
for r, cl in enumerate(classes):
for a in cl:
a2c[a] = r
swapping = True
while swapping:
rseeds = np.random.permutation(list(range(k))).tolist()
total_moves = 0
while rseeds:
id = rseeds.pop()
growing = True
total_moves = 0
while growing:
target = classes[id]
left = target[0] - 1
right = target[-1] + 1
n_moves = 0
if left in a2c:
left_class = classes[a2c[left]]
if len(left_class) > 1:
a = left_class[-1]
if self._swap(left_class, target, a):
target.insert(0, a)
left_class.remove(a)
a2c[a] = id
n_moves += 1
if right in a2c:
right_class = classes[a2c[right]]
if len(right_class) > 1:
a = right_class[0]
if self._swap(right_class, target, a):
target.append(a)
right_class.remove(a)
n_moves += 1
a2c[a] = id
if not n_moves:
growing = False
total_moves += n_moves
if not total_moves:
swapping = False
xs = self.y.copy()
xs.sort()
self.bins = np.array([xs[cl][-1] for cl in classes])
def _ss(self, class_def):
"""calculates sum of squares for a class"""
yc = self.y[class_def]
css = yc - yc.mean()
css *= css
return sum(css)
def _swap(self, class1, class2, a):
"""evaluate cost of moving a from class1 to class2"""
ss1 = self._ss(class1)
ss2 = self._ss(class2)
tss1 = ss1 + ss2
class1c = copy.copy(class1)
class2c = copy.copy(class2)
class1c.remove(a)
class2c.append(a)
ss1 = self._ss(class1c)
ss2 = self._ss(class2c)
tss2 = ss1 + ss2
if tss1 < tss2:
return False
else:
return True
def update(self, y=None, inplace=False, **kwargs):
    """
    Add data or change classification parameters.

    Parameters
    ----------
    y : array
        (n,1) array of data to classify
    inplace : bool
        whether to conduct the update in place or to return a
        copy estimated from the additional specifications.

    Additional parameters provided in **kwargs are passed to the init
    function of the class. For documentation, check the class constructor.
    """
    # carry the current 'initial' setting forward unless the caller overrides it
    kwargs.update({'initial': kwargs.pop('initial', self.initial)})
    if inplace:
        # BUGFIX: the original passed an undefined name `bins` as a second
        # positional argument, raising NameError on every call; sibling
        # update() implementations in this module pass only (y, **kwargs).
        self._update(y, **kwargs)
    else:
        new = copy.deepcopy(self)
        new._update(y, **kwargs)
        return new
def _fit(y, classes):
"""Calculate the total sum of squares for a vector y classified into
classes
Parameters
----------
y : array
(n,1), variable to be classified
classes : array
(k,1), integer values denoting class membership
"""
tss = 0
for class_def in classes:
yc = y[class_def]
css = yc - yc.mean()
css *= css
tss += sum(css)
return tss
# Registry of the k-parameterised classifiers that gadf() and K_classifiers
# evaluate; keys are the public classifier names, values the classes themselves.
kmethods = {}
kmethods["Quantiles"] = Quantiles
kmethods["Fisher_Jenks"] = Fisher_Jenks
kmethods['Natural_Breaks'] = Natural_Breaks
kmethods['Maximum_Breaks'] = Maximum_Breaks
def gadf(y, method="Quantiles", maxk=15, pct=0.8):
    """
    Evaluate the Goodness of Absolute Deviation Fit of a Classifier.

    Finds the minimum value of k for which gadf > pct.

    Parameters
    ----------
    y : array
        (n, 1) values to be classified
    method : {'Quantiles', 'Fisher_Jenks', 'Maximum_Breaks', 'Natural_Breaks'}
    maxk : int
        maximum value of k to evaluate
    pct : float
        The percentage of GADF to exceed

    Returns
    -------
    k : int
        number of classes
    cl : object
        instance of the classifier at k
    gadf : float
        goodness of absolute deviation fit

    Notes
    -----
    The GADF is defined as:

    .. math::

        GADF = 1 - \sum_c \sum_{i \in c}
               |y_i - y_{c,med}| / \sum_i |y_i - y_{med}|

    where :math:`y_{med}` is the global median and :math:`y_{c,med}` is
    the median for class :math:`c`.

    See Also
    --------
    K_classifiers
    """
    y = np.array(y)
    # total absolute deviation about the global median (the GADF denominator)
    global_adam = np.abs(y - np.median(y)).sum()
    for k in range(2, maxk + 1):
        cl = kmethods[method](y, k)
        gadf = 1 - cl.adcm / global_adam
        if gadf > pct:
            # first (smallest) k whose fit clears the threshold
            break
    return (k, cl, gadf)
class K_classifiers(object):
    """
    Evaluate all k-classifers and pick optimal based on k and GADF

    Parameters
    ----------
    y : array
        (n,1), values to be classified
    pct : float
        The percentage of GADF to exceed

    Attributes
    ----------
    best : object
        instance of the optimal Map_Classifier
    results : dictionary
        keys are classifier names, values are the Map_Classifier
        instances with the best pct for each classifer

    Examples
    --------
    >>> import mapclassify as mc
    >>> cal = mc.load_example()
    >>> ks = mc.classifiers.K_classifiers(cal)
    >>> ks.best.name
    'Fisher_Jenks'
    >>> ks.best.k
    4
    >>> ks.best.gadf
    0.8481032719908105

    Notes
    -----
    This can be used to suggest a classification scheme.

    See Also
    --------
    gadf
    """

    def __init__(self, y, pct=0.8):
        results = {}
        # gadf returns (k, classifier, gadf)
        best = gadf(y, "Fisher_Jenks", maxk=len(y) - 1, pct=pct)
        # NOTE(review): these two names appear swapped — pct0 receives the
        # class count best[0] and k0 receives the gadf value best[-1], so the
        # comparison below mixes k against a gadf fraction. Preserved as-is
        # because the doctest above depends on the current behavior; confirm
        # the intended ordering against upstream before changing.
        pct0 = best[0]
        k0 = best[-1]
        keys = list(kmethods.keys())
        keys.remove("Fisher_Jenks")
        results["Fisher_Jenks"] = best
        for method in keys:
            results[method] = gadf(y, method, maxk=len(y) - 1, pct=pct)
            k1 = results[method][0]
            pct1 = results[method][-1]
            # prefer fewer classes; break ties on the better fit
            if (k1 < k0) or (k1 == k0 and pct0 < pct1):
                best = results[method]
                k0 = k1
                pct0 = pct1
        self.results = results
        # store the classifier instance of the winning (k, gadf) triple
        self.best = best[1]
|
pysal/mapclassify
|
mapclassify/classifiers.py
|
_fit
|
python
|
def _fit(y, classes):
tss = 0
for class_def in classes:
yc = y[class_def]
css = yc - yc.mean()
css *= css
tss += sum(css)
return tss
|
Calculate the total sum of squares for a vector y classified into
classes
Parameters
----------
y : array
(n,1), variable to be classified
classes : array
(k,1), integer values denoting class membership
|
train
|
https://github.com/pysal/mapclassify/blob/5b22ec33f5802becf40557614d90cd38efa1676e/mapclassify/classifiers.py#L2226-L2245
| null |
"""
A module of classification schemes for choropleth mapping.
"""
__author__ = "Sergio J. Rey"
__all__ = [
'Map_Classifier', 'quantile', 'Box_Plot', 'Equal_Interval', 'Fisher_Jenks',
'Fisher_Jenks_Sampled', 'Jenks_Caspall', 'Jenks_Caspall_Forced',
'Jenks_Caspall_Sampled', 'Max_P_Classifier', 'Maximum_Breaks',
'Natural_Breaks', 'Quantiles', 'Percentiles', 'Std_Mean', 'User_Defined',
'gadf', 'K_classifiers', 'HeadTail_Breaks', 'CLASSIFIERS'
]
CLASSIFIERS = ('Box_Plot', 'Equal_Interval', 'Fisher_Jenks',
'Fisher_Jenks_Sampled', 'HeadTail_Breaks', 'Jenks_Caspall',
'Jenks_Caspall_Forced', 'Jenks_Caspall_Sampled',
'Max_P_Classifier', 'Maximum_Breaks', 'Natural_Breaks',
'Quantiles', 'Percentiles', 'Std_Mean', 'User_Defined')
K = 5 # default number of classes in any map scheme with this as an argument
import numpy as np
import scipy.stats as stats
import scipy as sp
import copy
from scipy.cluster.vq import kmeans as KMEANS
from warnings import warn as Warn
try:
    from numba import jit
except ImportError:
    # numba is optional: fall back to an identity decorator so functions
    # annotated with @jit still run as plain (uncompiled) Python.
    def jit(func):
        return func
def headTail_breaks(values, cuts):
    """
    Head/tail breaks helper function.

    Recursively appends the mean of ``values`` to ``cuts`` and then recurses
    on the values at or above that mean (the "head").

    Parameters
    ----------
    values : array-like
        values remaining to be partitioned
    cuts : list
        accumulator of break points; mutated in place

    Returns
    -------
    cuts : list
        the accumulated break points
    """
    values = np.array(values)
    mean = np.mean(values)
    cuts.append(mean)
    # BUGFIX: recurse only while the head can still shrink. When every
    # remaining value equals the mean, values[values >= mean] reproduces the
    # same array, so the original `len(values) > 1` guard alone recursed
    # forever (RecursionError on constant input). Non-degenerate input always
    # has values.min() < values.max() and behaves exactly as before.
    if len(values) > 1 and values.min() < values.max():
        return headTail_breaks(values[values >= mean], cuts)
    return cuts
def quantile(y, k=4):
    """
    Calculates the quantiles for an array.

    Parameters
    ----------
    y : array
        (n,1), values to classify
    k : int
        number of quantiles

    Returns
    -------
    q : array
        (n,1), quantile values

    Examples
    --------
    >>> import numpy as np
    >>> import mapclassify as mc
    >>> x = np.arange(1000)
    >>> mc.classifiers.quantile(x)
    array([249.75, 499.5 , 749.25, 999.  ])
    >>> mc.classifiers.quantile(x, k = 3)
    array([333., 666., 999.])

    Duplicate quantile values are collapsed, so for heavily tied data fewer
    than k cut points (pseudo quantiles) may be returned:

    >>> y = np.array([1.0] * 100 + [3.0] * 40)
    >>> mc.classifiers.quantile(y)
    array([1., 3.])
    """
    step = 100. / k
    pcts = np.arange(step, 100 + step, step)
    if pcts[-1] > 100.0:
        # guard against floating point overshoot past the 100th percentile
        pcts[-1] = 100.0
    q = np.unique(np.array([stats.scoreatpercentile(y, pct) for pct in pcts]))
    if len(q) < k:
        # ties collapsed some cut points; fall back to pseudo quantiles
        Warn('Warning: Not enough unique values in array to form k classes',
             UserWarning)
        Warn('Warning: setting k to %d' % len(q), UserWarning)
    return q
def binC(y, bins):
    """
    Bin categorical/qualitative data.

    Parameters
    ----------
    y : array
        (n,q), categorical values
    bins : array
        (k,1), unique values associated with each bin

    Returns
    -------
    b : array
        (n,q), bin membership, values between 0 and k-1

    Examples
    --------
    >>> import numpy as np
    >>> import mapclassify as mc
    >>> np.random.seed(1)
    >>> x = np.random.randint(2, 8, (10, 3))
    >>> y = mc.classifiers.binC(x, list(range(2, 8)))
    >>> y[0].tolist()
    [5, 3, 4]
    """
    if np.ndim(y) == 1:
        n, k = np.shape(y)[0], 1
    else:
        n, k = np.shape(y)
    b = np.zeros((n, k), dtype='int')
    for bin_id, bin_value in enumerate(bins):
        b[np.nonzero(y == bin_value)] = bin_id
    # warn about any observed value that has no corresponding bin
    for val in set(y.flatten()):
        if val not in bins:
            Warn('value not in bin: {}'.format(val), UserWarning)
            Warn('bins: {}'.format(bins), UserWarning)
    return b
def bin(y, bins):
    """
    Bin interval/ratio data.

    Parameters
    ----------
    y : array
        (n,q), values to bin
    bins : array
        (k,1), upper bounds of each bin (monotonic)

    Returns
    -------
    b : array
        (n,q), bin ids, values between 0 and k-1

    Examples
    --------
    >>> import numpy as np
    >>> import mapclassify as mc
    >>> np.random.seed(1)
    >>> x = np.random.randint(2, 20, (10, 3))
    >>> b = mc.classifiers.bin(x, [10, 15, 20])
    >>> b[0].tolist()
    [0, 1, 1]
    """
    if np.ndim(y) == 1:
        n, k = np.shape(y)[0], 1
    else:
        n, k = np.shape(y)
    b = np.zeros((n, k), dtype='int')
    # assign from the widest cutoff downward so that tighter (smaller) upper
    # bounds overwrite, leaving each cell with its smallest admissible bin id
    for bin_id in reversed(range(len(bins))):
        b[np.nonzero(y <= bins[bin_id])] = bin_id
    return b
def bin1d(x, bins):
    """
    Place values of a 1-d array into bins and determine counts of values in
    each bin.

    Parameters
    ----------
    x : array
        (n, 1), values to bin
    bins : array
        (k,1), upper bounds of each bin (monotonic)

    Returns
    -------
    binIds : array
        1-d array of integer bin Ids
    counts : array
        number of elements of x falling in each bin

    Examples
    --------
    >>> import numpy as np
    >>> import mapclassify as mc
    >>> x = np.arange(100, dtype = 'float')
    >>> binIds, counts = mc.classifiers.bin1d(x, [25, 74, 100])
    >>> counts
    array([26, 49, 25])
    """
    # each bin is the half-open interval (lower, upper]; the first bin is
    # unbounded below
    lowers = [-float("inf")]
    lowers.extend(bins[0:-1])
    binIds = np.zeros(x.shape, dtype='int')
    for bin_id, (lo, hi) in enumerate(zip(lowers, bins)):
        binIds += (x > lo) * (x <= hi) * bin_id
    # minlength keeps empty trailing bins in the histogram
    counts = np.bincount(binIds, minlength=len(bins))
    return (binIds, counts)
def load_example():
    """
    Helper function for doc tests
    """
    # deferred relative import avoids a circular import at module load time
    from .datasets import calemp
    return calemp.load()
def _kmeans(y, k=5):
"""
Helper function to do kmeans in one dimension
"""
y = y * 1. # KMEANS needs float or double dtype
centroids = KMEANS(y, k)[0]
centroids.sort()
try:
class_ids = np.abs(y - centroids).argmin(axis=1)
except:
class_ids = np.abs(y[:, np.newaxis] - centroids).argmin(axis=1)
uc = np.unique(class_ids)
cuts = np.array([y[class_ids == c].max() for c in uc])
y_cent = np.zeros_like(y)
for c in uc:
y_cent[class_ids == c] = centroids[c]
diffs = y - y_cent
diffs *= diffs
return class_ids, cuts, diffs.sum(), centroids
def natural_breaks(values, k=5):
    """
    Natural breaks helper function.

    Jenks natural breaks is kmeans in one dimension.

    Returns
    -------
    (centroids, class_ids, fit, cuts) as produced by _kmeans, with k capped
    at the number of unique values.
    """
    values = np.array(values)
    n_unique = len(np.unique(values))
    if n_unique < k:
        # cannot form more classes than there are distinct values
        Warn('Warning: Not enough unique values in array to form k classes',
             UserWarning)
        Warn('Warning: setting k to %d' % n_unique, UserWarning)
        k = n_unique
    class_ids, cuts, fit, centroids = _kmeans(values, k)
    return (centroids, class_ids, fit, cuts)
@jit
def _fisher_jenks_means(values, classes=5, sort=True):
    """
    Jenks Optimal (Natural Breaks) algorithm implemented in Python.

    Notes
    -----
    The original Python code comes from here:
    http://danieljlewis.org/2010/06/07/jenks-natural-breaks-algorithm-in-python/
    and is based on a JAVA and Fortran code available here:
    https://stat.ethz.ch/pipermail/r-sig-geo/2006-March/000811.html

    Returns class breaks such that classes are internally homogeneous while
    assuring heterogeneity among classes.
    """
    if sort:
        # NOTE: in-place sort mutates the caller's array
        values.sort()
    n_data = len(values)
    # Dynamic-programming tables (1-indexed):
    # mat1[l, j] holds the optimal start index of the last class when the
    # first l values are split into j classes; mat2[l, j] holds the minimal
    # total within-class variance for that split.
    mat1 = np.zeros((n_data + 1, classes + 1), dtype=np.int32)
    mat2 = np.zeros((n_data + 1, classes + 1), dtype=np.float32)
    mat1[1, 1:] = 1
    mat2[2:, 1:] = np.inf
    v = np.float32(0)
    for l in range(2, len(values) + 1):
        s1 = np.float32(0)   # running sum of the candidate last class
        s2 = np.float32(0)   # running sum of squares
        w = np.float32(0)    # running element count
        for m in range(1, l + 1):
            i3 = l - m + 1
            val = np.float32(values[i3 - 1])
            s2 += val * val
            s1 += val
            w += np.float32(1)
            # v is the (unnormalized) variance of values[i3-1 : l]
            v = s2 - (s1 * s1) / w
            i4 = i3 - 1
            if i4 != 0:
                for j in range(2, classes + 1):
                    if mat2[l, j] >= (v + mat2[i4, j - 1]):
                        mat1[l, j] = i3
                        mat2[l, j] = v + mat2[i4, j - 1]
        mat1[l, 1] = 1
        mat2[l, 1] = v
    # Backtrack through mat1 to recover the break values.
    k = len(values)
    kclass = np.zeros(classes + 1, dtype=values.dtype)
    kclass[classes] = values[len(values) - 1]
    kclass[0] = values[0]
    for countNum in range(classes, 1, -1):
        pivot = mat1[k, countNum]
        id = int(pivot - 2)
        kclass[countNum - 1] = values[id]
        k = int(pivot - 1)
    return kclass
class Map_Classifier(object):
    """
    Abstract class for all map classifications :cite:`Slocum_2009`

    For an array :math:`y` of :math:`n` values, a map classifier places each
    value :math:`y_i` into one of :math:`k` mutually exclusive and exhaustive
    classes.  Each classifer defines the classes based on different criteria,
    but in all cases the following hold for the classifiers in PySAL:

    .. math:: C_j^l < y_i \le C_j^u \ \forall  i \in C_j

    where :math:`C_j` denotes class :math:`j` which has lower bound
    :math:`C_j^l` and upper bound :math:`C_j^u`.

    Map Classifiers Supported

    * :class:`mapclassify.classifiers.Box_Plot`
    * :class:`mapclassify.classifiers.Equal_Interval`
    * :class:`mapclassify.classifiers.Fisher_Jenks`
    * :class:`mapclassify.classifiers.Fisher_Jenks_Sampled`
    * :class:`mapclassify.classifiers.HeadTail_Breaks`
    * :class:`mapclassify.classifiers.Jenks_Caspall`
    * :class:`mapclassify.classifiers.Jenks_Caspall_Forced`
    * :class:`mapclassify.classifiers.Jenks_Caspall_Sampled`
    * :class:`mapclassify.classifiers.Max_P_Classifier`
    * :class:`mapclassify.classifiers.Maximum_Breaks`
    * :class:`mapclassify.classifiers.Natural_Breaks`
    * :class:`mapclassify.classifiers.Quantiles`
    * :class:`mapclassify.classifiers.Percentiles`
    * :class:`mapclassify.classifiers.Std_Mean`
    * :class:`mapclassify.classifiers.User_Defined`

    Utilities:

    In addition to the classifiers, there are several utility functions that
    can be used to evaluate the properties of a specific classifier,
    or for automatic selection of a classifier and
    number of classes.

    * :func:`mapclassify.classifiers.gadf`
    * :class:`mapclassify.classifiers.K_classifiers`
    """

    def __init__(self, y):
        # template method: subclasses supply _set_bins(); flattening means
        # multi-dimensional input is classified element-wise
        y = np.asarray(y).flatten()
        self.name = 'Map Classifier'
        self.y = y
        self._classify()
        self._summary()

    def _summary(self):
        # derive per-class membership lists and the fit statistics from yb
        yb = self.yb
        self.classes = [np.nonzero(yb == c)[0].tolist() for c in range(self.k)]
        self.tss = self.get_tss()
        self.adcm = self.get_adcm()
        self.gadf = self.get_gadf()

    def _classify(self):
        # subclass hook computes self.bins; bin1d assigns ids and counts
        self._set_bins()
        self.yb, self.counts = bin1d(self.y, self.bins)

    def _update(self, data, *args, **kwargs):
        """
        The only thing that *should* happen in this function is
        1. input sanitization for pandas
        2. classification/reclassification.

        Using their __init__ methods, all classifiers can re-classify given
        different input parameters or additional data.

        If you've got a cleverer updating equation than the intial estimation
        equation, remove the call to self.__init__ below and replace it with
        the updating function.
        """
        if data is not None:
            # append the new observations to the data already held
            data = np.asarray(data).flatten()
            data = np.append(data.flatten(), self.y)
        else:
            data = self.y
        self.__init__(data, *args, **kwargs)

    @classmethod
    def make(cls, *args, **kwargs):
        """
        Configure and create a classifier that will consume data and produce
        classifications, given the configuration options specified by this
        function.

        Note that this like a *partial application* of the relevant class
        constructor. `make` creates a function that returns classifications; it
        does not actually do the classification.

        If you want to classify data directly, use the appropriate class
        constructor, like Quantiles, Max_Breaks, etc.

        If you *have* a classifier object, but want to find which bins new data
        falls into, use find_bin.

        Parameters
        ----------
        *args : required positional arguments
            all positional arguments required by the classifier,
            excluding the input data.
        rolling : bool
            a boolean configuring the outputted classifier to use
            a rolling classifier rather than a new classifier for
            each input. If rolling, this adds the current data to
            all of the previous data in the classifier, and
            rebalances the bins, like a running median
            computation.
        return_object : bool
            a boolean configuring the outputted classifier to
            return the classifier object or not
        return_bins : bool
            a boolean configuring the outputted classifier to
            return the bins/breaks or not
        return_counts : bool
            a boolean configuring the outputted classifier to
            return the histogram of objects falling into each bin
            or not

        Returns
        -------
        A function that consumes data and returns their bins (and object,
        bins/breaks, or counts, if requested).

        Note
        ----
        This is most useful when you want to run a classifier many times
        with a given configuration, such as when classifying many columns of an
        array or dataframe using the same configuration.

        Examples
        --------
        >>> import libpysal as ps
        >>> import mapclassify as mc
        >>> import geopandas as gpd
        >>> df = gpd.read_file(ps.examples.get_path('columbus.dbf'))
        >>> classifier = mc.Quantiles.make(k=9)
        >>> cl = df[['HOVAL', 'CRIME', 'INC']].apply(classifier)
        >>> cl["HOVAL"].values[:10]
        array([8, 7, 2, 4, 1, 3, 8, 5, 7, 8])
        >>> cl["CRIME"].values[:10]
        array([0, 1, 3, 4, 6, 2, 0, 5, 3, 4])
        >>> cl["INC"].values[:10]
        array([7, 8, 5, 0, 3, 5, 0, 3, 6, 4])
        >>> import pandas as pd; from numpy import linspace as lsp
        >>> data = [lsp(3,8,num=10), lsp(10, 0, num=10), lsp(-5, 15, num=10)]
        >>> data = pd.DataFrame(data).T
        >>> data
                  0          1          2
        0  3.000000  10.000000  -5.000000
        1  3.555556   8.888889  -2.777778
        2  4.111111   7.777778  -0.555556
        3  4.666667   6.666667   1.666667
        4  5.222222   5.555556   3.888889
        5  5.777778   4.444444   6.111111
        6  6.333333   3.333333   8.333333
        7  6.888889   2.222222  10.555556
        8  7.444444   1.111111  12.777778
        9  8.000000   0.000000  15.000000
        >>> data.apply(mc.Quantiles.make(rolling=True))
           0  1  2
        0  0  4  0
        1  0  4  0
        2  1  4  0
        3  1  3  0
        4  2  2  1
        5  2  1  2
        6  3  0  4
        7  3  0  4
        8  4  0  4
        9  4  0  4
        >>> dbf = ps.io.open(ps.examples.get_path('baltim.dbf'))
        >>> data = dbf.by_col_array('PRICE', 'LOTSZ', 'SQFT')
        >>> my_bins = [1, 10, 20, 40, 80]
        >>> cl = [mc.User_Defined.make(bins=my_bins)(a) for a in data.T]
        >>> len(cl)
        3
        >>> cl[0][:10]
        array([4, 5, 5, 5, 4, 4, 5, 4, 4, 5])
        """
        # only flag overrides return flag
        to_annotate = copy.deepcopy(kwargs)
        return_object = kwargs.pop('return_object', False)
        return_bins = kwargs.pop('return_bins', False)
        return_counts = kwargs.pop('return_counts', False)
        rolling = kwargs.pop('rolling', False)
        if rolling:
            # just initialize a fake classifier
            data = list(range(10))
            cls_instance = cls(data, *args, **kwargs)
            # and empty it, since we'll be using the update
            cls_instance.y = np.array([])
        else:
            cls_instance = None

        # wrap init in a closure to make a consumer.
        # Qc Na: "Objects/Closures are poor man's Closures/Objects"
        def classifier(data, cls_instance=cls_instance):
            if rolling:
                cls_instance.update(data, inplace=True, **kwargs)
                yb = cls_instance.find_bin(data)
            else:
                cls_instance = cls(data, *args, **kwargs)
                yb = cls_instance.yb
            outs = [yb, None, None, None]
            outs[1] = cls_instance if return_object else None
            outs[2] = cls_instance.bins if return_bins else None
            outs[3] = cls_instance.counts if return_counts else None
            outs = [a for a in outs if a is not None]
            if len(outs) == 1:
                return outs[0]
            else:
                return outs

        # for debugging/jic, keep around the kwargs.
        # in future, we might want to make this a thin class, so that we can
        # set a custom repr. Call the class `Binner` or something, that's a
        # pre-configured Classifier that just consumes data, bins it, &
        # possibly updates the bins.
        classifier._options = to_annotate
        return classifier

    def update(self, y=None, inplace=False, **kwargs):
        """
        Add data or change classification parameters.

        Parameters
        ----------
        y : array
            (n,1) array of data to classify
        inplace : bool
            whether to conduct the update in place or to return a copy
            estimated from the additional specifications.

        Additional parameters provided in **kwargs are passed to the init
        function of the class. For documentation, check the class constructor.
        """
        # carry the current k forward unless the caller overrides it
        kwargs.update({'k': kwargs.pop('k', self.k)})
        if inplace:
            self._update(y, **kwargs)
        else:
            new = copy.deepcopy(self)
            new._update(y, **kwargs)
            return new

    def __str__(self):
        st = self._table_string()
        return st

    def __repr__(self):
        return self._table_string()

    def __call__(self, *args, **kwargs):
        """
        This will allow the classifier to be called like it's a function.

        Whether or not we want to make this be "find_bin" or "update" is a
        design decision.

        I like this as find_bin, since a classifier's job should be to classify
        the data given to it using the rules estimated from the `_classify()`
        function.
        """
        return self.find_bin(*args)

    def get_tss(self):
        """
        Total sum of squares around class means

        Returns sum of squares over all class means
        """
        tss = 0
        for class_def in self.classes:
            if len(class_def) > 0:
                yc = self.y[class_def]
                css = yc - yc.mean()
                css *= css
                tss += sum(css)
        return tss

    def _set_bins(self):
        # abstract hook: each concrete classifier computes self.bins here
        pass

    def get_adcm(self):
        """
        Absolute deviation around class median (ADCM).

        Calculates the absolute deviations of each observation about its class
        median as a measure of fit for the classification method.

        Returns sum of ADCM over all classes
        """
        adcm = 0
        for class_def in self.classes:
            if len(class_def) > 0:
                yc = self.y[class_def]
                yc_med = np.median(yc)
                ycd = np.abs(yc - yc_med)
                adcm += sum(ycd)
        return adcm

    def get_gadf(self):
        """
        Goodness of absolute deviation of fit
        """
        adam = (np.abs(self.y - np.median(self.y))).sum()
        gadf = 1 - self.adcm / adam
        return gadf

    def _table_string(self, width=12, decimal=3):
        # build a fixed-width text table of class intervals and counts;
        # `width` is recomputed from the widest formatted bin value
        fmt = ".%df" % decimal
        fmt = "%" + fmt
        largest = max([len(fmt % i) for i in self.bins])
        width = largest
        fmt = "%d.%df" % (width, decimal)
        fmt = "%" + fmt
        h1 = "Lower"
        h1 = h1.center(largest)
        h2 = " "
        h2 = h2.center(10)
        h3 = "Upper"
        h3 = h3.center(largest + 1)
        largest = "%d" % max(self.counts)
        largest = len(largest) + 15
        h4 = "Count"
        h4 = h4.rjust(largest)
        table = []
        header = h1 + h2 + h3 + h4
        table.append(header)
        table.append("=" * len(header))
        for i, up in enumerate(self.bins):
            if i == 0:
                left = " " * width
                left += " x[i] <= "
            else:
                left = fmt % self.bins[i - 1]
                left += " < x[i] <= "
            right = fmt % self.bins[i]
            row = left + right
            cnt = "%d" % self.counts[i]
            cnt = cnt.rjust(largest)
            row += cnt
            table.append(row)
        name = self.name
        top = name.center(len(row))
        table.insert(0, top)
        table.insert(1, " ")
        table = "\n".join(table)
        return table

    def find_bin(self, x):
        """
        Sort input or inputs according to the current bin estimate

        Parameters
        ----------
        x : array or numeric
            a value or array of values to fit within the estimated
            bins

        Returns
        -------
        a bin index or array of bin indices that classify the input into one of
        the classifiers' bins.

        Note that this differs from similar functionality in
        numpy.digitize(x, classi.bins, right=True).

        This will always provide the closest bin, so data "outside" the classifier,
        above and below the max/min breaks, will be classified into the nearest bin.

        numpy.digitize returns k+1 for data greater than the greatest bin, but retains 0
        for data below the lowest bin.
        """
        x = np.asarray(x).flatten()
        right = np.digitize(x, self.bins, right=True)
        if right.max() == len(self.bins):
            # clamp values above the top break into the highest class
            right[right == len(self.bins)] = len(self.bins) - 1
        return right
class HeadTail_Breaks(Map_Classifier):
    """
    Head/tail Breaks Map Classification for Heavy-tailed Distributions

    Parameters
    ----------
    y : array
        (n,1), values to classify

    Attributes
    ----------
    yb : array
        (n,1), bin ids for observations
    bins : array
        (k,1), the upper bounds of each class
    k : int
        the number of classes
    counts : array
        (k,1), the number of observations falling in each class

    Examples
    --------
    >>> import numpy as np
    >>> import mapclassify as mc
    >>> np.random.seed(10)
    >>> cal = mc.load_example()
    >>> htb = mc.HeadTail_Breaks(cal)
    >>> htb.k
    3
    >>> htb.counts
    array([50,  7,  1])

    Notes
    -----
    Head/tail Breaks is a relatively new classification method developed
    for data with a heavy-tailed distribution.

    Implementation based on contributions by Alessandra Sozzi
    <alessandra.sozzi@gmail.com>.

    For theoretical details see :cite:`Jiang_2013`.
    """

    def __init__(self, y):
        Map_Classifier.__init__(self, y)
        self.name = 'HeadTail_Breaks'

    def _set_bins(self):
        # the mean-splitting recursion determines k from the data itself,
        # so no class count is taken from the caller
        cuts = headTail_breaks(self.y.copy(), [])
        self.bins = np.array(cuts)
        self.k = len(self.bins)
class Equal_Interval(Map_Classifier):
    """
    Equal Interval Classification

    Parameters
    ----------
    y : array
        (n,1), values to classify
    k : int
        number of classes required

    Attributes
    ----------
    yb : array
        (n,1), bin ids for observations; yb[i] = j for j>=1 if
        bins[j-1] < y[i] <= bins[j], otherwise yb[i] = 0
    bins : array
        (k,1), the upper bounds of each class
    k : int
        the number of classes
    counts : array
        (k,1), the number of observations falling in each class

    Examples
    --------
    >>> import mapclassify as mc
    >>> cal = mc.load_example()
    >>> ei = mc.Equal_Interval(cal, k = 5)
    >>> ei.k
    5
    >>> ei.counts
    array([57,  0,  0,  0,  1])
    >>> ei.bins
    array([ 822.394, 1644.658, 2466.922, 3289.186, 4111.45 ])

    Notes
    -----
    Intervals defined to have equal width:

    .. math::

        bins_j = min(y)+w*(j+1)

    with :math:`w=\\frac{max(y)-min(j)}{k}`
    """

    def __init__(self, y, k=K):
        """see class docstring"""
        self.k = k
        Map_Classifier.__init__(self, y)
        self.name = 'Equal Interval'

    def _set_bins(self):
        lowest = min(self.y)
        highest = max(self.y)
        width = (highest - lowest) * 1. / self.k
        cuts = np.arange(lowest + width, highest + width, width)
        if len(cuts) > self.k:
            # floating point rounding can produce one cut too many
            cuts = cuts[0:self.k]
        # pin the top break exactly to the maximum observed value
        cuts[-1] = highest
        self.bins = cuts.copy()
class Percentiles(Map_Classifier):
    """
    Percentiles Map Classification

    Parameters
    ----------
    y : array
        attribute to classify
    pct : array
        percentiles, default=[1, 10, 50, 90, 99, 100]

    Attributes
    ----------
    yb : array
        bin ids for observations (numpy array n x 1)
    bins : array
        the upper bounds of each class (numpy array k x 1)
    k : int
        the number of classes
    counts : array
        the number of observations falling in each class
        (numpy array k x 1)

    Examples
    --------
    >>> import mapclassify as mc
    >>> cal = mc.load_example()
    >>> p = mc.Percentiles(cal)
    >>> p.counts
    array([ 1,  5, 23, 23,  5,  1])
    >>> p2 = mc.Percentiles(cal, pct = [50, 100])
    >>> p2.k
    2
    """

    def __init__(self, y, pct=[1, 10, 50, 90, 99, 100]):
        self.pct = pct
        Map_Classifier.__init__(self, y)
        self.name = 'Percentiles'

    def _set_bins(self):
        # one break per requested percentile
        self.bins = np.array([stats.scoreatpercentile(self.y, p)
                              for p in self.pct])
        self.k = len(self.bins)

    def update(self, y=None, inplace=False, **kwargs):
        """
        Add data or change classification parameters.

        Parameters
        ----------
        y : array
            (n,1) array of data to classify
        inplace : bool
            whether to conduct the update in place or to return a copy
            estimated from the additional specifications.

        Additional parameters provided in **kwargs are passed to the init
        function of the class. For documentation, check the class constructor.
        """
        # keep the current percentile list unless the caller overrides it
        kwargs['pct'] = kwargs.pop('pct', self.pct)
        target = self if inplace else copy.deepcopy(self)
        target._update(y, **kwargs)
        if not inplace:
            return target
class Box_Plot(Map_Classifier):
    """
    Box_Plot Map Classification

    Parameters
    ----------
    y : array
        attribute to classify
    hinge : float
        multiplier for IQR

    Attributes
    ----------
    yb : array
        (n,1), bin ids for observations
    bins : array
        (n,1), the upper bounds of each class (monotonic)
    k : int
        the number of classes
    counts : array
        (k,1), the number of observations falling in each class
    low_outlier_ids : array
        indices of observations that are low outliers
    high_outlier_ids : array
        indices of observations that are high outliers

    Notes
    -----
    The bins are set as follows::

        bins[0] = q[0]-hinge*IQR
        bins[1] = q[0]
        bins[2] = q[1]
        bins[3] = q[2]
        bins[4] = q[2]+hinge*IQR
        bins[5] = inf (see Notes)

    where q is an array of the first three quartiles of y and
    IQR=q[2]-q[0].

    If q[2]+hinge*IQR > max(y) there will only be 5 classes and no high
    outliers, otherwise, there will be 6 classes and at least one high
    outlier.

    Examples
    --------
    >>> import mapclassify as mc
    >>> cal = mc.load_example()
    >>> bp = mc.Box_Plot(cal)
    >>> bp.counts
    array([ 0, 15, 14, 14,  6,  9])
    >>> bp.high_outlier_ids
    array([ 0,  6, 18, 29, 33, 36, 37, 40, 42])
    """

    def __init__(self, y, hinge=1.5):
        """
        Parameters
        ----------
        y : array (n,1)
            attribute to classify
        hinge : float
            multiple of inter-quartile range (default=1.5)
        """
        self.hinge = hinge
        Map_Classifier.__init__(self, y)
        self.name = 'Box Plot'

    def _set_bins(self):
        y = self.y
        quartiles = [stats.scoreatpercentile(y, p) for p in (25, 50, 75, 100)]
        self.iqr = quartiles[2] - quartiles[0]
        spread = self.hinge * self.iqr
        lower_fence = quartiles[0] - spread
        upper_fence = quartiles[2] + spread
        if upper_fence < quartiles[-1]:
            # high outliers exist: the fence becomes an interior break
            quartiles.insert(-1, upper_fence)
        else:
            # no high outliers: the fence caps the top class
            quartiles[-1] = upper_fence
        quartiles.insert(0, lower_fence)
        self.bins = np.array(quartiles)
        self.k = len(quartiles)

    def _classify(self):
        Map_Classifier._classify(self)
        # observations beyond the fences are flagged as outliers
        self.low_outlier_ids = np.nonzero(self.yb == 0)[0]
        self.high_outlier_ids = np.nonzero(self.yb == 5)[0]

    def update(self, y=None, inplace=False, **kwargs):
        """
        Add data or change classification parameters.

        Parameters
        ----------
        y : array
            (n,1) array of data to classify
        inplace : bool
            whether to conduct the update in place or to return a
            copy estimated from the additional specifications.

        Additional parameters provided in **kwargs are passed to the init
        function of the class. For documentation, check the class constructor.
        """
        # keep the current hinge unless the caller overrides it
        kwargs['hinge'] = kwargs.pop('hinge', self.hinge)
        target = self if inplace else copy.deepcopy(self)
        target._update(y, **kwargs)
        if not inplace:
            return target
class Quantiles(Map_Classifier):
    """
    Quantile Map Classification

    Parameters
    ----------
    y : array
        (n,1), values to classify
    k : int
        number of classes required

    Attributes
    ----------
    yb : array
        (n,1), bin ids for observations; yb[i] = j for j>=1 if
        bins[j-1] < y[i] <= bins[j], otherwise yb[i] = 0
    bins : array
        (k,1), the upper bounds of each class
    k : int
        the number of classes
    counts : array
        (k,1), the number of observations falling in each class

    Examples
    --------
    >>> import mapclassify as mc
    >>> cal = mc.load_example()
    >>> q = mc.Quantiles(cal, k = 5)
    >>> q.counts
    array([12, 11, 12, 11, 12])
    """

    def __init__(self, y, k=K):
        self.k = k
        Map_Classifier.__init__(self, y)
        self.name = 'Quantiles'

    def _set_bins(self):
        # delegate break computation to the module-level quantile helper
        self.bins = quantile(self.y, k=self.k)
class Std_Mean(Map_Classifier):
    """
    Standard Deviation and Mean Map Classification

    Parameters
    ----------
    y : array
        (n,1), values to classify
    multiples : array
        the multiples of the standard deviation to add/subtract from
        the sample mean to define the bins, default=[-2,-1,1,2]

    Attributes
    ----------
    yb : array
        (n,1), bin ids for observations
    bins : array
        (k,1), the upper bounds of each class
    k : int
        the number of classes
    counts : array
        (k,1), the number of observations falling in each class

    Examples
    --------
    >>> import mapclassify as mc
    >>> cal = mc.load_example()
    >>> st = mc.Std_Mean(cal)
    >>> st.k
    5
    >>> st.counts
    array([ 0,  0, 56,  1,  1])
    >>> st3 = mc.Std_Mean(cal, multiples = [-3, -1.5, 1.5, 3])
    >>> st3.counts
    array([ 0,  0, 57,  0,  1])
    """

    def __init__(self, y, multiples=[-2, -1, 1, 2]):
        self.multiples = multiples
        Map_Classifier.__init__(self, y)
        self.name = 'Std_Mean'

    def _set_bins(self):
        y = self.y
        stdev = y.std(ddof=1)  # sample (not population) standard deviation
        mean = y.mean()
        cuts = [mean + stdev * m for m in self.multiples]
        if cuts[-1] < y.max():
            # extend the last class to cover the maximum observed value
            cuts.append(y.max())
        self.bins = np.array(cuts)
        self.k = len(cuts)

    def update(self, y=None, inplace=False, **kwargs):
        """
        Add data or change classification parameters.

        Parameters
        ----------
        y : array
            (n,1) array of data to classify
        inplace : bool
            whether to conduct the update in place or to return a copy
            estimated from the additional specifications.

        Additional parameters provided in **kwargs are passed to the init
        function of the class. For documentation, check the class constructor.
        """
        # keep the current multiples unless the caller overrides them
        kwargs['multiples'] = kwargs.pop('multiples', self.multiples)
        target = self if inplace else copy.deepcopy(self)
        target._update(y, **kwargs)
        if not inplace:
            return target
class Maximum_Breaks(Map_Classifier):
    """
    Maximum Breaks Map Classification

    Places the k-1 class breaks in the middle of the k-1 largest gaps
    between neighboring sorted values.

    Parameters
    ----------
    y : array
        (n, 1), values to classify
    k : int
        number of classes required
    mindiff : float
        The minimum difference between class breaks

    Attributes
    ----------
    yb : array
        (n, 1), bin ids for observations
    bins : array
        (k, 1), the upper bounds of each class
    k : int
        the number of classes
    counts : array
        (k, 1), the number of observations falling in each class (numpy
        array k x 1)

    Examples
    --------
    >>> import mapclassify as mc
    >>> cal = mc.load_example()
    >>> mb = mc.Maximum_Breaks(cal, k = 5)
    >>> mb.k
    5
    >>> mb.bins
    array([ 146.005,  228.49 ,  546.675, 2417.15 , 4111.45 ])
    >>> mb.counts
    array([50,  2,  4,  1,  1])
    """

    def __init__(self, y, k=5, mindiff=0):
        self.k = k
        self.mindiff = mindiff
        Map_Classifier.__init__(self, y)
        self.name = 'Maximum_Breaks'

    def _set_bins(self):
        xs = self.y.copy()
        k = self.k
        xs.sort()
        min_diff = self.mindiff
        # gaps between neighboring sorted values
        d = xs[1:] - xs[:-1]
        diffs = d[np.nonzero(d > min_diff)]
        # fix: scipy removed its top-level `unique` alias (sp.unique);
        # np.unique is the supported equivalent
        diffs = np.unique(diffs)
        k1 = k - 1
        if len(diffs) > k1:
            # keep only the k-1 largest distinct gaps
            diffs = diffs[-k1:]
        mp = []
        self.cids = []
        for diff in diffs:
            ids = np.nonzero(d == diff)
            for id in ids:
                self.cids.append(id[0])
                # place the break midway across the gap
                cp = ((xs[id] + xs[id + 1]) / 2.)
                mp.append(cp[0])
        # top class always covers the maximum observation
        mp.append(xs[-1])
        mp.sort()
        self.bins = np.array(mp)

    def update(self, y=None, inplace=False, **kwargs):
        """
        Add data or change classification parameters.

        Parameters
        ----------
        y : array
            (n,1) array of data to classify
        inplace : bool
            whether to conduct the update in place or to return a copy
            estimated from the additional specifications.

        Additional parameters provided in **kwargs are passed to the init
        function of the class. For documentation, check the class constructor.
        """
        kwargs.update({'k': kwargs.pop('k', self.k)})
        kwargs.update({'mindiff': kwargs.pop('mindiff', self.mindiff)})
        if inplace:
            self._update(y, **kwargs)
        else:
            new = copy.deepcopy(self)
            new._update(y, **kwargs)
            return new
class Natural_Breaks(Map_Classifier):
    """
    Natural Breaks Map Classification

    Parameters
    ----------
    y : array
        (n,1), values to classify
    k : int
        number of classes required
    initial : int
        number of initial solutions to generate, (default=100)

    Attributes
    ----------
    yb : array
        (n,1), bin ids for observations
    bins : array
        (k,1), the upper bounds of each class
    k : int
        the number of classes
    counts : array
        (k,1), the number of observations falling in each class

    Examples
    --------
    >>> import numpy as np
    >>> import mapclassify as mc
    >>> np.random.seed(123456)
    >>> cal = mc.load_example()
    >>> nb = mc.Natural_Breaks(cal, k=5)
    >>> nb.k
    5
    >>> nb.counts
    array([41,  9,  6,  1,  1])
    >>> nb.bins
    array([  29.82,  110.74,  370.5 ,  722.85, 4111.45])

    Notes
    -----
    There is a tradeoff here between speed and consistency of the
    classification. If you want more speed, set initial to a smaller value
    (0 would result in the best speed); if you want more consistent classes
    across multiple runs on the same data, set initial to a higher value.
    """

    def __init__(self, y, k=K, initial=100):
        self.k = k
        self.initial = initial
        Map_Classifier.__init__(self, y)
        self.name = 'Natural_Breaks'

    def _set_bins(self):
        x = self.y.copy()
        k = self.k
        uv = np.unique(np.array(x))
        uvk = len(uv)
        if uvk < k:
            ms = 'Warning: Not enough unique values in array to form k classes'
            Warn(ms, UserWarning)
            Warn("Warning: setting k to %d" % uvk, UserWarning)
            uv.sort()
            # bins collapse to the sorted unique values; kmeans is unnecessary
            self.bins = uv
            self.k = uvk
            return
        # run `initial` + 1 random restarts and keep the best fitting one
        best = natural_breaks(x, k)
        for _ in range(self.initial):
            candidate = natural_breaks(x, k)
            if candidate[2] < best[2]:
                best = candidate
        self.bins = np.array(best[-1])
        self.k = len(self.bins)

    def update(self, y=None, inplace=False, **kwargs):
        """
        Add data or change classification parameters.

        Parameters
        ----------
        y : array
            (n,1) array of data to classify
        inplace : bool
            whether to conduct the update in place or to return a
            copy estimated from the additional specifications.

        Additional parameters provided in **kwargs are passed to the init
        function of the class. For documentation, check the class constructor.
        """
        kwargs.setdefault('k', self.k)
        kwargs.setdefault('initial', self.initial)
        if inplace:
            self._update(y, **kwargs)
        else:
            new = copy.deepcopy(self)
            new._update(y, **kwargs)
            return new
class Fisher_Jenks(Map_Classifier):
    """
    Fisher Jenks optimal classifier - mean based

    Parameters
    ----------
    y : array
        (n,1), values to classify
    k : int
        number of classes required

    Attributes
    ----------
    yb : array
        (n,1), bin ids for observations
    bins : array
        (k,1), the upper bounds of each class
    k : int
        the number of classes
    counts : array
        (k,1), the number of observations falling in each class

    Examples
    --------
    >>> import mapclassify as mc
    >>> cal = mc.load_example()
    >>> fj = mc.Fisher_Jenks(cal)
    >>> fj.adcm
    799.24
    >>> fj.bins
    array([  75.29,  192.05,  370.5 ,  722.85, 4111.45])
    >>> fj.counts
    array([49,  3,  4,  1,  1])
    """

    def __init__(self, y, k=K):
        if len(np.unique(y)) < k:
            raise ValueError("Fewer unique values than specified classes.")
        self.k = k
        Map_Classifier.__init__(self, y)
        self.name = "Fisher_Jenks"

    def _set_bins(self):
        # drop the first element: the DP returns the minimum value at
        # position 0, which is a lower bound rather than a class break
        breaks = _fisher_jenks_means(self.y.copy(), classes=self.k)
        self.bins = np.array(breaks[1:])
class Fisher_Jenks_Sampled(Map_Classifier):
    """
    Fisher Jenks optimal classifier - mean based using random sample

    Parameters
    ----------
    y : array
        (n,1), values to classify
    k : int
        number of classes required
    pct : float
        The percentage of n that should form the sample
        If pct is specified such that n*pct > 1000, then
        pct = 1000./n, unless truncate is False
    truncate : boolean
        truncate pct in cases where pct * n > 1000., (Default True)

    Attributes
    ----------
    yb : array
        (n,1), bin ids for observations
    bins : array
        (k,1), the upper bounds of each class
    k : int
        the number of classes
    counts : array
        (k,1), the number of observations falling in each class

    Notes
    -----
    For theoretical details see :cite:`Rey_2016`.
    """

    def __init__(self, y, k=K, pct=0.10, truncate=True):
        self.k = k
        n = y.size
        if (pct * n > 1000) and truncate:
            pct = 1000. / n
        # fix: np.random.random_integers was deprecated and removed from
        # NumPy; randint with an exclusive upper bound draws from the same
        # [0, n-1] range
        ids = np.random.randint(0, n, int(n * pct))
        yr = y[ids]
        yr[-1] = max(y)  # make sure we have the upper bound
        yr[0] = min(y)  # make sure we have the min
        self.original_y = y
        self.pct = pct
        self._truncated = truncate
        self.yr = yr
        self.yr_n = yr.size
        Map_Classifier.__init__(self, yr)
        # classify the FULL data set on the bins fit to the sample
        self.yb, self.counts = bin1d(y, self.bins)
        self.name = "Fisher_Jenks_Sampled"
        self.y = y
        self._summary()  # have to recalculate summary stats

    def _set_bins(self):
        fj = Fisher_Jenks(self.y, self.k)
        self.bins = fj.bins

    def update(self, y=None, inplace=False, **kwargs):
        """
        Add data or change classification parameters.

        Parameters
        ----------
        y : array
            (n,1) array of data to classify
        inplace : bool
            whether to conduct the update in place or to return a
            copy estimated from the additional specifications.

        Additional parameters provided in **kwargs are passed to the init
        function of the class. For documentation, check the class constructor.
        """
        kwargs.update({'k': kwargs.pop('k', self.k)})
        kwargs.update({'pct': kwargs.pop('pct', self.pct)})
        kwargs.update({'truncate': kwargs.pop('truncate', self._truncated)})
        if inplace:
            self._update(y, **kwargs)
        else:
            new = copy.deepcopy(self)
            new._update(y, **kwargs)
            return new
class Jenks_Caspall(Map_Classifier):
    """
    Jenks Caspall Map Classification

    Parameters
    ----------
    y : array
        (n,1), values to classify
    k : int
        number of classes required

    Attributes
    ----------
    yb : array
        (n,1), bin ids for observations
    bins : array
        (k,1), the upper bounds of each class
    k : int
        the number of classes
    counts : array
        (k,1), the number of observations falling in each class

    Examples
    --------
    >>> import mapclassify as mc
    >>> cal = mc.load_example()
    >>> jc = mc.Jenks_Caspall(cal, k = 5)
    >>> jc.bins
    array([1.81000e+00, 7.60000e+00, 2.98200e+01, 1.81270e+02, 4.11145e+03])
    >>> jc.counts
    array([14, 13, 14, 10,  7])
    """

    def __init__(self, y, k=K):
        self.k = k
        Map_Classifier.__init__(self, y)
        self.name = "Jenks_Caspall"

    def _set_bins(self):
        x = self.y.copy()
        k = self.k
        # start with quantiles
        q = quantile(x, k)
        solving = True
        xb, cnts = bin1d(x, q)
        # class medians seed the iterative reassignment
        if x.ndim == 1:
            x.shape = (x.size, 1)
        n, k = x.shape
        xm = [np.median(x[xb == i]) for i in np.unique(xb)]
        xb0 = xb.copy()
        q = xm
        it = 0
        rk = list(range(self.k))
        while solving:
            xb = np.zeros(xb0.shape, int)
            # reassign each observation to its nearest class median
            d = abs(x - q)
            xb = d.argmin(axis=1)
            if (xb0 == xb).all():
                solving = False
            else:
                xb0 = xb
            it += 1
            q = np.array([np.median(x[xb == i]) for i in rk])
        # fix: np.unique replaces the removed scipy top-level alias sp.unique
        cuts = np.array([max(x[xb == i]) for i in np.unique(xb)])
        cuts.shape = (len(cuts), )
        self.bins = cuts
        self.iterations = it
class Jenks_Caspall_Sampled(Map_Classifier):
    """
    Jenks Caspall Map Classification using a random sample

    Parameters
    ----------
    y : array
        (n,1), values to classify
    k : int
        number of classes required
    pct : float
        The percentage of n that should form the sample
        If pct is specified such that n*pct > 1000, then pct = 1000./n

    Attributes
    ----------
    yb : array
        (n,1), bin ids for observations
    bins : array
        (k,1), the upper bounds of each class
    k : int
        the number of classes
    counts : array
        (k,1), the number of observations falling in each class

    Notes
    -----
    This is intended for large n problems. The logic is to apply
    Jenks_Caspall to a random subset of the y space and then bin the
    complete vector y on the bins obtained from the subset. This would
    trade off some "accuracy" for a gain in speed.
    """

    def __init__(self, y, k=K, pct=0.10):
        self.k = k
        n = y.size
        if pct * n > 1000:
            pct = 1000. / n
        # fix: np.random.random_integers was deprecated and removed from
        # NumPy; randint with an exclusive upper bound draws from the same
        # [0, n-1] range
        ids = np.random.randint(0, n, int(n * pct))
        yr = y[ids]
        yr[0] = max(y)  # make sure we have the upper bound
        self.original_y = y
        self.pct = pct
        self.yr = yr
        self.yr_n = yr.size
        Map_Classifier.__init__(self, yr)
        # classify the FULL data set on the bins fit to the sample
        self.yb, self.counts = bin1d(y, self.bins)
        self.name = "Jenks_Caspall_Sampled"
        self.y = y
        self._summary()  # have to recalculate summary stats

    def _set_bins(self):
        jc = Jenks_Caspall(self.y, self.k)
        self.bins = jc.bins
        self.iterations = jc.iterations

    def update(self, y=None, inplace=False, **kwargs):
        """
        Add data or change classification parameters.

        Parameters
        ----------
        y : array
            (n,1) array of data to classify
        inplace : bool
            whether to conduct the update in place or to return a
            copy estimated from the additional specifications.

        Additional parameters provided in **kwargs are passed to the init
        function of the class. For documentation, check the class constructor.
        """
        kwargs.update({'k': kwargs.pop('k', self.k)})
        kwargs.update({'pct': kwargs.pop('pct', self.pct)})
        if inplace:
            self._update(y, **kwargs)
        else:
            new = copy.deepcopy(self)
            new._update(y, **kwargs)
            return new
class Jenks_Caspall_Forced(Map_Classifier):
    """
    Jenks Caspall Map Classification with forced movements

    Parameters
    ----------
    y : array
        (n,1), values to classify
    k : int
        number of classes required

    Attributes
    ----------
    yb : array
        (n,1), bin ids for observations
    bins : array
        (k,1), the upper bounds of each class
    k : int
        the number of classes
    counts : array
        (k,1), the number of observations falling in each class

    Examples
    --------
    >>> import mapclassify as mc
    >>> cal = mc.load_example()
    >>> jcf = mc.Jenks_Caspall_Forced(cal, k = 5)
    >>> jcf.k
    5
    >>> jcf.counts
    array([12, 12, 13,  9, 12])
    >>> jcf4 = mc.Jenks_Caspall_Forced(cal, k = 4)
    >>> jcf4.k
    4
    >>> jcf4.counts
    array([15, 14, 14, 15])
    """

    def __init__(self, y, k=K):
        self.k = k
        Map_Classifier.__init__(self, y)
        self.name = "Jenks_Caspall_Forced"

    def _set_bins(self):
        # Hill-climbing on total within-class sum of squares (ss): starting
        # from quantile bins, repeatedly try moving a boundary observation
        # up or down one class and keep any move that lowers ss.
        x = self.y.copy()
        k = self.k
        q = quantile(x, k)
        solving = True
        xb, cnt = bin1d(x, q)
        # class means
        if x.ndim == 1:
            x.shape = (x.size, 1)
        n, tmp = x.shape
        # NOTE: sp.unique was removed from scipy's top-level namespace;
        # np.unique is used throughout this method instead
        xm = [x[xb == i].mean() for i in np.unique(xb)]
        q = xm
        xbar = np.array([xm[xbi] for xbi in xb])
        xbar.shape = (n, 1)
        ss = x - xbar
        ss *= ss
        ss = sum(ss)
        down_moves = up_moves = 0
        solving = True
        it = 0
        while solving:
            # try upward moves first: shift the top member of a class into
            # the next class if that lowers ss
            moving_up = True
            while moving_up:
                class_ids = np.unique(xb)
                nk = [sum(xb == j) for j in class_ids]
                candidates = nk[:-1]
                i = 0
                up_moves = 0
                while candidates:
                    nki = candidates.pop(0)
                    if nki > 1:  # never empty a class
                        ids = np.nonzero(xb == class_ids[i])
                        mover = max(ids[0])
                        tmp = xb.copy()
                        tmp[mover] = xb[mover] + 1
                        tm = [x[tmp == j].mean() for j in np.unique(tmp)]
                        txbar = np.array([tm[xbi] for xbi in tmp])
                        txbar.shape = (n, 1)
                        tss = x - txbar
                        tss *= tss
                        tss = sum(tss)
                        if tss < ss:
                            xb = tmp
                            ss = tss
                            candidates = []
                            up_moves += 1
                    i += 1
                if not up_moves:
                    moving_up = False
            # then downward moves: shift the bottom member of a class into
            # the previous class if that lowers ss
            moving_down = True
            while moving_down:
                class_ids = np.unique(xb)
                nk = [sum(xb == j) for j in class_ids]
                candidates = nk[1:]
                i = 1
                down_moves = 0
                while candidates:
                    nki = candidates.pop(0)
                    if nki > 1:  # never empty a class
                        ids = np.nonzero(xb == class_ids[i])
                        mover = min(ids[0])
                        mover_class = xb[mover]
                        target_class = mover_class - 1
                        tmp = xb.copy()
                        tmp[mover] = target_class
                        tm = [x[tmp == j].mean() for j in np.unique(tmp)]
                        txbar = np.array([tm[xbi] for xbi in tmp])
                        txbar.shape = (n, 1)
                        tss = x - txbar
                        tss *= tss
                        tss = sum(tss)
                        if tss < ss:
                            xb = tmp
                            ss = tss
                            candidates = []
                            down_moves += 1
                    i += 1
                if not down_moves:
                    moving_down = False
            if not up_moves and not down_moves:
                solving = False
            it += 1
        cuts = [max(x[xb == c]) for c in np.unique(xb)]
        self.bins = np.array(cuts)
        self.iterations = it
class User_Defined(Map_Classifier):
    """
    User Specified Binning

    Parameters
    ----------
    y : array
        (n,1), values to classify
    bins : array
        (k,1), upper bounds of classes (have to be monotically increasing)

    Attributes
    ----------
    yb : array
        (n,1), bin ids for observations
    bins : array
        (k,1), the upper bounds of each class
    k : int
        the number of classes
    counts : array
        (k,1), the number of observations falling in each class

    Examples
    --------
    >>> import mapclassify as mc
    >>> cal = mc.load_example()
    >>> bins = [20, max(cal)]
    >>> ud = mc.User_Defined(cal, bins)
    >>> ud.bins
    array([  20.  , 4111.45])
    >>> ud.counts
    array([37, 21])
    >>> bins = [20, 30]
    >>> ud = mc.User_Defined(cal, bins)
    >>> ud.bins
    array([  20.  ,   30.  , 4111.45])
    >>> ud.counts
    array([37,  4, 17])

    Notes
    -----
    If upper bound of user bins does not exceed max(y) we append an
    additional bin.
    """

    def __init__(self, y, bins):
        # fix: work on a copy so the caller's bin sequence is never
        # mutated (the old code appended max(y) to the caller's list);
        # list() also accepts tuples/arrays of bounds
        bins = list(bins)
        if bins[-1] < max(y):
            bins.append(max(y))
        self.k = len(bins)
        self.bins = np.array(bins)
        self.y = y
        Map_Classifier.__init__(self, y)
        self.name = 'User Defined'

    def _set_bins(self):
        # bins were provided by the user; nothing to compute
        pass

    def _update(self, y=None, bins=None):
        if y is not None:
            if hasattr(y, 'values'):
                y = y.values
            y = np.append(y.flatten(), self.y)
        else:
            y = self.y
        if bins is None:
            bins = self.bins
        self.__init__(y, bins)

    def update(self, y=None, inplace=False, **kwargs):
        """
        Add data or change classification parameters.

        Parameters
        ----------
        y : array
            (n,1) array of data to classify
        inplace : bool
            whether to conduct the update in place or to return a
            copy estimated from the additional specifications.

        Additional parameters provided in **kwargs are passed to the init
        function of the class. For documentation, check the class constructor.
        """
        bins = kwargs.pop('bins', self.bins)
        if inplace:
            self._update(y=y, bins=bins, **kwargs)
        else:
            new = copy.deepcopy(self)
            new._update(y, bins, **kwargs)
            return new
class Max_P_Classifier(Map_Classifier):
    """
    Max_P Map Classification

    Based on Max_p regionalization algorithm

    Parameters
    ----------
    y : array
        (n,1), values to classify
    k : int
        number of classes required
    initial : int
        number of initial solutions to use prior to swapping

    Attributes
    ----------
    yb : array
        (n,1), bin ids for observations
    bins : array
        (k,1), the upper bounds of each class
    k : int
        the number of classes
    counts : array
        (k,1), the number of observations falling in each class
    """

    def __init__(self, y, k=K, initial=1000):
        self.k = k
        self.initial = initial
        Map_Classifier.__init__(self, y)
        self.name = "Max_P"

    def _set_bins(self):
        x = self.y.copy()
        k = self.k
        q = quantile(x, k)
        if x.ndim == 1:
            x.shape = (x.size, 1)
        n, tmp = x.shape
        x.sort(axis=0)
        # find best of initial solutions
        solution = 0
        best_tss = x.var() * x.shape[0]
        tss_all = np.zeros((self.initial, 1))
        # NOTE(review): if no random solution beats the total-SS seed,
        # `best_solution` stays unbound -- preserved from the original
        while solution < self.initial:
            remaining = list(range(n))
            # seed each class with the observation nearest its quantile
            seeds = [
                np.nonzero(di == min(di))[0][0]
                for di in [np.abs(x - qi) for qi in q]
            ]
            rseeds = np.random.permutation(list(range(k))).tolist()
            [remaining.remove(seed) for seed in seeds]
            self.classes = classes = []
            [classes.append([seed]) for seed in seeds]
            # grow each class outward over contiguous (sorted) neighbors
            while rseeds:
                seed_id = rseeds.pop()
                current = classes[seed_id]
                growing = True
                while growing:
                    current = classes[seed_id]
                    low = current[0]
                    high = current[-1]
                    left = low - 1
                    right = high + 1
                    move_made = False
                    if left in remaining:
                        current.insert(0, left)
                        remaining.remove(left)
                        move_made = True
                    if right in remaining:
                        current.append(right)
                        remaining.remove(right)
                        move_made = True
                    if move_made:
                        classes[seed_id] = current
                    else:
                        growing = False
            tss = _fit(self.y, classes)
            tss_all[solution] = tss
            if tss < best_tss:
                best_solution = classes
                best_it = solution
                best_tss = tss
            solution += 1
        classes = best_solution
        self.best_it = best_it
        self.tss = best_tss
        self.a2c = a2c = {}
        self.tss_all = tss_all
        for r, cl in enumerate(classes):
            for a in cl:
                a2c[a] = r
        # local improvement: swap boundary members between adjacent classes
        # while any swap lowers total within-class SS
        swapping = True
        while swapping:
            rseeds = np.random.permutation(list(range(k))).tolist()
            total_moves = 0
            while rseeds:
                id = rseeds.pop()
                growing = True
                total_moves = 0
                while growing:
                    target = classes[id]
                    left = target[0] - 1
                    right = target[-1] + 1
                    n_moves = 0
                    if left in a2c:
                        left_class = classes[a2c[left]]
                        if len(left_class) > 1:
                            a = left_class[-1]
                            if self._swap(left_class, target, a):
                                target.insert(0, a)
                                left_class.remove(a)
                                a2c[a] = id
                                n_moves += 1
                    if right in a2c:
                        right_class = classes[a2c[right]]
                        if len(right_class) > 1:
                            a = right_class[0]
                            if self._swap(right_class, target, a):
                                target.append(a)
                                right_class.remove(a)
                                n_moves += 1
                                a2c[a] = id
                    if not n_moves:
                        growing = False
                    total_moves += n_moves
            if not total_moves:
                swapping = False
        xs = self.y.copy()
        xs.sort()
        self.bins = np.array([xs[cl][-1] for cl in classes])

    def _ss(self, class_def):
        """calculates sum of squares for a class"""
        yc = self.y[class_def]
        css = yc - yc.mean()
        css *= css
        return sum(css)

    def _swap(self, class1, class2, a):
        """evaluate cost of moving a from class1 to class2"""
        ss1 = self._ss(class1)
        ss2 = self._ss(class2)
        tss1 = ss1 + ss2
        class1c = copy.copy(class1)
        class2c = copy.copy(class2)
        class1c.remove(a)
        class2c.append(a)
        ss1 = self._ss(class1c)
        ss2 = self._ss(class2c)
        tss2 = ss1 + ss2
        if tss1 < tss2:
            return False
        else:
            return True

    def update(self, y=None, inplace=False, **kwargs):
        """
        Add data or change classification parameters.

        Parameters
        ----------
        y : array
            (n,1) array of data to classify
        inplace : bool
            whether to conduct the update in place or to return a
            copy estimated from the additional specifications.

        Additional parameters provided in **kwargs are passed to the init
        function of the class. For documentation, check the class constructor.
        """
        kwargs.update({'initial': kwargs.pop('initial', self.initial)})
        kwargs.update({'k': kwargs.pop('k', self.k)})
        # fix: the previous code passed an undefined name `bins` to
        # _update, raising NameError on every call
        if inplace:
            self._update(y, **kwargs)
        else:
            new = copy.deepcopy(self)
            new._update(y, **kwargs)
            return new
# registry of k-parameterised classifiers usable by gadf()/K_classifiers
kmethods = {
    "Quantiles": Quantiles,
    "Fisher_Jenks": Fisher_Jenks,
    "Natural_Breaks": Natural_Breaks,
    "Maximum_Breaks": Maximum_Breaks,
}
def gadf(y, method="Quantiles", maxk=15, pct=0.8):
    r"""
    Evaluate the Goodness of Absolute Deviation Fit of a Classifier

    Finds the minimum value of k for which gadf>pct

    Parameters
    ----------
    y : array
        (n, 1) values to be classified
    method : {'Quantiles', 'Fisher_Jenks', 'Maximum_Breaks', 'Natural_Breaks'}
    maxk : int
        maximum value of k to evaluate
    pct : float
        The percentage of GADF to exceed

    Returns
    -------
    k : int
        number of classes
    cl : object
        instance of the classifier at k
    gadf : float
        goodness of absolute deviation fit

    Notes
    -----
    The GADF is defined as:

    .. math::

        GADF = 1 - \sum_c \sum_{i \in c}
               |y_i - y_{c,med}|  / \sum_i |y_i - y_{med}|

    where :math:`y_{med}` is the global median and :math:`y_{c,med}` is
    the median for class :math:`c`.

    See Also
    --------
    K_classifiers
    """
    # fix: raw docstring -- \sum and \in are invalid escape sequences in a
    # plain string literal (SyntaxWarning on recent Pythons)
    y = np.array(y)
    # total absolute deviation about the global median
    adam = (np.abs(y - np.median(y))).sum()
    for k in range(2, maxk + 1):
        cl = kmethods[method](y, k)
        gadf = 1 - cl.adcm / adam
        if gadf > pct:
            break
    return (k, cl, gadf)
class K_classifiers(object):
    """
    Evaluate all k-classifers and pick optimal based on k and GADF

    Parameters
    ----------
    y : array
        (n,1), values to be classified
    pct : float
        The percentage of GADF to exceed

    Attributes
    ----------
    best : object
        instance of the optimal Map_Classifier
    results : dictionary
        keys are classifier names, values are the Map_Classifier
        instances with the best pct for each classifer

    Notes
    -----
    This can be used to suggest a classification scheme.

    See Also
    --------
    gadf
    """

    def __init__(self, y, pct=0.8):
        results = {}
        best = gadf(y, "Fisher_Jenks", maxk=len(y) - 1, pct=pct)
        # fix: gadf returns (k, classifier, gadf) -- the previous code
        # unpacked these two swapped (pct0 = best[0]; k0 = best[-1]),
        # so the comparison below mixed class counts with fit values
        k0 = best[0]
        pct0 = best[-1]
        keys = list(kmethods.keys())
        keys.remove("Fisher_Jenks")
        results["Fisher_Jenks"] = best
        for method in keys:
            results[method] = gadf(y, method, maxk=len(y) - 1, pct=pct)
            k1 = results[method][0]
            pct1 = results[method][-1]
            # prefer fewer classes; break ties on higher GADF
            if (k1 < k0) or (k1 == k0 and pct0 < pct1):
                best = results[method]
                k0 = k1
                pct0 = pct1
        self.results = results
        self.best = best[1]
|
pysal/mapclassify
|
mapclassify/classifiers.py
|
gadf
|
python
|
def gadf(y, method="Quantiles", maxk=15, pct=0.8):
y = np.array(y)
adam = (np.abs(y - np.median(y))).sum()
for k in range(2, maxk + 1):
cl = kmethods[method](y, k)
gadf = 1 - cl.adcm / adam
if gadf > pct:
break
return (k, cl, gadf)
|
Evaluate the Goodness of Absolute Deviation Fit of a Classifier
Finds the minimum value of k for which gadf>pct
Parameters
----------
y : array
(n, 1) values to be classified
method : {'Quantiles, 'Fisher_Jenks', 'Maximum_Breaks', 'Natrual_Breaks'}
maxk : int
maximum value of k to evaluate
pct : float
The percentage of GADF to exceed
Returns
-------
k : int
number of classes
cl : object
instance of the classifier at k
gadf : float
goodness of absolute deviation fit
Examples
--------
>>> import mapclassify as mc
>>> cal = mc.load_example()
>>> qgadf = mc.classifiers.gadf(cal)
>>> qgadf[0]
15
>>> qgadf[-1]
0.3740257590909283
Quantiles fail to exceed 0.80 before 15 classes. If we lower the bar to
0.2 we see quintiles as a result
>>> qgadf2 = mc.classifiers.gadf(cal, pct = 0.2)
>>> qgadf2[0]
5
>>> qgadf2[-1]
0.21710231966462412
>>>
Notes
-----
The GADF is defined as:
.. math::
GADF = 1 - \sum_c \sum_{i \in c}
|y_i - y_{c,med}| / \sum_i |y_i - y_{med}|
where :math:`y_{med}` is the global median and :math:`y_{c,med}` is
the median for class :math:`c`.
See Also
--------
K_classifiers
|
train
|
https://github.com/pysal/mapclassify/blob/5b22ec33f5802becf40557614d90cd38efa1676e/mapclassify/classifiers.py#L2255-L2325
| null |
"""
A module of classification schemes for choropleth mapping.
"""
__author__ = "Sergio J. Rey"
# public API of this module
__all__ = [
    'Map_Classifier', 'quantile', 'Box_Plot', 'Equal_Interval', 'Fisher_Jenks',
    'Fisher_Jenks_Sampled', 'Jenks_Caspall', 'Jenks_Caspall_Forced',
    'Jenks_Caspall_Sampled', 'Max_P_Classifier', 'Maximum_Breaks',
    'Natural_Breaks', 'Quantiles', 'Percentiles', 'Std_Mean', 'User_Defined',
    'gadf', 'K_classifiers', 'HeadTail_Breaks', 'CLASSIFIERS'
]
# names of the concrete classifier classes exposed by this module
CLASSIFIERS = ('Box_Plot', 'Equal_Interval', 'Fisher_Jenks',
               'Fisher_Jenks_Sampled', 'HeadTail_Breaks', 'Jenks_Caspall',
               'Jenks_Caspall_Forced', 'Jenks_Caspall_Sampled',
               'Max_P_Classifier', 'Maximum_Breaks', 'Natural_Breaks',
               'Quantiles', 'Percentiles', 'Std_Mean', 'User_Defined')
K = 5  # default number of classes in any map scheme with this as an argument
import numpy as np
import scipy.stats as stats
import scipy as sp
import copy
from scipy.cluster.vq import kmeans as KMEANS
from warnings import warn as Warn
try:
    from numba import jit
except ImportError:
    # numba is optional: fall back to a no-op decorator so that
    # @jit-decorated functions still run as pure Python
    def jit(func):
        return func
def headTail_breaks(values, cuts):
    """
    Head/tail breaks helper function.

    Appends the mean of ``values`` to ``cuts``, then recurses on the
    "head" (values at or above the mean).

    Parameters
    ----------
    values : array-like
        values to classify
    cuts : list
        accumulator of break points; mutated in place and returned

    Returns
    -------
    cuts : list
        the accumulated break points
    """
    values = np.array(values)
    mean = np.mean(values)
    cuts.append(mean)
    # fix: also stop when all remaining values are identical -- previously
    # values[values >= mean] never shrank in that case, recursing until
    # RecursionError
    if len(values) > 1 and values.min() != values.max():
        return headTail_breaks(values[values >= mean], cuts)
    return cuts
def quantile(y, k=4):
    """
    Calculates the quantiles for an array

    Parameters
    ----------
    y : array
        (n,1), values to classify
    k : int
        number of quantiles

    Returns
    -------
    q : array
        (n,1), quantile values

    Examples
    --------
    >>> import numpy as np
    >>> import mapclassify as mc
    >>> x = np.arange(1000)
    >>> mc.classifiers.quantile(x)
    array([249.75, 499.5 , 749.25, 999.  ])
    >>> mc.classifiers.quantile(x, k = 3)
    array([333., 666., 999.])

    If ties cause quantile values to repeat, the result collapses to the
    distinct pseudo quantiles, so fewer than k classes may be returned:

    >>> y = np.array([1.0] * 100 + [3.0] * 40)
    >>> mc.classifiers.quantile(y)
    array([1., 3.])
    """
    step = 100. / k
    percentiles = np.arange(step, 100 + step, step)
    # floating point accumulation can push the last percentile past 100
    if percentiles[-1] > 100.0:
        percentiles[-1] = 100.0
    cutoffs = np.unique(
        [stats.scoreatpercentile(y, pct) for pct in percentiles])
    if len(cutoffs) < k:
        Warn('Warning: Not enough unique values in array to form k classes',
             UserWarning)
        Warn('Warning: setting k to %d' % len(cutoffs), UserWarning)
    return cutoffs
def binC(y, bins):
    """
    Bin categorical/qualitative data

    Parameters
    ----------
    y : array
        (n,q), categorical values
    bins : array
        (k,1), unique values associated with each bin

    Returns
    -------
    b : array
        (n,q), bin membership, values between 0 and k-1

    Notes
    -----
    A warning is emitted for any value of y that matches none of the bins.
    """
    if np.ndim(y) == 1:
        n, k = np.shape(y)[0], 1
    else:
        n, k = np.shape(y)
    b = np.zeros((n, k), dtype='int')
    # assign each category its positional bin id
    for i, category in enumerate(bins):
        b[np.nonzero(y == category)] = i
    # warn about any value that matched no category
    for val in set(y.flatten()):
        if val not in bins:
            Warn('value not in bin: {}'.format(val), UserWarning)
            Warn('bins: {}'.format(bins), UserWarning)
    return b
def bin(y, bins):
    """
    bin interval/ratio data

    Parameters
    ----------
    y : array
        (n,q), values to bin
    bins : array
        (k,1), upper bounds of each bin (monotonic)

    Returns
    -------
    b : array
        (n,q), bin ids with values between 0 and k-1

    Notes
    -----
    Values larger than the last upper bound are left in bin 0 (this matches
    the historical behavior of this helper).
    """
    if np.ndim(y) == 1:
        n, k = np.shape(y)[0], 1
    else:
        n, k = np.shape(y)
    b = np.zeros((n, k), dtype='int')
    cutoffs = bins if isinstance(bins, list) else bins.tolist()
    # assign from the largest cutoff downward so that smaller bins
    # overwrite last and each value ends in its tightest bin
    for i in range(len(cutoffs) - 1, -1, -1):
        b[np.nonzero(y <= cutoffs[i])] = i
    return b
def bin1d(x, bins):
    """
    Place values of a 1-d array into bins and determine counts of values in
    each bin

    Parameters
    ----------
    x : array
        (n, 1), values to bin
    bins : array
        (k,1), upper bounds of each bin (monotonic)

    Returns
    -------
    binIds : array
        1-d array of integer bin Ids
    counts : int
        number of elements of x falling in each bin

    Examples
    --------
    >>> import numpy as np
    >>> import mapclassify as mc
    >>> x = np.arange(100, dtype = 'float')
    >>> binIds, counts = mc.classifiers.bin1d(x, [25, 74, 100])
    >>> counts
    array([26, 49, 25])
    """
    # interval j is (bins[j-1], bins[j]]; the first lower bound is -inf
    lowers = [-float("inf")] + list(bins[:-1])
    binIds = np.zeros(x.shape, dtype='int')
    # intervals are disjoint, so plain assignment is equivalent to the
    # accumulate-and-sum formulation
    for idx, (lo, hi) in enumerate(zip(lowers, bins)):
        binIds[(x > lo) & (x <= hi)] = idx
    counts = np.bincount(binIds, minlength=len(bins))
    return (binIds, counts)
def load_example():
    """
    Helper function for doc tests.

    Loads the example dataset shipped with the package (presumably the
    California employment data, judging by the `calemp` name and the `cal`
    variable used throughout the doctests -- TODO confirm).
    """
    from .datasets import calemp
    return calemp.load()
def _kmeans(y, k=5):
"""
Helper function to do kmeans in one dimension
"""
y = y * 1. # KMEANS needs float or double dtype
centroids = KMEANS(y, k)[0]
centroids.sort()
try:
class_ids = np.abs(y - centroids).argmin(axis=1)
except:
class_ids = np.abs(y[:, np.newaxis] - centroids).argmin(axis=1)
uc = np.unique(class_ids)
cuts = np.array([y[class_ids == c].max() for c in uc])
y_cent = np.zeros_like(y)
for c in uc:
y_cent[class_ids == c] = centroids[c]
diffs = y - y_cent
diffs *= diffs
return class_ids, cuts, diffs.sum(), centroids
def natural_breaks(values, k=5):
    """
    natural breaks helper function

    Jenks natural breaks is kmeans in one dimension

    Parameters
    ----------
    values : array-like
        values to classify
    k : int
        number of classes requested; silently reduced (with a warning)
        when the data have fewer unique values

    Returns
    -------
    tuple of (centroids, class_ids, fit, cuts) from the kmeans run
    """
    values = np.array(values)
    n_unique = len(np.unique(values))
    if n_unique < k:
        Warn('Warning: Not enough unique values in array to form k classes',
             UserWarning)
        Warn('Warning: setting k to %d' % n_unique, UserWarning)
        k = n_unique
    class_ids, cuts, fit, centroids = _kmeans(values, k)
    return (centroids, class_ids, fit, cuts)
@jit
def _fisher_jenks_means(values, classes=5, sort=True):
    """
    Jenks Optimal (Natural Breaks) algorithm implemented in Python.

    Parameters
    ----------
    values : array
             values to classify; sorted in place unless ``sort`` is False
             (the dynamic program requires sorted input)
    classes : int
              number of classes to form
    sort : bool
           whether to sort ``values`` first

    Returns
    -------
    kclass : array
             (classes + 1,) break values; kclass[0] is the minimum of
             ``values`` and kclass[classes] its maximum

    Notes
    -----
    The original Python code comes from here:
    http://danieljlewis.org/2010/06/07/jenks-natural-breaks-algorithm-in-python/
    and is based on a JAVA and Fortran code available here:
    https://stat.ethz.ch/pipermail/r-sig-geo/2006-March/000811.html

    Returns class breaks such that classes are internally homogeneous while
    assuring heterogeneity among classes.
    """
    if sort:
        values.sort()
    n_data = len(values)
    # mat1[l, j]: optimal start index of the last class when the first l
    # values are split into j classes; mat2[l, j]: the associated minimal
    # total within-class variance.
    mat1 = np.zeros((n_data + 1, classes + 1), dtype=np.int32)
    mat2 = np.zeros((n_data + 1, classes + 1), dtype=np.float32)
    mat1[1, 1:] = 1
    mat2[2:, 1:] = np.inf
    v = np.float32(0)
    for l in range(2, len(values) + 1):
        # Running sum (s1), sum of squares (s2) and size (w) of the
        # candidate last class values[i3-1:l], grown backwards from l.
        s1 = np.float32(0)
        s2 = np.float32(0)
        w = np.float32(0)
        for m in range(1, l + 1):
            i3 = l - m + 1
            val = np.float32(values[i3 - 1])
            s2 += val * val
            s1 += val
            w += np.float32(1)
            # Within-class sum of squared deviations for the candidate class.
            v = s2 - (s1 * s1) / w
            i4 = i3 - 1
            if i4 != 0:
                for j in range(2, classes + 1):
                    # Keep the split if it does not worsen the best variance.
                    if mat2[l, j] >= (v + mat2[i4, j - 1]):
                        mat1[l, j] = i3
                        mat2[l, j] = v + mat2[i4, j - 1]
        mat1[l, 1] = 1
        mat2[l, 1] = v
    # Backtrack through mat1 to recover the break values.
    k = len(values)
    kclass = np.zeros(classes + 1, dtype=values.dtype)
    kclass[classes] = values[len(values) - 1]
    kclass[0] = values[0]
    for countNum in range(classes, 1, -1):
        pivot = mat1[k, countNum]
        id = int(pivot - 2)
        kclass[countNum - 1] = values[id]
        k = int(pivot - 1)
    return kclass
class Map_Classifier(object):
    """
    Abstract class for all map classifications :cite:`Slocum_2009`

    For an array :math:`y` of :math:`n` values, a map classifier places each
    value :math:`y_i` into one of :math:`k` mutually exclusive and exhaustive
    classes.  Each classifer defines the classes based on different criteria,
    but in all cases the following hold for the classifiers in PySAL:

    .. math:: C_j^l < y_i \le C_j^u \ \forall  i \in C_j

    where :math:`C_j` denotes class :math:`j` which has lower bound
    :math:`C_j^l` and upper bound :math:`C_j^u`.

    Map Classifiers Supported

    * :class:`mapclassify.classifiers.Box_Plot`
    * :class:`mapclassify.classifiers.Equal_Interval`
    * :class:`mapclassify.classifiers.Fisher_Jenks`
    * :class:`mapclassify.classifiers.Fisher_Jenks_Sampled`
    * :class:`mapclassify.classifiers.HeadTail_Breaks`
    * :class:`mapclassify.classifiers.Jenks_Caspall`
    * :class:`mapclassify.classifiers.Jenks_Caspall_Forced`
    * :class:`mapclassify.classifiers.Jenks_Caspall_Sampled`
    * :class:`mapclassify.classifiers.Max_P_Classifier`
    * :class:`mapclassify.classifiers.Maximum_Breaks`
    * :class:`mapclassify.classifiers.Natural_Breaks`
    * :class:`mapclassify.classifiers.Quantiles`
    * :class:`mapclassify.classifiers.Percentiles`
    * :class:`mapclassify.classifiers.Std_Mean`
    * :class:`mapclassify.classifiers.User_Defined`

    Utilities:

    In addition to the classifiers, there are several utility functions that
    can be used to evaluate the properties of a specific classifier,
    or for automatic selection of a classifier and
    number of classes.

    * :func:`mapclassify.classifiers.gadf`
    * :class:`mapclassify.classifiers.K_classifiers`
    """

    def __init__(self, y):
        # Subclasses set self.k (and other parameters) before calling this;
        # _classify() relies on the subclass's _set_bins() implementation.
        y = np.asarray(y).flatten()
        self.name = 'Map Classifier'
        self.y = y
        self._classify()
        self._summary()

    def _summary(self):
        # classes: list of observation-index lists, one per class.
        yb = self.yb
        self.classes = [np.nonzero(yb == c)[0].tolist() for c in range(self.k)]
        self.tss = self.get_tss()
        self.adcm = self.get_adcm()
        self.gadf = self.get_gadf()

    def _classify(self):
        # Template method: _set_bins() is supplied by the subclass.
        self._set_bins()
        self.yb, self.counts = bin1d(self.y, self.bins)

    def _update(self, data, *args, **kwargs):
        """
        The only thing that *should* happen in this function is
        1. input sanitization for pandas
        2. classification/reclassification.

        Using their __init__ methods, all classifiers can re-classify given
        different input parameters or additional data.

        If you've got a cleverer updating equation than the intial estimation
        equation, remove the call to self.__init__ below and replace it with
        the updating function.
        """
        if data is not None:
            # Append the new observations to the existing ones and refit.
            data = np.asarray(data).flatten()
            data = np.append(data.flatten(), self.y)
        else:
            data = self.y
        self.__init__(data, *args, **kwargs)

    @classmethod
    def make(cls, *args, **kwargs):
        """
        Configure and create a classifier that will consume data and produce
        classifications, given the configuration options specified by this
        function.

        Note that this like a *partial application* of the relevant class
        constructor. `make` creates a function that returns classifications; it
        does not actually do the classification.

        If you want to classify data directly, use the appropriate class
        constructor, like Quantiles, Max_Breaks, etc.

        If you *have* a classifier object, but want to find which bins new data
        falls into, use find_bin.

        Parameters
        ----------
        *args : required positional arguments
                all positional arguments required by the classifier,
                excluding the input data.
        rolling : bool
                  a boolean configuring the outputted classifier to use
                  a rolling classifier rather than a new classifier for
                  each input. If rolling, this adds the current data to
                  all of the previous data in the classifier, and
                  rebalances the bins, like a running median
                  computation.
        return_object : bool
                        a boolean configuring the outputted classifier to
                        return the classifier object or not
        return_bins : bool
                      a boolean configuring the outputted classifier to
                      return the bins/breaks or not
        return_counts : bool
                        a boolean configuring the outputted classifier to
                        return the histogram of objects falling into each bin
                        or not

        Returns
        -------
        A function that consumes data and returns their bins (and object,
        bins/breaks, or counts, if requested).

        Note
        ----
        This is most useful when you want to run a classifier many times
        with a given configuration, such as when classifying many columns of an
        array or dataframe using the same configuration.

        Examples
        --------
        >>> import libpysal as ps
        >>> import mapclassify as mc
        >>> import geopandas as gpd
        >>> df = gpd.read_file(ps.examples.get_path('columbus.dbf'))
        >>> classifier = mc.Quantiles.make(k=9)
        >>> cl = df[['HOVAL', 'CRIME', 'INC']].apply(classifier)
        >>> cl["HOVAL"].values[:10]
        array([8, 7, 2, 4, 1, 3, 8, 5, 7, 8])
        >>> cl["CRIME"].values[:10]
        array([0, 1, 3, 4, 6, 2, 0, 5, 3, 4])
        >>> cl["INC"].values[:10]
        array([7, 8, 5, 0, 3, 5, 0, 3, 6, 4])
        >>> import pandas as pd; from numpy import linspace as lsp
        >>> data = [lsp(3,8,num=10), lsp(10, 0, num=10), lsp(-5, 15, num=10)]
        >>> data = pd.DataFrame(data).T
        >>> data
        0 1 2
        0 3.000000 10.000000 -5.000000
        1 3.555556 8.888889 -2.777778
        2 4.111111 7.777778 -0.555556
        3 4.666667 6.666667 1.666667
        4 5.222222 5.555556 3.888889
        5 5.777778 4.444444 6.111111
        6 6.333333 3.333333 8.333333
        7 6.888889 2.222222 10.555556
        8 7.444444 1.111111 12.777778
        9 8.000000 0.000000 15.000000
        >>> data.apply(mc.Quantiles.make(rolling=True))
        0 1 2
        0 0 4 0
        1 0 4 0
        2 1 4 0
        3 1 3 0
        4 2 2 1
        5 2 1 2
        6 3 0 4
        7 3 0 4
        8 4 0 4
        9 4 0 4
        >>> dbf = ps.io.open(ps.examples.get_path('baltim.dbf'))
        >>> data = dbf.by_col_array('PRICE', 'LOTSZ', 'SQFT')
        >>> my_bins = [1, 10, 20, 40, 80]
        >>> cl = [mc.User_Defined.make(bins=my_bins)(a) for a in data.T]
        >>> len(cl)
        3
        >>> cl[0][:10]
        array([4, 5, 5, 5, 4, 4, 5, 4, 4, 5])
        """
        # only flag overrides return flag
        to_annotate = copy.deepcopy(kwargs)
        return_object = kwargs.pop('return_object', False)
        return_bins = kwargs.pop('return_bins', False)
        return_counts = kwargs.pop('return_counts', False)
        rolling = kwargs.pop('rolling', False)
        if rolling:
            # just initialize a fake classifier
            data = list(range(10))
            cls_instance = cls(data, *args, **kwargs)
            # and empty it, since we'll be using the update
            cls_instance.y = np.array([])
        else:
            cls_instance = None

        # wrap init in a closure to make a consumer.
        # Qc Na: "Objects/Closures are poor man's Closures/Objects"
        # cls_instance is bound as a default argument so the rolling state
        # survives between calls without a global.
        def classifier(data, cls_instance=cls_instance):
            if rolling:
                cls_instance.update(data, inplace=True, **kwargs)
                yb = cls_instance.find_bin(data)
            else:
                cls_instance = cls(data, *args, **kwargs)
                yb = cls_instance.yb
            outs = [yb, None, None, None]
            outs[1] = cls_instance if return_object else None
            outs[2] = cls_instance.bins if return_bins else None
            outs[3] = cls_instance.counts if return_counts else None
            outs = [a for a in outs if a is not None]
            if len(outs) == 1:
                return outs[0]
            else:
                return outs

        # for debugging/jic, keep around the kwargs.
        # in future, we might want to make this a thin class, so that we can
        # set a custom repr. Call the class `Binner` or something, that's a
        # pre-configured Classifier that just consumes data, bins it, &
        # possibly updates the bins.
        classifier._options = to_annotate
        return classifier

    def update(self, y=None, inplace=False, **kwargs):
        """
        Add data or change classification parameters.

        Parameters
        ----------
        y : array
            (n,1) array of data to classify
        inplace : bool
            whether to conduct the update in place or to return a copy
            estimated from the additional specifications.

        Additional parameters provided in **kwargs are passed to the init
        function of the class. For documentation, check the class constructor.
        """
        kwargs.update({'k': kwargs.pop('k', self.k)})
        if inplace:
            self._update(y, **kwargs)
        else:
            new = copy.deepcopy(self)
            new._update(y, **kwargs)
            return new

    def __str__(self):
        st = self._table_string()
        return st

    def __repr__(self):
        return self._table_string()

    def __call__(self, *args, **kwargs):
        """
        This will allow the classifier to be called like it's a function.

        Whether or not we want to make this be "find_bin" or "update" is a
        design decision.

        I like this as find_bin, since a classifier's job should be to classify
        the data given to it using the rules estimated from the `_classify()`
        function.
        """
        return self.find_bin(*args)

    def get_tss(self):
        """
        Total sum of squares around class means

        Returns sum of squares over all class means
        """
        tss = 0
        for class_def in self.classes:
            if len(class_def) > 0:
                yc = self.y[class_def]
                css = yc - yc.mean()
                css *= css
                tss += sum(css)
        return tss

    def _set_bins(self):
        # Overridden by each concrete classifier to populate self.bins.
        pass

    def get_adcm(self):
        """
        Absolute deviation around class median (ADCM).

        Calculates the absolute deviations of each observation about its class
        median as a measure of fit for the classification method.

        Returns sum of ADCM over all classes
        """
        adcm = 0
        for class_def in self.classes:
            if len(class_def) > 0:
                yc = self.y[class_def]
                yc_med = np.median(yc)
                ycd = np.abs(yc - yc_med)
                adcm += sum(ycd)
        return adcm

    def get_gadf(self):
        """
        Goodness of absolute deviation of fit
        """
        # 1 minus ADCM relative to the deviation around the global median.
        adam = (np.abs(self.y - np.median(self.y))).sum()
        gadf = 1 - self.adcm / adam
        return gadf

    def _table_string(self, width=12, decimal=3):
        # Build a fixed-width text table of the class intervals and counts;
        # column width is driven by the widest formatted bin value.
        fmt = ".%df" % decimal
        fmt = "%" + fmt
        largest = max([len(fmt % i) for i in self.bins])
        width = largest
        fmt = "%d.%df" % (width, decimal)
        fmt = "%" + fmt
        h1 = "Lower"
        h1 = h1.center(largest)
        h2 = " "
        h2 = h2.center(10)
        h3 = "Upper"
        h3 = h3.center(largest + 1)
        largest = "%d" % max(self.counts)
        largest = len(largest) + 15
        h4 = "Count"
        h4 = h4.rjust(largest)
        table = []
        header = h1 + h2 + h3 + h4
        table.append(header)
        table.append("=" * len(header))
        for i, up in enumerate(self.bins):
            if i == 0:
                # First class has no lower bound.
                left = " " * width
                left += " x[i] <= "
            else:
                left = fmt % self.bins[i - 1]
                left += " < x[i] <= "
            right = fmt % self.bins[i]
            row = left + right
            cnt = "%d" % self.counts[i]
            cnt = cnt.rjust(largest)
            row += cnt
            table.append(row)
        name = self.name
        top = name.center(len(row))
        table.insert(0, top)
        table.insert(1, " ")
        table = "\n".join(table)
        return table

    def find_bin(self, x):
        """
        Sort input or inputs according to the current bin estimate

        Parameters
        ----------
        x : array or numeric
            a value or array of values to fit within the estimated
            bins

        Returns
        -------
        a bin index or array of bin indices that classify the input into one of
        the classifiers' bins.

        Note that this differs from similar functionality in
        numpy.digitize(x, classi.bins, right=True).

        This will always provide the closest bin, so data "outside" the classifier,
        above and below the max/min breaks, will be classified into the nearest bin.

        numpy.digitize returns k+1 for data greater than the greatest bin, but retains 0
        for data below the lowest bin.
        """
        x = np.asarray(x).flatten()
        right = np.digitize(x, self.bins, right=True)
        # Clamp values above the top break into the last bin.
        if right.max() == len(self.bins):
            right[right == len(self.bins)] = len(self.bins) - 1
        return right
class HeadTail_Breaks(Map_Classifier):
    """
    Head/tail Breaks Map Classification for Heavy-tailed Distributions

    Parameters
    ----------
    y : array
        (n,1), values to classify

    Attributes
    ----------
    yb : array
         (n,1), bin ids for observations
    bins : array
           (k,1), the upper bounds of each class
    k : int
        the number of classes
    counts : array
             (k,1), the number of observations falling in each class

    Examples
    --------
    >>> import numpy as np
    >>> import mapclassify as mc
    >>> np.random.seed(10)
    >>> cal = mc.load_example()
    >>> htb = mc.HeadTail_Breaks(cal)
    >>> htb.k
    3
    >>> htb.counts
    array([50, 7, 1])
    >>> htb.bins
    array([ 125.92810345, 811.26 , 4111.45 ])
    >>> np.random.seed(123456)
    >>> x = np.random.lognormal(3, 1, 1000)
    >>> htb = mc.HeadTail_Breaks(x)
    >>> htb.bins
    array([ 32.26204423, 72.50205622, 128.07150107, 190.2899093 ,
           264.82847377, 457.88157946, 576.76046949])
    >>> htb.counts
    array([695, 209, 62, 22, 10, 1, 1])

    Notes
    -----
    Head/tail Breaks is a relatively new classification method developed
    for data with a heavy-tailed distribution.

    Implementation based on contributions by Alessandra Sozzi <alessandra.sozzi@gmail.com>.

    For theoretical details see :cite:`Jiang_2013`.
    """

    def __init__(self, y):
        Map_Classifier.__init__(self, y)
        self.name = 'HeadTail_Breaks'

    def _set_bins(self):
        # The helper recursively splits the data above its mean, appending
        # one break per recursion level; k is whatever depth results.
        cuts = headTail_breaks(self.y.copy(), [])
        self.bins = np.array(cuts)
        self.k = len(self.bins)
class Equal_Interval(Map_Classifier):
    """
    Equal Interval Classification

    Parameters
    ----------
    y : array
        (n,1), values to classify
    k : int
        number of classes required

    Attributes
    ----------
    yb : array
         (n,1), bin ids for observations,
         each value is the id of the class the observation belongs to
         yb[i] = j for j>=1 if bins[j-1] < y[i] <= bins[j], yb[i] = 0
         otherwise
    bins : array
           (k,1), the upper bounds of each class
    k : int
        the number of classes
    counts : array
             (k,1), the number of observations falling in each class

    Examples
    --------
    >>> import mapclassify as mc
    >>> cal = mc.load_example()
    >>> ei = mc.Equal_Interval(cal, k = 5)
    >>> ei.k
    5
    >>> ei.counts
    array([57, 0, 0, 0, 1])
    >>> ei.bins
    array([ 822.394, 1644.658, 2466.922, 3289.186, 4111.45 ])

    Notes
    -----
    Intervals defined to have equal width:

    .. math::

        bins_j = min(y)+w*(j+1)

    with :math:`w=\\frac{max(y)-min(j)}{k}`
    """

    def __init__(self, y, k=K):
        """
        Parameters
        ----------
        y : array
            (n,1), values to classify
        k : int
            number of classes required (default mapclassify K)
        """
        self.k = k
        Map_Classifier.__init__(self, y)
        self.name = 'Equal Interval'

    def _set_bins(self):
        y = self.y
        k = self.k
        max_y = max(y)
        min_y = min(y)
        # np.arange with a float step can over- or under-shoot the end point;
        # an undershoot previously left bins[-1] < max_y so the maximum value
        # fell outside every class. linspace guarantees exactly k upper
        # bounds with the last equal to max_y.
        self.bins = np.linspace(min_y, max_y, k + 1)[1:]
class Percentiles(Map_Classifier):
    """
    Percentiles Map Classification

    Parameters
    ----------
    y : array
        attribute to classify
    pct : array
          percentiles; None (the default) selects [1,10,50,90,99,100]

    Attributes
    ----------
    yb : array
         bin ids for observations (numpy array n x 1)
    bins : array
           the upper bounds of each class (numpy array k x 1)
    k : int
        the number of classes
    counts : int
             the number of observations falling in each class
             (numpy array k x 1)

    Examples
    --------
    >>> import mapclassify as mc
    >>> cal = mc.load_example()
    >>> p = mc.Percentiles(cal)
    >>> p.bins
    array([1.357000e-01, 5.530000e-01, 9.365000e+00, 2.139140e+02,
           2.179948e+03, 4.111450e+03])
    >>> p.counts
    array([ 1, 5, 23, 23, 5, 1])
    >>> p2 = mc.Percentiles(cal, pct = [50, 100])
    >>> p2.bins
    array([ 9.365, 4111.45 ])
    >>> p2.counts
    array([29, 29])
    >>> p2.k
    2
    """

    def __init__(self, y, pct=None):
        # A literal list default ([1, 10, ...]) is a shared mutable object:
        # mutating one classifier's pct would silently change the default
        # for every later instance. Use a None sentinel and copy input.
        if pct is None:
            pct = [1, 10, 50, 90, 99, 100]
        self.pct = list(pct)
        Map_Classifier.__init__(self, y)
        self.name = 'Percentiles'

    def _set_bins(self):
        y = self.y
        pct = self.pct
        # One upper bound per requested percentile.
        self.bins = np.array([stats.scoreatpercentile(y, p) for p in pct])
        self.k = len(self.bins)

    def update(self, y=None, inplace=False, **kwargs):
        """
        Add data or change classification parameters.

        Parameters
        ----------
        y : array
            (n,1) array of data to classify
        inplace : bool
            whether to conduct the update in place or to return a copy
            estimated from the additional specifications.

        Additional parameters provided in **kwargs are passed to the init
        function of the class. For documentation, check the class constructor.
        """
        kwargs.update({'pct': kwargs.pop('pct', self.pct)})
        if inplace:
            self._update(y, **kwargs)
        else:
            new = copy.deepcopy(self)
            new._update(y, **kwargs)
            return new
class Box_Plot(Map_Classifier):
    """
    Box_Plot Map Classification

    Parameters
    ----------
    y : array
        attribute to classify
    hinge : float
        multiplier for IQR

    Attributes
    ----------
    yb : array
        (n,1), bin ids for observations
    bins : array
        (n,1), the upper bounds of each class (monotonic)
    k : int
        the number of classes
    counts : array
        (k,1), the number of observations falling in each class
    low_outlier_ids : array
        indices of observations that are low outliers
    high_outlier_ids : array
        indices of observations that are high outliers

    Notes
    -----

    The bins are set as follows::

        bins[0] = q[0]-hinge*IQR
        bins[1] = q[0]
        bins[2] = q[1]
        bins[3] = q[2]
        bins[4] = q[2]+hinge*IQR
        bins[5] = inf  (see Notes)

    where q is an array of the first three quartiles of y and
    IQR=q[2]-q[0]

    If q[2]+hinge*IQR > max(y) there will only be 5 classes and no high
    outliers, otherwise, there will be 6 classes and at least one high
    outlier.

    Examples
    --------
    >>> import mapclassify as mc
    >>> cal = mc.load_example()
    >>> bp = mc.Box_Plot(cal)
    >>> bp.bins
    array([-5.287625e+01, 2.567500e+00, 9.365000e+00, 3.953000e+01,
           9.497375e+01, 4.111450e+03])
    >>> bp.counts
    array([ 0, 15, 14, 14, 6, 9])
    >>> bp.high_outlier_ids
    array([ 0, 6, 18, 29, 33, 36, 37, 40, 42])
    >>> cal[bp.high_outlier_ids].values
    array([ 329.92, 181.27, 370.5 , 722.85, 192.05, 110.74, 4111.45,
           317.11, 264.93])
    >>> bx = mc.Box_Plot(np.arange(100))
    >>> bx.bins
    array([ -49.5 , 24.75, 49.5 , 74.25, 148.5 ])
    """

    def __init__(self, y, hinge=1.5):
        """
        Parameters
        ----------
        y : array (n,1)
            attribute to classify
        hinge : float
            multiple of inter-quartile range (default=1.5)
        """
        self.hinge = hinge
        Map_Classifier.__init__(self, y)
        self.name = 'Box Plot'

    def _set_bins(self):
        y = self.y
        pct = [25, 50, 75, 100]
        bins = [stats.scoreatpercentile(y, p) for p in pct]
        # IQR is Q3 - Q1; the fences sit hinge*IQR beyond those quartiles.
        iqr = bins[-2] - bins[0]
        self.iqr = iqr
        pivot = self.hinge * iqr
        left_fence = bins[0] - pivot
        right_fence = bins[-2] + pivot
        if right_fence < bins[-1]:
            # High outliers exist: insert the upper fence before the max,
            # yielding 6 classes.
            bins.insert(-1, right_fence)
        else:
            # No high outliers: the top class absorbs the fence (5 classes).
            bins[-1] = right_fence
        bins.insert(0, left_fence)
        self.bins = np.array(bins)
        self.k = len(bins)

    def _classify(self):
        Map_Classifier._classify(self)
        # Bin 0 is always below the lower fence; bin 5 only exists when the
        # upper fence was inserted in _set_bins, otherwise this is empty.
        self.low_outlier_ids = np.nonzero(self.yb == 0)[0]
        self.high_outlier_ids = np.nonzero(self.yb == 5)[0]

    def update(self, y=None, inplace=False, **kwargs):
        """
        Add data or change classification parameters.

        Parameters
        ----------
        y : array
            (n,1) array of data to classify
        inplace : bool
            whether to conduct the update in place or to return a
            copy estimated from the additional specifications.

        Additional parameters provided in **kwargs are passed to the init
        function of the class. For documentation, check the class constructor.
        """
        kwargs.update({'hinge': kwargs.pop('hinge', self.hinge)})
        if inplace:
            self._update(y, **kwargs)
        else:
            new = copy.deepcopy(self)
            new._update(y, **kwargs)
            return new
class Quantiles(Map_Classifier):
    """
    Quantile Map Classification

    Parameters
    ----------
    y : array
        (n,1), values to classify
    k : int
        number of classes required

    Attributes
    ----------
    yb : array
         (n,1), bin ids for observations,
         each value is the id of the class the observation belongs to
         yb[i] = j for j>=1 if bins[j-1] < y[i] <= bins[j], yb[i] = 0
         otherwise
    bins : array
           (k,1), the upper bounds of each class
    k : int
        the number of classes
    counts : array
             (k,1), the number of observations falling in each class

    Examples
    --------
    >>> import mapclassify as mc
    >>> cal = mc.load_example()
    >>> q = mc.Quantiles(cal, k = 5)
    >>> q.bins
    array([1.46400e+00, 5.79800e+00, 1.32780e+01, 5.46160e+01, 4.11145e+03])
    >>> q.counts
    array([12, 11, 12, 11, 12])
    """

    def __init__(self, y, k=K):
        self.k = k
        Map_Classifier.__init__(self, y)
        self.name = 'Quantiles'

    def _set_bins(self):
        # The class upper bounds are simply the k-quantiles of y.
        self.bins = quantile(self.y, k=self.k)
class Std_Mean(Map_Classifier):
    """
    Standard Deviation and Mean Map Classification

    Parameters
    ----------
    y : array
        (n,1), values to classify
    multiples : array
        the multiples of the standard deviation to add/subtract from
        the sample mean to define the bins; None (the default) selects
        [-2,-1,1,2]

    Attributes
    ----------
    yb : array
        (n,1), bin ids for observations,
    bins : array
        (k,1), the upper bounds of each class
    k : int
        the number of classes
    counts : array
        (k,1), the number of observations falling in each class

    Examples
    --------
    >>> import mapclassify as mc
    >>> cal = mc.load_example()
    >>> st = mc.Std_Mean(cal)
    >>> st.k
    5
    >>> st.bins
    array([-967.36235382, -420.71712519, 672.57333208, 1219.21856072,
           4111.45 ])
    >>> st.counts
    array([ 0, 0, 56, 1, 1])
    >>>
    >>> st3 = mc.Std_Mean(cal, multiples = [-3, -1.5, 1.5, 3])
    >>> st3.bins
    array([-1514.00758246, -694.03973951, 945.8959464 , 1765.86378936,
           4111.45 ])
    >>> st3.counts
    array([ 0, 0, 57, 0, 1])
    """

    def __init__(self, y, multiples=None):
        # A literal list default would be a shared mutable object across all
        # instances; use a None sentinel and copy caller input instead.
        if multiples is None:
            multiples = [-2, -1, 1, 2]
        self.multiples = list(multiples)
        Map_Classifier.__init__(self, y)
        self.name = 'Std_Mean'

    def _set_bins(self):
        y = self.y
        s = y.std(ddof=1)  # sample standard deviation
        m = y.mean()
        cuts = [m + s * w for w in self.multiples]
        y_max = y.max()
        # Ensure the top class covers the maximum observation.
        if cuts[-1] < y_max:
            cuts.append(y_max)
        self.bins = np.array(cuts)
        self.k = len(cuts)

    def update(self, y=None, inplace=False, **kwargs):
        """
        Add data or change classification parameters.

        Parameters
        ----------
        y : array
            (n,1) array of data to classify
        inplace : bool
            whether to conduct the update in place or to return a copy
            estimated from the additional specifications.

        Additional parameters provided in **kwargs are passed to the init
        function of the class. For documentation, check the class constructor.
        """
        kwargs.update({'multiples': kwargs.pop('multiples', self.multiples)})
        if inplace:
            self._update(y, **kwargs)
        else:
            new = copy.deepcopy(self)
            new._update(y, **kwargs)
            return new
class Maximum_Breaks(Map_Classifier):
    """
    Maximum Breaks Map Classification

    Parameters
    ----------
    y : array
        (n, 1), values to classify
    k : int
        number of classes required
    mindiff : float
        The minimum difference between class breaks

    Attributes
    ----------
    yb : array
        (n, 1), bin ids for observations
    bins : array
        (k, 1), the upper bounds of each class
    k : int
        the number of classes
    counts : array
        (k, 1), the number of observations falling in each class (numpy
        array k x 1)

    Examples
    --------
    >>> import mapclassify as mc
    >>> cal = mc.load_example()
    >>> mb = mc.Maximum_Breaks(cal, k = 5)
    >>> mb.k
    5
    >>> mb.bins
    array([ 146.005, 228.49 , 546.675, 2417.15 , 4111.45 ])
    >>> mb.counts
    array([50, 2, 4, 1, 1])
    """

    def __init__(self, y, k=5, mindiff=0):
        self.k = k
        self.mindiff = mindiff
        Map_Classifier.__init__(self, y)
        self.name = 'Maximum_Breaks'

    def _set_bins(self):
        xs = self.y.copy()
        k = self.k
        xs.sort()
        min_diff = self.mindiff
        # Gaps between consecutive sorted values; class breaks are placed
        # at the midpoints of the k-1 largest gaps exceeding min_diff.
        d = xs[1:] - xs[:-1]
        diffs = d[np.nonzero(d > min_diff)]
        # sp.unique was scipy's deprecated re-export of the numpy function
        # (removed in recent scipy releases); call numpy directly.
        diffs = np.unique(diffs)
        k1 = k - 1
        if len(diffs) > k1:
            diffs = diffs[-k1:]
        mp = []
        self.cids = []
        for diff in diffs:
            ids = np.nonzero(d == diff)
            for id in ids:
                self.cids.append(id[0])
                # Break at the midpoint of the gap.
                cp = ((xs[id] + xs[id + 1]) / 2.)
                mp.append(cp[0])
        mp.append(xs[-1])  # top class is bounded by the maximum
        mp.sort()
        self.bins = np.array(mp)

    def update(self, y=None, inplace=False, **kwargs):
        """
        Add data or change classification parameters.

        Parameters
        ----------
        y : array
            (n,1) array of data to classify
        inplace : bool
            whether to conduct the update in place or to return a copy
            estimated from the additional specifications.

        Additional parameters provided in **kwargs are passed to the init
        function of the class. For documentation, check the class constructor.
        """
        kwargs.update({'k': kwargs.pop('k', self.k)})
        kwargs.update({'mindiff': kwargs.pop('mindiff', self.mindiff)})
        if inplace:
            self._update(y, **kwargs)
        else:
            new = copy.deepcopy(self)
            new._update(y, **kwargs)
            return new
class Natural_Breaks(Map_Classifier):
    """
    Natural Breaks Map Classification

    Parameters
    ----------
    y : array
        (n,1), values to classify
    k : int
        number of classes required
    initial : int
        number of initial solutions to generate, (default=100)

    Attributes
    ----------
    yb : array
        (n,1), bin ids for observations,
    bins : array
        (k,1), the upper bounds of each class
    k : int
        the number of classes
    counts : array
        (k,1), the number of observations falling in each class

    Examples
    --------
    >>> import numpy as np
    >>> import mapclassify as mc
    >>> np.random.seed(123456)
    >>> cal = mc.load_example()
    >>> nb = mc.Natural_Breaks(cal, k=5)
    >>> nb.k
    5
    >>> x = np.array([1] * 50)
    >>> x[-1] = 20
    >>> nb = mc.Natural_Breaks(x, k = 5, initial = 0)
    Warning: Not enough unique values in array to form k classes
    Warning: setting k to 2
    >>> nb.bins
    array([ 1, 20])
    >>> nb.counts
    array([49, 1])

    Notes
    -----
    There is a tradeoff here between speed and consistency of the
    classification If you want more speed, set initial to a smaller value (0
    would result in the best speed, if you want more consistent classes in
    multiple runs of Natural_Breaks on the same data, set initial to a higher
    value.
    """

    def __init__(self, y, k=K, initial=100):
        self.k = k
        self.initial = initial
        Map_Classifier.__init__(self, y)
        self.name = 'Natural_Breaks'

    def _set_bins(self):
        x = self.y.copy()
        k = self.k
        values = np.array(x)
        uv = np.unique(values)
        uvk = len(uv)
        if uvk < k:
            ms = 'Warning: Not enough unique values in array to form k classes'
            Warn(ms, UserWarning)
            Warn("Warning: setting k to %d" % uvk, UserWarning)
            k = uvk
            uv.sort()
            # we set the bins equal to the sorted unique values and ramp k
            # downwards. no need to call kmeans.
            self.bins = uv
            self.k = k
        else:
            # find an initial solution and then try to find an improvement
            res0 = natural_breaks(x, k)
            fit = res0[2]
            for i in range(self.initial):
                res = natural_breaks(x, k)
                fit_i = res[2]
                if fit_i < fit:
                    res0 = res
                    # Track the best fit seen so far. Previously `fit` was
                    # never updated, so the LAST candidate beating the
                    # initial solution was kept instead of the best one.
                    fit = fit_i
            self.bins = np.array(res0[-1])
            self.k = len(self.bins)

    def update(self, y=None, inplace=False, **kwargs):
        """
        Add data or change classification parameters.

        Parameters
        ----------
        y : array
            (n,1) array of data to classify
        inplace : bool
            whether to conduct the update in place or to return a
            copy estimated from the additional specifications.

        Additional parameters provided in **kwargs are passed to the init
        function of the class. For documentation, check the class constructor.
        """
        kwargs.update({'k': kwargs.pop('k', self.k)})
        kwargs.update({'initial': kwargs.pop('initial', self.initial)})
        if inplace:
            self._update(y, **kwargs)
        else:
            new = copy.deepcopy(self)
            new._update(y, **kwargs)
            return new
class Fisher_Jenks(Map_Classifier):
    """
    Fisher Jenks optimal classifier - mean based

    Parameters
    ----------
    y : array
        (n,1), values to classify
    k : int
        number of classes required

    Attributes
    ----------
    yb : array
        (n,1), bin ids for observations
    bins : array
        (k,1), the upper bounds of each class
    k : int
        the number of classes
    counts : array
        (k,1), the number of observations falling in each class

    Examples
    --------
    >>> import mapclassify as mc
    >>> cal = mc.load_example()
    >>> fj = mc.Fisher_Jenks(cal)
    >>> fj.adcm
    799.24
    >>> fj.bins
    array([ 75.29, 192.05, 370.5 , 722.85, 4111.45])
    >>> fj.counts
    array([49, 3, 4, 1, 1])
    >>>
    """

    def __init__(self, y, k=K):
        # The optimal-breaks dynamic program needs at least k distinct values.
        if len(np.unique(y)) < k:
            raise ValueError("Fewer unique values than specified classes.")
        self.k = k
        Map_Classifier.__init__(self, y)
        self.name = "Fisher_Jenks"

    def _set_bins(self):
        # _fisher_jenks_means returns classes+1 breaks including the minimum;
        # drop the first so bins holds only the k upper bounds.
        breaks = _fisher_jenks_means(self.y.copy(), classes=self.k)
        self.bins = np.array(breaks[1:])
class Fisher_Jenks_Sampled(Map_Classifier):
    """
    Fisher Jenks optimal classifier - mean based using random sample

    Parameters
    ----------
    y : array
        (n,1), values to classify
    k : int
        number of classes required
    pct : float
        The percentage of n that should form the sample
        If pct is specified such that n*pct > 1000, then
        pct = 1000./n, unless truncate is False
    truncate : boolean
        truncate pct in cases where pct * n > 1000., (Default True)

    Attributes
    ----------
    yb : array
        (n,1), bin ids for observations
    bins : array
        (k,1), the upper bounds of each class
    k : int
        the number of classes
    counts : array
        (k,1), the number of observations falling in each class

    Examples
    --------

    (Turned off due to timing being different across hardware)

    For theoretical details see :cite:`Rey_2016`.
    """

    def __init__(self, y, k=K, pct=0.10, truncate=True):
        self.k = k
        n = y.size

        if (pct * n > 1000) and truncate:
            pct = 1000. / n
        # np.random.random_integers is deprecated (removed in NumPy 2.0);
        # randint(0, n, m) draws the same inclusive [0, n-1] range.
        ids = np.random.randint(0, n, int(n * pct))
        yr = y[ids]
        yr[-1] = max(y)  # make sure we have the upper bound
        yr[0] = min(y)  # make sure we have the min
        self.original_y = y
        self.pct = pct
        self._truncated = truncate
        self.yr = yr
        self.yr_n = yr.size
        Map_Classifier.__init__(self, yr)
        # Bin the FULL data on the breaks estimated from the sample.
        self.yb, self.counts = bin1d(y, self.bins)
        self.name = "Fisher_Jenks_Sampled"
        self.y = y
        self._summary()  # have to recalculate summary stats

    def _set_bins(self):
        fj = Fisher_Jenks(self.y, self.k)
        self.bins = fj.bins

    def update(self, y=None, inplace=False, **kwargs):
        """
        Add data or change classification parameters.

        Parameters
        ----------
        y : array
            (n,1) array of data to classify
        inplace : bool
            whether to conduct the update in place or to return a
            copy estimated from the additional specifications.

        Additional parameters provided in **kwargs are passed to the init
        function of the class. For documentation, check the class constructor.
        """
        kwargs.update({'k': kwargs.pop('k', self.k)})
        kwargs.update({'pct': kwargs.pop('pct', self.pct)})
        kwargs.update({'truncate': kwargs.pop('truncate', self._truncated)})
        if inplace:
            self._update(y, **kwargs)
        else:
            new = copy.deepcopy(self)
            new._update(y, **kwargs)
            return new
class Jenks_Caspall(Map_Classifier):
    """
    Jenks Caspall  Map Classification

    Parameters
    ----------
    y : array
        (n,1), values to classify
    k : int
        number of classes required

    Attributes
    ----------
    yb : array
        (n,1), bin ids for observations,
    bins : array
        (k,1), the upper bounds of each class
    k : int
        the number of classes
    counts : array
        (k,1), the number of observations falling in each class

    Examples
    --------
    >>> import mapclassify as mc
    >>> cal = mc.load_example()
    >>> jc = mc.Jenks_Caspall(cal, k = 5)
    >>> jc.bins
    array([1.81000e+00, 7.60000e+00, 2.98200e+01, 1.81270e+02, 4.11145e+03])
    >>> jc.counts
    array([14, 13, 14, 10, 7])
    """

    def __init__(self, y, k=K):
        self.k = k
        Map_Classifier.__init__(self, y)
        self.name = "Jenks_Caspall"

    def _set_bins(self):
        x = self.y.copy()
        k = self.k
        # start with quantiles
        q = quantile(x, k)
        solving = True
        xb, cnts = bin1d(x, q)
        # class means
        if x.ndim == 1:
            x.shape = (x.size, 1)
        n, k = x.shape
        xm = [np.median(x[xb == i]) for i in np.unique(xb)]
        xb0 = xb.copy()
        q = xm
        it = 0
        rk = list(range(self.k))
        # Iterate: reassign each value to its nearest class median, then
        # recompute medians, until assignments stop changing.
        while solving:
            xb = np.zeros(xb0.shape, int)
            d = abs(x - q)
            xb = d.argmin(axis=1)
            if (xb0 == xb).all():
                solving = False
            else:
                xb0 = xb
            it += 1
            q = np.array([np.median(x[xb == i]) for i in rk])
        # sp.unique was scipy's deprecated re-export of the numpy function
        # (removed in recent scipy releases); call numpy directly.
        cuts = np.array([max(x[xb == i]) for i in np.unique(xb)])
        cuts.shape = (len(cuts), )
        self.bins = cuts
        self.iterations = it
class Jenks_Caspall_Sampled(Map_Classifier):
    """
    Jenks Caspall Map Classification using a random sample

    Parameters
    ----------
    y : array
        (n,1), values to classify
    k : int
        number of classes required
    pct : float
        The percentage of n that should form the sample
        If pct is specified such that n*pct > 1000, then pct = 1000./n

    Attributes
    ----------
    yb : array
        (n,1), bin ids for observations,
    bins : array
        (k,1), the upper bounds of each class
    k : int
        the number of classes
    counts : array
        (k,1), the number of observations falling in each class

    Examples
    --------
    >>> import mapclassify as mc
    >>> cal = mc.load_example()
    >>> x = np.random.random(100000)
    >>> jc = mc.Jenks_Caspall(x)
    >>> jcs = mc.Jenks_Caspall_Sampled(x)

    # not for testing since we get different times on different hardware
    # just included for documentation of likely speed gains
    #>>> t1 = time.time(); jc = Jenks_Caspall(x); t2 = time.time()
    #>>> t1s = time.time(); jcs = Jenks_Caspall_Sampled(x); t2s = time.time()
    #>>> t2 - t1; t2s - t1s
    #1.8292930126190186
    #0.061631917953491211

    Notes
    -----
    This is intended for large n problems. The logic is to apply
    Jenks_Caspall to a random subset of the y space and then bin the
    complete vector y on the bins obtained from the subset. This would
    trade off some "accuracy" for a gain in speed.
    """

    def __init__(self, y, k=K, pct=0.10):
        self.k = k
        n = y.size
        if pct * n > 1000:
            pct = 1000. / n
        # np.random.random_integers is deprecated (removed in NumPy 2.0);
        # randint(0, n, m) draws the same inclusive [0, n-1] range.
        ids = np.random.randint(0, n, int(n * pct))
        yr = y[ids]
        yr[0] = max(y)  # make sure we have the upper bound
        self.original_y = y
        self.pct = pct
        self.yr = yr
        self.yr_n = yr.size
        Map_Classifier.__init__(self, yr)
        # Bin the FULL data on the breaks estimated from the sample.
        self.yb, self.counts = bin1d(y, self.bins)
        self.name = "Jenks_Caspall_Sampled"
        self.y = y
        self._summary()  # have to recalculate summary stats

    def _set_bins(self):
        jc = Jenks_Caspall(self.y, self.k)
        self.bins = jc.bins
        self.iterations = jc.iterations

    def update(self, y=None, inplace=False, **kwargs):
        """
        Add data or change classification parameters.

        Parameters
        ----------
        y : array
            (n,1) array of data to classify
        inplace : bool
            whether to conduct the update in place or to return a
            copy estimated from the additional specifications.

        Additional parameters provided in **kwargs are passed to the init
        function of the class. For documentation, check the class constructor.
        """
        kwargs.update({'k': kwargs.pop('k', self.k)})
        kwargs.update({'pct': kwargs.pop('pct', self.pct)})
        if inplace:
            self._update(y, **kwargs)
        else:
            new = copy.deepcopy(self)
            new._update(y, **kwargs)
            return new
class Jenks_Caspall_Forced(Map_Classifier):
    """
    Jenks Caspall Map Classification with forced movements

    Parameters
    ----------
    y : array
        (n,1), values to classify
    k : int
        number of classes required

    Attributes
    ----------
    yb : array
        (n,1), bin ids for observations
    bins : array
        (k,1), the upper bounds of each class
    k : int
        the number of classes
    counts : array
        (k,1), the number of observations falling in each class

    Examples
    --------
    >>> import mapclassify as mc
    >>> cal = mc.load_example()
    >>> jcf = mc.Jenks_Caspall_Forced(cal, k = 5)
    >>> jcf.k
    5
    >>> jcf.bins
    array([[1.34000e+00],
           [5.90000e+00],
           [1.67000e+01],
           [5.06500e+01],
           [4.11145e+03]])
    >>> jcf.counts
    array([12, 12, 13, 9, 12])
    >>> jcf4 = mc.Jenks_Caspall_Forced(cal, k = 4)
    >>> jcf4.k
    4
    >>> jcf4.bins
    array([[2.51000e+00],
           [8.70000e+00],
           [3.66800e+01],
           [4.11145e+03]])
    >>> jcf4.counts
    array([15, 14, 14, 15])
    """

    def __init__(self, y, k=K):
        self.k = k
        Map_Classifier.__init__(self, y)
        self.name = "Jenks_Caspall_Forced"

    def _set_bins(self):
        """Hill-climb from quantile breaks by forcing boundary moves.

        Alternates upward and downward moves of boundary observations,
        keeping any move that lowers the total sum of squares, until no
        move in either direction improves the fit.
        """
        # NOTE: all former sp.unique calls below now use np.unique; the
        # scipy alias was deprecated and has been removed from SciPy.
        x = self.y.copy()
        k = self.k
        q = quantile(x, k)
        solving = True
        xb, cnt = bin1d(x, q)
        # class means require a column vector for broadcasting
        if x.ndim == 1:
            x.shape = (x.size, 1)
        n, tmp = x.shape
        xm = [x[xb == i].mean() for i in np.unique(xb)]
        q = xm
        xbar = np.array([xm[xbi] for xbi in xb])
        xbar.shape = (n, 1)
        # current objective: total sum of squared deviations about
        # class means
        ss = x - xbar
        ss *= ss
        ss = sum(ss)
        down_moves = up_moves = 0
        solving = True
        it = 0
        while solving:
            # try upward moves first
            moving_up = True
            while moving_up:
                class_ids = np.unique(xb)
                nk = [sum(xb == j) for j in class_ids]
                candidates = nk[:-1]
                i = 0
                up_moves = 0
                while candidates:
                    nki = candidates.pop(0)
                    if nki > 1:
                        # move the largest member of class i up one class
                        ids = np.nonzero(xb == class_ids[i])
                        mover = max(ids[0])
                        tmp = xb.copy()
                        tmp[mover] = xb[mover] + 1
                        tm = [x[tmp == j].mean() for j in np.unique(tmp)]
                        txbar = np.array([tm[xbi] for xbi in tmp])
                        txbar.shape = (n, 1)
                        tss = x - txbar
                        tss *= tss
                        tss = sum(tss)
                        if tss < ss:
                            # accept the move and restart the scan
                            xb = tmp
                            ss = tss
                            candidates = []
                            up_moves += 1
                    i += 1
                if not up_moves:
                    moving_up = False
            moving_down = True
            while moving_down:
                class_ids = np.unique(xb)
                nk = [sum(xb == j) for j in class_ids]
                candidates = nk[1:]
                i = 1
                down_moves = 0
                while candidates:
                    nki = candidates.pop(0)
                    if nki > 1:
                        # move the smallest member of class i down one class
                        ids = np.nonzero(xb == class_ids[i])
                        mover = min(ids[0])
                        mover_class = xb[mover]
                        target_class = mover_class - 1
                        tmp = xb.copy()
                        tmp[mover] = target_class
                        tm = [x[tmp == j].mean() for j in np.unique(tmp)]
                        txbar = np.array([tm[xbi] for xbi in tmp])
                        txbar.shape = (n, 1)
                        tss = x - txbar
                        tss *= tss
                        tss = sum(tss)
                        if tss < ss:
                            xb = tmp
                            ss = tss
                            candidates = []
                            down_moves += 1
                    i += 1
                if not down_moves:
                    moving_down = False
            if not up_moves and not down_moves:
                solving = False
            it += 1
        cuts = [max(x[xb == c]) for c in np.unique(xb)]
        self.bins = np.array(cuts)
        self.iterations = it
class User_Defined(Map_Classifier):
    """
    User Specified Binning

    Parameters
    ----------
    y : array
        (n,1), values to classify
    bins : array
        (k,1), upper bounds of classes (have to be monotically increasing)

    Attributes
    ----------
    yb : array
        (n,1), bin ids for observations,
    bins : array
        (k,1), the upper bounds of each class
    k : int
        the number of classes
    counts : array
        (k,1), the number of observations falling in each class

    Examples
    --------
    >>> import mapclassify as mc
    >>> cal = mc.load_example()
    >>> bins = [20, max(cal)]
    >>> bins
    [20, 4111.45]
    >>> ud = mc.User_Defined(cal, bins)
    >>> ud.bins
    array([  20.  , 4111.45])
    >>> ud.counts
    array([37, 21])
    >>> bins = [20, 30]
    >>> ud = mc.User_Defined(cal, bins)
    >>> ud.bins
    array([  20.  ,   30.  , 4111.45])
    >>> ud.counts
    array([37,  4, 17])

    Notes
    -----
    If upper bound of user bins does not exceed max(y) we append an
    additional bin.
    """

    def __init__(self, y, bins):
        # Defensive copy: the original implementation appended to the
        # caller's list in place, silently growing it on every call and
        # failing outright for tuples/arrays.  list() also generalizes
        # the accepted input to any sequence of breaks.
        bins = list(bins)
        if bins[-1] < max(y):
            bins.append(max(y))
        self.k = len(bins)
        self.bins = np.array(bins)
        self.y = y
        Map_Classifier.__init__(self, y)
        self.name = 'User Defined'

    def _set_bins(self):
        # bins are supplied by the user; nothing to estimate
        pass

    def _update(self, y=None, bins=None):
        # merge new observations with the existing data, then reclassify
        if y is not None:
            if hasattr(y, 'values'):
                y = y.values
            y = np.append(y.flatten(), self.y)
        else:
            y = self.y
        if bins is None:
            bins = self.bins
        self.__init__(y, bins)

    def update(self, y=None, inplace=False, **kwargs):
        """
        Add data or change classification parameters.

        Parameters
        ----------
        y : array
            (n,1) array of data to classify
        inplace : bool
            whether to conduct the update in place or to return a
            copy estimated from the additional specifications.

        Additional parameters provided in **kwargs are passed to the init
        function of the class. For documentation, check the class
        constructor.
        """
        bins = kwargs.pop('bins', self.bins)
        if inplace:
            self._update(y=y, bins=bins, **kwargs)
        else:
            new = copy.deepcopy(self)
            new._update(y, bins, **kwargs)
            return new
class Max_P_Classifier(Map_Classifier):
    """
    Max_P Map Classification

    Based on Max_p regionalization algorithm

    Parameters
    ----------
    y : array
        (n,1), values to classify
    k : int
        number of classes required
    initial : int
        number of initial solutions to use prior to swapping

    Attributes
    ----------
    yb : array
        (n,1), bin ids for observations,
    bins : array
        (k,1), the upper bounds of each class
    k : int
        the number of classes
    counts : array
        (k,1), the number of observations falling in each class

    Examples
    --------
    >>> import mapclassify as mc
    >>> cal = mc.load_example()
    >>> mp = mc.Max_P_Classifier(cal)
    >>> mp.bins
    array([   8.7 ,   20.47,   36.68,  110.74, 4111.45])
    >>> mp.counts
    array([29,  9,  5,  7,  8])
    """

    def __init__(self, y, k=K, initial=1000):
        self.k = k
        self.initial = initial
        Map_Classifier.__init__(self, y)
        self.name = "Max_P"

    def _set_bins(self):
        """Grow k contiguous classes from quantile seeds, keep the best
        of ``self.initial`` random restarts, then improve it by swapping
        boundary members between adjacent classes."""
        x = self.y.copy()
        k = self.k
        q = quantile(x, k)
        if x.ndim == 1:
            x.shape = (x.size, 1)
        n, tmp = x.shape
        x.sort(axis=0)
        # find best of initial solutions
        solution = 0
        # NOTE(review): best_tss starts at the no-classification total
        # sum of squares; every grown solution is assumed to beat it so
        # that best_solution is always bound below.
        best_tss = x.var() * x.shape[0]
        tss_all = np.zeros((self.initial, 1))
        while solution < self.initial:
            remaining = list(range(n))
            # nearest observation to each quantile becomes a class seed
            seeds = [
                np.nonzero(di == min(di))[0][0]
                for di in [np.abs(x - qi) for qi in q]
            ]
            rseeds = np.random.permutation(list(range(k))).tolist()
            [remaining.remove(seed) for seed in seeds]
            self.classes = classes = []
            [classes.append([seed]) for seed in seeds]
            while rseeds:
                # grow each class (in random order) over its contiguous
                # unassigned neighbours
                seed_id = rseeds.pop()
                current = classes[seed_id]
                growing = True
                while growing:
                    current = classes[seed_id]
                    low = current[0]
                    high = current[-1]
                    left = low - 1
                    right = high + 1
                    move_made = False
                    if left in remaining:
                        current.insert(0, left)
                        remaining.remove(left)
                        move_made = True
                    if right in remaining:
                        current.append(right)
                        remaining.remove(right)
                        move_made = True
                    if move_made:
                        classes[seed_id] = current
                    else:
                        growing = False
            tss = _fit(self.y, classes)
            tss_all[solution] = tss
            if tss < best_tss:
                best_solution = classes
                best_it = solution
                best_tss = tss
            solution += 1
        classes = best_solution
        self.best_it = best_it
        self.tss = best_tss
        self.a2c = a2c = {}
        self.tss_all = tss_all
        for r, cl in enumerate(classes):
            for a in cl:
                a2c[a] = r
        swapping = True
        while swapping:
            # local improvement: pull boundary members into adjacent
            # classes whenever doing so lowers the objective
            rseeds = np.random.permutation(list(range(k))).tolist()
            total_moves = 0
            while rseeds:
                id = rseeds.pop()
                growing = True
                total_moves = 0
                while growing:
                    target = classes[id]
                    left = target[0] - 1
                    right = target[-1] + 1
                    n_moves = 0
                    if left in a2c:
                        left_class = classes[a2c[left]]
                        if len(left_class) > 1:
                            a = left_class[-1]
                            if self._swap(left_class, target, a):
                                target.insert(0, a)
                                left_class.remove(a)
                                a2c[a] = id
                                n_moves += 1
                    if right in a2c:
                        right_class = classes[a2c[right]]
                        if len(right_class) > 1:
                            a = right_class[0]
                            if self._swap(right_class, target, a):
                                target.append(a)
                                right_class.remove(a)
                                n_moves += 1
                                a2c[a] = id
                    if not n_moves:
                        growing = False
                    total_moves += n_moves
            if not total_moves:
                swapping = False
        xs = self.y.copy()
        xs.sort()
        self.bins = np.array([xs[cl][-1] for cl in classes])

    def _ss(self, class_def):
        """calculates sum of squares for a class"""
        yc = self.y[class_def]
        css = yc - yc.mean()
        css *= css
        return sum(css)

    def _swap(self, class1, class2, a):
        """evaluate cost of moving a from class1 to class2"""
        ss1 = self._ss(class1)
        ss2 = self._ss(class2)
        tss1 = ss1 + ss2
        class1c = copy.copy(class1)
        class2c = copy.copy(class2)
        class1c.remove(a)
        class2c.append(a)
        ss1 = self._ss(class1c)
        ss2 = self._ss(class2c)
        tss2 = ss1 + ss2
        if tss1 < tss2:
            return False
        else:
            return True

    def update(self, y=None, inplace=False, **kwargs):
        """
        Add data or change classification parameters.

        Parameters
        ----------
        y : array
            (n,1) array of data to classify
        inplace : bool
            whether to conduct the update in place or to return a
            copy estimated from the additional specifications.

        Additional parameters provided in **kwargs are passed to the init
        function of the class. For documentation, check the class
        constructor.
        """
        # Bug fix: the original passed an undefined name `bins` to
        # _update, raising NameError on every call.  _update takes only
        # (y, **kwargs); carry forward k and initial like the sibling
        # classifiers do.
        kwargs.update({'k': kwargs.pop('k', self.k)})
        kwargs.update({'initial': kwargs.pop('initial', self.initial)})
        if inplace:
            self._update(y, **kwargs)
        else:
            new = copy.deepcopy(self)
            new._update(y, **kwargs)
            return new
def _fit(y, classes):
"""Calculate the total sum of squares for a vector y classified into
classes
Parameters
----------
y : array
(n,1), variable to be classified
classes : array
(k,1), integer values denoting class membership
"""
tss = 0
for class_def in classes:
yc = y[class_def]
css = yc - yc.mean()
css *= css
tss += sum(css)
return tss
# Registry of the k-classifiers that K_classifiers evaluates.
kmethods = {
    "Quantiles": Quantiles,
    "Fisher_Jenks": Fisher_Jenks,
    "Natural_Breaks": Natural_Breaks,
    "Maximum_Breaks": Maximum_Breaks,
}
class K_classifiers(object):
    """
    Evaluate all k-classifers and pick optimal based on k and GADF

    Parameters
    ----------
    y : array
        (n,1), values to be classified
    pct : float
        The percentage of GADF to exceed

    Attributes
    ----------
    best : object
        instance of the optimal Map_Classifier
    results : dictionary
        keys are classifier names, values are the Map_Classifier
        instances with the best pct for each classifer

    Examples
    --------
    >>> import mapclassify as mc
    >>> cal = mc.load_example()
    >>> ks = mc.classifiers.K_classifiers(cal)
    >>> ks.best.name
    'Fisher_Jenks'
    >>> ks.best.k
    4
    >>> ks.best.gadf
    0.8481032719908105

    Notes
    -----
    This can be used to suggest a classification scheme.

    See Also
    --------
    gadf
    """

    def __init__(self, y, pct=0.8):
        maxk = len(y) - 1
        champion = gadf(y, "Fisher_Jenks", maxk=maxk, pct=pct)
        # NOTE(review): the reference result is unpacked as [0] -> pct
        # and [-1] -> k, while each challenger below is read as
        # [0] -> k and [-1] -> pct; preserved verbatim from upstream --
        # confirm against gadf()'s return ordering.
        best_pct = champion[0]
        best_k = champion[-1]
        outcomes = {"Fisher_Jenks": champion}
        challengers = [name for name in kmethods if name != "Fisher_Jenks"]
        for name in challengers:
            candidate = gadf(y, name, maxk=maxk, pct=pct)
            outcomes[name] = candidate
            cand_k = candidate[0]
            cand_pct = candidate[-1]
            # prefer fewer classes; break ties on a higher pct
            if cand_k < best_k or (cand_k == best_k and best_pct < cand_pct):
                champion = candidate
                best_k = cand_k
                best_pct = cand_pct
        self.results = outcomes
        self.best = champion[1]
|
pysal/mapclassify
|
mapclassify/classifiers.py
|
Map_Classifier._update
|
python
|
def _update(self, data, *args, **kwargs):
if data is not None:
data = np.asarray(data).flatten()
data = np.append(data.flatten(), self.y)
else:
data = self.y
self.__init__(data, *args, **kwargs)
|
The only thing that *should* happen in this function is
1. input sanitization for pandas
2. classification/reclassification.
Using their __init__ methods, all classifiers can re-classify given
different input parameters or additional data.
If you've got a cleverer updating equation than the intial estimation
equation, remove the call to self.__init__ below and replace it with
the updating function.
|
train
|
https://github.com/pysal/mapclassify/blob/5b22ec33f5802becf40557614d90cd38efa1676e/mapclassify/classifiers.py#L455-L473
|
[
"def __init__(self, y):\n y = np.asarray(y).flatten()\n self.name = 'Map Classifier'\n self.y = y\n self._classify()\n self._summary()\n",
"def __init__(self, y, pct=[1, 10, 50, 90, 99, 100]):\n self.pct = pct\n Map_Classifier.__init__(self, y)\n self.name = 'Percentiles'\n",
"def __init__(self, y, hinge=1.5):\n \"\"\"\n Parameters\n ----------\n y : array (n,1)\n attribute to classify\n hinge : float\n multiple of inter-quartile range (default=1.5)\n \"\"\"\n self.hinge = hinge\n Map_Classifier.__init__(self, y)\n self.name = 'Box Plot'\n",
"def __init__(self, y, multiples=[-2, -1, 1, 2]):\n self.multiples = multiples\n Map_Classifier.__init__(self, y)\n self.name = 'Std_Mean'\n",
"def __init__(self, y, k=5, mindiff=0):\n self.k = k\n self.mindiff = mindiff\n Map_Classifier.__init__(self, y)\n self.name = 'Maximum_Breaks'\n",
"def __init__(self, y, k=K, initial=100):\n self.k = k\n self.initial = initial\n Map_Classifier.__init__(self, y)\n self.name = 'Natural_Breaks'\n",
"def __init__(self, y, k=K, pct=0.10):\n self.k = k\n n = y.size\n if pct * n > 1000:\n pct = 1000. / n\n ids = np.random.random_integers(0, n - 1, int(n * pct))\n yr = y[ids]\n yr[0] = max(y) # make sure we have the upper bound\n self.original_y = y\n self.pct = pct\n self.yr = yr\n self.yr_n = yr.size\n Map_Classifier.__init__(self, yr)\n self.yb, self.counts = bin1d(y, self.bins)\n self.name = \"Jenks_Caspall_Sampled\"\n self.y = y\n self._summary() # have to recalculate summary stats\n",
"def __init__(self, y, k=K, initial=1000):\n self.k = k\n self.initial = initial\n Map_Classifier.__init__(self, y)\n self.name = \"Max_P\"\n"
] |
class Map_Classifier(object):
    """
    Abstract class for all map classifications :cite:`Slocum_2009`

    For an array :math:`y` of :math:`n` values, a map classifier places each
    value :math:`y_i` into one of :math:`k` mutually exclusive and exhaustive
    classes. Each classifer defines the classes based on different criteria,
    but in all cases the following hold for the classifiers in PySAL:

    .. math:: C_j^l < y_i \le C_j^u \ \forall i \in C_j

    where :math:`C_j` denotes class :math:`j` which has lower bound
    :math:`C_j^l` and upper bound :math:`C_j^u`.

    Map Classifiers Supported

    * :class:`mapclassify.classifiers.Box_Plot`
    * :class:`mapclassify.classifiers.Equal_Interval`
    * :class:`mapclassify.classifiers.Fisher_Jenks`
    * :class:`mapclassify.classifiers.Fisher_Jenks_Sampled`
    * :class:`mapclassify.classifiers.HeadTail_Breaks`
    * :class:`mapclassify.classifiers.Jenks_Caspall`
    * :class:`mapclassify.classifiers.Jenks_Caspall_Forced`
    * :class:`mapclassify.classifiers.Jenks_Caspall_Sampled`
    * :class:`mapclassify.classifiers.Max_P_Classifier`
    * :class:`mapclassify.classifiers.Maximum_Breaks`
    * :class:`mapclassify.classifiers.Natural_Breaks`
    * :class:`mapclassify.classifiers.Quantiles`
    * :class:`mapclassify.classifiers.Percentiles`
    * :class:`mapclassify.classifiers.Std_Mean`
    * :class:`mapclassify.classifiers.User_Defined`

    Utilities:

    In addition to the classifiers, there are several utility functions that
    can be used to evaluate the properties of a specific classifier,
    or for automatic selection of a classifier and
    number of classes.

    * :func:`mapclassify.classifiers.gadf`
    * :class:`mapclassify.classifiers.K_classifiers`
    """

    def __init__(self, y):
        # flatten accepts (n,1) columns, pandas Series, nested lists
        y = np.asarray(y).flatten()
        self.name = 'Map Classifier'
        self.y = y
        # subclasses supply _set_bins(); classify then derive fit stats
        self._classify()
        self._summary()

    def _summary(self):
        # per-class member indices plus goodness-of-fit diagnostics
        yb = self.yb
        self.classes = [np.nonzero(yb == c)[0].tolist() for c in range(self.k)]
        self.tss = self.get_tss()
        self.adcm = self.get_adcm()
        self.gadf = self.get_gadf()

    def _classify(self):
        self._set_bins()
        self.yb, self.counts = bin1d(self.y, self.bins)

    @classmethod
    def make(cls, *args, **kwargs):
        """
        Configure and create a classifier that will consume data and produce
        classifications, given the configuration options specified by this
        function.

        Note that this like a *partial application* of the relevant class
        constructor. `make` creates a function that returns classifications; it
        does not actually do the classification.

        If you want to classify data directly, use the appropriate class
        constructor, like Quantiles, Max_Breaks, etc.

        If you *have* a classifier object, but want to find which bins new data
        falls into, use find_bin.

        Parameters
        ----------
        *args : required positional arguments
            all positional arguments required by the classifier,
            excluding the input data.
        rolling : bool
            a boolean configuring the outputted classifier to use
            a rolling classifier rather than a new classifier for
            each input. If rolling, this adds the current data to
            all of the previous data in the classifier, and
            rebalances the bins, like a running median
            computation.
        return_object : bool
            a boolean configuring the outputted classifier to
            return the classifier object or not
        return_bins : bool
            a boolean configuring the outputted classifier to
            return the bins/breaks or not
        return_counts : bool
            a boolean configuring the outputted classifier to
            return the histogram of objects falling into each bin
            or not

        Returns
        -------
        A function that consumes data and returns their bins (and object,
        bins/breaks, or counts, if requested).

        Note
        ----
        This is most useful when you want to run a classifier many times
        with a given configuration, such as when classifying many columns of an
        array or dataframe using the same configuration.

        Examples
        --------
        >>> import libpysal as ps
        >>> import mapclassify as mc
        >>> import geopandas as gpd
        >>> df = gpd.read_file(ps.examples.get_path('columbus.dbf'))
        >>> classifier = mc.Quantiles.make(k=9)
        >>> cl = df[['HOVAL', 'CRIME', 'INC']].apply(classifier)
        >>> cl["HOVAL"].values[:10]
        array([8, 7, 2, 4, 1, 3, 8, 5, 7, 8])
        >>> cl["CRIME"].values[:10]
        array([0, 1, 3, 4, 6, 2, 0, 5, 3, 4])
        >>> cl["INC"].values[:10]
        array([7, 8, 5, 0, 3, 5, 0, 3, 6, 4])
        >>> import pandas as pd; from numpy import linspace as lsp
        >>> data = [lsp(3,8,num=10), lsp(10, 0, num=10), lsp(-5, 15, num=10)]
        >>> data = pd.DataFrame(data).T
        >>> data
                  0          1          2
        0  3.000000  10.000000  -5.000000
        1  3.555556   8.888889  -2.777778
        2  4.111111   7.777778  -0.555556
        3  4.666667   6.666667   1.666667
        4  5.222222   5.555556   3.888889
        5  5.777778   4.444444   6.111111
        6  6.333333   3.333333   8.333333
        7  6.888889   2.222222  10.555556
        8  7.444444   1.111111  12.777778
        9  8.000000   0.000000  15.000000
        >>> data.apply(mc.Quantiles.make(rolling=True))
           0  1  2
        0  0  4  0
        1  0  4  0
        2  1  4  0
        3  1  3  0
        4  2  2  1
        5  2  1  2
        6  3  0  4
        7  3  0  4
        8  4  0  4
        9  4  0  4
        >>> dbf = ps.io.open(ps.examples.get_path('baltim.dbf'))
        >>> data = dbf.by_col_array('PRICE', 'LOTSZ', 'SQFT')
        >>> my_bins = [1, 10, 20, 40, 80]
        >>> cl = [mc.User_Defined.make(bins=my_bins)(a) for a in data.T]
        >>> len(cl)
        3
        >>> cl[0][:10]
        array([4, 5, 5, 5, 4, 4, 5, 4, 4, 5])
        """
        # only flag overrides return flag
        to_annotate = copy.deepcopy(kwargs)
        return_object = kwargs.pop('return_object', False)
        return_bins = kwargs.pop('return_bins', False)
        return_counts = kwargs.pop('return_counts', False)
        rolling = kwargs.pop('rolling', False)
        if rolling:
            # just initialize a fake classifier
            data = list(range(10))
            cls_instance = cls(data, *args, **kwargs)
            # and empty it, since we'll be using the update
            cls_instance.y = np.array([])
        else:
            cls_instance = None

        # wrap init in a closure to make a consumer.
        # Qc Na: "Objects/Closures are poor man's Closures/Objects"
        def classifier(data, cls_instance=cls_instance):
            if rolling:
                cls_instance.update(data, inplace=True, **kwargs)
                yb = cls_instance.find_bin(data)
            else:
                cls_instance = cls(data, *args, **kwargs)
                yb = cls_instance.yb
            outs = [yb, None, None, None]
            outs[1] = cls_instance if return_object else None
            outs[2] = cls_instance.bins if return_bins else None
            outs[3] = cls_instance.counts if return_counts else None
            outs = [a for a in outs if a is not None]
            if len(outs) == 1:
                return outs[0]
            else:
                return outs

        # for debugging/jic, keep around the kwargs.
        # in future, we might want to make this a thin class, so that we can
        # set a custom repr. Call the class `Binner` or something, that's a
        # pre-configured Classifier that just consumes data, bins it, &
        # possibly updates the bins.
        classifier._options = to_annotate
        return classifier

    def update(self, y=None, inplace=False, **kwargs):
        """
        Add data or change classification parameters.

        Parameters
        ----------
        y : array
            (n,1) array of data to classify
        inplace : bool
            whether to conduct the update in place or to return a copy
            estimated from the additional specifications.

        Additional parameters provided in **kwargs are passed to the init
        function of the class. For documentation, check the class constructor.
        """
        # NOTE(review): assumes the concrete subclass defines self.k and
        # a _update() helper -- neither is defined on this base class in
        # this copy of the file; confirm against the full module.
        kwargs.update({'k': kwargs.pop('k', self.k)})
        if inplace:
            self._update(y, **kwargs)
        else:
            new = copy.deepcopy(self)
            new._update(y, **kwargs)
            return new

    def __str__(self):
        st = self._table_string()
        return st

    def __repr__(self):
        return self._table_string()

    def __call__(self, *args, **kwargs):
        """
        This will allow the classifier to be called like it's a function.

        Whether or not we want to make this be "find_bin" or "update" is a
        design decision.

        I like this as find_bin, since a classifier's job should be to classify
        the data given to it using the rules estimated from the `_classify()`
        function.
        """
        return self.find_bin(*args)

    def get_tss(self):
        """
        Total sum of squares around class means

        Returns sum of squares over all class means
        """
        tss = 0
        for class_def in self.classes:
            if len(class_def) > 0:
                yc = self.y[class_def]
                css = yc - yc.mean()
                css *= css
                tss += sum(css)
        return tss

    def _set_bins(self):
        # abstract hook: concrete classifiers assign self.bins here
        pass

    def get_adcm(self):
        """
        Absolute deviation around class median (ADCM).

        Calculates the absolute deviations of each observation about its class
        median as a measure of fit for the classification method.

        Returns sum of ADCM over all classes
        """
        adcm = 0
        for class_def in self.classes:
            if len(class_def) > 0:
                yc = self.y[class_def]
                yc_med = np.median(yc)
                ycd = np.abs(yc - yc_med)
                adcm += sum(ycd)
        return adcm

    def get_gadf(self):
        """
        Goodness of absolute deviation of fit
        """
        # 1 - (within-class absolute deviation / total absolute deviation)
        adam = (np.abs(self.y - np.median(self.y))).sum()
        gadf = 1 - self.adcm / adam
        return gadf

    def _table_string(self, width=12, decimal=3):
        # render the classification as a fixed-width text table;
        # column width adapts to the widest formatted bin value
        fmt = ".%df" % decimal
        fmt = "%" + fmt
        largest = max([len(fmt % i) for i in self.bins])
        width = largest
        fmt = "%d.%df" % (width, decimal)
        fmt = "%" + fmt
        h1 = "Lower"
        h1 = h1.center(largest)
        h2 = " "
        h2 = h2.center(10)
        h3 = "Upper"
        h3 = h3.center(largest + 1)
        largest = "%d" % max(self.counts)
        largest = len(largest) + 15
        h4 = "Count"
        h4 = h4.rjust(largest)
        table = []
        header = h1 + h2 + h3 + h4
        table.append(header)
        table.append("=" * len(header))
        for i, up in enumerate(self.bins):
            if i == 0:
                left = " " * width
                left += "   x[i] <= "
            else:
                left = fmt % self.bins[i - 1]
                left += " < x[i] <= "
            right = fmt % self.bins[i]
            row = left + right
            cnt = "%d" % self.counts[i]
            cnt = cnt.rjust(largest)
            row += cnt
            table.append(row)
        name = self.name
        top = name.center(len(row))
        table.insert(0, top)
        table.insert(1, " ")
        table = "\n".join(table)
        return table

    def find_bin(self, x):
        """
        Sort input or inputs according to the current bin estimate

        Parameters
        ----------
        x : array or numeric
            a value or array of values to fit within the estimated
            bins

        Returns
        -------
        a bin index or array of bin indices that classify the input into one of
        the classifiers' bins.

        Note that this differs from similar functionality in
        numpy.digitize(x, classi.bins, right=True).

        This will always provide the closest bin, so data "outside" the classifier,
        above and below the max/min breaks, will be classified into the nearest bin.

        numpy.digitize returns k+1 for data greater than the greatest bin, but retains 0
        for data below the lowest bin.
        """
        x = np.asarray(x).flatten()
        right = np.digitize(x, self.bins, right=True)
        # clamp values above the top break into the last bin
        if right.max() == len(self.bins):
            right[right == len(self.bins)] = len(self.bins) - 1
        return right
|
pysal/mapclassify
|
mapclassify/classifiers.py
|
Map_Classifier.make
|
python
|
def make(cls, *args, **kwargs):
# only flag overrides return flag
to_annotate = copy.deepcopy(kwargs)
return_object = kwargs.pop('return_object', False)
return_bins = kwargs.pop('return_bins', False)
return_counts = kwargs.pop('return_counts', False)
rolling = kwargs.pop('rolling', False)
if rolling:
# just initialize a fake classifier
data = list(range(10))
cls_instance = cls(data, *args, **kwargs)
# and empty it, since we'll be using the update
cls_instance.y = np.array([])
else:
cls_instance = None
# wrap init in a closure to make a consumer.
# Qc Na: "Objects/Closures are poor man's Closures/Objects"
def classifier(data, cls_instance=cls_instance):
if rolling:
cls_instance.update(data, inplace=True, **kwargs)
yb = cls_instance.find_bin(data)
else:
cls_instance = cls(data, *args, **kwargs)
yb = cls_instance.yb
outs = [yb, None, None, None]
outs[1] = cls_instance if return_object else None
outs[2] = cls_instance.bins if return_bins else None
outs[3] = cls_instance.counts if return_counts else None
outs = [a for a in outs if a is not None]
if len(outs) == 1:
return outs[0]
else:
return outs
# for debugging/jic, keep around the kwargs.
# in future, we might want to make this a thin class, so that we can
# set a custom repr. Call the class `Binner` or something, that's a
# pre-configured Classifier that just consumes data, bins it, &
# possibly updates the bins.
classifier._options = to_annotate
return classifier
|
Configure and create a classifier that will consume data and produce
classifications, given the configuration options specified by this
function.
Note that this like a *partial application* of the relevant class
constructor. `make` creates a function that returns classifications; it
does not actually do the classification.
If you want to classify data directly, use the appropriate class
constructor, like Quantiles, Max_Breaks, etc.
If you *have* a classifier object, but want to find which bins new data
falls into, use find_bin.
Parameters
----------
*args : required positional arguments
all positional arguments required by the classifier,
excluding the input data.
rolling : bool
a boolean configuring the outputted classifier to use
a rolling classifier rather than a new classifier for
each input. If rolling, this adds the current data to
all of the previous data in the classifier, and
rebalances the bins, like a running median
computation.
return_object : bool
a boolean configuring the outputted classifier to
return the classifier object or not
return_bins : bool
a boolean configuring the outputted classifier to
return the bins/breaks or not
return_counts : bool
a boolean configuring the outputted classifier to
return the histogram of objects falling into each bin
or not
Returns
-------
A function that consumes data and returns their bins (and object,
bins/breaks, or counts, if requested).
Note
----
This is most useful when you want to run a classifier many times
with a given configuration, such as when classifying many columns of an
array or dataframe using the same configuration.
Examples
--------
>>> import libpysal as ps
>>> import mapclassify as mc
>>> import geopandas as gpd
>>> df = gpd.read_file(ps.examples.get_path('columbus.dbf'))
>>> classifier = mc.Quantiles.make(k=9)
>>> cl = df[['HOVAL', 'CRIME', 'INC']].apply(classifier)
>>> cl["HOVAL"].values[:10]
array([8, 7, 2, 4, 1, 3, 8, 5, 7, 8])
>>> cl["CRIME"].values[:10]
array([0, 1, 3, 4, 6, 2, 0, 5, 3, 4])
>>> cl["INC"].values[:10]
array([7, 8, 5, 0, 3, 5, 0, 3, 6, 4])
>>> import pandas as pd; from numpy import linspace as lsp
>>> data = [lsp(3,8,num=10), lsp(10, 0, num=10), lsp(-5, 15, num=10)]
>>> data = pd.DataFrame(data).T
>>> data
0 1 2
0 3.000000 10.000000 -5.000000
1 3.555556 8.888889 -2.777778
2 4.111111 7.777778 -0.555556
3 4.666667 6.666667 1.666667
4 5.222222 5.555556 3.888889
5 5.777778 4.444444 6.111111
6 6.333333 3.333333 8.333333
7 6.888889 2.222222 10.555556
8 7.444444 1.111111 12.777778
9 8.000000 0.000000 15.000000
>>> data.apply(mc.Quantiles.make(rolling=True))
0 1 2
0 0 4 0
1 0 4 0
2 1 4 0
3 1 3 0
4 2 2 1
5 2 1 2
6 3 0 4
7 3 0 4
8 4 0 4
9 4 0 4
>>> dbf = ps.io.open(ps.examples.get_path('baltim.dbf'))
>>> data = dbf.by_col_array('PRICE', 'LOTSZ', 'SQFT')
>>> my_bins = [1, 10, 20, 40, 80]
>>> cl = [mc.User_Defined.make(bins=my_bins)(a) for a in data.T]
>>> len(cl)
3
>>> cl[0][:10]
array([4, 5, 5, 5, 4, 4, 5, 4, 4, 5])
|
train
|
https://github.com/pysal/mapclassify/blob/5b22ec33f5802becf40557614d90cd38efa1676e/mapclassify/classifiers.py#L476-L618
| null |
class Map_Classifier(object):
"""
Abstract class for all map classifications :cite:`Slocum_2009`
For an array :math:`y` of :math:`n` values, a map classifier places each
value :math:`y_i` into one of :math:`k` mutually exclusive and exhaustive
classes. Each classifer defines the classes based on different criteria,
but in all cases the following hold for the classifiers in PySAL:
.. math:: C_j^l < y_i \le C_j^u \ \forall i \in C_j
where :math:`C_j` denotes class :math:`j` which has lower bound
:math:`C_j^l` and upper bound :math:`C_j^u`.
Map Classifiers Supported
* :class:`mapclassify.classifiers.Box_Plot`
* :class:`mapclassify.classifiers.Equal_Interval`
* :class:`mapclassify.classifiers.Fisher_Jenks`
* :class:`mapclassify.classifiers.Fisher_Jenks_Sampled`
* :class:`mapclassify.classifiers.HeadTail_Breaks`
* :class:`mapclassify.classifiers.Jenks_Caspall`
* :class:`mapclassify.classifiers.Jenks_Caspall_Forced`
* :class:`mapclassify.classifiers.Jenks_Caspall_Sampled`
* :class:`mapclassify.classifiers.Max_P_Classifier`
* :class:`mapclassify.classifiers.Maximum_Breaks`
* :class:`mapclassify.classifiers.Natural_Breaks`
* :class:`mapclassify.classifiers.Quantiles`
* :class:`mapclassify.classifiers.Percentiles`
* :class:`mapclassify.classifiers.Std_Mean`
* :class:`mapclassify.classifiers.User_Defined`
Utilities:
In addition to the classifiers, there are several utility functions that
can be used to evaluate the properties of a specific classifier,
or for automatic selection of a classifier and
number of classes.
* :func:`mapclassify.classifiers.gadf`
* :class:`mapclassify.classifiers.K_classifiers`
"""
def __init__(self, y):
y = np.asarray(y).flatten()
self.name = 'Map Classifier'
self.y = y
self._classify()
self._summary()
def _summary(self):
yb = self.yb
self.classes = [np.nonzero(yb == c)[0].tolist() for c in range(self.k)]
self.tss = self.get_tss()
self.adcm = self.get_adcm()
self.gadf = self.get_gadf()
def _classify(self):
self._set_bins()
self.yb, self.counts = bin1d(self.y, self.bins)
def _update(self, data, *args, **kwargs):
"""
The only thing that *should* happen in this function is
1. input sanitization for pandas
2. classification/reclassification.
Using their __init__ methods, all classifiers can re-classify given
different input parameters or additional data.
If you've got a cleverer updating equation than the intial estimation
equation, remove the call to self.__init__ below and replace it with
the updating function.
"""
if data is not None:
data = np.asarray(data).flatten()
data = np.append(data.flatten(), self.y)
else:
data = self.y
self.__init__(data, *args, **kwargs)
@classmethod
def update(self, y=None, inplace=False, **kwargs):
"""
Add data or change classification parameters.
Parameters
----------
y : array
(n,1) array of data to classify
inplace : bool
whether to conduct the update in place or to return a copy
estimated from the additional specifications.
Additional parameters provided in **kwargs are passed to the init
function of the class. For documentation, check the class constructor.
"""
kwargs.update({'k': kwargs.pop('k', self.k)})
if inplace:
self._update(y, **kwargs)
else:
new = copy.deepcopy(self)
new._update(y, **kwargs)
return new
def __str__(self):
st = self._table_string()
return st
def __repr__(self):
return self._table_string()
def __call__(self, *args, **kwargs):
"""
This will allow the classifier to be called like it's a function.
Whether or not we want to make this be "find_bin" or "update" is a
design decision.
I like this as find_bin, since a classifier's job should be to classify
the data given to it using the rules estimated from the `_classify()`
function.
"""
return self.find_bin(*args)
def get_tss(self):
"""
Total sum of squares around class means
Returns sum of squares over all class means
"""
tss = 0
for class_def in self.classes:
if len(class_def) > 0:
yc = self.y[class_def]
css = yc - yc.mean()
css *= css
tss += sum(css)
return tss
def _set_bins(self):
pass
def get_adcm(self):
"""
Absolute deviation around class median (ADCM).
Calculates the absolute deviations of each observation about its class
median as a measure of fit for the classification method.
Returns sum of ADCM over all classes
"""
adcm = 0
for class_def in self.classes:
if len(class_def) > 0:
yc = self.y[class_def]
yc_med = np.median(yc)
ycd = np.abs(yc - yc_med)
adcm += sum(ycd)
return adcm
def get_gadf(self):
"""
Goodness of absolute deviation of fit
"""
adam = (np.abs(self.y - np.median(self.y))).sum()
gadf = 1 - self.adcm / adam
return gadf
def _table_string(self, width=12, decimal=3):
fmt = ".%df" % decimal
fmt = "%" + fmt
largest = max([len(fmt % i) for i in self.bins])
width = largest
fmt = "%d.%df" % (width, decimal)
fmt = "%" + fmt
h1 = "Lower"
h1 = h1.center(largest)
h2 = " "
h2 = h2.center(10)
h3 = "Upper"
h3 = h3.center(largest + 1)
largest = "%d" % max(self.counts)
largest = len(largest) + 15
h4 = "Count"
h4 = h4.rjust(largest)
table = []
header = h1 + h2 + h3 + h4
table.append(header)
table.append("=" * len(header))
for i, up in enumerate(self.bins):
if i == 0:
left = " " * width
left += " x[i] <= "
else:
left = fmt % self.bins[i - 1]
left += " < x[i] <= "
right = fmt % self.bins[i]
row = left + right
cnt = "%d" % self.counts[i]
cnt = cnt.rjust(largest)
row += cnt
table.append(row)
name = self.name
top = name.center(len(row))
table.insert(0, top)
table.insert(1, " ")
table = "\n".join(table)
return table
def find_bin(self, x):
"""
Sort input or inputs according to the current bin estimate
Parameters
----------
x : array or numeric
a value or array of values to fit within the estimated
bins
Returns
-------
a bin index or array of bin indices that classify the input into one of
the classifiers' bins.
Note that this differs from similar functionality in
numpy.digitize(x, classi.bins, right=True).
This will always provide the closest bin, so data "outside" the classifier,
above and below the max/min breaks, will be classified into the nearest bin.
numpy.digitize returns k+1 for data greater than the greatest bin, but retains 0
for data below the lowest bin.
"""
x = np.asarray(x).flatten()
right = np.digitize(x, self.bins, right=True)
if right.max() == len(self.bins):
right[right == len(self.bins)] = len(self.bins) - 1
return right
|
pysal/mapclassify
|
mapclassify/classifiers.py
|
Map_Classifier.get_tss
|
python
|
def get_tss(self):
tss = 0
for class_def in self.classes:
if len(class_def) > 0:
yc = self.y[class_def]
css = yc - yc.mean()
css *= css
tss += sum(css)
return tss
|
Total sum of squares around class means
Returns sum of squares over all class means
|
train
|
https://github.com/pysal/mapclassify/blob/5b22ec33f5802becf40557614d90cd38efa1676e/mapclassify/classifiers.py#L663-L676
| null |
class Map_Classifier(object):
"""
Abstract class for all map classifications :cite:`Slocum_2009`
For an array :math:`y` of :math:`n` values, a map classifier places each
value :math:`y_i` into one of :math:`k` mutually exclusive and exhaustive
classes. Each classifer defines the classes based on different criteria,
but in all cases the following hold for the classifiers in PySAL:
.. math:: C_j^l < y_i \le C_j^u \ \forall i \in C_j
where :math:`C_j` denotes class :math:`j` which has lower bound
:math:`C_j^l` and upper bound :math:`C_j^u`.
Map Classifiers Supported
* :class:`mapclassify.classifiers.Box_Plot`
* :class:`mapclassify.classifiers.Equal_Interval`
* :class:`mapclassify.classifiers.Fisher_Jenks`
* :class:`mapclassify.classifiers.Fisher_Jenks_Sampled`
* :class:`mapclassify.classifiers.HeadTail_Breaks`
* :class:`mapclassify.classifiers.Jenks_Caspall`
* :class:`mapclassify.classifiers.Jenks_Caspall_Forced`
* :class:`mapclassify.classifiers.Jenks_Caspall_Sampled`
* :class:`mapclassify.classifiers.Max_P_Classifier`
* :class:`mapclassify.classifiers.Maximum_Breaks`
* :class:`mapclassify.classifiers.Natural_Breaks`
* :class:`mapclassify.classifiers.Quantiles`
* :class:`mapclassify.classifiers.Percentiles`
* :class:`mapclassify.classifiers.Std_Mean`
* :class:`mapclassify.classifiers.User_Defined`
Utilities:
In addition to the classifiers, there are several utility functions that
can be used to evaluate the properties of a specific classifier,
or for automatic selection of a classifier and
number of classes.
* :func:`mapclassify.classifiers.gadf`
* :class:`mapclassify.classifiers.K_classifiers`
"""
def __init__(self, y):
y = np.asarray(y).flatten()
self.name = 'Map Classifier'
self.y = y
self._classify()
self._summary()
def _summary(self):
yb = self.yb
self.classes = [np.nonzero(yb == c)[0].tolist() for c in range(self.k)]
self.tss = self.get_tss()
self.adcm = self.get_adcm()
self.gadf = self.get_gadf()
def _classify(self):
self._set_bins()
self.yb, self.counts = bin1d(self.y, self.bins)
def _update(self, data, *args, **kwargs):
"""
The only thing that *should* happen in this function is
1. input sanitization for pandas
2. classification/reclassification.
Using their __init__ methods, all classifiers can re-classify given
different input parameters or additional data.
If you've got a cleverer updating equation than the intial estimation
equation, remove the call to self.__init__ below and replace it with
the updating function.
"""
if data is not None:
data = np.asarray(data).flatten()
data = np.append(data.flatten(), self.y)
else:
data = self.y
self.__init__(data, *args, **kwargs)
@classmethod
def make(cls, *args, **kwargs):
"""
Configure and create a classifier that will consume data and produce
classifications, given the configuration options specified by this
function.
Note that this like a *partial application* of the relevant class
constructor. `make` creates a function that returns classifications; it
does not actually do the classification.
If you want to classify data directly, use the appropriate class
constructor, like Quantiles, Max_Breaks, etc.
If you *have* a classifier object, but want to find which bins new data
falls into, use find_bin.
Parameters
----------
*args : required positional arguments
all positional arguments required by the classifier,
excluding the input data.
rolling : bool
a boolean configuring the outputted classifier to use
a rolling classifier rather than a new classifier for
each input. If rolling, this adds the current data to
all of the previous data in the classifier, and
rebalances the bins, like a running median
computation.
return_object : bool
a boolean configuring the outputted classifier to
return the classifier object or not
return_bins : bool
a boolean configuring the outputted classifier to
return the bins/breaks or not
return_counts : bool
a boolean configuring the outputted classifier to
return the histogram of objects falling into each bin
or not
Returns
-------
A function that consumes data and returns their bins (and object,
bins/breaks, or counts, if requested).
Note
----
This is most useful when you want to run a classifier many times
with a given configuration, such as when classifying many columns of an
array or dataframe using the same configuration.
Examples
--------
>>> import libpysal as ps
>>> import mapclassify as mc
>>> import geopandas as gpd
>>> df = gpd.read_file(ps.examples.get_path('columbus.dbf'))
>>> classifier = mc.Quantiles.make(k=9)
>>> cl = df[['HOVAL', 'CRIME', 'INC']].apply(classifier)
>>> cl["HOVAL"].values[:10]
array([8, 7, 2, 4, 1, 3, 8, 5, 7, 8])
>>> cl["CRIME"].values[:10]
array([0, 1, 3, 4, 6, 2, 0, 5, 3, 4])
>>> cl["INC"].values[:10]
array([7, 8, 5, 0, 3, 5, 0, 3, 6, 4])
>>> import pandas as pd; from numpy import linspace as lsp
>>> data = [lsp(3,8,num=10), lsp(10, 0, num=10), lsp(-5, 15, num=10)]
>>> data = pd.DataFrame(data).T
>>> data
0 1 2
0 3.000000 10.000000 -5.000000
1 3.555556 8.888889 -2.777778
2 4.111111 7.777778 -0.555556
3 4.666667 6.666667 1.666667
4 5.222222 5.555556 3.888889
5 5.777778 4.444444 6.111111
6 6.333333 3.333333 8.333333
7 6.888889 2.222222 10.555556
8 7.444444 1.111111 12.777778
9 8.000000 0.000000 15.000000
>>> data.apply(mc.Quantiles.make(rolling=True))
0 1 2
0 0 4 0
1 0 4 0
2 1 4 0
3 1 3 0
4 2 2 1
5 2 1 2
6 3 0 4
7 3 0 4
8 4 0 4
9 4 0 4
>>> dbf = ps.io.open(ps.examples.get_path('baltim.dbf'))
>>> data = dbf.by_col_array('PRICE', 'LOTSZ', 'SQFT')
>>> my_bins = [1, 10, 20, 40, 80]
>>> cl = [mc.User_Defined.make(bins=my_bins)(a) for a in data.T]
>>> len(cl)
3
>>> cl[0][:10]
array([4, 5, 5, 5, 4, 4, 5, 4, 4, 5])
"""
# only flag overrides return flag
to_annotate = copy.deepcopy(kwargs)
return_object = kwargs.pop('return_object', False)
return_bins = kwargs.pop('return_bins', False)
return_counts = kwargs.pop('return_counts', False)
rolling = kwargs.pop('rolling', False)
if rolling:
# just initialize a fake classifier
data = list(range(10))
cls_instance = cls(data, *args, **kwargs)
# and empty it, since we'll be using the update
cls_instance.y = np.array([])
else:
cls_instance = None
# wrap init in a closure to make a consumer.
# Qc Na: "Objects/Closures are poor man's Closures/Objects"
def classifier(data, cls_instance=cls_instance):
if rolling:
cls_instance.update(data, inplace=True, **kwargs)
yb = cls_instance.find_bin(data)
else:
cls_instance = cls(data, *args, **kwargs)
yb = cls_instance.yb
outs = [yb, None, None, None]
outs[1] = cls_instance if return_object else None
outs[2] = cls_instance.bins if return_bins else None
outs[3] = cls_instance.counts if return_counts else None
outs = [a for a in outs if a is not None]
if len(outs) == 1:
return outs[0]
else:
return outs
# for debugging/jic, keep around the kwargs.
# in future, we might want to make this a thin class, so that we can
# set a custom repr. Call the class `Binner` or something, that's a
# pre-configured Classifier that just consumes data, bins it, &
# possibly updates the bins.
classifier._options = to_annotate
return classifier
def update(self, y=None, inplace=False, **kwargs):
"""
Add data or change classification parameters.
Parameters
----------
y : array
(n,1) array of data to classify
inplace : bool
whether to conduct the update in place or to return a copy
estimated from the additional specifications.
Additional parameters provided in **kwargs are passed to the init
function of the class. For documentation, check the class constructor.
"""
kwargs.update({'k': kwargs.pop('k', self.k)})
if inplace:
self._update(y, **kwargs)
else:
new = copy.deepcopy(self)
new._update(y, **kwargs)
return new
def __str__(self):
st = self._table_string()
return st
def __repr__(self):
return self._table_string()
def __call__(self, *args, **kwargs):
"""
This will allow the classifier to be called like it's a function.
Whether or not we want to make this be "find_bin" or "update" is a
design decision.
I like this as find_bin, since a classifier's job should be to classify
the data given to it using the rules estimated from the `_classify()`
function.
"""
return self.find_bin(*args)
def _set_bins(self):
pass
def get_adcm(self):
"""
Absolute deviation around class median (ADCM).
Calculates the absolute deviations of each observation about its class
median as a measure of fit for the classification method.
Returns sum of ADCM over all classes
"""
adcm = 0
for class_def in self.classes:
if len(class_def) > 0:
yc = self.y[class_def]
yc_med = np.median(yc)
ycd = np.abs(yc - yc_med)
adcm += sum(ycd)
return adcm
def get_gadf(self):
"""
Goodness of absolute deviation of fit
"""
adam = (np.abs(self.y - np.median(self.y))).sum()
gadf = 1 - self.adcm / adam
return gadf
def _table_string(self, width=12, decimal=3):
fmt = ".%df" % decimal
fmt = "%" + fmt
largest = max([len(fmt % i) for i in self.bins])
width = largest
fmt = "%d.%df" % (width, decimal)
fmt = "%" + fmt
h1 = "Lower"
h1 = h1.center(largest)
h2 = " "
h2 = h2.center(10)
h3 = "Upper"
h3 = h3.center(largest + 1)
largest = "%d" % max(self.counts)
largest = len(largest) + 15
h4 = "Count"
h4 = h4.rjust(largest)
table = []
header = h1 + h2 + h3 + h4
table.append(header)
table.append("=" * len(header))
for i, up in enumerate(self.bins):
if i == 0:
left = " " * width
left += " x[i] <= "
else:
left = fmt % self.bins[i - 1]
left += " < x[i] <= "
right = fmt % self.bins[i]
row = left + right
cnt = "%d" % self.counts[i]
cnt = cnt.rjust(largest)
row += cnt
table.append(row)
name = self.name
top = name.center(len(row))
table.insert(0, top)
table.insert(1, " ")
table = "\n".join(table)
return table
def find_bin(self, x):
"""
Sort input or inputs according to the current bin estimate
Parameters
----------
x : array or numeric
a value or array of values to fit within the estimated
bins
Returns
-------
a bin index or array of bin indices that classify the input into one of
the classifiers' bins.
Note that this differs from similar functionality in
numpy.digitize(x, classi.bins, right=True).
This will always provide the closest bin, so data "outside" the classifier,
above and below the max/min breaks, will be classified into the nearest bin.
numpy.digitize returns k+1 for data greater than the greatest bin, but retains 0
for data below the lowest bin.
"""
x = np.asarray(x).flatten()
right = np.digitize(x, self.bins, right=True)
if right.max() == len(self.bins):
right[right == len(self.bins)] = len(self.bins) - 1
return right
|
pysal/mapclassify
|
mapclassify/classifiers.py
|
Map_Classifier.get_adcm
|
python
|
def get_adcm(self):
adcm = 0
for class_def in self.classes:
if len(class_def) > 0:
yc = self.y[class_def]
yc_med = np.median(yc)
ycd = np.abs(yc - yc_med)
adcm += sum(ycd)
return adcm
|
Absolute deviation around class median (ADCM).
Calculates the absolute deviations of each observation about its class
median as a measure of fit for the classification method.
Returns sum of ADCM over all classes
|
train
|
https://github.com/pysal/mapclassify/blob/5b22ec33f5802becf40557614d90cd38efa1676e/mapclassify/classifiers.py#L681-L697
| null |
class Map_Classifier(object):
"""
Abstract class for all map classifications :cite:`Slocum_2009`
For an array :math:`y` of :math:`n` values, a map classifier places each
value :math:`y_i` into one of :math:`k` mutually exclusive and exhaustive
classes. Each classifer defines the classes based on different criteria,
but in all cases the following hold for the classifiers in PySAL:
.. math:: C_j^l < y_i \le C_j^u \ \forall i \in C_j
where :math:`C_j` denotes class :math:`j` which has lower bound
:math:`C_j^l` and upper bound :math:`C_j^u`.
Map Classifiers Supported
* :class:`mapclassify.classifiers.Box_Plot`
* :class:`mapclassify.classifiers.Equal_Interval`
* :class:`mapclassify.classifiers.Fisher_Jenks`
* :class:`mapclassify.classifiers.Fisher_Jenks_Sampled`
* :class:`mapclassify.classifiers.HeadTail_Breaks`
* :class:`mapclassify.classifiers.Jenks_Caspall`
* :class:`mapclassify.classifiers.Jenks_Caspall_Forced`
* :class:`mapclassify.classifiers.Jenks_Caspall_Sampled`
* :class:`mapclassify.classifiers.Max_P_Classifier`
* :class:`mapclassify.classifiers.Maximum_Breaks`
* :class:`mapclassify.classifiers.Natural_Breaks`
* :class:`mapclassify.classifiers.Quantiles`
* :class:`mapclassify.classifiers.Percentiles`
* :class:`mapclassify.classifiers.Std_Mean`
* :class:`mapclassify.classifiers.User_Defined`
Utilities:
In addition to the classifiers, there are several utility functions that
can be used to evaluate the properties of a specific classifier,
or for automatic selection of a classifier and
number of classes.
* :func:`mapclassify.classifiers.gadf`
* :class:`mapclassify.classifiers.K_classifiers`
"""
def __init__(self, y):
y = np.asarray(y).flatten()
self.name = 'Map Classifier'
self.y = y
self._classify()
self._summary()
def _summary(self):
yb = self.yb
self.classes = [np.nonzero(yb == c)[0].tolist() for c in range(self.k)]
self.tss = self.get_tss()
self.adcm = self.get_adcm()
self.gadf = self.get_gadf()
def _classify(self):
self._set_bins()
self.yb, self.counts = bin1d(self.y, self.bins)
def _update(self, data, *args, **kwargs):
"""
The only thing that *should* happen in this function is
1. input sanitization for pandas
2. classification/reclassification.
Using their __init__ methods, all classifiers can re-classify given
different input parameters or additional data.
If you've got a cleverer updating equation than the intial estimation
equation, remove the call to self.__init__ below and replace it with
the updating function.
"""
if data is not None:
data = np.asarray(data).flatten()
data = np.append(data.flatten(), self.y)
else:
data = self.y
self.__init__(data, *args, **kwargs)
@classmethod
def make(cls, *args, **kwargs):
"""
Configure and create a classifier that will consume data and produce
classifications, given the configuration options specified by this
function.
Note that this like a *partial application* of the relevant class
constructor. `make` creates a function that returns classifications; it
does not actually do the classification.
If you want to classify data directly, use the appropriate class
constructor, like Quantiles, Max_Breaks, etc.
If you *have* a classifier object, but want to find which bins new data
falls into, use find_bin.
Parameters
----------
*args : required positional arguments
all positional arguments required by the classifier,
excluding the input data.
rolling : bool
a boolean configuring the outputted classifier to use
a rolling classifier rather than a new classifier for
each input. If rolling, this adds the current data to
all of the previous data in the classifier, and
rebalances the bins, like a running median
computation.
return_object : bool
a boolean configuring the outputted classifier to
return the classifier object or not
return_bins : bool
a boolean configuring the outputted classifier to
return the bins/breaks or not
return_counts : bool
a boolean configuring the outputted classifier to
return the histogram of objects falling into each bin
or not
Returns
-------
A function that consumes data and returns their bins (and object,
bins/breaks, or counts, if requested).
Note
----
This is most useful when you want to run a classifier many times
with a given configuration, such as when classifying many columns of an
array or dataframe using the same configuration.
Examples
--------
>>> import libpysal as ps
>>> import mapclassify as mc
>>> import geopandas as gpd
>>> df = gpd.read_file(ps.examples.get_path('columbus.dbf'))
>>> classifier = mc.Quantiles.make(k=9)
>>> cl = df[['HOVAL', 'CRIME', 'INC']].apply(classifier)
>>> cl["HOVAL"].values[:10]
array([8, 7, 2, 4, 1, 3, 8, 5, 7, 8])
>>> cl["CRIME"].values[:10]
array([0, 1, 3, 4, 6, 2, 0, 5, 3, 4])
>>> cl["INC"].values[:10]
array([7, 8, 5, 0, 3, 5, 0, 3, 6, 4])
>>> import pandas as pd; from numpy import linspace as lsp
>>> data = [lsp(3,8,num=10), lsp(10, 0, num=10), lsp(-5, 15, num=10)]
>>> data = pd.DataFrame(data).T
>>> data
0 1 2
0 3.000000 10.000000 -5.000000
1 3.555556 8.888889 -2.777778
2 4.111111 7.777778 -0.555556
3 4.666667 6.666667 1.666667
4 5.222222 5.555556 3.888889
5 5.777778 4.444444 6.111111
6 6.333333 3.333333 8.333333
7 6.888889 2.222222 10.555556
8 7.444444 1.111111 12.777778
9 8.000000 0.000000 15.000000
>>> data.apply(mc.Quantiles.make(rolling=True))
0 1 2
0 0 4 0
1 0 4 0
2 1 4 0
3 1 3 0
4 2 2 1
5 2 1 2
6 3 0 4
7 3 0 4
8 4 0 4
9 4 0 4
>>> dbf = ps.io.open(ps.examples.get_path('baltim.dbf'))
>>> data = dbf.by_col_array('PRICE', 'LOTSZ', 'SQFT')
>>> my_bins = [1, 10, 20, 40, 80]
>>> cl = [mc.User_Defined.make(bins=my_bins)(a) for a in data.T]
>>> len(cl)
3
>>> cl[0][:10]
array([4, 5, 5, 5, 4, 4, 5, 4, 4, 5])
"""
# only flag overrides return flag
to_annotate = copy.deepcopy(kwargs)
return_object = kwargs.pop('return_object', False)
return_bins = kwargs.pop('return_bins', False)
return_counts = kwargs.pop('return_counts', False)
rolling = kwargs.pop('rolling', False)
if rolling:
# just initialize a fake classifier
data = list(range(10))
cls_instance = cls(data, *args, **kwargs)
# and empty it, since we'll be using the update
cls_instance.y = np.array([])
else:
cls_instance = None
# wrap init in a closure to make a consumer.
# Qc Na: "Objects/Closures are poor man's Closures/Objects"
def classifier(data, cls_instance=cls_instance):
if rolling:
cls_instance.update(data, inplace=True, **kwargs)
yb = cls_instance.find_bin(data)
else:
cls_instance = cls(data, *args, **kwargs)
yb = cls_instance.yb
outs = [yb, None, None, None]
outs[1] = cls_instance if return_object else None
outs[2] = cls_instance.bins if return_bins else None
outs[3] = cls_instance.counts if return_counts else None
outs = [a for a in outs if a is not None]
if len(outs) == 1:
return outs[0]
else:
return outs
# for debugging/jic, keep around the kwargs.
# in future, we might want to make this a thin class, so that we can
# set a custom repr. Call the class `Binner` or something, that's a
# pre-configured Classifier that just consumes data, bins it, &
# possibly updates the bins.
classifier._options = to_annotate
return classifier
def update(self, y=None, inplace=False, **kwargs):
"""
Add data or change classification parameters.
Parameters
----------
y : array
(n,1) array of data to classify
inplace : bool
whether to conduct the update in place or to return a copy
estimated from the additional specifications.
Additional parameters provided in **kwargs are passed to the init
function of the class. For documentation, check the class constructor.
"""
kwargs.update({'k': kwargs.pop('k', self.k)})
if inplace:
self._update(y, **kwargs)
else:
new = copy.deepcopy(self)
new._update(y, **kwargs)
return new
def __str__(self):
st = self._table_string()
return st
def __repr__(self):
return self._table_string()
def __call__(self, *args, **kwargs):
"""
This will allow the classifier to be called like it's a function.
Whether or not we want to make this be "find_bin" or "update" is a
design decision.
I like this as find_bin, since a classifier's job should be to classify
the data given to it using the rules estimated from the `_classify()`
function.
"""
return self.find_bin(*args)
def get_tss(self):
"""
Total sum of squares around class means
Returns sum of squares over all class means
"""
tss = 0
for class_def in self.classes:
if len(class_def) > 0:
yc = self.y[class_def]
css = yc - yc.mean()
css *= css
tss += sum(css)
return tss
def _set_bins(self):
pass
def get_gadf(self):
"""
Goodness of absolute deviation of fit
"""
adam = (np.abs(self.y - np.median(self.y))).sum()
gadf = 1 - self.adcm / adam
return gadf
def _table_string(self, width=12, decimal=3):
fmt = ".%df" % decimal
fmt = "%" + fmt
largest = max([len(fmt % i) for i in self.bins])
width = largest
fmt = "%d.%df" % (width, decimal)
fmt = "%" + fmt
h1 = "Lower"
h1 = h1.center(largest)
h2 = " "
h2 = h2.center(10)
h3 = "Upper"
h3 = h3.center(largest + 1)
largest = "%d" % max(self.counts)
largest = len(largest) + 15
h4 = "Count"
h4 = h4.rjust(largest)
table = []
header = h1 + h2 + h3 + h4
table.append(header)
table.append("=" * len(header))
for i, up in enumerate(self.bins):
if i == 0:
left = " " * width
left += " x[i] <= "
else:
left = fmt % self.bins[i - 1]
left += " < x[i] <= "
right = fmt % self.bins[i]
row = left + right
cnt = "%d" % self.counts[i]
cnt = cnt.rjust(largest)
row += cnt
table.append(row)
name = self.name
top = name.center(len(row))
table.insert(0, top)
table.insert(1, " ")
table = "\n".join(table)
return table
def find_bin(self, x):
"""
Sort input or inputs according to the current bin estimate
Parameters
----------
x : array or numeric
a value or array of values to fit within the estimated
bins
Returns
-------
a bin index or array of bin indices that classify the input into one of
the classifiers' bins.
Note that this differs from similar functionality in
numpy.digitize(x, classi.bins, right=True).
This will always provide the closest bin, so data "outside" the classifier,
above and below the max/min breaks, will be classified into the nearest bin.
numpy.digitize returns k+1 for data greater than the greatest bin, but retains 0
for data below the lowest bin.
"""
x = np.asarray(x).flatten()
right = np.digitize(x, self.bins, right=True)
if right.max() == len(self.bins):
right[right == len(self.bins)] = len(self.bins) - 1
return right
|
pysal/mapclassify
|
mapclassify/classifiers.py
|
Map_Classifier.get_gadf
|
python
|
def get_gadf(self):
adam = (np.abs(self.y - np.median(self.y))).sum()
gadf = 1 - self.adcm / adam
return gadf
|
Goodness of absolute deviation of fit
|
train
|
https://github.com/pysal/mapclassify/blob/5b22ec33f5802becf40557614d90cd38efa1676e/mapclassify/classifiers.py#L699-L705
| null |
class Map_Classifier(object):
"""
Abstract class for all map classifications :cite:`Slocum_2009`
For an array :math:`y` of :math:`n` values, a map classifier places each
value :math:`y_i` into one of :math:`k` mutually exclusive and exhaustive
classes. Each classifer defines the classes based on different criteria,
but in all cases the following hold for the classifiers in PySAL:
.. math:: C_j^l < y_i \le C_j^u \ \forall i \in C_j
where :math:`C_j` denotes class :math:`j` which has lower bound
:math:`C_j^l` and upper bound :math:`C_j^u`.
Map Classifiers Supported
* :class:`mapclassify.classifiers.Box_Plot`
* :class:`mapclassify.classifiers.Equal_Interval`
* :class:`mapclassify.classifiers.Fisher_Jenks`
* :class:`mapclassify.classifiers.Fisher_Jenks_Sampled`
* :class:`mapclassify.classifiers.HeadTail_Breaks`
* :class:`mapclassify.classifiers.Jenks_Caspall`
* :class:`mapclassify.classifiers.Jenks_Caspall_Forced`
* :class:`mapclassify.classifiers.Jenks_Caspall_Sampled`
* :class:`mapclassify.classifiers.Max_P_Classifier`
* :class:`mapclassify.classifiers.Maximum_Breaks`
* :class:`mapclassify.classifiers.Natural_Breaks`
* :class:`mapclassify.classifiers.Quantiles`
* :class:`mapclassify.classifiers.Percentiles`
* :class:`mapclassify.classifiers.Std_Mean`
* :class:`mapclassify.classifiers.User_Defined`
Utilities:
In addition to the classifiers, there are several utility functions that
can be used to evaluate the properties of a specific classifier,
or for automatic selection of a classifier and
number of classes.
* :func:`mapclassify.classifiers.gadf`
* :class:`mapclassify.classifiers.K_classifiers`
"""
def __init__(self, y):
y = np.asarray(y).flatten()
self.name = 'Map Classifier'
self.y = y
self._classify()
self._summary()
def _summary(self):
yb = self.yb
self.classes = [np.nonzero(yb == c)[0].tolist() for c in range(self.k)]
self.tss = self.get_tss()
self.adcm = self.get_adcm()
self.gadf = self.get_gadf()
def _classify(self):
self._set_bins()
self.yb, self.counts = bin1d(self.y, self.bins)
def _update(self, data, *args, **kwargs):
"""
The only thing that *should* happen in this function is
1. input sanitization for pandas
2. classification/reclassification.
Using their __init__ methods, all classifiers can re-classify given
different input parameters or additional data.
If you've got a cleverer updating equation than the intial estimation
equation, remove the call to self.__init__ below and replace it with
the updating function.
"""
if data is not None:
data = np.asarray(data).flatten()
data = np.append(data.flatten(), self.y)
else:
data = self.y
self.__init__(data, *args, **kwargs)
@classmethod
def make(cls, *args, **kwargs):
"""
Configure and create a classifier that will consume data and produce
classifications, given the configuration options specified by this
function.
Note that this like a *partial application* of the relevant class
constructor. `make` creates a function that returns classifications; it
does not actually do the classification.
If you want to classify data directly, use the appropriate class
constructor, like Quantiles, Max_Breaks, etc.
If you *have* a classifier object, but want to find which bins new data
falls into, use find_bin.
Parameters
----------
*args : required positional arguments
all positional arguments required by the classifier,
excluding the input data.
rolling : bool
a boolean configuring the outputted classifier to use
a rolling classifier rather than a new classifier for
each input. If rolling, this adds the current data to
all of the previous data in the classifier, and
rebalances the bins, like a running median
computation.
return_object : bool
a boolean configuring the outputted classifier to
return the classifier object or not
return_bins : bool
a boolean configuring the outputted classifier to
return the bins/breaks or not
return_counts : bool
a boolean configuring the outputted classifier to
return the histogram of objects falling into each bin
or not
Returns
-------
A function that consumes data and returns their bins (and object,
bins/breaks, or counts, if requested).
Note
----
This is most useful when you want to run a classifier many times
with a given configuration, such as when classifying many columns of an
array or dataframe using the same configuration.
Examples
--------
>>> import libpysal as ps
>>> import mapclassify as mc
>>> import geopandas as gpd
>>> df = gpd.read_file(ps.examples.get_path('columbus.dbf'))
>>> classifier = mc.Quantiles.make(k=9)
>>> cl = df[['HOVAL', 'CRIME', 'INC']].apply(classifier)
>>> cl["HOVAL"].values[:10]
array([8, 7, 2, 4, 1, 3, 8, 5, 7, 8])
>>> cl["CRIME"].values[:10]
array([0, 1, 3, 4, 6, 2, 0, 5, 3, 4])
>>> cl["INC"].values[:10]
array([7, 8, 5, 0, 3, 5, 0, 3, 6, 4])
>>> import pandas as pd; from numpy import linspace as lsp
>>> data = [lsp(3,8,num=10), lsp(10, 0, num=10), lsp(-5, 15, num=10)]
>>> data = pd.DataFrame(data).T
>>> data
0 1 2
0 3.000000 10.000000 -5.000000
1 3.555556 8.888889 -2.777778
2 4.111111 7.777778 -0.555556
3 4.666667 6.666667 1.666667
4 5.222222 5.555556 3.888889
5 5.777778 4.444444 6.111111
6 6.333333 3.333333 8.333333
7 6.888889 2.222222 10.555556
8 7.444444 1.111111 12.777778
9 8.000000 0.000000 15.000000
>>> data.apply(mc.Quantiles.make(rolling=True))
0 1 2
0 0 4 0
1 0 4 0
2 1 4 0
3 1 3 0
4 2 2 1
5 2 1 2
6 3 0 4
7 3 0 4
8 4 0 4
9 4 0 4
>>> dbf = ps.io.open(ps.examples.get_path('baltim.dbf'))
>>> data = dbf.by_col_array('PRICE', 'LOTSZ', 'SQFT')
>>> my_bins = [1, 10, 20, 40, 80]
>>> cl = [mc.User_Defined.make(bins=my_bins)(a) for a in data.T]
>>> len(cl)
3
>>> cl[0][:10]
array([4, 5, 5, 5, 4, 4, 5, 4, 4, 5])
"""
# only flag overrides return flag
to_annotate = copy.deepcopy(kwargs)
return_object = kwargs.pop('return_object', False)
return_bins = kwargs.pop('return_bins', False)
return_counts = kwargs.pop('return_counts', False)
rolling = kwargs.pop('rolling', False)
if rolling:
# just initialize a fake classifier
data = list(range(10))
cls_instance = cls(data, *args, **kwargs)
# and empty it, since we'll be using the update
cls_instance.y = np.array([])
else:
cls_instance = None
# wrap init in a closure to make a consumer.
# Qc Na: "Objects/Closures are poor man's Closures/Objects"
def classifier(data, cls_instance=cls_instance):
if rolling:
cls_instance.update(data, inplace=True, **kwargs)
yb = cls_instance.find_bin(data)
else:
cls_instance = cls(data, *args, **kwargs)
yb = cls_instance.yb
outs = [yb, None, None, None]
outs[1] = cls_instance if return_object else None
outs[2] = cls_instance.bins if return_bins else None
outs[3] = cls_instance.counts if return_counts else None
outs = [a for a in outs if a is not None]
if len(outs) == 1:
return outs[0]
else:
return outs
# for debugging/jic, keep around the kwargs.
# in future, we might want to make this a thin class, so that we can
# set a custom repr. Call the class `Binner` or something, that's a
# pre-configured Classifier that just consumes data, bins it, &
# possibly updates the bins.
classifier._options = to_annotate
return classifier
def update(self, y=None, inplace=False, **kwargs):
    """
    Add data or change classification parameters.

    Parameters
    ----------
    y : array
        (n,1) array of data to classify
    inplace : bool
        whether to conduct the update in place or to return a copy
        estimated from the additional specifications.

    Additional parameters provided in **kwargs are passed to the init
    function of the class. For documentation, check the class constructor.
    """
    # Keep the current number of classes unless the caller overrides it.
    kwargs['k'] = kwargs.pop('k', self.k)
    if not inplace:
        clone = copy.deepcopy(self)
        clone._update(y, **kwargs)
        return clone
    self._update(y, **kwargs)
def __str__(self):
st = self._table_string()
return st
def __repr__(self):
return self._table_string()
def __call__(self, *args, **kwargs):
"""
This will allow the classifier to be called like it's a function.
Whether or not we want to make this be "find_bin" or "update" is a
design decision.
I like this as find_bin, since a classifier's job should be to classify
the data given to it using the rules estimated from the `_classify()`
function.
"""
return self.find_bin(*args)
def get_tss(self):
"""
Total sum of squares around class means
Returns sum of squares over all class means
"""
tss = 0
for class_def in self.classes:
if len(class_def) > 0:
yc = self.y[class_def]
css = yc - yc.mean()
css *= css
tss += sum(css)
return tss
def _set_bins(self):
pass
def get_adcm(self):
"""
Absolute deviation around class median (ADCM).
Calculates the absolute deviations of each observation about its class
median as a measure of fit for the classification method.
Returns sum of ADCM over all classes
"""
adcm = 0
for class_def in self.classes:
if len(class_def) > 0:
yc = self.y[class_def]
yc_med = np.median(yc)
ycd = np.abs(yc - yc_med)
adcm += sum(ycd)
return adcm
def _table_string(self, width=12, decimal=3):
"""Render the classification as a fixed-width text table.

Builds a header row (Lower / Upper / Count), a separator, and one row
per class showing its interval and observation count. The `width`
parameter is immediately overwritten by the widest formatted bin value,
so only `decimal` effectively controls the layout.
"""
# Build a "%<width>.<decimal>f" format wide enough for the largest bin.
fmt = ".%df" % decimal
fmt = "%" + fmt
largest = max([len(fmt % i) for i in self.bins])
width = largest
fmt = "%d.%df" % (width, decimal)
fmt = "%" + fmt
# Column headers, centered/right-justified to match the data columns.
h1 = "Lower"
h1 = h1.center(largest)
h2 = " "
h2 = h2.center(10)
h3 = "Upper"
h3 = h3.center(largest + 1)
# `largest` is reused here: now the width of the Count column,
# sized from the biggest class count plus padding.
largest = "%d" % max(self.counts)
largest = len(largest) + 15
h4 = "Count"
h4 = h4.rjust(largest)
table = []
header = h1 + h2 + h3 + h4
table.append(header)
table.append("=" * len(header))
# One row per class: "lower < x[i] <= upper   count".
for i, up in enumerate(self.bins):
if i == 0:
# First class has no lower bound; pad with spaces instead.
left = " " * width
left += " x[i] <= "
else:
left = fmt % self.bins[i - 1]
left += " < x[i] <= "
right = fmt % self.bins[i]
row = left + right
cnt = "%d" % self.counts[i]
cnt = cnt.rjust(largest)
row += cnt
table.append(row)
# Title (classifier name) centered over the last data row's width.
name = self.name
top = name.center(len(row))
table.insert(0, top)
table.insert(1, " ")
table = "\n".join(table)
return table
def find_bin(self, x):
"""
Sort input or inputs according to the current bin estimate
Parameters
----------
x : array or numeric
a value or array of values to fit within the estimated
bins
Returns
-------
a bin index or array of bin indices that classify the input into one of
the classifiers' bins.
Note that this differs from similar functionality in
numpy.digitize(x, classi.bins, right=True).
This will always provide the closest bin, so data "outside" the classifier,
above and below the max/min breaks, will be classified into the nearest bin.
numpy.digitize returns k+1 for data greater than the greatest bin, but retains 0
for data below the lowest bin.
"""
x = np.asarray(x).flatten()
right = np.digitize(x, self.bins, right=True)
if right.max() == len(self.bins):
right[right == len(self.bins)] = len(self.bins) - 1
return right
|
pysal/mapclassify
|
mapclassify/classifiers.py
|
Map_Classifier.find_bin
|
python
|
def find_bin(self, x):
x = np.asarray(x).flatten()
right = np.digitize(x, self.bins, right=True)
if right.max() == len(self.bins):
right[right == len(self.bins)] = len(self.bins) - 1
return right
|
Sort input or inputs according to the current bin estimate
Parameters
----------
x : array or numeric
a value or array of values to fit within the estimated
bins
Returns
-------
a bin index or array of bin indices that classify the input into one of
the classifiers' bins.
Note that this differs from similar functionality in
numpy.digitize(x, classi.bins, right=True).
This will always provide the closest bin, so data "outside" the classifier,
above and below the max/min breaks, will be classified into the nearest bin.
numpy.digitize returns k+1 for data greater than the greatest bin, but retains 0
for data below the lowest bin.
|
train
|
https://github.com/pysal/mapclassify/blob/5b22ec33f5802becf40557614d90cd38efa1676e/mapclassify/classifiers.py#L751-L779
| null |
class Map_Classifier(object):
"""
Abstract class for all map classifications :cite:`Slocum_2009`
For an array :math:`y` of :math:`n` values, a map classifier places each
value :math:`y_i` into one of :math:`k` mutually exclusive and exhaustive
classes. Each classifer defines the classes based on different criteria,
but in all cases the following hold for the classifiers in PySAL:
.. math:: C_j^l < y_i \le C_j^u \ \forall i \in C_j
where :math:`C_j` denotes class :math:`j` which has lower bound
:math:`C_j^l` and upper bound :math:`C_j^u`.
Map Classifiers Supported
* :class:`mapclassify.classifiers.Box_Plot`
* :class:`mapclassify.classifiers.Equal_Interval`
* :class:`mapclassify.classifiers.Fisher_Jenks`
* :class:`mapclassify.classifiers.Fisher_Jenks_Sampled`
* :class:`mapclassify.classifiers.HeadTail_Breaks`
* :class:`mapclassify.classifiers.Jenks_Caspall`
* :class:`mapclassify.classifiers.Jenks_Caspall_Forced`
* :class:`mapclassify.classifiers.Jenks_Caspall_Sampled`
* :class:`mapclassify.classifiers.Max_P_Classifier`
* :class:`mapclassify.classifiers.Maximum_Breaks`
* :class:`mapclassify.classifiers.Natural_Breaks`
* :class:`mapclassify.classifiers.Quantiles`
* :class:`mapclassify.classifiers.Percentiles`
* :class:`mapclassify.classifiers.Std_Mean`
* :class:`mapclassify.classifiers.User_Defined`
Utilities:
In addition to the classifiers, there are several utility functions that
can be used to evaluate the properties of a specific classifier,
or for automatic selection of a classifier and
number of classes.
* :func:`mapclassify.classifiers.gadf`
* :class:`mapclassify.classifiers.K_classifiers`
"""
def __init__(self, y):
y = np.asarray(y).flatten()
self.name = 'Map Classifier'
self.y = y
self._classify()
self._summary()
def _summary(self):
yb = self.yb
self.classes = [np.nonzero(yb == c)[0].tolist() for c in range(self.k)]
self.tss = self.get_tss()
self.adcm = self.get_adcm()
self.gadf = self.get_gadf()
def _classify(self):
self._set_bins()
self.yb, self.counts = bin1d(self.y, self.bins)
def _update(self, data, *args, **kwargs):
"""
The only thing that *should* happen in this function is
1. input sanitization for pandas
2. classification/reclassification.
Using their __init__ methods, all classifiers can re-classify given
different input parameters or additional data.
If you've got a cleverer updating equation than the intial estimation
equation, remove the call to self.__init__ below and replace it with
the updating function.
"""
if data is not None:
data = np.asarray(data).flatten()
data = np.append(data.flatten(), self.y)
else:
data = self.y
self.__init__(data, *args, **kwargs)
@classmethod
def make(cls, *args, **kwargs):
"""
Configure and create a classifier that will consume data and produce
classifications, given the configuration options specified by this
function.
Note that this like a *partial application* of the relevant class
constructor. `make` creates a function that returns classifications; it
does not actually do the classification.
If you want to classify data directly, use the appropriate class
constructor, like Quantiles, Max_Breaks, etc.
If you *have* a classifier object, but want to find which bins new data
falls into, use find_bin.
Parameters
----------
*args : required positional arguments
all positional arguments required by the classifier,
excluding the input data.
rolling : bool
a boolean configuring the outputted classifier to use
a rolling classifier rather than a new classifier for
each input. If rolling, this adds the current data to
all of the previous data in the classifier, and
rebalances the bins, like a running median
computation.
return_object : bool
a boolean configuring the outputted classifier to
return the classifier object or not
return_bins : bool
a boolean configuring the outputted classifier to
return the bins/breaks or not
return_counts : bool
a boolean configuring the outputted classifier to
return the histogram of objects falling into each bin
or not
Returns
-------
A function that consumes data and returns their bins (and object,
bins/breaks, or counts, if requested).
Note
----
This is most useful when you want to run a classifier many times
with a given configuration, such as when classifying many columns of an
array or dataframe using the same configuration.
Examples
--------
>>> import libpysal as ps
>>> import mapclassify as mc
>>> import geopandas as gpd
>>> df = gpd.read_file(ps.examples.get_path('columbus.dbf'))
>>> classifier = mc.Quantiles.make(k=9)
>>> cl = df[['HOVAL', 'CRIME', 'INC']].apply(classifier)
>>> cl["HOVAL"].values[:10]
array([8, 7, 2, 4, 1, 3, 8, 5, 7, 8])
>>> cl["CRIME"].values[:10]
array([0, 1, 3, 4, 6, 2, 0, 5, 3, 4])
>>> cl["INC"].values[:10]
array([7, 8, 5, 0, 3, 5, 0, 3, 6, 4])
>>> import pandas as pd; from numpy import linspace as lsp
>>> data = [lsp(3,8,num=10), lsp(10, 0, num=10), lsp(-5, 15, num=10)]
>>> data = pd.DataFrame(data).T
>>> data
0 1 2
0 3.000000 10.000000 -5.000000
1 3.555556 8.888889 -2.777778
2 4.111111 7.777778 -0.555556
3 4.666667 6.666667 1.666667
4 5.222222 5.555556 3.888889
5 5.777778 4.444444 6.111111
6 6.333333 3.333333 8.333333
7 6.888889 2.222222 10.555556
8 7.444444 1.111111 12.777778
9 8.000000 0.000000 15.000000
>>> data.apply(mc.Quantiles.make(rolling=True))
0 1 2
0 0 4 0
1 0 4 0
2 1 4 0
3 1 3 0
4 2 2 1
5 2 1 2
6 3 0 4
7 3 0 4
8 4 0 4
9 4 0 4
>>> dbf = ps.io.open(ps.examples.get_path('baltim.dbf'))
>>> data = dbf.by_col_array('PRICE', 'LOTSZ', 'SQFT')
>>> my_bins = [1, 10, 20, 40, 80]
>>> cl = [mc.User_Defined.make(bins=my_bins)(a) for a in data.T]
>>> len(cl)
3
>>> cl[0][:10]
array([4, 5, 5, 5, 4, 4, 5, 4, 4, 5])
"""
# only flag overrides return flag
to_annotate = copy.deepcopy(kwargs)
return_object = kwargs.pop('return_object', False)
return_bins = kwargs.pop('return_bins', False)
return_counts = kwargs.pop('return_counts', False)
rolling = kwargs.pop('rolling', False)
if rolling:
# just initialize a fake classifier
data = list(range(10))
cls_instance = cls(data, *args, **kwargs)
# and empty it, since we'll be using the update
cls_instance.y = np.array([])
else:
cls_instance = None
# wrap init in a closure to make a consumer.
# Qc Na: "Objects/Closures are poor man's Closures/Objects"
def classifier(data, cls_instance=cls_instance):
if rolling:
cls_instance.update(data, inplace=True, **kwargs)
yb = cls_instance.find_bin(data)
else:
cls_instance = cls(data, *args, **kwargs)
yb = cls_instance.yb
outs = [yb, None, None, None]
outs[1] = cls_instance if return_object else None
outs[2] = cls_instance.bins if return_bins else None
outs[3] = cls_instance.counts if return_counts else None
outs = [a for a in outs if a is not None]
if len(outs) == 1:
return outs[0]
else:
return outs
# for debugging/jic, keep around the kwargs.
# in future, we might want to make this a thin class, so that we can
# set a custom repr. Call the class `Binner` or something, that's a
# pre-configured Classifier that just consumes data, bins it, &
# possibly updates the bins.
classifier._options = to_annotate
return classifier
def update(self, y=None, inplace=False, **kwargs):
"""
Add data or change classification parameters.
Parameters
----------
y : array
(n,1) array of data to classify
inplace : bool
whether to conduct the update in place or to return a copy
estimated from the additional specifications.
Additional parameters provided in **kwargs are passed to the init
function of the class. For documentation, check the class constructor.
"""
kwargs.update({'k': kwargs.pop('k', self.k)})
if inplace:
self._update(y, **kwargs)
else:
new = copy.deepcopy(self)
new._update(y, **kwargs)
return new
def __str__(self):
st = self._table_string()
return st
def __repr__(self):
return self._table_string()
def __call__(self, *args, **kwargs):
"""
This will allow the classifier to be called like it's a function.
Whether or not we want to make this be "find_bin" or "update" is a
design decision.
I like this as find_bin, since a classifier's job should be to classify
the data given to it using the rules estimated from the `_classify()`
function.
"""
return self.find_bin(*args)
def get_tss(self):
"""
Total sum of squares around class means
Returns sum of squares over all class means
"""
tss = 0
for class_def in self.classes:
if len(class_def) > 0:
yc = self.y[class_def]
css = yc - yc.mean()
css *= css
tss += sum(css)
return tss
def _set_bins(self):
pass
def get_adcm(self):
"""
Absolute deviation around class median (ADCM).
Calculates the absolute deviations of each observation about its class
median as a measure of fit for the classification method.
Returns sum of ADCM over all classes
"""
adcm = 0
for class_def in self.classes:
if len(class_def) > 0:
yc = self.y[class_def]
yc_med = np.median(yc)
ycd = np.abs(yc - yc_med)
adcm += sum(ycd)
return adcm
def get_gadf(self):
"""
Goodness of absolute deviation of fit
"""
adam = (np.abs(self.y - np.median(self.y))).sum()
gadf = 1 - self.adcm / adam
return gadf
def _table_string(self, width=12, decimal=3):
fmt = ".%df" % decimal
fmt = "%" + fmt
largest = max([len(fmt % i) for i in self.bins])
width = largest
fmt = "%d.%df" % (width, decimal)
fmt = "%" + fmt
h1 = "Lower"
h1 = h1.center(largest)
h2 = " "
h2 = h2.center(10)
h3 = "Upper"
h3 = h3.center(largest + 1)
largest = "%d" % max(self.counts)
largest = len(largest) + 15
h4 = "Count"
h4 = h4.rjust(largest)
table = []
header = h1 + h2 + h3 + h4
table.append(header)
table.append("=" * len(header))
for i, up in enumerate(self.bins):
if i == 0:
left = " " * width
left += " x[i] <= "
else:
left = fmt % self.bins[i - 1]
left += " < x[i] <= "
right = fmt % self.bins[i]
row = left + right
cnt = "%d" % self.counts[i]
cnt = cnt.rjust(largest)
row += cnt
table.append(row)
name = self.name
top = name.center(len(row))
table.insert(0, top)
table.insert(1, " ")
table = "\n".join(table)
return table
|
pysal/mapclassify
|
mapclassify/classifiers.py
|
Fisher_Jenks_Sampled.update
|
python
|
def update(self, y=None, inplace=False, **kwargs):
kwargs.update({'k': kwargs.pop('k', self.k)})
kwargs.update({'pct': kwargs.pop('pct', self.pct)})
kwargs.update({'truncate': kwargs.pop('truncate', self._truncated)})
if inplace:
self._update(y, **kwargs)
else:
new = copy.deepcopy(self)
new._update(y, **kwargs)
return new
|
Add data or change classification parameters.
Parameters
----------
y : array
(n,1) array of data to classify
inplace : bool
whether to conduct the update in place or to return a
copy estimated from the additional specifications.
Additional parameters provided in **kwargs are passed to the init
function of the class. For documentation, check the class constructor.
|
train
|
https://github.com/pysal/mapclassify/blob/5b22ec33f5802becf40557614d90cd38efa1676e/mapclassify/classifiers.py#L1586-L1609
|
[
"def _update(self, data, *args, **kwargs):\n \"\"\"\n The only thing that *should* happen in this function is\n 1. input sanitization for pandas\n 2. classification/reclassification.\n\n Using their __init__ methods, all classifiers can re-classify given\n different input parameters or additional data.\n\n If you've got a cleverer updating equation than the intial estimation\n equation, remove the call to self.__init__ below and replace it with\n the updating function.\n \"\"\"\n if data is not None:\n data = np.asarray(data).flatten()\n data = np.append(data.flatten(), self.y)\n else:\n data = self.y\n self.__init__(data, *args, **kwargs)\n"
] |
class Fisher_Jenks_Sampled(Map_Classifier):
"""
Fisher Jenks optimal classifier - mean based using random sample
Parameters
----------
y : array
(n,1), values to classify
k : int
number of classes required
pct : float
The percentage of n that should form the sample
If pct is specified such that n*pct > 1000, then
pct = 1000./n, unless truncate is False
truncate : boolean
truncate pct in cases where pct * n > 1000., (Default True)
Attributes
----------
yb : array
(n,1), bin ids for observations
bins : array
(k,1), the upper bounds of each class
k : int
the number of classes
counts : array
(k,1), the number of observations falling in each class
Examples
--------
(Turned off due to timing being different across hardware)
For theoretical details see :cite:`Rey_2016`.
"""
def __init__(self, y, k=K, pct=0.10, truncate=True):
self.k = k
n = y.size
if (pct * n > 1000) and truncate:
pct = 1000. / n
ids = np.random.random_integers(0, n - 1, int(n * pct))
yr = y[ids]
yr[-1] = max(y) # make sure we have the upper bound
yr[0] = min(y) # make sure we have the min
self.original_y = y
self.pct = pct
self._truncated = truncate
self.yr = yr
self.yr_n = yr.size
Map_Classifier.__init__(self, yr)
self.yb, self.counts = bin1d(y, self.bins)
self.name = "Fisher_Jenks_Sampled"
self.y = y
self._summary() # have to recalculate summary stats
def _set_bins(self):
fj = Fisher_Jenks(self.y, self.k)
self.bins = fj.bins
|
pysal/mapclassify
|
mapclassify/classifiers.py
|
Max_P_Classifier._ss
|
python
|
def _ss(self, class_def):
yc = self.y[class_def]
css = yc - yc.mean()
css *= css
return sum(css)
|
calculates sum of squares for a class
|
train
|
https://github.com/pysal/mapclassify/blob/5b22ec33f5802becf40557614d90cd38efa1676e/mapclassify/classifiers.py#L2178-L2183
| null |
class Max_P_Classifier(Map_Classifier):
"""
Max_P Map Classification
Based on Max_p regionalization algorithm
Parameters
----------
y : array
(n,1), values to classify
k : int
number of classes required
initial : int
number of initial solutions to use prior to swapping
Attributes
----------
yb : array
(n,1), bin ids for observations,
bins : array
(k,1), the upper bounds of each class
k : int
the number of classes
counts : array
(k,1), the number of observations falling in each class
Examples
--------
>>> import mapclassify as mc
>>> cal = mc.load_example()
>>> mp = mc.Max_P_Classifier(cal)
>>> mp.bins
array([ 8.7 , 20.47, 36.68, 110.74, 4111.45])
>>> mp.counts
array([29, 9, 5, 7, 8])
"""
def __init__(self, y, k=K, initial=1000):
self.k = k
self.initial = initial
Map_Classifier.__init__(self, y)
self.name = "Max_P"
def _set_bins(self):
x = self.y.copy()
k = self.k
q = quantile(x, k)
if x.ndim == 1:
x.shape = (x.size, 1)
n, tmp = x.shape
x.sort(axis=0)
# find best of initial solutions
solution = 0
best_tss = x.var() * x.shape[0]
tss_all = np.zeros((self.initial, 1))
while solution < self.initial:
remaining = list(range(n))
seeds = [
np.nonzero(di == min(di))[0][0]
for di in [np.abs(x - qi) for qi in q]
]
rseeds = np.random.permutation(list(range(k))).tolist()
[remaining.remove(seed) for seed in seeds]
self.classes = classes = []
[classes.append([seed]) for seed in seeds]
while rseeds:
seed_id = rseeds.pop()
current = classes[seed_id]
growing = True
while growing:
current = classes[seed_id]
low = current[0]
high = current[-1]
left = low - 1
right = high + 1
move_made = False
if left in remaining:
current.insert(0, left)
remaining.remove(left)
move_made = True
if right in remaining:
current.append(right)
remaining.remove(right)
move_made = True
if move_made:
classes[seed_id] = current
else:
growing = False
tss = _fit(self.y, classes)
tss_all[solution] = tss
if tss < best_tss:
best_solution = classes
best_it = solution
best_tss = tss
solution += 1
classes = best_solution
self.best_it = best_it
self.tss = best_tss
self.a2c = a2c = {}
self.tss_all = tss_all
for r, cl in enumerate(classes):
for a in cl:
a2c[a] = r
swapping = True
while swapping:
rseeds = np.random.permutation(list(range(k))).tolist()
total_moves = 0
while rseeds:
id = rseeds.pop()
growing = True
total_moves = 0
while growing:
target = classes[id]
left = target[0] - 1
right = target[-1] + 1
n_moves = 0
if left in a2c:
left_class = classes[a2c[left]]
if len(left_class) > 1:
a = left_class[-1]
if self._swap(left_class, target, a):
target.insert(0, a)
left_class.remove(a)
a2c[a] = id
n_moves += 1
if right in a2c:
right_class = classes[a2c[right]]
if len(right_class) > 1:
a = right_class[0]
if self._swap(right_class, target, a):
target.append(a)
right_class.remove(a)
n_moves += 1
a2c[a] = id
if not n_moves:
growing = False
total_moves += n_moves
if not total_moves:
swapping = False
xs = self.y.copy()
xs.sort()
self.bins = np.array([xs[cl][-1] for cl in classes])
def _swap(self, class1, class2, a):
    """Evaluate the cost of moving observation `a` from class1 to class2.

    Returns True when moving `a` does not increase the combined
    within-class sum of squares of the two classes, False otherwise.
    """
    before = self._ss(class1) + self._ss(class2)
    # Simulate the move on shallow copies; the originals stay untouched.
    donor = copy.copy(class1)
    receiver = copy.copy(class2)
    donor.remove(a)
    receiver.append(a)
    after = self._ss(donor) + self._ss(receiver)
    return not (before < after)
def update(self, y=None, inplace=False, **kwargs):
    """
    Add data or change classification parameters.

    Parameters
    ----------
    y : array
        (n,1) array of data to classify
    inplace : bool
        whether to conduct the update in place or to return a
        copy estimated from the additional specifications.

    Additional parameters provided in **kwargs are passed to the init
    function of the class. For documentation, check the class constructor.
    """
    # Preserve the current `initial` setting unless the caller overrides it.
    kwargs.update({'initial': kwargs.pop('initial', self.initial)})
    # Bug fix: the original passed an undefined name `bins` to _update(),
    # raising NameError on every call. Max_P derives its own bins from the
    # data, so only the new data and keyword options are forwarded.
    if inplace:
        self._update(y, **kwargs)
    else:
        new = copy.deepcopy(self)
        new._update(y, **kwargs)
        return new
|
pysal/mapclassify
|
mapclassify/classifiers.py
|
Max_P_Classifier._swap
|
python
|
def _swap(self, class1, class2, a):
ss1 = self._ss(class1)
ss2 = self._ss(class2)
tss1 = ss1 + ss2
class1c = copy.copy(class1)
class2c = copy.copy(class2)
class1c.remove(a)
class2c.append(a)
ss1 = self._ss(class1c)
ss2 = self._ss(class2c)
tss2 = ss1 + ss2
if tss1 < tss2:
return False
else:
return True
|
evaluate cost of moving a from class1 to class2
|
train
|
https://github.com/pysal/mapclassify/blob/5b22ec33f5802becf40557614d90cd38efa1676e/mapclassify/classifiers.py#L2185-L2200
| null |
class Max_P_Classifier(Map_Classifier):
"""
Max_P Map Classification
Based on Max_p regionalization algorithm
Parameters
----------
y : array
(n,1), values to classify
k : int
number of classes required
initial : int
number of initial solutions to use prior to swapping
Attributes
----------
yb : array
(n,1), bin ids for observations,
bins : array
(k,1), the upper bounds of each class
k : int
the number of classes
counts : array
(k,1), the number of observations falling in each class
Examples
--------
>>> import mapclassify as mc
>>> cal = mc.load_example()
>>> mp = mc.Max_P_Classifier(cal)
>>> mp.bins
array([ 8.7 , 20.47, 36.68, 110.74, 4111.45])
>>> mp.counts
array([29, 9, 5, 7, 8])
"""
def __init__(self, y, k=K, initial=1000):
self.k = k
self.initial = initial
Map_Classifier.__init__(self, y)
self.name = "Max_P"
def _set_bins(self):
x = self.y.copy()
k = self.k
q = quantile(x, k)
if x.ndim == 1:
x.shape = (x.size, 1)
n, tmp = x.shape
x.sort(axis=0)
# find best of initial solutions
solution = 0
best_tss = x.var() * x.shape[0]
tss_all = np.zeros((self.initial, 1))
while solution < self.initial:
remaining = list(range(n))
seeds = [
np.nonzero(di == min(di))[0][0]
for di in [np.abs(x - qi) for qi in q]
]
rseeds = np.random.permutation(list(range(k))).tolist()
[remaining.remove(seed) for seed in seeds]
self.classes = classes = []
[classes.append([seed]) for seed in seeds]
while rseeds:
seed_id = rseeds.pop()
current = classes[seed_id]
growing = True
while growing:
current = classes[seed_id]
low = current[0]
high = current[-1]
left = low - 1
right = high + 1
move_made = False
if left in remaining:
current.insert(0, left)
remaining.remove(left)
move_made = True
if right in remaining:
current.append(right)
remaining.remove(right)
move_made = True
if move_made:
classes[seed_id] = current
else:
growing = False
tss = _fit(self.y, classes)
tss_all[solution] = tss
if tss < best_tss:
best_solution = classes
best_it = solution
best_tss = tss
solution += 1
classes = best_solution
self.best_it = best_it
self.tss = best_tss
self.a2c = a2c = {}
self.tss_all = tss_all
for r, cl in enumerate(classes):
for a in cl:
a2c[a] = r
swapping = True
while swapping:
rseeds = np.random.permutation(list(range(k))).tolist()
total_moves = 0
while rseeds:
id = rseeds.pop()
growing = True
total_moves = 0
while growing:
target = classes[id]
left = target[0] - 1
right = target[-1] + 1
n_moves = 0
if left in a2c:
left_class = classes[a2c[left]]
if len(left_class) > 1:
a = left_class[-1]
if self._swap(left_class, target, a):
target.insert(0, a)
left_class.remove(a)
a2c[a] = id
n_moves += 1
if right in a2c:
right_class = classes[a2c[right]]
if len(right_class) > 1:
a = right_class[0]
if self._swap(right_class, target, a):
target.append(a)
right_class.remove(a)
n_moves += 1
a2c[a] = id
if not n_moves:
growing = False
total_moves += n_moves
if not total_moves:
swapping = False
xs = self.y.copy()
xs.sort()
self.bins = np.array([xs[cl][-1] for cl in classes])
def _ss(self, class_def):
"""calculates sum of squares for a class"""
yc = self.y[class_def]
css = yc - yc.mean()
css *= css
return sum(css)
def update(self, y=None, inplace=False, **kwargs):
    """
    Add data or change classification parameters.

    Parameters
    ----------
    y : array
        (n,1) array of data to classify
    inplace : bool
        whether to conduct the update in place or to return a
        copy estimated from the additional specifications.

    Additional parameters provided in **kwargs are passed to the init
    function of the class. For documentation, check the class constructor.
    """
    # Preserve the current `initial` setting unless the caller overrides it.
    kwargs.update({'initial': kwargs.pop('initial', self.initial)})
    # Bug fix: the original passed an undefined name `bins` to _update(),
    # raising NameError on every call. Max_P derives its own bins from the
    # data, so only the new data and keyword options are forwarded.
    if inplace:
        self._update(y, **kwargs)
    else:
        new = copy.deepcopy(self)
        new._update(y, **kwargs)
        return new
|
mikemaccana/python-docx
|
docx.py
|
opendocx
|
python
|
def opendocx(file):
'''Open a docx file, return a document XML tree'''
mydoc = zipfile.ZipFile(file)
xmlcontent = mydoc.read('word/document.xml')
document = etree.fromstring(xmlcontent)
return document
|
Open a docx file, return a document XML tree
|
train
|
https://github.com/mikemaccana/python-docx/blob/4c9b46dbebe3d2a9b82dbcd35af36584a36fd9fe/docx.py#L81-L86
| null |
# encoding: utf-8
"""
Open and modify Microsoft Word 2007 docx files (called 'OpenXML' and
'Office OpenXML' by Microsoft)
Part of Python's docx module - http://github.com/mikemaccana/python-docx
See LICENSE for licensing information.
"""
import os
import re
import time
import shutil
import zipfile
from lxml import etree
from os.path import abspath, basename, join
try:
from PIL import Image
except ImportError:
import Image
try:
from PIL.ExifTags import TAGS
except ImportError:
TAGS = {}
from exceptions import PendingDeprecationWarning
from warnings import warn
import logging
log = logging.getLogger(__name__)
# Record template directory's location which is just 'template' for a docx
# developer or 'site-packages/docx-template' if you have installed docx
template_dir = join(os.path.dirname(__file__), 'docx-template') # installed
if not os.path.isdir(template_dir):
template_dir = join(os.path.dirname(__file__), 'template') # dev
# All Word prefixes / namespace matches used in document.xml & core.xml.
# LXML doesn't actually use prefixes (just the real namespace) , but these
# make it easier to copy Word output more easily.
nsprefixes = {
'mo': 'http://schemas.microsoft.com/office/mac/office/2008/main',
'o': 'urn:schemas-microsoft-com:office:office',
've': 'http://schemas.openxmlformats.org/markup-compatibility/2006',
# Text Content
'w': 'http://schemas.openxmlformats.org/wordprocessingml/2006/main',
'w10': 'urn:schemas-microsoft-com:office:word',
'wne': 'http://schemas.microsoft.com/office/word/2006/wordml',
# Drawing
'a': 'http://schemas.openxmlformats.org/drawingml/2006/main',
'm': 'http://schemas.openxmlformats.org/officeDocument/2006/math',
'mv': 'urn:schemas-microsoft-com:mac:vml',
'pic': 'http://schemas.openxmlformats.org/drawingml/2006/picture',
'v': 'urn:schemas-microsoft-com:vml',
'wp': ('http://schemas.openxmlformats.org/drawingml/2006/wordprocessing'
'Drawing'),
# Properties (core and extended)
'cp': ('http://schemas.openxmlformats.org/package/2006/metadata/core-pr'
'operties'),
'dc': 'http://purl.org/dc/elements/1.1/',
'ep': ('http://schemas.openxmlformats.org/officeDocument/2006/extended-'
'properties'),
'xsi': 'http://www.w3.org/2001/XMLSchema-instance',
# Content Types
'ct': 'http://schemas.openxmlformats.org/package/2006/content-types',
# Package Relationships
'r': ('http://schemas.openxmlformats.org/officeDocument/2006/relationsh'
'ips'),
'pr': 'http://schemas.openxmlformats.org/package/2006/relationships',
# Dublin Core document properties
'dcmitype': 'http://purl.org/dc/dcmitype/',
'dcterms': 'http://purl.org/dc/terms/'}
def newdocument():
    """Build and return a fresh document XML tree with an empty body."""
    doc = makeelement('document')
    body = makeelement('body')
    doc.append(body)
    return doc
def makeelement(tagname, tagtext=None, nsprefix='w', attributes=None,
attrnsprefix=None):
'''Create an lxml element in the given namespace and return it.

tagname: local tag name; nsprefix: key into the module-level
`nsprefixes` map (a list produces an nsmap, but only the first prefix
is used for the tag); attributes: dict of attribute name -> value;
attrnsprefix: optional namespace prefix for the attributes.
tagtext, if given, becomes the element's text content.
'''
# Deal with list of nsprefix by making namespacemap
namespacemap = None
if isinstance(nsprefix, list):
namespacemap = {}
for prefix in nsprefix:
namespacemap[prefix] = nsprefixes[prefix]
# FIXME: rest of code below expects a single prefix
nsprefix = nsprefix[0]
if nsprefix:
# Clark notation: '{namespace-uri}tag'
namespace = '{%s}' % nsprefixes[nsprefix]
else:
# For when namespace = None
namespace = ''
newelement = etree.Element(namespace+tagname, nsmap=namespacemap)
# Add attributes with namespaces
if attributes:
# If they haven't bothered setting attribute namespace, use an empty
# string (equivalent of no namespace)
if not attrnsprefix:
# Quick hack: it seems every element that has a 'w' nsprefix for
# its tag uses the same prefix for it's attributes
if nsprefix == 'w':
attributenamespace = namespace
else:
attributenamespace = ''
else:
attributenamespace = '{'+nsprefixes[attrnsprefix]+'}'
for tagattribute in attributes:
newelement.set(attributenamespace+tagattribute,
attributes[tagattribute])
if tagtext:
newelement.text = tagtext
return newelement
def pagebreak(type='page', orient='portrait'):
    """Insert a break, default 'page'.

    Parameters
    ----------
    type : str
        'page' or 'section'; anything else raises ValueError.
    orient : str
        For section breaks, the page orientation: 'portrait' or
        'landscape'; anything else raises ValueError.

    Returns our page break <w:p> element.
    See http://openxmldeveloper.org/forums/thread/4075.aspx
    """
    # Need to enumerate different types of page breaks.
    validtypes = ['page', 'section']
    if type not in validtypes:
        tmpl = 'Page break style "%s" not implemented. Valid styles: %s.'
        raise ValueError(tmpl % (type, validtypes))
    pagebreak = makeelement('p')
    if type == 'page':
        run = makeelement('r')
        br = makeelement('br', attributes={'type': type})
        run.append(br)
        pagebreak.append(run)
    elif type == 'section':
        pPr = makeelement('pPr')
        sectPr = makeelement('sectPr')
        if orient == 'portrait':
            pgSz = makeelement('pgSz', attributes={'w': '12240', 'h': '15840'})
        elif orient == 'landscape':
            pgSz = makeelement('pgSz', attributes={'h': '12240', 'w': '15840',
                                                   'orient': 'landscape'})
        else:
            # Bug fix: an unrecognized orient previously fell through and the
            # sectPr.append(pgSz) below raised an opaque NameError.
            raise ValueError(
                'Page orientation "%s" not supported. Valid orientations: '
                "['portrait', 'landscape']." % orient)
        sectPr.append(pgSz)
        pPr.append(sectPr)
        pagebreak.append(pPr)
    return pagebreak
def paragraph(paratext, style='BodyText', breakbefore=False, jc='left'):
    """
    Return a new paragraph element containing *paratext*. The paragraph's
    default style is 'Body Text', but a new style may be set using the
    *style* parameter.

    @param string jc: Paragraph alignment, possible values:
                      left, center, right, both (justified), ...
                      see http://www.schemacentral.com/sc/ooxml/t-w_ST_Jc.html
                      for a full list

    If *paratext* is a list, each item is a (text, char_format_str) 2-tuple
    and becomes its own run. char_format_str may contain 'b', 'i', and/or
    'u' for bold, italic, and underline. For example:

        paratext = [
            ('some bold text', 'b'),
            ('some normal text', ''),
            ('some italic underlined text', 'iu')
        ]
    """
    para = makeelement('p')
    if not isinstance(paratext, list):
        paratext = [(paratext, '')]
    # Paragraph-level properties: named style plus alignment.
    props = makeelement('pPr')
    props.append(makeelement('pStyle', attributes={'val': style}))
    props.append(makeelement('jc', attributes={'val': jc}))
    para.append(props)
    # One run per (text, format) item.
    for item in paratext:
        if isinstance(item, (list, tuple)):
            text, fmt = item
        else:
            text, fmt = item, ''
        text_elm = makeelement('t', tagtext=text)
        if len(text.strip()) < len(text):
            # Leading/trailing whitespace is significant; tell Word to keep it.
            text_elm.set('{http://www.w3.org/XML/1998/namespace}space',
                         'preserve')
        run = makeelement('r')
        run_props = makeelement('rPr')
        if 'b' in fmt:
            run_props.append(makeelement('b'))
        if 'i' in fmt:
            run_props.append(makeelement('i'))
        if 'u' in fmt:
            run_props.append(makeelement('u', attributes={'val': 'single'}))
        run.append(run_props)
        # Insert lastRenderedPageBreak for assistive technologies like
        # document narrators to know when a page break occurred.
        if breakbefore:
            run.append(makeelement('lastRenderedPageBreak'))
        run.append(text_elm)
        para.append(run)
    return para
def contenttypes():
    """Build the [Content_Types].xml tree declaring every package part."""
    types = etree.fromstring(
        '<Types xmlns="http://schemas.openxmlformats.org/package/2006/conten'
        't-types"></Types>')
    # Explicit per-part overrides.
    parts = {
        '/word/theme/theme1.xml': 'application/vnd.openxmlformats-officedocu'
                                  'ment.theme+xml',
        '/word/fontTable.xml': 'application/vnd.openxmlformats-officedocu'
                               'ment.wordprocessingml.fontTable+xml',
        '/docProps/core.xml': 'application/vnd.openxmlformats-package.co'
                              're-properties+xml',
        '/docProps/app.xml': 'application/vnd.openxmlformats-officedocu'
                             'ment.extended-properties+xml',
        '/word/document.xml': 'application/vnd.openxmlformats-officedocu'
                              'ment.wordprocessingml.document.main+xml',
        '/word/settings.xml': 'application/vnd.openxmlformats-officedocu'
                              'ment.wordprocessingml.settings+xml',
        '/word/numbering.xml': 'application/vnd.openxmlformats-officedocu'
                               'ment.wordprocessingml.numbering+xml',
        '/word/styles.xml': 'application/vnd.openxmlformats-officedocu'
                            'ment.wordprocessingml.styles+xml',
        '/word/webSettings.xml': 'application/vnd.openxmlformats-officedocu'
                                 'ment.wordprocessingml.webSettings+xml'}
    for partname, contenttype in parts.items():
        types.append(makeelement('Override', nsprefix=None,
                                 attributes={'PartName': partname,
                                             'ContentType': contenttype}))
    # Default content types keyed on file extension.
    filetypes = {
        'gif': 'image/gif',
        'jpeg': 'image/jpeg',
        'jpg': 'image/jpeg',
        'png': 'image/png',
        'rels': 'application/vnd.openxmlformats-package.relationships+xml',
        'xml': 'application/xml'
    }
    for extension, contenttype in filetypes.items():
        types.append(makeelement('Default', nsprefix=None,
                                 attributes={'Extension': extension,
                                             'ContentType': contenttype}))
    return types
def heading(headingtext, headinglevel, lang='en'):
    '''Make a new heading, return the heading element.

    Raises KeyError if *lang* is not one of the supported locales.'''
    style_names = {'en': 'Heading', 'it': 'Titolo'}
    para = makeelement('p')
    # Paragraph properties select the localized heading style, e.g. Heading1.
    props = makeelement('pPr')
    props.append(makeelement(
        'pStyle', attributes={'val': style_names[lang] + str(headinglevel)}))
    # The heading text lives in a single run.
    run = makeelement('r')
    run.append(makeelement('t', tagtext=headingtext))
    para.append(props)
    para.append(run)
    return para
def table(contents, heading=True, colw=None, cwunit='dxa', tblw=0,
          twunit='auto', borders={}, celstyle=None):
    """
    Return a table element based on specified parameters

    @param list contents: A list of lists describing contents. Every item in
                          the list can be a string or a valid XML element
                          itself. It can also be a list. In that case all the
                          listed elements will be merged into the cell.
    @param bool heading: Tells whether first line should be treated as
                         heading or not
    @param list colw: list of integer column widths specified in wunitS.
    @param str cwunit: Unit used for column width:
                       'pct': fiftieths of a percent
                       'dxa': twentieths of a point
                       'nil': no width
                       'auto': automagically determined
    @param int tblw: Table width
    @param str twunit: Unit used for table width. Same possible values as
                       cwunit.
    @param dict borders: Dictionary defining table border. Supported keys
                         are: 'top', 'left', 'bottom', 'right',
                         'insideH', 'insideV', 'all'.
                         When specified, the 'all' key has precedence over
                         others. Each key must define a dict of border
                         attributes:
                         color: The color of the border, in hex or 'auto'
                         space: The space, measured in points
                         sz: The size of the border, in eighths of a point
                         val: The style of the border, see
                         http://www.schemacentral.com/sc/ooxml/t-w_ST_Border.htm
    @param list celstyle: Specify the style for each colum, list of dicts.
                          supported keys:
                          'align': specify the alignment, see paragraph
                          documentation.
    @return lxml.etree: Generated XML etree element
    """
    # NOTE(review): 'borders={}' is a mutable default argument; it is only
    # read (never mutated) here, so it is currently harmless, but fragile.
    table = makeelement('tbl')
    columns = len(contents[0])  # column count is taken from the first row
    # Table properties
    tableprops = makeelement('tblPr')
    tablestyle = makeelement('tblStyle', attributes={'val': ''})
    tableprops.append(tablestyle)
    tablewidth = makeelement(
        'tblW', attributes={'w': str(tblw), 'type': str(twunit)})
    tableprops.append(tablewidth)
    if len(borders.keys()):
        tableborders = makeelement('tblBorders')
        for b in ['top', 'left', 'bottom', 'right', 'insideH', 'insideV']:
            if b in borders.keys() or 'all' in borders.keys():
                # 'all' takes precedence over the per-side definitions
                k = 'all' if 'all' in borders.keys() else b
                attrs = {}
                for a in borders[k].keys():
                    attrs[a] = unicode(borders[k][a])  # NOTE: Python 2 builtin
                borderelem = makeelement(b, attributes=attrs)
                tableborders.append(borderelem)
        tableprops.append(tableborders)
    tablelook = makeelement('tblLook', attributes={'val': '0400'})
    tableprops.append(tablelook)
    table.append(tableprops)
    # Table Grid: one gridCol per column; 2390 dxa is the fallback width
    tablegrid = makeelement('tblGrid')
    for i in range(columns):
        attrs = {'w': str(colw[i]) if colw else '2390'}
        tablegrid.append(makeelement('gridCol', attributes=attrs))
    table.append(tablegrid)
    # Heading Row
    row = makeelement('tr')
    rowprops = makeelement('trPr')
    cnfStyle = makeelement('cnfStyle', attributes={'val': '000000100000'})
    rowprops.append(cnfStyle)
    row.append(rowprops)
    if heading:
        i = 0
        for heading in contents[0]:
            cell = makeelement('tc')
            # Cell properties
            cellprops = makeelement('tcPr')
            if colw:
                wattr = {'w': str(colw[i]), 'type': cwunit}
            else:
                wattr = {'w': '0', 'type': 'auto'}
            cellwidth = makeelement('tcW', attributes=wattr)
            # Shaded background marks this cell as part of the header row
            cellstyle = makeelement('shd', attributes={'val': 'clear',
                                                       'color': 'auto',
                                                       'fill': 'FFFFFF',
                                                       'themeFill': 'text2',
                                                       'themeFillTint': '99'})
            cellprops.append(cellwidth)
            cellprops.append(cellstyle)
            cell.append(cellprops)
            # Paragraph (Content)
            if not isinstance(heading, (list, tuple)):
                heading = [heading]
            for h in heading:
                if isinstance(h, etree._Element):
                    cell.append(h)
                else:
                    cell.append(paragraph(h, jc='center'))
            row.append(cell)
            i += 1
        table.append(row)
    # Contents Rows
    # NOTE(review): the loop above rebinds 'heading' to the last header
    # cell's content (a non-empty list), so this slice still starts at row 1
    # when a heading was emitted — confirm before refactoring.
    for contentrow in contents[1 if heading else 0:]:
        row = makeelement('tr')
        i = 0
        for content in contentrow:
            cell = makeelement('tc')
            # Properties
            cellprops = makeelement('tcPr')
            if colw:
                wattr = {'w': str(colw[i]), 'type': cwunit}
            else:
                wattr = {'w': '0', 'type': 'auto'}
            cellwidth = makeelement('tcW', attributes=wattr)
            cellprops.append(cellwidth)
            cell.append(cellprops)
            # Paragraph (Content)
            if not isinstance(content, (list, tuple)):
                content = [content]
            for c in content:
                if isinstance(c, etree._Element):
                    cell.append(c)
                else:
                    if celstyle and 'align' in celstyle[i].keys():
                        align = celstyle[i]['align']
                    else:
                        align = 'left'
                    cell.append(paragraph(c, jc=align))
            row.append(cell)
            i += 1
        table.append(row)
    return table
def picture(
        relationshiplist, picname, picdescription, pixelwidth=None,
        pixelheight=None, nochangeaspect=True, nochangearrowheads=True,
        imagefiledict=None):
    """
    Take a relationshiplist, picture file name, and return a paragraph
    containing the image and an updated relationshiplist

    Returns (relationshiplist, paragraph) when *imagefiledict* is None,
    otherwise (relationshiplist, paragraph, imagefiledict).
    """
    if imagefiledict is None:
        warn(
            'Using picture() without imagefiledict parameter will be depreca'
            'ted in the future.', PendingDeprecationWarning
        )
    # http://openxmldeveloper.org/articles/462.aspx
    # Create an image. Size may be specified, otherwise it will based on the
    # pixel size of image. Return a paragraph containing the picture
    # Set relationship ID to that of the image or the first available one
    picid = '2'  # hard-coded id reused below for the wp:docPr element
    picpath = abspath(picname)
    if imagefiledict is not None:
        # Keep track of the image files in a separate dictionary so they don't
        # need to be copied into the template directory
        if picpath not in imagefiledict:
            picrelid = 'rId' + str(len(relationshiplist) + 1)
            imagefiledict[picpath] = picrelid
            relationshiplist.append([
                'http://schemas.openxmlformats.org/officeDocument/2006/relat'
                'ionships/image',
                'media/%s_%s' % (picrelid, basename(picpath))
            ])
        else:
            picrelid = imagefiledict[picpath]
    else:
        # Copy files into template directory for backwards compatibility
        # Images still accumulate in the template directory this way
        picrelid = 'rId' + str(len(relationshiplist) + 1)
        relationshiplist.append([
            'http://schemas.openxmlformats.org/officeDocument/2006/relations'
            'hips/image', 'media/' + picname
        ])
        media_dir = join(template_dir, 'word', 'media')
        if not os.path.isdir(media_dir):
            os.mkdir(media_dir)
        shutil.copyfile(picname, join(media_dir, picname))
    image = Image.open(picpath)
    # Extract EXIF data, if available
    try:
        exif = image._getexif()
        exif = {} if exif is None else exif
    except:
        # NOTE(review): bare except deliberately swallows any EXIF failure
        # (best-effort); narrowing it could change behavior on exotic files.
        exif = {}
    imageExif = {}
    for tag, value in exif.items():
        imageExif[TAGS.get(tag, tag)] = value
    # Map the 8 EXIF orientation codes onto rotation angle and mirror flags.
    imageOrientation = imageExif.get('Orientation', 1)
    imageAngle = {
        1: 0, 2: 0, 3: 180, 4: 0, 5: 90, 6: 90, 7: 270, 8: 270
    }[imageOrientation]
    imageFlipH = 'true' if imageOrientation in (2, 5, 7) else 'false'
    imageFlipV = 'true' if imageOrientation == 4 else 'false'
    # Check if the user has specified a size
    if not pixelwidth or not pixelheight:
        # If not, get info from the picture itself
        pixelwidth, pixelheight = image.size[0:2]
    # Swap width and height if necessary
    if imageOrientation in (5, 6, 7, 8):
        pixelwidth, pixelheight = pixelheight, pixelwidth
    # OpenXML measures on-screen objects in English Metric Units
    # 1cm = 36000 EMUs
    # NOTE(review): 12700 EMU equals one *point*, so each pixel is treated
    # as one point here — confirm the intended scaling before changing.
    emuperpixel = 12700
    width = str(pixelwidth * emuperpixel)
    height = str(pixelheight * emuperpixel)
    # There are 3 main elements inside a picture
    # 1. The Blipfill - specifies how the image fills the picture area
    #    (stretch, tile, etc.)
    blipfill = makeelement('blipFill', nsprefix='pic')
    blipfill.append(makeelement('blip', nsprefix='a', attrnsprefix='r',
                                attributes={'embed': picrelid}))
    stretch = makeelement('stretch', nsprefix='a')
    stretch.append(makeelement('fillRect', nsprefix='a'))
    blipfill.append(makeelement('srcRect', nsprefix='a'))
    blipfill.append(stretch)
    # 2. The non visual picture properties
    nvpicpr = makeelement('nvPicPr', nsprefix='pic')
    cnvpr = makeelement(
        'cNvPr', nsprefix='pic',
        attributes={'id': '0', 'name': 'Picture 1', 'descr': picdescription}
    )
    nvpicpr.append(cnvpr)
    cnvpicpr = makeelement('cNvPicPr', nsprefix='pic')
    cnvpicpr.append(makeelement(
        'picLocks', nsprefix='a',
        attributes={'noChangeAspect': str(int(nochangeaspect)),
                    'noChangeArrowheads': str(int(nochangearrowheads))}))
    nvpicpr.append(cnvpicpr)
    # 3. The Shape properties
    sppr = makeelement('spPr', nsprefix='pic', attributes={'bwMode': 'auto'})
    xfrm = makeelement(
        'xfrm', nsprefix='a', attributes={
            'rot': str(imageAngle * 60000), 'flipH': imageFlipH,
            'flipV': imageFlipV
        }
    )
    xfrm.append(
        makeelement('off', nsprefix='a', attributes={'x': '0', 'y': '0'})
    )
    xfrm.append(
        makeelement(
            'ext', nsprefix='a', attributes={'cx': width, 'cy': height}
        )
    )
    prstgeom = makeelement(
        'prstGeom', nsprefix='a', attributes={'prst': 'rect'}
    )
    prstgeom.append(makeelement('avLst', nsprefix='a'))
    sppr.append(xfrm)
    sppr.append(prstgeom)
    # Add our 3 parts to the picture element
    pic = makeelement('pic', nsprefix='pic')
    pic.append(nvpicpr)
    pic.append(blipfill)
    pic.append(sppr)
    # Now make the supporting elements
    # The following sequence is just: make element, then add its children
    graphicdata = makeelement(
        'graphicData', nsprefix='a',
        attributes={'uri': ('http://schemas.openxmlformats.org/drawingml/200'
                            '6/picture')})
    graphicdata.append(pic)
    graphic = makeelement('graphic', nsprefix='a')
    graphic.append(graphicdata)
    framelocks = makeelement('graphicFrameLocks', nsprefix='a',
                             attributes={'noChangeAspect': '1'})
    framepr = makeelement('cNvGraphicFramePr', nsprefix='wp')
    framepr.append(framelocks)
    docpr = makeelement('docPr', nsprefix='wp',
                        attributes={'id': picid, 'name': 'Picture 1',
                                    'descr': picdescription})
    effectextent = makeelement('effectExtent', nsprefix='wp',
                               attributes={'l': '25400', 't': '0', 'r': '0',
                                           'b': '0'})
    extent = makeelement('extent', nsprefix='wp',
                         attributes={'cx': width, 'cy': height})
    inline = makeelement('inline', attributes={'distT': "0", 'distB': "0",
                                               'distL': "0", 'distR': "0"},
                         nsprefix='wp')
    inline.append(extent)
    inline.append(effectextent)
    inline.append(docpr)
    inline.append(framepr)
    inline.append(graphic)
    drawing = makeelement('drawing')
    drawing.append(inline)
    run = makeelement('r')
    run.append(drawing)
    paragraph = makeelement('p')
    paragraph.append(run)
    if imagefiledict is not None:
        return relationshiplist, paragraph, imagefiledict
    else:
        return relationshiplist, paragraph
def search(document, search):
    '''Search a document for a regex, return success / fail result'''
    pattern = re.compile(search)
    text_tag = '{%s}t' % nsprefixes['w']
    # Scan every w:t (text) element; succeed on the first hit.
    for element in document.iter():
        if element.tag == text_tag and element.text:
            if pattern.search(element.text):
                return True
    return False
def replace(document, search, replace):
    """
    Replace all occurrences of *search* (a regex) inside every w:t text
    element with *replace*; return the (mutated) document.
    """
    pattern = re.compile(search)
    text_tag = '{%s}t' % nsprefixes['w']
    for element in document.iter():
        if element.tag == text_tag and element.text:
            if pattern.search(element.text):
                element.text = re.sub(search, replace, element.text)
    return document
def clean(document):
    """Perform misc cleaning operations on documents.

    Removes w:t and w:r elements that have neither text nor children.
    Returns the cleaned document.
    """
    for tagname in ('t', 'r'):
        fulltag = '{%s}%s' % (nsprefixes['w'], tagname)
        # Collect first, then remove, so we never mutate during iteration.
        doomed = [element for element in document.iter()
                  if element.tag == fulltag
                  and not element.text and not len(element)]
        for element in doomed:
            element.getparent().remove(element)
    return document
def findTypeParent(element, tag):
    """ Finds first parent of element of the given type

    @param object element: etree element
    @param string tag: the tag of the parent to search for

    @return object element: the found parent or None when not found
    """
    # Walk up the ancestor chain and stop cleanly at the root. The old loop
    # called .getparent() unconditionally, so reaching the root (where
    # getparent() returns None) raised AttributeError on 'p.tag' and made
    # the final 'return None' unreachable.
    p = element.getparent()
    while p is not None:
        if p.tag == tag:
            return p
        p = p.getparent()
    # Not found
    return None
def AdvSearch(document, search, bs=3):
    '''Return set of all regex matches

    This is an advanced version of python-docx.search() that takes into
    account blocks of <bs> elements at a time.

    What it does:
    It searches the entire document body for text blocks.
    Since the text to search could be spread across multiple text blocks,
    we need to adopt some sort of algorithm to handle this situation.
    The smaller matching group of blocks (up to bs) is then adopted.
    If the matching group has more than one block, blocks other than first
    are cleared and all the replacement text is put on first block.

    Examples:
    original text blocks : [ 'Hel', 'lo,', ' world!' ]
    search : 'Hello,'
    output blocks : [ 'Hello,' ]

    original text blocks : [ 'Hel', 'lo', ' __', 'name', '__!' ]
    search : '(__[a-z]+__)'
    output blocks : [ '__name__' ]

    @param instance document: The original document
    @param str search: The text to search for (regexp)
    @param int bs: size of the sliding window of text elements (see above)

    @return set All occurrences of the search string
    '''
    # Compile the search regexp
    searchre = re.compile(search)
    matches = []
    # Will match against searchels. Searchels is a list that contains last
    # n text elements found in the document. 1 < n < bs
    searchels = []
    for element in document.iter():
        if element.tag == '{%s}t' % nsprefixes['w']:  # t (text) elements
            if element.text:
                # Add this element to searchels
                searchels.append(element)
                if len(searchels) > bs:
                    # If searchels is too long, remove first elements
                    searchels.pop(0)
                # Search all combinations of searchels, starting from
                # smaller up to bigger ones. This is O(bs^2) work per text
                # element, which is fine for the small default window.
                # l = search length
                # s = search start
                # e = element IDs to merge
                found = False
                for l in range(1, len(searchels)+1):
                    if found:
                        break
                    for s in range(len(searchels)):
                        if found:
                            break
                        if s+l <= len(searchels):
                            e = range(s, s+l)
                            # Concatenate the window's text and search it
                            txtsearch = ''
                            for k in e:
                                txtsearch += searchels[k].text
                            # Search for the text in the whole txtsearch
                            match = searchre.search(txtsearch)
                            if match:
                                matches.append(match.group())
                                found = True
    return set(matches)
def advReplace(document, search, replace, bs=3):
    """
    Replace all occurrences of string with a different string, return updated
    document

    This is a modified version of python-docx.replace() that takes into
    account blocks of <bs> elements at a time. The replace element can also
    be a string or an xml etree element.

    What it does:
    It searches the entire document body for text blocks.
    Then scans those text blocks for the search pattern.
    Since the text to search could be spread across multiple text blocks,
    we need to adopt some sort of algorithm to handle this situation.
    The smaller matching group of blocks (up to bs) is then adopted.
    If the matching group has more than one block, blocks other than first
    are cleared and all the replacement text is put on first block.

    Examples:
    original text blocks : [ 'Hel', 'lo,', ' world!' ]
    search / replace: 'Hello,' / 'Hi!'
    output blocks : [ 'Hi!', '', ' world!' ]

    original text blocks : [ 'Hel', 'lo,', ' world!' ]
    search / replace: 'Hello, world' / 'Hi!'
    output blocks : [ 'Hi!!', '', '' ]

    original text blocks : [ 'Hel', 'lo,', ' world!' ]
    search / replace: 'Hel' / 'Hal'
    output blocks : [ 'Hal', 'lo,', ' world!' ]

    @param instance document: The original document
    @param str search: The text to search for (regexp)
    @param mixed replace: The replacement text or lxml.etree element to
                          append, or a list of etree elements
    @param int bs: See above

    @return instance The document with replacement applied
    """
    # Enables debug output
    DEBUG = False
    newdocument = document
    # Compile the search regexp
    searchre = re.compile(search)
    # Will match against searchels. Searchels is a list that contains last
    # n text elements found in the document. 1 < n < bs
    searchels = []
    for element in newdocument.iter():
        if element.tag == '{%s}t' % nsprefixes['w']:  # t (text) elements
            if element.text:
                # Add this element to searchels
                searchels.append(element)
                if len(searchels) > bs:
                    # If searchels is too long, remove first elements
                    searchels.pop(0)
                # Search all combinations of searchels, starting from
                # smaller up to bigger ones
                # l = search length
                # s = search start
                # e = element IDs to merge
                found = False
                for l in range(1, len(searchels)+1):
                    if found:
                        break
                    for s in range(len(searchels)):
                        if found:
                            break
                        if s+l <= len(searchels):
                            e = range(s, s+l)
                            txtsearch = ''
                            for k in e:
                                txtsearch += searchels[k].text
                            # Search for the text in the whole txtsearch
                            match = searchre.search(txtsearch)
                            if match:
                                found = True
                                # I've found something :)
                                if DEBUG:
                                    log.debug("Found element!")
                                    log.debug("Search regexp: %s",
                                              searchre.pattern)
                                    log.debug("Requested replacement: %s",
                                              replace)
                                    log.debug("Matched text: %s", txtsearch)
                                    log.debug("Matched text (splitted): %s",
                                              map(lambda i: i.text, searchels))
                                    log.debug("Matched at position: %s",
                                              match.start())
                                    log.debug("matched in elements: %s", e)
                                    if isinstance(replace, etree._Element):
                                        log.debug("Will replace with XML CODE")
                                    # BUG FIX: was 'isinstance(replace(list,
                                    # tuple))', which *called* replace instead
                                    # of testing its type and raised TypeError
                                    # for string replacements when DEBUG was on
                                    elif isinstance(replace, (list, tuple)):
                                        log.debug("Will replace with LIST OF"
                                                  " ELEMENTS")
                                    else:
                                        # BUG FIX: added the '%s' placeholder;
                                        # the original passed a lazy argument
                                        # with no slot in the format string
                                        log.debug("Will replace with: %s",
                                                  re.sub(search, replace,
                                                         txtsearch))
                                curlen = 0
                                replaced = False
                                for i in e:
                                    curlen += len(searchels[i].text)
                                    if curlen > match.start() and not replaced:
                                        # The match occurred in THIS element.
                                        # Put in the whole replaced text
                                        if isinstance(replace, etree._Element):
                                            # Convert to a list and process
                                            # it later
                                            replace = [replace]
                                        if isinstance(replace, (list, tuple)):
                                            # I'm replacing with a list of
                                            # etree elements
                                            # clear the text in the tag and
                                            # append the element after the
                                            # parent paragraph
                                            # (because t elements cannot have
                                            # childs)
                                            p = findTypeParent(
                                                searchels[i],
                                                '{%s}p' % nsprefixes['w'])
                                            searchels[i].text = re.sub(
                                                search, '', txtsearch)
                                            insindex = p.getparent().index(p)+1
                                            for r in replace:
                                                p.getparent().insert(
                                                    insindex, r)
                                                insindex += 1
                                        else:
                                            # Replacing with pure text
                                            searchels[i].text = re.sub(
                                                search, replace, txtsearch)
                                        replaced = True
                                        log.debug(
                                            "Replacing in element #: %s", i)
                                    else:
                                        # Clears the other text elements
                                        searchels[i].text = ''
    return newdocument
def getdocumenttext(document):
    '''Return the raw text of a document, as a list of paragraphs.'''
    w_ns = '{' + nsprefixes['w'] + '}'
    # First gather every w:p (paragraph) element in document order.
    paralist = [el for el in document.iter() if el.tag == w_ns + 'p']
    # A sentence may be split over several w:t elements, so stitch each
    # paragraph's text fragments back together (tabs become '\t').
    paratextlist = []
    for para in paralist:
        pieces = []
        for element in para.iter():
            if element.tag == w_ns + 't':
                if element.text:
                    pieces.append(element.text)
            elif element.tag == w_ns + 'tab':
                pieces.append('\t')
        paratext = u''.join(pieces)
        # Skip paragraphs that produced no text at all.
        if paratext:
            paratextlist.append(paratext)
    return paratextlist
def coreproperties(title, subject, creator, keywords, lastmodifiedby=None):
    """
    Create core properties (common document properties referred to in the
    'Dublin Core' specification). See appproperties() for other stuff.
    """
    if not lastmodifiedby:
        lastmodifiedby = creator
    coreprops = makeelement('coreProperties', nsprefix='cp')
    # (tag, text, namespace prefix) for the simple children, in the order
    # the original document expects them.
    simple_children = [
        ('title', title, 'dc'),
        ('subject', subject, 'dc'),
        ('creator', creator, 'dc'),
        ('keywords', ','.join(keywords), 'cp'),
        ('lastModifiedBy', lastmodifiedby, 'cp'),
        ('revision', '1', 'cp'),
        ('category', 'Examples', 'cp'),
        ('description', 'Examples', 'dc'),
    ]
    for tag, text, prefix in simple_children:
        coreprops.append(makeelement(tag, tagtext=text, nsprefix=prefix))
    currenttime = time.strftime('%Y-%m-%dT%H:%M:%SZ')
    # Document creation and modify times.
    # Problem: the attribute's *name* uses one namespace while its *value*
    # uses another, so these two elements are built from raw XML strings
    # as a workaround.
    for doctime in ['created', 'modified']:
        elm_str = (
            '<dcterms:%s xmlns:xsi="http://www.w3.org/2001/XMLSchema-instanc'
            'e" xmlns:dcterms="http://purl.org/dc/terms/" xsi:type="dcterms:'
            'W3CDTF">%s</dcterms:%s>'
        ) % (doctime, currenttime, doctime)
        coreprops.append(etree.fromstring(elm_str))
    return coreprops
def appproperties():
    """
    Create app-specific properties. See docproperties() for more common
    document properties.
    """
    # Build the root from a raw string: it needs both a default namespace and
    # the 'vt' prefix declared. (The original first assigned the result of
    # makeelement('Properties', nsprefix='ep') and immediately overwrote it —
    # that dead statement is removed here.)
    appprops = etree.fromstring(
        '<?xml version="1.0" encoding="UTF-8" standalone="yes"?><Properties x'
        'mlns="http://schemas.openxmlformats.org/officeDocument/2006/extended'
        '-properties" xmlns:vt="http://schemas.openxmlformats.org/officeDocum'
        'ent/2006/docPropsVTypes"></Properties>')
    # Static placeholder statistics copied from a reference document.
    props = {
        'Template': 'Normal.dotm',
        'TotalTime': '6',
        'Pages': '1',
        'Words': '83',
        'Characters': '475',
        'Application': 'Microsoft Word 12.0.0',
        'DocSecurity': '0',
        'Lines': '12',
        'Paragraphs': '8',
        'ScaleCrop': 'false',
        'LinksUpToDate': 'false',
        'CharactersWithSpaces': '583',
        'SharedDoc': 'false',
        'HyperlinksChanged': 'false',
        'AppVersion': '12.0000'}
    for prop in props:
        appprops.append(makeelement(prop, tagtext=props[prop], nsprefix=None))
    return appprops
def websettings():
    '''Generate the default webSettings part.'''
    web = makeelement('webSettings')
    for childtag in ('allowPNG', 'doNotSaveAsSingleFile'):
        web.append(makeelement(childtag))
    return web
def relationshiplist():
    """Return the default document-level relationship list.

    Each entry is a [relationship-type-URI, target-path] pair for one of
    the standard parts shipped with the template.
    """
    reltype_base = ('http://schemas.openxmlformats.org/officeDocument/2006/'
                    'relationships/')
    pairs = (
        ('numbering', 'numbering.xml'),
        ('styles', 'styles.xml'),
        ('settings', 'settings.xml'),
        ('webSettings', 'webSettings.xml'),
        ('fontTable', 'fontTable.xml'),
        ('theme', 'theme/theme1.xml'),
    )
    return [[reltype_base + kind, target] for kind, target in pairs]
def wordrelationships(relationshiplist):
    '''Generate a Word relationships file'''
    # FIXME: using string hack instead of making element
    relationships = etree.fromstring(
        '<Relationships xmlns="http://schemas.openxmlformats.org/package/2006'
        '/relationships"></Relationships>')
    # Relationship IDs (rId) start at 1, hence enumerate's start value.
    for number, relationship in enumerate(relationshiplist, start=1):
        rel_elm = makeelement('Relationship', nsprefix=None,
                              attributes={'Id': 'rId' + str(number),
                                          'Type': relationship[0],
                                          'Target': relationship[1]})
        relationships.append(rel_elm)
    return relationships
def savedocx(
        document, coreprops, appprops, contenttypes, websettings,
        wordrelationships, output, imagefiledict=None):
    """
    Save a modified document

    Serializes the supplied XML trees plus the template's support files and
    any tracked images into a new zip archive at *output*.
    """
    if imagefiledict is None:
        warn(
            'Using savedocx() without imagefiledict parameter will be deprec'
            'ated in the future.', PendingDeprecationWarning
        )
    assert os.path.isdir(template_dir)
    docxfile = zipfile.ZipFile(
        output, mode='w', compression=zipfile.ZIP_DEFLATED)
    # Move to the template data path
    # NOTE(review): os.chdir is process-global, so this function is not safe
    # to call concurrently from multiple threads.
    prev_dir = os.path.abspath('.')  # save previous working dir
    os.chdir(template_dir)
    # Serialize our trees into out zip file
    treesandfiles = {
        document: 'word/document.xml',
        coreprops: 'docProps/core.xml',
        appprops: 'docProps/app.xml',
        contenttypes: '[Content_Types].xml',
        websettings: 'word/webSettings.xml',
        wordrelationships: 'word/_rels/document.xml.rels'
    }
    for tree in treesandfiles:
        log.info('Saving: %s' % treesandfiles[tree])
        treestring = etree.tostring(tree, pretty_print=True)
        docxfile.writestr(treesandfiles[tree], treestring)
    # Add & compress images, if applicable
    if imagefiledict is not None:
        for imagepath, picrelid in imagefiledict.items():
            archivename = 'word/media/%s_%s' % (picrelid, basename(imagepath))
            log.info('Saving: %s', archivename)
            docxfile.write(imagepath, archivename)
    # Add & compress support files
    files_to_ignore = ['.DS_Store']  # nuisance from some os's
    for dirpath, dirnames, filenames in os.walk('.'):
        for filename in filenames:
            if filename in files_to_ignore:
                continue
            templatefile = join(dirpath, filename)
            archivename = templatefile[2:]  # strip the leading './'
            log.info('Saving: %s', archivename)
            docxfile.write(templatefile, archivename)
    log.info('Saved new file to: %r', output)
    docxfile.close()
    os.chdir(prev_dir)  # restore previous working dir
    return
|
mikemaccana/python-docx
|
docx.py
|
makeelement
|
python
|
def makeelement(tagname, tagtext=None, nsprefix='w', attributes=None,
                attrnsprefix=None):
    '''Create an element & return it'''
    # A list of prefixes becomes an explicit nsmap on the new element.
    nsmap = None
    if isinstance(nsprefix, list):
        nsmap = dict((prefix, nsprefixes[prefix]) for prefix in nsprefix)
        # FIXME: rest of code below expects a single prefix
        nsprefix = nsprefix[0]
    namespace = '{%s}' % nsprefixes[nsprefix] if nsprefix else ''
    newelement = etree.Element(namespace + tagname, nsmap=nsmap)
    # Add attributes with namespaces
    if attributes:
        if attrnsprefix:
            attributenamespace = '{' + nsprefixes[attrnsprefix] + '}'
        elif nsprefix == 'w':
            # Quick hack: elements tagged with the 'w' prefix appear to use
            # the same prefix for their attributes.
            attributenamespace = namespace
        else:
            # No attribute namespace requested: use the empty string.
            attributenamespace = ''
        for name, value in attributes.items():
            newelement.set(attributenamespace + name, value)
    if tagtext:
        newelement.text = tagtext
    return newelement
|
Create an element & return it
|
train
|
https://github.com/mikemaccana/python-docx/blob/4c9b46dbebe3d2a9b82dbcd35af36584a36fd9fe/docx.py#L95-L131
| null |
# encoding: utf-8
"""
Open and modify Microsoft Word 2007 docx files (called 'OpenXML' and
'Office OpenXML' by Microsoft)
Part of Python's docx module - http://github.com/mikemaccana/python-docx
See LICENSE for licensing information.
"""
import os
import re
import time
import shutil
import zipfile
from lxml import etree
from os.path import abspath, basename, join
try:
from PIL import Image
except ImportError:
import Image
try:
from PIL.ExifTags import TAGS
except ImportError:
TAGS = {}
from exceptions import PendingDeprecationWarning
from warnings import warn
import logging
log = logging.getLogger(__name__)
# Record template directory's location which is just 'template' for a docx
# developer or 'site-packages/docx-template' if you have installed docx
template_dir = join(os.path.dirname(__file__), 'docx-template') # installed
if not os.path.isdir(template_dir):
template_dir = join(os.path.dirname(__file__), 'template') # dev
# All Word prefixes / namespace matches used in document.xml & core.xml.
# LXML doesn't actually use prefixes (just the real namespace) , but these
# make it easier to copy Word output more easily.
# Keys are the short prefixes used throughout this module; values are the
# full namespace URIs that makeelement() substitutes for them.
nsprefixes = {
    'mo': 'http://schemas.microsoft.com/office/mac/office/2008/main',
    'o': 'urn:schemas-microsoft-com:office:office',
    've': 'http://schemas.openxmlformats.org/markup-compatibility/2006',
    # Text Content
    'w': 'http://schemas.openxmlformats.org/wordprocessingml/2006/main',
    'w10': 'urn:schemas-microsoft-com:office:word',
    'wne': 'http://schemas.microsoft.com/office/word/2006/wordml',
    # Drawing
    'a': 'http://schemas.openxmlformats.org/drawingml/2006/main',
    'm': 'http://schemas.openxmlformats.org/officeDocument/2006/math',
    'mv': 'urn:schemas-microsoft-com:mac:vml',
    'pic': 'http://schemas.openxmlformats.org/drawingml/2006/picture',
    'v': 'urn:schemas-microsoft-com:vml',
    'wp': ('http://schemas.openxmlformats.org/drawingml/2006/wordprocessing'
           'Drawing'),
    # Properties (core and extended)
    'cp': ('http://schemas.openxmlformats.org/package/2006/metadata/core-pr'
           'operties'),
    'dc': 'http://purl.org/dc/elements/1.1/',
    'ep': ('http://schemas.openxmlformats.org/officeDocument/2006/extended-'
           'properties'),
    'xsi': 'http://www.w3.org/2001/XMLSchema-instance',
    # Content Types
    'ct': 'http://schemas.openxmlformats.org/package/2006/content-types',
    # Package Relationships
    'r': ('http://schemas.openxmlformats.org/officeDocument/2006/relationsh'
          'ips'),
    'pr': 'http://schemas.openxmlformats.org/package/2006/relationships',
    # Dublin Core document properties
    'dcmitype': 'http://purl.org/dc/dcmitype/',
    'dcterms': 'http://purl.org/dc/terms/'}
def opendocx(file):
    '''Open a docx file, return a document XML tree.

    @param file: path (or file-like object) of the .docx package.
    @return: lxml element tree of the word/document.xml part.
    '''
    mydoc = zipfile.ZipFile(file)
    try:
        xmlcontent = mydoc.read('word/document.xml')
    finally:
        # Previously the archive was left open until garbage collection;
        # close it deterministically once the part has been read.
        mydoc.close()
    document = etree.fromstring(xmlcontent)
    return document
def newdocument():
    '''Return a fresh document tree: a <document> root element holding a
    single empty <body>.'''
    body = makeelement('body')
    root = makeelement('document')
    root.append(body)
    return root
def pagebreak(type='page', orient='portrait'):
    '''Insert a break, default 'page'.
    See http://openxmldeveloper.org/forums/thread/4075.aspx
    Return our page break element.

    @param str type:   'page' for a simple page break, 'section' for a
                       section break that carries page-size information.
    @param str orient: Orientation for section breaks only: 'portrait'
                       or 'landscape'.
    @raise ValueError: On an unrecognised *type* or *orient*.
    '''
    # Need to enumerate different types of page breaks.
    validtypes = ['page', 'section']
    if type not in validtypes:
        tmpl = 'Page break style "%s" not implemented. Valid styles: %s.'
        raise ValueError(tmpl % (type, validtypes))
    pagebreak = makeelement('p')
    if type == 'page':
        run = makeelement('r')
        br = makeelement('br', attributes={'type': type})
        run.append(br)
        pagebreak.append(run)
    elif type == 'section':
        # Previously an unknown orientation fell through both branches and
        # raised NameError on the undefined pgSz; reject it explicitly.
        validorients = ['portrait', 'landscape']
        if orient not in validorients:
            tmpl = 'Page orientation "%s" not implemented. Valid: %s.'
            raise ValueError(tmpl % (orient, validorients))
        pPr = makeelement('pPr')
        sectPr = makeelement('sectPr')
        if orient == 'portrait':
            pgSz = makeelement('pgSz', attributes={'w': '12240', 'h': '15840'})
        else:  # landscape
            pgSz = makeelement('pgSz', attributes={'h': '12240', 'w': '15840',
                                                   'orient': 'landscape'})
        sectPr.append(pgSz)
        pPr.append(sectPr)
        pagebreak.append(pPr)
    return pagebreak
def paragraph(paratext, style='BodyText', breakbefore=False, jc='left'):
    """
    Return a new paragraph element containing *paratext*. The paragraph's
    default style is 'Body Text', but a new style may be set using the
    *style* parameter.

    @param string jc: Paragraph alignment, possible values:
                      left, center, right, both (justified), ...
                      see http://www.schemacentral.com/sc/ooxml/t-w_ST_Jc.html
                      for a full list
    @param bool breakbefore: When True, a lastRenderedPageBreak marker is
                      inserted ahead of each run's text.

    If *paratext* is a list, add a run for each (text, char_format_str)
    2-tuple in the list. char_format_str is a string containing one or more
    of the characters 'b', 'i', or 'u', meaning bold, italic, and underline
    respectively. For example:

        paratext = [
            ('some bold text', 'b'),
            ('some normal text', ''),
            ('some italic underlined text', 'iu')
        ]
    """
    # Make our elements
    paragraph = makeelement('p')
    # Normalise the input to a list of (text, char-format) pairs.
    if not isinstance(paratext, list):
        paratext = [(paratext, '')]
    text_tuples = []
    for pt in paratext:
        text, char_styles_str = (pt if isinstance(pt, (list, tuple))
                                 else (pt, ''))
        text_elm = makeelement('t', tagtext=text)
        # Leading/trailing whitespace is dropped by XML processors unless
        # xml:space="preserve" is set on the text element.
        if len(text.strip()) < len(text):
            text_elm.set('{http://www.w3.org/XML/1998/namespace}space',
                         'preserve')
        text_tuples.append([text_elm, char_styles_str])
    # Paragraph properties: style and justification.
    pPr = makeelement('pPr')
    pStyle = makeelement('pStyle', attributes={'val': style})
    pJc = makeelement('jc', attributes={'val': jc})
    pPr.append(pStyle)
    pPr.append(pJc)
    # Add the text to the run, and the run to the paragraph
    paragraph.append(pPr)
    for text_elm, char_styles_str in text_tuples:
        run = makeelement('r')
        rPr = makeelement('rPr')
        # Apply styles
        if 'b' in char_styles_str:
            b = makeelement('b')
            rPr.append(b)
        if 'i' in char_styles_str:
            i = makeelement('i')
            rPr.append(i)
        if 'u' in char_styles_str:
            u = makeelement('u', attributes={'val': 'single'})
            rPr.append(u)
        run.append(rPr)
        # Insert lastRenderedPageBreak for assistive technologies like
        # document narrators to know when a page break occurred.
        if breakbefore:
            lastRenderedPageBreak = makeelement('lastRenderedPageBreak')
            run.append(lastRenderedPageBreak)
        run.append(text_elm)
        paragraph.append(run)
    # Return the combined paragraph
    return paragraph
def contenttypes():
    '''Return the [Content_Types].xml tree declaring the content type of
    every part and file extension the generated package may contain.'''
    types = etree.fromstring(
        '<Types xmlns="http://schemas.openxmlformats.org/package/2006/conten'
        't-types"></Types>')
    # Explicit per-part content types.
    ooxml = 'application/vnd.openxmlformats-officedocument.'
    parts = {
        '/word/theme/theme1.xml': ooxml + 'theme+xml',
        '/word/fontTable.xml': ooxml + 'wordprocessingml.fontTable+xml',
        '/docProps/core.xml':
            'application/vnd.openxmlformats-package.core-properties+xml',
        '/docProps/app.xml': ooxml + 'extended-properties+xml',
        '/word/document.xml': ooxml + 'wordprocessingml.document.main+xml',
        '/word/settings.xml': ooxml + 'wordprocessingml.settings+xml',
        '/word/numbering.xml': ooxml + 'wordprocessingml.numbering+xml',
        '/word/styles.xml': ooxml + 'wordprocessingml.styles+xml',
        '/word/webSettings.xml': ooxml + 'wordprocessingml.webSettings+xml',
    }
    for partname, contenttype in parts.items():
        types.append(makeelement(
            'Override', nsprefix=None,
            attributes={'PartName': partname, 'ContentType': contenttype}))
    # Default content types keyed on file extension.
    filetypes = {
        'gif': 'image/gif',
        'jpeg': 'image/jpeg',
        'jpg': 'image/jpeg',
        'png': 'image/png',
        'rels': 'application/vnd.openxmlformats-package.relationships+xml',
        'xml': 'application/xml',
    }
    for extension, contenttype in filetypes.items():
        types.append(makeelement(
            'Default', nsprefix=None,
            attributes={'Extension': extension, 'ContentType': contenttype}))
    return types
def heading(headingtext, headinglevel, lang='en'):
    '''Make a new heading, return the heading element.

    @param str headingtext:  The heading's visible text.
    @param int headinglevel: Outline level, appended to the style name
                             (e.g. 'Heading1').
    @param str lang:         Style-name language: 'en' -> 'Heading',
                             'it' -> 'Titolo'. Any other value now falls
                             back to the English 'Heading' prefix instead
                             of raising KeyError.
    '''
    lmap = {'en': 'Heading', 'it': 'Titolo'}
    # Fall back to the English style prefix for unrecognised languages;
    # backward compatible for the previously supported 'en'/'it'.
    style_prefix = lmap.get(lang, 'Heading')
    # Make our elements
    paragraph = makeelement('p')
    pr = makeelement('pPr')
    pStyle = makeelement(
        'pStyle', attributes={'val': style_prefix + str(headinglevel)})
    run = makeelement('r')
    text = makeelement('t', tagtext=headingtext)
    # Add the text the run, and the run to the paragraph
    pr.append(pStyle)
    run.append(text)
    paragraph.append(pr)
    paragraph.append(run)
    # Return the combined paragraph
    return paragraph
def table(contents, heading=True, colw=None, cwunit='dxa', tblw=0,
          twunit='auto', borders={}, celstyle=None):
    """
    Return a table element based on specified parameters

    @param list contents: A list of lists describing contents. Every item in
                          the list can be a string or a valid XML element
                          itself. It can also be a list. In that case all the
                          listed elements will be merged into the cell.
    @param bool heading:  Tells whether first line should be treated as
                          heading or not
    @param list colw:     list of integer column widths specified in wunitS.
    @param str cwunit:    Unit used for column width:
                          'pct' : fiftieths of a percent
                          'dxa' : twentieths of a point
                          'nil' : no width
                          'auto' : automagically determined
    @param int tblw:      Table width
    @param str twunit:    Unit used for table width. Same possible values as
                          cwunit.
    @param dict borders:  Dictionary defining table border. Supported keys
                          are: 'top', 'left', 'bottom', 'right',
                          'insideH', 'insideV', 'all'.
                          When specified, the 'all' key has precedence over
                          others. Each key must define a dict of border
                          attributes:
                          color : The color of the border, in hex or 'auto'
                          space : The space, measured in points
                          sz : The size of the border, in eighths of a point
                          val : The style of the border, see
                          http://www.schemacentral.com/sc/ooxml/t-w_ST_Border.htm
    @param list celstyle: Specify the style for each colum, list of dicts.
                          supported keys:
                          'align' : specify the alignment, see paragraph
                          documentation.
    @return lxml.etree:   Generated XML etree element

    NOTE(review): the mutable default `borders={}` is only read, never
    mutated, so it is harmless here; `unicode` below is Python 2-only.
    """
    table = makeelement('tbl')
    columns = len(contents[0])
    # Table properties
    tableprops = makeelement('tblPr')
    tablestyle = makeelement('tblStyle', attributes={'val': ''})
    tableprops.append(tablestyle)
    tablewidth = makeelement(
        'tblW', attributes={'w': str(tblw), 'type': str(twunit)})
    tableprops.append(tablewidth)
    # Table borders: 'all' overrides any per-side entry.
    if len(borders.keys()):
        tableborders = makeelement('tblBorders')
        for b in ['top', 'left', 'bottom', 'right', 'insideH', 'insideV']:
            if b in borders.keys() or 'all' in borders.keys():
                k = 'all' if 'all' in borders.keys() else b
                attrs = {}
                for a in borders[k].keys():
                    attrs[a] = unicode(borders[k][a])
                borderelem = makeelement(b, attributes=attrs)
                tableborders.append(borderelem)
        tableprops.append(tableborders)
    tablelook = makeelement('tblLook', attributes={'val': '0400'})
    tableprops.append(tablelook)
    table.append(tableprops)
    # Table Grid: one gridCol per column; '2390' is the fallback width
    # used when no explicit widths are supplied.
    tablegrid = makeelement('tblGrid')
    for i in range(columns):
        attrs = {'w': str(colw[i]) if colw else '2390'}
        tablegrid.append(makeelement('gridCol', attributes=attrs))
    table.append(tablegrid)
    # Heading Row
    row = makeelement('tr')
    rowprops = makeelement('trPr')
    cnfStyle = makeelement('cnfStyle', attributes={'val': '000000100000'})
    rowprops.append(cnfStyle)
    row.append(rowprops)
    if heading:
        i = 0
        for heading in contents[0]:
            cell = makeelement('tc')
            # Cell properties
            cellprops = makeelement('tcPr')
            if colw:
                wattr = {'w': str(colw[i]), 'type': cwunit}
            else:
                wattr = {'w': '0', 'type': 'auto'}
            cellwidth = makeelement('tcW', attributes=wattr)
            cellstyle = makeelement('shd', attributes={'val': 'clear',
                                                       'color': 'auto',
                                                       'fill': 'FFFFFF',
                                                       'themeFill': 'text2',
                                                       'themeFillTint': '99'})
            cellprops.append(cellwidth)
            cellprops.append(cellstyle)
            cell.append(cellprops)
            # Paragraph (Content): strings become centered paragraphs,
            # ready-made etree elements are appended as-is.
            if not isinstance(heading, (list, tuple)):
                heading = [heading]
            for h in heading:
                if isinstance(h, etree._Element):
                    cell.append(h)
                else:
                    cell.append(paragraph(h, jc='center'))
            row.append(cell)
            i += 1
        table.append(row)
    # Contents Rows
    for contentrow in contents[1 if heading else 0:]:
        row = makeelement('tr')
        i = 0
        for content in contentrow:
            cell = makeelement('tc')
            # Properties
            cellprops = makeelement('tcPr')
            if colw:
                wattr = {'w': str(colw[i]), 'type': cwunit}
            else:
                wattr = {'w': '0', 'type': 'auto'}
            cellwidth = makeelement('tcW', attributes=wattr)
            cellprops.append(cellwidth)
            cell.append(cellprops)
            # Paragraph (Content): per-column alignment comes from
            # celstyle when provided, defaulting to left.
            if not isinstance(content, (list, tuple)):
                content = [content]
            for c in content:
                if isinstance(c, etree._Element):
                    cell.append(c)
                else:
                    if celstyle and 'align' in celstyle[i].keys():
                        align = celstyle[i]['align']
                    else:
                        align = 'left'
                    cell.append(paragraph(c, jc=align))
            row.append(cell)
            i += 1
        table.append(row)
    return table
def picture(
        relationshiplist, picname, picdescription, pixelwidth=None,
        pixelheight=None, nochangeaspect=True, nochangearrowheads=True,
        imagefiledict=None):
    """
    Take a relationshiplist, picture file name, and return a paragraph
    containing the image and an updated relationshiplist

    When *imagefiledict* is given, image files are tracked in it (path ->
    relationship id) and the 3-tuple (relationshiplist, paragraph,
    imagefiledict) is returned; otherwise the file is copied into the
    template directory and a 2-tuple is returned.
    """
    if imagefiledict is None:
        warn(
            'Using picture() without imagefiledict parameter will be depreca'
            'ted in the future.', PendingDeprecationWarning
        )

    # http://openxmldeveloper.org/articles/462.aspx
    # Create an image. Size may be specified, otherwise it will based on the
    # pixel size of image. Return a paragraph containing the picture
    # Set relationship ID to that of the image or the first available one
    picid = '2'
    picpath = abspath(picname)

    if imagefiledict is not None:
        # Keep track of the image files in a separate dictionary so they don't
        # need to be copied into the template directory
        if picpath not in imagefiledict:
            picrelid = 'rId' + str(len(relationshiplist) + 1)
            imagefiledict[picpath] = picrelid

            relationshiplist.append([
                'http://schemas.openxmlformats.org/officeDocument/2006/relat'
                'ionships/image',
                'media/%s_%s' % (picrelid, basename(picpath))
            ])
        else:
            picrelid = imagefiledict[picpath]
    else:
        # Copy files into template directory for backwards compatibility
        # Images still accumulate in the template directory this way
        picrelid = 'rId' + str(len(relationshiplist) + 1)

        relationshiplist.append([
            'http://schemas.openxmlformats.org/officeDocument/2006/relations'
            'hips/image', 'media/' + picname
        ])

        media_dir = join(template_dir, 'word', 'media')
        if not os.path.isdir(media_dir):
            os.mkdir(media_dir)
        shutil.copyfile(picname, join(media_dir, picname))

    image = Image.open(picpath)

    # Extract EXIF data, if available. Missing/corrupt EXIF is treated as
    # "no EXIF"; the previous bare `except:` also swallowed SystemExit and
    # KeyboardInterrupt, which we no longer do.
    try:
        exif = image._getexif()
        exif = {} if exif is None else exif
    except Exception:
        exif = {}

    imageExif = {}
    for tag, value in exif.items():
        imageExif[TAGS.get(tag, tag)] = value

    imageOrientation = imageExif.get('Orientation', 1)
    # Map the EXIF orientation flag to a rotation angle and mirror flags.
    imageAngle = {
        1: 0, 2: 0, 3: 180, 4: 0, 5: 90, 6: 90, 7: 270, 8: 270
    }[imageOrientation]
    imageFlipH = 'true' if imageOrientation in (2, 5, 7) else 'false'
    imageFlipV = 'true' if imageOrientation == 4 else 'false'

    # Check if the user has specified a size
    if not pixelwidth or not pixelheight:
        # If not, get info from the picture itself
        pixelwidth, pixelheight = image.size[0:2]

    # Swap width and height if necessary
    if imageOrientation in (5, 6, 7, 8):
        pixelwidth, pixelheight = pixelheight, pixelwidth

    # OpenXML measures on-screen objects in English Metric Units
    # 1cm = 36000 EMUs
    emuperpixel = 12700
    width = str(pixelwidth * emuperpixel)
    height = str(pixelheight * emuperpixel)

    # There are 3 main elements inside a picture
    # 1. The Blipfill - specifies how the image fills the picture area
    #    (stretch, tile, etc.)
    blipfill = makeelement('blipFill', nsprefix='pic')
    blipfill.append(makeelement('blip', nsprefix='a', attrnsprefix='r',
                    attributes={'embed': picrelid}))
    stretch = makeelement('stretch', nsprefix='a')
    stretch.append(makeelement('fillRect', nsprefix='a'))
    blipfill.append(makeelement('srcRect', nsprefix='a'))
    blipfill.append(stretch)

    # 2. The non visual picture properties
    nvpicpr = makeelement('nvPicPr', nsprefix='pic')
    cnvpr = makeelement(
        'cNvPr', nsprefix='pic',
        attributes={'id': '0', 'name': 'Picture 1', 'descr': picdescription}
    )
    nvpicpr.append(cnvpr)
    cnvpicpr = makeelement('cNvPicPr', nsprefix='pic')
    cnvpicpr.append(makeelement(
        'picLocks', nsprefix='a',
        attributes={'noChangeAspect': str(int(nochangeaspect)),
                    'noChangeArrowheads': str(int(nochangearrowheads))}))
    nvpicpr.append(cnvpicpr)

    # 3. The Shape properties
    sppr = makeelement('spPr', nsprefix='pic', attributes={'bwMode': 'auto'})
    xfrm = makeelement(
        'xfrm', nsprefix='a', attributes={
            'rot': str(imageAngle * 60000), 'flipH': imageFlipH,
            'flipV': imageFlipV
        }
    )
    xfrm.append(
        makeelement('off', nsprefix='a', attributes={'x': '0', 'y': '0'})
    )
    xfrm.append(
        makeelement(
            'ext', nsprefix='a', attributes={'cx': width, 'cy': height}
        )
    )
    prstgeom = makeelement(
        'prstGeom', nsprefix='a', attributes={'prst': 'rect'}
    )
    prstgeom.append(makeelement('avLst', nsprefix='a'))
    sppr.append(xfrm)
    sppr.append(prstgeom)

    # Add our 3 parts to the picture element
    pic = makeelement('pic', nsprefix='pic')
    pic.append(nvpicpr)
    pic.append(blipfill)
    pic.append(sppr)

    # Now make the supporting elements
    # The following sequence is just: make element, then add its children
    graphicdata = makeelement(
        'graphicData', nsprefix='a',
        attributes={'uri': ('http://schemas.openxmlformats.org/drawingml/200'
                            '6/picture')})
    graphicdata.append(pic)
    graphic = makeelement('graphic', nsprefix='a')
    graphic.append(graphicdata)

    framelocks = makeelement('graphicFrameLocks', nsprefix='a',
                             attributes={'noChangeAspect': '1'})
    framepr = makeelement('cNvGraphicFramePr', nsprefix='wp')
    framepr.append(framelocks)
    docpr = makeelement('docPr', nsprefix='wp',
                        attributes={'id': picid, 'name': 'Picture 1',
                                    'descr': picdescription})
    effectextent = makeelement('effectExtent', nsprefix='wp',
                               attributes={'l': '25400', 't': '0', 'r': '0',
                                           'b': '0'})
    extent = makeelement('extent', nsprefix='wp',
                         attributes={'cx': width, 'cy': height})
    inline = makeelement('inline', attributes={'distT': "0", 'distB': "0",
                                               'distL': "0", 'distR': "0"},
                         nsprefix='wp')
    inline.append(extent)
    inline.append(effectextent)
    inline.append(docpr)
    inline.append(framepr)
    inline.append(graphic)
    drawing = makeelement('drawing')
    drawing.append(inline)
    run = makeelement('r')
    run.append(drawing)
    paragraph = makeelement('p')
    paragraph.append(run)

    if imagefiledict is not None:
        return relationshiplist, paragraph, imagefiledict
    else:
        return relationshiplist, paragraph
def search(document, search):
    '''Search a document for a regex; return True if any text element
    matches, else False.

    @param document: lxml document tree to scan.
    @param str search: Regular expression pattern.
    '''
    searchre = re.compile(search)
    # Hoist the qualified tag out of the loop; return on the first hit
    # instead of scanning every remaining element (same result, faster).
    text_tag = '{%s}t' % nsprefixes['w']
    for element in document.iter():
        if element.tag == text_tag and element.text:
            if searchre.search(element.text):
                return True
    return False
def replace(document, search, replace):
    """
    Replace all occurences of string with a different string, return updated
    document
    """
    pattern = re.compile(search)
    text_tag = '{%s}t' % nsprefixes['w']  # fully-qualified w:t tag
    for element in document.iter():
        if element.tag != text_tag or not element.text:
            continue
        if pattern.search(element.text):
            element.text = re.sub(search, replace, element.text)
    return document
def clean(document):
    """ Perform misc cleaning operations on documents.
        Removes empty w:t and w:r elements (no text, no children) and
        returns the cleaned document.
    """
    # Clean empty text and r tags. 't' is processed first so that runs
    # emptied by that pass are caught by the following 'r' pass.
    for tagname in ('t', 'r'):
        fulltag = '{%s}%s' % (nsprefixes['w'], tagname)
        doomed = [el for el in document.iter()
                  if el.tag == fulltag and not el.text and not len(el)]
        for el in doomed:
            el.getparent().remove(el)
    return document
def findTypeParent(element, tag):
    """ Finds first parent of element of the given type

    @param object element: etree element
    @param string tag: the tag parent to search for
    @return object element: the found parent or None when not found

    Previously this looped unconditionally and raised AttributeError
    (None.tag) once it walked past the root; the documented None return
    was unreachable.
    """
    parent = element.getparent()
    while parent is not None:
        if parent.tag == tag:
            return parent
        parent = parent.getparent()
    # Not found
    return None
def AdvSearch(document, search, bs=3):
    '''Return set of all regex matches

    This is an advanced version of python-docx.search() that takes into
    account blocks of <bs> elements at a time.

    What it does:
    It searches the entire document body for text blocks.
    Since the text to search could be spawned across multiple text blocks,
    we need to adopt some sort of algorithm to handle this situation.
    The smaller matching group of blocks (up to bs) is then adopted.
    If the matching group has more than one block, blocks other than first
    are cleared and all the replacement text is put on first block.

    Examples:
    original text blocks : [ 'Hel', 'lo,', ' world!' ]
    search : 'Hello,'
    output blocks : [ 'Hello,' ]

    original text blocks : [ 'Hel', 'lo', ' __', 'name', '__!' ]
    search : '(__[a-z]+__)'
    output blocks : [ '__name__' ]

    @param instance document: The original document
    @param str search: The text to search for (regexp)
    @param int bs: Maximum number of consecutive text elements that a
                   single match may span (sliding-window size)
    @return set All occurences of search string
    '''
    # Compile the search regexp
    searchre = re.compile(search)

    matches = []

    # Will match against searchels. Searchels is a list that contains last
    # n text elements found in the document. 1 < n < bs
    searchels = []

    for element in document.iter():
        if element.tag == '{%s}t' % nsprefixes['w']:  # t (text) elements
            if element.text:
                # Add this element to searchels
                searchels.append(element)
                if len(searchels) > bs:
                    # If searchels is too long, remove first elements
                    searchels.pop(0)

                # Search all combinations, of searchels, starting from
                # smaller up to bigger ones
                # l = search length (number of elements concatenated)
                # s = search start (index of first element in the window)
                # e = element IDs to merge
                found = False
                for l in range(1, len(searchels)+1):
                    if found:
                        break
                    for s in range(len(searchels)):
                        if found:
                            break
                        if s+l <= len(searchels):
                            e = range(s, s+l)
                            txtsearch = ''
                            for k in e:
                                txtsearch += searchels[k].text

                            # Search for the text in the whole txtsearch
                            match = searchre.search(txtsearch)
                            if match:
                                matches.append(match.group())
                                found = True
    return set(matches)
def advReplace(document, search, replace, bs=3):
    """
    Replace all occurences of string with a different string, return updated
    document

    This is a modified version of python-docx.replace() that takes into
    account blocks of <bs> elements at a time. The replace element can also
    be a string or an xml etree element.

    What it does:
    It searches the entire document body for text blocks.
    Then scan thos text blocks for replace.
    Since the text to search could be spawned across multiple text blocks,
    we need to adopt some sort of algorithm to handle this situation.
    The smaller matching group of blocks (up to bs) is then adopted.
    If the matching group has more than one block, blocks other than first
    are cleared and all the replacement text is put on first block.

    Examples:
    original text blocks : [ 'Hel', 'lo,', ' world!' ]
    search / replace: 'Hello,' / 'Hi!'
    output blocks : [ 'Hi!', '', ' world!' ]

    original text blocks : [ 'Hel', 'lo,', ' world!' ]
    search / replace: 'Hello, world' / 'Hi!'
    output blocks : [ 'Hi!!', '', '' ]

    original text blocks : [ 'Hel', 'lo,', ' world!' ]
    search / replace: 'Hel' / 'Hal'
    output blocks : [ 'Hal', 'lo,', ' world!' ]

    @param instance document: The original document
    @param str search: The text to search for (regexp)
    @param mixed replace: The replacement text or lxml.etree element to
                          append, or a list of etree elements
    @param int bs: See above
    @return instance The document with replacement applied

    Fixed: the DEBUG branch called `replace(list, tuple)` instead of
    `isinstance(replace, (list, tuple))`, and passed a log message with a
    positional argument but no %s placeholder.
    """
    # Enables debug output
    DEBUG = False

    newdocument = document

    # Compile the search regexp
    searchre = re.compile(search)

    # Will match against searchels. Searchels is a list that contains last
    # n text elements found in the document. 1 < n < bs
    searchels = []

    for element in newdocument.iter():
        if element.tag == '{%s}t' % nsprefixes['w']:  # t (text) elements
            if element.text:
                # Add this element to searchels
                searchels.append(element)
                if len(searchels) > bs:
                    # If searchels is too long, remove first elements
                    searchels.pop(0)

                # Search all combinations, of searchels, starting from
                # smaller up to bigger ones
                # l = search length
                # s = search start
                # e = element IDs to merge
                found = False
                for l in range(1, len(searchels)+1):
                    if found:
                        break
                    for s in range(len(searchels)):
                        if found:
                            break
                        if s+l <= len(searchels):
                            e = range(s, s+l)
                            txtsearch = ''
                            for k in e:
                                txtsearch += searchels[k].text

                            # Search for the text in the whole txtsearch
                            match = searchre.search(txtsearch)
                            if match:
                                found = True

                                # I've found something :)
                                if DEBUG:
                                    log.debug("Found element!")
                                    log.debug("Search regexp: %s",
                                              searchre.pattern)
                                    log.debug("Requested replacement: %s",
                                              replace)
                                    log.debug("Matched text: %s", txtsearch)
                                    log.debug("Matched text (splitted): %s",
                                              map(lambda i: i.text,
                                                  searchels))
                                    log.debug("Matched at position: %s",
                                              match.start())
                                    log.debug("matched in elements: %s", e)
                                    if isinstance(replace, etree._Element):
                                        log.debug("Will replace with XML CODE")
                                    elif isinstance(replace, (list, tuple)):
                                        log.debug("Will replace with LIST OF"
                                                  " ELEMENTS")
                                    else:
                                        log.debug("Will replace with: %s",
                                                  re.sub(search, replace,
                                                         txtsearch))

                                curlen = 0
                                replaced = False
                                for i in e:
                                    curlen += len(searchels[i].text)
                                    if curlen > match.start() and not replaced:
                                        # The match occurred in THIS element.
                                        # Put in the whole replaced text
                                        if isinstance(replace, etree._Element):
                                            # Convert to a list and process
                                            # it later
                                            replace = [replace]
                                        if isinstance(replace, (list, tuple)):
                                            # I'm replacing with a list of
                                            # etree elements
                                            # clear the text in the tag and
                                            # append the element after the
                                            # parent paragraph
                                            # (because t elements cannot have
                                            # childs)
                                            p = findTypeParent(
                                                searchels[i],
                                                '{%s}p' % nsprefixes['w'])
                                            searchels[i].text = re.sub(
                                                search, '', txtsearch)
                                            insindex = p.getparent().index(p)+1
                                            for r in replace:
                                                p.getparent().insert(
                                                    insindex, r)
                                                insindex += 1
                                        else:
                                            # Replacing with pure text
                                            searchels[i].text = re.sub(
                                                search, replace, txtsearch)
                                        replaced = True
                                        log.debug(
                                            "Replacing in element #: %s", i)
                                    else:
                                        # Clears the other text elements
                                        searchels[i].text = ''
    return newdocument
def getdocumenttext(document):
    '''Return the raw text of a document, as a list of paragraphs.'''
    para_tag = '{%s}p' % nsprefixes['w']
    text_tag = '{%s}t' % nsprefixes['w']
    tab_tag = '{%s}tab' % nsprefixes['w']
    # Collect every paragraph element first.
    paralist = [el for el in document.iter() if el.tag == para_tag]
    # A single sentence may be spread over multiple text elements, so
    # concatenate every t (text) / tab child of each paragraph.
    paratextlist = []
    for para in paralist:
        pieces = []
        for child in para.iter():
            if child.tag == text_tag:
                if child.text:
                    pieces.append(child.text)
            elif child.tag == tab_tag:
                pieces.append('\t')
        paratext = u''.join(pieces)
        # Skip paragraphs that yielded no text at all.
        if len(paratext) != 0:
            paratextlist.append(paratext)
    return paratextlist
def coreproperties(title, subject, creator, keywords, lastmodifiedby=None):
    """
    Create core properties (common document properties referred to in the
    'Dublin Core' specification). See appproperties() for other stuff.
    *keywords* is an iterable of strings; *lastmodifiedby* defaults to
    *creator* when omitted or empty.
    """
    coreprops = makeelement('coreProperties', nsprefix='cp')
    coreprops.append(makeelement('title', tagtext=title, nsprefix='dc'))
    coreprops.append(makeelement('subject', tagtext=subject, nsprefix='dc'))
    coreprops.append(makeelement('creator', tagtext=creator, nsprefix='dc'))
    coreprops.append(makeelement('keywords', tagtext=','.join(keywords),
                                 nsprefix='cp'))
    coreprops.append(makeelement('lastModifiedBy',
                                 tagtext=lastmodifiedby or creator,
                                 nsprefix='cp'))
    coreprops.append(makeelement('revision', tagtext='1', nsprefix='cp'))
    coreprops.append(
        makeelement('category', tagtext='Examples', nsprefix='cp'))
    coreprops.append(
        makeelement('description', tagtext='Examples', nsprefix='dc'))
    # Document creation and modify times.
    # Prob here: we have an attribute whose name uses one namespace, and that
    # attribute's value uses another namespace.
    # We're creating the element from a string as a workaround...
    currenttime = time.strftime('%Y-%m-%dT%H:%M:%SZ')
    for doctime in ('created', 'modified'):
        elm_str = (
            '<dcterms:%s xmlns:xsi="http://www.w3.org/2001/XMLSchema-instanc'
            'e" xmlns:dcterms="http://purl.org/dc/terms/" xsi:type="dcterms:'
            'W3CDTF">%s</dcterms:%s>'
        ) % (doctime, currenttime, doctime)
        coreprops.append(etree.fromstring(elm_str))
    return coreprops
def appproperties():
    """
    Create app-specific properties. See docproperties() for more common
    document properties.

    The statistics below (word/page counts etc.) are fixed placeholder
    values carried over from the original template output.
    """
    # The root needs both the extended-properties default namespace and the
    # vt prefix, which makeelement() cannot express, so it is parsed from a
    # literal. (A previous makeelement('Properties', ...) call here was
    # dead code -- its result was immediately overwritten -- and has been
    # removed.)
    appprops = etree.fromstring(
        '<?xml version="1.0" encoding="UTF-8" standalone="yes"?><Properties x'
        'mlns="http://schemas.openxmlformats.org/officeDocument/2006/extended'
        '-properties" xmlns:vt="http://schemas.openxmlformats.org/officeDocum'
        'ent/2006/docPropsVTypes"></Properties>')
    props = \
        {'Template': 'Normal.dotm',
         'TotalTime': '6',
         'Pages': '1',
         'Words': '83',
         'Characters': '475',
         'Application': 'Microsoft Word 12.0.0',
         'DocSecurity': '0',
         'Lines': '12',
         'Paragraphs': '8',
         'ScaleCrop': 'false',
         'LinksUpToDate': 'false',
         'CharactersWithSpaces': '583',
         'SharedDoc': 'false',
         'HyperlinksChanged': 'false',
         'AppVersion': '12.0000'}
    for prop, value in props.items():
        appprops.append(makeelement(prop, tagtext=value, nsprefix=None))
    return appprops
def websettings():
    '''Generate the minimal webSettings part (allowPNG +
    doNotSaveAsSingleFile flags).'''
    settings = makeelement('webSettings')
    for childname in ('allowPNG', 'doNotSaveAsSingleFile'):
        settings.append(makeelement(childname))
    return settings
def relationshiplist():
    '''Return the default document-level relationship list: one
    [relationship-type-URI, target-part] pair per standard part.'''
    base = ('http://schemas.openxmlformats.org/officeDocument/2006/'
            'relationships/')
    targets = [
        ('numbering', 'numbering.xml'),
        ('styles', 'styles.xml'),
        ('settings', 'settings.xml'),
        ('webSettings', 'webSettings.xml'),
        ('fontTable', 'fontTable.xml'),
        ('theme', 'theme/theme1.xml'),
    ]
    return [[base + reltype, target] for reltype, target in targets]
def wordrelationships(relationshiplist):
    '''Generate a Word relationships part from a list of
    [relationship-type, target] pairs; rIds are assigned sequentially
    starting at rId1.'''
    # FIXME: using string hack instead of making element
    #relationships = makeelement('Relationships', nsprefix='pr')
    relationships = etree.fromstring(
        '<Relationships xmlns="http://schemas.openxmlformats.org/package/2006'
        '/relationships"></Relationships>')
    for position, relationship in enumerate(relationshiplist, start=1):
        relationships.append(makeelement(
            'Relationship', nsprefix=None,
            attributes={'Id': 'rId%d' % position,
                        'Type': relationship[0],
                        'Target': relationship[1]}))
    return relationships
def savedocx(
        document, coreprops, appprops, contenttypes, websettings,
        wordrelationships, output, imagefiledict=None):
    """
    Save a modified document.

    Serializes the supplied XML trees plus every support file found in the
    template directory into *output* (a docx/zip file). *imagefiledict*
    maps image paths to relationship ids; when given, those images are
    written under word/media without having been copied into the template
    directory.
    """
    if imagefiledict is None:
        warn(
            'Using savedocx() without imagefiledict parameter will be deprec'
            'ated in the future.', PendingDeprecationWarning
        )

    assert os.path.isdir(template_dir)
    docxfile = zipfile.ZipFile(
        output, mode='w', compression=zipfile.ZIP_DEFLATED)

    # Move to the template data path. The finally block guarantees the zip
    # is closed and the working directory restored even when a write fails
    # (previously an exception left the cwd changed and the zip open).
    prev_dir = os.path.abspath('.')  # save previous working dir
    os.chdir(template_dir)
    try:
        # Serialize our trees into out zip file
        treesandfiles = {
            document: 'word/document.xml',
            coreprops: 'docProps/core.xml',
            appprops: 'docProps/app.xml',
            contenttypes: '[Content_Types].xml',
            websettings: 'word/webSettings.xml',
            wordrelationships: 'word/_rels/document.xml.rels'
        }
        for tree in treesandfiles:
            log.info('Saving: %s' % treesandfiles[tree])
            treestring = etree.tostring(tree, pretty_print=True)
            docxfile.writestr(treesandfiles[tree], treestring)

        # Add & compress images, if applicable
        if imagefiledict is not None:
            for imagepath, picrelid in imagefiledict.items():
                archivename = 'word/media/%s_%s' % (
                    picrelid, basename(imagepath))
                log.info('Saving: %s', archivename)
                docxfile.write(imagepath, archivename)

        # Add & compress support files
        files_to_ignore = ['.DS_Store']  # nuisance from some os's
        for dirpath, dirnames, filenames in os.walk('.'):
            for filename in filenames:
                if filename in files_to_ignore:
                    continue
                templatefile = join(dirpath, filename)
                archivename = templatefile[2:]
                log.info('Saving: %s', archivename)
                docxfile.write(templatefile, archivename)

        log.info('Saved new file to: %r', output)
    finally:
        docxfile.close()
        os.chdir(prev_dir)  # restore previous working dir
    return
|
mikemaccana/python-docx
|
docx.py
|
pagebreak
|
python
|
def pagebreak(type='page', orient='portrait'):
'''Insert a break, default 'page'.
See http://openxmldeveloper.org/forums/thread/4075.aspx
Return our page break element.'''
# Need to enumerate different types of page breaks.
validtypes = ['page', 'section']
if type not in validtypes:
tmpl = 'Page break style "%s" not implemented. Valid styles: %s.'
raise ValueError(tmpl % (type, validtypes))
pagebreak = makeelement('p')
if type == 'page':
run = makeelement('r')
br = makeelement('br', attributes={'type': type})
run.append(br)
pagebreak.append(run)
elif type == 'section':
pPr = makeelement('pPr')
sectPr = makeelement('sectPr')
if orient == 'portrait':
pgSz = makeelement('pgSz', attributes={'w': '12240', 'h': '15840'})
elif orient == 'landscape':
pgSz = makeelement('pgSz', attributes={'h': '12240', 'w': '15840',
'orient': 'landscape'})
sectPr.append(pgSz)
pPr.append(sectPr)
pagebreak.append(pPr)
return pagebreak
|
Insert a break, default 'page'.
See http://openxmldeveloper.org/forums/thread/4075.aspx
Return our page break element.
|
train
|
https://github.com/mikemaccana/python-docx/blob/4c9b46dbebe3d2a9b82dbcd35af36584a36fd9fe/docx.py#L134-L160
|
[
"def makeelement(tagname, tagtext=None, nsprefix='w', attributes=None,\n attrnsprefix=None):\n '''Create an element & return it'''\n # Deal with list of nsprefix by making namespacemap\n namespacemap = None\n if isinstance(nsprefix, list):\n namespacemap = {}\n for prefix in nsprefix:\n namespacemap[prefix] = nsprefixes[prefix]\n # FIXME: rest of code below expects a single prefix\n nsprefix = nsprefix[0]\n if nsprefix:\n namespace = '{%s}' % nsprefixes[nsprefix]\n else:\n # For when namespace = None\n namespace = ''\n newelement = etree.Element(namespace+tagname, nsmap=namespacemap)\n # Add attributes with namespaces\n if attributes:\n # If they haven't bothered setting attribute namespace, use an empty\n # string (equivalent of no namespace)\n if not attrnsprefix:\n # Quick hack: it seems every element that has a 'w' nsprefix for\n # its tag uses the same prefix for it's attributes\n if nsprefix == 'w':\n attributenamespace = namespace\n else:\n attributenamespace = ''\n else:\n attributenamespace = '{'+nsprefixes[attrnsprefix]+'}'\n\n for tagattribute in attributes:\n newelement.set(attributenamespace+tagattribute,\n attributes[tagattribute])\n if tagtext:\n newelement.text = tagtext\n return newelement\n"
] |
# encoding: utf-8
"""
Open and modify Microsoft Word 2007 docx files (called 'OpenXML' and
'Office OpenXML' by Microsoft)
Part of Python's docx module - http://github.com/mikemaccana/python-docx
See LICENSE for licensing information.
"""
import os
import re
import time
import shutil
import zipfile
from lxml import etree
from os.path import abspath, basename, join
try:
from PIL import Image
except ImportError:
import Image
try:
from PIL.ExifTags import TAGS
except ImportError:
TAGS = {}
from exceptions import PendingDeprecationWarning
from warnings import warn
import logging
log = logging.getLogger(__name__)
# Record template directory's location which is just 'template' for a docx
# developer or 'site-packages/docx-template' if you have installed docx
template_dir = join(os.path.dirname(__file__), 'docx-template') # installed
if not os.path.isdir(template_dir):
template_dir = join(os.path.dirname(__file__), 'template') # dev
# All Word prefixes / namespace matches used in document.xml & core.xml.
# LXML doesn't actually use prefixes (just the real namespace) , but these
# make it easier to copy Word output more easily.
nsprefixes = {
'mo': 'http://schemas.microsoft.com/office/mac/office/2008/main',
'o': 'urn:schemas-microsoft-com:office:office',
've': 'http://schemas.openxmlformats.org/markup-compatibility/2006',
# Text Content
'w': 'http://schemas.openxmlformats.org/wordprocessingml/2006/main',
'w10': 'urn:schemas-microsoft-com:office:word',
'wne': 'http://schemas.microsoft.com/office/word/2006/wordml',
# Drawing
'a': 'http://schemas.openxmlformats.org/drawingml/2006/main',
'm': 'http://schemas.openxmlformats.org/officeDocument/2006/math',
'mv': 'urn:schemas-microsoft-com:mac:vml',
'pic': 'http://schemas.openxmlformats.org/drawingml/2006/picture',
'v': 'urn:schemas-microsoft-com:vml',
'wp': ('http://schemas.openxmlformats.org/drawingml/2006/wordprocessing'
'Drawing'),
# Properties (core and extended)
'cp': ('http://schemas.openxmlformats.org/package/2006/metadata/core-pr'
'operties'),
'dc': 'http://purl.org/dc/elements/1.1/',
'ep': ('http://schemas.openxmlformats.org/officeDocument/2006/extended-'
'properties'),
'xsi': 'http://www.w3.org/2001/XMLSchema-instance',
# Content Types
'ct': 'http://schemas.openxmlformats.org/package/2006/content-types',
# Package Relationships
'r': ('http://schemas.openxmlformats.org/officeDocument/2006/relationsh'
'ips'),
'pr': 'http://schemas.openxmlformats.org/package/2006/relationships',
# Dublin Core document properties
'dcmitype': 'http://purl.org/dc/dcmitype/',
'dcterms': 'http://purl.org/dc/terms/'}
def opendocx(file):
    """Open *file* (a docx package) and return its document XML tree."""
    # A .docx file is a zip archive; the main body is word/document.xml.
    archive = zipfile.ZipFile(file)
    xml_bytes = archive.read('word/document.xml')
    return etree.fromstring(xml_bytes)
def newdocument():
    """Return a minimal document tree: a w:document holding an empty w:body."""
    doc = makeelement('document')
    body = makeelement('body')
    doc.append(body)
    return doc
def makeelement(tagname, tagtext=None, nsprefix='w', attributes=None,
                attrnsprefix=None):
    """Create and return an lxml element.

    *nsprefix* selects the tag's namespace by prefix (see ``nsprefixes``);
    a list of prefixes registers them all in the nsmap and uses the first
    for the tag. *attributes* is a dict of attribute name -> value, and
    *attrnsprefix* optionally namespaces those attribute names.
    """
    nsmap = None
    if isinstance(nsprefix, list):
        nsmap = {}
        for prefix in nsprefix:
            nsmap[prefix] = nsprefixes[prefix]
        # FIXME: rest of code below expects a single prefix
        nsprefix = nsprefix[0]
    if nsprefix:
        namespace = '{%s}' % nsprefixes[nsprefix]
    else:
        # nsprefix=None means "no namespace at all"
        namespace = ''
    element = etree.Element(namespace + tagname, nsmap=nsmap)
    if attributes:
        # Attributes default to no namespace unless attrnsprefix is given.
        # Quick hack carried over from Word output: 'w'-prefixed tags
        # conventionally share their namespace with their attributes.
        if attrnsprefix:
            attrnamespace = '{' + nsprefixes[attrnsprefix] + '}'
        elif nsprefix == 'w':
            attrnamespace = namespace
        else:
            attrnamespace = ''
        for name in attributes:
            element.set(attrnamespace + name, attributes[name])
    if tagtext:
        element.text = tagtext
    return element
def paragraph(paratext, style='BodyText', breakbefore=False, jc='left'):
    """
    Return a new paragraph element containing *paratext*. The paragraph's
    default style is 'Body Text', but a new style may be set using the
    *style* parameter.
    @param string jc: Paragraph alignment, possible values:
                      left, center, right, both (justified), ...
                      see http://www.schemacentral.com/sc/ooxml/t-w_ST_Jc.html
                      for a full list
    If *paratext* is a list, add a run for each (text, char_format_str)
    2-tuple in the list. char_format_str is a string containing one or more
    of the characters 'b', 'i', or 'u', meaning bold, italic, and underline
    respectively. For example:
        paratext = [
            ('some bold text', 'b'),
            ('some normal text', ''),
            ('some italic underlined text', 'iu')
        ]
    """
    para = makeelement('p')
    # Normalise the input into a list of (text, char-format) pairs.
    if not isinstance(paratext, list):
        paratext = [(paratext, '')]
    pending_runs = []
    for item in paratext:
        if isinstance(item, (list, tuple)):
            text, char_fmt = item
        else:
            text, char_fmt = item, ''
        t_elm = makeelement('t', tagtext=text)
        # Keep leading/trailing whitespace from being collapsed by Word.
        if len(text.strip()) < len(text):
            t_elm.set('{http://www.w3.org/XML/1998/namespace}space',
                      'preserve')
        pending_runs.append((t_elm, char_fmt))
    # Paragraph-level properties: style and justification.
    props = makeelement('pPr')
    props.append(makeelement('pStyle', attributes={'val': style}))
    props.append(makeelement('jc', attributes={'val': jc}))
    para.append(props)
    # One run per text chunk, each with its own character formatting.
    for t_elm, char_fmt in pending_runs:
        run = makeelement('r')
        run_props = makeelement('rPr')
        if 'b' in char_fmt:
            run_props.append(makeelement('b'))
        if 'i' in char_fmt:
            run_props.append(makeelement('i'))
        if 'u' in char_fmt:
            run_props.append(makeelement('u', attributes={'val': 'single'}))
        run.append(run_props)
        # lastRenderedPageBreak lets assistive technologies (document
        # narrators) announce where a page break occurred.
        if breakbefore:
            run.append(makeelement('lastRenderedPageBreak'))
        run.append(t_elm)
        para.append(run)
    return para
def contenttypes():
    """Return the [Content_Types].xml tree for the docx package."""
    types = etree.fromstring(
        '<Types xmlns="http://schemas.openxmlformats.org/package/2006/conten'
        't-types"></Types>')
    # Explicit content-type overrides, one per package part.
    parts = {
        '/word/theme/theme1.xml': 'application/vnd.openxmlformats-officedocu'
                                  'ment.theme+xml',
        '/word/fontTable.xml': 'application/vnd.openxmlformats-officedocu'
                               'ment.wordprocessingml.fontTable+xml',
        '/docProps/core.xml': 'application/vnd.openxmlformats-package.co'
                              're-properties+xml',
        '/docProps/app.xml': 'application/vnd.openxmlformats-officedocu'
                             'ment.extended-properties+xml',
        '/word/document.xml': 'application/vnd.openxmlformats-officedocu'
                              'ment.wordprocessingml.document.main+xml',
        '/word/settings.xml': 'application/vnd.openxmlformats-officedocu'
                              'ment.wordprocessingml.settings+xml',
        '/word/numbering.xml': 'application/vnd.openxmlformats-officedocu'
                               'ment.wordprocessingml.numbering+xml',
        '/word/styles.xml': 'application/vnd.openxmlformats-officedocu'
                            'ment.wordprocessingml.styles+xml',
        '/word/webSettings.xml': 'application/vnd.openxmlformats-officedocu'
                                 'ment.wordprocessingml.webSettings+xml'}
    for partname in parts:
        types.append(makeelement('Override', nsprefix=None,
                                 attributes={'PartName': partname,
                                             'ContentType': parts[partname]}))
    # Default content types keyed on file extension (images, rels, xml).
    filetypes = {
        'gif': 'image/gif',
        'jpeg': 'image/jpeg',
        'jpg': 'image/jpeg',
        'png': 'image/png',
        'rels': 'application/vnd.openxmlformats-package.relationships+xml',
        'xml': 'application/xml'
    }
    for extension in filetypes:
        types.append(makeelement(
            'Default', nsprefix=None,
            attributes={'Extension': extension,
                        'ContentType': filetypes[extension]}))
    return types
def heading(headingtext, headinglevel, lang='en'):
    """Return a new heading paragraph element.

    @param str headingtext:  the heading's text
    @param int headinglevel: heading level, appended to the style name
    @param str lang:         selects the localized heading style name
                             ('en' -> 'Heading', 'it' -> 'Titolo'); any
                             unknown language falls back to the English
                             style name instead of raising KeyError.
    """
    lmap = {'en': 'Heading', 'it': 'Titolo'}
    # .get() with an English fallback: previously an unknown lang raised
    # KeyError here.
    stylename = lmap.get(lang, lmap['en']) + str(headinglevel)
    para = makeelement('p')
    pr = makeelement('pPr')
    pr.append(makeelement('pStyle', attributes={'val': stylename}))
    run = makeelement('r')
    run.append(makeelement('t', tagtext=headingtext))
    # Properties first, then the text run.
    para.append(pr)
    para.append(run)
    return para
def table(contents, heading=True, colw=None, cwunit='dxa', tblw=0,
          twunit='auto', borders={}, celstyle=None):
    """
    Return a table element based on specified parameters
    @param list contents: A list of lists describing contents. Every item in
                          the list can be a string or a valid XML element
                          itself. It can also be a list. In that case all the
                          listed elements will be merged into the cell.
    @param bool heading:  Tells whether first line should be treated as
                          heading or not
    @param list colw:     list of integer column widths specified in wunitS.
    @param str cwunit:    Unit used for column width:
                            'pct'  : fiftieths of a percent
                            'dxa'  : twentieths of a point
                            'nil'  : no width
                            'auto' : automagically determined
    @param int tblw:      Table width
    @param str twunit:    Unit used for table width. Same possible values as
                          cwunit.
    @param dict borders:  Dictionary defining table border. Supported keys
                          are: 'top', 'left', 'bottom', 'right',
                          'insideH', 'insideV', 'all'.
                          When specified, the 'all' key has precedence over
                          others. Each key must define a dict of border
                          attributes:
                            color : The color of the border, in hex or
                                    'auto'
                            space : The space, measured in points
                            sz    : The size of the border, in eighths of
                                    a point
                            val   : The style of the border, see
                   http://www.schemacentral.com/sc/ooxml/t-w_ST_Border.htm
    @param list celstyle: Specify the style for each colum, list of dicts.
                          supported keys:
                          'align' : specify the alignment, see paragraph
                                    documentation.
    @return lxml.etree:   Generated XML etree element
    """
    # NOTE(review): `borders={}` is a mutable default argument; it is only
    # read, never mutated, so this is benign — but callers must not mutate
    # the returned-by-default dict.
    table = makeelement('tbl')
    # Column count is taken from the first row; all rows are assumed to
    # have the same number of cells.
    columns = len(contents[0])
    # Table properties
    tableprops = makeelement('tblPr')
    tablestyle = makeelement('tblStyle', attributes={'val': ''})
    tableprops.append(tablestyle)
    tablewidth = makeelement(
        'tblW', attributes={'w': str(tblw), 'type': str(twunit)})
    tableprops.append(tablewidth)
    if len(borders.keys()):
        tableborders = makeelement('tblBorders')
        for b in ['top', 'left', 'bottom', 'right', 'insideH', 'insideV']:
            # 'all' overrides any individually-specified border.
            if b in borders.keys() or 'all' in borders.keys():
                k = 'all' if 'all' in borders.keys() else b
                attrs = {}
                for a in borders[k].keys():
                    # Python 2 `unicode`: attribute values must be text.
                    attrs[a] = unicode(borders[k][a])
                borderelem = makeelement(b, attributes=attrs)
                tableborders.append(borderelem)
        tableprops.append(tableborders)
    tablelook = makeelement('tblLook', attributes={'val': '0400'})
    tableprops.append(tablelook)
    table.append(tableprops)
    # Table Grid: one gridCol per column; 2390 dxa is the fallback width.
    tablegrid = makeelement('tblGrid')
    for i in range(columns):
        attrs = {'w': str(colw[i]) if colw else '2390'}
        tablegrid.append(makeelement('gridCol', attributes=attrs))
    table.append(tablegrid)
    # Heading Row
    row = makeelement('tr')
    rowprops = makeelement('trPr')
    cnfStyle = makeelement('cnfStyle', attributes={'val': '000000100000'})
    rowprops.append(cnfStyle)
    row.append(rowprops)
    if heading:
        i = 0
        # NOTE: the loop variable deliberately rebinds `heading`; after this
        # loop `heading` is the last cell of the first row (truthy for the
        # slice below as long as it is non-empty).
        for heading in contents[0]:
            cell = makeelement('tc')
            # Cell properties
            cellprops = makeelement('tcPr')
            if colw:
                wattr = {'w': str(colw[i]), 'type': cwunit}
            else:
                wattr = {'w': '0', 'type': 'auto'}
            cellwidth = makeelement('tcW', attributes=wattr)
            # Hard-coded heading shading (theme fill, 99 tint).
            cellstyle = makeelement('shd', attributes={'val': 'clear',
                                                       'color': 'auto',
                                                       'fill': 'FFFFFF',
                                                       'themeFill': 'text2',
                                                       'themeFillTint': '99'})
            cellprops.append(cellwidth)
            cellprops.append(cellstyle)
            cell.append(cellprops)
            # Paragraph (Content): strings become centered paragraphs,
            # ready-made elements are appended as-is.
            if not isinstance(heading, (list, tuple)):
                heading = [heading]
            for h in heading:
                if isinstance(h, etree._Element):
                    cell.append(h)
                else:
                    cell.append(paragraph(h, jc='center'))
            row.append(cell)
            i += 1
        table.append(row)
    # Contents Rows: skip row 0 when it was consumed as the heading.
    for contentrow in contents[1 if heading else 0:]:
        row = makeelement('tr')
        i = 0
        for content in contentrow:
            cell = makeelement('tc')
            # Properties
            cellprops = makeelement('tcPr')
            if colw:
                wattr = {'w': str(colw[i]), 'type': cwunit}
            else:
                wattr = {'w': '0', 'type': 'auto'}
            cellwidth = makeelement('tcW', attributes=wattr)
            cellprops.append(cellwidth)
            cell.append(cellprops)
            # Paragraph (Content)
            if not isinstance(content, (list, tuple)):
                content = [content]
            for c in content:
                if isinstance(c, etree._Element):
                    cell.append(c)
                else:
                    # Per-column alignment from celstyle, default left.
                    if celstyle and 'align' in celstyle[i].keys():
                        align = celstyle[i]['align']
                    else:
                        align = 'left'
                    cell.append(paragraph(c, jc=align))
            row.append(cell)
            i += 1
        table.append(row)
    return table
def picture(
        relationshiplist, picname, picdescription, pixelwidth=None,
        pixelheight=None, nochangeaspect=True, nochangearrowheads=True,
        imagefiledict=None):
    """
    Take a relationshiplist, picture file name, and return a paragraph
    containing the image and an updated relationshiplist.

    When *imagefiledict* is supplied, the image file path is tracked in
    that dict (mapping path -> relationship id) instead of being copied
    into the template directory; pass the same dict on to savedocx().
    Without it, the image is copied into the template's word/media
    directory (deprecated behavior — files accumulate there).

    Returns (relationshiplist, paragraph) or, when imagefiledict was
    given, (relationshiplist, paragraph, imagefiledict).
    """
    if imagefiledict is None:
        warn(
            'Using picture() without imagefiledict parameter will be depreca'
            'ted in the future.', PendingDeprecationWarning
        )

    # http://openxmldeveloper.org/articles/462.aspx
    # Create an image. Size may be specified, otherwise it will based on the
    # pixel size of image. Return a paragraph containing the picture
    # Set relationship ID to that of the image or the first available one
    picid = '2'
    picpath = abspath(picname)

    if imagefiledict is not None:
        # Keep track of the image files in a separate dictionary so they don't
        # need to be copied into the template directory
        if picpath not in imagefiledict:
            picrelid = 'rId' + str(len(relationshiplist) + 1)
            imagefiledict[picpath] = picrelid

            relationshiplist.append([
                'http://schemas.openxmlformats.org/officeDocument/2006/relat'
                'ionships/image',
                'media/%s_%s' % (picrelid, basename(picpath))
            ])
        else:
            # Image already registered: reuse its relationship id.
            picrelid = imagefiledict[picpath]
    else:
        # Copy files into template directory for backwards compatibility
        # Images still accumulate in the template directory this way
        picrelid = 'rId' + str(len(relationshiplist) + 1)

        relationshiplist.append([
            'http://schemas.openxmlformats.org/officeDocument/2006/relations'
            'hips/image', 'media/' + picname
        ])

        media_dir = join(template_dir, 'word', 'media')
        if not os.path.isdir(media_dir):
            os.mkdir(media_dir)
        shutil.copyfile(picname, join(media_dir, picname))

    image = Image.open(picpath)

    # Extract EXIF data, if available; fall back to no EXIF on any error.
    try:
        exif = image._getexif()
        exif = {} if exif is None else exif
    except:
        exif = {}

    imageExif = {}
    for tag, value in exif.items():
        imageExif[TAGS.get(tag, tag)] = value

    # Map the EXIF orientation (1-8) to a rotation angle plus flips so the
    # picture displays upright in Word.
    imageOrientation = imageExif.get('Orientation', 1)
    imageAngle = {
        1: 0, 2: 0, 3: 180, 4: 0, 5: 90, 6: 90, 7: 270, 8: 270
    }[imageOrientation]
    imageFlipH = 'true' if imageOrientation in (2, 5, 7) else 'false'
    imageFlipV = 'true' if imageOrientation == 4 else 'false'

    # Check if the user has specified a size
    if not pixelwidth or not pixelheight:
        # If not, get info from the picture itself
        pixelwidth, pixelheight = image.size[0:2]

    # Swap width and height if necessary (orientations that rotate 90/270)
    if imageOrientation in (5, 6, 7, 8):
        pixelwidth, pixelheight = pixelheight, pixelwidth

    # OpenXML measures on-screen objects in English Metric Units
    # 1cm = 36000 EMUs
    # NOTE(review): 12700 EMU is one *point*, so this treats each pixel as
    # 1pt — presumably intentional (96dpi-ish sizing); confirm before
    # changing.
    emuperpixel = 12700
    width = str(pixelwidth * emuperpixel)
    height = str(pixelheight * emuperpixel)

    # There are 3 main elements inside a picture
    # 1. The Blipfill - specifies how the image fills the picture area
    #    (stretch, tile, etc.)
    blipfill = makeelement('blipFill', nsprefix='pic')
    blipfill.append(makeelement('blip', nsprefix='a', attrnsprefix='r',
                    attributes={'embed': picrelid}))
    stretch = makeelement('stretch', nsprefix='a')
    stretch.append(makeelement('fillRect', nsprefix='a'))
    blipfill.append(makeelement('srcRect', nsprefix='a'))
    blipfill.append(stretch)

    # 2. The non visual picture properties
    nvpicpr = makeelement('nvPicPr', nsprefix='pic')
    cnvpr = makeelement(
        'cNvPr', nsprefix='pic',
        attributes={'id': '0', 'name': 'Picture 1', 'descr': picdescription}
    )
    nvpicpr.append(cnvpr)
    cnvpicpr = makeelement('cNvPicPr', nsprefix='pic')
    cnvpicpr.append(makeelement(
        'picLocks', nsprefix='a',
        attributes={'noChangeAspect': str(int(nochangeaspect)),
                    'noChangeArrowheads': str(int(nochangearrowheads))}))
    nvpicpr.append(cnvpicpr)

    # 3. The Shape properties (size, rotation, flips, rectangle geometry)
    sppr = makeelement('spPr', nsprefix='pic', attributes={'bwMode': 'auto'})
    xfrm = makeelement(
        'xfrm', nsprefix='a', attributes={
            'rot': str(imageAngle * 60000), 'flipH': imageFlipH,
            'flipV': imageFlipV
        }
    )
    xfrm.append(
        makeelement('off', nsprefix='a', attributes={'x': '0', 'y': '0'})
    )
    xfrm.append(
        makeelement(
            'ext', nsprefix='a', attributes={'cx': width, 'cy': height}
        )
    )
    prstgeom = makeelement(
        'prstGeom', nsprefix='a', attributes={'prst': 'rect'}
    )
    prstgeom.append(makeelement('avLst', nsprefix='a'))
    sppr.append(xfrm)
    sppr.append(prstgeom)

    # Add our 3 parts to the picture element
    pic = makeelement('pic', nsprefix='pic')
    pic.append(nvpicpr)
    pic.append(blipfill)
    pic.append(sppr)

    # Now make the supporting elements
    # The following sequence is just: make element, then add its children
    graphicdata = makeelement(
        'graphicData', nsprefix='a',
        attributes={'uri': ('http://schemas.openxmlformats.org/drawingml/200'
                            '6/picture')})
    graphicdata.append(pic)
    graphic = makeelement('graphic', nsprefix='a')
    graphic.append(graphicdata)

    framelocks = makeelement('graphicFrameLocks', nsprefix='a',
                             attributes={'noChangeAspect': '1'})
    framepr = makeelement('cNvGraphicFramePr', nsprefix='wp')
    framepr.append(framelocks)
    docpr = makeelement('docPr', nsprefix='wp',
                        attributes={'id': picid, 'name': 'Picture 1',
                                    'descr': picdescription})
    effectextent = makeelement('effectExtent', nsprefix='wp',
                               attributes={'l': '25400', 't': '0', 'r': '0',
                                           'b': '0'})
    extent = makeelement('extent', nsprefix='wp',
                         attributes={'cx': width, 'cy': height})
    inline = makeelement('inline', attributes={'distT': "0", 'distB': "0",
                                               'distL': "0", 'distR': "0"},
                         nsprefix='wp')
    inline.append(extent)
    inline.append(effectextent)
    inline.append(docpr)
    inline.append(framepr)
    inline.append(graphic)
    drawing = makeelement('drawing')
    drawing.append(inline)
    run = makeelement('r')
    run.append(drawing)
    paragraph = makeelement('p')
    paragraph.append(run)

    if imagefiledict is not None:
        return relationshiplist, paragraph, imagefiledict
    else:
        return relationshiplist, paragraph
def search(document, search):
    """Search a document for a regex; return True on a match, else False.

    @param document: lxml document tree to scan
    @param str search: regular expression pattern
    """
    searchre = re.compile(search)
    text_tag = '{%s}t' % nsprefixes['w']
    for element in document.iter():
        # Only w:t (text) elements carry document text.
        if element.tag == text_tag:
            if element.text and searchre.search(element.text):
                # Short-circuit: previously the scan continued over the
                # whole document after the first hit.
                return True
    return False
def replace(document, search, replace):
    """Replace every regex match of *search* with *replace* in the
    document's text elements; return the (mutated) document."""
    pattern = re.compile(search)
    text_tag = '{%s}t' % nsprefixes['w']
    for element in document.iter():
        # Only w:t (text) elements carry document text.
        if element.tag != text_tag:
            continue
        if element.text and pattern.search(element.text):
            element.text = re.sub(search, replace, element.text)
    return document
def clean(document):
    """Remove empty w:t and w:r elements from *document* and return it."""
    for tagname in ('t', 'r'):
        fulltag = '{%s}%s' % (nsprefixes['w'], tagname)
        # Collect first, remove afterwards: never mutate while iterating.
        doomed = [el for el in document.iter()
                  if el.tag == fulltag and not el.text and not len(el)]
        for el in doomed:
            el.getparent().remove(el)
    return document
def findTypeParent(element, tag):
    """Find the first ancestor of *element* with the given tag.

    @param object element: etree element
    @param string tag: the (fully qualified) parent tag to search for
    @return: the found ancestor element, or None when no ancestor matches
    """
    p = element
    while True:
        p = p.getparent()
        if p is None:
            # Reached the tree root without a match. (Previously this
            # raised AttributeError on None.tag; the old `return None`
            # after the loop was unreachable.)
            return None
        if p.tag == tag:
            return p
def AdvSearch(document, search, bs=3):
    '''Return set of all regex matches
    This is an advanced version of python-docx.search() that takes into
    account blocks of <bs> elements at a time.
    What it does:
    It searches the entire document body for text blocks.
    Since the text to search could be spawned across multiple text blocks,
    we need to adopt some sort of algorithm to handle this situation.
    The smaller matching group of blocks (up to bs) is then adopted.
    If the matching group has more than one block, blocks other than first
    are cleared and all the replacement text is put on first block.
    Examples:
    original text blocks : [ 'Hel', 'lo,', ' world!' ]
    search : 'Hello,'
    output blocks : [ 'Hello,' ]
    original text blocks : [ 'Hel', 'lo', ' __', 'name', '__!' ]
    search : '(__[a-z]+__)'
    output blocks : [ '__name__' ]
    @param instance document: The original document
    @param str search: The text to search for (regexp)
    append, or a list of etree elements
    @param int bs: See above
    @return set All occurences of search string
    '''
    # Compile the search regexp
    searchre = re.compile(search)
    matches = []
    # Will match against searchels. Searchels is a sliding window that
    # contains the last n text elements found in the document. 1 < n < bs
    searchels = []
    for element in document.iter():
        if element.tag == '{%s}t' % nsprefixes['w']:  # t (text) elements
            if element.text:
                # Add this element to searchels
                searchels.append(element)
                if len(searchels) > bs:
                    # If searchels is too long, remove the first element
                    # so the window never exceeds bs entries.
                    searchels.pop(0)
                # Search all combinations of searchels, starting from
                # smaller up to bigger ones
                # l = search length (number of consecutive elements)
                # s = search start (window offset)
                # e = element IDs to merge
                found = False
                for l in range(1, len(searchels)+1):
                    if found:
                        break
                    for s in range(len(searchels)):
                        if found:
                            break
                        if s+l <= len(searchels):
                            e = range(s, s+l)
                            # Concatenate the candidate elements' text and
                            # run the regex over the joined string.
                            txtsearch = ''
                            for k in e:
                                txtsearch += searchels[k].text
                            # Search for the text in the whole txtsearch
                            match = searchre.search(txtsearch)
                            if match:
                                matches.append(match.group())
                                found = True
    return set(matches)
def advReplace(document, search, replace, bs=3):
    """
    Replace all occurrences of string with a different string, return updated
    document
    This is a modified version of python-docx.replace() that takes into
    account blocks of <bs> elements at a time. The replace element can also
    be a string or an xml etree element.
    What it does:
    It searches the entire document body for text blocks.
    Then scan those text blocks for replace.
    Since the text to search could be spawned across multiple text blocks,
    we need to adopt some sort of algorithm to handle this situation.
    The smaller matching group of blocks (up to bs) is then adopted.
    If the matching group has more than one block, blocks other than first
    are cleared and all the replacement text is put on first block.
    Examples:
    original text blocks : [ 'Hel', 'lo,', ' world!' ]
    search / replace: 'Hello,' / 'Hi!'
    output blocks : [ 'Hi!', '', ' world!' ]
    original text blocks : [ 'Hel', 'lo,', ' world!' ]
    search / replace: 'Hello, world' / 'Hi!'
    output blocks : [ 'Hi!!', '', '' ]
    original text blocks : [ 'Hel', 'lo,', ' world!' ]
    search / replace: 'Hel' / 'Hal'
    output blocks : [ 'Hal', 'lo,', ' world!' ]
    @param instance document: The original document
    @param str search: The text to search for (regexp)
    @param mixed replace: The replacement text or lxml.etree element to
    append, or a list of etree elements
    @param int bs: See above
    @return instance The document with replacement applied
    """
    # Enables debug output
    DEBUG = False
    newdocument = document
    # Compile the search regexp
    searchre = re.compile(search)
    # Will match against searchels. Searchels is a sliding window that
    # contains the last n text elements found in the document. 1 < n < bs
    searchels = []
    for element in newdocument.iter():
        if element.tag == '{%s}t' % nsprefixes['w']:  # t (text) elements
            if element.text:
                # Add this element to searchels
                searchels.append(element)
                if len(searchels) > bs:
                    # If searchels is too long, drop the oldest element so
                    # the window never exceeds bs entries.
                    searchels.pop(0)
                # Search all combinations of searchels, starting from
                # smaller up to bigger ones
                # l = search length (number of consecutive elements)
                # s = search start (window offset)
                # e = element IDs to merge
                found = False
                for l in range(1, len(searchels)+1):
                    if found:
                        break
                    #print "slen:", l
                    for s in range(len(searchels)):
                        if found:
                            break
                        if s+l <= len(searchels):
                            e = range(s, s+l)
                            #print "elems:", e
                            txtsearch = ''
                            for k in e:
                                txtsearch += searchels[k].text
                            # Search for the text in the whole txtsearch
                            match = searchre.search(txtsearch)
                            if match:
                                found = True
                                # I've found something :)
                                if DEBUG:
                                    log.debug("Found element!")
                                    log.debug("Search regexp: %s",
                                              searchre.pattern)
                                    log.debug("Requested replacement: %s",
                                              replace)
                                    log.debug("Matched text: %s", txtsearch)
                                    log.debug("Matched text (splitted): %s",
                                              map(lambda i: i.text, searchels))
                                    log.debug("Matched at position: %s",
                                              match.start())
                                    log.debug("matched in elements: %s", e)
                                    if isinstance(replace, etree._Element):
                                        log.debug("Will replace with XML CODE")
                                    # BUGFIX: was `isinstance(replace(list,
                                    # tuple))`, which *called* replace
                                    # instead of testing its type and raised
                                    # TypeError whenever DEBUG was enabled.
                                    elif isinstance(replace, (list, tuple)):
                                        log.debug("Will replace with LIST OF"
                                                  " ELEMENTS")
                                    else:
                                        log.debug("Will replace with:",
                                                  re.sub(search, replace,
                                                         txtsearch))
                                curlen = 0
                                replaced = False
                                for i in e:
                                    curlen += len(searchels[i].text)
                                    if curlen > match.start() and not replaced:
                                        # The match occurred in THIS element.
                                        # Put in the whole replaced text
                                        if isinstance(replace, etree._Element):
                                            # Convert to a list and process
                                            # it later
                                            replace = [replace]
                                        if isinstance(replace, (list, tuple)):
                                            # I'm replacing with a list of
                                            # etree elements
                                            # clear the text in the tag and
                                            # append the element after the
                                            # parent paragraph
                                            # (because t elements cannot have
                                            # children)
                                            p = findTypeParent(
                                                searchels[i],
                                                '{%s}p' % nsprefixes['w'])
                                            searchels[i].text = re.sub(
                                                search, '', txtsearch)
                                            insindex = p.getparent().index(p)+1
                                            for r in replace:
                                                p.getparent().insert(
                                                    insindex, r)
                                                insindex += 1
                                        else:
                                            # Replacing with pure text
                                            searchels[i].text = re.sub(
                                                search, replace, txtsearch)
                                        replaced = True
                                        log.debug(
                                            "Replacing in element #: %s", i)
                                    else:
                                        # Clears the other text elements
                                        searchels[i].text = ''
    return newdocument
def getdocumenttext(document):
    '''Return the raw text of a document, as a list of paragraph strings.'''
    w_ns = nsprefixes['w']
    p_tag = '{' + w_ns + '}p'
    t_tag = '{' + w_ns + '}t'
    tab_tag = '{' + w_ns + '}tab'
    # Collect every paragraph (w:p) element in document order.
    paragraphs = [el for el in document.iter() if el.tag == p_tag]
    paratextlist = []
    for para in paragraphs:
        # A sentence may be split across several w:t runs; join them all,
        # rendering w:tab elements as tab characters.
        pieces = []
        for el in para.iter():
            if el.tag == t_tag:
                if el.text:
                    pieces.append(el.text)
            elif el.tag == tab_tag:
                pieces.append('\t')
        paratext = u''.join(pieces)
        # Skip paragraphs that contained no text at all.
        if paratext:
            paratextlist.append(paratext)
    return paratextlist
def coreproperties(title, subject, creator, keywords, lastmodifiedby=None):
    """
    Create core properties (common document properties referred to in the
    'Dublin Core' specification). See appproperties() for other stuff.
    *lastmodifiedby* defaults to *creator* when omitted.
    """
    props = makeelement('coreProperties', nsprefix='cp')
    props.append(makeelement('title', tagtext=title, nsprefix='dc'))
    props.append(makeelement('subject', tagtext=subject, nsprefix='dc'))
    props.append(makeelement('creator', tagtext=creator, nsprefix='dc'))
    props.append(makeelement('keywords', tagtext=','.join(keywords),
                             nsprefix='cp'))
    props.append(makeelement('lastModifiedBy',
                             tagtext=lastmodifiedby or creator,
                             nsprefix='cp'))
    props.append(makeelement('revision', tagtext='1', nsprefix='cp'))
    props.append(makeelement('category', tagtext='Examples', nsprefix='cp'))
    props.append(makeelement('description', tagtext='Examples',
                             nsprefix='dc'))
    currenttime = time.strftime('%Y-%m-%dT%H:%M:%SZ')
    # The created/modified elements need an attribute whose *name* lives in
    # one namespace while its *value* references another; makeelement can't
    # express that, so build these two from raw XML strings instead.
    for doctime in ('created', 'modified'):
        props.append(etree.fromstring(
            '<dcterms:%s xmlns:xsi="http://www.w3.org/2001/XMLSchema-instanc'
            'e" xmlns:dcterms="http://purl.org/dc/terms/" xsi:type="dcterms:'
            'W3CDTF">%s</dcterms:%s>' % (doctime, currenttime, doctime)))
    return props
def appproperties():
    """
    Create app-specific properties (docProps/app.xml). See docproperties()
    for more common document properties.

    The statistics below (Words, Pages, ...) are hard-coded template
    values, not computed from the document.
    """
    # Removed dead code: the result of an initial makeelement('Properties')
    # call was immediately overwritten by etree.fromstring below.
    appprops = etree.fromstring(
        '<?xml version="1.0" encoding="UTF-8" standalone="yes"?><Properties x'
        'mlns="http://schemas.openxmlformats.org/officeDocument/2006/extended'
        '-properties" xmlns:vt="http://schemas.openxmlformats.org/officeDocum'
        'ent/2006/docPropsVTypes"></Properties>')
    props = \
        {'Template': 'Normal.dotm',
         'TotalTime': '6',
         'Pages': '1',
         'Words': '83',
         'Characters': '475',
         'Application': 'Microsoft Word 12.0.0',
         'DocSecurity': '0',
         'Lines': '12',
         'Paragraphs': '8',
         'ScaleCrop': 'false',
         'LinksUpToDate': 'false',
         'CharactersWithSpaces': '583',
         'SharedDoc': 'false',
         'HyperlinksChanged': 'false',
         'AppVersion': '12.0000'}
    for prop in props:
        appprops.append(makeelement(prop, tagtext=props[prop], nsprefix=None))
    return appprops
def websettings():
    """Return a minimal word/webSettings.xml tree."""
    settings = makeelement('webSettings')
    for childtag in ('allowPNG', 'doNotSaveAsSingleFile'):
        settings.append(makeelement(childtag))
    return settings
def relationshiplist():
    """Return the default document relationships as [type-URI, target]
    pairs, in the order Word expects them."""
    base = ('http://schemas.openxmlformats.org/officeDocument/2006/'
            'relationships/')
    defaults = (
        ('numbering', 'numbering.xml'),
        ('styles', 'styles.xml'),
        ('settings', 'settings.xml'),
        ('webSettings', 'webSettings.xml'),
        ('fontTable', 'fontTable.xml'),
        ('theme', 'theme/theme1.xml'),
    )
    return [[base + reltype, target] for reltype, target in defaults]
def wordrelationships(relationshiplist):
    """Generate a Word relationships tree from [type, target] pairs."""
    # FIXME: using string hack instead of making element
    #relationships = makeelement('Relationships', nsprefix='pr')
    relationships = etree.fromstring(
        '<Relationships xmlns="http://schemas.openxmlformats.org/package/2006'
        '/relationships"></Relationships>')
    # Relationship IDs (rId) are 1-based and assigned in list order.
    for num, relationship in enumerate(relationshiplist, 1):
        relationships.append(makeelement(
            'Relationship', nsprefix=None,
            attributes={'Id': 'rId' + str(num),
                        'Type': relationship[0],
                        'Target': relationship[1]}))
    return relationships
def savedocx(
        document, coreprops, appprops, contenttypes, websettings,
        wordrelationships, output, imagefiledict=None):
    """
    Save a modified document.

    Serializes the six XML trees into *output* (a new zip/docx file),
    then adds every image tracked in *imagefiledict* and all support
    files from the template directory. NOTE: temporarily chdir()s into
    the template directory and restores the previous working directory
    before returning — not safe to call concurrently from threads.
    """
    if imagefiledict is None:
        warn(
            'Using savedocx() without imagefiledict parameter will be deprec'
            'ated in the future.', PendingDeprecationWarning
        )
    assert os.path.isdir(template_dir)
    docxfile = zipfile.ZipFile(
        output, mode='w', compression=zipfile.ZIP_DEFLATED)
    # Move to the template data path
    prev_dir = os.path.abspath('.')  # save previous working dir
    os.chdir(template_dir)
    # Serialize our trees into out zip file
    treesandfiles = {
        document: 'word/document.xml',
        coreprops: 'docProps/core.xml',
        appprops: 'docProps/app.xml',
        contenttypes: '[Content_Types].xml',
        websettings: 'word/webSettings.xml',
        wordrelationships: 'word/_rels/document.xml.rels'
    }
    for tree in treesandfiles:
        log.info('Saving: %s' % treesandfiles[tree])
        treestring = etree.tostring(tree, pretty_print=True)
        docxfile.writestr(treesandfiles[tree], treestring)
    # Add & compress images, if applicable; archive names embed the
    # relationship id assigned by picture().
    if imagefiledict is not None:
        for imagepath, picrelid in imagefiledict.items():
            archivename = 'word/media/%s_%s' % (picrelid, basename(imagepath))
            log.info('Saving: %s', archivename)
            docxfile.write(imagepath, archivename)
    # Add & compress support files
    files_to_ignore = ['.DS_Store']  # nuisance from some os's
    for dirpath, dirnames, filenames in os.walk('.'):
        for filename in filenames:
            if filename in files_to_ignore:
                continue
            templatefile = join(dirpath, filename)
            # Strip the leading './' so archive paths are package-relative.
            archivename = templatefile[2:]
            log.info('Saving: %s', archivename)
            docxfile.write(templatefile, archivename)
    log.info('Saved new file to: %r', output)
    docxfile.close()
    os.chdir(prev_dir)  # restore previous working dir
    return
|
mikemaccana/python-docx
|
docx.py
|
paragraph
|
python
|
def paragraph(paratext, style='BodyText', breakbefore=False, jc='left'):
# Make our elements
paragraph = makeelement('p')
if not isinstance(paratext, list):
paratext = [(paratext, '')]
text_tuples = []
for pt in paratext:
text, char_styles_str = (pt if isinstance(pt, (list, tuple))
else (pt, ''))
text_elm = makeelement('t', tagtext=text)
if len(text.strip()) < len(text):
text_elm.set('{http://www.w3.org/XML/1998/namespace}space',
'preserve')
text_tuples.append([text_elm, char_styles_str])
pPr = makeelement('pPr')
pStyle = makeelement('pStyle', attributes={'val': style})
pJc = makeelement('jc', attributes={'val': jc})
pPr.append(pStyle)
pPr.append(pJc)
# Add the text to the run, and the run to the paragraph
paragraph.append(pPr)
for text_elm, char_styles_str in text_tuples:
run = makeelement('r')
rPr = makeelement('rPr')
# Apply styles
if 'b' in char_styles_str:
b = makeelement('b')
rPr.append(b)
if 'i' in char_styles_str:
i = makeelement('i')
rPr.append(i)
if 'u' in char_styles_str:
u = makeelement('u', attributes={'val': 'single'})
rPr.append(u)
run.append(rPr)
# Insert lastRenderedPageBreak for assistive technologies like
# document narrators to know when a page break occurred.
if breakbefore:
lastRenderedPageBreak = makeelement('lastRenderedPageBreak')
run.append(lastRenderedPageBreak)
run.append(text_elm)
paragraph.append(run)
# Return the combined paragraph
return paragraph
|
Return a new paragraph element containing *paratext*. The paragraph's
default style is 'Body Text', but a new style may be set using the
*style* parameter.
@param string jc: Paragraph alignment, possible values:
left, center, right, both (justified), ...
see http://www.schemacentral.com/sc/ooxml/t-w_ST_Jc.html
for a full list
If *paratext* is a list, add a run for each (text, char_format_str)
2-tuple in the list. char_format_str is a string containing one or more
of the characters 'b', 'i', or 'u', meaning bold, italic, and underline
respectively. For example:
paratext = [
('some bold text', 'b'),
('some normal text', ''),
('some italic underlined text', 'iu')
]
|
train
|
https://github.com/mikemaccana/python-docx/blob/4c9b46dbebe3d2a9b82dbcd35af36584a36fd9fe/docx.py#L163-L229
|
[
"def makeelement(tagname, tagtext=None, nsprefix='w', attributes=None,\n attrnsprefix=None):\n '''Create an element & return it'''\n # Deal with list of nsprefix by making namespacemap\n namespacemap = None\n if isinstance(nsprefix, list):\n namespacemap = {}\n for prefix in nsprefix:\n namespacemap[prefix] = nsprefixes[prefix]\n # FIXME: rest of code below expects a single prefix\n nsprefix = nsprefix[0]\n if nsprefix:\n namespace = '{%s}' % nsprefixes[nsprefix]\n else:\n # For when namespace = None\n namespace = ''\n newelement = etree.Element(namespace+tagname, nsmap=namespacemap)\n # Add attributes with namespaces\n if attributes:\n # If they haven't bothered setting attribute namespace, use an empty\n # string (equivalent of no namespace)\n if not attrnsprefix:\n # Quick hack: it seems every element that has a 'w' nsprefix for\n # its tag uses the same prefix for it's attributes\n if nsprefix == 'w':\n attributenamespace = namespace\n else:\n attributenamespace = ''\n else:\n attributenamespace = '{'+nsprefixes[attrnsprefix]+'}'\n\n for tagattribute in attributes:\n newelement.set(attributenamespace+tagattribute,\n attributes[tagattribute])\n if tagtext:\n newelement.text = tagtext\n return newelement\n"
] |
# encoding: utf-8
"""
Open and modify Microsoft Word 2007 docx files (called 'OpenXML' and
'Office OpenXML' by Microsoft)
Part of Python's docx module - http://github.com/mikemaccana/python-docx
See LICENSE for licensing information.
"""
import os
import re
import time
import shutil
import zipfile
from lxml import etree
from os.path import abspath, basename, join
try:
from PIL import Image
except ImportError:
import Image
try:
from PIL.ExifTags import TAGS
except ImportError:
TAGS = {}
from exceptions import PendingDeprecationWarning
from warnings import warn
import logging
log = logging.getLogger(__name__)
# Record template directory's location which is just 'template' for a docx
# developer or 'site-packages/docx-template' if you have installed docx.
# The installed location is preferred; the dev location is the fallback.
template_dir = join(os.path.dirname(__file__), 'docx-template')  # installed
if not os.path.isdir(template_dir):
    template_dir = join(os.path.dirname(__file__), 'template')  # dev

# All Word prefixes / namespace matches used in document.xml & core.xml.
# LXML doesn't actually use prefixes (just the real namespace), but these
# make it easier to copy Word output more easily.  Every makeelement() call
# below resolves its nsprefix argument through this table.
nsprefixes = {
    'mo': 'http://schemas.microsoft.com/office/mac/office/2008/main',
    'o': 'urn:schemas-microsoft-com:office:office',
    've': 'http://schemas.openxmlformats.org/markup-compatibility/2006',
    # Text Content
    'w': 'http://schemas.openxmlformats.org/wordprocessingml/2006/main',
    'w10': 'urn:schemas-microsoft-com:office:word',
    'wne': 'http://schemas.microsoft.com/office/word/2006/wordml',
    # Drawing
    'a': 'http://schemas.openxmlformats.org/drawingml/2006/main',
    'm': 'http://schemas.openxmlformats.org/officeDocument/2006/math',
    'mv': 'urn:schemas-microsoft-com:mac:vml',
    'pic': 'http://schemas.openxmlformats.org/drawingml/2006/picture',
    'v': 'urn:schemas-microsoft-com:vml',
    'wp': ('http://schemas.openxmlformats.org/drawingml/2006/wordprocessing'
           'Drawing'),
    # Properties (core and extended)
    'cp': ('http://schemas.openxmlformats.org/package/2006/metadata/core-pr'
           'operties'),
    'dc': 'http://purl.org/dc/elements/1.1/',
    'ep': ('http://schemas.openxmlformats.org/officeDocument/2006/extended-'
           'properties'),
    'xsi': 'http://www.w3.org/2001/XMLSchema-instance',
    # Content Types
    'ct': 'http://schemas.openxmlformats.org/package/2006/content-types',
    # Package Relationships
    'r': ('http://schemas.openxmlformats.org/officeDocument/2006/relationsh'
          'ips'),
    'pr': 'http://schemas.openxmlformats.org/package/2006/relationships',
    # Dublin Core document properties
    'dcmitype': 'http://purl.org/dc/dcmitype/',
    'dcterms': 'http://purl.org/dc/terms/'}
def opendocx(file):
    '''Open a docx file, return a document XML tree.

    *file* may be a path or a file-like object accepted by
    zipfile.ZipFile.  The archive is now closed before returning; the
    previous version leaked the open ZipFile handle.
    '''
    mydoc = zipfile.ZipFile(file)
    try:
        xmlcontent = mydoc.read('word/document.xml')
    finally:
        mydoc.close()
    return etree.fromstring(xmlcontent)
def newdocument():
    '''Return a fresh, empty document element containing one body.'''
    body = makeelement('body')
    doc = makeelement('document')
    doc.append(body)
    return doc
def makeelement(tagname, tagtext=None, nsprefix='w', attributes=None,
                attrnsprefix=None):
    '''Create an lxml element named *tagname* and return it.

    *nsprefix* is a key (or list of keys) into the module-level
    nsprefixes table; when a list is given, all prefixes go into the
    element's nsmap but only the first qualifies the tag itself.
    *attributes* is a dict of attribute name -> value; *attrnsprefix*
    optionally namespaces those attribute names.  *tagtext* becomes the
    element's text content.
    '''
    nsmap = None
    if isinstance(nsprefix, list):
        nsmap = dict((prefix, nsprefixes[prefix]) for prefix in nsprefix)
        # FIXME: the rest of this function expects a single prefix
        nsprefix = nsprefix[0]

    namespace = '{%s}' % nsprefixes[nsprefix] if nsprefix else ''
    element = etree.Element(namespace + tagname, nsmap=nsmap)

    if attributes:
        if attrnsprefix:
            attr_ns = '{' + nsprefixes[attrnsprefix] + '}'
        elif nsprefix == 'w':
            # Quick hack: every element with a 'w'-prefixed tag seems to
            # use the same prefix for its attributes.
            attr_ns = namespace
        else:
            # No attribute namespace requested -> plain attribute names.
            attr_ns = ''
        for attr_name, attr_value in attributes.items():
            element.set(attr_ns + attr_name, attr_value)

    if tagtext:
        element.text = tagtext
    return element
def pagebreak(type='page', orient='portrait'):
    '''Insert a break, default 'page'.

    See http://openxmldeveloper.org/forums/thread/4075.aspx
    Return our page break element.  *orient* only matters for 'section'
    breaks ('portrait' or 'landscape').
    '''
    # Need to enumerate different types of page breaks.
    if type not in ('page', 'section'):
        raise ValueError(
            'Page break style "%s" not implemented. Valid styles: %s.'
            % (type, ['page', 'section']))

    brk = makeelement('p')
    if type == 'page':
        run = makeelement('r')
        run.append(makeelement('br', attributes={'type': type}))
        brk.append(run)
    else:  # type == 'section'
        pPr = makeelement('pPr')
        sectPr = makeelement('sectPr')
        if orient == 'portrait':
            dims = {'w': '12240', 'h': '15840'}
        elif orient == 'landscape':
            dims = {'h': '12240', 'w': '15840', 'orient': 'landscape'}
        sectPr.append(makeelement('pgSz', attributes=dims))
        pPr.append(sectPr)
        brk.append(pPr)
    return brk
def contenttypes():
    '''Return a [Content_Types].xml tree declaring every package part.'''
    types = etree.fromstring(
        '<Types xmlns="http://schemas.openxmlformats.org/package/2006/conten'
        't-types"></Types>')

    # Explicit per-part content-type overrides.
    overrides = {
        '/word/theme/theme1.xml': 'application/vnd.openxmlformats-officedocu'
                                  'ment.theme+xml',
        '/word/fontTable.xml': 'application/vnd.openxmlformats-officedocu'
                               'ment.wordprocessingml.fontTable+xml',
        '/docProps/core.xml': 'application/vnd.openxmlformats-package.co'
                              're-properties+xml',
        '/docProps/app.xml': 'application/vnd.openxmlformats-officedocu'
                             'ment.extended-properties+xml',
        '/word/document.xml': 'application/vnd.openxmlformats-officedocu'
                              'ment.wordprocessingml.document.main+xml',
        '/word/settings.xml': 'application/vnd.openxmlformats-officedocu'
                              'ment.wordprocessingml.settings+xml',
        '/word/numbering.xml': 'application/vnd.openxmlformats-officedocu'
                               'ment.wordprocessingml.numbering+xml',
        '/word/styles.xml': 'application/vnd.openxmlformats-officedocu'
                            'ment.wordprocessingml.styles+xml',
        '/word/webSettings.xml': 'application/vnd.openxmlformats-officedocu'
                                 'ment.wordprocessingml.webSettings+xml'}
    for partname, contenttype in overrides.items():
        types.append(makeelement('Override', nsprefix=None,
                                 attributes={'PartName': partname,
                                             'ContentType': contenttype}))

    # Default content types keyed on file extension.
    defaults = {
        'gif': 'image/gif',
        'jpeg': 'image/jpeg',
        'jpg': 'image/jpeg',
        'png': 'image/png',
        'rels': 'application/vnd.openxmlformats-package.relationships+xml',
        'xml': 'application/xml',
    }
    for extension, contenttype in defaults.items():
        types.append(makeelement('Default', nsprefix=None,
                                 attributes={'Extension': extension,
                                             'ContentType': contenttype}))
    return types
def heading(headingtext, headinglevel, lang='en'):
    '''Return a heading paragraph for *headingtext* at *headinglevel*.

    *lang* selects the localized style-name prefix ('en' or 'it');
    an unknown value raises KeyError.
    '''
    style_prefix = {'en': 'Heading', 'it': 'Titolo'}[lang]
    # Build the paragraph: properties (style) first, then the text run.
    para = makeelement('p')
    props = makeelement('pPr')
    props.append(makeelement(
        'pStyle', attributes={'val': style_prefix + str(headinglevel)}))
    run = makeelement('r')
    run.append(makeelement('t', tagtext=headingtext))
    para.append(props)
    para.append(run)
    return para
def table(contents, heading=True, colw=None, cwunit='dxa', tblw=0,
          twunit='auto', borders=None, celstyle=None):
    """
    Return a table element based on specified parameters

    @param list contents: A list of lists describing contents. Every item in
                          the list can be a string or a valid XML element
                          itself. It can also be a list. In that case all the
                          listed elements will be merged into the cell.
    @param bool heading:  Tells whether first line should be treated as
                          heading or not
    @param list colw:     list of integer column widths specified in wunitS.
    @param str cwunit:    Unit used for column width:
                            'pct'  : fiftieths of a percent
                            'dxa'  : twentieths of a point
                            'nil'  : no width
                            'auto' : automagically determined
    @param int tblw:      Table width
    @param str twunit:    Unit used for table width. Same possible values as
                          cwunit.
    @param dict borders:  Dictionary defining table border. Supported keys
                          are: 'top', 'left', 'bottom', 'right', 'insideH',
                          'insideV', 'all'. When specified, the 'all' key
                          has precedence over others. Each key must define
                          a dict of border attributes:
                            color : The color of the border, in hex or 'auto'
                            space : The space, measured in points
                            sz    : The size of the border, in eighths of
                                    a point
                            val   : The style of the border, see
                http://www.schemacentral.com/sc/ooxml/t-w_ST_Border.htm
                          Defaults to no borders (None).
    @param list celstyle: Specify the style for each colum, list of dicts.
                          supported keys:
                          'align' : specify the alignment, see paragraph
                                    documentation.
    @return lxml.etree:   Generated XML etree element
    """
    # borders used to default to a shared mutable {}; None is safer and
    # behaves identically (it was only ever read, never mutated).
    if borders is None:
        borders = {}
    table = makeelement('tbl')
    columns = len(contents[0])
    # Table properties
    tableprops = makeelement('tblPr')
    tablestyle = makeelement('tblStyle', attributes={'val': ''})
    tableprops.append(tablestyle)
    tablewidth = makeelement(
        'tblW', attributes={'w': str(tblw), 'type': str(twunit)})
    tableprops.append(tablewidth)
    if len(borders.keys()):
        tableborders = makeelement('tblBorders')
        for b in ['top', 'left', 'bottom', 'right', 'insideH', 'insideV']:
            if b in borders.keys() or 'all' in borders.keys():
                # 'all' has precedence over the individual edge keys.
                k = 'all' if 'all' in borders.keys() else b
                attrs = {}
                for a in borders[k].keys():
                    attrs[a] = unicode(borders[k][a])
                borderelem = makeelement(b, attributes=attrs)
                tableborders.append(borderelem)
        tableprops.append(tableborders)
    tablelook = makeelement('tblLook', attributes={'val': '0400'})
    tableprops.append(tablelook)
    table.append(tableprops)
    # Table Grid
    tablegrid = makeelement('tblGrid')
    for i in range(columns):
        attrs = {'w': str(colw[i]) if colw else '2390'}
        tablegrid.append(makeelement('gridCol', attributes=attrs))
    table.append(tablegrid)
    # Heading Row
    row = makeelement('tr')
    rowprops = makeelement('trPr')
    cnfStyle = makeelement('cnfStyle', attributes={'val': '000000100000'})
    rowprops.append(cnfStyle)
    row.append(rowprops)
    if heading:
        i = 0
        # BUG FIX: this loop variable used to be named `heading`, shadowing
        # the boolean parameter; the body-row slice below then consulted the
        # LAST heading cell instead of the flag, so a falsy last cell (e.g.
        # '') duplicated the heading row into the table body.
        for heading_cell in contents[0]:
            cell = makeelement('tc')
            # Cell properties
            cellprops = makeelement('tcPr')
            if colw:
                wattr = {'w': str(colw[i]), 'type': cwunit}
            else:
                wattr = {'w': '0', 'type': 'auto'}
            cellwidth = makeelement('tcW', attributes=wattr)
            cellstyle = makeelement('shd', attributes={'val': 'clear',
                                                       'color': 'auto',
                                                       'fill': 'FFFFFF',
                                                       'themeFill': 'text2',
                                                       'themeFillTint': '99'})
            cellprops.append(cellwidth)
            cellprops.append(cellstyle)
            cell.append(cellprops)
            # Paragraph (Content)
            if not isinstance(heading_cell, (list, tuple)):
                heading_cell = [heading_cell]
            for h in heading_cell:
                if isinstance(h, etree._Element):
                    cell.append(h)
                else:
                    cell.append(paragraph(h, jc='center'))
            row.append(cell)
            i += 1
        table.append(row)
    # Contents Rows
    for contentrow in contents[1 if heading else 0:]:
        row = makeelement('tr')
        i = 0
        for content in contentrow:
            cell = makeelement('tc')
            # Properties
            cellprops = makeelement('tcPr')
            if colw:
                wattr = {'w': str(colw[i]), 'type': cwunit}
            else:
                wattr = {'w': '0', 'type': 'auto'}
            cellwidth = makeelement('tcW', attributes=wattr)
            cellprops.append(cellwidth)
            cell.append(cellprops)
            # Paragraph (Content)
            if not isinstance(content, (list, tuple)):
                content = [content]
            for c in content:
                if isinstance(c, etree._Element):
                    cell.append(c)
                else:
                    if celstyle and 'align' in celstyle[i].keys():
                        align = celstyle[i]['align']
                    else:
                        align = 'left'
                    cell.append(paragraph(c, jc=align))
            row.append(cell)
            i += 1
        table.append(row)
    return table
def picture(
        relationshiplist, picname, picdescription, pixelwidth=None,
        pixelheight=None, nochangeaspect=True, nochangearrowheads=True,
        imagefiledict=None):
    """
    Take a relationshiplist, picture file name, and return a paragraph
    containing the image and an updated relationshiplist.

    @param list relationshiplist: existing relationships; an image
        relationship is appended when the image is first seen.
    @param str picname: path of the image file to embed.
    @param str picdescription: description / alt text for the picture.
    @param int pixelwidth: display width in pixels (defaults to the
        image's own width).
    @param int pixelheight: display height in pixels (defaults to the
        image's own height).
    @param bool nochangeaspect: lock the aspect ratio in Word.
    @param bool nochangearrowheads: lock arrowhead edits in Word.
    @param dict imagefiledict: maps absolute image paths to relationship
        ids; when provided, images are tracked here instead of being
        copied into the template directory (the legacy behavior).
    @return: (relationshiplist, paragraph) — or, when imagefiledict was
        supplied, (relationshiplist, paragraph, imagefiledict).
    """
    if imagefiledict is None:
        warn(
            'Using picture() without imagefiledict parameter will be depreca'
            'ted in the future.', PendingDeprecationWarning
        )

    # http://openxmldeveloper.org/articles/462.aspx
    # Create an image. Size may be specified, otherwise it will based on the
    # pixel size of image. Return a paragraph containing the picture
    # Set relationship ID to that of the image or the first available one
    picid = '2'
    picpath = abspath(picname)

    if imagefiledict is not None:
        # Keep track of the image files in a separate dictionary so they don't
        # need to be copied into the template directory
        if picpath not in imagefiledict:
            picrelid = 'rId' + str(len(relationshiplist) + 1)
            imagefiledict[picpath] = picrelid

            relationshiplist.append([
                'http://schemas.openxmlformats.org/officeDocument/2006/relat'
                'ionships/image',
                'media/%s_%s' % (picrelid, basename(picpath))
            ])
        else:
            picrelid = imagefiledict[picpath]
    else:
        # Copy files into template directory for backwards compatibility.
        # Images still accumulate in the template directory this way.
        picrelid = 'rId' + str(len(relationshiplist) + 1)

        relationshiplist.append([
            'http://schemas.openxmlformats.org/officeDocument/2006/relations'
            'hips/image', 'media/' + picname
        ])

        media_dir = join(template_dir, 'word', 'media')
        if not os.path.isdir(media_dir):
            os.mkdir(media_dir)
        shutil.copyfile(picname, join(media_dir, picname))

    image = Image.open(picpath)

    # Extract EXIF data, if available
    try:
        exif = image._getexif()
        exif = {} if exif is None else exif
    except:
        # NOTE(review): bare except deliberately treats any EXIF failure
        # as "no EXIF data"; narrowing it could change behavior for
        # exotic image formats — confirm before tightening.
        exif = {}

    imageExif = {}
    for tag, value in exif.items():
        imageExif[TAGS.get(tag, tag)] = value

    # Translate the 8 EXIF orientation codes into a rotation angle plus
    # horizontal/vertical flips (as Word-attribute strings).
    imageOrientation = imageExif.get('Orientation', 1)
    imageAngle = {
        1: 0, 2: 0, 3: 180, 4: 0, 5: 90, 6: 90, 7: 270, 8: 270
    }[imageOrientation]
    imageFlipH = 'true' if imageOrientation in (2, 5, 7) else 'false'
    imageFlipV = 'true' if imageOrientation == 4 else 'false'

    # Check if the user has specified a size
    if not pixelwidth or not pixelheight:
        # If not, get info from the picture itself
        pixelwidth, pixelheight = image.size[0:2]

    # Swap width and height if necessary (rotated orientations)
    if imageOrientation in (5, 6, 7, 8):
        pixelwidth, pixelheight = pixelheight, pixelwidth

    # OpenXML measures on-screen objects in English Metric Units
    # 1cm = 36000 EMUs
    emuperpixel = 12700
    width = str(pixelwidth * emuperpixel)
    height = str(pixelheight * emuperpixel)

    # There are 3 main elements inside a picture
    # 1. The Blipfill - specifies how the image fills the picture area
    #    (stretch, tile, etc.)
    blipfill = makeelement('blipFill', nsprefix='pic')
    blipfill.append(makeelement('blip', nsprefix='a', attrnsprefix='r',
                    attributes={'embed': picrelid}))
    stretch = makeelement('stretch', nsprefix='a')
    stretch.append(makeelement('fillRect', nsprefix='a'))
    blipfill.append(makeelement('srcRect', nsprefix='a'))
    blipfill.append(stretch)

    # 2. The non visual picture properties
    nvpicpr = makeelement('nvPicPr', nsprefix='pic')
    cnvpr = makeelement(
        'cNvPr', nsprefix='pic',
        attributes={'id': '0', 'name': 'Picture 1', 'descr': picdescription}
    )
    nvpicpr.append(cnvpr)
    cnvpicpr = makeelement('cNvPicPr', nsprefix='pic')
    cnvpicpr.append(makeelement(
        'picLocks', nsprefix='a',
        attributes={'noChangeAspect': str(int(nochangeaspect)),
                    'noChangeArrowheads': str(int(nochangearrowheads))}))
    nvpicpr.append(cnvpicpr)

    # 3. The Shape properties (rotation/flip from EXIF, offset, extent)
    sppr = makeelement('spPr', nsprefix='pic', attributes={'bwMode': 'auto'})
    xfrm = makeelement(
        'xfrm', nsprefix='a', attributes={
            'rot': str(imageAngle * 60000), 'flipH': imageFlipH,
            'flipV': imageFlipV
        }
    )
    xfrm.append(
        makeelement('off', nsprefix='a', attributes={'x': '0', 'y': '0'})
    )
    xfrm.append(
        makeelement(
            'ext', nsprefix='a', attributes={'cx': width, 'cy': height}
        )
    )
    prstgeom = makeelement(
        'prstGeom', nsprefix='a', attributes={'prst': 'rect'}
    )
    prstgeom.append(makeelement('avLst', nsprefix='a'))
    sppr.append(xfrm)
    sppr.append(prstgeom)

    # Add our 3 parts to the picture element
    pic = makeelement('pic', nsprefix='pic')
    pic.append(nvpicpr)
    pic.append(blipfill)
    pic.append(sppr)

    # Now make the supporting elements
    # The following sequence is just: make element, then add its children
    graphicdata = makeelement(
        'graphicData', nsprefix='a',
        attributes={'uri': ('http://schemas.openxmlformats.org/drawingml/200'
                            '6/picture')})
    graphicdata.append(pic)
    graphic = makeelement('graphic', nsprefix='a')
    graphic.append(graphicdata)

    framelocks = makeelement('graphicFrameLocks', nsprefix='a',
                             attributes={'noChangeAspect': '1'})
    framepr = makeelement('cNvGraphicFramePr', nsprefix='wp')
    framepr.append(framelocks)
    docpr = makeelement('docPr', nsprefix='wp',
                        attributes={'id': picid, 'name': 'Picture 1',
                                    'descr': picdescription})
    effectextent = makeelement('effectExtent', nsprefix='wp',
                               attributes={'l': '25400', 't': '0', 'r': '0',
                                           'b': '0'})
    extent = makeelement('extent', nsprefix='wp',
                         attributes={'cx': width, 'cy': height})
    inline = makeelement('inline', attributes={'distT': "0", 'distB': "0",
                                               'distL': "0", 'distR': "0"},
                         nsprefix='wp')
    inline.append(extent)
    inline.append(effectextent)
    inline.append(docpr)
    inline.append(framepr)
    inline.append(graphic)
    drawing = makeelement('drawing')
    drawing.append(inline)
    run = makeelement('r')
    run.append(drawing)
    paragraph = makeelement('p')
    paragraph.append(run)

    if imagefiledict is not None:
        return relationshiplist, paragraph, imagefiledict
    else:
        return relationshiplist, paragraph
def search(document, search):
    '''Search a document for a regex, return success / fail result.

    @param document: an lxml document tree (as returned by opendocx)
    @param str search: regular expression to look for in text elements
    @return bool: True as soon as any w:t element matches, else False.
    '''
    searchre = re.compile(search)
    t_tag = '{%s}t' % nsprefixes['w']  # hoisted loop invariant
    for element in document.iter():
        if element.tag == t_tag and element.text:
            if searchre.search(element.text):
                # Stop at the first hit instead of scanning the rest of
                # the tree (the old version kept iterating needlessly).
                return True
    return False
def replace(document, search, replace):
    """
    Replace all occurences of string with a different string, return updated
    document
    """
    pattern = re.compile(search)
    text_tag = '{%s}t' % nsprefixes['w']
    for element in document.iter():
        # Only w:t (text) elements with content are candidates.
        if element.tag != text_tag or not element.text:
            continue
        if pattern.search(element.text):
            element.text = pattern.sub(replace, element.text)
    return document
def clean(document):
    """ Perform misc cleaning operations on documents.
    Returns cleaned document.
    """
    # Remove empty w:t and w:r elements (no text, no children).
    for tagname in ('t', 'r'):
        full_tag = '{%s}%s' % (nsprefixes['w'], tagname)
        doomed = [element for element in document.iter()
                  if element.tag == full_tag
                  and not element.text and not len(element)]
        # Collect first, then remove, so we never mutate while iterating.
        for element in doomed:
            element.getparent().remove(element)
    return document
def findTypeParent(element, tag):
    """ Finds first parent of element of the given type

    @param object element: etree element
    @param string tag: the tag parent to search for

    @return object element: the found parent or None when not found
    """
    # BUG FIX: the old `while True` loop called p.getparent() past the
    # root, so "not found" raised AttributeError (None.tag) instead of
    # returning None as documented.
    p = element.getparent()
    while p is not None:
        if p.tag == tag:
            return p
        p = p.getparent()
    # Not found
    return None
def AdvSearch(document, search, bs=3):
    '''Return set of all regex matches

    This is an advanced version of python-docx.search() that takes into
    account blocks of <bs> elements at a time.

    What it does:
    It searches the entire document body for text blocks.
    Since the text to search could be spawned across multiple text blocks,
    we need to adopt some sort of algorithm to handle this situation.
    The smaller matching group of blocks (up to bs) is then adopted.
    If the matching group has more than one block, blocks other than first
    are cleared and all the replacement text is put on first block.

    Examples:
    original text blocks : [ 'Hel', 'lo,', ' world!' ]
    search : 'Hello,'
    output blocks : [ 'Hello,' ]

    original text blocks : [ 'Hel', 'lo', ' __', 'name', '__!' ]
    search : '(__[a-z]+__)'
    output blocks : [ '__name__' ]

    @param instance document: The original document
    @param str search: The text to search for (regexp)
    @param int bs: See above
    @return set All occurences of search string
    '''
    # Compile the search regexp
    searchre = re.compile(search)

    matches = []

    # Sliding window: searchels holds the last n text elements found in
    # the document, 1 < n < bs.
    searchels = []

    for element in document.iter():
        if element.tag == '{%s}t' % nsprefixes['w']:  # t (text) elements
            if element.text:
                # Add this element to searchels
                searchels.append(element)
                if len(searchels) > bs:
                    # If searchels is too long, remove first elements
                    searchels.pop(0)

                # Search all contiguous sub-runs of searchels, starting
                # from smaller up to bigger ones:
                # l = run length
                # s = run start
                # e = element IDs to merge
                found = False
                for l in range(1, len(searchels)+1):
                    if found:
                        break
                    for s in range(len(searchels)):
                        if found:
                            break
                        if s+l <= len(searchels):
                            e = range(s, s+l)
                            txtsearch = ''
                            for k in e:
                                txtsearch += searchels[k].text

                            # Search for the regex in the concatenated text
                            match = searchre.search(txtsearch)
                            if match:
                                matches.append(match.group())
                                found = True
    return set(matches)
def advReplace(document, search, replace, bs=3):
    """
    Replace all occurences of string with a different string, return updated
    document

    This is a modified version of python-docx.replace() that takes into
    account blocks of <bs> elements at a time. The replace element can also
    be a string or an xml etree element.

    What it does:
    It searches the entire document body for text blocks.
    Then scan thos text blocks for replace.
    Since the text to search could be spawned across multiple text blocks,
    we need to adopt some sort of algorithm to handle this situation.
    The smaller matching group of blocks (up to bs) is then adopted.
    If the matching group has more than one block, blocks other than first
    are cleared and all the replacement text is put on first block.

    Examples:
    original text blocks : [ 'Hel', 'lo,', ' world!' ]
    search / replace: 'Hello,' / 'Hi!'
    output blocks : [ 'Hi!', '', ' world!' ]

    original text blocks : [ 'Hel', 'lo,', ' world!' ]
    search / replace: 'Hello, world' / 'Hi!'
    output blocks : [ 'Hi!!', '', '' ]

    original text blocks : [ 'Hel', 'lo,', ' world!' ]
    search / replace: 'Hel' / 'Hal'
    output blocks : [ 'Hal', 'lo,', ' world!' ]

    @param instance document: The original document
    @param str search: The text to search for (regexp)
    @param mixed replace: The replacement text or lxml.etree element to
                          append, or a list of etree elements
    @param int bs: See above
    @return instance The document with replacement applied
    """
    # Enables debug output
    DEBUG = False

    newdocument = document

    # Compile the search regexp
    searchre = re.compile(search)

    # Sliding window: searchels holds the last n text elements found in
    # the document, 1 < n < bs.
    searchels = []

    for element in newdocument.iter():
        if element.tag == '{%s}t' % nsprefixes['w']:  # t (text) elements
            if element.text:
                # Add this element to searchels
                searchels.append(element)
                if len(searchels) > bs:
                    # If searchels is too long, remove first elements
                    searchels.pop(0)

                # Search all contiguous sub-runs of searchels, from
                # smaller up to bigger ones:
                # l = run length
                # s = run start
                # e = element IDs to merge
                found = False
                for l in range(1, len(searchels)+1):
                    if found:
                        break
                    for s in range(len(searchels)):
                        if found:
                            break
                        if s+l <= len(searchels):
                            e = range(s, s+l)
                            txtsearch = ''
                            for k in e:
                                txtsearch += searchels[k].text

                            # Search for the text in the whole txtsearch
                            match = searchre.search(txtsearch)
                            if match:
                                found = True

                                # I've found something :)
                                if DEBUG:
                                    log.debug("Found element!")
                                    log.debug("Search regexp: %s",
                                              searchre.pattern)
                                    log.debug("Requested replacement: %s",
                                              replace)
                                    log.debug("Matched text: %s", txtsearch)
                                    log.debug("Matched text (splitted): %s",
                                              map(lambda i: i.text,
                                                  searchels))
                                    log.debug("Matched at position: %s",
                                              match.start())
                                    log.debug("matched in elements: %s", e)
                                    if isinstance(replace, etree._Element):
                                        log.debug("Will replace with XML CODE")
                                    # BUG FIX: this used to read
                                    # isinstance(replace(list, tuple)),
                                    # which *called* replace and raised
                                    # TypeError whenever DEBUG was on.
                                    elif isinstance(replace, (list, tuple)):
                                        log.debug("Will replace with LIST OF"
                                                  " ELEMENTS")
                                    else:
                                        # BUG FIX: message and value were
                                        # passed as separate args with no
                                        # placeholder; use lazy %s.
                                        log.debug("Will replace with: %s",
                                                  re.sub(search, replace,
                                                         txtsearch))

                                curlen = 0
                                replaced = False
                                for i in e:
                                    curlen += len(searchels[i].text)
                                    if curlen > match.start() and not replaced:
                                        # The match occurred in THIS element.
                                        # Put in the whole replaced text
                                        if isinstance(replace, etree._Element):
                                            # Convert to a list and process
                                            # it later
                                            replace = [replace]
                                        if isinstance(replace, (list, tuple)):
                                            # I'm replacing with a list of
                                            # etree elements:
                                            # clear the text in the tag and
                                            # append the elements after the
                                            # parent paragraph
                                            # (because t elements cannot have
                                            # childs)
                                            p = findTypeParent(
                                                searchels[i],
                                                '{%s}p' % nsprefixes['w'])
                                            searchels[i].text = re.sub(
                                                search, '', txtsearch)
                                            insindex = p.getparent().index(p)+1
                                            for r in replace:
                                                p.getparent().insert(
                                                    insindex, r)
                                                insindex += 1
                                        else:
                                            # Replacing with pure text
                                            searchels[i].text = re.sub(
                                                search, replace, txtsearch)
                                        replaced = True
                                        log.debug(
                                            "Replacing in element #: %s", i)
                                    else:
                                        # Clears the other text elements
                                        searchels[i].text = ''
    return newdocument
def getdocumenttext(document):
    '''Return the raw text of a document, as a list of paragraphs.'''
    p_tag = '{' + nsprefixes['w'] + '}p'
    t_tag = '{' + nsprefixes['w'] + '}t'
    tab_tag = '{' + nsprefixes['w'] + '}tab'

    paratextlist = []
    # A single sentence might be spread over multiple text elements, so
    # concatenate every w:t (and '\t' for each w:tab) within a paragraph.
    for para in (el for el in document.iter() if el.tag == p_tag):
        pieces = []
        for child in para.iter():
            if child.tag == t_tag:
                if child.text:
                    pieces.append(child.text)
            elif child.tag == tab_tag:
                pieces.append('\t')
        text = u''.join(pieces)
        # Skip paragraphs that produced no text at all.
        if text:
            paratextlist.append(text)
    return paratextlist
def coreproperties(title, subject, creator, keywords, lastmodifiedby=None):
    """
    Create core properties (common document properties referred to in the
    'Dublin Core' specification). See appproperties() for other stuff.

    *lastmodifiedby* defaults to *creator* when omitted.
    """
    coreprops = makeelement('coreProperties', nsprefix='cp')
    # Simple child elements: (tag, text, namespace prefix), in order.
    simple_props = [
        ('title', title, 'dc'),
        ('subject', subject, 'dc'),
        ('creator', creator, 'dc'),
        ('keywords', ','.join(keywords), 'cp'),
        ('lastModifiedBy',
         lastmodifiedby if lastmodifiedby else creator, 'cp'),
        ('revision', '1', 'cp'),
        ('category', 'Examples', 'cp'),
        ('description', 'Examples', 'dc'),
    ]
    for tagname, text, prefix in simple_props:
        coreprops.append(makeelement(tagname, tagtext=text, nsprefix=prefix))

    currenttime = time.strftime('%Y-%m-%dT%H:%M:%SZ')
    # Document creation and modify times.
    # Problem here: the attribute name uses one namespace while its value
    # uses another, which makeelement cannot express — build these two
    # elements from raw strings as a workaround.
    for doctime in ['created', 'modified']:
        elm_str = (
            '<dcterms:%s xmlns:xsi="http://www.w3.org/2001/XMLSchema-instanc'
            'e" xmlns:dcterms="http://purl.org/dc/terms/" xsi:type="dcterms:'
            'W3CDTF">%s</dcterms:%s>'
        ) % (doctime, currenttime, doctime)
        coreprops.append(etree.fromstring(elm_str))
    return coreprops
def appproperties():
    """
    Create app-specific properties. See docproperties() for more common
    document properties.
    """
    # NOTE: a previous version first built an element with makeelement()
    # and then immediately overwrote it with the fromstring() result —
    # that dead assignment has been removed.
    appprops = etree.fromstring(
        '<?xml version="1.0" encoding="UTF-8" standalone="yes"?><Properties x'
        'mlns="http://schemas.openxmlformats.org/officeDocument/2006/extended'
        '-properties" xmlns:vt="http://schemas.openxmlformats.org/officeDocum'
        'ent/2006/docPropsVTypes"></Properties>')
    # Static property values mimicking typical Word output.
    props = \
        {'Template': 'Normal.dotm',
         'TotalTime': '6',
         'Pages': '1',
         'Words': '83',
         'Characters': '475',
         'Application': 'Microsoft Word 12.0.0',
         'DocSecurity': '0',
         'Lines': '12',
         'Paragraphs': '8',
         'ScaleCrop': 'false',
         'LinksUpToDate': 'false',
         'CharactersWithSpaces': '583',
         'SharedDoc': 'false',
         'HyperlinksChanged': 'false',
         'AppVersion': '12.0000'}
    for prop in props:
        appprops.append(makeelement(prop, tagtext=props[prop], nsprefix=None))
    return appprops
def websettings():
    '''Generate websettings'''
    web = makeelement('webSettings')
    # Two fixed child flags, appended in order.
    for childtag in ('allowPNG', 'doNotSaveAsSingleFile'):
        web.append(makeelement(childtag))
    return web
def relationshiplist():
    '''Return the default list of [type, target] document relationships.'''
    base = ('http://schemas.openxmlformats.org/officeDocument/2006/'
            'relationships/')
    # (relationship-type suffix, target part) in the canonical order.
    parts = [
        ('numbering', 'numbering.xml'),
        ('styles', 'styles.xml'),
        ('settings', 'settings.xml'),
        ('webSettings', 'webSettings.xml'),
        ('fontTable', 'fontTable.xml'),
        ('theme', 'theme/theme1.xml'),
    ]
    return [[base + reltype, target] for reltype, target in parts]
def wordrelationships(relationshiplist):
    """
    Generate a Word relationships element.

    @param list relationshiplist: list of [type, target] pairs
    @return lxml element: <Relationships> with one <Relationship> per pair,
                          ids assigned sequentially starting at rId1
    """
    # FIXME: using string hack instead of making element
    # relationships = makeelement('Relationships', nsprefix='pr')
    relationships = etree.fromstring(
        '<Relationships xmlns="http://schemas.openxmlformats.org/package/2006'
        '/relationships"></Relationships>')
    # Relationship IDs (rId) start at 1 -- use enumerate instead of a
    # manually incremented counter.
    for relcount, relationship in enumerate(relationshiplist, start=1):
        rel_elm = makeelement('Relationship', nsprefix=None,
                              attributes={'Id': 'rId' + str(relcount),
                                          'Type': relationship[0],
                                          'Target': relationship[1]}
                              )
        relationships.append(rel_elm)
    return relationships
def savedocx(
        document, coreprops, appprops, contenttypes, websettings,
        wordrelationships, output, imagefiledict=None):
    """
    Save a modified document.

    Serializes the given XML trees plus the static template files into a
    .docx (zip) archive at *output*. *imagefiledict* maps image paths to
    relationship ids; calling without it is deprecated (images are then
    expected to already live in the template directory).
    """
    if imagefiledict is None:
        warn(
            'Using savedocx() without imagefiledict parameter will be deprec'
            'ated in the future.', PendingDeprecationWarning
        )
    assert os.path.isdir(template_dir)
    docxfile = zipfile.ZipFile(
        output, mode='w', compression=zipfile.ZIP_DEFLATED)
    # Move to the template data path
    prev_dir = os.path.abspath('.')  # save previous working dir
    os.chdir(template_dir)
    # The try/finally guarantees the archive is closed and the working
    # directory restored even if serialization fails part-way through
    # (the previous code leaked both on error).
    try:
        # Serialize our trees into out zip file
        treesandfiles = {
            document: 'word/document.xml',
            coreprops: 'docProps/core.xml',
            appprops: 'docProps/app.xml',
            contenttypes: '[Content_Types].xml',
            websettings: 'word/webSettings.xml',
            wordrelationships: 'word/_rels/document.xml.rels'
        }
        for tree in treesandfiles:
            log.info('Saving: %s' % treesandfiles[tree])
            treestring = etree.tostring(tree, pretty_print=True)
            docxfile.writestr(treesandfiles[tree], treestring)
        # Add & compress images, if applicable
        if imagefiledict is not None:
            for imagepath, picrelid in imagefiledict.items():
                archivename = 'word/media/%s_%s' % (picrelid,
                                                    basename(imagepath))
                log.info('Saving: %s', archivename)
                docxfile.write(imagepath, archivename)
        # Add & compress support files
        files_to_ignore = ['.DS_Store']  # nuisance from some os's
        for dirpath, dirnames, filenames in os.walk('.'):
            for filename in filenames:
                if filename in files_to_ignore:
                    continue
                templatefile = join(dirpath, filename)
                # strip the leading './' so archive paths are relative
                archivename = templatefile[2:]
                log.info('Saving: %s', archivename)
                docxfile.write(templatefile, archivename)
        log.info('Saved new file to: %r', output)
    finally:
        docxfile.close()
        os.chdir(prev_dir)  # restore previous working dir
    return
|
mikemaccana/python-docx
|
docx.py
|
heading
|
python
|
def heading(headingtext, headinglevel, lang='en'):
    '''Make a new heading, return the heading element'''
    # Map language code to the localized heading style-name prefix.
    lmap = {'en': 'Heading', 'it': 'Titolo'}
    # Paragraph properties carrying the heading style (e.g. 'Heading1')
    props = makeelement('pPr')
    props.append(makeelement(
        'pStyle', attributes={'val': lmap[lang] + str(headinglevel)}))
    # Run holding the heading text
    textrun = makeelement('r')
    textrun.append(makeelement('t', tagtext=headingtext))
    # Assemble: properties first, then the run
    para = makeelement('p')
    para.append(props)
    para.append(textrun)
    return para
|
Make a new heading, return the heading element
|
train
|
https://github.com/mikemaccana/python-docx/blob/4c9b46dbebe3d2a9b82dbcd35af36584a36fd9fe/docx.py#L278-L294
|
[
"def makeelement(tagname, tagtext=None, nsprefix='w', attributes=None,\n attrnsprefix=None):\n '''Create an element & return it'''\n # Deal with list of nsprefix by making namespacemap\n namespacemap = None\n if isinstance(nsprefix, list):\n namespacemap = {}\n for prefix in nsprefix:\n namespacemap[prefix] = nsprefixes[prefix]\n # FIXME: rest of code below expects a single prefix\n nsprefix = nsprefix[0]\n if nsprefix:\n namespace = '{%s}' % nsprefixes[nsprefix]\n else:\n # For when namespace = None\n namespace = ''\n newelement = etree.Element(namespace+tagname, nsmap=namespacemap)\n # Add attributes with namespaces\n if attributes:\n # If they haven't bothered setting attribute namespace, use an empty\n # string (equivalent of no namespace)\n if not attrnsprefix:\n # Quick hack: it seems every element that has a 'w' nsprefix for\n # its tag uses the same prefix for it's attributes\n if nsprefix == 'w':\n attributenamespace = namespace\n else:\n attributenamespace = ''\n else:\n attributenamespace = '{'+nsprefixes[attrnsprefix]+'}'\n\n for tagattribute in attributes:\n newelement.set(attributenamespace+tagattribute,\n attributes[tagattribute])\n if tagtext:\n newelement.text = tagtext\n return newelement\n"
] |
# encoding: utf-8
"""
Open and modify Microsoft Word 2007 docx files (called 'OpenXML' and
'Office OpenXML' by Microsoft)
Part of Python's docx module - http://github.com/mikemaccana/python-docx
See LICENSE for licensing information.
"""
import os
import re
import time
import shutil
import zipfile
from lxml import etree
from os.path import abspath, basename, join
try:
from PIL import Image
except ImportError:
import Image
try:
from PIL.ExifTags import TAGS
except ImportError:
TAGS = {}
from exceptions import PendingDeprecationWarning
from warnings import warn
import logging
log = logging.getLogger(__name__)
# Record template directory's location which is just 'template' for a docx
# developer or 'site-packages/docx-template' if you have installed docx
template_dir = join(os.path.dirname(__file__), 'docx-template') # installed
if not os.path.isdir(template_dir):
template_dir = join(os.path.dirname(__file__), 'template') # dev
# All Word prefixes / namespace matches used in document.xml & core.xml.
# LXML doesn't actually use prefixes (just the real namespace) , but these
# make it easier to copy Word output more easily.
# Keys are the conventional short prefixes; values are the namespace URIs
# that makeelement() expands into Clark notation ('{uri}tag').
nsprefixes = {
    'mo': 'http://schemas.microsoft.com/office/mac/office/2008/main',
    'o': 'urn:schemas-microsoft-com:office:office',
    've': 'http://schemas.openxmlformats.org/markup-compatibility/2006',
    # Text Content
    'w': 'http://schemas.openxmlformats.org/wordprocessingml/2006/main',
    'w10': 'urn:schemas-microsoft-com:office:word',
    'wne': 'http://schemas.microsoft.com/office/word/2006/wordml',
    # Drawing
    'a': 'http://schemas.openxmlformats.org/drawingml/2006/main',
    'm': 'http://schemas.openxmlformats.org/officeDocument/2006/math',
    'mv': 'urn:schemas-microsoft-com:mac:vml',
    'pic': 'http://schemas.openxmlformats.org/drawingml/2006/picture',
    'v': 'urn:schemas-microsoft-com:vml',
    'wp': ('http://schemas.openxmlformats.org/drawingml/2006/wordprocessing'
           'Drawing'),
    # Properties (core and extended)
    'cp': ('http://schemas.openxmlformats.org/package/2006/metadata/core-pr'
           'operties'),
    'dc': 'http://purl.org/dc/elements/1.1/',
    'ep': ('http://schemas.openxmlformats.org/officeDocument/2006/extended-'
           'properties'),
    'xsi': 'http://www.w3.org/2001/XMLSchema-instance',
    # Content Types
    'ct': 'http://schemas.openxmlformats.org/package/2006/content-types',
    # Package Relationships
    'r': ('http://schemas.openxmlformats.org/officeDocument/2006/relationsh'
          'ips'),
    'pr': 'http://schemas.openxmlformats.org/package/2006/relationships',
    # Dublin Core document properties
    'dcmitype': 'http://purl.org/dc/dcmitype/',
    'dcterms': 'http://purl.org/dc/terms/'}
def opendocx(file):
    '''Open a docx file (path or file object), return the document XML tree.'''
    mydoc = zipfile.ZipFile(file)
    try:
        xmlcontent = mydoc.read('word/document.xml')
    finally:
        # Release the underlying file handle; the previous code leaked it.
        mydoc.close()
    document = etree.fromstring(xmlcontent)
    return document
def newdocument():
    '''Return a minimal document tree: a <document> holding an empty <body>.'''
    doc = makeelement('document')
    body = makeelement('body')
    doc.append(body)
    return doc
def makeelement(tagname, tagtext=None, nsprefix='w', attributes=None,
                attrnsprefix=None):
    '''Create an element & return it'''
    # When several prefixes are given, declare them all in the nsmap and
    # qualify the tag with the first one.
    namespacemap = None
    if isinstance(nsprefix, list):
        namespacemap = dict((prefix, nsprefixes[prefix])
                            for prefix in nsprefix)
        # FIXME: rest of code below expects a single prefix
        nsprefix = nsprefix[0]
    # Clark-notation namespace for the tag; empty when nsprefix is None.
    namespace = '{%s}' % nsprefixes[nsprefix] if nsprefix else ''
    newelement = etree.Element(namespace + tagname, nsmap=namespacemap)
    if attributes:
        # Pick the attribute namespace: an explicit attrnsprefix wins;
        # otherwise 'w'-tagged elements reuse the tag's namespace (matches
        # Word's own output) and everything else gets no namespace.
        if attrnsprefix:
            attributenamespace = '{' + nsprefixes[attrnsprefix] + '}'
        elif nsprefix == 'w':
            attributenamespace = namespace
        else:
            attributenamespace = ''
        for attrname, attrvalue in attributes.items():
            newelement.set(attributenamespace + attrname, attrvalue)
    if tagtext:
        newelement.text = tagtext
    return newelement
def pagebreak(type='page', orient='portrait'):
    '''Insert a break, default 'page'.
    See http://openxmldeveloper.org/forums/thread/4075.aspx
    Return our page break element.'''
    # Need to enumerate different types of page breaks.
    validtypes = ['page', 'section']
    if type not in validtypes:
        tmpl = 'Page break style "%s" not implemented. Valid styles: %s.'
        raise ValueError(tmpl % (type, validtypes))
    # Previously an unrecognized orientation crashed later with a NameError
    # on the unbound 'pgSz'; fail fast with a clear message instead.
    validorients = ['portrait', 'landscape']
    if orient not in validorients:
        tmpl = 'Page orientation "%s" not implemented. Valid orientations: %s.'
        raise ValueError(tmpl % (orient, validorients))
    pagebreak = makeelement('p')
    if type == 'page':
        run = makeelement('r')
        br = makeelement('br', attributes={'type': type})
        run.append(br)
        pagebreak.append(run)
    elif type == 'section':
        pPr = makeelement('pPr')
        sectPr = makeelement('sectPr')
        if orient == 'portrait':
            pgSz = makeelement('pgSz', attributes={'w': '12240', 'h': '15840'})
        else:  # landscape (validated above)
            pgSz = makeelement('pgSz', attributes={'h': '12240', 'w': '15840',
                                                   'orient': 'landscape'})
        sectPr.append(pgSz)
        pPr.append(sectPr)
        pagebreak.append(pPr)
    return pagebreak
def paragraph(paratext, style='BodyText', breakbefore=False, jc='left'):
    """
    Return a new paragraph element containing *paratext*.

    @param string style: paragraph style name, default 'BodyText'
    @param bool breakbefore: insert a lastRenderedPageBreak marker in each
                             run (hint for assistive technologies)
    @param string jc: paragraph alignment -- left, center, right, both
                      (justified), ...; see
                      http://www.schemacentral.com/sc/ooxml/t-w_ST_Jc.html

    *paratext* may be a plain string, or a list of
    (text, char_format_str) 2-tuples, where char_format_str contains any
    of 'b' (bold), 'i' (italic), 'u' (underline). Example:

        paratext = [
            ('some bold text', 'b'),
            ('some normal text', ''),
            ('some italic underlined text', 'iu')
        ]
    """
    para = makeelement('p')
    # Normalise input: a bare string becomes a single unstyled run.
    if not isinstance(paratext, list):
        paratext = [(paratext, '')]
    prepared = []
    for item in paratext:
        if isinstance(item, (list, tuple)):
            text, fmt = item
        else:
            text, fmt = item, ''
        text_elm = makeelement('t', tagtext=text)
        # Leading/trailing whitespace would be dropped by XML parsers
        # unless the element is marked space-preserving.
        if len(text.strip()) < len(text):
            text_elm.set('{http://www.w3.org/XML/1998/namespace}space',
                         'preserve')
        prepared.append((text_elm, fmt))
    # Paragraph properties: style and alignment.
    props = makeelement('pPr')
    props.append(makeelement('pStyle', attributes={'val': style}))
    props.append(makeelement('jc', attributes={'val': jc}))
    para.append(props)
    # One run per text fragment, each with its own character formatting.
    for text_elm, fmt in prepared:
        run = makeelement('r')
        runprops = makeelement('rPr')
        if 'b' in fmt:
            runprops.append(makeelement('b'))
        if 'i' in fmt:
            runprops.append(makeelement('i'))
        if 'u' in fmt:
            runprops.append(makeelement('u', attributes={'val': 'single'}))
        run.append(runprops)
        # Insert lastRenderedPageBreak for assistive technologies like
        # document narrators to know when a page break occurred.
        if breakbefore:
            run.append(makeelement('lastRenderedPageBreak'))
        run.append(text_elm)
        para.append(run)
    return para
def contenttypes():
    '''Return a [Content_Types].xml tree covering the standard parts.'''
    types = etree.fromstring(
        '<Types xmlns="http://schemas.openxmlformats.org/package/2006/conten'
        't-types"></Types>')
    # Explicit overrides: one per well-known package part.
    parts = {
        '/word/theme/theme1.xml': 'application/vnd.openxmlformats-officedocu'
                                  'ment.theme+xml',
        '/word/fontTable.xml': 'application/vnd.openxmlformats-officedocu'
                               'ment.wordprocessingml.fontTable+xml',
        '/docProps/core.xml': 'application/vnd.openxmlformats-package.co'
                              're-properties+xml',
        '/docProps/app.xml': 'application/vnd.openxmlformats-officedocu'
                             'ment.extended-properties+xml',
        '/word/document.xml': 'application/vnd.openxmlformats-officedocu'
                              'ment.wordprocessingml.document.main+xml',
        '/word/settings.xml': 'application/vnd.openxmlformats-officedocu'
                              'ment.wordprocessingml.settings+xml',
        '/word/numbering.xml': 'application/vnd.openxmlformats-officedocu'
                               'ment.wordprocessingml.numbering+xml',
        '/word/styles.xml': 'application/vnd.openxmlformats-officedocu'
                            'ment.wordprocessingml.styles+xml',
        '/word/webSettings.xml': 'application/vnd.openxmlformats-officedocu'
                                 'ment.wordprocessingml.webSettings+xml'}
    for partname, contenttype in parts.items():
        types.append(makeelement('Override', nsprefix=None,
                                 attributes={'PartName': partname,
                                             'ContentType': contenttype}))
    # Defaults keyed on file extension.
    filetypes = {
        'gif': 'image/gif',
        'jpeg': 'image/jpeg',
        'jpg': 'image/jpeg',
        'png': 'image/png',
        'rels': 'application/vnd.openxmlformats-package.relationships+xml',
        'xml': 'application/xml'
    }
    for extension, contenttype in filetypes.items():
        types.append(makeelement('Default', nsprefix=None,
                                 attributes={'Extension': extension,
                                             'ContentType': contenttype}))
    return types
def table(contents, heading=True, colw=None, cwunit='dxa', tblw=0,
          twunit='auto', borders={}, celstyle=None):
    """
    Return a table element based on specified parameters

    @param list contents: A list of lists describing contents. Every item in
                          the list can be a string or a valid XML element
                          itself. It can also be a list. In that case all the
                          listed elements will be merged into the cell.
    @param bool heading:  Tells whether first line should be treated as
                          heading or not
    @param list colw:     list of integer column widths specified in wunitS.
    @param str cwunit:    Unit used for column width:
                            'pct'  : fiftieths of a percent
                            'dxa'  : twentieths of a point
                            'nil'  : no width
                            'auto' : automagically determined
    @param int tblw:      Table width
    @param str twunit:    Unit used for table width. Same possible values as
                          cwunit.
    @param dict borders:  Dictionary defining table border. Supported keys
                          are: 'top', 'left', 'bottom', 'right', 'insideH',
                          'insideV', 'all'. When specified, the 'all' key
                          has precedence over others. Each key must define
                          a dict of border attributes:
                            color : The color of the border, in hex or 'auto'
                            space : The space, measured in points
                            sz    : The size of the border, in eighths of
                                    a point
                            val   : The style of the border, see
                                    http://www.schemacentral.com/sc/ooxml/t-w_ST_Border.htm
    @param list celstyle: Specify the style for each colum, list of dicts.
                          supported keys:
                          'align' : specify the alignment, see paragraph
                                    documentation.
    @return lxml.etree:   Generated XML etree element
    """
    # NOTE(review): 'borders={}' is a shared mutable default; safe today
    # because it is only read, never mutated.
    table = makeelement('tbl')
    # Column count comes from the first row; all rows are assumed to have
    # the same number of cells -- TODO confirm with callers.
    columns = len(contents[0])
    # Table properties
    tableprops = makeelement('tblPr')
    tablestyle = makeelement('tblStyle', attributes={'val': ''})
    tableprops.append(tablestyle)
    tablewidth = makeelement(
        'tblW', attributes={'w': str(tblw), 'type': str(twunit)})
    tableprops.append(tablewidth)
    if len(borders.keys()):
        tableborders = makeelement('tblBorders')
        for b in ['top', 'left', 'bottom', 'right', 'insideH', 'insideV']:
            if b in borders.keys() or 'all' in borders.keys():
                # 'all' takes precedence over a per-side entry.
                k = 'all' if 'all' in borders.keys() else b
                attrs = {}
                for a in borders[k].keys():
                    # NOTE(review): 'unicode' is Python 2-only, matching the
                    # rest of this module.
                    attrs[a] = unicode(borders[k][a])
                borderelem = makeelement(b, attributes=attrs)
                tableborders.append(borderelem)
        tableprops.append(tableborders)
    tablelook = makeelement('tblLook', attributes={'val': '0400'})
    tableprops.append(tablelook)
    table.append(tableprops)
    # Table Grid
    tablegrid = makeelement('tblGrid')
    for i in range(columns):
        attrs = {'w': str(colw[i]) if colw else '2390'}
        tablegrid.append(makeelement('gridCol', attributes=attrs))
    table.append(tablegrid)
    # Heading Row
    row = makeelement('tr')
    rowprops = makeelement('trPr')
    cnfStyle = makeelement('cnfStyle', attributes={'val': '000000100000'})
    rowprops.append(cnfStyle)
    row.append(rowprops)
    if heading:
        i = 0
        # Fix: iterate with a distinct name instead of shadowing the
        # 'heading' parameter, which is read again below when slicing the
        # content rows (a falsy last header cell used to make the header
        # row reappear as a content row).
        for heading_cell in contents[0]:
            cell = makeelement('tc')
            # Cell properties
            cellprops = makeelement('tcPr')
            if colw:
                wattr = {'w': str(colw[i]), 'type': cwunit}
            else:
                wattr = {'w': '0', 'type': 'auto'}
            cellwidth = makeelement('tcW', attributes=wattr)
            cellstyle = makeelement('shd', attributes={'val': 'clear',
                                                       'color': 'auto',
                                                       'fill': 'FFFFFF',
                                                       'themeFill': 'text2',
                                                       'themeFillTint': '99'})
            cellprops.append(cellwidth)
            cellprops.append(cellstyle)
            cell.append(cellprops)
            # Paragraph (Content)
            if not isinstance(heading_cell, (list, tuple)):
                heading_cell = [heading_cell]
            for h in heading_cell:
                if isinstance(h, etree._Element):
                    cell.append(h)
                else:
                    cell.append(paragraph(h, jc='center'))
            row.append(cell)
            i += 1
    # NOTE(review): this row is appended even when heading is False, which
    # leaves an empty <tr> in the table -- preserved for compatibility,
    # confirm whether that is intended.
    table.append(row)
    # Contents Rows
    for contentrow in contents[1 if heading else 0:]:
        row = makeelement('tr')
        i = 0
        for content in contentrow:
            cell = makeelement('tc')
            # Properties
            cellprops = makeelement('tcPr')
            if colw:
                wattr = {'w': str(colw[i]), 'type': cwunit}
            else:
                wattr = {'w': '0', 'type': 'auto'}
            cellwidth = makeelement('tcW', attributes=wattr)
            cellprops.append(cellwidth)
            cell.append(cellprops)
            # Paragraph (Content)
            if not isinstance(content, (list, tuple)):
                content = [content]
            for c in content:
                if isinstance(c, etree._Element):
                    cell.append(c)
                else:
                    if celstyle and 'align' in celstyle[i].keys():
                        align = celstyle[i]['align']
                    else:
                        align = 'left'
                    cell.append(paragraph(c, jc=align))
            row.append(cell)
            i += 1
        table.append(row)
    return table
def picture(
        relationshiplist, picname, picdescription, pixelwidth=None,
        pixelheight=None, nochangeaspect=True, nochangearrowheads=True,
        imagefiledict=None):
    """
    Take a relationshiplist, picture file name, and return a paragraph
    containing the image and an updated relationshiplist.

    When *imagefiledict* is given, also returns the updated dict (path ->
    relationship id) as a third element; calling without it is deprecated
    and copies the image into the template directory instead.
    """
    if imagefiledict is None:
        warn(
            'Using picture() without imagefiledict parameter will be depreca'
            'ted in the future.', PendingDeprecationWarning
        )
    # http://openxmldeveloper.org/articles/462.aspx
    # Create an image. Size may be specified, otherwise it will based on the
    # pixel size of image. Return a paragraph containing the picture
    # Set relationship ID to that of the image or the first available one
    picid = '2'
    picpath = abspath(picname)
    if imagefiledict is not None:
        # Keep track of the image files in a separate dictionary so they don't
        # need to be copied into the template directory
        if picpath not in imagefiledict:
            picrelid = 'rId' + str(len(relationshiplist) + 1)
            imagefiledict[picpath] = picrelid
            relationshiplist.append([
                'http://schemas.openxmlformats.org/officeDocument/2006/relat'
                'ionships/image',
                'media/%s_%s' % (picrelid, basename(picpath))
            ])
        else:
            # Image already registered: reuse its relationship id.
            picrelid = imagefiledict[picpath]
    else:
        # Copy files into template directory for backwards compatibility
        # Images still accumulate in the template directory this way
        picrelid = 'rId' + str(len(relationshiplist) + 1)
        relationshiplist.append([
            'http://schemas.openxmlformats.org/officeDocument/2006/relations'
            'hips/image', 'media/' + picname
        ])
        media_dir = join(template_dir, 'word', 'media')
        if not os.path.isdir(media_dir):
            os.mkdir(media_dir)
        shutil.copyfile(picname, join(media_dir, picname))
    image = Image.open(picpath)
    # Extract EXIF data, if available
    # NOTE(review): the bare except keeps unreadable/absent EXIF from
    # aborting, but it also hides unrelated errors.
    try:
        exif = image._getexif()
        exif = {} if exif is None else exif
    except:
        exif = {}
    imageExif = {}
    for tag, value in exif.items():
        imageExif[TAGS.get(tag, tag)] = value
    # Map EXIF orientation codes (1-8) to rotation angle and mirroring.
    imageOrientation = imageExif.get('Orientation', 1)
    imageAngle = {
        1: 0, 2: 0, 3: 180, 4: 0, 5: 90, 6: 90, 7: 270, 8: 270
    }[imageOrientation]
    imageFlipH = 'true' if imageOrientation in (2, 5, 7) else 'false'
    imageFlipV = 'true' if imageOrientation == 4 else 'false'
    # Check if the user has specified a size
    if not pixelwidth or not pixelheight:
        # If not, get info from the picture itself
        pixelwidth, pixelheight = image.size[0:2]
    # Swap width and height if necessary
    if imageOrientation in (5, 6, 7, 8):
        pixelwidth, pixelheight = pixelheight, pixelwidth
    # OpenXML measures on-screen objects in English Metric Units
    # 1cm = 36000 EMUs
    # NOTE(review): 12700 EMU is one *point*, not one pixel at 96 dpi
    # (which would be 9525) -- confirm the intended scaling.
    emuperpixel = 12700
    width = str(pixelwidth * emuperpixel)
    height = str(pixelheight * emuperpixel)
    # There are 3 main elements inside a picture
    # 1. The Blipfill - specifies how the image fills the picture area
    #    (stretch, tile, etc.)
    blipfill = makeelement('blipFill', nsprefix='pic')
    blipfill.append(makeelement('blip', nsprefix='a', attrnsprefix='r',
                                attributes={'embed': picrelid}))
    stretch = makeelement('stretch', nsprefix='a')
    stretch.append(makeelement('fillRect', nsprefix='a'))
    blipfill.append(makeelement('srcRect', nsprefix='a'))
    blipfill.append(stretch)
    # 2. The non visual picture properties
    nvpicpr = makeelement('nvPicPr', nsprefix='pic')
    cnvpr = makeelement(
        'cNvPr', nsprefix='pic',
        attributes={'id': '0', 'name': 'Picture 1', 'descr': picdescription}
    )
    nvpicpr.append(cnvpr)
    cnvpicpr = makeelement('cNvPicPr', nsprefix='pic')
    cnvpicpr.append(makeelement(
        'picLocks', nsprefix='a',
        attributes={'noChangeAspect': str(int(nochangeaspect)),
                    'noChangeArrowheads': str(int(nochangearrowheads))}))
    nvpicpr.append(cnvpicpr)
    # 3. The Shape properties
    sppr = makeelement('spPr', nsprefix='pic', attributes={'bwMode': 'auto'})
    # xfrm carries the EXIF-derived rotation and mirroring (angle is in
    # 1/60000ths of a degree).
    xfrm = makeelement(
        'xfrm', nsprefix='a', attributes={
            'rot': str(imageAngle * 60000), 'flipH': imageFlipH,
            'flipV': imageFlipV
        }
    )
    xfrm.append(
        makeelement('off', nsprefix='a', attributes={'x': '0', 'y': '0'})
    )
    xfrm.append(
        makeelement(
            'ext', nsprefix='a', attributes={'cx': width, 'cy': height}
        )
    )
    prstgeom = makeelement(
        'prstGeom', nsprefix='a', attributes={'prst': 'rect'}
    )
    prstgeom.append(makeelement('avLst', nsprefix='a'))
    sppr.append(xfrm)
    sppr.append(prstgeom)
    # Add our 3 parts to the picture element
    pic = makeelement('pic', nsprefix='pic')
    pic.append(nvpicpr)
    pic.append(blipfill)
    pic.append(sppr)
    # Now make the supporting elements
    # The following sequence is just: make element, then add its children
    graphicdata = makeelement(
        'graphicData', nsprefix='a',
        attributes={'uri': ('http://schemas.openxmlformats.org/drawingml/200'
                            '6/picture')})
    graphicdata.append(pic)
    graphic = makeelement('graphic', nsprefix='a')
    graphic.append(graphicdata)
    framelocks = makeelement('graphicFrameLocks', nsprefix='a',
                             attributes={'noChangeAspect': '1'})
    framepr = makeelement('cNvGraphicFramePr', nsprefix='wp')
    framepr.append(framelocks)
    docpr = makeelement('docPr', nsprefix='wp',
                        attributes={'id': picid, 'name': 'Picture 1',
                                    'descr': picdescription})
    effectextent = makeelement('effectExtent', nsprefix='wp',
                               attributes={'l': '25400', 't': '0', 'r': '0',
                                           'b': '0'})
    extent = makeelement('extent', nsprefix='wp',
                         attributes={'cx': width, 'cy': height})
    inline = makeelement('inline', attributes={'distT': "0", 'distB': "0",
                                               'distL': "0", 'distR': "0"},
                         nsprefix='wp')
    inline.append(extent)
    inline.append(effectextent)
    inline.append(docpr)
    inline.append(framepr)
    inline.append(graphic)
    drawing = makeelement('drawing')
    drawing.append(inline)
    run = makeelement('r')
    run.append(drawing)
    paragraph = makeelement('p')
    paragraph.append(run)
    if imagefiledict is not None:
        return relationshiplist, paragraph, imagefiledict
    else:
        return relationshiplist, paragraph
def search(document, search):
    '''Search a document for a regex; return True on the first match,
    False when no text element matches.'''
    searchre = re.compile(search)
    text_tag = '{%s}t' % nsprefixes['w']
    for element in document.iter():
        if element.tag == text_tag:  # t (text) elements
            if element.text and searchre.search(element.text):
                # Stop at the first hit instead of scanning the whole tree
                # (the previous code kept iterating after a match).
                return True
    return False
def replace(document, search, replace):
    """
    Replace all occurences of string with a different string, return updated
    document
    """
    pattern = re.compile(search)
    text_tag = '{%s}t' % nsprefixes['w']  # fully-qualified w:t tag
    for element in document.iter():
        # Only text (w:t) elements carry replaceable content.
        if element.tag == text_tag and element.text:
            if pattern.search(element.text):
                element.text = re.sub(search, replace, element.text)
    return document
def clean(document):
    """ Perform misc cleaning operations on documents.
        Returns cleaned document.
    """
    # Drop 't' (text) and 'r' (run) elements that have neither text nor
    # children; collect first, then remove, so we never mutate a tree
    # while iterating it.
    for tagname in ('t', 'r'):
        fulltag = '{%s}%s' % (nsprefixes['w'], tagname)
        doomed = [el for el in document.iter()
                  if el.tag == fulltag and not el.text and not len(el)]
        for el in doomed:
            el.getparent().remove(el)
    return document
def findTypeParent(element, tag):
    """ Finds first parent of element of the given type

    @param object element: etree element
    @param string tag: the parent tag to search for
    @return object element: the found parent or None when not found
    """
    # Walk up the ancestor chain. The previous version looped forever and
    # crashed with AttributeError once getparent() returned None (at the
    # tree root), so 'None when not found' was never actually returned.
    p = element.getparent()
    while p is not None:
        if p.tag == tag:
            return p
        p = p.getparent()
    # Not found
    return None
def AdvSearch(document, search, bs=3):
    '''Return set of all regex matches

    This is an advanced version of python-docx.search() that takes into
    account blocks of <bs> elements at a time.

    What it does:
    It searches the entire document body for text blocks.
    Since the text to search could be spawned across multiple text blocks,
    we need to adopt some sort of algorithm to handle this situation.
    The smaller matching group of blocks (up to bs) is then adopted.
    If the matching group has more than one block, blocks other than first
    are cleared and all the replacement text is put on first block.

    Examples:
    original text blocks : [ 'Hel', 'lo,', ' world!' ]
    search : 'Hello,'
    output blocks : [ 'Hello,' ]

    original text blocks : [ 'Hel', 'lo', ' __', 'name', '__!' ]
    search : '(__[a-z]+__)'
    output blocks : [ '__name__' ]

    @param instance document: The original document
    @param str search: The text to search for (regexp)
    @param int bs: Size of the sliding window of text elements (see above)
    @return set All occurences of search string
    '''
    # Compile the search regexp
    searchre = re.compile(search)
    matches = []
    # Will match against searchels. Searchels is a sliding window holding
    # the last n text elements found in the document, with n <= bs.
    searchels = []
    for element in document.iter():
        if element.tag == '{%s}t' % nsprefixes['w']: # t (text) elements
            if element.text:
                # Add this element to searchels
                searchels.append(element)
                if len(searchels) > bs:
                    # If searchels is too long, remove the first element
                    searchels.pop(0)
                # Search all contiguous runs of searchels, from the
                # shortest up to the longest:
                # l = run length
                # s = run start index
                # e = element indices to merge
                found = False
                for l in range(1, len(searchels)+1):
                    if found:
                        break
                    for s in range(len(searchels)):
                        if found:
                            break
                        if s+l <= len(searchels):
                            e = range(s, s+l)
                            txtsearch = ''
                            for k in e:
                                txtsearch += searchels[k].text
                            # Search for the regex in the concatenated text
                            match = searchre.search(txtsearch)
                            if match:
                                matches.append(match.group())
                                found = True
    return set(matches)
def advReplace(document, search, replace, bs=3):
    """
    Replace all occurences of string with a different string, return updated
    document

    This is a modified version of python-docx.replace() that takes into
    account blocks of <bs> elements at a time. The replace element can also
    be a string or an xml etree element.

    What it does:
    It searches the entire document body for text blocks.
    Then scan thos text blocks for replace.
    Since the text to search could be spawned across multiple text blocks,
    we need to adopt some sort of algorithm to handle this situation.
    The smaller matching group of blocks (up to bs) is then adopted.
    If the matching group has more than one block, blocks other than first
    are cleared and all the replacement text is put on first block.

    Examples:
    original text blocks : [ 'Hel', 'lo,', ' world!' ]
    search / replace: 'Hello,' / 'Hi!'
    output blocks : [ 'Hi!', '', ' world!' ]

    original text blocks : [ 'Hel', 'lo,', ' world!' ]
    search / replace: 'Hello, world' / 'Hi!'
    output blocks : [ 'Hi!!', '', '' ]

    original text blocks : [ 'Hel', 'lo,', ' world!' ]
    search / replace: 'Hel' / 'Hal'
    output blocks : [ 'Hal', 'lo,', ' world!' ]

    @param instance document: The original document
    @param str search: The text to search for (regexp)
    @param mixed replace: The replacement text or lxml.etree element to
                          append, or a list of etree elements
    @param int bs: See above
    @return instance The document with replacement applied
    """
    # Enables debug output
    DEBUG = False
    newdocument = document
    # Compile the search regexp
    searchre = re.compile(search)
    # Will match against searchels. Searchels is a sliding window holding
    # the last n text elements found in the document, with n <= bs.
    searchels = []
    for element in newdocument.iter():
        if element.tag == '{%s}t' % nsprefixes['w']: # t (text) elements
            if element.text:
                # Add this element to searchels
                searchels.append(element)
                if len(searchels) > bs:
                    # If searchels is too long, remove the first element
                    searchels.pop(0)
                # Search all contiguous runs of searchels, from the
                # shortest up to the longest:
                # l = run length
                # s = run start index
                # e = element indices to merge
                found = False
                for l in range(1, len(searchels)+1):
                    if found:
                        break
                    for s in range(len(searchels)):
                        if found:
                            break
                        if s+l <= len(searchels):
                            e = range(s, s+l)
                            txtsearch = ''
                            for k in e:
                                txtsearch += searchels[k].text
                            # Search for the regex in the concatenated text
                            match = searchre.search(txtsearch)
                            if match:
                                found = True
                                # I've found something :)
                                if DEBUG:
                                    log.debug("Found element!")
                                    log.debug("Search regexp: %s",
                                              searchre.pattern)
                                    log.debug("Requested replacement: %s",
                                              replace)
                                    log.debug("Matched text: %s", txtsearch)
                                    log.debug("Matched text (splitted): %s",
                                              map(lambda i: i.text, searchels))
                                    log.debug("Matched at position: %s",
                                              match.start())
                                    log.debug("matched in elements: %s", e)
                                    if isinstance(replace, etree._Element):
                                        log.debug("Will replace with XML CODE")
                                    # Fix: previous code called
                                    # replace(list, tuple) instead of
                                    # testing its type.
                                    elif isinstance(replace, (list, tuple)):
                                        log.debug("Will replace with LIST OF"
                                                  " ELEMENTS")
                                    else:
                                        # Fix: added the missing %s
                                        # placeholder for the lazy arg.
                                        log.debug("Will replace with: %s",
                                                  re.sub(search, replace,
                                                         txtsearch))
                                curlen = 0
                                replaced = False
                                for i in e:
                                    curlen += len(searchels[i].text)
                                    if curlen > match.start() and not replaced:
                                        # The match occurred in THIS element.
                                        # Put in the whole replaced text
                                        if isinstance(replace, etree._Element):
                                            # Convert to a list and process
                                            # it later
                                            replace = [replace]
                                        if isinstance(replace, (list, tuple)):
                                            # I'm replacing with a list of
                                            # etree elements
                                            # clear the text in the tag and
                                            # append the element after the
                                            # parent paragraph
                                            # (because t elements cannot have
                                            # childs)
                                            p = findTypeParent(
                                                searchels[i],
                                                '{%s}p' % nsprefixes['w'])
                                            searchels[i].text = re.sub(
                                                search, '', txtsearch)
                                            insindex = p.getparent().index(p)+1
                                            for r in replace:
                                                p.getparent().insert(
                                                    insindex, r)
                                                insindex += 1
                                        else:
                                            # Replacing with pure text
                                            searchels[i].text = re.sub(
                                                search, replace, txtsearch)
                                        replaced = True
                                        log.debug(
                                            "Replacing in element #: %s", i)
                                    else:
                                        # Clears the other text elements
                                        searchels[i].text = ''
    return newdocument
def getdocumenttext(document):
    '''Return the raw text of a document, as a list of paragraphs.'''
    # Pre-build the fully-qualified tag names we look for.
    para_tag = '{' + nsprefixes['w'] + '}p'
    text_tag = '{' + nsprefixes['w'] + '}t'
    tab_tag = '{' + nsprefixes['w'] + '}tab'
    # Collect every paragraph (p) element first.
    paragraphs = [el for el in document.iter() if el.tag == para_tag]
    # A single sentence may be spread over multiple text elements, so join
    # all text (t) children of each paragraph; tabs become '\t'.
    paratextlist = []
    for para in paragraphs:
        pieces = []
        for el in para.iter():
            if el.tag == text_tag:
                if el.text:
                    pieces.append(el.text)
            elif el.tag == tab_tag:
                pieces.append('\t')
        paratext = u''.join(pieces)
        # Skip paragraphs that produced no text at all.
        if paratext:
            paratextlist.append(paratext)
    return paratextlist
def coreproperties(title, subject, creator, keywords, lastmodifiedby=None):
    """
    Create core properties (common document properties referred to in the
    'Dublin Core' specification). See appproperties() for other stuff.
    """
    coreprops = makeelement('coreProperties', nsprefix='cp')
    coreprops.append(makeelement('title', tagtext=title, nsprefix='dc'))
    coreprops.append(makeelement('subject', tagtext=subject, nsprefix='dc'))
    coreprops.append(makeelement('creator', tagtext=creator, nsprefix='dc'))
    coreprops.append(makeelement('keywords', tagtext=','.join(keywords),
                                 nsprefix='cp'))
    # Fall back to the creator when no last-modifier was supplied.
    coreprops.append(makeelement('lastModifiedBy',
                                 tagtext=lastmodifiedby or creator,
                                 nsprefix='cp'))
    coreprops.append(makeelement('revision', tagtext='1', nsprefix='cp'))
    coreprops.append(
        makeelement('category', tagtext='Examples', nsprefix='cp'))
    coreprops.append(
        makeelement('description', tagtext='Examples', nsprefix='dc'))
    stamp = time.strftime('%Y-%m-%dT%H:%M:%SZ')
    # Document creation and modify times.
    # Problem: the attribute name lives in one namespace while its value
    # refers to another, which makeelement() cannot express, so these two
    # elements are parsed from raw XML strings as a workaround.
    for tagname in ('created', 'modified'):
        elm_str = (
            '<dcterms:%s xmlns:xsi="http://www.w3.org/2001/XMLSchema-instanc'
            'e" xmlns:dcterms="http://purl.org/dc/terms/" xsi:type="dcterms:'
            'W3CDTF">%s</dcterms:%s>'
        ) % (tagname, stamp, tagname)
        coreprops.append(etree.fromstring(elm_str))
    return coreprops
def appproperties():
    """
    Create app-specific properties. See docproperties() for more common
    document properties.
    """
    # The element made via makeelement() is immediately replaced by one
    # parsed from a raw string so that both required namespaces end up
    # declared on the root Properties element.
    appprops = makeelement('Properties', nsprefix='ep')
    appprops = etree.fromstring(
        '<?xml version="1.0" encoding="UTF-8" standalone="yes"?><Properties x'
        'mlns="http://schemas.openxmlformats.org/officeDocument/2006/extended'
        '-properties" xmlns:vt="http://schemas.openxmlformats.org/officeDocum'
        'ent/2006/docPropsVTypes"></Properties>')
    # Canned application metadata written into every document.
    for propname, propvalue in (
            ('Template', 'Normal.dotm'),
            ('TotalTime', '6'),
            ('Pages', '1'),
            ('Words', '83'),
            ('Characters', '475'),
            ('Application', 'Microsoft Word 12.0.0'),
            ('DocSecurity', '0'),
            ('Lines', '12'),
            ('Paragraphs', '8'),
            ('ScaleCrop', 'false'),
            ('LinksUpToDate', 'false'),
            ('CharactersWithSpaces', '583'),
            ('SharedDoc', 'false'),
            ('HyperlinksChanged', 'false'),
            ('AppVersion', '12.0000')):
        appprops.append(
            makeelement(propname, tagtext=propvalue, nsprefix=None))
    return appprops
def websettings():
    '''Generate the minimal word/webSettings.xml element tree.'''
    settings = makeelement('webSettings')
    for childname in ('allowPNG', 'doNotSaveAsSingleFile'):
        settings.append(makeelement(childname))
    return settings
def relationshiplist():
    '''Return the default list of [relationship-type, target] pairs for a
    new document package.'''
    reltype_base = ('http://schemas.openxmlformats.org/officeDocument/2006/'
                    'relationships/')
    pairs = (
        ('numbering', 'numbering.xml'),
        ('styles', 'styles.xml'),
        ('settings', 'settings.xml'),
        ('webSettings', 'webSettings.xml'),
        ('fontTable', 'fontTable.xml'),
        ('theme', 'theme/theme1.xml'),
    )
    return [[reltype_base + suffix, target] for suffix, target in pairs]
def wordrelationships(relationshiplist):
    """
    Generate a Word relationships file.

    @param list relationshiplist: list of [type, target] pairs, as
                                  produced by relationshiplist()
    @return lxml.etree: Relationships element with one Relationship child
                        per pair; ids are assigned sequentially from rId1
    """
    # Default list of relationships
    # FIXME: using string hack instead of making element
    # relationships = makeelement('Relationships', nsprefix='pr')
    relationships = etree.fromstring(
        '<Relationships xmlns="http://schemas.openxmlformats.org/package/2006'
        '/relationships"></Relationships>')
    # Relationship IDs (rId) start at 1, so enumerate from 1 rather than
    # maintaining a manual counter.
    for relnum, relationship in enumerate(relationshiplist, start=1):
        rel_elm = makeelement('Relationship', nsprefix=None,
                              attributes={'Id': 'rId' + str(relnum),
                                          'Type': relationship[0],
                                          'Target': relationship[1]}
                              )
        relationships.append(rel_elm)
    return relationships
def savedocx(
        document, coreprops, appprops, contenttypes, websettings,
        wordrelationships, output, imagefiledict=None):
    """
    Save a modified document as a .docx package at *output*.

    @param document: word/document.xml etree (the main document part)
    @param coreprops: docProps/core.xml etree, see coreproperties()
    @param appprops: docProps/app.xml etree, see appproperties()
    @param contenttypes: [Content_Types].xml etree, see contenttypes()
    @param websettings: word/webSettings.xml etree, see websettings()
    @param wordrelationships: word/_rels/document.xml.rels etree, see
                              wordrelationships()
    @param output: target path (or file object) for the zip archive
    @param dict imagefiledict: maps image file paths to relationship ids,
                               as maintained by picture(); when None a
                               deprecation warning is issued and images
                               are expected to already sit in the
                               template directory
    @return None

    NOTE(review): this changes the process-wide working directory while
    packing and restores it afterwards, so it is not safe to call
    concurrently from multiple threads.
    """
    if imagefiledict is None:
        warn(
            'Using savedocx() without imagefiledict parameter will be deprec'
            'ated in the future.', PendingDeprecationWarning
        )
    # The support files are read from the template directory, which must
    # therefore exist.
    assert os.path.isdir(template_dir)
    docxfile = zipfile.ZipFile(
        output, mode='w', compression=zipfile.ZIP_DEFLATED)
    # Move to the template data path
    prev_dir = os.path.abspath('.')  # save previous working dir
    os.chdir(template_dir)
    # Serialize our trees into out zip file
    treesandfiles = {
        document: 'word/document.xml',
        coreprops: 'docProps/core.xml',
        appprops: 'docProps/app.xml',
        contenttypes: '[Content_Types].xml',
        websettings: 'word/webSettings.xml',
        wordrelationships: 'word/_rels/document.xml.rels'
    }
    for tree in treesandfiles:
        log.info('Saving: %s' % treesandfiles[tree])
        treestring = etree.tostring(tree, pretty_print=True)
        docxfile.writestr(treesandfiles[tree], treestring)
    # Add & compress images, if applicable
    if imagefiledict is not None:
        for imagepath, picrelid in imagefiledict.items():
            # Prefix the archive name with the rId so the same source file
            # name can appear under several relationships without clashing.
            archivename = 'word/media/%s_%s' % (picrelid, basename(imagepath))
            log.info('Saving: %s', archivename)
            docxfile.write(imagepath, archivename)
    # Add & compress support files
    files_to_ignore = ['.DS_Store']  # nuisance from some os's
    for dirpath, dirnames, filenames in os.walk('.'):
        for filename in filenames:
            if filename in files_to_ignore:
                continue
            templatefile = join(dirpath, filename)
            # Strip the leading './' so archive paths are package-relative.
            archivename = templatefile[2:]
            log.info('Saving: %s', archivename)
            docxfile.write(templatefile, archivename)
    log.info('Saved new file to: %r', output)
    docxfile.close()
    os.chdir(prev_dir)  # restore previous working dir
    return
|
mikemaccana/python-docx
|
docx.py
|
table
|
python
|
def table(contents, heading=True, colw=None, cwunit='dxa', tblw=0,
          twunit='auto', borders={}, celstyle=None):
    # NOTE(review): `borders={}` is a mutable default argument; it is only
    # read here, never mutated, so it is harmless in practice.
    """Return a w:tbl element built from *contents* (a list of rows, each a
    list of cells; a cell may be a string, an etree element, or a list of
    either). When *heading* is True the first row is styled as a header."""
    table = makeelement('tbl')
    columns = len(contents[0])
    # Table properties
    tableprops = makeelement('tblPr')
    tablestyle = makeelement('tblStyle', attributes={'val': ''})
    tableprops.append(tablestyle)
    tablewidth = makeelement(
        'tblW', attributes={'w': str(tblw), 'type': str(twunit)})
    tableprops.append(tablewidth)
    if len(borders.keys()):
        tableborders = makeelement('tblBorders')
        for b in ['top', 'left', 'bottom', 'right', 'insideH', 'insideV']:
            # The 'all' key, when present, overrides each individual side.
            if b in borders.keys() or 'all' in borders.keys():
                k = 'all' if 'all' in borders.keys() else b
                attrs = {}
                for a in borders[k].keys():
                    attrs[a] = unicode(borders[k][a])
                borderelem = makeelement(b, attributes=attrs)
                tableborders.append(borderelem)
        tableprops.append(tableborders)
    tablelook = makeelement('tblLook', attributes={'val': '0400'})
    tableprops.append(tablelook)
    table.append(tableprops)
    # Table Grid: one gridCol per column, default width 2390 when no
    # explicit column widths were given.
    tablegrid = makeelement('tblGrid')
    for i in range(columns):
        attrs = {'w': str(colw[i]) if colw else '2390'}
        tablegrid.append(makeelement('gridCol', attributes=attrs))
    table.append(tablegrid)
    # Heading Row
    row = makeelement('tr')
    rowprops = makeelement('trPr')
    cnfStyle = makeelement('cnfStyle', attributes={'val': '000000100000'})
    rowprops.append(cnfStyle)
    row.append(rowprops)
    if heading:
        i = 0
        # NOTE(review): this loop rebinds `heading` to each cell value; the
        # `1 if heading else 0` slice below therefore depends on the LAST
        # heading cell being truthy — confirm this is intended.
        for heading in contents[0]:
            cell = makeelement('tc')
            # Cell properties
            cellprops = makeelement('tcPr')
            if colw:
                wattr = {'w': str(colw[i]), 'type': cwunit}
            else:
                wattr = {'w': '0', 'type': 'auto'}
            cellwidth = makeelement('tcW', attributes=wattr)
            cellstyle = makeelement('shd', attributes={'val': 'clear',
                                                       'color': 'auto',
                                                       'fill': 'FFFFFF',
                                                       'themeFill': 'text2',
                                                       'themeFillTint': '99'})
            cellprops.append(cellwidth)
            cellprops.append(cellstyle)
            cell.append(cellprops)
            # Paragraph (Content): normalize a single value to a list so
            # mixed string/element cells are handled uniformly.
            if not isinstance(heading, (list, tuple)):
                heading = [heading]
            for h in heading:
                if isinstance(h, etree._Element):
                    cell.append(h)
                else:
                    cell.append(paragraph(h, jc='center'))
            row.append(cell)
            i += 1
        table.append(row)
    # Contents Rows
    for contentrow in contents[1 if heading else 0:]:
        row = makeelement('tr')
        i = 0
        for content in contentrow:
            cell = makeelement('tc')
            # Properties
            cellprops = makeelement('tcPr')
            if colw:
                wattr = {'w': str(colw[i]), 'type': cwunit}
            else:
                wattr = {'w': '0', 'type': 'auto'}
            cellwidth = makeelement('tcW', attributes=wattr)
            cellprops.append(cellwidth)
            cell.append(cellprops)
            # Paragraph (Content)
            if not isinstance(content, (list, tuple)):
                content = [content]
            for c in content:
                if isinstance(c, etree._Element):
                    cell.append(c)
                else:
                    # Per-column alignment from celstyle, defaulting left.
                    if celstyle and 'align' in celstyle[i].keys():
                        align = celstyle[i]['align']
                    else:
                        align = 'left'
                    cell.append(paragraph(c, jc=align))
            row.append(cell)
            i += 1
        table.append(row)
    return table
|
Return a table element based on specified parameters
@param list contents: A list of lists describing contents. Every item in
the list can be a string or a valid XML element
itself. It can also be a list. In that case all the
listed elements will be merged into the cell.
@param bool heading: Tells whether first line should be treated as
heading or not
    @param list colw:       list of integer column widths specified in cwunit units.
@param str cwunit: Unit used for column width:
'pct' : fiftieths of a percent
'dxa' : twentieths of a point
'nil' : no width
'auto' : automagically determined
@param int tblw: Table width
@param str twunit: Unit used for table width. Same possible values as
cwunit.
@param dict borders: Dictionary defining table border. Supported keys
are: 'top', 'left', 'bottom', 'right',
'insideH', 'insideV', 'all'.
When specified, the 'all' key has precedence over
others. Each key must define a dict of border
attributes:
color : The color of the border, in hex or
'auto'
space : The space, measured in points
sz : The size of the border, in eighths of
a point
val : The style of the border, see
http://www.schemacentral.com/sc/ooxml/t-w_ST_Border.htm
    @param list celstyle:   Specify the style for each column, list of dicts.
                            supported keys:
'align' : specify the alignment, see paragraph
documentation.
@return lxml.etree: Generated XML etree element
|
train
|
https://github.com/mikemaccana/python-docx/blob/4c9b46dbebe3d2a9b82dbcd35af36584a36fd9fe/docx.py#L297-L431
|
[
"def makeelement(tagname, tagtext=None, nsprefix='w', attributes=None,\n attrnsprefix=None):\n '''Create an element & return it'''\n # Deal with list of nsprefix by making namespacemap\n namespacemap = None\n if isinstance(nsprefix, list):\n namespacemap = {}\n for prefix in nsprefix:\n namespacemap[prefix] = nsprefixes[prefix]\n # FIXME: rest of code below expects a single prefix\n nsprefix = nsprefix[0]\n if nsprefix:\n namespace = '{%s}' % nsprefixes[nsprefix]\n else:\n # For when namespace = None\n namespace = ''\n newelement = etree.Element(namespace+tagname, nsmap=namespacemap)\n # Add attributes with namespaces\n if attributes:\n # If they haven't bothered setting attribute namespace, use an empty\n # string (equivalent of no namespace)\n if not attrnsprefix:\n # Quick hack: it seems every element that has a 'w' nsprefix for\n # its tag uses the same prefix for it's attributes\n if nsprefix == 'w':\n attributenamespace = namespace\n else:\n attributenamespace = ''\n else:\n attributenamespace = '{'+nsprefixes[attrnsprefix]+'}'\n\n for tagattribute in attributes:\n newelement.set(attributenamespace+tagattribute,\n attributes[tagattribute])\n if tagtext:\n newelement.text = tagtext\n return newelement\n",
"def paragraph(paratext, style='BodyText', breakbefore=False, jc='left'):\n \"\"\"\n Return a new paragraph element containing *paratext*. The paragraph's\n default style is 'Body Text', but a new style may be set using the\n *style* parameter.\n\n @param string jc: Paragraph alignment, possible values:\n left, center, right, both (justified), ...\n see http://www.schemacentral.com/sc/ooxml/t-w_ST_Jc.html\n for a full list\n\n If *paratext* is a list, add a run for each (text, char_format_str)\n 2-tuple in the list. char_format_str is a string containing one or more\n of the characters 'b', 'i', or 'u', meaning bold, italic, and underline\n respectively. For example:\n\n paratext = [\n ('some bold text', 'b'),\n ('some normal text', ''),\n ('some italic underlined text', 'iu')\n ]\n \"\"\"\n # Make our elements\n paragraph = makeelement('p')\n\n if not isinstance(paratext, list):\n paratext = [(paratext, '')]\n text_tuples = []\n for pt in paratext:\n text, char_styles_str = (pt if isinstance(pt, (list, tuple))\n else (pt, ''))\n text_elm = makeelement('t', tagtext=text)\n if len(text.strip()) < len(text):\n text_elm.set('{http://www.w3.org/XML/1998/namespace}space',\n 'preserve')\n text_tuples.append([text_elm, char_styles_str])\n pPr = makeelement('pPr')\n pStyle = makeelement('pStyle', attributes={'val': style})\n pJc = makeelement('jc', attributes={'val': jc})\n pPr.append(pStyle)\n pPr.append(pJc)\n\n # Add the text to the run, and the run to the paragraph\n paragraph.append(pPr)\n for text_elm, char_styles_str in text_tuples:\n run = makeelement('r')\n rPr = makeelement('rPr')\n # Apply styles\n if 'b' in char_styles_str:\n b = makeelement('b')\n rPr.append(b)\n if 'i' in char_styles_str:\n i = makeelement('i')\n rPr.append(i)\n if 'u' in char_styles_str:\n u = makeelement('u', attributes={'val': 'single'})\n rPr.append(u)\n run.append(rPr)\n # Insert lastRenderedPageBreak for assistive technologies like\n # document narrators to know when a page break 
occurred.\n if breakbefore:\n lastRenderedPageBreak = makeelement('lastRenderedPageBreak')\n run.append(lastRenderedPageBreak)\n run.append(text_elm)\n paragraph.append(run)\n # Return the combined paragraph\n return paragraph\n"
] |
# encoding: utf-8
"""
Open and modify Microsoft Word 2007 docx files (called 'OpenXML' and
'Office OpenXML' by Microsoft)
Part of Python's docx module - http://github.com/mikemaccana/python-docx
See LICENSE for licensing information.
"""
import os
import re
import time
import shutil
import zipfile
from lxml import etree
from os.path import abspath, basename, join
try:
from PIL import Image
except ImportError:
import Image
try:
from PIL.ExifTags import TAGS
except ImportError:
TAGS = {}
from exceptions import PendingDeprecationWarning
from warnings import warn
import logging
log = logging.getLogger(__name__)
# Record template directory's location which is just 'template' for a docx
# developer or 'site-packages/docx-template' if you have installed docx
template_dir = join(os.path.dirname(__file__), 'docx-template')  # installed
if not os.path.isdir(template_dir):
    template_dir = join(os.path.dirname(__file__), 'template')  # dev
# All Word prefixes / namespace matches used in document.xml & core.xml.
# LXML doesn't actually use prefixes (just the real namespace) , but these
# make it easier to copy Word output more easily.
# Maps short prefix -> full namespace URI; used throughout (makeelement,
# search, replace, ...) to build fully-qualified '{uri}tag' names.
nsprefixes = {
    'mo': 'http://schemas.microsoft.com/office/mac/office/2008/main',
    'o': 'urn:schemas-microsoft-com:office:office',
    've': 'http://schemas.openxmlformats.org/markup-compatibility/2006',
    # Text Content
    'w': 'http://schemas.openxmlformats.org/wordprocessingml/2006/main',
    'w10': 'urn:schemas-microsoft-com:office:word',
    'wne': 'http://schemas.microsoft.com/office/word/2006/wordml',
    # Drawing
    'a': 'http://schemas.openxmlformats.org/drawingml/2006/main',
    'm': 'http://schemas.openxmlformats.org/officeDocument/2006/math',
    'mv': 'urn:schemas-microsoft-com:mac:vml',
    'pic': 'http://schemas.openxmlformats.org/drawingml/2006/picture',
    'v': 'urn:schemas-microsoft-com:vml',
    'wp': ('http://schemas.openxmlformats.org/drawingml/2006/wordprocessing'
           'Drawing'),
    # Properties (core and extended)
    'cp': ('http://schemas.openxmlformats.org/package/2006/metadata/core-pr'
           'operties'),
    'dc': 'http://purl.org/dc/elements/1.1/',
    'ep': ('http://schemas.openxmlformats.org/officeDocument/2006/extended-'
           'properties'),
    'xsi': 'http://www.w3.org/2001/XMLSchema-instance',
    # Content Types
    'ct': 'http://schemas.openxmlformats.org/package/2006/content-types',
    # Package Relationships
    'r': ('http://schemas.openxmlformats.org/officeDocument/2006/relationsh'
          'ips'),
    'pr': 'http://schemas.openxmlformats.org/package/2006/relationships',
    # Dublin Core document properties
    'dcmitype': 'http://purl.org/dc/dcmitype/',
    'dcterms': 'http://purl.org/dc/terms/'}
def opendocx(file):
    """
    Open a docx file and return its main document part as an XML tree.

    @param file: path (or file-like object) of the .docx package
    @return lxml.etree: root element parsed from word/document.xml
    """
    # A .docx file is a zip package. Close the archive handle once the
    # document part has been read (the original left it open — a resource
    # leak that also kept the file locked on some platforms).
    mydoc = zipfile.ZipFile(file)
    try:
        xmlcontent = mydoc.read('word/document.xml')
    finally:
        mydoc.close()
    return etree.fromstring(xmlcontent)
def newdocument():
    '''Create and return a minimal document tree: a body inside a document
    element.'''
    doc = makeelement('document')
    body = makeelement('body')
    doc.append(body)
    return doc
def makeelement(tagname, tagtext=None, nsprefix='w', attributes=None,
                attrnsprefix=None):
    '''Create an element & return it'''
    # A list of prefixes produces a namespace map on the new element;
    # only the first prefix is used for the tag itself.
    nsmap = None
    if isinstance(nsprefix, list):
        nsmap = dict((prefix, nsprefixes[prefix]) for prefix in nsprefix)
        # FIXME: rest of code below expects a single prefix
        nsprefix = nsprefix[0]
    # nsprefix=None yields an unqualified tag.
    namespace = '{%s}' % nsprefixes[nsprefix] if nsprefix else ''
    newelement = etree.Element(namespace + tagname, nsmap=nsmap)
    # Add attributes with namespaces
    if attributes:
        if attrnsprefix:
            attributenamespace = '{' + nsprefixes[attrnsprefix] + '}'
        elif nsprefix == 'w':
            # Quick hack: it seems every element that has a 'w' nsprefix
            # for its tag uses the same prefix for its attributes.
            attributenamespace = namespace
        else:
            # No attribute namespace requested (and not a 'w' element).
            attributenamespace = ''
        for attrname, attrvalue in attributes.items():
            newelement.set(attributenamespace + attrname, attrvalue)
    if tagtext:
        newelement.text = tagtext
    return newelement
def pagebreak(type='page', orient='portrait'):
    '''Insert a break, default 'page'.
    See http://openxmldeveloper.org/forums/thread/4075.aspx
    Return our page break element.

    @param str type: 'page' for a simple page break, 'section' for a
                     section break carrying a page-size definition
    @param str orient: 'portrait' or 'landscape'; only used for section
                       breaks
    @raise ValueError: on an unknown *type* or *orient*
    '''
    # Need to enumerate different types of page breaks.
    validtypes = ['page', 'section']
    if type not in validtypes:
        tmpl = 'Page break style "%s" not implemented. Valid styles: %s.'
        raise ValueError(tmpl % (type, validtypes))
    validorients = ['portrait', 'landscape']
    if type == 'section' and orient not in validorients:
        # Previously an unknown orientation crashed later with a NameError
        # (pgSz was never assigned); fail fast with a clear message.
        tmpl = 'Page orientation "%s" not implemented. Valid orientations: %s.'
        raise ValueError(tmpl % (orient, validorients))
    pagebreak = makeelement('p')
    if type == 'page':
        run = makeelement('r')
        br = makeelement('br', attributes={'type': type})
        run.append(br)
        pagebreak.append(run)
    elif type == 'section':
        pPr = makeelement('pPr')
        sectPr = makeelement('sectPr')
        if orient == 'portrait':
            pgSz = makeelement('pgSz', attributes={'w': '12240', 'h': '15840'})
        elif orient == 'landscape':
            pgSz = makeelement('pgSz', attributes={'h': '12240', 'w': '15840',
                                                   'orient': 'landscape'})
        sectPr.append(pgSz)
        pPr.append(sectPr)
        pagebreak.append(pPr)
    return pagebreak
def paragraph(paratext, style='BodyText', breakbefore=False, jc='left'):
    """
    Return a new paragraph element containing *paratext*. The paragraph's
    default style is 'Body Text', but a new style may be set using the
    *style* parameter.

    @param string style: Paragraph style name written into the pStyle
                         val attribute
    @param bool breakbefore: When True, a lastRenderedPageBreak marker is
                             inserted at the start of every run
    @param string jc: Paragraph alignment, possible values:
                      left, center, right, both (justified), ...
                      see http://www.schemacentral.com/sc/ooxml/t-w_ST_Jc.html
                      for a full list

    If *paratext* is a list, add a run for each (text, char_format_str)
    2-tuple in the list. char_format_str is a string containing one or more
    of the characters 'b', 'i', or 'u', meaning bold, italic, and underline
    respectively. For example:

    paratext = [
        ('some bold text', 'b'),
        ('some normal text', ''),
        ('some italic underlined text', 'iu')
    ]
    """
    # Make our elements
    paragraph = makeelement('p')
    # Normalize a plain string into the list-of-tuples form.
    if not isinstance(paratext, list):
        paratext = [(paratext, '')]
    text_tuples = []
    for pt in paratext:
        # A bare string inside the list means "no character formatting".
        text, char_styles_str = (pt if isinstance(pt, (list, tuple))
                                 else (pt, ''))
        text_elm = makeelement('t', tagtext=text)
        # Word strips leading/trailing whitespace unless xml:space is set
        # to 'preserve' on the text element.
        if len(text.strip()) < len(text):
            text_elm.set('{http://www.w3.org/XML/1998/namespace}space',
                         'preserve')
        text_tuples.append([text_elm, char_styles_str])
    pPr = makeelement('pPr')
    pStyle = makeelement('pStyle', attributes={'val': style})
    pJc = makeelement('jc', attributes={'val': jc})
    pPr.append(pStyle)
    pPr.append(pJc)
    # Add the text to the run, and the run to the paragraph
    paragraph.append(pPr)
    for text_elm, char_styles_str in text_tuples:
        run = makeelement('r')
        rPr = makeelement('rPr')
        # Apply styles
        if 'b' in char_styles_str:
            b = makeelement('b')
            rPr.append(b)
        if 'i' in char_styles_str:
            i = makeelement('i')
            rPr.append(i)
        if 'u' in char_styles_str:
            u = makeelement('u', attributes={'val': 'single'})
            rPr.append(u)
        run.append(rPr)
        # Insert lastRenderedPageBreak for assistive technologies like
        # document narrators to know when a page break occurred.
        if breakbefore:
            lastRenderedPageBreak = makeelement('lastRenderedPageBreak')
            run.append(lastRenderedPageBreak)
        run.append(text_elm)
        paragraph.append(run)
    # Return the combined paragraph
    return paragraph
def contenttypes():
    '''Build and return the [Content_Types].xml tree for a new package.'''
    types = etree.fromstring(
        '<Types xmlns="http://schemas.openxmlformats.org/package/2006/conten'
        't-types"></Types>')
    # Explicit overrides: one entry per well-known package part.
    overrides = (
        ('/word/theme/theme1.xml',
         'application/vnd.openxmlformats-officedocument.theme+xml'),
        ('/word/fontTable.xml',
         'application/vnd.openxmlformats-officedocument.wordprocessingml.'
         'fontTable+xml'),
        ('/docProps/core.xml',
         'application/vnd.openxmlformats-package.core-properties+xml'),
        ('/docProps/app.xml',
         'application/vnd.openxmlformats-officedocument.extended-properties'
         '+xml'),
        ('/word/document.xml',
         'application/vnd.openxmlformats-officedocument.wordprocessingml.'
         'document.main+xml'),
        ('/word/settings.xml',
         'application/vnd.openxmlformats-officedocument.wordprocessingml.'
         'settings+xml'),
        ('/word/numbering.xml',
         'application/vnd.openxmlformats-officedocument.wordprocessingml.'
         'numbering+xml'),
        ('/word/styles.xml',
         'application/vnd.openxmlformats-officedocument.wordprocessingml.'
         'styles+xml'),
        ('/word/webSettings.xml',
         'application/vnd.openxmlformats-officedocument.wordprocessingml.'
         'webSettings+xml'),
    )
    for partname, contenttype in overrides:
        types.append(makeelement('Override', nsprefix=None,
                                 attributes={'PartName': partname,
                                             'ContentType': contenttype}))
    # Defaults keyed on file extension.
    defaults = (
        ('gif', 'image/gif'),
        ('jpeg', 'image/jpeg'),
        ('jpg', 'image/jpeg'),
        ('png', 'image/png'),
        ('rels', 'application/vnd.openxmlformats-package.relationships+xml'),
        ('xml', 'application/xml'),
    )
    for extension, contenttype in defaults:
        types.append(makeelement('Default', nsprefix=None,
                                 attributes={'Extension': extension,
                                             'ContentType': contenttype}))
    return types
def heading(headingtext, headinglevel, lang='en'):
    '''Make a new heading, return the heading element'''
    # Style names are localized; the level number is appended to the base.
    lmap = {'en': 'Heading', 'it': 'Titolo'}
    stylename = lmap[lang] + str(headinglevel)
    # Build the paragraph: properties first, then the single text run.
    para = makeelement('p')
    props = makeelement('pPr')
    props.append(makeelement('pStyle', attributes={'val': stylename}))
    run = makeelement('r')
    run.append(makeelement('t', tagtext=headingtext))
    para.append(props)
    para.append(run)
    return para
def picture(
        relationshiplist, picname, picdescription, pixelwidth=None,
        pixelheight=None, nochangeaspect=True, nochangearrowheads=True,
        imagefiledict=None):
    """
    Take a relationshiplist, picture file name, and return a paragraph
    containing the image and an updated relationshiplist

    @param list relationshiplist: list of [type, target] pairs; an image
                                  relationship is appended when the image
                                  is new
    @param str picname: path of the image file to embed
    @param str picdescription: alternative-text description of the image
    @param int pixelwidth: display width in pixels (image width if None)
    @param int pixelheight: display height in pixels (image height if None)
    @param bool nochangeaspect: lock the aspect ratio in Word
    @param bool nochangearrowheads: lock arrowheads in Word
    @param dict imagefiledict: maps image paths to relationship ids; when
                               None the image is copied into the template
                               directory instead (deprecated path)
    @return: (relationshiplist, paragraph) — plus imagefiledict as a third
             member when imagefiledict was given
    """
    if imagefiledict is None:
        warn(
            'Using picture() without imagefiledict parameter will be depreca'
            'ted in the future.', PendingDeprecationWarning
        )
    # http://openxmldeveloper.org/articles/462.aspx
    # Create an image. Size may be specified, otherwise it will based on the
    # pixel size of image. Return a paragraph containing the picture
    # Set relationship ID to that of the image or the first available one
    picid = '2'
    picpath = abspath(picname)
    if imagefiledict is not None:
        # Keep track of the image files in a separate dictionary so they don't
        # need to be copied into the template directory
        if picpath not in imagefiledict:
            picrelid = 'rId' + str(len(relationshiplist) + 1)
            imagefiledict[picpath] = picrelid
            relationshiplist.append([
                'http://schemas.openxmlformats.org/officeDocument/2006/relat'
                'ionships/image',
                'media/%s_%s' % (picrelid, basename(picpath))
            ])
        else:
            # Image already registered — reuse its relationship id.
            picrelid = imagefiledict[picpath]
    else:
        # Copy files into template directory for backwards compatibility
        # Images still accumulate in the template directory this way
        picrelid = 'rId' + str(len(relationshiplist) + 1)
        relationshiplist.append([
            'http://schemas.openxmlformats.org/officeDocument/2006/relations'
            'hips/image', 'media/' + picname
        ])
        media_dir = join(template_dir, 'word', 'media')
        if not os.path.isdir(media_dir):
            os.mkdir(media_dir)
        shutil.copyfile(picname, join(media_dir, picname))
    image = Image.open(picpath)
    # Extract EXIF data, if available
    try:
        exif = image._getexif()
        exif = {} if exif is None else exif
    except:
        # Best-effort: treat any EXIF failure as "no EXIF data".
        exif = {}
    imageExif = {}
    for tag, value in exif.items():
        imageExif[TAGS.get(tag, tag)] = value
    # Map the EXIF orientation flag (1-8) to the rotation / mirroring Word
    # must apply so the picture displays upright.
    imageOrientation = imageExif.get('Orientation', 1)
    imageAngle = {
        1: 0, 2: 0, 3: 180, 4: 0, 5: 90, 6: 90, 7: 270, 8: 270
    }[imageOrientation]
    imageFlipH = 'true' if imageOrientation in (2, 5, 7) else 'false'
    imageFlipV = 'true' if imageOrientation == 4 else 'false'
    # Check if the user has specified a size
    if not pixelwidth or not pixelheight:
        # If not, get info from the picture itself
        pixelwidth, pixelheight = image.size[0:2]
    # Swap width and height if necessary
    if imageOrientation in (5, 6, 7, 8):
        pixelwidth, pixelheight = pixelheight, pixelwidth
    # OpenXML measures on-screen objects in English Metric Units
    # 1cm = 36000 EMUs
    # NOTE(review): 12700 EMU equals 1 point (1/72 inch), so pixel sizes
    # are effectively treated as points here — confirm intended scale.
    emuperpixel = 12700
    width = str(pixelwidth * emuperpixel)
    height = str(pixelheight * emuperpixel)
    # There are 3 main elements inside a picture
    # 1. The Blipfill - specifies how the image fills the picture area
    # (stretch, tile, etc.)
    blipfill = makeelement('blipFill', nsprefix='pic')
    blipfill.append(makeelement('blip', nsprefix='a', attrnsprefix='r',
                                attributes={'embed': picrelid}))
    stretch = makeelement('stretch', nsprefix='a')
    stretch.append(makeelement('fillRect', nsprefix='a'))
    blipfill.append(makeelement('srcRect', nsprefix='a'))
    blipfill.append(stretch)
    # 2. The non visual picture properties
    nvpicpr = makeelement('nvPicPr', nsprefix='pic')
    cnvpr = makeelement(
        'cNvPr', nsprefix='pic',
        attributes={'id': '0', 'name': 'Picture 1', 'descr': picdescription}
    )
    nvpicpr.append(cnvpr)
    cnvpicpr = makeelement('cNvPicPr', nsprefix='pic')
    cnvpicpr.append(makeelement(
        'picLocks', nsprefix='a',
        attributes={'noChangeAspect': str(int(nochangeaspect)),
                    'noChangeArrowheads': str(int(nochangearrowheads))}))
    nvpicpr.append(cnvpicpr)
    # 3. The Shape properties
    sppr = makeelement('spPr', nsprefix='pic', attributes={'bwMode': 'auto'})
    # Transform: rotation is in 60000ths of a degree.
    xfrm = makeelement(
        'xfrm', nsprefix='a', attributes={
            'rot': str(imageAngle * 60000), 'flipH': imageFlipH,
            'flipV': imageFlipV
        }
    )
    xfrm.append(
        makeelement('off', nsprefix='a', attributes={'x': '0', 'y': '0'})
    )
    xfrm.append(
        makeelement(
            'ext', nsprefix='a', attributes={'cx': width, 'cy': height}
        )
    )
    prstgeom = makeelement(
        'prstGeom', nsprefix='a', attributes={'prst': 'rect'}
    )
    prstgeom.append(makeelement('avLst', nsprefix='a'))
    sppr.append(xfrm)
    sppr.append(prstgeom)
    # Add our 3 parts to the picture element
    pic = makeelement('pic', nsprefix='pic')
    pic.append(nvpicpr)
    pic.append(blipfill)
    pic.append(sppr)
    # Now make the supporting elements
    # The following sequence is just: make element, then add its children
    graphicdata = makeelement(
        'graphicData', nsprefix='a',
        attributes={'uri': ('http://schemas.openxmlformats.org/drawingml/200'
                            '6/picture')})
    graphicdata.append(pic)
    graphic = makeelement('graphic', nsprefix='a')
    graphic.append(graphicdata)
    framelocks = makeelement('graphicFrameLocks', nsprefix='a',
                             attributes={'noChangeAspect': '1'})
    framepr = makeelement('cNvGraphicFramePr', nsprefix='wp')
    framepr.append(framelocks)
    docpr = makeelement('docPr', nsprefix='wp',
                        attributes={'id': picid, 'name': 'Picture 1',
                                    'descr': picdescription})
    effectextent = makeelement('effectExtent', nsprefix='wp',
                               attributes={'l': '25400', 't': '0', 'r': '0',
                                           'b': '0'})
    extent = makeelement('extent', nsprefix='wp',
                         attributes={'cx': width, 'cy': height})
    inline = makeelement('inline', attributes={'distT': "0", 'distB': "0",
                                               'distL': "0", 'distR': "0"},
                         nsprefix='wp')
    inline.append(extent)
    inline.append(effectextent)
    inline.append(docpr)
    inline.append(framepr)
    inline.append(graphic)
    drawing = makeelement('drawing')
    drawing.append(inline)
    run = makeelement('r')
    run.append(drawing)
    paragraph = makeelement('p')
    paragraph.append(run)
    if imagefiledict is not None:
        return relationshiplist, paragraph, imagefiledict
    else:
        return relationshiplist, paragraph
def search(document, search):
    """
    Search a document for a regex, return success / fail result.

    @param document: document etree to scan
    @param str search: regular expression to look for in text elements
    @return bool: True as soon as any t (text) element matches
    """
    searchre = re.compile(search)
    # Hoist the qualified tag name out of the loop.
    text_tag = '{%s}t' % nsprefixes['w']
    for element in document.iter():
        if element.tag == text_tag and element.text:
            if searchre.search(element.text):
                # Found a match — no need to scan the rest of the document
                # (the original kept iterating after setting a flag).
                return True
    return False
def replace(document, search, replace):
    """
    Replace all occurences of string with a different string, return updated
    document
    """
    searchre = re.compile(search)
    text_tag = '{%s}t' % nsprefixes['w']
    # Rewrite, in place, every text (t) element whose content matches.
    for element in document.iter():
        if element.tag == text_tag and element.text:
            if searchre.search(element.text):
                element.text = re.sub(search, replace, element.text)
    return document
def clean(document):
    """ Perform misc cleaning operations on documents.
        Returns cleaned document.
    """
    # Remove empty text (t) and run (r) elements. Collect the doomed
    # elements first, then detach them, so the tree is never mutated
    # while it is being iterated.
    for tagtype in ('t', 'r'):
        fulltag = '{%s}%s' % (nsprefixes['w'], tagtype)
        doomed = [el for el in document.iter()
                  if el.tag == fulltag and not el.text and not len(el)]
        for el in doomed:
            el.getparent().remove(el)
    return document
def findTypeParent(element, tag):
    """ Finds first parent of element of the given type

    @param object element: etree element
    @param string tag: the tag of the parent to search for

    @return object element: the found parent or None when not found
    """
    p = element
    while True:
        p = p.getparent()
        if p is None:
            # Reached past the root without a match. (The previous code
            # fell off the tree and raised AttributeError on None.tag
            # instead of returning None as documented.)
            return None
        if p.tag == tag:
            return p
def AdvSearch(document, search, bs=3):
    '''Return set of all regex matches

    This is an advanced version of python-docx.search() that takes into
    account blocks of <bs> elements at a time.

    What it does:
    It searches the entire document body for text blocks.
    Since the text to search could be spawned across multiple text blocks,
    we need to adopt some sort of algorithm to handle this situation.
    The smaller matching group of blocks (up to bs) is then adopted.
    If the matching group has more than one block, blocks other than first
    are cleared and all the replacement text is put on first block.

    Examples:
    original text blocks : [ 'Hel', 'lo,', ' world!' ]
    search : 'Hello,'
    output blocks : [ 'Hello,' ]

    original text blocks : [ 'Hel', 'lo', ' __', 'name', '__!' ]
    search : '(__[a-z]+__)'
    output blocks : [ '__name__' ]

    @param instance document: The original document
    @param str search: The text to search for (regexp)
    @param int bs: Size of the sliding window of text elements (see above)
    @return set: All occurrences of the search string
    '''
    # Compile the search regexp
    searchre = re.compile(search)
    matches = []
    # Will match against searchels. Searchels is a list that contains last
    # n text elements found in the document. 1 < n < bs
    searchels = []
    for element in document.iter():
        if element.tag == '{%s}t' % nsprefixes['w']:  # t (text) elements
            if element.text:
                # Add this element to searchels
                searchels.append(element)
                if len(searchels) > bs:
                    # If searchels is too long, remove the first elements
                    searchels.pop(0)
                # Search all combinations of searchels, starting from
                # smaller ones up to bigger ones
                # l = search length (number of elements merged)
                # s = search start (index of the first merged element)
                # e = element IDs to merge
                found = False
                for l in range(1, len(searchels)+1):
                    if found:
                        break
                    for s in range(len(searchels)):
                        if found:
                            break
                        if s+l <= len(searchels):
                            e = range(s, s+l)
                            txtsearch = ''
                            for k in e:
                                txtsearch += searchels[k].text
                            # Search for the text in the whole txtsearch
                            match = searchre.search(txtsearch)
                            if match:
                                matches.append(match.group())
                                found = True
    return set(matches)
def advReplace(document, search, replace, bs=3):
    """
    Replace all occurrences of string with a different string, return updated
    document

    This is a modified version of python-docx.replace() that takes into
    account blocks of <bs> elements at a time. The replace element can also
    be a string or an xml etree element.

    What it does:
    It searches the entire document body for text blocks.
    Then scans those text blocks for the search pattern.
    Since the text to search could be spawned across multiple text blocks,
    we need to adopt some sort of algorithm to handle this situation.
    The smaller matching group of blocks (up to bs) is then adopted.
    If the matching group has more than one block, blocks other than first
    are cleared and all the replacement text is put on first block.

    Examples:
    original text blocks : [ 'Hel', 'lo,', ' world!' ]
    search / replace: 'Hello,' / 'Hi!'
    output blocks : [ 'Hi!', '', ' world!' ]

    original text blocks : [ 'Hel', 'lo,', ' world!' ]
    search / replace: 'Hello, world' / 'Hi!'
    output blocks : [ 'Hi!!', '', '' ]

    original text blocks : [ 'Hel', 'lo,', ' world!' ]
    search / replace: 'Hel' / 'Hal'
    output blocks : [ 'Hal', 'lo,', ' world!' ]

    @param instance document: The original document
    @param str search: The text to search for (regexp)
    @param mixed replace: The replacement text or lxml.etree element to
                          append, or a list of etree elements
    @param int bs: See above
    @return instance The document with replacement applied
    """
    # Enables debug output
    DEBUG = False
    newdocument = document
    # Compile the search regexp
    searchre = re.compile(search)
    # Will match against searchels. Searchels is a list that contains last
    # n text elements found in the document. 1 < n < bs
    searchels = []
    for element in newdocument.iter():
        if element.tag == '{%s}t' % nsprefixes['w']:  # t (text) elements
            if element.text:
                # Add this element to searchels
                searchels.append(element)
                if len(searchels) > bs:
                    # If searchels is too long, remove first elements
                    searchels.pop(0)
                # Search all combinations, of searchels, starting from
                # smaller up to bigger ones
                # l = search length
                # s = search start
                # e = element IDs to merge
                found = False
                for l in range(1, len(searchels)+1):
                    if found:
                        break
                    for s in range(len(searchels)):
                        if found:
                            break
                        if s+l <= len(searchels):
                            e = range(s, s+l)
                            txtsearch = ''
                            for k in e:
                                txtsearch += searchels[k].text
                            # Search for the text in the whole txtsearch
                            match = searchre.search(txtsearch)
                            if match:
                                found = True
                                # I've found something :)
                                if DEBUG:
                                    log.debug("Found element!")
                                    log.debug("Search regexp: %s",
                                              searchre.pattern)
                                    log.debug("Requested replacement: %s",
                                              replace)
                                    log.debug("Matched text: %s", txtsearch)
                                    log.debug("Matched text (splitted): %s",
                                              map(lambda i: i.text, searchels))
                                    log.debug("Matched at position: %s",
                                              match.start())
                                    log.debug("matched in elements: %s", e)
                                    if isinstance(replace, etree._Element):
                                        log.debug("Will replace with XML CODE")
                                    # BUG FIX: was `isinstance(replace(list,
                                    # tuple))`, which *called* replace instead
                                    # of testing its type.
                                    elif isinstance(replace, (list, tuple)):
                                        log.debug("Will replace with LIST OF"
                                                  " ELEMENTS")
                                    else:
                                        # BUG FIX: value now goes through a %s
                                        # placeholder instead of being passed
                                        # as an unused logging argument.
                                        log.debug("Will replace with: %s",
                                                  re.sub(search, replace,
                                                         txtsearch))
                                curlen = 0
                                replaced = False
                                for i in e:
                                    curlen += len(searchels[i].text)
                                    if curlen > match.start() and not replaced:
                                        # The match occurred in THIS element.
                                        # Put in the whole replaced text
                                        if isinstance(replace, etree._Element):
                                            # Convert to a list and process
                                            # it later
                                            replace = [replace]
                                        if isinstance(replace, (list, tuple)):
                                            # I'm replacing with a list of
                                            # etree elements
                                            # clear the text in the tag and
                                            # append the element after the
                                            # parent paragraph
                                            # (because t elements cannot have
                                            # childs)
                                            p = findTypeParent(
                                                searchels[i],
                                                '{%s}p' % nsprefixes['w'])
                                            searchels[i].text = re.sub(
                                                search, '', txtsearch)
                                            insindex = p.getparent().index(p)+1
                                            for r in replace:
                                                p.getparent().insert(
                                                    insindex, r)
                                                insindex += 1
                                        else:
                                            # Replacing with pure text
                                            searchels[i].text = re.sub(
                                                search, replace, txtsearch)
                                        replaced = True
                                        log.debug(
                                            "Replacing in element #: %s", i)
                                    else:
                                        # Clears the other text elements
                                        searchels[i].text = ''
    return newdocument
def getdocumenttext(document):
    '''Return the raw text of a document, as a list of paragraph strings.'''
    w_ns = nsprefixes['w']
    para_tag = '{%s}p' % w_ns
    text_tag = '{%s}t' % w_ns
    tab_tag = '{%s}tab' % w_ns
    # Collect every paragraph (p) element in document order.
    paragraphs = [el for el in document.iter() if el.tag == para_tag]
    # A single sentence may be spread over multiple text elements, so
    # concatenate all text (t) descendants of each paragraph; tab
    # elements contribute a literal '\t'.
    paratextlist = []
    for para in paragraphs:
        pieces = []
        for child in para.iter():
            if child.tag == text_tag:
                if child.text:
                    pieces.append(child.text)
            elif child.tag == tab_tag:
                pieces.append('\t')
        paratext = u''.join(pieces)
        # Skip paragraphs that produced no text at all.
        if paratext:
            paratextlist.append(paratext)
    return paratextlist
def coreproperties(title, subject, creator, keywords, lastmodifiedby=None):
    """
    Build the core document properties element (the common properties
    referred to in the 'Dublin Core' specification). See appproperties()
    for the application-specific properties.

    @param str title: document title
    @param str subject: document subject
    @param str creator: author name
    @param list keywords: keyword strings, joined with commas
    @param str lastmodifiedby: defaults to *creator* when not given
    @return etree element: the cp:coreProperties tree
    """
    coreprops = makeelement('coreProperties', nsprefix='cp')
    # Simple child elements, in the same order Word emits them.
    children = (
        ('title', title, 'dc'),
        ('subject', subject, 'dc'),
        ('creator', creator, 'dc'),
        ('keywords', ','.join(keywords), 'cp'),
        ('lastModifiedBy', lastmodifiedby or creator, 'cp'),
        ('revision', '1', 'cp'),
        ('category', 'Examples', 'cp'),
        ('description', 'Examples', 'dc'),
    )
    for tag, text, prefix in children:
        coreprops.append(makeelement(tag, tagtext=text, nsprefix=prefix))
    currenttime = time.strftime('%Y-%m-%dT%H:%M:%SZ')
    # Document creation and modify times.
    # Problem here: the attribute name uses one namespace while its value
    # uses another, so build these two elements from raw XML as a
    # workaround.
    for doctime in ('created', 'modified'):
        elm_str = (
            '<dcterms:%s xmlns:xsi="http://www.w3.org/2001/XMLSchema-instanc'
            'e" xmlns:dcterms="http://purl.org/dc/terms/" xsi:type="dcterms:'
            'W3CDTF">%s</dcterms:%s>'
        ) % (doctime, currenttime, doctime)
        coreprops.append(etree.fromstring(elm_str))
    return coreprops
def appproperties():
    """
    Create app-specific (extended) document properties. See docproperties()
    for the more common document properties.

    @return etree element: the extended-properties tree with fixed
                           boilerplate values
    """
    # FIXME: build via makeelement() instead of parsing a string; the raw
    # string is used because of the double namespace declaration.
    # (Removed a dead `appprops = makeelement(...)` assignment that was
    # immediately overwritten by the fromstring() call below.)
    appprops = etree.fromstring(
        '<?xml version="1.0" encoding="UTF-8" standalone="yes"?><Properties x'
        'mlns="http://schemas.openxmlformats.org/officeDocument/2006/extended'
        '-properties" xmlns:vt="http://schemas.openxmlformats.org/officeDocum'
        'ent/2006/docPropsVTypes"></Properties>')
    # Boilerplate statistics copied from a reference document; Word
    # recomputes them on open.
    props = \
        {'Template': 'Normal.dotm',
         'TotalTime': '6',
         'Pages': '1',
         'Words': '83',
         'Characters': '475',
         'Application': 'Microsoft Word 12.0.0',
         'DocSecurity': '0',
         'Lines': '12',
         'Paragraphs': '8',
         'ScaleCrop': 'false',
         'LinksUpToDate': 'false',
         'CharactersWithSpaces': '583',
         'SharedDoc': 'false',
         'HyperlinksChanged': 'false',
         'AppVersion': '12.0000'}
    for prop, value in props.items():
        appprops.append(makeelement(prop, tagtext=value, nsprefix=None))
    return appprops
def websettings():
    '''Build and return the minimal word/webSettings.xml element tree.'''
    settings = makeelement('webSettings')
    for child in ('allowPNG', 'doNotSaveAsSingleFile'):
        settings.append(makeelement(child))
    return settings
def relationshiplist():
    '''Return the default list of [type, target] package relationships
    for a new document.'''
    base = ('http://schemas.openxmlformats.org/officeDocument/2006/'
            'relationships/')
    targets = (
        ('numbering', 'numbering.xml'),
        ('styles', 'styles.xml'),
        ('settings', 'settings.xml'),
        ('webSettings', 'webSettings.xml'),
        ('fontTable', 'fontTable.xml'),
        ('theme', 'theme/theme1.xml'),
    )
    return [[base + reltype, target] for reltype, target in targets]
def wordrelationships(relationshiplist):
    '''Generate a Word relationships file from a list of [type, target]
    pairs.'''
    # FIXME: using string hack instead of making element
    # relationships = makeelement('Relationships', nsprefix='pr')
    relationships = etree.fromstring(
        '<Relationships xmlns="http://schemas.openxmlformats.org/package/2006'
        '/relationships"></Relationships>')
    # Relationship IDs (rId) start at 1.
    for num, (reltype, target) in enumerate(relationshiplist, start=1):
        relationships.append(makeelement(
            'Relationship', nsprefix=None,
            attributes={'Id': 'rId%d' % num,
                        'Type': reltype,
                        'Target': target}))
    return relationships
def savedocx(
        document, coreprops, appprops, contenttypes, websettings,
        wordrelationships, output, imagefiledict=None):
    """
    Save a modified document.

    Serializes the supplied element trees into a new docx (zip) archive at
    *output*, together with every support file found in the template
    directory.

    @param instance document: word/document.xml element tree
    @param instance coreprops: docProps/core.xml element tree
    @param instance appprops: docProps/app.xml element tree
    @param instance contenttypes: [Content_Types].xml element tree
    @param instance websettings: word/webSettings.xml element tree
    @param instance wordrelationships: word/_rels/document.xml.rels tree
    @param output: path or file object for the new docx archive
    @param dict imagefiledict: optional map of image path -> relationship
                               id, as maintained by picture()
    """
    if imagefiledict is None:
        warn(
            'Using savedocx() without imagefiledict parameter will be deprec'
            'ated in the future.', PendingDeprecationWarning
        )
    assert os.path.isdir(template_dir)
    docxfile = zipfile.ZipFile(
        output, mode='w', compression=zipfile.ZIP_DEFLATED)
    # Move to the template data path
    # NOTE(review): os.chdir changes process-global state; not safe if other
    # threads depend on the working directory.
    prev_dir = os.path.abspath('.')  # save previous working dir
    os.chdir(template_dir)
    # Serialize our trees into out zip file
    treesandfiles = {
        document: 'word/document.xml',
        coreprops: 'docProps/core.xml',
        appprops: 'docProps/app.xml',
        contenttypes: '[Content_Types].xml',
        websettings: 'word/webSettings.xml',
        wordrelationships: 'word/_rels/document.xml.rels'
    }
    for tree in treesandfiles:
        log.info('Saving: %s' % treesandfiles[tree])
        treestring = etree.tostring(tree, pretty_print=True)
        docxfile.writestr(treesandfiles[tree], treestring)
    # Add & compress images, if applicable
    if imagefiledict is not None:
        for imagepath, picrelid in imagefiledict.items():
            # Media names are prefixed with the relationship id so the same
            # basename can appear more than once without clashing.
            archivename = 'word/media/%s_%s' % (picrelid, basename(imagepath))
            log.info('Saving: %s', archivename)
            docxfile.write(imagepath, archivename)
    # Add & compress support files
    files_to_ignore = ['.DS_Store']  # nuisance from some os's
    for dirpath, dirnames, filenames in os.walk('.'):
        for filename in filenames:
            if filename in files_to_ignore:
                continue
            templatefile = join(dirpath, filename)
            # Strip the leading './' so archive paths are relative.
            archivename = templatefile[2:]
            log.info('Saving: %s', archivename)
            docxfile.write(templatefile, archivename)
    log.info('Saved new file to: %r', output)
    docxfile.close()
    os.chdir(prev_dir)  # restore previous working dir
    return
|
mikemaccana/python-docx
|
docx.py
|
picture
|
python
|
def picture(
        relationshiplist, picname, picdescription, pixelwidth=None,
        pixelheight=None, nochangeaspect=True, nochangearrowheads=True,
        imagefiledict=None):
    """
    Take a relationshiplist, picture file name, and return a paragraph
    containing the image and an updated relationshiplist.

    @param list relationshiplist: list of [type, target] relationships;
                                  extended in place with the image entry
    @param str picname: path to the image file
    @param str picdescription: alt-text / description for the picture
    @param int pixelwidth: display width in pixels (default: image width)
    @param int pixelheight: display height in pixels (default: image height)
    @param bool nochangeaspect: lock the aspect ratio in Word
    @param bool nochangearrowheads: lock arrowhead editing in Word
    @param dict imagefiledict: optional map of image path -> relationship
                               id; when given, images are referenced instead
                               of being copied into the template directory
    @return (relationshiplist, paragraph) or, when imagefiledict is given,
            (relationshiplist, paragraph, imagefiledict)
    """
    if imagefiledict is None:
        warn(
            'Using picture() without imagefiledict parameter will be depreca'
            'ted in the future.', PendingDeprecationWarning
        )
    # http://openxmldeveloper.org/articles/462.aspx
    # Create an image. Size may be specified, otherwise it will based on the
    # pixel size of image. Return a paragraph containing the picture
    # Set relationship ID to that of the image or the first available one
    picid = '2'
    picpath = abspath(picname)
    if imagefiledict is not None:
        # Keep track of the image files in a separate dictionary so they don't
        # need to be copied into the template directory
        if picpath not in imagefiledict:
            picrelid = 'rId' + str(len(relationshiplist) + 1)
            imagefiledict[picpath] = picrelid
            relationshiplist.append([
                'http://schemas.openxmlformats.org/officeDocument/2006/relat'
                'ionships/image',
                'media/%s_%s' % (picrelid, basename(picpath))
            ])
        else:
            # Image already registered: reuse its relationship id.
            picrelid = imagefiledict[picpath]
    else:
        # Copy files into template directory for backwards compatibility
        # Images still accumulate in the template directory this way
        picrelid = 'rId' + str(len(relationshiplist) + 1)
        relationshiplist.append([
            'http://schemas.openxmlformats.org/officeDocument/2006/relations'
            'hips/image', 'media/' + picname
        ])
        media_dir = join(template_dir, 'word', 'media')
        if not os.path.isdir(media_dir):
            os.mkdir(media_dir)
        shutil.copyfile(picname, join(media_dir, picname))
    image = Image.open(picpath)
    # Extract EXIF data, if available
    try:
        exif = image._getexif()
        exif = {} if exif is None else exif
    except:
        # NOTE(review): bare except deliberately treats any EXIF failure as
        # "no EXIF data" — best-effort by design.
        exif = {}
    imageExif = {}
    for tag, value in exif.items():
        imageExif[TAGS.get(tag, tag)] = value
    # EXIF orientation values 1-8 map to rotation + mirroring; see the
    # EXIF spec. Default 1 means "no transform".
    imageOrientation = imageExif.get('Orientation', 1)
    imageAngle = {
        1: 0, 2: 0, 3: 180, 4: 0, 5: 90, 6: 90, 7: 270, 8: 270
    }[imageOrientation]
    imageFlipH = 'true' if imageOrientation in (2, 5, 7) else 'false'
    imageFlipV = 'true' if imageOrientation == 4 else 'false'
    # Check if the user has specified a size
    if not pixelwidth or not pixelheight:
        # If not, get info from the picture itself
        pixelwidth, pixelheight = image.size[0:2]
    # Swap width and height if necessary (rotated orientations)
    if imageOrientation in (5, 6, 7, 8):
        pixelwidth, pixelheight = pixelheight, pixelwidth
    # OpenXML measures on-screen objects in English Metric Units
    # 1cm = 36000 EMUs
    # NOTE(review): 12700 EMU is one *point*, not one pixel — the constant
    # name is historical; confirm before changing the scaling.
    emuperpixel = 12700
    width = str(pixelwidth * emuperpixel)
    height = str(pixelheight * emuperpixel)
    # There are 3 main elements inside a picture
    # 1. The Blipfill - specifies how the image fills the picture area
    # (stretch, tile, etc.)
    blipfill = makeelement('blipFill', nsprefix='pic')
    blipfill.append(makeelement('blip', nsprefix='a', attrnsprefix='r',
                    attributes={'embed': picrelid}))
    stretch = makeelement('stretch', nsprefix='a')
    stretch.append(makeelement('fillRect', nsprefix='a'))
    blipfill.append(makeelement('srcRect', nsprefix='a'))
    blipfill.append(stretch)
    # 2. The non visual picture properties
    nvpicpr = makeelement('nvPicPr', nsprefix='pic')
    cnvpr = makeelement(
        'cNvPr', nsprefix='pic',
        attributes={'id': '0', 'name': 'Picture 1', 'descr': picdescription}
    )
    nvpicpr.append(cnvpr)
    cnvpicpr = makeelement('cNvPicPr', nsprefix='pic')
    cnvpicpr.append(makeelement(
        'picLocks', nsprefix='a',
        attributes={'noChangeAspect': str(int(nochangeaspect)),
                    'noChangeArrowheads': str(int(nochangearrowheads))}))
    nvpicpr.append(cnvpicpr)
    # 3. The Shape properties
    sppr = makeelement('spPr', nsprefix='pic', attributes={'bwMode': 'auto'})
    # xfrm carries the EXIF-derived rotation/flip so the picture renders
    # upright in Word. Rotation is in 60000ths of a degree.
    xfrm = makeelement(
        'xfrm', nsprefix='a', attributes={
            'rot': str(imageAngle * 60000), 'flipH': imageFlipH,
            'flipV': imageFlipV
        }
    )
    xfrm.append(
        makeelement('off', nsprefix='a', attributes={'x': '0', 'y': '0'})
    )
    xfrm.append(
        makeelement(
            'ext', nsprefix='a', attributes={'cx': width, 'cy': height}
        )
    )
    prstgeom = makeelement(
        'prstGeom', nsprefix='a', attributes={'prst': 'rect'}
    )
    prstgeom.append(makeelement('avLst', nsprefix='a'))
    sppr.append(xfrm)
    sppr.append(prstgeom)
    # Add our 3 parts to the picture element
    pic = makeelement('pic', nsprefix='pic')
    pic.append(nvpicpr)
    pic.append(blipfill)
    pic.append(sppr)
    # Now make the supporting elements
    # The following sequence is just: make element, then add its children
    graphicdata = makeelement(
        'graphicData', nsprefix='a',
        attributes={'uri': ('http://schemas.openxmlformats.org/drawingml/200'
                            '6/picture')})
    graphicdata.append(pic)
    graphic = makeelement('graphic', nsprefix='a')
    graphic.append(graphicdata)
    framelocks = makeelement('graphicFrameLocks', nsprefix='a',
                             attributes={'noChangeAspect': '1'})
    framepr = makeelement('cNvGraphicFramePr', nsprefix='wp')
    framepr.append(framelocks)
    docpr = makeelement('docPr', nsprefix='wp',
                        attributes={'id': picid, 'name': 'Picture 1',
                                    'descr': picdescription})
    effectextent = makeelement('effectExtent', nsprefix='wp',
                               attributes={'l': '25400', 't': '0', 'r': '0',
                                           'b': '0'})
    extent = makeelement('extent', nsprefix='wp',
                         attributes={'cx': width, 'cy': height})
    inline = makeelement('inline', attributes={'distT': "0", 'distB': "0",
                                               'distL': "0", 'distR': "0"},
                         nsprefix='wp')
    inline.append(extent)
    inline.append(effectextent)
    inline.append(docpr)
    inline.append(framepr)
    inline.append(graphic)
    drawing = makeelement('drawing')
    drawing.append(inline)
    run = makeelement('r')
    run.append(drawing)
    # NOTE: local name shadows the module-level paragraph() function.
    paragraph = makeelement('p')
    paragraph.append(run)
    if imagefiledict is not None:
        return relationshiplist, paragraph, imagefiledict
    else:
        return relationshiplist, paragraph
|
Take a relationshiplist, picture file name, and return a paragraph
containing the image and an updated relationshiplist
|
train
|
https://github.com/mikemaccana/python-docx/blob/4c9b46dbebe3d2a9b82dbcd35af36584a36fd9fe/docx.py#L434-L614
|
[
"def makeelement(tagname, tagtext=None, nsprefix='w', attributes=None,\n attrnsprefix=None):\n '''Create an element & return it'''\n # Deal with list of nsprefix by making namespacemap\n namespacemap = None\n if isinstance(nsprefix, list):\n namespacemap = {}\n for prefix in nsprefix:\n namespacemap[prefix] = nsprefixes[prefix]\n # FIXME: rest of code below expects a single prefix\n nsprefix = nsprefix[0]\n if nsprefix:\n namespace = '{%s}' % nsprefixes[nsprefix]\n else:\n # For when namespace = None\n namespace = ''\n newelement = etree.Element(namespace+tagname, nsmap=namespacemap)\n # Add attributes with namespaces\n if attributes:\n # If they haven't bothered setting attribute namespace, use an empty\n # string (equivalent of no namespace)\n if not attrnsprefix:\n # Quick hack: it seems every element that has a 'w' nsprefix for\n # its tag uses the same prefix for it's attributes\n if nsprefix == 'w':\n attributenamespace = namespace\n else:\n attributenamespace = ''\n else:\n attributenamespace = '{'+nsprefixes[attrnsprefix]+'}'\n\n for tagattribute in attributes:\n newelement.set(attributenamespace+tagattribute,\n attributes[tagattribute])\n if tagtext:\n newelement.text = tagtext\n return newelement\n"
] |
# encoding: utf-8
"""
Open and modify Microsoft Word 2007 docx files (called 'OpenXML' and
'Office OpenXML' by Microsoft)
Part of Python's docx module - http://github.com/mikemaccana/python-docx
See LICENSE for licensing information.
"""
import os
import re
import time
import shutil
import zipfile
from lxml import etree
from os.path import abspath, basename, join
try:
from PIL import Image
except ImportError:
import Image
try:
from PIL.ExifTags import TAGS
except ImportError:
TAGS = {}
from exceptions import PendingDeprecationWarning
from warnings import warn
import logging
log = logging.getLogger(__name__)
# Record template directory's location which is just 'template' for a docx
# developer or 'site-packages/docx-template' if you have installed docx
template_dir = join(os.path.dirname(__file__), 'docx-template') # installed
if not os.path.isdir(template_dir):
template_dir = join(os.path.dirname(__file__), 'template') # dev
# All Word prefixes / namespace matches used in document.xml & core.xml.
# LXML doesn't actually use prefixes (just the real namespace) , but these
# make it easier to copy Word output more easily.
nsprefixes = {
'mo': 'http://schemas.microsoft.com/office/mac/office/2008/main',
'o': 'urn:schemas-microsoft-com:office:office',
've': 'http://schemas.openxmlformats.org/markup-compatibility/2006',
# Text Content
'w': 'http://schemas.openxmlformats.org/wordprocessingml/2006/main',
'w10': 'urn:schemas-microsoft-com:office:word',
'wne': 'http://schemas.microsoft.com/office/word/2006/wordml',
# Drawing
'a': 'http://schemas.openxmlformats.org/drawingml/2006/main',
'm': 'http://schemas.openxmlformats.org/officeDocument/2006/math',
'mv': 'urn:schemas-microsoft-com:mac:vml',
'pic': 'http://schemas.openxmlformats.org/drawingml/2006/picture',
'v': 'urn:schemas-microsoft-com:vml',
'wp': ('http://schemas.openxmlformats.org/drawingml/2006/wordprocessing'
'Drawing'),
# Properties (core and extended)
'cp': ('http://schemas.openxmlformats.org/package/2006/metadata/core-pr'
'operties'),
'dc': 'http://purl.org/dc/elements/1.1/',
'ep': ('http://schemas.openxmlformats.org/officeDocument/2006/extended-'
'properties'),
'xsi': 'http://www.w3.org/2001/XMLSchema-instance',
# Content Types
'ct': 'http://schemas.openxmlformats.org/package/2006/content-types',
# Package Relationships
'r': ('http://schemas.openxmlformats.org/officeDocument/2006/relationsh'
'ips'),
'pr': 'http://schemas.openxmlformats.org/package/2006/relationships',
# Dublin Core document properties
'dcmitype': 'http://purl.org/dc/dcmitype/',
'dcterms': 'http://purl.org/dc/terms/'}
def opendocx(file):
    '''Open a docx file, return a document XML tree.

    @param file: path or file-like object of the .docx archive
    @return etree element: root of the archive's word/document.xml
    '''
    mydoc = zipfile.ZipFile(file)
    try:
        xmlcontent = mydoc.read('word/document.xml')
    finally:
        # Close the archive; it was previously left open (resource leak).
        mydoc.close()
    document = etree.fromstring(xmlcontent)
    return document
def newdocument():
    '''Return a minimal new document tree containing an empty body.'''
    doc = makeelement('document')
    body = makeelement('body')
    doc.append(body)
    return doc
def makeelement(tagname, tagtext=None, nsprefix='w', attributes=None,
                attrnsprefix=None):
    '''Create an element & return it.

    @param str tagname: local tag name
    @param str tagtext: optional text content
    @param nsprefix: namespace prefix key (or list of keys) into nsprefixes;
                     None creates an element without a namespace
    @param dict attributes: attribute name -> value pairs
    @param str attrnsprefix: namespace prefix for attribute names
    '''
    # A list of prefixes produces an explicit nsmap on the element.
    namespacemap = None
    if isinstance(nsprefix, list):
        namespacemap = {prefix: nsprefixes[prefix] for prefix in nsprefix}
        # FIXME: rest of code below expects a single prefix
        nsprefix = nsprefix[0]
    namespace = '{%s}' % nsprefixes[nsprefix] if nsprefix else ''
    newelement = etree.Element(namespace + tagname, nsmap=namespacemap)
    if attributes:
        # Pick the namespace used for attribute names. If the caller did
        # not choose one: every element with a 'w' tag prefix also uses
        # 'w' for its attributes (quick hack); otherwise no namespace.
        if attrnsprefix:
            attributenamespace = '{' + nsprefixes[attrnsprefix] + '}'
        elif nsprefix == 'w':
            attributenamespace = namespace
        else:
            attributenamespace = ''
        for name, value in attributes.items():
            newelement.set(attributenamespace + name, value)
    if tagtext:
        newelement.text = tagtext
    return newelement
def pagebreak(type='page', orient='portrait'):
    '''Insert a break, default 'page'.

    See http://openxmldeveloper.org/forums/thread/4075.aspx
    Return our page break element.

    @param str type: 'page' or 'section'
    @param str orient: for section breaks only, 'portrait' or 'landscape'
    @return etree element: the new break paragraph
    @raise ValueError: on an unknown type or orientation
    '''
    # Need to enumerate different types of page breaks.
    validtypes = ['page', 'section']
    if type not in validtypes:
        tmpl = 'Page break style "%s" not implemented. Valid styles: %s.'
        raise ValueError(tmpl % (type, validtypes))
    pagebreak = makeelement('p')
    if type == 'page':
        run = makeelement('r')
        br = makeelement('br', attributes={'type': type})
        run.append(br)
        pagebreak.append(run)
    elif type == 'section':
        # Validate the orientation up front; an unknown value previously
        # fell through and raised a confusing NameError on pgSz.
        validorients = ['portrait', 'landscape']
        if orient not in validorients:
            tmpl = ('Page orientation "%s" not implemented. '
                    'Valid orientations: %s.')
            raise ValueError(tmpl % (orient, validorients))
        pPr = makeelement('pPr')
        sectPr = makeelement('sectPr')
        if orient == 'portrait':
            pgSz = makeelement('pgSz', attributes={'w': '12240', 'h': '15840'})
        elif orient == 'landscape':
            pgSz = makeelement('pgSz', attributes={'h': '12240', 'w': '15840',
                                                   'orient': 'landscape'})
        sectPr.append(pgSz)
        pPr.append(sectPr)
        pagebreak.append(pPr)
    return pagebreak
def paragraph(paratext, style='BodyText', breakbefore=False, jc='left'):
    """
    Return a new paragraph element containing *paratext*. The paragraph's
    default style is 'Body Text', but a new style may be set using the
    *style* parameter.

    @param string jc: Paragraph alignment, possible values:
                      left, center, right, both (justified), ...
                      see http://www.schemacentral.com/sc/ooxml/t-w_ST_Jc.html
                      for a full list

    If *paratext* is a list, add a run for each (text, char_format_str)
    2-tuple in the list. char_format_str is a string containing one or more
    of the characters 'b', 'i', or 'u', meaning bold, italic, and underline
    respectively. For example:

        paratext = [
            ('some bold text', 'b'),
            ('some normal text', ''),
            ('some italic underlined text', 'iu')
        ]
    """
    para = makeelement('p')
    # Normalise input to a list of (text, char_format_str) pairs.
    if not isinstance(paratext, list):
        paratext = [(paratext, '')]
    runs = []
    for item in paratext:
        if isinstance(item, (list, tuple)):
            text, fmt = item
        else:
            text, fmt = item, ''
        text_elm = makeelement('t', tagtext=text)
        # Preserve significant leading/trailing whitespace, which XML
        # processors would otherwise strip.
        if len(text.strip()) < len(text):
            text_elm.set('{http://www.w3.org/XML/1998/namespace}space',
                         'preserve')
        runs.append((text_elm, fmt))
    # Paragraph properties: style and justification.
    pPr = makeelement('pPr')
    pPr.append(makeelement('pStyle', attributes={'val': style}))
    pPr.append(makeelement('jc', attributes={'val': jc}))
    para.append(pPr)
    # One run per text piece, each with its own character formatting.
    for text_elm, fmt in runs:
        run = makeelement('r')
        rPr = makeelement('rPr')
        if 'b' in fmt:
            rPr.append(makeelement('b'))
        if 'i' in fmt:
            rPr.append(makeelement('i'))
        if 'u' in fmt:
            rPr.append(makeelement('u', attributes={'val': 'single'}))
        run.append(rPr)
        # Insert lastRenderedPageBreak for assistive technologies like
        # document narrators to know when a page break occurred.
        if breakbefore:
            run.append(makeelement('lastRenderedPageBreak'))
        run.append(text_elm)
        para.append(run)
    return para
def contenttypes():
    '''Build and return the [Content_Types].xml element tree.'''
    types = etree.fromstring(
        '<Types xmlns="http://schemas.openxmlformats.org/package/2006/conten'
        't-types"></Types>')
    # Per-part content-type overrides.
    overrides = {
        '/word/theme/theme1.xml': 'application/vnd.openxmlformats-officedocu'
                                  'ment.theme+xml',
        '/word/fontTable.xml': 'application/vnd.openxmlformats-officedocu'
                               'ment.wordprocessingml.fontTable+xml',
        '/docProps/core.xml': 'application/vnd.openxmlformats-package.co'
                              're-properties+xml',
        '/docProps/app.xml': 'application/vnd.openxmlformats-officedocu'
                             'ment.extended-properties+xml',
        '/word/document.xml': 'application/vnd.openxmlformats-officedocu'
                              'ment.wordprocessingml.document.main+xml',
        '/word/settings.xml': 'application/vnd.openxmlformats-officedocu'
                              'ment.wordprocessingml.settings+xml',
        '/word/numbering.xml': 'application/vnd.openxmlformats-officedocu'
                               'ment.wordprocessingml.numbering+xml',
        '/word/styles.xml': 'application/vnd.openxmlformats-officedocu'
                            'ment.wordprocessingml.styles+xml',
        '/word/webSettings.xml': 'application/vnd.openxmlformats-officedocu'
                                 'ment.wordprocessingml.webSettings+xml'}
    for partname, contenttype in overrides.items():
        types.append(makeelement('Override', nsprefix=None,
                                 attributes={'PartName': partname,
                                             'ContentType': contenttype}))
    # Default content types keyed on file extension.
    filetypes = {
        'gif': 'image/gif',
        'jpeg': 'image/jpeg',
        'jpg': 'image/jpeg',
        'png': 'image/png',
        'rels': 'application/vnd.openxmlformats-package.relationships+xml',
        'xml': 'application/xml'
    }
    for extension, contenttype in filetypes.items():
        types.append(makeelement('Default', nsprefix=None,
                                 attributes={'Extension': extension,
                                             'ContentType': contenttype}))
    return types
def heading(headingtext, headinglevel, lang='en'):
    '''Make a new heading, return the heading element.

    @param str headingtext: the heading's text content
    @param int headinglevel: heading level, appended to the style name
    @param str lang: style-name language; 'en' or 'it'
    '''
    stylenames = {'en': 'Heading', 'it': 'Titolo'}
    # Build paragraph properties carrying the heading style.
    para = makeelement('p')
    props = makeelement('pPr')
    props.append(makeelement(
        'pStyle', attributes={'val': stylenames[lang] + str(headinglevel)}))
    # Build the run holding the actual text.
    run = makeelement('r')
    run.append(makeelement('t', tagtext=headingtext))
    para.append(props)
    para.append(run)
    return para
def table(contents, heading=True, colw=None, cwunit='dxa', tblw=0,
          twunit='auto', borders={}, celstyle=None):
    """
    Return a table element based on specified parameters

    @param list contents: A list of lists describing contents. Every item in
                          the list can be a string or a valid XML element
                          itself. It can also be a list. In that case all the
                          listed elements will be merged into the cell.
    @param bool heading:  Tells whether first line should be treated as
                          heading or not
    @param list colw:     list of integer column widths specified in wunitS.
    @param str cwunit:    Unit used for column width:
                            'pct'  : fiftieths of a percent
                            'dxa'  : twentieths of a point
                            'nil'  : no width
                            'auto' : automagically determined
    @param int tblw:      Table width
    @param str twunit:    Unit used for table width. Same possible values as
                          cwunit.
    @param dict borders:  Dictionary defining table border. Supported keys
                          are: 'top', 'left', 'bottom', 'right',
                          'insideH', 'insideV', 'all'.
                          When specified, the 'all' key has precedence over
                          others. Each key must define a dict of border
                          attributes:
                            color : The color of the border, in hex or
                                    'auto'
                            space : The space, measured in points
                            sz    : The size of the border, in eighths of
                                    a point
                            val   : The style of the border, see
                          http://www.schemacentral.com/sc/ooxml/t-w_ST_Border.htm
    @param list celstyle: Specify the style for each colum, list of dicts.
                          supported keys:
                          'align' : specify the alignment, see paragraph
                                    documentation.
    @return lxml.etree:   Generated XML etree element
    """
    # NOTE(review): `borders={}` is a mutable default argument; safe only
    # because the dict is never mutated here.
    table = makeelement('tbl')
    columns = len(contents[0])
    # Table properties
    tableprops = makeelement('tblPr')
    tablestyle = makeelement('tblStyle', attributes={'val': ''})
    tableprops.append(tablestyle)
    tablewidth = makeelement(
        'tblW', attributes={'w': str(tblw), 'type': str(twunit)})
    tableprops.append(tablewidth)
    if len(borders.keys()):
        tableborders = makeelement('tblBorders')
        for b in ['top', 'left', 'bottom', 'right', 'insideH', 'insideV']:
            if b in borders.keys() or 'all' in borders.keys():
                # 'all' overrides any per-side entry.
                k = 'all' if 'all' in borders.keys() else b
                attrs = {}
                for a in borders[k].keys():
                    # NOTE(review): unicode() is Python 2 only.
                    attrs[a] = unicode(borders[k][a])
                borderelem = makeelement(b, attributes=attrs)
                tableborders.append(borderelem)
        tableprops.append(tableborders)
    tablelook = makeelement('tblLook', attributes={'val': '0400'})
    tableprops.append(tablelook)
    table.append(tableprops)
    # Table Grid
    tablegrid = makeelement('tblGrid')
    for i in range(columns):
        attrs = {'w': str(colw[i]) if colw else '2390'}
        tablegrid.append(makeelement('gridCol', attributes=attrs))
    table.append(tablegrid)
    # Heading Row
    row = makeelement('tr')
    rowprops = makeelement('trPr')
    cnfStyle = makeelement('cnfStyle', attributes={'val': '000000100000'})
    rowprops.append(cnfStyle)
    row.append(rowprops)
    if heading:
        i = 0
        # NOTE(review): the loop variable shadows the `heading` parameter;
        # after the loop, `heading` holds the last heading cell's content,
        # which the `1 if heading else 0` slice below relies on being truthy.
        for heading in contents[0]:
            cell = makeelement('tc')
            # Cell properties
            cellprops = makeelement('tcPr')
            if colw:
                wattr = {'w': str(colw[i]), 'type': cwunit}
            else:
                wattr = {'w': '0', 'type': 'auto'}
            cellwidth = makeelement('tcW', attributes=wattr)
            cellstyle = makeelement('shd', attributes={'val': 'clear',
                                                       'color': 'auto',
                                                       'fill': 'FFFFFF',
                                                       'themeFill': 'text2',
                                                       'themeFillTint': '99'})
            cellprops.append(cellwidth)
            cellprops.append(cellstyle)
            cell.append(cellprops)
            # Paragraph (Content)
            if not isinstance(heading, (list, tuple)):
                heading = [heading]
            for h in heading:
                if isinstance(h, etree._Element):
                    cell.append(h)
                else:
                    cell.append(paragraph(h, jc='center'))
            row.append(cell)
            i += 1
        table.append(row)
    # Contents Rows
    for contentrow in contents[1 if heading else 0:]:
        row = makeelement('tr')
        i = 0
        for content in contentrow:
            cell = makeelement('tc')
            # Properties
            cellprops = makeelement('tcPr')
            if colw:
                wattr = {'w': str(colw[i]), 'type': cwunit}
            else:
                wattr = {'w': '0', 'type': 'auto'}
            cellwidth = makeelement('tcW', attributes=wattr)
            cellprops.append(cellwidth)
            cell.append(cellprops)
            # Paragraph (Content)
            if not isinstance(content, (list, tuple)):
                content = [content]
            for c in content:
                if isinstance(c, etree._Element):
                    cell.append(c)
                else:
                    if celstyle and 'align' in celstyle[i].keys():
                        align = celstyle[i]['align']
                    else:
                        align = 'left'
                    cell.append(paragraph(c, jc=align))
            row.append(cell)
            i += 1
        table.append(row)
    return table
def search(document, search):
    '''Search a document for a regex, return success / fail result.

    @param instance document: the document element tree to scan
    @param str search: the text to search for (regexp)
    @return bool: True if any text element matches, else False
    '''
    searchre = re.compile(search)
    text_tag = '{%s}t' % nsprefixes['w']
    for element in document.iter():
        if element.tag == text_tag:  # t (text) elements
            if element.text and searchre.search(element.text):
                # Short-circuit on the first hit instead of scanning the
                # remainder of the document (result was already decided).
                return True
    return False
def replace(document, search, replace):
    """
    Replace all occurences of string with a different string, return updated
    document
    """
    pattern = re.compile(search)
    text_tag = '{%s}t' % nsprefixes['w']
    for element in document.iter():
        # Only t (text) elements with content are candidates.
        if element.tag == text_tag and element.text:
            if pattern.search(element.text):
                element.text = re.sub(search, replace, element.text)
    return document
def clean(document):
    """ Perform misc cleaning operations on documents.
        Removes empty text (t) and run (r) elements.
        Returns cleaned document.
    """
    for tagname in ('t', 'r'):
        target_tag = '{%s}%s' % (nsprefixes['w'], tagname)
        # Collect first, then remove, so iteration isn't disturbed.
        doomed = [el for el in document.iter()
                  if el.tag == target_tag and not el.text and not len(el)]
        for el in doomed:
            el.getparent().remove(el)
    return document
def findTypeParent(element, tag):
    """ Finds the first ancestor of *element* with the given tag.

    @param object element: etree element
    @param string tag: the (namespaced) tag of the parent to search for
    @return object element: the found parent or None when not found
    """
    p = element
    # Walk up until we hit a matching ancestor or fall off the top of the
    # tree.  BUGFIX: the original looped `while True` and dereferenced
    # `p.tag` after getparent() returned None at the root, raising
    # AttributeError instead of reaching its `return None`.
    while p is not None:
        p = p.getparent()
        if p is not None and p.tag == tag:
            return p
    # Not found
    return None
def AdvSearch(document, search, bs=3):
    '''Return the set of all regex matches found in *document*.

    This is an advanced version of python-docx.search() that takes into
    account blocks of <bs> text elements at a time.

    Because the text being searched for may be split across several adjacent
    w:t (text) elements, a sliding window of up to *bs* consecutive text
    elements is kept; every contiguous sub-sequence of that window is joined
    and tested against the pattern, smallest groups first.

    Examples:
    original text blocks : [ 'Hel', 'lo,', ' world!' ]
    search : 'Hello,'
    match  : 'Hello,'

    original text blocks : [ 'Hel', 'lo', ' __', 'name', '__!' ]
    search : '(__[a-z]+__)'
    match  : '__name__'

    @param instance document: The original document
    @param str search: The text to search for (regexp)
    @param int bs: Maximum number of adjacent text elements to join
    @return set: All occurrences of the search string
    '''
    # Compile the search regexp once, up front
    searchre = re.compile(search)
    matches = []
    # searchels is the sliding window: the last n text elements seen in
    # document order, with 1 <= n <= bs
    searchels = []
    for element in document.iter():
        if element.tag == '{%s}t' % nsprefixes['w']:  # t (text) elements
            if element.text:
                # Add this element to searchels
                searchels.append(element)
                if len(searchels) > bs:
                    # If searchels grew too long, drop the oldest element
                    searchels.pop(0)
                # Try all contiguous combinations of searchels, from the
                # smallest group up to the biggest one
                # l = group length
                # s = group start index
                # e = element IDs to merge
                found = False
                for l in range(1, len(searchels)+1):
                    if found:
                        break
                    for s in range(len(searchels)):
                        if found:
                            break
                        if s+l <= len(searchels):
                            e = range(s, s+l)
                            txtsearch = ''
                            for k in e:
                                txtsearch += searchels[k].text
                            # Search for the pattern in the joined text
                            match = searchre.search(txtsearch)
                            if match:
                                matches.append(match.group())
                                found = True
    return set(matches)
def advReplace(document, search, replace, bs=3):
    """
    Replace all occurences of string with a different string, return updated
    document

    This is a modified version of python-docx.replace() that takes into
    account blocks of <bs> elements at a time. The replace element can also
    be a string or an xml etree element.

    What it does:
    It searches the entire document body for text blocks.
    Then it scans those text blocks for *search*.
    Since the text to search could be spread across multiple text blocks,
    a sliding window of up to <bs> adjacent text elements is examined; the
    smallest matching group of blocks is adopted.
    If the matching group has more than one block, blocks other than first
    are cleared and all the replacement text is put on first block.

    Examples:
    original text blocks : [ 'Hel', 'lo,', ' world!' ]
    search / replace: 'Hello,' / 'Hi!'
    output blocks : [ 'Hi!', '', ' world!' ]

    original text blocks : [ 'Hel', 'lo,', ' world!' ]
    search / replace: 'Hello, world' / 'Hi!'
    output blocks : [ 'Hi!!', '', '' ]

    original text blocks : [ 'Hel', 'lo,', ' world!' ]
    search / replace: 'Hel' / 'Hal'
    output blocks : [ 'Hal', 'lo,', ' world!' ]

    @param instance document: The original document
    @param str search: The text to search for (regexp)
    @param mixed replace: The replacement text or lxml.etree element to
                          append, or a list of etree elements
    @param int bs: See above
    @return instance The document with replacement applied
    """
    # Enables debug output
    DEBUG = False
    newdocument = document
    # Compile the search regexp
    searchre = re.compile(search)
    # Will match against searchels. Searchels is a list that contains last
    # n text elements found in the document. 1 < n < bs
    searchels = []
    for element in newdocument.iter():
        if element.tag == '{%s}t' % nsprefixes['w']:  # t (text) elements
            if element.text:
                # Add this element to searchels
                searchels.append(element)
                if len(searchels) > bs:
                    # If searchels is too long, remove first elements
                    searchels.pop(0)
                # Search all combinations, of searchels, starting from
                # smaller up to bigger ones
                # l = search lenght
                # s = search start
                # e = element IDs to merge
                found = False
                for l in range(1, len(searchels)+1):
                    if found:
                        break
                    for s in range(len(searchels)):
                        if found:
                            break
                        if s+l <= len(searchels):
                            e = range(s, s+l)
                            txtsearch = ''
                            for k in e:
                                txtsearch += searchels[k].text
                            # Search for the text in the joined blocks
                            match = searchre.search(txtsearch)
                            if match:
                                found = True
                                # I've found something :)
                                if DEBUG:
                                    log.debug("Found element!")
                                    log.debug("Search regexp: %s",
                                              searchre.pattern)
                                    log.debug("Requested replacement: %s",
                                              replace)
                                    log.debug("Matched text: %s", txtsearch)
                                    log.debug("Matched text (splitted): %s",
                                              map(lambda i: i.text, searchels))
                                    log.debug("Matched at position: %s",
                                              match.start())
                                    log.debug("matched in elements: %s", e)
                                    if isinstance(replace, etree._Element):
                                        log.debug("Will replace with XML CODE")
                                    # BUGFIX: original read
                                    # `isinstance(replace(list, tuple))`,
                                    # which *called* replace and raised
                                    # TypeError whenever DEBUG was on and
                                    # replace was a plain string.
                                    elif isinstance(replace, (list, tuple)):
                                        log.debug("Will replace with LIST OF"
                                                  " ELEMENTS")
                                    else:
                                        # BUGFIX: added the %s placeholder so
                                        # the preview text actually renders.
                                        log.debug("Will replace with: %s",
                                                  re.sub(search, replace,
                                                         txtsearch))
                                curlen = 0
                                replaced = False
                                for i in e:
                                    curlen += len(searchels[i].text)
                                    if curlen > match.start() and not replaced:
                                        # The match occurred in THIS element.
                                        # Put in the whole replaced text
                                        if isinstance(replace, etree._Element):
                                            # Convert to a list and process
                                            # it later
                                            replace = [replace]
                                        if isinstance(replace, (list, tuple)):
                                            # I'm replacing with a list of
                                            # etree elements
                                            # clear the text in the tag and
                                            # append the element after the
                                            # parent paragraph
                                            # (because t elements cannot have
                                            # childs)
                                            p = findTypeParent(
                                                searchels[i],
                                                '{%s}p' % nsprefixes['w'])
                                            searchels[i].text = re.sub(
                                                search, '', txtsearch)
                                            insindex = p.getparent().index(p)+1
                                            for r in replace:
                                                p.getparent().insert(
                                                    insindex, r)
                                                insindex += 1
                                        else:
                                            # Replacing with pure text
                                            searchels[i].text = re.sub(
                                                search, replace, txtsearch)
                                        replaced = True
                                        log.debug(
                                            "Replacing in element #: %s", i)
                                    else:
                                        # Clears the other text elements
                                        searchels[i].text = ''
    return newdocument
def getdocumenttext(document):
    '''Return the raw text of a document, as a list of paragraphs.'''
    w_p = '{' + nsprefixes['w'] + '}p'
    w_t = '{' + nsprefixes['w'] + '}t'
    w_tab = '{' + nsprefixes['w'] + '}tab'
    paratextlist = []
    # A single sentence may be spread over multiple w:t runs, so walk each
    # paragraph and join the text of all of its descendants.
    for para in [el for el in document.iter() if el.tag == w_p]:
        pieces = []
        for node in para.iter():
            if node.tag == w_t:
                if node.text:
                    pieces.append(node.text)
            elif node.tag == w_tab:
                pieces.append('\t')
        paratext = u''.join(pieces)
        # Skip paragraphs that contained no text at all
        if paratext:
            paratextlist.append(paratext)
    return paratextlist
def coreproperties(title, subject, creator, keywords, lastmodifiedby=None):
    """
    Create core properties (common document properties referred to in the
    'Dublin Core' specification). See appproperties() for other stuff.

    @param str title:   dc:title value
    @param str subject: dc:subject value
    @param str creator: dc:creator value; also the default lastModifiedBy
    @param list keywords: keyword strings, stored comma-joined in cp:keywords
    @param str lastmodifiedby: optional cp:lastModifiedBy override
    @return etree element: the cp:coreProperties tree
    """
    coreprops = makeelement('coreProperties', nsprefix='cp')
    coreprops.append(makeelement('title', tagtext=title, nsprefix='dc'))
    coreprops.append(makeelement('subject', tagtext=subject, nsprefix='dc'))
    coreprops.append(makeelement('creator', tagtext=creator, nsprefix='dc'))
    coreprops.append(makeelement('keywords', tagtext=','.join(keywords),
                                 nsprefix='cp'))
    if not lastmodifiedby:
        lastmodifiedby = creator
    coreprops.append(makeelement('lastModifiedBy', tagtext=lastmodifiedby,
                                 nsprefix='cp'))
    coreprops.append(makeelement('revision', tagtext='1', nsprefix='cp'))
    coreprops.append(
        makeelement('category', tagtext='Examples', nsprefix='cp'))
    coreprops.append(
        makeelement('description', tagtext='Examples', nsprefix='dc'))
    # NOTE(review): strftime uses *local* time but the format appends a
    # literal 'Z' (UTC designator) — confirm whether UTC was intended.
    currenttime = time.strftime('%Y-%m-%dT%H:%M:%SZ')
    # Document creation and modify times
    # Problem here: the attribute *name* uses one namespace while the
    # attribute's *value* uses another.
    # We're creating the element from a string as a workaround...
    for doctime in ['created', 'modified']:
        elm_str = (
            '<dcterms:%s xmlns:xsi="http://www.w3.org/2001/XMLSchema-instanc'
            'e" xmlns:dcterms="http://purl.org/dc/terms/" xsi:type="dcterms:'
            'W3CDTF">%s</dcterms:%s>'
        ) % (doctime, currenttime, doctime)
        coreprops.append(etree.fromstring(elm_str))
    return coreprops
def appproperties():
    """
    Create app-specific properties. See docproperties() for more common
    document properties.

    @return etree element: the extended-properties (docProps/app.xml) tree
    """
    # BUGFIX: the original first built this element with makeelement() and
    # then immediately discarded it by reassigning from the literal XML;
    # the dead makeelement() call is removed.
    appprops = etree.fromstring(
        '<?xml version="1.0" encoding="UTF-8" standalone="yes"?><Properties x'
        'mlns="http://schemas.openxmlformats.org/officeDocument/2006/extended'
        '-properties" xmlns:vt="http://schemas.openxmlformats.org/officeDocum'
        'ent/2006/docPropsVTypes"></Properties>')
    # Hard-coded statistics mirroring what Word writes for the template doc
    props = {
        'Template': 'Normal.dotm',
        'TotalTime': '6',
        'Pages': '1',
        'Words': '83',
        'Characters': '475',
        'Application': 'Microsoft Word 12.0.0',
        'DocSecurity': '0',
        'Lines': '12',
        'Paragraphs': '8',
        'ScaleCrop': 'false',
        'LinksUpToDate': 'false',
        'CharactersWithSpaces': '583',
        'SharedDoc': 'false',
        'HyperlinksChanged': 'false',
        'AppVersion': '12.0000'}
    for name, value in props.items():
        appprops.append(makeelement(name, tagtext=value, nsprefix=None))
    return appprops
def websettings():
    '''Generate the word/webSettings.xml tree.'''
    web = makeelement('webSettings')
    # The two flags Word expects in a fresh document's web settings
    for childname in ('allowPNG', 'doNotSaveAsSingleFile'):
        web.append(makeelement(childname))
    return web
def relationshiplist():
    '''Return the default [type-URI, target] relationships for a new
    document, in the order Word numbers them.'''
    base = ('http://schemas.openxmlformats.org/officeDocument/2006/'
            'relationships/')
    targets = [
        ('numbering', 'numbering.xml'),
        ('styles', 'styles.xml'),
        ('settings', 'settings.xml'),
        ('webSettings', 'webSettings.xml'),
        ('fontTable', 'fontTable.xml'),
        ('theme', 'theme/theme1.xml'),
    ]
    return [[base + reltype, target] for reltype, target in targets]
def wordrelationships(relationshiplist):
    '''Generate a Word relationships file'''
    # FIXME: built from a string instead of makeelement() so the root
    # carries the default package-relationships namespace.
    relationships = etree.fromstring(
        '<Relationships xmlns="http://schemas.openxmlformats.org/package/2006'
        '/relationships"></Relationships>')
    # Relationship IDs (rId) start at 1.
    for index, (reltype, target) in enumerate(relationshiplist, 1):
        relationships.append(
            makeelement('Relationship', nsprefix=None,
                        attributes={'Id': 'rId' + str(index),
                                    'Type': reltype,
                                    'Target': target}))
    return relationships
def savedocx(
        document, coreprops, appprops, contenttypes, websettings,
        wordrelationships, output, imagefiledict=None):
    """
    Save a modified document as a .docx zip package at *output*.

    @param document:          word/document.xml tree
    @param coreprops:         docProps/core.xml tree
    @param appprops:          docProps/app.xml tree
    @param contenttypes:      [Content_Types].xml tree
    @param websettings:       word/webSettings.xml tree
    @param wordrelationships: word/_rels/document.xml.rels tree
    @param output:            path or file object for the output zip
    @param imagefiledict:     optional {image path: rId} mapping of images
                              to embed under word/media/
    NOTE: temporarily chdir()s into the template directory (restored before
    returning) and copies every file under it into the archive.
    """
    if imagefiledict is None:
        warn(
            'Using savedocx() without imagefiledict parameter will be deprec'
            'ated in the future.', PendingDeprecationWarning
        )
    assert os.path.isdir(template_dir)
    docxfile = zipfile.ZipFile(
        output, mode='w', compression=zipfile.ZIP_DEFLATED)
    # Move to the template data path
    prev_dir = os.path.abspath('.')  # save previous working dir
    os.chdir(template_dir)
    # Serialize our trees into out zip file
    treesandfiles = {
        document: 'word/document.xml',
        coreprops: 'docProps/core.xml',
        appprops: 'docProps/app.xml',
        contenttypes: '[Content_Types].xml',
        websettings: 'word/webSettings.xml',
        wordrelationships: 'word/_rels/document.xml.rels'
    }
    for tree in treesandfiles:
        log.info('Saving: %s' % treesandfiles[tree])
        treestring = etree.tostring(tree, pretty_print=True)
        docxfile.writestr(treesandfiles[tree], treestring)
    # Add & compress images, if applicable; archive names must match the
    # 'media/<rId>_<basename>' targets recorded by picture()
    if imagefiledict is not None:
        for imagepath, picrelid in imagefiledict.items():
            archivename = 'word/media/%s_%s' % (picrelid, basename(imagepath))
            log.info('Saving: %s', archivename)
            docxfile.write(imagepath, archivename)
    # Add & compress support files
    files_to_ignore = ['.DS_Store']  # nuisance from some os's
    for dirpath, dirnames, filenames in os.walk('.'):
        for filename in filenames:
            if filename in files_to_ignore:
                continue
            templatefile = join(dirpath, filename)
            # strip the leading './' so archive paths are package-relative
            archivename = templatefile[2:]
            log.info('Saving: %s', archivename)
            docxfile.write(templatefile, archivename)
    log.info('Saved new file to: %r', output)
    docxfile.close()
    os.chdir(prev_dir)  # restore previous working dir
    return
|
mikemaccana/python-docx
|
docx.py
|
search
|
python
|
def search(document, search):
'''Search a document for a regex, return success / fail result'''
result = False
searchre = re.compile(search)
for element in document.iter():
if element.tag == '{%s}t' % nsprefixes['w']: # t (text) elements
if element.text:
if searchre.search(element.text):
result = True
return result
|
Search a document for a regex, return success / fail result
|
train
|
https://github.com/mikemaccana/python-docx/blob/4c9b46dbebe3d2a9b82dbcd35af36584a36fd9fe/docx.py#L617-L626
| null |
# encoding: utf-8
"""
Open and modify Microsoft Word 2007 docx files (called 'OpenXML' and
'Office OpenXML' by Microsoft)
Part of Python's docx module - http://github.com/mikemaccana/python-docx
See LICENSE for licensing information.
"""
import os
import re
import time
import shutil
import zipfile
from lxml import etree
from os.path import abspath, basename, join
try:
from PIL import Image
except ImportError:
import Image
try:
from PIL.ExifTags import TAGS
except ImportError:
TAGS = {}
from exceptions import PendingDeprecationWarning
from warnings import warn
import logging
log = logging.getLogger(__name__)
# Record template directory's location which is just 'template' for a docx
# developer or 'site-packages/docx-template' if you have installed docx
template_dir = join(os.path.dirname(__file__), 'docx-template')  # installed
if not os.path.isdir(template_dir):
    template_dir = join(os.path.dirname(__file__), 'template')  # dev
# All Word prefixes / namespace matches used in document.xml & core.xml.
# LXML doesn't actually use prefixes (just the real namespace), but this
# prefix -> namespace-URI map (consumed by makeelement()) makes it easier
# to mirror Word's own output.
nsprefixes = {
    'mo': 'http://schemas.microsoft.com/office/mac/office/2008/main',
    'o': 'urn:schemas-microsoft-com:office:office',
    've': 'http://schemas.openxmlformats.org/markup-compatibility/2006',
    # Text Content
    'w': 'http://schemas.openxmlformats.org/wordprocessingml/2006/main',
    'w10': 'urn:schemas-microsoft-com:office:word',
    'wne': 'http://schemas.microsoft.com/office/word/2006/wordml',
    # Drawing
    'a': 'http://schemas.openxmlformats.org/drawingml/2006/main',
    'm': 'http://schemas.openxmlformats.org/officeDocument/2006/math',
    'mv': 'urn:schemas-microsoft-com:mac:vml',
    'pic': 'http://schemas.openxmlformats.org/drawingml/2006/picture',
    'v': 'urn:schemas-microsoft-com:vml',
    'wp': ('http://schemas.openxmlformats.org/drawingml/2006/wordprocessing'
           'Drawing'),
    # Properties (core and extended)
    'cp': ('http://schemas.openxmlformats.org/package/2006/metadata/core-pr'
           'operties'),
    'dc': 'http://purl.org/dc/elements/1.1/',
    'ep': ('http://schemas.openxmlformats.org/officeDocument/2006/extended-'
           'properties'),
    'xsi': 'http://www.w3.org/2001/XMLSchema-instance',
    # Content Types
    'ct': 'http://schemas.openxmlformats.org/package/2006/content-types',
    # Package Relationships
    'r': ('http://schemas.openxmlformats.org/officeDocument/2006/relationsh'
          'ips'),
    'pr': 'http://schemas.openxmlformats.org/package/2006/relationships',
    # Dublin Core document properties
    'dcmitype': 'http://purl.org/dc/dcmitype/',
    'dcterms': 'http://purl.org/dc/terms/'}
def opendocx(file):
    '''Open a docx file, return a document XML tree'''
    # A .docx is a zip package; the main body lives at word/document.xml
    package = zipfile.ZipFile(file)
    return etree.fromstring(package.read('word/document.xml'))
def newdocument():
    '''Return a minimal document tree: a w:document holding an empty
    w:body.'''
    body = makeelement('body')
    document = makeelement('document')
    document.append(body)
    return document
def makeelement(tagname, tagtext=None, nsprefix='w', attributes=None,
                attrnsprefix=None):
    '''Create an element & return it

    @param str tagname:       local name of the element to create
    @param str tagtext:       optional text content
    @param mixed nsprefix:    key into ``nsprefixes`` (or a list of keys;
                              all go into the element's nsmap, the first is
                              used for the tag). None means no namespace.
    @param dict attributes:   optional {attribute name: value} mapping
    @param str attrnsprefix:  namespace prefix key for the attributes; when
                              None, 'w'-tagged elements reuse the tag
                              namespace and everything else gets
                              un-namespaced attributes
    @return etree element
    '''
    # Deal with list of nsprefix by making namespacemap
    namespacemap = None
    if isinstance(nsprefix, list):
        namespacemap = {}
        for prefix in nsprefix:
            namespacemap[prefix] = nsprefixes[prefix]
        # FIXME: rest of code below expects a single prefix
        nsprefix = nsprefix[0]
    if nsprefix:
        namespace = '{%s}' % nsprefixes[nsprefix]
    else:
        # For when namespace = None
        namespace = ''
    newelement = etree.Element(namespace+tagname, nsmap=namespacemap)
    # Add attributes with namespaces
    if attributes:
        # If the caller hasn't set an attribute namespace, use an empty
        # string (equivalent of no namespace)
        if not attrnsprefix:
            # Quick hack: it seems every element that has a 'w' nsprefix for
            # its tag uses the same prefix for its attributes
            if nsprefix == 'w':
                attributenamespace = namespace
            else:
                attributenamespace = ''
        else:
            attributenamespace = '{'+nsprefixes[attrnsprefix]+'}'
        for tagattribute in attributes:
            newelement.set(attributenamespace+tagattribute,
                           attributes[tagattribute])
    if tagtext:
        newelement.text = tagtext
    return newelement
def pagebreak(type='page', orient='portrait'):
    '''Insert a break, default 'page'.
    See http://openxmldeveloper.org/forums/thread/4075.aspx
    Return our page break element.

    @param str type:   'page' or 'section' (parameter name shadows the
                       builtin but is kept for API compatibility)
    @param str orient: 'portrait' or 'landscape'; only used for
                       type='section'
    @raise ValueError: on an unsupported *type* or *orient*
    '''
    # Need to enumerate different types of page breaks.
    validtypes = ['page', 'section']
    if type not in validtypes:
        tmpl = 'Page break style "%s" not implemented. Valid styles: %s.'
        raise ValueError(tmpl % (type, validtypes))
    pagebreak = makeelement('p')
    if type == 'page':
        run = makeelement('r')
        br = makeelement('br', attributes={'type': type})
        run.append(br)
        pagebreak.append(run)
    elif type == 'section':
        pPr = makeelement('pPr')
        sectPr = makeelement('sectPr')
        if orient == 'portrait':
            pgSz = makeelement('pgSz', attributes={'w': '12240', 'h': '15840'})
        elif orient == 'landscape':
            pgSz = makeelement('pgSz', attributes={'h': '12240', 'w': '15840',
                                                   'orient': 'landscape'})
        else:
            # BUGFIX: an unknown orientation previously fell through and
            # crashed below with a NameError on the undefined pgSz.
            tmpl = ('Page orientation "%s" not implemented. Valid '
                    'orientations: %s.')
            raise ValueError(tmpl % (orient, ['portrait', 'landscape']))
        sectPr.append(pgSz)
        pPr.append(sectPr)
        pagebreak.append(pPr)
    return pagebreak
def paragraph(paratext, style='BodyText', breakbefore=False, jc='left'):
    """
    Return a new paragraph element containing *paratext*. The paragraph's
    default style is 'Body Text', but a new style may be set using the
    *style* parameter.

    @param string jc: Paragraph alignment, possible values:
                      left, center, right, both (justified), ...
                      see http://www.schemacentral.com/sc/ooxml/t-w_ST_Jc.html
                      for a full list

    If *paratext* is a list, add a run for each (text, char_format_str)
    2-tuple in the list. char_format_str is a string containing one or more
    of the characters 'b', 'i', or 'u', meaning bold, italic, and underline
    respectively. For example:

    paratext = [
        ('some bold text', 'b'),
        ('some normal text', ''),
        ('some italic underlined text', 'iu')
    ]
    """
    # Make our elements
    paragraph = makeelement('p')
    if not isinstance(paratext, list):
        paratext = [(paratext, '')]
    text_tuples = []
    for pt in paratext:
        # Each item is either (text, styles) or a bare string with no styles
        text, char_styles_str = (pt if isinstance(pt, (list, tuple))
                                 else (pt, ''))
        text_elm = makeelement('t', tagtext=text)
        # Leading/trailing whitespace is dropped by XML parsers unless the
        # run is explicitly marked xml:space="preserve"
        if len(text.strip()) < len(text):
            text_elm.set('{http://www.w3.org/XML/1998/namespace}space',
                         'preserve')
        text_tuples.append([text_elm, char_styles_str])
    pPr = makeelement('pPr')
    pStyle = makeelement('pStyle', attributes={'val': style})
    pJc = makeelement('jc', attributes={'val': jc})
    pPr.append(pStyle)
    pPr.append(pJc)
    # Add the text to the run, and the run to the paragraph
    paragraph.append(pPr)
    for text_elm, char_styles_str in text_tuples:
        run = makeelement('r')
        rPr = makeelement('rPr')
        # Apply styles
        if 'b' in char_styles_str:
            b = makeelement('b')
            rPr.append(b)
        if 'i' in char_styles_str:
            i = makeelement('i')
            rPr.append(i)
        if 'u' in char_styles_str:
            u = makeelement('u', attributes={'val': 'single'})
            rPr.append(u)
        run.append(rPr)
        # Insert lastRenderedPageBreak for assistive technologies like
        # document narrators to know when a page break occurred.
        if breakbefore:
            lastRenderedPageBreak = makeelement('lastRenderedPageBreak')
            run.append(lastRenderedPageBreak)
        run.append(text_elm)
        paragraph.append(run)
    # Return the combined paragraph
    return paragraph
def contenttypes():
    '''Generate the [Content_Types].xml tree: per-part overrides plus
    default content types keyed on file extension.'''
    types = etree.fromstring(
        '<Types xmlns="http://schemas.openxmlformats.org/package/2006/conten'
        't-types"></Types>')
    wordml = 'application/vnd.openxmlformats-officedocument.wordprocessingml.'
    parts = {
        '/word/theme/theme1.xml':
            'application/vnd.openxmlformats-officedocument.theme+xml',
        '/word/fontTable.xml': wordml + 'fontTable+xml',
        '/docProps/core.xml':
            'application/vnd.openxmlformats-package.core-properties+xml',
        '/docProps/app.xml':
            'application/vnd.openxmlformats-officedocument.extended-'
            'properties+xml',
        '/word/document.xml': wordml + 'document.main+xml',
        '/word/settings.xml': wordml + 'settings+xml',
        '/word/numbering.xml': wordml + 'numbering+xml',
        '/word/styles.xml': wordml + 'styles+xml',
        '/word/webSettings.xml': wordml + 'webSettings+xml',
    }
    for partname, contenttype in parts.items():
        types.append(makeelement(
            'Override', nsprefix=None,
            attributes={'PartName': partname, 'ContentType': contenttype}))
    # Default mappings for file extensions found inside the package
    filetypes = {
        'gif': 'image/gif',
        'jpeg': 'image/jpeg',
        'jpg': 'image/jpeg',
        'png': 'image/png',
        'rels': 'application/vnd.openxmlformats-package.relationships+xml',
        'xml': 'application/xml',
    }
    for extension, contenttype in filetypes.items():
        types.append(makeelement(
            'Default', nsprefix=None,
            attributes={'Extension': extension, 'ContentType': contenttype}))
    return types
def heading(headingtext, headinglevel, lang='en'):
    '''Make a new heading, return the heading element'''
    # Style names are localised; only English and Italian are known here
    lmap = {'en': 'Heading', 'it': 'Titolo'}
    stylename = lmap[lang] + str(headinglevel)
    paragraph = makeelement('p')
    paragraphprops = makeelement('pPr')
    paragraphprops.append(makeelement('pStyle', attributes={'val': stylename}))
    run = makeelement('r')
    run.append(makeelement('t', tagtext=headingtext))
    paragraph.append(paragraphprops)
    paragraph.append(run)
    return paragraph
def table(contents, heading=True, colw=None, cwunit='dxa', tblw=0,
          twunit='auto', borders={}, celstyle=None):
    """
    Return a table element based on specified parameters

    @param list contents: A list of lists describing contents. Every item in
                          the list can be a string or a valid XML element
                          itself. It can also be a list. In that case all the
                          listed elements will be merged into the cell.
    @param bool heading:  Tells whether first line should be treated as
                          heading or not
    @param list colw:     list of integer column widths specified in wunitS.
    @param str cwunit:    Unit used for column width:
                              'pct'  : fiftieths of a percent
                              'dxa'  : twentieths of a point
                              'nil'  : no width
                              'auto' : automagically determined
    @param int tblw:      Table width
    @param str twunit:    Unit used for table width. Same possible values as
                          cwunit.
    @param dict borders:  Dictionary defining table border. Supported keys
                          are: 'top', 'left', 'bottom', 'right',
                          'insideH', 'insideV', 'all'.
                          When specified, the 'all' key has precedence over
                          others. Each key must define a dict of border
                          attributes:
                              color : The color of the border, in hex or
                                      'auto'
                              space : The space, measured in points
                              sz    : The size of the border, in eighths of
                                      a point
                              val   : The style of the border, see
                          http://www.schemacentral.com/sc/ooxml/t-w_ST_Border.htm
    @param list celstyle: Specify the style for each colum, list of dicts.
                          supported keys:
                          'align' : specify the alignment, see paragraph
                                    documentation.
    @return lxml.etree:   Generated XML etree element
    """
    # NOTE(review): the mutable default `borders={}` is safe only because it
    # is never mutated below.
    table = makeelement('tbl')
    columns = len(contents[0])
    # Table properties
    tableprops = makeelement('tblPr')
    tablestyle = makeelement('tblStyle', attributes={'val': ''})
    tableprops.append(tablestyle)
    tablewidth = makeelement(
        'tblW', attributes={'w': str(tblw), 'type': str(twunit)})
    tableprops.append(tablewidth)
    if len(borders.keys()):
        tableborders = makeelement('tblBorders')
        for b in ['top', 'left', 'bottom', 'right', 'insideH', 'insideV']:
            # 'all' overrides any per-side entry
            if b in borders.keys() or 'all' in borders.keys():
                k = 'all' if 'all' in borders.keys() else b
                attrs = {}
                for a in borders[k].keys():
                    attrs[a] = unicode(borders[k][a])
                borderelem = makeelement(b, attributes=attrs)
                tableborders.append(borderelem)
        tableprops.append(tableborders)
    tablelook = makeelement('tblLook', attributes={'val': '0400'})
    tableprops.append(tablelook)
    table.append(tableprops)
    # Table Grid: one gridCol per column, '2390' dxa when no widths given
    tablegrid = makeelement('tblGrid')
    for i in range(columns):
        attrs = {'w': str(colw[i]) if colw else '2390'}
        tablegrid.append(makeelement('gridCol', attributes=attrs))
    table.append(tablegrid)
    # Heading Row
    row = makeelement('tr')
    rowprops = makeelement('trPr')
    cnfStyle = makeelement('cnfStyle', attributes={'val': '000000100000'})
    rowprops.append(cnfStyle)
    row.append(rowprops)
    if heading:
        i = 0
        # NOTE: the loop variable deliberately shadows the boolean 'heading'
        # parameter, which has already been consumed by the test above.
        for heading in contents[0]:
            cell = makeelement('tc')
            # Cell properties
            cellprops = makeelement('tcPr')
            if colw:
                wattr = {'w': str(colw[i]), 'type': cwunit}
            else:
                wattr = {'w': '0', 'type': 'auto'}
            cellwidth = makeelement('tcW', attributes=wattr)
            cellstyle = makeelement('shd', attributes={'val': 'clear',
                                                       'color': 'auto',
                                                       'fill': 'FFFFFF',
                                                       'themeFill': 'text2',
                                                       'themeFillTint': '99'})
            cellprops.append(cellwidth)
            cellprops.append(cellstyle)
            cell.append(cellprops)
            # Paragraph (Content)
            if not isinstance(heading, (list, tuple)):
                heading = [heading]
            for h in heading:
                if isinstance(h, etree._Element):
                    cell.append(h)
                else:
                    cell.append(paragraph(h, jc='center'))
            row.append(cell)
            i += 1
        table.append(row)
    # Contents Rows: skip the first contents row when it was the heading
    for contentrow in contents[1 if heading else 0:]:
        row = makeelement('tr')
        i = 0
        for content in contentrow:
            cell = makeelement('tc')
            # Properties
            cellprops = makeelement('tcPr')
            if colw:
                wattr = {'w': str(colw[i]), 'type': cwunit}
            else:
                wattr = {'w': '0', 'type': 'auto'}
            cellwidth = makeelement('tcW', attributes=wattr)
            cellprops.append(cellwidth)
            cell.append(cellprops)
            # Paragraph (Content)
            if not isinstance(content, (list, tuple)):
                content = [content]
            for c in content:
                if isinstance(c, etree._Element):
                    cell.append(c)
                else:
                    if celstyle and 'align' in celstyle[i].keys():
                        align = celstyle[i]['align']
                    else:
                        align = 'left'
                    cell.append(paragraph(c, jc=align))
            row.append(cell)
            i += 1
        table.append(row)
    return table
def picture(
        relationshiplist, picname, picdescription, pixelwidth=None,
        pixelheight=None, nochangeaspect=True, nochangearrowheads=True,
        imagefiledict=None):
    """
    Take a relationshiplist, picture file name, and return a paragraph
    containing the image and an updated relationshiplist

    @param list relationshiplist: mutated in place — an image relationship
                                  is appended when the image is new
    @param str picname:           path to the image file
    @param str picdescription:    alt-text for the picture
    @param int pixelwidth/pixelheight: explicit size; read from the image
                                  when either is missing
    @param bool nochangeaspect / nochangearrowheads: picLocks flags
    @param dict imagefiledict:    optional {abs image path: rId} registry;
                                  when None the image is copied into the
                                  template dir (deprecated path)
    @return: (relationshiplist, paragraph) or, when imagefiledict was
             given, (relationshiplist, paragraph, imagefiledict)
    """
    if imagefiledict is None:
        warn(
            'Using picture() without imagefiledict parameter will be depreca'
            'ted in the future.', PendingDeprecationWarning
        )
    # http://openxmldeveloper.org/articles/462.aspx
    # Create an image. Size may be specified, otherwise it will based on the
    # pixel size of image. Return a paragraph containing the picture
    # Set relationship ID to that of the image or the first available one
    picid = '2'
    picpath = abspath(picname)
    if imagefiledict is not None:
        # Keep track of the image files in a separate dictionary so they don't
        # need to be copied into the template directory
        if picpath not in imagefiledict:
            picrelid = 'rId' + str(len(relationshiplist) + 1)
            imagefiledict[picpath] = picrelid
            relationshiplist.append([
                'http://schemas.openxmlformats.org/officeDocument/2006/relat'
                'ionships/image',
                'media/%s_%s' % (picrelid, basename(picpath))
            ])
        else:
            picrelid = imagefiledict[picpath]
    else:
        # Copy files into template directory for backwards compatibility
        # Images still accumulate in the template directory this way
        picrelid = 'rId' + str(len(relationshiplist) + 1)
        relationshiplist.append([
            'http://schemas.openxmlformats.org/officeDocument/2006/relations'
            'hips/image', 'media/' + picname
        ])
        media_dir = join(template_dir, 'word', 'media')
        if not os.path.isdir(media_dir):
            os.mkdir(media_dir)
        shutil.copyfile(picname, join(media_dir, picname))
    image = Image.open(picpath)
    # Extract EXIF data, if available, to honour camera rotation flags
    try:
        exif = image._getexif()
        exif = {} if exif is None else exif
    except:
        exif = {}
    imageExif = {}
    for tag, value in exif.items():
        imageExif[TAGS.get(tag, tag)] = value
    imageOrientation = imageExif.get('Orientation', 1)
    # Map the 8 EXIF orientation codes onto a rotation angle in degrees
    imageAngle = {
        1: 0, 2: 0, 3: 180, 4: 0, 5: 90, 6: 90, 7: 270, 8: 270
    }[imageOrientation]
    imageFlipH = 'true' if imageOrientation in (2, 5, 7) else 'false'
    imageFlipV = 'true' if imageOrientation == 4 else 'false'
    # Check if the user has specified a size
    if not pixelwidth or not pixelheight:
        # If not, get info from the picture itself
        pixelwidth, pixelheight = image.size[0:2]
    # Swap width and height for the rotated (transposed) orientations
    if imageOrientation in (5, 6, 7, 8):
        pixelwidth, pixelheight = pixelheight, pixelwidth
    # OpenXML measures on-screen objects in English Metric Units:
    # 914400 EMU per inch; 12700 EMU per point, i.e. one pixel is treated
    # as one point (72 DPI) here.
    emuperpixel = 12700
    width = str(pixelwidth * emuperpixel)
    height = str(pixelheight * emuperpixel)
    # There are 3 main elements inside a picture
    # 1. The Blipfill - specifies how the image fills the picture area
    #    (stretch, tile, etc.)
    blipfill = makeelement('blipFill', nsprefix='pic')
    blipfill.append(makeelement('blip', nsprefix='a', attrnsprefix='r',
                                attributes={'embed': picrelid}))
    stretch = makeelement('stretch', nsprefix='a')
    stretch.append(makeelement('fillRect', nsprefix='a'))
    blipfill.append(makeelement('srcRect', nsprefix='a'))
    blipfill.append(stretch)
    # 2. The non visual picture properties
    nvpicpr = makeelement('nvPicPr', nsprefix='pic')
    cnvpr = makeelement(
        'cNvPr', nsprefix='pic',
        attributes={'id': '0', 'name': 'Picture 1', 'descr': picdescription}
    )
    nvpicpr.append(cnvpr)
    cnvpicpr = makeelement('cNvPicPr', nsprefix='pic')
    cnvpicpr.append(makeelement(
        'picLocks', nsprefix='a',
        attributes={'noChangeAspect': str(int(nochangeaspect)),
                    'noChangeArrowheads': str(int(nochangearrowheads))}))
    nvpicpr.append(cnvpicpr)
    # 3. The Shape properties (rotation/flip derived from EXIF above)
    sppr = makeelement('spPr', nsprefix='pic', attributes={'bwMode': 'auto'})
    xfrm = makeelement(
        'xfrm', nsprefix='a', attributes={
            'rot': str(imageAngle * 60000), 'flipH': imageFlipH,
            'flipV': imageFlipV
        }
    )
    xfrm.append(
        makeelement('off', nsprefix='a', attributes={'x': '0', 'y': '0'})
    )
    xfrm.append(
        makeelement(
            'ext', nsprefix='a', attributes={'cx': width, 'cy': height}
        )
    )
    prstgeom = makeelement(
        'prstGeom', nsprefix='a', attributes={'prst': 'rect'}
    )
    prstgeom.append(makeelement('avLst', nsprefix='a'))
    sppr.append(xfrm)
    sppr.append(prstgeom)
    # Add our 3 parts to the picture element
    pic = makeelement('pic', nsprefix='pic')
    pic.append(nvpicpr)
    pic.append(blipfill)
    pic.append(sppr)
    # Now make the supporting elements
    # The following sequence is just: make element, then add its children
    graphicdata = makeelement(
        'graphicData', nsprefix='a',
        attributes={'uri': ('http://schemas.openxmlformats.org/drawingml/200'
                            '6/picture')})
    graphicdata.append(pic)
    graphic = makeelement('graphic', nsprefix='a')
    graphic.append(graphicdata)
    framelocks = makeelement('graphicFrameLocks', nsprefix='a',
                             attributes={'noChangeAspect': '1'})
    framepr = makeelement('cNvGraphicFramePr', nsprefix='wp')
    framepr.append(framelocks)
    docpr = makeelement('docPr', nsprefix='wp',
                        attributes={'id': picid, 'name': 'Picture 1',
                                    'descr': picdescription})
    effectextent = makeelement('effectExtent', nsprefix='wp',
                               attributes={'l': '25400', 't': '0', 'r': '0',
                                           'b': '0'})
    extent = makeelement('extent', nsprefix='wp',
                         attributes={'cx': width, 'cy': height})
    inline = makeelement('inline', attributes={'distT': "0", 'distB': "0",
                                               'distL': "0", 'distR': "0"},
                         nsprefix='wp')
    inline.append(extent)
    inline.append(effectextent)
    inline.append(docpr)
    inline.append(framepr)
    inline.append(graphic)
    drawing = makeelement('drawing')
    drawing.append(inline)
    run = makeelement('r')
    run.append(drawing)
    paragraph = makeelement('p')
    paragraph.append(run)
    if imagefiledict is not None:
        return relationshiplist, paragraph, imagefiledict
    else:
        return relationshiplist, paragraph
def replace(document, search, replace):
    """
    Replace every regex match of *search* with *replace* inside all w:t
    (text) elements of *document*, returning the updated document.
    """
    pattern = re.compile(search)
    text_tag = '{%s}t' % nsprefixes['w']
    for node in document.iter():
        if node.tag != text_tag:
            continue
        # Only touch elements that actually contain a match
        if node.text and pattern.search(node.text):
            node.text = re.sub(search, replace, node.text)
    return document
def clean(document):
    """Strip empty w:t and w:r elements from *document* and return it."""
    for local_name in ('t', 'r'):
        full_tag = '{%s}%s' % (nsprefixes['w'], local_name)
        # Collect first, then remove, so we never mutate while iterating
        doomed = [el for el in document.iter()
                  if el.tag == full_tag and not el.text and not len(el)]
        for el in doomed:
            el.getparent().remove(el)
    return document
def findTypeParent(element, tag):
    """ Finds first parent of element of the given type
    @param object element: etree element
    @param string tag: the tag of the parent to search for
    @return object element: the found parent or None when not found
    """
    # Walk up the ancestor chain. The original looped unconditionally and,
    # once getparent() returned None at the root, crashed with
    # AttributeError on p.tag -- its "return None" was unreachable.
    p = element.getparent()
    while p is not None:
        if p.tag == tag:
            return p
        p = p.getparent()
    # Not found
    return None
def AdvSearch(document, search, bs=3):
    '''Return the set of all regex matches of *search* in *document*.

    This is an advanced version of python-docx search() that takes into
    account blocks of up to *bs* text elements at a time, because the text
    being searched for may be spread across multiple adjacent w:t elements.

    The algorithm slides a window of the last *bs* text elements through
    the document and tries the regex against every contiguous sub-span of
    that window, preferring the smallest matching span.

    Examples:
        original text blocks : [ 'Hel', 'lo,', ' world!' ]
        search : 'Hello,'
        result : set(['Hello,'])

        original text blocks : [ 'Hel', 'lo', ' __', 'name', '__!' ]
        search : '(__[a-z]+__)'
        result : set(['__name__'])

    @param instance document: The document to scan
    @param str search: The text to search for (regexp)
    @param int bs: Maximum number of consecutive text elements to combine
    @return set: All distinct occurrences of the search string
    '''
    # Compile the search regexp
    searchre = re.compile(search)
    matches = []
    # searchels holds the sliding window: the last n text elements found
    # in the document, with 1 <= n <= bs
    searchels = []
    for element in document.iter():
        if element.tag == '{%s}t' % nsprefixes['w']: # t (text) elements
            if element.text:
                # Add this element to searchels
                searchels.append(element)
                if len(searchels) > bs:
                    # If searchels is too long, drop the oldest element
                    searchels.pop(0)
                # Try all contiguous spans of searchels, smallest first:
                # l = span length
                # s = span start index
                # e = element IDs to merge
                found = False
                for l in range(1, len(searchels)+1):
                    if found:
                        break
                    for s in range(len(searchels)):
                        if found:
                            break
                        if s+l <= len(searchels):
                            e = range(s, s+l)
                            txtsearch = ''
                            for k in e:
                                txtsearch += searchels[k].text
                            # Search for the text in the combined span
                            match = searchre.search(txtsearch)
                            if match:
                                matches.append(match.group())
                                found = True
    return set(matches)
def advReplace(document, search, replace, bs=3):
    """
    Replace all occurrences of *search* with *replace* and return the
    updated document.

    This is a modified version of python-docx replace() that takes into
    account blocks of up to *bs* text elements at a time, because the text
    being searched for may be spread across multiple adjacent w:t elements.
    The replacement may be a string, an lxml etree element, or a list of
    etree elements.

    The smallest matching group of blocks (up to bs) is adopted. If the
    matching group has more than one block, blocks other than the first
    are cleared and all replacement text is put on the first block.

    Examples:
        original text blocks : [ 'Hel', 'lo,', ' world!' ]
        search / replace: 'Hello,' / 'Hi!'
        output blocks : [ 'Hi!', '', ' world!' ]

        original text blocks : [ 'Hel', 'lo,', ' world!' ]
        search / replace: 'Hello, world' / 'Hi!'
        output blocks : [ 'Hi!!', '', '' ]

        original text blocks : [ 'Hel', 'lo,', ' world!' ]
        search / replace: 'Hel' / 'Hal'
        output blocks : [ 'Hal', 'lo,', ' world!' ]

    @param instance document: The original document
    @param str search: The text to search for (regexp)
    @param mixed replace: The replacement text, an lxml.etree element to
                          append, or a list of etree elements
    @param int bs: See above
    @return instance The document with replacement applied
    """
    # Enables debug output
    DEBUG = False
    newdocument = document
    # Compile the search regexp
    searchre = re.compile(search)
    # searchels holds the sliding window: the last n text elements found
    # in the document, with 1 <= n <= bs
    searchels = []
    for element in newdocument.iter():
        if element.tag == '{%s}t' % nsprefixes['w']: # t (text) elements
            if element.text:
                # Add this element to searchels
                searchels.append(element)
                if len(searchels) > bs:
                    # If searchels is too long, drop the oldest element
                    searchels.pop(0)
                # Try all contiguous spans of searchels, smallest first:
                # l = span length
                # s = span start index
                # e = element IDs to merge
                found = False
                for l in range(1, len(searchels)+1):
                    if found:
                        break
                    for s in range(len(searchels)):
                        if found:
                            break
                        if s+l <= len(searchels):
                            e = range(s, s+l)
                            txtsearch = ''
                            for k in e:
                                txtsearch += searchels[k].text
                            # Search for the text in the combined span
                            match = searchre.search(txtsearch)
                            if match:
                                found = True
                                # I've found something :)
                                if DEBUG:
                                    log.debug("Found element!")
                                    log.debug("Search regexp: %s",
                                              searchre.pattern)
                                    log.debug("Requested replacement: %s",
                                              replace)
                                    log.debug("Matched text: %s", txtsearch)
                                    log.debug("Matched text (splitted): %s",
                                              map(lambda i: i.text, searchels))
                                    log.debug("Matched at position: %s",
                                              match.start())
                                    log.debug("matched in elements: %s", e)
                                    if isinstance(replace, etree._Element):
                                        log.debug("Will replace with XML CODE")
                                    # BUGFIX: was `isinstance(replace(list,
                                    # tuple))`, which *called* replace and
                                    # raised TypeError whenever DEBUG was on.
                                    elif isinstance(replace, (list, tuple)):
                                        log.debug("Will replace with LIST OF"
                                                  " ELEMENTS")
                                    else:
                                        # BUGFIX: missing %s placeholder made
                                        # logging raise a formatting error.
                                        log.debug("Will replace with: %s",
                                                  re.sub(search, replace,
                                                         txtsearch))
                                curlen = 0
                                replaced = False
                                for i in e:
                                    curlen += len(searchels[i].text)
                                    if curlen > match.start() and not replaced:
                                        # The match occurred in THIS element.
                                        # Put in the whole replaced text
                                        if isinstance(replace, etree._Element):
                                            # Convert to a list and process
                                            # it later
                                            replace = [replace]
                                        if isinstance(replace, (list, tuple)):
                                            # Replacing with a list of etree
                                            # elements: clear the text in the
                                            # tag and append the elements
                                            # after the parent paragraph
                                            # (because t elements cannot have
                                            # children)
                                            p = findTypeParent(
                                                searchels[i],
                                                '{%s}p' % nsprefixes['w'])
                                            searchels[i].text = re.sub(
                                                search, '', txtsearch)
                                            insindex = p.getparent().index(p)+1
                                            for r in replace:
                                                p.getparent().insert(
                                                    insindex, r)
                                                insindex += 1
                                        else:
                                            # Replacing with pure text
                                            searchels[i].text = re.sub(
                                                search, replace, txtsearch)
                                        replaced = True
                                        log.debug(
                                            "Replacing in element #: %s", i)
                                    else:
                                        # Clears the other text elements
                                        searchels[i].text = ''
    return newdocument
def getdocumenttext(document):
    '''Return the raw text of a document, as a list of paragraphs.'''
    w_ns = nsprefixes['w']
    para_tag = '{' + w_ns + '}p'
    text_tag = '{' + w_ns + '}t'
    tab_tag = '{' + w_ns + '}tab'
    paratextlist = []
    # A single sentence may be spread over multiple w:t elements, so
    # gather every text (and tab) descendant of each paragraph in order.
    for para in (el for el in document.iter() if el.tag == para_tag):
        pieces = []
        for child in para.iter():
            if child.tag == text_tag:
                if child.text:
                    pieces.append(child.text)
            elif child.tag == tab_tag:
                pieces.append('\t')
        paratext = u''.join(pieces)
        # Skip paragraphs that contain no text at all
        if paratext:
            paratextlist.append(paratext)
    return paratextlist
def coreproperties(title, subject, creator, keywords, lastmodifiedby=None):
    """
    Build the core ('Dublin Core') document properties element.
    See appproperties() for the application-specific properties.

    @param str title: document title
    @param str subject: document subject
    @param str creator: author name; also used as lastModifiedBy default
    @param list keywords: keyword strings, joined with commas
    @param str lastmodifiedby: optional; falls back to *creator*
    """
    coreprops = makeelement('coreProperties', nsprefix='cp')
    coreprops.append(makeelement('title', tagtext=title, nsprefix='dc'))
    coreprops.append(makeelement('subject', tagtext=subject, nsprefix='dc'))
    coreprops.append(makeelement('creator', tagtext=creator, nsprefix='dc'))
    coreprops.append(makeelement(
        'keywords', tagtext=','.join(keywords), nsprefix='cp'))
    modifier = lastmodifiedby if lastmodifiedby else creator
    coreprops.append(makeelement(
        'lastModifiedBy', tagtext=modifier, nsprefix='cp'))
    coreprops.append(makeelement('revision', tagtext='1', nsprefix='cp'))
    coreprops.append(
        makeelement('category', tagtext='Examples', nsprefix='cp'))
    coreprops.append(
        makeelement('description', tagtext='Examples', nsprefix='dc'))
    currenttime = time.strftime('%Y-%m-%dT%H:%M:%SZ')
    # Document creation and modify times.
    # These elements carry an attribute whose *value* lives in another
    # namespace, which the element factory can't express, so they are
    # built from raw XML strings instead.
    for doctime in ('created', 'modified'):
        elm_str = (
            '<dcterms:%s xmlns:xsi="http://www.w3.org/2001/XMLSchema-instanc'
            'e" xmlns:dcterms="http://purl.org/dc/terms/" xsi:type="dcterms:'
            'W3CDTF">%s</dcterms:%s>'
        ) % (doctime, currenttime, doctime)
        coreprops.append(etree.fromstring(elm_str))
    return coreprops
def appproperties():
    """
    Build the app-specific properties element. See coreproperties() for
    the common ('Dublin Core') document properties.
    """
    # Built from a raw XML string because two namespaces are involved.
    appprops = etree.fromstring(
        '<?xml version="1.0" encoding="UTF-8" standalone="yes"?><Properties x'
        'mlns="http://schemas.openxmlformats.org/officeDocument/2006/extended'
        '-properties" xmlns:vt="http://schemas.openxmlformats.org/officeDocum'
        'ent/2006/docPropsVTypes"></Properties>')
    props = {
        'Template': 'Normal.dotm',
        'TotalTime': '6',
        'Pages': '1',
        'Words': '83',
        'Characters': '475',
        'Application': 'Microsoft Word 12.0.0',
        'DocSecurity': '0',
        'Lines': '12',
        'Paragraphs': '8',
        'ScaleCrop': 'false',
        'LinksUpToDate': 'false',
        'CharactersWithSpaces': '583',
        'SharedDoc': 'false',
        'HyperlinksChanged': 'false',
        'AppVersion': '12.0000',
    }
    for name, value in props.items():
        appprops.append(makeelement(name, tagtext=value, nsprefix=None))
    return appprops
def websettings():
    '''Generate the webSettings part of the document package.'''
    web = makeelement('webSettings')
    for child_tag in ('allowPNG', 'doNotSaveAsSingleFile'):
        web.append(makeelement(child_tag))
    return web
def relationshiplist():
relationshiplist =\
[['http://schemas.openxmlformats.org/officeDocument/2006/'
'relationships/numbering', 'numbering.xml'],
['http://schemas.openxmlformats.org/officeDocument/2006/'
'relationships/styles', 'styles.xml'],
['http://schemas.openxmlformats.org/officeDocument/2006/'
'relationships/settings', 'settings.xml'],
['http://schemas.openxmlformats.org/officeDocument/2006/'
'relationships/webSettings', 'webSettings.xml'],
['http://schemas.openxmlformats.org/officeDocument/2006/'
'relationships/fontTable', 'fontTable.xml'],
['http://schemas.openxmlformats.org/officeDocument/2006/'
'relationships/theme', 'theme/theme1.xml']]
return relationshiplist
def wordrelationships(relationshiplist):
    '''Generate a Word relationships part from a [type, target] list.'''
    # FIXME: built from a raw XML string instead of via makeelement
    relationships = etree.fromstring(
        '<Relationships xmlns="http://schemas.openxmlformats.org/package/2006'
        '/relationships"></Relationships>')
    # Relationship IDs (rId) start at 1
    for idx, relationship in enumerate(relationshiplist, start=1):
        attrs = {
            'Id': 'rId' + str(idx),
            'Type': relationship[0],
            'Target': relationship[1],
        }
        relationships.append(
            makeelement('Relationship', nsprefix=None, attributes=attrs))
    return relationships
def savedocx(
        document, coreprops, appprops, contenttypes, websettings,
        wordrelationships, output, imagefiledict=None):
    """
    Save a modified document.

    Serializes the supplied XML trees into a new .docx (zip) package at
    *output*, adds the images recorded in *imagefiledict* (path -> rIdN),
    then copies every remaining support file from the template directory.

    @param document/coreprops/appprops/contenttypes/websettings/
           wordrelationships: lxml trees for the corresponding parts
    @param output: path (or file-like object) for the resulting .docx
    @param dict imagefiledict: image path -> relationship id; None keeps
           the legacy behavior of reading images from the template dir
    """
    if imagefiledict is None:
        warn(
            'Using savedocx() without imagefiledict parameter will be deprec'
            'ated in the future.', PendingDeprecationWarning
        )
    assert os.path.isdir(template_dir)
    docxfile = zipfile.ZipFile(
        output, mode='w', compression=zipfile.ZIP_DEFLATED)
    # Move to the template data path
    # NOTE(review): os.chdir is process-global and is not restored if an
    # exception occurs below -- a try/finally would be safer; confirm.
    prev_dir = os.path.abspath('.') # save previous working dir
    os.chdir(template_dir)
    # Serialize our trees into out zip file
    treesandfiles = {
        document: 'word/document.xml',
        coreprops: 'docProps/core.xml',
        appprops: 'docProps/app.xml',
        contenttypes: '[Content_Types].xml',
        websettings: 'word/webSettings.xml',
        wordrelationships: 'word/_rels/document.xml.rels'
    }
    for tree in treesandfiles:
        log.info('Saving: %s' % treesandfiles[tree])
        treestring = etree.tostring(tree, pretty_print=True)
        docxfile.writestr(treesandfiles[tree], treestring)
    # Add & compress images, if applicable
    if imagefiledict is not None:
        for imagepath, picrelid in imagefiledict.items():
            archivename = 'word/media/%s_%s' % (picrelid, basename(imagepath))
            log.info('Saving: %s', archivename)
            docxfile.write(imagepath, archivename)
    # Add & compress support files
    files_to_ignore = ['.DS_Store'] # nuisance from some os's
    for dirpath, dirnames, filenames in os.walk('.'):
        for filename in filenames:
            if filename in files_to_ignore:
                continue
            templatefile = join(dirpath, filename)
            # strip the leading './' so archive paths are package-relative
            archivename = templatefile[2:]
            log.info('Saving: %s', archivename)
            docxfile.write(templatefile, archivename)
    log.info('Saved new file to: %r', output)
    docxfile.close()
    os.chdir(prev_dir) # restore previous working dir
    return
|
mikemaccana/python-docx
|
docx.py
|
replace
|
python
|
def replace(document, search, replace):
newdocument = document
searchre = re.compile(search)
for element in newdocument.iter():
if element.tag == '{%s}t' % nsprefixes['w']: # t (text) elements
if element.text:
if searchre.search(element.text):
element.text = re.sub(search, replace, element.text)
return newdocument
|
Replace all occurences of string with a different string, return updated
document
|
train
|
https://github.com/mikemaccana/python-docx/blob/4c9b46dbebe3d2a9b82dbcd35af36584a36fd9fe/docx.py#L629-L641
| null |
# encoding: utf-8
"""
Open and modify Microsoft Word 2007 docx files (called 'OpenXML' and
'Office OpenXML' by Microsoft)
Part of Python's docx module - http://github.com/mikemaccana/python-docx
See LICENSE for licensing information.
"""
import os
import re
import time
import shutil
import zipfile
from lxml import etree
from os.path import abspath, basename, join
try:
from PIL import Image
except ImportError:
import Image
try:
from PIL.ExifTags import TAGS
except ImportError:
TAGS = {}
from exceptions import PendingDeprecationWarning
from warnings import warn
import logging
log = logging.getLogger(__name__)
# Record template directory's location which is just 'template' for a docx
# developer or 'site-packages/docx-template' if you have installed docx
template_dir = join(os.path.dirname(__file__), 'docx-template') # installed
if not os.path.isdir(template_dir):
    template_dir = join(os.path.dirname(__file__), 'template') # dev
# All Word prefixes / namespace matches used in document.xml & core.xml.
# LXML doesn't actually use prefixes (just the real namespace), but these
# keys make it easier to mirror Word's own output. Used throughout via
# makeelement() and tag comparisons like '{%s}t' % nsprefixes['w'].
nsprefixes = {
    'mo': 'http://schemas.microsoft.com/office/mac/office/2008/main',
    'o': 'urn:schemas-microsoft-com:office:office',
    've': 'http://schemas.openxmlformats.org/markup-compatibility/2006',
    # Text Content
    'w': 'http://schemas.openxmlformats.org/wordprocessingml/2006/main',
    'w10': 'urn:schemas-microsoft-com:office:word',
    'wne': 'http://schemas.microsoft.com/office/word/2006/wordml',
    # Drawing
    'a': 'http://schemas.openxmlformats.org/drawingml/2006/main',
    'm': 'http://schemas.openxmlformats.org/officeDocument/2006/math',
    'mv': 'urn:schemas-microsoft-com:mac:vml',
    'pic': 'http://schemas.openxmlformats.org/drawingml/2006/picture',
    'v': 'urn:schemas-microsoft-com:vml',
    'wp': ('http://schemas.openxmlformats.org/drawingml/2006/wordprocessing'
           'Drawing'),
    # Properties (core and extended)
    'cp': ('http://schemas.openxmlformats.org/package/2006/metadata/core-pr'
           'operties'),
    'dc': 'http://purl.org/dc/elements/1.1/',
    'ep': ('http://schemas.openxmlformats.org/officeDocument/2006/extended-'
           'properties'),
    'xsi': 'http://www.w3.org/2001/XMLSchema-instance',
    # Content Types
    'ct': 'http://schemas.openxmlformats.org/package/2006/content-types',
    # Package Relationships
    'r': ('http://schemas.openxmlformats.org/officeDocument/2006/relationsh'
          'ips'),
    'pr': 'http://schemas.openxmlformats.org/package/2006/relationships',
    # Dublin Core document properties
    'dcmitype': 'http://purl.org/dc/dcmitype/',
    'dcterms': 'http://purl.org/dc/terms/'}
def opendocx(file):
    '''Open a docx file, return a document XML tree.

    @param file: path or file-like object of the .docx package
    @return: lxml etree element parsed from word/document.xml
    '''
    mydoc = zipfile.ZipFile(file)
    try:
        # The main document body always lives at this fixed part name
        xmlcontent = mydoc.read('word/document.xml')
    finally:
        # Close the archive explicitly -- the original leaked the handle
        mydoc.close()
    return etree.fromstring(xmlcontent)
def newdocument():
    """Return a minimal w:document tree containing an empty w:body."""
    body = makeelement('body')
    document = makeelement('document')
    document.append(body)
    return document
def makeelement(tagname, tagtext=None, nsprefix='w', attributes=None,
                attrnsprefix=None):
    '''Create an lxml element in the requested namespace & return it.

    @param str tagname: local tag name, e.g. 'p'
    @param str tagtext: optional text content for the new element
    @param mixed nsprefix: key into the module-level nsprefixes dict
                           ('w' by default), None for no namespace, or a
                           list of prefixes -- all go into the element's
                           nsmap but only the first is used for the tag
    @param dict attributes: attribute-name -> value mapping
    @param str attrnsprefix: nsprefixes key applied to attribute names
    @return: the new lxml.etree element
    '''
    # Deal with list of nsprefix by making namespacemap
    namespacemap = None
    if isinstance(nsprefix, list):
        namespacemap = {}
        for prefix in nsprefix:
            namespacemap[prefix] = nsprefixes[prefix]
        # FIXME: rest of code below expects a single prefix
        nsprefix = nsprefix[0]
    if nsprefix:
        namespace = '{%s}' % nsprefixes[nsprefix]
    else:
        # For when namespace = None
        namespace = ''
    newelement = etree.Element(namespace+tagname, nsmap=namespacemap)
    # Add attributes with namespaces
    if attributes:
        # If they haven't bothered setting attribute namespace, use an empty
        # string (equivalent of no namespace)
        if not attrnsprefix:
            # Quick hack: it seems every element that has a 'w' nsprefix for
            # its tag uses the same prefix for its attributes
            if nsprefix == 'w':
                attributenamespace = namespace
            else:
                attributenamespace = ''
        else:
            attributenamespace = '{'+nsprefixes[attrnsprefix]+'}'
        for tagattribute in attributes:
            newelement.set(attributenamespace+tagattribute,
                           attributes[tagattribute])
    if tagtext:
        newelement.text = tagtext
    return newelement
def pagebreak(type='page', orient='portrait'):
    '''Insert a break, default 'page'.
    See http://openxmldeveloper.org/forums/thread/4075.aspx

    @param str type: 'page' or 'section'
    @param str orient: for section breaks, 'portrait' or 'landscape'
    @raises ValueError: for an unknown *type* or *orient*
    Return our page break element.'''
    # Need to enumerate different types of page breaks.
    validtypes = ['page', 'section']
    if type not in validtypes:
        tmpl = 'Page break style "%s" not implemented. Valid styles: %s.'
        raise ValueError(tmpl % (type, validtypes))
    pagebreak = makeelement('p')
    if type == 'page':
        run = makeelement('r')
        br = makeelement('br', attributes={'type': type})
        run.append(br)
        pagebreak.append(run)
    elif type == 'section':
        pPr = makeelement('pPr')
        sectPr = makeelement('sectPr')
        if orient == 'portrait':
            pgSz = makeelement('pgSz', attributes={'w': '12240', 'h': '15840'})
        elif orient == 'landscape':
            pgSz = makeelement('pgSz', attributes={'h': '12240', 'w': '15840',
                                                   'orient': 'landscape'})
        else:
            # BUGFIX: an unknown orientation previously fell through and
            # crashed with NameError on the unbound pgSz below.
            raise ValueError('Invalid page orientation "%s". Valid '
                             'orientations: portrait, landscape.' % orient)
        sectPr.append(pgSz)
        pPr.append(sectPr)
        pagebreak.append(pPr)
    return pagebreak
def paragraph(paratext, style='BodyText', breakbefore=False, jc='left'):
    """
    Return a new paragraph element containing *paratext*. The paragraph's
    default style is 'Body Text', but a new style may be set using the
    *style* parameter.

    @param string jc: Paragraph alignment, possible values:
                      left, center, right, both (justified), ...
                      see http://www.schemacentral.com/sc/ooxml/t-w_ST_Jc.html
                      for a full list
    @param bool breakbefore: when True, insert a lastRenderedPageBreak
                             marker in each run before its text

    If *paratext* is a list, add a run for each (text, char_format_str)
    2-tuple in the list. char_format_str is a string containing one or more
    of the characters 'b', 'i', or 'u', meaning bold, italic, and underline
    respectively. For example:

        paratext = [
            ('some bold text', 'b'),
            ('some normal text', ''),
            ('some italic underlined text', 'iu')
        ]
    """
    # Make our elements
    paragraph = makeelement('p')
    # Normalize the input to a list of (text, char-format) pairs
    if not isinstance(paratext, list):
        paratext = [(paratext, '')]
    text_tuples = []
    for pt in paratext:
        text, char_styles_str = (pt if isinstance(pt, (list, tuple))
                                 else (pt, ''))
        text_elm = makeelement('t', tagtext=text)
        # Leading/trailing whitespace is dropped by XML parsers unless the
        # element is marked xml:space="preserve"
        if len(text.strip()) < len(text):
            text_elm.set('{http://www.w3.org/XML/1998/namespace}space',
                         'preserve')
        text_tuples.append([text_elm, char_styles_str])
    pPr = makeelement('pPr')
    pStyle = makeelement('pStyle', attributes={'val': style})
    pJc = makeelement('jc', attributes={'val': jc})
    pPr.append(pStyle)
    pPr.append(pJc)
    # Add the text to the run, and the run to the paragraph
    paragraph.append(pPr)
    for text_elm, char_styles_str in text_tuples:
        run = makeelement('r')
        rPr = makeelement('rPr')
        # Apply styles
        if 'b' in char_styles_str:
            b = makeelement('b')
            rPr.append(b)
        if 'i' in char_styles_str:
            i = makeelement('i')
            rPr.append(i)
        if 'u' in char_styles_str:
            u = makeelement('u', attributes={'val': 'single'})
            rPr.append(u)
        run.append(rPr)
        # Insert lastRenderedPageBreak for assistive technologies like
        # document narrators to know when a page break occurred.
        if breakbefore:
            lastRenderedPageBreak = makeelement('lastRenderedPageBreak')
            run.append(lastRenderedPageBreak)
        run.append(text_elm)
        paragraph.append(run)
    # Return the combined paragraph
    return paragraph
def contenttypes():
types = etree.fromstring(
'<Types xmlns="http://schemas.openxmlformats.org/package/2006/conten'
't-types"></Types>')
parts = {
'/word/theme/theme1.xml': 'application/vnd.openxmlformats-officedocu'
'ment.theme+xml',
'/word/fontTable.xml': 'application/vnd.openxmlformats-officedocu'
'ment.wordprocessingml.fontTable+xml',
'/docProps/core.xml': 'application/vnd.openxmlformats-package.co'
're-properties+xml',
'/docProps/app.xml': 'application/vnd.openxmlformats-officedocu'
'ment.extended-properties+xml',
'/word/document.xml': 'application/vnd.openxmlformats-officedocu'
'ment.wordprocessingml.document.main+xml',
'/word/settings.xml': 'application/vnd.openxmlformats-officedocu'
'ment.wordprocessingml.settings+xml',
'/word/numbering.xml': 'application/vnd.openxmlformats-officedocu'
'ment.wordprocessingml.numbering+xml',
'/word/styles.xml': 'application/vnd.openxmlformats-officedocu'
'ment.wordprocessingml.styles+xml',
'/word/webSettings.xml': 'application/vnd.openxmlformats-officedocu'
'ment.wordprocessingml.webSettings+xml'}
for part in parts:
types.append(makeelement('Override', nsprefix=None,
attributes={'PartName': part,
'ContentType': parts[part]}))
# Add support for filetypes
filetypes = {
'gif': 'image/gif',
'jpeg': 'image/jpeg',
'jpg': 'image/jpeg',
'png': 'image/png',
'rels': 'application/vnd.openxmlformats-package.relationships+xml',
'xml': 'application/xml'
}
for extension in filetypes:
attrs = {
'Extension': extension,
'ContentType': filetypes[extension]
}
default_elm = makeelement('Default', nsprefix=None, attributes=attrs)
types.append(default_elm)
return types
def heading(headingtext, headinglevel, lang='en'):
    '''Make a new heading paragraph and return it.'''
    # Localized heading style names ('Heading1', 'Titolo1', ...)
    lmap = {'en': 'Heading', 'it': 'Titolo'}
    paragraph = makeelement('p')
    pr = makeelement('pPr')
    pr.append(makeelement(
        'pStyle', attributes={'val': lmap[lang] + str(headinglevel)}))
    run = makeelement('r')
    run.append(makeelement('t', tagtext=headingtext))
    paragraph.append(pr)
    paragraph.append(run)
    return paragraph
def table(contents, heading=True, colw=None, cwunit='dxa', tblw=0,
          twunit='auto', borders=None, celstyle=None):
    """
    Return a table element based on specified parameters

    @param list contents: A list of lists describing contents. Every item in
                          the list can be a string or a valid XML element
                          itself. It can also be a list. In that case all the
                          listed elements will be merged into the cell.
    @param bool heading:  Tells whether first line should be treated as
                          heading or not
    @param list colw:     list of integer column widths specified in wunitS.
    @param str cwunit:    Unit used for column width:
                            'pct'  : fiftieths of a percent
                            'dxa'  : twentieths of a point
                            'nil'  : no width
                            'auto' : automagically determined
    @param int tblw:      Table width
    @param str twunit:    Unit used for table width. Same possible values as
                          cwunit.
    @param dict borders:  Dictionary defining table border. Supported keys
                          are: 'top', 'left', 'bottom', 'right',
                          'insideH', 'insideV', 'all'.
                          When specified, the 'all' key has precedence over
                          others. Each key must define a dict of border
                          attributes:
                            color : The color of the border, in hex or
                                    'auto'
                            space : The space, measured in points
                            sz    : The size of the border, in eighths of
                                    a point
                            val   : The style of the border, see
                          http://www.schemacentral.com/sc/ooxml/t-w_ST_Border.htm
                          Defaults to no borders (None).
    @param list celstyle: Specify the style for each colum, list of dicts.
                          supported keys:
                          'align' : specify the alignment, see paragraph
                                    documentation.
    @return lxml.etree:   Generated XML etree element
    """
    # BUGFIX: the original used the mutable default borders={}, which is
    # shared across calls; use a None sentinel instead (same behavior).
    if borders is None:
        borders = {}
    table = makeelement('tbl')
    columns = len(contents[0])
    # Table properties
    tableprops = makeelement('tblPr')
    tablestyle = makeelement('tblStyle', attributes={'val': ''})
    tableprops.append(tablestyle)
    tablewidth = makeelement(
        'tblW', attributes={'w': str(tblw), 'type': str(twunit)})
    tableprops.append(tablewidth)
    if len(borders.keys()):
        tableborders = makeelement('tblBorders')
        for b in ['top', 'left', 'bottom', 'right', 'insideH', 'insideV']:
            if b in borders.keys() or 'all' in borders.keys():
                # 'all' overrides any individually-specified edge
                k = 'all' if 'all' in borders.keys() else b
                attrs = {}
                for a in borders[k].keys():
                    # NOTE: unicode() is Python 2 only; this module targets
                    # Python 2 (see `from exceptions import ...` at top).
                    attrs[a] = unicode(borders[k][a])
                borderelem = makeelement(b, attributes=attrs)
                tableborders.append(borderelem)
        tableprops.append(tableborders)
    tablelook = makeelement('tblLook', attributes={'val': '0400'})
    tableprops.append(tablelook)
    table.append(tableprops)
    # Table Grid
    tablegrid = makeelement('tblGrid')
    for i in range(columns):
        attrs = {'w': str(colw[i]) if colw else '2390'}
        tablegrid.append(makeelement('gridCol', attributes=attrs))
    table.append(tablegrid)
    # Heading Row
    row = makeelement('tr')
    rowprops = makeelement('trPr')
    cnfStyle = makeelement('cnfStyle', attributes={'val': '000000100000'})
    rowprops.append(cnfStyle)
    row.append(rowprops)
    if heading:
        i = 0
        for heading in contents[0]:
            cell = makeelement('tc')
            # Cell properties
            cellprops = makeelement('tcPr')
            if colw:
                wattr = {'w': str(colw[i]), 'type': cwunit}
            else:
                wattr = {'w': '0', 'type': 'auto'}
            cellwidth = makeelement('tcW', attributes=wattr)
            cellstyle = makeelement('shd', attributes={'val': 'clear',
                                                       'color': 'auto',
                                                       'fill': 'FFFFFF',
                                                       'themeFill': 'text2',
                                                       'themeFillTint': '99'})
            cellprops.append(cellwidth)
            cellprops.append(cellstyle)
            cell.append(cellprops)
            # Paragraph (Content)
            if not isinstance(heading, (list, tuple)):
                heading = [heading]
            for h in heading:
                if isinstance(h, etree._Element):
                    cell.append(h)
                else:
                    cell.append(paragraph(h, jc='center'))
            row.append(cell)
            i += 1
    # Preserved quirk: the row (possibly containing only trPr) is appended
    # unconditionally, exactly as in the original implementation.
    table.append(row)
    # Contents Rows
    for contentrow in contents[1 if heading else 0:]:
        row = makeelement('tr')
        i = 0
        for content in contentrow:
            cell = makeelement('tc')
            # Properties
            cellprops = makeelement('tcPr')
            if colw:
                wattr = {'w': str(colw[i]), 'type': cwunit}
            else:
                wattr = {'w': '0', 'type': 'auto'}
            cellwidth = makeelement('tcW', attributes=wattr)
            cellprops.append(cellwidth)
            cell.append(cellprops)
            # Paragraph (Content)
            if not isinstance(content, (list, tuple)):
                content = [content]
            for c in content:
                if isinstance(c, etree._Element):
                    cell.append(c)
                else:
                    if celstyle and 'align' in celstyle[i].keys():
                        align = celstyle[i]['align']
                    else:
                        align = 'left'
                    cell.append(paragraph(c, jc=align))
            row.append(cell)
            i += 1
        table.append(row)
    return table
def picture(
        relationshiplist, picname, picdescription, pixelwidth=None,
        pixelheight=None, nochangeaspect=True, nochangearrowheads=True,
        imagefiledict=None):
    """
    Take a relationshiplist, picture file name, and return a paragraph
    containing the image and an updated relationshiplist.

    When *imagefiledict* is supplied, also returns the updated dict as a
    third element; the dict maps image paths to relationship ids so the
    image files need not be copied into the template directory.
    """
    if imagefiledict is None:
        warn(
            'Using picture() without imagefiledict parameter will be depreca'
            'ted in the future.', PendingDeprecationWarning
        )
    # http://openxmldeveloper.org/articles/462.aspx
    # Create an image. Size may be specified, otherwise it will based on the
    # pixel size of image. Return a paragraph containing the picture
    # Set relationship ID to that of the image or the first available one
    picid = '2'
    picpath = abspath(picname)
    if imagefiledict is not None:
        # Keep track of the image files in a separate dictionary so they don't
        # need to be copied into the template directory
        if picpath not in imagefiledict:
            picrelid = 'rId' + str(len(relationshiplist) + 1)
            imagefiledict[picpath] = picrelid
            relationshiplist.append([
                'http://schemas.openxmlformats.org/officeDocument/2006/relat'
                'ionships/image',
                'media/%s_%s' % (picrelid, basename(picpath))
            ])
        else:
            picrelid = imagefiledict[picpath]
    else:
        # Copy files into template directory for backwards compatibility
        # Images still accumulate in the template directory this way
        picrelid = 'rId' + str(len(relationshiplist) + 1)
        relationshiplist.append([
            'http://schemas.openxmlformats.org/officeDocument/2006/relations'
            'hips/image', 'media/' + picname
        ])
        media_dir = join(template_dir, 'word', 'media')
        if not os.path.isdir(media_dir):
            os.mkdir(media_dir)
        shutil.copyfile(picname, join(media_dir, picname))
    image = Image.open(picpath)
    # Extract EXIF data, if available
    try:
        exif = image._getexif()
        exif = {} if exif is None else exif
    except:
        # NOTE(review): bare except treats any failure as "no EXIF data";
        # narrowing the exception types would be safer -- confirm intent.
        exif = {}
    imageExif = {}
    for tag, value in exif.items():
        imageExif[TAGS.get(tag, tag)] = value
    # Map the EXIF orientation flag (1-8) to the rotation/flip that Word
    # should apply so the image displays upright
    imageOrientation = imageExif.get('Orientation', 1)
    imageAngle = {
        1: 0, 2: 0, 3: 180, 4: 0, 5: 90, 6: 90, 7: 270, 8: 270
    }[imageOrientation]
    imageFlipH = 'true' if imageOrientation in (2, 5, 7) else 'false'
    imageFlipV = 'true' if imageOrientation == 4 else 'false'
    # Check if the user has specified a size
    if not pixelwidth or not pixelheight:
        # If not, get info from the picture itself
        pixelwidth, pixelheight = image.size[0:2]
    # Swap width and height if necessary
    if imageOrientation in (5, 6, 7, 8):
        pixelwidth, pixelheight = pixelheight, pixelwidth
    # OpenXML measures on-screen objects in English Metric Units:
    # 914400 EMU per inch; the factor used here, 12700 EMU, is one point
    emuperpixel = 12700
    width = str(pixelwidth * emuperpixel)
    height = str(pixelheight * emuperpixel)
    # There are 3 main elements inside a picture
    # 1. The Blipfill - specifies how the image fills the picture area
    # (stretch, tile, etc.)
    blipfill = makeelement('blipFill', nsprefix='pic')
    blipfill.append(makeelement('blip', nsprefix='a', attrnsprefix='r',
                    attributes={'embed': picrelid}))
    stretch = makeelement('stretch', nsprefix='a')
    stretch.append(makeelement('fillRect', nsprefix='a'))
    blipfill.append(makeelement('srcRect', nsprefix='a'))
    blipfill.append(stretch)
    # 2. The non visual picture properties
    nvpicpr = makeelement('nvPicPr', nsprefix='pic')
    cnvpr = makeelement(
        'cNvPr', nsprefix='pic',
        attributes={'id': '0', 'name': 'Picture 1', 'descr': picdescription}
    )
    nvpicpr.append(cnvpr)
    cnvpicpr = makeelement('cNvPicPr', nsprefix='pic')
    cnvpicpr.append(makeelement(
        'picLocks', nsprefix='a',
        attributes={'noChangeAspect': str(int(nochangeaspect)),
                    'noChangeArrowheads': str(int(nochangearrowheads))}))
    nvpicpr.append(cnvpicpr)
    # 3. The Shape properties
    sppr = makeelement('spPr', nsprefix='pic', attributes={'bwMode': 'auto'})
    xfrm = makeelement(
        'xfrm', nsprefix='a', attributes={
            'rot': str(imageAngle * 60000), 'flipH': imageFlipH,
            'flipV': imageFlipV
        }
    )
    xfrm.append(
        makeelement('off', nsprefix='a', attributes={'x': '0', 'y': '0'})
    )
    xfrm.append(
        makeelement(
            'ext', nsprefix='a', attributes={'cx': width, 'cy': height}
        )
    )
    prstgeom = makeelement(
        'prstGeom', nsprefix='a', attributes={'prst': 'rect'}
    )
    prstgeom.append(makeelement('avLst', nsprefix='a'))
    sppr.append(xfrm)
    sppr.append(prstgeom)
    # Add our 3 parts to the picture element
    pic = makeelement('pic', nsprefix='pic')
    pic.append(nvpicpr)
    pic.append(blipfill)
    pic.append(sppr)
    # Now make the supporting elements
    # The following sequence is just: make element, then add its children
    graphicdata = makeelement(
        'graphicData', nsprefix='a',
        attributes={'uri': ('http://schemas.openxmlformats.org/drawingml/200'
                            '6/picture')})
    graphicdata.append(pic)
    graphic = makeelement('graphic', nsprefix='a')
    graphic.append(graphicdata)
    framelocks = makeelement('graphicFrameLocks', nsprefix='a',
                             attributes={'noChangeAspect': '1'})
    framepr = makeelement('cNvGraphicFramePr', nsprefix='wp')
    framepr.append(framelocks)
    docpr = makeelement('docPr', nsprefix='wp',
                        attributes={'id': picid, 'name': 'Picture 1',
                                    'descr': picdescription})
    effectextent = makeelement('effectExtent', nsprefix='wp',
                               attributes={'l': '25400', 't': '0', 'r': '0',
                                           'b': '0'})
    extent = makeelement('extent', nsprefix='wp',
                         attributes={'cx': width, 'cy': height})
    inline = makeelement('inline', attributes={'distT': "0", 'distB': "0",
                                               'distL': "0", 'distR': "0"},
                         nsprefix='wp')
    inline.append(extent)
    inline.append(effectextent)
    inline.append(docpr)
    inline.append(framepr)
    inline.append(graphic)
    drawing = makeelement('drawing')
    drawing.append(inline)
    run = makeelement('r')
    run.append(drawing)
    paragraph = makeelement('p')
    paragraph.append(run)
    if imagefiledict is not None:
        return relationshiplist, paragraph, imagefiledict
    else:
        return relationshiplist, paragraph
def search(document, search):
    '''Return True if the regex *search* matches any text element of
    *document*, False otherwise.'''
    pattern = re.compile(search)
    text_tag = '{%s}t' % nsprefixes['w']
    for element in document.iter():
        # Only w:t (text) elements carry searchable text
        if element.tag != text_tag or not element.text:
            continue
        if pattern.search(element.text):
            return True
    return False
def clean(document):
    """Perform misc cleaning operations on documents.
    Removes w:t and w:r elements that have neither text nor children.
    Returns the cleaned document (same object, modified in place).
    """
    for tagname in ('t', 'r'):
        qualified = '{%s}%s' % (nsprefixes['w'], tagname)
        # Collect first, then remove, so we never mutate while iterating
        empties = [el for el in document.iter()
                   if el.tag == qualified and not el.text and not len(el)]
        for el in empties:
            el.getparent().remove(el)
    return document
def findTypeParent(element, tag):
    """ Finds first ancestor of element with the given tag
    @param object element: etree element
    @param string tag: the (namespace-qualified) parent tag to search for
    @return object element: the found parent or None when not found
    """
    p = element
    while True:
        p = p.getparent()
        if p is None:
            # Reached the tree root without a match. The original code
            # never hit its "return None" branch: p.tag on None raised
            # AttributeError instead.
            return None
        if p.tag == tag:
            return p
def AdvSearch(document, search, bs=3):
    '''Return set of all regex matches
    This is an advanced version of python-docx.search() that takes into
    account blocks of <bs> elements at a time.
    What it does:
    It searches the entire document body for text blocks.
    Since the text to search could be spawned across multiple text blocks,
    it keeps a sliding window of up to <bs> consecutive text elements and
    matches the regex against every contiguous combination inside that
    window, shortest combinations first.
    Examples:
    original text blocks : [ 'Hel', 'lo,', ' world!' ]
    search : 'Hello,'
    result : set(['Hello,'])
    original text blocks : [ 'Hel', 'lo', ' __', 'name', '__!' ]
    search : '(__[a-z]+__)'
    result : set(['__name__'])
    @param instance document: The original document
    @param str search: The text to search for (regexp)
    @param int bs: Size of the sliding window of text elements
    @return set: All distinct occurrences of the search string
    '''
    # Compile the search regexp
    searchre = re.compile(search)
    matches = []
    # Will match against searchels. Searchels is a list that contains last
    # n text elements found in the document. 1 < n < bs
    searchels = []
    for element in document.iter():
        if element.tag == '{%s}t' % nsprefixes['w']:  # t (text) elements
            if element.text:
                # Add this element to searchels
                searchels.append(element)
                if len(searchels) > bs:
                    # If searchels is too long, remove first elements
                    searchels.pop(0)
                # Search all combinations, of searchels, starting from
                # smaller up to bigger ones
                # l = search length (number of elements combined)
                # s = search start (index of first combined element)
                # e = element IDs to merge
                found = False
                for l in range(1, len(searchels)+1):
                    if found:
                        break
                    for s in range(len(searchels)):
                        if found:
                            break
                        if s+l <= len(searchels):
                            e = range(s, s+l)
                            txtsearch = ''
                            for k in e:
                                txtsearch += searchels[k].text
                            # Search for the text in the whole txtsearch
                            match = searchre.search(txtsearch)
                            if match:
                                matches.append(match.group())
                                found = True
    return set(matches)
def advReplace(document, search, replace, bs=3):
    """
    Replace all occurences of string with a different string, return updated
    document
    This is a modified version of python-docx.replace() that takes into
    account blocks of <bs> elements at a time. The replace element can also
    be a string or an xml etree element.
    What it does:
    It searches the entire document body for text blocks.
    Then scan those text blocks for replace.
    Since the text to search could be spawned across multiple text blocks,
    we need to adopt some sort of algorithm to handle this situation.
    The smaller matching group of blocks (up to bs) is then adopted.
    If the matching group has more than one block, blocks other than first
    are cleared and all the replacement text is put on first block.
    Examples:
    original text blocks : [ 'Hel', 'lo,', ' world!' ]
    search / replace: 'Hello,' / 'Hi!'
    output blocks : [ 'Hi!', '', ' world!' ]
    original text blocks : [ 'Hel', 'lo,', ' world!' ]
    search / replace: 'Hello, world' / 'Hi!'
    output blocks : [ 'Hi!!', '', '' ]
    original text blocks : [ 'Hel', 'lo,', ' world!' ]
    search / replace: 'Hel' / 'Hal'
    output blocks : [ 'Hal', 'lo,', ' world!' ]
    @param instance document: The original document
    @param str search: The text to search for (regexp)
    @param mixed replace: The replacement text or lxml.etree element to
                          append, or a list of etree elements
    @param int bs: See above
    @return instance The document with replacement applied
    """
    # Enables debug output
    DEBUG = False
    newdocument = document
    # Compile the search regexp
    searchre = re.compile(search)
    # Will match against searchels. Searchels is a list that contains last
    # n text elements found in the document. 1 < n < bs
    searchels = []
    for element in newdocument.iter():
        if element.tag == '{%s}t' % nsprefixes['w']:  # t (text) elements
            if element.text:
                # Add this element to searchels
                searchels.append(element)
                if len(searchels) > bs:
                    # If searchels is too long, remove first elements
                    searchels.pop(0)
                # Search all combinations, of searchels, starting from
                # smaller up to bigger ones
                # l = search length (number of elements combined)
                # s = search start
                # e = element IDs to merge
                found = False
                for l in range(1, len(searchels)+1):
                    if found:
                        break
                    for s in range(len(searchels)):
                        if found:
                            break
                        if s+l <= len(searchels):
                            e = range(s, s+l)
                            txtsearch = ''
                            for k in e:
                                txtsearch += searchels[k].text
                            # Search for the text in the whole txtsearch
                            match = searchre.search(txtsearch)
                            if match:
                                found = True
                                # I've found something :)
                                if DEBUG:
                                    log.debug("Found element!")
                                    log.debug("Search regexp: %s",
                                              searchre.pattern)
                                    log.debug("Requested replacement: %s",
                                              replace)
                                    log.debug("Matched text: %s", txtsearch)
                                    log.debug("Matched text (splitted): %s",
                                              map(lambda i: i.text, searchels))
                                    log.debug("Matched at position: %s",
                                              match.start())
                                    log.debug("matched in elements: %s", e)
                                    if isinstance(replace, etree._Element):
                                        log.debug("Will replace with XML CODE")
                                    # BUG FIX: original wrote
                                    # isinstance(replace(list, tuple)), which
                                    # *called* replace and raised TypeError.
                                    elif isinstance(replace, (list, tuple)):
                                        log.debug("Will replace with LIST OF"
                                                  " ELEMENTS")
                                    else:
                                        # BUG FIX: original passed the value
                                        # with no %s placeholder, causing a
                                        # logging formatting error.
                                        log.debug("Will replace with: %s",
                                                  re.sub(search, replace,
                                                         txtsearch))
                                curlen = 0
                                replaced = False
                                for i in e:
                                    curlen += len(searchels[i].text)
                                    if curlen > match.start() and not replaced:
                                        # The match occurred in THIS element.
                                        # Put in the whole replaced text
                                        if isinstance(replace, etree._Element):
                                            # Convert to a list and process
                                            # it later
                                            replace = [replace]
                                        if isinstance(replace, (list, tuple)):
                                            # I'm replacing with a list of
                                            # etree elements
                                            # clear the text in the tag and
                                            # append the element after the
                                            # parent paragraph
                                            # (because t elements cannot have
                                            # childs)
                                            p = findTypeParent(
                                                searchels[i],
                                                '{%s}p' % nsprefixes['w'])
                                            searchels[i].text = re.sub(
                                                search, '', txtsearch)
                                            insindex = p.getparent().index(p)+1
                                            for r in replace:
                                                p.getparent().insert(
                                                    insindex, r)
                                                insindex += 1
                                        else:
                                            # Replacing with pure text
                                            searchels[i].text = re.sub(
                                                search, replace, txtsearch)
                                        replaced = True
                                        log.debug(
                                            "Replacing in element #: %s", i)
                                    else:
                                        # Clears the other text elements
                                        searchels[i].text = ''
    return newdocument
def getdocumenttext(document):
    '''Return the raw text of a document, as a list of paragraphs.'''
    w_ns = nsprefixes['w']
    p_tag = '{' + w_ns + '}p'
    t_tag = '{' + w_ns + '}t'
    tab_tag = '{' + w_ns + '}tab'
    # Collect every paragraph (p) element first
    paragraphs = [el for el in document.iter() if el.tag == p_tag]
    # A single sentence may be spread over multiple text elements, so
    # concatenate every t child (and tabs) of each paragraph.
    paratextlist = []
    for para in paragraphs:
        pieces = []
        for el in para.iter():
            if el.tag == t_tag:
                if el.text:
                    pieces.append(el.text)
            elif el.tag == tab_tag:
                pieces.append('\t')
        paratext = u''.join(pieces)
        # Skip paragraphs that produced no text at all
        if paratext:
            paratextlist.append(paratext)
    return paratextlist
def coreproperties(title, subject, creator, keywords, lastmodifiedby=None):
    """
    Create core properties (common document properties referred to in the
    'Dublin Core' specification). See appproperties() for other stuff.
    """
    coreprops = makeelement('coreProperties', nsprefix='cp')
    # Simple child elements: (tag, text, namespace prefix)
    children = [
        ('title', title, 'dc'),
        ('subject', subject, 'dc'),
        ('creator', creator, 'dc'),
        ('keywords', ','.join(keywords), 'cp'),
        ('lastModifiedBy', lastmodifiedby if lastmodifiedby else creator,
         'cp'),
        ('revision', '1', 'cp'),
        ('category', 'Examples', 'cp'),
        ('description', 'Examples', 'dc'),
    ]
    for tag, text, prefix in children:
        coreprops.append(makeelement(tag, tagtext=text, nsprefix=prefix))
    currenttime = time.strftime('%Y-%m-%dT%H:%M:%SZ')
    # Document creation and modify times
    # Prob here: we have an attribute whose name uses one namespace, and
    # that attribute's value uses another namespace.
    # We're creating the element from a string as a workaround...
    for doctime in ['created', 'modified']:
        elm_str = (
            '<dcterms:%s xmlns:xsi="http://www.w3.org/2001/XMLSchema-instanc'
            'e" xmlns:dcterms="http://purl.org/dc/terms/" xsi:type="dcterms:'
            'W3CDTF">%s</dcterms:%s>'
        ) % (doctime, currenttime, doctime)
        coreprops.append(etree.fromstring(elm_str))
    return coreprops
def appproperties():
    """
    Create app-specific properties. See docproperties() for more common
    document properties.
    Returns an etree <Properties> element populated with fixed values.
    """
    # NOTE: the original code first built an element with makeelement()
    # and immediately overwrote it with etree.fromstring() — the dead
    # assignment has been removed.
    appprops = etree.fromstring(
        '<?xml version="1.0" encoding="UTF-8" standalone="yes"?><Properties x'
        'mlns="http://schemas.openxmlformats.org/officeDocument/2006/extended'
        '-properties" xmlns:vt="http://schemas.openxmlformats.org/officeDocum'
        'ent/2006/docPropsVTypes"></Properties>')
    # Fixed boilerplate property values copied from a reference document
    props = {
        'Template': 'Normal.dotm',
        'TotalTime': '6',
        'Pages': '1',
        'Words': '83',
        'Characters': '475',
        'Application': 'Microsoft Word 12.0.0',
        'DocSecurity': '0',
        'Lines': '12',
        'Paragraphs': '8',
        'ScaleCrop': 'false',
        'LinksUpToDate': 'false',
        'CharactersWithSpaces': '583',
        'SharedDoc': 'false',
        'HyperlinksChanged': 'false',
        'AppVersion': '12.0000',
    }
    for name, value in props.items():
        appprops.append(makeelement(name, tagtext=value, nsprefix=None))
    return appprops
def websettings():
    '''Generate a default webSettings element.'''
    settings = makeelement('webSettings')
    for childname in ('allowPNG', 'doNotSaveAsSingleFile'):
        settings.append(makeelement(childname))
    return settings
def relationshiplist():
    '''Return the default list of [relationship-type, target] pairs for a
    new document package.'''
    base = ('http://schemas.openxmlformats.org/officeDocument/2006/'
            'relationships/')
    targets = [
        ('numbering', 'numbering.xml'),
        ('styles', 'styles.xml'),
        ('settings', 'settings.xml'),
        ('webSettings', 'webSettings.xml'),
        ('fontTable', 'fontTable.xml'),
        ('theme', 'theme/theme1.xml'),
    ]
    return [[base + reltype, target] for reltype, target in targets]
def wordrelationships(relationshiplist):
    '''Generate a Word relationships file from a list of
    [relationship-type, target] pairs.'''
    # FIXME: using string hack instead of making element
    relationships = etree.fromstring(
        '<Relationships xmlns="http://schemas.openxmlformats.org/package/2006'
        '/relationships"></Relationships>')
    # Relationship IDs (rId) start at 1
    for index, (reltype, target) in enumerate(relationshiplist):
        relationships.append(makeelement(
            'Relationship', nsprefix=None,
            attributes={'Id': 'rId' + str(index + 1),
                        'Type': reltype,
                        'Target': target}))
    return relationships
def savedocx(
        document, coreprops, appprops, contenttypes, websettings,
        wordrelationships, output, imagefiledict=None):
    """
    Save a modified document.

    Serializes the given etree parts into a new .docx (zip) archive at
    *output*, copies image files referenced in *imagefiledict*
    (path -> rId mapping) into word/media/, and adds every support file
    found under the template directory.

    NOTE: temporarily chdir()s into the template directory, so this is
    not safe to call concurrently from multiple threads.
    """
    if imagefiledict is None:
        warn(
            'Using savedocx() without imagefiledict parameter will be deprec'
            'ated in the future.', PendingDeprecationWarning
        )
    # NOTE(review): assert is stripped under -O; a missing template dir
    # would then surface later as a confusing chdir/open error.
    assert os.path.isdir(template_dir)
    docxfile = zipfile.ZipFile(
        output, mode='w', compression=zipfile.ZIP_DEFLATED)
    # Move to the template data path
    prev_dir = os.path.abspath('.')  # save previous working dir
    os.chdir(template_dir)
    # Serialize our trees into out zip file
    treesandfiles = {
        document: 'word/document.xml',
        coreprops: 'docProps/core.xml',
        appprops: 'docProps/app.xml',
        contenttypes: '[Content_Types].xml',
        websettings: 'word/webSettings.xml',
        wordrelationships: 'word/_rels/document.xml.rels'
    }
    for tree in treesandfiles:
        log.info('Saving: %s' % treesandfiles[tree])
        treestring = etree.tostring(tree, pretty_print=True)
        docxfile.writestr(treesandfiles[tree], treestring)
    # Add & compress images, if applicable
    if imagefiledict is not None:
        for imagepath, picrelid in imagefiledict.items():
            # Archive name must match the relationship target created in
            # picture(): media/<rId>_<basename>
            archivename = 'word/media/%s_%s' % (picrelid, basename(imagepath))
            log.info('Saving: %s', archivename)
            docxfile.write(imagepath, archivename)
    # Add & compress support files
    files_to_ignore = ['.DS_Store']  # nuisance from some os's
    for dirpath, dirnames, filenames in os.walk('.'):
        for filename in filenames:
            if filename in files_to_ignore:
                continue
            templatefile = join(dirpath, filename)
            # Strip the leading './' so the archive paths are relative
            archivename = templatefile[2:]
            log.info('Saving: %s', archivename)
            docxfile.write(templatefile, archivename)
    log.info('Saved new file to: %r', output)
    docxfile.close()
    os.chdir(prev_dir)  # restore previous working dir
    return
|
mikemaccana/python-docx
|
docx.py
|
clean
|
python
|
def clean(document):
newdocument = document
# Clean empty text and r tags
for t in ('t', 'r'):
rmlist = []
for element in newdocument.iter():
if element.tag == '{%s}%s' % (nsprefixes['w'], t):
if not element.text and not len(element):
rmlist.append(element)
for element in rmlist:
element.getparent().remove(element)
return newdocument
|
Perform misc cleaning operations on documents.
Returns cleaned document.
|
train
|
https://github.com/mikemaccana/python-docx/blob/4c9b46dbebe3d2a9b82dbcd35af36584a36fd9fe/docx.py#L644-L661
| null |
# encoding: utf-8
"""
Open and modify Microsoft Word 2007 docx files (called 'OpenXML' and
'Office OpenXML' by Microsoft)
Part of Python's docx module - http://github.com/mikemaccana/python-docx
See LICENSE for licensing information.
"""
import os
import re
import time
import shutil
import zipfile
from lxml import etree
from os.path import abspath, basename, join
try:
from PIL import Image
except ImportError:
import Image
try:
from PIL.ExifTags import TAGS
except ImportError:
TAGS = {}
from exceptions import PendingDeprecationWarning
from warnings import warn
import logging
log = logging.getLogger(__name__)
# Record template directory's location which is just 'template' for a docx
# developer or 'site-packages/docx-template' if you have installed docx
template_dir = join(os.path.dirname(__file__), 'docx-template')  # installed
if not os.path.isdir(template_dir):
    template_dir = join(os.path.dirname(__file__), 'template')  # dev
# All Word prefixes / namespace matches used in document.xml & core.xml.
# LXML doesn't actually use prefixes (just the real namespace), but these
# make it easier to copy Word output more easily.
# Maps short prefix -> full namespace URI; used throughout this module to
# build Clark-notation tags like '{<uri>}t'.
nsprefixes = {
    'mo': 'http://schemas.microsoft.com/office/mac/office/2008/main',
    'o': 'urn:schemas-microsoft-com:office:office',
    've': 'http://schemas.openxmlformats.org/markup-compatibility/2006',
    # Text Content
    'w': 'http://schemas.openxmlformats.org/wordprocessingml/2006/main',
    'w10': 'urn:schemas-microsoft-com:office:word',
    'wne': 'http://schemas.microsoft.com/office/word/2006/wordml',
    # Drawing
    'a': 'http://schemas.openxmlformats.org/drawingml/2006/main',
    'm': 'http://schemas.openxmlformats.org/officeDocument/2006/math',
    'mv': 'urn:schemas-microsoft-com:mac:vml',
    'pic': 'http://schemas.openxmlformats.org/drawingml/2006/picture',
    'v': 'urn:schemas-microsoft-com:vml',
    'wp': ('http://schemas.openxmlformats.org/drawingml/2006/wordprocessing'
           'Drawing'),
    # Properties (core and extended)
    'cp': ('http://schemas.openxmlformats.org/package/2006/metadata/core-pr'
           'operties'),
    'dc': 'http://purl.org/dc/elements/1.1/',
    'ep': ('http://schemas.openxmlformats.org/officeDocument/2006/extended-'
           'properties'),
    'xsi': 'http://www.w3.org/2001/XMLSchema-instance',
    # Content Types
    'ct': 'http://schemas.openxmlformats.org/package/2006/content-types',
    # Package Relationships
    'r': ('http://schemas.openxmlformats.org/officeDocument/2006/relationsh'
          'ips'),
    'pr': 'http://schemas.openxmlformats.org/package/2006/relationships',
    # Dublin Core document properties
    'dcmitype': 'http://purl.org/dc/dcmitype/',
    'dcterms': 'http://purl.org/dc/terms/'}
def opendocx(file):
    '''Open a docx file, return a document XML tree.

    Reads word/document.xml out of the .docx (zip) archive and parses it.
    The archive is closed before returning (the original version leaked
    the open ZipFile).
    '''
    mydoc = zipfile.ZipFile(file)
    try:
        xmlcontent = mydoc.read('word/document.xml')
    finally:
        mydoc.close()
    return etree.fromstring(xmlcontent)
def newdocument():
    '''Return a new, empty document element containing an empty body.'''
    body = makeelement('body')
    document = makeelement('document')
    document.append(body)
    return document
def makeelement(tagname, tagtext=None, nsprefix='w', attributes=None,
                attrnsprefix=None):
    '''Create an lxml element and return it.

    nsprefix may be a single prefix key from nsprefixes, None (no
    namespace), or a list of prefixes (the first is used for the tag and
    all are declared in the element's nsmap).
    '''
    nsmap = None
    if isinstance(nsprefix, list):
        # Declare every requested namespace on the element...
        nsmap = dict((prefix, nsprefixes[prefix]) for prefix in nsprefix)
        # FIXME: ...but the rest of the code expects a single prefix
        nsprefix = nsprefix[0]
    namespace = '{%s}' % nsprefixes[nsprefix] if nsprefix else ''
    newelement = etree.Element(namespace + tagname, nsmap=nsmap)
    # Add attributes with namespaces
    if attributes:
        if attrnsprefix:
            attributenamespace = '{' + nsprefixes[attrnsprefix] + '}'
        elif nsprefix == 'w':
            # Quick hack: it seems every element that has a 'w' nsprefix
            # for its tag uses the same prefix for its attributes
            attributenamespace = namespace
        else:
            # No attribute namespace requested or implied
            attributenamespace = ''
        for attrname, attrvalue in attributes.items():
            newelement.set(attributenamespace + attrname, attrvalue)
    if tagtext:
        newelement.text = tagtext
    return newelement
def pagebreak(type='page', orient='portrait'):
    '''Insert a break, default 'page'.
    See http://openxmldeveloper.org/forums/thread/4075.aspx
    @param str type: 'page' or 'section'
    @param str orient: for section breaks, 'portrait' or 'landscape'
    @raise ValueError: on an unknown type or orient
    Return our page break element.'''
    # Need to enumerate different types of page breaks.
    validtypes = ['page', 'section']
    if type not in validtypes:
        tmpl = 'Page break style "%s" not implemented. Valid styles: %s.'
        raise ValueError(tmpl % (type, validtypes))
    pagebreak = makeelement('p')
    if type == 'page':
        run = makeelement('r')
        br = makeelement('br', attributes={'type': type})
        run.append(br)
        pagebreak.append(run)
    elif type == 'section':
        pPr = makeelement('pPr')
        sectPr = makeelement('sectPr')
        if orient == 'portrait':
            pgSz = makeelement('pgSz', attributes={'w': '12240', 'h': '15840'})
        elif orient == 'landscape':
            pgSz = makeelement('pgSz', attributes={'h': '12240', 'w': '15840',
                                                   'orient': 'landscape'})
        else:
            # Previously an unknown orient crashed with NameError (pgSz
            # unbound); fail with a clear message instead.
            tmpl = ('Page orientation "%s" not implemented. Valid '
                    'orientations: portrait, landscape.')
            raise ValueError(tmpl % orient)
        sectPr.append(pgSz)
        pPr.append(sectPr)
        pagebreak.append(pPr)
    return pagebreak
def paragraph(paratext, style='BodyText', breakbefore=False, jc='left'):
    """
    Return a new paragraph element containing *paratext*. The paragraph's
    default style is 'Body Text', but a new style may be set using the
    *style* parameter.
    @param string jc: Paragraph alignment, possible values:
                      left, center, right, both (justified), ...
                      see http://www.schemacentral.com/sc/ooxml/t-w_ST_Jc.html
                      for a full list
    @param bool breakbefore: insert a lastRenderedPageBreak marker before
                             the text of each run
    If *paratext* is a list, add a run for each (text, char_format_str)
    2-tuple in the list. char_format_str is a string containing one or more
    of the characters 'b', 'i', or 'u', meaning bold, italic, and underline
    respectively. For example:
        paratext = [
            ('some bold text', 'b'),
            ('some normal text', ''),
            ('some italic underlined text', 'iu')
        ]
    """
    # Make our elements
    paragraph = makeelement('p')
    # Normalize: a plain string becomes a single unformatted run
    if not isinstance(paratext, list):
        paratext = [(paratext, '')]
    text_tuples = []
    for pt in paratext:
        # Each item may be a (text, format) pair or a bare string
        text, char_styles_str = (pt if isinstance(pt, (list, tuple))
                                 else (pt, ''))
        text_elm = makeelement('t', tagtext=text)
        # Leading/trailing whitespace is dropped by Word unless the
        # xml:space="preserve" attribute is set
        if len(text.strip()) < len(text):
            text_elm.set('{http://www.w3.org/XML/1998/namespace}space',
                         'preserve')
        text_tuples.append([text_elm, char_styles_str])
    pPr = makeelement('pPr')
    pStyle = makeelement('pStyle', attributes={'val': style})
    pJc = makeelement('jc', attributes={'val': jc})
    pPr.append(pStyle)
    pPr.append(pJc)
    # Add the text to the run, and the run to the paragraph
    paragraph.append(pPr)
    for text_elm, char_styles_str in text_tuples:
        run = makeelement('r')
        rPr = makeelement('rPr')
        # Apply styles
        if 'b' in char_styles_str:
            b = makeelement('b')
            rPr.append(b)
        if 'i' in char_styles_str:
            i = makeelement('i')
            rPr.append(i)
        if 'u' in char_styles_str:
            u = makeelement('u', attributes={'val': 'single'})
            rPr.append(u)
        run.append(rPr)
        # Insert lastRenderedPageBreak for assistive technologies like
        # document narrators to know when a page break occurred.
        if breakbefore:
            lastRenderedPageBreak = makeelement('lastRenderedPageBreak')
            run.append(lastRenderedPageBreak)
        run.append(text_elm)
        paragraph.append(run)
    # Return the combined paragraph
    return paragraph
def contenttypes():
    '''Return a [Content_Types].xml element declaring every package part
    and file-extension default used by the generated document.'''
    types = etree.fromstring(
        '<Types xmlns="http://schemas.openxmlformats.org/package/2006/conten'
        't-types"></Types>')
    # Per-part content-type overrides
    overrides = {
        '/word/theme/theme1.xml':
            'application/vnd.openxmlformats-officedocument.theme+xml',
        '/word/fontTable.xml':
            'application/vnd.openxmlformats-officedocument.wordprocessingml'
            '.fontTable+xml',
        '/docProps/core.xml':
            'application/vnd.openxmlformats-package.core-properties+xml',
        '/docProps/app.xml':
            'application/vnd.openxmlformats-officedocument.extended-propert'
            'ies+xml',
        '/word/document.xml':
            'application/vnd.openxmlformats-officedocument.wordprocessingml'
            '.document.main+xml',
        '/word/settings.xml':
            'application/vnd.openxmlformats-officedocument.wordprocessingml'
            '.settings+xml',
        '/word/numbering.xml':
            'application/vnd.openxmlformats-officedocument.wordprocessingml'
            '.numbering+xml',
        '/word/styles.xml':
            'application/vnd.openxmlformats-officedocument.wordprocessingml'
            '.styles+xml',
        '/word/webSettings.xml':
            'application/vnd.openxmlformats-officedocument.wordprocessingml'
            '.webSettings+xml',
    }
    for partname, contenttype in overrides.items():
        types.append(makeelement('Override', nsprefix=None,
                                 attributes={'PartName': partname,
                                             'ContentType': contenttype}))
    # Per-extension defaults
    filetypes = {
        'gif': 'image/gif',
        'jpeg': 'image/jpeg',
        'jpg': 'image/jpeg',
        'png': 'image/png',
        'rels': 'application/vnd.openxmlformats-package.relationships+xml',
        'xml': 'application/xml',
    }
    for extension, contenttype in filetypes.items():
        types.append(makeelement('Default', nsprefix=None,
                                 attributes={'Extension': extension,
                                             'ContentType': contenttype}))
    return types
def heading(headingtext, headinglevel, lang='en'):
    '''Make a new heading paragraph and return it.

    The paragraph style is the localized heading name ('Heading'/'Titolo')
    with the level number appended.
    '''
    lmap = {'en': 'Heading', 'it': 'Titolo'}
    stylename = lmap[lang] + str(headinglevel)
    paragraph = makeelement('p')
    pr = makeelement('pPr')
    pr.append(makeelement('pStyle', attributes={'val': stylename}))
    run = makeelement('r')
    run.append(makeelement('t', tagtext=headingtext))
    paragraph.append(pr)
    paragraph.append(run)
    return paragraph
def table(contents, heading=True, colw=None, cwunit='dxa', tblw=0,
          twunit='auto', borders={}, celstyle=None):
    """
    Return a table element based on specified parameters
    @param list contents: A list of lists describing contents. Every item in
                          the list can be a string or a valid XML element
                          itself. It can also be a list. In that case all the
                          listed elements will be merged into the cell.
    @param bool heading:  Tells whether first line should be treated as
                          heading or not
    @param list colw:     list of integer column widths specified in wunitS.
    @param str cwunit:    Unit used for column width:
                            'pct'  : fiftieths of a percent
                            'dxa'  : twentieths of a point
                            'nil'  : no width
                            'auto' : automagically determined
    @param int tblw:      Table width
    @param str twunit:    Unit used for table width. Same possible values as
                          cwunit.
    @param dict borders:  Dictionary defining table border. Supported keys
                          are: 'top', 'left', 'bottom', 'right',
                          'insideH', 'insideV', 'all'.
                          When specified, the 'all' key has precedence over
                          others. Each key must define a dict of border
                          attributes:
                            color : The color of the border, in hex or
                                    'auto'
                            space : The space, measured in points
                            sz    : The size of the border, in eighths of
                                    a point
                            val   : The style of the border, see
                            http://www.schemacentral.com/sc/ooxml/t-w_ST_Border.htm
                          NOTE: the default {} is a mutable default
                          argument; it is never mutated here, so this is
                          benign, but don't start mutating it.
    @param list celstyle: Specify the style for each colum, list of dicts.
                          supported keys:
                          'align' : specify the alignment, see paragraph
                                    documentation.
    @return lxml.etree:   Generated XML etree element
    """
    table = makeelement('tbl')
    # Column count is taken from the first row; all rows are assumed to
    # have the same number of cells.
    columns = len(contents[0])
    # Table properties
    tableprops = makeelement('tblPr')
    tablestyle = makeelement('tblStyle', attributes={'val': ''})
    tableprops.append(tablestyle)
    tablewidth = makeelement(
        'tblW', attributes={'w': str(tblw), 'type': str(twunit)})
    tableprops.append(tablewidth)
    if len(borders.keys()):
        tableborders = makeelement('tblBorders')
        for b in ['top', 'left', 'bottom', 'right', 'insideH', 'insideV']:
            if b in borders.keys() or 'all' in borders.keys():
                # 'all' overrides any per-side definition
                k = 'all' if 'all' in borders.keys() else b
                attrs = {}
                for a in borders[k].keys():
                    attrs[a] = unicode(borders[k][a])
                borderelem = makeelement(b, attributes=attrs)
                tableborders.append(borderelem)
        tableprops.append(tableborders)
    tablelook = makeelement('tblLook', attributes={'val': '0400'})
    tableprops.append(tablelook)
    table.append(tableprops)
    # Table Grid
    tablegrid = makeelement('tblGrid')
    for i in range(columns):
        # '2390' (dxa) is the fallback column width when none is given
        attrs = {'w': str(colw[i]) if colw else '2390'}
        tablegrid.append(makeelement('gridCol', attributes=attrs))
    table.append(tablegrid)
    # Heading Row
    row = makeelement('tr')
    rowprops = makeelement('trPr')
    cnfStyle = makeelement('cnfStyle', attributes={'val': '000000100000'})
    rowprops.append(cnfStyle)
    row.append(rowprops)
    if heading:
        i = 0
        for heading in contents[0]:
            cell = makeelement('tc')
            # Cell properties
            cellprops = makeelement('tcPr')
            if colw:
                wattr = {'w': str(colw[i]), 'type': cwunit}
            else:
                wattr = {'w': '0', 'type': 'auto'}
            cellwidth = makeelement('tcW', attributes=wattr)
            cellstyle = makeelement('shd', attributes={'val': 'clear',
                                                       'color': 'auto',
                                                       'fill': 'FFFFFF',
                                                       'themeFill': 'text2',
                                                       'themeFillTint': '99'})
            cellprops.append(cellwidth)
            cellprops.append(cellstyle)
            cell.append(cellprops)
            # Paragraph (Content)
            if not isinstance(heading, (list, tuple)):
                heading = [heading]
            for h in heading:
                # Pre-built etree elements go in as-is; strings become
                # centered paragraphs
                if isinstance(h, etree._Element):
                    cell.append(h)
                else:
                    cell.append(paragraph(h, jc='center'))
            row.append(cell)
            i += 1
        table.append(row)
    # Contents Rows
    for contentrow in contents[1 if heading else 0:]:
        row = makeelement('tr')
        i = 0
        for content in contentrow:
            cell = makeelement('tc')
            # Properties
            cellprops = makeelement('tcPr')
            if colw:
                wattr = {'w': str(colw[i]), 'type': cwunit}
            else:
                wattr = {'w': '0', 'type': 'auto'}
            cellwidth = makeelement('tcW', attributes=wattr)
            cellprops.append(cellwidth)
            cell.append(cellprops)
            # Paragraph (Content)
            if not isinstance(content, (list, tuple)):
                content = [content]
            for c in content:
                if isinstance(c, etree._Element):
                    cell.append(c)
                else:
                    # Per-column alignment from celstyle, default left
                    if celstyle and 'align' in celstyle[i].keys():
                        align = celstyle[i]['align']
                    else:
                        align = 'left'
                    cell.append(paragraph(c, jc=align))
            row.append(cell)
            i += 1
        table.append(row)
    return table
def picture(
        relationshiplist, picname, picdescription, pixelwidth=None,
        pixelheight=None, nochangeaspect=True, nochangearrowheads=True,
        imagefiledict=None):
    """
    Take a relationshiplist, picture file name, and return a paragraph
    containing the image and an updated relationshiplist.

    When *imagefiledict* (path -> rId mapping) is given, the image file is
    only recorded for later packaging by savedocx(); otherwise it is copied
    into the template directory (legacy behavior, pending deprecation).
    EXIF orientation, if present, is honored via rotation/flip attributes.
    """
    if imagefiledict is None:
        warn(
            'Using picture() without imagefiledict parameter will be depreca'
            'ted in the future.', PendingDeprecationWarning
        )
    # http://openxmldeveloper.org/articles/462.aspx
    # Create an image. Size may be specified, otherwise it will based on the
    # pixel size of image. Return a paragraph containing the picture
    # Set relationship ID to that of the image or the first available one
    picid = '2'
    picpath = abspath(picname)
    if imagefiledict is not None:
        # Keep track of the image files in a separate dictionary so they don't
        # need to be copied into the template directory
        if picpath not in imagefiledict:
            picrelid = 'rId' + str(len(relationshiplist) + 1)
            imagefiledict[picpath] = picrelid
            relationshiplist.append([
                'http://schemas.openxmlformats.org/officeDocument/2006/relat'
                'ionships/image',
                'media/%s_%s' % (picrelid, basename(picpath))
            ])
        else:
            # Image already registered: reuse its relationship id
            picrelid = imagefiledict[picpath]
    else:
        # Copy files into template directory for backwards compatibility
        # Images still accumulate in the template directory this way
        picrelid = 'rId' + str(len(relationshiplist) + 1)
        relationshiplist.append([
            'http://schemas.openxmlformats.org/officeDocument/2006/relations'
            'hips/image', 'media/' + picname
        ])
        media_dir = join(template_dir, 'word', 'media')
        if not os.path.isdir(media_dir):
            os.mkdir(media_dir)
        shutil.copyfile(picname, join(media_dir, picname))

    image = Image.open(picpath)

    # Extract EXIF data, if available
    try:
        exif = image._getexif()
        exif = {} if exif is None else exif
    except:
        # Best-effort: images without EXIF support simply get no rotation
        exif = {}
    imageExif = {}
    for tag, value in exif.items():
        imageExif[TAGS.get(tag, tag)] = value
    # Map the 8 EXIF orientation values onto rotation angle + flips
    imageOrientation = imageExif.get('Orientation', 1)
    imageAngle = {
        1: 0, 2: 0, 3: 180, 4: 0, 5: 90, 6: 90, 7: 270, 8: 270
    }[imageOrientation]
    imageFlipH = 'true' if imageOrientation in (2, 5, 7) else 'false'
    imageFlipV = 'true' if imageOrientation == 4 else 'false'

    # Check if the user has specified a size
    if not pixelwidth or not pixelheight:
        # If not, get info from the picture itself
        pixelwidth, pixelheight = image.size[0:2]
        # Swap width and height if necessary (rotated orientations)
        if imageOrientation in (5, 6, 7, 8):
            pixelwidth, pixelheight = pixelheight, pixelwidth

    # OpenXML measures on-screen objects in English Metric Units:
    # 914400 EMU per inch; each pixel is treated as one point (12700 EMU)
    emuperpixel = 12700
    width = str(pixelwidth * emuperpixel)
    height = str(pixelheight * emuperpixel)

    # There are 3 main elements inside a picture
    # 1. The Blipfill - specifies how the image fills the picture area
    #    (stretch, tile, etc.)
    blipfill = makeelement('blipFill', nsprefix='pic')
    blipfill.append(makeelement('blip', nsprefix='a', attrnsprefix='r',
                    attributes={'embed': picrelid}))
    stretch = makeelement('stretch', nsprefix='a')
    stretch.append(makeelement('fillRect', nsprefix='a'))
    blipfill.append(makeelement('srcRect', nsprefix='a'))
    blipfill.append(stretch)

    # 2. The non visual picture properties
    nvpicpr = makeelement('nvPicPr', nsprefix='pic')
    cnvpr = makeelement(
        'cNvPr', nsprefix='pic',
        attributes={'id': '0', 'name': 'Picture 1', 'descr': picdescription}
    )
    nvpicpr.append(cnvpr)
    cnvpicpr = makeelement('cNvPicPr', nsprefix='pic')
    cnvpicpr.append(makeelement(
        'picLocks', nsprefix='a',
        attributes={'noChangeAspect': str(int(nochangeaspect)),
                    'noChangeArrowheads': str(int(nochangearrowheads))}))
    nvpicpr.append(cnvpicpr)

    # 3. The Shape properties (rotation/flip from EXIF, offset, extent)
    sppr = makeelement('spPr', nsprefix='pic', attributes={'bwMode': 'auto'})
    xfrm = makeelement(
        'xfrm', nsprefix='a', attributes={
            'rot': str(imageAngle * 60000), 'flipH': imageFlipH,
            'flipV': imageFlipV
        }
    )
    xfrm.append(
        makeelement('off', nsprefix='a', attributes={'x': '0', 'y': '0'})
    )
    xfrm.append(
        makeelement(
            'ext', nsprefix='a', attributes={'cx': width, 'cy': height}
        )
    )
    prstgeom = makeelement(
        'prstGeom', nsprefix='a', attributes={'prst': 'rect'}
    )
    prstgeom.append(makeelement('avLst', nsprefix='a'))
    sppr.append(xfrm)
    sppr.append(prstgeom)

    # Add our 3 parts to the picture element
    pic = makeelement('pic', nsprefix='pic')
    pic.append(nvpicpr)
    pic.append(blipfill)
    pic.append(sppr)

    # Now make the supporting elements
    # The following sequence is just: make element, then add its children
    graphicdata = makeelement(
        'graphicData', nsprefix='a',
        attributes={'uri': ('http://schemas.openxmlformats.org/drawingml/200'
                            '6/picture')})
    graphicdata.append(pic)
    graphic = makeelement('graphic', nsprefix='a')
    graphic.append(graphicdata)

    framelocks = makeelement('graphicFrameLocks', nsprefix='a',
                             attributes={'noChangeAspect': '1'})
    framepr = makeelement('cNvGraphicFramePr', nsprefix='wp')
    framepr.append(framelocks)
    docpr = makeelement('docPr', nsprefix='wp',
                        attributes={'id': picid, 'name': 'Picture 1',
                                    'descr': picdescription})
    effectextent = makeelement('effectExtent', nsprefix='wp',
                               attributes={'l': '25400', 't': '0', 'r': '0',
                                           'b': '0'})
    extent = makeelement('extent', nsprefix='wp',
                         attributes={'cx': width, 'cy': height})
    inline = makeelement('inline', attributes={'distT': "0", 'distB': "0",
                                               'distL': "0", 'distR': "0"},
                         nsprefix='wp')
    inline.append(extent)
    inline.append(effectextent)
    inline.append(docpr)
    inline.append(framepr)
    inline.append(graphic)
    drawing = makeelement('drawing')
    drawing.append(inline)
    run = makeelement('r')
    run.append(drawing)
    paragraph = makeelement('p')
    paragraph.append(run)
    if imagefiledict is not None:
        return relationshiplist, paragraph, imagefiledict
    else:
        return relationshiplist, paragraph
def search(document, search):
    '''Return True if the regex *search* matches any text node in *document*.'''
    pattern = re.compile(search)
    text_tag = '{%s}t' % nsprefixes['w']
    for node in document.iter():
        # Only w:t nodes carry document text
        if node.tag != text_tag or not node.text:
            continue
        if pattern.search(node.text):
            return True
    return False
def replace(document, search, replace):
    """
    Substitute every regex match of *search* with *replace* inside all
    text (w:t) nodes of *document*; return the (mutated) document.
    """
    pattern = re.compile(search)
    text_tag = '{%s}t' % nsprefixes['w']
    for node in document.iter():
        if node.tag != text_tag or not node.text:
            continue
        if pattern.search(node.text):
            node.text = re.sub(search, replace, node.text)
    return document
def findTypeParent(element, tag):
    """ Finds first parent of element of the given type
    @param object element: etree element
    @param string tag: the fully-qualified tag name to search for
    @return object element: the found parent or None when not found

    Bug fix: the original looped unconditionally, so when no ancestor
    matched it dereferenced ``.tag`` on the ``None`` returned by the root
    element's ``getparent()`` and crashed with AttributeError; the trailing
    ``return None`` was unreachable.
    """
    p = element.getparent()
    while p is not None:
        if p.tag == tag:
            return p
        p = p.getparent()
    # Walked off the top of the tree without finding a match
    return None
def AdvSearch(document, search, bs=3):
    '''Return set of all regex matches
    This is an advanced version of python-docx.search() that takes into
    account blocks of <bs> elements at a time.
    What it does:
    It searches the entire document body for text blocks.
    Since the text to search could be spawned across multiple text blocks,
    we need to adopt some sort of algorithm to handle this situation.
    The smaller matching group of blocks (up to bs) is then adopted.
    If the matching group has more than one block, blocks other than first
    are cleared and all the replacement text is put on first block.
    Examples:
    original text blocks : [ 'Hel', 'lo,', ' world!' ]
    search : 'Hello,'
    output blocks : [ 'Hello,' ]
    original text blocks : [ 'Hel', 'lo', ' __', 'name', '__!' ]
    search : '(__[a-z]+__)'
    output blocks : [ '__name__' ]
    @param instance document: The original document
    @param str search: The text to search for (regexp)
    @param int bs: sliding-window size: how many consecutive w:t elements
                   are concatenated when looking for a match
    @return set All occurences of search string
    '''
    # Compile the search regexp
    searchre = re.compile(search)
    matches = []
    # Will match against searchels. Searchels is a list that contains last
    # n text elements found in the document. 1 < n < bs
    searchels = []
    for element in document.iter():
        if element.tag == '{%s}t' % nsprefixes['w']: # t (text) elements
            if element.text:
                # Add this element to searchels
                searchels.append(element)
                if len(searchels) > bs:
                    # If searchels is too long, drop the oldest element so
                    # the window never exceeds bs entries
                    searchels.pop(0)
                # Search all combinations of searchels, starting from
                # smaller up to bigger ones
                # l = search length (number of elements concatenated)
                # s = search start (index of first element in the window)
                # e = element IDs to merge
                found = False
                for l in range(1, len(searchels)+1):
                    if found:
                        break
                    for s in range(len(searchels)):
                        if found:
                            break
                        if s+l <= len(searchels):
                            e = range(s, s+l)
                            txtsearch = ''
                            for k in e:
                                txtsearch += searchels[k].text
                            # Search for the pattern in the concatenated text
                            match = searchre.search(txtsearch)
                            if match:
                                matches.append(match.group())
                                found = True
    return set(matches)
def advReplace(document, search, replace, bs=3):
    """
    Replace all occurences of string with a different string, return updated
    document
    This is a modified version of python-docx.replace() that takes into
    account blocks of <bs> elements at a time. The replace element can also
    be a string or an xml etree element.
    What it does:
    It searches the entire document body for text blocks.
    Then scan thos text blocks for replace.
    Since the text to search could be spawned across multiple text blocks,
    we need to adopt some sort of algorithm to handle this situation.
    The smaller matching group of blocks (up to bs) is then adopted.
    If the matching group has more than one block, blocks other than first
    are cleared and all the replacement text is put on first block.
    Examples:
    original text blocks : [ 'Hel', 'lo,', ' world!' ]
    search / replace: 'Hello,' / 'Hi!'
    output blocks : [ 'Hi!', '', ' world!' ]
    original text blocks : [ 'Hel', 'lo,', ' world!' ]
    search / replace: 'Hello, world' / 'Hi!'
    output blocks : [ 'Hi!!', '', '' ]
    original text blocks : [ 'Hel', 'lo,', ' world!' ]
    search / replace: 'Hel' / 'Hal'
    output blocks : [ 'Hal', 'lo,', ' world!' ]
    @param instance document: The original document
    @param str search: The text to search for (regexp)
    @param mixed replace: The replacement text or lxml.etree element to
                          append, or a list of etree elements
    @param int bs: See above
    @return instance The document with replacement applied
    """
    # Enables debug output
    DEBUG = False
    newdocument = document
    # Compile the search regexp
    searchre = re.compile(search)
    # Will match against searchels. Searchels is a list that contains last
    # n text elements found in the document. 1 < n < bs
    searchels = []
    for element in newdocument.iter():
        if element.tag == '{%s}t' % nsprefixes['w']: # t (text) elements
            if element.text:
                # Add this element to searchels
                searchels.append(element)
                if len(searchels) > bs:
                    # If searchels is too long, drop the oldest element
                    searchels.pop(0)
                # Search all combinations of searchels, starting from
                # smaller up to bigger ones
                # l = search length
                # s = search start
                # e = element IDs to merge
                found = False
                for l in range(1, len(searchels)+1):
                    if found:
                        break
                    for s in range(len(searchels)):
                        if found:
                            break
                        if s+l <= len(searchels):
                            e = range(s, s+l)
                            txtsearch = ''
                            for k in e:
                                txtsearch += searchels[k].text
                            # Search for the text in the whole txtsearch
                            match = searchre.search(txtsearch)
                            if match:
                                found = True
                                # I've found something :)
                                if DEBUG:
                                    log.debug("Found element!")
                                    log.debug("Search regexp: %s",
                                              searchre.pattern)
                                    log.debug("Requested replacement: %s",
                                              replace)
                                    log.debug("Matched text: %s", txtsearch)
                                    log.debug("Matched text (splitted): %s",
                                              map(lambda i: i.text, searchels))
                                    log.debug("Matched at position: %s",
                                              match.start())
                                    log.debug("matched in elements: %s", e)
                                    if isinstance(replace, etree._Element):
                                        log.debug("Will replace with XML CODE")
                                    # Bug fix: the original wrote
                                    # ``isinstance(replace(list, tuple))``,
                                    # which CALLED replace instead of
                                    # type-testing it (TypeError at runtime).
                                    elif isinstance(replace, (list, tuple)):
                                        log.debug("Will replace with LIST OF"
                                                  " ELEMENTS")
                                    else:
                                        # Bug fix: original passed the
                                        # substitution result as a positional
                                        # logging arg with no %s placeholder.
                                        log.debug("Will replace with: %s",
                                                  re.sub(search, replace,
                                                         txtsearch))
                                curlen = 0
                                replaced = False
                                for i in e:
                                    curlen += len(searchels[i].text)
                                    if curlen > match.start() and not replaced:
                                        # The match occurred in THIS element.
                                        # Put in the whole replaced text
                                        if isinstance(replace, etree._Element):
                                            # Convert to a list and process
                                            # it later
                                            replace = [replace]
                                        if isinstance(replace, (list, tuple)):
                                            # I'm replacing with a list of
                                            # etree elements
                                            # clear the text in the tag and
                                            # append the element after the
                                            # parent paragraph
                                            # (because t elements cannot have
                                            # childs)
                                            p = findTypeParent(
                                                searchels[i],
                                                '{%s}p' % nsprefixes['w'])
                                            searchels[i].text = re.sub(
                                                search, '', txtsearch)
                                            insindex = p.getparent().index(p)+1
                                            for r in replace:
                                                p.getparent().insert(
                                                    insindex, r)
                                                insindex += 1
                                        else:
                                            # Replacing with pure text
                                            searchels[i].text = re.sub(
                                                search, replace, txtsearch)
                                        replaced = True
                                        log.debug(
                                            "Replacing in element #: %s", i)
                                    else:
                                        # Clears the other text elements
                                        searchels[i].text = ''
    return newdocument
def getdocumenttext(document):
    '''Return the raw text of a document, as a list of paragraphs.

    @param instance document: etree document element (as returned by
                              opendocx() / newdocument())
    @return list: one unicode string per non-empty paragraph; tab elements
                  are rendered as a literal tab character
    '''
    paratextlist = []
    # Compile a list of all paragraph (p) elements
    paralist = []
    for element in document.iter():
        # Find p (paragraph) elements
        if element.tag == '{'+nsprefixes['w']+'}p':
            paralist.append(element)
    # Since a single sentence might be spread over multiple text elements,
    # iterate through each paragraph, appending all text (t) children to that
    # paragraphs text.
    for para in paralist:
        paratext = u''
        # Loop through each paragraph
        for element in para.iter():
            # Find t (text) elements
            if element.tag == '{'+nsprefixes['w']+'}t':
                if element.text:
                    paratext = paratext+element.text
            elif element.tag == '{'+nsprefixes['w']+'}tab':
                paratext = paratext + '\t'
        # Add our completed paragraph text to the list of paragraph text
        # (paragraphs that contain no text at all are skipped)
        if not len(paratext) == 0:
            paratextlist.append(paratext)
    return paratextlist
def coreproperties(title, subject, creator, keywords, lastmodifiedby=None):
    """
    Create core properties (common document properties referred to in the
    'Dublin Core' specification). See appproperties() for other stuff.

    @param str title: document title (dc:title)
    @param str subject: document subject (dc:subject)
    @param str creator: document author (dc:creator)
    @param list keywords: keyword strings, joined with ',' into cp:keywords
    @param str lastmodifiedby: defaults to *creator* when not given
    @return instance: cp:coreProperties etree element; created/modified
                      timestamps are set to the current time in W3CDTF form
    """
    coreprops = makeelement('coreProperties', nsprefix='cp')
    coreprops.append(makeelement('title', tagtext=title, nsprefix='dc'))
    coreprops.append(makeelement('subject', tagtext=subject, nsprefix='dc'))
    coreprops.append(makeelement('creator', tagtext=creator, nsprefix='dc'))
    coreprops.append(makeelement('keywords', tagtext=','.join(keywords),
                                 nsprefix='cp'))
    if not lastmodifiedby:
        lastmodifiedby = creator
    coreprops.append(makeelement('lastModifiedBy', tagtext=lastmodifiedby,
                                 nsprefix='cp'))
    coreprops.append(makeelement('revision', tagtext='1', nsprefix='cp'))
    coreprops.append(
        makeelement('category', tagtext='Examples', nsprefix='cp'))
    coreprops.append(
        makeelement('description', tagtext='Examples', nsprefix='dc'))
    currenttime = time.strftime('%Y-%m-%dT%H:%M:%SZ')
    # Document creation and modify times
    # Prob here: we have an attribute who name uses one namespace, and that
    # attribute's value uses another namespace.
    # We're creating the element from a string as a workaround...
    for doctime in ['created', 'modified']:
        elm_str = (
            '<dcterms:%s xmlns:xsi="http://www.w3.org/2001/XMLSchema-instanc'
            'e" xmlns:dcterms="http://purl.org/dc/terms/" xsi:type="dcterms:'
            'W3CDTF">%s</dcterms:%s>'
        ) % (doctime, currenttime, doctime)
        coreprops.append(etree.fromstring(elm_str))
    return coreprops
def appproperties():
    """
    Create app-specific properties. See docproperties() for more common
    document properties.

    @return instance: an extended-properties <Properties> element populated
        with static placeholder statistics (word/character counts etc. are
        NOT computed from the actual document content).
    """
    # Build the root directly from a string: it must declare the 'vt'
    # namespace, which makeelement() cannot express. (A previous revision
    # first built the element with makeelement() and then immediately
    # discarded it — that dead assignment has been removed.)
    appprops = etree.fromstring(
        '<?xml version="1.0" encoding="UTF-8" standalone="yes"?><Properties x'
        'mlns="http://schemas.openxmlformats.org/officeDocument/2006/extended'
        '-properties" xmlns:vt="http://schemas.openxmlformats.org/officeDocum'
        'ent/2006/docPropsVTypes"></Properties>')
    props =\
        {'Template': 'Normal.dotm',
         'TotalTime': '6',
         'Pages': '1',
         'Words': '83',
         'Characters': '475',
         'Application': 'Microsoft Word 12.0.0',
         'DocSecurity': '0',
         'Lines': '12',
         'Paragraphs': '8',
         'ScaleCrop': 'false',
         'LinksUpToDate': 'false',
         'CharactersWithSpaces': '583',
         'SharedDoc': 'false',
         'HyperlinksChanged': 'false',
         'AppVersion': '12.0000'}
    for prop in props:
        appprops.append(makeelement(prop, tagtext=props[prop], nsprefix=None))
    return appprops
def websettings():
    '''Build and return the w:webSettings element for the package.'''
    settings = makeelement('webSettings')
    for childname in ('allowPNG', 'doNotSaveAsSingleFile'):
        settings.append(makeelement(childname))
    return settings
def relationshiplist():
    '''Return the default document relationships: a list of
    [relationship-type URI, target part] pairs for the standard parts
    every generated package contains.'''
    base = ('http://schemas.openxmlformats.org/officeDocument/2006/'
            'relationships/')
    standard_parts = (
        ('numbering', 'numbering.xml'),
        ('styles', 'styles.xml'),
        ('settings', 'settings.xml'),
        ('webSettings', 'webSettings.xml'),
        ('fontTable', 'fontTable.xml'),
        ('theme', 'theme/theme1.xml'),
    )
    return [[base + reltype, target] for reltype, target in standard_parts]
def wordrelationships(relationshiplist):
    '''Generate a Word relationships part from [type, target] pairs.'''
    # FIXME: using string hack instead of making element
    #relationships = makeelement('Relationships', nsprefix='pr')
    relationships = etree.fromstring(
        '<Relationships xmlns="http://schemas.openxmlformats.org/package/2006'
        '/relationships"></Relationships>')
    # Relationship IDs (rId) are 1-based.
    for idx, relationship in enumerate(relationshiplist, start=1):
        relationships.append(makeelement(
            'Relationship', nsprefix=None,
            attributes={'Id': 'rId%d' % idx,
                        'Type': relationship[0],
                        'Target': relationship[1]}))
    return relationships
def savedocx(
        document, coreprops, appprops, contenttypes, websettings,
        wordrelationships, output, imagefiledict=None):
    """
    Save a modified document

    Serializes the six XML trees into a new .docx (zip) archive at *output*,
    adds any images referenced in *imagefiledict* (path -> rId), then copies
    every support file from the template directory into the archive.

    NOTE(review): this changes the process working directory via os.chdir()
    for the duration of the template walk, so it is not safe to call
    concurrently from multiple threads. The template_dir assert is stripped
    when Python runs with -O.
    """
    if imagefiledict is None:
        warn(
            'Using savedocx() without imagefiledict parameter will be deprec'
            'ated in the future.', PendingDeprecationWarning
        )
    assert os.path.isdir(template_dir)
    docxfile = zipfile.ZipFile(
        output, mode='w', compression=zipfile.ZIP_DEFLATED)
    # Move to the template data path
    prev_dir = os.path.abspath('.')  # save previous working dir
    os.chdir(template_dir)
    # Serialize our trees into out zip file
    treesandfiles = {
        document: 'word/document.xml',
        coreprops: 'docProps/core.xml',
        appprops: 'docProps/app.xml',
        contenttypes: '[Content_Types].xml',
        websettings: 'word/webSettings.xml',
        wordrelationships: 'word/_rels/document.xml.rels'
    }
    for tree in treesandfiles:
        log.info('Saving: %s' % treesandfiles[tree])
        treestring = etree.tostring(tree, pretty_print=True)
        docxfile.writestr(treesandfiles[tree], treestring)
    # Add & compress images, if applicable
    if imagefiledict is not None:
        for imagepath, picrelid in imagefiledict.items():
            archivename = 'word/media/%s_%s' % (picrelid, basename(imagepath))
            log.info('Saving: %s', archivename)
            docxfile.write(imagepath, archivename)
    # Add & compress support files
    files_to_ignore = ['.DS_Store']  # nuisance from some os's
    for dirpath, dirnames, filenames in os.walk('.'):
        for filename in filenames:
            if filename in files_to_ignore:
                continue
            templatefile = join(dirpath, filename)
            # Strip the leading './' so archive paths are package-relative
            archivename = templatefile[2:]
            log.info('Saving: %s', archivename)
            docxfile.write(templatefile, archivename)
    log.info('Saved new file to: %r', output)
    docxfile.close()
    os.chdir(prev_dir)  # restore previous working dir
    return
|
mikemaccana/python-docx
|
docx.py
|
findTypeParent
|
python
|
def findTypeParent(element, tag):
p = element
while True:
p = p.getparent()
if p.tag == tag:
return p
# Not found
return None
|
Finds fist parent of element of the given type
@param object element: etree element
@param string the tag parent to search for
@return object element: the found parent or None when not found
|
train
|
https://github.com/mikemaccana/python-docx/blob/4c9b46dbebe3d2a9b82dbcd35af36584a36fd9fe/docx.py#L664-L680
| null |
# encoding: utf-8
"""
Open and modify Microsoft Word 2007 docx files (called 'OpenXML' and
'Office OpenXML' by Microsoft)
Part of Python's docx module - http://github.com/mikemaccana/python-docx
See LICENSE for licensing information.
"""
import os
import re
import time
import shutil
import zipfile
from lxml import etree
from os.path import abspath, basename, join
try:
from PIL import Image
except ImportError:
import Image
try:
from PIL.ExifTags import TAGS
except ImportError:
TAGS = {}
from exceptions import PendingDeprecationWarning
from warnings import warn
import logging
log = logging.getLogger(__name__)
# Record template directory's location which is just 'template' for a docx
# developer or 'site-packages/docx-template' if you have installed docx
template_dir = join(os.path.dirname(__file__), 'docx-template') # installed
if not os.path.isdir(template_dir):
template_dir = join(os.path.dirname(__file__), 'template') # dev
# All Word prefixes / namespace matches used in document.xml & core.xml.
# LXML doesn't actually use prefixes (just the real namespace) , but these
# make it easier to copy Word output more easily.
nsprefixes = {
'mo': 'http://schemas.microsoft.com/office/mac/office/2008/main',
'o': 'urn:schemas-microsoft-com:office:office',
've': 'http://schemas.openxmlformats.org/markup-compatibility/2006',
# Text Content
'w': 'http://schemas.openxmlformats.org/wordprocessingml/2006/main',
'w10': 'urn:schemas-microsoft-com:office:word',
'wne': 'http://schemas.microsoft.com/office/word/2006/wordml',
# Drawing
'a': 'http://schemas.openxmlformats.org/drawingml/2006/main',
'm': 'http://schemas.openxmlformats.org/officeDocument/2006/math',
'mv': 'urn:schemas-microsoft-com:mac:vml',
'pic': 'http://schemas.openxmlformats.org/drawingml/2006/picture',
'v': 'urn:schemas-microsoft-com:vml',
'wp': ('http://schemas.openxmlformats.org/drawingml/2006/wordprocessing'
'Drawing'),
# Properties (core and extended)
'cp': ('http://schemas.openxmlformats.org/package/2006/metadata/core-pr'
'operties'),
'dc': 'http://purl.org/dc/elements/1.1/',
'ep': ('http://schemas.openxmlformats.org/officeDocument/2006/extended-'
'properties'),
'xsi': 'http://www.w3.org/2001/XMLSchema-instance',
# Content Types
'ct': 'http://schemas.openxmlformats.org/package/2006/content-types',
# Package Relationships
'r': ('http://schemas.openxmlformats.org/officeDocument/2006/relationsh'
'ips'),
'pr': 'http://schemas.openxmlformats.org/package/2006/relationships',
# Dublin Core document properties
'dcmitype': 'http://purl.org/dc/dcmitype/',
'dcterms': 'http://purl.org/dc/terms/'}
def opendocx(file):
    '''Open a docx file, return a document XML tree.

    @param file: path or file-like object accepted by zipfile.ZipFile
    @return instance: parsed etree root of word/document.xml

    Fix: the original never closed the ZipFile, leaking the file handle;
    a context manager now guarantees it is released.
    '''
    with zipfile.ZipFile(file) as package:
        xmlcontent = package.read('word/document.xml')
    return etree.fromstring(xmlcontent)
def newdocument():
    '''Return a minimal document tree: a w:document wrapping an empty w:body.'''
    root = makeelement('document')
    body = makeelement('body')
    root.append(body)
    return root
def makeelement(tagname, tagtext=None, nsprefix='w', attributes=None,
                attrnsprefix=None):
    '''Create an element & return it.

    @param str tagname: local tag name (without namespace)
    @param str tagtext: optional element text
    @param nsprefix: key into the module-level nsprefixes map, a list of
                     such keys (only the first is used for the tag itself),
                     or None for no namespace
    @param dict attributes: attribute name -> value pairs
    @param str attrnsprefix: namespace prefix applied to attribute names;
                             when omitted, 'w'-prefixed tags reuse their own
                             namespace and everything else gets none
    '''
    # Deal with list of nsprefix by making namespacemap
    namespacemap = None
    if isinstance(nsprefix, list):
        namespacemap = {}
        for prefix in nsprefix:
            namespacemap[prefix] = nsprefixes[prefix]
        # FIXME: rest of code below expects a single prefix
        nsprefix = nsprefix[0]
    if nsprefix:
        namespace = '{%s}' % nsprefixes[nsprefix]
    else:
        # For when namespace = None
        namespace = ''
    newelement = etree.Element(namespace+tagname, nsmap=namespacemap)
    # Add attributes with namespaces
    if attributes:
        # If they haven't bothered setting attribute namespace, use an empty
        # string (equivalent of no namespace)
        if not attrnsprefix:
            # Quick hack: it seems every element that has a 'w' nsprefix for
            # its tag uses the same prefix for it's attributes
            if nsprefix == 'w':
                attributenamespace = namespace
            else:
                attributenamespace = ''
        else:
            attributenamespace = '{'+nsprefixes[attrnsprefix]+'}'
        for tagattribute in attributes:
            newelement.set(attributenamespace+tagattribute,
                           attributes[tagattribute])
    if tagtext:
        newelement.text = tagtext
    return newelement
def pagebreak(type='page', orient='portrait'):
    '''Insert a break, default 'page'.
    See http://openxmldeveloper.org/forums/thread/4075.aspx
    Return our page break element.

    @param str type: 'page' or 'section'
    @param str orient: for section breaks, 'portrait' or 'landscape'
    @raise ValueError: for an unknown *type* or *orient*. (Previously an
        unknown orient crashed with UnboundLocalError because pgSz was
        never assigned.)
    '''
    # Need to enumerate different types of page breaks.
    validtypes = ['page', 'section']
    if type not in validtypes:
        tmpl = 'Page break style "%s" not implemented. Valid styles: %s.'
        raise ValueError(tmpl % (type, validtypes))
    pagebreak = makeelement('p')
    if type == 'page':
        run = makeelement('r')
        br = makeelement('br', attributes={'type': type})
        run.append(br)
        pagebreak.append(run)
    elif type == 'section':
        pPr = makeelement('pPr')
        sectPr = makeelement('sectPr')
        if orient == 'portrait':
            pgSz = makeelement('pgSz', attributes={'w': '12240', 'h': '15840'})
        elif orient == 'landscape':
            pgSz = makeelement('pgSz', attributes={'h': '12240', 'w': '15840',
                                                   'orient': 'landscape'})
        else:
            tmpl = ('Page orientation "%s" not implemented. Valid '
                    'orientations: %s.')
            raise ValueError(tmpl % (orient, ['portrait', 'landscape']))
        sectPr.append(pgSz)
        pPr.append(sectPr)
        pagebreak.append(pPr)
    return pagebreak
def paragraph(paratext, style='BodyText', breakbefore=False, jc='left'):
    """
    Return a new paragraph element containing *paratext*. The paragraph's
    default style is 'Body Text', but a new style may be set using the
    *style* parameter.
    @param string jc: Paragraph alignment, possible values:
                      left, center, right, both (justified), ...
                      see http://www.schemacentral.com/sc/ooxml/t-w_ST_Jc.html
                      for a full list
    @param bool breakbefore: when True, a lastRenderedPageBreak element is
                             inserted before the text of each run
    If *paratext* is a list, add a run for each (text, char_format_str)
    2-tuple in the list. char_format_str is a string containing one or more
    of the characters 'b', 'i', or 'u', meaning bold, italic, and underline
    respectively. For example:
    paratext = [
        ('some bold text', 'b'),
        ('some normal text', ''),
        ('some italic underlined text', 'iu')
    ]
    """
    # Make our elements
    paragraph = makeelement('p')
    if not isinstance(paratext, list):
        paratext = [(paratext, '')]
    text_tuples = []
    for pt in paratext:
        text, char_styles_str = (pt if isinstance(pt, (list, tuple))
                                 else (pt, ''))
        text_elm = makeelement('t', tagtext=text)
        # Preserve leading/trailing whitespace, which XML would otherwise
        # normalise away
        if len(text.strip()) < len(text):
            text_elm.set('{http://www.w3.org/XML/1998/namespace}space',
                         'preserve')
        text_tuples.append([text_elm, char_styles_str])
    pPr = makeelement('pPr')
    pStyle = makeelement('pStyle', attributes={'val': style})
    pJc = makeelement('jc', attributes={'val': jc})
    pPr.append(pStyle)
    pPr.append(pJc)
    # Add the text to the run, and the run to the paragraph
    paragraph.append(pPr)
    for text_elm, char_styles_str in text_tuples:
        run = makeelement('r')
        rPr = makeelement('rPr')
        # Apply styles
        if 'b' in char_styles_str:
            b = makeelement('b')
            rPr.append(b)
        if 'i' in char_styles_str:
            i = makeelement('i')
            rPr.append(i)
        if 'u' in char_styles_str:
            u = makeelement('u', attributes={'val': 'single'})
            rPr.append(u)
        run.append(rPr)
        # Insert lastRenderedPageBreak for assistive technologies like
        # document narrators to know when a page break occurred.
        if breakbefore:
            lastRenderedPageBreak = makeelement('lastRenderedPageBreak')
            run.append(lastRenderedPageBreak)
        run.append(text_elm)
        paragraph.append(run)
    # Return the combined paragraph
    return paragraph
def contenttypes():
    '''Build the [Content_Types].xml tree: an Override entry for each known
    package part plus Default entries for common file extensions.'''
    types = etree.fromstring(
        '<Types xmlns="http://schemas.openxmlformats.org/package/2006/conten'
        't-types"></Types>')
    # Part name -> content type for every part this module generates
    parts = {
        '/word/theme/theme1.xml': 'application/vnd.openxmlformats-officedocu'
                                  'ment.theme+xml',
        '/word/fontTable.xml': 'application/vnd.openxmlformats-officedocu'
                               'ment.wordprocessingml.fontTable+xml',
        '/docProps/core.xml': 'application/vnd.openxmlformats-package.co'
                              're-properties+xml',
        '/docProps/app.xml': 'application/vnd.openxmlformats-officedocu'
                             'ment.extended-properties+xml',
        '/word/document.xml': 'application/vnd.openxmlformats-officedocu'
                              'ment.wordprocessingml.document.main+xml',
        '/word/settings.xml': 'application/vnd.openxmlformats-officedocu'
                              'ment.wordprocessingml.settings+xml',
        '/word/numbering.xml': 'application/vnd.openxmlformats-officedocu'
                               'ment.wordprocessingml.numbering+xml',
        '/word/styles.xml': 'application/vnd.openxmlformats-officedocu'
                            'ment.wordprocessingml.styles+xml',
        '/word/webSettings.xml': 'application/vnd.openxmlformats-officedocu'
                                 'ment.wordprocessingml.webSettings+xml'}
    for partname, contenttype in parts.items():
        types.append(makeelement('Override', nsprefix=None,
                                 attributes={'PartName': partname,
                                             'ContentType': contenttype}))
    # Default content types keyed by file extension
    filetypes = {
        'gif': 'image/gif',
        'jpeg': 'image/jpeg',
        'jpg': 'image/jpeg',
        'png': 'image/png',
        'rels': 'application/vnd.openxmlformats-package.relationships+xml',
        'xml': 'application/xml'
    }
    for extension, contenttype in filetypes.items():
        types.append(makeelement('Default', nsprefix=None,
                                 attributes={'Extension': extension,
                                             'ContentType': contenttype}))
    return types
def heading(headingtext, headinglevel, lang='en'):
    '''Return a paragraph styled as a heading of the given level.

    Raises KeyError for a language other than 'en' or 'it'.'''
    lmap = {'en': 'Heading', 'it': 'Titolo'}
    # Build the paragraph skeleton
    par = makeelement('p')
    props = makeelement('pPr')
    props.append(makeelement(
        'pStyle', attributes={'val': lmap[lang]+str(headinglevel)}))
    # Build the run holding the heading text
    run = makeelement('r')
    run.append(makeelement('t', tagtext=headingtext))
    # Assemble: properties first, then the text run
    par.append(props)
    par.append(run)
    return par
def table(contents, heading=True, colw=None, cwunit='dxa', tblw=0,
          twunit='auto', borders={}, celstyle=None):
    """
    Return a table element based on specified parameters
    @param list contents: A list of lists describing contents. Every item in
                          the list can be a string or a valid XML element
                          itself. It can also be a list. In that case all the
                          listed elements will be merged into the cell.
    @param bool heading: Tells whether first line should be treated as
                         heading or not
    @param list colw: list of integer column widths specified in wunitS.
    @param str cwunit: Unit used for column width:
                        'pct' : fiftieths of a percent
                        'dxa' : twentieths of a point
                        'nil' : no width
                        'auto' : automagically determined
    @param int tblw: Table width
    @param str twunit: Unit used for table width. Same possible values as
                       cwunit.
    @param dict borders: Dictionary defining table border. Supported keys
                         are: 'top', 'left', 'bottom', 'right',
                         'insideH', 'insideV', 'all'.
                         When specified, the 'all' key has precedence over
                         others. Each key must define a dict of border
                         attributes:
                         color : The color of the border, in hex or
                                 'auto'
                         space : The space, measured in points
                         sz : The size of the border, in eighths of
                              a point
                         val : The style of the border, see
                               http://www.schemacentral.com/sc/ooxml/t-w_ST_Border.htm
                         NOTE(review): borders={} is a mutable default; it is
                         only read here, never mutated, so it is safe as-is.
    @param list celstyle: Specify the style for each colum, list of dicts.
                          supported keys:
                          'align' : specify the alignment, see paragraph
                                    documentation.
    @return lxml.etree: Generated XML etree element
    """
    table = makeelement('tbl')
    columns = len(contents[0])
    # Table properties
    tableprops = makeelement('tblPr')
    tablestyle = makeelement('tblStyle', attributes={'val': ''})
    tableprops.append(tablestyle)
    tablewidth = makeelement(
        'tblW', attributes={'w': str(tblw), 'type': str(twunit)})
    tableprops.append(tablewidth)
    if len(borders.keys()):
        tableborders = makeelement('tblBorders')
        for b in ['top', 'left', 'bottom', 'right', 'insideH', 'insideV']:
            if b in borders.keys() or 'all' in borders.keys():
                # 'all' takes precedence over any per-side definition
                k = 'all' if 'all' in borders.keys() else b
                attrs = {}
                for a in borders[k].keys():
                    # Python 2 ``unicode``; would be str() under Python 3
                    attrs[a] = unicode(borders[k][a])
                borderelem = makeelement(b, attributes=attrs)
                tableborders.append(borderelem)
        tableprops.append(tableborders)
    tablelook = makeelement('tblLook', attributes={'val': '0400'})
    tableprops.append(tablelook)
    table.append(tableprops)
    # Table Grid
    tablegrid = makeelement('tblGrid')
    for i in range(columns):
        attrs = {'w': str(colw[i]) if colw else '2390'}
        tablegrid.append(makeelement('gridCol', attributes=attrs))
    table.append(tablegrid)
    # Heading Row
    row = makeelement('tr')
    rowprops = makeelement('trPr')
    cnfStyle = makeelement('cnfStyle', attributes={'val': '000000100000'})
    rowprops.append(cnfStyle)
    row.append(rowprops)
    if heading:
        i = 0
        # NOTE: the loop variable deliberately reuses (and shadows) the
        # ``heading`` parameter; by this point its truthiness has already
        # been consumed by the ``if`` above.
        for heading in contents[0]:
            cell = makeelement('tc')
            # Cell properties
            cellprops = makeelement('tcPr')
            if colw:
                wattr = {'w': str(colw[i]), 'type': cwunit}
            else:
                wattr = {'w': '0', 'type': 'auto'}
            cellwidth = makeelement('tcW', attributes=wattr)
            cellstyle = makeelement('shd', attributes={'val': 'clear',
                                                       'color': 'auto',
                                                       'fill': 'FFFFFF',
                                                       'themeFill': 'text2',
                                                       'themeFillTint': '99'})
            cellprops.append(cellwidth)
            cellprops.append(cellstyle)
            cell.append(cellprops)
            # Paragraph (Content)
            if not isinstance(heading, (list, tuple)):
                heading = [heading]
            for h in heading:
                if isinstance(h, etree._Element):
                    cell.append(h)
                else:
                    cell.append(paragraph(h, jc='center'))
            row.append(cell)
            i += 1
        table.append(row)
    # Contents Rows
    # NOTE: ``heading`` here is the last heading-cell value when headings
    # were processed; any non-empty contents[0] leaves it truthy, matching
    # the original intent of skipping the first row.
    for contentrow in contents[1 if heading else 0:]:
        row = makeelement('tr')
        i = 0
        for content in contentrow:
            cell = makeelement('tc')
            # Properties
            cellprops = makeelement('tcPr')
            if colw:
                wattr = {'w': str(colw[i]), 'type': cwunit}
            else:
                wattr = {'w': '0', 'type': 'auto'}
            cellwidth = makeelement('tcW', attributes=wattr)
            cellprops.append(cellwidth)
            cell.append(cellprops)
            # Paragraph (Content)
            if not isinstance(content, (list, tuple)):
                content = [content]
            for c in content:
                if isinstance(c, etree._Element):
                    cell.append(c)
                else:
                    if celstyle and 'align' in celstyle[i].keys():
                        align = celstyle[i]['align']
                    else:
                        align = 'left'
                    cell.append(paragraph(c, jc=align))
            row.append(cell)
            i += 1
        table.append(row)
    return table
def picture(
        relationshiplist, picname, picdescription, pixelwidth=None,
        pixelheight=None, nochangeaspect=True, nochangearrowheads=True,
        imagefiledict=None):
    """
    Take a relationshiplist, picture file name, and return a paragraph
    containing the image and an updated relationshiplist

    @param list relationshiplist: current [type, target] pairs; a new image
        relationship is appended when the picture is not already registered
    @param str picname: path to the image file
    @param str picdescription: alt-text / description stored on the picture
    @param int pixelwidth/pixelheight: optional explicit size; defaults to
        the image's own pixel size (swapped when EXIF says it is rotated)
    @param imagefiledict: optional {abspath: rId} map; when given, images
        are tracked for savedocx() instead of being copied into the template
    @return: (relationshiplist, paragraph) or, with imagefiledict,
        (relationshiplist, paragraph, imagefiledict)
    """
    if imagefiledict is None:
        warn(
            'Using picture() without imagefiledict parameter will be depreca'
            'ted in the future.', PendingDeprecationWarning
        )
    # http://openxmldeveloper.org/articles/462.aspx
    # Create an image. Size may be specified, otherwise it will based on the
    # pixel size of image. Return a paragraph containing the picture
    # Set relationship ID to that of the image or the first available one
    picid = '2'
    picpath = abspath(picname)
    if imagefiledict is not None:
        # Keep track of the image files in a separate dictionary so they don't
        # need to be copied into the template directory
        if picpath not in imagefiledict:
            picrelid = 'rId' + str(len(relationshiplist) + 1)
            imagefiledict[picpath] = picrelid
            relationshiplist.append([
                'http://schemas.openxmlformats.org/officeDocument/2006/relat'
                'ionships/image',
                'media/%s_%s' % (picrelid, basename(picpath))
            ])
        else:
            picrelid = imagefiledict[picpath]
    else:
        # Copy files into template directory for backwards compatibility
        # Images still accumulate in the template directory this way
        picrelid = 'rId' + str(len(relationshiplist) + 1)
        relationshiplist.append([
            'http://schemas.openxmlformats.org/officeDocument/2006/relations'
            'hips/image', 'media/' + picname
        ])
        media_dir = join(template_dir, 'word', 'media')
        if not os.path.isdir(media_dir):
            os.mkdir(media_dir)
        shutil.copyfile(picname, join(media_dir, picname))
    image = Image.open(picpath)
    # Extract EXIF data, if available
    # NOTE(review): the bare except deliberately treats ANY failure in EXIF
    # extraction as "no EXIF data"; it also hides unrelated errors.
    try:
        exif = image._getexif()
        exif = {} if exif is None else exif
    except:
        exif = {}
    imageExif = {}
    for tag, value in exif.items():
        imageExif[TAGS.get(tag, tag)] = value
    # Map the EXIF orientation flag (1-8) to rotation/flip operations
    imageOrientation = imageExif.get('Orientation', 1)
    imageAngle = {
        1: 0, 2: 0, 3: 180, 4: 0, 5: 90, 6: 90, 7: 270, 8: 270
    }[imageOrientation]
    imageFlipH = 'true' if imageOrientation in (2, 5, 7) else 'false'
    imageFlipV = 'true' if imageOrientation == 4 else 'false'
    # Check if the user has specified a size
    if not pixelwidth or not pixelheight:
        # If not, get info from the picture itself
        pixelwidth, pixelheight = image.size[0:2]
    # Swap width and height if necessary
    if imageOrientation in (5, 6, 7, 8):
        pixelwidth, pixelheight = pixelheight, pixelwidth
    # OpenXML measures on-screen objects in English Metric Units
    # 1cm = 36000 EMUs; 12700 EMU per pixel assumes 72 DPI (1 px = 1 pt)
    emuperpixel = 12700
    width = str(pixelwidth * emuperpixel)
    height = str(pixelheight * emuperpixel)
    # There are 3 main elements inside a picture
    # 1. The Blipfill - specifies how the image fills the picture area
    #    (stretch, tile, etc.)
    blipfill = makeelement('blipFill', nsprefix='pic')
    blipfill.append(makeelement('blip', nsprefix='a', attrnsprefix='r',
                                attributes={'embed': picrelid}))
    stretch = makeelement('stretch', nsprefix='a')
    stretch.append(makeelement('fillRect', nsprefix='a'))
    blipfill.append(makeelement('srcRect', nsprefix='a'))
    blipfill.append(stretch)
    # 2. The non visual picture properties
    nvpicpr = makeelement('nvPicPr', nsprefix='pic')
    cnvpr = makeelement(
        'cNvPr', nsprefix='pic',
        attributes={'id': '0', 'name': 'Picture 1', 'descr': picdescription}
    )
    nvpicpr.append(cnvpr)
    cnvpicpr = makeelement('cNvPicPr', nsprefix='pic')
    cnvpicpr.append(makeelement(
        'picLocks', nsprefix='a',
        attributes={'noChangeAspect': str(int(nochangeaspect)),
                    'noChangeArrowheads': str(int(nochangearrowheads))}))
    nvpicpr.append(cnvpicpr)
    # 3. The Shape properties (rotation/flip come from EXIF; angle is in
    #    60000ths of a degree per DrawingML)
    sppr = makeelement('spPr', nsprefix='pic', attributes={'bwMode': 'auto'})
    xfrm = makeelement(
        'xfrm', nsprefix='a', attributes={
            'rot': str(imageAngle * 60000), 'flipH': imageFlipH,
            'flipV': imageFlipV
        }
    )
    xfrm.append(
        makeelement('off', nsprefix='a', attributes={'x': '0', 'y': '0'})
    )
    xfrm.append(
        makeelement(
            'ext', nsprefix='a', attributes={'cx': width, 'cy': height}
        )
    )
    prstgeom = makeelement(
        'prstGeom', nsprefix='a', attributes={'prst': 'rect'}
    )
    prstgeom.append(makeelement('avLst', nsprefix='a'))
    sppr.append(xfrm)
    sppr.append(prstgeom)
    # Add our 3 parts to the picture element
    pic = makeelement('pic', nsprefix='pic')
    pic.append(nvpicpr)
    pic.append(blipfill)
    pic.append(sppr)
    # Now make the supporting elements
    # The following sequence is just: make element, then add its children
    graphicdata = makeelement(
        'graphicData', nsprefix='a',
        attributes={'uri': ('http://schemas.openxmlformats.org/drawingml/200'
                            '6/picture')})
    graphicdata.append(pic)
    graphic = makeelement('graphic', nsprefix='a')
    graphic.append(graphicdata)
    framelocks = makeelement('graphicFrameLocks', nsprefix='a',
                             attributes={'noChangeAspect': '1'})
    framepr = makeelement('cNvGraphicFramePr', nsprefix='wp')
    framepr.append(framelocks)
    docpr = makeelement('docPr', nsprefix='wp',
                        attributes={'id': picid, 'name': 'Picture 1',
                                    'descr': picdescription})
    effectextent = makeelement('effectExtent', nsprefix='wp',
                               attributes={'l': '25400', 't': '0', 'r': '0',
                                           'b': '0'})
    extent = makeelement('extent', nsprefix='wp',
                         attributes={'cx': width, 'cy': height})
    inline = makeelement('inline', attributes={'distT': "0", 'distB': "0",
                                               'distL': "0", 'distR': "0"},
                         nsprefix='wp')
    inline.append(extent)
    inline.append(effectextent)
    inline.append(docpr)
    inline.append(framepr)
    inline.append(graphic)
    drawing = makeelement('drawing')
    drawing.append(inline)
    run = makeelement('r')
    run.append(drawing)
    paragraph = makeelement('p')
    paragraph.append(run)
    if imagefiledict is not None:
        return relationshiplist, paragraph, imagefiledict
    else:
        return relationshiplist, paragraph
def search(document, search):
    '''Return True if the regex *search* matches any text in *document*.

    @param instance document: lxml etree of a document (or document body)
    @param str search: regular expression to look for
    @return bool: True on the first match, False if nothing matched
    '''
    searchre = re.compile(search)
    text_tag = '{%s}t' % nsprefixes['w']
    for element in document.iter():
        # Only t (text) elements carry document text
        if element.tag == text_tag and element.text:
            if searchre.search(element.text):
                # Short-circuit: no need to scan the rest of the document
                return True
    return False
def replace(document, search, replace):
    """
    Substitute every regex match of *search* with *replace* inside all
    text elements, returning the updated document.
    """
    pattern = re.compile(search)
    text_tag = '{%s}t' % nsprefixes['w']
    for node in document.iter():
        # Only t (text) elements carry document text
        if node.tag != text_tag:
            continue
        if node.text and pattern.search(node.text):
            node.text = re.sub(search, replace, node.text)
    return document
def clean(document):
    """Remove empty t and r elements from the document and return it."""
    for tagname in ('t', 'r'):
        fulltag = '{%s}%s' % (nsprefixes['w'], tagname)
        # Collect first, then remove, so we never mutate while iterating
        doomed = [el for el in document.iter()
                  if el.tag == fulltag and not el.text and not len(el)]
        for el in doomed:
            el.getparent().remove(el)
    return document
def AdvSearch(document, search, bs=3):
    '''Return the set of all regex matches found in the document.
    This is an advanced version of python-docx.search() that takes into
    account blocks of <bs> text elements at a time, because the text being
    searched for may be spread across multiple adjacent text elements.
    Examples:
    original text blocks : [ 'Hel', 'lo,', ' world!' ]
    search : 'Hello,'
    output blocks : [ 'Hello,' ]
    original text blocks : [ 'Hel', 'lo', ' __', 'name', '__!' ]
    search : '(__[a-z]+__)'
    output blocks : [ '__name__' ]
    @param instance document: The original document
    @param str search: The text to search for (regexp)
    @param int bs: Maximum number of consecutive text elements joined
                   together when looking for a match
    @return set All occurrences of the search string
    '''
    # Compile the search regexp
    searchre = re.compile(search)
    matches = []
    # searchels is a sliding window holding the last n text elements
    # found in the document, 1 <= n <= bs
    searchels = []
    for element in document.iter():
        if element.tag == '{%s}t' % nsprefixes['w']:  # t (text) elements
            if element.text:
                # Add this element to searchels
                searchels.append(element)
                if len(searchels) > bs:
                    # If searchels is too long, drop the oldest element
                    searchels.pop(0)
                # Try every run of consecutive elements in searchels,
                # from the shortest runs up to the longest ones.
                # l = run length (number of elements joined)
                # s = run start index
                # e = element indices to merge
                found = False
                for l in range(1, len(searchels)+1):
                    if found:
                        break
                    for s in range(len(searchels)):
                        if found:
                            break
                        if s+l <= len(searchels):
                            e = range(s, s+l)
                            txtsearch = ''
                            for k in e:
                                txtsearch += searchels[k].text
                            # Search for the pattern in the joined text
                            match = searchre.search(txtsearch)
                            if match:
                                matches.append(match.group())
                                found = True
    return set(matches)
def advReplace(document, search, replace, bs=3):
    """
    Replace all occurrences of string with a different string, return updated
    document
    This is a modified version of python-docx.replace() that takes into
    account blocks of <bs> elements at a time. The replace element can also
    be a string or an xml etree element.
    What it does:
    It searches the entire document body for text blocks.
    Then scans those text blocks for the search pattern.
    Since the text to search could be spread across multiple text blocks,
    we need to adopt some sort of algorithm to handle this situation.
    The smaller matching group of blocks (up to bs) is then adopted.
    If the matching group has more than one block, blocks other than first
    are cleared and all the replacement text is put on first block.
    Examples:
    original text blocks : [ 'Hel', 'lo,', ' world!' ]
    search / replace: 'Hello,' / 'Hi!'
    output blocks : [ 'Hi!', '', ' world!' ]
    original text blocks : [ 'Hel', 'lo,', ' world!' ]
    search / replace: 'Hello, world' / 'Hi!'
    output blocks : [ 'Hi!!', '', '' ]
    original text blocks : [ 'Hel', 'lo,', ' world!' ]
    search / replace: 'Hel' / 'Hal'
    output blocks : [ 'Hal', 'lo,', ' world!' ]
    @param instance document: The original document
    @param str search: The text to search for (regexp)
    @param mixed replace: The replacement text or lxml.etree element to
                          append, or a list of etree elements
    @param int bs: See above
    @return instance The document with replacement applied
    """
    # Enables debug output
    DEBUG = False
    newdocument = document
    # Compile the search regexp
    searchre = re.compile(search)
    # searchels is a sliding window holding the last n text elements
    # found in the document, 1 <= n <= bs
    searchels = []
    for element in newdocument.iter():
        if element.tag == '{%s}t' % nsprefixes['w']:  # t (text) elements
            if element.text:
                # Add this element to searchels
                searchels.append(element)
                if len(searchels) > bs:
                    # If searchels is too long, drop the oldest element
                    searchels.pop(0)
                # Try every run of consecutive elements in searchels,
                # from the shortest runs up to the longest ones.
                # l = run length
                # s = run start index
                # e = element indices to merge
                found = False
                for l in range(1, len(searchels)+1):
                    if found:
                        break
                    for s in range(len(searchels)):
                        if found:
                            break
                        if s+l <= len(searchels):
                            e = range(s, s+l)
                            txtsearch = ''
                            for k in e:
                                txtsearch += searchels[k].text
                            # Search for the pattern in the joined text
                            match = searchre.search(txtsearch)
                            if match:
                                found = True
                                # I've found something :)
                                if DEBUG:
                                    log.debug("Found element!")
                                    log.debug("Search regexp: %s",
                                              searchre.pattern)
                                    log.debug("Requested replacement: %s",
                                              replace)
                                    log.debug("Matched text: %s", txtsearch)
                                    log.debug("Matched text (splitted): %s",
                                              map(lambda i: i.text, searchels))
                                    log.debug("Matched at position: %s",
                                              match.start())
                                    log.debug("matched in elements: %s", e)
                                    if isinstance(replace, etree._Element):
                                        log.debug("Will replace with XML CODE")
                                    elif isinstance(replace, (list, tuple)):
                                        # BUGFIX: was isinstance(replace(list,
                                        # tuple)), which *called* replace and
                                        # crashed whenever this branch ran.
                                        log.debug("Will replace with LIST OF"
                                                  " ELEMENTS")
                                    else:
                                        # BUGFIX: the substituted text was
                                        # passed as a lazy %-arg but the
                                        # message had no placeholder for it.
                                        log.debug("Will replace with: %s",
                                                  re.sub(search, replace,
                                                         txtsearch))
                                curlen = 0
                                replaced = False
                                for i in e:
                                    curlen += len(searchels[i].text)
                                    if curlen > match.start() and not replaced:
                                        # The match occurred in THIS element.
                                        # Put in the whole replaced text
                                        if isinstance(replace, etree._Element):
                                            # Convert to a list and process
                                            # it later
                                            replace = [replace]
                                        if isinstance(replace, (list, tuple)):
                                            # I'm replacing with a list of
                                            # etree elements
                                            # clear the text in the tag and
                                            # append the element after the
                                            # parent paragraph
                                            # (because t elements cannot have
                                            # childs)
                                            p = findTypeParent(
                                                searchels[i],
                                                '{%s}p' % nsprefixes['w'])
                                            searchels[i].text = re.sub(
                                                search, '', txtsearch)
                                            insindex = p.getparent().index(p)+1
                                            for r in replace:
                                                p.getparent().insert(
                                                    insindex, r)
                                                insindex += 1
                                        else:
                                            # Replacing with pure text
                                            searchels[i].text = re.sub(
                                                search, replace, txtsearch)
                                        replaced = True
                                        log.debug(
                                            "Replacing in element #: %s", i)
                                    else:
                                        # Clears the other text elements
                                        searchels[i].text = ''
    return newdocument
def getdocumenttext(document):
    '''Return the raw text of a document, as a list of paragraph strings.'''
    para_tag = '{' + nsprefixes['w'] + '}p'
    text_tag = '{' + nsprefixes['w'] + '}t'
    tab_tag = '{' + nsprefixes['w'] + '}tab'
    # Compile a list of all paragraph (p) elements
    paragraphs = [el for el in document.iter() if el.tag == para_tag]
    paratextlist = []
    # A single sentence may span several text elements, so gather every
    # t child (and tab) of each paragraph and join them together.
    for para in paragraphs:
        pieces = []
        for el in para.iter():
            if el.tag == text_tag:
                if el.text:
                    pieces.append(el.text)
            elif el.tag == tab_tag:
                pieces.append('\t')
        joined = u''.join(pieces)
        # Skip paragraphs that contained no text at all
        if joined:
            paratextlist.append(joined)
    return paratextlist
def coreproperties(title, subject, creator, keywords, lastmodifiedby=None):
    """
    Create core properties (common document properties referred to in the
    'Dublin Core' specification). See appproperties() for other stuff.
    @param str title: document title
    @param str subject: document subject
    @param str creator: author name; also used as lastModifiedBy default
    @param list keywords: keyword strings, joined with commas
    @param str lastmodifiedby: optional override for lastModifiedBy
    """
    props = makeelement('coreProperties', nsprefix='cp')
    props.append(makeelement('title', tagtext=title, nsprefix='dc'))
    props.append(makeelement('subject', tagtext=subject, nsprefix='dc'))
    props.append(makeelement('creator', tagtext=creator, nsprefix='dc'))
    props.append(makeelement('keywords', tagtext=','.join(keywords),
                             nsprefix='cp'))
    # Fall back to the creator when no explicit modifier was supplied
    props.append(makeelement('lastModifiedBy',
                             tagtext=lastmodifiedby or creator,
                             nsprefix='cp'))
    props.append(makeelement('revision', tagtext='1', nsprefix='cp'))
    props.append(makeelement('category', tagtext='Examples', nsprefix='cp'))
    props.append(makeelement('description', tagtext='Examples', nsprefix='dc'))
    currenttime = time.strftime('%Y-%m-%dT%H:%M:%SZ')
    # Document creation and modify times.
    # Problem here: the attribute name uses one namespace while its value
    # uses another, so build these elements from raw XML as a workaround.
    for doctime in ('created', 'modified'):
        elm_str = (
            '<dcterms:%s xmlns:xsi="http://www.w3.org/2001/XMLSchema-instanc'
            'e" xmlns:dcterms="http://purl.org/dc/terms/" xsi:type="dcterms:'
            'W3CDTF">%s</dcterms:%s>'
        ) % (doctime, currenttime, doctime)
        props.append(etree.fromstring(elm_str))
    return props
def appproperties():
    """
    Create app-specific ('extended') properties. See coreproperties() for
    the more common document properties.
    @return lxml.etree: the extended-properties element
    """
    # Built from a raw XML string because the element needs two namespace
    # declarations at once. (The previous makeelement() call here was dead
    # code: its result was immediately overwritten by this assignment.)
    appprops = etree.fromstring(
        '<?xml version="1.0" encoding="UTF-8" standalone="yes"?><Properties x'
        'mlns="http://schemas.openxmlformats.org/officeDocument/2006/extended'
        '-properties" xmlns:vt="http://schemas.openxmlformats.org/officeDocum'
        'ent/2006/docPropsVTypes"></Properties>')
    # Boilerplate values matching what Word itself emits
    props =\
        {'Template': 'Normal.dotm',
         'TotalTime': '6',
         'Pages': '1',
         'Words': '83',
         'Characters': '475',
         'Application': 'Microsoft Word 12.0.0',
         'DocSecurity': '0',
         'Lines': '12',
         'Paragraphs': '8',
         'ScaleCrop': 'false',
         'LinksUpToDate': 'false',
         'CharactersWithSpaces': '583',
         'SharedDoc': 'false',
         'HyperlinksChanged': 'false',
         'AppVersion': '12.0000'}
    for prop in props:
        appprops.append(makeelement(prop, tagtext=props[prop], nsprefix=None))
    return appprops
def websettings():
    '''Generate a minimal webSettings element.'''
    settings = makeelement('webSettings')
    for childname in ('allowPNG', 'doNotSaveAsSingleFile'):
        settings.append(makeelement(childname))
    return settings
def relationshiplist():
    '''Return the default document relationships as [type, target] pairs.'''
    base = ('http://schemas.openxmlformats.org/officeDocument/2006/'
            'relationships/')
    # (relationship type suffix, target part) in package order
    pairs = [
        ('numbering', 'numbering.xml'),
        ('styles', 'styles.xml'),
        ('settings', 'settings.xml'),
        ('webSettings', 'webSettings.xml'),
        ('fontTable', 'fontTable.xml'),
        ('theme', 'theme/theme1.xml'),
    ]
    return [[base + reltype, target] for reltype, target in pairs]
def wordrelationships(relationshiplist):
    '''Generate a Word relationships element from [type, target] pairs.
    @param list relationshiplist: list of [relationship type, target] pairs
    @return lxml.etree: the Relationships element
    '''
    # FIXME: using string hack instead of making element
    #relationships = makeelement('Relationships', nsprefix='pr')
    relationships = etree.fromstring(
        '<Relationships xmlns="http://schemas.openxmlformats.org/package/2006'
        '/relationships"></Relationships>')
    # Relationship IDs (rId) start at 1; enumerate replaces the manual
    # count variable of the original implementation.
    for count, relationship in enumerate(relationshiplist):
        rel_elm = makeelement('Relationship', nsprefix=None,
                              attributes={'Id': 'rId'+str(count+1),
                                          'Type': relationship[0],
                                          'Target': relationship[1]}
                              )
        relationships.append(rel_elm)
    return relationships
def savedocx(
        document, coreprops, appprops, contenttypes, websettings,
        wordrelationships, output, imagefiledict=None):
    """
    Serialize the document and all supporting XML parts into a .docx
    (zip) archive at *output*, together with the template support files.

    @param document: lxml etree for word/document.xml
    @param coreprops: core (Dublin Core) properties element
    @param appprops: extended/application properties element
    @param contenttypes: [Content_Types].xml element
    @param websettings: word/webSettings.xml element
    @param wordrelationships: word/_rels/document.xml.rels element
    @param output: target path (or file-like object) for the archive
    @param dict imagefiledict: maps image file paths to relationship IDs;
                               calling without it is pending deprecation

    NOTE(review): this changes the process working directory while
    writing and restores it at the end — not safe under concurrency.
    """
    if imagefiledict is None:
        warn(
            'Using savedocx() without imagefiledict parameter will be deprec'
            'ated in the future.', PendingDeprecationWarning
        )
    assert os.path.isdir(template_dir)
    docxfile = zipfile.ZipFile(
        output, mode='w', compression=zipfile.ZIP_DEFLATED)
    # Move to the template data path
    prev_dir = os.path.abspath('.')  # save previous working dir
    os.chdir(template_dir)
    # Serialize our trees into the zip file
    treesandfiles = {
        document: 'word/document.xml',
        coreprops: 'docProps/core.xml',
        appprops: 'docProps/app.xml',
        contenttypes: '[Content_Types].xml',
        websettings: 'word/webSettings.xml',
        wordrelationships: 'word/_rels/document.xml.rels'
    }
    for tree in treesandfiles:
        log.info('Saving: %s' % treesandfiles[tree])
        treestring = etree.tostring(tree, pretty_print=True)
        docxfile.writestr(treesandfiles[tree], treestring)
    # Add & compress images, if applicable
    if imagefiledict is not None:
        for imagepath, picrelid in imagefiledict.items():
            # Prefix with the relationship id so distinct images sharing a
            # basename cannot collide inside the archive
            archivename = 'word/media/%s_%s' % (picrelid, basename(imagepath))
            log.info('Saving: %s', archivename)
            docxfile.write(imagepath, archivename)
    # Add & compress support files
    files_to_ignore = ['.DS_Store']  # nuisance from some os's
    for dirpath, dirnames, filenames in os.walk('.'):
        for filename in filenames:
            if filename in files_to_ignore:
                continue
            templatefile = join(dirpath, filename)
            # Strip the leading './' so archive paths are relative
            archivename = templatefile[2:]
            log.info('Saving: %s', archivename)
            docxfile.write(templatefile, archivename)
    log.info('Saved new file to: %r', output)
    docxfile.close()
    os.chdir(prev_dir)  # restore previous working dir
    return
|
mikemaccana/python-docx
|
docx.py
|
AdvSearch
|
python
|
def AdvSearch(document, search, bs=3):
'''Return set of all regex matches
This is an advanced version of python-docx.search() that takes into
account blocks of <bs> elements at a time.
What it does:
It searches the entire document body for text blocks.
Since the text to search could be spawned across multiple text blocks,
we need to adopt some sort of algorithm to handle this situation.
The smaller matching group of blocks (up to bs) is then adopted.
If the matching group has more than one block, blocks other than first
are cleared and all the replacement text is put on first block.
Examples:
original text blocks : [ 'Hel', 'lo,', ' world!' ]
search : 'Hello,'
output blocks : [ 'Hello,' ]
original text blocks : [ 'Hel', 'lo', ' __', 'name', '__!' ]
search : '(__[a-z]+__)'
output blocks : [ '__name__' ]
@param instance document: The original document
@param str search: The text to search for (regexp)
append, or a list of etree elements
@param int bs: See above
@return set All occurences of search string
'''
# Compile the search regexp
searchre = re.compile(search)
matches = []
# Will match against searchels. Searchels is a list that contains last
# n text elements found in the document. 1 < n < bs
searchels = []
for element in document.iter():
if element.tag == '{%s}t' % nsprefixes['w']: # t (text) elements
if element.text:
# Add this element to searchels
searchels.append(element)
if len(searchels) > bs:
# Is searchels is too long, remove first elements
searchels.pop(0)
# Search all combinations, of searchels, starting from
# smaller up to bigger ones
# l = search lenght
# s = search start
# e = element IDs to merge
found = False
for l in range(1, len(searchels)+1):
if found:
break
for s in range(len(searchels)):
if found:
break
if s+l <= len(searchels):
e = range(s, s+l)
txtsearch = ''
for k in e:
txtsearch += searchels[k].text
# Searcs for the text in the whole txtsearch
match = searchre.search(txtsearch)
if match:
matches.append(match.group())
found = True
return set(matches)
|
Return set of all regex matches
This is an advanced version of python-docx.search() that takes into
account blocks of <bs> elements at a time.
What it does:
It searches the entire document body for text blocks.
Since the text to search could be spread across multiple text blocks,
we need to adopt some sort of algorithm to handle this situation.
The smaller matching group of blocks (up to bs) is then adopted.
If the matching group has more than one block, blocks other than first
are cleared and all the replacement text is put on first block.
Examples:
original text blocks : [ 'Hel', 'lo,', ' world!' ]
search : 'Hello,'
output blocks : [ 'Hello,' ]
original text blocks : [ 'Hel', 'lo', ' __', 'name', '__!' ]
search : '(__[a-z]+__)'
output blocks : [ '__name__' ]
@param instance document: The original document
@param str search: The text to search for (regexp)
append, or a list of etree elements
@param int bs: See above
@return set All occurrences of the search string
|
train
|
https://github.com/mikemaccana/python-docx/blob/4c9b46dbebe3d2a9b82dbcd35af36584a36fd9fe/docx.py#L683-L756
| null |
# encoding: utf-8
"""
Open and modify Microsoft Word 2007 docx files (called 'OpenXML' and
'Office OpenXML' by Microsoft)
Part of Python's docx module - http://github.com/mikemaccana/python-docx
See LICENSE for licensing information.
"""
import os
import re
import time
import shutil
import zipfile
from lxml import etree
from os.path import abspath, basename, join
try:
from PIL import Image
except ImportError:
import Image
try:
from PIL.ExifTags import TAGS
except ImportError:
TAGS = {}
from exceptions import PendingDeprecationWarning
from warnings import warn
import logging
log = logging.getLogger(__name__)
# Record template directory's location which is just 'template' for a docx
# developer or 'site-packages/docx-template' if you have installed docx
template_dir = join(os.path.dirname(__file__), 'docx-template') # installed
if not os.path.isdir(template_dir):
template_dir = join(os.path.dirname(__file__), 'template') # dev
# All Word prefixes / namespace matches used in document.xml & core.xml.
# LXML doesn't actually use prefixes (just the real namespace) , but these
# make it easier to copy Word output more easily.
nsprefixes = {
'mo': 'http://schemas.microsoft.com/office/mac/office/2008/main',
'o': 'urn:schemas-microsoft-com:office:office',
've': 'http://schemas.openxmlformats.org/markup-compatibility/2006',
# Text Content
'w': 'http://schemas.openxmlformats.org/wordprocessingml/2006/main',
'w10': 'urn:schemas-microsoft-com:office:word',
'wne': 'http://schemas.microsoft.com/office/word/2006/wordml',
# Drawing
'a': 'http://schemas.openxmlformats.org/drawingml/2006/main',
'm': 'http://schemas.openxmlformats.org/officeDocument/2006/math',
'mv': 'urn:schemas-microsoft-com:mac:vml',
'pic': 'http://schemas.openxmlformats.org/drawingml/2006/picture',
'v': 'urn:schemas-microsoft-com:vml',
'wp': ('http://schemas.openxmlformats.org/drawingml/2006/wordprocessing'
'Drawing'),
# Properties (core and extended)
'cp': ('http://schemas.openxmlformats.org/package/2006/metadata/core-pr'
'operties'),
'dc': 'http://purl.org/dc/elements/1.1/',
'ep': ('http://schemas.openxmlformats.org/officeDocument/2006/extended-'
'properties'),
'xsi': 'http://www.w3.org/2001/XMLSchema-instance',
# Content Types
'ct': 'http://schemas.openxmlformats.org/package/2006/content-types',
# Package Relationships
'r': ('http://schemas.openxmlformats.org/officeDocument/2006/relationsh'
'ips'),
'pr': 'http://schemas.openxmlformats.org/package/2006/relationships',
# Dublin Core document properties
'dcmitype': 'http://purl.org/dc/dcmitype/',
'dcterms': 'http://purl.org/dc/terms/'}
def opendocx(file):
    '''Open a docx file and return its document.xml as an XML tree.'''
    archive = zipfile.ZipFile(file)
    xmlcontent = archive.read('word/document.xml')
    return etree.fromstring(xmlcontent)
def newdocument():
    '''Create a new, empty document tree containing a body element.'''
    body = makeelement('body')
    doc = makeelement('document')
    doc.append(body)
    return doc
def makeelement(tagname, tagtext=None, nsprefix='w', attributes=None,
                attrnsprefix=None):
    '''Create an lxml element and return it.

    @param str tagname: local tag name (without namespace prefix)
    @param str tagtext: optional text content for the element
    @param mixed nsprefix: key into nsprefixes for the tag namespace, a
                           list of such keys (all declared in the nsmap,
                           first one used for the tag), or None for no
                           namespace
    @param dict attributes: attribute name -> value mapping
    @param str attrnsprefix: nsprefixes key for attribute names; when
                             omitted, 'w' tags reuse the tag namespace
                             and all other tags get no attribute namespace
    '''
    # Deal with list of nsprefix by making namespacemap
    namespacemap = None
    if isinstance(nsprefix, list):
        namespacemap = {}
        for prefix in nsprefix:
            namespacemap[prefix] = nsprefixes[prefix]
        # FIXME: rest of code below expects a single prefix
        nsprefix = nsprefix[0]
    if nsprefix:
        namespace = '{%s}' % nsprefixes[nsprefix]
    else:
        # For when namespace = None
        namespace = ''
    newelement = etree.Element(namespace+tagname, nsmap=namespacemap)
    # Add attributes with namespaces
    if attributes:
        # If they haven't bothered setting attribute namespace, use an empty
        # string (equivalent of no namespace)
        if not attrnsprefix:
            # Quick hack: it seems every element that has a 'w' nsprefix for
            # its tag uses the same prefix for its attributes
            if nsprefix == 'w':
                attributenamespace = namespace
            else:
                attributenamespace = ''
        else:
            attributenamespace = '{'+nsprefixes[attrnsprefix]+'}'
        for tagattribute in attributes:
            newelement.set(attributenamespace+tagattribute,
                           attributes[tagattribute])
    if tagtext:
        newelement.text = tagtext
    return newelement
def pagebreak(type='page', orient='portrait'):
    '''Insert a break, default 'page'.
    See http://openxmldeveloper.org/forums/thread/4075.aspx
    Return our page break element.

    @param str type: 'page' for a simple page break, 'section' for a
                     section break carrying page-size properties
    @param str orient: 'portrait' or 'landscape'; only used when
                       type == 'section'
    '''
    # Need to enumerate different types of page breaks.
    validtypes = ['page', 'section']
    if type not in validtypes:
        tmpl = 'Page break style "%s" not implemented. Valid styles: %s.'
        raise ValueError(tmpl % (type, validtypes))
    validorients = ['portrait', 'landscape']
    if type == 'section' and orient not in validorients:
        # Previously an unknown orientation fell through both branches
        # below, leaving pgSz unbound and crashing with an
        # UnboundLocalError; fail fast with a clear message instead.
        tmpl = ('Page orientation "%s" not implemented. '
                'Valid orientations: %s.')
        raise ValueError(tmpl % (orient, validorients))
    pagebreak = makeelement('p')
    if type == 'page':
        run = makeelement('r')
        br = makeelement('br', attributes={'type': type})
        run.append(br)
        pagebreak.append(run)
    elif type == 'section':
        pPr = makeelement('pPr')
        sectPr = makeelement('sectPr')
        if orient == 'portrait':
            pgSz = makeelement('pgSz', attributes={'w': '12240', 'h': '15840'})
        elif orient == 'landscape':
            pgSz = makeelement('pgSz', attributes={'h': '12240', 'w': '15840',
                                                   'orient': 'landscape'})
        sectPr.append(pgSz)
        pPr.append(sectPr)
        pagebreak.append(pPr)
    return pagebreak
def paragraph(paratext, style='BodyText', breakbefore=False, jc='left'):
    """
    Return a new paragraph element containing *paratext*. The paragraph's
    default style is 'Body Text', but a new style may be set using the
    *style* parameter.
    @param string jc: Paragraph alignment, possible values:
                      left, center, right, both (justified), ...
                      see http://www.schemacentral.com/sc/ooxml/t-w_ST_Jc.html
                      for a full list
    If *paratext* is a list, add a run for each (text, char_format_str)
    2-tuple in the list. char_format_str is a string containing one or more
    of the characters 'b', 'i', or 'u', meaning bold, italic, and underline
    respectively. For example:
    paratext = [
        ('some bold text', 'b'),
        ('some normal text', ''),
        ('some italic underlined text', 'iu')
    ]
    """
    # Make our elements
    paragraph = makeelement('p')
    # Normalize plain-string input to the list-of-tuples form
    if not isinstance(paratext, list):
        paratext = [(paratext, '')]
    text_tuples = []
    for pt in paratext:
        text, char_styles_str = (pt if isinstance(pt, (list, tuple))
                                 else (pt, ''))
        text_elm = makeelement('t', tagtext=text)
        # Leading/trailing whitespace would otherwise be stripped by Word;
        # xml:space="preserve" keeps it intact
        if len(text.strip()) < len(text):
            text_elm.set('{http://www.w3.org/XML/1998/namespace}space',
                         'preserve')
        text_tuples.append([text_elm, char_styles_str])
    pPr = makeelement('pPr')
    pStyle = makeelement('pStyle', attributes={'val': style})
    pJc = makeelement('jc', attributes={'val': jc})
    pPr.append(pStyle)
    pPr.append(pJc)
    # Add the text to the run, and the run to the paragraph
    paragraph.append(pPr)
    for text_elm, char_styles_str in text_tuples:
        run = makeelement('r')
        rPr = makeelement('rPr')
        # Apply character styles (bold / italic / underline)
        if 'b' in char_styles_str:
            b = makeelement('b')
            rPr.append(b)
        if 'i' in char_styles_str:
            i = makeelement('i')
            rPr.append(i)
        if 'u' in char_styles_str:
            u = makeelement('u', attributes={'val': 'single'})
            rPr.append(u)
        run.append(rPr)
        # Insert lastRenderedPageBreak for assistive technologies like
        # document narrators to know when a page break occurred.
        if breakbefore:
            lastRenderedPageBreak = makeelement('lastRenderedPageBreak')
            run.append(lastRenderedPageBreak)
        run.append(text_elm)
        paragraph.append(run)
    # Return the combined paragraph
    return paragraph
def contenttypes():
    '''Build the [Content_Types].xml element declaring every part's type.'''
    types = etree.fromstring(
        '<Types xmlns="http://schemas.openxmlformats.org/package/2006/conten'
        't-types"></Types>')
    # Explicit content types for the individual package parts
    overrides = {
        '/word/theme/theme1.xml':
            'application/vnd.openxmlformats-officedocument.theme+xml',
        '/word/fontTable.xml':
            'application/vnd.openxmlformats-officedocument.wordprocessingml'
            '.fontTable+xml',
        '/docProps/core.xml':
            'application/vnd.openxmlformats-package.core-properties+xml',
        '/docProps/app.xml':
            'application/vnd.openxmlformats-officedocument.extended-properti'
            'es+xml',
        '/word/document.xml':
            'application/vnd.openxmlformats-officedocument.wordprocessingml'
            '.document.main+xml',
        '/word/settings.xml':
            'application/vnd.openxmlformats-officedocument.wordprocessingml'
            '.settings+xml',
        '/word/numbering.xml':
            'application/vnd.openxmlformats-officedocument.wordprocessingml'
            '.numbering+xml',
        '/word/styles.xml':
            'application/vnd.openxmlformats-officedocument.wordprocessingml'
            '.styles+xml',
        '/word/webSettings.xml':
            'application/vnd.openxmlformats-officedocument.wordprocessingml'
            '.webSettings+xml'}
    for partname, contenttype in overrides.items():
        types.append(makeelement('Override', nsprefix=None,
                                 attributes={'PartName': partname,
                                             'ContentType': contenttype}))
    # Default content types keyed by file extension
    defaults = {
        'gif': 'image/gif',
        'jpeg': 'image/jpeg',
        'jpg': 'image/jpeg',
        'png': 'image/png',
        'rels': 'application/vnd.openxmlformats-package.relationships+xml',
        'xml': 'application/xml'
    }
    for extension, contenttype in defaults.items():
        types.append(makeelement('Default', nsprefix=None,
                                 attributes={'Extension': extension,
                                             'ContentType': contenttype}))
    return types
def heading(headingtext, headinglevel, lang='en'):
    '''Build a heading paragraph at the given level and return it.'''
    # Style name prefixes per language
    stylenames = {'en': 'Heading', 'it': 'Titolo'}
    para = makeelement('p')
    props = makeelement('pPr')
    props.append(makeelement(
        'pStyle', attributes={'val': stylenames[lang]+str(headinglevel)}))
    run = makeelement('r')
    run.append(makeelement('t', tagtext=headingtext))
    # Assemble: properties first, then the text run
    para.append(props)
    para.append(run)
    return para
def table(contents, heading=True, colw=None, cwunit='dxa', tblw=0,
          twunit='auto', borders={}, celstyle=None):
    """
    Return a table element based on specified parameters
    @param list contents: A list of lists describing contents. Every item in
                          the list can be a string or a valid XML element
                          itself. It can also be a list. In that case all the
                          listed elements will be merged into the cell.
    @param bool heading: Tells whether first line should be treated as
                         heading or not
    @param list colw: list of integer column widths specified in wunitS.
    @param str cwunit: Unit used for column width:
                        'pct' : fiftieths of a percent
                        'dxa' : twentieths of a point
                        'nil' : no width
                        'auto' : automagically determined
    @param int tblw: Table width
    @param str twunit: Unit used for table width. Same possible values as
                       cwunit.
    @param dict borders: Dictionary defining table border. Supported keys
                         are: 'top', 'left', 'bottom', 'right',
                         'insideH', 'insideV', 'all'.
                         When specified, the 'all' key has precedence over
                         others. Each key must define a dict of border
                         attributes:
                         color : The color of the border, in hex or
                                 'auto'
                         space : The space, measured in points
                         sz    : The size of the border, in eighths of
                                 a point
                         val   : The style of the border, see
                                 http://www.schemacentral.com/sc/ooxml/t-w_ST_Border.htm
    @param list celstyle: Specify the style for each colum, list of dicts.
                          supported keys:
                          'align' : specify the alignment, see paragraph
                                    documentation.
    @return lxml.etree: Generated XML etree element
    """
    # NOTE(review): borders={} is a mutable default, but it is only read
    # here, never mutated, so the usual shared-state hazard does not bite.
    table = makeelement('tbl')
    columns = len(contents[0])
    # Table properties
    tableprops = makeelement('tblPr')
    tablestyle = makeelement('tblStyle', attributes={'val': ''})
    tableprops.append(tablestyle)
    tablewidth = makeelement(
        'tblW', attributes={'w': str(tblw), 'type': str(twunit)})
    tableprops.append(tablewidth)
    if len(borders.keys()):
        tableborders = makeelement('tblBorders')
        for b in ['top', 'left', 'bottom', 'right', 'insideH', 'insideV']:
            if b in borders.keys() or 'all' in borders.keys():
                # 'all' overrides any individually specified side
                k = 'all' if 'all' in borders.keys() else b
                attrs = {}
                for a in borders[k].keys():
                    attrs[a] = unicode(borders[k][a])
                borderelem = makeelement(b, attributes=attrs)
                tableborders.append(borderelem)
        tableprops.append(tableborders)
    tablelook = makeelement('tblLook', attributes={'val': '0400'})
    tableprops.append(tablelook)
    table.append(tableprops)
    # Table Grid
    tablegrid = makeelement('tblGrid')
    for i in range(columns):
        # '2390' twentieths of a point is the fallback column width
        attrs = {'w': str(colw[i]) if colw else '2390'}
        tablegrid.append(makeelement('gridCol', attributes=attrs))
    table.append(tablegrid)
    # Heading Row
    row = makeelement('tr')
    rowprops = makeelement('trPr')
    cnfStyle = makeelement('cnfStyle', attributes={'val': '000000100000'})
    rowprops.append(cnfStyle)
    row.append(rowprops)
    if heading:
        i = 0
        # NOTE(review): this loop rebinds the *heading* parameter to each
        # heading cell; the slice below ('1 if heading else 0') then tests
        # the last cell's truthiness, which only coincidentally matches
        # the original boolean — confirm before restructuring.
        for heading in contents[0]:
            cell = makeelement('tc')
            # Cell properties
            cellprops = makeelement('tcPr')
            if colw:
                wattr = {'w': str(colw[i]), 'type': cwunit}
            else:
                wattr = {'w': '0', 'type': 'auto'}
            cellwidth = makeelement('tcW', attributes=wattr)
            cellstyle = makeelement('shd', attributes={'val': 'clear',
                                                       'color': 'auto',
                                                       'fill': 'FFFFFF',
                                                       'themeFill': 'text2',
                                                       'themeFillTint': '99'})
            cellprops.append(cellwidth)
            cellprops.append(cellstyle)
            cell.append(cellprops)
            # Paragraph (Content)
            if not isinstance(heading, (list, tuple)):
                heading = [heading]
            for h in heading:
                if isinstance(h, etree._Element):
                    cell.append(h)
                else:
                    cell.append(paragraph(h, jc='center'))
            row.append(cell)
            i += 1
        table.append(row)
    # Contents Rows
    for contentrow in contents[1 if heading else 0:]:
        row = makeelement('tr')
        i = 0
        for content in contentrow:
            cell = makeelement('tc')
            # Properties
            cellprops = makeelement('tcPr')
            if colw:
                wattr = {'w': str(colw[i]), 'type': cwunit}
            else:
                wattr = {'w': '0', 'type': 'auto'}
            cellwidth = makeelement('tcW', attributes=wattr)
            cellprops.append(cellwidth)
            cell.append(cellprops)
            # Paragraph (Content)
            if not isinstance(content, (list, tuple)):
                content = [content]
            for c in content:
                if isinstance(c, etree._Element):
                    cell.append(c)
                else:
                    if celstyle and 'align' in celstyle[i].keys():
                        align = celstyle[i]['align']
                    else:
                        align = 'left'
                    cell.append(paragraph(c, jc=align))
            row.append(cell)
            i += 1
        table.append(row)
    return table
def picture(
        relationshiplist, picname, picdescription, pixelwidth=None,
        pixelheight=None, nochangeaspect=True, nochangearrowheads=True,
        imagefiledict=None):
    """
    Take a relationshiplist, picture file name, and return a paragraph
    containing the image and an updated relationshiplist

    @param list relationshiplist: [type, target] relationship pairs; mutated
                                  in place when a new image is registered
    @param str picname: path of the image file to embed
    @param str picdescription: alt-text description stored on the picture
    @param int pixelwidth: optional display width in pixels; defaults to the
                           image's own width
    @param int pixelheight: optional display height in pixels; defaults to
                            the image's own height
    @param bool nochangeaspect: lock the aspect ratio in the drawing
    @param bool nochangearrowheads: lock arrowheads in the drawing
    @param dict imagefiledict: optional map of absolute image path ->
                               relationship id; when given, images are NOT
                               copied into the template directory
    @return: (relationshiplist, paragraph) or, when imagefiledict was given,
             (relationshiplist, paragraph, imagefiledict)
    """
    if imagefiledict is None:
        warn(
            'Using picture() without imagefiledict parameter will be depreca'
            'ted in the future.', PendingDeprecationWarning
        )

    # http://openxmldeveloper.org/articles/462.aspx
    # Create an image. Size may be specified, otherwise it will based on the
    # pixel size of image. Return a paragraph containing the picture

    # Set relationship ID to that of the image or the first available one
    picid = '2'
    picpath = abspath(picname)

    if imagefiledict is not None:
        # Keep track of the image files in a separate dictionary so they don't
        # need to be copied into the template directory
        if picpath not in imagefiledict:
            picrelid = 'rId' + str(len(relationshiplist) + 1)
            imagefiledict[picpath] = picrelid

            relationshiplist.append([
                'http://schemas.openxmlformats.org/officeDocument/2006/relat'
                'ionships/image',
                'media/%s_%s' % (picrelid, basename(picpath))
            ])
        else:
            # Image already registered: reuse its relationship id.
            picrelid = imagefiledict[picpath]
    else:
        # Copy files into template directory for backwards compatibility
        # Images still accumulate in the template directory this way
        picrelid = 'rId' + str(len(relationshiplist) + 1)

        relationshiplist.append([
            'http://schemas.openxmlformats.org/officeDocument/2006/relations'
            'hips/image', 'media/' + picname
        ])

        media_dir = join(template_dir, 'word', 'media')
        if not os.path.isdir(media_dir):
            os.mkdir(media_dir)
        shutil.copyfile(picname, join(media_dir, picname))

    image = Image.open(picpath)

    # Extract EXIF data, if available
    try:
        exif = image._getexif()
        exif = {} if exif is None else exif
    except:
        # NOTE(review): bare except is a deliberate best-effort -- the image
        # is still usable without EXIF metadata.
        exif = {}

    imageExif = {}
    for tag, value in exif.items():
        imageExif[TAGS.get(tag, tag)] = value

    # EXIF Orientation (1-8) encodes camera rotation/mirroring; translate it
    # into a rotation angle plus horizontal/vertical flip flags.
    imageOrientation = imageExif.get('Orientation', 1)
    imageAngle = {
        1: 0, 2: 0, 3: 180, 4: 0, 5: 90, 6: 90, 7: 270, 8: 270
    }[imageOrientation]
    imageFlipH = 'true' if imageOrientation in (2, 5, 7) else 'false'
    imageFlipV = 'true' if imageOrientation == 4 else 'false'

    # Check if the user has specified a size
    if not pixelwidth or not pixelheight:
        # If not, get info from the picture itself
        pixelwidth, pixelheight = image.size[0:2]

        # Swap width and height if necessary (image rotated 90 or 270 deg)
        if imageOrientation in (5, 6, 7, 8):
            pixelwidth, pixelheight = pixelheight, pixelwidth

    # OpenXML measures on-screen objects in English Metric Units
    # 1cm = 36000 EMUs
    emuperpixel = 12700
    width = str(pixelwidth * emuperpixel)
    height = str(pixelheight * emuperpixel)

    # There are 3 main elements inside a picture
    # 1. The Blipfill - specifies how the image fills the picture area
    #    (stretch, tile, etc.)
    blipfill = makeelement('blipFill', nsprefix='pic')
    blipfill.append(makeelement('blip', nsprefix='a', attrnsprefix='r',
                                attributes={'embed': picrelid}))
    stretch = makeelement('stretch', nsprefix='a')
    stretch.append(makeelement('fillRect', nsprefix='a'))
    blipfill.append(makeelement('srcRect', nsprefix='a'))
    blipfill.append(stretch)

    # 2. The non visual picture properties
    nvpicpr = makeelement('nvPicPr', nsprefix='pic')
    cnvpr = makeelement(
        'cNvPr', nsprefix='pic',
        attributes={'id': '0', 'name': 'Picture 1', 'descr': picdescription}
    )
    nvpicpr.append(cnvpr)
    cnvpicpr = makeelement('cNvPicPr', nsprefix='pic')
    cnvpicpr.append(makeelement(
        'picLocks', nsprefix='a',
        attributes={'noChangeAspect': str(int(nochangeaspect)),
                    'noChangeArrowheads': str(int(nochangearrowheads))}))
    nvpicpr.append(cnvpicpr)

    # 3. The Shape properties
    sppr = makeelement('spPr', nsprefix='pic', attributes={'bwMode': 'auto'})
    # DrawingML expresses rotation in 60000ths of a degree.
    xfrm = makeelement(
        'xfrm', nsprefix='a', attributes={
            'rot': str(imageAngle * 60000), 'flipH': imageFlipH,
            'flipV': imageFlipV
        }
    )
    xfrm.append(
        makeelement('off', nsprefix='a', attributes={'x': '0', 'y': '0'})
    )
    xfrm.append(
        makeelement(
            'ext', nsprefix='a', attributes={'cx': width, 'cy': height}
        )
    )
    prstgeom = makeelement(
        'prstGeom', nsprefix='a', attributes={'prst': 'rect'}
    )
    prstgeom.append(makeelement('avLst', nsprefix='a'))
    sppr.append(xfrm)
    sppr.append(prstgeom)

    # Add our 3 parts to the picture element
    pic = makeelement('pic', nsprefix='pic')
    pic.append(nvpicpr)
    pic.append(blipfill)
    pic.append(sppr)

    # Now make the supporting elements
    # The following sequence is just: make element, then add its children
    graphicdata = makeelement(
        'graphicData', nsprefix='a',
        attributes={'uri': ('http://schemas.openxmlformats.org/drawingml/200'
                            '6/picture')})
    graphicdata.append(pic)
    graphic = makeelement('graphic', nsprefix='a')
    graphic.append(graphicdata)

    framelocks = makeelement('graphicFrameLocks', nsprefix='a',
                             attributes={'noChangeAspect': '1'})
    framepr = makeelement('cNvGraphicFramePr', nsprefix='wp')
    framepr.append(framelocks)
    docpr = makeelement('docPr', nsprefix='wp',
                        attributes={'id': picid, 'name': 'Picture 1',
                                    'descr': picdescription})
    effectextent = makeelement('effectExtent', nsprefix='wp',
                               attributes={'l': '25400', 't': '0', 'r': '0',
                                           'b': '0'})
    extent = makeelement('extent', nsprefix='wp',
                         attributes={'cx': width, 'cy': height})
    inline = makeelement('inline', attributes={'distT': "0", 'distB': "0",
                                               'distL': "0", 'distR': "0"},
                         nsprefix='wp')
    inline.append(extent)
    inline.append(effectextent)
    inline.append(docpr)
    inline.append(framepr)
    inline.append(graphic)
    drawing = makeelement('drawing')
    drawing.append(inline)
    run = makeelement('r')
    run.append(drawing)
    paragraph = makeelement('p')
    paragraph.append(run)

    if imagefiledict is not None:
        return relationshiplist, paragraph, imagefiledict
    else:
        return relationshiplist, paragraph
def search(document, search):
    """
    Search a document for a regex; return True on the first match,
    False otherwise.

    @param document: etree of the document to scan
    @param str search: regular expression to look for in text elements
    @return bool: whether any w:t element's text matches

    Improvement: the original kept iterating the entire document after a
    match was found; this version short-circuits on the first hit.
    """
    searchre = re.compile(search)
    text_tag = '{%s}t' % nsprefixes['w']  # w:t (text) elements
    return any(
        element.text and searchre.search(element.text)
        for element in document.iter()
        if element.tag == text_tag
    )
def replace(document, search, replace):
    """
    Substitute every regex match of *search* with *replace* inside all
    w:t (text) elements, and return the (same, mutated) document.
    """
    pattern = re.compile(search)
    text_tag = '{%s}t' % nsprefixes['w']
    for node in document.iter():
        if node.tag != text_tag:
            continue
        if node.text and pattern.search(node.text):
            node.text = re.sub(search, replace, node.text)
    return document
def clean(document):
    """
    Remove empty w:t and w:r elements (no text, no children) from the
    document tree. Returns the cleaned document.
    """
    w_ns = nsprefixes['w']
    # Process text elements first, then runs, matching the original order.
    for short_tag in ('t', 'r'):
        full_tag = '{%s}%s' % (w_ns, short_tag)
        doomed = [el for el in document.iter()
                  if el.tag == full_tag and not el.text and not len(el)]
        for el in doomed:
            el.getparent().remove(el)
    return document
def findTypeParent(element, tag):
    """ Finds first parent of element of the given type

    @param object element: etree element whose ancestors are searched
    @param string tag: the (fully qualified) parent tag to search for
    @return object element: the found parent or None when not found

    Bug fix: the original looped unconditionally, so when the tree root was
    reached ``getparent()`` returned None and the subsequent ``p.tag``
    access raised AttributeError -- its ``return None`` was unreachable.
    """
    p = element.getparent()
    while p is not None:
        if p.tag == tag:
            return p
        p = p.getparent()
    # Not found
    return None
def advReplace(document, search, replace, bs=3):
    """
    Replace all occurrences of string with a different string, return updated
    document

    This is a modified version of python-docx.replace() that takes into
    account blocks of <bs> elements at a time. The replace element can also
    be a string or an xml etree element.

    What it does:
    It searches the entire document body for text blocks.
    Then it scans those text blocks for the search text.
    Since the text to search for could be spanned across multiple text
    blocks, we need to adopt some sort of algorithm to handle this
    situation. The smallest matching group of blocks (up to bs) is then
    adopted. If the matching group has more than one block, blocks other
    than the first are cleared and all the replacement text is put on the
    first block.

    Examples:
    original text blocks : [ 'Hel', 'lo,', ' world!' ]
    search / replace: 'Hello,' / 'Hi!'
    output blocks : [ 'Hi!', '', ' world!' ]

    original text blocks : [ 'Hel', 'lo,', ' world!' ]
    search / replace: 'Hello, world' / 'Hi!'
    output blocks : [ 'Hi!!', '', '' ]

    original text blocks : [ 'Hel', 'lo,', ' world!' ]
    search / replace: 'Hel' / 'Hal'
    output blocks : [ 'Hal', 'lo,', ' world!' ]

    @param instance document: The original document
    @param str search: The text to search for (regexp)
    @param mixed replace: The replacement text or lxml.etree element to
                          append, or a list of etree elements
    @param int bs: See above
    @return instance The document with replacement applied

    Bug fixes relative to the original:
    - the DEBUG branch called ``isinstance(replace(list, tuple))`` --
      invoking *replace* -- instead of testing ``isinstance(replace,
      (list, tuple))``;
    - the "Will replace with" debug message passed the preview as a lazy
      %%-argument without a placeholder, so it was never rendered.
    """
    # Enables debug output
    DEBUG = False
    newdocument = document
    # Compile the search regexp
    searchre = re.compile(search)
    # Will match against searchels. Searchels is a list that contains last
    # n text elements found in the document. 1 < n < bs
    searchels = []
    for element in newdocument.iter():
        if element.tag == '{%s}t' % nsprefixes['w']:  # t (text) elements
            if element.text:
                # Add this element to searchels
                searchels.append(element)
                if len(searchels) > bs:
                    # If searchels is too long, remove the first element
                    searchels.pop(0)
                # Search all combinations of searchels, starting from
                # smaller up to bigger ones
                # l = search length
                # s = search start
                # e = element IDs to merge
                found = False
                for l in range(1, len(searchels)+1):
                    if found:
                        break
                    for s in range(len(searchels)):
                        if found:
                            break
                        if s+l <= len(searchels):
                            e = range(s, s+l)
                            txtsearch = ''
                            for k in e:
                                txtsearch += searchels[k].text

                            # Search for the text in the whole txtsearch
                            match = searchre.search(txtsearch)
                            if match:
                                found = True

                                # I've found something :)
                                if DEBUG:
                                    log.debug("Found element!")
                                    log.debug("Search regexp: %s",
                                              searchre.pattern)
                                    log.debug("Requested replacement: %s",
                                              replace)
                                    log.debug("Matched text: %s", txtsearch)
                                    log.debug("Matched text (splitted): %s",
                                              map(lambda i: i.text, searchels))
                                    log.debug("Matched at position: %s",
                                              match.start())
                                    log.debug("matched in elements: %s", e)
                                    if isinstance(replace, etree._Element):
                                        log.debug("Will replace with XML CODE")
                                    elif isinstance(replace, (list, tuple)):
                                        log.debug("Will replace with LIST OF"
                                                  " ELEMENTS")
                                    else:
                                        log.debug("Will replace with: %s",
                                                  re.sub(search, replace,
                                                         txtsearch))

                                curlen = 0
                                replaced = False
                                for i in e:
                                    curlen += len(searchels[i].text)
                                    if curlen > match.start() and not replaced:
                                        # The match occurred in THIS element.
                                        # Put in the whole replaced text
                                        if isinstance(replace, etree._Element):
                                            # Convert to a list and process
                                            # it later
                                            replace = [replace]
                                        if isinstance(replace, (list, tuple)):
                                            # Replacing with a list of etree
                                            # elements: clear the text in the
                                            # tag and append the elements
                                            # after the parent paragraph
                                            # (because t elements cannot have
                                            # children)
                                            p = findTypeParent(
                                                searchels[i],
                                                '{%s}p' % nsprefixes['w'])
                                            searchels[i].text = re.sub(
                                                search, '', txtsearch)
                                            insindex = p.getparent().index(p)+1
                                            for r in replace:
                                                p.getparent().insert(
                                                    insindex, r)
                                                insindex += 1
                                        else:
                                            # Replacing with pure text
                                            searchels[i].text = re.sub(
                                                search, replace, txtsearch)
                                        replaced = True
                                        log.debug(
                                            "Replacing in element #: %s", i)
                                    else:
                                        # Clears the other text elements
                                        searchels[i].text = ''
    return newdocument
def getdocumenttext(document):
    '''Return the raw text of a document, as a list of paragraphs.'''
    w_ns = nsprefixes['w']
    para_tag = '{%s}p' % w_ns
    text_tag = '{%s}t' % w_ns
    tab_tag = '{%s}tab' % w_ns
    paratextlist = []
    # A single sentence may be spread over several w:t elements, so gather
    # every text fragment (and tab) under each paragraph and join them.
    for para in (el for el in document.iter() if el.tag == para_tag):
        fragments = []
        for child in para.iter():
            if child.tag == text_tag:
                if child.text:
                    fragments.append(child.text)
            elif child.tag == tab_tag:
                fragments.append('\t')
        paratext = u''.join(fragments)
        # Skip paragraphs that contained no text at all.
        if paratext:
            paratextlist.append(paratext)
    return paratextlist
def coreproperties(title, subject, creator, keywords, lastmodifiedby=None):
    """
    Create core properties (common document properties referred to in the
    'Dublin Core' specification). See appproperties() for other stuff.

    @param str title / subject / creator: document metadata strings
    @param iterable keywords: joined with ',' into cp:keywords
    @param str lastmodifiedby: defaults to *creator* when falsy
    @return: cp:coreProperties etree element
    """
    coreprops = makeelement('coreProperties', nsprefix='cp')
    # Simple child elements, in the order Word expects them.
    simple_props = [
        ('title', title, 'dc'),
        ('subject', subject, 'dc'),
        ('creator', creator, 'dc'),
        ('keywords', ','.join(keywords), 'cp'),
        ('lastModifiedBy', lastmodifiedby or creator, 'cp'),
        ('revision', '1', 'cp'),
        ('category', 'Examples', 'cp'),
        ('description', 'Examples', 'dc'),
    ]
    for tagname, text, prefix in simple_props:
        coreprops.append(makeelement(tagname, tagtext=text, nsprefix=prefix))
    currenttime = time.strftime('%Y-%m-%dT%H:%M:%SZ')
    # Document creation and modify times
    # Prob here: we have an attribute who name uses one namespace, and that
    # attribute's value uses another namespace.
    # We're creating the element from a string as a workaround...
    for doctime in ['created', 'modified']:
        elm_str = (
            '<dcterms:%s xmlns:xsi="http://www.w3.org/2001/XMLSchema-instanc'
            'e" xmlns:dcterms="http://purl.org/dc/terms/" xsi:type="dcterms:'
            'W3CDTF">%s</dcterms:%s>'
        ) % (doctime, currenttime, doctime)
        coreprops.append(etree.fromstring(elm_str))
    return coreprops
def appproperties():
    """
    Create app-specific (extended) properties. See docproperties() for more
    common document properties.

    @return: ep:Properties etree element pre-populated with the hard-coded
             statistics the original template shipped with.

    Fix: the original first built an element with makeelement() and then
    immediately discarded it by rebinding the name; that dead call is gone.
    """
    appprops = etree.fromstring(
        '<?xml version="1.0" encoding="UTF-8" standalone="yes"?><Properties x'
        'mlns="http://schemas.openxmlformats.org/officeDocument/2006/extended'
        '-properties" xmlns:vt="http://schemas.openxmlformats.org/officeDocum'
        'ent/2006/docPropsVTypes"></Properties>')
    props =\
        {'Template': 'Normal.dotm',
         'TotalTime': '6',
         'Pages': '1',
         'Words': '83',
         'Characters': '475',
         'Application': 'Microsoft Word 12.0.0',
         'DocSecurity': '0',
         'Lines': '12',
         'Paragraphs': '8',
         'ScaleCrop': 'false',
         'LinksUpToDate': 'false',
         'CharactersWithSpaces': '583',
         'SharedDoc': 'false',
         'HyperlinksChanged': 'false',
         'AppVersion': '12.0000'}
    # Iterate items() so key and value are unpacked together.
    for name, value in props.items():
        appprops.append(makeelement(name, tagtext=value, nsprefix=None))
    return appprops
def websettings():
    '''Generate the default word/webSettings.xml element.'''
    settings = makeelement('webSettings')
    for child_tag in ('allowPNG', 'doNotSaveAsSingleFile'):
        settings.append(makeelement(child_tag))
    return settings
def relationshiplist():
    '''Return the default package relationships as [type, target] pairs.'''
    base = ('http://schemas.openxmlformats.org/officeDocument/2006/'
            'relationships/')
    targets = [
        ('numbering', 'numbering.xml'),
        ('styles', 'styles.xml'),
        ('settings', 'settings.xml'),
        ('webSettings', 'webSettings.xml'),
        ('fontTable', 'fontTable.xml'),
        ('theme', 'theme/theme1.xml'),
    ]
    return [[base + reltype, target] for reltype, target in targets]
def wordrelationships(relationshiplist):
    """
    Generate a Word relationships file.

    @param list relationshiplist: [type, target] pairs as produced by
                                  relationshiplist()
    @return: Relationships etree element with one Relationship child per pair

    Idiom fix: replaced the manual counter with enumerate(start=1).
    """
    # FIXME: using string hack instead of making element
    # relationships = makeelement('Relationships', nsprefix='pr')
    relationships = etree.fromstring(
        '<Relationships xmlns="http://schemas.openxmlformats.org/package/2006'
        '/relationships"></Relationships>')
    # Relationship IDs (rId) start at 1.
    for count, relationship in enumerate(relationshiplist, start=1):
        rel_elm = makeelement('Relationship', nsprefix=None,
                              attributes={'Id': 'rId' + str(count),
                                          'Type': relationship[0],
                                          'Target': relationship[1]}
                              )
        relationships.append(rel_elm)
    return relationships
def savedocx(
        document, coreprops, appprops, contenttypes, websettings,
        wordrelationships, output, imagefiledict=None):
    """
    Save a modified document

    Serializes each part tree into a new .docx (zip) archive at *output*,
    then adds the supporting files from the template directory and, when
    *imagefiledict* is given, the referenced image files.

    @param document: word/document.xml etree
    @param coreprops: docProps/core.xml etree
    @param appprops: docProps/app.xml etree
    @param contenttypes: [Content_Types].xml etree
    @param websettings: word/webSettings.xml etree
    @param wordrelationships: word/_rels/document.xml.rels etree
    @param output: path (or file object) for the generated .docx
    @param dict imagefiledict: map of image path -> relationship id; when
                               given, images are zipped straight from their
                               original locations
    """
    if imagefiledict is None:
        warn(
            'Using savedocx() without imagefiledict parameter will be deprec'
            'ated in the future.', PendingDeprecationWarning
        )

    assert os.path.isdir(template_dir)
    docxfile = zipfile.ZipFile(
        output, mode='w', compression=zipfile.ZIP_DEFLATED)

    # Move to the template data path
    # NOTE(review): chdir is process-wide state; not safe under threads.
    prev_dir = os.path.abspath('.')  # save previous working dir
    os.chdir(template_dir)

    # Serialize our trees into out zip file
    treesandfiles = {
        document: 'word/document.xml',
        coreprops: 'docProps/core.xml',
        appprops: 'docProps/app.xml',
        contenttypes: '[Content_Types].xml',
        websettings: 'word/webSettings.xml',
        wordrelationships: 'word/_rels/document.xml.rels'
    }
    for tree in treesandfiles:
        log.info('Saving: %s' % treesandfiles[tree])
        treestring = etree.tostring(tree, pretty_print=True)
        docxfile.writestr(treesandfiles[tree], treestring)

    # Add & compress images, if applicable
    if imagefiledict is not None:
        for imagepath, picrelid in imagefiledict.items():
            archivename = 'word/media/%s_%s' % (picrelid, basename(imagepath))
            log.info('Saving: %s', archivename)
            docxfile.write(imagepath, archivename)

    # Add & compress support files
    files_to_ignore = ['.DS_Store']  # nuisance from some os's
    for dirpath, dirnames, filenames in os.walk('.'):
        for filename in filenames:
            if filename in files_to_ignore:
                continue
            templatefile = join(dirpath, filename)
            # Strip the leading './' from the walk path for the archive name.
            archivename = templatefile[2:]
            log.info('Saving: %s', archivename)
            docxfile.write(templatefile, archivename)
    log.info('Saved new file to: %r', output)
    docxfile.close()
    os.chdir(prev_dir)  # restore previous working dir
    return
|
mikemaccana/python-docx
|
docx.py
|
advReplace
|
python
|
def advReplace(document, search, replace, bs=3):
    """
    Replace all occurrences of *search* (a regexp) in *document*, matching
    across up to *bs* adjacent w:t elements at a time. *replace* may be a
    string, an lxml etree element, or a list of etree elements; when it is
    (or contains) elements they are inserted after the parent paragraph.
    Returns the updated document.

    Bug fixes relative to the original: the DEBUG branch called
    ``isinstance(replace(list, tuple))`` -- invoking *replace* -- instead of
    testing its type, and the "Will replace with" debug message lacked a
    %%s placeholder so its argument was never rendered.
    """
    # Enables debug output
    DEBUG = False
    newdocument = document
    # Compile the search regexp
    searchre = re.compile(search)
    # Will match against searchels. Searchels is a list that contains last
    # n text elements found in the document. 1 < n < bs
    searchels = []
    for element in newdocument.iter():
        if element.tag == '{%s}t' % nsprefixes['w']:  # t (text) elements
            if element.text:
                # Add this element to searchels
                searchels.append(element)
                if len(searchels) > bs:
                    # If searchels is too long, remove the first element
                    searchels.pop(0)
                # Search all combinations of searchels, starting from
                # smaller up to bigger ones
                # l = search length
                # s = search start
                # e = element IDs to merge
                found = False
                for l in range(1, len(searchels)+1):
                    if found:
                        break
                    for s in range(len(searchels)):
                        if found:
                            break
                        if s+l <= len(searchels):
                            e = range(s, s+l)
                            txtsearch = ''
                            for k in e:
                                txtsearch += searchels[k].text
                            # Search for the text in the whole txtsearch
                            match = searchre.search(txtsearch)
                            if match:
                                found = True
                                if DEBUG:
                                    log.debug("Found element!")
                                    log.debug("Search regexp: %s",
                                              searchre.pattern)
                                    log.debug("Requested replacement: %s",
                                              replace)
                                    log.debug("Matched text: %s", txtsearch)
                                    log.debug("Matched text (splitted): %s",
                                              map(lambda i: i.text, searchels))
                                    log.debug("Matched at position: %s",
                                              match.start())
                                    log.debug("matched in elements: %s", e)
                                    if isinstance(replace, etree._Element):
                                        log.debug("Will replace with XML CODE")
                                    elif isinstance(replace, (list, tuple)):
                                        log.debug("Will replace with LIST OF"
                                                  " ELEMENTS")
                                    else:
                                        log.debug("Will replace with: %s",
                                                  re.sub(search, replace,
                                                         txtsearch))
                                curlen = 0
                                replaced = False
                                for i in e:
                                    curlen += len(searchels[i].text)
                                    if curlen > match.start() and not replaced:
                                        # The match occurred in THIS element.
                                        # Put in the whole replaced text
                                        if isinstance(replace, etree._Element):
                                            # Convert to a list and process
                                            # it later
                                            replace = [replace]
                                        if isinstance(replace, (list, tuple)):
                                            # Replacing with a list of etree
                                            # elements: clear the text in the
                                            # tag and insert the elements
                                            # after the parent paragraph
                                            # (because t elements cannot have
                                            # children)
                                            p = findTypeParent(
                                                searchels[i],
                                                '{%s}p' % nsprefixes['w'])
                                            searchels[i].text = re.sub(
                                                search, '', txtsearch)
                                            insindex = p.getparent().index(p)+1
                                            for r in replace:
                                                p.getparent().insert(
                                                    insindex, r)
                                                insindex += 1
                                        else:
                                            # Replacing with pure text
                                            searchels[i].text = re.sub(
                                                search, replace, txtsearch)
                                        replaced = True
                                        log.debug(
                                            "Replacing in element #: %s", i)
                                    else:
                                        # Clears the other text elements
                                        searchels[i].text = ''
    return newdocument
|
Replace all occurrences of a string with a different string, return the
updated document
This is a modified version of python-docx.replace() that takes into
account blocks of <bs> elements at a time. The replace element can also
be a string or an xml etree element.
What it does:
It searches the entire document body for text blocks.
Then it scans those text blocks for the search text.
Since the text to search for could be spanned across multiple text blocks,
we need to adopt some sort of algorithm to handle this situation.
The smallest matching group of blocks (up to bs) is then adopted.
If the matching group has more than one block, blocks other than first
are cleared and all the replacement text is put on first block.
Examples:
original text blocks : [ 'Hel', 'lo,', ' world!' ]
search / replace: 'Hello,' / 'Hi!'
output blocks : [ 'Hi!', '', ' world!' ]
original text blocks : [ 'Hel', 'lo,', ' world!' ]
search / replace: 'Hello, world' / 'Hi!'
output blocks : [ 'Hi!!', '', '' ]
original text blocks : [ 'Hel', 'lo,', ' world!' ]
search / replace: 'Hel' / 'Hal'
output blocks : [ 'Hal', 'lo,', ' world!' ]
@param instance document: The original document
@param str search: The text to search for (regexp)
@param mixed replace: The replacement text or lxml.etree element to
append, or a list of etree elements
@param int bs: See above
@return instance The document with replacement applied
|
train
|
https://github.com/mikemaccana/python-docx/blob/4c9b46dbebe3d2a9b82dbcd35af36584a36fd9fe/docx.py#L759-L907
| null |
# encoding: utf-8
"""
Open and modify Microsoft Word 2007 docx files (called 'OpenXML' and
'Office OpenXML' by Microsoft)
Part of Python's docx module - http://github.com/mikemaccana/python-docx
See LICENSE for licensing information.
"""
import os
import re
import time
import shutil
import zipfile
from lxml import etree
from os.path import abspath, basename, join
try:
from PIL import Image
except ImportError:
import Image
try:
from PIL.ExifTags import TAGS
except ImportError:
TAGS = {}
from exceptions import PendingDeprecationWarning
from warnings import warn
import logging
log = logging.getLogger(__name__)
# Record template directory's location which is just 'template' for a docx
# developer or 'site-packages/docx-template' if you have installed docx
template_dir = join(os.path.dirname(__file__), 'docx-template')  # installed
if not os.path.isdir(template_dir):
    template_dir = join(os.path.dirname(__file__), 'template')  # dev
# All Word prefixes / namespace matches used in document.xml & core.xml.
# LXML doesn't actually use prefixes (just the real namespace), but these
# make it easier to copy Word output more easily.
# Maps short prefix -> full namespace URI; consumed by makeelement() and
# the '{%s}tag' % nsprefixes[...] lookups throughout this module.
nsprefixes = {
    'mo': 'http://schemas.microsoft.com/office/mac/office/2008/main',
    'o': 'urn:schemas-microsoft-com:office:office',
    've': 'http://schemas.openxmlformats.org/markup-compatibility/2006',
    # Text Content
    'w': 'http://schemas.openxmlformats.org/wordprocessingml/2006/main',
    'w10': 'urn:schemas-microsoft-com:office:word',
    'wne': 'http://schemas.microsoft.com/office/word/2006/wordml',
    # Drawing
    'a': 'http://schemas.openxmlformats.org/drawingml/2006/main',
    'm': 'http://schemas.openxmlformats.org/officeDocument/2006/math',
    'mv': 'urn:schemas-microsoft-com:mac:vml',
    'pic': 'http://schemas.openxmlformats.org/drawingml/2006/picture',
    'v': 'urn:schemas-microsoft-com:vml',
    'wp': ('http://schemas.openxmlformats.org/drawingml/2006/wordprocessing'
           'Drawing'),
    # Properties (core and extended)
    'cp': ('http://schemas.openxmlformats.org/package/2006/metadata/core-pr'
           'operties'),
    'dc': 'http://purl.org/dc/elements/1.1/',
    'ep': ('http://schemas.openxmlformats.org/officeDocument/2006/extended-'
           'properties'),
    'xsi': 'http://www.w3.org/2001/XMLSchema-instance',
    # Content Types
    'ct': 'http://schemas.openxmlformats.org/package/2006/content-types',
    # Package Relationships
    'r': ('http://schemas.openxmlformats.org/officeDocument/2006/relationsh'
          'ips'),
    'pr': 'http://schemas.openxmlformats.org/package/2006/relationships',
    # Dublin Core document properties
    'dcmitype': 'http://purl.org/dc/dcmitype/',
    'dcterms': 'http://purl.org/dc/terms/'}
def opendocx(file):
    """
    Open a docx file, return a document XML tree.

    @param file: path or file-like object of the .docx package
    @return: etree of word/document.xml

    Fix: the ZipFile handle was previously never closed (resource leak);
    it is now released via a context manager once document.xml is read.
    """
    with zipfile.ZipFile(file) as mydoc:
        xmlcontent = mydoc.read('word/document.xml')
    return etree.fromstring(xmlcontent)
def newdocument():
    '''Create a minimal document tree: a w:document holding an empty body.'''
    body = makeelement('body')
    document = makeelement('document')
    document.append(body)
    return document
def makeelement(tagname, tagtext=None, nsprefix='w', attributes=None,
                attrnsprefix=None):
    """
    Create and return an etree element.

    @param str tagname: local tag name
    @param str tagtext: optional text content
    @param nsprefix: key into nsprefixes (or a list of keys); None for no
                     namespace
    @param dict attributes: attribute name -> value pairs
    @param str attrnsprefix: nsprefixes key used to qualify attribute names
    """
    namespacemap = None
    if isinstance(nsprefix, list):
        # Deal with a list of prefixes by building an nsmap for lxml.
        namespacemap = {prefix: nsprefixes[prefix] for prefix in nsprefix}
        # FIXME: rest of code below expects a single prefix
        nsprefix = nsprefix[0]
    namespace = '{%s}' % nsprefixes[nsprefix] if nsprefix else ''
    newelement = etree.Element(namespace + tagname, nsmap=namespacemap)
    if attributes:
        if attrnsprefix:
            attributenamespace = '{%s}' % nsprefixes[attrnsprefix]
        elif nsprefix == 'w':
            # Quick hack: it seems every element that has a 'w' nsprefix for
            # its tag uses the same prefix for its attributes.
            attributenamespace = namespace
        else:
            # No attribute namespace requested or implied.
            attributenamespace = ''
        for attr_name, attr_value in attributes.items():
            newelement.set(attributenamespace + attr_name, attr_value)
    if tagtext:
        newelement.text = tagtext
    return newelement
def pagebreak(type='page', orient='portrait'):
    """Insert a break, default 'page'.
    See http://openxmldeveloper.org/forums/thread/4075.aspx
    Return our page break element.

    @param str type: 'page' or 'section'
    @param str orient: for section breaks, 'portrait' or 'landscape'
    @raise ValueError: on an unknown *type* or *orient*

    Fix: an unknown *orient* previously fell through the if/elif chain and
    raised NameError on the unbound pgSz below; it now raises ValueError.
    """
    # Need to enumerate different types of page breaks.
    validtypes = ['page', 'section']
    if type not in validtypes:
        tmpl = 'Page break style "%s" not implemented. Valid styles: %s.'
        raise ValueError(tmpl % (type, validtypes))

    pagebreak = makeelement('p')

    if type == 'page':
        run = makeelement('r')
        br = makeelement('br', attributes={'type': type})
        run.append(br)
        pagebreak.append(run)
    elif type == 'section':
        pPr = makeelement('pPr')
        sectPr = makeelement('sectPr')
        if orient == 'portrait':
            pgSz = makeelement('pgSz', attributes={'w': '12240', 'h': '15840'})
        elif orient == 'landscape':
            pgSz = makeelement('pgSz', attributes={'h': '12240', 'w': '15840',
                                                   'orient': 'landscape'})
        else:
            raise ValueError(
                'Invalid orientation "%s"; expected "portrait" or '
                '"landscape".' % orient)
        sectPr.append(pgSz)
        pPr.append(sectPr)
        pagebreak.append(pPr)
    return pagebreak
def paragraph(paratext, style='BodyText', breakbefore=False, jc='left'):
    """
    Return a new paragraph element containing *paratext*. The paragraph's
    default style is 'Body Text', but a new style may be set using the
    *style* parameter.

    @param string jc: Paragraph alignment, possible values:
                      left, center, right, both (justified), ...
                      see http://www.schemacentral.com/sc/ooxml/t-w_ST_Jc.html
                      for a full list
    @param bool breakbefore: insert a lastRenderedPageBreak marker before
                             each run's text

    If *paratext* is a list, add a run for each (text, char_format_str)
    2-tuple in the list. char_format_str is a string containing one or more
    of the characters 'b', 'i', or 'u', meaning bold, italic, and underline
    respectively. For example:

        paratext = [
            ('some bold text', 'b'),
            ('some normal text', ''),
            ('some italic underlined text', 'iu')
        ]
    """
    # Make our elements
    paragraph = makeelement('p')
    # Normalize a plain string into a one-element list of (text, fmt).
    if not isinstance(paratext, list):
        paratext = [(paratext, '')]
    text_tuples = []
    for pt in paratext:
        text, char_styles_str = (pt if isinstance(pt, (list, tuple))
                                 else (pt, ''))
        text_elm = makeelement('t', tagtext=text)
        # Preserve leading/trailing whitespace (which XML would otherwise
        # collapse) with xml:space="preserve".
        if len(text.strip()) < len(text):
            text_elm.set('{http://www.w3.org/XML/1998/namespace}space',
                         'preserve')
        text_tuples.append([text_elm, char_styles_str])
    pPr = makeelement('pPr')
    pStyle = makeelement('pStyle', attributes={'val': style})
    pJc = makeelement('jc', attributes={'val': jc})
    pPr.append(pStyle)
    pPr.append(pJc)

    # Add the text to the run, and the run to the paragraph
    paragraph.append(pPr)
    for text_elm, char_styles_str in text_tuples:
        run = makeelement('r')
        rPr = makeelement('rPr')
        # Apply styles
        if 'b' in char_styles_str:
            b = makeelement('b')
            rPr.append(b)
        if 'i' in char_styles_str:
            i = makeelement('i')
            rPr.append(i)
        if 'u' in char_styles_str:
            u = makeelement('u', attributes={'val': 'single'})
            rPr.append(u)
        run.append(rPr)
        # Insert lastRenderedPageBreak for assistive technologies like
        # document narrators to know when a page break occurred.
        if breakbefore:
            lastRenderedPageBreak = makeelement('lastRenderedPageBreak')
            run.append(lastRenderedPageBreak)
        run.append(text_elm)
        paragraph.append(run)
    # Return the combined paragraph
    return paragraph
def contenttypes():
    '''Build and return the [Content_Types].xml tree for the package.'''
    types = etree.fromstring(
        '<Types xmlns="http://schemas.openxmlformats.org/package/2006/conten'
        't-types"></Types>')
    # Explicit per-part content types.
    parts = {
        '/word/theme/theme1.xml':
            'application/vnd.openxmlformats-officedocument.theme+xml',
        '/word/fontTable.xml':
            'application/vnd.openxmlformats-officedocument.wordprocessingml'
            '.fontTable+xml',
        '/docProps/core.xml':
            'application/vnd.openxmlformats-package.core-properties+xml',
        '/docProps/app.xml':
            'application/vnd.openxmlformats-officedocument.extended-propert'
            'ies+xml',
        '/word/document.xml':
            'application/vnd.openxmlformats-officedocument.wordprocessingml'
            '.document.main+xml',
        '/word/settings.xml':
            'application/vnd.openxmlformats-officedocument.wordprocessingml'
            '.settings+xml',
        '/word/numbering.xml':
            'application/vnd.openxmlformats-officedocument.wordprocessingml'
            '.numbering+xml',
        '/word/styles.xml':
            'application/vnd.openxmlformats-officedocument.wordprocessingml'
            '.styles+xml',
        '/word/webSettings.xml':
            'application/vnd.openxmlformats-officedocument.wordprocessingml'
            '.webSettings+xml'}
    for partname, contenttype in parts.items():
        types.append(makeelement('Override', nsprefix=None,
                                 attributes={'PartName': partname,
                                             'ContentType': contenttype}))
    # Default content types keyed on file extension.
    filetypes = {
        'gif': 'image/gif',
        'jpeg': 'image/jpeg',
        'jpg': 'image/jpeg',
        'png': 'image/png',
        'rels': 'application/vnd.openxmlformats-package.relationships+xml',
        'xml': 'application/xml'
    }
    for extension, contenttype in filetypes.items():
        types.append(makeelement('Default', nsprefix=None,
                                 attributes={'Extension': extension,
                                             'ContentType': contenttype}))
    return types
def heading(headingtext, headinglevel, lang='en'):
    '''Make a new heading, return the heading element'''
    # Style-name stems per supported language.
    style_stems = {'en': 'Heading', 'it': 'Titolo'}
    heading_para = makeelement('p')
    props = makeelement('pPr')
    props.append(makeelement(
        'pStyle', attributes={'val': style_stems[lang] + str(headinglevel)}))
    heading_para.append(props)
    run = makeelement('r')
    run.append(makeelement('t', tagtext=headingtext))
    heading_para.append(run)
    return heading_para
def table(contents, heading=True, colw=None, cwunit='dxa', tblw=0,
          twunit='auto', borders={}, celstyle=None):
    """
    Return a table element based on specified parameters
    @param list contents: A list of lists describing contents. Every item in
                          the list can be a string or a valid XML element
                          itself. It can also be a list. In that case all the
                          listed elements will be merged into the cell.
    @param bool heading:  Tells whether first line should be treated as
                          heading or not
    @param list colw:     list of integer column widths specified in wunitS.
    @param str cwunit:    Unit used for column width:
                            'pct'  : fiftieths of a percent
                            'dxa'  : twentieths of a point
                            'nil'  : no width
                            'auto' : automagically determined
    @param int tblw:      Table width
    @param str twunit:    Unit used for table width. Same possible values as
                          cwunit.
    @param dict borders:  Dictionary defining table border. Supported keys
                          are: 'top', 'left', 'bottom', 'right',
                          'insideH', 'insideV', 'all'.
                          When specified, the 'all' key has precedence over
                          others. Each key must define a dict of border
                          attributes:
                            color : The color of the border, in hex or
                                    'auto'
                            space : The space, measured in points
                            sz    : The size of the border, in eighths of
                                    a point
                            val   : The style of the border, see
                                    http://www.schemacentral.com/sc/ooxml/t-w_ST_Border.htm
    @param list celstyle: Specify the style for each column, list of dicts.
                          supported keys:
                          'align' : specify the alignment, see paragraph
                                    documentation.
    @return lxml.etree:   Generated XML etree element
    """
    table = makeelement('tbl')
    # Column count comes from the first row; assumes all rows have the same
    # length — TODO confirm callers guarantee this.
    columns = len(contents[0])
    # Table properties
    tableprops = makeelement('tblPr')
    tablestyle = makeelement('tblStyle', attributes={'val': ''})
    tableprops.append(tablestyle)
    tablewidth = makeelement(
        'tblW', attributes={'w': str(tblw), 'type': str(twunit)})
    tableprops.append(tablewidth)
    if len(borders.keys()):
        tableborders = makeelement('tblBorders')
        for b in ['top', 'left', 'bottom', 'right', 'insideH', 'insideV']:
            if b in borders.keys() or 'all' in borders.keys():
                # 'all' overrides any per-side border definition.
                k = 'all' if 'all' in borders.keys() else b
                attrs = {}
                for a in borders[k].keys():
                    # NOTE(review): unicode() is Python 2 only; this module
                    # targets Python 2 (u'' literals appear elsewhere).
                    attrs[a] = unicode(borders[k][a])
                borderelem = makeelement(b, attributes=attrs)
                tableborders.append(borderelem)
        tableprops.append(tableborders)
    tablelook = makeelement('tblLook', attributes={'val': '0400'})
    tableprops.append(tablelook)
    table.append(tableprops)
    # Table Grid: one gridCol per column, with an arbitrary default width
    # of 2390 dxa when no explicit widths were given.
    tablegrid = makeelement('tblGrid')
    for i in range(columns):
        attrs = {'w': str(colw[i]) if colw else '2390'}
        tablegrid.append(makeelement('gridCol', attributes=attrs))
    table.append(tablegrid)
    # Heading Row
    row = makeelement('tr')
    rowprops = makeelement('trPr')
    cnfStyle = makeelement('cnfStyle', attributes={'val': '000000100000'})
    rowprops.append(cnfStyle)
    row.append(rowprops)
    if heading:
        i = 0
        # NOTE(review): this loop rebinds 'heading' to each cell of the
        # first row, so the '1 if heading else 0' below actually tests the
        # truthiness of the LAST cell of row 0, not the original flag.
        for heading in contents[0]:
            cell = makeelement('tc')
            # Cell properties
            cellprops = makeelement('tcPr')
            if colw:
                wattr = {'w': str(colw[i]), 'type': cwunit}
            else:
                wattr = {'w': '0', 'type': 'auto'}
            cellwidth = makeelement('tcW', attributes=wattr)
            cellstyle = makeelement('shd', attributes={'val': 'clear',
                                                       'color': 'auto',
                                                       'fill': 'FFFFFF',
                                                       'themeFill': 'text2',
                                                       'themeFillTint': '99'})
            cellprops.append(cellwidth)
            cellprops.append(cellstyle)
            cell.append(cellprops)
            # Paragraph (Content): strings become centered paragraphs,
            # ready-made etree elements are appended as-is.
            if not isinstance(heading, (list, tuple)):
                heading = [heading]
            for h in heading:
                if isinstance(h, etree._Element):
                    cell.append(h)
                else:
                    cell.append(paragraph(h, jc='center'))
            row.append(cell)
            i += 1
        table.append(row)
    # Contents Rows
    for contentrow in contents[1 if heading else 0:]:
        row = makeelement('tr')
        i = 0
        for content in contentrow:
            cell = makeelement('tc')
            # Properties
            cellprops = makeelement('tcPr')
            if colw:
                wattr = {'w': str(colw[i]), 'type': cwunit}
            else:
                wattr = {'w': '0', 'type': 'auto'}
            cellwidth = makeelement('tcW', attributes=wattr)
            cellprops.append(cellwidth)
            cell.append(cellprops)
            # Paragraph (Content)
            if not isinstance(content, (list, tuple)):
                content = [content]
            for c in content:
                if isinstance(c, etree._Element):
                    cell.append(c)
                else:
                    # Per-column alignment from celstyle, defaulting left.
                    if celstyle and 'align' in celstyle[i].keys():
                        align = celstyle[i]['align']
                    else:
                        align = 'left'
                    cell.append(paragraph(c, jc=align))
            row.append(cell)
            i += 1
        table.append(row)
    return table
def picture(
        relationshiplist, picname, picdescription, pixelwidth=None,
        pixelheight=None, nochangeaspect=True, nochangearrowheads=True,
        imagefiledict=None):
    """
    Take a relationshiplist, picture file name, and return a paragraph
    containing the image and an updated relationshiplist

    @param list relationshiplist: [type, target] pairs; a new image
                                  relationship is appended when needed
    @param str picname:           path to the image file
    @param str picdescription:    alt-text / description for the picture
    @param int pixelwidth:        forced width in pixels (read from image
                                  when omitted)
    @param int pixelheight:       forced height in pixels (read from image
                                  when omitted)
    @param dict imagefiledict:    optional {abspath: rId} map; when given,
                                  files are tracked here instead of being
                                  copied into the template directory
    @return: (relationshiplist, paragraph) or, when imagefiledict was
             passed, (relationshiplist, paragraph, imagefiledict)
    """
    if imagefiledict is None:
        warn(
            'Using picture() without imagefiledict parameter will be depreca'
            'ted in the future.', PendingDeprecationWarning
        )

    # http://openxmldeveloper.org/articles/462.aspx
    # Create an image. Size may be specified, otherwise it will based on the
    # pixel size of image. Return a paragraph containing the picture
    # Set relationship ID to that of the image or the first available one
    # NOTE(review): picid (the wp:docPr id) is hard-coded to '2' for every
    # picture — confirm Word tolerates duplicate ids in one document.
    picid = '2'
    picpath = abspath(picname)

    if imagefiledict is not None:
        # Keep track of the image files in a separate dictionary so they don't
        # need to be copied into the template directory
        if picpath not in imagefiledict:
            picrelid = 'rId' + str(len(relationshiplist) + 1)
            imagefiledict[picpath] = picrelid

            relationshiplist.append([
                'http://schemas.openxmlformats.org/officeDocument/2006/relat'
                'ionships/image',
                'media/%s_%s' % (picrelid, basename(picpath))
            ])
        else:
            # Image already registered: reuse its relationship id.
            picrelid = imagefiledict[picpath]
    else:
        # Copy files into template directory for backwards compatibility
        # Images still accumulate in the template directory this way
        picrelid = 'rId' + str(len(relationshiplist) + 1)

        relationshiplist.append([
            'http://schemas.openxmlformats.org/officeDocument/2006/relations'
            'hips/image', 'media/' + picname
        ])

        media_dir = join(template_dir, 'word', 'media')
        if not os.path.isdir(media_dir):
            os.mkdir(media_dir)
        shutil.copyfile(picname, join(media_dir, picname))

    image = Image.open(picpath)

    # Extract EXIF data, if available
    # NOTE(review): bare except deliberately swallows any PIL failure and
    # falls back to "no EXIF"; best-effort by design.
    try:
        exif = image._getexif()
        exif = {} if exif is None else exif
    except:
        exif = {}

    imageExif = {}
    for tag, value in exif.items():
        imageExif[TAGS.get(tag, tag)] = value

    # Map the EXIF orientation flag (1-8) to a rotation angle and flips so
    # the picture displays upright.
    imageOrientation = imageExif.get('Orientation', 1)
    imageAngle = {
        1: 0, 2: 0, 3: 180, 4: 0, 5: 90, 6: 90, 7: 270, 8: 270
    }[imageOrientation]
    imageFlipH = 'true' if imageOrientation in (2, 5, 7) else 'false'
    imageFlipV = 'true' if imageOrientation == 4 else 'false'

    # Check if the user has specified a size
    if not pixelwidth or not pixelheight:
        # If not, get info from the picture itself
        pixelwidth, pixelheight = image.size[0:2]

    # Swap width and height if necessary (rotated orientations)
    if imageOrientation in (5, 6, 7, 8):
        pixelwidth, pixelheight = pixelheight, pixelwidth

    # OpenXML measures on-screen objects in English Metric Units
    # 12700 EMUs = 1 point; each pixel is treated as one point
    # (i.e. 72 dpi) — TODO confirm this is the intended scaling.
    emuperpixel = 12700
    width = str(pixelwidth * emuperpixel)
    height = str(pixelheight * emuperpixel)

    # There are 3 main elements inside a picture
    # 1. The Blipfill - specifies how the image fills the picture area
    #    (stretch, tile, etc.)
    blipfill = makeelement('blipFill', nsprefix='pic')
    blipfill.append(makeelement('blip', nsprefix='a', attrnsprefix='r',
                    attributes={'embed': picrelid}))
    stretch = makeelement('stretch', nsprefix='a')
    stretch.append(makeelement('fillRect', nsprefix='a'))
    blipfill.append(makeelement('srcRect', nsprefix='a'))
    blipfill.append(stretch)

    # 2. The non visual picture properties
    nvpicpr = makeelement('nvPicPr', nsprefix='pic')
    cnvpr = makeelement(
        'cNvPr', nsprefix='pic',
        attributes={'id': '0', 'name': 'Picture 1', 'descr': picdescription}
    )
    nvpicpr.append(cnvpr)
    cnvpicpr = makeelement('cNvPicPr', nsprefix='pic')
    cnvpicpr.append(makeelement(
        'picLocks', nsprefix='a',
        attributes={'noChangeAspect': str(int(nochangeaspect)),
                    'noChangeArrowheads': str(int(nochangearrowheads))}))
    nvpicpr.append(cnvpicpr)

    # 3. The Shape properties (placement, rotation, flips, geometry)
    sppr = makeelement('spPr', nsprefix='pic', attributes={'bwMode': 'auto'})
    # Rotation is expressed in 60000ths of a degree.
    xfrm = makeelement(
        'xfrm', nsprefix='a', attributes={
            'rot': str(imageAngle * 60000), 'flipH': imageFlipH,
            'flipV': imageFlipV
        }
    )
    xfrm.append(
        makeelement('off', nsprefix='a', attributes={'x': '0', 'y': '0'})
    )
    xfrm.append(
        makeelement(
            'ext', nsprefix='a', attributes={'cx': width, 'cy': height}
        )
    )
    prstgeom = makeelement(
        'prstGeom', nsprefix='a', attributes={'prst': 'rect'}
    )
    prstgeom.append(makeelement('avLst', nsprefix='a'))
    sppr.append(xfrm)
    sppr.append(prstgeom)

    # Add our 3 parts to the picture element
    pic = makeelement('pic', nsprefix='pic')
    pic.append(nvpicpr)
    pic.append(blipfill)
    pic.append(sppr)

    # Now make the supporting elements
    # The following sequence is just: make element, then add its children
    graphicdata = makeelement(
        'graphicData', nsprefix='a',
        attributes={'uri': ('http://schemas.openxmlformats.org/drawingml/200'
                            '6/picture')})
    graphicdata.append(pic)
    graphic = makeelement('graphic', nsprefix='a')
    graphic.append(graphicdata)

    framelocks = makeelement('graphicFrameLocks', nsprefix='a',
                             attributes={'noChangeAspect': '1'})
    framepr = makeelement('cNvGraphicFramePr', nsprefix='wp')
    framepr.append(framelocks)
    docpr = makeelement('docPr', nsprefix='wp',
                        attributes={'id': picid, 'name': 'Picture 1',
                                    'descr': picdescription})
    effectextent = makeelement('effectExtent', nsprefix='wp',
                               attributes={'l': '25400', 't': '0', 'r': '0',
                                           'b': '0'})
    extent = makeelement('extent', nsprefix='wp',
                         attributes={'cx': width, 'cy': height})
    inline = makeelement('inline', attributes={'distT': "0", 'distB': "0",
                                               'distL': "0", 'distR': "0"},
                         nsprefix='wp')
    inline.append(extent)
    inline.append(effectextent)
    inline.append(docpr)
    inline.append(framepr)
    inline.append(graphic)
    drawing = makeelement('drawing')
    drawing.append(inline)
    run = makeelement('r')
    run.append(drawing)
    paragraph = makeelement('p')
    paragraph.append(run)

    if imagefiledict is not None:
        return relationshiplist, paragraph, imagefiledict
    else:
        return relationshiplist, paragraph
def search(document, search):
    '''Search a document for a regex; return True if found, else False.

    @param document: etree element tree of the document body
    @param str search: regular expression to look for in text runs
    @return bool: True as soon as any w:t element's text matches
    '''
    searchre = re.compile(search)
    for element in document.iter():
        if element.tag == '{%s}t' % nsprefixes['w']:  # t (text) elements
            if element.text and searchre.search(element.text):
                # Short-circuit: the original kept scanning the whole
                # document after the first hit for no benefit.
                return True
    return False
def replace(document, search, replace):
    """
    Replace all occurences of string with a different string, return updated
    document

    @param document: etree element tree of the document body (modified
                     in place and also returned)
    @param str search: regular expression to match in text runs
    @param str replace: replacement text (may use backreferences)
    """
    newdocument = document
    searchre = re.compile(search)
    for element in newdocument.iter():
        if element.tag == '{%s}t' % nsprefixes['w']:  # t (text) elements
            if element.text:
                if searchre.search(element.text):
                    # Reuse the compiled pattern; the original called
                    # re.sub(search, ...) which recompiled the pattern for
                    # every matching element.
                    element.text = searchre.sub(replace, element.text)
    return newdocument
def clean(document):
    """Strip empty text (t) and run (r) elements out of *document*.

    An element counts as empty when it has neither text nor children.
    Returns the cleaned document.
    """
    for tagname in ('t', 'r'):
        qualified = '{%s}%s' % (nsprefixes['w'], tagname)
        # Collect first, then remove — mutating while iterating the tree
        # would skip elements.
        empties = [el for el in document.iter()
                   if el.tag == qualified and not el.text and not len(el)]
        for el in empties:
            el.getparent().remove(el)
    return document
def findTypeParent(element, tag):
    """ Finds first parent of element of the given type

    @param object element: etree element
    @param string tag: the tag of the parent to search for
    @return object element: the found parent or None when not found
    """
    p = element.getparent()
    while p is not None:
        if p.tag == tag:
            return p
        p = p.getparent()
    # Walked past the document root without finding a matching ancestor.
    # (The original dereferenced None.tag here and raised AttributeError
    # instead of returning None as documented.)
    return None
def AdvSearch(document, search, bs=3):
    '''Return set of all regex matches

    This is an advanced version of python-docx.search() that takes into
    account blocks of <bs> elements at a time.

    What it does:
    It searches the entire document body for text blocks.
    Since the text to search could be spawned across multiple text blocks,
    we need to adopt some sort of algorithm to handle this situation.
    The smaller matching group of blocks (up to bs) is then adopted.
    If the matching group has more than one block, blocks other than first
    are cleared and all the replacement text is put on first block.

    Examples:
    original text blocks : [ 'Hel', 'lo,', ' world!' ]
    search : 'Hello,'
    output blocks : [ 'Hello,' ]

    original text blocks : [ 'Hel', 'lo', ' __', 'name', '__!' ]
    search : '(__[a-z]+__)'
    output blocks : [ '__name__' ]

    @param instance document: The original document
    @param str search: The text to search for (regexp)
                       append, or a list of etree elements
    @param int bs: See above
    @return set All occurences of search string
    '''
    # Compile the search regexp
    searchre = re.compile(search)
    matches = []

    # Will match against searchels. Searchels is a list that contains last
    # n text elements found in the document. 1 < n < bs
    searchels = []

    for element in document.iter():
        if element.tag == '{%s}t' % nsprefixes['w']:  # t (text) elements
            if element.text:
                # Add this element to searchels
                searchels.append(element)
                if len(searchels) > bs:
                    # If searchels is too long, remove the first element
                    # so it acts as a sliding window of at most bs blocks.
                    searchels.pop(0)

                # Search all combinations, of searchels, starting from
                # smaller up to bigger ones
                # l = search length (number of adjacent blocks merged)
                # s = search start (index of the first merged block)
                # e = element IDs to merge
                found = False
                for l in range(1, len(searchels)+1):
                    if found:
                        break
                    for s in range(len(searchels)):
                        if found:
                            break
                        if s+l <= len(searchels):
                            e = range(s, s+l)
                            txtsearch = ''
                            for k in e:
                                txtsearch += searchels[k].text

                            # Search for the pattern in the merged text
                            match = searchre.search(txtsearch)
                            if match:
                                matches.append(match.group())
                                found = True
    return set(matches)
def getdocumenttext(document):
    '''Return the raw text of a document, as a list of paragraphs.'''
    tag_p = '{' + nsprefixes['w'] + '}p'
    tag_t = '{' + nsprefixes['w'] + '}t'
    tag_tab = '{' + nsprefixes['w'] + '}tab'
    paratextlist = []
    # Walk every paragraph; a single sentence might be spread over several
    # text elements, so gather all t children (and tabs) per paragraph.
    for para in document.iter():
        if para.tag != tag_p:
            continue
        pieces = []
        for child in para.iter():
            if child.tag == tag_t:
                if child.text:
                    pieces.append(child.text)
            elif child.tag == tag_tab:
                # Explicit tab elements render as literal tab characters.
                pieces.append('\t')
        paratext = u''.join(pieces)
        # Skip paragraphs that contained no text at all.
        if paratext:
            paratextlist.append(paratext)
    return paratextlist
def coreproperties(title, subject, creator, keywords, lastmodifiedby=None):
    """
    Create core properties (common document properties referred to in the
    'Dublin Core' specification). See appproperties() for other stuff.
    """
    if not lastmodifiedby:
        lastmodifiedby = creator
    coreprops = makeelement('coreProperties', nsprefix='cp')
    # Simple children, in document order: (tag, text, namespace prefix).
    for tag, text, prefix in (
            ('title', title, 'dc'),
            ('subject', subject, 'dc'),
            ('creator', creator, 'dc'),
            ('keywords', ','.join(keywords), 'cp'),
            ('lastModifiedBy', lastmodifiedby, 'cp'),
            ('revision', '1', 'cp'),
            ('category', 'Examples', 'cp'),
            ('description', 'Examples', 'dc')):
        coreprops.append(makeelement(tag, tagtext=text, nsprefix=prefix))
    currenttime = time.strftime('%Y-%m-%dT%H:%M:%SZ')
    # Document creation and modify times
    # Problem here: the attribute's name uses one namespace while its value
    # uses another, so these elements are built from strings as a workaround.
    for doctime in ['created', 'modified']:
        elm_str = (
            '<dcterms:%s xmlns:xsi="http://www.w3.org/2001/XMLSchema-instanc'
            'e" xmlns:dcterms="http://purl.org/dc/terms/" xsi:type="dcterms:'
            'W3CDTF">%s</dcterms:%s>'
        ) % (doctime, currenttime, doctime)
        coreprops.append(etree.fromstring(elm_str))
    return coreprops
def appproperties():
    """
    Create app-specific properties. See docproperties() for more common
    document properties.

    @return etree element: the extended-properties root with fixed
                           placeholder statistics as children
    """
    # Build the root from a string so both the default namespace and the
    # vt prefix are declared on it. (The original also called
    # makeelement('Properties', nsprefix='ep') first, but that result was
    # immediately overwritten — dead code, removed.)
    appprops = etree.fromstring(
        '<?xml version="1.0" encoding="UTF-8" standalone="yes"?><Properties x'
        'mlns="http://schemas.openxmlformats.org/officeDocument/2006/extended'
        '-properties" xmlns:vt="http://schemas.openxmlformats.org/officeDocum'
        'ent/2006/docPropsVTypes"></Properties>')
    # NOTE(review): these statistics are hard-coded placeholders, not
    # computed from the actual document.
    props = \
        {'Template': 'Normal.dotm',
         'TotalTime': '6',
         'Pages': '1',
         'Words': '83',
         'Characters': '475',
         'Application': 'Microsoft Word 12.0.0',
         'DocSecurity': '0',
         'Lines': '12',
         'Paragraphs': '8',
         'ScaleCrop': 'false',
         'LinksUpToDate': 'false',
         'CharactersWithSpaces': '583',
         'SharedDoc': 'false',
         'HyperlinksChanged': 'false',
         'AppVersion': '12.0000'}
    for prop in props:
        appprops.append(makeelement(prop, tagtext=props[prop], nsprefix=None))
    return appprops
def websettings():
    '''Generate a minimal word/webSettings.xml tree.'''
    web = makeelement('webSettings')
    for child_tag in ('allowPNG', 'doNotSaveAsSingleFile'):
        web.append(makeelement(child_tag))
    return web
def relationshiplist():
    '''Return the default document-level relationships as [type, target]
    pairs, in the order their rIds will be assigned.'''
    base = ('http://schemas.openxmlformats.org/officeDocument/2006/'
            'relationships/')
    pairs = (
        ('numbering', 'numbering.xml'),
        ('styles', 'styles.xml'),
        ('settings', 'settings.xml'),
        ('webSettings', 'webSettings.xml'),
        ('fontTable', 'fontTable.xml'),
        ('theme', 'theme/theme1.xml'),
    )
    return [[base + reltype, target] for reltype, target in pairs]
def wordrelationships(relationshiplist):
    '''Generate a Word relationships file

    @param list relationshiplist: [type, target] pairs; list position
                                  determines the rId, starting at rId1
    @return etree element: Relationships root with one child per pair
    '''
    # Default list of relationships
    # FIXME: using string hack instead of making element
    # relationships = makeelement('Relationships', nsprefix='pr')
    relationships = etree.fromstring(
        '<Relationships xmlns="http://schemas.openxmlformats.org/package/2006'
        '/relationships"></Relationships>')
    # Relationship IDs (rId) start at 1. enumerate() replaces the original
    # manually-incremented counter.
    for relid, relationship in enumerate(relationshiplist, start=1):
        rel_elm = makeelement('Relationship', nsprefix=None,
                              attributes={'Id': 'rId' + str(relid),
                                          'Type': relationship[0],
                                          'Target': relationship[1]}
                              )
        relationships.append(rel_elm)
    return relationships
def savedocx(
        document, coreprops, appprops, contenttypes, websettings,
        wordrelationships, output, imagefiledict=None):
    """
    Save a modified document

    @param document: etree tree for word/document.xml
    @param coreprops: etree tree for docProps/core.xml
    @param appprops: etree tree for docProps/app.xml
    @param contenttypes: etree tree for [Content_Types].xml
    @param websettings: etree tree for word/webSettings.xml
    @param wordrelationships: etree tree for word/_rels/document.xml.rels
    @param output: path (or file-like object) for the resulting .docx
    @param dict imagefiledict: optional {abspath: rId} map of images to
                               embed under word/media/
    """
    if imagefiledict is None:
        warn(
            'Using savedocx() without imagefiledict parameter will be deprec'
            'ated in the future.', PendingDeprecationWarning
        )

    # NOTE(review): assert is stripped under python -O; template_dir must
    # exist for the support-file copy below to work.
    assert os.path.isdir(template_dir)
    docxfile = zipfile.ZipFile(
        output, mode='w', compression=zipfile.ZIP_DEFLATED)

    # Move to the template data path so the os.walk below yields
    # archive-relative names.
    prev_dir = os.path.abspath('.')  # save previous working dir
    os.chdir(template_dir)

    # Serialize our trees into out zip file
    treesandfiles = {
        document: 'word/document.xml',
        coreprops: 'docProps/core.xml',
        appprops: 'docProps/app.xml',
        contenttypes: '[Content_Types].xml',
        websettings: 'word/webSettings.xml',
        wordrelationships: 'word/_rels/document.xml.rels'
    }
    for tree in treesandfiles:
        log.info('Saving: %s' % treesandfiles[tree])
        treestring = etree.tostring(tree, pretty_print=True)
        docxfile.writestr(treesandfiles[tree], treestring)

    # Add & compress images, if applicable
    if imagefiledict is not None:
        for imagepath, picrelid in imagefiledict.items():
            archivename = 'word/media/%s_%s' % (picrelid, basename(imagepath))
            log.info('Saving: %s', archivename)
            docxfile.write(imagepath, archivename)

    # Add & compress support files
    files_to_ignore = ['.DS_Store']  # nuisance from some os's
    for dirpath, dirnames, filenames in os.walk('.'):
        for filename in filenames:
            if filename in files_to_ignore:
                continue
            templatefile = join(dirpath, filename)
            # Drop the leading './' so archive paths are package-relative.
            archivename = templatefile[2:]
            log.info('Saving: %s', archivename)
            docxfile.write(templatefile, archivename)
    log.info('Saved new file to: %r', output)
    docxfile.close()
    os.chdir(prev_dir)  # restore previous working dir
    return
|
mikemaccana/python-docx
|
docx.py
|
getdocumenttext
|
python
|
def getdocumenttext(document):
'''Return the raw text of a document, as a list of paragraphs.'''
paratextlist = []
# Compile a list of all paragraph (p) elements
paralist = []
for element in document.iter():
# Find p (paragraph) elements
if element.tag == '{'+nsprefixes['w']+'}p':
paralist.append(element)
# Since a single sentence might be spread over multiple text elements,
# iterate through each paragraph, appending all text (t) children to that
# paragraphs text.
for para in paralist:
paratext = u''
# Loop through each paragraph
for element in para.iter():
# Find t (text) elements
if element.tag == '{'+nsprefixes['w']+'}t':
if element.text:
paratext = paratext+element.text
elif element.tag == '{'+nsprefixes['w']+'}tab':
paratext = paratext + '\t'
# Add our completed paragraph text to the list of paragraph text
if not len(paratext) == 0:
paratextlist.append(paratext)
return paratextlist
|
Return the raw text of a document, as a list of paragraphs.
|
train
|
https://github.com/mikemaccana/python-docx/blob/4c9b46dbebe3d2a9b82dbcd35af36584a36fd9fe/docx.py#L910-L935
| null |
# encoding: utf-8
"""
Open and modify Microsoft Word 2007 docx files (called 'OpenXML' and
'Office OpenXML' by Microsoft)
Part of Python's docx module - http://github.com/mikemaccana/python-docx
See LICENSE for licensing information.
"""
import os
import re
import time
import shutil
import zipfile
from lxml import etree
from os.path import abspath, basename, join
try:
from PIL import Image
except ImportError:
import Image
try:
from PIL.ExifTags import TAGS
except ImportError:
TAGS = {}
from exceptions import PendingDeprecationWarning
from warnings import warn
import logging
log = logging.getLogger(__name__)
# Record template directory's location which is just 'template' for a docx
# developer or 'site-packages/docx-template' if you have installed docx
template_dir = join(os.path.dirname(__file__), 'docx-template') # installed
if not os.path.isdir(template_dir):
template_dir = join(os.path.dirname(__file__), 'template') # dev
# All Word prefixes / namespace matches used in document.xml & core.xml.
# LXML doesn't actually use prefixes (just the real namespace) , but these
# make it easier to copy Word output more easily.
nsprefixes = {
'mo': 'http://schemas.microsoft.com/office/mac/office/2008/main',
'o': 'urn:schemas-microsoft-com:office:office',
've': 'http://schemas.openxmlformats.org/markup-compatibility/2006',
# Text Content
'w': 'http://schemas.openxmlformats.org/wordprocessingml/2006/main',
'w10': 'urn:schemas-microsoft-com:office:word',
'wne': 'http://schemas.microsoft.com/office/word/2006/wordml',
# Drawing
'a': 'http://schemas.openxmlformats.org/drawingml/2006/main',
'm': 'http://schemas.openxmlformats.org/officeDocument/2006/math',
'mv': 'urn:schemas-microsoft-com:mac:vml',
'pic': 'http://schemas.openxmlformats.org/drawingml/2006/picture',
'v': 'urn:schemas-microsoft-com:vml',
'wp': ('http://schemas.openxmlformats.org/drawingml/2006/wordprocessing'
'Drawing'),
# Properties (core and extended)
'cp': ('http://schemas.openxmlformats.org/package/2006/metadata/core-pr'
'operties'),
'dc': 'http://purl.org/dc/elements/1.1/',
'ep': ('http://schemas.openxmlformats.org/officeDocument/2006/extended-'
'properties'),
'xsi': 'http://www.w3.org/2001/XMLSchema-instance',
# Content Types
'ct': 'http://schemas.openxmlformats.org/package/2006/content-types',
# Package Relationships
'r': ('http://schemas.openxmlformats.org/officeDocument/2006/relationsh'
'ips'),
'pr': 'http://schemas.openxmlformats.org/package/2006/relationships',
# Dublin Core document properties
'dcmitype': 'http://purl.org/dc/dcmitype/',
'dcterms': 'http://purl.org/dc/terms/'}
def opendocx(file):
    '''Open a docx file, return a document XML tree

    @param file: path or file-like object of the .docx package
    @return etree element: parsed root of word/document.xml
    '''
    mydoc = zipfile.ZipFile(file)
    try:
        xmlcontent = mydoc.read('word/document.xml')
    finally:
        # Close the archive promptly; the original leaked the open handle
        # until garbage collection.
        mydoc.close()
    document = etree.fromstring(xmlcontent)
    return document
def newdocument():
    '''Return a fresh document tree containing only an empty body.'''
    body = makeelement('body')
    document = makeelement('document')
    document.append(body)
    return document
def makeelement(tagname, tagtext=None, nsprefix='w', attributes=None,
                attrnsprefix=None):
    '''Create an element in the requested namespace & return it.

    nsprefix may be a single prefix key from nsprefixes, None for no
    namespace, or a list of prefixes (all declared in the nsmap, with the
    first one used for the tag itself).
    '''
    namespacemap = None
    if isinstance(nsprefix, list):
        namespacemap = {}
        for prefix in nsprefix:
            namespacemap[prefix] = nsprefixes[prefix]
        # FIXME: rest of code below expects a single prefix
        nsprefix = nsprefix[0]
    # Empty namespace string when nsprefix is None.
    namespace = '{%s}' % nsprefixes[nsprefix] if nsprefix else ''
    newelement = etree.Element(namespace + tagname, nsmap=namespacemap)
    if attributes:
        # Choose the namespace for attribute names. With no explicit
        # attrnsprefix: 'w'-tagged elements conventionally namespace their
        # attributes the same way, everything else gets no namespace.
        if attrnsprefix:
            attributenamespace = '{' + nsprefixes[attrnsprefix] + '}'
        elif nsprefix == 'w':
            attributenamespace = namespace
        else:
            attributenamespace = ''
        for attrname, attrvalue in attributes.items():
            newelement.set(attributenamespace + attrname, attrvalue)
    if tagtext:
        newelement.text = tagtext
    return newelement
def pagebreak(type='page', orient='portrait'):
    '''Insert a break, default 'page'.
    See http://openxmldeveloper.org/forums/thread/4075.aspx
    Return our page break element.'''
    # Need to enumerate different types of page breaks.
    validtypes = ['page', 'section']
    if type not in validtypes:
        raise ValueError(
            'Page break style "%s" not implemented. Valid styles: %s.'
            % (type, validtypes))
    pagebreak = makeelement('p')
    if type == 'page':
        # A plain run containing <w:br w:type="page"/>.
        run = makeelement('r')
        run.append(makeelement('br', attributes={'type': type}))
        pagebreak.append(run)
    elif type == 'section':
        # Section breaks carry the page size of the following section in
        # their paragraph properties.
        if orient == 'portrait':
            pgSz = makeelement('pgSz', attributes={'w': '12240', 'h': '15840'})
        elif orient == 'landscape':
            pgSz = makeelement('pgSz', attributes={'h': '12240', 'w': '15840',
                                                   'orient': 'landscape'})
        sectPr = makeelement('sectPr')
        sectPr.append(pgSz)
        pPr = makeelement('pPr')
        pPr.append(sectPr)
        pagebreak.append(pPr)
    return pagebreak
def paragraph(paratext, style='BodyText', breakbefore=False, jc='left'):
    """
    Return a new paragraph element containing *paratext*. The paragraph's
    default style is 'Body Text', but a new style may be set using the
    *style* parameter.

    @param string jc: Paragraph alignment, possible values:
                      left, center, right, both (justified), ...
                      see http://www.schemacentral.com/sc/ooxml/t-w_ST_Jc.html
                      for a full list

    If *paratext* is a list, add a run for each (text, char_format_str)
    2-tuple in the list. char_format_str is a string containing one or more
    of the characters 'b', 'i', or 'u', meaning bold, italic, and underline
    respectively. For example:

        paratext = [
            ('some bold text', 'b'),
            ('some normal text', ''),
            ('some italic underlined text', 'iu')
        ]
    """
    # Normalise input to a list of (text, char styles) items.
    if not isinstance(paratext, list):
        paratext = [(paratext, '')]
    text_tuples = []
    for item in paratext:
        if isinstance(item, (list, tuple)):
            text, char_styles_str = item
        else:
            text, char_styles_str = item, ''
        text_elm = makeelement('t', tagtext=text)
        # Leading/trailing whitespace would otherwise be dropped by Word.
        if len(text.strip()) < len(text):
            text_elm.set('{http://www.w3.org/XML/1998/namespace}space',
                         'preserve')
        text_tuples.append([text_elm, char_styles_str])
    paragraph = makeelement('p')
    # Paragraph properties: style and alignment.
    pPr = makeelement('pPr')
    pPr.append(makeelement('pStyle', attributes={'val': style}))
    pPr.append(makeelement('jc', attributes={'val': jc}))
    paragraph.append(pPr)
    # One run per text item, each with its own character formatting.
    for text_elm, char_styles_str in text_tuples:
        run = makeelement('r')
        rPr = makeelement('rPr')
        if 'b' in char_styles_str:
            rPr.append(makeelement('b'))
        if 'i' in char_styles_str:
            rPr.append(makeelement('i'))
        if 'u' in char_styles_str:
            rPr.append(makeelement('u', attributes={'val': 'single'}))
        run.append(rPr)
        # Insert lastRenderedPageBreak for assistive technologies like
        # document narrators to know when a page break occurred.
        if breakbefore:
            run.append(makeelement('lastRenderedPageBreak'))
        run.append(text_elm)
        paragraph.append(run)
    return paragraph
def contenttypes():
    '''Build the [Content_Types].xml tree: an Override per known package
    part plus a Default per file extension.'''
    types = etree.fromstring(
        '<Types xmlns="http://schemas.openxmlformats.org/package/2006/conten'
        't-types"></Types>')
    # Explicit content types for the well-known package parts.
    parts = {
        '/word/theme/theme1.xml': 'application/vnd.openxmlformats-officedocu'
                                  'ment.theme+xml',
        '/word/fontTable.xml': 'application/vnd.openxmlformats-officedocu'
                               'ment.wordprocessingml.fontTable+xml',
        '/docProps/core.xml': 'application/vnd.openxmlformats-package.co'
                              're-properties+xml',
        '/docProps/app.xml': 'application/vnd.openxmlformats-officedocu'
                             'ment.extended-properties+xml',
        '/word/document.xml': 'application/vnd.openxmlformats-officedocu'
                              'ment.wordprocessingml.document.main+xml',
        '/word/settings.xml': 'application/vnd.openxmlformats-officedocu'
                              'ment.wordprocessingml.settings+xml',
        '/word/numbering.xml': 'application/vnd.openxmlformats-officedocu'
                               'ment.wordprocessingml.numbering+xml',
        '/word/styles.xml': 'application/vnd.openxmlformats-officedocu'
                            'ment.wordprocessingml.styles+xml',
        '/word/webSettings.xml': 'application/vnd.openxmlformats-officedocu'
                                 'ment.wordprocessingml.webSettings+xml'}
    for partname, contenttype in parts.items():
        types.append(makeelement('Override', nsprefix=None,
                                 attributes={'PartName': partname,
                                             'ContentType': contenttype}))
    # Fallback content types keyed on file extension.
    filetypes = {
        'gif': 'image/gif',
        'jpeg': 'image/jpeg',
        'jpg': 'image/jpeg',
        'png': 'image/png',
        'rels': 'application/vnd.openxmlformats-package.relationships+xml',
        'xml': 'application/xml'
    }
    for extension, contenttype in filetypes.items():
        types.append(makeelement('Default', nsprefix=None,
                                 attributes={'Extension': extension,
                                             'ContentType': contenttype}))
    return types
def heading(headingtext, headinglevel, lang='en'):
    '''Make a new heading, return the heading element'''
    # Word heading style names are localised per language.
    lmap = {'en': 'Heading', 'it': 'Titolo'}
    style_val = lmap[lang] + str(headinglevel)
    # Build the pieces bottom-up: style -> properties, text -> run.
    pr = makeelement('pPr')
    pr.append(makeelement('pStyle', attributes={'val': style_val}))
    run = makeelement('r')
    run.append(makeelement('t', tagtext=headingtext))
    # Combine into the paragraph and hand it back.
    paragraph = makeelement('p')
    paragraph.append(pr)
    paragraph.append(run)
    return paragraph
def table(contents, heading=True, colw=None, cwunit='dxa', tblw=0,
          twunit='auto', borders={}, celstyle=None):
    """
    Return a table element based on specified parameters
    @param list contents: A list of lists describing contents. Every item in
                          the list can be a string or a valid XML element
                          itself. It can also be a list. In that case all the
                          listed elements will be merged into the cell.
    @param bool heading:  Tells whether first line should be treated as
                          heading or not
    @param list colw:     list of integer column widths specified in wunitS.
    @param str cwunit:    Unit used for column width:
                            'pct'  : fiftieths of a percent
                            'dxa'  : twentieths of a point
                            'nil'  : no width
                            'auto' : automagically determined
    @param int tblw:      Table width
    @param str twunit:    Unit used for table width. Same possible values as
                          cwunit.
    @param dict borders:  Dictionary defining table border. Supported keys
                          are: 'top', 'left', 'bottom', 'right',
                          'insideH', 'insideV', 'all'.
                          When specified, the 'all' key has precedence over
                          others. Each key must define a dict of border
                          attributes:
                            color : The color of the border, in hex or
                                    'auto'
                            space : The space, measured in points
                            sz    : The size of the border, in eighths of
                                    a point
                            val   : The style of the border, see
                                    http://www.schemacentral.com/sc/ooxml/t-w_ST_Border.htm
    @param list celstyle: Specify the style for each colum, list of dicts.
                          supported keys:
                            'align' : specify the alignment, see paragraph
                                      documentation.
    @return lxml.etree:   Generated XML etree element
    """
    table = makeelement('tbl')
    columns = len(contents[0])
    # Table properties
    tableprops = makeelement('tblPr')
    tablestyle = makeelement('tblStyle', attributes={'val': ''})
    tableprops.append(tablestyle)
    tablewidth = makeelement(
        'tblW', attributes={'w': str(tblw), 'type': str(twunit)})
    tableprops.append(tablewidth)
    # NOTE: `borders` has a mutable default, but it is only ever read here
    # (never mutated), so sharing the default dict across calls is safe.
    if len(borders.keys()):
        tableborders = makeelement('tblBorders')
        for b in ['top', 'left', 'bottom', 'right', 'insideH', 'insideV']:
            if b in borders.keys() or 'all' in borders.keys():
                # 'all' overrides any per-side definition
                k = 'all' if 'all' in borders.keys() else b
                attrs = {}
                for a in borders[k].keys():
                    attrs[a] = unicode(borders[k][a])
                borderelem = makeelement(b, attributes=attrs)
                tableborders.append(borderelem)
        tableprops.append(tableborders)
    tablelook = makeelement('tblLook', attributes={'val': '0400'})
    tableprops.append(tablelook)
    table.append(tableprops)
    # Table Grid: one gridCol per column, fixed or default width
    tablegrid = makeelement('tblGrid')
    for i in range(columns):
        attrs = {'w': str(colw[i]) if colw else '2390'}
        tablegrid.append(makeelement('gridCol', attributes=attrs))
    table.append(tablegrid)
    # Heading Row
    row = makeelement('tr')
    rowprops = makeelement('trPr')
    cnfStyle = makeelement('cnfStyle', attributes={'val': '000000100000'})
    rowprops.append(cnfStyle)
    row.append(rowprops)
    if heading:
        i = 0
        # BUGFIX: the loop variable used to be named `heading`, shadowing the
        # boolean parameter; a falsy final heading cell (e.g. '') then made
        # the content-row slice below start at 0, duplicating the heading.
        for heading_cell in contents[0]:
            cell = makeelement('tc')
            # Cell properties
            cellprops = makeelement('tcPr')
            if colw:
                wattr = {'w': str(colw[i]), 'type': cwunit}
            else:
                wattr = {'w': '0', 'type': 'auto'}
            cellwidth = makeelement('tcW', attributes=wattr)
            cellstyle = makeelement('shd', attributes={'val': 'clear',
                                                       'color': 'auto',
                                                       'fill': 'FFFFFF',
                                                       'themeFill': 'text2',
                                                       'themeFillTint': '99'})
            cellprops.append(cellwidth)
            cellprops.append(cellstyle)
            cell.append(cellprops)
            # Paragraph (Content): strings become centered paragraphs,
            # ready-made etree elements are appended as-is
            if not isinstance(heading_cell, (list, tuple)):
                heading_cell = [heading_cell]
            for h in heading_cell:
                if isinstance(h, etree._Element):
                    cell.append(h)
                else:
                    cell.append(paragraph(h, jc='center'))
            row.append(cell)
            i += 1
        table.append(row)
    # Contents Rows (skip the first row only when it was used as heading)
    for contentrow in contents[1 if heading else 0:]:
        row = makeelement('tr')
        i = 0
        for content in contentrow:
            cell = makeelement('tc')
            # Properties
            cellprops = makeelement('tcPr')
            if colw:
                wattr = {'w': str(colw[i]), 'type': cwunit}
            else:
                wattr = {'w': '0', 'type': 'auto'}
            cellwidth = makeelement('tcW', attributes=wattr)
            cellprops.append(cellwidth)
            cell.append(cellprops)
            # Paragraph (Content)
            if not isinstance(content, (list, tuple)):
                content = [content]
            for c in content:
                if isinstance(c, etree._Element):
                    cell.append(c)
                else:
                    # Per-column alignment from celstyle, default left
                    if celstyle and 'align' in celstyle[i].keys():
                        align = celstyle[i]['align']
                    else:
                        align = 'left'
                    cell.append(paragraph(c, jc=align))
            row.append(cell)
            i += 1
        table.append(row)
    return table
def picture(
        relationshiplist, picname, picdescription, pixelwidth=None,
        pixelheight=None, nochangeaspect=True, nochangearrowheads=True,
        imagefiledict=None):
    """
    Take a relationshiplist, picture file name, and return a paragraph
    containing the image and an updated relationshiplist
    @param list relationshiplist: document relationships; a new image
                                  relationship may be appended to it
    @param str picname:           path of the image file to embed
    @param str picdescription:    description/alt text stored on the picture
    @param int pixelwidth:        display width in pixels (defaults to the
                                  image's own width)
    @param int pixelheight:       display height in pixels (defaults to the
                                  image's own height)
    @param bool nochangeaspect:   lock the aspect ratio in the picLocks elm
    @param bool nochangearrowheads: lock arrowhead scaling in the picLocks elm
    @param dict imagefiledict:    maps image path -> relationship id; when
                                  given, images are tracked here instead of
                                  being copied into the template directory
    @return (relationshiplist, paragraph) or, when imagefiledict was passed,
            (relationshiplist, paragraph, imagefiledict)
    """
    if imagefiledict is None:
        warn(
            'Using picture() without imagefiledict parameter will be depreca'
            'ted in the future.', PendingDeprecationWarning
        )
    # http://openxmldeveloper.org/articles/462.aspx
    # Create an image. Size may be specified, otherwise it will be based on
    # the pixel size of image. Return a paragraph containing the picture
    # Set relationship ID to that of the image or the first available one
    picid = '2'
    picpath = abspath(picname)
    if imagefiledict is not None:
        # Keep track of the image files in a separate dictionary so they don't
        # need to be copied into the template directory
        if picpath not in imagefiledict:
            picrelid = 'rId' + str(len(relationshiplist) + 1)
            imagefiledict[picpath] = picrelid
            relationshiplist.append([
                'http://schemas.openxmlformats.org/officeDocument/2006/relat'
                'ionships/image',
                'media/%s_%s' % (picrelid, basename(picpath))
            ])
        else:
            # Image already registered: reuse its relationship id
            picrelid = imagefiledict[picpath]
    else:
        # Copy files into template directory for backwards compatibility
        # Images still accumulate in the template directory this way
        picrelid = 'rId' + str(len(relationshiplist) + 1)
        relationshiplist.append([
            'http://schemas.openxmlformats.org/officeDocument/2006/relations'
            'hips/image', 'media/' + picname
        ])
        media_dir = join(template_dir, 'word', 'media')
        if not os.path.isdir(media_dir):
            os.mkdir(media_dir)
        shutil.copyfile(picname, join(media_dir, picname))
    image = Image.open(picpath)
    # Extract EXIF data, if available
    try:
        exif = image._getexif()
        exif = {} if exif is None else exif
    except:
        # NOTE(review): bare except kept from the original -- any EXIF
        # parsing failure simply falls back to "no EXIF data"
        exif = {}
    imageExif = {}
    for tag, value in exif.items():
        imageExif[TAGS.get(tag, tag)] = value
    # EXIF orientation values 1-8 encode camera rotation/mirroring; map them
    # onto a rotation angle plus horizontal/vertical flips
    imageOrientation = imageExif.get('Orientation', 1)
    imageAngle = {
        1: 0, 2: 0, 3: 180, 4: 0, 5: 90, 6: 90, 7: 270, 8: 270
    }[imageOrientation]
    imageFlipH = 'true' if imageOrientation in (2, 5, 7) else 'false'
    imageFlipV = 'true' if imageOrientation == 4 else 'false'
    # Check if the user has specified a size
    if not pixelwidth or not pixelheight:
        # If not, get info from the picture itself
        pixelwidth, pixelheight = image.size[0:2]
    # Swap width and height if necessary (rotated 90/270 degrees)
    if imageOrientation in (5, 6, 7, 8):
        pixelwidth, pixelheight = pixelheight, pixelwidth
    # OpenXML measures on-screen objects in English Metric Units (EMU);
    # 12700 EMU = 1 point, so each pixel is mapped to one point here
    emuperpixel = 12700
    width = str(pixelwidth * emuperpixel)
    height = str(pixelheight * emuperpixel)
    # There are 3 main elements inside a picture
    # 1. The Blipfill - specifies how the image fills the picture area
    # (stretch, tile, etc.)
    blipfill = makeelement('blipFill', nsprefix='pic')
    blipfill.append(makeelement('blip', nsprefix='a', attrnsprefix='r',
                                attributes={'embed': picrelid}))
    stretch = makeelement('stretch', nsprefix='a')
    stretch.append(makeelement('fillRect', nsprefix='a'))
    blipfill.append(makeelement('srcRect', nsprefix='a'))
    blipfill.append(stretch)
    # 2. The non visual picture properties
    nvpicpr = makeelement('nvPicPr', nsprefix='pic')
    cnvpr = makeelement(
        'cNvPr', nsprefix='pic',
        attributes={'id': '0', 'name': 'Picture 1', 'descr': picdescription}
    )
    nvpicpr.append(cnvpr)
    cnvpicpr = makeelement('cNvPicPr', nsprefix='pic')
    cnvpicpr.append(makeelement(
        'picLocks', nsprefix='a',
        attributes={'noChangeAspect': str(int(nochangeaspect)),
                    'noChangeArrowheads': str(int(nochangearrowheads))}))
    nvpicpr.append(cnvpicpr)
    # 3. The Shape properties (rotation angle is in 60000ths of a degree)
    sppr = makeelement('spPr', nsprefix='pic', attributes={'bwMode': 'auto'})
    xfrm = makeelement(
        'xfrm', nsprefix='a', attributes={
            'rot': str(imageAngle * 60000), 'flipH': imageFlipH,
            'flipV': imageFlipV
        }
    )
    xfrm.append(
        makeelement('off', nsprefix='a', attributes={'x': '0', 'y': '0'})
    )
    xfrm.append(
        makeelement(
            'ext', nsprefix='a', attributes={'cx': width, 'cy': height}
        )
    )
    prstgeom = makeelement(
        'prstGeom', nsprefix='a', attributes={'prst': 'rect'}
    )
    prstgeom.append(makeelement('avLst', nsprefix='a'))
    sppr.append(xfrm)
    sppr.append(prstgeom)
    # Add our 3 parts to the picture element
    pic = makeelement('pic', nsprefix='pic')
    pic.append(nvpicpr)
    pic.append(blipfill)
    pic.append(sppr)
    # Now make the supporting elements
    # The following sequence is just: make element, then add its children
    graphicdata = makeelement(
        'graphicData', nsprefix='a',
        attributes={'uri': ('http://schemas.openxmlformats.org/drawingml/200'
                            '6/picture')})
    graphicdata.append(pic)
    graphic = makeelement('graphic', nsprefix='a')
    graphic.append(graphicdata)
    framelocks = makeelement('graphicFrameLocks', nsprefix='a',
                             attributes={'noChangeAspect': '1'})
    framepr = makeelement('cNvGraphicFramePr', nsprefix='wp')
    framepr.append(framelocks)
    docpr = makeelement('docPr', nsprefix='wp',
                        attributes={'id': picid, 'name': 'Picture 1',
                                    'descr': picdescription})
    effectextent = makeelement('effectExtent', nsprefix='wp',
                               attributes={'l': '25400', 't': '0', 'r': '0',
                                           'b': '0'})
    extent = makeelement('extent', nsprefix='wp',
                         attributes={'cx': width, 'cy': height})
    inline = makeelement('inline', attributes={'distT': "0", 'distB': "0",
                                               'distL': "0", 'distR': "0"},
                         nsprefix='wp')
    inline.append(extent)
    inline.append(effectextent)
    inline.append(docpr)
    inline.append(framepr)
    inline.append(graphic)
    drawing = makeelement('drawing')
    drawing.append(inline)
    run = makeelement('r')
    run.append(drawing)
    paragraph = makeelement('p')
    paragraph.append(run)
    if imagefiledict is not None:
        return relationshiplist, paragraph, imagefiledict
    else:
        return relationshiplist, paragraph
def search(document, search):
    '''Return True if regex *search* matches any text node in *document*.'''
    pattern = re.compile(search)
    text_tag = '{%s}t' % nsprefixes['w']  # the w:t (text) element tag
    for node in document.iter():
        if node.tag == text_tag and node.text:
            if pattern.search(node.text):
                return True
    return False
def replace(document, search, replace):
    """
    Substitute every regex match of *search* inside the document's text
    nodes with *replace*; return the updated document.
    """
    pattern = re.compile(search)
    text_tag = '{%s}t' % nsprefixes['w']  # the w:t (text) element tag
    for node in document.iter():
        if node.tag != text_tag or not node.text:
            continue
        if pattern.search(node.text):
            node.text = re.sub(search, replace, node.text)
    return document
def clean(document):
    """ Perform misc cleaning operations on documents.
        Removes 't' (text) and 'r' (run) elements that carry neither text
        nor children. Returns cleaned document.
    """
    for tagname in ('t', 'r'):
        full_tag = '{%s}%s' % (nsprefixes['w'], tagname)
        # Collect first, then remove -- mutating during iteration is unsafe
        doomed = [el for el in document.iter()
                  if el.tag == full_tag and not el.text and not len(el)]
        for el in doomed:
            el.getparent().remove(el)
    return document
def findTypeParent(element, tag):
    """ Finds first ancestor of element carrying the given tag
    @param object element: etree element
    @param string tag: the tag of the parent to search for
    @return object element: the found parent or None when not found
    """
    # BUGFIX: the original looped `while True` and dereferenced p.tag even
    # after getparent() returned None (past the root), raising
    # AttributeError instead of reaching its `return None`.
    p = element.getparent()
    while p is not None:
        if p.tag == tag:
            return p
        p = p.getparent()
    # Walked past the root without a match
    return None
def AdvSearch(document, search, bs=3):
    '''Return set of all regex matches
    This is an advanced version of python-docx.search() that takes into
    account blocks of <bs> elements at a time.
    What it does:
    It searches the entire document body for text blocks.
    Since the text to search could be spanned across multiple text blocks,
    we need to adopt some sort of algorithm to handle this situation.
    The smaller matching group of blocks (up to bs) is then adopted.
    Examples:
    original text blocks : [ 'Hel', 'lo,', ' world!' ]
    search : 'Hello,'
    output : set(['Hello,'])
    original text blocks : [ 'Hel', 'lo', ' __', 'name', '__!' ]
    search : '(__[a-z]+__)'
    output : set(['__name__'])
    @param instance document: The original document
    @param str search: The text to search for (regexp)
    @param int bs: Maximum number of consecutive text elements to merge
    @return set All occurrences of the search string
    '''
    # Compile the search regexp
    searchre = re.compile(search)
    matches = []
    # Will match against searchels. Searchels is a list that contains last
    # n text elements found in the document. 1 < n < bs
    searchels = []
    for element in document.iter():
        if element.tag == '{%s}t' % nsprefixes['w']:  # t (text) elements
            if element.text:
                # Add this element to searchels
                searchels.append(element)
                if len(searchels) > bs:
                    # If searchels is too long, remove first elements
                    searchels.pop(0)
                # Search all combinations of searchels, starting from
                # smaller up to bigger ones
                # l = search length (number of merged elements)
                # s = search start index within searchels
                # e = element IDs to merge
                found = False
                for l in range(1, len(searchels)+1):
                    if found:
                        break
                    for s in range(len(searchels)):
                        if found:
                            break
                        if s+l <= len(searchels):
                            e = range(s, s+l)
                            txtsearch = ''
                            for k in e:
                                txtsearch += searchels[k].text
                            # Search for the text in the merged txtsearch
                            match = searchre.search(txtsearch)
                            if match:
                                matches.append(match.group())
                                found = True
    return set(matches)
def advReplace(document, search, replace, bs=3):
    """
    Replace all occurrences of string with a different string, return updated
    document
    This is a modified version of python-docx.replace() that takes into
    account blocks of <bs> elements at a time. The replace element can also
    be a string or an xml etree element.
    What it does:
    It searches the entire document body for text blocks.
    Then scans those text blocks for replace.
    Since the text to search could be spanned across multiple text blocks,
    we need to adopt some sort of algorithm to handle this situation.
    The smaller matching group of blocks (up to bs) is then adopted.
    If the matching group has more than one block, blocks other than first
    are cleared and all the replacement text is put on first block.
    Examples:
    original text blocks : [ 'Hel', 'lo,', ' world!' ]
    search / replace: 'Hello,' / 'Hi!'
    output blocks : [ 'Hi!', '', ' world!' ]
    original text blocks : [ 'Hel', 'lo,', ' world!' ]
    search / replace: 'Hello, world' / 'Hi!'
    output blocks : [ 'Hi!!', '', '' ]
    original text blocks : [ 'Hel', 'lo,', ' world!' ]
    search / replace: 'Hel' / 'Hal'
    output blocks : [ 'Hal', 'lo,', ' world!' ]
    @param instance document: The original document
    @param str search: The text to search for (regexp)
    @param mixed replace: The replacement text or lxml.etree element to
                          append, or a list of etree elements
    @param int bs: See above
    @return instance The document with replacement applied
    """
    # Enables debug output
    DEBUG = False
    newdocument = document
    # Compile the search regexp
    searchre = re.compile(search)
    # Will match against searchels. Searchels is a list that contains last
    # n text elements found in the document. 1 < n < bs
    searchels = []
    for element in newdocument.iter():
        if element.tag == '{%s}t' % nsprefixes['w']:  # t (text) elements
            if element.text:
                # Add this element to searchels
                searchels.append(element)
                if len(searchels) > bs:
                    # If searchels is too long, remove first elements
                    searchels.pop(0)
                # Search all combinations of searchels, starting from
                # smaller up to bigger ones
                # l = search length
                # s = search start
                # e = element IDs to merge
                found = False
                for l in range(1, len(searchels)+1):
                    if found:
                        break
                    for s in range(len(searchels)):
                        if found:
                            break
                        if s+l <= len(searchels):
                            e = range(s, s+l)
                            txtsearch = ''
                            for k in e:
                                txtsearch += searchels[k].text
                            # Search for the text in the whole txtsearch
                            match = searchre.search(txtsearch)
                            if match:
                                found = True
                                # I've found something :)
                                if DEBUG:
                                    log.debug("Found element!")
                                    log.debug("Search regexp: %s",
                                              searchre.pattern)
                                    log.debug("Requested replacement: %s",
                                              replace)
                                    log.debug("Matched text: %s", txtsearch)
                                    log.debug("Matched text (splitted): %s",
                                              map(lambda i: i.text, searchels))
                                    log.debug("Matched at position: %s",
                                              match.start())
                                    log.debug("matched in elements: %s", e)
                                    if isinstance(replace, etree._Element):
                                        log.debug("Will replace with XML CODE")
                                    # BUGFIX: was isinstance(replace(list,
                                    # tuple)), which *called* replace and
                                    # raised TypeError whenever DEBUG was on
                                    elif isinstance(replace, (list, tuple)):
                                        log.debug("Will replace with LIST OF"
                                                  " ELEMENTS")
                                    else:
                                        # BUGFIX: added the missing %s
                                        # placeholder for the second argument
                                        log.debug("Will replace with: %s",
                                                  re.sub(search, replace,
                                                         txtsearch))
                                curlen = 0
                                replaced = False
                                for i in e:
                                    curlen += len(searchels[i].text)
                                    if curlen > match.start() and not replaced:
                                        # The match occurred in THIS element.
                                        # Put in the whole replaced text
                                        if isinstance(replace, etree._Element):
                                            # Convert to a list and process
                                            # it later
                                            replace = [replace]
                                        if isinstance(replace, (list, tuple)):
                                            # I'm replacing with a list of
                                            # etree elements:
                                            # clear the text in the tag and
                                            # append the element after the
                                            # parent paragraph
                                            # (because t elements cannot have
                                            # children)
                                            p = findTypeParent(
                                                searchels[i],
                                                '{%s}p' % nsprefixes['w'])
                                            searchels[i].text = re.sub(
                                                search, '', txtsearch)
                                            insindex = p.getparent().index(p)+1
                                            for r in replace:
                                                p.getparent().insert(
                                                    insindex, r)
                                                insindex += 1
                                        else:
                                            # Replacing with pure text
                                            searchels[i].text = re.sub(
                                                search, replace, txtsearch)
                                        replaced = True
                                        log.debug(
                                            "Replacing in element #: %s", i)
                                    else:
                                        # Clears the other text elements
                                        searchels[i].text = ''
    return newdocument
def coreproperties(title, subject, creator, keywords, lastmodifiedby=None):
    """
    Create core properties (common document properties referred to in the
    'Dublin Core' specification). See appproperties() for other stuff.
    *keywords* is an iterable of strings; *lastmodifiedby* falls back to
    *creator* when omitted.
    """
    props = makeelement('coreProperties', nsprefix='cp')
    for tagname, text in (('title', title),
                          ('subject', subject),
                          ('creator', creator)):
        props.append(makeelement(tagname, tagtext=text, nsprefix='dc'))
    props.append(makeelement('keywords', tagtext=','.join(keywords),
                             nsprefix='cp'))
    modifier = lastmodifiedby or creator
    props.append(makeelement('lastModifiedBy', tagtext=modifier,
                             nsprefix='cp'))
    props.append(makeelement('revision', tagtext='1', nsprefix='cp'))
    props.append(makeelement('category', tagtext='Examples', nsprefix='cp'))
    props.append(makeelement('description', tagtext='Examples', nsprefix='dc'))
    currenttime = time.strftime('%Y-%m-%dT%H:%M:%SZ')
    # Document creation and modify times.
    # Problem: the xsi:type attribute's name and value live in different
    # namespaces, which makeelement() cannot express -- build these two
    # elements from raw XML strings as a workaround.
    template = (
        '<dcterms:%s xmlns:xsi="http://www.w3.org/2001/XMLSchema-instanc'
        'e" xmlns:dcterms="http://purl.org/dc/terms/" xsi:type="dcterms:'
        'W3CDTF">%s</dcterms:%s>'
    )
    for stamp in ('created', 'modified'):
        props.append(etree.fromstring(template % (stamp, currenttime, stamp)))
    return props
def appproperties():
    """
    Create app-specific properties. See docproperties() for more common
    document properties.
    Returns the extended-properties root element populated with boilerplate
    values matching a typical Word 12 document.
    """
    # CLEANUP: the original first built an element with makeelement() and
    # immediately discarded it by rebinding the name; the dead call is gone.
    appprops = etree.fromstring(
        '<?xml version="1.0" encoding="UTF-8" standalone="yes"?><Properties x'
        'mlns="http://schemas.openxmlformats.org/officeDocument/2006/extended'
        '-properties" xmlns:vt="http://schemas.openxmlformats.org/officeDocum'
        'ent/2006/docPropsVTypes"></Properties>')
    props = {
        'Template': 'Normal.dotm',
        'TotalTime': '6',
        'Pages': '1',
        'Words': '83',
        'Characters': '475',
        'Application': 'Microsoft Word 12.0.0',
        'DocSecurity': '0',
        'Lines': '12',
        'Paragraphs': '8',
        'ScaleCrop': 'false',
        'LinksUpToDate': 'false',
        'CharactersWithSpaces': '583',
        'SharedDoc': 'false',
        'HyperlinksChanged': 'false',
        'AppVersion': '12.0000'}
    for prop, value in props.items():
        appprops.append(makeelement(prop, tagtext=value, nsprefix=None))
    return appprops
def websettings():
    '''Build and return the w:webSettings element with default children.'''
    settings = makeelement('webSettings')
    for childname in ('allowPNG', 'doNotSaveAsSingleFile'):
        settings.append(makeelement(childname))
    return settings
def relationshiplist():
    '''Return the default list of [type, target] package relationships.'''
    base = ('http://schemas.openxmlformats.org/officeDocument/2006/'
            'relationships/')
    targets = (
        ('numbering', 'numbering.xml'),
        ('styles', 'styles.xml'),
        ('settings', 'settings.xml'),
        ('webSettings', 'webSettings.xml'),
        ('fontTable', 'fontTable.xml'),
        ('theme', 'theme/theme1.xml'),
    )
    return [[base + reltype, target] for reltype, target in targets]
def wordrelationships(relationshiplist):
    '''Generate a Word relationships file'''
    # FIXME: root element built from a string instead of makeelement()
    # relationships = makeelement('Relationships', nsprefix='pr')
    relationships = etree.fromstring(
        '<Relationships xmlns="http://schemas.openxmlformats.org/package/2006'
        '/relationships"></Relationships>')
    # Relationship IDs (rId) start at 1.
    for position, relationship in enumerate(relationshiplist, start=1):
        relationships.append(makeelement(
            'Relationship', nsprefix=None,
            attributes={'Id': 'rId%d' % position,
                        'Type': relationship[0],
                        'Target': relationship[1]}))
    return relationships
def savedocx(
        document, coreprops, appprops, contenttypes, websettings,
        wordrelationships, output, imagefiledict=None):
    """
    Save a modified document
    Serializes the supplied XML trees plus every support file found in the
    template directory into a docx (zip) archive at *output*.
    @param document: word/document.xml tree
    @param coreprops: docProps/core.xml tree (see coreproperties())
    @param appprops: docProps/app.xml tree (see appproperties())
    @param contenttypes: [Content_Types].xml tree (see contenttypes())
    @param websettings: word/webSettings.xml tree (see websettings())
    @param wordrelationships: word/_rels/document.xml.rels tree
    @param output: path (or file object) for the new docx archive
    @param dict imagefiledict: image path -> relationship id mapping as
                               produced by picture(); entries are written
                               into word/media/ inside the archive
    """
    if imagefiledict is None:
        warn(
            'Using savedocx() without imagefiledict parameter will be deprec'
            'ated in the future.', PendingDeprecationWarning
        )
    assert os.path.isdir(template_dir)
    docxfile = zipfile.ZipFile(
        output, mode='w', compression=zipfile.ZIP_DEFLATED)
    # Move to the template data path: archive names below are relative, so
    # the working directory must be the template dir while writing
    prev_dir = os.path.abspath('.')  # save previous working dir
    os.chdir(template_dir)
    # Serialize our trees into out zip file
    treesandfiles = {
        document: 'word/document.xml',
        coreprops: 'docProps/core.xml',
        appprops: 'docProps/app.xml',
        contenttypes: '[Content_Types].xml',
        websettings: 'word/webSettings.xml',
        wordrelationships: 'word/_rels/document.xml.rels'
    }
    for tree in treesandfiles:
        log.info('Saving: %s' % treesandfiles[tree])
        treestring = etree.tostring(tree, pretty_print=True)
        docxfile.writestr(treesandfiles[tree], treestring)
    # Add & compress images, if applicable
    if imagefiledict is not None:
        for imagepath, picrelid in imagefiledict.items():
            archivename = 'word/media/%s_%s' % (picrelid, basename(imagepath))
            log.info('Saving: %s', archivename)
            docxfile.write(imagepath, archivename)
    # Add & compress support files
    files_to_ignore = ['.DS_Store']  # nuisance from some os's
    for dirpath, dirnames, filenames in os.walk('.'):
        for filename in filenames:
            if filename in files_to_ignore:
                continue
            templatefile = join(dirpath, filename)
            archivename = templatefile[2:]  # strip the leading './'
            log.info('Saving: %s', archivename)
            docxfile.write(templatefile, archivename)
    log.info('Saved new file to: %r', output)
    docxfile.close()
    os.chdir(prev_dir)  # restore previous working dir
    return
|
mikemaccana/python-docx
|
docx.py
|
coreproperties
|
python
|
def coreproperties(title, subject, creator, keywords, lastmodifiedby=None):
coreprops = makeelement('coreProperties', nsprefix='cp')
coreprops.append(makeelement('title', tagtext=title, nsprefix='dc'))
coreprops.append(makeelement('subject', tagtext=subject, nsprefix='dc'))
coreprops.append(makeelement('creator', tagtext=creator, nsprefix='dc'))
coreprops.append(makeelement('keywords', tagtext=','.join(keywords),
nsprefix='cp'))
if not lastmodifiedby:
lastmodifiedby = creator
coreprops.append(makeelement('lastModifiedBy', tagtext=lastmodifiedby,
nsprefix='cp'))
coreprops.append(makeelement('revision', tagtext='1', nsprefix='cp'))
coreprops.append(
makeelement('category', tagtext='Examples', nsprefix='cp'))
coreprops.append(
makeelement('description', tagtext='Examples', nsprefix='dc'))
currenttime = time.strftime('%Y-%m-%dT%H:%M:%SZ')
# Document creation and modify times
# Prob here: we have an attribute who name uses one namespace, and that
# attribute's value uses another namespace.
# We're creating the element from a string as a workaround...
for doctime in ['created', 'modified']:
elm_str = (
'<dcterms:%s xmlns:xsi="http://www.w3.org/2001/XMLSchema-instanc'
'e" xmlns:dcterms="http://purl.org/dc/terms/" xsi:type="dcterms:'
'W3CDTF">%s</dcterms:%s>'
) % (doctime, currenttime, doctime)
coreprops.append(etree.fromstring(elm_str))
return coreprops
|
Create core properties (common document properties referred to in the
'Dublin Core' specification). See appproperties() for other stuff.
|
train
|
https://github.com/mikemaccana/python-docx/blob/4c9b46dbebe3d2a9b82dbcd35af36584a36fd9fe/docx.py#L938-L970
|
[
"def makeelement(tagname, tagtext=None, nsprefix='w', attributes=None,\n attrnsprefix=None):\n '''Create an element & return it'''\n # Deal with list of nsprefix by making namespacemap\n namespacemap = None\n if isinstance(nsprefix, list):\n namespacemap = {}\n for prefix in nsprefix:\n namespacemap[prefix] = nsprefixes[prefix]\n # FIXME: rest of code below expects a single prefix\n nsprefix = nsprefix[0]\n if nsprefix:\n namespace = '{%s}' % nsprefixes[nsprefix]\n else:\n # For when namespace = None\n namespace = ''\n newelement = etree.Element(namespace+tagname, nsmap=namespacemap)\n # Add attributes with namespaces\n if attributes:\n # If they haven't bothered setting attribute namespace, use an empty\n # string (equivalent of no namespace)\n if not attrnsprefix:\n # Quick hack: it seems every element that has a 'w' nsprefix for\n # its tag uses the same prefix for it's attributes\n if nsprefix == 'w':\n attributenamespace = namespace\n else:\n attributenamespace = ''\n else:\n attributenamespace = '{'+nsprefixes[attrnsprefix]+'}'\n\n for tagattribute in attributes:\n newelement.set(attributenamespace+tagattribute,\n attributes[tagattribute])\n if tagtext:\n newelement.text = tagtext\n return newelement\n"
] |
# encoding: utf-8
"""
Open and modify Microsoft Word 2007 docx files (called 'OpenXML' and
'Office OpenXML' by Microsoft)
Part of Python's docx module - http://github.com/mikemaccana/python-docx
See LICENSE for licensing information.
"""
import os
import re
import time
import shutil
import zipfile
from lxml import etree
from os.path import abspath, basename, join
try:
from PIL import Image
except ImportError:
import Image
try:
from PIL.ExifTags import TAGS
except ImportError:
TAGS = {}
from exceptions import PendingDeprecationWarning
from warnings import warn
import logging
log = logging.getLogger(__name__)
# Record template directory's location which is just 'template' for a docx
# developer or 'site-packages/docx-template' if you have installed docx.
# The installed layout is tried first; fall back to the in-repo directory.
template_dir = join(os.path.dirname(__file__), 'docx-template') # installed
if not os.path.isdir(template_dir):
    template_dir = join(os.path.dirname(__file__), 'template') # dev
# All Word prefixes / namespace matches used in document.xml & core.xml.
# LXML doesn't actually use prefixes (just the real namespace), but these
# make it easier to copy Word output more easily.
# Maps short prefix -> full namespace URI; consumed by makeelement().
nsprefixes = {
    'mo': 'http://schemas.microsoft.com/office/mac/office/2008/main',
    'o': 'urn:schemas-microsoft-com:office:office',
    've': 'http://schemas.openxmlformats.org/markup-compatibility/2006',
    # Text Content
    'w': 'http://schemas.openxmlformats.org/wordprocessingml/2006/main',
    'w10': 'urn:schemas-microsoft-com:office:word',
    'wne': 'http://schemas.microsoft.com/office/word/2006/wordml',
    # Drawing
    'a': 'http://schemas.openxmlformats.org/drawingml/2006/main',
    'm': 'http://schemas.openxmlformats.org/officeDocument/2006/math',
    'mv': 'urn:schemas-microsoft-com:mac:vml',
    'pic': 'http://schemas.openxmlformats.org/drawingml/2006/picture',
    'v': 'urn:schemas-microsoft-com:vml',
    'wp': ('http://schemas.openxmlformats.org/drawingml/2006/wordprocessing'
           'Drawing'),
    # Properties (core and extended)
    'cp': ('http://schemas.openxmlformats.org/package/2006/metadata/core-pr'
           'operties'),
    'dc': 'http://purl.org/dc/elements/1.1/',
    'ep': ('http://schemas.openxmlformats.org/officeDocument/2006/extended-'
           'properties'),
    'xsi': 'http://www.w3.org/2001/XMLSchema-instance',
    # Content Types
    'ct': 'http://schemas.openxmlformats.org/package/2006/content-types',
    # Package Relationships
    'r': ('http://schemas.openxmlformats.org/officeDocument/2006/relationsh'
          'ips'),
    'pr': 'http://schemas.openxmlformats.org/package/2006/relationships',
    # Dublin Core document properties
    'dcmitype': 'http://purl.org/dc/dcmitype/',
    'dcterms': 'http://purl.org/dc/terms/'}
def opendocx(file):
    '''Open a docx file, return a document XML tree'''
    package = zipfile.ZipFile(file)
    # The main body lives in word/document.xml inside the package
    document_xml = package.read('word/document.xml')
    return etree.fromstring(document_xml)
def newdocument():
    '''Return a minimal w:document element containing an empty w:body.'''
    doc = makeelement('document')
    doc.append(makeelement('body'))
    return doc
def makeelement(tagname, tagtext=None, nsprefix='w', attributes=None,
                attrnsprefix=None):
    '''Create an element & return it
    @param str tagname: local tag name (without namespace)
    @param str tagtext: optional text content for the new element
    @param mixed nsprefix: key into nsprefixes for the tag's namespace; may
                           be a list of prefixes (all go into the nsmap but
                           only the first is applied to the tag itself), or
                           None for no namespace
    @param dict attributes: attribute name -> value mapping
    @param str attrnsprefix: nsprefixes key for the attribute namespace;
                             when omitted, 'w'-prefixed tags reuse the tag
                             namespace and all other tags get no attribute
                             namespace
    '''
    # Deal with list of nsprefix by making namespacemap
    namespacemap = None
    if isinstance(nsprefix, list):
        namespacemap = {}
        for prefix in nsprefix:
            namespacemap[prefix] = nsprefixes[prefix]
        # FIXME: rest of code below expects a single prefix
        nsprefix = nsprefix[0]
    if nsprefix:
        namespace = '{%s}' % nsprefixes[nsprefix]
    else:
        # For when namespace = None
        namespace = ''
    newelement = etree.Element(namespace+tagname, nsmap=namespacemap)
    # Add attributes with namespaces
    if attributes:
        # If they haven't bothered setting attribute namespace, use an empty
        # string (equivalent of no namespace)
        if not attrnsprefix:
            # Quick hack: it seems every element that has a 'w' nsprefix for
            # its tag uses the same prefix for its attributes
            if nsprefix == 'w':
                attributenamespace = namespace
            else:
                attributenamespace = ''
        else:
            attributenamespace = '{'+nsprefixes[attrnsprefix]+'}'

        for tagattribute in attributes:
            newelement.set(attributenamespace+tagattribute,
                           attributes[tagattribute])
    if tagtext:
        newelement.text = tagtext
    return newelement
def pagebreak(type='page', orient='portrait'):
    '''Insert a break, default 'page'.
    See http://openxmldeveloper.org/forums/thread/4075.aspx
    Return our page break element.
    @param str type: 'page' for a simple page break, 'section' for a
                     section break carrying page-size properties
    @param str orient: 'portrait' or 'landscape'; only used when
                       type == 'section'
    @raise ValueError: on an unrecognised *type* or *orient*
    '''
    # Need to enumerate different types of page breaks.
    validtypes = ['page', 'section']
    if type not in validtypes:
        tmpl = 'Page break style "%s" not implemented. Valid styles: %s.'
        raise ValueError(tmpl % (type, validtypes))
    # BUGFIX: an invalid orientation used to slip through and trigger an
    # UnboundLocalError on pgSz below; reject it up front instead.
    validorients = ['portrait', 'landscape']
    if orient not in validorients:
        tmpl = ('Page orientation "%s" not implemented. '
                'Valid orientations: %s.')
        raise ValueError(tmpl % (orient, validorients))
    pagebreak = makeelement('p')
    if type == 'page':
        run = makeelement('r')
        br = makeelement('br', attributes={'type': type})
        run.append(br)
        pagebreak.append(run)
    elif type == 'section':
        pPr = makeelement('pPr')
        sectPr = makeelement('sectPr')
        if orient == 'portrait':
            pgSz = makeelement('pgSz', attributes={'w': '12240', 'h': '15840'})
        else:  # orient == 'landscape'
            pgSz = makeelement('pgSz', attributes={'h': '12240', 'w': '15840',
                                                   'orient': 'landscape'})
        sectPr.append(pgSz)
        pPr.append(sectPr)
        pagebreak.append(pPr)
    return pagebreak
def paragraph(paratext, style='BodyText', breakbefore=False, jc='left'):
    """
    Return a new paragraph element containing *paratext*. The paragraph's
    default style is 'Body Text', but a new style may be set using the
    *style* parameter.
    @param string jc: Paragraph alignment, possible values:
                      left, center, right, both (justified), ...
                      see http://www.schemacentral.com/sc/ooxml/t-w_ST_Jc.html
                      for a full list
    If *paratext* is a list, add a run for each (text, char_format_str)
    2-tuple in the list. char_format_str is a string containing one or more
    of the characters 'b', 'i', or 'u', meaning bold, italic, and underline
    respectively. For example:
        paratext = [
            ('some bold text', 'b'),
            ('some normal text', ''),
            ('some italic underlined text', 'iu')
        ]
    """
    # Make our elements
    paragraph = makeelement('p')
    if not isinstance(paratext, list):
        paratext = [(paratext, '')]
    text_tuples = []
    for pt in paratext:
        # Accept both bare strings and (text, format) pairs
        text, char_styles_str = (pt if isinstance(pt, (list, tuple))
                                 else (pt, ''))
        text_elm = makeelement('t', tagtext=text)
        # Leading/trailing whitespace would otherwise be collapsed;
        # xml:space="preserve" keeps it intact
        if len(text.strip()) < len(text):
            text_elm.set('{http://www.w3.org/XML/1998/namespace}space',
                         'preserve')
        text_tuples.append([text_elm, char_styles_str])
    pPr = makeelement('pPr')
    pStyle = makeelement('pStyle', attributes={'val': style})
    pJc = makeelement('jc', attributes={'val': jc})
    pPr.append(pStyle)
    pPr.append(pJc)
    # Add the text to the run, and the run to the paragraph
    paragraph.append(pPr)
    for text_elm, char_styles_str in text_tuples:
        run = makeelement('r')
        rPr = makeelement('rPr')
        # Apply styles
        if 'b' in char_styles_str:
            b = makeelement('b')
            rPr.append(b)
        if 'i' in char_styles_str:
            i = makeelement('i')
            rPr.append(i)
        if 'u' in char_styles_str:
            u = makeelement('u', attributes={'val': 'single'})
            rPr.append(u)
        run.append(rPr)
        # Insert lastRenderedPageBreak for assistive technologies like
        # document narrators to know when a page break occurred.
        if breakbefore:
            lastRenderedPageBreak = makeelement('lastRenderedPageBreak')
            run.append(lastRenderedPageBreak)
        run.append(text_elm)
        paragraph.append(run)
    # Return the combined paragraph
    return paragraph
def contenttypes():
    """Return the [Content_Types].xml tree for a new document package.

    Registers an explicit Override for each fixed package part, plus a
    Default content type for every supported file extension.
    """
    types_elm = etree.fromstring(
        '<Types xmlns="http://schemas.openxmlformats.org/package/2006/conten'
        't-types"></Types>')
    # Part name -> content type for the fixed parts of the package.
    part_types = {
        '/word/theme/theme1.xml': 'application/vnd.openxmlformats-officedocu'
                                  'ment.theme+xml',
        '/word/fontTable.xml': 'application/vnd.openxmlformats-officedocu'
                               'ment.wordprocessingml.fontTable+xml',
        '/docProps/core.xml': 'application/vnd.openxmlformats-package.co'
                              're-properties+xml',
        '/docProps/app.xml': 'application/vnd.openxmlformats-officedocu'
                             'ment.extended-properties+xml',
        '/word/document.xml': 'application/vnd.openxmlformats-officedocu'
                              'ment.wordprocessingml.document.main+xml',
        '/word/settings.xml': 'application/vnd.openxmlformats-officedocu'
                              'ment.wordprocessingml.settings+xml',
        '/word/numbering.xml': 'application/vnd.openxmlformats-officedocu'
                               'ment.wordprocessingml.numbering+xml',
        '/word/styles.xml': 'application/vnd.openxmlformats-officedocu'
                            'ment.wordprocessingml.styles+xml',
        '/word/webSettings.xml': 'application/vnd.openxmlformats-officedocu'
                                 'ment.wordprocessingml.webSettings+xml'}
    for partname, contenttype in part_types.items():
        types_elm.append(makeelement(
            'Override', nsprefix=None,
            attributes={'PartName': partname, 'ContentType': contenttype}))
    # Extension -> MIME type defaults for media and metadata files.
    extension_types = {
        'gif': 'image/gif',
        'jpeg': 'image/jpeg',
        'jpg': 'image/jpeg',
        'png': 'image/png',
        'rels': 'application/vnd.openxmlformats-package.relationships+xml',
        'xml': 'application/xml'
    }
    for extension, mime in extension_types.items():
        types_elm.append(makeelement(
            'Default', nsprefix=None,
            attributes={'Extension': extension, 'ContentType': mime}))
    return types_elm
def heading(headingtext, headinglevel, lang='en'):
    '''Return a paragraph element styled as a heading of *headinglevel*.'''
    # Localized heading style names; the level number is appended.
    style_names = {'en': 'Heading', 'it': 'Titolo'}
    para = makeelement('p')
    props = makeelement('pPr')
    props.append(makeelement(
        'pStyle', attributes={'val': style_names[lang] + str(headinglevel)}))
    run = makeelement('r')
    run.append(makeelement('t', tagtext=headingtext))
    # Properties first, then the text run.
    para.append(props)
    para.append(run)
    return para
def table(contents, heading=True, colw=None, cwunit='dxa', tblw=0,
          twunit='auto', borders={}, celstyle=None):
    """
    Return a table element based on specified parameters

    @param list contents: A list of lists describing contents. Every item in
                          the list can be a string or a valid XML element
                          itself. It can also be a list. In that case all the
                          listed elements will be merged into the cell.
    @param bool heading:  Tells whether first line should be treated as
                          heading or not
    @param list colw:     list of integer column widths specified in wunitS.
    @param str cwunit:    Unit used for column width:
                          'pct'  : fiftieths of a percent
                          'dxa'  : twentieths of a point
                          'nil'  : no width
                          'auto' : automagically determined
    @param int tblw:      Table width
    @param str twunit:    Unit used for table width. Same possible values as
                          cwunit.
    @param dict borders:  Dictionary defining table border. Supported keys
                          are: 'top', 'left', 'bottom', 'right',
                          'insideH', 'insideV', 'all'.
                          When specified, the 'all' key has precedence over
                          others. Each key must define a dict of border
                          attributes:
                          color : The color of the border, in hex or 'auto'
                          space : The space, measured in points
                          sz    : The size of the border, in eighths of
                                  a point
                          val   : The style of the border, see
                                  http://www.schemacentral.com/sc/ooxml/t-w_ST_Border.htm
    @param list celstyle: Specify the style for each colum, list of dicts.
                          supported keys:
                          'align' : specify the alignment, see paragraph
                                    documentation.
    @return lxml.etree: Generated XML etree element
    """
    # NOTE(review): 'borders={}' is a mutable default argument; it is only
    # ever read here, never mutated, so it is safe in practice.
    table = makeelement('tbl')
    columns = len(contents[0])
    # Table properties
    tableprops = makeelement('tblPr')
    tablestyle = makeelement('tblStyle', attributes={'val': ''})
    tableprops.append(tablestyle)
    tablewidth = makeelement(
        'tblW', attributes={'w': str(tblw), 'type': str(twunit)})
    tableprops.append(tablewidth)
    if len(borders.keys()):
        tableborders = makeelement('tblBorders')
        for b in ['top', 'left', 'bottom', 'right', 'insideH', 'insideV']:
            if b in borders.keys() or 'all' in borders.keys():
                # 'all' takes precedence over any per-side definition.
                k = 'all' if 'all' in borders.keys() else b
                attrs = {}
                for a in borders[k].keys():
                    # NOTE(review): 'unicode' is Python 2 only; this module
                    # targets Python 2 (see 'from exceptions import ...' at
                    # the top of the file).
                    attrs[a] = unicode(borders[k][a])
                borderelem = makeelement(b, attributes=attrs)
                tableborders.append(borderelem)
        tableprops.append(tableborders)
    tablelook = makeelement('tblLook', attributes={'val': '0400'})
    tableprops.append(tablelook)
    table.append(tableprops)
    # Table Grid: one gridCol per column; width defaults to 2390 dxa.
    tablegrid = makeelement('tblGrid')
    for i in range(columns):
        attrs = {'w': str(colw[i]) if colw else '2390'}
        tablegrid.append(makeelement('gridCol', attributes=attrs))
    table.append(tablegrid)
    # Heading Row
    row = makeelement('tr')
    rowprops = makeelement('trPr')
    cnfStyle = makeelement('cnfStyle', attributes={'val': '000000100000'})
    rowprops.append(cnfStyle)
    row.append(rowprops)
    if heading:
        i = 0
        for heading in contents[0]:
            cell = makeelement('tc')
            # Cell properties
            cellprops = makeelement('tcPr')
            if colw:
                wattr = {'w': str(colw[i]), 'type': cwunit}
            else:
                wattr = {'w': '0', 'type': 'auto'}
            cellwidth = makeelement('tcW', attributes=wattr)
            # Shaded background distinguishes the heading row.
            cellstyle = makeelement('shd', attributes={'val': 'clear',
                                                       'color': 'auto',
                                                       'fill': 'FFFFFF',
                                                       'themeFill': 'text2',
                                                       'themeFillTint': '99'})
            cellprops.append(cellwidth)
            cellprops.append(cellstyle)
            cell.append(cellprops)
            # Paragraph (Content)
            if not isinstance(heading, (list, tuple)):
                heading = [heading]
            for h in heading:
                if isinstance(h, etree._Element):
                    cell.append(h)
                else:
                    cell.append(paragraph(h, jc='center'))
            row.append(cell)
            i += 1
        table.append(row)
    # Contents Rows
    for contentrow in contents[1 if heading else 0:]:
        row = makeelement('tr')
        i = 0
        for content in contentrow:
            cell = makeelement('tc')
            # Properties
            cellprops = makeelement('tcPr')
            if colw:
                wattr = {'w': str(colw[i]), 'type': cwunit}
            else:
                wattr = {'w': '0', 'type': 'auto'}
            cellwidth = makeelement('tcW', attributes=wattr)
            cellprops.append(cellwidth)
            cell.append(cellprops)
            # Paragraph (Content)
            if not isinstance(content, (list, tuple)):
                content = [content]
            for c in content:
                if isinstance(c, etree._Element):
                    cell.append(c)
                else:
                    # Per-column alignment from celstyle, defaulting left.
                    if celstyle and 'align' in celstyle[i].keys():
                        align = celstyle[i]['align']
                    else:
                        align = 'left'
                    cell.append(paragraph(c, jc=align))
            row.append(cell)
            i += 1
        table.append(row)
    return table
def picture(
        relationshiplist, picname, picdescription, pixelwidth=None,
        pixelheight=None, nochangeaspect=True, nochangearrowheads=True,
        imagefiledict=None):
    """
    Take a relationshiplist, picture file name, and return a paragraph
    containing the image and an updated relationshiplist

    @param list relationshiplist: existing [type, target] relationship pairs;
                                  a new image relationship may be appended.
    @param str picname: path of the image file to embed.
    @param str picdescription: alt-text description stored with the image.
    @param int pixelwidth: optional display width; defaults to the image's
                           own (EXIF-rotation-corrected) pixel width.
    @param int pixelheight: optional display height, same default rule.
    @param dict imagefiledict: optional image-path -> relationship-id cache.
                               When omitted (deprecated) the image is copied
                               into the template directory instead.
    @return (relationshiplist, paragraph) or, when imagefiledict was given,
            (relationshiplist, paragraph, imagefiledict).
    """
    if imagefiledict is None:
        warn(
            'Using picture() without imagefiledict parameter will be depreca'
            'ted in the future.', PendingDeprecationWarning
        )
    # http://openxmldeveloper.org/articles/462.aspx
    # Create an image. Size may be specified, otherwise it will based on the
    # pixel size of image. Return a paragraph containing the picture
    # Set relationship ID to that of the image or the first available one
    picid = '2'
    picpath = abspath(picname)
    if imagefiledict is not None:
        # Keep track of the image files in a separate dictionary so they don't
        # need to be copied into the template directory
        if picpath not in imagefiledict:
            picrelid = 'rId' + str(len(relationshiplist) + 1)
            imagefiledict[picpath] = picrelid
            relationshiplist.append([
                'http://schemas.openxmlformats.org/officeDocument/2006/relat'
                'ionships/image',
                'media/%s_%s' % (picrelid, basename(picpath))
            ])
        else:
            picrelid = imagefiledict[picpath]
    else:
        # Copy files into template directory for backwards compatibility
        # Images still accumulate in the template directory this way
        picrelid = 'rId' + str(len(relationshiplist) + 1)
        relationshiplist.append([
            'http://schemas.openxmlformats.org/officeDocument/2006/relations'
            'hips/image', 'media/' + picname
        ])
        media_dir = join(template_dir, 'word', 'media')
        if not os.path.isdir(media_dir):
            os.mkdir(media_dir)
        shutil.copyfile(picname, join(media_dir, picname))
    image = Image.open(picpath)
    # Extract EXIF data, if available
    try:
        exif = image._getexif()
        exif = {} if exif is None else exif
    except Exception:
        # BUGFIX: was a bare 'except:', which also swallowed
        # KeyboardInterrupt/SystemExit. Missing or unparseable EXIF data is
        # the only condition we want to tolerate here.
        exif = {}
    imageExif = {}
    for tag, value in exif.items():
        imageExif[TAGS.get(tag, tag)] = value
    # Map the EXIF orientation flag onto the rotation/mirroring to apply.
    imageOrientation = imageExif.get('Orientation', 1)
    imageAngle = {
        1: 0, 2: 0, 3: 180, 4: 0, 5: 90, 6: 90, 7: 270, 8: 270
    }[imageOrientation]
    imageFlipH = 'true' if imageOrientation in (2, 5, 7) else 'false'
    imageFlipV = 'true' if imageOrientation == 4 else 'false'
    # Check if the user has specified a size
    if not pixelwidth or not pixelheight:
        # If not, get info from the picture itself
        pixelwidth, pixelheight = image.size[0:2]
        # Swap width and height if the EXIF rotation is 90/270 degrees.
        if imageOrientation in (5, 6, 7, 8):
            pixelwidth, pixelheight = pixelheight, pixelwidth
    # OpenXML measures on-screen objects in English Metric Units
    # 1cm = 36000 EMUs
    emuperpixel = 12700
    width = str(pixelwidth * emuperpixel)
    height = str(pixelheight * emuperpixel)
    # There are 3 main elements inside a picture
    # 1. The Blipfill - specifies how the image fills the picture area
    #    (stretch, tile, etc.)
    blipfill = makeelement('blipFill', nsprefix='pic')
    blipfill.append(makeelement('blip', nsprefix='a', attrnsprefix='r',
                                attributes={'embed': picrelid}))
    stretch = makeelement('stretch', nsprefix='a')
    stretch.append(makeelement('fillRect', nsprefix='a'))
    blipfill.append(makeelement('srcRect', nsprefix='a'))
    blipfill.append(stretch)
    # 2. The non visual picture properties
    nvpicpr = makeelement('nvPicPr', nsprefix='pic')
    cnvpr = makeelement(
        'cNvPr', nsprefix='pic',
        attributes={'id': '0', 'name': 'Picture 1', 'descr': picdescription}
    )
    nvpicpr.append(cnvpr)
    cnvpicpr = makeelement('cNvPicPr', nsprefix='pic')
    cnvpicpr.append(makeelement(
        'picLocks', nsprefix='a',
        attributes={'noChangeAspect': str(int(nochangeaspect)),
                    'noChangeArrowheads': str(int(nochangearrowheads))}))
    nvpicpr.append(cnvpicpr)
    # 3. The Shape properties
    sppr = makeelement('spPr', nsprefix='pic', attributes={'bwMode': 'auto'})
    # OOXML expresses rotation in 1/60000ths of a degree.
    xfrm = makeelement(
        'xfrm', nsprefix='a', attributes={
            'rot': str(imageAngle * 60000), 'flipH': imageFlipH,
            'flipV': imageFlipV
        }
    )
    xfrm.append(
        makeelement('off', nsprefix='a', attributes={'x': '0', 'y': '0'})
    )
    xfrm.append(
        makeelement(
            'ext', nsprefix='a', attributes={'cx': width, 'cy': height}
        )
    )
    prstgeom = makeelement(
        'prstGeom', nsprefix='a', attributes={'prst': 'rect'}
    )
    prstgeom.append(makeelement('avLst', nsprefix='a'))
    sppr.append(xfrm)
    sppr.append(prstgeom)
    # Add our 3 parts to the picture element
    pic = makeelement('pic', nsprefix='pic')
    pic.append(nvpicpr)
    pic.append(blipfill)
    pic.append(sppr)
    # Now make the supporting elements
    # The following sequence is just: make element, then add its children
    graphicdata = makeelement(
        'graphicData', nsprefix='a',
        attributes={'uri': ('http://schemas.openxmlformats.org/drawingml/200'
                            '6/picture')})
    graphicdata.append(pic)
    graphic = makeelement('graphic', nsprefix='a')
    graphic.append(graphicdata)
    framelocks = makeelement('graphicFrameLocks', nsprefix='a',
                             attributes={'noChangeAspect': '1'})
    framepr = makeelement('cNvGraphicFramePr', nsprefix='wp')
    framepr.append(framelocks)
    docpr = makeelement('docPr', nsprefix='wp',
                        attributes={'id': picid, 'name': 'Picture 1',
                                    'descr': picdescription})
    effectextent = makeelement('effectExtent', nsprefix='wp',
                               attributes={'l': '25400', 't': '0', 'r': '0',
                                           'b': '0'})
    extent = makeelement('extent', nsprefix='wp',
                         attributes={'cx': width, 'cy': height})
    inline = makeelement('inline', attributes={'distT': "0", 'distB': "0",
                                               'distL': "0", 'distR': "0"},
                         nsprefix='wp')
    inline.append(extent)
    inline.append(effectextent)
    inline.append(docpr)
    inline.append(framepr)
    inline.append(graphic)
    drawing = makeelement('drawing')
    drawing.append(inline)
    run = makeelement('r')
    run.append(drawing)
    paragraph = makeelement('p')
    paragraph.append(run)
    if imagefiledict is not None:
        return relationshiplist, paragraph, imagefiledict
    else:
        return relationshiplist, paragraph
def search(document, search):
    '''Return True if *search* (a regex) matches any w:t text element.'''
    pattern = re.compile(search)
    found = False
    text_tag = '{%s}t' % nsprefixes['w']
    for element in document.iter():
        # Only w:t (text) elements carry searchable content.
        if element.tag == text_tag and element.text:
            if pattern.search(element.text):
                found = True
    return found
def replace(document, search, replace):
    """Replace every regex match of *search* with *replace* in all text
    elements; return the (mutated) document."""
    pattern = re.compile(search)
    text_tag = '{%s}t' % nsprefixes['w']
    for element in document.iter():
        if element.tag != text_tag:
            continue
        # Only rewrite elements that actually contain a match.
        if element.text and pattern.search(element.text):
            element.text = re.sub(search, replace, element.text)
    return document
def clean(document):
    """Remove empty w:t and w:r elements; return the cleaned document.

    Two passes (t first, then r) so runs emptied by the first pass are
    still picked up by the second.
    """
    for tagname in ('t', 'r'):
        fulltag = '{%s}%s' % (nsprefixes['w'], tagname)
        # Collect first, then remove, to avoid mutating while iterating.
        doomed = [el for el in document.iter()
                  if el.tag == fulltag and not el.text and not len(el)]
        for el in doomed:
            el.getparent().remove(el)
    return document
def findTypeParent(element, tag):
    """ Finds first parent of element of the given type

    @param object element: etree element
    @param string tag: the tag parent to search for
    @return object element: the found parent or None when not found

    BUGFIX: the previous version looped unconditionally; at the tree root
    getparent() returns None and 'None.tag' raised AttributeError, making
    the documented 'return None' unreachable. The None case is now handled.
    """
    p = element.getparent()
    while p is not None:
        if p.tag == tag:
            return p
        p = p.getparent()
    # Not found
    return None
def AdvSearch(document, search, bs=3):
    '''Return set of all regex matches

    This is an advanced version of python-docx.search() that takes into
    account blocks of <bs> elements at a time.

    What it does:
    It searches the entire document body for text blocks.
    Since the text to search could be spawned across multiple text blocks,
    we need to adopt some sort of algorithm to handle this situation: a
    sliding window of up to <bs> consecutive w:t elements is kept, and all
    contiguous sub-groups of the window are concatenated and searched.

    Examples:
    original text blocks : [ 'Hel', 'lo,', ' world!' ]
    search : 'Hello,'
    output : set(['Hello,'])
    original text blocks : [ 'Hel', 'lo', ' __', 'name', '__!' ]
    search : '(__[a-z]+__)'
    output : set(['__name__'])

    @param instance document: The original document
    @param str search: The text to search for (regexp)
    @param int bs: Window size, see above
    @return set: All occurrences of the search string
    '''
    # Compile the search regexp
    searchre = re.compile(search)
    matches = []
    # Will match against searchels. Searchels is a list that contains the
    # last n text elements found in the document, 1 <= n <= bs.
    searchels = []
    for element in document.iter():
        if element.tag == '{%s}t' % nsprefixes['w']:  # t (text) elements
            if element.text:
                # Add this element to searchels
                searchels.append(element)
                if len(searchels) > bs:
                    # If searchels is too long, remove the first element
                    searchels.pop(0)
                # Search all contiguous combinations of searchels, starting
                # from smaller groups up to bigger ones.
                # l = group length
                # s = group start index
                # e = element IDs to merge
                found = False
                for l in range(1, len(searchels)+1):
                    if found:
                        break
                    for s in range(len(searchels)):
                        if found:
                            break
                        if s+l <= len(searchels):
                            e = range(s, s+l)
                            txtsearch = ''
                            for k in e:
                                txtsearch += searchels[k].text
                            # Search for the text in the whole txtsearch
                            match = searchre.search(txtsearch)
                            if match:
                                matches.append(match.group())
                                found = True
    return set(matches)
def advReplace(document, search, replace, bs=3):
    """
    Replace all occurences of string with a different string, return updated
    document

    This is a modified version of python-docx.replace() that takes into
    account blocks of <bs> elements at a time. The replace element can also
    be a string or an xml etree element.

    What it does:
    It searches the entire document body for text blocks.
    Then scans those text blocks for the search expression.
    Since the text to search could be spawned across multiple text blocks,
    we need to adopt some sort of algorithm to handle this situation.
    The smaller matching group of blocks (up to bs) is then adopted.
    If the matching group has more than one block, blocks other than first
    are cleared and all the replacement text is put on first block.

    Examples:
    original text blocks : [ 'Hel', 'lo,', ' world!' ]
    search / replace: 'Hello,' / 'Hi!'
    output blocks : [ 'Hi!', '', ' world!' ]
    original text blocks : [ 'Hel', 'lo,', ' world!' ]
    search / replace: 'Hello, world' / 'Hi!'
    output blocks : [ 'Hi!!', '', '' ]
    original text blocks : [ 'Hel', 'lo,', ' world!' ]
    search / replace: 'Hel' / 'Hal'
    output blocks : [ 'Hal', 'lo,', ' world!' ]

    @param instance document: The original document
    @param str search: The text to search for (regexp)
    @param mixed replace: The replacement text or lxml.etree element to
                          append, or a list of etree elements
    @param int bs: See above
    @return instance The document with replacement applied

    BUGFIX: the DEBUG branch previously called 'replace(list, tuple)'
    instead of 'isinstance(replace, (list, tuple))', raising TypeError
    whenever DEBUG was enabled with a list replacement; it also passed a
    log argument without a %s placeholder. Both are fixed below.
    """
    # Enables debug output
    DEBUG = False
    newdocument = document
    # Compile the search regexp
    searchre = re.compile(search)
    # Will match against searchels. Searchels is a list that contains last
    # n text elements found in the document. 1 < n < bs
    searchels = []
    for element in newdocument.iter():
        if element.tag == '{%s}t' % nsprefixes['w']:  # t (text) elements
            if element.text:
                # Add this element to searchels
                searchels.append(element)
                if len(searchels) > bs:
                    # Is searchels is too long, remove first elements
                    searchels.pop(0)
                # Search all combinations, of searchels, starting from
                # smaller up to bigger ones
                # l = search lenght
                # s = search start
                # e = element IDs to merge
                found = False
                for l in range(1, len(searchels)+1):
                    if found:
                        break
                    for s in range(len(searchels)):
                        if found:
                            break
                        if s+l <= len(searchels):
                            e = range(s, s+l)
                            txtsearch = ''
                            for k in e:
                                txtsearch += searchels[k].text
                            # Searcs for the text in the whole txtsearch
                            match = searchre.search(txtsearch)
                            if match:
                                found = True
                                # I've found something :)
                                if DEBUG:
                                    log.debug("Found element!")
                                    log.debug("Search regexp: %s",
                                              searchre.pattern)
                                    log.debug("Requested replacement: %s",
                                              replace)
                                    log.debug("Matched text: %s", txtsearch)
                                    log.debug("Matched text (splitted): %s",
                                              map(lambda i: i.text, searchels))
                                    log.debug("Matched at position: %s",
                                              match.start())
                                    log.debug("matched in elements: %s", e)
                                    if isinstance(replace, etree._Element):
                                        log.debug("Will replace with XML CODE")
                                    elif isinstance(replace, (list, tuple)):
                                        log.debug("Will replace with LIST OF"
                                                  " ELEMENTS")
                                    else:
                                        log.debug("Will replace with: %s",
                                                  re.sub(search, replace,
                                                         txtsearch))
                                curlen = 0
                                replaced = False
                                for i in e:
                                    curlen += len(searchels[i].text)
                                    if curlen > match.start() and not replaced:
                                        # The match occurred in THIS element.
                                        # Puth in the whole replaced text
                                        if isinstance(replace, etree._Element):
                                            # Convert to a list and process
                                            # it later
                                            replace = [replace]
                                        if isinstance(replace, (list, tuple)):
                                            # I'm replacing with a list of
                                            # etree elements
                                            # clear the text in the tag and
                                            # append the element after the
                                            # parent paragraph
                                            # (because t elements cannot have
                                            # childs)
                                            p = findTypeParent(
                                                searchels[i],
                                                '{%s}p' % nsprefixes['w'])
                                            searchels[i].text = re.sub(
                                                search, '', txtsearch)
                                            insindex = p.getparent().index(p)+1
                                            for r in replace:
                                                p.getparent().insert(
                                                    insindex, r)
                                                insindex += 1
                                        else:
                                            # Replacing with pure text
                                            searchels[i].text = re.sub(
                                                search, replace, txtsearch)
                                        replaced = True
                                        log.debug(
                                            "Replacing in element #: %s", i)
                                    else:
                                        # Clears the other text elements
                                        searchels[i].text = ''
    return newdocument
def getdocumenttext(document):
    '''Return the raw text of a document, as a list of paragraph strings.'''
    p_tag = '{' + nsprefixes['w'] + '}p'
    t_tag = '{' + nsprefixes['w'] + '}t'
    tab_tag = '{' + nsprefixes['w'] + '}tab'
    # First collect every paragraph element, in document order.
    paragraphs = [el for el in document.iter() if el.tag == p_tag]
    paratextlist = []
    # A single sentence may be spread over multiple w:t runs, so join all
    # text (and tab) children of each paragraph into one string.
    for para in paragraphs:
        pieces = []
        for child in para.iter():
            if child.tag == t_tag:
                if child.text:
                    pieces.append(child.text)
            elif child.tag == tab_tag:
                pieces.append('\t')
        joined = u''.join(pieces)
        # Skip paragraphs that produced no text at all.
        if joined:
            paratextlist.append(joined)
    return paratextlist
def appproperties():
    """
    Create app-specific properties. See docproperties() for more common
    document properties.

    Returns an extended-properties <Properties> element pre-populated with
    static, Word-compatible metadata values.
    """
    # Built from a raw string so the default namespace comes out exactly as
    # Word expects. (A previous version first built a throwaway element via
    # makeelement() and immediately overwrote it; that dead call is removed.)
    appprops = etree.fromstring(
        '<?xml version="1.0" encoding="UTF-8" standalone="yes"?><Properties x'
        'mlns="http://schemas.openxmlformats.org/officeDocument/2006/extended'
        '-properties" xmlns:vt="http://schemas.openxmlformats.org/officeDocum'
        'ent/2006/docPropsVTypes"></Properties>')
    props =\
        {'Template': 'Normal.dotm',
         'TotalTime': '6',
         'Pages': '1',
         'Words': '83',
         'Characters': '475',
         'Application': 'Microsoft Word 12.0.0',
         'DocSecurity': '0',
         'Lines': '12',
         'Paragraphs': '8',
         'ScaleCrop': 'false',
         'LinksUpToDate': 'false',
         'CharactersWithSpaces': '583',
         'SharedDoc': 'false',
         'HyperlinksChanged': 'false',
         'AppVersion': '12.0000'}
    for prop in props:
        appprops.append(makeelement(prop, tagtext=props[prop], nsprefix=None))
    return appprops
def websettings():
    '''Return a default word/webSettings.xml element.'''
    settings = makeelement('webSettings')
    # Fixed children Word writes by default.
    for childname in ('allowPNG', 'doNotSaveAsSingleFile'):
        settings.append(makeelement(childname))
    return settings
def relationshiplist():
    """Return the default document relationships as [type, target] pairs."""
    rel_ns = ('http://schemas.openxmlformats.org/officeDocument/2006/'
              'relationships/')
    # (relationship suffix, part target) in the order Word expects.
    entries = [
        ('numbering', 'numbering.xml'),
        ('styles', 'styles.xml'),
        ('settings', 'settings.xml'),
        ('webSettings', 'webSettings.xml'),
        ('fontTable', 'fontTable.xml'),
        ('theme', 'theme/theme1.xml'),
    ]
    return [[rel_ns + suffix, target] for suffix, target in entries]
def wordrelationships(relationshiplist):
    '''Generate a Word relationships file'''
    # Default list of relationships
    # FIXME: using string hack instead of making element
    # relationships = makeelement('Relationships', nsprefix='pr')
    relationships = etree.fromstring(
        '<Relationships xmlns="http://schemas.openxmlformats.org/package/2006'
        '/relationships"></Relationships>')
    # Relationship IDs (rId) are 1-based.
    for index, relationship in enumerate(relationshiplist):
        relationships.append(makeelement(
            'Relationship', nsprefix=None,
            attributes={'Id': 'rId' + str(index + 1),
                        'Type': relationship[0],
                        'Target': relationship[1]}))
    return relationships
def savedocx(
        document, coreprops, appprops, contenttypes, websettings,
        wordrelationships, output, imagefiledict=None):
    """
    Save a modified document

    Serializes the given XML trees plus the static support files from the
    template directory into *output* (a .docx zip archive).

    ROBUSTNESS FIX: the working directory is restored and the zip handle
    closed even if serialization fails part-way; previously an exception
    left the process chdir'd into the template directory with the archive
    open.
    """
    if imagefiledict is None:
        warn(
            'Using savedocx() without imagefiledict parameter will be deprec'
            'ated in the future.', PendingDeprecationWarning
        )
    assert os.path.isdir(template_dir)
    docxfile = zipfile.ZipFile(
        output, mode='w', compression=zipfile.ZIP_DEFLATED)
    # Move to the template data path
    prev_dir = os.path.abspath('.')  # save previous working dir
    os.chdir(template_dir)
    try:
        # Serialize our trees into out zip file
        treesandfiles = {
            document: 'word/document.xml',
            coreprops: 'docProps/core.xml',
            appprops: 'docProps/app.xml',
            contenttypes: '[Content_Types].xml',
            websettings: 'word/webSettings.xml',
            wordrelationships: 'word/_rels/document.xml.rels'
        }
        for tree in treesandfiles:
            log.info('Saving: %s' % treesandfiles[tree])
            treestring = etree.tostring(tree, pretty_print=True)
            docxfile.writestr(treesandfiles[tree], treestring)
        # Add & compress images, if applicable
        if imagefiledict is not None:
            for imagepath, picrelid in imagefiledict.items():
                archivename = 'word/media/%s_%s' % (
                    picrelid, basename(imagepath))
                log.info('Saving: %s', archivename)
                docxfile.write(imagepath, archivename)
        # Add & compress support files
        files_to_ignore = ['.DS_Store']  # nuisance from some os's
        for dirpath, dirnames, filenames in os.walk('.'):
            for filename in filenames:
                if filename in files_to_ignore:
                    continue
                templatefile = join(dirpath, filename)
                # Strip the leading './' for the in-archive name.
                archivename = templatefile[2:]
                log.info('Saving: %s', archivename)
                docxfile.write(templatefile, archivename)
        log.info('Saved new file to: %r', output)
    finally:
        docxfile.close()
        os.chdir(prev_dir)  # restore previous working dir
    return
|
mikemaccana/python-docx
|
docx.py
|
appproperties
|
python
|
def appproperties():
appprops = makeelement('Properties', nsprefix='ep')
appprops = etree.fromstring(
'<?xml version="1.0" encoding="UTF-8" standalone="yes"?><Properties x'
'mlns="http://schemas.openxmlformats.org/officeDocument/2006/extended'
'-properties" xmlns:vt="http://schemas.openxmlformats.org/officeDocum'
'ent/2006/docPropsVTypes"></Properties>')
props =\
{'Template': 'Normal.dotm',
'TotalTime': '6',
'Pages': '1',
'Words': '83',
'Characters': '475',
'Application': 'Microsoft Word 12.0.0',
'DocSecurity': '0',
'Lines': '12',
'Paragraphs': '8',
'ScaleCrop': 'false',
'LinksUpToDate': 'false',
'CharactersWithSpaces': '583',
'SharedDoc': 'false',
'HyperlinksChanged': 'false',
'AppVersion': '12.0000'}
for prop in props:
appprops.append(makeelement(prop, tagtext=props[prop], nsprefix=None))
return appprops
|
Create app-specific properties. See docproperties() for more common
document properties.
|
train
|
https://github.com/mikemaccana/python-docx/blob/4c9b46dbebe3d2a9b82dbcd35af36584a36fd9fe/docx.py#L973-L1003
|
[
"def makeelement(tagname, tagtext=None, nsprefix='w', attributes=None,\n attrnsprefix=None):\n '''Create an element & return it'''\n # Deal with list of nsprefix by making namespacemap\n namespacemap = None\n if isinstance(nsprefix, list):\n namespacemap = {}\n for prefix in nsprefix:\n namespacemap[prefix] = nsprefixes[prefix]\n # FIXME: rest of code below expects a single prefix\n nsprefix = nsprefix[0]\n if nsprefix:\n namespace = '{%s}' % nsprefixes[nsprefix]\n else:\n # For when namespace = None\n namespace = ''\n newelement = etree.Element(namespace+tagname, nsmap=namespacemap)\n # Add attributes with namespaces\n if attributes:\n # If they haven't bothered setting attribute namespace, use an empty\n # string (equivalent of no namespace)\n if not attrnsprefix:\n # Quick hack: it seems every element that has a 'w' nsprefix for\n # its tag uses the same prefix for it's attributes\n if nsprefix == 'w':\n attributenamespace = namespace\n else:\n attributenamespace = ''\n else:\n attributenamespace = '{'+nsprefixes[attrnsprefix]+'}'\n\n for tagattribute in attributes:\n newelement.set(attributenamespace+tagattribute,\n attributes[tagattribute])\n if tagtext:\n newelement.text = tagtext\n return newelement\n"
] |
# encoding: utf-8
"""
Open and modify Microsoft Word 2007 docx files (called 'OpenXML' and
'Office OpenXML' by Microsoft)
Part of Python's docx module - http://github.com/mikemaccana/python-docx
See LICENSE for licensing information.
"""
import os
import re
import time
import shutil
import zipfile
from lxml import etree
from os.path import abspath, basename, join
try:
from PIL import Image
except ImportError:
import Image
try:
from PIL.ExifTags import TAGS
except ImportError:
TAGS = {}
from exceptions import PendingDeprecationWarning
from warnings import warn
import logging
# Module-level logger; callers configure handlers themselves.
log = logging.getLogger(__name__)

# Record template directory's location which is just 'template' for a docx
# developer or 'site-packages/docx-template' if you have installed docx
template_dir = join(os.path.dirname(__file__), 'docx-template')  # installed
if not os.path.isdir(template_dir):
    template_dir = join(os.path.dirname(__file__), 'template')  # dev

# All Word prefixes / namespace matches used in document.xml & core.xml.
# LXML doesn't actually use prefixes (just the real namespace), but these
# make it easier to copy Word output more easily.
nsprefixes = {
    'mo': 'http://schemas.microsoft.com/office/mac/office/2008/main',
    'o': 'urn:schemas-microsoft-com:office:office',
    've': 'http://schemas.openxmlformats.org/markup-compatibility/2006',
    # Text Content
    'w': 'http://schemas.openxmlformats.org/wordprocessingml/2006/main',
    'w10': 'urn:schemas-microsoft-com:office:word',
    'wne': 'http://schemas.microsoft.com/office/word/2006/wordml',
    # Drawing
    'a': 'http://schemas.openxmlformats.org/drawingml/2006/main',
    'm': 'http://schemas.openxmlformats.org/officeDocument/2006/math',
    'mv': 'urn:schemas-microsoft-com:mac:vml',
    'pic': 'http://schemas.openxmlformats.org/drawingml/2006/picture',
    'v': 'urn:schemas-microsoft-com:vml',
    'wp': ('http://schemas.openxmlformats.org/drawingml/2006/wordprocessing'
           'Drawing'),
    # Properties (core and extended)
    'cp': ('http://schemas.openxmlformats.org/package/2006/metadata/core-pr'
           'operties'),
    'dc': 'http://purl.org/dc/elements/1.1/',
    'ep': ('http://schemas.openxmlformats.org/officeDocument/2006/extended-'
           'properties'),
    'xsi': 'http://www.w3.org/2001/XMLSchema-instance',
    # Content Types
    'ct': 'http://schemas.openxmlformats.org/package/2006/content-types',
    # Package Relationships
    'r': ('http://schemas.openxmlformats.org/officeDocument/2006/relationsh'
          'ips'),
    'pr': 'http://schemas.openxmlformats.org/package/2006/relationships',
    # Dublin Core document properties
    'dcmitype': 'http://purl.org/dc/dcmitype/',
    'dcterms': 'http://purl.org/dc/terms/'}
def opendocx(file):
    '''Open a docx file, return a document XML tree'''
    # A .docx is a zip archive; the main body lives in word/document.xml.
    archive = zipfile.ZipFile(file)
    xml_bytes = archive.read('word/document.xml')
    return etree.fromstring(xml_bytes)
def newdocument():
    '''Return a minimal document tree: a w:document holding an empty body.'''
    doc = makeelement('document')
    doc.append(makeelement('body'))
    return doc
def makeelement(tagname, tagtext=None, nsprefix='w', attributes=None,
                attrnsprefix=None):
    '''Create an element & return it

    @param str tagname: Local tag name (without namespace prefix).
    @param str tagtext: Optional text content for the new element.
    @param mixed nsprefix: Key into the module-level `nsprefixes` map, a
                           list of such keys (the first one names the tag,
                           the rest only populate the nsmap), or None for
                           no namespace.
    @param dict attributes: Optional attribute-name -> value mapping.
    @param str attrnsprefix: Optional `nsprefixes` key used to namespace
                             the attributes.
    '''
    # Deal with list of nsprefix by making namespacemap
    namespacemap = None
    if isinstance(nsprefix, list):
        namespacemap = {}
        for prefix in nsprefix:
            namespacemap[prefix] = nsprefixes[prefix]
        # FIXME: rest of code below expects a single prefix
        nsprefix = nsprefix[0]
    if nsprefix:
        namespace = '{%s}' % nsprefixes[nsprefix]
    else:
        # For when namespace = None
        namespace = ''
    newelement = etree.Element(namespace+tagname, nsmap=namespacemap)
    # Add attributes with namespaces
    if attributes:
        # If they haven't bothered setting attribute namespace, use an empty
        # string (equivalent of no namespace)
        if not attrnsprefix:
            # Quick hack: it seems every element that has a 'w' nsprefix for
            # its tag uses the same prefix for it's attributes
            if nsprefix == 'w':
                attributenamespace = namespace
            else:
                attributenamespace = ''
        else:
            attributenamespace = '{'+nsprefixes[attrnsprefix]+'}'
        for tagattribute in attributes:
            newelement.set(attributenamespace+tagattribute,
                           attributes[tagattribute])
    if tagtext:
        newelement.text = tagtext
    return newelement
def pagebreak(type='page', orient='portrait'):
    '''Insert a break, default 'page'.
    See http://openxmldeveloper.org/forums/thread/4075.aspx
    Return our page break element.

    @param str type:   'page' for a simple page break, 'section' for a
                       section break carrying page-size properties.
    @param str orient: Page orientation for section breaks, 'portrait' or
                       'landscape'; ignored for type='page'.
    @raise ValueError: On an unknown *type*, or an unknown *orient* for a
                       section break. (Previously an unknown orient fell
                       through both branches, leaving pgSz unbound and
                       raising a confusing NameError.)
    '''
    # Need to enumerate different types of page breaks.
    validtypes = ['page', 'section']
    if type not in validtypes:
        tmpl = 'Page break style "%s" not implemented. Valid styles: %s.'
        raise ValueError(tmpl % (type, validtypes))
    pagebreak = makeelement('p')
    if type == 'page':
        run = makeelement('r')
        br = makeelement('br', attributes={'type': type})
        run.append(br)
        pagebreak.append(run)
    elif type == 'section':
        validorients = ['portrait', 'landscape']
        if orient not in validorients:
            tmpl = ('Page orientation "%s" not implemented. Valid '
                    'orientations: %s.')
            raise ValueError(tmpl % (orient, validorients))
        pPr = makeelement('pPr')
        sectPr = makeelement('sectPr')
        if orient == 'portrait':
            pgSz = makeelement('pgSz', attributes={'w': '12240', 'h': '15840'})
        elif orient == 'landscape':
            pgSz = makeelement('pgSz', attributes={'h': '12240', 'w': '15840',
                                                   'orient': 'landscape'})
        sectPr.append(pgSz)
        pPr.append(sectPr)
        pagebreak.append(pPr)
    return pagebreak
def paragraph(paratext, style='BodyText', breakbefore=False, jc='left'):
    """
    Return a new paragraph element containing *paratext*. The paragraph's
    default style is 'Body Text', but a new style may be set using the
    *style* parameter.

    @param string jc: Paragraph alignment, possible values:
                      left, center, right, both (justified), ...
                      see http://www.schemacentral.com/sc/ooxml/t-w_ST_Jc.html
                      for a full list

    If *paratext* is a list, add a run for each (text, char_format_str)
    2-tuple in the list. char_format_str is a string containing one or more
    of the characters 'b', 'i', or 'u', meaning bold, italic, and underline
    respectively. For example:

        paratext = [
            ('some bold text', 'b'),
            ('some normal text', ''),
            ('some italic underlined text', 'iu')
        ]
    """
    # Make our elements
    paragraph = makeelement('p')
    # Normalize plain-string input to the list-of-tuples form.
    if not isinstance(paratext, list):
        paratext = [(paratext, '')]
    text_tuples = []
    for pt in paratext:
        text, char_styles_str = (pt if isinstance(pt, (list, tuple))
                                 else (pt, ''))
        text_elm = makeelement('t', tagtext=text)
        # Leading/trailing whitespace is only kept by OOXML consumers when
        # xml:space="preserve" is set on the w:t element.
        if len(text.strip()) < len(text):
            text_elm.set('{http://www.w3.org/XML/1998/namespace}space',
                         'preserve')
        text_tuples.append([text_elm, char_styles_str])
    pPr = makeelement('pPr')
    pStyle = makeelement('pStyle', attributes={'val': style})
    pJc = makeelement('jc', attributes={'val': jc})
    pPr.append(pStyle)
    pPr.append(pJc)
    # Add the text to the run, and the run to the paragraph
    paragraph.append(pPr)
    for text_elm, char_styles_str in text_tuples:
        run = makeelement('r')
        rPr = makeelement('rPr')
        # Apply styles
        if 'b' in char_styles_str:
            b = makeelement('b')
            rPr.append(b)
        if 'i' in char_styles_str:
            i = makeelement('i')
            rPr.append(i)
        if 'u' in char_styles_str:
            u = makeelement('u', attributes={'val': 'single'})
            rPr.append(u)
        run.append(rPr)
        # Insert lastRenderedPageBreak for assistive technologies like
        # document narrators to know when a page break occurred.
        if breakbefore:
            lastRenderedPageBreak = makeelement('lastRenderedPageBreak')
            run.append(lastRenderedPageBreak)
        run.append(text_elm)
        paragraph.append(run)
    # Return the combined paragraph
    return paragraph
def contenttypes():
    """Build and return the [Content_Types].xml element tree.

    Declares an Override content type for each fixed package part and a
    Default content type for each file extension the package may contain.
    """
    types = etree.fromstring(
        '<Types xmlns="http://schemas.openxmlformats.org/package/2006/conten'
        't-types"></Types>')
    # Explicit (Override) content types, one per package part path.
    parts = {
        '/word/theme/theme1.xml': 'application/vnd.openxmlformats-officedocu'
        'ment.theme+xml',
        '/word/fontTable.xml': 'application/vnd.openxmlformats-officedocu'
        'ment.wordprocessingml.fontTable+xml',
        '/docProps/core.xml': 'application/vnd.openxmlformats-package.co'
        're-properties+xml',
        '/docProps/app.xml': 'application/vnd.openxmlformats-officedocu'
        'ment.extended-properties+xml',
        '/word/document.xml': 'application/vnd.openxmlformats-officedocu'
        'ment.wordprocessingml.document.main+xml',
        '/word/settings.xml': 'application/vnd.openxmlformats-officedocu'
        'ment.wordprocessingml.settings+xml',
        '/word/numbering.xml': 'application/vnd.openxmlformats-officedocu'
        'ment.wordprocessingml.numbering+xml',
        '/word/styles.xml': 'application/vnd.openxmlformats-officedocu'
        'ment.wordprocessingml.styles+xml',
        '/word/webSettings.xml': 'application/vnd.openxmlformats-officedocu'
        'ment.wordprocessingml.webSettings+xml'}
    for part in parts:
        types.append(makeelement('Override', nsprefix=None,
                                 attributes={'PartName': part,
                                             'ContentType': parts[part]}))
    # Add support for filetypes
    filetypes = {
        'gif': 'image/gif',
        'jpeg': 'image/jpeg',
        'jpg': 'image/jpeg',
        'png': 'image/png',
        'rels': 'application/vnd.openxmlformats-package.relationships+xml',
        'xml': 'application/xml'
    }
    for extension in filetypes:
        attrs = {
            'Extension': extension,
            'ContentType': filetypes[extension]
        }
        default_elm = makeelement('Default', nsprefix=None, attributes=attrs)
        types.append(default_elm)
    return types
def heading(headingtext, headinglevel, lang='en'):
    '''Return a paragraph element styled as a heading.

    *headinglevel* selects HeadingN (or the localised style name chosen
    via *lang*; currently 'en' and 'it' are supported).
    '''
    stylenames = {'en': 'Heading', 'it': 'Titolo'}
    stylename = stylenames[lang] + str(headinglevel)
    # <w:p><w:pPr><w:pStyle/></w:pPr><w:r><w:t>text</w:t></w:r></w:p>
    para = makeelement('p')
    props = makeelement('pPr')
    props.append(makeelement('pStyle', attributes={'val': stylename}))
    para.append(props)
    run = makeelement('r')
    run.append(makeelement('t', tagtext=headingtext))
    para.append(run)
    return para
def table(contents, heading=True, colw=None, cwunit='dxa', tblw=0,
          twunit='auto', borders={}, celstyle=None):
    """
    Return a table element based on specified parameters
    @param list contents: A list of lists describing contents. Every item in
                          the list can be a string or a valid XML element
                          itself. It can also be a list. In that case all the
                          listed elements will be merged into the cell.
    @param bool heading:  Tells whether first line should be treated as
                          heading or not
    @param list colw:     list of integer column widths specified in wunitS.
    @param str cwunit:    Unit used for column width:
                            'pct'  : fiftieths of a percent
                            'dxa'  : twentieths of a point
                            'nil'  : no width
                            'auto' : automagically determined
    @param int tblw:      Table width
    @param str twunit:    Unit used for table width. Same possible values as
                          cwunit.
    @param dict borders:  Dictionary defining table border. Supported keys
                          are: 'top', 'left', 'bottom', 'right',
                          'insideH', 'insideV', 'all'.
                          When specified, the 'all' key has precedence over
                          others. Each key must define a dict of border
                          attributes: color, space, sz, val. See
                          http://www.schemacentral.com/sc/ooxml/t-w_ST_Border.htm
    @param list celstyle: Specify the style for each colum, list of dicts.
                          supported keys:
                          'align' : specify the alignment, see paragraph
                                    documentation.
    @return lxml.etree:   Generated XML etree element

    NOTE(review): the mutable default ``borders={}`` is never mutated here,
    so it is safe, but callers should not rely on it being shared.
    """
    table = makeelement('tbl')
    columns = len(contents[0])
    # Table properties
    tableprops = makeelement('tblPr')
    tablestyle = makeelement('tblStyle', attributes={'val': ''})
    tableprops.append(tablestyle)
    tablewidth = makeelement(
        'tblW', attributes={'w': str(tblw), 'type': str(twunit)})
    tableprops.append(tablewidth)
    if len(borders.keys()):
        # Emit a tblBorders element; 'all' overrides individual sides.
        tableborders = makeelement('tblBorders')
        for b in ['top', 'left', 'bottom', 'right', 'insideH', 'insideV']:
            if b in borders.keys() or 'all' in borders.keys():
                k = 'all' if 'all' in borders.keys() else b
                attrs = {}
                for a in borders[k].keys():
                    # NOTE: ``unicode`` is Python 2 only.
                    attrs[a] = unicode(borders[k][a])
                borderelem = makeelement(b, attributes=attrs)
                tableborders.append(borderelem)
        tableprops.append(tableborders)
    tablelook = makeelement('tblLook', attributes={'val': '0400'})
    tableprops.append(tablelook)
    table.append(tableprops)
    # Table Grid: one gridCol per column, width from colw or a default.
    tablegrid = makeelement('tblGrid')
    for i in range(columns):
        attrs = {'w': str(colw[i]) if colw else '2390'}
        tablegrid.append(makeelement('gridCol', attributes=attrs))
    table.append(tablegrid)
    # Heading Row
    row = makeelement('tr')
    rowprops = makeelement('trPr')
    cnfStyle = makeelement('cnfStyle', attributes={'val': '000000100000'})
    rowprops.append(cnfStyle)
    row.append(rowprops)
    if heading:
        i = 0
        # NOTE: the loop variable deliberately shadows the ``heading``
        # parameter; the ``contents[1 if heading else 0:]`` below therefore
        # tests the LAST heading cell's truthiness, not the parameter.
        for heading in contents[0]:
            cell = makeelement('tc')
            # Cell properties
            cellprops = makeelement('tcPr')
            if colw:
                wattr = {'w': str(colw[i]), 'type': cwunit}
            else:
                wattr = {'w': '0', 'type': 'auto'}
            cellwidth = makeelement('tcW', attributes=wattr)
            cellstyle = makeelement('shd', attributes={'val': 'clear',
                                                       'color': 'auto',
                                                       'fill': 'FFFFFF',
                                                       'themeFill': 'text2',
                                                       'themeFillTint': '99'})
            cellprops.append(cellwidth)
            cellprops.append(cellstyle)
            cell.append(cellprops)
            # Paragraph (Content)
            if not isinstance(heading, (list, tuple)):
                heading = [heading]
            for h in heading:
                if isinstance(h, etree._Element):
                    cell.append(h)
                else:
                    cell.append(paragraph(h, jc='center'))
            row.append(cell)
            i += 1
        table.append(row)
    # Contents Rows
    for contentrow in contents[1 if heading else 0:]:
        row = makeelement('tr')
        i = 0
        for content in contentrow:
            cell = makeelement('tc')
            # Properties
            cellprops = makeelement('tcPr')
            if colw:
                wattr = {'w': str(colw[i]), 'type': cwunit}
            else:
                wattr = {'w': '0', 'type': 'auto'}
            cellwidth = makeelement('tcW', attributes=wattr)
            cellprops.append(cellwidth)
            cell.append(cellprops)
            # Paragraph (Content)
            if not isinstance(content, (list, tuple)):
                content = [content]
            for c in content:
                if isinstance(c, etree._Element):
                    cell.append(c)
                else:
                    if celstyle and 'align' in celstyle[i].keys():
                        align = celstyle[i]['align']
                    else:
                        align = 'left'
                    cell.append(paragraph(c, jc=align))
            row.append(cell)
            i += 1
        table.append(row)
    return table
def picture(
        relationshiplist, picname, picdescription, pixelwidth=None,
        pixelheight=None, nochangeaspect=True, nochangearrowheads=True,
        imagefiledict=None):
    """
    Take a relationshiplist, picture file name, and return a paragraph
    containing the image and an updated relationshiplist.

    When *imagefiledict* is given, the image file is tracked in that dict
    and the function returns (relationshiplist, paragraph, imagefiledict);
    otherwise the image is copied into the template directory (legacy
    behaviour) and (relationshiplist, paragraph) is returned.
    Image orientation is corrected from EXIF data when available.
    """
    if imagefiledict is None:
        warn(
            'Using picture() without imagefiledict parameter will be depreca'
            'ted in the future.', PendingDeprecationWarning
        )
    # http://openxmldeveloper.org/articles/462.aspx
    # Create an image. Size may be specified, otherwise it will based on the
    # pixel size of image. Return a paragraph containing the picture
    # Set relationship ID to that of the image or the first available one
    picid = '2'
    picpath = abspath(picname)
    if imagefiledict is not None:
        # Keep track of the image files in a separate dictionary so they don't
        # need to be copied into the template directory
        if picpath not in imagefiledict:
            picrelid = 'rId' + str(len(relationshiplist) + 1)
            imagefiledict[picpath] = picrelid
            relationshiplist.append([
                'http://schemas.openxmlformats.org/officeDocument/2006/relat'
                'ionships/image',
                'media/%s_%s' % (picrelid, basename(picpath))
            ])
        else:
            picrelid = imagefiledict[picpath]
    else:
        # Copy files into template directory for backwards compatibility
        # Images still accumulate in the template directory this way
        picrelid = 'rId' + str(len(relationshiplist) + 1)
        relationshiplist.append([
            'http://schemas.openxmlformats.org/officeDocument/2006/relations'
            'hips/image', 'media/' + picname
        ])
        media_dir = join(template_dir, 'word', 'media')
        if not os.path.isdir(media_dir):
            os.mkdir(media_dir)
        shutil.copyfile(picname, join(media_dir, picname))
    image = Image.open(picpath)
    # Extract EXIF data, if available
    try:
        exif = image._getexif()
        exif = {} if exif is None else exif
    except:
        # NOTE(review): bare except deliberately treats any EXIF failure
        # as "no EXIF data".
        exif = {}
    imageExif = {}
    for tag, value in exif.items():
        imageExif[TAGS.get(tag, tag)] = value
    # Map the EXIF Orientation tag (1-8) to a rotation angle and flips.
    imageOrientation = imageExif.get('Orientation', 1)
    imageAngle = {
        1: 0, 2: 0, 3: 180, 4: 0, 5: 90, 6: 90, 7: 270, 8: 270
    }[imageOrientation]
    imageFlipH = 'true' if imageOrientation in (2, 5, 7) else 'false'
    imageFlipV = 'true' if imageOrientation == 4 else 'false'
    # Check if the user has specified a size
    if not pixelwidth or not pixelheight:
        # If not, get info from the picture itself
        pixelwidth, pixelheight = image.size[0:2]
        # Swap width and height if necessary
        if imageOrientation in (5, 6, 7, 8):
            pixelwidth, pixelheight = pixelheight, pixelwidth
    # OpenXML measures on-screen objects in English Metric Units
    # 1cm = 36000 EMUs
    # NOTE(review): 12700 EMU = 1 point, i.e. each pixel is treated as one
    # point here — confirm this is the intended scaling.
    emuperpixel = 12700
    width = str(pixelwidth * emuperpixel)
    height = str(pixelheight * emuperpixel)
    # There are 3 main elements inside a picture
    # 1. The Blipfill - specifies how the image fills the picture area
    #    (stretch, tile, etc.)
    blipfill = makeelement('blipFill', nsprefix='pic')
    blipfill.append(makeelement('blip', nsprefix='a', attrnsprefix='r',
                                attributes={'embed': picrelid}))
    stretch = makeelement('stretch', nsprefix='a')
    stretch.append(makeelement('fillRect', nsprefix='a'))
    blipfill.append(makeelement('srcRect', nsprefix='a'))
    blipfill.append(stretch)
    # 2. The non visual picture properties
    nvpicpr = makeelement('nvPicPr', nsprefix='pic')
    cnvpr = makeelement(
        'cNvPr', nsprefix='pic',
        attributes={'id': '0', 'name': 'Picture 1', 'descr': picdescription}
    )
    nvpicpr.append(cnvpr)
    cnvpicpr = makeelement('cNvPicPr', nsprefix='pic')
    cnvpicpr.append(makeelement(
        'picLocks', nsprefix='a',
        attributes={'noChangeAspect': str(int(nochangeaspect)),
                    'noChangeArrowheads': str(int(nochangearrowheads))}))
    nvpicpr.append(cnvpicpr)
    # 3. The Shape properties (rotation/flip from EXIF, offset, extent)
    sppr = makeelement('spPr', nsprefix='pic', attributes={'bwMode': 'auto'})
    xfrm = makeelement(
        'xfrm', nsprefix='a', attributes={
            'rot': str(imageAngle * 60000), 'flipH': imageFlipH,
            'flipV': imageFlipV
        }
    )
    xfrm.append(
        makeelement('off', nsprefix='a', attributes={'x': '0', 'y': '0'})
    )
    xfrm.append(
        makeelement(
            'ext', nsprefix='a', attributes={'cx': width, 'cy': height}
        )
    )
    prstgeom = makeelement(
        'prstGeom', nsprefix='a', attributes={'prst': 'rect'}
    )
    prstgeom.append(makeelement('avLst', nsprefix='a'))
    sppr.append(xfrm)
    sppr.append(prstgeom)
    # Add our 3 parts to the picture element
    pic = makeelement('pic', nsprefix='pic')
    pic.append(nvpicpr)
    pic.append(blipfill)
    pic.append(sppr)
    # Now make the supporting elements
    # The following sequence is just: make element, then add its children
    graphicdata = makeelement(
        'graphicData', nsprefix='a',
        attributes={'uri': ('http://schemas.openxmlformats.org/drawingml/200'
                            '6/picture')})
    graphicdata.append(pic)
    graphic = makeelement('graphic', nsprefix='a')
    graphic.append(graphicdata)
    framelocks = makeelement('graphicFrameLocks', nsprefix='a',
                             attributes={'noChangeAspect': '1'})
    framepr = makeelement('cNvGraphicFramePr', nsprefix='wp')
    framepr.append(framelocks)
    docpr = makeelement('docPr', nsprefix='wp',
                        attributes={'id': picid, 'name': 'Picture 1',
                                    'descr': picdescription})
    effectextent = makeelement('effectExtent', nsprefix='wp',
                               attributes={'l': '25400', 't': '0', 'r': '0',
                                           'b': '0'})
    extent = makeelement('extent', nsprefix='wp',
                         attributes={'cx': width, 'cy': height})
    inline = makeelement('inline', attributes={'distT': "0", 'distB': "0",
                                               'distL': "0", 'distR': "0"},
                         nsprefix='wp')
    inline.append(extent)
    inline.append(effectextent)
    inline.append(docpr)
    inline.append(framepr)
    inline.append(graphic)
    drawing = makeelement('drawing')
    drawing.append(inline)
    run = makeelement('r')
    run.append(drawing)
    paragraph = makeelement('p')
    paragraph.append(run)
    if imagefiledict is not None:
        return relationshiplist, paragraph, imagefiledict
    else:
        return relationshiplist, paragraph
def search(document, search):
    '''Search a document for a regex; return True on the first match,
    False if no text element matches.'''
    searchre = re.compile(search)
    for element in document.iter():
        # Only w:t (text) elements carry document text.
        if element.tag == '{%s}t' % nsprefixes['w']:
            if element.text and searchre.search(element.text):
                # Stop scanning as soon as a match is found instead of
                # walking the remainder of the document.
                return True
    return False
def replace(document, search, replace):
    """
    Substitute every regex match of *search* with *replace* inside all
    w:t (text) elements of *document*; return the updated document.
    """
    newdocument = document
    searchre = re.compile(search)
    texttag = '{%s}t' % nsprefixes['w']
    for element in newdocument.iter():
        if element.tag == texttag and element.text:
            if searchre.search(element.text):
                element.text = re.sub(search, replace, element.text)
    return newdocument
def clean(document):
    """Remove empty w:t and w:r elements from *document* and return it."""
    newdocument = document
    for tagname in ('t', 'r'):
        fulltag = '{%s}%s' % (nsprefixes['w'], tagname)
        # Collect first, then remove, so the tree is never mutated while
        # it is being iterated.
        doomed = [el for el in newdocument.iter()
                  if el.tag == fulltag and not el.text and not len(el)]
        for el in doomed:
            el.getparent().remove(el)
    return newdocument
def findTypeParent(element, tag):
    """ Finds first ancestor of *element* with the given tag.
    @param object element: etree element
    @param string tag: the tag of the parent to search for
    @return object element: the found parent, or None when not found

    Fixed: the original looped ``while True`` and raised AttributeError
    (``None.tag``) when it reached the tree root without a match, making
    its ``return None`` unreachable.
    """
    p = element.getparent()
    while p is not None:
        if p.tag == tag:
            return p
        p = p.getparent()
    # Reached the root without finding a matching ancestor.
    return None
def AdvSearch(document, search, bs=3):
    '''Return the set of all regex matches found in the document text.

    This is an advanced version of python-docx.search() that takes into
    account blocks of <bs> elements at a time, because the text to search
    for may be spread across multiple adjacent w:t text elements.

    Examples:
        original text blocks : [ 'Hel', 'lo,', ' world!' ]
        search : 'Hello,'
        output : set(['Hello,'])

        original text blocks : [ 'Hel', 'lo', ' __', 'name', '__!' ]
        search : '(__[a-z]+__)'
        output : set(['__name__'])

    @param instance document: The original document
    @param str search: The text to search for (regexp)
    @param int bs: Maximum number of consecutive text elements to join
                   when looking for a match
    @return set: All occurrences of the search string
    '''
    # Compile the search regexp
    searchre = re.compile(search)
    matches = []
    # Will match against searchels. Searchels is a sliding window holding
    # the last n text elements found in the document, 1 <= n <= bs.
    searchels = []
    for element in document.iter():
        if element.tag == '{%s}t' % nsprefixes['w']:  # t (text) elements
            if element.text:
                # Add this element to searchels
                searchels.append(element)
                if len(searchels) > bs:
                    # If searchels is too long, remove the first element
                    searchels.pop(0)
                # Search all combinations of searchels, starting from
                # smaller windows up to bigger ones
                # l = window length
                # s = window start
                # e = element IDs to merge
                found = False
                for l in range(1, len(searchels)+1):
                    if found:
                        break
                    for s in range(len(searchels)):
                        if found:
                            break
                        if s+l <= len(searchels):
                            e = range(s, s+l)
                            txtsearch = ''
                            for k in e:
                                txtsearch += searchels[k].text
                            # Search for the pattern in the joined text
                            match = searchre.search(txtsearch)
                            if match:
                                matches.append(match.group())
                                found = True
    return set(matches)
def advReplace(document, search, replace, bs=3):
    """
    Replace all occurrences of string with a different string, return updated
    document
    This is a modified version of python-docx.replace() that takes into
    account blocks of <bs> elements at a time. The replace element can also
    be a string or an xml etree element.
    What it does:
    It searches the entire document body for text blocks.
    Then scans those text blocks for the search pattern.
    Since the text to search could be spread across multiple text blocks,
    we need to adopt some sort of algorithm to handle this situation.
    The smaller matching group of blocks (up to bs) is then adopted.
    If the matching group has more than one block, blocks other than first
    are cleared and all the replacement text is put on first block.
    Examples:
    original text blocks : [ 'Hel', 'lo,', ' world!' ]
    search / replace: 'Hello,' / 'Hi!'
    output blocks : [ 'Hi!', '', ' world!' ]
    original text blocks : [ 'Hel', 'lo,', ' world!' ]
    search / replace: 'Hello, world' / 'Hi!'
    output blocks : [ 'Hi!!', '', '' ]
    original text blocks : [ 'Hel', 'lo,', ' world!' ]
    search / replace: 'Hel' / 'Hal'
    output blocks : [ 'Hal', 'lo,', ' world!' ]
    @param instance document: The original document
    @param str search: The text to search for (regexp)
    @param mixed replace: The replacement text or lxml.etree element to
                          append, or a list of etree elements
    @param int bs: See above
    @return instance The document with replacement applied

    Fixed: the DEBUG branch called ``replace(list, tuple)`` instead of
    testing ``isinstance(replace, (list, tuple))`` (TypeError when replace
    is a string), and a log.debug call passed an argument without a
    matching %s placeholder.
    """
    # Enables debug output
    DEBUG = False
    newdocument = document
    # Compile the search regexp
    searchre = re.compile(search)
    # Will match against searchels. Searchels is a list that contains last
    # n text elements found in the document. 1 < n < bs
    searchels = []
    for element in newdocument.iter():
        if element.tag == '{%s}t' % nsprefixes['w']:  # t (text) elements
            if element.text:
                # Add this element to searchels
                searchels.append(element)
                if len(searchels) > bs:
                    # Is searchels is too long, remove first elements
                    searchels.pop(0)
                # Search all combinations, of searchels, starting from
                # smaller up to bigger ones
                # l = search length
                # s = search start
                # e = element IDs to merge
                found = False
                for l in range(1, len(searchels)+1):
                    if found:
                        break
                    #print "slen:", l
                    for s in range(len(searchels)):
                        if found:
                            break
                        if s+l <= len(searchels):
                            e = range(s, s+l)
                            #print "elems:", e
                            txtsearch = ''
                            for k in e:
                                txtsearch += searchels[k].text
                            # Search for the text in the whole txtsearch
                            match = searchre.search(txtsearch)
                            if match:
                                found = True
                                # I've found something :)
                                if DEBUG:
                                    log.debug("Found element!")
                                    log.debug("Search regexp: %s",
                                              searchre.pattern)
                                    log.debug("Requested replacement: %s",
                                              replace)
                                    log.debug("Matched text: %s", txtsearch)
                                    log.debug("Matched text (splitted): %s",
                                              map(lambda i: i.text, searchels))
                                    log.debug("Matched at position: %s",
                                              match.start())
                                    log.debug("matched in elements: %s", e)
                                    if isinstance(replace, etree._Element):
                                        log.debug("Will replace with XML CODE")
                                    elif isinstance(replace, (list, tuple)):
                                        log.debug("Will replace with LIST OF"
                                                  " ELEMENTS")
                                    else:
                                        log.debug("Will replace with: %s",
                                                  re.sub(search, replace,
                                                         txtsearch))
                                curlen = 0
                                replaced = False
                                for i in e:
                                    curlen += len(searchels[i].text)
                                    if curlen > match.start() and not replaced:
                                        # The match occurred in THIS element.
                                        # Put in the whole replaced text
                                        if isinstance(replace, etree._Element):
                                            # Convert to a list and process
                                            # it later
                                            replace = [replace]
                                        if isinstance(replace, (list, tuple)):
                                            # I'm replacing with a list of
                                            # etree elements
                                            # clear the text in the tag and
                                            # append the element after the
                                            # parent paragraph
                                            # (because t elements cannot have
                                            # childs)
                                            p = findTypeParent(
                                                searchels[i],
                                                '{%s}p' % nsprefixes['w'])
                                            searchels[i].text = re.sub(
                                                search, '', txtsearch)
                                            insindex = p.getparent().index(p)+1
                                            for r in replace:
                                                p.getparent().insert(
                                                    insindex, r)
                                                insindex += 1
                                        else:
                                            # Replacing with pure text
                                            searchels[i].text = re.sub(
                                                search, replace, txtsearch)
                                        replaced = True
                                        log.debug(
                                            "Replacing in element #: %s", i)
                                    else:
                                        # Clears the other text elements
                                        searchels[i].text = ''
    return newdocument
def getdocumenttext(document):
    '''Return the raw text of a document, as a list of paragraphs.'''
    ptag = '{' + nsprefixes['w'] + '}p'
    ttag = '{' + nsprefixes['w'] + '}t'
    tabtag = '{' + nsprefixes['w'] + '}tab'
    paratextlist = []
    # A single sentence may be split over several w:t runs, so stitch
    # every paragraph's text children back together.
    for para in (el for el in document.iter() if el.tag == ptag):
        pieces = []
        for child in para.iter():
            if child.tag == ttag:
                if child.text:
                    pieces.append(child.text)
            elif child.tag == tabtag:
                pieces.append('\t')
        paratext = u''.join(pieces)
        # Skip paragraphs that contributed no text at all.
        if paratext:
            paratextlist.append(paratext)
    return paratextlist
def coreproperties(title, subject, creator, keywords, lastmodifiedby=None):
    """
    Create core properties (common document properties referred to in the
    'Dublin Core' specification). See appproperties() for other stuff.

    *keywords* is an iterable of strings joined with commas.
    *lastmodifiedby* defaults to *creator* when not given.
    Returns the cp:coreProperties element tree.
    """
    coreprops = makeelement('coreProperties', nsprefix='cp')
    coreprops.append(makeelement('title', tagtext=title, nsprefix='dc'))
    coreprops.append(makeelement('subject', tagtext=subject, nsprefix='dc'))
    coreprops.append(makeelement('creator', tagtext=creator, nsprefix='dc'))
    coreprops.append(makeelement('keywords', tagtext=','.join(keywords),
                                 nsprefix='cp'))
    if not lastmodifiedby:
        lastmodifiedby = creator
    coreprops.append(makeelement('lastModifiedBy', tagtext=lastmodifiedby,
                                 nsprefix='cp'))
    coreprops.append(makeelement('revision', tagtext='1', nsprefix='cp'))
    coreprops.append(
        makeelement('category', tagtext='Examples', nsprefix='cp'))
    coreprops.append(
        makeelement('description', tagtext='Examples', nsprefix='dc'))
    currenttime = time.strftime('%Y-%m-%dT%H:%M:%SZ')
    # Document creation and modify times
    # Prob here: we have an attribute who name uses one namespace, and that
    # attribute's value uses another namespace.
    # We're creating the element from a string as a workaround...
    for doctime in ['created', 'modified']:
        elm_str = (
            '<dcterms:%s xmlns:xsi="http://www.w3.org/2001/XMLSchema-instanc'
            'e" xmlns:dcterms="http://purl.org/dc/terms/" xsi:type="dcterms:'
            'W3CDTF">%s</dcterms:%s>'
        ) % (doctime, currenttime, doctime)
        coreprops.append(etree.fromstring(elm_str))
    return coreprops
def websettings():
    '''Build and return a default word/webSettings.xml element tree.'''
    web = makeelement('webSettings')
    for childtag in ('allowPNG', 'doNotSaveAsSingleFile'):
        web.append(makeelement(childtag))
    return web
def relationshiplist():
    '''Return the default document relationships as [type, target] pairs.'''
    # All six default relationships share the same URI prefix; only the
    # final path segment and the target file differ.
    reltype = ('http://schemas.openxmlformats.org/officeDocument/2006/'
               'relationships/%s')
    targets = [
        ('numbering', 'numbering.xml'),
        ('styles', 'styles.xml'),
        ('settings', 'settings.xml'),
        ('webSettings', 'webSettings.xml'),
        ('fontTable', 'fontTable.xml'),
        ('theme', 'theme/theme1.xml'),
    ]
    return [[reltype % name, target] for name, target in targets]
def wordrelationships(relationshiplist):
    '''Generate a Word relationships file from (type, target) pairs.'''
    # FIXME: using string hack instead of making element
    relationships = etree.fromstring(
        '<Relationships xmlns="http://schemas.openxmlformats.org/package/2006'
        '/relationships"></Relationships>')
    # Relationship IDs (rId) start at 1.
    for index, rel in enumerate(relationshiplist, 1):
        relationships.append(makeelement(
            'Relationship', nsprefix=None,
            attributes={'Id': 'rId' + str(index),
                        'Type': rel[0],
                        'Target': rel[1]}))
    return relationships
def savedocx(
        document, coreprops, appprops, contenttypes, websettings,
        wordrelationships, output, imagefiledict=None):
    """
    Save a modified document.

    Serialises each XML tree into the appropriate part of a new zip
    archive at *output*, then copies the template support files (and the
    tracked images, when *imagefiledict* is given) into the archive.
    NOTE: uses os.chdir, which changes the working directory of the whole
    process while the archive is being written.
    """
    if imagefiledict is None:
        warn(
            'Using savedocx() without imagefiledict parameter will be deprec'
            'ated in the future.', PendingDeprecationWarning
        )
    assert os.path.isdir(template_dir)
    docxfile = zipfile.ZipFile(
        output, mode='w', compression=zipfile.ZIP_DEFLATED)
    # Move to the template data path
    prev_dir = os.path.abspath('.')  # save previous working dir
    os.chdir(template_dir)
    # Serialize our trees into out zip file
    treesandfiles = {
        document: 'word/document.xml',
        coreprops: 'docProps/core.xml',
        appprops: 'docProps/app.xml',
        contenttypes: '[Content_Types].xml',
        websettings: 'word/webSettings.xml',
        wordrelationships: 'word/_rels/document.xml.rels'
    }
    for tree in treesandfiles:
        log.info('Saving: %s' % treesandfiles[tree])
        treestring = etree.tostring(tree, pretty_print=True)
        docxfile.writestr(treesandfiles[tree], treestring)
    # Add & compress images, if applicable
    if imagefiledict is not None:
        for imagepath, picrelid in imagefiledict.items():
            archivename = 'word/media/%s_%s' % (picrelid, basename(imagepath))
            log.info('Saving: %s', archivename)
            docxfile.write(imagepath, archivename)
    # Add & compress support files
    files_to_ignore = ['.DS_Store']  # nuisance from some os's
    for dirpath, dirnames, filenames in os.walk('.'):
        for filename in filenames:
            if filename in files_to_ignore:
                continue
            templatefile = join(dirpath, filename)
            # Strip the leading './' so archive paths are relative.
            archivename = templatefile[2:]
            log.info('Saving: %s', archivename)
            docxfile.write(templatefile, archivename)
    log.info('Saved new file to: %r', output)
    docxfile.close()
    os.chdir(prev_dir)  # restore previous working dir
    return
|
mikemaccana/python-docx
|
docx.py
|
websettings
|
python
|
def websettings():
'''Generate websettings'''
web = makeelement('webSettings')
web.append(makeelement('allowPNG'))
web.append(makeelement('doNotSaveAsSingleFile'))
return web
|
Generate websettings
|
train
|
https://github.com/mikemaccana/python-docx/blob/4c9b46dbebe3d2a9b82dbcd35af36584a36fd9fe/docx.py#L1006-L1011
|
[
"def makeelement(tagname, tagtext=None, nsprefix='w', attributes=None,\n attrnsprefix=None):\n '''Create an element & return it'''\n # Deal with list of nsprefix by making namespacemap\n namespacemap = None\n if isinstance(nsprefix, list):\n namespacemap = {}\n for prefix in nsprefix:\n namespacemap[prefix] = nsprefixes[prefix]\n # FIXME: rest of code below expects a single prefix\n nsprefix = nsprefix[0]\n if nsprefix:\n namespace = '{%s}' % nsprefixes[nsprefix]\n else:\n # For when namespace = None\n namespace = ''\n newelement = etree.Element(namespace+tagname, nsmap=namespacemap)\n # Add attributes with namespaces\n if attributes:\n # If they haven't bothered setting attribute namespace, use an empty\n # string (equivalent of no namespace)\n if not attrnsprefix:\n # Quick hack: it seems every element that has a 'w' nsprefix for\n # its tag uses the same prefix for it's attributes\n if nsprefix == 'w':\n attributenamespace = namespace\n else:\n attributenamespace = ''\n else:\n attributenamespace = '{'+nsprefixes[attrnsprefix]+'}'\n\n for tagattribute in attributes:\n newelement.set(attributenamespace+tagattribute,\n attributes[tagattribute])\n if tagtext:\n newelement.text = tagtext\n return newelement\n"
] |
# encoding: utf-8
"""
Open and modify Microsoft Word 2007 docx files (called 'OpenXML' and
'Office OpenXML' by Microsoft)
Part of Python's docx module - http://github.com/mikemaccana/python-docx
See LICENSE for licensing information.
"""
import os
import re
import time
import shutil
import zipfile
from lxml import etree
from os.path import abspath, basename, join
try:
from PIL import Image
except ImportError:
import Image
try:
from PIL.ExifTags import TAGS
except ImportError:
TAGS = {}
from exceptions import PendingDeprecationWarning
from warnings import warn
import logging
log = logging.getLogger(__name__)
# Record template directory's location which is just 'template' for a docx
# developer or 'site-packages/docx-template' if you have installed docx
template_dir = join(os.path.dirname(__file__), 'docx-template')  # installed
if not os.path.isdir(template_dir):
    template_dir = join(os.path.dirname(__file__), 'template')  # dev
# All Word prefixes / namespace matches used in document.xml & core.xml.
# LXML doesn't actually use prefixes (just the real namespace) , but these
# make it easier to copy Word output more easily.
# Used as the lookup table for makeelement()'s nsprefix/attrnsprefix keys.
nsprefixes = {
    'mo': 'http://schemas.microsoft.com/office/mac/office/2008/main',
    'o': 'urn:schemas-microsoft-com:office:office',
    've': 'http://schemas.openxmlformats.org/markup-compatibility/2006',
    # Text Content
    'w': 'http://schemas.openxmlformats.org/wordprocessingml/2006/main',
    'w10': 'urn:schemas-microsoft-com:office:word',
    'wne': 'http://schemas.microsoft.com/office/word/2006/wordml',
    # Drawing
    'a': 'http://schemas.openxmlformats.org/drawingml/2006/main',
    'm': 'http://schemas.openxmlformats.org/officeDocument/2006/math',
    'mv': 'urn:schemas-microsoft-com:mac:vml',
    'pic': 'http://schemas.openxmlformats.org/drawingml/2006/picture',
    'v': 'urn:schemas-microsoft-com:vml',
    'wp': ('http://schemas.openxmlformats.org/drawingml/2006/wordprocessing'
           'Drawing'),
    # Properties (core and extended)
    'cp': ('http://schemas.openxmlformats.org/package/2006/metadata/core-pr'
           'operties'),
    'dc': 'http://purl.org/dc/elements/1.1/',
    'ep': ('http://schemas.openxmlformats.org/officeDocument/2006/extended-'
           'properties'),
    'xsi': 'http://www.w3.org/2001/XMLSchema-instance',
    # Content Types
    'ct': 'http://schemas.openxmlformats.org/package/2006/content-types',
    # Package Relationships
    'r': ('http://schemas.openxmlformats.org/officeDocument/2006/relationsh'
          'ips'),
    'pr': 'http://schemas.openxmlformats.org/package/2006/relationships',
    # Dublin Core document properties
    'dcmitype': 'http://purl.org/dc/dcmitype/',
    'dcterms': 'http://purl.org/dc/terms/'}
def opendocx(file):
    '''Open a docx *file* (path or file-like object) and return its
    word/document.xml parsed as an XML element tree.'''
    mydoc = zipfile.ZipFile(file)
    try:
        xmlcontent = mydoc.read('word/document.xml')
    finally:
        # The original leaked the archive handle; always close it.
        mydoc.close()
    document = etree.fromstring(xmlcontent)
    return document
def newdocument():
    '''Return a minimal new document tree: a w:document holding an
    empty w:body.'''
    body = makeelement('body')
    document = makeelement('document')
    document.append(body)
    return document
def makeelement(tagname, tagtext=None, nsprefix='w', attributes=None,
                attrnsprefix=None):
    '''Create an lxml element and return it.

    tagname      -- local name of the element to create
    tagtext      -- optional text content for the element
    nsprefix     -- key into the module-level ``nsprefixes`` map, a list
                    of such keys (all go into the nsmap, the first one
                    namespaces the tag), or None for no namespace
    attributes   -- optional dict of attribute name -> value strings
    attrnsprefix -- optional ``nsprefixes`` key used to namespace the
                    attribute names
    '''
    # Deal with list of nsprefix by making namespacemap
    namespacemap = None
    if isinstance(nsprefix, list):
        namespacemap = {}
        for prefix in nsprefix:
            namespacemap[prefix] = nsprefixes[prefix]
        # FIXME: rest of code below expects a single prefix
        nsprefix = nsprefix[0]
    if nsprefix:
        namespace = '{%s}' % nsprefixes[nsprefix]
    else:
        # For when namespace = None
        namespace = ''
    newelement = etree.Element(namespace+tagname, nsmap=namespacemap)
    # Add attributes with namespaces
    if attributes:
        # If they haven't bothered setting attribute namespace, use an empty
        # string (equivalent of no namespace)
        if not attrnsprefix:
            # Quick hack: it seems every element that has a 'w' nsprefix for
            # its tag uses the same prefix for it's attributes
            if nsprefix == 'w':
                attributenamespace = namespace
            else:
                attributenamespace = ''
        else:
            attributenamespace = '{'+nsprefixes[attrnsprefix]+'}'
        for tagattribute in attributes:
            newelement.set(attributenamespace+tagattribute,
                           attributes[tagattribute])
    if tagtext:
        newelement.text = tagtext
    return newelement
def pagebreak(type='page', orient='portrait'):
    '''Insert a break, default 'page'.
    See http://openxmldeveloper.org/forums/thread/4075.aspx

    @param str type:   'page' or 'section'
    @param str orient: 'portrait' or 'landscape'; only meaningful for
                       section breaks.
    @return: the page break paragraph element
    @raise ValueError: on an unrecognised *type* or *orient* (the
                       original crashed with UnboundLocalError when a
                       section break got an invalid orientation).
    '''
    validtypes = ['page', 'section']
    if type not in validtypes:
        tmpl = 'Page break style "%s" not implemented. Valid styles: %s.'
        raise ValueError(tmpl % (type, validtypes))
    pagebreak = makeelement('p')
    if type == 'page':
        run = makeelement('r')
        br = makeelement('br', attributes={'type': type})
        run.append(br)
        pagebreak.append(run)
    elif type == 'section':
        validorients = ['portrait', 'landscape']
        if orient not in validorients:
            tmpl = 'Page orientation "%s" not supported. Valid values: %s.'
            raise ValueError(tmpl % (orient, validorients))
        pPr = makeelement('pPr')
        sectPr = makeelement('sectPr')
        # Page sizes are in twentieths of a point (letter-size page).
        if orient == 'portrait':
            pgSz = makeelement('pgSz', attributes={'w': '12240', 'h': '15840'})
        else:
            pgSz = makeelement('pgSz', attributes={'h': '12240', 'w': '15840',
                                                   'orient': 'landscape'})
        sectPr.append(pgSz)
        pPr.append(sectPr)
        pagebreak.append(pPr)
    return pagebreak
def paragraph(paratext, style='BodyText', breakbefore=False, jc='left'):
    """
    Build and return a new w:p paragraph element containing *paratext*.

    *paratext* may be a plain string, or a list whose items are either
    strings or (text, char_format_str) 2-tuples, one run per item.
    char_format_str may contain 'b', 'i' and/or 'u' for bold, italic and
    underline, e.g.::

        paratext = [
            ('some bold text', 'b'),
            ('some normal text', ''),
            ('some italic underlined text', 'iu')
        ]

    @param str style: paragraph style name (default 'BodyText')
    @param bool breakbefore: insert a lastRenderedPageBreak marker so
           assistive technologies can detect the page break
    @param str jc: paragraph alignment -- left, center, right, both
           (justified), ...; see
           http://www.schemacentral.com/sc/ooxml/t-w_ST_Jc.html
    """
    para = makeelement('p')
    entries = paratext if isinstance(paratext, list) else [(paratext, '')]
    # Normalise every entry to a (text element, format string) pair.
    prepared = []
    for entry in entries:
        if isinstance(entry, (list, tuple)):
            text, fmt = entry
        else:
            text, fmt = entry, ''
        t_elm = makeelement('t', tagtext=text)
        if len(text.strip()) < len(text):
            # Leading/trailing whitespace would otherwise be dropped by
            # XML whitespace handling; ask for it to be preserved.
            t_elm.set('{http://www.w3.org/XML/1998/namespace}space',
                      'preserve')
        prepared.append((t_elm, fmt))
    props = makeelement('pPr')
    props.append(makeelement('pStyle', attributes={'val': style}))
    props.append(makeelement('jc', attributes={'val': jc}))
    para.append(props)
    # One run per text fragment, each with its own character properties.
    for t_elm, fmt in prepared:
        run = makeelement('r')
        rpr = makeelement('rPr')
        if 'b' in fmt:
            rpr.append(makeelement('b'))
        if 'i' in fmt:
            rpr.append(makeelement('i'))
        if 'u' in fmt:
            rpr.append(makeelement('u', attributes={'val': 'single'}))
        run.append(rpr)
        if breakbefore:
            run.append(makeelement('lastRenderedPageBreak'))
        run.append(t_elm)
        para.append(run)
    return para
def contenttypes():
    '''Build and return the [Content_Types].xml tree: one Override entry
    per known package part plus Default entries for common file
    extensions.'''
    types = etree.fromstring(
        '<Types xmlns="http://schemas.openxmlformats.org/package/2006/conten'
        't-types"></Types>')
    # Shared prefix for the WordprocessingML part content types.
    wordml = ('application/vnd.openxmlformats-officedocument.'
              'wordprocessingml.')
    overrides = {
        '/word/theme/theme1.xml':
            'application/vnd.openxmlformats-officedocument.theme+xml',
        '/word/fontTable.xml': wordml + 'fontTable+xml',
        '/docProps/core.xml':
            'application/vnd.openxmlformats-package.core-properties+xml',
        '/docProps/app.xml':
            'application/vnd.openxmlformats-officedocument.'
            'extended-properties+xml',
        '/word/document.xml': wordml + 'document.main+xml',
        '/word/settings.xml': wordml + 'settings+xml',
        '/word/numbering.xml': wordml + 'numbering+xml',
        '/word/styles.xml': wordml + 'styles+xml',
        '/word/webSettings.xml': wordml + 'webSettings+xml',
    }
    for partname, contenttype in overrides.items():
        types.append(makeelement(
            'Override', nsprefix=None,
            attributes={'PartName': partname, 'ContentType': contenttype}))
    # Default content types keyed by file extension.
    defaults = {
        'gif': 'image/gif',
        'jpeg': 'image/jpeg',
        'jpg': 'image/jpeg',
        'png': 'image/png',
        'rels': 'application/vnd.openxmlformats-package.relationships+xml',
        'xml': 'application/xml',
    }
    for extension, contenttype in defaults.items():
        types.append(makeelement(
            'Default', nsprefix=None,
            attributes={'Extension': extension, 'ContentType': contenttype}))
    return types
def heading(headingtext, headinglevel, lang='en'):
    '''Make a new heading paragraph, return the heading element.

    @param str headingtext: the heading's text
    @param int headinglevel: heading level, appended to the style name
    @param str lang: 'en' or 'it'; any other value falls back to the
           English style names (the original raised KeyError).
    '''
    lmap = {'en': 'Heading', 'it': 'Titolo'}
    stylebase = lmap.get(lang, lmap['en'])
    # Make our elements
    paragraph = makeelement('p')
    pr = makeelement('pPr')
    pStyle = makeelement(
        'pStyle', attributes={'val': stylebase + str(headinglevel)})
    run = makeelement('r')
    text = makeelement('t', tagtext=headingtext)
    # Add the text to the run, and the run to the paragraph
    pr.append(pStyle)
    run.append(text)
    paragraph.append(pr)
    paragraph.append(run)
    # Return the combined paragraph
    return paragraph
def table(contents, heading=True, colw=None, cwunit='dxa', tblw=0,
          twunit='auto', borders={}, celstyle=None):
    """
    Return a table element based on specified parameters

    @param list contents: A list of rows; each row is a list of cells.
                          A cell may be a string, an etree element, or a
                          list of either (all merged into the cell).
    @param bool heading:  Treat the first row of *contents* as a heading
                          row.
    @param list colw:     Integer column widths, in *cwunit* units.
    @param str cwunit:    Unit used for column width:
                          'pct'  : fiftieths of a percent
                          'dxa'  : twentieths of a point
                          'nil'  : no width
                          'auto' : automagically determined
    @param int tblw:      Table width
    @param str twunit:    Unit used for table width (same values as
                          cwunit).
    @param dict borders:  Border definitions keyed by 'top', 'left',
                          'bottom', 'right', 'insideH', 'insideV' or
                          'all' ('all' takes precedence).  Each value is
                          a dict of border attributes (color, space, sz,
                          val); see
                          http://www.schemacentral.com/sc/ooxml/t-w_ST_Border.htm
    @param list celstyle: Optional per-column style dicts; supported key
                          'align' (see paragraph()).
    @return lxml.etree:   Generated XML etree element

    NOTE: *borders* is a mutable default argument; it is only read, never
    mutated, so this is safe -- but do not write to it.
    """
    table = makeelement('tbl')
    columns = len(contents[0])
    # Table properties
    tableprops = makeelement('tblPr')
    tablestyle = makeelement('tblStyle', attributes={'val': ''})
    tableprops.append(tablestyle)
    tablewidth = makeelement(
        'tblW', attributes={'w': str(tblw), 'type': str(twunit)})
    tableprops.append(tablewidth)
    if len(borders.keys()):
        tableborders = makeelement('tblBorders')
        for b in ['top', 'left', 'bottom', 'right', 'insideH', 'insideV']:
            if b in borders.keys() or 'all' in borders.keys():
                k = 'all' if 'all' in borders.keys() else b
                attrs = {}
                for a in borders[k].keys():
                    attrs[a] = unicode(borders[k][a])
                borderelem = makeelement(b, attributes=attrs)
                tableborders.append(borderelem)
        tableprops.append(tableborders)
    tablelook = makeelement('tblLook', attributes={'val': '0400'})
    tableprops.append(tablelook)
    table.append(tableprops)
    # Table Grid
    tablegrid = makeelement('tblGrid')
    for i in range(columns):
        attrs = {'w': str(colw[i]) if colw else '2390'}
        tablegrid.append(makeelement('gridCol', attributes=attrs))
    table.append(tablegrid)
    # Heading Row
    # BUGFIX: the loop below used to rebind the name 'heading' to each
    # heading cell, so the data-row slice further down depended on the
    # truthiness of the LAST heading cell instead of the caller's flag.
    # The heading row is now also only appended when heading is True
    # (previously an empty row was emitted for heading=False).
    if heading:
        row = makeelement('tr')
        rowprops = makeelement('trPr')
        cnfStyle = makeelement('cnfStyle', attributes={'val': '000000100000'})
        rowprops.append(cnfStyle)
        row.append(rowprops)
        for i, heading_cell in enumerate(contents[0]):
            cell = makeelement('tc')
            # Cell properties
            cellprops = makeelement('tcPr')
            if colw:
                wattr = {'w': str(colw[i]), 'type': cwunit}
            else:
                wattr = {'w': '0', 'type': 'auto'}
            cellwidth = makeelement('tcW', attributes=wattr)
            cellstyle = makeelement('shd', attributes={'val': 'clear',
                                                       'color': 'auto',
                                                       'fill': 'FFFFFF',
                                                       'themeFill': 'text2',
                                                       'themeFillTint': '99'})
            cellprops.append(cellwidth)
            cellprops.append(cellstyle)
            cell.append(cellprops)
            # Paragraph (Content): merge a list of items into the cell
            if not isinstance(heading_cell, (list, tuple)):
                heading_cell = [heading_cell]
            for h in heading_cell:
                if isinstance(h, etree._Element):
                    cell.append(h)
                else:
                    cell.append(paragraph(h, jc='center'))
            row.append(cell)
        table.append(row)
    # Contents Rows
    for contentrow in contents[1 if heading else 0:]:
        row = makeelement('tr')
        for i, content in enumerate(contentrow):
            cell = makeelement('tc')
            # Properties
            cellprops = makeelement('tcPr')
            if colw:
                wattr = {'w': str(colw[i]), 'type': cwunit}
            else:
                wattr = {'w': '0', 'type': 'auto'}
            cellwidth = makeelement('tcW', attributes=wattr)
            cellprops.append(cellwidth)
            cell.append(cellprops)
            # Paragraph (Content)
            if not isinstance(content, (list, tuple)):
                content = [content]
            for c in content:
                if isinstance(c, etree._Element):
                    cell.append(c)
                else:
                    if celstyle and 'align' in celstyle[i].keys():
                        align = celstyle[i]['align']
                    else:
                        align = 'left'
                    cell.append(paragraph(c, jc=align))
            row.append(cell)
        table.append(row)
    return table
def picture(
        relationshiplist, picname, picdescription, pixelwidth=None,
        pixelheight=None, nochangeaspect=True, nochangearrowheads=True,
        imagefiledict=None):
    """
    Take a relationshiplist, picture file name, and return a paragraph
    containing the image and an updated relationshiplist.

    When *imagefiledict* (a {image path: relationship id} dict) is given,
    images are tracked in it and the 3-tuple (relationshiplist,
    paragraph, imagefiledict) is returned; otherwise the image file is
    copied into the template directory (legacy behaviour, pending
    deprecation) and (relationshiplist, paragraph) is returned.

    Size may be given in *pixelwidth*/*pixelheight*; if either is
    missing, both are taken from the image file itself.  EXIF orientation
    is honoured by rotating/flipping the picture frame.
    """
    if imagefiledict is None:
        warn(
            'Using picture() without imagefiledict parameter will be depreca'
            'ted in the future.', PendingDeprecationWarning
        )
    # http://openxmldeveloper.org/articles/462.aspx
    # Create an image. Size may be specified, otherwise it will based on the
    # pixel size of image. Return a paragraph containing the picture
    # Set relationship ID to that of the image or the first available one
    picid = '2'
    picpath = abspath(picname)
    if imagefiledict is not None:
        # Keep track of the image files in a separate dictionary so they don't
        # need to be copied into the template directory
        if picpath not in imagefiledict:
            picrelid = 'rId' + str(len(relationshiplist) + 1)
            imagefiledict[picpath] = picrelid
            relationshiplist.append([
                'http://schemas.openxmlformats.org/officeDocument/2006/relat'
                'ionships/image',
                'media/%s_%s' % (picrelid, basename(picpath))
            ])
        else:
            # Image already registered: reuse its relationship id
            picrelid = imagefiledict[picpath]
    else:
        # Copy files into template directory for backwards compatibility
        # Images still accumulate in the template directory this way
        picrelid = 'rId' + str(len(relationshiplist) + 1)
        relationshiplist.append([
            'http://schemas.openxmlformats.org/officeDocument/2006/relations'
            'hips/image', 'media/' + picname
        ])
        media_dir = join(template_dir, 'word', 'media')
        if not os.path.isdir(media_dir):
            os.mkdir(media_dir)
        shutil.copyfile(picname, join(media_dir, picname))
    image = Image.open(picpath)
    # Extract EXIF data, if available (private PIL API; missing or broken
    # EXIF simply yields an empty dict)
    try:
        exif = image._getexif()
        exif = {} if exif is None else exif
    except:
        exif = {}
    imageExif = {}
    for tag, value in exif.items():
        imageExif[TAGS.get(tag, tag)] = value
    # Map the 8 EXIF orientation values to a rotation angle plus flips
    imageOrientation = imageExif.get('Orientation', 1)
    imageAngle = {
        1: 0, 2: 0, 3: 180, 4: 0, 5: 90, 6: 90, 7: 270, 8: 270
    }[imageOrientation]
    imageFlipH = 'true' if imageOrientation in (2, 5, 7) else 'false'
    imageFlipV = 'true' if imageOrientation == 4 else 'false'
    # Check if the user has specified a size
    if not pixelwidth or not pixelheight:
        # If not, get info from the picture itself
        pixelwidth, pixelheight = image.size[0:2]
    # Swap width and height if the EXIF rotation is 90/270 degrees
    if imageOrientation in (5, 6, 7, 8):
        pixelwidth, pixelheight = pixelheight, pixelwidth
    # OpenXML measures on-screen objects in English Metric Units
    # 1cm = 36000 EMUs
    # NOTE(review): 12700 EMU is one typographic point, so each pixel is
    # treated as one point here -- confirm this is the intended scaling
    emuperpixel = 12700
    width = str(pixelwidth * emuperpixel)
    height = str(pixelheight * emuperpixel)
    # There are 3 main elements inside a picture
    # 1. The Blipfill - specifies how the image fills the picture area
    #    (stretch, tile, etc.)
    blipfill = makeelement('blipFill', nsprefix='pic')
    blipfill.append(makeelement('blip', nsprefix='a', attrnsprefix='r',
                                attributes={'embed': picrelid}))
    stretch = makeelement('stretch', nsprefix='a')
    stretch.append(makeelement('fillRect', nsprefix='a'))
    blipfill.append(makeelement('srcRect', nsprefix='a'))
    blipfill.append(stretch)
    # 2. The non visual picture properties
    nvpicpr = makeelement('nvPicPr', nsprefix='pic')
    cnvpr = makeelement(
        'cNvPr', nsprefix='pic',
        attributes={'id': '0', 'name': 'Picture 1', 'descr': picdescription}
    )
    nvpicpr.append(cnvpr)
    cnvpicpr = makeelement('cNvPicPr', nsprefix='pic')
    cnvpicpr.append(makeelement(
        'picLocks', nsprefix='a',
        attributes={'noChangeAspect': str(int(nochangeaspect)),
                    'noChangeArrowheads': str(int(nochangearrowheads))}))
    nvpicpr.append(cnvpicpr)
    # 3. The Shape properties (frame position, size, rotation and flips;
    #    rotation is given in 60000ths of a degree)
    sppr = makeelement('spPr', nsprefix='pic', attributes={'bwMode': 'auto'})
    xfrm = makeelement(
        'xfrm', nsprefix='a', attributes={
            'rot': str(imageAngle * 60000), 'flipH': imageFlipH,
            'flipV': imageFlipV
        }
    )
    xfrm.append(
        makeelement('off', nsprefix='a', attributes={'x': '0', 'y': '0'})
    )
    xfrm.append(
        makeelement(
            'ext', nsprefix='a', attributes={'cx': width, 'cy': height}
        )
    )
    prstgeom = makeelement(
        'prstGeom', nsprefix='a', attributes={'prst': 'rect'}
    )
    prstgeom.append(makeelement('avLst', nsprefix='a'))
    sppr.append(xfrm)
    sppr.append(prstgeom)
    # Add our 3 parts to the picture element
    pic = makeelement('pic', nsprefix='pic')
    pic.append(nvpicpr)
    pic.append(blipfill)
    pic.append(sppr)
    # Now make the supporting elements
    # The following sequence is just: make element, then add its children
    graphicdata = makeelement(
        'graphicData', nsprefix='a',
        attributes={'uri': ('http://schemas.openxmlformats.org/drawingml/200'
                            '6/picture')})
    graphicdata.append(pic)
    graphic = makeelement('graphic', nsprefix='a')
    graphic.append(graphicdata)
    framelocks = makeelement('graphicFrameLocks', nsprefix='a',
                             attributes={'noChangeAspect': '1'})
    framepr = makeelement('cNvGraphicFramePr', nsprefix='wp')
    framepr.append(framelocks)
    docpr = makeelement('docPr', nsprefix='wp',
                        attributes={'id': picid, 'name': 'Picture 1',
                                    'descr': picdescription})
    effectextent = makeelement('effectExtent', nsprefix='wp',
                               attributes={'l': '25400', 't': '0', 'r': '0',
                                           'b': '0'})
    extent = makeelement('extent', nsprefix='wp',
                         attributes={'cx': width, 'cy': height})
    inline = makeelement('inline', attributes={'distT': "0", 'distB': "0",
                                               'distL': "0", 'distR': "0"},
                         nsprefix='wp')
    inline.append(extent)
    inline.append(effectextent)
    inline.append(docpr)
    inline.append(framepr)
    inline.append(graphic)
    drawing = makeelement('drawing')
    drawing.append(inline)
    run = makeelement('r')
    run.append(drawing)
    paragraph = makeelement('p')
    paragraph.append(run)
    if imagefiledict is not None:
        return relationshiplist, paragraph, imagefiledict
    else:
        return relationshiplist, paragraph
def search(document, search):
    '''Search a document for a regex; return True as soon as a match is
    found, False otherwise.

    The original kept scanning every remaining text element after the
    first hit; returning early gives the same result with less work.
    '''
    searchre = re.compile(search)
    text_tag = '{%s}t' % nsprefixes['w']  # hoisted loop invariant
    for element in document.iter():
        if element.tag == text_tag:  # t (text) elements
            if element.text and searchre.search(element.text):
                return True
    return False
def replace(document, search, replace):
    """
    Substitute every regex match of *search* with *replace* inside all
    w:t (text) elements; the tree is modified in place and returned.
    """
    pattern = re.compile(search)
    text_tag = '{%s}t' % nsprefixes['w']
    for element in document.iter():
        if element.tag != text_tag:
            continue
        if element.text and pattern.search(element.text):
            element.text = pattern.sub(replace, element.text)
    return document
def clean(document):
    """Remove empty w:t and w:r elements from *document* (in place) and
    return the cleaned tree.

    The 't' pass runs first so that runs emptied by it are caught by the
    following 'r' pass.
    """
    for tagname in ('t', 'r'):
        fulltag = '{%s}%s' % (nsprefixes['w'], tagname)
        doomed = [el for el in document.iter()
                  if el.tag == fulltag and not el.text and not len(el)]
        for el in doomed:
            el.getparent().remove(el)
    return document
def findTypeParent(element, tag):
    """ Finds first ancestor of element with the given tag

    @param object element: etree element
    @param string tag: the tag of the parent to search for
    @return object element: the found parent, or None when no ancestor
            matches.  (The original crashed with AttributeError when it
            walked off the top of the tree -- lxml's getparent() returns
            None at the root -- instead of returning None as documented.)
    """
    p = element.getparent()
    while p is not None:
        if p.tag == tag:
            return p
        p = p.getparent()
    # Not found
    return None
def AdvSearch(document, search, bs=3):
    '''Return set of all regex matches

    This is an advanced version of python-docx.search() that takes into
    account blocks of <bs> elements at a time.

    What it does:
    It searches the entire document body for text blocks.
    Since the text to search could be spawned across multiple text blocks,
    we need to adopt some sort of algorithm to handle this situation.
    The smaller matching group of blocks (up to bs) is then adopted.
    If the matching group has more than one block, blocks other than first
    are cleared and all the replacement text is put on first block.

    Examples:
    original text blocks : [ 'Hel', 'lo,', ' world!' ]
    search : 'Hello,'
    output blocks : [ 'Hello,' ]

    original text blocks : [ 'Hel', 'lo', ' __', 'name', '__!' ]
    search : '(__[a-z]+__)'
    output blocks : [ '__name__' ]

    @param instance document: The original document
    @param str search: The text to search for (regexp)
    append, or a list of etree elements
    @param int bs: See above
    @return set All occurences of search string
    '''
    # Compile the search regexp
    searchre = re.compile(search)
    matches = []
    # Will match against searchels. Searchels is a sliding window holding
    # the last n text elements found in the document. 1 < n < bs
    searchels = []
    for element in document.iter():
        if element.tag == '{%s}t' % nsprefixes['w']:  # t (text) elements
            if element.text:
                # Add this element to searchels
                searchels.append(element)
                if len(searchels) > bs:
                    # If searchels is too long, drop its oldest element
                    searchels.pop(0)
                # Search all combinations of searchels, starting from
                # the smaller ones up to bigger ones
                # l = search length (number of elements merged)
                # s = search start (index of first merged element)
                # e = element IDs to merge
                found = False
                for l in range(1, len(searchels)+1):
                    if found:
                        break
                    for s in range(len(searchels)):
                        if found:
                            break
                        if s+l <= len(searchels):
                            e = range(s, s+l)
                            # Concatenate the candidate group's text
                            txtsearch = ''
                            for k in e:
                                txtsearch += searchels[k].text
                            # Search for the text in the merged string
                            match = searchre.search(txtsearch)
                            if match:
                                matches.append(match.group())
                                found = True
    return set(matches)
def advReplace(document, search, replace, bs=3):
    """
    Replace all occurences of string with a different string, return updated
    document

    This is a modified version of python-docx.replace() that takes into
    account blocks of <bs> elements at a time. The replace element can also
    be a string or an xml etree element.

    What it does:
    It searches the entire document body for text blocks.
    Then scans those text blocks for the search pattern.
    Since the text to search could be spawned across multiple text blocks,
    we need to adopt some sort of algorithm to handle this situation.
    The smaller matching group of blocks (up to bs) is then adopted.
    If the matching group has more than one block, blocks other than first
    are cleared and all the replacement text is put on first block.

    Examples:
    original text blocks : [ 'Hel', 'lo,', ' world!' ]
    search / replace: 'Hello,' / 'Hi!'
    output blocks : [ 'Hi!', '', ' world!' ]

    original text blocks : [ 'Hel', 'lo,', ' world!' ]
    search / replace: 'Hello, world' / 'Hi!'
    output blocks : [ 'Hi!!', '', '' ]

    original text blocks : [ 'Hel', 'lo,', ' world!' ]
    search / replace: 'Hel' / 'Hal'
    output blocks : [ 'Hal', 'lo,', ' world!' ]

    @param instance document: The original document
    @param str search: The text to search for (regexp)
    @param mixed replace: The replacement text or lxml.etree element to
                          append, or a list of etree elements
    @param int bs: See above
    @return instance The document with replacement applied
    """
    # Enables debug output
    DEBUG = False
    newdocument = document
    # Compile the search regexp
    searchre = re.compile(search)
    # Will match against searchels. Searchels is a sliding window holding
    # the last n text elements found in the document. 1 < n < bs
    searchels = []
    for element in newdocument.iter():
        if element.tag == '{%s}t' % nsprefixes['w']:  # t (text) elements
            if element.text:
                # Add this element to searchels
                searchels.append(element)
                if len(searchels) > bs:
                    # If searchels is too long, drop its oldest element
                    searchels.pop(0)
                # Search all combinations of searchels, starting from
                # the smaller ones up to bigger ones
                # l = search length (number of elements merged)
                # s = search start (index of first merged element)
                # e = element IDs to merge
                found = False
                for l in range(1, len(searchels)+1):
                    if found:
                        break
                    for s in range(len(searchels)):
                        if found:
                            break
                        if s+l <= len(searchels):
                            e = range(s, s+l)
                            txtsearch = ''
                            for k in e:
                                txtsearch += searchels[k].text
                            # Search for the text in the merged string
                            match = searchre.search(txtsearch)
                            if match:
                                found = True
                                # I've found something :)
                                if DEBUG:
                                    log.debug("Found element!")
                                    log.debug("Search regexp: %s",
                                              searchre.pattern)
                                    log.debug("Requested replacement: %s",
                                              replace)
                                    log.debug("Matched text: %s", txtsearch)
                                    log.debug("Matched text (splitted): %s",
                                              map(lambda i: i.text, searchels))
                                    log.debug("Matched at position: %s",
                                              match.start())
                                    log.debug("matched in elements: %s", e)
                                    if isinstance(replace, etree._Element):
                                        log.debug("Will replace with XML CODE")
                                    # BUGFIX: was isinstance(replace(list,
                                    # tuple)), which *called* replace and
                                    # raised TypeError whenever DEBUG ran
                                    # with a string replacement.
                                    elif isinstance(replace, (list, tuple)):
                                        log.debug("Will replace with LIST OF"
                                                  " ELEMENTS")
                                    else:
                                        # BUGFIX: was passed as a bare extra
                                        # arg with no %s placeholder, which
                                        # breaks logging's %-formatting.
                                        log.debug("Will replace with: %s",
                                                  re.sub(search, replace,
                                                         txtsearch))
                                curlen = 0
                                replaced = False
                                for i in e:
                                    curlen += len(searchels[i].text)
                                    if curlen > match.start() and not replaced:
                                        # The match occurred in THIS element.
                                        # Put in the whole replaced text
                                        if isinstance(replace, etree._Element):
                                            # Convert to a list and process
                                            # it later
                                            replace = [replace]
                                        if isinstance(replace, (list, tuple)):
                                            # I'm replacing with a list of
                                            # etree elements:
                                            # clear the text in the tag and
                                            # append the element after the
                                            # parent paragraph
                                            # (because t elements cannot have
                                            # childs)
                                            p = findTypeParent(
                                                searchels[i],
                                                '{%s}p' % nsprefixes['w'])
                                            searchels[i].text = re.sub(
                                                search, '', txtsearch)
                                            insindex = p.getparent().index(p)+1
                                            for r in replace:
                                                p.getparent().insert(
                                                    insindex, r)
                                                insindex += 1
                                        else:
                                            # Replacing with pure text
                                            searchels[i].text = re.sub(
                                                search, replace, txtsearch)
                                        replaced = True
                                        log.debug(
                                            "Replacing in element #: %s", i)
                                    else:
                                        # Clears the other text elements
                                        searchels[i].text = ''
    return newdocument
def getdocumenttext(document):
    '''Return the raw text of a document, as a list of strings -- one per
    non-empty paragraph.  Tab elements are rendered as a literal tab
    character; runs split across several w:t elements are joined.'''
    w_ns = nsprefixes['w']
    p_tag = '{' + w_ns + '}p'
    t_tag = '{' + w_ns + '}t'
    tab_tag = '{' + w_ns + '}tab'
    # Collect all paragraph (p) elements first, then flatten each one.
    paragraphs = [el for el in document.iter() if el.tag == p_tag]
    paratextlist = []
    for para in paragraphs:
        pieces = []
        for el in para.iter():
            if el.tag == t_tag:
                if el.text:
                    pieces.append(el.text)
            elif el.tag == tab_tag:
                pieces.append('\t')
        paratext = u''.join(pieces)
        # Skip paragraphs that contained no text at all
        if len(paratext) != 0:
            paratextlist.append(paratext)
    return paratextlist
def coreproperties(title, subject, creator, keywords, lastmodifiedby=None):
    """
    Build the docProps/core.xml tree (the common document properties
    referred to in the 'Dublin Core' specification).  See appproperties()
    for the extended/application properties.  *lastmodifiedby* defaults
    to *creator* when omitted.
    """
    coreprops = makeelement('coreProperties', nsprefix='cp')
    # Simple child elements, in the order Word emits them.
    simple = [
        ('title', title, 'dc'),
        ('subject', subject, 'dc'),
        ('creator', creator, 'dc'),
        ('keywords', ','.join(keywords), 'cp'),
        ('lastModifiedBy', lastmodifiedby or creator, 'cp'),
        ('revision', '1', 'cp'),
        ('category', 'Examples', 'cp'),
        ('description', 'Examples', 'dc'),
    ]
    for tag, text, ns in simple:
        coreprops.append(makeelement(tag, tagtext=text, nsprefix=ns))
    currenttime = time.strftime('%Y-%m-%dT%H:%M:%SZ')
    # Document creation and modify times.
    # Problem here: the attribute's *name* lives in one namespace while
    # its *value* references another, so we build these two elements from
    # strings as a workaround.
    for doctime in ['created', 'modified']:
        elm_str = (
            '<dcterms:%s xmlns:xsi="http://www.w3.org/2001/XMLSchema-instanc'
            'e" xmlns:dcterms="http://purl.org/dc/terms/" xsi:type="dcterms:'
            'W3CDTF">%s</dcterms:%s>'
        ) % (doctime, currenttime, doctime)
        coreprops.append(etree.fromstring(elm_str))
    return coreprops
def appproperties():
    """
    Create app-specific properties (docProps/app.xml). See
    docproperties() for the more common document properties.

    The element tree is built from a string because its root carries two
    default namespaces.  (The original first built a throwaway element
    with makeelement() and immediately overwrote it -- dead code, now
    removed.)
    """
    appprops = etree.fromstring(
        '<?xml version="1.0" encoding="UTF-8" standalone="yes"?><Properties x'
        'mlns="http://schemas.openxmlformats.org/officeDocument/2006/extended'
        '-properties" xmlns:vt="http://schemas.openxmlformats.org/officeDocum'
        'ent/2006/docPropsVTypes"></Properties>')
    # Boilerplate statistics Word expects; values are placeholders, not
    # computed from the actual document content.
    props = \
        {'Template': 'Normal.dotm',
         'TotalTime': '6',
         'Pages': '1',
         'Words': '83',
         'Characters': '475',
         'Application': 'Microsoft Word 12.0.0',
         'DocSecurity': '0',
         'Lines': '12',
         'Paragraphs': '8',
         'ScaleCrop': 'false',
         'LinksUpToDate': 'false',
         'CharactersWithSpaces': '583',
         'SharedDoc': 'false',
         'HyperlinksChanged': 'false',
         'AppVersion': '12.0000'}
    for prop, value in props.items():
        appprops.append(makeelement(prop, tagtext=value, nsprefix=None))
    return appprops
def relationshiplist():
    '''Return the default document-level relationship list.  Each entry
    is a two-item list: [relationship-type URI, target part path].'''
    base = ('http://schemas.openxmlformats.org/officeDocument/2006/'
            'relationships/')
    pairs = [
        ('numbering', 'numbering.xml'),
        ('styles', 'styles.xml'),
        ('settings', 'settings.xml'),
        ('webSettings', 'webSettings.xml'),
        ('fontTable', 'fontTable.xml'),
        ('theme', 'theme/theme1.xml'),
    ]
    return [[base + reltype, target] for reltype, target in pairs]
def wordrelationships(relationshiplist):
    '''Generate a Word relationships tree (word/_rels/document.xml.rels)
    from *relationshiplist*, whose entries are [type URI, target] pairs.
    Relationship IDs (rId) are numbered from 1 in list order.

    The manual counter of the original is replaced with enumerate().
    '''
    # FIXME: using string hack instead of making element
    # relationships = makeelement('Relationships', nsprefix='pr')
    relationships = etree.fromstring(
        '<Relationships xmlns="http://schemas.openxmlformats.org/package/2006'
        '/relationships"></Relationships>')
    for relid, relationship in enumerate(relationshiplist, start=1):
        rel_elm = makeelement('Relationship', nsprefix=None,
                              attributes={'Id': 'rId' + str(relid),
                                          'Type': relationship[0],
                                          'Target': relationship[1]}
                              )
        relationships.append(rel_elm)
    return relationships
def savedocx(
        document, coreprops, appprops, contenttypes, websettings,
        wordrelationships, output, imagefiledict=None):
    """
    Save a modified document: serialize the given XML trees and zip them
    together with the template support files into a .docx at *output*.

    @param document:          word/document.xml tree
    @param coreprops:         docProps/core.xml tree (see coreproperties())
    @param appprops:          docProps/app.xml tree (see appproperties())
    @param contenttypes:      [Content_Types].xml tree (see contenttypes())
    @param websettings:       word/webSettings.xml tree
    @param wordrelationships: word/_rels/document.xml.rels tree
    @param output:            target path (or file-like object accepted
                              by zipfile.ZipFile)
    @param imagefiledict:     optional {image path: relationship id} map
                              produced by picture(); those images are
                              added under word/media/

    NOTE(review): this function os.chdir()s into the template directory
    and restores the previous cwd at the end; cwd is process-global
    state, so confirm callers are single-threaded.
    """
    if imagefiledict is None:
        warn(
            'Using savedocx() without imagefiledict parameter will be deprec'
            'ated in the future.', PendingDeprecationWarning
        )
    assert os.path.isdir(template_dir)
    docxfile = zipfile.ZipFile(
        output, mode='w', compression=zipfile.ZIP_DEFLATED)
    # Move to the template data path
    prev_dir = os.path.abspath('.')  # save previous working dir
    os.chdir(template_dir)
    # Serialize our trees into out zip file
    treesandfiles = {
        document: 'word/document.xml',
        coreprops: 'docProps/core.xml',
        appprops: 'docProps/app.xml',
        contenttypes: '[Content_Types].xml',
        websettings: 'word/webSettings.xml',
        wordrelationships: 'word/_rels/document.xml.rels'
    }
    for tree in treesandfiles:
        log.info('Saving: %s' % treesandfiles[tree])
        treestring = etree.tostring(tree, pretty_print=True)
        docxfile.writestr(treesandfiles[tree], treestring)
    # Add & compress images, if applicable
    if imagefiledict is not None:
        for imagepath, picrelid in imagefiledict.items():
            archivename = 'word/media/%s_%s' % (picrelid, basename(imagepath))
            log.info('Saving: %s', archivename)
            docxfile.write(imagepath, archivename)
    # Add & compress support files shipped with the template
    files_to_ignore = ['.DS_Store']  # nuisance from some os's
    for dirpath, dirnames, filenames in os.walk('.'):
        for filename in filenames:
            if filename in files_to_ignore:
                continue
            templatefile = join(dirpath, filename)
            # strip the leading './' so archive paths are relative
            archivename = templatefile[2:]
            log.info('Saving: %s', archivename)
            docxfile.write(templatefile, archivename)
    log.info('Saved new file to: %r', output)
    docxfile.close()
    os.chdir(prev_dir)  # restore previous working dir
    return
|
mikemaccana/python-docx
|
docx.py
|
wordrelationships
|
python
|
def wordrelationships(relationshiplist):
'''Generate a Word relationships file'''
# Default list of relationships
# FIXME: using string hack instead of making element
#relationships = makeelement('Relationships', nsprefix='pr')
relationships = etree.fromstring(
'<Relationships xmlns="http://schemas.openxmlformats.org/package/2006'
'/relationships"></Relationships>')
count = 0
for relationship in relationshiplist:
# Relationship IDs (rId) start at 1.
rel_elm = makeelement('Relationship', nsprefix=None,
attributes={'Id': 'rId'+str(count+1),
'Type': relationship[0],
'Target': relationship[1]}
)
relationships.append(rel_elm)
count += 1
return relationships
|
Generate a Word relationships file
|
train
|
https://github.com/mikemaccana/python-docx/blob/4c9b46dbebe3d2a9b82dbcd35af36584a36fd9fe/docx.py#L1031-L1049
|
[
"def makeelement(tagname, tagtext=None, nsprefix='w', attributes=None,\n attrnsprefix=None):\n '''Create an element & return it'''\n # Deal with list of nsprefix by making namespacemap\n namespacemap = None\n if isinstance(nsprefix, list):\n namespacemap = {}\n for prefix in nsprefix:\n namespacemap[prefix] = nsprefixes[prefix]\n # FIXME: rest of code below expects a single prefix\n nsprefix = nsprefix[0]\n if nsprefix:\n namespace = '{%s}' % nsprefixes[nsprefix]\n else:\n # For when namespace = None\n namespace = ''\n newelement = etree.Element(namespace+tagname, nsmap=namespacemap)\n # Add attributes with namespaces\n if attributes:\n # If they haven't bothered setting attribute namespace, use an empty\n # string (equivalent of no namespace)\n if not attrnsprefix:\n # Quick hack: it seems every element that has a 'w' nsprefix for\n # its tag uses the same prefix for it's attributes\n if nsprefix == 'w':\n attributenamespace = namespace\n else:\n attributenamespace = ''\n else:\n attributenamespace = '{'+nsprefixes[attrnsprefix]+'}'\n\n for tagattribute in attributes:\n newelement.set(attributenamespace+tagattribute,\n attributes[tagattribute])\n if tagtext:\n newelement.text = tagtext\n return newelement\n"
] |
# encoding: utf-8
"""
Open and modify Microsoft Word 2007 docx files (called 'OpenXML' and
'Office OpenXML' by Microsoft)
Part of Python's docx module - http://github.com/mikemaccana/python-docx
See LICENSE for licensing information.
"""
import os
import re
import time
import shutil
import zipfile
from lxml import etree
from os.path import abspath, basename, join
try:
from PIL import Image
except ImportError:
import Image
try:
from PIL.ExifTags import TAGS
except ImportError:
TAGS = {}
from exceptions import PendingDeprecationWarning
from warnings import warn
import logging
log = logging.getLogger(__name__)
# Record template directory's location which is just 'template' for a docx
# developer or 'site-packages/docx-template' if you have installed docx
template_dir = join(os.path.dirname(__file__), 'docx-template') # installed
if not os.path.isdir(template_dir):
    template_dir = join(os.path.dirname(__file__), 'template') # dev
# All Word prefixes / namespace matches used in document.xml & core.xml.
# LXML doesn't actually use prefixes (just the real namespace) , but these
# make it easier to copy Word output more easily.
# Keys are the short prefixes passed as ``nsprefix=`` throughout this module
# (see makeelement()); values are the corresponding full namespace URIs.
nsprefixes = {
    'mo': 'http://schemas.microsoft.com/office/mac/office/2008/main',
    'o': 'urn:schemas-microsoft-com:office:office',
    've': 'http://schemas.openxmlformats.org/markup-compatibility/2006',
    # Text Content
    'w': 'http://schemas.openxmlformats.org/wordprocessingml/2006/main',
    'w10': 'urn:schemas-microsoft-com:office:word',
    'wne': 'http://schemas.microsoft.com/office/word/2006/wordml',
    # Drawing
    'a': 'http://schemas.openxmlformats.org/drawingml/2006/main',
    'm': 'http://schemas.openxmlformats.org/officeDocument/2006/math',
    'mv': 'urn:schemas-microsoft-com:mac:vml',
    'pic': 'http://schemas.openxmlformats.org/drawingml/2006/picture',
    'v': 'urn:schemas-microsoft-com:vml',
    'wp': ('http://schemas.openxmlformats.org/drawingml/2006/wordprocessing'
           'Drawing'),
    # Properties (core and extended)
    'cp': ('http://schemas.openxmlformats.org/package/2006/metadata/core-pr'
           'operties'),
    'dc': 'http://purl.org/dc/elements/1.1/',
    'ep': ('http://schemas.openxmlformats.org/officeDocument/2006/extended-'
           'properties'),
    'xsi': 'http://www.w3.org/2001/XMLSchema-instance',
    # Content Types
    'ct': 'http://schemas.openxmlformats.org/package/2006/content-types',
    # Package Relationships
    'r': ('http://schemas.openxmlformats.org/officeDocument/2006/relationsh'
          'ips'),
    'pr': 'http://schemas.openxmlformats.org/package/2006/relationships',
    # Dublin Core document properties
    'dcmitype': 'http://purl.org/dc/dcmitype/',
    'dcterms': 'http://purl.org/dc/terms/'}
def opendocx(file):
    '''Open *file* (a .docx path or file object) and return the parsed
    word/document.xml tree.'''
    archive = zipfile.ZipFile(file)
    return etree.fromstring(archive.read('word/document.xml'))
def newdocument():
    '''Return a minimal new document tree: a <document> root holding an
    empty <body>.'''
    root = makeelement('document')
    root.append(makeelement('body'))
    return root
def makeelement(tagname, tagtext=None, nsprefix='w', attributes=None,
                attrnsprefix=None):
    '''Create and return an lxml element named *tagname*.

    @param string tagname: local name of the element to create
    @param string tagtext: optional text content for the element
    @param mixed nsprefix: key into the module-level ``nsprefixes`` map
        selecting the tag's namespace (default 'w'); pass ``None`` for an
        un-namespaced tag, or a list of prefixes -- every listed prefix is
        declared in the element's nsmap and the first one namespaces the tag
    @param dict attributes: attribute name -> value mapping to set
    @param string attrnsprefix: optional ``nsprefixes`` key naming the
        namespace applied to attribute names; when omitted, 'w'-prefixed
        tags get 'w'-namespaced attributes and all others get plain
        (un-namespaced) attribute names
    '''
    # Deal with list of nsprefix by making namespacemap
    namespacemap = None
    if isinstance(nsprefix, list):
        namespacemap = {}
        for prefix in nsprefix:
            namespacemap[prefix] = nsprefixes[prefix]
        # FIXME: rest of code below expects a single prefix
        nsprefix = nsprefix[0]
    if nsprefix:
        namespace = '{%s}' % nsprefixes[nsprefix]
    else:
        # For when namespace = None
        namespace = ''
    newelement = etree.Element(namespace+tagname, nsmap=namespacemap)
    # Add attributes with namespaces
    if attributes:
        # If they haven't bothered setting attribute namespace, use an empty
        # string (equivalent of no namespace)
        if not attrnsprefix:
            # Quick hack: it seems every element that has a 'w' nsprefix for
            # its tag uses the same prefix for its attributes
            if nsprefix == 'w':
                attributenamespace = namespace
            else:
                attributenamespace = ''
        else:
            attributenamespace = '{'+nsprefixes[attrnsprefix]+'}'

        for tagattribute in attributes:
            newelement.set(attributenamespace+tagattribute,
                           attributes[tagattribute])
    if tagtext:
        newelement.text = tagtext
    return newelement
def pagebreak(type='page', orient='portrait'):
    '''Return a paragraph element carrying a break (default 'page').

    For 'section' breaks the page size is chosen from *orient*
    ('portrait' or 'landscape'). Raises ValueError for unknown *type*.
    See http://openxmldeveloper.org/forums/thread/4075.aspx
    '''
    validtypes = ['page', 'section']
    if type not in validtypes:
        raise ValueError(
            'Page break style "%s" not implemented. Valid styles: %s.'
            % (type, validtypes))
    brk = makeelement('p')
    if type == 'page':
        run = makeelement('r')
        run.append(makeelement('br', attributes={'type': type}))
        brk.append(run)
    elif type == 'section':
        paraprops = makeelement('pPr')
        sectprops = makeelement('sectPr')
        if orient == 'portrait':
            pgsz = makeelement('pgSz', attributes={'w': '12240',
                                                   'h': '15840'})
        elif orient == 'landscape':
            pgsz = makeelement('pgSz', attributes={'h': '12240',
                                                   'w': '15840',
                                                   'orient': 'landscape'})
        sectprops.append(pgsz)
        paraprops.append(sectprops)
        brk.append(paraprops)
    return brk
def paragraph(paratext, style='BodyText', breakbefore=False, jc='left'):
    """
    Build and return a new paragraph element containing *paratext*.

    @param mixed paratext: a plain string, or a list of
        (text, char_format_str) 2-tuples; char_format_str may contain
        'b', 'i' and/or 'u' for bold, italic and underline, e.g.::

            paratext = [
                ('some bold text', 'b'),
                ('some normal text', ''),
                ('some italic underlined text', 'iu')
            ]
    @param string style: paragraph style name (default 'BodyText')
    @param bool breakbefore: insert a lastRenderedPageBreak marker in
        each run (helps assistive technologies detect page breaks)
    @param string jc: paragraph alignment -- left, center, right, both
        (justified), ...; see
        http://www.schemacentral.com/sc/ooxml/t-w_ST_Jc.html
    """
    para = makeelement('p')
    # Normalise the input to a list of (text, format) pairs.
    if not isinstance(paratext, list):
        paratext = [(paratext, '')]
    pending_runs = []
    for item in paratext:
        if isinstance(item, (list, tuple)):
            text, charfmt = item
        else:
            text, charfmt = item, ''
        text_elm = makeelement('t', tagtext=text)
        # Leading/trailing whitespace would otherwise be collapsed by Word.
        if len(text.strip()) < len(text):
            text_elm.set('{http://www.w3.org/XML/1998/namespace}space',
                         'preserve')
        pending_runs.append([text_elm, charfmt])
    props = makeelement('pPr')
    props.append(makeelement('pStyle', attributes={'val': style}))
    props.append(makeelement('jc', attributes={'val': jc}))
    para.append(props)
    # One run per text fragment, carrying its character formatting.
    for text_elm, charfmt in pending_runs:
        run = makeelement('r')
        runprops = makeelement('rPr')
        if 'b' in charfmt:
            runprops.append(makeelement('b'))
        if 'i' in charfmt:
            runprops.append(makeelement('i'))
        if 'u' in charfmt:
            runprops.append(makeelement('u', attributes={'val': 'single'}))
        run.append(runprops)
        if breakbefore:
            run.append(makeelement('lastRenderedPageBreak'))
        run.append(text_elm)
        para.append(run)
    return para
def contenttypes():
    '''Build the [Content_Types].xml tree: one Override per document part
    plus one Default per supported file extension.'''
    types = etree.fromstring(
        '<Types xmlns="http://schemas.openxmlformats.org/package/2006/conten'
        't-types"></Types>')
    parts = {
        '/word/theme/theme1.xml': 'application/vnd.openxmlformats-officedocu'
                                  'ment.theme+xml',
        '/word/fontTable.xml': 'application/vnd.openxmlformats-officedocu'
                               'ment.wordprocessingml.fontTable+xml',
        '/docProps/core.xml': 'application/vnd.openxmlformats-package.co'
                              're-properties+xml',
        '/docProps/app.xml': 'application/vnd.openxmlformats-officedocu'
                             'ment.extended-properties+xml',
        '/word/document.xml': 'application/vnd.openxmlformats-officedocu'
                              'ment.wordprocessingml.document.main+xml',
        '/word/settings.xml': 'application/vnd.openxmlformats-officedocu'
                              'ment.wordprocessingml.settings+xml',
        '/word/numbering.xml': 'application/vnd.openxmlformats-officedocu'
                               'ment.wordprocessingml.numbering+xml',
        '/word/styles.xml': 'application/vnd.openxmlformats-officedocu'
                            'ment.wordprocessingml.styles+xml',
        '/word/webSettings.xml': 'application/vnd.openxmlformats-officedocu'
                                 'ment.wordprocessingml.webSettings+xml'}
    for partname, contenttype in parts.items():
        override = makeelement(
            'Override', nsprefix=None,
            attributes={'PartName': partname, 'ContentType': contenttype})
        types.append(override)
    # Declare the content type of each file extension we may embed.
    filetypes = {
        'gif': 'image/gif',
        'jpeg': 'image/jpeg',
        'jpg': 'image/jpeg',
        'png': 'image/png',
        'rels': 'application/vnd.openxmlformats-package.relationships+xml',
        'xml': 'application/xml'
    }
    for extension, contenttype in filetypes.items():
        types.append(makeelement(
            'Default', nsprefix=None,
            attributes={'Extension': extension, 'ContentType': contenttype}))
    return types
def heading(headingtext, headinglevel, lang='en'):
    '''Return a paragraph styled as a heading of level *headinglevel*.

    *lang* selects the style-name prefix: 'en' -> Heading, 'it' -> Titolo.
    '''
    stylenames = {'en': 'Heading', 'it': 'Titolo'}
    para = makeelement('p')
    props = makeelement('pPr')
    props.append(makeelement(
        'pStyle', attributes={'val': stylenames[lang] + str(headinglevel)}))
    run = makeelement('r')
    run.append(makeelement('t', tagtext=headingtext))
    para.append(props)
    para.append(run)
    return para
def table(contents, heading=True, colw=None, cwunit='dxa', tblw=0,
          twunit='auto', borders=None, celstyle=None):
    """
    Return a table element based on specified parameters

    @param list contents: A list of lists describing contents. Every item in
                          the list can be a string or a valid XML element
                          itself. It can also be a list. In that case all the
                          listed elements will be merged into the cell.
    @param bool heading:  Tells whether first line should be treated as
                          heading or not
    @param list colw:     list of integer column widths specified in wunitS.
    @param str cwunit:    Unit used for column width:
                            'pct'  : fiftieths of a percent
                            'dxa'  : twentieths of a point
                            'nil'  : no width
                            'auto' : automagically determined
    @param int tblw:      Table width
    @param str twunit:    Unit used for table width. Same possible values as
                          cwunit.
    @param dict borders:  Dictionary defining table border. Supported keys
                          are: 'top', 'left', 'bottom', 'right',
                          'insideH', 'insideV', 'all'.
                          When specified, the 'all' key has precedence over
                          others. Each key must define a dict of border
                          attributes:
                            color : The color of the border, in hex or
                                    'auto'
                            space : The space, measured in points
                            sz    : The size of the border, in eighths of
                                    a point
                            val   : The style of the border, see
                          http://www.schemacentral.com/sc/ooxml/t-w_ST_Border.htm
                          Defaults to no borders (None).
    @param list celstyle: Specify the style for each colum, list of dicts.
                          supported keys:
                          'align' : specify the alignment, see paragraph
                                    documentation.
    @return lxml.etree:   Generated XML etree element
    """
    # Avoid the mutable-default-argument trap; an absent value means
    # "no borders", exactly as the old ``borders={}`` default did.
    if borders is None:
        borders = {}
    table = makeelement('tbl')
    columns = len(contents[0])
    # Table properties
    tableprops = makeelement('tblPr')
    tablestyle = makeelement('tblStyle', attributes={'val': ''})
    tableprops.append(tablestyle)
    tablewidth = makeelement(
        'tblW', attributes={'w': str(tblw), 'type': str(twunit)})
    tableprops.append(tablewidth)
    if len(borders.keys()):
        tableborders = makeelement('tblBorders')
        for b in ['top', 'left', 'bottom', 'right', 'insideH', 'insideV']:
            if b in borders.keys() or 'all' in borders.keys():
                # 'all' takes precedence over any individual border key.
                k = 'all' if 'all' in borders.keys() else b
                attrs = {}
                for a in borders[k].keys():
                    attrs[a] = unicode(borders[k][a])
                borderelem = makeelement(b, attributes=attrs)
                tableborders.append(borderelem)
        tableprops.append(tableborders)
    tablelook = makeelement('tblLook', attributes={'val': '0400'})
    tableprops.append(tablelook)
    table.append(tableprops)
    # Table Grid
    tablegrid = makeelement('tblGrid')
    for i in range(columns):
        attrs = {'w': str(colw[i]) if colw else '2390'}
        tablegrid.append(makeelement('gridCol', attributes=attrs))
    table.append(tablegrid)
    # Heading Row
    row = makeelement('tr')
    rowprops = makeelement('trPr')
    cnfStyle = makeelement('cnfStyle', attributes={'val': '000000100000'})
    rowprops.append(cnfStyle)
    row.append(rowprops)
    if heading:
        i = 0
        # NOTE: the loop variable must not shadow the ``heading`` flag --
        # the original code reused the name, making the content-row slice
        # below depend on the truthiness of the *last heading cell*.
        for headingcell in contents[0]:
            cell = makeelement('tc')
            # Cell properties
            cellprops = makeelement('tcPr')
            if colw:
                wattr = {'w': str(colw[i]), 'type': cwunit}
            else:
                wattr = {'w': '0', 'type': 'auto'}
            cellwidth = makeelement('tcW', attributes=wattr)
            cellstyle = makeelement('shd', attributes={'val': 'clear',
                                                       'color': 'auto',
                                                       'fill': 'FFFFFF',
                                                       'themeFill': 'text2',
                                                       'themeFillTint': '99'})
            cellprops.append(cellwidth)
            cellprops.append(cellstyle)
            cell.append(cellprops)
            # Paragraph (Content)
            if not isinstance(headingcell, (list, tuple)):
                headingcell = [headingcell]
            for h in headingcell:
                if isinstance(h, etree._Element):
                    cell.append(h)
                else:
                    cell.append(paragraph(h, jc='center'))
            row.append(cell)
            i += 1
        table.append(row)
    # Contents Rows (skip the first row when it was used as a heading)
    for contentrow in contents[1 if heading else 0:]:
        row = makeelement('tr')
        i = 0
        for content in contentrow:
            cell = makeelement('tc')
            # Properties
            cellprops = makeelement('tcPr')
            if colw:
                wattr = {'w': str(colw[i]), 'type': cwunit}
            else:
                wattr = {'w': '0', 'type': 'auto'}
            cellwidth = makeelement('tcW', attributes=wattr)
            cellprops.append(cellwidth)
            cell.append(cellprops)
            # Paragraph (Content)
            if not isinstance(content, (list, tuple)):
                content = [content]
            for c in content:
                if isinstance(c, etree._Element):
                    cell.append(c)
                else:
                    if celstyle and 'align' in celstyle[i].keys():
                        align = celstyle[i]['align']
                    else:
                        align = 'left'
                    cell.append(paragraph(c, jc=align))
            row.append(cell)
            i += 1
        table.append(row)
    return table
def picture(
        relationshiplist, picname, picdescription, pixelwidth=None,
        pixelheight=None, nochangeaspect=True, nochangearrowheads=True,
        imagefiledict=None):
    """
    Take a relationshiplist, picture file name, and return a paragraph
    containing the image and an updated relationshiplist.

    Size may be given explicitly via *pixelwidth*/*pixelheight*; otherwise
    it is read from the image file. EXIF orientation, if present, is
    honoured by rotating/flipping the drawing.
    See http://openxmldeveloper.org/articles/462.aspx

    @param list relationshiplist: document relationships; a new image
        relationship is appended when the image is not yet registered
    @param str picname: path of the image file to embed
    @param str picdescription: alt-text description for the picture
    @param int pixelwidth: display width in pixels (default: image width)
    @param int pixelheight: display height in pixels (default: image height)
    @param bool nochangeaspect: lock the aspect ratio in Word
    @param bool nochangearrowheads: lock arrowheads (OOXML picLocks flag)
    @param dict imagefiledict: optional image path -> rId mapping; when
        supplied, image files are tracked there (and the dict is returned
        as a third result) instead of being copied into the template dir
    @return (relationshiplist, paragraph) or
            (relationshiplist, paragraph, imagefiledict)
    """
    if imagefiledict is None:
        warn(
            'Using picture() without imagefiledict parameter will be depreca'
            'ted in the future.', PendingDeprecationWarning
        )
    # Set relationship ID to that of the image or the first available one
    picid = '2'
    picpath = abspath(picname)
    if imagefiledict is not None:
        # Keep track of the image files in a separate dictionary so they don't
        # need to be copied into the template directory
        if picpath not in imagefiledict:
            picrelid = 'rId' + str(len(relationshiplist) + 1)
            imagefiledict[picpath] = picrelid
            relationshiplist.append([
                'http://schemas.openxmlformats.org/officeDocument/2006/relat'
                'ionships/image',
                'media/%s_%s' % (picrelid, basename(picpath))
            ])
        else:
            picrelid = imagefiledict[picpath]
    else:
        # Copy files into template directory for backwards compatibility
        # Images still accumulate in the template directory this way
        picrelid = 'rId' + str(len(relationshiplist) + 1)
        relationshiplist.append([
            'http://schemas.openxmlformats.org/officeDocument/2006/relations'
            'hips/image', 'media/' + picname
        ])
        media_dir = join(template_dir, 'word', 'media')
        if not os.path.isdir(media_dir):
            os.mkdir(media_dir)
        shutil.copyfile(picname, join(media_dir, picname))
    image = Image.open(picpath)
    # Extract EXIF data, if available. This is best-effort: _getexif() may
    # be absent or raise on malformed EXIF segments. (Was a bare 'except:',
    # which also swallowed KeyboardInterrupt/SystemExit.)
    try:
        exif = image._getexif()
        exif = {} if exif is None else exif
    except Exception:
        exif = {}
    imageExif = {}
    for tag, value in exif.items():
        imageExif[TAGS.get(tag, tag)] = value
    # Map the EXIF orientation code (1-8) to the rotation/mirroring Word
    # must apply so the picture displays upright.
    imageOrientation = imageExif.get('Orientation', 1)
    imageAngle = {
        1: 0, 2: 0, 3: 180, 4: 0, 5: 90, 6: 90, 7: 270, 8: 270
    }[imageOrientation]
    imageFlipH = 'true' if imageOrientation in (2, 5, 7) else 'false'
    imageFlipV = 'true' if imageOrientation == 4 else 'false'
    # Check if the user has specified a size
    if not pixelwidth or not pixelheight:
        # If not, get info from the picture itself
        pixelwidth, pixelheight = image.size[0:2]
    # Swap width and height if necessary (rotated orientations)
    if imageOrientation in (5, 6, 7, 8):
        pixelwidth, pixelheight = pixelheight, pixelwidth
    # OpenXML measures on-screen objects in English Metric Units
    # 1cm = 36000 EMUs
    emuperpixel = 12700
    width = str(pixelwidth * emuperpixel)
    height = str(pixelheight * emuperpixel)
    # There are 3 main elements inside a picture
    # 1. The Blipfill - specifies how the image fills the picture area
    #    (stretch, tile, etc.)
    blipfill = makeelement('blipFill', nsprefix='pic')
    blipfill.append(makeelement('blip', nsprefix='a', attrnsprefix='r',
                                attributes={'embed': picrelid}))
    stretch = makeelement('stretch', nsprefix='a')
    stretch.append(makeelement('fillRect', nsprefix='a'))
    blipfill.append(makeelement('srcRect', nsprefix='a'))
    blipfill.append(stretch)
    # 2. The non visual picture properties
    nvpicpr = makeelement('nvPicPr', nsprefix='pic')
    cnvpr = makeelement(
        'cNvPr', nsprefix='pic',
        attributes={'id': '0', 'name': 'Picture 1', 'descr': picdescription}
    )
    nvpicpr.append(cnvpr)
    cnvpicpr = makeelement('cNvPicPr', nsprefix='pic')
    cnvpicpr.append(makeelement(
        'picLocks', nsprefix='a',
        attributes={'noChangeAspect': str(int(nochangeaspect)),
                    'noChangeArrowheads': str(int(nochangearrowheads))}))
    nvpicpr.append(cnvpicpr)
    # 3. The Shape properties
    sppr = makeelement('spPr', nsprefix='pic', attributes={'bwMode': 'auto'})
    xfrm = makeelement(
        'xfrm', nsprefix='a', attributes={
            'rot': str(imageAngle * 60000), 'flipH': imageFlipH,
            'flipV': imageFlipV
        }
    )
    xfrm.append(
        makeelement('off', nsprefix='a', attributes={'x': '0', 'y': '0'})
    )
    xfrm.append(
        makeelement(
            'ext', nsprefix='a', attributes={'cx': width, 'cy': height}
        )
    )
    prstgeom = makeelement(
        'prstGeom', nsprefix='a', attributes={'prst': 'rect'}
    )
    prstgeom.append(makeelement('avLst', nsprefix='a'))
    sppr.append(xfrm)
    sppr.append(prstgeom)
    # Add our 3 parts to the picture element
    pic = makeelement('pic', nsprefix='pic')
    pic.append(nvpicpr)
    pic.append(blipfill)
    pic.append(sppr)
    # Now make the supporting elements
    # The following sequence is just: make element, then add its children
    graphicdata = makeelement(
        'graphicData', nsprefix='a',
        attributes={'uri': ('http://schemas.openxmlformats.org/drawingml/200'
                            '6/picture')})
    graphicdata.append(pic)
    graphic = makeelement('graphic', nsprefix='a')
    graphic.append(graphicdata)
    framelocks = makeelement('graphicFrameLocks', nsprefix='a',
                             attributes={'noChangeAspect': '1'})
    framepr = makeelement('cNvGraphicFramePr', nsprefix='wp')
    framepr.append(framelocks)
    docpr = makeelement('docPr', nsprefix='wp',
                        attributes={'id': picid, 'name': 'Picture 1',
                                    'descr': picdescription})
    effectextent = makeelement('effectExtent', nsprefix='wp',
                               attributes={'l': '25400', 't': '0', 'r': '0',
                                           'b': '0'})
    extent = makeelement('extent', nsprefix='wp',
                         attributes={'cx': width, 'cy': height})
    inline = makeelement('inline', attributes={'distT': "0", 'distB': "0",
                                               'distL': "0", 'distR': "0"},
                         nsprefix='wp')
    inline.append(extent)
    inline.append(effectextent)
    inline.append(docpr)
    inline.append(framepr)
    inline.append(graphic)
    drawing = makeelement('drawing')
    drawing.append(inline)
    run = makeelement('r')
    run.append(drawing)
    paragraph = makeelement('p')
    paragraph.append(run)
    if imagefiledict is not None:
        return relationshiplist, paragraph, imagefiledict
    else:
        return relationshiplist, paragraph
def search(document, search):
    '''Return True if regex *search* matches any text element in
    *document*, otherwise False.'''
    pattern = re.compile(search)
    text_tag = '{%s}t' % nsprefixes['w']
    for element in document.iter():
        if element.tag == text_tag and element.text:
            if pattern.search(element.text):
                return True
    return False
def replace(document, search, replace):
    """
    Substitute every regex match of *search* with *replace* inside each
    text element of *document*; return the (mutated) document.
    """
    pattern = re.compile(search)
    text_tag = '{%s}t' % nsprefixes['w']
    for element in document.iter():
        if element.tag == text_tag and element.text:
            if pattern.search(element.text):
                element.text = re.sub(search, replace, element.text)
    return document
def clean(document):
    """ Perform misc cleaning operations on documents: remove 't' and 'r'
    elements that have neither text nor children. Returns the document.
    """
    for tagname in ('t', 'r'):
        fulltag = '{%s}%s' % (nsprefixes['w'], tagname)
        # Collect first, then remove, so we never mutate while iterating.
        empties = [element for element in document.iter()
                   if element.tag == fulltag
                   and not element.text and not len(element)]
        for element in empties:
            element.getparent().remove(element)
    return document
def findTypeParent(element, tag):
    """ Find the first ancestor of *element* whose tag equals *tag*.

    @param object element: etree element (anything with a getparent())
    @param string tag: the fully-qualified tag of the parent to search for
    @return object element: the found ancestor, or None when not found

    The original implementation looped with ``while True`` and crashed
    with AttributeError once it walked past the root (getparent() returns
    None there), making its documented None return unreachable.
    """
    p = element.getparent()
    while p is not None:
        if p.tag == tag:
            return p
        p = p.getparent()
    # Walked past the root without finding a match
    return None
def AdvSearch(document, search, bs=3):
    '''Return the set of all regex matches found in *document*.

    This is an advanced version of python-docx.search() that scans a
    sliding window of up to *bs* consecutive text elements at a time, so
    a match spanning several <w:t> blocks is still found.

    Examples:
        original text blocks : [ 'Hel', 'lo,', ' world!' ]
        search               : 'Hello,'
        -> matched, because 'Hel'+'lo,' is tested as one string

        original text blocks : [ 'Hel', 'lo', ' __', 'name', '__!' ]
        search               : '(__[a-z]+__)'
        -> matches '__name__'

    @param instance document: The document to scan
    @param str search: The text to search for (regexp)
    @param int bs: maximum number of adjacent text blocks to join
    @return set All occurrences of the search string
    '''
    # Compile the search regexp
    searchre = re.compile(search)
    matches = []
    # searchels holds the last n text elements seen so far (n <= bs); it
    # acts as the sliding window of adjacent <w:t> elements.
    searchels = []
    for element in document.iter():
        if element.tag == '{%s}t' % nsprefixes['w']:  # t (text) elements
            if element.text:
                # Add this element to searchels
                searchels.append(element)
                if len(searchels) > bs:
                    # If searchels is too long, drop its oldest element
                    searchels.pop(0)
                # Try every contiguous combination of window elements,
                # from the smallest spans up to the biggest ones:
                # l = span length
                # s = span start
                # e = element indices to merge
                found = False
                for l in range(1, len(searchels)+1):
                    if found:
                        break
                    for s in range(len(searchels)):
                        if found:
                            break
                        if s+l <= len(searchels):
                            e = range(s, s+l)
                            txtsearch = ''
                            for k in e:
                                txtsearch += searchels[k].text
                            # Search for the text in the joined string
                            match = searchre.search(txtsearch)
                            if match:
                                matches.append(match.group())
                                found = True
    return set(matches)
def advReplace(document, search, replace, bs=3):
    """
    Replace all occurences of string with a different string, return updated
    document

    This is a modified version of python-docx.replace() that scans blocks
    of up to *bs* adjacent text elements at a time. The replacement can be
    a string, an lxml etree element, or a list of etree elements.

    What it does:
    It searches the entire document body for text blocks, then scans those
    blocks for *search*. Since the text to search could span multiple text
    blocks, the smallest matching group of blocks (up to *bs*) is adopted.
    If the matching group has more than one block, blocks other than the
    first are cleared and all the replacement text is put on the first.

    Examples:
        original text blocks : [ 'Hel', 'lo,', ' world!' ]
        search / replace: 'Hello,' / 'Hi!'
        output blocks : [ 'Hi!', '', ' world!' ]

        original text blocks : [ 'Hel', 'lo,', ' world!' ]
        search / replace: 'Hello, world' / 'Hi!'
        output blocks : [ 'Hi!!', '', '' ]

        original text blocks : [ 'Hel', 'lo,', ' world!' ]
        search / replace: 'Hel' / 'Hal'
        output blocks : [ 'Hal', 'lo,', ' world!' ]

    @param instance document: The original document
    @param str search: The text to search for (regexp)
    @param mixed replace: The replacement text or lxml.etree element to
                          append, or a list of etree elements
    @param int bs: See above
    @return instance The document with replacement applied

    Fixes over the original: the DEBUG branch called
    ``isinstance(replace(list, tuple))`` -- i.e. it *called* replace --
    instead of ``isinstance(replace, (list, tuple))``, and one log.debug
    call lacked its '%s' placeholder.
    """
    # Enables debug output
    DEBUG = False
    newdocument = document
    # Compile the search regexp
    searchre = re.compile(search)
    # searchels holds the last n text elements found in the document,
    # 1 < n < bs -- the sliding window of adjacent <w:t> elements.
    searchels = []
    for element in newdocument.iter():
        if element.tag == '{%s}t' % nsprefixes['w']:  # t (text) elements
            if element.text:
                # Add this element to searchels
                searchels.append(element)
                if len(searchels) > bs:
                    # If searchels is too long, drop its oldest element
                    searchels.pop(0)
                # Search all combinations of searchels, starting from
                # smaller up to bigger ones
                # l = search length
                # s = search start
                # e = element IDs to merge
                found = False
                for l in range(1, len(searchels)+1):
                    if found:
                        break
                    for s in range(len(searchels)):
                        if found:
                            break
                        if s+l <= len(searchels):
                            e = range(s, s+l)
                            txtsearch = ''
                            for k in e:
                                txtsearch += searchels[k].text
                            # Search for the text in the joined string
                            match = searchre.search(txtsearch)
                            if match:
                                found = True
                                # I've found something :)
                                if DEBUG:
                                    log.debug("Found element!")
                                    log.debug("Search regexp: %s",
                                              searchre.pattern)
                                    log.debug("Requested replacement: %s",
                                              replace)
                                    log.debug("Matched text: %s", txtsearch)
                                    log.debug("Matched text (splitted): %s",
                                              map(lambda i: i.text, searchels))
                                    log.debug("Matched at position: %s",
                                              match.start())
                                    log.debug("matched in elements: %s", e)
                                    if isinstance(replace, etree._Element):
                                        log.debug("Will replace with XML CODE")
                                    elif isinstance(replace, (list, tuple)):
                                        log.debug("Will replace with LIST OF"
                                                  " ELEMENTS")
                                    else:
                                        log.debug("Will replace with: %s",
                                                  re.sub(search, replace,
                                                         txtsearch))
                                curlen = 0
                                replaced = False
                                for i in e:
                                    curlen += len(searchels[i].text)
                                    if curlen > match.start() and not replaced:
                                        # The match occurred in THIS element.
                                        # Put in the whole replaced text
                                        if isinstance(replace, etree._Element):
                                            # Convert to a list and process
                                            # it later
                                            replace = [replace]
                                        if isinstance(replace, (list, tuple)):
                                            # I'm replacing with a list of
                                            # etree elements
                                            # clear the text in the tag and
                                            # append the element after the
                                            # parent paragraph
                                            # (because t elements cannot have
                                            # childs)
                                            p = findTypeParent(
                                                searchels[i],
                                                '{%s}p' % nsprefixes['w'])
                                            searchels[i].text = re.sub(
                                                search, '', txtsearch)
                                            insindex = p.getparent().index(p)+1
                                            for r in replace:
                                                p.getparent().insert(
                                                    insindex, r)
                                                insindex += 1
                                        else:
                                            # Replacing with pure text
                                            searchels[i].text = re.sub(
                                                search, replace, txtsearch)
                                        replaced = True
                                        log.debug(
                                            "Replacing in element #: %s", i)
                                    else:
                                        # Clears the other text elements
                                        searchels[i].text = ''
    return newdocument
def getdocumenttext(document):
    '''Return the document's raw text as a list of non-empty paragraph
    strings. Tab elements contribute a literal tab character.'''
    w_ns = nsprefixes['w']
    p_tag = '{' + w_ns + '}p'
    t_tag = '{' + w_ns + '}t'
    tab_tag = '{' + w_ns + '}tab'
    # A paragraph's text can be spread over many <w:t> children, so each
    # paragraph is flattened into a single string.
    paratextlist = []
    for para in (el for el in document.iter() if el.tag == p_tag):
        pieces = []
        for element in para.iter():
            if element.tag == t_tag:
                if element.text:
                    pieces.append(element.text)
            elif element.tag == tab_tag:
                pieces.append('\t')
        paratext = u''.join(pieces)
        if paratext:
            paratextlist.append(paratext)
    return paratextlist
def coreproperties(title, subject, creator, keywords, lastmodifiedby=None):
    """
    Build the docProps/core.xml tree: the common document properties
    referred to in the 'Dublin Core' specification. See appproperties()
    for the application-specific ones.

    *lastmodifiedby* defaults to *creator* when omitted.
    """
    props = makeelement('coreProperties', nsprefix='cp')
    props.append(makeelement('title', tagtext=title, nsprefix='dc'))
    props.append(makeelement('subject', tagtext=subject, nsprefix='dc'))
    props.append(makeelement('creator', tagtext=creator, nsprefix='dc'))
    props.append(makeelement('keywords', tagtext=','.join(keywords),
                             nsprefix='cp'))
    props.append(makeelement('lastModifiedBy',
                             tagtext=lastmodifiedby or creator,
                             nsprefix='cp'))
    props.append(makeelement('revision', tagtext='1', nsprefix='cp'))
    props.append(
        makeelement('category', tagtext='Examples', nsprefix='cp'))
    props.append(
        makeelement('description', tagtext='Examples', nsprefix='dc'))
    currenttime = time.strftime('%Y-%m-%dT%H:%M:%SZ')
    # Document creation and modify times.
    # Problem: the attribute's *name* lives in one namespace while its
    # *value* uses another, so these elements are parsed from strings.
    for doctime in ['created', 'modified']:
        elm_str = (
            '<dcterms:%s xmlns:xsi="http://www.w3.org/2001/XMLSchema-instanc'
            'e" xmlns:dcterms="http://purl.org/dc/terms/" xsi:type="dcterms:'
            'W3CDTF">%s</dcterms:%s>'
        ) % (doctime, currenttime, doctime)
        props.append(etree.fromstring(elm_str))
    return props
def appproperties():
    """
    Create app-specific properties (docProps/app.xml). See
    coreproperties() for the more common document properties.

    The statistics below (pages, words, characters, ...) are fixed
    placeholder values copied from a reference document -- they are not
    computed from the actual document content.
    """
    # The root element declares two namespaces, so it is parsed from a
    # literal rather than built with makeelement(). (A dead makeelement()
    # call that was immediately overwritten has been removed.)
    appprops = etree.fromstring(
        '<?xml version="1.0" encoding="UTF-8" standalone="yes"?><Properties x'
        'mlns="http://schemas.openxmlformats.org/officeDocument/2006/extended'
        '-properties" xmlns:vt="http://schemas.openxmlformats.org/officeDocum'
        'ent/2006/docPropsVTypes"></Properties>')
    props = \
        {'Template': 'Normal.dotm',
         'TotalTime': '6',
         'Pages': '1',
         'Words': '83',
         'Characters': '475',
         'Application': 'Microsoft Word 12.0.0',
         'DocSecurity': '0',
         'Lines': '12',
         'Paragraphs': '8',
         'ScaleCrop': 'false',
         'LinksUpToDate': 'false',
         'CharactersWithSpaces': '583',
         'SharedDoc': 'false',
         'HyperlinksChanged': 'false',
         'AppVersion': '12.0000'}
    for prop in props:
        appprops.append(makeelement(prop, tagtext=props[prop], nsprefix=None))
    return appprops
def websettings():
    '''Return a minimal word/webSettings.xml tree.'''
    settings = makeelement('webSettings')
    for childtag in ('allowPNG', 'doNotSaveAsSingleFile'):
        settings.append(makeelement(childtag))
    return settings
def relationshiplist():
    '''Return the default document-level relationships: a list of
    [relationship-type URL, target part] pairs for the standard
    supporting parts of a new document.'''
    reltype = ('http://schemas.openxmlformats.org/officeDocument/2006/'
               'relationships/%s')
    targets = [
        ('numbering', 'numbering.xml'),
        ('styles', 'styles.xml'),
        ('settings', 'settings.xml'),
        ('webSettings', 'webSettings.xml'),
        ('fontTable', 'fontTable.xml'),
        ('theme', 'theme/theme1.xml'),
    ]
    return [[reltype % name, target] for name, target in targets]
def savedocx(
        document, coreprops, appprops, contenttypes, websettings,
        wordrelationships, output, imagefiledict=None):
    """
    Save a modified document.

    Serialises the six XML trees to their well-known paths inside a new
    zip archive at *output*, then copies in every supporting file found
    in the template directory (and any images recorded in
    *imagefiledict*).

    @param document:          word/document.xml tree
    @param coreprops:         docProps/core.xml tree
    @param appprops:          docProps/app.xml tree
    @param contenttypes:      [Content_Types].xml tree
    @param websettings:       word/webSettings.xml tree
    @param wordrelationships: word/_rels/document.xml.rels tree
    @param output:            path (or file object) for the .docx archive
    @param dict imagefiledict: image path -> relationship id mapping, as
        maintained by picture(); omitting it is deprecated

    NOTE(review): this function changes the process working directory
    while it runs (restored before returning), so it is not safe to call
    concurrently from multiple threads.
    """
    if imagefiledict is None:
        warn(
            'Using savedocx() without imagefiledict parameter will be deprec'
            'ated in the future.', PendingDeprecationWarning
        )
    # NOTE(review): assert is stripped under ``python -O``; an explicit
    # raise would be more robust here.
    assert os.path.isdir(template_dir)
    docxfile = zipfile.ZipFile(
        output, mode='w', compression=zipfile.ZIP_DEFLATED)
    # Move to the template data path
    prev_dir = os.path.abspath('.')  # save previous working dir
    os.chdir(template_dir)
    # Serialize our trees into out zip file
    treesandfiles = {
        document: 'word/document.xml',
        coreprops: 'docProps/core.xml',
        appprops: 'docProps/app.xml',
        contenttypes: '[Content_Types].xml',
        websettings: 'word/webSettings.xml',
        wordrelationships: 'word/_rels/document.xml.rels'
    }
    for tree in treesandfiles:
        log.info('Saving: %s' % treesandfiles[tree])
        treestring = etree.tostring(tree, pretty_print=True)
        docxfile.writestr(treesandfiles[tree], treestring)
    # Add & compress images, if applicable
    if imagefiledict is not None:
        for imagepath, picrelid in imagefiledict.items():
            archivename = 'word/media/%s_%s' % (picrelid, basename(imagepath))
            log.info('Saving: %s', archivename)
            docxfile.write(imagepath, archivename)
    # Add & compress support files
    files_to_ignore = ['.DS_Store']  # nuisance from some os's
    for dirpath, dirnames, filenames in os.walk('.'):
        for filename in filenames:
            if filename in files_to_ignore:
                continue
            templatefile = join(dirpath, filename)
            archivename = templatefile[2:]  # strip the leading './'
            log.info('Saving: %s', archivename)
            docxfile.write(templatefile, archivename)
    log.info('Saved new file to: %r', output)
    docxfile.close()
    os.chdir(prev_dir)  # restore previous working dir
    return
|
mikemaccana/python-docx
|
docx.py
|
savedocx
|
python
|
def savedocx(
document, coreprops, appprops, contenttypes, websettings,
wordrelationships, output, imagefiledict=None):
if imagefiledict is None:
warn(
'Using savedocx() without imagefiledict parameter will be deprec'
'ated in the future.', PendingDeprecationWarning
)
assert os.path.isdir(template_dir)
docxfile = zipfile.ZipFile(
output, mode='w', compression=zipfile.ZIP_DEFLATED)
# Move to the template data path
prev_dir = os.path.abspath('.') # save previous working dir
os.chdir(template_dir)
# Serialize our trees into out zip file
treesandfiles = {
document: 'word/document.xml',
coreprops: 'docProps/core.xml',
appprops: 'docProps/app.xml',
contenttypes: '[Content_Types].xml',
websettings: 'word/webSettings.xml',
wordrelationships: 'word/_rels/document.xml.rels'
}
for tree in treesandfiles:
log.info('Saving: %s' % treesandfiles[tree])
treestring = etree.tostring(tree, pretty_print=True)
docxfile.writestr(treesandfiles[tree], treestring)
# Add & compress images, if applicable
if imagefiledict is not None:
for imagepath, picrelid in imagefiledict.items():
archivename = 'word/media/%s_%s' % (picrelid, basename(imagepath))
log.info('Saving: %s', archivename)
docxfile.write(imagepath, archivename)
# Add & compress support files
files_to_ignore = ['.DS_Store'] # nuisance from some os's
for dirpath, dirnames, filenames in os.walk('.'):
for filename in filenames:
if filename in files_to_ignore:
continue
templatefile = join(dirpath, filename)
archivename = templatefile[2:]
log.info('Saving: %s', archivename)
docxfile.write(templatefile, archivename)
log.info('Saved new file to: %r', output)
docxfile.close()
os.chdir(prev_dir) # restore previous working dir
return
|
Save a modified document
|
train
|
https://github.com/mikemaccana/python-docx/blob/4c9b46dbebe3d2a9b82dbcd35af36584a36fd9fe/docx.py#L1052-L1107
| null |
# encoding: utf-8
"""
Open and modify Microsoft Word 2007 docx files (called 'OpenXML' and
'Office OpenXML' by Microsoft)
Part of Python's docx module - http://github.com/mikemaccana/python-docx
See LICENSE for licensing information.
"""
import os
import re
import time
import shutil
import zipfile
from lxml import etree
from os.path import abspath, basename, join
try:
from PIL import Image
except ImportError:
import Image
try:
from PIL.ExifTags import TAGS
except ImportError:
TAGS = {}
from exceptions import PendingDeprecationWarning
from warnings import warn
import logging
log = logging.getLogger(__name__)
# Record template directory's location which is just 'template' for a docx
# developer or 'site-packages/docx-template' if you have installed docx
template_dir = join(os.path.dirname(__file__), 'docx-template') # installed
if not os.path.isdir(template_dir):
template_dir = join(os.path.dirname(__file__), 'template') # dev
# All Word prefixes / namespace matches used in document.xml & core.xml.
# LXML doesn't actually use prefixes (just the real namespace) , but these
# make it easier to copy Word output more easily.
nsprefixes = {
'mo': 'http://schemas.microsoft.com/office/mac/office/2008/main',
'o': 'urn:schemas-microsoft-com:office:office',
've': 'http://schemas.openxmlformats.org/markup-compatibility/2006',
# Text Content
'w': 'http://schemas.openxmlformats.org/wordprocessingml/2006/main',
'w10': 'urn:schemas-microsoft-com:office:word',
'wne': 'http://schemas.microsoft.com/office/word/2006/wordml',
# Drawing
'a': 'http://schemas.openxmlformats.org/drawingml/2006/main',
'm': 'http://schemas.openxmlformats.org/officeDocument/2006/math',
'mv': 'urn:schemas-microsoft-com:mac:vml',
'pic': 'http://schemas.openxmlformats.org/drawingml/2006/picture',
'v': 'urn:schemas-microsoft-com:vml',
'wp': ('http://schemas.openxmlformats.org/drawingml/2006/wordprocessing'
'Drawing'),
# Properties (core and extended)
'cp': ('http://schemas.openxmlformats.org/package/2006/metadata/core-pr'
'operties'),
'dc': 'http://purl.org/dc/elements/1.1/',
'ep': ('http://schemas.openxmlformats.org/officeDocument/2006/extended-'
'properties'),
'xsi': 'http://www.w3.org/2001/XMLSchema-instance',
# Content Types
'ct': 'http://schemas.openxmlformats.org/package/2006/content-types',
# Package Relationships
'r': ('http://schemas.openxmlformats.org/officeDocument/2006/relationsh'
'ips'),
'pr': 'http://schemas.openxmlformats.org/package/2006/relationships',
# Dublin Core document properties
'dcmitype': 'http://purl.org/dc/dcmitype/',
'dcterms': 'http://purl.org/dc/terms/'}
def opendocx(file):
    """Open a docx file and return the word/document.xml tree."""
    archive = zipfile.ZipFile(file)
    return etree.fromstring(archive.read('word/document.xml'))
def newdocument():
    """Return a minimal document tree: a w:document holding an empty w:body."""
    doc = makeelement('document')
    body = makeelement('body')
    doc.append(body)
    return doc
def makeelement(tagname, tagtext=None, nsprefix='w', attributes=None,
                attrnsprefix=None):
    '''Create an element & return it

    :param tagname:      local name of the element to create
    :param tagtext:      optional text content for the element
    :param nsprefix:     key into ``nsprefixes`` for the tag namespace; may
                         also be a list of prefixes, in which case all are
                         registered in the element's nsmap but only the first
                         is used for the tag itself; None means no namespace
    :param attributes:   optional dict of attribute name -> value
    :param attrnsprefix: key into ``nsprefixes`` for the attribute namespace;
                         when omitted, 'w'-tagged elements reuse the tag
                         namespace and everything else gets un-namespaced
                         attributes
    '''
    # Deal with list of nsprefix by making namespacemap
    namespacemap = None
    if isinstance(nsprefix, list):
        namespacemap = {}
        for prefix in nsprefix:
            namespacemap[prefix] = nsprefixes[prefix]
        # FIXME: rest of code below expects a single prefix
        nsprefix = nsprefix[0]
    if nsprefix:
        namespace = '{%s}' % nsprefixes[nsprefix]
    else:
        # For when namespace = None
        namespace = ''
    newelement = etree.Element(namespace+tagname, nsmap=namespacemap)
    # Add attributes with namespaces
    if attributes:
        # If they haven't bothered setting attribute namespace, use an empty
        # string (equivalent of no namespace)
        if not attrnsprefix:
            # Quick hack: it seems every element that has a 'w' nsprefix for
            # its tag uses the same prefix for it's attributes
            if nsprefix == 'w':
                attributenamespace = namespace
            else:
                attributenamespace = ''
        else:
            attributenamespace = '{'+nsprefixes[attrnsprefix]+'}'
        for tagattribute in attributes:
            newelement.set(attributenamespace+tagattribute,
                           attributes[tagattribute])
    if tagtext:
        newelement.text = tagtext
    return newelement
def pagebreak(type='page', orient='portrait'):
    '''Insert a break, default 'page'.
    See http://openxmldeveloper.org/forums/thread/4075.aspx
    Return our page break element.'''
    # Only 'page' and 'section' breaks are implemented.
    validtypes = ['page', 'section']
    if type not in validtypes:
        raise ValueError(
            'Page break style "%s" not implemented. Valid styles: %s.'
            % (type, validtypes))
    brk = makeelement('p')
    if type == 'page':
        # A page break is a w:br inside a run
        run = makeelement('r')
        run.append(makeelement('br', attributes={'type': type}))
        brk.append(run)
    elif type == 'section':
        # A section break carries page-size properties for the new section
        pPr = makeelement('pPr')
        sectPr = makeelement('sectPr')
        if orient == 'portrait':
            pgSz = makeelement('pgSz', attributes={'w': '12240', 'h': '15840'})
        elif orient == 'landscape':
            pgSz = makeelement('pgSz', attributes={'h': '12240', 'w': '15840',
                                                   'orient': 'landscape'})
        sectPr.append(pgSz)
        pPr.append(sectPr)
        brk.append(pPr)
    return brk
def paragraph(paratext, style='BodyText', breakbefore=False, jc='left'):
    """
    Return a new paragraph element containing *paratext*. The paragraph's
    default style is 'Body Text', but a new style may be set using the
    *style* parameter.

    @param string jc: Paragraph alignment, possible values:
                      left, center, right, both (justified), ...
                      see http://www.schemacentral.com/sc/ooxml/t-w_ST_Jc.html
                      for a full list

    If *paratext* is a list, add a run for each (text, char_format_str)
    2-tuple in the list. char_format_str is a string containing one or more
    of the characters 'b', 'i', or 'u', meaning bold, italic, and underline
    respectively. For example:

        paratext = [
            ('some bold text', 'b'),
            ('some normal text', ''),
            ('some italic underlined text', 'iu')
        ]
    """
    # Make our elements
    paragraph = makeelement('p')
    if not isinstance(paratext, list):
        paratext = [(paratext, '')]
    text_tuples = []
    for pt in paratext:
        # Each item is either a bare string or a (text, formatting) pair
        text, char_styles_str = (pt if isinstance(pt, (list, tuple))
                                 else (pt, ''))
        text_elm = makeelement('t', tagtext=text)
        if len(text.strip()) < len(text):
            # Word drops leading/trailing whitespace unless the text element
            # carries xml:space="preserve"
            text_elm.set('{http://www.w3.org/XML/1998/namespace}space',
                         'preserve')
        text_tuples.append([text_elm, char_styles_str])
    pPr = makeelement('pPr')
    pStyle = makeelement('pStyle', attributes={'val': style})
    pJc = makeelement('jc', attributes={'val': jc})
    pPr.append(pStyle)
    pPr.append(pJc)
    # Add the text to the run, and the run to the paragraph
    paragraph.append(pPr)
    for text_elm, char_styles_str in text_tuples:
        run = makeelement('r')
        rPr = makeelement('rPr')
        # Apply styles
        if 'b' in char_styles_str:
            b = makeelement('b')
            rPr.append(b)
        if 'i' in char_styles_str:
            i = makeelement('i')
            rPr.append(i)
        if 'u' in char_styles_str:
            u = makeelement('u', attributes={'val': 'single'})
            rPr.append(u)
        run.append(rPr)
        # Insert lastRenderedPageBreak for assistive technologies like
        # document narrators to know when a page break occurred.
        if breakbefore:
            lastRenderedPageBreak = makeelement('lastRenderedPageBreak')
            run.append(lastRenderedPageBreak)
        run.append(text_elm)
        paragraph.append(run)
    # Return the combined paragraph
    return paragraph
def contenttypes():
    '''Return the [Content_Types].xml tree for a new document.'''
    types = etree.fromstring(
        '<Types xmlns="http://schemas.openxmlformats.org/package/2006/conten'
        't-types"></Types>')

    # Per-part content-type overrides
    parts = {
        '/word/theme/theme1.xml': 'application/vnd.openxmlformats-officedocu'
                                  'ment.theme+xml',
        '/word/fontTable.xml': 'application/vnd.openxmlformats-officedocu'
                               'ment.wordprocessingml.fontTable+xml',
        '/docProps/core.xml': 'application/vnd.openxmlformats-package.co'
                              're-properties+xml',
        '/docProps/app.xml': 'application/vnd.openxmlformats-officedocu'
                             'ment.extended-properties+xml',
        '/word/document.xml': 'application/vnd.openxmlformats-officedocu'
                              'ment.wordprocessingml.document.main+xml',
        '/word/settings.xml': 'application/vnd.openxmlformats-officedocu'
                              'ment.wordprocessingml.settings+xml',
        '/word/numbering.xml': 'application/vnd.openxmlformats-officedocu'
                               'ment.wordprocessingml.numbering+xml',
        '/word/styles.xml': 'application/vnd.openxmlformats-officedocu'
                            'ment.wordprocessingml.styles+xml',
        '/word/webSettings.xml': 'application/vnd.openxmlformats-officedocu'
                                 'ment.wordprocessingml.webSettings+xml'}
    for partname, contenttype in parts.items():
        types.append(makeelement(
            'Override', nsprefix=None,
            attributes={'PartName': partname, 'ContentType': contenttype}))

    # Default content types keyed on file extension
    filetypes = {
        'gif': 'image/gif',
        'jpeg': 'image/jpeg',
        'jpg': 'image/jpeg',
        'png': 'image/png',
        'rels': 'application/vnd.openxmlformats-package.relationships+xml',
        'xml': 'application/xml'
    }
    for extension, contenttype in filetypes.items():
        types.append(makeelement(
            'Default', nsprefix=None,
            attributes={'Extension': extension, 'ContentType': contenttype}))
    return types
def heading(headingtext, headinglevel, lang='en'):
    '''Make a new heading, return the heading element'''
    # Style names are localized; only English and Italian are known here
    lmap = {'en': 'Heading', 'it': 'Titolo'}
    head = makeelement('p')
    props = makeelement('pPr')
    props.append(makeelement(
        'pStyle', attributes={'val': lmap[lang]+str(headinglevel)}))
    head.append(props)
    run = makeelement('r')
    run.append(makeelement('t', tagtext=headingtext))
    head.append(run)
    return head
def table(contents, heading=True, colw=None, cwunit='dxa', tblw=0,
          twunit='auto', borders={}, celstyle=None):
    """
    Return a table element based on specified parameters

    @param list contents: A list of lists describing contents. Every item in
                          the list can be a string or a valid XML element
                          itself. It can also be a list. In that case all the
                          listed elements will be merged into the cell.
    @param bool heading:  Tells whether first line should be treated as
                          heading or not
    @param list colw:     list of integer column widths specified in wunitS.
    @param str cwunit:    Unit used for column width:
                            'pct'  : fiftieths of a percent
                            'dxa'  : twentieths of a point
                            'nil'  : no width
                            'auto' : automagically determined
    @param int tblw:      Table width
    @param str twunit:    Unit used for table width. Same possible values as
                          cwunit.
    @param dict borders:  Dictionary defining table border. Supported keys
                          are: 'top', 'left', 'bottom', 'right',
                          'insideH', 'insideV', 'all'.
                          When specified, the 'all' key has precedence over
                          others. Each key must define a dict of border
                          attributes:
                            color : The color of the border, in hex or
                                    'auto'
                            space : The space, measured in points
                            sz    : The size of the border, in eighths of
                                    a point
                            val   : The style of the border, see
                                    http://www.schemacentral.com/sc/ooxml/t-w_ST_Border.htm
    @param list celstyle: Specify the style for each colum, list of dicts.
                          supported keys:
                          'align' : specify the alignment, see paragraph
                                    documentation.

    @return lxml.etree:   Generated XML etree element

    NOTE(review): the ``borders={}`` mutable default is safe here because it
    is only read, never mutated — but it is a fragile pattern.
    """
    table = makeelement('tbl')
    # Column count is taken from the first row
    columns = len(contents[0])
    # Table properties
    tableprops = makeelement('tblPr')
    tablestyle = makeelement('tblStyle', attributes={'val': ''})
    tableprops.append(tablestyle)
    tablewidth = makeelement(
        'tblW', attributes={'w': str(tblw), 'type': str(twunit)})
    tableprops.append(tablewidth)
    if len(borders.keys()):
        tableborders = makeelement('tblBorders')
        for b in ['top', 'left', 'bottom', 'right', 'insideH', 'insideV']:
            if b in borders.keys() or 'all' in borders.keys():
                # 'all' overrides any individually-specified side
                k = 'all' if 'all' in borders.keys() else b
                attrs = {}
                for a in borders[k].keys():
                    # NOTE(review): `unicode` is Python 2 only — this module
                    # predates Python 3
                    attrs[a] = unicode(borders[k][a])
                borderelem = makeelement(b, attributes=attrs)
                tableborders.append(borderelem)
        tableprops.append(tableborders)
    tablelook = makeelement('tblLook', attributes={'val': '0400'})
    tableprops.append(tablelook)
    table.append(tableprops)
    # Table Grid: one gridCol per column, default width 2390 dxa
    tablegrid = makeelement('tblGrid')
    for i in range(columns):
        attrs = {'w': str(colw[i]) if colw else '2390'}
        tablegrid.append(makeelement('gridCol', attributes=attrs))
    table.append(tablegrid)
    # Heading Row
    row = makeelement('tr')
    rowprops = makeelement('trPr')
    cnfStyle = makeelement('cnfStyle', attributes={'val': '000000100000'})
    rowprops.append(cnfStyle)
    row.append(rowprops)
    if heading:
        i = 0
        # NOTE: the loop variable shadows the `heading` parameter — after
        # this loop, `heading` is the last heading cell (truthy), which the
        # slice below relies on
        for heading in contents[0]:
            cell = makeelement('tc')
            # Cell properties
            cellprops = makeelement('tcPr')
            if colw:
                wattr = {'w': str(colw[i]), 'type': cwunit}
            else:
                wattr = {'w': '0', 'type': 'auto'}
            cellwidth = makeelement('tcW', attributes=wattr)
            cellstyle = makeelement('shd', attributes={'val': 'clear',
                                                       'color': 'auto',
                                                       'fill': 'FFFFFF',
                                                       'themeFill': 'text2',
                                                       'themeFillTint': '99'})
            cellprops.append(cellwidth)
            cellprops.append(cellstyle)
            cell.append(cellprops)
            # Paragraph (Content)
            if not isinstance(heading, (list, tuple)):
                heading = [heading]
            for h in heading:
                if isinstance(h, etree._Element):
                    cell.append(h)
                else:
                    cell.append(paragraph(h, jc='center'))
            row.append(cell)
            i += 1
        table.append(row)
    # Contents Rows (skip the first row if it was consumed as heading)
    for contentrow in contents[1 if heading else 0:]:
        row = makeelement('tr')
        i = 0
        for content in contentrow:
            cell = makeelement('tc')
            # Properties
            cellprops = makeelement('tcPr')
            if colw:
                wattr = {'w': str(colw[i]), 'type': cwunit}
            else:
                wattr = {'w': '0', 'type': 'auto'}
            cellwidth = makeelement('tcW', attributes=wattr)
            cellprops.append(cellwidth)
            cell.append(cellprops)
            # Paragraph (Content)
            if not isinstance(content, (list, tuple)):
                content = [content]
            for c in content:
                if isinstance(c, etree._Element):
                    cell.append(c)
                else:
                    if celstyle and 'align' in celstyle[i].keys():
                        align = celstyle[i]['align']
                    else:
                        align = 'left'
                    cell.append(paragraph(c, jc=align))
            row.append(cell)
            i += 1
        table.append(row)
    return table
def picture(
        relationshiplist, picname, picdescription, pixelwidth=None,
        pixelheight=None, nochangeaspect=True, nochangearrowheads=True,
        imagefiledict=None):
    """
    Take a relationshiplist, picture file name, and return a paragraph
    containing the image and an updated relationshiplist

    :param relationshiplist: list of [type, target] relationships; an image
                             relationship is appended in place when needed
    :param picname:          path to the image file
    :param picdescription:   alt-text / description for the image
    :param pixelwidth:       optional display width in pixels (defaults to
                             the image's own width)
    :param pixelheight:      optional display height in pixels
    :param imagefiledict:    optional mapping of absolute image path ->
                             relationship id; when omitted the image is
                             copied into the template directory (deprecated)
    :return: (relationshiplist, paragraph) or, when imagefiledict is given,
             (relationshiplist, paragraph, imagefiledict)
    """
    if imagefiledict is None:
        warn(
            'Using picture() without imagefiledict parameter will be depreca'
            'ted in the future.', PendingDeprecationWarning
        )

    # http://openxmldeveloper.org/articles/462.aspx
    # Create an image. Size may be specified, otherwise it will based on the
    # pixel size of image. Return a paragraph containing the picture

    # Set relationship ID to that of the image or the first available one
    picid = '2'
    picpath = abspath(picname)

    if imagefiledict is not None:
        # Keep track of the image files in a separate dictionary so they don't
        # need to be copied into the template directory
        if picpath not in imagefiledict:
            picrelid = 'rId' + str(len(relationshiplist) + 1)
            imagefiledict[picpath] = picrelid

            relationshiplist.append([
                'http://schemas.openxmlformats.org/officeDocument/2006/relat'
                'ionships/image',
                'media/%s_%s' % (picrelid, basename(picpath))
            ])
        else:
            # Image already registered: reuse its relationship id
            picrelid = imagefiledict[picpath]
    else:
        # Copy files into template directory for backwards compatibility
        # Images still accumulate in the template directory this way
        picrelid = 'rId' + str(len(relationshiplist) + 1)

        relationshiplist.append([
            'http://schemas.openxmlformats.org/officeDocument/2006/relations'
            'hips/image', 'media/' + picname
        ])

        media_dir = join(template_dir, 'word', 'media')
        if not os.path.isdir(media_dir):
            os.mkdir(media_dir)
        shutil.copyfile(picname, join(media_dir, picname))

    image = Image.open(picpath)

    # Extract EXIF data, if available
    # NOTE(review): the bare `except:` below swallows every error (including
    # KeyboardInterrupt) — presumably meant to catch missing-EXIF cases only
    try:
        exif = image._getexif()
        exif = {} if exif is None else exif
    except:
        exif = {}

    imageExif = {}
    for tag, value in exif.items():
        imageExif[TAGS.get(tag, tag)] = value

    # Map the EXIF orientation flag (1-8) to a rotation angle plus flips
    imageOrientation = imageExif.get('Orientation', 1)
    imageAngle = {
        1: 0, 2: 0, 3: 180, 4: 0, 5: 90, 6: 90, 7: 270, 8: 270
    }[imageOrientation]
    imageFlipH = 'true' if imageOrientation in (2, 5, 7) else 'false'
    imageFlipV = 'true' if imageOrientation == 4 else 'false'

    # Check if the user has specified a size
    if not pixelwidth or not pixelheight:
        # If not, get info from the picture itself
        pixelwidth, pixelheight = image.size[0:2]
    # Swap width and height if necessary (rotated orientations)
    if imageOrientation in (5, 6, 7, 8):
        pixelwidth, pixelheight = pixelheight, pixelwidth

    # OpenXML measures on-screen objects in English Metric Units:
    # 12700 EMU = 1 point, so each pixel is mapped to one point here
    emuperpixel = 12700
    width = str(pixelwidth * emuperpixel)
    height = str(pixelheight * emuperpixel)

    # There are 3 main elements inside a picture
    # 1. The Blipfill - specifies how the image fills the picture area
    #    (stretch, tile, etc.)
    blipfill = makeelement('blipFill', nsprefix='pic')
    blipfill.append(makeelement('blip', nsprefix='a', attrnsprefix='r',
                    attributes={'embed': picrelid}))
    stretch = makeelement('stretch', nsprefix='a')
    stretch.append(makeelement('fillRect', nsprefix='a'))
    blipfill.append(makeelement('srcRect', nsprefix='a'))
    blipfill.append(stretch)

    # 2. The non visual picture properties
    nvpicpr = makeelement('nvPicPr', nsprefix='pic')
    cnvpr = makeelement(
        'cNvPr', nsprefix='pic',
        attributes={'id': '0', 'name': 'Picture 1', 'descr': picdescription}
    )
    nvpicpr.append(cnvpr)
    cnvpicpr = makeelement('cNvPicPr', nsprefix='pic')
    cnvpicpr.append(makeelement(
        'picLocks', nsprefix='a',
        attributes={'noChangeAspect': str(int(nochangeaspect)),
                    'noChangeArrowheads': str(int(nochangearrowheads))}))
    nvpicpr.append(cnvpicpr)

    # 3. The Shape properties
    sppr = makeelement('spPr', nsprefix='pic', attributes={'bwMode': 'auto'})
    # Angle is expressed in 60000ths of a degree
    xfrm = makeelement(
        'xfrm', nsprefix='a', attributes={
            'rot': str(imageAngle * 60000), 'flipH': imageFlipH,
            'flipV': imageFlipV
        }
    )
    xfrm.append(
        makeelement('off', nsprefix='a', attributes={'x': '0', 'y': '0'})
    )
    xfrm.append(
        makeelement(
            'ext', nsprefix='a', attributes={'cx': width, 'cy': height}
        )
    )
    prstgeom = makeelement(
        'prstGeom', nsprefix='a', attributes={'prst': 'rect'}
    )
    prstgeom.append(makeelement('avLst', nsprefix='a'))
    sppr.append(xfrm)
    sppr.append(prstgeom)

    # Add our 3 parts to the picture element
    pic = makeelement('pic', nsprefix='pic')
    pic.append(nvpicpr)
    pic.append(blipfill)
    pic.append(sppr)

    # Now make the supporting elements
    # The following sequence is just: make element, then add its children
    graphicdata = makeelement(
        'graphicData', nsprefix='a',
        attributes={'uri': ('http://schemas.openxmlformats.org/drawingml/200'
                            '6/picture')})
    graphicdata.append(pic)
    graphic = makeelement('graphic', nsprefix='a')
    graphic.append(graphicdata)

    framelocks = makeelement('graphicFrameLocks', nsprefix='a',
                             attributes={'noChangeAspect': '1'})
    framepr = makeelement('cNvGraphicFramePr', nsprefix='wp')
    framepr.append(framelocks)
    docpr = makeelement('docPr', nsprefix='wp',
                        attributes={'id': picid, 'name': 'Picture 1',
                                    'descr': picdescription})
    effectextent = makeelement('effectExtent', nsprefix='wp',
                               attributes={'l': '25400', 't': '0', 'r': '0',
                                           'b': '0'})
    extent = makeelement('extent', nsprefix='wp',
                         attributes={'cx': width, 'cy': height})
    inline = makeelement('inline', attributes={'distT': "0", 'distB': "0",
                                               'distL': "0", 'distR': "0"},
                         nsprefix='wp')
    inline.append(extent)
    inline.append(effectextent)
    inline.append(docpr)
    inline.append(framepr)
    inline.append(graphic)
    drawing = makeelement('drawing')
    drawing.append(inline)
    run = makeelement('r')
    run.append(drawing)
    paragraph = makeelement('p')
    paragraph.append(run)

    if imagefiledict is not None:
        return relationshiplist, paragraph, imagefiledict
    else:
        return relationshiplist, paragraph
def search(document, search):
    '''Search a document for a regex, return success / fail result

    :param document: etree document (as returned by opendocx())
    :param search:   regular expression pattern string
    :return:         True if any w:t text element matches, else False
    '''
    searchre = re.compile(search)
    for element in document.iter():
        if element.tag == '{%s}t' % nsprefixes['w']:  # t (text) elements
            if element.text and searchre.search(element.text):
                # Short-circuit: the original kept scanning the whole
                # document after the first hit; one match is enough
                return True
    return False
def replace(document, search, replace):
    """
    Replace all occurences of string with a different string, return updated
    document
    """
    newdocument = document
    pattern = re.compile(search)
    text_tag = '{%s}t' % nsprefixes['w']  # w:t elements hold document text
    for element in newdocument.iter():
        if element.tag == text_tag and element.text:
            if pattern.search(element.text):
                element.text = re.sub(search, replace, element.text)
    return newdocument
def clean(document):
    """Perform misc cleaning operations on documents.

    Removes empty w:t (text) and w:r (run) elements.
    Returns cleaned document.
    """
    for tagname in ('t', 'r'):
        fulltag = '{%s}%s' % (nsprefixes['w'], tagname)
        # Collect first, then remove, so we never mutate while iterating
        empties = [el for el in document.iter()
                   if el.tag == fulltag and not el.text and not len(el)]
        for el in empties:
            el.getparent().remove(el)
    return document
def findTypeParent(element, tag):
    """ Finds first parent of element of the given type

    @param object element: etree element
    @param string tag: the tag parent to search for

    @return object element: the found parent or None when not found
    """
    p = element
    while True:
        p = p.getparent()
        if p is None:
            # Reached the tree root without a match. The original code
            # dereferenced p.tag here and raised AttributeError instead of
            # returning None as documented.
            return None
        if p.tag == tag:
            return p
def AdvSearch(document, search, bs=3):
    '''Return set of all regex matches

    This is an advanced version of python-docx.search() that takes into
    account blocks of <bs> elements at a time.

    Since the text to search for can be spread across multiple adjacent
    w:t text elements, a sliding window of up to *bs* consecutive text
    elements is concatenated and the pattern is matched against every
    contiguous sub-run of that window, smallest first.

    Examples:
    text blocks : [ 'Hel', 'lo,', ' world!' ]
    search      : 'Hello,'      -> matches

    text blocks : [ 'Hel', 'lo', ' __', 'name', '__!' ]
    search      : '(__[a-z]+__)' -> matches '__name__'

    @param instance document: The original document
    @param str search: The text to search for (regexp)
    @param int bs: window size — maximum number of adjacent text elements
                   a single match may span

    @return set All occurrences of the search pattern found
    '''
    # Compile the search regexp
    searchre = re.compile(search)

    matches = []

    # Will match against searchels. Searchels is a list that contains last
    # n text elements found in the document. 1 < n < bs
    searchels = []

    for element in document.iter():
        if element.tag == '{%s}t' % nsprefixes['w']:  # t (text) elements
            if element.text:
                # Add this element to searchels
                searchels.append(element)
                if len(searchels) > bs:
                    # If searchels is too long, drop the oldest element
                    searchels.pop(0)

                # Search all combinations of searchels, starting from
                # smaller up to bigger ones
                # l = number of adjacent elements concatenated
                # s = index of the first concatenated element
                # e = element IDs to merge
                found = False
                for l in range(1, len(searchels)+1):
                    if found:
                        break
                    for s in range(len(searchels)):
                        if found:
                            break
                        if s+l <= len(searchels):
                            e = range(s, s+l)
                            txtsearch = ''
                            for k in e:
                                txtsearch += searchels[k].text

                            # Search for the pattern in the concatenated text
                            match = searchre.search(txtsearch)
                            if match:
                                matches.append(match.group())
                                found = True
    return set(matches)
def advReplace(document, search, replace, bs=3):
    """
    Replace all occurrences of string with a different string, return updated
    document

    This is a modified version of python-docx.replace() that takes into
    account blocks of <bs> elements at a time. The replace element can also
    be a string or an xml etree element.

    What it does:
    It searches the entire document body for text blocks.
    Then scans those text blocks for the search pattern.
    Since the text to search for could be spread across multiple text blocks,
    a sliding window of up to <bs> adjacent text elements is matched.
    The smallest matching group of blocks (up to bs) is then adopted.
    If the matching group has more than one block, blocks other than the
    first are cleared and all the replacement text is put on the first block.

    Examples:
    original text blocks : [ 'Hel', 'lo,', ' world!' ]
    search / replace: 'Hello,' / 'Hi!'
    output blocks : [ 'Hi!', '', ' world!' ]

    original text blocks : [ 'Hel', 'lo,', ' world!' ]
    search / replace: 'Hello, world' / 'Hi!'
    output blocks : [ 'Hi!!', '', '' ]

    original text blocks : [ 'Hel', 'lo,', ' world!' ]
    search / replace: 'Hel' / 'Hal'
    output blocks : [ 'Hal', 'lo,', ' world!' ]

    @param instance document: The original document
    @param str search: The text to search for (regexp)
    @param mixed replace: The replacement text or lxml.etree element to
                          append, or a list of etree elements
    @param int bs: See above

    @return instance The document with replacement applied
    """
    # Enables debug output
    DEBUG = False

    newdocument = document

    # Compile the search regexp
    searchre = re.compile(search)

    # Will match against searchels. Searchels is a list that contains last
    # n text elements found in the document. 1 < n < bs
    searchels = []

    for element in newdocument.iter():
        if element.tag == '{%s}t' % nsprefixes['w']:  # t (text) elements
            if element.text:
                # Add this element to searchels
                searchels.append(element)
                if len(searchels) > bs:
                    # If searchels is too long, drop the oldest element
                    searchels.pop(0)

                # Search all combinations of searchels, starting from
                # smaller up to bigger ones
                # l = search length (number of adjacent elements merged)
                # s = search start (index of the first merged element)
                # e = element IDs to merge
                found = False
                for l in range(1, len(searchels)+1):
                    if found:
                        break
                    for s in range(len(searchels)):
                        if found:
                            break
                        if s+l <= len(searchels):
                            e = range(s, s+l)
                            txtsearch = ''
                            for k in e:
                                txtsearch += searchels[k].text

                            # Search for the pattern in the merged text
                            match = searchre.search(txtsearch)
                            if match:
                                found = True

                                # I've found something :)
                                if DEBUG:
                                    log.debug("Found element!")
                                    log.debug("Search regexp: %s",
                                              searchre.pattern)
                                    log.debug("Requested replacement: %s",
                                              replace)
                                    log.debug("Matched text: %s", txtsearch)
                                    log.debug("Matched text (splitted): %s",
                                              map(lambda i: i.text, searchels))
                                    log.debug("Matched at position: %s",
                                              match.start())
                                    log.debug("matched in elements: %s", e)
                                    if isinstance(replace, etree._Element):
                                        log.debug("Will replace with XML CODE")
                                    # BUGFIX: original read
                                    # `isinstance(replace(list, tuple))`,
                                    # which *called* replace instead of
                                    # testing its type (TypeError for
                                    # string replacements when DEBUG is on)
                                    elif isinstance(replace, (list, tuple)):
                                        log.debug("Will replace with LIST OF"
                                                  " ELEMENTS")
                                    else:
                                        # BUGFIX: original passed the value
                                        # with no %s placeholder in the
                                        # format string
                                        log.debug("Will replace with: %s",
                                                  re.sub(search, replace,
                                                         txtsearch))

                                curlen = 0
                                replaced = False
                                for i in e:
                                    curlen += len(searchels[i].text)
                                    if curlen > match.start() and not replaced:
                                        # The match occurred in THIS element.
                                        # Put in the whole replaced text
                                        if isinstance(replace, etree._Element):
                                            # Convert to a list and process
                                            # it later
                                            replace = [replace]
                                        if isinstance(replace, (list, tuple)):
                                            # I'm replacing with a list of
                                            # etree elements
                                            # clear the text in the tag and
                                            # append the element after the
                                            # parent paragraph
                                            # (because t elements cannot have
                                            # childs)
                                            p = findTypeParent(
                                                searchels[i],
                                                '{%s}p' % nsprefixes['w'])
                                            searchels[i].text = re.sub(
                                                search, '', txtsearch)
                                            insindex = p.getparent().index(p)+1
                                            for r in replace:
                                                p.getparent().insert(
                                                    insindex, r)
                                                insindex += 1
                                        else:
                                            # Replacing with pure text
                                            searchels[i].text = re.sub(
                                                search, replace, txtsearch)
                                        replaced = True
                                        log.debug(
                                            "Replacing in element #: %s", i)
                                    else:
                                        # Clears the other text elements
                                        searchels[i].text = ''
    return newdocument
def getdocumenttext(document):
    '''Return the raw text of a document, as a list of paragraphs.'''
    ptag = '{' + nsprefixes['w'] + '}p'
    ttag = '{' + nsprefixes['w'] + '}t'
    tabtag = '{' + nsprefixes['w'] + '}tab'
    paratextlist = []
    # A single sentence may be spread over multiple w:t elements, so collect
    # all text (and tabs) under each w:p paragraph element.
    for para in [el for el in document.iter() if el.tag == ptag]:
        pieces = []
        for child in para.iter():
            if child.tag == ttag:
                if child.text:
                    pieces.append(child.text)
            elif child.tag == tabtag:
                pieces.append('\t')
        paratext = u''.join(pieces)
        # Skip paragraphs that produced no text at all
        if paratext:
            paratextlist.append(paratext)
    return paratextlist
def coreproperties(title, subject, creator, keywords, lastmodifiedby=None):
    """
    Create core properties (common document properties referred to in the
    'Dublin Core' specification). See appproperties() for other stuff.

    :param title:          document title
    :param subject:        document subject
    :param creator:        author name
    :param keywords:       iterable of keyword strings (joined with commas)
    :param lastmodifiedby: defaults to *creator* when falsy/omitted
    """
    coreprops = makeelement('coreProperties', nsprefix='cp')
    coreprops.append(makeelement('title', tagtext=title, nsprefix='dc'))
    coreprops.append(makeelement('subject', tagtext=subject, nsprefix='dc'))
    coreprops.append(makeelement('creator', tagtext=creator, nsprefix='dc'))
    coreprops.append(makeelement('keywords', tagtext=','.join(keywords),
                                 nsprefix='cp'))
    if not lastmodifiedby:
        lastmodifiedby = creator
    coreprops.append(makeelement('lastModifiedBy', tagtext=lastmodifiedby,
                                 nsprefix='cp'))
    coreprops.append(makeelement('revision', tagtext='1', nsprefix='cp'))
    coreprops.append(
        makeelement('category', tagtext='Examples', nsprefix='cp'))
    coreprops.append(
        makeelement('description', tagtext='Examples', nsprefix='dc'))
    currenttime = time.strftime('%Y-%m-%dT%H:%M:%SZ')
    # Document creation and modify times
    # Problem here: the attribute's name uses one namespace while its value
    # uses another, which makeelement() cannot express.
    # We're creating the element from a string as a workaround...
    for doctime in ['created', 'modified']:
        elm_str = (
            '<dcterms:%s xmlns:xsi="http://www.w3.org/2001/XMLSchema-instanc'
            'e" xmlns:dcterms="http://purl.org/dc/terms/" xsi:type="dcterms:'
            'W3CDTF">%s</dcterms:%s>'
        ) % (doctime, currenttime, doctime)
        coreprops.append(etree.fromstring(elm_str))
    return coreprops
def appproperties():
    """
    Create app-specific properties. See docproperties() for more common
    document properties.

    Returns the docProps/app.xml root element pre-filled with static values
    matching the bundled template.
    """
    # Build the root directly from a string so both the default and the vt
    # namespace declarations land on the element. (The original code first
    # created the root with makeelement() and immediately overwrote it —
    # dead code, removed.)
    appprops = etree.fromstring(
        '<?xml version="1.0" encoding="UTF-8" standalone="yes"?><Properties x'
        'mlns="http://schemas.openxmlformats.org/officeDocument/2006/extended'
        '-properties" xmlns:vt="http://schemas.openxmlformats.org/officeDocum'
        'ent/2006/docPropsVTypes"></Properties>')
    props = \
        {'Template': 'Normal.dotm',
         'TotalTime': '6',
         'Pages': '1',
         'Words': '83',
         'Characters': '475',
         'Application': 'Microsoft Word 12.0.0',
         'DocSecurity': '0',
         'Lines': '12',
         'Paragraphs': '8',
         'ScaleCrop': 'false',
         'LinksUpToDate': 'false',
         'CharactersWithSpaces': '583',
         'SharedDoc': 'false',
         'HyperlinksChanged': 'false',
         'AppVersion': '12.0000'}
    for prop, value in props.items():
        appprops.append(makeelement(prop, tagtext=value, nsprefix=None))
    return appprops
def websettings():
'''Generate websettings'''
web = makeelement('webSettings')
web.append(makeelement('allowPNG'))
web.append(makeelement('doNotSaveAsSingleFile'))
return web
def relationshiplist():
relationshiplist =\
[['http://schemas.openxmlformats.org/officeDocument/2006/'
'relationships/numbering', 'numbering.xml'],
['http://schemas.openxmlformats.org/officeDocument/2006/'
'relationships/styles', 'styles.xml'],
['http://schemas.openxmlformats.org/officeDocument/2006/'
'relationships/settings', 'settings.xml'],
['http://schemas.openxmlformats.org/officeDocument/2006/'
'relationships/webSettings', 'webSettings.xml'],
['http://schemas.openxmlformats.org/officeDocument/2006/'
'relationships/fontTable', 'fontTable.xml'],
['http://schemas.openxmlformats.org/officeDocument/2006/'
'relationships/theme', 'theme/theme1.xml']]
return relationshiplist
def wordrelationships(relationshiplist):
'''Generate a Word relationships file'''
# Default list of relationships
# FIXME: using string hack instead of making element
#relationships = makeelement('Relationships', nsprefix='pr')
relationships = etree.fromstring(
'<Relationships xmlns="http://schemas.openxmlformats.org/package/2006'
'/relationships"></Relationships>')
count = 0
for relationship in relationshiplist:
# Relationship IDs (rId) start at 1.
rel_elm = makeelement('Relationship', nsprefix=None,
attributes={'Id': 'rId'+str(count+1),
'Type': relationship[0],
'Target': relationship[1]}
)
relationships.append(rel_elm)
count += 1
return relationships
|
nephila/djangocms-blog
|
djangocms_blog/models.py
|
BlogMetaMixin.get_meta_attribute
|
python
|
def get_meta_attribute(self, param):
return self._get_meta_value(param, getattr(self.app_config, param)) or ''
|
Retrieves django-meta attributes from apphook config instance
:param param: django-meta attribute passed as key
|
train
|
https://github.com/nephila/djangocms-blog/blob/3fdfbd4ba48947df0ee4c6d42e3a1c812b6dd95d/djangocms_blog/models.py#L58-L63
| null |
class BlogMetaMixin(ModelMeta):
def get_locale(self):
return self.get_current_language()
def get_full_url(self):
"""
Return the url with protocol and domain url
"""
return self.build_absolute_uri(self.get_absolute_url())
|
nephila/djangocms-blog
|
djangocms_blog/admin.py
|
PostAdmin.make_published
|
python
|
def make_published(self, request, queryset):
cnt1 = queryset.filter(
date_published__isnull=True,
publish=False,
).update(date_published=timezone.now(), publish=True)
cnt2 = queryset.filter(
date_published__isnull=False,
publish=False,
).update(publish=True)
messages.add_message(
request, messages.INFO,
__('%(updates)d entry published.',
'%(updates)d entries published.', cnt1+cnt2) % {
'updates': cnt1+cnt2, })
|
Bulk action to mark selected posts as published. If
the date_published field is empty the current time is
saved as date_published.
queryset must not be empty (ensured by DjangoCMS).
|
train
|
https://github.com/nephila/djangocms-blog/blob/3fdfbd4ba48947df0ee4c6d42e3a1c812b6dd95d/djangocms_blog/admin.py#L121-L139
| null |
class PostAdmin(PlaceholderAdminMixin, FrontendEditableAdminMixin,
ModelAppHookConfig, TranslatableAdmin):
form = PostAdminForm
list_display = [
'title', 'author', 'date_published', 'app_config', 'all_languages_column',
'date_published_end'
]
search_fields = ('translations__title',)
date_hierarchy = 'date_published'
raw_id_fields = ['author']
frontend_editable_fields = ('title', 'abstract', 'post_text')
enhance_exclude = ('main_image', 'tags')
actions = [
'make_published',
'make_unpublished',
'enable_comments',
'disable_comments',
]
if apps.is_installed('djangocms_blog.liveblog'):
actions += ['enable_liveblog', 'disable_liveblog']
_fieldsets = [
(None, {
'fields': [
['title', 'subtitle', 'publish'],
['categories', 'app_config']
]
}),
(None, {
'fields': [[]]
}),
(_('Info'), {
'fields': [['slug', 'tags'],
['date_published', 'date_published_end', 'date_featured'],
['enable_comments']],
'classes': ('collapse',)
}),
(_('Images'), {
'fields': [['main_image', 'main_image_thumbnail', 'main_image_full']],
'classes': ('collapse',)
}),
(_('SEO'), {
'fields': [['meta_description', 'meta_title', 'meta_keywords']],
'classes': ('collapse',)
}),
]
app_config_values = {
'default_published': 'publish'
}
_sites = None
# Bulk actions for post admin
def make_unpublished(self, request, queryset):
""" Bulk action to mark selected posts as UNpublished.
queryset must not be empty (ensured by DjangoCMS).
"""
updates = queryset.filter(publish=True)\
.update(publish=False)
messages.add_message(
request, messages.INFO,
__('%(updates)d entry unpublished.',
'%(updates)d entries unpublished.', updates) % {
'updates': updates, })
def enable_comments(self, request, queryset):
""" Bulk action to enable comments for selected posts.
queryset must not be empty (ensured by DjangoCMS).
"""
updates = queryset.filter(enable_comments=False)\
.update(enable_comments=True)
messages.add_message(
request, messages.INFO,
__('Comments for %(updates)d entry enabled.',
'Comments for %(updates)d entries enabled', updates) % {
'updates': updates, })
def disable_comments(self, request, queryset):
""" Bulk action to disable comments for selected posts.
queryset must not be empty (ensured by DjangoCMS).
"""
updates = queryset.filter(enable_comments=True)\
.update(enable_comments=False)
messages.add_message(
request, messages.INFO,
__('Comments for %(updates)d entry disabled.',
'Comments for %(updates)d entries disabled.', updates) % {
'updates': updates, })
def enable_liveblog(self, request, queryset):
""" Bulk action to enable comments for selected posts.
queryset must not be empty (ensured by DjangoCMS).
"""
updates = queryset.filter(enable_liveblog=False)\
.update(enable_liveblog=True)
messages.add_message(
request, messages.INFO,
__('Liveblog for %(updates)d entry enabled.',
'Liveblog for %(updates)d entries enabled.', updates) % {
'updates': updates, })
def disable_liveblog(self, request, queryset):
""" Bulk action to disable comments for selected posts.
queryset must not be empty (ensured by DjangoCMS).
"""
updates = queryset.filter(enable_liveblog=True)\
.update(enable_liveblog=False)
messages.add_message(
request, messages.INFO,
__('Liveblog for %(updates)d entry enabled.',
'Liveblog for %(updates)d entries enabled.') % {
'updates': updates, })
# Make bulk action menu entries localizable
make_published.short_description = _("Publish selection")
make_unpublished.short_description = _("Unpublish selection")
enable_comments.short_description = _("Enable comments for selection")
disable_comments.short_description = _("Disable comments for selection ")
enable_liveblog.short_description = _("Enable liveblog for selection")
disable_liveblog.short_description = _("Disable liveblog for selection ")
def get_list_filter(self, request):
filters = ['app_config', 'publish', 'date_published']
if get_setting('MULTISITE'):
filters.append(SiteListFilter)
try:
from taggit_helpers.admin import TaggitListFilter
filters.append(TaggitListFilter)
except ImportError: # pragma: no cover
try:
from taggit_helpers import TaggitListFilter
filters.append(TaggitListFilter)
except ImportError:
pass
return filters
def get_urls(self):
"""
Customize the modeladmin urls
"""
urls = [
url(r'^publish/([0-9]+)/$', self.admin_site.admin_view(self.publish_post),
name='djangocms_blog_publish_article'),
]
urls.extend(super(PostAdmin, self).get_urls())
return urls
def post_add_plugin(self, request, obj1, obj2=None):
if isinstance(obj1, CMSPlugin):
plugin = obj1
elif isinstance(obj2, CMSPlugin):
plugin = obj2
if plugin.plugin_type in get_setting('LIVEBLOG_PLUGINS'):
plugin = plugin.move(plugin.get_siblings().first(), 'first-sibling')
if isinstance(obj1, CMSPlugin):
return super(PostAdmin, self).post_add_plugin(request, plugin)
elif isinstance(obj2, CMSPlugin):
return super(PostAdmin, self).post_add_plugin(request, obj1, plugin)
def publish_post(self, request, pk):
"""
Admin view to publish a single post
:param request: request
:param pk: primary key of the post to publish
:return: Redirect to the post itself (if found) or fallback urls
"""
language = get_language_from_request(request, check_path=True)
try:
post = Post.objects.get(pk=int(pk))
post.publish = True
post.save()
return HttpResponseRedirect(post.get_absolute_url(language))
except Exception:
try:
return HttpResponseRedirect(request.META['HTTP_REFERER'])
except KeyError:
return HttpResponseRedirect(reverse('djangocms_blog:posts-latest'))
def has_restricted_sites(self, request):
"""
Whether the current user has permission on one site only
:param request: current request
:return: boolean: user has permission on only one site
"""
sites = self.get_restricted_sites(request)
return sites and sites.count() == 1
def get_restricted_sites(self, request):
"""
The sites on which the user has permission on.
To return the permissions, the method check for the ``get_sites``
method on the user instance (e.g.: ``return request.user.get_sites()``)
which must return the queryset of enabled sites.
If the attribute does not exists, the user is considered enabled
for all the websites.
:param request: current request
:return: boolean or a queryset of available sites
"""
try:
return request.user.get_sites()
except AttributeError: # pragma: no cover
return Site.objects.none()
def _set_config_defaults(self, request, form, obj=None):
form = super(PostAdmin, self)._set_config_defaults(request, form, obj)
sites = self.get_restricted_sites(request)
if 'sites' in form.base_fields and sites.exists():
form.base_fields['sites'].queryset = self.get_restricted_sites(request).all()
return form
def get_fieldsets(self, request, obj=None):
"""
Customize the fieldsets according to the app settings
:param request: request
:param obj: post
:return: fieldsets configuration
"""
app_config_default = self._app_config_select(request, obj)
if app_config_default is None and request.method == 'GET':
return super(PostAdmin, self).get_fieldsets(request, obj)
if not obj:
config = app_config_default
else:
config = obj.app_config
fsets = deepcopy(self._fieldsets)
if config:
abstract = bool(config.use_abstract)
placeholder = bool(config.use_placeholder)
related = bool(config.use_related)
else:
abstract = get_setting('USE_ABSTRACT')
placeholder = get_setting('USE_PLACEHOLDER')
related = get_setting('USE_RELATED')
if abstract:
fsets[0][1]['fields'].append('abstract')
if not placeholder:
fsets[0][1]['fields'].append('post_text')
if get_setting('MULTISITE') and not self.has_restricted_sites(request):
fsets[1][1]['fields'][0].append('sites')
if request.user.is_superuser:
fsets[1][1]['fields'][0].append('author')
if apps.is_installed('djangocms_blog.liveblog'):
fsets[2][1]['fields'][2].append('enable_liveblog')
filter_function = get_setting('ADMIN_POST_FIELDSET_FILTER')
if related and Post.objects.namespace(config.namespace).active_translations().exists():
fsets[1][1]['fields'][0].append('related')
if callable(filter_function):
fsets = filter_function(fsets, request, obj=obj)
return fsets
def get_prepopulated_fields(self, request, obj=None):
return {'slug': ('title',)}
def save_model(self, request, obj, form, change):
obj._set_default_author(request.user)
super(PostAdmin, self).save_model(request, obj, form, change)
def get_queryset(self, request):
qs = super(PostAdmin, self).get_queryset(request)
sites = self.get_restricted_sites(request)
if sites.exists():
pks = list(sites.all().values_list('pk', flat=True))
qs = qs.filter(sites__in=pks)
return qs.distinct()
def save_related(self, request, form, formsets, change):
if self.get_restricted_sites(request).exists():
if 'sites' in form.cleaned_data:
form_sites = form.cleaned_data.get('sites', [])
removed = set(
self.get_restricted_sites(request).all()
).difference(form_sites)
diff_original = set(
form.instance.sites.all()
).difference(removed).union(form_sites)
form.cleaned_data['sites'] = diff_original
else:
form.instance.sites.add(
*self.get_restricted_sites(request).all().values_list('pk', flat=True)
)
super(PostAdmin, self).save_related(request, form, formsets, change)
class Media:
css = {
'all': ('%sdjangocms_blog/css/%s' % (settings.STATIC_URL, 'djangocms_blog_admin.css'),)
}
|
nephila/djangocms-blog
|
djangocms_blog/admin.py
|
PostAdmin.make_unpublished
|
python
|
def make_unpublished(self, request, queryset):
updates = queryset.filter(publish=True)\
.update(publish=False)
messages.add_message(
request, messages.INFO,
__('%(updates)d entry unpublished.',
'%(updates)d entries unpublished.', updates) % {
'updates': updates, })
|
Bulk action to mark selected posts as UNpublished.
queryset must not be empty (ensured by DjangoCMS).
|
train
|
https://github.com/nephila/djangocms-blog/blob/3fdfbd4ba48947df0ee4c6d42e3a1c812b6dd95d/djangocms_blog/admin.py#L141-L151
| null |
class PostAdmin(PlaceholderAdminMixin, FrontendEditableAdminMixin,
ModelAppHookConfig, TranslatableAdmin):
form = PostAdminForm
list_display = [
'title', 'author', 'date_published', 'app_config', 'all_languages_column',
'date_published_end'
]
search_fields = ('translations__title',)
date_hierarchy = 'date_published'
raw_id_fields = ['author']
frontend_editable_fields = ('title', 'abstract', 'post_text')
enhance_exclude = ('main_image', 'tags')
actions = [
'make_published',
'make_unpublished',
'enable_comments',
'disable_comments',
]
if apps.is_installed('djangocms_blog.liveblog'):
actions += ['enable_liveblog', 'disable_liveblog']
_fieldsets = [
(None, {
'fields': [
['title', 'subtitle', 'publish'],
['categories', 'app_config']
]
}),
(None, {
'fields': [[]]
}),
(_('Info'), {
'fields': [['slug', 'tags'],
['date_published', 'date_published_end', 'date_featured'],
['enable_comments']],
'classes': ('collapse',)
}),
(_('Images'), {
'fields': [['main_image', 'main_image_thumbnail', 'main_image_full']],
'classes': ('collapse',)
}),
(_('SEO'), {
'fields': [['meta_description', 'meta_title', 'meta_keywords']],
'classes': ('collapse',)
}),
]
app_config_values = {
'default_published': 'publish'
}
_sites = None
# Bulk actions for post admin
def make_published(self, request, queryset):
""" Bulk action to mark selected posts as published. If
the date_published field is empty the current time is
saved as date_published.
queryset must not be empty (ensured by DjangoCMS).
"""
cnt1 = queryset.filter(
date_published__isnull=True,
publish=False,
).update(date_published=timezone.now(), publish=True)
cnt2 = queryset.filter(
date_published__isnull=False,
publish=False,
).update(publish=True)
messages.add_message(
request, messages.INFO,
__('%(updates)d entry published.',
'%(updates)d entries published.', cnt1+cnt2) % {
'updates': cnt1+cnt2, })
def enable_comments(self, request, queryset):
""" Bulk action to enable comments for selected posts.
queryset must not be empty (ensured by DjangoCMS).
"""
updates = queryset.filter(enable_comments=False)\
.update(enable_comments=True)
messages.add_message(
request, messages.INFO,
__('Comments for %(updates)d entry enabled.',
'Comments for %(updates)d entries enabled', updates) % {
'updates': updates, })
def disable_comments(self, request, queryset):
""" Bulk action to disable comments for selected posts.
queryset must not be empty (ensured by DjangoCMS).
"""
updates = queryset.filter(enable_comments=True)\
.update(enable_comments=False)
messages.add_message(
request, messages.INFO,
__('Comments for %(updates)d entry disabled.',
'Comments for %(updates)d entries disabled.', updates) % {
'updates': updates, })
def enable_liveblog(self, request, queryset):
""" Bulk action to enable comments for selected posts.
queryset must not be empty (ensured by DjangoCMS).
"""
updates = queryset.filter(enable_liveblog=False)\
.update(enable_liveblog=True)
messages.add_message(
request, messages.INFO,
__('Liveblog for %(updates)d entry enabled.',
'Liveblog for %(updates)d entries enabled.', updates) % {
'updates': updates, })
def disable_liveblog(self, request, queryset):
""" Bulk action to disable comments for selected posts.
queryset must not be empty (ensured by DjangoCMS).
"""
updates = queryset.filter(enable_liveblog=True)\
.update(enable_liveblog=False)
messages.add_message(
request, messages.INFO,
__('Liveblog for %(updates)d entry enabled.',
'Liveblog for %(updates)d entries enabled.') % {
'updates': updates, })
# Make bulk action menu entries localizable
make_published.short_description = _("Publish selection")
make_unpublished.short_description = _("Unpublish selection")
enable_comments.short_description = _("Enable comments for selection")
disable_comments.short_description = _("Disable comments for selection ")
enable_liveblog.short_description = _("Enable liveblog for selection")
disable_liveblog.short_description = _("Disable liveblog for selection ")
def get_list_filter(self, request):
filters = ['app_config', 'publish', 'date_published']
if get_setting('MULTISITE'):
filters.append(SiteListFilter)
try:
from taggit_helpers.admin import TaggitListFilter
filters.append(TaggitListFilter)
except ImportError: # pragma: no cover
try:
from taggit_helpers import TaggitListFilter
filters.append(TaggitListFilter)
except ImportError:
pass
return filters
def get_urls(self):
"""
Customize the modeladmin urls
"""
urls = [
url(r'^publish/([0-9]+)/$', self.admin_site.admin_view(self.publish_post),
name='djangocms_blog_publish_article'),
]
urls.extend(super(PostAdmin, self).get_urls())
return urls
def post_add_plugin(self, request, obj1, obj2=None):
if isinstance(obj1, CMSPlugin):
plugin = obj1
elif isinstance(obj2, CMSPlugin):
plugin = obj2
if plugin.plugin_type in get_setting('LIVEBLOG_PLUGINS'):
plugin = plugin.move(plugin.get_siblings().first(), 'first-sibling')
if isinstance(obj1, CMSPlugin):
return super(PostAdmin, self).post_add_plugin(request, plugin)
elif isinstance(obj2, CMSPlugin):
return super(PostAdmin, self).post_add_plugin(request, obj1, plugin)
def publish_post(self, request, pk):
"""
Admin view to publish a single post
:param request: request
:param pk: primary key of the post to publish
:return: Redirect to the post itself (if found) or fallback urls
"""
language = get_language_from_request(request, check_path=True)
try:
post = Post.objects.get(pk=int(pk))
post.publish = True
post.save()
return HttpResponseRedirect(post.get_absolute_url(language))
except Exception:
try:
return HttpResponseRedirect(request.META['HTTP_REFERER'])
except KeyError:
return HttpResponseRedirect(reverse('djangocms_blog:posts-latest'))
def has_restricted_sites(self, request):
"""
Whether the current user has permission on one site only
:param request: current request
:return: boolean: user has permission on only one site
"""
sites = self.get_restricted_sites(request)
return sites and sites.count() == 1
def get_restricted_sites(self, request):
"""
The sites on which the user has permission on.
To return the permissions, the method check for the ``get_sites``
method on the user instance (e.g.: ``return request.user.get_sites()``)
which must return the queryset of enabled sites.
If the attribute does not exists, the user is considered enabled
for all the websites.
:param request: current request
:return: boolean or a queryset of available sites
"""
try:
return request.user.get_sites()
except AttributeError: # pragma: no cover
return Site.objects.none()
def _set_config_defaults(self, request, form, obj=None):
form = super(PostAdmin, self)._set_config_defaults(request, form, obj)
sites = self.get_restricted_sites(request)
if 'sites' in form.base_fields and sites.exists():
form.base_fields['sites'].queryset = self.get_restricted_sites(request).all()
return form
def get_fieldsets(self, request, obj=None):
"""
Customize the fieldsets according to the app settings
:param request: request
:param obj: post
:return: fieldsets configuration
"""
app_config_default = self._app_config_select(request, obj)
if app_config_default is None and request.method == 'GET':
return super(PostAdmin, self).get_fieldsets(request, obj)
if not obj:
config = app_config_default
else:
config = obj.app_config
fsets = deepcopy(self._fieldsets)
if config:
abstract = bool(config.use_abstract)
placeholder = bool(config.use_placeholder)
related = bool(config.use_related)
else:
abstract = get_setting('USE_ABSTRACT')
placeholder = get_setting('USE_PLACEHOLDER')
related = get_setting('USE_RELATED')
if abstract:
fsets[0][1]['fields'].append('abstract')
if not placeholder:
fsets[0][1]['fields'].append('post_text')
if get_setting('MULTISITE') and not self.has_restricted_sites(request):
fsets[1][1]['fields'][0].append('sites')
if request.user.is_superuser:
fsets[1][1]['fields'][0].append('author')
if apps.is_installed('djangocms_blog.liveblog'):
fsets[2][1]['fields'][2].append('enable_liveblog')
filter_function = get_setting('ADMIN_POST_FIELDSET_FILTER')
if related and Post.objects.namespace(config.namespace).active_translations().exists():
fsets[1][1]['fields'][0].append('related')
if callable(filter_function):
fsets = filter_function(fsets, request, obj=obj)
return fsets
def get_prepopulated_fields(self, request, obj=None):
return {'slug': ('title',)}
def save_model(self, request, obj, form, change):
obj._set_default_author(request.user)
super(PostAdmin, self).save_model(request, obj, form, change)
def get_queryset(self, request):
qs = super(PostAdmin, self).get_queryset(request)
sites = self.get_restricted_sites(request)
if sites.exists():
pks = list(sites.all().values_list('pk', flat=True))
qs = qs.filter(sites__in=pks)
return qs.distinct()
def save_related(self, request, form, formsets, change):
if self.get_restricted_sites(request).exists():
if 'sites' in form.cleaned_data:
form_sites = form.cleaned_data.get('sites', [])
removed = set(
self.get_restricted_sites(request).all()
).difference(form_sites)
diff_original = set(
form.instance.sites.all()
).difference(removed).union(form_sites)
form.cleaned_data['sites'] = diff_original
else:
form.instance.sites.add(
*self.get_restricted_sites(request).all().values_list('pk', flat=True)
)
super(PostAdmin, self).save_related(request, form, formsets, change)
class Media:
css = {
'all': ('%sdjangocms_blog/css/%s' % (settings.STATIC_URL, 'djangocms_blog_admin.css'),)
}
|
nephila/djangocms-blog
|
djangocms_blog/admin.py
|
PostAdmin.get_urls
|
python
|
def get_urls(self):
urls = [
url(r'^publish/([0-9]+)/$', self.admin_site.admin_view(self.publish_post),
name='djangocms_blog_publish_article'),
]
urls.extend(super(PostAdmin, self).get_urls())
return urls
|
Customize the modeladmin urls
|
train
|
https://github.com/nephila/djangocms-blog/blob/3fdfbd4ba48947df0ee4c6d42e3a1c812b6dd95d/djangocms_blog/admin.py#L224-L233
| null |
class PostAdmin(PlaceholderAdminMixin, FrontendEditableAdminMixin,
ModelAppHookConfig, TranslatableAdmin):
form = PostAdminForm
list_display = [
'title', 'author', 'date_published', 'app_config', 'all_languages_column',
'date_published_end'
]
search_fields = ('translations__title',)
date_hierarchy = 'date_published'
raw_id_fields = ['author']
frontend_editable_fields = ('title', 'abstract', 'post_text')
enhance_exclude = ('main_image', 'tags')
actions = [
'make_published',
'make_unpublished',
'enable_comments',
'disable_comments',
]
if apps.is_installed('djangocms_blog.liveblog'):
actions += ['enable_liveblog', 'disable_liveblog']
_fieldsets = [
(None, {
'fields': [
['title', 'subtitle', 'publish'],
['categories', 'app_config']
]
}),
(None, {
'fields': [[]]
}),
(_('Info'), {
'fields': [['slug', 'tags'],
['date_published', 'date_published_end', 'date_featured'],
['enable_comments']],
'classes': ('collapse',)
}),
(_('Images'), {
'fields': [['main_image', 'main_image_thumbnail', 'main_image_full']],
'classes': ('collapse',)
}),
(_('SEO'), {
'fields': [['meta_description', 'meta_title', 'meta_keywords']],
'classes': ('collapse',)
}),
]
app_config_values = {
'default_published': 'publish'
}
_sites = None
# Bulk actions for post admin
def make_published(self, request, queryset):
""" Bulk action to mark selected posts as published. If
the date_published field is empty the current time is
saved as date_published.
queryset must not be empty (ensured by DjangoCMS).
"""
cnt1 = queryset.filter(
date_published__isnull=True,
publish=False,
).update(date_published=timezone.now(), publish=True)
cnt2 = queryset.filter(
date_published__isnull=False,
publish=False,
).update(publish=True)
messages.add_message(
request, messages.INFO,
__('%(updates)d entry published.',
'%(updates)d entries published.', cnt1+cnt2) % {
'updates': cnt1+cnt2, })
def make_unpublished(self, request, queryset):
""" Bulk action to mark selected posts as UNpublished.
queryset must not be empty (ensured by DjangoCMS).
"""
updates = queryset.filter(publish=True)\
.update(publish=False)
messages.add_message(
request, messages.INFO,
__('%(updates)d entry unpublished.',
'%(updates)d entries unpublished.', updates) % {
'updates': updates, })
def enable_comments(self, request, queryset):
""" Bulk action to enable comments for selected posts.
queryset must not be empty (ensured by DjangoCMS).
"""
updates = queryset.filter(enable_comments=False)\
.update(enable_comments=True)
messages.add_message(
request, messages.INFO,
__('Comments for %(updates)d entry enabled.',
'Comments for %(updates)d entries enabled', updates) % {
'updates': updates, })
def disable_comments(self, request, queryset):
""" Bulk action to disable comments for selected posts.
queryset must not be empty (ensured by DjangoCMS).
"""
updates = queryset.filter(enable_comments=True)\
.update(enable_comments=False)
messages.add_message(
request, messages.INFO,
__('Comments for %(updates)d entry disabled.',
'Comments for %(updates)d entries disabled.', updates) % {
'updates': updates, })
def enable_liveblog(self, request, queryset):
""" Bulk action to enable comments for selected posts.
queryset must not be empty (ensured by DjangoCMS).
"""
updates = queryset.filter(enable_liveblog=False)\
.update(enable_liveblog=True)
messages.add_message(
request, messages.INFO,
__('Liveblog for %(updates)d entry enabled.',
'Liveblog for %(updates)d entries enabled.', updates) % {
'updates': updates, })
def disable_liveblog(self, request, queryset):
""" Bulk action to disable comments for selected posts.
queryset must not be empty (ensured by DjangoCMS).
"""
updates = queryset.filter(enable_liveblog=True)\
.update(enable_liveblog=False)
messages.add_message(
request, messages.INFO,
__('Liveblog for %(updates)d entry enabled.',
'Liveblog for %(updates)d entries enabled.') % {
'updates': updates, })
# Make bulk action menu entries localizable
make_published.short_description = _("Publish selection")
make_unpublished.short_description = _("Unpublish selection")
enable_comments.short_description = _("Enable comments for selection")
disable_comments.short_description = _("Disable comments for selection ")
enable_liveblog.short_description = _("Enable liveblog for selection")
disable_liveblog.short_description = _("Disable liveblog for selection ")
def get_list_filter(self, request):
filters = ['app_config', 'publish', 'date_published']
if get_setting('MULTISITE'):
filters.append(SiteListFilter)
try:
from taggit_helpers.admin import TaggitListFilter
filters.append(TaggitListFilter)
except ImportError: # pragma: no cover
try:
from taggit_helpers import TaggitListFilter
filters.append(TaggitListFilter)
except ImportError:
pass
return filters
def post_add_plugin(self, request, obj1, obj2=None):
if isinstance(obj1, CMSPlugin):
plugin = obj1
elif isinstance(obj2, CMSPlugin):
plugin = obj2
if plugin.plugin_type in get_setting('LIVEBLOG_PLUGINS'):
plugin = plugin.move(plugin.get_siblings().first(), 'first-sibling')
if isinstance(obj1, CMSPlugin):
return super(PostAdmin, self).post_add_plugin(request, plugin)
elif isinstance(obj2, CMSPlugin):
return super(PostAdmin, self).post_add_plugin(request, obj1, plugin)
def publish_post(self, request, pk):
"""
Admin view to publish a single post
:param request: request
:param pk: primary key of the post to publish
:return: Redirect to the post itself (if found) or fallback urls
"""
language = get_language_from_request(request, check_path=True)
try:
post = Post.objects.get(pk=int(pk))
post.publish = True
post.save()
return HttpResponseRedirect(post.get_absolute_url(language))
except Exception:
try:
return HttpResponseRedirect(request.META['HTTP_REFERER'])
except KeyError:
return HttpResponseRedirect(reverse('djangocms_blog:posts-latest'))
def has_restricted_sites(self, request):
"""
Whether the current user has permission on one site only
:param request: current request
:return: boolean: user has permission on only one site
"""
sites = self.get_restricted_sites(request)
return sites and sites.count() == 1
def get_restricted_sites(self, request):
"""
The sites on which the user has permission on.
To return the permissions, the method check for the ``get_sites``
method on the user instance (e.g.: ``return request.user.get_sites()``)
which must return the queryset of enabled sites.
If the attribute does not exists, the user is considered enabled
for all the websites.
:param request: current request
:return: boolean or a queryset of available sites
"""
try:
return request.user.get_sites()
except AttributeError: # pragma: no cover
return Site.objects.none()
def _set_config_defaults(self, request, form, obj=None):
form = super(PostAdmin, self)._set_config_defaults(request, form, obj)
sites = self.get_restricted_sites(request)
if 'sites' in form.base_fields and sites.exists():
form.base_fields['sites'].queryset = self.get_restricted_sites(request).all()
return form
def get_fieldsets(self, request, obj=None):
"""
Customize the fieldsets according to the app settings
:param request: request
:param obj: post
:return: fieldsets configuration
"""
app_config_default = self._app_config_select(request, obj)
if app_config_default is None and request.method == 'GET':
return super(PostAdmin, self).get_fieldsets(request, obj)
if not obj:
config = app_config_default
else:
config = obj.app_config
fsets = deepcopy(self._fieldsets)
if config:
abstract = bool(config.use_abstract)
placeholder = bool(config.use_placeholder)
related = bool(config.use_related)
else:
abstract = get_setting('USE_ABSTRACT')
placeholder = get_setting('USE_PLACEHOLDER')
related = get_setting('USE_RELATED')
if abstract:
fsets[0][1]['fields'].append('abstract')
if not placeholder:
fsets[0][1]['fields'].append('post_text')
if get_setting('MULTISITE') and not self.has_restricted_sites(request):
fsets[1][1]['fields'][0].append('sites')
if request.user.is_superuser:
fsets[1][1]['fields'][0].append('author')
if apps.is_installed('djangocms_blog.liveblog'):
fsets[2][1]['fields'][2].append('enable_liveblog')
filter_function = get_setting('ADMIN_POST_FIELDSET_FILTER')
if related and Post.objects.namespace(config.namespace).active_translations().exists():
fsets[1][1]['fields'][0].append('related')
if callable(filter_function):
fsets = filter_function(fsets, request, obj=obj)
return fsets
def get_prepopulated_fields(self, request, obj=None):
return {'slug': ('title',)}
def save_model(self, request, obj, form, change):
obj._set_default_author(request.user)
super(PostAdmin, self).save_model(request, obj, form, change)
def get_queryset(self, request):
qs = super(PostAdmin, self).get_queryset(request)
sites = self.get_restricted_sites(request)
if sites.exists():
pks = list(sites.all().values_list('pk', flat=True))
qs = qs.filter(sites__in=pks)
return qs.distinct()
def save_related(self, request, form, formsets, change):
if self.get_restricted_sites(request).exists():
if 'sites' in form.cleaned_data:
form_sites = form.cleaned_data.get('sites', [])
removed = set(
self.get_restricted_sites(request).all()
).difference(form_sites)
diff_original = set(
form.instance.sites.all()
).difference(removed).union(form_sites)
form.cleaned_data['sites'] = diff_original
else:
form.instance.sites.add(
*self.get_restricted_sites(request).all().values_list('pk', flat=True)
)
super(PostAdmin, self).save_related(request, form, formsets, change)
class Media:
css = {
'all': ('%sdjangocms_blog/css/%s' % (settings.STATIC_URL, 'djangocms_blog_admin.css'),)
}
|
nephila/djangocms-blog
|
djangocms_blog/admin.py
|
PostAdmin.publish_post
|
python
|
def publish_post(self, request, pk):
language = get_language_from_request(request, check_path=True)
try:
post = Post.objects.get(pk=int(pk))
post.publish = True
post.save()
return HttpResponseRedirect(post.get_absolute_url(language))
except Exception:
try:
return HttpResponseRedirect(request.META['HTTP_REFERER'])
except KeyError:
return HttpResponseRedirect(reverse('djangocms_blog:posts-latest'))
|
Admin view to publish a single post
:param request: request
:param pk: primary key of the post to publish
:return: Redirect to the post itself (if found) or fallback urls
|
train
|
https://github.com/nephila/djangocms-blog/blob/3fdfbd4ba48947df0ee4c6d42e3a1c812b6dd95d/djangocms_blog/admin.py#L247-L265
| null |
class PostAdmin(PlaceholderAdminMixin, FrontendEditableAdminMixin,
ModelAppHookConfig, TranslatableAdmin):
form = PostAdminForm
list_display = [
'title', 'author', 'date_published', 'app_config', 'all_languages_column',
'date_published_end'
]
search_fields = ('translations__title',)
date_hierarchy = 'date_published'
raw_id_fields = ['author']
frontend_editable_fields = ('title', 'abstract', 'post_text')
enhance_exclude = ('main_image', 'tags')
actions = [
'make_published',
'make_unpublished',
'enable_comments',
'disable_comments',
]
if apps.is_installed('djangocms_blog.liveblog'):
actions += ['enable_liveblog', 'disable_liveblog']
_fieldsets = [
(None, {
'fields': [
['title', 'subtitle', 'publish'],
['categories', 'app_config']
]
}),
(None, {
'fields': [[]]
}),
(_('Info'), {
'fields': [['slug', 'tags'],
['date_published', 'date_published_end', 'date_featured'],
['enable_comments']],
'classes': ('collapse',)
}),
(_('Images'), {
'fields': [['main_image', 'main_image_thumbnail', 'main_image_full']],
'classes': ('collapse',)
}),
(_('SEO'), {
'fields': [['meta_description', 'meta_title', 'meta_keywords']],
'classes': ('collapse',)
}),
]
app_config_values = {
'default_published': 'publish'
}
_sites = None
# Bulk actions for post admin
def make_published(self, request, queryset):
""" Bulk action to mark selected posts as published. If
the date_published field is empty the current time is
saved as date_published.
queryset must not be empty (ensured by DjangoCMS).
"""
cnt1 = queryset.filter(
date_published__isnull=True,
publish=False,
).update(date_published=timezone.now(), publish=True)
cnt2 = queryset.filter(
date_published__isnull=False,
publish=False,
).update(publish=True)
messages.add_message(
request, messages.INFO,
__('%(updates)d entry published.',
'%(updates)d entries published.', cnt1+cnt2) % {
'updates': cnt1+cnt2, })
def make_unpublished(self, request, queryset):
""" Bulk action to mark selected posts as UNpublished.
queryset must not be empty (ensured by DjangoCMS).
"""
updates = queryset.filter(publish=True)\
.update(publish=False)
messages.add_message(
request, messages.INFO,
__('%(updates)d entry unpublished.',
'%(updates)d entries unpublished.', updates) % {
'updates': updates, })
def enable_comments(self, request, queryset):
""" Bulk action to enable comments for selected posts.
queryset must not be empty (ensured by DjangoCMS).
"""
updates = queryset.filter(enable_comments=False)\
.update(enable_comments=True)
messages.add_message(
request, messages.INFO,
__('Comments for %(updates)d entry enabled.',
'Comments for %(updates)d entries enabled', updates) % {
'updates': updates, })
def disable_comments(self, request, queryset):
""" Bulk action to disable comments for selected posts.
queryset must not be empty (ensured by DjangoCMS).
"""
updates = queryset.filter(enable_comments=True)\
.update(enable_comments=False)
messages.add_message(
request, messages.INFO,
__('Comments for %(updates)d entry disabled.',
'Comments for %(updates)d entries disabled.', updates) % {
'updates': updates, })
def enable_liveblog(self, request, queryset):
""" Bulk action to enable comments for selected posts.
queryset must not be empty (ensured by DjangoCMS).
"""
updates = queryset.filter(enable_liveblog=False)\
.update(enable_liveblog=True)
messages.add_message(
request, messages.INFO,
__('Liveblog for %(updates)d entry enabled.',
'Liveblog for %(updates)d entries enabled.', updates) % {
'updates': updates, })
def disable_liveblog(self, request, queryset):
""" Bulk action to disable comments for selected posts.
queryset must not be empty (ensured by DjangoCMS).
"""
updates = queryset.filter(enable_liveblog=True)\
.update(enable_liveblog=False)
messages.add_message(
request, messages.INFO,
__('Liveblog for %(updates)d entry enabled.',
'Liveblog for %(updates)d entries enabled.') % {
'updates': updates, })
# Make bulk action menu entries localizable
make_published.short_description = _("Publish selection")
make_unpublished.short_description = _("Unpublish selection")
enable_comments.short_description = _("Enable comments for selection")
disable_comments.short_description = _("Disable comments for selection ")
enable_liveblog.short_description = _("Enable liveblog for selection")
disable_liveblog.short_description = _("Disable liveblog for selection ")
def get_list_filter(self, request):
filters = ['app_config', 'publish', 'date_published']
if get_setting('MULTISITE'):
filters.append(SiteListFilter)
try:
from taggit_helpers.admin import TaggitListFilter
filters.append(TaggitListFilter)
except ImportError: # pragma: no cover
try:
from taggit_helpers import TaggitListFilter
filters.append(TaggitListFilter)
except ImportError:
pass
return filters
def get_urls(self):
"""
Customize the modeladmin urls
"""
urls = [
url(r'^publish/([0-9]+)/$', self.admin_site.admin_view(self.publish_post),
name='djangocms_blog_publish_article'),
]
urls.extend(super(PostAdmin, self).get_urls())
return urls
def post_add_plugin(self, request, obj1, obj2=None):
if isinstance(obj1, CMSPlugin):
plugin = obj1
elif isinstance(obj2, CMSPlugin):
plugin = obj2
if plugin.plugin_type in get_setting('LIVEBLOG_PLUGINS'):
plugin = plugin.move(plugin.get_siblings().first(), 'first-sibling')
if isinstance(obj1, CMSPlugin):
return super(PostAdmin, self).post_add_plugin(request, plugin)
elif isinstance(obj2, CMSPlugin):
return super(PostAdmin, self).post_add_plugin(request, obj1, plugin)
def has_restricted_sites(self, request):
"""
Whether the current user has permission on one site only
:param request: current request
:return: boolean: user has permission on only one site
"""
sites = self.get_restricted_sites(request)
return sites and sites.count() == 1
def get_restricted_sites(self, request):
"""
The sites on which the user has permission on.
To return the permissions, the method check for the ``get_sites``
method on the user instance (e.g.: ``return request.user.get_sites()``)
which must return the queryset of enabled sites.
If the attribute does not exists, the user is considered enabled
for all the websites.
:param request: current request
:return: boolean or a queryset of available sites
"""
try:
return request.user.get_sites()
except AttributeError: # pragma: no cover
return Site.objects.none()
def _set_config_defaults(self, request, form, obj=None):
form = super(PostAdmin, self)._set_config_defaults(request, form, obj)
sites = self.get_restricted_sites(request)
if 'sites' in form.base_fields and sites.exists():
form.base_fields['sites'].queryset = self.get_restricted_sites(request).all()
return form
def get_fieldsets(self, request, obj=None):
"""
Customize the fieldsets according to the app settings
:param request: request
:param obj: post
:return: fieldsets configuration
"""
app_config_default = self._app_config_select(request, obj)
if app_config_default is None and request.method == 'GET':
return super(PostAdmin, self).get_fieldsets(request, obj)
if not obj:
config = app_config_default
else:
config = obj.app_config
fsets = deepcopy(self._fieldsets)
if config:
abstract = bool(config.use_abstract)
placeholder = bool(config.use_placeholder)
related = bool(config.use_related)
else:
abstract = get_setting('USE_ABSTRACT')
placeholder = get_setting('USE_PLACEHOLDER')
related = get_setting('USE_RELATED')
if abstract:
fsets[0][1]['fields'].append('abstract')
if not placeholder:
fsets[0][1]['fields'].append('post_text')
if get_setting('MULTISITE') and not self.has_restricted_sites(request):
fsets[1][1]['fields'][0].append('sites')
if request.user.is_superuser:
fsets[1][1]['fields'][0].append('author')
if apps.is_installed('djangocms_blog.liveblog'):
fsets[2][1]['fields'][2].append('enable_liveblog')
filter_function = get_setting('ADMIN_POST_FIELDSET_FILTER')
if related and Post.objects.namespace(config.namespace).active_translations().exists():
fsets[1][1]['fields'][0].append('related')
if callable(filter_function):
fsets = filter_function(fsets, request, obj=obj)
return fsets
def get_prepopulated_fields(self, request, obj=None):
return {'slug': ('title',)}
def save_model(self, request, obj, form, change):
obj._set_default_author(request.user)
super(PostAdmin, self).save_model(request, obj, form, change)
def get_queryset(self, request):
qs = super(PostAdmin, self).get_queryset(request)
sites = self.get_restricted_sites(request)
if sites.exists():
pks = list(sites.all().values_list('pk', flat=True))
qs = qs.filter(sites__in=pks)
return qs.distinct()
def save_related(self, request, form, formsets, change):
if self.get_restricted_sites(request).exists():
if 'sites' in form.cleaned_data:
form_sites = form.cleaned_data.get('sites', [])
removed = set(
self.get_restricted_sites(request).all()
).difference(form_sites)
diff_original = set(
form.instance.sites.all()
).difference(removed).union(form_sites)
form.cleaned_data['sites'] = diff_original
else:
form.instance.sites.add(
*self.get_restricted_sites(request).all().values_list('pk', flat=True)
)
super(PostAdmin, self).save_related(request, form, formsets, change)
class Media:
css = {
'all': ('%sdjangocms_blog/css/%s' % (settings.STATIC_URL, 'djangocms_blog_admin.css'),)
}
|
nephila/djangocms-blog
|
djangocms_blog/admin.py
|
PostAdmin.has_restricted_sites
|
python
|
def has_restricted_sites(self, request):
sites = self.get_restricted_sites(request)
return sites and sites.count() == 1
|
Whether the current user has permission on one site only
:param request: current request
:return: boolean: user has permission on only one site
|
train
|
https://github.com/nephila/djangocms-blog/blob/3fdfbd4ba48947df0ee4c6d42e3a1c812b6dd95d/djangocms_blog/admin.py#L267-L275
| null |
class PostAdmin(PlaceholderAdminMixin, FrontendEditableAdminMixin,
ModelAppHookConfig, TranslatableAdmin):
form = PostAdminForm
list_display = [
'title', 'author', 'date_published', 'app_config', 'all_languages_column',
'date_published_end'
]
search_fields = ('translations__title',)
date_hierarchy = 'date_published'
raw_id_fields = ['author']
frontend_editable_fields = ('title', 'abstract', 'post_text')
enhance_exclude = ('main_image', 'tags')
actions = [
'make_published',
'make_unpublished',
'enable_comments',
'disable_comments',
]
if apps.is_installed('djangocms_blog.liveblog'):
actions += ['enable_liveblog', 'disable_liveblog']
_fieldsets = [
(None, {
'fields': [
['title', 'subtitle', 'publish'],
['categories', 'app_config']
]
}),
(None, {
'fields': [[]]
}),
(_('Info'), {
'fields': [['slug', 'tags'],
['date_published', 'date_published_end', 'date_featured'],
['enable_comments']],
'classes': ('collapse',)
}),
(_('Images'), {
'fields': [['main_image', 'main_image_thumbnail', 'main_image_full']],
'classes': ('collapse',)
}),
(_('SEO'), {
'fields': [['meta_description', 'meta_title', 'meta_keywords']],
'classes': ('collapse',)
}),
]
app_config_values = {
'default_published': 'publish'
}
_sites = None
# Bulk actions for post admin
def make_published(self, request, queryset):
""" Bulk action to mark selected posts as published. If
the date_published field is empty the current time is
saved as date_published.
queryset must not be empty (ensured by DjangoCMS).
"""
cnt1 = queryset.filter(
date_published__isnull=True,
publish=False,
).update(date_published=timezone.now(), publish=True)
cnt2 = queryset.filter(
date_published__isnull=False,
publish=False,
).update(publish=True)
messages.add_message(
request, messages.INFO,
__('%(updates)d entry published.',
'%(updates)d entries published.', cnt1+cnt2) % {
'updates': cnt1+cnt2, })
def make_unpublished(self, request, queryset):
""" Bulk action to mark selected posts as UNpublished.
queryset must not be empty (ensured by DjangoCMS).
"""
updates = queryset.filter(publish=True)\
.update(publish=False)
messages.add_message(
request, messages.INFO,
__('%(updates)d entry unpublished.',
'%(updates)d entries unpublished.', updates) % {
'updates': updates, })
def enable_comments(self, request, queryset):
""" Bulk action to enable comments for selected posts.
queryset must not be empty (ensured by DjangoCMS).
"""
updates = queryset.filter(enable_comments=False)\
.update(enable_comments=True)
messages.add_message(
request, messages.INFO,
__('Comments for %(updates)d entry enabled.',
'Comments for %(updates)d entries enabled', updates) % {
'updates': updates, })
def disable_comments(self, request, queryset):
""" Bulk action to disable comments for selected posts.
queryset must not be empty (ensured by DjangoCMS).
"""
updates = queryset.filter(enable_comments=True)\
.update(enable_comments=False)
messages.add_message(
request, messages.INFO,
__('Comments for %(updates)d entry disabled.',
'Comments for %(updates)d entries disabled.', updates) % {
'updates': updates, })
def enable_liveblog(self, request, queryset):
""" Bulk action to enable comments for selected posts.
queryset must not be empty (ensured by DjangoCMS).
"""
updates = queryset.filter(enable_liveblog=False)\
.update(enable_liveblog=True)
messages.add_message(
request, messages.INFO,
__('Liveblog for %(updates)d entry enabled.',
'Liveblog for %(updates)d entries enabled.', updates) % {
'updates': updates, })
def disable_liveblog(self, request, queryset):
""" Bulk action to disable comments for selected posts.
queryset must not be empty (ensured by DjangoCMS).
"""
updates = queryset.filter(enable_liveblog=True)\
.update(enable_liveblog=False)
messages.add_message(
request, messages.INFO,
__('Liveblog for %(updates)d entry enabled.',
'Liveblog for %(updates)d entries enabled.') % {
'updates': updates, })
# Make bulk action menu entries localizable
make_published.short_description = _("Publish selection")
make_unpublished.short_description = _("Unpublish selection")
enable_comments.short_description = _("Enable comments for selection")
disable_comments.short_description = _("Disable comments for selection ")
enable_liveblog.short_description = _("Enable liveblog for selection")
disable_liveblog.short_description = _("Disable liveblog for selection ")
def get_list_filter(self, request):
filters = ['app_config', 'publish', 'date_published']
if get_setting('MULTISITE'):
filters.append(SiteListFilter)
try:
from taggit_helpers.admin import TaggitListFilter
filters.append(TaggitListFilter)
except ImportError: # pragma: no cover
try:
from taggit_helpers import TaggitListFilter
filters.append(TaggitListFilter)
except ImportError:
pass
return filters
def get_urls(self):
"""
Customize the modeladmin urls
"""
urls = [
url(r'^publish/([0-9]+)/$', self.admin_site.admin_view(self.publish_post),
name='djangocms_blog_publish_article'),
]
urls.extend(super(PostAdmin, self).get_urls())
return urls
def post_add_plugin(self, request, obj1, obj2=None):
if isinstance(obj1, CMSPlugin):
plugin = obj1
elif isinstance(obj2, CMSPlugin):
plugin = obj2
if plugin.plugin_type in get_setting('LIVEBLOG_PLUGINS'):
plugin = plugin.move(plugin.get_siblings().first(), 'first-sibling')
if isinstance(obj1, CMSPlugin):
return super(PostAdmin, self).post_add_plugin(request, plugin)
elif isinstance(obj2, CMSPlugin):
return super(PostAdmin, self).post_add_plugin(request, obj1, plugin)
def publish_post(self, request, pk):
"""
Admin view to publish a single post
:param request: request
:param pk: primary key of the post to publish
:return: Redirect to the post itself (if found) or fallback urls
"""
language = get_language_from_request(request, check_path=True)
try:
post = Post.objects.get(pk=int(pk))
post.publish = True
post.save()
return HttpResponseRedirect(post.get_absolute_url(language))
except Exception:
try:
return HttpResponseRedirect(request.META['HTTP_REFERER'])
except KeyError:
return HttpResponseRedirect(reverse('djangocms_blog:posts-latest'))
def get_restricted_sites(self, request):
"""
The sites on which the user has permission on.
To return the permissions, the method check for the ``get_sites``
method on the user instance (e.g.: ``return request.user.get_sites()``)
which must return the queryset of enabled sites.
If the attribute does not exists, the user is considered enabled
for all the websites.
:param request: current request
:return: boolean or a queryset of available sites
"""
try:
return request.user.get_sites()
except AttributeError: # pragma: no cover
return Site.objects.none()
def _set_config_defaults(self, request, form, obj=None):
form = super(PostAdmin, self)._set_config_defaults(request, form, obj)
sites = self.get_restricted_sites(request)
if 'sites' in form.base_fields and sites.exists():
form.base_fields['sites'].queryset = self.get_restricted_sites(request).all()
return form
def get_fieldsets(self, request, obj=None):
"""
Customize the fieldsets according to the app settings
:param request: request
:param obj: post
:return: fieldsets configuration
"""
app_config_default = self._app_config_select(request, obj)
if app_config_default is None and request.method == 'GET':
return super(PostAdmin, self).get_fieldsets(request, obj)
if not obj:
config = app_config_default
else:
config = obj.app_config
fsets = deepcopy(self._fieldsets)
if config:
abstract = bool(config.use_abstract)
placeholder = bool(config.use_placeholder)
related = bool(config.use_related)
else:
abstract = get_setting('USE_ABSTRACT')
placeholder = get_setting('USE_PLACEHOLDER')
related = get_setting('USE_RELATED')
if abstract:
fsets[0][1]['fields'].append('abstract')
if not placeholder:
fsets[0][1]['fields'].append('post_text')
if get_setting('MULTISITE') and not self.has_restricted_sites(request):
fsets[1][1]['fields'][0].append('sites')
if request.user.is_superuser:
fsets[1][1]['fields'][0].append('author')
if apps.is_installed('djangocms_blog.liveblog'):
fsets[2][1]['fields'][2].append('enable_liveblog')
filter_function = get_setting('ADMIN_POST_FIELDSET_FILTER')
if related and Post.objects.namespace(config.namespace).active_translations().exists():
fsets[1][1]['fields'][0].append('related')
if callable(filter_function):
fsets = filter_function(fsets, request, obj=obj)
return fsets
def get_prepopulated_fields(self, request, obj=None):
return {'slug': ('title',)}
def save_model(self, request, obj, form, change):
obj._set_default_author(request.user)
super(PostAdmin, self).save_model(request, obj, form, change)
def get_queryset(self, request):
qs = super(PostAdmin, self).get_queryset(request)
sites = self.get_restricted_sites(request)
if sites.exists():
pks = list(sites.all().values_list('pk', flat=True))
qs = qs.filter(sites__in=pks)
return qs.distinct()
def save_related(self, request, form, formsets, change):
if self.get_restricted_sites(request).exists():
if 'sites' in form.cleaned_data:
form_sites = form.cleaned_data.get('sites', [])
removed = set(
self.get_restricted_sites(request).all()
).difference(form_sites)
diff_original = set(
form.instance.sites.all()
).difference(removed).union(form_sites)
form.cleaned_data['sites'] = diff_original
else:
form.instance.sites.add(
*self.get_restricted_sites(request).all().values_list('pk', flat=True)
)
super(PostAdmin, self).save_related(request, form, formsets, change)
class Media:
css = {
'all': ('%sdjangocms_blog/css/%s' % (settings.STATIC_URL, 'djangocms_blog_admin.css'),)
}
|
nephila/djangocms-blog
|
djangocms_blog/admin.py
|
PostAdmin.get_restricted_sites
|
python
|
def get_restricted_sites(self, request):
try:
return request.user.get_sites()
except AttributeError: # pragma: no cover
return Site.objects.none()
|
The sites on which the user has permission on.
To return the permissions, the method check for the ``get_sites``
method on the user instance (e.g.: ``return request.user.get_sites()``)
which must return the queryset of enabled sites.
If the attribute does not exists, the user is considered enabled
for all the websites.
:param request: current request
:return: boolean or a queryset of available sites
|
train
|
https://github.com/nephila/djangocms-blog/blob/3fdfbd4ba48947df0ee4c6d42e3a1c812b6dd95d/djangocms_blog/admin.py#L277-L293
| null |
class PostAdmin(PlaceholderAdminMixin, FrontendEditableAdminMixin,
ModelAppHookConfig, TranslatableAdmin):
form = PostAdminForm
list_display = [
'title', 'author', 'date_published', 'app_config', 'all_languages_column',
'date_published_end'
]
search_fields = ('translations__title',)
date_hierarchy = 'date_published'
raw_id_fields = ['author']
frontend_editable_fields = ('title', 'abstract', 'post_text')
enhance_exclude = ('main_image', 'tags')
actions = [
'make_published',
'make_unpublished',
'enable_comments',
'disable_comments',
]
if apps.is_installed('djangocms_blog.liveblog'):
actions += ['enable_liveblog', 'disable_liveblog']
_fieldsets = [
(None, {
'fields': [
['title', 'subtitle', 'publish'],
['categories', 'app_config']
]
}),
(None, {
'fields': [[]]
}),
(_('Info'), {
'fields': [['slug', 'tags'],
['date_published', 'date_published_end', 'date_featured'],
['enable_comments']],
'classes': ('collapse',)
}),
(_('Images'), {
'fields': [['main_image', 'main_image_thumbnail', 'main_image_full']],
'classes': ('collapse',)
}),
(_('SEO'), {
'fields': [['meta_description', 'meta_title', 'meta_keywords']],
'classes': ('collapse',)
}),
]
app_config_values = {
'default_published': 'publish'
}
_sites = None
# Bulk actions for post admin
def make_published(self, request, queryset):
""" Bulk action to mark selected posts as published. If
the date_published field is empty the current time is
saved as date_published.
queryset must not be empty (ensured by DjangoCMS).
"""
cnt1 = queryset.filter(
date_published__isnull=True,
publish=False,
).update(date_published=timezone.now(), publish=True)
cnt2 = queryset.filter(
date_published__isnull=False,
publish=False,
).update(publish=True)
messages.add_message(
request, messages.INFO,
__('%(updates)d entry published.',
'%(updates)d entries published.', cnt1+cnt2) % {
'updates': cnt1+cnt2, })
def make_unpublished(self, request, queryset):
""" Bulk action to mark selected posts as UNpublished.
queryset must not be empty (ensured by DjangoCMS).
"""
updates = queryset.filter(publish=True)\
.update(publish=False)
messages.add_message(
request, messages.INFO,
__('%(updates)d entry unpublished.',
'%(updates)d entries unpublished.', updates) % {
'updates': updates, })
def enable_comments(self, request, queryset):
""" Bulk action to enable comments for selected posts.
queryset must not be empty (ensured by DjangoCMS).
"""
updates = queryset.filter(enable_comments=False)\
.update(enable_comments=True)
messages.add_message(
request, messages.INFO,
__('Comments for %(updates)d entry enabled.',
'Comments for %(updates)d entries enabled', updates) % {
'updates': updates, })
def disable_comments(self, request, queryset):
""" Bulk action to disable comments for selected posts.
queryset must not be empty (ensured by DjangoCMS).
"""
updates = queryset.filter(enable_comments=True)\
.update(enable_comments=False)
messages.add_message(
request, messages.INFO,
__('Comments for %(updates)d entry disabled.',
'Comments for %(updates)d entries disabled.', updates) % {
'updates': updates, })
def enable_liveblog(self, request, queryset):
""" Bulk action to enable comments for selected posts.
queryset must not be empty (ensured by DjangoCMS).
"""
updates = queryset.filter(enable_liveblog=False)\
.update(enable_liveblog=True)
messages.add_message(
request, messages.INFO,
__('Liveblog for %(updates)d entry enabled.',
'Liveblog for %(updates)d entries enabled.', updates) % {
'updates': updates, })
def disable_liveblog(self, request, queryset):
""" Bulk action to disable comments for selected posts.
queryset must not be empty (ensured by DjangoCMS).
"""
updates = queryset.filter(enable_liveblog=True)\
.update(enable_liveblog=False)
messages.add_message(
request, messages.INFO,
__('Liveblog for %(updates)d entry enabled.',
'Liveblog for %(updates)d entries enabled.') % {
'updates': updates, })
# Make bulk action menu entries localizable
make_published.short_description = _("Publish selection")
make_unpublished.short_description = _("Unpublish selection")
enable_comments.short_description = _("Enable comments for selection")
disable_comments.short_description = _("Disable comments for selection ")
enable_liveblog.short_description = _("Enable liveblog for selection")
disable_liveblog.short_description = _("Disable liveblog for selection ")
def get_list_filter(self, request):
filters = ['app_config', 'publish', 'date_published']
if get_setting('MULTISITE'):
filters.append(SiteListFilter)
try:
from taggit_helpers.admin import TaggitListFilter
filters.append(TaggitListFilter)
except ImportError: # pragma: no cover
try:
from taggit_helpers import TaggitListFilter
filters.append(TaggitListFilter)
except ImportError:
pass
return filters
def get_urls(self):
"""
Customize the modeladmin urls
"""
urls = [
url(r'^publish/([0-9]+)/$', self.admin_site.admin_view(self.publish_post),
name='djangocms_blog_publish_article'),
]
urls.extend(super(PostAdmin, self).get_urls())
return urls
def post_add_plugin(self, request, obj1, obj2=None):
if isinstance(obj1, CMSPlugin):
plugin = obj1
elif isinstance(obj2, CMSPlugin):
plugin = obj2
if plugin.plugin_type in get_setting('LIVEBLOG_PLUGINS'):
plugin = plugin.move(plugin.get_siblings().first(), 'first-sibling')
if isinstance(obj1, CMSPlugin):
return super(PostAdmin, self).post_add_plugin(request, plugin)
elif isinstance(obj2, CMSPlugin):
return super(PostAdmin, self).post_add_plugin(request, obj1, plugin)
def publish_post(self, request, pk):
"""
Admin view to publish a single post
:param request: request
:param pk: primary key of the post to publish
:return: Redirect to the post itself (if found) or fallback urls
"""
language = get_language_from_request(request, check_path=True)
try:
post = Post.objects.get(pk=int(pk))
post.publish = True
post.save()
return HttpResponseRedirect(post.get_absolute_url(language))
except Exception:
try:
return HttpResponseRedirect(request.META['HTTP_REFERER'])
except KeyError:
return HttpResponseRedirect(reverse('djangocms_blog:posts-latest'))
def has_restricted_sites(self, request):
"""
Whether the current user has permission on one site only
:param request: current request
:return: boolean: user has permission on only one site
"""
sites = self.get_restricted_sites(request)
return sites and sites.count() == 1
def _set_config_defaults(self, request, form, obj=None):
form = super(PostAdmin, self)._set_config_defaults(request, form, obj)
sites = self.get_restricted_sites(request)
if 'sites' in form.base_fields and sites.exists():
form.base_fields['sites'].queryset = self.get_restricted_sites(request).all()
return form
def get_fieldsets(self, request, obj=None):
"""
Customize the fieldsets according to the app settings
:param request: request
:param obj: post
:return: fieldsets configuration
"""
app_config_default = self._app_config_select(request, obj)
if app_config_default is None and request.method == 'GET':
return super(PostAdmin, self).get_fieldsets(request, obj)
if not obj:
config = app_config_default
else:
config = obj.app_config
fsets = deepcopy(self._fieldsets)
if config:
abstract = bool(config.use_abstract)
placeholder = bool(config.use_placeholder)
related = bool(config.use_related)
else:
abstract = get_setting('USE_ABSTRACT')
placeholder = get_setting('USE_PLACEHOLDER')
related = get_setting('USE_RELATED')
if abstract:
fsets[0][1]['fields'].append('abstract')
if not placeholder:
fsets[0][1]['fields'].append('post_text')
if get_setting('MULTISITE') and not self.has_restricted_sites(request):
fsets[1][1]['fields'][0].append('sites')
if request.user.is_superuser:
fsets[1][1]['fields'][0].append('author')
if apps.is_installed('djangocms_blog.liveblog'):
fsets[2][1]['fields'][2].append('enable_liveblog')
filter_function = get_setting('ADMIN_POST_FIELDSET_FILTER')
if related and Post.objects.namespace(config.namespace).active_translations().exists():
fsets[1][1]['fields'][0].append('related')
if callable(filter_function):
fsets = filter_function(fsets, request, obj=obj)
return fsets
def get_prepopulated_fields(self, request, obj=None):
return {'slug': ('title',)}
def save_model(self, request, obj, form, change):
obj._set_default_author(request.user)
super(PostAdmin, self).save_model(request, obj, form, change)
def get_queryset(self, request):
qs = super(PostAdmin, self).get_queryset(request)
sites = self.get_restricted_sites(request)
if sites.exists():
pks = list(sites.all().values_list('pk', flat=True))
qs = qs.filter(sites__in=pks)
return qs.distinct()
def save_related(self, request, form, formsets, change):
if self.get_restricted_sites(request).exists():
if 'sites' in form.cleaned_data:
form_sites = form.cleaned_data.get('sites', [])
removed = set(
self.get_restricted_sites(request).all()
).difference(form_sites)
diff_original = set(
form.instance.sites.all()
).difference(removed).union(form_sites)
form.cleaned_data['sites'] = diff_original
else:
form.instance.sites.add(
*self.get_restricted_sites(request).all().values_list('pk', flat=True)
)
super(PostAdmin, self).save_related(request, form, formsets, change)
class Media:
css = {
'all': ('%sdjangocms_blog/css/%s' % (settings.STATIC_URL, 'djangocms_blog_admin.css'),)
}
|
nephila/djangocms-blog
|
djangocms_blog/admin.py
|
PostAdmin.get_fieldsets
|
python
|
def get_fieldsets(self, request, obj=None):
app_config_default = self._app_config_select(request, obj)
if app_config_default is None and request.method == 'GET':
return super(PostAdmin, self).get_fieldsets(request, obj)
if not obj:
config = app_config_default
else:
config = obj.app_config
fsets = deepcopy(self._fieldsets)
if config:
abstract = bool(config.use_abstract)
placeholder = bool(config.use_placeholder)
related = bool(config.use_related)
else:
abstract = get_setting('USE_ABSTRACT')
placeholder = get_setting('USE_PLACEHOLDER')
related = get_setting('USE_RELATED')
if abstract:
fsets[0][1]['fields'].append('abstract')
if not placeholder:
fsets[0][1]['fields'].append('post_text')
if get_setting('MULTISITE') and not self.has_restricted_sites(request):
fsets[1][1]['fields'][0].append('sites')
if request.user.is_superuser:
fsets[1][1]['fields'][0].append('author')
if apps.is_installed('djangocms_blog.liveblog'):
fsets[2][1]['fields'][2].append('enable_liveblog')
filter_function = get_setting('ADMIN_POST_FIELDSET_FILTER')
if related and Post.objects.namespace(config.namespace).active_translations().exists():
fsets[1][1]['fields'][0].append('related')
if callable(filter_function):
fsets = filter_function(fsets, request, obj=obj)
return fsets
|
Customize the fieldsets according to the app settings
:param request: request
:param obj: post
:return: fieldsets configuration
|
train
|
https://github.com/nephila/djangocms-blog/blob/3fdfbd4ba48947df0ee4c6d42e3a1c812b6dd95d/djangocms_blog/admin.py#L302-L342
|
[
"def get_setting(name):\n from django.conf import settings\n from django.utils.translation import ugettext_lazy as _\n from meta import settings as meta_settings\n\n PERMALINKS = (\n ('full_date', _('Full date')),\n ('short_date', _('Year / Month')),\n ('category', _('Category')),\n ('slug', _('Just slug')),\n )\n PERMALINKS_URLS = {\n 'full_date': r'^(?P<year>\\d{4})/(?P<month>\\d{1,2})/(?P<day>\\d{1,2})/(?P<slug>\\w[-\\w]*)/$',\n 'short_date': r'^(?P<year>\\d{4})/(?P<month>\\d{1,2})/(?P<slug>\\w[-\\w]*)/$',\n 'category': r'^(?P<category>\\w[-\\w]*)/(?P<slug>\\w[-\\w]*)/$',\n 'slug': r'^(?P<slug>\\w[-\\w]*)/$',\n }\n MENU_TYPES = (\n (MENU_TYPE_COMPLETE, _('Categories and posts')),\n (MENU_TYPE_CATEGORIES, _('Categories only')),\n (MENU_TYPE_POSTS, _('Posts only')),\n (MENU_TYPE_NONE, _('None')),\n )\n SITEMAP_CHANGEFREQ_LIST = (\n ('always', _('always')),\n ('hourly', _('hourly')),\n ('daily', _('daily')),\n ('weekly', _('weekly')),\n ('monthly', _('monthly')),\n ('yearly', _('yearly')),\n ('never', _('never')),\n )\n default = {\n 'BLOG_IMAGE_THUMBNAIL_SIZE': getattr(settings, 'BLOG_IMAGE_THUMBNAIL_SIZE', {\n 'size': '120x120',\n 'crop': True,\n 'upscale': False\n }),\n\n 'BLOG_IMAGE_FULL_SIZE': getattr(settings, 'BLOG_IMAGE_FULL_SIZE', {\n 'size': '640x120',\n 'crop': True,\n 'upscale': False\n }),\n\n 'BLOG_URLCONF': getattr(settings, 'BLOG_URLCONF', 'djangocms_blog.urls'),\n 'BLOG_PAGINATION': getattr(settings, 'BLOG_PAGINATION', 10),\n 'BLOG_LATEST_POSTS': getattr(settings, 'BLOG_LATEST_POSTS', 5),\n 'BLOG_POSTS_LIST_TRUNCWORDS_COUNT': getattr(\n settings, 'BLOG_POSTS_LIST_TRUNCWORDS_COUNT', 100\n ),\n 'BLOG_META_DESCRIPTION_LENGTH': getattr(\n settings, 'BLOG_META_DESCRIPTION_LENGTH', 320\n ),\n 'BLOG_META_TITLE_LENGTH': getattr(\n settings, 'BLOG_META_TITLE_LENGTH', 70\n ),\n 'BLOG_MENU_TYPES': MENU_TYPES,\n 'BLOG_MENU_EMPTY_CATEGORIES': getattr(settings, 'MENU_EMPTY_CATEGORIES', True),\n 'BLOG_TYPE': getattr(settings, 'BLOG_TYPE', 'Article'),\n 
'BLOG_TYPES': meta_settings.OBJECT_TYPES,\n 'BLOG_FB_TYPE': getattr(settings, 'BLOG_FB_TYPE', 'Article'),\n 'BLOG_FB_TYPES': getattr(settings, 'BLOG_FB_TYPES', meta_settings.FB_TYPES),\n 'BLOG_FB_APPID': getattr(settings, 'BLOG_FB_APPID', meta_settings.FB_APPID),\n 'BLOG_FB_PROFILE_ID': getattr(settings, 'BLOG_FB_PROFILE_ID', meta_settings.FB_PROFILE_ID),\n 'BLOG_FB_PUBLISHER': getattr(settings, 'BLOG_FB_PUBLISHER', meta_settings.FB_PUBLISHER),\n 'BLOG_FB_AUTHOR_URL': getattr(settings, 'BLOG_FB_AUTHOR_URL', 'get_author_url'),\n 'BLOG_FB_AUTHOR': getattr(settings, 'BLOG_FB_AUTHOR', 'get_author_name'),\n 'BLOG_TWITTER_TYPE': getattr(settings, 'BLOG_TWITTER_TYPE', 'summary'),\n 'BLOG_TWITTER_TYPES': getattr(settings, 'BLOG_TWITTER_TYPES', meta_settings.TWITTER_TYPES),\n 'BLOG_TWITTER_SITE': getattr(settings, 'BLOG_TWITTER_SITE', meta_settings.TWITTER_SITE),\n 'BLOG_TWITTER_AUTHOR': getattr(settings, 'BLOG_TWITTER_AUTHOR', 'get_author_twitter'),\n 'BLOG_GPLUS_TYPE': getattr(settings, 'BLOG_GPLUS_TYPE', 'Blog'),\n 'BLOG_GPLUS_TYPES': getattr(settings, 'BLOG_GPLUS_TYPES', meta_settings.GPLUS_TYPES),\n 'BLOG_GPLUS_AUTHOR': getattr(settings, 'BLOG_GPLUS_AUTHOR', 'get_author_gplus'),\n 'BLOG_ENABLE_COMMENTS': getattr(settings, 'BLOG_ENABLE_COMMENTS', True),\n 'BLOG_USE_ABSTRACT': getattr(settings, 'BLOG_USE_ABSTRACT', True),\n 'BLOG_USE_PLACEHOLDER': getattr(settings, 'BLOG_USE_PLACEHOLDER', True),\n 'BLOG_USE_RELATED': getattr(settings, 'BLOG_USE_RELATED', True),\n 'BLOG_MULTISITE': getattr(settings, 'BLOG_MULTISITE', True),\n 'BLOG_AUTHOR_DEFAULT': getattr(settings, 'BLOG_AUTHOR_DEFAULT', True),\n 'BLOG_DEFAULT_PUBLISHED': getattr(settings, 'BLOG_DEFAULT_PUBLISHED', False),\n 'BLOG_ADMIN_POST_FIELDSET_FILTER': getattr(\n settings, 'BLOG_ADMIN_POST_FIELDSET_FILTER', False),\n 'BLOG_AVAILABLE_PERMALINK_STYLES': getattr(\n settings, 'BLOG_AVAILABLE_PERMALINK_STYLES', PERMALINKS\n ),\n 'BLOG_PERMALINK_URLS': getattr(settings, 'BLOG_PERMALINK_URLS', PERMALINKS_URLS),\n 
'BLOG_DEFAULT_OBJECT_NAME': getattr(settings, 'BLOG_DEFAULT_OBJECT_NAME', 'Article'),\n\n 'BLOG_AUTO_SETUP': getattr(settings, 'BLOG_AUTO_SETUP', True),\n 'BLOG_AUTO_HOME_TITLE': getattr(settings, 'BLOG_AUTO_HOME_TITLE', 'Home'),\n 'BLOG_AUTO_BLOG_TITLE': getattr(settings, 'BLOG_AUTO_BLOG_TITLE', 'Blog'),\n 'BLOG_AUTO_APP_TITLE': getattr(settings, 'BLOG_AUTO_APP_TITLE', 'Blog'),\n 'BLOG_AUTO_NAMESPACE': getattr(settings, 'BLOG_AUTO_NAMESPACE', 'Blog'),\n\n 'BLOG_SITEMAP_PRIORITY_DEFAULT': getattr(settings, 'BLOG_SITEMAP_PRIORITY_DEFAULT', '0.5'),\n 'BLOG_SITEMAP_CHANGEFREQ': getattr(\n settings, 'BLOG_SITEMAP_CHANGEFREQ', SITEMAP_CHANGEFREQ_LIST\n ),\n 'BLOG_SITEMAP_CHANGEFREQ_DEFAULT': getattr(\n settings, 'BLOG_SITEMAP_CHANGEFREQ_DEFAULT', 'monthly'\n ),\n\n 'BLOG_ENABLE_SEARCH': getattr(settings, 'BLOG_ENABLE_SEARCH', True),\n 'BLOG_CURRENT_POST_IDENTIFIER': getattr(\n settings, 'BLOG_CURRENT_POST_IDENTIFIER', 'djangocms_post_current'),\n 'BLOG_CURRENT_NAMESPACE': getattr(\n settings, 'BLOG_CURRENT_NAMESPACE', 'djangocms_post_current_config'),\n 'BLOG_ENABLE_THROUGH_TOOLBAR_MENU': getattr(\n settings, 'BLOG_ENABLE_THROUGH_TOOLBAR_MENU', False),\n\n 'BLOG_PLUGIN_MODULE_NAME': getattr(settings, 'BLOG_PLUGIN_MODULE_NAME', _('Blog')),\n 'BLOG_LATEST_ENTRIES_PLUGIN_NAME': getattr(\n settings, 'BLOG_LATEST_ENTRIES_PLUGIN_NAME', _('Latest Blog Articles')),\n 'BLOG_LATEST_ENTRIES_PLUGIN_NAME_CACHED': getattr(\n settings, 'BLOG_LATEST_ENTRIES_PLUGIN_NAME_CACHED', _('Latest Blog Articles - Cache')),\n 'BLOG_AUTHOR_POSTS_PLUGIN_NAME': getattr(\n settings, 'BLOG_AUTHOR_POSTS_PLUGIN_NAME', _('Author Blog Articles')),\n 'BLOG_TAGS_PLUGIN_NAME': getattr(\n settings, 'BLOG_TAGS_PLUGIN_NAME', _('Tags')),\n 'BLOG_CATEGORY_PLUGIN_NAME': getattr(\n settings, 'BLOG_CATEGORY_PLUGIN_NAME', _('Categories')),\n 'BLOG_ARCHIVE_PLUGIN_NAME': getattr(\n settings, 'BLOG_ARCHIVE_PLUGIN_NAME', _('Archive')),\n 'BLOG_FEED_CACHE_TIMEOUT': getattr(\n settings, 'BLOG_FEED_CACHE_TIMEOUT', 3600),\n 
'BLOG_FEED_INSTANT_ITEMS': getattr(\n settings, 'BLOG_FEED_INSTANT_ITEMS', 50),\n 'BLOG_FEED_LATEST_ITEMS': getattr(\n settings, 'BLOG_FEED_LATEST_ITEMS', 10),\n 'BLOG_FEED_TAGS_ITEMS': getattr(\n settings, 'BLOG_FEED_TAGS_ITEMS', 10),\n 'BLOG_LIVEBLOG_PLUGINS': getattr(\n settings, 'BLOG_LIVEBLOG_PLUGINS', ('LiveblogPlugin',)),\n\n 'BLOG_PLUGIN_TEMPLATE_FOLDERS': getattr(\n settings, 'BLOG_PLUGIN_TEMPLATE_FOLDERS', (('plugins', _('Default template')),)),\n\n }\n return default['BLOG_%s' % name]\n"
] |
class PostAdmin(PlaceholderAdminMixin, FrontendEditableAdminMixin,
ModelAppHookConfig, TranslatableAdmin):
form = PostAdminForm
list_display = [
'title', 'author', 'date_published', 'app_config', 'all_languages_column',
'date_published_end'
]
search_fields = ('translations__title',)
date_hierarchy = 'date_published'
raw_id_fields = ['author']
frontend_editable_fields = ('title', 'abstract', 'post_text')
enhance_exclude = ('main_image', 'tags')
actions = [
'make_published',
'make_unpublished',
'enable_comments',
'disable_comments',
]
if apps.is_installed('djangocms_blog.liveblog'):
actions += ['enable_liveblog', 'disable_liveblog']
_fieldsets = [
(None, {
'fields': [
['title', 'subtitle', 'publish'],
['categories', 'app_config']
]
}),
(None, {
'fields': [[]]
}),
(_('Info'), {
'fields': [['slug', 'tags'],
['date_published', 'date_published_end', 'date_featured'],
['enable_comments']],
'classes': ('collapse',)
}),
(_('Images'), {
'fields': [['main_image', 'main_image_thumbnail', 'main_image_full']],
'classes': ('collapse',)
}),
(_('SEO'), {
'fields': [['meta_description', 'meta_title', 'meta_keywords']],
'classes': ('collapse',)
}),
]
app_config_values = {
'default_published': 'publish'
}
_sites = None
# Bulk actions for post admin
def make_published(self, request, queryset):
""" Bulk action to mark selected posts as published. If
the date_published field is empty the current time is
saved as date_published.
queryset must not be empty (ensured by DjangoCMS).
"""
cnt1 = queryset.filter(
date_published__isnull=True,
publish=False,
).update(date_published=timezone.now(), publish=True)
cnt2 = queryset.filter(
date_published__isnull=False,
publish=False,
).update(publish=True)
messages.add_message(
request, messages.INFO,
__('%(updates)d entry published.',
'%(updates)d entries published.', cnt1+cnt2) % {
'updates': cnt1+cnt2, })
def make_unpublished(self, request, queryset):
""" Bulk action to mark selected posts as UNpublished.
queryset must not be empty (ensured by DjangoCMS).
"""
updates = queryset.filter(publish=True)\
.update(publish=False)
messages.add_message(
request, messages.INFO,
__('%(updates)d entry unpublished.',
'%(updates)d entries unpublished.', updates) % {
'updates': updates, })
def enable_comments(self, request, queryset):
""" Bulk action to enable comments for selected posts.
queryset must not be empty (ensured by DjangoCMS).
"""
updates = queryset.filter(enable_comments=False)\
.update(enable_comments=True)
messages.add_message(
request, messages.INFO,
__('Comments for %(updates)d entry enabled.',
'Comments for %(updates)d entries enabled', updates) % {
'updates': updates, })
def disable_comments(self, request, queryset):
""" Bulk action to disable comments for selected posts.
queryset must not be empty (ensured by DjangoCMS).
"""
updates = queryset.filter(enable_comments=True)\
.update(enable_comments=False)
messages.add_message(
request, messages.INFO,
__('Comments for %(updates)d entry disabled.',
'Comments for %(updates)d entries disabled.', updates) % {
'updates': updates, })
def enable_liveblog(self, request, queryset):
""" Bulk action to enable comments for selected posts.
queryset must not be empty (ensured by DjangoCMS).
"""
updates = queryset.filter(enable_liveblog=False)\
.update(enable_liveblog=True)
messages.add_message(
request, messages.INFO,
__('Liveblog for %(updates)d entry enabled.',
'Liveblog for %(updates)d entries enabled.', updates) % {
'updates': updates, })
def disable_liveblog(self, request, queryset):
""" Bulk action to disable comments for selected posts.
queryset must not be empty (ensured by DjangoCMS).
"""
updates = queryset.filter(enable_liveblog=True)\
.update(enable_liveblog=False)
messages.add_message(
request, messages.INFO,
__('Liveblog for %(updates)d entry enabled.',
'Liveblog for %(updates)d entries enabled.') % {
'updates': updates, })
# Make bulk action menu entries localizable
make_published.short_description = _("Publish selection")
make_unpublished.short_description = _("Unpublish selection")
enable_comments.short_description = _("Enable comments for selection")
disable_comments.short_description = _("Disable comments for selection ")
enable_liveblog.short_description = _("Enable liveblog for selection")
disable_liveblog.short_description = _("Disable liveblog for selection ")
def get_list_filter(self, request):
filters = ['app_config', 'publish', 'date_published']
if get_setting('MULTISITE'):
filters.append(SiteListFilter)
try:
from taggit_helpers.admin import TaggitListFilter
filters.append(TaggitListFilter)
except ImportError: # pragma: no cover
try:
from taggit_helpers import TaggitListFilter
filters.append(TaggitListFilter)
except ImportError:
pass
return filters
def get_urls(self):
"""
Customize the modeladmin urls
"""
urls = [
url(r'^publish/([0-9]+)/$', self.admin_site.admin_view(self.publish_post),
name='djangocms_blog_publish_article'),
]
urls.extend(super(PostAdmin, self).get_urls())
return urls
def post_add_plugin(self, request, obj1, obj2=None):
if isinstance(obj1, CMSPlugin):
plugin = obj1
elif isinstance(obj2, CMSPlugin):
plugin = obj2
if plugin.plugin_type in get_setting('LIVEBLOG_PLUGINS'):
plugin = plugin.move(plugin.get_siblings().first(), 'first-sibling')
if isinstance(obj1, CMSPlugin):
return super(PostAdmin, self).post_add_plugin(request, plugin)
elif isinstance(obj2, CMSPlugin):
return super(PostAdmin, self).post_add_plugin(request, obj1, plugin)
def publish_post(self, request, pk):
"""
Admin view to publish a single post
:param request: request
:param pk: primary key of the post to publish
:return: Redirect to the post itself (if found) or fallback urls
"""
language = get_language_from_request(request, check_path=True)
try:
post = Post.objects.get(pk=int(pk))
post.publish = True
post.save()
return HttpResponseRedirect(post.get_absolute_url(language))
except Exception:
try:
return HttpResponseRedirect(request.META['HTTP_REFERER'])
except KeyError:
return HttpResponseRedirect(reverse('djangocms_blog:posts-latest'))
def has_restricted_sites(self, request):
"""
Whether the current user has permission on one site only
:param request: current request
:return: boolean: user has permission on only one site
"""
sites = self.get_restricted_sites(request)
return sites and sites.count() == 1
def get_restricted_sites(self, request):
"""
The sites on which the user has permission on.
To return the permissions, the method check for the ``get_sites``
method on the user instance (e.g.: ``return request.user.get_sites()``)
which must return the queryset of enabled sites.
If the attribute does not exists, the user is considered enabled
for all the websites.
:param request: current request
:return: boolean or a queryset of available sites
"""
try:
return request.user.get_sites()
except AttributeError: # pragma: no cover
return Site.objects.none()
def _set_config_defaults(self, request, form, obj=None):
form = super(PostAdmin, self)._set_config_defaults(request, form, obj)
sites = self.get_restricted_sites(request)
if 'sites' in form.base_fields and sites.exists():
form.base_fields['sites'].queryset = self.get_restricted_sites(request).all()
return form
def get_prepopulated_fields(self, request, obj=None):
return {'slug': ('title',)}
def save_model(self, request, obj, form, change):
obj._set_default_author(request.user)
super(PostAdmin, self).save_model(request, obj, form, change)
def get_queryset(self, request):
qs = super(PostAdmin, self).get_queryset(request)
sites = self.get_restricted_sites(request)
if sites.exists():
pks = list(sites.all().values_list('pk', flat=True))
qs = qs.filter(sites__in=pks)
return qs.distinct()
def save_related(self, request, form, formsets, change):
if self.get_restricted_sites(request).exists():
if 'sites' in form.cleaned_data:
form_sites = form.cleaned_data.get('sites', [])
removed = set(
self.get_restricted_sites(request).all()
).difference(form_sites)
diff_original = set(
form.instance.sites.all()
).difference(removed).union(form_sites)
form.cleaned_data['sites'] = diff_original
else:
form.instance.sites.add(
*self.get_restricted_sites(request).all().values_list('pk', flat=True)
)
super(PostAdmin, self).save_related(request, form, formsets, change)
class Media:
css = {
'all': ('%sdjangocms_blog/css/%s' % (settings.STATIC_URL, 'djangocms_blog_admin.css'),)
}
|
nephila/djangocms-blog
|
djangocms_blog/admin.py
|
BlogConfigAdmin.get_fieldsets
|
python
|
def get_fieldsets(self, request, obj=None):
return [
(None, {
'fields': ('type', 'namespace', 'app_title', 'object_name')
}),
(_('Generic'), {
'fields': (
'config.default_published', 'config.use_placeholder', 'config.use_abstract',
'config.set_author', 'config.use_related',
)
}),
(_('Layout'), {
'fields': (
'config.paginate_by', 'config.url_patterns', 'config.template_prefix',
'config.menu_structure', 'config.menu_empty_categories',
('config.default_image_full', 'config.default_image_thumbnail'),
),
'classes': ('collapse',)
}),
(_('Notifications'), {
'fields': (
'config.send_knock_create', 'config.send_knock_update'
),
'classes': ('collapse',)
}),
(_('Sitemap'), {
'fields': (
'config.sitemap_changefreq', 'config.sitemap_priority',
),
'classes': ('collapse',)
}),
(_('Meta'), {
'fields': (
'config.object_type',
)
}),
('Open Graph', {
'fields': (
'config.og_type', 'config.og_app_id', 'config.og_profile_id',
'config.og_publisher', 'config.og_author_url', 'config.og_author',
),
'description': _(
'You can provide plain strings, Post model attribute or method names'
)
}),
('Twitter', {
'fields': (
'config.twitter_type', 'config.twitter_site', 'config.twitter_author',
),
'description': _(
'You can provide plain strings, Post model attribute or method names'
)
}),
('Google+', {
'fields': (
'config.gplus_type', 'config.gplus_author',
),
'description': _(
'You can provide plain strings, Post model attribute or method names'
)
}),
]
|
Fieldsets configuration
|
train
|
https://github.com/nephila/djangocms-blog/blob/3fdfbd4ba48947df0ee4c6d42e3a1c812b6dd95d/djangocms_blog/admin.py#L387-L451
| null |
class BlogConfigAdmin(BaseAppHookConfig, TranslatableAdmin):
@property
def declared_fieldsets(self):
return self.get_fieldsets(None)
def save_model(self, request, obj, form, change):
"""
Clear menu cache when changing menu structure
"""
if 'config.menu_structure' in form.changed_data:
from menus.menu_pool import menu_pool
menu_pool.clear(all=True)
return super(BlogConfigAdmin, self).save_model(request, obj, form, change)
|
nephila/djangocms-blog
|
djangocms_blog/admin.py
|
BlogConfigAdmin.save_model
|
python
|
def save_model(self, request, obj, form, change):
if 'config.menu_structure' in form.changed_data:
from menus.menu_pool import menu_pool
menu_pool.clear(all=True)
return super(BlogConfigAdmin, self).save_model(request, obj, form, change)
|
Clear menu cache when changing menu structure
|
train
|
https://github.com/nephila/djangocms-blog/blob/3fdfbd4ba48947df0ee4c6d42e3a1c812b6dd95d/djangocms_blog/admin.py#L453-L460
| null |
class BlogConfigAdmin(BaseAppHookConfig, TranslatableAdmin):
@property
def declared_fieldsets(self):
return self.get_fieldsets(None)
def get_fieldsets(self, request, obj=None):
"""
Fieldsets configuration
"""
return [
(None, {
'fields': ('type', 'namespace', 'app_title', 'object_name')
}),
(_('Generic'), {
'fields': (
'config.default_published', 'config.use_placeholder', 'config.use_abstract',
'config.set_author', 'config.use_related',
)
}),
(_('Layout'), {
'fields': (
'config.paginate_by', 'config.url_patterns', 'config.template_prefix',
'config.menu_structure', 'config.menu_empty_categories',
('config.default_image_full', 'config.default_image_thumbnail'),
),
'classes': ('collapse',)
}),
(_('Notifications'), {
'fields': (
'config.send_knock_create', 'config.send_knock_update'
),
'classes': ('collapse',)
}),
(_('Sitemap'), {
'fields': (
'config.sitemap_changefreq', 'config.sitemap_priority',
),
'classes': ('collapse',)
}),
(_('Meta'), {
'fields': (
'config.object_type',
)
}),
('Open Graph', {
'fields': (
'config.og_type', 'config.og_app_id', 'config.og_profile_id',
'config.og_publisher', 'config.og_author_url', 'config.og_author',
),
'description': _(
'You can provide plain strings, Post model attribute or method names'
)
}),
('Twitter', {
'fields': (
'config.twitter_type', 'config.twitter_site', 'config.twitter_author',
),
'description': _(
'You can provide plain strings, Post model attribute or method names'
)
}),
('Google+', {
'fields': (
'config.gplus_type', 'config.gplus_author',
),
'description': _(
'You can provide plain strings, Post model attribute or method names'
)
}),
]
|
nephila/djangocms-blog
|
djangocms_blog/cms_wizards.py
|
PostWizardForm.clean_slug
|
python
|
def clean_slug(self):
source = self.cleaned_data.get('slug', '')
lang_choice = self.language_code
if not source:
source = slugify(self.cleaned_data.get('title', ''))
qs = Post._default_manager.active_translations(lang_choice).language(lang_choice)
used = list(qs.values_list('translations__slug', flat=True))
slug = source
i = 1
while slug in used:
slug = '%s-%s' % (source, i)
i += 1
return slug
|
Generate a valid slug, in case the given one is taken
|
train
|
https://github.com/nephila/djangocms-blog/blob/3fdfbd4ba48947df0ee4c6d42e3a1c812b6dd95d/djangocms_blog/cms_wizards.py#L55-L70
|
[
"def slugify(base):\n if django.VERSION >= (1, 9):\n return django_slugify(base, allow_unicode=True)\n else:\n return django_slugify(base)\n"
] |
class PostWizardForm(PostAdminFormBase):
default_appconfig = None
slug = forms.SlugField(
label=_('Slug'), max_length=767, required=False,
help_text=_('Leave empty for automatic slug, or override as required.'),
)
def __init__(self, *args, **kwargs):
if 'initial' not in kwargs or not kwargs.get('initial', False):
kwargs['initial'] = {}
kwargs['initial']['app_config'] = self.default_appconfig
if 'data' in kwargs and kwargs['data'] is not None:
data = kwargs['data'].copy()
data['1-app_config'] = self.default_appconfig
kwargs['data'] = data
super(PostWizardForm, self).__init__(*args, **kwargs)
self.fields['app_config'].widget = forms.Select(
attrs=self.fields['app_config'].widget.attrs,
choices=self.fields['app_config'].widget.choices,
)
self.fields['app_config'].widget.attrs['disabled'] = True
if 'categories' in self.fields:
self.fields['categories'].queryset = self.available_categories
class Meta:
model = Post
fields = ['app_config', 'title', 'slug', 'abstract', 'categories']
class Media:
js = ('admin/js/vendor/jquery/jquery.js', 'admin/js/jquery.init.js',)
def save(self, commit=True):
self.instance._set_default_author(get_current_user())
return super(PostWizardForm, self).save(commit)
|
nephila/djangocms-blog
|
djangocms_blog/cms_menus.py
|
BlogCategoryMenu.get_nodes
|
python
|
def get_nodes(self, request):
nodes = []
language = get_language_from_request(request, check_path=True)
current_site = get_current_site(request)
page_site = self.instance.node.site
if self.instance and page_site != current_site:
return []
categories_menu = False
posts_menu = False
config = False
if self.instance:
if not self._config.get(self.instance.application_namespace, False):
self._config[self.instance.application_namespace] = BlogConfig.objects.get(
namespace=self.instance.application_namespace
)
config = self._config[self.instance.application_namespace]
if not getattr(request, 'toolbar', False) or not request.toolbar.edit_mode_active:
if self.instance == self.instance.get_draft_object():
return []
else:
if self.instance == self.instance.get_public_object():
return []
if config and config.menu_structure in (MENU_TYPE_COMPLETE, MENU_TYPE_CATEGORIES):
categories_menu = True
if config and config.menu_structure in (MENU_TYPE_COMPLETE, MENU_TYPE_POSTS):
posts_menu = True
if config and config.menu_structure in (MENU_TYPE_NONE, ):
return nodes
used_categories = []
if posts_menu:
posts = Post.objects
if hasattr(self, 'instance') and self.instance:
posts = posts.namespace(self.instance.application_namespace).on_site()
posts = posts.active_translations(language).distinct().\
select_related('app_config').prefetch_related('translations', 'categories')
for post in posts:
post_id = None
parent = None
used_categories.extend(post.categories.values_list('pk', flat=True))
if categories_menu:
category = post.categories.first()
if category:
parent = '{0}-{1}'.format(category.__class__.__name__, category.pk)
post_id = '{0}-{1}'.format(post.__class__.__name__, post.pk),
else:
post_id = '{0}-{1}'.format(post.__class__.__name__, post.pk),
if post_id:
node = NavigationNode(
post.get_title(),
post.get_absolute_url(language),
post_id,
parent
)
nodes.append(node)
if categories_menu:
categories = BlogCategory.objects
if config:
categories = categories.namespace(self.instance.application_namespace)
if config and not config.menu_empty_categories:
categories = categories.active_translations(language).filter(
pk__in=used_categories
).distinct()
else:
categories = categories.active_translations(language).distinct()
categories = categories.order_by('parent__id', 'translations__name').\
select_related('app_config').prefetch_related('translations')
added_categories = []
for category in categories:
if category.pk not in added_categories:
node = NavigationNode(
category.name,
category.get_absolute_url(),
'{0}-{1}'.format(category.__class__.__name__, category.pk),
(
'{0}-{1}'.format(
category.__class__.__name__, category.parent.id
) if category.parent else None
)
)
nodes.append(node)
added_categories.append(category.pk)
return nodes
|
Generates the nodelist
:param request:
:return: list of nodes
|
train
|
https://github.com/nephila/djangocms-blog/blob/3fdfbd4ba48947df0ee4c6d42e3a1c812b6dd95d/djangocms_blog/cms_menus.py#L29-L122
| null |
class BlogCategoryMenu(CMSAttachMenu):
"""
Main menu class
Handles all types of blog menu
"""
name = _('Blog menu')
_config = {}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.