// openclwrapper.cpp — OpenCL device wrapper (extraction artifact removed)
#ifdef _WIN32
#include <Windows.h>
#include <io.h>
#else
#include <sys/types.h>
#include <unistd.h>
#endif
#include <float.h>
#include "openclwrapper.h"
#include "oclkernels.h"
// for micro-benchmark
#include "otsuthr.h"
#include "thresholder.h"
#ifdef USE_OPENCL
#if ON_APPLE
#define TIMESPEC mach_timespec
#else
#define TIMESPEC timespec
#endif
#include "opencl_device_selection.h"
GPUEnv OpenclDevice::gpuEnv;
#if USE_DEVICE_SELECTION
bool OpenclDevice::deviceIsSelected = false;
ds_device OpenclDevice::selectedDevice;
#endif
int OpenclDevice::isInited =0;
// Describes the flip/rotation steps needed to normalize a TIFF image that
// is stored in a non-default orientation.
struct tiff_transform {
int vflip; /* if non-zero, image needs a vertical flip */
int hflip; /* if non-zero, image needs a horizontal flip */
int rotate; /* -1 -> counterclockwise 90-degree rotation,
0 -> no rotation
1 -> clockwise 90-degree rotation */
};
// Eight entries — presumably indexed by (TIFF Orientation tag value - 1),
// matching the 8 orientations of the TIFF 6.0 spec. TODO confirm at the
// call site (not visible in this chunk).
static struct tiff_transform tiff_orientation_transforms[] = {
{0, 0, 0},
{0, 1, 0},
{1, 1, 0},
{1, 0, 0},
{0, 1, -1},
{0, 0, 1},
{0, 1, 1},
{0, 0, -1}
};
static const l_int32 MAX_PAGES_IN_TIFF_FILE = 3000;
// File-scope OpenCL state shared by the helpers below (not thread-safe).
cl_mem pixsCLBuffer, pixdCLBuffer, pixdCLIntermediate; //Morph operations buffers
cl_mem pixThBuffer; //output from thresholdtopix calculation
cl_int clStatus;   // last OpenCL status reported by several helpers
KernelEnv rEnv;    // kernel environment reused by the morph operations
// Substitute characters that are invalid (or inconvenient) in file names
// with '_'. Modifies fileName in place. The set includes ' ', which is
// legal on most filesystems but routinely causes tooling headaches.
void legalizeFileName( char *fileName) {
    // String literals are immutable: binding to const char* avoids the
    // ill-formed (C++11) literal-to-char* conversion of the original.
    const char *invalidChars = "/\?:*\"><| ";
    for (size_t i = 0; i < strlen(invalidChars); i++) {
        // Build a one-character search string for strstr().
        char invalidStr[2];
        invalidStr[0] = invalidChars[i];
        invalidStr[1] = '\0';  // was NULL (a pointer constant used as a char)
        // Replace every occurrence of this character with '_'.
        for (char *pos = strstr(fileName, invalidStr); pos != NULL;
             pos = strstr(pos + 1, invalidStr)) {
            pos[0] = '_';
        }
    }
}
// Fill a GPUEnv from an already-chosen cl_device_id: records the device,
// looks up its type and platform, then creates a context and an in-order
// command queue on it. Errors are reported through the file-scope
// clStatus via the CHECK_OPENCL macro; execution continues regardless.
void populateGPUEnvFromDevice( GPUEnv *gpuInfo, cl_device_id device ) {
//printf("[DS] populateGPUEnvFromDevice\n");
size_t size;
// Mark this env as user-created so InitOpenclRunEnv(GPUEnv*) skips its
// own platform/device discovery.
gpuInfo->mnIsUserCreated = 1;
// device
gpuInfo->mpDevID = device;
gpuInfo->mpArryDevsID = new cl_device_id[1];
gpuInfo->mpArryDevsID[0] = gpuInfo->mpDevID;
clStatus = clGetDeviceInfo(gpuInfo->mpDevID, CL_DEVICE_TYPE , sizeof(cl_device_type), (void *) &gpuInfo->mDevType , &size);
CHECK_OPENCL( clStatus, "populateGPUEnv::getDeviceInfo(TYPE)");
// platform
clStatus = clGetDeviceInfo(gpuInfo->mpDevID, CL_DEVICE_PLATFORM , sizeof(cl_platform_id), (void *) &gpuInfo->mpPlatformID , &size);
CHECK_OPENCL( clStatus, "populateGPUEnv::getDeviceInfo(PLATFORM)");
// context: tied to the device's platform
cl_context_properties props[3];
props[0] = CL_CONTEXT_PLATFORM;
props[1] = (cl_context_properties) gpuInfo->mpPlatformID;
props[2] = 0;
gpuInfo->mpContext = clCreateContext(props, 1, &gpuInfo->mpDevID, NULL, NULL, &clStatus);
CHECK_OPENCL( clStatus, "populateGPUEnv::createContext");
// queue: default (in-order, no profiling) properties
cl_command_queue_properties queueProperties = 0;
gpuInfo->mpCmdQueue = clCreateCommandQueue( gpuInfo->mpContext, gpuInfo->mpDevID, queueProperties, &clStatus );
CHECK_OPENCL( clStatus, "populateGPUEnv::createCommandQueue");
}
// On Windows, dynamically load the OpenCL runtime DLL. Returns 1 on
// success (or on non-Windows builds, where it is a no-op) and 0 when the
// DLL cannot be loaded.
int OpenclDevice::LoadOpencl()
{
#ifdef WIN32
HINSTANCE HOpenclDll = NULL;
void * OpenclDll = NULL;
//fprintf(stderr, " LoadOpenclDllxx... \n");
OpenclDll = static_cast<HINSTANCE>( HOpenclDll );
OpenclDll = LoadLibrary( "openCL.dll" );
if ( !static_cast<HINSTANCE>( OpenclDll ) )
{
fprintf(stderr, "[OD] Load opencl.dll failed!\n");
// NOTE(review): OpenclDll is NULL here, so this FreeLibrary call is a
// no-op; the handle is also never freed on the success path.
FreeLibrary( static_cast<HINSTANCE>( OpenclDll ) );
return 0;
}
fprintf(stderr, "[OD] Load opencl.dll successful!\n");
#endif
return 1;
}
// Point a caller-supplied KernelEnv at the shared GPU context, command
// queue, and (first) compiled program. Always returns 1.
int OpenclDevice::SetKernelEnv( KernelEnv *envInfo )
{
    envInfo->mpkProgram  = gpuEnv.mpArryPrograms[0];
    envInfo->mpkCmdQueue = gpuEnv.mpCmdQueue;
    envInfo->mpkContext  = gpuEnv.mpContext;
    return 1;
}
// Create an OpenCL buffer of nElements 32-bit words with the given flags.
// Depending on the flags, hostbuffer is either wrapped (USE_HOST_PTR) or
// copied; the creation status is written to *pStatus.
cl_mem allocateZeroCopyBuffer(KernelEnv rEnv, l_uint32 *hostbuffer, size_t nElements, cl_mem_flags flags, cl_int *pStatus)
{
    const size_t byteCount = nElements * sizeof(l_uint32);
    return clCreateBuffer(rEnv.mpkContext, flags, byteCount, hostbuffer, pStatus);
}
// Map an OpenCL buffer back into a leptonica PIX.
// If pixd is NULL a destination is created: a full template of pixs when
// memcopy is true (data deep-copied out of the mapped region), otherwise
// just a header whose data pointer is set to the mapped region.
// Returns the destination PIX, or NULL if it could not be created.
PIX* mapOutputCLBuffer(KernelEnv rEnv, cl_mem clbuffer, PIX* pixd, PIX* pixs, int elements, cl_mem_flags flags, bool memcopy = false, bool sync = true)
{
    PROCNAME("mapOutputCLBuffer");
    if (!pixd)
    {
        if (memcopy)
        {
            if ((pixd = pixCreateTemplate(pixs)) == NULL)
                // BUG FIX: the error value was previously computed and
                // discarded, letting execution fall through to a NULL
                // pixd dereference below. Propagate it to the caller.
                return (PIX *)ERROR_PTR("pixd not made", procName, NULL);
        }
        else
        {
            if ((pixd = pixCreateHeader(pixGetWidth(pixs), pixGetHeight(pixs), pixGetDepth(pixs))) == NULL)
                return (PIX *)ERROR_PTR("pixd not made", procName, NULL);
        }
    }
    l_uint32 *pValues = (l_uint32 *)clEnqueueMapBuffer(rEnv.mpkCmdQueue, clbuffer, CL_TRUE, flags, 0,
        elements * sizeof(l_uint32), 0, NULL, NULL, NULL );
    if (memcopy)
    {
        memcpy(pixGetData(pixd), pValues, elements * sizeof(l_uint32));
    }
    else
    {
        // Zero-copy path: the PIX takes the mapped pointer directly.
        pixSetData(pixd, pValues);
    }
    clEnqueueUnmapMemObject(rEnv.mpkCmdQueue,clbuffer,pValues,0,NULL,NULL);
    if (sync)
    {
        clFinish( rEnv.mpkCmdQueue );
    }
    return pixd;
}
// Create a read/write device buffer of nElements 32-bit integers and,
// when _pValues is non-NULL, seed it with the host data via a blocking
// mapped write. *pStatus receives the buffer-creation status.
cl_mem allocateIntBuffer( KernelEnv rEnv, const l_uint32 *_pValues, size_t nElements, cl_int *pStatus , bool sync = false)
{
    const size_t byteCount = nElements * sizeof(l_int32);
    cl_mem deviceBuf = clCreateBuffer(rEnv.mpkContext, (cl_mem_flags) CL_MEM_READ_WRITE,
                                      byteCount, NULL, pStatus);
    if (_pValues != NULL)
    {
        l_int32 *mapped = (l_int32 *) clEnqueueMapBuffer(rEnv.mpkCmdQueue, deviceBuf, CL_TRUE,
                                                         CL_MAP_WRITE, 0, byteCount,
                                                         0, NULL, NULL, NULL);
        memcpy(mapped, _pValues, byteCount);
        clEnqueueUnmapMemObject(rEnv.mpkCmdQueue, deviceBuf, mapped, 0, NULL, NULL);
        if (sync)
            clFinish(rEnv.mpkCmdQueue);
    }
    return deviceBuf;
}
// Discover an OpenCL platform/device and populate gpuInfo with a context
// and command queue (unless the env was user-created, in which case only
// the fp64-capability probe at the bottom runs). Device preference is
// GPU, overridden to CPU by the SC_OPENCLCPU environment variable, with
// CPU and then DEFAULT as context-creation fallbacks.
// Returns 0 on success, 1 on any failure.
// FIX: the `platforms` array was leaked on the three error-return paths.
int OpenclDevice::InitOpenclRunEnv( GPUEnv *gpuInfo )
{
    size_t length;
    cl_int clStatus;
    cl_uint numPlatforms, numDevices;
    cl_platform_id *platforms;
    cl_context_properties cps[3];
    char platformName[256];
    unsigned int i;
    // Have a look at the available platforms.
    if ( !gpuInfo->mnIsUserCreated )
    {
        clStatus = clGetPlatformIDs( 0, NULL, &numPlatforms );
        if ( clStatus != CL_SUCCESS )
        {
            return 1;
        }
        gpuInfo->mpPlatformID = NULL;
        if ( 0 < numPlatforms )
        {
            platforms = (cl_platform_id*) malloc( numPlatforms * sizeof( cl_platform_id ) );
            if ( platforms == (cl_platform_id*) NULL )
            {
                return 1;
            }
            clStatus = clGetPlatformIDs( numPlatforms, platforms, NULL );
            if ( clStatus != CL_SUCCESS )
            {
                free( platforms );  // was leaked on this error path
                return 1;
            }
            // Pick the first platform that exposes a device of the wanted type.
            for ( i = 0; i < numPlatforms; i++ )
            {
                clStatus = clGetPlatformInfo( platforms[i], CL_PLATFORM_VENDOR,
                    sizeof( platformName ), platformName, NULL );
                if ( clStatus != CL_SUCCESS )
                {
                    free( platforms );  // was leaked on this error path
                    return 1;
                }
                gpuInfo->mpPlatformID = platforms[i];
                //if (!strcmp(platformName, "Intel(R) Coporation"))
                //if( !strcmp( platformName, "Advanced Micro Devices, Inc." ))
                {
                    gpuInfo->mpPlatformID = platforms[i];
                    if ( getenv("SC_OPENCLCPU") )
                    {
                        clStatus = clGetDeviceIDs(gpuInfo->mpPlatformID, // platform
                                                  CL_DEVICE_TYPE_CPU,   // device_type for CPU device
                                                  0,                    // num_entries
                                                  NULL,                 // devices
                                                  &numDevices);
                        printf("Selecting OpenCL device: CPU (a)\n");
                    }
                    else
                    {
                        clStatus = clGetDeviceIDs(gpuInfo->mpPlatformID, // platform
                                                  CL_DEVICE_TYPE_GPU,   // device_type for GPU device
                                                  0,                    // num_entries
                                                  NULL,                 // devices
                                                  &numDevices);
                        printf("Selecting OpenCL device: GPU (a)\n");
                    }
                    if ( clStatus != CL_SUCCESS )
                        continue;
                    if ( numDevices )
                        break;
                }
            }
            if ( clStatus != CL_SUCCESS )
            {
                free( platforms );  // was leaked on this error path
                return 1;
            }
            free( platforms );
        }
        if ( NULL == gpuInfo->mpPlatformID )
            return 1;
        // Use available platform.
        cps[0] = CL_CONTEXT_PLATFORM;
        cps[1] = (cl_context_properties) gpuInfo->mpPlatformID;
        cps[2] = 0;
        // Set device type for OpenCL
        if ( getenv("SC_OPENCLCPU") )
        {
            gpuInfo->mDevType = CL_DEVICE_TYPE_CPU;
            printf("Selecting OpenCL device: CPU (b)\n");
        }
        else
        {
            gpuInfo->mDevType = CL_DEVICE_TYPE_GPU;
            printf("Selecting OpenCL device: GPU (b)\n");
        }
        gpuInfo->mpContext = clCreateContextFromType( cps, gpuInfo->mDevType, NULL, NULL, &clStatus );
        // Fallback 1: try a CPU context if the preferred type failed.
        if ( ( gpuInfo->mpContext == (cl_context) NULL) || ( clStatus != CL_SUCCESS ) )
        {
            gpuInfo->mDevType = CL_DEVICE_TYPE_CPU;
            gpuInfo->mpContext = clCreateContextFromType( cps, gpuInfo->mDevType, NULL, NULL, &clStatus );
            printf("Selecting OpenCL device: CPU (c)\n");
        }
        // Fallback 2: whatever the implementation considers the default.
        if ( ( gpuInfo->mpContext == (cl_context) NULL) || ( clStatus != CL_SUCCESS ) )
        {
            gpuInfo->mDevType = CL_DEVICE_TYPE_DEFAULT;
            gpuInfo->mpContext = clCreateContextFromType( cps, gpuInfo->mDevType, NULL, NULL, &clStatus );
            printf("Selecting OpenCL device: DEFAULT (c)\n");
        }
        if ( ( gpuInfo->mpContext == (cl_context) NULL) || ( clStatus != CL_SUCCESS ) )
            return 1;
        // Detect OpenCL devices.
        // First, get the size of device list data
        clStatus = clGetContextInfo( gpuInfo->mpContext, CL_CONTEXT_DEVICES, 0, NULL, &length );
        if ( ( clStatus != CL_SUCCESS ) || ( length == 0 ) )
            return 1;
        // Now allocate memory for device list based on the size we got earlier
        gpuInfo->mpArryDevsID = (cl_device_id*) malloc( length );
        if ( gpuInfo->mpArryDevsID == (cl_device_id*) NULL )
            return 1;
        // Now, get the device list data
        clStatus = clGetContextInfo( gpuInfo->mpContext, CL_CONTEXT_DEVICES, length,
                                     gpuInfo->mpArryDevsID, NULL );
        if ( clStatus != CL_SUCCESS )
            return 1;
        // Create OpenCL command queue on the first device.
        gpuInfo->mpCmdQueue = clCreateCommandQueue( gpuInfo->mpContext, gpuInfo->mpArryDevsID[0], 0, &clStatus );
        if ( clStatus != CL_SUCCESS )
            return 1;
    }
    // AMD-specific query; result deliberately ignored.
    clStatus = clGetCommandQueueInfo( gpuInfo->mpCmdQueue, CL_QUEUE_THREAD_HANDLE_AMD, 0, NULL, NULL );
    // Check device extensions for double type
    size_t aDevExtInfoSize = 0;
    clStatus = clGetDeviceInfo( gpuInfo->mpArryDevsID[0], CL_DEVICE_EXTENSIONS, 0, NULL, &aDevExtInfoSize );
    CHECK_OPENCL( clStatus, "clGetDeviceInfo" );
    char *aExtInfo = new char[aDevExtInfoSize];
    clStatus = clGetDeviceInfo( gpuInfo->mpArryDevsID[0], CL_DEVICE_EXTENSIONS,
                                sizeof(char) * aDevExtInfoSize, aExtInfo, NULL);
    CHECK_OPENCL( clStatus, "clGetDeviceInfo" );
    gpuInfo->mnKhrFp64Flag = 0;
    gpuInfo->mnAmdFp64Flag = 0;
    if ( strstr( aExtInfo, "cl_khr_fp64" ) )
    {
        gpuInfo->mnKhrFp64Flag = 1;
    }
    else
    {
        // Check if cl_amd_fp64 extension is supported
        if ( strstr( aExtInfo, "cl_amd_fp64" ) )
            gpuInfo->mnAmdFp64Flag = 1;
    }
    delete []aExtInfo;
    return 0;
}
// Release the file-scope morph-operation buffers.
// FIX: reset the handles to NULL after release so a second call (or a
// later initMorphCLAllocations) does not double-release stale cl_mem's.
void OpenclDevice::releaseMorphCLBuffers()
{
    if (pixdCLIntermediate != NULL)
        clReleaseMemObject(pixdCLIntermediate);
    if (pixsCLBuffer != NULL)
        clReleaseMemObject(pixsCLBuffer);
    if (pixdCLBuffer != NULL)
        clReleaseMemObject(pixdCLBuffer);
    if (pixThBuffer != NULL)
        clReleaseMemObject(pixThBuffer);
    pixdCLIntermediate = pixsCLBuffer = pixdCLBuffer = pixThBuffer = NULL;
}
// Allocate the source/destination/intermediate device buffers used by the
// morphological operations, for an image of h rows of wpl 32-bit words.
// If a thresholding result is already on the device (pixThBuffer), it is
// copied into the source buffer; otherwise the source buffer wraps a
// fresh host copy of pixs's data. Returns the last OpenCL status.
int OpenclDevice::initMorphCLAllocations(l_int32 wpl, l_int32 h, PIX* pixs)
{
SetKernelEnv( &rEnv );
if (pixThBuffer != NULL)
{
pixsCLBuffer = allocateZeroCopyBuffer(rEnv, NULL, wpl*h, CL_MEM_ALLOC_HOST_PTR, &clStatus);
//Get the output from ThresholdToPix operation
clStatus = clEnqueueCopyBuffer(rEnv.mpkCmdQueue, pixThBuffer, pixsCLBuffer, 0, 0, sizeof(l_uint32) * wpl*h, 0, NULL, NULL);
}
else
{
//Get data from the source image
// NOTE(review): srcdata is never freed here. With CL_MEM_USE_HOST_PTR
// the buffer keeps referencing this host memory for its lifetime, so
// it must stay alive — but nothing visible here frees it afterwards.
l_uint32* srcdata = (l_uint32*) malloc(wpl*h*sizeof(l_uint32));
memcpy(srcdata, pixGetData(pixs), wpl*h*sizeof(l_uint32));
pixsCLBuffer = allocateZeroCopyBuffer(rEnv, srcdata, wpl*h, CL_MEM_USE_HOST_PTR, &clStatus);
}
pixdCLBuffer = allocateZeroCopyBuffer(rEnv, NULL, wpl*h, CL_MEM_ALLOC_HOST_PTR, &clStatus);
pixdCLIntermediate = allocateZeroCopyBuffer(rEnv, NULL, wpl*h, CL_MEM_ALLOC_HOST_PTR, &clStatus);
return (int)clStatus;
}
// Public entry point: set up the OpenCL environment and compile the
// kernel programs, either via device selection (micro-benchmark based)
// or via the plain platform/device scan. Always returns 1.
int OpenclDevice::InitEnv()
{
//PERF_COUNT_START("OD::InitEnv")
// printf("[OD] OpenclDevice::InitEnv()\n");
#ifdef SAL_WIN32
// NOTE(review): if LoadOpencl() keeps failing (returns 0) this loop
// never terminates — there is no retry limit or sleep.
while( 1 )
{
if( 1 == LoadOpencl() )
break;
}
PERF_COUNT_SUB("LoadOpencl")
#endif
// sets up environment, compiles programs
#if USE_DEVICE_SELECTION
InitOpenclRunEnv_DeviceSelection( 0 );
//PERF_COUNT_SUB("called InitOpenclRunEnv_DS")
#else
// init according to device
InitOpenclRunEnv( 0 );
#endif
//PERF_COUNT_END
return 1;
}
// Tear down the shared OpenCL environment; on Windows builds also drop
// the dynamically loaded OpenCL library. Always returns 1.
int OpenclDevice::ReleaseOpenclRunEnv()
{
    ReleaseOpenclEnv( &gpuEnv );
#ifdef SAL_WIN32
    FreeOpenclDll();
#endif
    return 1;
}
// Register kernel name kName in slot kCount-1 of the global GPUEnv and
// bump the kernel count. kCount is 1-based; returns 0 on success.
// FIX: the original printed an error for kCount < 1 but then fell
// through and wrote to mArrykernelNames[-1] (out-of-bounds write).
inline int OpenclDevice::AddKernelConfig( int kCount, const char *kName )
{
    if ( kCount < 1 )
    {
        fprintf(stderr,"Error: ( KCount < 1 ) AddKernelConfig\n" );
        return -1;
    }
    strcpy( gpuEnv.mArrykernelNames[kCount-1], kName );
    gpuEnv.mnKernelCount++;
    return 0;
}
// Reset the global GPUEnv (unless a user-created one is in place) and
// register the kernels this wrapper provides. Always returns 0.
int OpenclDevice::RegistOpenclKernel()
{
    if (!gpuEnv.mnIsUserCreated)
        memset(&gpuEnv, 0, sizeof(gpuEnv));
    gpuEnv.mnFileCount = 0;     //argc;
    gpuEnv.mnKernelCount = 0UL;
    AddKernelConfig(1, "oclAverageSub1");
    return 0;
}
// One-time runtime initialization: registers kernels, creates the
// device/context/queue via the GPUEnv overload, then compiles the kernel
// program using the widest floating-point type the device advertises
// (khr double > amd double > float; SC_FLOAT forces float).
// Idempotent via the isInited flag. Returns 0 on success, 1 on failure.
// argc is only range-checked against MAX_CLFILE_NUM, never used further.
int OpenclDevice::InitOpenclRunEnv( int argc )
{
int status = 0;
if ( MAX_CLKERNEL_NUM <= 0 )
{
return 1;
}
if ( ( argc > MAX_CLFILE_NUM ) || ( argc < 0 ) )
return 1;
if ( !isInited )
{
RegistOpenclKernel();
//initialize devices, context, comand_queue
status = InitOpenclRunEnv( &gpuEnv );
if ( status )
{
fprintf(stderr,"init_opencl_env failed.\n");
return 1;
}
fprintf(stderr,"init_opencl_env successed.\n");
//initialize program, kernelName, kernelCount
// SC_FLOAT environment variable forces single-precision kernels even
// when the device supports doubles.
if( getenv( "SC_FLOAT" ) )
{
gpuEnv.mnKhrFp64Flag = 0;
gpuEnv.mnAmdFp64Flag = 0;
}
if( gpuEnv.mnKhrFp64Flag )
{
fprintf(stderr,"----use khr double type in kernel----\n");
status = CompileKernelFile( &gpuEnv, "-D KHR_DP_EXTENSION -Dfp_t=double -Dfp_t4=double4 -Dfp_t16=double16" );
}
else if( gpuEnv.mnAmdFp64Flag )
{
fprintf(stderr,"----use amd double type in kernel----\n");
status = CompileKernelFile( &gpuEnv, "-D AMD_DP_EXTENSION -Dfp_t=double -Dfp_t4=double4 -Dfp_t16=double16" );
}
else
{
fprintf(stderr,"----use float type in kernel----\n");
status = CompileKernelFile( &gpuEnv, "-Dfp_t=float -Dfp_t4=float4 -Dfp_t16=float16" );
}
// CompileKernelFile returns 1 on success, 0 on failure.
if ( status == 0 || gpuEnv.mnKernelCount == 0 )
{
fprintf(stderr,"CompileKernelFile failed.\n");
return 1;
}
fprintf(stderr,"CompileKernelFile successed.\n");
isInited = 1;
}
return 0;
}
// One-time initialization using scored device selection: picks the best
// device (micro-benchmark based, via getDeviceSelection), and if it is an
// OpenCL device, rebuilds the global GPUEnv around it and compiles the
// kernels. When the "native CPU" wins, OpenCL setup is skipped entirely.
// Idempotent via the isInited flag; argc is unused. Always returns 0.
int OpenclDevice::InitOpenclRunEnv_DeviceSelection( int argc ) {
//PERF_COUNT_START("InitOpenclRunEnv_DS")
#if USE_DEVICE_SELECTION
if (!isInited) {
// after programs compiled, selects best device
//printf("[DS] InitOpenclRunEnv_DS::Calling performDeviceSelection()\n");
ds_device bestDevice_DS = getDeviceSelection( );
//PERF_COUNT_SUB("called getDeviceSelection()")
cl_device_id bestDevice = bestDevice_DS.oclDeviceID;
// overwrite global static GPUEnv with new device
if (selectedDeviceIsOpenCL() ) {
//printf("[DS] InitOpenclRunEnv_DS::Calling populateGPUEnvFromDevice() for selected device\n");
populateGPUEnvFromDevice( &gpuEnv, bestDevice );
gpuEnv.mnFileCount = 0; //argc;
gpuEnv.mnKernelCount = 0UL;
//PERF_COUNT_SUB("populate gpuEnv")
CompileKernelFile(&gpuEnv, "");
//PERF_COUNT_SUB("CompileKernelFile")
} else {
//printf("[DS] InitOpenclRunEnv_DS::Skipping populateGPUEnvFromDevice() b/c native cpu selected\n");
}
isInited = 1;
}
#endif
//PERF_COUNT_END
return 0;
}
// Construction is deliberately cheap; environment setup is deferred to an
// explicit InitEnv() call by the owner.
OpenclDevice::OpenclDevice()
{
//InitEnv();
}
// Likewise, teardown is left to an explicit ReleaseOpenclRunEnv() call;
// the destructor intentionally does nothing.
OpenclDevice::~OpenclDevice()
{
//ReleaseOpenclRunEnv();
}
// Release all OpenCL resources (programs, command queue, context) and
// clear the init flag. Returns 1 whether or not anything was released.
// NOTE(review): the loop below reads the static gpuEnv rather than the
// gpuInfo argument; all visible callers pass &gpuEnv, so behavior is the
// same — kept as-is to avoid changing behavior for other callers.
int OpenclDevice::ReleaseOpenclEnv( GPUEnv *gpuInfo )
{
    int i = 0;
    int clStatus = 0;
    if ( !isInited )
    {
        return 1;
    }
    for ( i = 0; i < gpuEnv.mnFileCount; i++ )
    {
        if ( gpuEnv.mpArryPrograms[i] )
        {
            clStatus = clReleaseProgram( gpuEnv.mpArryPrograms[i] );
            CHECK_OPENCL( clStatus, "clReleaseProgram" );
            gpuEnv.mpArryPrograms[i] = NULL;
        }
    }
    if ( gpuEnv.mpCmdQueue )
    {
        clReleaseCommandQueue( gpuEnv.mpCmdQueue );
        gpuEnv.mpCmdQueue = NULL;
    }
    if ( gpuEnv.mpContext )
    {
        clReleaseContext( gpuEnv.mpContext );
        gpuEnv.mpContext = NULL;
    }
    isInited = 0;
    gpuInfo->mnIsUserCreated = 0;
    free( gpuInfo->mpArryDevsID );
    // FIX: clear the freed pointer so a later release/reinit cycle cannot
    // double-free or read a dangling device array.
    gpuInfo->mpArryDevsID = NULL;
    return 1;
}
// Check whether a cached program binary ("<clFileName stem>-<device>.bin")
// exists for any device of the selected type. Returns 1 and passes the
// open FILE* back through *fhandle when the last probed device has a
// binary; 0 otherwise.
// FIXES: a FILE* opened for an earlier device was leaked when a later
// device re-opened fd; an absent ".cl" suffix caused a negative-length
// memcpy (now skipped).
int OpenclDevice::BinaryGenerated( const char * clFileName, FILE ** fhandle )
{
    unsigned int i = 0;
    cl_int clStatus;
    int status = 0;
    char *str = NULL;
    FILE *fd = NULL;
    cl_uint numDevices=0;
    // Match the device type chosen at init time (SC_OPENCLCPU -> CPU).
    if ( getenv("SC_OPENCLCPU") )
    {
        clStatus = clGetDeviceIDs(gpuEnv.mpPlatformID, // platform
                                  CL_DEVICE_TYPE_CPU,  // device_type for CPU device
                                  0,                   // num_entries
                                  NULL,                // devices ID
                                  &numDevices);
    }
    else
    {
        clStatus = clGetDeviceIDs(gpuEnv.mpPlatformID, // platform
                                  CL_DEVICE_TYPE_GPU,  // device_type for GPU device
                                  0,                   // num_entries
                                  NULL,                // devices ID
                                  &numDevices);
    }
    CHECK_OPENCL( clStatus, "clGetDeviceIDs" );
    for ( i = 0; i < numDevices; i++ )
    {
        char fileName[256] = { 0 }, cl_name[128] = { 0 };
        if ( gpuEnv.mpArryDevsID[i] != 0 )
        {
            char deviceName[1024];
            clStatus = clGetDeviceInfo( gpuEnv.mpArryDevsID[i], CL_DEVICE_NAME, sizeof(deviceName), deviceName, NULL );
            CHECK_OPENCL( clStatus, "clGetDeviceInfo" );
            // Build "<stem>-<device>.bin" from the ".cl" file name.
            str = (char*) strstr( clFileName, (char*) ".cl" );
            if ( str == NULL )
                continue;   // no ".cl" suffix: avoid negative-length memcpy
            memcpy( cl_name, clFileName, str - clFileName );
            cl_name[str - clFileName] = '\0';
            sprintf( fileName, "%s-%s.bin", cl_name, deviceName );
            legalizeFileName(fileName);
            if ( fd != NULL )
                fclose( fd );   // was leaked when a later device re-opened fd
            fd = fopen( fileName, "rb" );
            status = ( fd != NULL ) ? 1 : 0;
        }
    }
    if ( fd != NULL )
    {
        *fhandle = fd;
    }
    return status;
}
// Return 1 when a compiled program for clFileName is already cached in
// gpuEnvCached (case-insensitive file-name match), 0 otherwise.
int OpenclDevice::CachedOfKernerPrg( const GPUEnv *gpuEnvCached, const char * clFileName )
{
    for (int idx = 0; idx < gpuEnvCached->mnFileCount; idx++)
    {
        const bool sameFile =
            (strcasecmp(gpuEnvCached->mArryKnelSrcFile[idx], clFileName) == 0);
        if (sameFile && gpuEnvCached->mpArryPrograms[idx] != NULL)
            return 1;
    }
    return 0;
}
// Write numBytes of a program binary to fileName.
// Returns 1 on success, 0 on failure (open error or short write).
// FIX: the fwrite result was previously ignored, so a short write (e.g.
// disk full) was reported as success.
int OpenclDevice::WriteBinaryToFile( const char* fileName, const char* birary, size_t numBytes )
{
    FILE *output = fopen( fileName, "wb" );
    if ( output == NULL )
    {
        return 0;
    }
    size_t written = fwrite( birary, sizeof(char), numBytes, output );
    fclose( output );
    return ( written == numBytes ) ? 1 : 0;
}
// Dump the compiled program's per-device binaries to
// "<clFileName stem>-<device>.bin" files so later runs can skip
// compilation. Returns 1 on success, 0 on failure.
// FIXES: binarySizes, binaries and the device array were leaked on every
// early error return; binarySizes malloc and the ".cl" strstr result were
// unchecked.
int OpenclDevice::GeneratBinFromKernelSource( cl_program program, const char * clFileName )
{
    unsigned int i = 0;
    cl_int clStatus;
    size_t *binarySizes, numDevices;
    cl_device_id *mpArryDevsID;
    char **binaries, *str = NULL;
    clStatus = clGetProgramInfo( program, CL_PROGRAM_NUM_DEVICES,
                                 sizeof(numDevices), &numDevices, NULL );
    CHECK_OPENCL( clStatus, "clGetProgramInfo" );
    mpArryDevsID = (cl_device_id*) malloc( sizeof(cl_device_id) * numDevices );
    if ( mpArryDevsID == NULL )
    {
        return 0;
    }
    /* grab the handles to all of the devices in the program. */
    clStatus = clGetProgramInfo( program, CL_PROGRAM_DEVICES,
                                 sizeof(cl_device_id) * numDevices, mpArryDevsID, NULL );
    CHECK_OPENCL( clStatus, "clGetProgramInfo" );
    /* figure out the sizes of each of the binaries. */
    binarySizes = (size_t*) malloc( sizeof(size_t) * numDevices );
    if ( binarySizes == NULL )
    {
        free( mpArryDevsID );
        return 0;
    }
    clStatus = clGetProgramInfo( program, CL_PROGRAM_BINARY_SIZES,
                                 sizeof(size_t) * numDevices, binarySizes, NULL );
    CHECK_OPENCL( clStatus, "clGetProgramInfo" );
    /* copy over all of the generated binaries. */
    binaries = (char**) malloc( sizeof(char *) * numDevices );
    if ( binaries == NULL )
    {
        free( binarySizes );
        free( mpArryDevsID );
        return 0;
    }
    for ( i = 0; i < numDevices; i++ )
    {
        if ( binarySizes[i] != 0 )
        {
            binaries[i] = (char*) malloc( sizeof(char) * binarySizes[i] );
            if ( binaries[i] == NULL )
            {
                // cleanup all memory allocated so far
                for (unsigned int cleanupIndex = 0; cleanupIndex < i; ++cleanupIndex)
                {
                    free(binaries[cleanupIndex]);
                }
                free(binaries);
                free(binarySizes);
                free(mpArryDevsID);
                return 0;
            }
        }
        else
        {
            binaries[i] = NULL;
        }
    }
    clStatus = clGetProgramInfo( program, CL_PROGRAM_BINARIES,
                                 sizeof(char *) * numDevices, binaries, NULL );
    CHECK_OPENCL(clStatus,"clGetProgramInfo");
    /* dump out each binary into its own separate file. */
    int ok = 1;
    for ( i = 0; i < numDevices && ok; i++ )
    {
        char fileName[256] = { 0 }, cl_name[128] = { 0 };
        if ( binarySizes[i] != 0 )
        {
            char deviceName[1024];
            clStatus = clGetDeviceInfo(mpArryDevsID[i], CL_DEVICE_NAME,
                                       sizeof(deviceName), deviceName, NULL);
            CHECK_OPENCL( clStatus, "clGetDeviceInfo" );
            str = (char*) strstr( clFileName, (char*) ".cl" );
            if ( str == NULL )
            {
                ok = 0;   // malformed kernel file name; avoid OOB memcpy
                break;
            }
            memcpy( cl_name, clFileName, str - clFileName );
            cl_name[str - clFileName] = '\0';
            sprintf( fileName, "%s-%s.bin", cl_name, deviceName );
            legalizeFileName(fileName);
            if ( !WriteBinaryToFile( fileName, binaries[i], binarySizes[i] ) )
            {
                printf("[OD] write binary[%s] failed\n", fileName);
                ok = 0;
            }
            else
                printf("[OD] write binary[%s] succesfully\n", fileName);
        }
    }
    // Release all resources and memory (runs on success and failure).
    for ( i = 0; i < numDevices; i++ )
    {
        if ( binaries[i] != NULL )
        {
            free( binaries[i] );
            binaries[i] = NULL;
        }
    }
    free( binaries );
    free( binarySizes );
    free( mpArryDevsID );
    return ok;
}
// Copy nElements host values into an existing device buffer via a
// blocking mapped write. A NULL _pValues leaves the buffer contents
// untouched (map/unmap still happen). pStatus is accepted for interface
// symmetry but not written.
void copyIntBuffer( KernelEnv rEnv, cl_mem xValues, const l_uint32 *_pValues, size_t nElements, cl_int *pStatus )
{
    l_int32 *mapped = (l_int32 *) clEnqueueMapBuffer(rEnv.mpkCmdQueue, xValues, CL_TRUE,
                                                     CL_MAP_WRITE, 0,
                                                     nElements * sizeof(l_int32),
                                                     0, NULL, NULL, NULL);
    clFinish(rEnv.mpkCmdQueue);
    if (_pValues != NULL)
    {
        for (int idx = 0; idx < (int)nElements; ++idx)
            mapped[idx] = (l_int32) _pValues[idx];
    }
    clEnqueueUnmapMemObject(rEnv.mpkCmdQueue, xValues, mapped, 0, NULL, NULL);
    //clFinish( rEnv.mpkCmdQueue );
}
// Build the OpenCL program for gpuInfo, preferring a cached on-disk
// binary ("kernel-<device>.bin") over compiling the embedded kernel_src.
// On a build failure the compiler log is written to "kernel-build.log".
// Returns 1 on success, 0 on failure (1 immediately if already cached).
int OpenclDevice::CompileKernelFile( GPUEnv *gpuInfo, const char *buildOption )
{
//PERF_COUNT_START("CompileKernelFile")
cl_int clStatus = 0;
size_t length;
char *buildLog = NULL, *binary;
const char *source;
size_t source_size[1];
int b_error, binary_status, binaryExisted, idx;
size_t numDevices;
cl_device_id *mpArryDevsID;
FILE *fd, *fd1;
const char* filename = "kernel.cl";
//fprintf(stderr, "[OD] CompileKernelFile ... \n");
// Already compiled for this file name? Nothing to do.
if ( CachedOfKernerPrg(gpuInfo, filename) == 1 )
{
return 1;
}
idx = gpuInfo->mnFileCount;
// kernel_src is the embedded source from oclkernels.h.
source = kernel_src;
source_size[0] = strlen( source );
binaryExisted = 0;
binaryExisted = BinaryGenerated( filename, &fd ); // don't check for binary during microbenchmark
//PERF_COUNT_SUB("BinaryGenerated")
if ( binaryExisted == 1 )
{
// Fast path: load the cached binary that BinaryGenerated opened in fd.
clStatus = clGetContextInfo( gpuInfo->mpContext, CL_CONTEXT_NUM_DEVICES,
sizeof(numDevices), &numDevices, NULL );
CHECK_OPENCL( clStatus, "clGetContextInfo" );
mpArryDevsID = (cl_device_id*) malloc( sizeof(cl_device_id) * numDevices );
if ( mpArryDevsID == NULL )
{
return 0;
}
//PERF_COUNT_SUB("get numDevices")
// Read the whole binary file into memory (size via fseek/ftell).
b_error = 0;
length = 0;
b_error |= fseek( fd, 0, SEEK_END ) < 0;
b_error |= ( length = ftell(fd) ) <= 0;
b_error |= fseek( fd, 0, SEEK_SET ) < 0;
if ( b_error )
{
// NOTE(review): fd and mpArryDevsID leak on this path.
return 0;
}
binary = (char*) malloc( length + 2 );
if ( !binary )
{
// NOTE(review): fd and mpArryDevsID leak on this path too.
return 0;
}
memset( binary, 0, length + 2 );
b_error |= fread( binary, 1, length, fd ) != length;
fclose( fd );
//PERF_COUNT_SUB("read file")
fd = NULL;
// grab the handles to all of the devices in the context.
clStatus = clGetContextInfo( gpuInfo->mpContext, CL_CONTEXT_DEVICES,
sizeof( cl_device_id ) * numDevices, mpArryDevsID, NULL );
CHECK_OPENCL( clStatus, "clGetContextInfo" );
//PERF_COUNT_SUB("get devices")
//fprintf(stderr, "[OD] Create kernel from binary\n");
// NOTE(review): the same binary/length is passed for every device;
// presumably valid because all context devices are identical here.
gpuInfo->mpArryPrograms[idx] = clCreateProgramWithBinary( gpuInfo->mpContext,numDevices,
mpArryDevsID, &length, (const unsigned char**) &binary,
&binary_status, &clStatus );
CHECK_OPENCL( clStatus, "clCreateProgramWithBinary" );
//PERF_COUNT_SUB("clCreateProgramWithBinary")
free( binary );
free( mpArryDevsID );
mpArryDevsID = NULL;
//PERF_COUNT_SUB("binaryExisted")
}
else
{
// create a CL program using the kernel source
//fprintf(stderr, "[OD] Create kernel from source\n");
gpuInfo->mpArryPrograms[idx] = clCreateProgramWithSource( gpuInfo->mpContext, 1, &source,
source_size, &clStatus);
CHECK_OPENCL( clStatus, "clCreateProgramWithSource" );
//PERF_COUNT_SUB("!binaryExisted")
}
if ( gpuInfo->mpArryPrograms[idx] == (cl_program) NULL )
{
return 0;
}
//char options[512];
// create a cl program executable for all the devices specified
//printf("[OD] BuildProgram.\n");
PERF_COUNT_START("OD::CompileKernel::clBuildProgram")
// User-created envs carry a single device in mpDevID; discovered envs
// use the first entry of the device array.
if (!gpuInfo->mnIsUserCreated)
{
clStatus = clBuildProgram(gpuInfo->mpArryPrograms[idx], 1, gpuInfo->mpArryDevsID,
buildOption, NULL, NULL);
//PERF_COUNT_SUB("clBuildProgram notUserCreated")
}
else
{
clStatus = clBuildProgram(gpuInfo->mpArryPrograms[idx], 1, &(gpuInfo->mpDevID),
buildOption, NULL, NULL);
//PERF_COUNT_SUB("clBuildProgram isUserCreated")
}
PERF_COUNT_END
if ( clStatus != CL_SUCCESS )
{
// Build failed: fetch the compiler log (size first, then contents)
// and dump it to kernel-build.log for diagnosis.
printf ("BuildProgram error!\n");
if ( !gpuInfo->mnIsUserCreated )
{
clStatus = clGetProgramBuildInfo( gpuInfo->mpArryPrograms[idx], gpuInfo->mpArryDevsID[0],
CL_PROGRAM_BUILD_LOG, 0, NULL, &length );
}
else
{
clStatus = clGetProgramBuildInfo( gpuInfo->mpArryPrograms[idx], gpuInfo->mpDevID,
CL_PROGRAM_BUILD_LOG, 0, NULL, &length);
}
if ( clStatus != CL_SUCCESS )
{
printf("opencl create build log fail\n");
return 0;
}
buildLog = (char*) malloc( length );
if ( buildLog == (char*) NULL )
{
return 0;
}
if ( !gpuInfo->mnIsUserCreated )
{
clStatus = clGetProgramBuildInfo( gpuInfo->mpArryPrograms[idx], gpuInfo->mpArryDevsID[0],
CL_PROGRAM_BUILD_LOG, length, buildLog, &length );
}
else
{
clStatus = clGetProgramBuildInfo( gpuInfo->mpArryPrograms[idx], gpuInfo->mpDevID,
CL_PROGRAM_BUILD_LOG, length, buildLog, &length );
}
if ( clStatus != CL_SUCCESS )
{
printf("opencl program build info fail\n");
return 0;
}
fd1 = fopen( "kernel-build.log", "w+" );
if ( fd1 != NULL )
{
fwrite( buildLog, sizeof(char), length, fd1 );
fclose( fd1 );
}
free( buildLog );
//PERF_COUNT_SUB("build error log")
return 0;
}
// Record the source file name so CachedOfKernerPrg finds it next time.
strcpy( gpuInfo->mArryKnelSrcFile[idx], filename );
//PERF_COUNT_SUB("strcpy")
// When we compiled from source, cache the binary for future runs.
if ( binaryExisted == 0 ) {
GeneratBinFromKernelSource( gpuInfo->mpArryPrograms[idx], filename );
PERF_COUNT_SUB("GenerateBinFromKernelSource")
}
gpuInfo->mnFileCount += 1;
//PERF_COUNT_END
return 1;
}
// Run the "composeRGBPixel" kernel over w*h raw TIFF words and return the
// composed RGB pixels in a malloc'd host array (caller frees). tiffdata
// is wrapped zero-copy as the kernel input; the output buffer wraps the
// returned array, so the map/unmap + clFinish below make the results
// visible to the host before returning.
l_uint32* OpenclDevice::pixReadFromTiffKernel(l_uint32 *tiffdata,l_int32 w,l_int32 h,l_int32 wpl,l_uint32 *line)
{
PERF_COUNT_START("pixReadFromTiffKernel")
cl_int clStatus;
KernelEnv rEnv;
size_t globalThreads[2];
size_t localThreads[2];
int gsize;
cl_mem valuesCl;
cl_mem outputCl;
//global and local work dimensions for Horizontal pass
// Round the global sizes up to multiples of the workgroup dimensions.
gsize = (w + GROUPSIZE_X - 1)/ GROUPSIZE_X * GROUPSIZE_X;
globalThreads[0] = gsize;
gsize = (h + GROUPSIZE_Y - 1)/ GROUPSIZE_Y * GROUPSIZE_Y;
globalThreads[1] = gsize;
localThreads[0] = GROUPSIZE_X;
localThreads[1] = GROUPSIZE_Y;
SetKernelEnv( &rEnv );
l_uint32 *pResult = (l_uint32 *)malloc(w*h * sizeof(l_uint32));
rEnv.mpkKernel = clCreateKernel( rEnv.mpkProgram, "composeRGBPixel", &clStatus );
CHECK_OPENCL( clStatus, "clCreateKernel");
//Allocate input and output OCL buffers
valuesCl = allocateZeroCopyBuffer(rEnv, tiffdata, w*h, CL_MEM_READ_ONLY | CL_MEM_USE_HOST_PTR, &clStatus);
outputCl = allocateZeroCopyBuffer(rEnv, pResult, w*h, CL_MEM_WRITE_ONLY | CL_MEM_USE_HOST_PTR, &clStatus);
//Kernel arguments
clStatus = clSetKernelArg( rEnv.mpkKernel, 0, sizeof(cl_mem), (void *)&valuesCl );
CHECK_OPENCL( clStatus, "clSetKernelArg");
clStatus = clSetKernelArg( rEnv.mpkKernel, 1, sizeof(w), (void *)&w );
CHECK_OPENCL( clStatus, "clSetKernelArg" );
clStatus = clSetKernelArg( rEnv.mpkKernel, 2, sizeof(h), (void *)&h );
CHECK_OPENCL( clStatus, "clSetKernelArg" );
clStatus = clSetKernelArg( rEnv.mpkKernel, 3, sizeof(wpl), (void *)&wpl );
CHECK_OPENCL( clStatus, "clSetKernelArg" );
clStatus = clSetKernelArg( rEnv.mpkKernel, 4, sizeof(cl_mem), (void *)&outputCl );
CHECK_OPENCL( clStatus, "clSetKernelArg");
//Kernel enqueue
PERF_COUNT_SUB("before")
clStatus = clEnqueueNDRangeKernel( rEnv.mpkCmdQueue, rEnv.mpkKernel, 2, NULL, globalThreads, localThreads, 0, NULL, NULL );
CHECK_OPENCL( clStatus, "clEnqueueNDRangeKernel" );
/* map results back from gpu */
void *ptr = clEnqueueMapBuffer(rEnv.mpkCmdQueue, outputCl, CL_TRUE, CL_MAP_READ, 0, w*h * sizeof(l_uint32), 0, NULL, NULL, &clStatus);
CHECK_OPENCL( clStatus, "clEnqueueMapBuffer outputCl");
clEnqueueUnmapMemObject(rEnv.mpkCmdQueue, outputCl, ptr, 0, NULL, NULL);
//Sync
clFinish( rEnv.mpkCmdQueue );
PERF_COUNT_SUB("kernel & map")
PERF_COUNT_END
// NOTE(review): valuesCl/outputCl and the kernel object are not released
// here — confirm whether the caller or a later teardown owns them.
return pResult;
}
// Open a TIFF file and decode page n through the OpenCL-assisted stream
// reader. Returns the decoded PIX, or NULL (with an error logged) when
// the file cannot be opened or the page cannot be read.
PIX * OpenclDevice::pixReadTiffCl ( const char *filename, l_int32 n )
{
    PERF_COUNT_START("pixReadTiffCL")
    PROCNAME("pixReadTiff");
    if (!filename)
        return (PIX *)ERROR_PTR("filename not defined", procName, NULL);
    FILE *fp = fopenReadStream(filename);
    if (fp == NULL)
        return (PIX *)ERROR_PTR("image file not found", procName, NULL);
    // The stream is closed on both the success and failure paths.
    PIX *pix = pixReadStreamTiffCl(fp, n);
    fclose(fp);
    if (pix == NULL)
        return (PIX *)ERROR_PTR("pix not read", procName, NULL);
    PERF_COUNT_END
    return pix;
}
// Wrap an already-open FILE* as a libtiff TIFF* by handing over its file
// descriptor (rewound to the start). Returns NULL with an error logged
// when any argument is invalid.
TIFF *
OpenclDevice::fopenTiffCl(FILE *fp,
const char *modestring)
{
    PROCNAME("fopenTiff");
    if (!fp)
        return (TIFF *)ERROR_PTR("stream not opened", procName, NULL);
    if (!modestring)
        return (TIFF *)ERROR_PTR("modestring not defined", procName, NULL);
    const l_int32 fd = fileno(fp);
    if (fd < 0)
        return (TIFF *)ERROR_PTR("invalid file descriptor", procName, NULL);
    lseek(fd, 0, SEEK_SET);
    return TIFFFdOpen(fd, "TIFFstream", modestring);
}
// Read the x/y resolution of an open TIFF into *pxres/*pyres in pixels
// per inch (converting from pixels/cm when the unit tag says so). When
// only one axis is present the other mirrors it. Returns 0 on success,
// 1 when no resolution is recorded or the arguments are invalid.
l_int32 OpenclDevice::getTiffStreamResolutionCl(TIFF *tif,
l_int32 *pxres,
l_int32 *pyres)
{
    PROCNAME("getTiffStreamResolution");
    if (!tif)
        return ERROR_INT("tif not opened", procName, 1);
    if (!pxres || !pyres)
        return ERROR_INT("&xres and &yres not both defined", procName, 1);
    *pxres = *pyres = 0;
    l_uint16 resunit;
    l_float32 fxres, fyres;
    TIFFGetFieldDefaulted(tif, TIFFTAG_RESOLUTIONUNIT, &resunit);
    const l_int32 haveX = TIFFGetField(tif, TIFFTAG_XRESOLUTION, &fxres);
    const l_int32 haveY = TIFFGetField(tif, TIFFTAG_YRESOLUTION, &fyres);
    if (!haveX && !haveY)
        return 1;
    // Mirror the present axis into the missing one.
    if (!haveX)
        fxres = fyres;
    else if (!haveY)
        fyres = fxres;
    if (resunit == RESUNIT_CENTIMETER) {
        /* convert pixels/cm to ppi, rounding to nearest */
        *pxres = (l_int32)(2.54 * fxres + 0.5);
        *pyres = (l_int32)(2.54 * fyres + 0.5);
    } else {
        *pxres = (l_int32)fxres;
        *pyres = (l_int32)fyres;
    }
    return 0;
}
// In-memory byte stream backing the libtiff client callbacks below.
// Used in two modes: reading from a fixed caller-owned buffer, or
// writing to a growable internally-allocated buffer that is handed back
// through poutdata/poutsize on close.
struct L_Memstream
{
l_uint8 *buffer; /* expands to hold data when written to; */
/* fixed size when read from. */
size_t bufsize; /* current size allocated when written to; */
/* fixed size of input data when read from. */
size_t offset; /* byte offset from beginning of buffer. */
size_t hw; /* high-water mark; max bytes in buffer. */
l_uint8 **poutdata; /* input param for writing; data goes here. */
size_t *poutsize; /* input param for writing; data size goes here. */
};
typedef struct L_Memstream L_MEMSTREAM;
/* These are static functions for memory I/O */
static L_MEMSTREAM *memstreamCreateForRead(l_uint8 *indata, size_t pinsize);
static L_MEMSTREAM *memstreamCreateForWrite(l_uint8 **poutdata,
size_t *poutsize);
static tsize_t tiffReadCallback(thandle_t handle, tdata_t data, tsize_t length);
static tsize_t tiffWriteCallback(thandle_t handle, tdata_t data,
tsize_t length);
static toff_t tiffSeekCallback(thandle_t handle, toff_t offset, l_int32 whence);
static l_int32 tiffCloseCallback(thandle_t handle);
static toff_t tiffSizeCallback(thandle_t handle);
static l_int32 tiffMapCallback(thandle_t handle, tdata_t *data, toff_t *length);
static void tiffUnmapCallback(thandle_t handle, tdata_t data, toff_t length);
// Wrap an existing fixed-size data array for reading. The stream does
// not take ownership of indata; only the L_MEMSTREAM struct itself is
// allocated here (and freed by tiffCloseCallback).
static L_MEMSTREAM *
memstreamCreateForRead(l_uint8 *indata,
size_t insize)
{
    L_MEMSTREAM *mstream = (L_MEMSTREAM *)CALLOC(1, sizeof(L_MEMSTREAM));
    mstream->buffer  = indata;   /* handle to input data array */
    mstream->bufsize = insize;   /* amount of input data */
    mstream->hw      = insize;   /* high-water mark fixed at input size */
    mstream->offset  = 0;        /* reads start at the beginning */
    return mstream;
}
// Create a growable write stream starting with an 8 KB zeroed buffer.
// The output pointers are stashed so tiffCloseCallback can hand the
// final buffer and its size back to the caller.
static L_MEMSTREAM *
memstreamCreateForWrite(l_uint8 **poutdata,
size_t *poutsize)
{
    L_MEMSTREAM *mstream = (L_MEMSTREAM *)CALLOC(1, sizeof(L_MEMSTREAM));
    mstream->buffer   = (l_uint8 *)CALLOC(8 * 1024, 1);
    mstream->bufsize  = 8 * 1024;
    mstream->poutdata = poutdata;  /* used only at end of write */
    mstream->poutsize = poutsize;  /* ditto */
    mstream->hw = mstream->offset = 0;
    return mstream;
}
// libtiff read callback: copy up to `length` bytes out of the memstream,
// clamped to the bytes remaining before the high-water mark, and advance
// the offset. Returns the number of bytes actually copied.
static tsize_t
tiffReadCallback(thandle_t handle,
tdata_t data,
tsize_t length)
{
    L_MEMSTREAM *ms = (L_MEMSTREAM *)handle;
    size_t remaining = ms->hw - ms->offset;
    size_t amount = ((size_t)length < remaining) ? (size_t)length : remaining;
    memcpy(data, ms->buffer + ms->offset, amount);
    ms->offset += amount;
    return amount;
}
// libtiff write callback: append `length` bytes to the memstream,
// doubling (with headroom) the buffer via reallocNew when needed, and
// advance offset and the high-water mark. Always reports `length`
// bytes written.
static tsize_t
tiffWriteCallback(thandle_t handle,
tdata_t data,
tsize_t length)
{
L_MEMSTREAM *mstream;
size_t newsize;
/* reallocNew() uses calloc to initialize the array.
* If malloc is used instead, for some of the encoding methods,
* not all the data in 'bufsize' bytes in the buffer will
* have been initialized by the end of the compression. */
mstream = (L_MEMSTREAM *)handle;
if (mstream->offset + length > mstream->bufsize) {
newsize = 2 * (mstream->offset + length);
// NOTE(review): the reallocNew result is not checked for NULL; an
// allocation failure here would crash in the memcpy below.
mstream->buffer = (l_uint8 *)reallocNew((void **)&mstream->buffer,
mstream->offset, newsize);
mstream->bufsize = newsize;
}
memcpy(mstream->buffer + mstream->offset, data, length);
mstream->offset += length;
mstream->hw = L_MAX(mstream->offset, mstream->hw);
return length;
}
static toff_t
tiffSeekCallback(thandle_t handle,
                 toff_t offset,
                 l_int32 whence)
{
    /* fseek-style repositioning within the memstream. */
    L_MEMSTREAM *ms;
    PROCNAME("tiffSeekCallback");
    ms = (L_MEMSTREAM *)handle;
    switch (whence) {
    case SEEK_SET:
        ms->offset = offset;              /* absolute position */
        break;
    case SEEK_CUR:
        ms->offset += offset;             /* relative to current position */
        break;
    case SEEK_END:
        ms->offset = ms->hw - offset;     /* from the end; offset >= 0 */
        break;
    default:
        return (toff_t)ERROR_INT("bad whence value", procName,
                                 ms->offset);
    }
    return ms->offset;
}
static l_int32
tiffCloseCallback(thandle_t handle)
{
    /* On close, publish the written data back to the caller's pointers
     * (write mode only), then free the stream struct.  The data buffer
     * itself is never freed here -- ownership passes to the caller. */
    L_MEMSTREAM *ms = (L_MEMSTREAM *)handle;
    if (ms->poutdata) {
        *ms->poutdata = ms->buffer;
        *ms->poutsize = ms->hw;
    }
    FREE(ms);
    return 0;
}
static toff_t
tiffSizeCallback(thandle_t handle)
{
    /* Report the logical stream size: the high-water mark. */
    return ((L_MEMSTREAM *)handle)->hw;
}
static l_int32
tiffMapCallback(thandle_t handle,
                tdata_t *data,
                toff_t *length)
{
    /* "Map" the stream by exposing the underlying buffer directly. */
    L_MEMSTREAM *ms = (L_MEMSTREAM *)handle;
    *data = ms->buffer;
    *length = ms->hw;
    return 0;
}
static void
tiffUnmapCallback(thandle_t handle,
                  tdata_t data,
                  toff_t length)
{
    /* Nothing to do: tiffMapCallback exposes the buffer in place. */
}
/*!
 *  fopenTiffMemstream()
 *
 *      Input:  filename (for error output; can be "")
 *              operation ("w" for write, "r" for read)
 *              &data (<return> written data)
 *              &datasize (<return> size of written data)
 *      Return: tiff (data structure, opened for write to memory)
 *
 *  Notes:
 *      (1) This wraps up a number of callbacks for either:
 *            * reading from tiff in memory buffer --> pix
 *            * writing from pix --> tiff in memory buffer
 *      (2) After use, the memstream is automatically destroyed when
 *          TIFFClose() is called.  TIFFCleanup() doesn't free the memstream.
 */
static TIFF *
fopenTiffMemstream(const char *filename,
                   const char *operation,
                   l_uint8 **pdata,
                   size_t *pdatasize)
{
    L_MEMSTREAM *mstream;
    PROCNAME("fopenTiffMemstream");
    if (!filename)
        return (TIFF *)ERROR_PTR("filename not defined", procName, NULL);
    if (!operation)
        return (TIFF *)ERROR_PTR("operation not defined", procName, NULL);
    if (!pdata)
        return (TIFF *)ERROR_PTR("&data not defined", procName, NULL);
    if (!pdatasize)
        return (TIFF *)ERROR_PTR("&datasize not defined", procName, NULL);
    /* Bug fix: the old test used !strcmp(...) && !strcmp(...), which can
     * never be true (operation cannot equal both "r" and "w"), so invalid
     * modes slipped through.  Reject when it is neither "r" nor "w". */
    if (strcmp(operation, "r") && strcmp(operation, "w"))
        return (TIFF *)ERROR_PTR("operation not 'r' or 'w'", procName, NULL);
    if (!strcmp(operation, "r"))
        mstream = memstreamCreateForRead(*pdata, *pdatasize);
    else
        mstream = memstreamCreateForWrite(pdata, pdatasize);
    /* Hand all I/O through the memstream callbacks above. */
    return TIFFClientOpen(filename, operation, mstream,
                          tiffReadCallback, tiffWriteCallback,
                          tiffSeekCallback, tiffCloseCallback,
                          tiffSizeCallback, tiffMapCallback,
                          tiffUnmapCallback);
}
/* Read page n of a multipage tiff held in a memory buffer, decoding the
 * raster via the OpenCL path.  Returns NULL if the page is missing or
 * cannot be decoded; does not take ownership of 'data'. */
PIX *
OpenclDevice::pixReadMemTiffCl(const l_uint8 *data, size_t size, l_int32 n)
{
    l_int32 i, pagefound;
    PIX *pix;
    TIFF *tif;
    PROCNAME("pixReadMemTiffCl");
    if (!data)
        return (PIX *)ERROR_PTR("data pointer is NULL", procName, NULL);
    /* NOTE(review): the cast drops const; the "r" memstream only reads
     * through this pointer, so the buffer is not modified. */
    if ((tif = fopenTiffMemstream("", "r", (l_uint8 **)&data, &size)) == NULL)
        return (PIX *)ERROR_PTR("tif not opened", procName, NULL);
    pagefound = FALSE;
    pix = NULL;
    for (i = 0; i < MAX_PAGES_IN_TIFF_FILE; i++) {
        if (i == n) {
            pagefound = TRUE;
            if ((pix = pixReadFromTiffStreamCl(tif)) == NULL) {
                TIFFCleanup(tif);
                return (PIX *)ERROR_PTR("pix not read", procName, NULL);
            }
            break;
        }
        if (TIFFReadDirectory(tif) == 0)
            break;   /* fewer than n pages in the file */
    }
    if (pagefound == FALSE) {
        /* Bug fix: the %d format previously had no matching argument;
         * pass 'n' as the sibling pixReadStreamTiffCl does. */
        L_WARNING("tiff page %d not found", procName, n);
        TIFFCleanup(tif);
        return NULL;
    }
    TIFFCleanup(tif);
    return pix;
}
/* Read page n of a multipage tiff from an open stream, decoding the
 * raster via the OpenCL path.  Returns NULL if the page is missing or
 * cannot be decoded. */
PIX *
OpenclDevice::pixReadStreamTiffCl(FILE *fp,
                                  l_int32 n)
{
    PIX *pix = NULL;
    TIFF *tif;
    l_int32 page, found = FALSE;
    PROCNAME("pixReadStreamTiff");
    if (!fp)
        return (PIX *)ERROR_PTR("stream not defined", procName, NULL);
    if ((tif = fopenTiffCl(fp, "rb")) == NULL)
        return (PIX *)ERROR_PTR("tif not opened", procName, NULL);
    /* Walk the tiff directories until the n-th one is reached. */
    for (page = 0; page < MAX_PAGES_IN_TIFF_FILE; page++) {
        if (page == n) {
            found = TRUE;
            if ((pix = pixReadFromTiffStreamCl(tif)) == NULL) {
                TIFFCleanup(tif);
                return (PIX *)ERROR_PTR("pix not read", procName, NULL);
            }
            break;
        }
        if (TIFFReadDirectory(tif) == 0)
            break;   /* ran out of directories before page n */
    }
    if (found == FALSE) {
        L_WARNING("tiff page %d not found", procName, n);
        TIFFCleanup(tif);
        return NULL;
    }
    TIFFCleanup(tif);
    return pix;
}
/* Map a libtiff compression tag to the corresponding leptonica
 * input-format code (IFF_TIFF for anything unrecognized). */
static l_int32
getTiffCompressedFormat(l_uint16 tiffcomp)
{
    switch (tiffcomp) {
    case COMPRESSION_CCITTFAX4:     return IFF_TIFF_G4;
    case COMPRESSION_CCITTFAX3:     return IFF_TIFF_G3;
    case COMPRESSION_CCITTRLE:      return IFF_TIFF_RLE;
    case COMPRESSION_PACKBITS:      return IFF_TIFF_PACKBITS;
    case COMPRESSION_LZW:           return IFF_TIFF_LZW;
    case COMPRESSION_ADOBE_DEFLATE: return IFF_TIFF_ZIP;
    default:                        return IFF_TIFF;
    }
}
/* Debug helper (micro-benchmark): report on stdout whether two word
 * arrays of 'size' elements are identical.  Stops at the first
 * mismatch.  Fix: corrected the "doesnot match" typo in the output. */
void compare(l_uint32 *cpu, l_uint32 *gpu, int size)
{
    for (int i = 0; i < size; i++) {
        if (cpu[i] != gpu[i]) {
            printf("\ndoes not match\n");
            return;
        }
    }
    printf("\nit matches\n");
}
//OpenCL implementation of pixReadFromTiffStream.
//Similar to the CPU implementation of pixReadFromTiffStream.
//Decodes the current tiff directory into a PIX; for spp==3/4 images the
//RGBA raster unpacking is offloaded to the device via pixReadFromTiffKernel.
PIX *
OpenclDevice::pixReadFromTiffStreamCl(TIFF *tif)
{
l_uint8 *linebuf, *data;
l_uint16 spp, bps, bpp, tiffbpl, photometry, tiffcomp, orientation;
l_uint16 *redmap, *greenmap, *bluemap;
l_int32 d, wpl, bpl, comptype, i, ncolors;
l_int32 xres, yres;
l_uint32 w, h;
l_uint32 *line, *tiffdata;
PIX *pix;
PIXCMAP *cmap;
PROCNAME("pixReadFromTiffStream");
if (!tif)
return (PIX *)ERROR_PTR("tif not defined", procName, NULL);
/* Derive the pix depth from bits/sample and samples/pixel. */
TIFFGetFieldDefaulted(tif, TIFFTAG_BITSPERSAMPLE, &bps);
TIFFGetFieldDefaulted(tif, TIFFTAG_SAMPLESPERPIXEL, &spp);
bpp = bps * spp;
if (bpp > 32)
return (PIX *)ERROR_PTR("can't handle bpp > 32", procName, NULL);
if (spp == 1)
d = bps;  /* grayscale / binary / colormapped */
else if (spp == 3 || spp == 4)
d = 32;   /* rgb or rgba packed into 32-bit words */
else
return (PIX *)ERROR_PTR("spp not in set {1,3,4}", procName, NULL);
TIFFGetField(tif, TIFFTAG_IMAGEWIDTH, &w);
TIFFGetField(tif, TIFFTAG_IMAGELENGTH, &h);
tiffbpl = TIFFScanlineSize(tif);
if ((pix = pixCreate(w, h, d)) == NULL)
return (PIX *)ERROR_PTR("pix not made", procName, NULL);
data = (l_uint8 *)pixGetData(pix);
wpl = pixGetWpl(pix);
bpl = 4 * wpl;
if (spp == 1) {
/* Single sample: copy scanlines straight into the pix on the CPU.
 * NOTE(review): 'i' is signed l_int32 compared against unsigned 'h';
 * fine for realistic image heights. */
if ((linebuf = (l_uint8 *)CALLOC(tiffbpl + 1, sizeof(l_uint8))) == NULL)
return (PIX *)ERROR_PTR("calloc fail for linebuf", procName, NULL);
for (i = 0 ; i < h ; i++) {
if (TIFFReadScanline(tif, linebuf, i, 0) < 0) {
FREE(linebuf);
pixDestroy(&pix);
return (PIX *)ERROR_PTR("line read fail", procName, NULL);
}
memcpy((char *)data, (char *)linebuf, tiffbpl);
data += bpl;
}
/* Convert the raster to leptonica's endian convention. */
if (bps <= 8)
pixEndianByteSwap(pix);
else
pixEndianTwoByteSwap(pix);
FREE(linebuf);
}
else {
/* Multi-sample: let libtiff produce an RGBA raster, then unpack it
 * into pix words on the device. */
if ((tiffdata = (l_uint32 *)CALLOC(w * h, sizeof(l_uint32))) == NULL) {
pixDestroy(&pix);
return (PIX *)ERROR_PTR("calloc fail for tiffdata", procName, NULL);
}
if (!TIFFReadRGBAImageOriented(tif, w, h, (uint32 *)tiffdata,
ORIENTATION_TOPLEFT, 0)) {
FREE(tiffdata);
pixDestroy(&pix);
return (PIX *)ERROR_PTR("failed to read tiffdata", procName, NULL);
}
line = pixGetData(pix);
//Invoke the OpenCL kernel for pixReadFromTiff
/* NOTE(review): pixSetData() replaces the buffer allocated by
 * pixCreate(); presumably pixReadFromTiffKernel returns 'line' itself
 * or a buffer whose ownership transfers to the pix -- confirm the
 * original data buffer is not leaked. */
l_uint32* output_gpu=pixReadFromTiffKernel(tiffdata,w,h,wpl,line);
pixSetData(pix, output_gpu);
FREE(tiffdata);
}
/* Carry over resolution and compression-format metadata. */
if (getTiffStreamResolutionCl(tif, &xres, &yres) == 0) {
pixSetXRes(pix, xres);
pixSetYRes(pix, yres);
}
TIFFGetFieldDefaulted(tif, TIFFTAG_COMPRESSION, &tiffcomp);
comptype = getTiffCompressedFormat(tiffcomp);
pixSetInputFormat(pix, comptype);
if (TIFFGetField(tif, TIFFTAG_COLORMAP, &redmap, &greenmap, &bluemap)) {
/* Colormapped image: build a PIXCMAP from the 16-bit tiff maps
 * (high byte of each 16-bit entry becomes the 8-bit color). */
if ((cmap = pixcmapCreate(bps)) == NULL) {
pixDestroy(&pix);
return (PIX *)ERROR_PTR("cmap not made", procName, NULL);
}
ncolors = 1 << bps;
for (i = 0; i < ncolors; i++)
pixcmapAddColor(cmap, redmap[i] >> 8, greenmap[i] >> 8,
bluemap[i] >> 8);
pixSetColormap(pix, cmap);
}
else {
/* No colormap: infer photometry (fax compressions default to
 * min-is-white) and invert when the pix polarity disagrees with
 * leptonica's conventions. */
if (!TIFFGetField(tif, TIFFTAG_PHOTOMETRIC, &photometry)) {
if (tiffcomp == COMPRESSION_CCITTFAX3 ||
tiffcomp == COMPRESSION_CCITTFAX4 ||
tiffcomp == COMPRESSION_CCITTRLE ||
tiffcomp == COMPRESSION_CCITTRLEW) {
photometry = PHOTOMETRIC_MINISWHITE;
}
else
photometry = PHOTOMETRIC_MINISBLACK;
}
if ((d == 1 && photometry == PHOTOMETRIC_MINISBLACK) ||
(d == 8 && photometry == PHOTOMETRIC_MINISWHITE))
pixInvert(pix, pix);
}
if (TIFFGetField(tif, TIFFTAG_ORIENTATION, &orientation)) {
if (orientation >= 1 && orientation <= 8) {
/* Apply the flips/rotation requested by the ORIENTATION tag,
 * using the static lookup table at the top of the file. */
struct tiff_transform *transform =
&tiff_orientation_transforms[orientation - 1];
if (transform->vflip) pixFlipTB(pix, pix);
if (transform->hflip) pixFlipLR(pix, pix);
if (transform->rotate) {
PIX *oldpix = pix;
pix = pixRotate90(oldpix, transform->rotate);
pixDestroy(&oldpix);
}
}
}
return pix;
}
//Morphology Dilate operation for 5x5 structuring element. Invokes the relevant OpenCL kernels
cl_int
pixDilateCL_55(l_int32 wpl, l_int32 h)
{
    /* 5x5 brick dilation as separable horizontal + vertical passes over
     * the global pixsCLBuffer/pixdCLBuffer pair.  The buffers are
     * swapped between passes so the second pass reads the output of
     * the first. */
    size_t global[2], local[2];
    cl_mem swapbuf;
    cl_int status;
    int gsize;

    /* Horizontal pass: linear launch over all words, grouped along x. */
    gsize = (wpl * h + GROUPSIZE_HMORX - 1) / GROUPSIZE_HMORX * GROUPSIZE_HMORX;
    global[0] = gsize;
    global[1] = GROUPSIZE_HMORY;
    local[0] = GROUPSIZE_HMORX;
    local[1] = GROUPSIZE_HMORY;
    rEnv.mpkKernel = clCreateKernel(rEnv.mpkProgram, "morphoDilateHor_5x5", &status);
    status = clSetKernelArg(rEnv.mpkKernel, 0, sizeof(cl_mem), &pixsCLBuffer);
    status = clSetKernelArg(rEnv.mpkKernel, 1, sizeof(cl_mem), &pixdCLBuffer);
    status = clSetKernelArg(rEnv.mpkKernel, 2, sizeof(wpl), (const void *)&wpl);
    status = clSetKernelArg(rEnv.mpkKernel, 3, sizeof(h), (const void *)&h);
    status = clEnqueueNDRangeKernel(rEnv.mpkCmdQueue, rEnv.mpkKernel, 2, NULL,
                                    global, local, 0, NULL, NULL);

    /* Swap so the vertical pass consumes the horizontal result. */
    swapbuf = pixsCLBuffer;
    pixsCLBuffer = pixdCLBuffer;
    pixdCLBuffer = swapbuf;

    /* Vertical pass: 2D launch over (words-per-line, rows). */
    gsize = (wpl + GROUPSIZE_X - 1) / GROUPSIZE_X * GROUPSIZE_X;
    global[0] = gsize;
    gsize = (h + GROUPSIZE_Y - 1) / GROUPSIZE_Y * GROUPSIZE_Y;
    global[1] = gsize;
    local[0] = GROUPSIZE_X;
    local[1] = GROUPSIZE_Y;
    rEnv.mpkKernel = clCreateKernel(rEnv.mpkProgram, "morphoDilateVer_5x5", &status);
    status = clSetKernelArg(rEnv.mpkKernel, 0, sizeof(cl_mem), &pixsCLBuffer);
    status = clSetKernelArg(rEnv.mpkKernel, 1, sizeof(cl_mem), &pixdCLBuffer);
    status = clSetKernelArg(rEnv.mpkKernel, 2, sizeof(wpl), (const void *)&wpl);
    status = clSetKernelArg(rEnv.mpkKernel, 3, sizeof(h), (const void *)&h);
    status = clEnqueueNDRangeKernel(rEnv.mpkCmdQueue, rEnv.mpkKernel, 2, NULL,
                                    global, local, 0, NULL, NULL);
    return status;
}
//Morphology Erode operation for 5x5 structuring element. Invokes the relevant OpenCL kernels
cl_int
pixErodeCL_55(l_int32 wpl, l_int32 h)
{
    /* 5x5 brick erosion as separable horizontal + vertical passes over
     * the global pixsCLBuffer/pixdCLBuffer pair, swapping buffers in
     * between.  The vertical kernel also receives first/last-word masks
     * for boundary handling. */
    size_t global[2], local[2];
    cl_mem swapbuf;
    cl_int status;
    int gsize;
    l_uint32 fwmask = rmask32[32 - 2];   /* first-word boundary mask */
    l_uint32 lwmask = lmask32[32 - 2];   /* last-word boundary mask */

    /* Horizontal pass: linear launch over all words, grouped along x. */
    gsize = (wpl * h + GROUPSIZE_HMORX - 1) / GROUPSIZE_HMORX * GROUPSIZE_HMORX;
    global[0] = gsize;
    global[1] = GROUPSIZE_HMORY;
    local[0] = GROUPSIZE_HMORX;
    local[1] = GROUPSIZE_HMORY;
    rEnv.mpkKernel = clCreateKernel(rEnv.mpkProgram, "morphoErodeHor_5x5", &status);
    status = clSetKernelArg(rEnv.mpkKernel, 0, sizeof(cl_mem), &pixsCLBuffer);
    status = clSetKernelArg(rEnv.mpkKernel, 1, sizeof(cl_mem), &pixdCLBuffer);
    status = clSetKernelArg(rEnv.mpkKernel, 2, sizeof(wpl), (const void *)&wpl);
    status = clSetKernelArg(rEnv.mpkKernel, 3, sizeof(h), (const void *)&h);
    status = clEnqueueNDRangeKernel(rEnv.mpkCmdQueue, rEnv.mpkKernel, 2, NULL,
                                    global, local, 0, NULL, NULL);

    /* Swap so the vertical pass consumes the horizontal result. */
    swapbuf = pixsCLBuffer;
    pixsCLBuffer = pixdCLBuffer;
    pixdCLBuffer = swapbuf;

    /* Vertical pass: 2D launch over (words-per-line, rows). */
    gsize = (wpl + GROUPSIZE_X - 1) / GROUPSIZE_X * GROUPSIZE_X;
    global[0] = gsize;
    gsize = (h + GROUPSIZE_Y - 1) / GROUPSIZE_Y * GROUPSIZE_Y;
    global[1] = gsize;
    local[0] = GROUPSIZE_X;
    local[1] = GROUPSIZE_Y;
    rEnv.mpkKernel = clCreateKernel(rEnv.mpkProgram, "morphoErodeVer_5x5", &status);
    status = clSetKernelArg(rEnv.mpkKernel, 0, sizeof(cl_mem), &pixsCLBuffer);
    status = clSetKernelArg(rEnv.mpkKernel, 1, sizeof(cl_mem), &pixdCLBuffer);
    status = clSetKernelArg(rEnv.mpkKernel, 2, sizeof(wpl), (const void *)&wpl);
    status = clSetKernelArg(rEnv.mpkKernel, 3, sizeof(h), (const void *)&h);
    status = clSetKernelArg(rEnv.mpkKernel, 4, sizeof(fwmask), (const void *)&fwmask);
    status = clSetKernelArg(rEnv.mpkKernel, 5, sizeof(lwmask), (const void *)&lwmask);
    status = clEnqueueNDRangeKernel(rEnv.mpkCmdQueue, rEnv.mpkKernel, 2, NULL,
                                    global, local, 0, NULL, NULL);
    return status;
}
//Morphology Dilate operation. Invokes the relevant OpenCL kernels
/* Brick dilation (hsize x vsize) on the global device buffers, done as
 * separable horizontal and vertical passes.  Three horizontal cases:
 *   - 5x5 brick: dedicated fast path (pixDilateCL_55);
 *   - half-width > 31: generic horizontal kernel;
 *   - 0 < half-width <= 31: specialized 32-word-window kernel.
 * Fixes vs. the previous version: the temporary SEL is now destroyed
 * (it leaked), and 'status' is initialized so a degenerate 1x1 brick
 * (no pass runs) returns CL_SUCCESS instead of garbage. */
cl_int
pixDilateCL(l_int32 hsize, l_int32 vsize, l_int32 wpl, l_int32 h)
{
    l_int32 xp, yp, xn, yn;
    SEL *sel;
    size_t globalThreads[2], localThreads[2];
    cl_mem pixtemp;
    cl_int status = CL_SUCCESS;
    int gsize;
    char isEven;

    OpenclDevice::SetKernelEnv(&rEnv);

    if (hsize == 5 && vsize == 5) {
        /* Dedicated 5x5 path. */
        status = pixDilateCL_55(wpl, h);
        return status;
    }

    /* The SEL is only needed to compute the max translations. */
    sel = selCreateBrick(vsize, hsize, vsize / 2, hsize / 2, SEL_HIT);
    selFindMaxTranslations(sel, &xp, &yp, &xn, &yn);
    selDestroy(&sel);   /* fix: previously leaked */

    /* 2D work sizes shared by all passes below. */
    gsize = (wpl + GROUPSIZE_X - 1) / GROUPSIZE_X * GROUPSIZE_X;
    globalThreads[0] = gsize;
    gsize = (h + GROUPSIZE_Y - 1) / GROUPSIZE_Y * GROUPSIZE_Y;
    globalThreads[1] = gsize;
    localThreads[0] = GROUPSIZE_X;
    localThreads[1] = GROUPSIZE_Y;

    if (xp > 31 || xn > 31) {
        /* Generic horizontal kernel. */
        rEnv.mpkKernel = clCreateKernel(rEnv.mpkProgram, "morphoDilateHor", &status);
        status = clSetKernelArg(rEnv.mpkKernel, 0, sizeof(cl_mem), &pixsCLBuffer);
        status = clSetKernelArg(rEnv.mpkKernel, 1, sizeof(cl_mem), &pixdCLBuffer);
        status = clSetKernelArg(rEnv.mpkKernel, 2, sizeof(xp), (const void *)&xp);
        status = clSetKernelArg(rEnv.mpkKernel, 3, sizeof(xn), (const void *)&xn);
        status = clSetKernelArg(rEnv.mpkKernel, 4, sizeof(wpl), (const void *)&wpl);
        status = clSetKernelArg(rEnv.mpkKernel, 5, sizeof(h), (const void *)&h);
        status = clEnqueueNDRangeKernel(rEnv.mpkCmdQueue, rEnv.mpkKernel, 2, NULL,
                                        globalThreads, localThreads, 0, NULL, NULL);
        if (yp > 0 || yn > 0) {
            /* Vertical pass follows: it must read this pass's output. */
            pixtemp = pixsCLBuffer;
            pixsCLBuffer = pixdCLBuffer;
            pixdCLBuffer = pixtemp;
        }
    } else if (xp > 0 || xn > 0) {
        /* Specialized horizontal kernel for half-width < 32. */
        rEnv.mpkKernel = clCreateKernel(rEnv.mpkProgram, "morphoDilateHor_32word", &status);
        isEven = (xp != xn);
        status = clSetKernelArg(rEnv.mpkKernel, 0, sizeof(cl_mem), &pixsCLBuffer);
        status = clSetKernelArg(rEnv.mpkKernel, 1, sizeof(cl_mem), &pixdCLBuffer);
        status = clSetKernelArg(rEnv.mpkKernel, 2, sizeof(xp), (const void *)&xp);
        status = clSetKernelArg(rEnv.mpkKernel, 3, sizeof(wpl), (const void *)&wpl);
        status = clSetKernelArg(rEnv.mpkKernel, 4, sizeof(h), (const void *)&h);
        status = clSetKernelArg(rEnv.mpkKernel, 5, sizeof(isEven), (const void *)&isEven);
        status = clEnqueueNDRangeKernel(rEnv.mpkCmdQueue, rEnv.mpkKernel, 2, NULL,
                                        globalThreads, localThreads, 0, NULL, NULL);
        if (yp > 0 || yn > 0) {
            pixtemp = pixsCLBuffer;
            pixsCLBuffer = pixdCLBuffer;
            pixdCLBuffer = pixtemp;
        }
    }

    if (yp > 0 || yn > 0) {
        /* Vertical pass. */
        rEnv.mpkKernel = clCreateKernel(rEnv.mpkProgram, "morphoDilateVer", &status);
        status = clSetKernelArg(rEnv.mpkKernel, 0, sizeof(cl_mem), &pixsCLBuffer);
        status = clSetKernelArg(rEnv.mpkKernel, 1, sizeof(cl_mem), &pixdCLBuffer);
        status = clSetKernelArg(rEnv.mpkKernel, 2, sizeof(yp), (const void *)&yp);
        status = clSetKernelArg(rEnv.mpkKernel, 3, sizeof(wpl), (const void *)&wpl);
        status = clSetKernelArg(rEnv.mpkKernel, 4, sizeof(h), (const void *)&h);
        status = clSetKernelArg(rEnv.mpkKernel, 5, sizeof(yn), (const void *)&yn);
        status = clEnqueueNDRangeKernel(rEnv.mpkCmdQueue, rEnv.mpkKernel, 2, NULL,
                                        globalThreads, localThreads, 0, NULL, NULL);
    }
    return status;
}
//Morphology Erode operation. Invokes the relevant OpenCL kernels
/* Brick erosion (hsize x vsize) on the global device buffers, done as
 * separable horizontal and vertical passes, with boundary-condition
 * masks and the asymmetric-BC flag passed to the kernels.  Fixes vs.
 * the previous version: the temporary SEL is now destroyed (it leaked
 * on every call, including the 5x5 early return, so the 5x5 check is
 * performed before creating it), and 'status' is initialized so a
 * degenerate 1x1 brick returns CL_SUCCESS instead of garbage. */
cl_int
pixErodeCL(l_int32 hsize, l_int32 vsize, l_uint32 wpl, l_uint32 h)
{
    l_int32 xp, yp, xn, yn;
    SEL *sel;
    size_t globalThreads[2], localThreads[2];
    cl_mem pixtemp;
    cl_int status = CL_SUCCESS;
    int gsize;
    char isAsymmetric = (MORPH_BC == ASYMMETRIC_MORPH_BC);
    l_uint32 rwmask, lwmask;
    char isEven;

    OpenclDevice::SetKernelEnv(&rEnv);

    if (hsize == 5 && vsize == 5 && isAsymmetric) {
        /* Dedicated 5x5 path (checked before building the SEL so
         * nothing is allocated on this path). */
        status = pixErodeCL_55(wpl, h);
        return status;
    }

    /* The SEL is only needed to compute the max translations. */
    sel = selCreateBrick(vsize, hsize, vsize / 2, hsize / 2, SEL_HIT);
    selFindMaxTranslations(sel, &xp, &yp, &xn, &yn);
    selDestroy(&sel);   /* fix: previously leaked */

    /* Boundary masks for the first/last words of each line. */
    rwmask = rmask32[32 - (xp & 31)];
    lwmask = lmask32[32 - (xn & 31)];

    /* 2D work sizes shared by all passes below. */
    gsize = (wpl + GROUPSIZE_X - 1) / GROUPSIZE_X * GROUPSIZE_X;
    globalThreads[0] = gsize;
    gsize = (h + GROUPSIZE_Y - 1) / GROUPSIZE_Y * GROUPSIZE_Y;
    globalThreads[1] = gsize;
    localThreads[0] = GROUPSIZE_X;
    localThreads[1] = GROUPSIZE_Y;

    /* Horizontal pass. */
    if (xp > 31 || xn > 31) {
        /* Generic horizontal kernel. */
        rEnv.mpkKernel = clCreateKernel(rEnv.mpkProgram, "morphoErodeHor", &status);
        status = clSetKernelArg(rEnv.mpkKernel, 0, sizeof(cl_mem), &pixsCLBuffer);
        status = clSetKernelArg(rEnv.mpkKernel, 1, sizeof(cl_mem), &pixdCLBuffer);
        status = clSetKernelArg(rEnv.mpkKernel, 2, sizeof(xp), (const void *)&xp);
        status = clSetKernelArg(rEnv.mpkKernel, 3, sizeof(xn), (const void *)&xn);
        status = clSetKernelArg(rEnv.mpkKernel, 4, sizeof(wpl), (const void *)&wpl);
        status = clSetKernelArg(rEnv.mpkKernel, 5, sizeof(h), (const void *)&h);
        status = clSetKernelArg(rEnv.mpkKernel, 6, sizeof(isAsymmetric),
                                (const void *)&isAsymmetric);
        status = clSetKernelArg(rEnv.mpkKernel, 7, sizeof(rwmask), (const void *)&rwmask);
        status = clSetKernelArg(rEnv.mpkKernel, 8, sizeof(lwmask), (const void *)&lwmask);
        status = clEnqueueNDRangeKernel(rEnv.mpkCmdQueue, rEnv.mpkKernel, 2, NULL,
                                        globalThreads, localThreads, 0, NULL, NULL);
        if (yp > 0 || yn > 0) {
            /* Vertical pass follows: it must read this pass's output. */
            pixtemp = pixsCLBuffer;
            pixsCLBuffer = pixdCLBuffer;
            pixdCLBuffer = pixtemp;
        }
    } else if (xp > 0 || xn > 0) {
        /* Specialized horizontal kernel for half-width < 32. */
        rEnv.mpkKernel = clCreateKernel(rEnv.mpkProgram, "morphoErodeHor_32word", &status);
        isEven = (xp != xn);
        status = clSetKernelArg(rEnv.mpkKernel, 0, sizeof(cl_mem), &pixsCLBuffer);
        status = clSetKernelArg(rEnv.mpkKernel, 1, sizeof(cl_mem), &pixdCLBuffer);
        status = clSetKernelArg(rEnv.mpkKernel, 2, sizeof(xp), (const void *)&xp);
        status = clSetKernelArg(rEnv.mpkKernel, 3, sizeof(wpl), (const void *)&wpl);
        status = clSetKernelArg(rEnv.mpkKernel, 4, sizeof(h), (const void *)&h);
        status = clSetKernelArg(rEnv.mpkKernel, 5, sizeof(isAsymmetric),
                                (const void *)&isAsymmetric);
        status = clSetKernelArg(rEnv.mpkKernel, 6, sizeof(rwmask), (const void *)&rwmask);
        status = clSetKernelArg(rEnv.mpkKernel, 7, sizeof(lwmask), (const void *)&lwmask);
        status = clSetKernelArg(rEnv.mpkKernel, 8, sizeof(isEven), (const void *)&isEven);
        status = clEnqueueNDRangeKernel(rEnv.mpkCmdQueue, rEnv.mpkKernel, 2, NULL,
                                        globalThreads, localThreads, 0, NULL, NULL);
        if (yp > 0 || yn > 0) {
            pixtemp = pixsCLBuffer;
            pixsCLBuffer = pixdCLBuffer;
            pixdCLBuffer = pixtemp;
        }
    }

    /* Vertical pass. */
    if (yp > 0 || yn > 0) {
        rEnv.mpkKernel = clCreateKernel(rEnv.mpkProgram, "morphoErodeVer", &status);
        status = clSetKernelArg(rEnv.mpkKernel, 0, sizeof(cl_mem), &pixsCLBuffer);
        status = clSetKernelArg(rEnv.mpkKernel, 1, sizeof(cl_mem), &pixdCLBuffer);
        status = clSetKernelArg(rEnv.mpkKernel, 2, sizeof(yp), (const void *)&yp);
        status = clSetKernelArg(rEnv.mpkKernel, 3, sizeof(wpl), (const void *)&wpl);
        status = clSetKernelArg(rEnv.mpkKernel, 4, sizeof(h), (const void *)&h);
        status = clSetKernelArg(rEnv.mpkKernel, 5, sizeof(isAsymmetric),
                                (const void *)&isAsymmetric);
        status = clSetKernelArg(rEnv.mpkKernel, 6, sizeof(yn), (const void *)&yn);
        status = clEnqueueNDRangeKernel(rEnv.mpkCmdQueue, rEnv.mpkKernel, 2, NULL,
                                        globalThreads, localThreads, 0, NULL, NULL);
    }
    return status;
}
// OpenCL implementation of Morphology Dilate
//Note: Assumes the source and dest opencl buffer are initialized. No check done
PIX*
OpenclDevice::pixDilateBrickCL(PIX *pixd, PIX *pixs, l_int32 hsize, l_int32 vsize, bool reqDataCopy = false)
{
    /* Run a brick dilation on the device; the result is copied back
     * into pixd only when reqDataCopy is set. */
    l_uint32 wordsPerLine = pixGetWpl(pixs);
    l_uint32 height = pixGetHeight(pixs);
    clStatus = pixDilateCL(hsize, vsize, wordsPerLine, height);
    if (reqDataCopy)
        pixd = mapOutputCLBuffer(rEnv, pixdCLBuffer, pixd, pixs,
                                 wordsPerLine * height, CL_MAP_READ, false);
    return pixd;
}
// OpenCL implementation of Morphology Erode
//Note: Assumes the source and dest opencl buffer are initialized. No check done
PIX*
OpenclDevice::pixErodeBrickCL(PIX *pixd, PIX *pixs, l_int32 hsize, l_int32 vsize, bool reqDataCopy = false)
{
    /* Run a brick erosion on the device; the result is copied back
     * into pixd only when reqDataCopy is set. */
    l_uint32 wordsPerLine = pixGetWpl(pixs);
    l_uint32 height = pixGetHeight(pixs);
    clStatus = pixErodeCL(hsize, vsize, wordsPerLine, height);
    if (reqDataCopy)
        pixd = mapOutputCLBuffer(rEnv, pixdCLBuffer, pixd, pixs,
                                 wordsPerLine * height, CL_MAP_READ);
    return pixd;
}
//Morphology Open operation. Invokes the relevant OpenCL kernels
cl_int
pixOpenCL(l_int32 hsize, l_int32 vsize, l_int32 wpl, l_int32 h)
{
    /* Opening = erosion followed by dilation; the global src/dst
     * buffers are swapped in between so the dilation reads the
     * eroded image. */
    cl_int status = pixErodeCL(hsize, vsize, wpl, h);
    cl_mem tmp = pixsCLBuffer;
    pixsCLBuffer = pixdCLBuffer;
    pixdCLBuffer = tmp;
    status = pixDilateCL(hsize, vsize, wpl, h);
    return status;
}
//Morphology Close operation. Invokes the relevant OpenCL kernels
cl_int
pixCloseCL(l_int32 hsize, l_int32 vsize, l_int32 wpl, l_int32 h)
{
    /* Closing = dilation followed by erosion; the global src/dst
     * buffers are swapped in between so the erosion reads the
     * dilated image. */
    cl_int status = pixDilateCL(hsize, vsize, wpl, h);
    cl_mem tmp = pixsCLBuffer;
    pixsCLBuffer = pixdCLBuffer;
    pixdCLBuffer = tmp;
    status = pixErodeCL(hsize, vsize, wpl, h);
    return status;
}
// OpenCL implementation of Morphology Close
//Note: Assumes the source and dest opencl buffer are initialized. No check done
PIX*
OpenclDevice::pixCloseBrickCL(PIX *pixd,
                              PIX *pixs,
                              l_int32 hsize,
                              l_int32 vsize,
                              bool reqDataCopy = false)
{
    /* Run a brick closing on the device; the result is copied back
     * into pixd only when reqDataCopy is set. */
    l_uint32 wordsPerLine = pixGetWpl(pixs);
    l_uint32 height = pixGetHeight(pixs);
    clStatus = pixCloseCL(hsize, vsize, wordsPerLine, height);
    if (reqDataCopy)
        pixd = mapOutputCLBuffer(rEnv, pixdCLBuffer, pixd, pixs,
                                 wordsPerLine * height, CL_MAP_READ);
    return pixd;
}
// OpenCL implementation of Morphology Open
//Note: Assumes the source and dest opencl buffer are initialized. No check done
PIX*
OpenclDevice::pixOpenBrickCL(PIX *pixd,
                             PIX *pixs,
                             l_int32 hsize,
                             l_int32 vsize,
                             bool reqDataCopy = false)
{
    /* Run a brick opening on the device; the result is copied back
     * into pixd only when reqDataCopy is set. */
    l_uint32 wordsPerLine = pixGetWpl(pixs);
    l_uint32 height = pixGetHeight(pixs);
    clStatus = pixOpenCL(hsize, vsize, wordsPerLine, height);
    if (reqDataCopy)
        pixd = mapOutputCLBuffer(rEnv, pixdCLBuffer, pixd, pixs,
                                 wordsPerLine * height, CL_MAP_READ);
    return pixd;
}
//pix OR operation: outbuffer = buffer1 | buffer2
cl_int
pixORCL_work(l_uint32 wpl, l_uint32 h, cl_mem buffer1, cl_mem buffer2, cl_mem outbuffer)
{
    /* Word-wise OR of two device images via the pixOR kernel,
     * launched as a 2D grid over (words-per-line, rows). */
    cl_int status;
    size_t localThreads[] = {GROUPSIZE_X, GROUPSIZE_Y};
    size_t globalThreads[2];
    globalThreads[0] = (wpl + GROUPSIZE_X - 1) / GROUPSIZE_X * GROUPSIZE_X;
    globalThreads[1] = (h + GROUPSIZE_Y - 1) / GROUPSIZE_Y * GROUPSIZE_Y;
    rEnv.mpkKernel = clCreateKernel(rEnv.mpkProgram, "pixOR", &status);
    status = clSetKernelArg(rEnv.mpkKernel, 0, sizeof(cl_mem), &buffer1);
    status = clSetKernelArg(rEnv.mpkKernel, 1, sizeof(cl_mem), &buffer2);
    status = clSetKernelArg(rEnv.mpkKernel, 2, sizeof(cl_mem), &outbuffer);
    status = clSetKernelArg(rEnv.mpkKernel, 3, sizeof(wpl), (const void *)&wpl);
    status = clSetKernelArg(rEnv.mpkKernel, 4, sizeof(h), (const void *)&h);
    status = clEnqueueNDRangeKernel(rEnv.mpkCmdQueue, rEnv.mpkKernel, 2, NULL,
                                    globalThreads, localThreads, 0, NULL, NULL);
    return status;
}
//pix AND operation: outbuffer = buffer1 & buffer2
cl_int
pixANDCL_work(l_uint32 wpl, l_uint32 h, cl_mem buffer1, cl_mem buffer2, cl_mem outbuffer)
{
    /* Word-wise AND of two device images via the pixAND kernel,
     * launched as a 2D grid over (words-per-line, rows). */
    cl_int status;
    size_t localThreads[] = {GROUPSIZE_X, GROUPSIZE_Y};
    size_t globalThreads[2];
    globalThreads[0] = (wpl + GROUPSIZE_X - 1) / GROUPSIZE_X * GROUPSIZE_X;
    globalThreads[1] = (h + GROUPSIZE_Y - 1) / GROUPSIZE_Y * GROUPSIZE_Y;
    rEnv.mpkKernel = clCreateKernel(rEnv.mpkProgram, "pixAND", &status);
    status = clSetKernelArg(rEnv.mpkKernel, 0, sizeof(cl_mem), &buffer1);
    status = clSetKernelArg(rEnv.mpkKernel, 1, sizeof(cl_mem), &buffer2);
    status = clSetKernelArg(rEnv.mpkKernel, 2, sizeof(cl_mem), &outbuffer);
    status = clSetKernelArg(rEnv.mpkKernel, 3, sizeof(wpl), (const void *)&wpl);
    status = clSetKernelArg(rEnv.mpkKernel, 4, sizeof(h), (const void *)&h);
    status = clEnqueueNDRangeKernel(rEnv.mpkCmdQueue, rEnv.mpkKernel, 2, NULL,
                                    globalThreads, localThreads, 0, NULL, NULL);
    return status;
}
//output = buffer1 & ~(buffer2)
cl_int
pixSubtractCL_work(l_uint32 wpl, l_uint32 h, cl_mem buffer1, cl_mem buffer2, cl_mem outBuffer = NULL)
{
    /* Word-wise subtraction (buffer1 AND NOT buffer2).  With an
     * explicit outBuffer the result goes there (pixSubtract kernel);
     * otherwise buffer1 is updated in place (pixSubtract_inplace). */
    cl_int status;
    size_t localThreads[] = {GROUPSIZE_X, GROUPSIZE_Y};
    size_t globalThreads[2];
    globalThreads[0] = (wpl + GROUPSIZE_X - 1) / GROUPSIZE_X * GROUPSIZE_X;
    globalThreads[1] = (h + GROUPSIZE_Y - 1) / GROUPSIZE_Y * GROUPSIZE_Y;
    rEnv.mpkKernel = clCreateKernel(rEnv.mpkProgram,
                                    outBuffer != NULL ? "pixSubtract"
                                                      : "pixSubtract_inplace",
                                    &status);
    status = clSetKernelArg(rEnv.mpkKernel, 0, sizeof(cl_mem), &buffer1);
    status = clSetKernelArg(rEnv.mpkKernel, 1, sizeof(cl_mem), &buffer2);
    status = clSetKernelArg(rEnv.mpkKernel, 2, sizeof(wpl), (const void *)&wpl);
    status = clSetKernelArg(rEnv.mpkKernel, 3, sizeof(h), (const void *)&h);
    if (outBuffer != NULL)
        status = clSetKernelArg(rEnv.mpkKernel, 4, sizeof(cl_mem),
                                (const void *)&outBuffer);
    status = clEnqueueNDRangeKernel(rEnv.mpkCmdQueue, rEnv.mpkKernel, 2, NULL,
                                    globalThreads, localThreads, 0, NULL, NULL);
    return status;
}
// OpenCL implementation of Subtract pix
//Note: Assumes the source and dest opencl buffer are initialized. No check done
PIX*
OpenclDevice::pixSubtractCL(PIX *pixd, PIX *pixs1, PIX *pixs2, bool reqDataCopy = false)
{
    /* Device-side pixd = pixd & ~pixs (in place on pixdCLBuffer).
     * pixs1/pixs2 are consulted only for validation and geometry. */
    l_uint32 wordsPerLine, height;
    PROCNAME("pixSubtractCL");
    if (!pixs1)
        return (PIX *)ERROR_PTR("pixs1 not defined", procName, pixd);
    if (!pixs2)
        return (PIX *)ERROR_PTR("pixs2 not defined", procName, pixd);
    if (pixGetDepth(pixs1) != pixGetDepth(pixs2))
        return (PIX *)ERROR_PTR("depths of pixs* unequal", procName, pixd);
#if EQUAL_SIZE_WARNING
    if (!pixSizesEqual(pixs1, pixs2))
        L_WARNING("pixs1 and pixs2 not equal sizes", procName);
#endif /* EQUAL_SIZE_WARNING */
    wordsPerLine = pixGetWpl(pixs1);
    height = pixGetHeight(pixs1);
    clStatus = pixSubtractCL_work(wordsPerLine, height, pixdCLBuffer, pixsCLBuffer);
    if (reqDataCopy)
        /* Read back the device result into a cpu-side pix. */
        pixd = mapOutputCLBuffer(rEnv, pixdCLBuffer, pixd, pixs1,
                                 wordsPerLine * height, CL_MAP_READ);
    return pixd;
}
// OpenCL implementation of Hollow pix
//Note: Assumes the source and dest opencl buffer are initialized. No check done
PIX*
OpenclDevice::pixHollowCL(PIX *pixd,
                          PIX *pixs,
                          l_int32 close_hsize,
                          l_int32 close_vsize,
                          l_int32 open_hsize,
                          l_int32 open_vsize,
                          bool reqDataCopy = false)
{
    /* Hollow = close(pixs) - open(close(pixs)), computed entirely on
     * the device.  Assumes pixsCLBuffer already holds pixs. */
    l_uint32 wordsPerLine = pixGetWpl(pixs);
    l_uint32 height = pixGetHeight(pixs);
    cl_mem tmp;

    /* Step 1: closing (dilate then erode); stash the result in the
     * intermediate buffer for the later subtraction. */
    clStatus = pixCloseCL(close_hsize, close_vsize, wordsPerLine, height);
    clStatus = clEnqueueCopyBuffer(rEnv.mpkCmdQueue, pixdCLBuffer, pixdCLIntermediate,
                                   0, 0, sizeof(int) * wordsPerLine * height,
                                   0, NULL, NULL);

    /* Step 2: opening (erode then dilate) of the close result. */
    tmp = pixsCLBuffer;
    pixsCLBuffer = pixdCLBuffer;
    pixdCLBuffer = tmp;
    clStatus = pixOpenCL(open_hsize, open_vsize, wordsPerLine, height);

    /* Step 3: subtract (close - open).  Rotate the buffers so the saved
     * close output becomes the minuend. */
    tmp = pixsCLBuffer;
    pixsCLBuffer = pixdCLBuffer;
    pixdCLBuffer = pixdCLIntermediate;
    pixdCLIntermediate = tmp;
    clStatus = pixSubtractCL_work(wordsPerLine, height, pixdCLBuffer, pixsCLBuffer);

    if (reqDataCopy)
        /* Read back the device result into a cpu-side pix. */
        pixd = mapOutputCLBuffer(rEnv, pixdCLBuffer, pixd, pixs,
                                 wordsPerLine * height, CL_MAP_READ);
    return pixd;
}
// OpenCL implementation of Get Lines from pix function
//Note: Assumes the source and dest opencl buffer are initialized. No check done
// Pipeline: close -> (optional readback) -> open -> subtract (hollow) ->
// open with a tall SE (vertical lines) -> open with a wide SE (horizontal
// lines).  Intermediate results are rotated through the three global
// device buffers; pix_vline/pix_hline receive cpu-side copies.
// NOTE(review): parameter 'pixd' is never used in this function.
void
OpenclDevice::pixGetLinesCL(PIX *pixd,
PIX *pixs,
PIX** pix_vline,
PIX** pix_hline,
PIX** pixClosed,
bool getpixClosed,
l_int32 close_hsize, l_int32 close_vsize,
l_int32 open_hsize, l_int32 open_vsize,
l_int32 line_hsize, l_int32 line_vsize)
{
l_uint32 wpl, h;
cl_mem pixtemp;
wpl = pixGetWpl(pixs);
h = pixGetHeight(pixs);
//First step : Close Morph operation: Dilate followed by Erode
clStatus = pixCloseCL(close_hsize, close_vsize, wpl, h);
//Copy the Close output to CPU buffer
if (getpixClosed)
{
*pixClosed = mapOutputCLBuffer(rEnv, pixdCLBuffer, *pixClosed, pixs, wpl*h, CL_MAP_READ, true, false);
}
//Store the output of close operation in an intermediate buffer
//this will be later used for pixsubtract
clStatus = clEnqueueCopyBuffer(rEnv.mpkCmdQueue, pixdCLBuffer, pixdCLIntermediate, 0, 0, sizeof(int) * wpl*h, 0, NULL, NULL);
//Second step: Open Operation - Erode followed by Dilate
pixtemp = pixsCLBuffer;
pixsCLBuffer = pixdCLBuffer;
pixdCLBuffer = pixtemp;
clStatus = pixOpenCL(open_hsize, open_vsize, wpl, h);
//Third step: Subtract : (Close - Open)
/* Rotate buffers so the saved close output becomes the minuend. */
pixtemp = pixsCLBuffer;
pixsCLBuffer = pixdCLBuffer;
pixdCLBuffer = pixdCLIntermediate;
pixdCLIntermediate = pixtemp;
clStatus = pixSubtractCL_work(wpl, h, pixdCLBuffer, pixsCLBuffer);
//Store the output of Hollow operation in an intermediate buffer
//this will be later used (for the horizontal-line pass below)
clStatus = clEnqueueCopyBuffer(rEnv.mpkCmdQueue, pixdCLBuffer, pixdCLIntermediate, 0, 0, sizeof(int) * wpl*h, 0, NULL, NULL);
pixtemp = pixsCLBuffer;
pixsCLBuffer = pixdCLBuffer;
pixdCLBuffer = pixtemp;
//Fourth step: Get vertical line
//pixOpenBrick(NULL, pix_hollow, 1, min_line_length);
clStatus = pixOpenCL(1, line_vsize, wpl, h);
//Copy the vertical line output to CPU buffer
*pix_vline = mapOutputCLBuffer(rEnv, pixdCLBuffer, *pix_vline, pixs, wpl*h, CL_MAP_READ, true, false);
/* Restore the saved hollow output as the source for the next pass. */
pixtemp = pixsCLBuffer;
pixsCLBuffer = pixdCLIntermediate;
pixdCLIntermediate = pixtemp;
//Fifth step: Get horizontal line
//pixOpenBrick(NULL, pix_hollow, min_line_length, 1);
clStatus = pixOpenCL(line_hsize, 1, wpl, h);
//Copy the horizontal line output to CPU buffer
*pix_hline = mapOutputCLBuffer(rEnv, pixdCLBuffer, *pix_hline, pixs, wpl*h, CL_MAP_READ, true, true);
return;
}
/*************************************************************************
* HistogramRect
* Otsu Thresholding Operations
* histogramAllChannels is laid out as all of channel 0, then all of channel 1, ...
* only supports 1 or 4 channels (bytes_per_pixel)
************************************************************************/
// Computes per-channel histograms (kHistogramSize bins each) of the image on
// the OpenCL device. Two kernels run back to back: a partial-histogram kernel
// that accumulates per-workgroup histograms into tmpHistogramBuffer, then a
// reduction kernel that sums the partials into histogramAllChannels (host
// memory mapped via CL_MEM_USE_HOST_PTR).
// Supports bytes_per_pixel of 1 or 4; left/top are always 0 for the callers.
void OpenclDevice::HistogramRectOCL(
    const unsigned char* imageData,
    int bytes_per_pixel,
    int bytes_per_line,
    int left,  // always 0
    int top,   // always 0
    int width,
    int height,
    int kHistogramSize,
    int* histogramAllChannels)
{
PERF_COUNT_START("HistogramRectOCL")
  cl_int clStatus;
  KernelEnv histKern;
  SetKernelEnv( &histKern );
  KernelEnv histRedKern;
  SetKernelEnv( &histRedKern );
  /* map imagedata to device as read only */
  // USE_HOST_PTR uses onion+ bus which is slowest option; also happens to be
  // coherent which we don't need. A faster option would be to allocate the
  // initial image buffer using a garlic bus memory type.
  cl_mem imageBuffer = clCreateBuffer( histKern.mpkContext, CL_MEM_READ_ONLY | CL_MEM_USE_HOST_PTR, width*height*bytes_per_pixel*sizeof(char), (void *)imageData, &clStatus );
  CHECK_OPENCL( clStatus, "clCreateBuffer imageBuffer");
  /* setup work group size parameters */
  int block_size = 256;
  cl_uint numCUs;
  clStatus = clGetDeviceInfo( gpuEnv.mpDevID, CL_DEVICE_MAX_COMPUTE_UNITS, sizeof(numCUs), &numCUs, NULL);
  // fix: diagnostic string previously said "clCreateBuffer imageBuffer"
  CHECK_OPENCL( clStatus, "clGetDeviceInfo CL_DEVICE_MAX_COMPUTE_UNITS");
  // Oversubscribe each compute unit so the device stays busy.
  int requestedOccupancy = 10;
  int numWorkGroups = numCUs * requestedOccupancy;
  int numThreads = block_size*numWorkGroups;
  size_t local_work_size[] = {static_cast<size_t>(block_size)};
  size_t global_work_size[] = {static_cast<size_t>(numThreads)};
  size_t red_global_work_size[] = {static_cast<size_t>(block_size*kHistogramSize*bytes_per_pixel)};
  /* map histogramAllChannels as write only */
  cl_mem histogramBuffer = clCreateBuffer( histKern.mpkContext, CL_MEM_READ_WRITE | CL_MEM_USE_HOST_PTR, kHistogramSize*bytes_per_pixel*sizeof(int), (void *)histogramAllChannels, &clStatus );
  CHECK_OPENCL( clStatus, "clCreateBuffer histogramBuffer");
  /* intermediate histogram buffer: one partial histogram per reduction slot */
  int histRed = 256;
  int tmpHistogramBins = kHistogramSize*bytes_per_pixel*histRed;
  cl_mem tmpHistogramBuffer = clCreateBuffer( histKern.mpkContext, CL_MEM_READ_WRITE, tmpHistogramBins*sizeof(cl_uint), NULL, &clStatus );
  CHECK_OPENCL( clStatus, "clCreateBuffer tmpHistogramBuffer");
  // NOTE(review): the original also allocated an "atomic sync" buffer here
  // (and leaked its host staging array); it was never passed to any kernel,
  // so the dead allocation has been removed.
  //Create kernel objects based on bytes_per_pixel
  if (bytes_per_pixel == 1)
  {
    histKern.mpkKernel = clCreateKernel( histKern.mpkProgram, "kernel_HistogramRectOneChannel", &clStatus );
    CHECK_OPENCL( clStatus, "clCreateKernel kernel_HistogramRectOneChannel");
    histRedKern.mpkKernel = clCreateKernel( histRedKern.mpkProgram, "kernel_HistogramRectOneChannelReduction", &clStatus );
    CHECK_OPENCL( clStatus, "clCreateKernel kernel_HistogramRectOneChannelReduction");
  } else {
    histKern.mpkKernel = clCreateKernel( histKern.mpkProgram, "kernel_HistogramRectAllChannels", &clStatus );
    CHECK_OPENCL( clStatus, "clCreateKernel kernel_HistogramRectAllChannels");
    histRedKern.mpkKernel = clCreateKernel( histRedKern.mpkProgram, "kernel_HistogramRectAllChannelsReduction", &clStatus );
    CHECK_OPENCL( clStatus, "clCreateKernel kernel_HistogramRectAllChannelsReduction");
  }
  void *ptr;
  // Zero the partial-histogram buffer before the kernels accumulate into it.
  ptr = clEnqueueMapBuffer(histKern.mpkCmdQueue, tmpHistogramBuffer, CL_TRUE, CL_MAP_WRITE, 0, tmpHistogramBins*sizeof(cl_uint), 0, NULL, NULL, &clStatus);
  CHECK_OPENCL( clStatus, "clEnqueueMapBuffer tmpHistogramBuffer");
  memset(ptr, 0, tmpHistogramBins*sizeof(cl_uint));
  clEnqueueUnmapMemObject(histKern.mpkCmdQueue, tmpHistogramBuffer, ptr, 0, NULL, NULL);
  /* set kernel 1 arguments */
  clStatus = clSetKernelArg( histKern.mpkKernel, 0, sizeof(cl_mem), (void *)&imageBuffer );
  CHECK_OPENCL( clStatus, "clSetKernelArg imageBuffer");
  cl_uint numPixels = width*height;
  clStatus = clSetKernelArg( histKern.mpkKernel, 1, sizeof(cl_uint), (void *)&numPixels );
  CHECK_OPENCL( clStatus, "clSetKernelArg numPixels" );
  clStatus = clSetKernelArg( histKern.mpkKernel, 2, sizeof(cl_mem), (void *)&tmpHistogramBuffer );
  CHECK_OPENCL( clStatus, "clSetKernelArg tmpHistogramBuffer");
  /* set kernel 2 arguments */
  int n = numThreads/bytes_per_pixel;
  clStatus = clSetKernelArg( histRedKern.mpkKernel, 0, sizeof(cl_int), (void *)&n );
  CHECK_OPENCL( clStatus, "clSetKernelArg n");
  clStatus = clSetKernelArg( histRedKern.mpkKernel, 1, sizeof(cl_mem), (void *)&tmpHistogramBuffer );
  CHECK_OPENCL( clStatus, "clSetKernelArg tmpHistogramBuffer");
  clStatus = clSetKernelArg( histRedKern.mpkKernel, 2, sizeof(cl_mem), (void *)&histogramBuffer );
  CHECK_OPENCL( clStatus, "clSetKernelArg histogramBuffer");
  /* launch histogram */
PERF_COUNT_SUB("before")
  clStatus = clEnqueueNDRangeKernel(
      histKern.mpkCmdQueue,
      histKern.mpkKernel,
      1, NULL, global_work_size, local_work_size,
      0, NULL, NULL );
  CHECK_OPENCL( clStatus, "clEnqueueNDRangeKernel kernel_HistogramRectAllChannels" );
  clFinish( histKern.mpkCmdQueue );
  /* launch reduction */
  clStatus = clEnqueueNDRangeKernel(
      histRedKern.mpkCmdQueue,
      histRedKern.mpkKernel,
      1, NULL, red_global_work_size, local_work_size,
      0, NULL, NULL );
  CHECK_OPENCL( clStatus, "clEnqueueNDRangeKernel kernel_HistogramRectAllChannelsReduction" );
  clFinish( histRedKern.mpkCmdQueue );
PERF_COUNT_SUB("redKernel")
  /* map results back from gpu; the map/unmap pair makes the USE_HOST_PTR
     memory coherent with the host copy in histogramAllChannels */
  ptr = clEnqueueMapBuffer(histRedKern.mpkCmdQueue, histogramBuffer, CL_TRUE, CL_MAP_READ, 0, kHistogramSize*bytes_per_pixel*sizeof(int), 0, NULL, NULL, &clStatus);
  CHECK_OPENCL( clStatus, "clEnqueueMapBuffer histogramBuffer");
  clEnqueueUnmapMemObject(histRedKern.mpkCmdQueue, histogramBuffer, ptr, 0, NULL, NULL);
  // fix: tmpHistogramBuffer was previously leaked
  clReleaseMemObject(tmpHistogramBuffer);
  clReleaseMemObject(histogramBuffer);
  clReleaseMemObject(imageBuffer);
PERF_COUNT_SUB("after")
PERF_COUNT_END
}
/*************************************************************************
* Threshold the rectangle, taking everything except the image buffer pointer
* from the class, using thresholds/hi_values to the output IMAGE.
* only supports 1 or 4 channels
************************************************************************/
// Thresholds the image rectangle into a freshly created 1 bpp Pix (*pix) on
// the OpenCL device, using per-channel thresholds/hi_values. Supports 1 or 4
// channels. The output buffer (global pixThBuffer) is deliberately not
// released here: it is created over the Pix data via CL_MEM_USE_HOST_PTR and
// reused by the downstream morphological operations.
void OpenclDevice::ThresholdRectToPixOCL(
    const unsigned char* imageData,
    int bytes_per_pixel,
    int bytes_per_line,
    const int* thresholds,
    const int* hi_values,
    Pix** pix,
    int height,
    int width,
    int top,
    int left) {
PERF_COUNT_START("ThresholdRectToPixOCL")
  /* create pix result buffer */
  *pix = pixCreate(width, height, 1);
  uinT32* pixData = pixGetData(*pix);
  int wpl = pixGetWpl(*pix);
  int pixSize = wpl*height*sizeof(uinT32);
  cl_int clStatus;
  KernelEnv rEnv;
  SetKernelEnv( &rEnv );
  /* setup work group size parameters */
  int block_size = 256;
  cl_uint numCUs = 6;  // fallback if the device query below fails
  clStatus = clGetDeviceInfo( gpuEnv.mpDevID, CL_DEVICE_MAX_COMPUTE_UNITS, sizeof(numCUs), &numCUs, NULL);
  CHECK_OPENCL( clStatus, "clGetDeviceInfo CL_DEVICE_MAX_COMPUTE_UNITS");
  // Oversubscribe each compute unit so the device stays busy.
  int requestedOccupancy = 10;
  int numWorkGroups = numCUs * requestedOccupancy;
  int numThreads = block_size*numWorkGroups;
  size_t local_work_size[] = {(size_t) block_size};
  size_t global_work_size[] = {(size_t) numThreads};
  /* map imagedata to device as read only */
  // USE_HOST_PTR uses onion+ bus which is slowest option; also happens to be
  // coherent which we don't need. A faster option would be to allocate the
  // initial image buffer using a garlic bus memory type.
  cl_mem imageBuffer = clCreateBuffer( rEnv.mpkContext, CL_MEM_READ_ONLY | CL_MEM_USE_HOST_PTR, width*height*bytes_per_pixel*sizeof(char), (void *)imageData, &clStatus );
  CHECK_OPENCL( clStatus, "clCreateBuffer imageBuffer");
  /* map pix as write only */
  pixThBuffer = clCreateBuffer( rEnv.mpkContext, CL_MEM_READ_WRITE | CL_MEM_USE_HOST_PTR, pixSize, (void *)pixData, &clStatus );
  CHECK_OPENCL( clStatus, "clCreateBuffer pix");
  /* map thresholds and hi_values */
  cl_mem thresholdsBuffer = clCreateBuffer( rEnv.mpkContext, CL_MEM_READ_ONLY | CL_MEM_USE_HOST_PTR, bytes_per_pixel*sizeof(int), (void *)thresholds, &clStatus );
  CHECK_OPENCL( clStatus, "clCreateBuffer thresholdBuffer");
  cl_mem hiValuesBuffer = clCreateBuffer( rEnv.mpkContext, CL_MEM_READ_ONLY | CL_MEM_USE_HOST_PTR, bytes_per_pixel*sizeof(int), (void *)hi_values, &clStatus );
  CHECK_OPENCL( clStatus, "clCreateBuffer hiValuesBuffer");
  /* pick the kernel matching the channel count */
  if (bytes_per_pixel == 4) {
    rEnv.mpkKernel = clCreateKernel( rEnv.mpkProgram, "kernel_ThresholdRectToPix", &clStatus );
    CHECK_OPENCL( clStatus, "clCreateKernel kernel_ThresholdRectToPix");
  } else {
    rEnv.mpkKernel = clCreateKernel( rEnv.mpkProgram, "kernel_ThresholdRectToPix_OneChan", &clStatus );
    CHECK_OPENCL( clStatus, "clCreateKernel kernel_ThresholdRectToPix_OneChan");
  }
  /* set kernel arguments */
  clStatus = clSetKernelArg( rEnv.mpkKernel, 0, sizeof(cl_mem), (void *)&imageBuffer );
  CHECK_OPENCL( clStatus, "clSetKernelArg imageBuffer");
  clStatus = clSetKernelArg( rEnv.mpkKernel, 1, sizeof(int), (void *)&height );
  CHECK_OPENCL( clStatus, "clSetKernelArg height" );
  clStatus = clSetKernelArg( rEnv.mpkKernel, 2, sizeof(int), (void *)&width );
  CHECK_OPENCL( clStatus, "clSetKernelArg width" );
  clStatus = clSetKernelArg( rEnv.mpkKernel, 3, sizeof(int), (void *)&wpl );
  CHECK_OPENCL( clStatus, "clSetKernelArg wpl" );
  clStatus = clSetKernelArg( rEnv.mpkKernel, 4, sizeof(cl_mem), (void *)&thresholdsBuffer );
  CHECK_OPENCL( clStatus, "clSetKernelArg thresholdsBuffer" );
  clStatus = clSetKernelArg( rEnv.mpkKernel, 5, sizeof(cl_mem), (void *)&hiValuesBuffer );
  CHECK_OPENCL( clStatus, "clSetKernelArg hiValuesBuffer" );
  clStatus = clSetKernelArg( rEnv.mpkKernel, 6, sizeof(cl_mem), (void *)&pixThBuffer );
  CHECK_OPENCL( clStatus, "clSetKernelArg pixThBuffer");
  /* launch kernel & wait */
PERF_COUNT_SUB("before")
  clStatus = clEnqueueNDRangeKernel(
      rEnv.mpkCmdQueue,
      rEnv.mpkKernel,
      1, NULL, global_work_size, local_work_size,
      0, NULL, NULL );
  CHECK_OPENCL( clStatus, "clEnqueueNDRangeKernel kernel_ThresholdRectToPix" );
  clFinish( rEnv.mpkCmdQueue );
PERF_COUNT_SUB("kernel")
  /* map results back from gpu; the map/unmap pair makes the USE_HOST_PTR
     memory coherent with the Pix data */
  void *ptr = clEnqueueMapBuffer(rEnv.mpkCmdQueue, pixThBuffer, CL_TRUE, CL_MAP_READ, 0, pixSize, 0, NULL, NULL, &clStatus);
  // fix: diagnostic previously said "histogramBuffer"
  CHECK_OPENCL( clStatus, "clEnqueueMapBuffer pixThBuffer");
  clEnqueueUnmapMemObject(rEnv.mpkCmdQueue, pixThBuffer, ptr, 0, NULL, NULL);
  clReleaseMemObject(imageBuffer);
  clReleaseMemObject(thresholdsBuffer);
  clReleaseMemObject(hiValuesBuffer);
PERF_COUNT_SUB("after")
PERF_COUNT_END
}
#if USE_DEVICE_SELECTION
/******************************************************************************
* Data Types for Device Selection
*****************************************************************************/
// Synthetic input image (plus its dimensions and a companion Pix) handed to
// each micro-benchmark when scoring a candidate device.
typedef struct _TessScoreEvaluationInputData {
  int height;                // image height in pixels
  int width;                 // image width in pixels
  int numChannels;           // bytes per pixel (4 = RGBA in these benchmarks)
  unsigned char *imageData;  // raw pixels, height*width*numChannels bytes
  Pix *pix;                  // 1 bpp Pix of the same dimensions (see populateTessScoreEvaluationInputData)
} TessScoreEvaluationInputData;
// Builds a synthetic 2560x3328 4-channel page image -- uniform background
// with random vertical lines, horizontal lines and square spots -- plus an
// empty 1 bpp Pix, for use as micro-benchmark input. srand(1) fixes the seed
// so every device is scored on identical data.
void populateTessScoreEvaluationInputData( TessScoreEvaluationInputData *input ) {
  srand(1);
  // 8.5x11 inches @ 300dpi rounded to clean multiples
  int height = 3328; // %256
  int width = 2560;  // %512
  int numChannels = 4;
  input->height = height;
  input->width = width;
  input->numChannels = numChannels;
  unsigned char (*imageData4)[4] = (unsigned char (*)[4]) malloc(height*width*numChannels*sizeof(unsigned char)); // new unsigned char[4][height*width];
  input->imageData = (unsigned char *) &imageData4[0];
  // Fill the background. NOTE(review): "white" here is channel value 0 with
  // alpha 255 and "black" is 255 -- the semantics depend on the downstream
  // kernels; confirm before reusing these constants elsewhere.
  unsigned char pixelWhite[4] = { 0, 0, 0, 255};
  unsigned char pixelBlack[4] = {255, 255, 255, 255};
  for (int p = 0; p < height*width; p++) {
    imageData4[p][0] = pixelWhite[0];
    imageData4[p][1] = pixelWhite[1];
    imageData4[p][2] = pixelWhite[2];
    imageData4[p][3] = pixelWhite[3];
  }
  // random lines to be eliminated
  int maxLineWidth = 64; // pixels wide
  int numLines = 10;
  // vertical lines: paint columns [vertLinePos +/- lineWidth/2] down the page.
  // fix: pixel (x, y) lives at index y*width + x; the original indexed
  // x*width + y, which drew a skewed band instead of a vertical line.
  for (int i = 0; i < numLines; i++) {
    int lineWidth = rand()%maxLineWidth;
    int vertLinePos = lineWidth + rand()%(width-2*lineWidth);
    for (int x = vertLinePos-lineWidth/2; x < vertLinePos+lineWidth/2; x++) {
      for (int y = 0; y < height; y++) {
        imageData4[y*width+x][0] = pixelBlack[0];
        imageData4[y*width+x][1] = pixelBlack[1];
        imageData4[y*width+x][2] = pixelBlack[2];
        imageData4[y*width+x][3] = pixelBlack[3];
      }
    }
  }
  // horizontal lines: paint rows [horLinePos +/- lineWidth/2] across the page
  // (same index fix as above).
  for (int i = 0; i < numLines; i++) {
    int lineWidth = rand()%maxLineWidth;
    int horLinePos = lineWidth + rand()%(height-2*lineWidth);
    for (int x = 0; x < width; x++) {
      for (int y = horLinePos-lineWidth/2; y < horLinePos+lineWidth/2; y++) {
        imageData4[y*width+x][0] = pixelBlack[0];
        imageData4[y*width+x][1] = pixelBlack[1];
        imageData4[y*width+x][2] = pixelBlack[2];
        imageData4[y*width+x][3] = pixelBlack[3];
      }
    }
  }
  // spots (noise, squares)
  float fractionBlack = 0.1; // how much of the image should be blackened
  int numSpots = (height*width)*fractionBlack/(maxLineWidth*maxLineWidth/2/2);
  for (int i = 0; i < numSpots; i++) {
    int lineWidth = rand()%maxLineWidth;
    int col = lineWidth + rand()%(width-2*lineWidth);
    int row = lineWidth + rand()%(height-2*lineWidth);
    for (int r = row-lineWidth/2; r < row+lineWidth/2; r++) {
      for (int c = col-lineWidth/2; c < col+lineWidth/2; c++) {
        imageData4[r*width+c][0] = pixelBlack[0];
        imageData4[r*width+c][1] = pixelBlack[1];
        imageData4[r*width+c][2] = pixelBlack[2];
        imageData4[r*width+c][3] = pixelBlack[3];
      }
    }
  }
  input->pix = pixCreate(input->width, input->height, 1);
}
// Benchmark result recorded per device; serialized verbatim into the
// profile file by serializeScore/deserializeScore.
typedef struct _TessDeviceScore {
  float time;   // small time means faster device
  bool clError; // were there any opencl errors
  bool valid;   // was the correct response generated
} TessDeviceScore;
/******************************************************************************
* Micro Benchmarks for Device Selection
*****************************************************************************/
// Micro-benchmark: time the TIFF-word -> RGB pixel recomposition either on
// the OpenCL device (pixReadFromTiffKernel) or with the equivalent native
// CPU loop. Returns elapsed wall-clock seconds.
double composeRGBPixelMicroBench( GPUEnv *env, TessScoreEvaluationInputData input, ds_device_type type ) {
  double time = 0;
#if ON_WINDOWS
  LARGE_INTEGER freq, time_funct_start, time_funct_end;
  QueryPerformanceFrequency(&freq);
#else
  TIMESPEC time_funct_start, time_funct_end;
#endif
  // input data
  l_uint32 *tiffdata = (l_uint32 *)input.imageData;// same size and random data; data doesn't change workload
  // function call
  if (type == DS_DEVICE_OPENCL_DEVICE) {
#if ON_WINDOWS
    QueryPerformanceCounter(&time_funct_start);
#else
    clock_gettime( CLOCK_MONOTONIC, &time_funct_start );
#endif
    // make the candidate device current for the static OpenCL entry points
    OpenclDevice::gpuEnv = *env;
    int wpl = pixGetWpl(input.pix);
    OpenclDevice::pixReadFromTiffKernel(tiffdata, input.width, input.height, wpl, NULL);
#if ON_WINDOWS
    QueryPerformanceCounter(&time_funct_end);
    time = (time_funct_end.QuadPart-time_funct_start.QuadPart)/(double)(freq.QuadPart);
#else
    clock_gettime( CLOCK_MONOTONIC, &time_funct_end );
    time = (time_funct_end.tv_sec - time_funct_start.tv_sec)*1.0 + (time_funct_end.tv_nsec - time_funct_start.tv_nsec)/1000000000.0;
#endif
  } else {
#if ON_WINDOWS
    QueryPerformanceCounter(&time_funct_start);
#else
    clock_gettime( CLOCK_MONOTONIC, &time_funct_start );
#endif
    Pix *pix = pixCreate(input.width, input.height, 32);
    l_uint32 *pixData = pixGetData(pix);
    int wpl = pixGetWpl(pix);
    //l_uint32* output_gpu=pixReadFromTiffKernel(tiffdata,w,h,wpl,line);
    //pixSetData(pix, output_gpu);
    int i, j;
    int idx = 0;
    // Native reference: unpack each 32-bit TIFF word (R in the low byte)
    // and repack as RGBX with R in the high byte.
    for (i = 0; i < input.height ; i++) {
      for (j = 0; j < input.width; j++) {
        l_uint32 tiffword = tiffdata[i * input.width + j];
        l_int32 rval = ((tiffword) & 0xff);
        l_int32 gval = (((tiffword) >> 8) & 0xff);
        l_int32 bval = (((tiffword) >> 16) & 0xff);
        l_uint32 value = (rval << 24) | (gval << 16) | (bval << 8);
        pixData[idx] = value;
        idx++;
      }
    }
#if ON_WINDOWS
    QueryPerformanceCounter(&time_funct_end);
    time = (time_funct_end.QuadPart-time_funct_start.QuadPart)/(double)(freq.QuadPart);
#else
    clock_gettime( CLOCK_MONOTONIC, &time_funct_end );
    time = (time_funct_end.tv_sec - time_funct_start.tv_sec)*1.0 + (time_funct_end.tv_nsec - time_funct_start.tv_nsec)/1000000000.0;
#endif
    pixDestroy(&pix);
  }
  // cleanup
  return time;
}
// Micro-benchmark: time HistogramRect on either the OpenCL device or the
// native CPU implementation. Returns elapsed wall-clock seconds.
double histogramRectMicroBench( GPUEnv *env, TessScoreEvaluationInputData input, ds_device_type type ) {
  double time;
#if ON_WINDOWS
  LARGE_INTEGER freq, time_funct_start, time_funct_end;
  QueryPerformanceFrequency(&freq);
#else
  TIMESPEC time_funct_start, time_funct_end;
#endif
  int left = 0;
  int top = 0;
  int kHistogramSize = 256;
  int bytes_per_line = input.width*input.numChannels;
  int *histogramAllChannels = new int[kHistogramSize*input.numChannels];
  // function call
  if (type == DS_DEVICE_OPENCL_DEVICE) {
#if ON_WINDOWS
    QueryPerformanceCounter(&time_funct_start);
#else
    clock_gettime( CLOCK_MONOTONIC, &time_funct_start );
#endif
    // make the candidate device current for the static OpenCL entry points
    OpenclDevice::gpuEnv = *env;
    // fix: arguments are now passed in declaration order (left, top); the
    // original passed (top, left). Harmless while both are 0, but latent.
    OpenclDevice::HistogramRectOCL(input.imageData, input.numChannels, bytes_per_line, left, top, input.width, input.height, kHistogramSize, histogramAllChannels);
#if ON_WINDOWS
    QueryPerformanceCounter(&time_funct_end);
    time = (time_funct_end.QuadPart-time_funct_start.QuadPart)/(double)(freq.QuadPart);
#else
    clock_gettime( CLOCK_MONOTONIC, &time_funct_end );
    time = (time_funct_end.tv_sec - time_funct_start.tv_sec)*1.0 + (time_funct_end.tv_nsec - time_funct_start.tv_nsec)/1000000000.0;
#endif
  } else {
    int *histogram = new int[kHistogramSize];
#if ON_WINDOWS
    QueryPerformanceCounter(&time_funct_start);
#else
    clock_gettime( CLOCK_MONOTONIC, &time_funct_start );
#endif
    // NOTE(review): each iteration overwrites the same histogram buffer;
    // this only approximates the per-channel workload for timing purposes.
    for (int ch = 0; ch < input.numChannels; ++ch) {
      tesseract::HistogramRect(input.pix, input.numChannels,
                               left, top, input.width, input.height, histogram);
    }
#if ON_WINDOWS
    QueryPerformanceCounter(&time_funct_end);
    time = (time_funct_end.QuadPart-time_funct_start.QuadPart)/(double)(freq.QuadPart);
#else
    clock_gettime( CLOCK_MONOTONIC, &time_funct_end );
    time = (time_funct_end.tv_sec - time_funct_start.tv_sec)*1.0 + (time_funct_end.tv_nsec - time_funct_start.tv_nsec)/1000000000.0;
#endif
    delete[] histogram;
  }
  // cleanup
  delete[] histogramAllChannels;
  return time;
}
//Reproducing the ThresholdRectToPix native version
// Thresholds imagedata into a freshly created 1 bpp Pix, reading the output
// dimensions from the Pix the caller passes in. A pixel is black when every
// channel with hi_values[ch] >= 0 is on the "hi" side of its threshold.
// NOTE(review): *pix is replaced without destroying the incoming Pix, which
// appears to leak it -- callers elsewhere may still hold a reference, so
// confirm ownership before adding a pixDestroy here.
void ThresholdRectToPix_Native(const unsigned char* imagedata,
                               int bytes_per_pixel,
                               int bytes_per_line,
                               const int* thresholds,
                               const int* hi_values,
                               Pix** pix) {
  int top = 0;
  int left = 0;
  // Output size is taken from the caller's Pix before it is replaced.
  int width = pixGetWidth(*pix);
  int height = pixGetHeight(*pix);
  *pix = pixCreate(width, height, 1);
  uinT32* pixdata = pixGetData(*pix);
  int wpl = pixGetWpl(*pix);
  const unsigned char* srcdata = imagedata + top * bytes_per_line +
                                 left * bytes_per_pixel;
  for (int y = 0; y < height; ++y) {
    const uinT8* linedata = srcdata;
    uinT32* pixline = pixdata + y * wpl;
    for (int x = 0; x < width; ++x, linedata += bytes_per_pixel) {
      bool white_result = true;
      // A channel votes "black" when hi_values[ch] >= 0 and the sample falls
      // on the opposite side of the threshold from the hi value.
      for (int ch = 0; ch < bytes_per_pixel; ++ch) {
        if (hi_values[ch] >= 0 &&
            (linedata[ch] > thresholds[ch]) == (hi_values[ch] == 0)) {
          white_result = false;
          break;
        }
      }
      if (white_result)
        CLEAR_DATA_BIT(pixline, x);
      else
        SET_DATA_BIT(pixline, x);
    }
    srcdata += bytes_per_line;
  }
}
// Micro-benchmark: time ThresholdRectToPix on either the OpenCL device or
// the native CPU re-implementation. Returns elapsed wall-clock seconds.
// Note: both branches replace input.pix with the thresholded 1 bpp result.
double thresholdRectToPixMicroBench( GPUEnv *env, TessScoreEvaluationInputData input, ds_device_type type ) {
  double time;
#if ON_WINDOWS
  LARGE_INTEGER freq, time_funct_start, time_funct_end;
  QueryPerformanceFrequency(&freq);
#else
  TIMESPEC time_funct_start, time_funct_end;
#endif
  // input data: mid-scale thresholds, full-scale hi values on all 4 channels
  unsigned char pixelHi = (unsigned char)255;
  int* thresholds = new int[4];
  thresholds[0] = pixelHi/2;
  thresholds[1] = pixelHi/2;
  thresholds[2] = pixelHi/2;
  thresholds[3] = pixelHi/2;
  int *hi_values = new int[4];
  // fix: the original re-assigned `thresholds` here, leaving hi_values
  // uninitialized (undefined behavior when read by either implementation).
  hi_values[0] = pixelHi;
  hi_values[1] = pixelHi;
  hi_values[2] = pixelHi;
  hi_values[3] = pixelHi;
  //Pix* pix = pixCreate(width, height, 1);
  int top = 0;
  int left = 0;
  int bytes_per_line = input.width*input.numChannels;
  // function call
  if (type == DS_DEVICE_OPENCL_DEVICE) {
#if ON_WINDOWS
    QueryPerformanceCounter(&time_funct_start);
#else
    clock_gettime( CLOCK_MONOTONIC, &time_funct_start );
#endif
    // make the candidate device current for the static OpenCL entry points
    OpenclDevice::gpuEnv = *env;
    int wpl = pixGetWpl(input.pix);
    OpenclDevice::ThresholdRectToPixOCL(input.imageData, input.numChannels, bytes_per_line, thresholds, hi_values, &input.pix, input.height, input.width, top, left);
#if ON_WINDOWS
    QueryPerformanceCounter(&time_funct_end);
    time = (time_funct_end.QuadPart-time_funct_start.QuadPart)/(double)(freq.QuadPart);
#else
    clock_gettime( CLOCK_MONOTONIC, &time_funct_end );
    time = (time_funct_end.tv_sec - time_funct_start.tv_sec)*1.0 + (time_funct_end.tv_nsec - time_funct_start.tv_nsec)/1000000000.0;
#endif
  } else {
    tesseract::ImageThresholder thresholder;
    thresholder.SetImage( input.pix );
#if ON_WINDOWS
    QueryPerformanceCounter(&time_funct_start);
#else
    clock_gettime( CLOCK_MONOTONIC, &time_funct_start );
#endif
    ThresholdRectToPix_Native( input.imageData, input.numChannels, bytes_per_line,
                               thresholds, hi_values, &input.pix );
#if ON_WINDOWS
    QueryPerformanceCounter(&time_funct_end);
    time = (time_funct_end.QuadPart-time_funct_start.QuadPart)/(double)(freq.QuadPart);
#else
    clock_gettime( CLOCK_MONOTONIC, &time_funct_end );
    time = (time_funct_end.tv_sec - time_funct_start.tv_sec)*1.0 + (time_funct_end.tv_nsec - time_funct_start.tv_nsec)/1000000000.0;
#endif
  }
  // cleanup
  delete[] thresholds;
  delete[] hi_values;
  return time;
}
// Micro-benchmark: time the line-mask morphology pipeline (close, open,
// subtract, open) on either the OpenCL device or the native Leptonica calls.
// Returns elapsed wall-clock seconds.
double getLineMasksMorphMicroBench( GPUEnv *env, TessScoreEvaluationInputData input, ds_device_type type ) {
  double time = 0;
#if ON_WINDOWS
  LARGE_INTEGER freq, time_funct_start, time_funct_end;
  QueryPerformanceFrequency(&freq);
#else
  TIMESPEC time_funct_start, time_funct_end;
#endif
  // structuring-element sizes derived from resolution, mirroring Tesseract's
  // line-finding constants
  int resolution = 300;
  int wpl = pixGetWpl(input.pix);
  int kThinLineFraction = 20;     // tess constant
  int kMinLineLengthFraction = 4; // tess constant
  int max_line_width = resolution / kThinLineFraction;
  int min_line_length = resolution / kMinLineLengthFraction;
  int closing_brick = max_line_width / 3;
  // function call
  if (type == DS_DEVICE_OPENCL_DEVICE) {
#if ON_WINDOWS
    QueryPerformanceCounter(&time_funct_start);
#else
    clock_gettime( CLOCK_MONOTONIC, &time_funct_start );
#endif
    // make the candidate device current for the static OpenCL entry points
    OpenclDevice::gpuEnv = *env;
    OpenclDevice::initMorphCLAllocations(wpl, input.height, input.pix);
    Pix *pix_vline = NULL, *pix_hline = NULL, *pix_closed = NULL;
    OpenclDevice::pixGetLinesCL(NULL, input.pix, &pix_vline, &pix_hline, &pix_closed, true, closing_brick, closing_brick, max_line_width, max_line_width, min_line_length, min_line_length);
    OpenclDevice::releaseMorphCLBuffers();
    // NOTE(review): ownership of the pix_* outputs of pixGetLinesCL is
    // unclear from here (they wrap mapped CL buffers); not destroyed.
#if ON_WINDOWS
    QueryPerformanceCounter(&time_funct_end);
    time = (time_funct_end.QuadPart-time_funct_start.QuadPart)/(double)(freq.QuadPart);
#else
    clock_gettime( CLOCK_MONOTONIC, &time_funct_end );
    time = (time_funct_end.tv_sec - time_funct_start.tv_sec)*1.0 + (time_funct_end.tv_nsec - time_funct_start.tv_nsec)/1000000000.0;
#endif
  } else {
#if ON_WINDOWS
    QueryPerformanceCounter(&time_funct_start);
#else
    clock_gettime( CLOCK_MONOTONIC, &time_funct_start );
#endif
    // native serial code
    Pix *src_pix = input.pix;
    Pix *pix_closed = pixCloseBrick(NULL, src_pix, closing_brick, closing_brick);
    Pix *pix_solid = pixOpenBrick(NULL, pix_closed, max_line_width, max_line_width);
    Pix *pix_hollow = pixSubtract(NULL, pix_closed, pix_solid);
    pixDestroy(&pix_solid);
    Pix *pix_vline = pixOpenBrick(NULL, pix_hollow, 1, min_line_length);
    Pix *pix_hline = pixOpenBrick(NULL, pix_hollow, min_line_length, 1);
    pixDestroy(&pix_hollow);
#if ON_WINDOWS
    QueryPerformanceCounter(&time_funct_end);
    time = (time_funct_end.QuadPart-time_funct_start.QuadPart)/(double)(freq.QuadPart);
#else
    clock_gettime( CLOCK_MONOTONIC, &time_funct_end );
    time = (time_funct_end.tv_sec - time_funct_start.tv_sec)*1.0 + (time_funct_end.tv_nsec - time_funct_start.tv_nsec)/1000000000.0;
#endif
    // fix: these intermediate results were previously leaked (outside the
    // timed region, so timing is unaffected)
    pixDestroy(&pix_vline);
    pixDestroy(&pix_hline);
    pixDestroy(&pix_closed);
  }
  return time;
}
/******************************************************************************
* Device Selection
*****************************************************************************/
#include "stdlib.h"
// encode score object as byte string
// encode score object as byte string
// Copies the device's TessDeviceScore into a freshly allocated byte array
// (ownership passes to the caller via *serializedScore).
ds_status serializeScore( ds_device* device, void **serializedScore, unsigned int* serializedScoreSize ) {
  const unsigned int numBytes = sizeof(TessDeviceScore);
  unsigned char *bytes = new unsigned char[numBytes];
  memcpy(bytes, device->score, numBytes);
  *serializedScoreSize = numBytes;
  *serializedScore = bytes;
  return DS_SUCCESS;
}
// parses byte string and stores in score object
// Allocates a TessDeviceScore for device->score and fills it from the
// serialized bytes read out of the profile file.
ds_status deserializeScore( ds_device* device, const unsigned char* serializedScore, unsigned int serializedScoreSize ) {
  device->score = new TessDeviceScore;
  // fix: clamp the copy to the struct size instead of trusting the
  // caller-supplied size (a corrupt/stale profile file previously caused a
  // heap overflow here).
  unsigned int numBytes = serializedScoreSize;
  if (numBytes > sizeof(TessDeviceScore)) {
    numBytes = sizeof(TessDeviceScore);
  }
  memcpy(device->score, serializedScore, numBytes);
  return DS_SUCCESS;
}
// evaluate devices
// Runs the four micro-benchmarks on one device (OpenCL or native CPU),
// combines the times with fixed weights, and stores the result in
// device->score (a heap-allocated TessDeviceScore; smaller time = better).
ds_status evaluateScoreForDevice( ds_device *device, void *inputData) {
  // overwrite static gpuEnv w/ current device
  // so native opencl calls can be used; they use static gpuEnv
  printf("\n[DS] Device: \"%s\" (%s) evaluation...\n", device->oclDeviceName, device->type==DS_DEVICE_OPENCL_DEVICE ? "OpenCL" : "Native" );
  GPUEnv *env = NULL;
  if (device->type == DS_DEVICE_OPENCL_DEVICE) {
    env = new GPUEnv;
    //printf("[DS] populating tmp GPUEnv from device\n");
    populateGPUEnvFromDevice( env, device->oclDeviceID);
    env->mnFileCount = 0; //argc;
    env->mnKernelCount = 0UL;
    //printf("[DS] compiling kernels for tmp GPUEnv\n");
    OpenclDevice::gpuEnv = *env;
    OpenclDevice::CompileKernelFile(env, "");
  }
  TessScoreEvaluationInputData *input = (TessScoreEvaluationInputData *)inputData;
  // pixReadTiff
  double composeRGBPixelTime = composeRGBPixelMicroBench( env, *input, device->type );
  // HistogramRect
  double histogramRectTime = histogramRectMicroBench( env, *input, device->type );
  // ThresholdRectToPix
  double thresholdRectToPixTime = thresholdRectToPixMicroBench( env, *input, device->type );
  // getLineMasks
  double getLineMasksMorphTime = getLineMasksMorphMicroBench( env, *input, device->type );
  // weigh times (% of cpu time)
  // these weights should be the % execution time that the native cpu code took
  float composeRGBPixelWeight = 1.2f;
  float histogramRectWeight = 2.4f;
  float thresholdRectToPixWeight = 4.5f;
  float getLineMasksMorphWeight = 5.0f;
  float weightedTime =
      composeRGBPixelWeight * composeRGBPixelTime +
      histogramRectWeight * histogramRectTime +
      thresholdRectToPixWeight * thresholdRectToPixTime +
      getLineMasksMorphWeight * getLineMasksMorphTime
      ;
  device->score = (void *)new TessDeviceScore;
  ((TessDeviceScore *)device->score)->time = weightedTime;
  printf("[DS] Device: \"%s\" (%s) evaluated\n", device->oclDeviceName, device->type==DS_DEVICE_OPENCL_DEVICE ? "OpenCL" : "Native" );
  printf("[DS]%25s: %f (w=%.1f)\n", "composeRGBPixel", composeRGBPixelTime, composeRGBPixelWeight );
  printf("[DS]%25s: %f (w=%.1f)\n", "HistogramRect", histogramRectTime, histogramRectWeight );
  printf("[DS]%25s: %f (w=%.1f)\n", "ThresholdRectToPix", thresholdRectToPixTime, thresholdRectToPixWeight );
  printf("[DS]%25s: %f (w=%.1f)\n", "getLineMasksMorph", getLineMasksMorphTime, getLineMasksMorphWeight );
  printf("[DS]%25s: %f\n", "Score", ((TessDeviceScore *)device->score)->time );
  // fix: env was previously leaked; gpuEnv holds its own copy, so the
  // temporary can be released once the benchmarks are done.
  delete env;
  return DS_SUCCESS;
}
// initial call to select device
// Lazily selects the fastest device: on first call, loads (or generates by
// benchmarking) per-device scores, picks the lowest weighted time, honors the
// TESSERACT_OPENCL_DEVICE environment override, and caches the result in
// selectedDevice. Subsequent calls return the cached selection.
ds_device OpenclDevice::getDeviceSelection( ) {
  //PERF_COUNT_START("getDeviceSelection")
  if (!deviceIsSelected) {
    PERF_COUNT_START("getDeviceSelection")
    // check if opencl is available at runtime
    if( 1 == LoadOpencl() ) {
      // opencl is available
      //PERF_COUNT_SUB("LoadOpencl")
      // setup devices
      ds_status status;
      ds_profile *profile;
      status = initDSProfile( &profile, "v0.1" );
      PERF_COUNT_SUB("initDSProfile")
      // try reading scores from file
      char *fileName = "tesseract_opencl_profile_devices.dat";
      status = readProfileFromFile( profile, deserializeScore, fileName);
      if (status != DS_SUCCESS) {
        // need to run evaluation
        printf("[DS] Profile file not available (%s); performing profiling.\n", fileName);
        // create input data
        TessScoreEvaluationInputData input;
        populateTessScoreEvaluationInputData( &input );
        //PERF_COUNT_SUB("populateTessScoreEvaluationInputData")
        // perform evaluations
        unsigned int numUpdates;
        status = profileDevices( profile, DS_EVALUATE_ALL, evaluateScoreForDevice, (void *)&input, &numUpdates );
        PERF_COUNT_SUB("profileDevices")
        // write scores to file
        if ( status == DS_SUCCESS ) {
          status = writeProfileToFile( profile, serializeScore, fileName);
          PERF_COUNT_SUB("writeProfileToFile")
          if ( status == DS_SUCCESS ) {
            printf("[DS] Scores written to file (%s).\n", fileName);
          } else {
            printf("[DS] Error saving scores to file (%s); scores not written to file.\n", fileName);
          }
        } else {
          printf("[DS] Unable to evaluate performance; scores not written to file.\n");
        }
      } else {
        PERF_COUNT_SUB("readProfileFromFile")
        printf("[DS] Profile read from file (%s).\n", fileName);
      }
      // we now have device scores either from file or evaluation
      // select fastest using custom Tesseract selection algorithm
      float bestTime = FLT_MAX; // begin search with worst possible time
      int bestDeviceIdx = -1;
      for (int d = 0; d < profile->numDevices; d++) {
        //((TessDeviceScore *)device->score)->time
        ds_device device = profile->devices[d];
        TessDeviceScore score = *(TessDeviceScore *)device.score;
        float time = score.time;
        printf("[DS] Device[%i] %i:%s score is %f\n", d+1, device.type, device.oclDeviceName, time);
        if (time < bestTime) {
          bestTime = time;
          bestDeviceIdx = d;
        }
      }
      printf("[DS] Selected Device[%i]: \"%s\" (%s)\n", bestDeviceIdx+1, profile->devices[bestDeviceIdx].oclDeviceName, profile->devices[bestDeviceIdx].type==DS_DEVICE_OPENCL_DEVICE ? "OpenCL" : "Native");
      // cleanup
      // TODO: call destructor for profile object?
      // Environment override: TESSERACT_OPENCL_DEVICE is a 1-based device
      // index that replaces the benchmark-based choice when valid.
      bool overrided = false;
      char *overrideDeviceStr = getenv("TESSERACT_OPENCL_DEVICE");
      if (overrideDeviceStr != NULL) {
        int overrideDeviceIdx = atoi(overrideDeviceStr);
        if (overrideDeviceIdx > 0 && overrideDeviceIdx <= profile->numDevices ) {
          printf("[DS] Overriding Device Selection (TESSERACT_OPENCL_DEVICE=%s, %i)\n", overrideDeviceStr, overrideDeviceIdx);
          bestDeviceIdx = overrideDeviceIdx - 1;
          overrided = true;
        } else {
          printf("[DS] Ignoring invalid TESSERACT_OPENCL_DEVICE=%s ([1,%i] are valid devices).\n", overrideDeviceStr, profile->numDevices);
        }
      }
      if (overrided) {
        printf("[DS] Overridden Device[%i]: \"%s\" (%s)\n", bestDeviceIdx+1, profile->devices[bestDeviceIdx].oclDeviceName, profile->devices[bestDeviceIdx].type==DS_DEVICE_OPENCL_DEVICE ? "OpenCL" : "Native");
      }
      selectedDevice = profile->devices[bestDeviceIdx];
    } else {
      // opencl isn't available at runtime, select native cpu device
      printf("[DS] OpenCL runtime not available.\n");
      selectedDevice.type = DS_DEVICE_NATIVE_CPU;
      selectedDevice.oclDeviceName = "(null)";
      selectedDevice.score = NULL;
      selectedDevice.oclDeviceID = NULL;
      selectedDevice.oclDriverVersion = NULL;
    }
    deviceIsSelected = true;
    PERF_COUNT_SUB("select from Profile")
    PERF_COUNT_END
  }
  //PERF_COUNT_END
  return selectedDevice;
}
#endif
// Returns true when the cached device selection is an OpenCL device.
bool OpenclDevice::selectedDeviceIsOpenCL() {
#if USE_DEVICE_SELECTION
  return getDeviceSelection().type == DS_DEVICE_OPENCL_DEVICE;
#else
  // Without device selection support, OpenCL is assumed usable.
  return true;
#endif
}
// Returns true when the cached device selection is the native CPU.
bool OpenclDevice::selectedDeviceIsNativeCPU() {
#if USE_DEVICE_SELECTION
  return getDeviceSelection().type == DS_DEVICE_NATIVE_CPU;
#else
  // Without device selection support, the native CPU path is never chosen.
  return false;
#endif
}
#endif
| C++ |
#include <stdio.h>
#include "allheaders.h"
#include "pix.h"
#ifdef USE_OPENCL
#include "tiff.h"
#include "tiffio.h"
#endif
#include "tprintf.h"
// including CL/cl.h doesn't occur until USE_OPENCL defined below
// platform preprocessor commands
#if defined( WIN32 ) || defined( __WIN32__ ) || defined( _WIN32 ) || defined( __CYGWIN32__ ) || defined( __MINGW32__ )
#define ON_WINDOWS 1
#define ON_LINUX 0
#define ON_APPLE 0
#define ON_OTHER 0
#define IF_WINDOWS(X) X
#define IF_LINUX(X)
#define IF_APPLE(X)
#define IF_OTHER(X)
#define NOT_WINDOWS(X)
#elif defined( __linux__ )
#define ON_WINDOWS 0
#define ON_LINUX 1
#define ON_APPLE 0
#define ON_OTHER 0
#define IF_WINDOWS(X)
#define IF_LINUX(X) X
#define IF_APPLE(X)
#define IF_OTHER(X)
#define NOT_WINDOWS(X) X
#elif defined( __APPLE__ )
#define ON_WINDOWS 0
#define ON_LINUX 0
#define ON_APPLE 1
#define ON_OTHER 0
#define IF_WINDOWS(X)
#define IF_LINUX(X)
#define IF_APPLE(X) X
#define IF_OTHER(X)
#define NOT_WINDOWS(X) X
#else
#define ON_WINDOWS 0
#define ON_LINUX 0
#define ON_APPLE 0
#define ON_OTHER 1
#define IF_WINDOWS(X)
#define IF_LINUX(X)
#define IF_APPLE(X)
#define IF_OTHER(X) X
#define NOT_WINDOWS(X) X
#endif
#if ON_LINUX
#include <time.h>
#endif
#if ON_APPLE
#include <mach/clock.h>
#include <mach/mach.h>
#define CLOCK_MONOTONIC SYSTEM_CLOCK
#define clock_gettime clock_get_time
#endif
/************************************************************************************
* enable/disable reporting of performance
* PERF_COUNT_VERBOSE (the macro actually defined below; levels:)
* 0 - no reporting
* 1 - no reporting
* 2 - report total function call time for functions we're tracking
* 3 - optionally report breakdown of function calls (kernel launch, kernel time, data copies)
************************************************************************************/
#define PERF_COUNT_VERBOSE 1
#define PERF_COUNT_REPORT_STR "[%36s], %24s, %11.6f\n"
#if ON_WINDOWS
#if PERF_COUNT_VERBOSE >= 2
#define PERF_COUNT_START(FUNCT_NAME) \
char *funct_name = FUNCT_NAME; \
double elapsed_time_sec; \
LARGE_INTEGER freq, time_funct_start, time_funct_end, time_sub_start, time_sub_end; \
QueryPerformanceFrequency(&freq); \
QueryPerformanceCounter(&time_funct_start); \
time_sub_start = time_funct_start; \
time_sub_end = time_funct_start;
#define PERF_COUNT_END \
QueryPerformanceCounter(&time_funct_end); \
elapsed_time_sec = (time_funct_end.QuadPart-time_funct_start.QuadPart)/(double)(freq.QuadPart); \
tprintf(PERF_COUNT_REPORT_STR, funct_name, "total", elapsed_time_sec);
#else
#define PERF_COUNT_START(FUNCT_NAME)
#define PERF_COUNT_END
#endif
#if PERF_COUNT_VERBOSE >= 3
#define PERF_COUNT_SUB(SUB) \
QueryPerformanceCounter(&time_sub_end); \
elapsed_time_sec = (time_sub_end.QuadPart-time_sub_start.QuadPart)/(double)(freq.QuadPart); \
tprintf(PERF_COUNT_REPORT_STR, funct_name, SUB, elapsed_time_sec); \
time_sub_start = time_sub_end;
#else
#define PERF_COUNT_SUB(SUB)
#endif
// not on windows
#else
#if PERF_COUNT_VERBOSE >= 2
#define PERF_COUNT_START(FUNCT_NAME) \
char *funct_name = FUNCT_NAME; \
double elapsed_time_sec; \
timespec time_funct_start, time_funct_end, time_sub_start, time_sub_end; \
clock_gettime( CLOCK_MONOTONIC, &time_funct_start ); \
time_sub_start = time_funct_start; \
time_sub_end = time_funct_start;
#define PERF_COUNT_END \
clock_gettime( CLOCK_MONOTONIC, &time_funct_end ); \
elapsed_time_sec = (time_funct_end.tv_sec - time_funct_start.tv_sec)*1.0 + (time_funct_end.tv_nsec - time_funct_start.tv_nsec)/1000000000.0; \
tprintf(PERF_COUNT_REPORT_STR, funct_name, "total", elapsed_time_sec);
#else
#define PERF_COUNT_START(FUNCT_NAME)
#define PERF_COUNT_END
#endif
#if PERF_COUNT_VERBOSE >= 3
#define PERF_COUNT_SUB(SUB) \
clock_gettime( CLOCK_MONOTONIC, &time_sub_end ); \
elapsed_time_sec = (time_sub_end.tv_sec - time_sub_start.tv_sec)*1.0 + (time_sub_end.tv_nsec - time_sub_start.tv_nsec)/1000000000.0; \
tprintf(PERF_COUNT_REPORT_STR, funct_name, SUB, elapsed_time_sec); \
time_sub_start = time_sub_end;
#else
#define PERF_COUNT_SUB(SUB)
#endif
#endif
/**************************************************************************
 * enable/disable use of OpenCL
 **************************************************************************/
#ifdef USE_OPENCL
#define USE_DEVICE_SELECTION 1
#include "opencl_device_selection.h"
// NOTE(review): falling back from strcasecmp to strcmp makes the
// comparison case-sensitive on platforms that lack strcasecmp --
// confirm callers only rely on exact-case matches.
#ifndef strcasecmp
#define strcasecmp strcmp
#endif
// Capacity limits for the kernel/program bookkeeping tables in GPUEnv.
#define MAX_KERNEL_STRING_LEN 64
#define MAX_CLFILE_NUM 50
#define MAX_CLKERNEL_NUM 200
#define MAX_KERNEL_NAME_LEN 64
// AMD vendor extension property id (queue's backing OS thread handle).
#define CL_QUEUE_THREAD_HANDLE_AMD 0x403E
// Work-group sizes used when launching kernels; HMOR* presumably for the
// horizontal-morph kernels -- confirm against the kernel launch code.
#define GROUPSIZE_X 16
#define GROUPSIZE_Y 16
#define GROUPSIZE_HMORX 256
#define GROUPSIZE_HMORY 1
// Per-kernel execution context: the OpenCL objects a kernel wrapper needs
// to launch work (context, command queue, program, kernel) plus the
// kernel's name for lookup and diagnostics.
typedef struct _KernelEnv
{
    cl_context mpkContext;
    cl_command_queue mpkCmdQueue;
    cl_program mpkProgram;
    cl_kernel mpkKernel;
    char mckKernelName[150];
} KernelEnv;
// Bundle of the top-level OpenCL objects for one selected platform/device.
typedef struct _OpenCLEnv
{
    cl_platform_id mpOclPlatformID;
    cl_context mpOclContext;
    cl_device_id mpOclDevsID;
    cl_command_queue mpOclCmdQueue;
} OpenCLEnv;
// Signature of the C wrapper functions registered to launch a kernel.
typedef int ( *cl_kernel_function )( void **userdata, KernelEnv *kenv );
// Boundary condition used by the morphological operations.
static l_int32 MORPH_BC = ASYMMETRIC_MORPH_BC;
// lmask32[n] has the n most-significant bits set (lmask32[0] == 0).
static const l_uint32 lmask32[] = {0x0,
    0x80000000, 0xc0000000, 0xe0000000, 0xf0000000,
    0xf8000000, 0xfc000000, 0xfe000000, 0xff000000,
    0xff800000, 0xffc00000, 0xffe00000, 0xfff00000,
    0xfff80000, 0xfffc0000, 0xfffe0000, 0xffff0000,
    0xffff8000, 0xffffc000, 0xffffe000, 0xfffff000,
    0xfffff800, 0xfffffc00, 0xfffffe00, 0xffffff00,
    0xffffff80, 0xffffffc0, 0xffffffe0, 0xfffffff0,
    0xfffffff8, 0xfffffffc, 0xfffffffe, 0xffffffff};
// rmask32[n] has the n least-significant bits set (rmask32[0] == 0).
static const l_uint32 rmask32[] = {0x0,
    0x00000001, 0x00000003, 0x00000007, 0x0000000f,
    0x0000001f, 0x0000003f, 0x0000007f, 0x000000ff,
    0x000001ff, 0x000003ff, 0x000007ff, 0x00000fff,
    0x00001fff, 0x00003fff, 0x00007fff, 0x0000ffff,
    0x0001ffff, 0x0003ffff, 0x0007ffff, 0x000fffff,
    0x001fffff, 0x003fffff, 0x007fffff, 0x00ffffff,
    0x01ffffff, 0x03ffffff, 0x07ffffff, 0x0fffffff,
    0x1fffffff, 0x3fffffff, 0x7fffffff, 0xffffffff};
// Logs an OpenCL error (with the name of the failing call) when |status|
// is not CL_SUCCESS.  Wrapped in do { } while (0) so the macro behaves as
// a single statement, e.g. inside an unbraced if/else branch; the
// original bare-if form would silently pair a following `else` with the
// macro's internal `if`.
#define CHECK_OPENCL(status,name) \
do { \
    if( (status) != CL_SUCCESS ) \
    { \
        printf ("OpenCL error code is %d when %s.\n", (status), (name)); \
    } \
} while (0)
// Global OpenCL state shared by the wrapper: the chosen platform/device,
// its context and queue, plus bookkeeping tables for compiled programs,
// kernels and their registered C wrapper functions.
typedef struct _GPUEnv
{
    //share vb in all modules in hb library
    cl_platform_id mpPlatformID;
    cl_device_type mDevType;
    cl_context mpContext;
    cl_device_id *mpArryDevsID;
    cl_device_id mpDevID;
    cl_command_queue mpCmdQueue;
    cl_kernel mpArryKernels[MAX_CLFILE_NUM];
    cl_program mpArryPrograms[MAX_CLFILE_NUM]; //one program object maps one kernel source file
    char mArryKnelSrcFile[MAX_CLFILE_NUM][256], //the max len of kernel file name is 256
         mArrykernelNames[MAX_CLKERNEL_NUM][MAX_KERNEL_STRING_LEN + 1];
    cl_kernel_function mpArryKnelFuncs[MAX_CLKERNEL_NUM];
    int mnKernelCount, mnFileCount, // only one kernel file
        mnIsUserCreated; // 1: created , 0:no create and needed to create by opencl wrapper
    // Non-zero when the device advertises the corresponding fp64 extension
    // (cl_khr_fp64 / cl_amd_fp64).
    int mnKhrFp64Flag;
    int mnAmdFp64Flag;
} GPUEnv;
// Static wrapper around the OpenCL runtime: environment setup and device
// selection, kernel compilation/binary caching, and OpenCL
// implementations of the image operations (TIFF decode, morphology,
// histogram and thresholding) used by the rest of the engine.  All state
// lives in the static gpuEnv; instances carry no data.
class OpenclDevice
{
public:
    static GPUEnv gpuEnv;
    static int isInited;
    OpenclDevice();
    ~OpenclDevice();
    static int InitEnv(); // load dll, call InitOpenclRunEnv(0)
    static int InitOpenclRunEnv( int argc ); // RegistOpenclKernel, double flags, compile kernels
    static int InitOpenclRunEnv_DeviceSelection( int argc ); // RegistOpenclKernel, double flags, compile kernels
    static int InitOpenclRunEnv( GPUEnv *gpu ); // select device by env_CPU or selector
    static int RegistOpenclKernel();
    static int ReleaseOpenclRunEnv();
    static int ReleaseOpenclEnv( GPUEnv *gpuInfo );
    static int CompileKernelFile( GPUEnv *gpuInfo, const char *buildOption );
    static int CachedOfKernerPrg( const GPUEnv *gpuEnvCached, const char * clFileName );
    // Kernel binary caching: writes/reads prebuilt program binaries so the
    // kernels need not be recompiled on every run.
    static int GeneratBinFromKernelSource( cl_program program, const char * clFileName );
    static int WriteBinaryToFile( const char* fileName, const char* birary, size_t numBytes );
    static int BinaryGenerated( const char * clFileName, FILE ** fhandle );
    //static int CompileKernelFile( const char *filename, GPUEnv *gpuInfo, const char *buildOption );
    // TIFF reading helpers accelerated with OpenCL.
    static l_uint32* pixReadFromTiffKernel(l_uint32 *tiffdata,l_int32 w,l_int32 h,l_int32 wpl, l_uint32 *line);
    static Pix* pixReadTiffCl( const char *filename, l_int32 n );
    static PIX * pixReadStreamTiffCl ( FILE *fp, l_int32 n );
    static PIX * pixReadMemTiffCl(const l_uint8 *data, size_t size, l_int32 n);
    static PIX* pixReadFromTiffStreamCl(TIFF *tif);
    static int composeRGBPixelCl(int *tiffdata,int *line,int h,int w);
    static l_int32 getTiffStreamResolutionCl(TIFF *tif,l_int32 *pxres,l_int32 *pyres);
    static TIFF* fopenTiffCl(FILE *fp,const char *modestring);
    /* OpenCL implementations of Morphological operations*/
    //Initialization of OCL buffers used in Morph operations
    static int initMorphCLAllocations(l_int32 wpl, l_int32 h, PIX* pixs);
    static void releaseMorphCLBuffers();
    // OpenCL implementation of Morphology Dilate
    static PIX* pixDilateBrickCL(PIX *pixd, PIX *pixs, l_int32 hsize, l_int32 vsize, bool reqDataCopy);
    // OpenCL implementation of Morphology Erode
    static PIX* pixErodeBrickCL(PIX *pixd, PIX *pixs, l_int32 hsize, l_int32 vsize, bool reqDataCopy);
    // OpenCL implementation of Morphology Close
    static PIX* pixCloseBrickCL(PIX *pixd, PIX *pixs, l_int32 hsize, l_int32 vsize, bool reqDataCopy);
    // OpenCL implementation of Morphology Open
    static PIX* pixOpenBrickCL(PIX *pixd, PIX *pixs, l_int32 hsize, l_int32 vsize, bool reqDataCopy);
    // OpenCL implementation of pix subtraction
    static PIX* pixSubtractCL(PIX *pixd, PIX *pixs1, PIX *pixs2, bool reqDataCopy);
    // OpenCL implementation of Morphology (Hollow = Closed - Open)
    static PIX* pixHollowCL(PIX *pixd, PIX *pixs, l_int32 close_hsize, l_int32 close_vsize, l_int32 open_hsize, l_int32 open_vsize, bool reqDataCopy);
    static void pixGetLinesCL(PIX *pixd, PIX *pixs,
                              PIX** pix_vline, PIX** pix_hline,
                              PIX** pixClosed, bool getpixClosed,
                              l_int32 close_hsize, l_int32 close_vsize,
                              l_int32 open_hsize, l_int32 open_vsize,
                              l_int32 line_hsize, l_int32 line_vsize);
    //int InitOpenclAttr( OpenCLEnv * env );
    //int ReleaseKernel( KernelEnv * env );
    static int SetKernelEnv( KernelEnv *envInfo );
    //int CreateKernel( char * kernelname, KernelEnv * env );
    //int RunKernel( const char *kernelName, void **userdata );
    //int ConvertToString( const char *filename, char **source );
    //int CheckKernelName( KernelEnv *envInfo, const char *kernelName );
    //int RegisterKernelWrapper( const char *kernelName, cl_kernel_function function );
    //int RunKernelWrapper( cl_kernel_function function, const char * kernelName, void **usrdata );
    //int GetKernelEnvAndFunc( const char *kernelName, KernelEnv *env, cl_kernel_function *function );
    // static cl_device_id performDeviceSelection( );
    //static bool thresholdRectToPixMicroBench( TessScoreEvaluationInputData input, ds_device_type type);
    static int LoadOpencl();
#ifdef WIN32
    //static int OpenclInite();
    static void FreeOpenclDll();
#endif
    //int GetOpenclState();
    //void SetOpenclState( int state );
    inline static int AddKernelConfig( int kCount, const char *kName );
    /* for binarization */
    static void HistogramRectOCL(
        const unsigned char *imagedata,
        int bytes_per_pixel,
        int bytes_per_line,
        int left,
        int top,
        int width,
        int height,
        int kHistogramSize,
        int *histogramAllChannels);
    static void ThresholdRectToPixOCL(
        const unsigned char* imagedata,
        int bytes_per_pixel,
        int bytes_per_line,
        const int* thresholds,
        const int* hi_values,
        Pix** pix,
        int rect_height,
        int rect_width,
        int rect_top,
        int rect_left);
#if USE_DEVICE_SELECTION
    // Cached result of the micro-benchmark-based device selection.
    static ds_device getDeviceSelection();
    static ds_device selectedDevice;
    static bool deviceIsSelected;
#endif
    static bool selectedDeviceIsOpenCL();
    static bool selectedDeviceIsNativeCPU();
};
#endif
| C++ |
/******************************************************************************
** Filename: stopper.c
** Purpose: Stopping criteria for word classifier.
** Author: Dan Johnson
** History: Mon Apr 29 14:56:49 1991, DSJ, Created.
**
** (c) Copyright Hewlett-Packard Company, 1988.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
******************************************************************************/
#include <stdio.h>
#include <string.h>
#include <ctype.h>
#include <math.h>
#include "stopper.h"
#include "ambigs.h"
#include "ccutil.h"
#include "const.h"
#include "danerror.h"
#include "dict.h"
#include "efio.h"
#include "helpers.h"
#include "matchdefs.h"
#include "pageres.h"
#include "params.h"
#include "ratngs.h"
#include "scanutils.h"
#include "unichar.h"
#ifdef _MSC_VER
#pragma warning(disable:4244) // Conversion warnings
#pragma warning(disable:4800) // int/bool warnings
#endif
using tesseract::ScriptPos;
/**----------------------------------------------------------------------------
Private Code
----------------------------------------------------------------------------**/
namespace tesseract {
// Returns true when |best_choice| is good enough to stop searching for a
// better segmentation: it must be free of dangerous ambiguities, beat a
// certainty threshold (relaxed for valid dictionary words with sensible
// case), have a consistent x-height, and have uniform per-character
// certainties.
bool Dict::AcceptableChoice(const WERD_CHOICE& best_choice,
                            XHeightConsistencyEnum xheight_consistency) {
  if (stopper_no_acceptable_choices) return false;
  if (best_choice.length() == 0) return false;

  const bool no_dang_ambigs = !best_choice.dangerous_ambig_found();
  const bool is_valid_word = valid_word_permuter(best_choice.permuter(), false);
  const bool is_case_ok = case_ok(best_choice, getUnicharset());
  float CertaintyThreshold = stopper_nondict_certainty_base;

  if (stopper_debug_level >= 1) {
    const char *xht;
    switch (xheight_consistency) {
      case XH_GOOD: xht = "NORMAL"; break;
      case XH_SUBNORMAL: xht = "SUBNORMAL"; break;
      case XH_INCONSISTENT: xht = "INCONSISTENT"; break;
      default: xht = "UNKNOWN"; break;
    }
    tprintf("\nStopper: %s (word=%c, case=%c, xht_ok=%s=[%g,%g])\n",
            best_choice.unichar_string().string(),
            (is_valid_word ? 'y' : 'n'),
            (is_case_ok ? 'y' : 'n'),
            xht,
            best_choice.min_x_height(),
            best_choice.max_x_height());
  }
  // Do not accept invalid words in PASS1.
  if (reject_offset_ <= 0.0f && !is_valid_word) return false;
  // Dictionary words with good case earn a more lenient threshold
  // proportional to the length of their shortest alpha run.
  if (is_valid_word && is_case_ok) {
    int excess = LengthOfShortestAlphaRun(best_choice) - stopper_smallword_size;
    if (excess > 0)
      CertaintyThreshold += excess * stopper_certainty_per_char;
  }
  if (stopper_debug_level >= 1)
    tprintf("Stopper: Rating = %4.1f, Certainty = %4.1f, Threshold = %4.1f\n",
            best_choice.rating(), best_choice.certainty(), CertaintyThreshold);

  const bool acceptable = no_dang_ambigs &&
                          best_choice.certainty() > CertaintyThreshold &&
                          xheight_consistency < XH_INCONSISTENT &&
                          UniformCertainties(best_choice);
  if (!acceptable && stopper_debug_level >= 1) {
    tprintf("AcceptableChoice() returned false"
            " (no_dang_ambig:%d cert:%.4g thresh:%g uniform:%d)\n",
            no_dang_ambigs, best_choice.certainty(),
            CertaintyThreshold,
            UniformCertainties(best_choice));
  }
  return acceptable;
}
// Returns true when |word|'s best choice survives the rejecter: the word
// must have a unique best choice and beat a certainty threshold that is
// tightened by reject_offset_ and relaxed for cased dictionary words.
bool Dict::AcceptableResult(WERD_RES* word) {
  if (word->best_choice == NULL) return false;
  float threshold = stopper_nondict_certainty_base - reject_offset_;
  if (stopper_debug_level >= 1) {
    tprintf("\nRejecter: %s (word=%c, case=%c, unambig=%c, multiple=%c)\n",
            word->best_choice->debug_string().string(),
            (valid_word(*word->best_choice) ? 'y' : 'n'),
            (case_ok(*word->best_choice, getUnicharset()) ? 'y' : 'n'),
            word->best_choice->dangerous_ambig_found() ? 'n' : 'y',
            word->best_choices.singleton() ? 'n' : 'y');
  }
  if (word->best_choice->length() == 0 || !word->best_choices.singleton())
    return false;
  // Relax the threshold for valid dictionary words with good case.
  if (valid_word(*word->best_choice) &&
      case_ok(*word->best_choice, getUnicharset())) {
    int excess =
        LengthOfShortestAlphaRun(*word->best_choice) - stopper_smallword_size;
    if (excess > 0)
      threshold += excess * stopper_certainty_per_char;
  }
  if (stopper_debug_level >= 1)
    tprintf("Rejecter: Certainty = %4.1f, Threshold = %4.1f ",
            word->best_choice->certainty(), threshold);
  const bool accepted = word->best_choice->certainty() > threshold &&
                        !stopper_no_acceptable_choices;
  if (stopper_debug_level >= 1)
    tprintf(accepted ? "ACCEPTED\n" : "REJECTED\n");
  return accepted;
}
// Returns true if |best_choice| contains no dangerous ambiguity, i.e. no
// alternative dictionary word can be assembled from the known ambiguities
// of its unichars.  When fix_replaceable is true, a first pass rewrites
// any "replaceable" ambiguities directly into best_choice/ratings via
// ReplaceAmbig().  If fixpt is non-NULL it is filled with DANGERR_INFO
// records describing every ambiguity found.
bool Dict::NoDangerousAmbig(WERD_CHOICE *best_choice,
                            DANGERR *fixpt,
                            bool fix_replaceable,
                            MATRIX *ratings) {
  if (stopper_debug_level > 2) {
    tprintf("\nRunning NoDangerousAmbig() for %s\n",
            best_choice->debug_string().string());
  }
  // Construct BLOB_CHOICE_LIST_VECTOR with ambiguities
  // for each unichar id in BestChoice.
  BLOB_CHOICE_LIST_VECTOR ambig_blob_choices;
  int i;
  bool ambigs_found = false;
  // For each position in best_choice:
  // -- choose AMBIG_SPEC_LIST that corresponds to unichar_id at best_choice[i]
  // -- initialize wrong_ngram with a single unichar_id at best_choice[i]
  // -- look for ambiguities corresponding to wrong_ngram in the list while
  //    adding the following unichar_ids from best_choice to wrong_ngram
  //
  // Repeat the above procedure twice: first time look through
  // ambigs to be replaced and replace all the ambiguities found;
  // second time look through dangerous ambiguities and construct
  // ambig_blob_choices with fake a blob choice for each ambiguity
  // and pass them to dawg_permute_and_select() to search for
  // ambiguous words in the dictionaries.
  //
  // Note that during the execution of the for loop (on the first pass)
  // if replacements are made the length of best_choice might change.
  for (int pass = 0; pass < (fix_replaceable ? 2 : 1); ++pass) {
    bool replace = (fix_replaceable && pass == 0);
    const UnicharAmbigsVector &table = replace ?
      getUnicharAmbigs().replace_ambigs() : getUnicharAmbigs().dang_ambigs();
    if (!replace) {
      // Initialize ambig_blob_choices with lists containing a single
      // unichar id for the correspoding position in best_choice.
      // best_choice consisting from only the original letters will
      // have a rating of 0.0.
      for (i = 0; i < best_choice->length(); ++i) {
        BLOB_CHOICE_LIST *lst = new BLOB_CHOICE_LIST();
        BLOB_CHOICE_IT lst_it(lst);
        // TODO(rays/antonova) Put real xheights and y shifts here.
        lst_it.add_to_end(new BLOB_CHOICE(best_choice->unichar_id(i),
                                          0.0, 0.0, -1, -1, -1, 0, 1, 0,
                                          BCC_AMBIG));
        ambig_blob_choices.push_back(lst);
      }
    }
    UNICHAR_ID wrong_ngram[MAX_AMBIG_SIZE + 1];
    int wrong_ngram_index;
    int next_index;
    // blob_index tracks the first blob covered by best_choice[i].
    int blob_index = 0;
    for (i = 0; i < best_choice->length(); blob_index += best_choice->state(i),
         ++i) {
      UNICHAR_ID curr_unichar_id = best_choice->unichar_id(i);
      if (stopper_debug_level > 2) {
        tprintf("Looking for %s ngrams starting with %s:\n",
                replace ? "replaceable" : "ambiguous",
                getUnicharset().debug_str(curr_unichar_id).string());
      }
      int num_wrong_blobs = best_choice->state(i);
      wrong_ngram_index = 0;
      wrong_ngram[wrong_ngram_index] = curr_unichar_id;
      if (curr_unichar_id == INVALID_UNICHAR_ID ||
          curr_unichar_id >= table.size() ||
          table[curr_unichar_id] == NULL) {
        continue;  // there is no ambig spec for this unichar id
      }
      AmbigSpec_IT spec_it(table[curr_unichar_id]);
      for (spec_it.mark_cycle_pt(); !spec_it.cycled_list();) {
        const AmbigSpec *ambig_spec = spec_it.data();
        // Terminate the candidate so compare() sees only the filled part.
        wrong_ngram[wrong_ngram_index+1] = INVALID_UNICHAR_ID;
        int compare = UnicharIdArrayUtils::compare(wrong_ngram,
                                                   ambig_spec->wrong_ngram);
        if (stopper_debug_level > 2) {
          tprintf("candidate ngram: ");
          UnicharIdArrayUtils::print(wrong_ngram, getUnicharset());
          tprintf("current ngram from spec: ");
          UnicharIdArrayUtils::print(ambig_spec->wrong_ngram, getUnicharset());
          tprintf("comparison result: %d\n", compare);
        }
        if (compare == 0) {
          // compare == 0: the spec's wrong ngram matches the current one.
          // Record the place where we found an ambiguity.
          if (fixpt != NULL) {
            UNICHAR_ID leftmost_id = ambig_spec->correct_fragments[0];
            fixpt->push_back(DANGERR_INFO(
                blob_index, blob_index + num_wrong_blobs, replace,
                getUnicharset().get_isngram(ambig_spec->correct_ngram_id),
                leftmost_id));
            if (stopper_debug_level > 1) {
              tprintf("fixpt+=(%d %d %d %d %s)\n", blob_index,
                      blob_index + num_wrong_blobs, false,
                      getUnicharset().get_isngram(
                          ambig_spec->correct_ngram_id),
                      getUnicharset().id_to_unichar(leftmost_id));
            }
          }
          if (replace) {
            if (stopper_debug_level > 2) {
              tprintf("replace ambiguity with %s : ",
                      getUnicharset().id_to_unichar(
                          ambig_spec->correct_ngram_id));
              UnicharIdArrayUtils::print(
                  ambig_spec->correct_fragments, getUnicharset());
            }
            ReplaceAmbig(i, ambig_spec->wrong_ngram_size,
                         ambig_spec->correct_ngram_id,
                         best_choice, ratings);
          } else if (i > 0 || ambig_spec->type != CASE_AMBIG) {
            // We found dang ambig - update ambig_blob_choices.
            if (stopper_debug_level > 2) {
              tprintf("found ambiguity: ");
              UnicharIdArrayUtils::print(
                  ambig_spec->correct_fragments, getUnicharset());
            }
            ambigs_found = true;
            for (int tmp_index = 0; tmp_index <= wrong_ngram_index;
                 ++tmp_index) {
              // Add a blob choice for the corresponding fragment of the
              // ambiguity. These fake blob choices are initialized with
              // negative ratings (which are not possible for real blob
              // choices), so that dawg_permute_and_select() considers any
              // word not consisting of only the original letters a better
              // choice and stops searching for alternatives once such a
              // choice is found.
              BLOB_CHOICE_IT bc_it(ambig_blob_choices[i+tmp_index]);
              bc_it.add_to_end(new BLOB_CHOICE(
                  ambig_spec->correct_fragments[tmp_index], -1.0, 0.0,
                  -1, -1, -1, 0, 1, 0, BCC_AMBIG));
            }
          }
          spec_it.forward();
        } else if (compare == -1) {
          // compare == -1: the candidate is a strict prefix of longer
          // specs; try extending it with the next unichar of best_choice.
          if (wrong_ngram_index+1 < ambig_spec->wrong_ngram_size &&
              ((next_index = wrong_ngram_index+1+i) < best_choice->length())) {
            // Add the next unichar id to wrong_ngram and keep looking for
            // more ambigs starting with curr_unichar_id in AMBIG_SPEC_LIST.
            wrong_ngram[++wrong_ngram_index] =
                best_choice->unichar_id(next_index);
            num_wrong_blobs += best_choice->state(next_index);
          } else {
            break;  // no more matching ambigs in this AMBIG_SPEC_LIST
          }
        } else {
          spec_it.forward();
        }
      }  // end searching AmbigSpec_LIST
    }  // end searching best_choice
  }  // end searching replace and dangerous ambigs
  // If any ambiguities were found permute the constructed ambig_blob_choices
  // to see if an alternative dictionary word can be found.
  if (ambigs_found) {
    if (stopper_debug_level > 2) {
      tprintf("\nResulting ambig_blob_choices:\n");
      for (i = 0; i < ambig_blob_choices.length(); ++i) {
        print_ratings_list("", ambig_blob_choices.get(i), getUnicharset());
        tprintf("\n");
      }
    }
    WERD_CHOICE *alt_word = dawg_permute_and_select(ambig_blob_choices, 0.0);
    // A negative rating means alt_word used at least one fake (ambig)
    // blob choice, i.e. a genuinely different dictionary word exists.
    ambigs_found = (alt_word->rating() < 0.0);
    if (ambigs_found) {
      if (stopper_debug_level >= 1) {
        tprintf ("Stopper: Possible ambiguous word = %s\n",
                 alt_word->debug_string().string());
      }
      if (fixpt != NULL) {
        // Note: Currently character choices combined from fragments can only
        // be generated by NoDangrousAmbigs(). This code should be updated if
        // the capability to produce classifications combined from character
        // fragments is added to other functions.
        int orig_i = 0;
        for (i = 0; i < alt_word->length(); ++i) {
          const UNICHARSET &uchset = getUnicharset();
          bool replacement_is_ngram =
              uchset.get_isngram(alt_word->unichar_id(i));
          UNICHAR_ID leftmost_id = alt_word->unichar_id(i);
          if (replacement_is_ngram) {
            // we have to extract the leftmost unichar from the ngram.
            const char *str = uchset.id_to_unichar(leftmost_id);
            int step = uchset.step(str);
            if (step) leftmost_id = uchset.unichar_to_id(str, step);
          }
          int end_i = orig_i + alt_word->state(i);
          if (alt_word->state(i) > 1 ||
              (orig_i + 1 == end_i && replacement_is_ngram)) {
            // Compute proper blob indices.
            int blob_start = 0;
            for (int j = 0; j < orig_i; ++j)
              blob_start += best_choice->state(j);
            int blob_end = blob_start;
            for (int j = orig_i; j < end_i; ++j)
              blob_end += best_choice->state(j);
            fixpt->push_back(DANGERR_INFO(blob_start, blob_end, true,
                                          replacement_is_ngram, leftmost_id));
            if (stopper_debug_level > 1) {
              tprintf("fixpt->dangerous+=(%d %d %d %d %s)\n", orig_i, end_i,
                      true, replacement_is_ngram,
                      uchset.id_to_unichar(leftmost_id));
            }
          }
          orig_i += alt_word->state(i);
        }
      }
    }
    delete alt_word;
  }
  if (output_ambig_words_file_ != NULL) {
    fprintf(output_ambig_words_file_, "\n");
  }
  ambig_blob_choices.delete_data_pointers();
  return !ambigs_found;
}
// Intentionally a no-op: there is no per-run ambiguity state to release.
void Dict::EndDangerousAmbigs() {}
// Configures the stopper for pass 1: no extra rejection offset, so the
// base certainty threshold applies unmodified.
void Dict::SettupStopperPass1() {
  reject_offset_ = 0.0;
}
// Configures the stopper for pass 2: apply the configured phase-2
// certainty rejection offset.
void Dict::SettupStopperPass2() {
  reject_offset_ = stopper_phase2_certainty_rejection_offset;
}
// Replaces the wrong_ngram_size unichars of werd_choice starting at
// wrong_ngram_begin_index with the single unichar correct_ngram_id,
// merging the corresponding entries of the ratings matrix.  The merged
// choice's rating is the sum of the replaced choices' ratings and its
// certainty is their average.
void Dict::ReplaceAmbig(int wrong_ngram_begin_index, int wrong_ngram_size,
                        UNICHAR_ID correct_ngram_id, WERD_CHOICE *werd_choice,
                        MATRIX *ratings) {
  int num_blobs_to_replace = 0;
  int begin_blob_index = 0;
  int i;
  // Rating and certainty for the new BLOB_CHOICE are derived from the
  // replaced choices.
  float new_rating = 0.0f;
  float new_certainty = 0.0f;
  BLOB_CHOICE* old_choice = NULL;
  // Walk the word up to the end of the replaced ngram: positions before
  // the ngram only advance begin_blob_index; positions inside accumulate
  // rating/certainty and the blob count of the span being merged.
  for (i = 0; i < wrong_ngram_begin_index + wrong_ngram_size; ++i) {
    if (i >= wrong_ngram_begin_index) {
      int num_blobs = werd_choice->state(i);
      int col = begin_blob_index + num_blobs_to_replace;
      int row = col + num_blobs - 1;
      BLOB_CHOICE_LIST* choices = ratings->get(col, row);
      ASSERT_HOST(choices != NULL);
      old_choice = FindMatchingChoice(werd_choice->unichar_id(i), choices);
      ASSERT_HOST(old_choice != NULL);
      new_rating += old_choice->rating();
      new_certainty += old_choice->certainty();
      num_blobs_to_replace += num_blobs;
    } else {
      begin_blob_index += werd_choice->state(i);
    }
  }
  new_certainty /= wrong_ngram_size;
  // If there is no entry in the ratings matrix, add it.
  MATRIX_COORD coord(begin_blob_index,
                     begin_blob_index + num_blobs_to_replace - 1);
  if (!coord.Valid(*ratings)) {
    ratings->IncreaseBandSize(coord.row - coord.col + 1);
  }
  if (ratings->get(coord.col, coord.row) == NULL)
    ratings->put(coord.col, coord.row, new BLOB_CHOICE_LIST);
  BLOB_CHOICE_LIST* new_choices = ratings->get(coord.col, coord.row);
  BLOB_CHOICE* choice = FindMatchingChoice(correct_ngram_id, new_choices);
  if (choice != NULL) {
    // Already there. Upgrade if new rating better.
    if (new_rating < choice->rating())
      choice->set_rating(new_rating);
    if (new_certainty < choice->certainty())
      choice->set_certainty(new_certainty);
    // DO NOT SORT!! It will mess up the iterator in LanguageModel::UpdateState.
  } else {
    // Need a new choice with the correct_ngram_id.
    choice = new BLOB_CHOICE(*old_choice);
    choice->set_unichar_id(correct_ngram_id);
    choice->set_rating(new_rating);
    choice->set_certainty(new_certainty);
    choice->set_classifier(BCC_AMBIG);
    choice->set_matrix_cell(coord.col, coord.row);
    BLOB_CHOICE_IT it (new_choices);
    it.add_to_end(choice);
  }
  // Remove current unichar from werd_choice. On the last iteration
  // set the correct replacement unichar instead of removing a unichar.
  for (int replaced_count = 0; replaced_count < wrong_ngram_size;
       ++replaced_count) {
    if (replaced_count + 1 == wrong_ngram_size) {
      werd_choice->set_blob_choice(wrong_ngram_begin_index,
                                   num_blobs_to_replace, choice);
    } else {
      werd_choice->remove_unichar_id(wrong_ngram_begin_index + 1);
    }
  }
  if (stopper_debug_level >= 1) {
    werd_choice->print("ReplaceAmbig() ");
    tprintf("Modified blob_choices: ");
    print_ratings_list("\n", new_choices, getUnicharset());
  }
}
// Returns the length of the shortest contiguous run of alphabetic
// unichars in |WordChoice|, or 0 when the word contains no alpha at all.
int Dict::LengthOfShortestAlphaRun(const WERD_CHOICE &WordChoice) {
  int min_run = MAX_INT32;
  int run_len = 0;
  const int num_unichars = WordChoice.length();
  for (int pos = 0; pos < num_unichars; ++pos) {
    if (getUnicharset().get_isalpha(WordChoice.unichar_id(pos))) {
      ++run_len;
      continue;
    }
    // Run ended: fold it into the minimum.
    if (run_len > 0 && run_len < min_run) min_run = run_len;
    run_len = 0;
  }
  // Account for a run that reaches the end of the word.
  if (run_len > 0 && run_len < min_run) min_run = run_len;
  // No alpha run seen at all.
  if (min_run == MAX_INT32) min_run = 0;
  return min_run;
}
int Dict::UniformCertainties(const WERD_CHOICE& word) {
float Certainty;
float WorstCertainty = MAX_FLOAT32;
float CertaintyThreshold;
FLOAT64 TotalCertainty;
FLOAT64 TotalCertaintySquared;
FLOAT64 Variance;
FLOAT32 Mean, StdDev;
int word_length = word.length();
if (word_length < 3)
return true;
TotalCertainty = TotalCertaintySquared = 0.0;
for (int i = 0; i < word_length; ++i) {
Certainty = word.certainty(i);
TotalCertainty += Certainty;
TotalCertaintySquared += Certainty * Certainty;
if (Certainty < WorstCertainty)
WorstCertainty = Certainty;
}
// Subtract off worst certainty from statistics.
word_length--;
TotalCertainty -= WorstCertainty;
TotalCertaintySquared -= WorstCertainty * WorstCertainty;
Mean = TotalCertainty / word_length;
Variance = ((word_length * TotalCertaintySquared -
TotalCertainty * TotalCertainty) /
(word_length * (word_length - 1)));
if (Variance < 0.0)
Variance = 0.0;
StdDev = sqrt(Variance);
CertaintyThreshold = Mean - stopper_allowable_character_badness * StdDev;
if (CertaintyThreshold > stopper_nondict_certainty_base)
CertaintyThreshold = stopper_nondict_certainty_base;
if (word.certainty() < CertaintyThreshold) {
if (stopper_debug_level >= 1)
tprintf("Stopper: Non-uniform certainty = %4.1f"
" (m=%4.1f, s=%4.1f, t=%4.1f)\n",
word.certainty(), Mean, StdDev, CertaintyThreshold);
return false;
} else {
return true;
}
}
} // namespace tesseract
| C++ |
///////////////////////////////////////////////////////////////////////
// File: dawg_cache.h
// Description: A class that knows about loading and caching dawgs.
// Author: David Eger
// Created: Fri Jan 27 12:08:00 PST 2012
//
// (C) Copyright 2012, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#include "dawg_cache.h"
#include "dawg.h"
#include "object_cache.h"
#include "strngs.h"
#include "tessdatamanager.h"
namespace tesseract {
// Callback payload used by DawgCache::GetSquishedDawg: captures the
// parameters needed to load one dawg so Load() can be invoked lazily by
// the cache on a miss.
struct DawgLoader {
  DawgLoader(const STRING &lang,
             const char *data_file_name,
             TessdataType tessdata_dawg_type,
             int dawg_debug_level)
      : lang_(lang),
        data_file_name_(data_file_name),
        tessdata_dawg_type_(tessdata_dawg_type),
        dawg_debug_level_(dawg_debug_level) {}
  // Loads the dawg described by the members below; defined further down.
  Dawg *Load();
  STRING lang_;               // language the dawg belongs to
  const char *data_file_name_;  // tessdata file to read from (not owned)
  TessdataType tessdata_dawg_type_;  // which dawg component to load
  int dawg_debug_level_;
};
// Returns the requested dawg, loading it via DawgLoader::Load on a cache
// miss.  The cache key is the data file name plus the dawg component's
// tessdata suffix.
Dawg *DawgCache::GetSquishedDawg(
    const STRING &lang,
    const char *data_file_name,
    TessdataType tessdata_dawg_type,
    int debug_level) {
  DawgLoader dawg_loader(lang, data_file_name, tessdata_dawg_type,
                         debug_level);
  STRING data_id(data_file_name);
  data_id += kTessdataFileSuffixes[tessdata_dawg_type];
  return dawgs_.Get(data_id,
                    NewTessCallback(&dawg_loader, &DawgLoader::Load));
}
// Loads the dawg component tessdata_dawg_type_ from the tessdata file.
// Returns NULL when the data file cannot be opened, does not contain the
// requested component, or the component type has no known
// DawgType/PermuterType mapping.  Ownership of the returned dawg passes
// to the caller.
Dawg *DawgLoader::Load() {
  TessdataManager data_loader;
  if (!data_loader.Init(data_file_name_, dawg_debug_level_)) {
    return NULL;
  }
  if (!data_loader.SeekToStart(tessdata_dawg_type_)) {
    // Close the data file before bailing out; the original code leaked
    // the open loader here, unlike the default case of the switch below.
    data_loader.End();
    return NULL;
  }
  FILE *fp = data_loader.GetDataFilePtr();
  DawgType dawg_type;
  PermuterType perm_type;
  // Map the tessdata component onto the runtime dawg/permuter types.
  switch (tessdata_dawg_type_) {
    case TESSDATA_PUNC_DAWG:
      dawg_type = DAWG_TYPE_PUNCTUATION;
      perm_type = PUNC_PERM;
      break;
    case TESSDATA_SYSTEM_DAWG:
      dawg_type = DAWG_TYPE_WORD;
      perm_type = SYSTEM_DAWG_PERM;
      break;
    case TESSDATA_NUMBER_DAWG:
      dawg_type = DAWG_TYPE_NUMBER;
      perm_type = NUMBER_PERM;
      break;
    case TESSDATA_BIGRAM_DAWG:
      dawg_type = DAWG_TYPE_WORD;  // doesn't actually matter
      perm_type = COMPOUND_PERM;   // doesn't actually matter
      break;
    case TESSDATA_UNAMBIG_DAWG:
      dawg_type = DAWG_TYPE_WORD;
      perm_type = SYSTEM_DAWG_PERM;
      break;
    case TESSDATA_FREQ_DAWG:
      dawg_type = DAWG_TYPE_WORD;
      perm_type = FREQ_DAWG_PERM;
      break;
    default:
      data_loader.End();
      return NULL;
  }
  SquishedDawg *retval =
      new SquishedDawg(fp, dawg_type, lang_, perm_type, dawg_debug_level_);
  data_loader.End();
  return retval;
}
} // namespace tesseract
| C++ |
///////////////////////////////////////////////////////////////////////
// File: dict.cpp
// Description: dict class.
// Author: Samuel Charron
//
// (C) Copyright 2006, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#include <stdio.h>
#include "dict.h"
#include "unicodes.h"
#ifdef _MSC_VER
#pragma warning(disable:4244) // Conversion warnings
#endif
#include "tprintf.h"
namespace tesseract {
class Image;
// Dict constructor: installs the default letter-acceptance and
// language-model callbacks, and registers every dictionary tunable with
// the params system owned by ccutil. No dawgs are loaded here; that
// happens later in Dict::Load(). ccutil is borrowed, not owned.
Dict::Dict(CCUtil* ccutil)
    : letter_is_okay_(&tesseract::Dict::def_letter_is_okay),
      probability_in_context_(&tesseract::Dict::def_probability_in_context),
      params_model_classify_(NULL),
      ccutil_(ccutil),
      STRING_MEMBER(user_words_file, "",
                    "A filename of user-provided words.",
                    getCCUtil()->params()),
      STRING_INIT_MEMBER(user_words_suffix, "",
                         "A suffix of user-provided words located in tessdata.",
                         getCCUtil()->params()),
      STRING_MEMBER(user_patterns_file, "",
                    "A filename of user-provided patterns.",
                    getCCUtil()->params()),
      STRING_INIT_MEMBER(user_patterns_suffix, "",
                         "A suffix of user-provided patterns located in "
                         "tessdata.",
                         getCCUtil()->params()),
      BOOL_INIT_MEMBER(load_system_dawg, true, "Load system word dawg.",
                       getCCUtil()->params()),
      BOOL_INIT_MEMBER(load_freq_dawg, true, "Load frequent word dawg.",
                       getCCUtil()->params()),
      BOOL_INIT_MEMBER(load_unambig_dawg, true, "Load unambiguous word dawg.",
                       getCCUtil()->params()),
      BOOL_INIT_MEMBER(load_punc_dawg, true, "Load dawg with punctuation"
                       " patterns.", getCCUtil()->params()),
      BOOL_INIT_MEMBER(load_number_dawg, true, "Load dawg with number"
                       " patterns.", getCCUtil()->params()),
      BOOL_INIT_MEMBER(load_bigram_dawg, true, "Load dawg with special word "
                       "bigrams.", getCCUtil()->params()),
      double_MEMBER(xheight_penalty_subscripts, 0.125,
                    "Score penalty (0.1 = 10%) added if there are subscripts "
                    "or superscripts in a word, but it is otherwise OK.",
                    getCCUtil()->params()),
      double_MEMBER(xheight_penalty_inconsistent, 0.25,
                    "Score penalty (0.1 = 10%) added if an xheight is "
                    "inconsistent.", getCCUtil()->params()),
      double_MEMBER(segment_penalty_dict_frequent_word, 1.0,
                    "Score multiplier for word matches which have good case and"
                    "are frequent in the given language (lower is better).",
                    getCCUtil()->params()),
      double_MEMBER(segment_penalty_dict_case_ok, 1.1,
                    "Score multiplier for word matches that have good case "
                    "(lower is better).", getCCUtil()->params()),
      double_MEMBER(segment_penalty_dict_case_bad, 1.3125,
                    "Default score multiplier for word matches, which may have "
                    "case issues (lower is better).",
                    getCCUtil()->params()),
      double_MEMBER(segment_penalty_ngram_best_choice, 1.24,
                    "Multipler to for the best choice from the ngram model.",
                    getCCUtil()->params()),
      double_MEMBER(segment_penalty_dict_nonword, 1.25,
                    "Score multiplier for glyph fragment segmentations which "
                    "do not match a dictionary word (lower is better).",
                    getCCUtil()->params()),
      double_MEMBER(segment_penalty_garbage, 1.50,
                    "Score multiplier for poorly cased strings that are not in"
                    " the dictionary and generally look like garbage (lower is"
                    " better).", getCCUtil()->params()),
      STRING_MEMBER(output_ambig_words_file, "",
                    "Output file for ambiguities found in the dictionary",
                    getCCUtil()->params()),
      INT_MEMBER(dawg_debug_level, 0, "Set to 1 for general debug info"
                 ", to 2 for more details, to 3 to see all the debug messages",
                 getCCUtil()->params()),
      INT_MEMBER(hyphen_debug_level, 0, "Debug level for hyphenated words.",
                 getCCUtil()->params()),
      INT_MEMBER(max_viterbi_list_size, 10, "Maximum size of viterbi list.",
                 getCCUtil()->params()),
      BOOL_MEMBER(use_only_first_uft8_step, false,
                  "Use only the first UTF8 step of the given string"
                  " when computing log probabilities.",
                  getCCUtil()->params()),
      double_MEMBER(certainty_scale, 20.0, "Certainty scaling factor",
                    getCCUtil()->params()),
      double_MEMBER(stopper_nondict_certainty_base, -2.50,
                    "Certainty threshold for non-dict words",
                    getCCUtil()->params()),
      double_MEMBER(stopper_phase2_certainty_rejection_offset, 1.0,
                    "Reject certainty offset",
                    getCCUtil()->params()),
      INT_MEMBER(stopper_smallword_size, 2,
                 "Size of dict word to be treated as non-dict word",
                 getCCUtil()->params()),
      double_MEMBER(stopper_certainty_per_char, -0.50, "Certainty to add"
                    " for each dict char above small word size.",
                    getCCUtil()->params()),
      double_MEMBER(stopper_allowable_character_badness, 3.0,
                    "Max certaintly variation allowed in a word (in sigma)",
                    getCCUtil()->params()),
      INT_MEMBER(stopper_debug_level, 0, "Stopper debug level",
                 getCCUtil()->params()),
      BOOL_MEMBER(stopper_no_acceptable_choices, false,
                  "Make AcceptableChoice() always return false. Useful"
                  " when there is a need to explore all segmentations",
                  getCCUtil()->params()),
      BOOL_MEMBER(save_raw_choices, false,
                  "Deprecated- backward compatablity only",
                  getCCUtil()->params()),
      INT_MEMBER(tessedit_truncate_wordchoice_log, 10,
                 "Max words to keep in list",
                 getCCUtil()->params()),
      STRING_MEMBER(word_to_debug, "", "Word for which stopper debug"
                    " information should be printed to stdout",
                    getCCUtil()->params()),
      STRING_MEMBER(word_to_debug_lengths, "",
                    "Lengths of unichars in word_to_debug",
                    getCCUtil()->params()),
      INT_MEMBER(fragments_debug, 0, "Debug character fragments",
                 getCCUtil()->params()),
      BOOL_MEMBER(segment_nonalphabetic_script, false,
                  "Don't use any alphabetic-specific tricks."
                  "Set to true in the traineddata config file for"
                  " scripts that are cursive or inherently fixed-pitch",
                  getCCUtil()->params()),
      BOOL_MEMBER(save_doc_words, 0, "Save Document Words",
                  getCCUtil()->params()),
      double_MEMBER(doc_dict_pending_threshold, 0.0,
                    "Worst certainty for using pending dictionary",
                    getCCUtil()->params()),
      double_MEMBER(doc_dict_certainty_threshold, -2.25,
                    "Worst certainty for words that can be inserted into the"
                    "document dictionary", getCCUtil()->params()),
      INT_MEMBER(max_permuter_attempts, 10000, "Maximum number of different"
                 " character choices to consider during permutation."
                 " This limit is especially useful when user patterns"
                 " are specified, since overly generic patterns can result in"
                 " dawg search exploring an overly large number of options.",
                 getCCUtil()->params()) {
  // All owned resources start out NULL/empty; Load() creates the dawgs and
  // tries, and End()/~Dict release them.
  dang_ambigs_table_ = NULL;
  replace_ambigs_table_ = NULL;
  reject_offset_ = 0.0;
  go_deeper_fxn_ = NULL;
  hyphen_word_ = NULL;
  last_word_on_line_ = false;
  hyphen_unichar_id_ = INVALID_UNICHAR_ID;
  document_words_ = NULL;
  dawg_cache_ = NULL;
  dawg_cache_is_ours_ = false;
  pending_words_ = NULL;
  bigram_dawg_ = NULL;
  freq_dawg_ = NULL;
  punc_dawg_ = NULL;
  unambig_dawg_ = NULL;
  wordseg_rating_adjust_factor_ = -1.0f;
  output_ambig_words_file_ = NULL;
}
// Destructor. Releases only the resources the destructor owns directly;
// dawg memory is released by End() (called from Load() on reload, or by
// the owner before destruction).
Dict::~Dict() {
  // delete on a NULL pointer is a well-defined no-op, so no guard needed.
  delete hyphen_word_;
  // fclose(NULL) is undefined behavior, so this guard is required.
  if (output_ambig_words_file_ != NULL) fclose(output_ambig_words_file_);
}
// Returns the process-wide shared DawgCache singleton.
DawgCache *Dict::GlobalDawgCache() {
  // We dynamically allocate this global cache (a singleton) so it will outlive
  // every Tesseract instance (even those that someone else might declare as
  // global statics). It is intentionally never freed.
  static DawgCache *cache = new DawgCache();  // evil global singleton
  return cache;
}
// Loads all dictionaries enabled by the load_* parameters from the
// traineddata file, plus any user-provided word/pattern files, and then
// builds the successors_ table that records which dawg may legally follow
// which. If dawg_cache is non-NULL it is shared (not owned); otherwise a
// private cache is created and owned by this Dict.
void Dict::Load(DawgCache *dawg_cache) {
  STRING name;
  STRING &lang = getCCUtil()->lang;
  // Reloading: release any previously loaded dawgs first.
  if (dawgs_.length() != 0) this->End();
  // Cache the unichar ids of the special symbols used by the permuters.
  apostrophe_unichar_id_ = getUnicharset().unichar_to_id(kApostropheSymbol);
  question_unichar_id_ = getUnicharset().unichar_to_id(kQuestionSymbol);
  slash_unichar_id_ = getUnicharset().unichar_to_id(kSlashSymbol);
  hyphen_unichar_id_ = getUnicharset().unichar_to_id(kHyphenSymbol);
  if (dawg_cache != NULL) {
    dawg_cache_ = dawg_cache;
    dawg_cache_is_ours_ = false;
  } else {
    dawg_cache_ = new DawgCache();
    dawg_cache_is_ours_ = true;
  }
  TessdataManager &tessdata_manager = getCCUtil()->tessdata_manager;
  const char *data_file_name = tessdata_manager.GetDataFileName().string();
  // Load dawgs_. Each GetSquishedDawg call may return NULL if the
  // corresponding section is absent from the traineddata file.
  if (load_punc_dawg) {
    punc_dawg_ = dawg_cache_->GetSquishedDawg(
        lang, data_file_name, TESSDATA_PUNC_DAWG, dawg_debug_level);
    if (punc_dawg_) dawgs_ += punc_dawg_;
  }
  if (load_system_dawg) {
    Dawg *system_dawg = dawg_cache_->GetSquishedDawg(
        lang, data_file_name, TESSDATA_SYSTEM_DAWG, dawg_debug_level);
    if (system_dawg) dawgs_ += system_dawg;
  }
  if (load_number_dawg) {
    Dawg *number_dawg = dawg_cache_->GetSquishedDawg(
        lang, data_file_name, TESSDATA_NUMBER_DAWG, dawg_debug_level);
    if (number_dawg) dawgs_ += number_dawg;
  }
  // NOTE: the bigram dawg is deliberately NOT added to dawgs_; it is only
  // consulted directly by valid_bigram().
  if (load_bigram_dawg) {
    bigram_dawg_ = dawg_cache_->GetSquishedDawg(
        lang, data_file_name, TESSDATA_BIGRAM_DAWG, dawg_debug_level);
  }
  if (load_freq_dawg) {
    freq_dawg_ = dawg_cache_->GetSquishedDawg(
        lang, data_file_name, TESSDATA_FREQ_DAWG, dawg_debug_level);
    if (freq_dawg_) { dawgs_ += freq_dawg_; }
  }
  if (load_unambig_dawg) {
    unambig_dawg_ = dawg_cache_->GetSquishedDawg(
        lang, data_file_name, TESSDATA_UNAMBIG_DAWG, dawg_debug_level);
    if (unambig_dawg_) dawgs_ += unambig_dawg_;
  }
  // User-provided word list: an explicit file path takes precedence over a
  // suffix resolved relative to the tessdata language prefix.
  if (((STRING &)user_words_suffix).length() > 0 ||
      ((STRING &)user_words_file).length() > 0) {
    Trie *trie_ptr = new Trie(DAWG_TYPE_WORD, lang, USER_DAWG_PERM,
                              getUnicharset().size(), dawg_debug_level);
    if (((STRING &)user_words_file).length() > 0) {
      name = user_words_file;
    } else {
      name = getCCUtil()->language_data_path_prefix;
      name += user_words_suffix;
    }
    if (!trie_ptr->read_and_add_word_list(name.string(), getUnicharset(),
                                          Trie::RRP_REVERSE_IF_HAS_RTL)) {
      tprintf("Error: failed to load %s\n", name.string());
      delete trie_ptr;
    } else {
      dawgs_ += trie_ptr;
    }
  }
  // User-provided pattern list, resolved the same way as the word list.
  if (((STRING &)user_patterns_suffix).length() > 0 ||
      ((STRING &)user_patterns_file).length() > 0) {
    Trie *trie_ptr = new Trie(DAWG_TYPE_PATTERN, lang, USER_PATTERN_PERM,
                              getUnicharset().size(), dawg_debug_level);
    trie_ptr->initialize_patterns(&(getUnicharset()));
    if (((STRING &)user_patterns_file).length() > 0) {
      name = user_patterns_file;
    } else {
      name = getCCUtil()->language_data_path_prefix;
      name += user_patterns_suffix;
    }
    if (!trie_ptr->read_pattern_list(name.string(), getUnicharset())) {
      tprintf("Error: failed to load %s\n", name.string());
      delete trie_ptr;
    } else {
      dawgs_ += trie_ptr;
    }
  }
  // Per-document adaptive dictionary (words accepted earlier in this run).
  document_words_ = new Trie(DAWG_TYPE_WORD, lang, DOC_DAWG_PERM,
                             getUnicharset().size(), dawg_debug_level);
  dawgs_ += document_words_;
  // This dawg is temporary and should not be searched by letter_is_ok.
  pending_words_ = new Trie(DAWG_TYPE_WORD, lang, NO_PERM,
                            getUnicharset().size(), dawg_debug_level);
  // Construct a list of corresponding successors for each dawg. Each entry i
  // in the successors_ vector is a vector of integers that represent the
  // indices into the dawgs_ vector of the successors for dawg i.
  successors_.reserve(dawgs_.length());
  for (int i = 0; i < dawgs_.length(); ++i) {
    const Dawg *dawg = dawgs_[i];
    SuccessorList *lst = new SuccessorList();
    for (int j = 0; j < dawgs_.length(); ++j) {
      const Dawg *other = dawgs_[j];
      // kDawgSuccessors encodes which dawg type may follow which, within
      // the same language.
      if (dawg != NULL && other != NULL &&
          (dawg->lang() == other->lang()) &&
          kDawgSuccessors[dawg->type()][other->type()]) *lst += j;
    }
    successors_ += lst;
  }
}
// Releases the dawgs loaded by Load(). Safe to call repeatedly: the early
// return on an empty dawgs_ vector makes subsequent calls no-ops.
void Dict::End() {
  if (dawgs_.length() == 0)
    return;  // Not safe to call twice.
  for (int i = 0; i < dawgs_.size(); i++) {
    // Dawgs owned by the cache are released through it; dawgs we own
    // outright (user word/pattern tries, document words) are deleted here.
    if (!dawg_cache_->FreeDawg(dawgs_[i])) {
      delete dawgs_[i];
    }
  }
  // The bigram dawg is not in dawgs_, so free it separately. Reset the
  // pointer afterwards so valid_bigram() cannot dereference freed memory
  // if it is called after End().
  dawg_cache_->FreeDawg(bigram_dawg_);
  bigram_dawg_ = NULL;
  if (dawg_cache_is_ours_) {
    delete dawg_cache_;
    dawg_cache_ = NULL;
  }
  successors_.delete_data_pointers();
  dawgs_.clear();
  successors_.clear();
  document_words_ = NULL;  // owned via dawgs_, already released above
  delete pending_words_;   // delete on NULL is a harmless no-op
  pending_words_ = NULL;
}
// Returns true if in light of the current state unichar_id is allowed
// according to at least one of the dawgs in the dawgs_ vector.
// Updates dawg_args->updated_dawgs with the positions reachable after
// consuming unichar_id, and dawg_args->permuter with the best permuter
// seen so far. Returns the resulting permuter code (NO_PERM == rejected).
// See more extensive comments in dict.h where this function is declared.
int Dict::def_letter_is_okay(void* void_dawg_args,
                             UNICHAR_ID unichar_id,
                             bool word_end) const {
  DawgArgs *dawg_args = reinterpret_cast<DawgArgs*>(void_dawg_args);
  if (dawg_debug_level >= 3) {
    tprintf("def_letter_is_okay: current unichar=%s word_end=%d"
            " num active dawgs=%d\n",
            getUnicharset().debug_str(unichar_id).string(), word_end,
            dawg_args->active_dawgs->length());
  }
  // Do not accept words that contain kPatternUnicharID.
  // (otherwise pattern dawgs would not function correctly).
  // Do not accept words containing INVALID_UNICHAR_IDs.
  if (unichar_id == Dawg::kPatternUnicharID ||
      unichar_id == INVALID_UNICHAR_ID) {
    dawg_args->permuter = NO_PERM;
    return NO_PERM;
  }
  // Initialization.
  PermuterType curr_perm = NO_PERM;
  dawg_args->updated_dawgs->clear();
  // Go over the active_dawgs vector and insert DawgPosition records
  // with the updated ref (an edge with the corresponding unichar id) into
  // dawg_args->updated_pos.
  for (int a = 0; a < dawg_args->active_dawgs->length(); ++a) {
    const DawgPosition &pos = (*dawg_args->active_dawgs)[a];
    const Dawg *punc_dawg = pos.punc_index >= 0 ? dawgs_[pos.punc_index] : NULL;
    const Dawg *dawg = pos.dawg_index >= 0 ? dawgs_[pos.dawg_index] : NULL;
    if (!dawg && !punc_dawg) {
      // shouldn't happen.
      tprintf("Received DawgPosition with no dawg or punc_dawg. wth?\n");
      continue;
    }
    if (!dawg) {
      // We're in the punctuation dawg. A core dawg has not been chosen.
      NODE_REF punc_node = GetStartingNode(punc_dawg, pos.punc_ref);
      EDGE_REF punc_transition_edge = punc_dawg->edge_char_of(
          punc_node, Dawg::kPatternUnicharID, word_end);
      if (punc_transition_edge != NO_EDGE) {
        // Find all successors, and see which can transition.
        const SuccessorList &slist = *(successors_[pos.punc_index]);
        for (int s = 0; s < slist.length(); ++s) {
          int sdawg_index = slist[s];
          const Dawg *sdawg = dawgs_[sdawg_index];
          UNICHAR_ID ch = char_for_dawg(unichar_id, sdawg);
          EDGE_REF dawg_edge = sdawg->edge_char_of(0, ch, word_end);
          if (dawg_edge != NO_EDGE) {
            if (dawg_debug_level >= 3) {
              tprintf("Letter found in dawg %d\n", sdawg_index);
            }
            dawg_args->updated_dawgs->add_unique(
                DawgPosition(sdawg_index, dawg_edge,
                             pos.punc_index, punc_transition_edge, false),
                dawg_debug_level > 0,
                "Append transition from punc dawg to current dawgs: ");
            if (sdawg->permuter() > curr_perm) curr_perm = sdawg->permuter();
          }
        }
      }
      // The letter may also continue the punctuation pattern itself.
      EDGE_REF punc_edge = punc_dawg->edge_char_of(punc_node, unichar_id,
                                                   word_end);
      if (punc_edge != NO_EDGE) {
        if (dawg_debug_level >= 3) {
          tprintf("Letter found in punctuation dawg\n");
        }
        dawg_args->updated_dawgs->add_unique(
            DawgPosition(-1, NO_EDGE, pos.punc_index, punc_edge, false),
            dawg_debug_level > 0,
            "Extend punctuation dawg: ");
        if (PUNC_PERM > curr_perm) curr_perm = PUNC_PERM;
      }
      continue;
    }
    if (punc_dawg && dawg->end_of_word(pos.dawg_ref)) {
      // We can end the main word here.
      // If we can continue on the punc ref, add that possibility.
      NODE_REF punc_node = GetStartingNode(punc_dawg, pos.punc_ref);
      EDGE_REF punc_edge = punc_node == NO_EDGE ? NO_EDGE
          : punc_dawg->edge_char_of(punc_node, unichar_id, word_end);
      if (punc_edge != NO_EDGE) {
        dawg_args->updated_dawgs->add_unique(
            DawgPosition(pos.dawg_index, pos.dawg_ref,
                         pos.punc_index, punc_edge, true),
            dawg_debug_level > 0,
            "Return to punctuation dawg: ");
        if (dawg->permuter() > curr_perm) curr_perm = dawg->permuter();
      }
    }
    if (pos.back_to_punc) continue;
    // If we are dealing with the pattern dawg, look up all the
    // possible edges, not only for the exact unichar_id, but also
    // for all its character classes (alpha, digit, etc).
    if (dawg->type() == DAWG_TYPE_PATTERN) {
      ProcessPatternEdges(dawg, pos, unichar_id, word_end,
                          dawg_args->updated_dawgs, &curr_perm);
      // There can't be any successors to dawg that is of type
      // DAWG_TYPE_PATTERN, so we are done examining this DawgPosition.
      continue;
    }
    // Find the edge out of the node for the unichar_id.
    NODE_REF node = GetStartingNode(dawg, pos.dawg_ref);
    EDGE_REF edge = (node == NO_EDGE) ? NO_EDGE
        : dawg->edge_char_of(node, char_for_dawg(unichar_id, dawg), word_end);
    if (dawg_debug_level >= 3) {
      tprintf("Active dawg: [%d, " REFFORMAT "] edge=" REFFORMAT "\n",
              pos.dawg_index, node, edge);
    }
    if (edge != NO_EDGE) {  // the unichar was found in the current dawg
      if (dawg_debug_level >= 3) {
        tprintf("Letter found in dawg %d\n", pos.dawg_index);
      }
      if (word_end && punc_dawg && !punc_dawg->end_of_word(pos.punc_ref)) {
        if (dawg_debug_level >= 3) {
          tprintf("Punctuation constraint not satisfied at end of word.\n");
        }
        continue;
      }
      if (dawg->permuter() > curr_perm) curr_perm = dawg->permuter();
      dawg_args->updated_dawgs->add_unique(
          DawgPosition(pos.dawg_index, edge, pos.punc_index, pos.punc_ref,
                       false),
          dawg_debug_level > 0,
          "Append current dawg to updated active dawgs: ");
    }
  }  // end for
  // Update dawg_args->permuter if it used to be NO_PERM or became NO_PERM
  // or if we found the current letter in a non-punctuation dawg. This
  // allows preserving information on which dawg the "core" word came from.
  // Keep the old value of dawg_args->permuter if it is COMPOUND_PERM.
  if (dawg_args->permuter == NO_PERM || curr_perm == NO_PERM ||
      (curr_perm != PUNC_PERM && dawg_args->permuter != COMPOUND_PERM)) {
    dawg_args->permuter = curr_perm;
  }
  if (dawg_debug_level >= 2) {
    // Bug fix: the original tprintf had a %d conversion but passed no
    // argument, which is undefined behavior and printed garbage.
    tprintf("Returning %d for permuter code for this character.\n",
            dawg_args->permuter);
  }
  return dawg_args->permuter;
}
// Looks up all edges in the pattern dawg that match unichar_id, either
// directly or via one of its character-class pattern unichars (alpha,
// digit, etc.), and appends each match to updated_dawgs. Raises
// *curr_perm to the dawg's permuter when any match is found.
void Dict::ProcessPatternEdges(const Dawg *dawg, const DawgPosition &pos,
                               UNICHAR_ID unichar_id, bool word_end,
                               DawgPositionVector *updated_dawgs,
                               PermuterType *curr_perm) const {
  NODE_REF node = GetStartingNode(dawg, pos.dawg_ref);
  // Try to find the edge corresponding to the exact unichar_id and to all the
  // edges corresponding to the character class of unichar_id.
  GenericVector<UNICHAR_ID> unichar_id_patterns;
  unichar_id_patterns.push_back(unichar_id);
  dawg->unichar_id_to_patterns(unichar_id, getUnicharset(),
                               &unichar_id_patterns);
  for (int i = 0; i < unichar_id_patterns.size(); ++i) {
    // On the first iteration check all the outgoing edges.
    // On the second iteration check all self-loops.
    for (int k = 0; k < 2; ++k) {
      EDGE_REF edge = (k == 0)
          ? dawg->edge_char_of(node, unichar_id_patterns[i], word_end)
          : dawg->pattern_loop_edge(pos.dawg_ref, unichar_id_patterns[i], word_end);
      if (edge == NO_EDGE) continue;
      if (dawg_debug_level >= 3) {
        tprintf("Pattern dawg: [%d, " REFFORMAT "] edge=" REFFORMAT "\n",
                pos.dawg_index, node, edge);
        tprintf("Letter found in pattern dawg %d\n", pos.dawg_index);
      }
      if (dawg->permuter() > *curr_perm) *curr_perm = dawg->permuter();
      updated_dawgs->add_unique(
          DawgPosition(pos.dawg_index, edge, pos.punc_index, pos.punc_ref,
                       pos.back_to_punc),
          dawg_debug_level > 0,
          "Append current dawg to updated active dawgs: ");
    }
  }
}
// Fills active_dawgs with the starting positions for a new word. When the
// previous word ended in a hyphen (hyphenated() is true), the positions
// saved from that word are reused so the word can continue across the line
// break; otherwise the defaults from default_dawgs() are used.
void Dict::init_active_dawgs(DawgPositionVector *active_dawgs,
                             bool ambigs_mode) const {
  if (!hyphenated()) {
    default_dawgs(active_dawgs, ambigs_mode);
    return;
  }
  *active_dawgs = hyphen_active_dawgs_;
  if (dawg_debug_level >= 3) {
    for (int entry = 0; entry < hyphen_active_dawgs_.size(); ++entry) {
      tprintf("Adding hyphen beginning dawg [%d, " REFFORMAT "]\n",
              hyphen_active_dawgs_[entry].dawg_index,
              hyphen_active_dawgs_[entry].dawg_ref);
    }
  }
}
// Fills dawg_pos_vec with the default word-start positions: one entry per
// punctuation dawg, plus one entry for every core dawg that the punctuation
// dawg cannot reach (or for all core dawgs when no usable punctuation dawg
// exists). Pattern dawgs are skipped when suppress_patterns is true.
void Dict::default_dawgs(DawgPositionVector *dawg_pos_vec,
                         bool suppress_patterns) const {
  const bool punc_dawg_available =
      (punc_dawg_ != NULL) &&
      punc_dawg_->edge_char_of(0, Dawg::kPatternUnicharID, true) != NO_EDGE;
  for (int i = 0; i < dawgs_.length(); i++) {
    const Dawg *candidate = dawgs_[i];
    if (candidate == NULL) continue;
    const int candidate_type = candidate->type();
    if (suppress_patterns && candidate_type == DAWG_TYPE_PATTERN) continue;
    if (candidate_type == DAWG_TYPE_PUNCTUATION) {
      // Start inside the punctuation dawg with no core dawg chosen yet.
      *dawg_pos_vec += DawgPosition(-1, NO_EDGE, i, NO_EDGE, false);
      if (dawg_debug_level >= 3) {
        tprintf("Adding beginning punc dawg [%d, " REFFORMAT "]\n", i,
                NO_EDGE);
      }
    } else if (!punc_dawg_available ||
               !kDawgSuccessors[DAWG_TYPE_PUNCTUATION][candidate_type]) {
      // Core dawg that is not reachable through the punctuation dawg.
      *dawg_pos_vec += DawgPosition(i, NO_EDGE, -1, NO_EDGE, false);
      if (dawg_debug_level >= 3) {
        tprintf("Adding beginning dawg [%d, " REFFORMAT "]\n", i, NO_EDGE);
      }
    }
  }
}
// Adds best_choice to the document dictionary if it passes the quality
// filters (not already a valid word, long enough, not too repetitive, and
// either confident enough or confirmed by a pending occurrence).
void Dict::add_document_word(const WERD_CHOICE &best_choice) {
  // Do not add hyphenated word parts to the document dawg.
  // hyphen_word_ will be non-NULL after the set_hyphen_word() is
  // called when the first part of the hyphenated word is
  // discovered and while the second part of the word is recognized.
  // hyphen_word_ is cleared in cc_recg() before the next word on
  // the line is recognized.
  if (hyphen_word_) return;

  char filename[CHARS_PER_LINE];
  FILE *doc_word_file;
  int stringlen = best_choice.length();

  // Already a dictionary word, or too short to be worth remembering.
  if (valid_word(best_choice) || stringlen < 2)
    return;

  // Discard words that contain >= kDocDictMaxRepChars repeating unichars.
  if (best_choice.length() >= kDocDictMaxRepChars) {
    int num_rep_chars = 1;
    UNICHAR_ID uch_id = best_choice.unichar_id(0);
    for (int i = 1; i < best_choice.length(); ++i) {
      if (best_choice.unichar_id(i) != uch_id) {
        num_rep_chars = 1;
        uch_id = best_choice.unichar_id(i);
      } else {
        ++num_rep_chars;
        if (num_rep_chars == kDocDictMaxRepChars) return;
      }
    }
  }

  // Low-confidence (or 2-letter) words must first appear in the pending
  // dictionary before being promoted; 2-letter words additionally need to
  // be all-uppercase (likely acronyms).
  if (best_choice.certainty() < doc_dict_certainty_threshold ||
      stringlen == 2) {
    if (best_choice.certainty() < doc_dict_pending_threshold)
      return;
    if (!pending_words_->word_in_dawg(best_choice)) {
      if (stringlen > 2 ||
          (stringlen == 2 &&
           getUnicharset().get_isupper(best_choice.unichar_id(0)) &&
           getUnicharset().get_isupper(best_choice.unichar_id(1)))) {
        pending_words_->add_word_to_dawg(best_choice);
      }
      return;
    }
  }

  if (save_doc_words) {
    // Build "<imagefile>.doc" with snprintf so the fixed-size buffer can
    // never overflow (the previous strcpy/strcat pair had no bound check)
    // and the result is always NUL-terminated.
    snprintf(filename, sizeof(filename), "%s.doc",
             getCCUtil()->imagefile.string());
    doc_word_file = open_file (filename, "a");
    fprintf(doc_word_file, "%s\n",
            best_choice.debug_string().string());
    fclose(doc_word_file);
  }
  document_words_->add_word_to_dawg(best_choice);
}
// Multiplies the word's rating by a penalty factor built from dictionary
// membership, case correctness, punctuation validity, x-height consistency
// and the caller-supplied additional_adjust. The factor is recorded on the
// word via set_adjust_factor(); the rating itself is only changed when
// modify_rating is true. kRatingPad is added before and subtracted after
// the multiplication so small ratings are still affected by the factor.
void Dict::adjust_word(WERD_CHOICE *word,
                       bool nonword,
                       XHeightConsistencyEnum xheight_consistency,
                       float additional_adjust,
                       bool modify_rating,
                       bool debug) {
  // Han script has no case and uses different punctuation conventions, so
  // the case/punctuation checks are bypassed for it.
  bool is_han = (getUnicharset().han_sid() != getUnicharset().null_sid() &&
                 word->GetTopScriptID() == getUnicharset().han_sid());
  bool case_is_ok = (is_han || case_ok(*word, getUnicharset()));
  bool punc_is_ok = (is_han || !nonword || valid_punctuation(*word));

  float adjust_factor = additional_adjust;
  float new_rating = word->rating();
  new_rating += kRatingPad;
  const char *xheight_triggered = "";
  if (word->length() > 1) {
    // Calculate x-height and y-offset consistency penalties.
    switch (xheight_consistency) {
      case XH_INCONSISTENT:
        adjust_factor += xheight_penalty_inconsistent;
        xheight_triggered = ", xhtBAD";
        break;
      case XH_SUBNORMAL:
        adjust_factor += xheight_penalty_subscripts;
        xheight_triggered = ", xhtSUB";
        break;
      case XH_GOOD:
        // leave the factor alone - all good!
        break;
    }
    // TODO(eger): if nonword is true, but there is a "core" that's a dict
    // word, negate nonword status.
  } else {
    if (debug) {
      tprintf("Consistency could not be calculated.\n");
    }
  }
  if (debug) {
    tprintf("%sWord: %s %4.2f%s", nonword ? "Non-" : "",
            word->unichar_string().string(), word->rating(),
            xheight_triggered);
  }

  if (nonword) {  // non-dictionary word
    if (case_is_ok && punc_is_ok) {
      adjust_factor += segment_penalty_dict_nonword;
      new_rating *= adjust_factor;
      if (debug) tprintf(", W");
    } else {
      adjust_factor += segment_penalty_garbage;
      new_rating *= adjust_factor;
      if (debug) {
        if (!case_is_ok) tprintf(", C");
        if (!punc_is_ok) tprintf(", P");
      }
    }
  } else {  // dictionary word
    if (case_is_ok) {
      if (!is_han && freq_dawg_ != NULL && freq_dawg_->word_in_dawg(*word)) {
        // Frequent dictionary word: best (lowest) multiplier.
        word->set_permuter(FREQ_DAWG_PERM);
        adjust_factor += segment_penalty_dict_frequent_word;
        new_rating *= adjust_factor;
        if (debug) tprintf(", F");
      } else {
        adjust_factor += segment_penalty_dict_case_ok;
        new_rating *= adjust_factor;
        if (debug) tprintf(", ");
      }
    } else {
      adjust_factor += segment_penalty_dict_case_bad;
      new_rating *= adjust_factor;
      if (debug) tprintf(", C");
    }
  }
  new_rating -= kRatingPad;
  if (modify_rating) word->set_rating(new_rating);
  if (debug) tprintf(" %4.2f --> %4.2f\n", adjust_factor, new_rating);
  word->set_adjust_factor(adjust_factor);
}
// Returns the permuter code of the best dawg that accepts the whole word
// (prefixed with any pending hyphenated part), or NO_PERM if no dawg
// accepts it. numbers_ok controls whether number-permuter matches count.
int Dict::valid_word(const WERD_CHOICE &word, bool numbers_ok) const {
  const WERD_CHOICE *word_ptr = &word;
  WERD_CHOICE temp_word(word.unicharset());
  // Prepend the stored first half of a hyphenated word, if any.
  if (hyphenated() && hyphen_word_->unicharset() == word.unicharset()) {
    copy_hyphen_info(&temp_word);
    temp_word += word;
    word_ptr = &temp_word;
  }
  if (word_ptr->length() == 0) return NO_PERM;
  // Current and updated active_dawgs, swapped after each letter. A stack
  // array replaces the previous new[]/delete[] pair: no heap allocation
  // and no leak if letter_is_okay_ ever throws.
  DawgPositionVector active_dawgs[2];
  init_active_dawgs(&(active_dawgs[0]), false);
  DawgArgs dawg_args(&(active_dawgs[0]), &(active_dawgs[1]), NO_PERM);
  int last_index = word_ptr->length() - 1;
  // Call letter_is_okay for each letter in the word.
  for (int i = hyphen_base_size(); i <= last_index; ++i) {
    if (!((this->*letter_is_okay_)(&dawg_args, word_ptr->unichar_id(i),
                                   i == last_index))) break;
    // Swap active_dawgs with the corresponding updated vector.
    if (dawg_args.updated_dawgs == &(active_dawgs[1])) {
      dawg_args.updated_dawgs = &(active_dawgs[0]);
      ++(dawg_args.active_dawgs);
    } else {
      ++(dawg_args.updated_dawgs);
      dawg_args.active_dawgs = &(active_dawgs[0]);
    }
  }
  return valid_word_permuter(dawg_args.permuter, numbers_ok) ?
      dawg_args.permuter : NO_PERM;
}
// Returns true if the pair (word1, word2) - with surrounding punctuation
// stripped and every digit normalized to the question-mark unichar -
// appears in the bigram dawg. Words that are pure punctuation are accepted
// outright when short enough (single guillemet, hyphen, etc.).
bool Dict::valid_bigram(const WERD_CHOICE &word1,
                        const WERD_CHOICE &word2) const {
  if (bigram_dawg_ == NULL) return false;
  // Extract the core word from the middle of each word with any digits
  // replaced with question marks.
  int w1start, w1end, w2start, w2end;
  word1.punct_stripped(&w1start, &w1end);
  word2.punct_stripped(&w2start, &w2end);
  // We don't want to penalize a single guillemet, hyphen, etc.
  // But our bigram list doesn't have any information about punctuation.
  if (w1start >= w1end) return word1.length() < 3;
  if (w2start >= w2end) return word2.length() < 3;
  const UNICHARSET& uchset = getUnicharset();
  GenericVector<UNICHAR_ID> bigram_string;
  bigram_string.reserve(w1end + w2end + 1);
  // Process both words with the same normalization loop, separated by a
  // single space unichar.
  const WERD_CHOICE* words[2] = { &word1, &word2 };
  const int starts[2] = { w1start, w2start };
  const int ends[2] = { w1end, w2end };
  for (int w = 0; w < 2; ++w) {
    if (w == 1) bigram_string.push_back(UNICHAR_SPACE);
    for (int pos = starts[w]; pos < ends[w]; ++pos) {
      const GenericVector<UNICHAR_ID>& normed_ids =
          uchset.normed_ids(words[w]->unichar_id(pos));
      if (normed_ids.size() == 1 && uchset.get_isdigit(normed_ids[0]))
        bigram_string.push_back(question_unichar_id_);
      else
        bigram_string += normed_ids;
    }
  }
  // Build a WERD_CHOICE from the normalized unichar sequence and look it up.
  WERD_CHOICE normalized_word(&uchset, bigram_string.size());
  for (int k = 0; k < bigram_string.size(); ++k) {
    normalized_word.append_unichar_id_space_allocated(bigram_string[k], 1,
                                                      0.0f, 0.0f);
  }
  return bigram_dawg_->word_in_dawg(normalized_word);
}
// Returns true if the word, with each run of alpha/digit characters
// collapsed into a single Dawg::kPatternUnicharID placeholder, matches one
// of the loaded punctuation dawgs. Returns false for an empty word or for
// any character that is neither punctuation, alpha, nor digit.
bool Dict::valid_punctuation(const WERD_CHOICE &word) {
  // Bug fix: the original returned NO_PERM (a PermuterType enumerator)
  // from a bool function; it only worked because NO_PERM happens to be 0.
  if (word.length() == 0) return false;
  WERD_CHOICE new_word(word.unicharset());
  int last_index = word.length() - 1;
  for (int i = 0; i <= last_index; ++i) {
    UNICHAR_ID unichar_id = (word.unichar_id(i));
    if (getUnicharset().get_ispunctuation(unichar_id)) {
      new_word.append_unichar_id(unichar_id, 1, 0.0, 0.0);
    } else if (!getUnicharset().get_isalpha(unichar_id) &&
               !getUnicharset().get_isdigit(unichar_id)) {
      return false;  // neither punc, nor alpha, nor digit
    } else {
      // Alpha or digit: emit one pattern placeholder per run.
      int new_len = new_word.length();
      if (new_len == 0 ||
          new_word.unichar_id(new_len - 1) != Dawg::kPatternUnicharID) {
        new_word.append_unichar_id(Dawg::kPatternUnicharID, 1, 0.0, 0.0);
      }
    }
  }
  // Accept if any punctuation dawg contains the collapsed pattern.
  for (int i = 0; i < dawgs_.size(); ++i) {
    if (dawgs_[i] != NULL &&
        dawgs_[i]->type() == DAWG_TYPE_PUNCTUATION &&
        dawgs_[i]->word_in_dawg(new_word)) return true;
  }
  return false;
}
} // namespace tesseract
| C++ |
/******************************************************************************
** Filename: stopper.h
** Purpose: Stopping criteria for word classifier.
** Author: Dan Johnson
** History: Wed May 1 09:42:57 1991, DSJ, Created.
**
** (c) Copyright Hewlett-Packard Company, 1988.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
******************************************************************************/
#ifndef STOPPER_H
#define STOPPER_H
/**----------------------------------------------------------------------------
Include Files and Type Defines
----------------------------------------------------------------------------**/
#include "genericvector.h"
#include "params.h"
#include "ratngs.h"
#include "unichar.h"
class WERD_CHOICE;
typedef uinT8 BLOB_WIDTH;
// Describes one potentially dangerous ambiguity found in a word: the blob
// range it covers and how it could be misread.
struct DANGERR_INFO {
  DANGERR_INFO() :
    begin(-1), end(-1), dangerous(false), correct_is_ngram(false),
    leftmost(INVALID_UNICHAR_ID) {}
  DANGERR_INFO(int b, int e, bool d, bool n, UNICHAR_ID l) :
    begin(b), end(e), dangerous(d), correct_is_ngram(n), leftmost(l) {}
  int begin;               // first blob index of the ambiguous span
  int end;                 // one past the last blob index of the span
  bool dangerous;          // true if the alternative reading is plausible
  bool correct_is_ngram;   // true if the correct reading is an ngram
  UNICHAR_ID leftmost;  // in the replacement, what's the leftmost character?
};
typedef GenericVector<DANGERR_INFO> DANGERR;
#endif
| C++ |
///////////////////////////////////////////////////////////////////////
// File: dict.h
// Description: dict class.
// Author: Samuel Charron
//
// (C) Copyright 2006, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#ifndef TESSERACT_DICT_DICT_H_
#define TESSERACT_DICT_DICT_H_
#include "ambigs.h"
#include "dawg.h"
#include "dawg_cache.h"
#include "host.h"
#include "oldlist.h"
#include "ratngs.h"
#include "stopper.h"
#include "trie.h"
#include "unicharset.h"
#include "params_training_featdef.h"
class MATRIX;
class WERD_RES;
#define MAX_WERD_LENGTH (inT64) 128
#define NO_RATING -1
/** Struct used to hold temporary information about fragments. */
struct CHAR_FRAGMENT_INFO {
  UNICHAR_ID unichar_id;          // id of the whole character being assembled
  const CHAR_FRAGMENT *fragment;  // fragment this piece corresponds to
  int num_fragments;              // fragments accumulated so far
  float rating;                   // combined rating of the fragments
  float certainty;                // worst certainty among the fragments
};
namespace tesseract {
typedef GenericVector<Dawg *> DawgVector;
//
// Constants
//
// Added to ratings before and subtracted after the multiplicative
// adjustment in Dict::adjust_word so small ratings are still affected.
static const int kRatingPad = 4;
static const char kDictWildcard[] = "\u2606";  // WHITE STAR
static const int kDictMaxWildcards = 2;  // max wildcards for a word
// TODO(daria): If hyphens are different in different languages and can be
// inferred from training data we should load their values dynamically.
static const char kHyphenSymbol[] = "-";
static const char kSlashSymbol[] = "/";
static const char kQuestionSymbol[] = "?";
static const char kApostropheSymbol[] = "'";
static const float kSimCertaintyScale = -10.0;  // similarity matcher scaling
static const float kSimCertaintyOffset = -10.0;  // similarity matcher offset
static const float kSimilarityFloor = 100.0;  // worst E*L product to stop on
// Max repeats of one unichar allowed in a document-dictionary candidate.
static const int kDocDictMaxRepChars = 4;
// Enum for describing whether the x-height for the word is consistent:
//  0 - everything is good.
//  1 - there are one or two secondary (but consistent) baselines
//      [think subscript and superscript], or there is an oversized
//      first character.
//  2 - the word is inconsistent.
enum XHeightConsistencyEnum {XH_GOOD, XH_SUBNORMAL, XH_INCONSISTENT};
// Bundle of state passed through the letter_is_okay_ callback: the dawg
// positions active before the current letter, the positions that remain
// valid after it, and the best permuter type seen so far.
struct DawgArgs {
  DawgArgs(DawgPositionVector *d, DawgPositionVector *up, PermuterType p)
      : active_dawgs(d), updated_dawgs(up), permuter(p) {}
  DawgPositionVector *active_dawgs;   // positions before the current letter
  DawgPositionVector *updated_dawgs;  // positions after the current letter
  PermuterType permuter;              // best permuter found so far
};
class Dict {
public:
Dict(CCUtil* image_ptr);
~Dict();
  /// Returns the (const) CCUtil that owns this Dict's unicharset and params.
  const CCUtil* getCCUtil() const {
    return ccutil_;
  }
  /// Returns the mutable CCUtil that owns this Dict's unicharset and params.
  CCUtil* getCCUtil() {
    return ccutil_;
  }
  /// Returns the (const) unicharset shared via the CCUtil.
  const UNICHARSET& getUnicharset() const {
    return getCCUtil()->unicharset;
  }
  /// Returns the mutable unicharset shared via the CCUtil.
  UNICHARSET& getUnicharset() {
    return getCCUtil()->unicharset;
  }
  /// Returns the ambiguity table shared via the CCUtil.
  const UnicharAmbigs &getUnicharAmbigs() const {
    return getCCUtil()->unichar_ambigs;
  }
// Returns true if unichar_id is a word compounding character like - or /.
inline bool compound_marker(UNICHAR_ID unichar_id) {
const GenericVector<UNICHAR_ID>& normed_ids =
getUnicharset().normed_ids(unichar_id);
return normed_ids.size() == 1 &&
(normed_ids[0] == hyphen_unichar_id_ ||
normed_ids[0] == slash_unichar_id_);
}
// Returns true if unichar_id is an apostrophe-like character that may
// separate prefix/suffix words from a main body word.
inline bool is_apostrophe(UNICHAR_ID unichar_id) {
const GenericVector<UNICHAR_ID>& normed_ids =
getUnicharset().normed_ids(unichar_id);
return normed_ids.size() == 1 && normed_ids[0] == apostrophe_unichar_id_;
}
/* hyphen.cpp ************************************************************/
/// Returns true if we've recorded the beginning of a hyphenated word.
inline bool hyphenated() const { return
!last_word_on_line_ && hyphen_word_;
}
/// Size of the base word (the part on the line before) of a hyphenated word.
inline int hyphen_base_size() const {
return this->hyphenated() ? hyphen_word_->length() : 0;
}
/// If this word is hyphenated copy the base word (the part on
/// the line before) of a hyphenated word into the given word.
/// This function assumes that word is not NULL.
inline void copy_hyphen_info(WERD_CHOICE *word) const {
if (this->hyphenated()) {
*word = *hyphen_word_;
if (hyphen_debug_level) word->print("copy_hyphen_info: ");
}
}
/// Check whether the word has a hyphen at the end.
inline bool has_hyphen_end(UNICHAR_ID unichar_id, bool first_pos) const {
if (!last_word_on_line_ || first_pos)
return false;
const GenericVector<UNICHAR_ID>& normed_ids =
getUnicharset().normed_ids(unichar_id);
return normed_ids.size() == 1 && normed_ids[0] == hyphen_unichar_id_;
}
/// Same as above, but check the unichar at the end of the word.
inline bool has_hyphen_end(const WERD_CHOICE &word) const {
int word_index = word.length() - 1;
return has_hyphen_end(word.unichar_id(word_index), word_index == 0);
}
/// Unless the previous word was the last one on the line, and the current
/// one is not (thus it is the first one on the line), erase hyphen_word_,
/// clear hyphen_active_dawgs_, update last_word_on_line_.
void reset_hyphen_vars(bool last_word_on_line);
/// Update hyphen_word_, and copy the given DawgPositionVectors into
/// hyphen_active_dawgs_ .
void set_hyphen_word(const WERD_CHOICE &word,
const DawgPositionVector &active_dawgs);
/* permdawg.cpp ************************************************************/
// Note: Functions in permdawg.cpp are only used by NoDangerousAmbig().
// When this function is refactored, permdawg.cpp can be removed.
/// Copies word into best_choice if its rating is smaller
/// than that of best_choice.
inline void update_best_choice(const WERD_CHOICE &word,
WERD_CHOICE *best_choice) {
if (word.rating() < best_choice->rating()) {
*best_choice = word;
}
}
/// Fill the given active_dawgs vector with dawgs that could contain the
/// beginning of the word. If hyphenated() returns true, copy the entries
/// from hyphen_active_dawgs_ instead.
void init_active_dawgs(DawgPositionVector *active_dawgs,
bool ambigs_mode) const;
// Fill the given vector with the default collection of any-length dawgs
void default_dawgs(DawgPositionVector *anylength_dawgs,
bool suppress_patterns) const;
/// Recursively explore all the possible character combinations in
/// the given char_choices. Use go_deeper_dawg_fxn() to explore all the
/// dawgs in the dawgs_ vector in parallel and discard invalid words.
///
/// Allocate and return a WERD_CHOICE with the best valid word found.
WERD_CHOICE *dawg_permute_and_select(
const BLOB_CHOICE_LIST_VECTOR &char_choices, float rating_limit);
/// If the choice being composed so far could be a dictionary word
/// and we have not reached the end of the word keep exploring the
/// char_choices further.
void go_deeper_dawg_fxn(
const char *debug, const BLOB_CHOICE_LIST_VECTOR &char_choices,
int char_choice_index, const CHAR_FRAGMENT_INFO *prev_char_frag_info,
bool word_ending, WERD_CHOICE *word, float certainties[],
float *limit, WERD_CHOICE *best_choice, int *attempts_left,
void *void_more_args);
/// Pointer to go_deeper function.
void (Dict::*go_deeper_fxn_)(const char *debug,
const BLOB_CHOICE_LIST_VECTOR &char_choices,
int char_choice_index,
const CHAR_FRAGMENT_INFO *prev_char_frag_info,
bool word_ending, WERD_CHOICE *word,
float certainties[], float *limit,
WERD_CHOICE *best_choice, int *attempts_left,
void *void_more_args);
//
// Helper functions for dawg_permute_and_select().
//
void permute_choices(
const char *debug,
const BLOB_CHOICE_LIST_VECTOR &char_choices,
int char_choice_index,
const CHAR_FRAGMENT_INFO *prev_char_frag_info,
WERD_CHOICE *word,
float certainties[],
float *limit,
WERD_CHOICE *best_choice,
int *attempts_left,
void *more_args);
void append_choices(
const char *debug,
const BLOB_CHOICE_LIST_VECTOR &char_choices,
const BLOB_CHOICE &blob_choice,
int char_choice_index,
const CHAR_FRAGMENT_INFO *prev_char_frag_info,
WERD_CHOICE *word,
float certainties[],
float *limit,
WERD_CHOICE *best_choice,
int *attempts_left,
void *more_args);
bool fragment_state_okay(UNICHAR_ID curr_unichar_id,
float curr_rating, float curr_certainty,
const CHAR_FRAGMENT_INFO *prev_char_frag_info,
const char *debug, int word_ending,
CHAR_FRAGMENT_INFO *char_frag_info);
/* stopper.cpp *************************************************************/
bool NoDangerousAmbig(WERD_CHOICE *BestChoice,
DANGERR *fixpt,
bool fix_replaceable,
MATRIX* ratings);
// Replaces the corresponding wrong ngram in werd_choice with the correct
// one. The whole correct n-gram is inserted into the ratings matrix and
// the werd_choice: no more fragments!. Rating and certainty of new entries
// in matrix and werd_choice are the sum and mean of the wrong ngram
// respectively.
// E.g. for werd_choice mystring'' and ambiguity ''->": werd_choice becomes
// mystring", with a new entry in the ratings matrix for ".
void ReplaceAmbig(int wrong_ngram_begin_index, int wrong_ngram_size,
UNICHAR_ID correct_ngram_id, WERD_CHOICE *werd_choice,
MATRIX *ratings);
/// Returns the length of the shortest alpha run in WordChoice.
int LengthOfShortestAlphaRun(const WERD_CHOICE &WordChoice);
/// Returns true if the certainty of the BestChoice word is within a
/// reasonable range of the average certainties for the best choices for
/// each character in the segmentation. This test is used to catch words
/// in which one character is much worse than the other characters in the
/// word (i.e. false will be returned in that case). The algorithm computes
/// the mean and std deviation of the certainties in the word with the worst
/// certainty thrown out.
int UniformCertainties(const WERD_CHOICE& word);
/// Returns true if the given best_choice is good enough to stop.
bool AcceptableChoice(const WERD_CHOICE& best_choice,
XHeightConsistencyEnum xheight_consistency);
/// Returns false if the best choice for the current word is questionable
/// and should be tried again on the second pass or should be flagged to
/// the user.
bool AcceptableResult(WERD_RES* word);
void EndDangerousAmbigs();
/// Prints the current choices for this word to stdout.
void DebugWordChoices();
/// Sets up stopper variables in preparation for the first pass.
void SettupStopperPass1();
/// Sets up stopper variables in preparation for the second pass.
void SettupStopperPass2();
/* context.cpp *************************************************************/
/// Check a string to see if it matches a set of lexical rules.
int case_ok(const WERD_CHOICE &word, const UNICHARSET &unicharset);
/// Returns true if the word looks like an absolute garbage
/// (e.g. image mistakenly recognized as text).
bool absolute_garbage(const WERD_CHOICE &word, const UNICHARSET &unicharset);
/* dict.cpp ****************************************************************/
/// Initialize Dict class - load dawgs from [lang].traineddata and
/// user-specified wordlist and parttern list.
static DawgCache *GlobalDawgCache();
void Load(DawgCache *dawg_cache);
void End();
// Resets the document dictionary analogous to ResetAdaptiveClassifier.
void ResetDocumentDictionary() {
if (pending_words_ != NULL)
pending_words_->clear();
if (document_words_ != NULL)
document_words_->clear();
}
/**
* Returns the maximal permuter code (from ccstruct/ratngs.h) if in light
* of the current state the letter at word_index in the given word
* is allowed according to at least one of the dawgs in dawgs_,
* otherwise returns NO_PERM.
*
* The state is described by void_dawg_args, which are interpreted as
* DawgArgs and contain relevant active dawg positions.
* Each entry in the active_dawgs vector contains an index
* into the dawgs_ vector and an EDGE_REF that indicates the last edge
* followed in the dawg. It also may contain a position in the punctuation
* dawg which describes surrounding punctuation (see struct DawgPosition).
*
* Input:
* At word_index 0 dawg_args->active_dawgs should contain an entry for each
* dawg that may start at the beginning of a word, with punc_ref and edge_ref
* initialized to NO_EDGE. Since the punctuation dawg includes the empty
* pattern " " (meaning anything without surrounding punctuation), having a
* single entry for the punctuation dawg will cover all dawgs reachable
* therefrom -- that includes all number and word dawgs. The only dawg
* non-reachable from the punctuation_dawg is the pattern dawg.
* If hyphen state needs to be applied, initial dawg_args->active_dawgs can
* be copied from the saved hyphen state (maintained by Dict).
* For word_index > 0 the corresponding state (active_dawgs and punc position)
* can be obtained from dawg_args->updated_dawgs passed to
* def_letter_is_okay for word_index-1.
* Note: the function assumes that active_dawgs, nd updated_dawgs
* member variables of dawg_args are not NULL.
*
* Output:
* The function fills in dawg_args->updated_dawgs vector with the
* entries for dawgs that contain the word up to the letter at word_index.
*
*/
//
int def_letter_is_okay(void* void_dawg_args,
UNICHAR_ID unichar_id, bool word_end) const;
int (Dict::*letter_is_okay_)(void* void_dawg_args,
UNICHAR_ID unichar_id, bool word_end) const;
/// Calls letter_is_okay_ member function.
int LetterIsOkay(void* void_dawg_args,
UNICHAR_ID unichar_id, bool word_end) const {
return (this->*letter_is_okay_)(void_dawg_args, unichar_id, word_end);
}
/// Probability in context function used by the ngram permuter.
double (Dict::*probability_in_context_)(const char* lang,
const char* context,
int context_bytes,
const char* character,
int character_bytes);
/// Calls probability_in_context_ member function.
double ProbabilityInContext(const char* context,
int context_bytes,
const char* character,
int character_bytes) {
return (this->*probability_in_context_)(
getCCUtil()->lang.string(),
context, context_bytes,
character, character_bytes);
}
/// Default (no-op) implementation of probability in context function.
double def_probability_in_context(
const char* lang, const char* context, int context_bytes,
const char* character, int character_bytes) {
(void) context;
(void) context_bytes;
(void) character;
(void) character_bytes;
return 0.0;
}
double ngram_probability_in_context(const char* lang,
const char* context,
int context_bytes,
const char* character,
int character_bytes);
// Interface with params model.
float (Dict::*params_model_classify_)(const char *lang, void *path);
float ParamsModelClassify(const char *lang, void *path);
// Call params_model_classify_ member function.
float CallParamsModelClassify(void *path) {
ASSERT_HOST(params_model_classify_ != NULL); // ASSERT_HOST -> assert
return (this->*params_model_classify_)(
getCCUtil()->lang.string(), path);
}
inline void SetWildcardID(UNICHAR_ID id) { wildcard_unichar_id_ = id; }
inline const UNICHAR_ID WildcardID() const {
return wildcard_unichar_id_;
}
/// Return the number of dawgs in the dawgs_ vector.
inline const int NumDawgs() const { return dawgs_.size(); }
/// Return i-th dawg pointer recorded in the dawgs_ vector.
inline const Dawg *GetDawg(int index) const { return dawgs_[index]; }
/// Return the points to the punctuation dawg.
inline const Dawg *GetPuncDawg() const { return punc_dawg_; }
/// Return the points to the unambiguous words dawg.
inline const Dawg *GetUnambigDawg() const { return unambig_dawg_; }
/// Returns the appropriate next node given the EDGE_REF.
static inline NODE_REF GetStartingNode(const Dawg *dawg, EDGE_REF edge_ref) {
if (edge_ref == NO_EDGE) return 0; // beginning to explore the dawg
NODE_REF node = dawg->next_node(edge_ref);
if (node == 0) node = NO_EDGE; // end of word
return node;
}
// Given a unichar from a string and a given dawg, return the unichar
// we should use to match in that dawg type. (for example, in the number
// dawg, all numbers are transformed to kPatternUnicharId).
inline UNICHAR_ID char_for_dawg(UNICHAR_ID ch, const Dawg *dawg) const {
if (!dawg) return ch;
switch (dawg->type()) {
case DAWG_TYPE_NUMBER:
return getUnicharset().get_isdigit(ch) ? Dawg::kPatternUnicharID : ch;
default:
return ch;
}
}
/// For each of the character classes of the given unichar_id (and the
/// unichar_id itself) finds the corresponding outgoing node or self-loop
/// in the given dawg and (after checking that it is valid) records it in
/// dawg_args->updated_ative_dawgs. Updates current_permuter if any valid
/// edges were found.
void ProcessPatternEdges(const Dawg *dawg, const DawgPosition &info,
UNICHAR_ID unichar_id, bool word_end,
DawgPositionVector *updated_dawgs,
PermuterType *current_permuter) const;
/// Read/Write/Access special purpose dawgs which contain words
/// only of a certain length (used for phrase search for
/// non-space-delimited languages).
/// Check all the DAWGs to see if this word is in any of them.
inline static bool valid_word_permuter(uinT8 perm, bool numbers_ok) {
return (perm == SYSTEM_DAWG_PERM || perm == FREQ_DAWG_PERM ||
perm == DOC_DAWG_PERM || perm == USER_DAWG_PERM ||
perm == USER_PATTERN_PERM || perm == COMPOUND_PERM ||
(numbers_ok && perm == NUMBER_PERM));
}
int valid_word(const WERD_CHOICE &word, bool numbers_ok) const;
int valid_word(const WERD_CHOICE &word) const {
return valid_word(word, false); // return NO_PERM for words with digits
}
int valid_word_or_number(const WERD_CHOICE &word) const {
return valid_word(word, true); // return NUMBER_PERM for valid numbers
}
/// This function is used by api/tesseract_cube_combiner.cpp
int valid_word(const char *string) const {
WERD_CHOICE word(string, getUnicharset());
return valid_word(word);
}
// Do the two WERD_CHOICEs form a meaningful bigram?
bool valid_bigram(const WERD_CHOICE &word1, const WERD_CHOICE &word2) const;
/// Returns true if the word contains a valid punctuation pattern.
/// Note: Since the domains of punctuation symbols and symblos
/// used in numbers are not disjoint, a valid number might contain
/// an invalid punctuation pattern (e.g. .99).
bool valid_punctuation(const WERD_CHOICE &word);
/// Returns true if a good answer is found for the unknown blob rating.
int good_choice(const WERD_CHOICE &choice);
/// Adds a word found on this document to the document specific dictionary.
void add_document_word(const WERD_CHOICE &best_choice);
/// Adjusts the rating of the given word.
void adjust_word(WERD_CHOICE *word,
bool nonword, XHeightConsistencyEnum xheight_consistency,
float additional_adjust,
bool modify_rating,
bool debug);
/// Set wordseg_rating_adjust_factor_ to the given value.
inline void SetWordsegRatingAdjustFactor(float f) {
wordseg_rating_adjust_factor_ = f;
}
private:
/** Private member variables. */
CCUtil* ccutil_;
/**
* Table that stores ambiguities computed during training
* (loaded when NoDangerousAmbigs() is called for the first time).
* Each entry i in the table stores a set of amibiguities whose
* wrong ngram starts with unichar id i.
*/
UnicharAmbigs *dang_ambigs_table_;
/** Same as above, but for ambiguities with replace flag set. */
UnicharAmbigs *replace_ambigs_table_;
/** Additional certainty padding allowed before a word is rejected. */
FLOAT32 reject_offset_;
// Cached UNICHAR_IDs:
UNICHAR_ID wildcard_unichar_id_; // kDictWildcard.
UNICHAR_ID apostrophe_unichar_id_; // kApostropheSymbol.
UNICHAR_ID question_unichar_id_; // kQuestionSymbol.
UNICHAR_ID slash_unichar_id_; // kSlashSymbol.
UNICHAR_ID hyphen_unichar_id_; // kHyphenSymbol.
// Hyphen-related variables.
WERD_CHOICE *hyphen_word_;
DawgPositionVector hyphen_active_dawgs_;
bool last_word_on_line_;
// List of lists of "equivalent" UNICHAR_IDs for the purposes of dictionary
// matching. The first member of each list is taken as canonical. For
// example, the first list contains hyphens and dashes with the first symbol
// being the ASCII hyphen minus.
GenericVector<GenericVectorEqEq<UNICHAR_ID> > equivalent_symbols_;
// Dawg Cache reference - this is who we ask to allocate/deallocate dawgs.
DawgCache *dawg_cache_;
bool dawg_cache_is_ours_; // we should delete our own dawg_cache_
// Dawgs.
DawgVector dawgs_;
SuccessorListsVector successors_;
Trie *pending_words_;
// bigram_dawg_ points to a dawg of two-word bigrams which always supercede if
// any of them are present on the best choices list for a word pair.
// the bigrams are stored as space-separated words where:
// (1) leading and trailing punctuation has been removed from each word and
// (2) any digits have been replaced with '?' marks.
Dawg *bigram_dawg_;
/// The following pointers are only cached for convenience.
/// The dawgs will be deleted when dawgs_ vector is destroyed.
// TODO(daria): need to support multiple languages in the future,
// so maybe will need to maintain a list of dawgs of each kind.
Dawg *freq_dawg_;
Dawg *unambig_dawg_;
Dawg *punc_dawg_;
Trie *document_words_;
/// Current segmentation cost adjust factor for word rating.
/// See comments in incorporate_segcost.
float wordseg_rating_adjust_factor_;
// File for recording ambiguities discovered during dictionary search.
FILE *output_ambig_words_file_;
public:
/// Variable members.
/// These have to be declared and initialized after image_ptr_, which contains
/// the pointer to the params vector - the member of its base CCUtil class.
STRING_VAR_H(user_words_file, "", "A filename of user-provided words.");
STRING_VAR_H(user_words_suffix, "",
"A suffix of user-provided words located in tessdata.");
STRING_VAR_H(user_patterns_file, "",
"A filename of user-provided patterns.");
STRING_VAR_H(user_patterns_suffix, "",
"A suffix of user-provided patterns located in tessdata.");
BOOL_VAR_H(load_system_dawg, true, "Load system word dawg.");
BOOL_VAR_H(load_freq_dawg, true, "Load frequent word dawg.");
BOOL_VAR_H(load_unambig_dawg, true, "Load unambiguous word dawg.");
BOOL_VAR_H(load_punc_dawg, true,
"Load dawg with punctuation patterns.");
BOOL_VAR_H(load_number_dawg, true, "Load dawg with number patterns.");
BOOL_VAR_H(load_bigram_dawg, true,
"Load dawg with special word bigrams.");
double_VAR_H(xheight_penalty_subscripts, 0.125,
"Score penalty (0.1 = 10%) added if there are subscripts "
"or superscripts in a word, but it is otherwise OK.");
double_VAR_H(xheight_penalty_inconsistent, 0.25,
"Score penalty (0.1 = 10%) added if an xheight is "
"inconsistent.");
double_VAR_H(segment_penalty_dict_frequent_word, 1.0,
"Score multiplier for word matches which have good case and"
"are frequent in the given language (lower is better).");
double_VAR_H(segment_penalty_dict_case_ok, 1.1,
"Score multiplier for word matches that have good case "
"(lower is better).");
double_VAR_H(segment_penalty_dict_case_bad, 1.3125,
"Default score multiplier for word matches, which may have "
"case issues (lower is better).");
// TODO(daria): remove this param when ngram permuter is deprecated.
double_VAR_H(segment_penalty_ngram_best_choice, 1.24,
"Multipler to for the best choice from the ngram model.");
double_VAR_H(segment_penalty_dict_nonword, 1.25,
"Score multiplier for glyph fragment segmentations which "
"do not match a dictionary word (lower is better).");
double_VAR_H(segment_penalty_garbage, 1.50,
"Score multiplier for poorly cased strings that are not in"
" the dictionary and generally look like garbage (lower is"
" better).");
STRING_VAR_H(output_ambig_words_file, "",
"Output file for ambiguities found in the dictionary");
INT_VAR_H(dawg_debug_level, 0, "Set to 1 for general debug info"
", to 2 for more details, to 3 to see all the debug messages");
INT_VAR_H(hyphen_debug_level, 0, "Debug level for hyphenated words.");
INT_VAR_H(max_viterbi_list_size, 10, "Maximum size of viterbi list.");
BOOL_VAR_H(use_only_first_uft8_step, false,
"Use only the first UTF8 step of the given string"
" when computing log probabilities.");
double_VAR_H(certainty_scale, 20.0, "Certainty scaling factor");
double_VAR_H(stopper_nondict_certainty_base, -2.50,
"Certainty threshold for non-dict words");
double_VAR_H(stopper_phase2_certainty_rejection_offset, 1.0,
"Reject certainty offset");
INT_VAR_H(stopper_smallword_size, 2,
"Size of dict word to be treated as non-dict word");
double_VAR_H(stopper_certainty_per_char, -0.50,
"Certainty to add for each dict char above small word size.");
double_VAR_H(stopper_allowable_character_badness, 3.0,
"Max certaintly variation allowed in a word (in sigma)");
INT_VAR_H(stopper_debug_level, 0, "Stopper debug level");
BOOL_VAR_H(stopper_no_acceptable_choices, false,
"Make AcceptableChoice() always return false. Useful"
" when there is a need to explore all segmentations");
BOOL_VAR_H(save_raw_choices, false,
"Deprecated- backward compatability only");
INT_VAR_H(tessedit_truncate_wordchoice_log, 10, "Max words to keep in list");
STRING_VAR_H(word_to_debug, "", "Word for which stopper debug information"
" should be printed to stdout");
STRING_VAR_H(word_to_debug_lengths, "",
"Lengths of unichars in word_to_debug");
INT_VAR_H(fragments_debug, 0, "Debug character fragments");
BOOL_VAR_H(segment_nonalphabetic_script, false,
"Don't use any alphabetic-specific tricks."
"Set to true in the traineddata config file for"
" scripts that are cursive or inherently fixed-pitch");
BOOL_VAR_H(save_doc_words, 0, "Save Document Words");
double_VAR_H(doc_dict_pending_threshold, 0.0,
"Worst certainty for using pending dictionary");
double_VAR_H(doc_dict_certainty_threshold, -2.25, "Worst certainty"
" for words that can be inserted into the document dictionary");
INT_VAR_H(max_permuter_attempts, 10000, "Maximum number of different"
" character choices to consider during permutation."
" This limit is especially useful when user patterns"
" are specified, since overly generic patterns can result in"
" dawg search exploring an overly large number of options.");
};
} // namespace tesseract
#endif // THIRD_PARTY_TESSERACT_DICT_DICT_H_
// ---- end of dict.h; next file: dawg_cache.h ----
///////////////////////////////////////////////////////////////////////
// File: dawg_cache.h
// Description: A class that knows about loading and caching dawgs.
// Author: David Eger
// Created: Fri Jan 27 12:08:00 PST 2012
//
// (C) Copyright 2012, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#ifndef TESSERACT_DICT_DAWG_CACHE_H_
#define TESSERACT_DICT_DAWG_CACHE_H_
#include "dawg.h"
#include "object_cache.h"
#include "strngs.h"
#include "tessdatamanager.h"
namespace tesseract {
// Reference-counted cache of loaded Dawg objects, so that a single copy of
// each dawg can be shared (e.g. by multiple Dict instances — see
// Dict::GlobalDawgCache()). All ownership bookkeeping is delegated to the
// underlying ObjectCache.
class DawgCache {
 public:
  // Returns the squished dawg of the given type for the given language,
  // loading it from data_file_name if it is not already cached.
  // (Loading details live in the .cpp implementation.)
  Dawg *GetSquishedDawg(
      const STRING &lang,
      const char *data_file_name,
      TessdataType tessdata_dawg_type,
      int debug_level);

  // If we manage the given dawg, decrement its count,
  // and possibly delete it if the count reaches zero.
  // If dawg is unknown to us, return false.
  bool FreeDawg(Dawg *dawg) {
    return dawgs_.Free(dawg);
  }

  // Free up any currently unused dawgs.
  void DeleteUnusedDawgs() {
    dawgs_.DeleteUnusedObjects();
  }

 private:
  ObjectCache<Dawg> dawgs_;  // owns/refcounts the cached dawgs
};
} // namespace tesseract
#endif // TESSERACT_DICT_DAWG_CACHE_H_
// ---- end of dawg_cache.h; next file: tfacepp.cpp ----
/**********************************************************************
* File: tfacepp.cpp (Formerly tface++.c)
* Description: C++ side of the C/C++ Tess/Editor interface.
* Author: Ray Smith
* Created: Thu Apr 23 15:39:23 BST 1992
*
* (C) Copyright 1992, Hewlett-Packard Ltd.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
#ifdef _MSC_VER
#pragma warning(disable:4244) // Conversion warnings
#pragma warning(disable:4305) // int/float warnings
#pragma warning(disable:4800) // int/bool warnings
#endif
#include <math.h>
#include "blamer.h"
#include "errcode.h"
#include "ratngs.h"
#include "reject.h"
#include "tesseractclass.h"
#include "werd.h"
#define MAX_UNDIVIDED_LENGTH 24
/**********************************************************************
* recog_word
*
* Convert the word to tess form and pass it to the tess segmenter.
* Convert the output back to editor form.
**********************************************************************/
namespace tesseract {
// Recognizes the given word: runs the (possibly recursive) segmenter via
// recog_word_recursive(), then sanity-checks the result, optionally
// overrides the permuter type with a straight dictionary check, and marks
// the word as failed if the output is empty or all spaces.
// NOTE(review): assertion order below is deliberate — do not reorder.
void Tesseract::recog_word(WERD_RES *word) {
  // When only blame-annotated (ground-truthed) words matter, skip words
  // that have no truth attached.
  if (wordrec_skip_no_truth_words && (word->blamer_bundle == NULL ||
      word->blamer_bundle->incorrect_result_reason() == IRR_NO_TRUTH)) {
    if (classify_debug_level) tprintf("No truth for word - skipping\n");
    word->tess_failed = true;
    return;
  }
  ASSERT_HOST(!word->chopped_word->blobs.empty());
  recog_word_recursive(word);
  word->SetupBoxWord();
  // The number of recognized characters must match the number of boxes.
  if (word->best_choice->length() != word->box_word->length()) {
    tprintf("recog_word ASSERT FAIL String:\"%s\"; "
            "Strlen=%d; #Blobs=%d\n",
            word->best_choice->debug_string().string(),
            word->best_choice->length(), word->box_word->length());
  }
  ASSERT_HOST(word->best_choice->length() == word->box_word->length());
  // Check that the ratings matrix size matches the sum of all the
  // segmentation states.
  if (!word->StatesAllValid()) {
    tprintf("Not all words have valid states relative to ratings matrix!!");
    word->DebugWordChoices(true, NULL);
    ASSERT_HOST(word->StatesAllValid());
  }
  if (tessedit_override_permuter) {
    /* Override the permuter type if a straight dictionary check disagrees. */
    uinT8 perm_type = word->best_choice->permuter();
    if ((perm_type != SYSTEM_DAWG_PERM) &&
        (perm_type != FREQ_DAWG_PERM) && (perm_type != USER_DAWG_PERM)) {
      uinT8 real_dict_perm_type = dict_word(*word->best_choice);
      if (((real_dict_perm_type == SYSTEM_DAWG_PERM) ||
           (real_dict_perm_type == FREQ_DAWG_PERM) ||
           (real_dict_perm_type == USER_DAWG_PERM)) &&
          (alpha_count(word->best_choice->unichar_string().string(),
                       word->best_choice->unichar_lengths().string()) > 0)) {
        word->best_choice->set_permuter(real_dict_perm_type);  // use dict perm
      }
    }
    if (tessedit_rejection_debug &&
        perm_type != word->best_choice->permuter()) {
      tprintf("Permuter Type Flipped from %d to %d\n",
              perm_type, word->best_choice->permuter());
    }
  }
  // Factored out from control.cpp
  // A word whose output is empty or all spaces counts as a tess failure
  // and gets its whole reject map marked accordingly.
  ASSERT_HOST((word->best_choice == NULL) == (word->raw_choice == NULL));
  if (word->best_choice == NULL || word->best_choice->length() == 0 ||
      static_cast<int>(strspn(word->best_choice->unichar_string().string(),
                              " ")) == word->best_choice->length()) {
    word->tess_failed = true;
    word->reject_map.initialise(word->box_word->length());
    word->reject_map.rej_word_tess_failure();
  } else {
    word->tess_failed = false;
  }
}
/**********************************************************************
* recog_word_recursive
*
* Convert the word to tess form and pass it to the tess segmenter.
* Convert the output back to editor form.
**********************************************************************/
// Recognizes the word, splitting it first if it is too long to process
// whole (> MAX_UNDIVIDED_LENGTH blobs). After recognition, trims an
// over-long best_choice and pads a short one with spaces so that the
// choice length matches the number of output blobs.
void Tesseract::recog_word_recursive(WERD_RES *word) {
  const int num_input_blobs = word->chopped_word->NumBlobs();
  // Too many blobs to recognize in one go: divide and conquer.
  if (num_input_blobs > MAX_UNDIVIDED_LENGTH) {
    split_and_recog_word(word);
    return;
  }
  cc_recog(word);
  const int num_output_blobs = word->rebuild_word->NumBlobs();
  // Do sanity checks and minor fixes on best_choice.
  WERD_CHOICE *choice = word->best_choice;
  if (choice->length() > num_output_blobs) {
    choice->make_bad();  // should never happen
    tprintf("recog_word: Discarded long string \"%s\""
            " (%d characters vs %d blobs)\n",
            choice->unichar_string().string(),
            choice->length(), num_output_blobs);
    tprintf("Word is at:");
    word->word->bounding_box().print();
  }
  if (choice->length() < num_output_blobs) {
    // Pad with spaces until the lengths agree.
    const UNICHAR_ID space_id = unicharset.unichar_to_id(" ");
    do {
      choice->append_unichar_id(space_id, 1, 0.0, choice->certainty());
    } while (choice->length() < num_output_blobs);
  }
}
/**********************************************************************
* split_and_recog_word
*
* Split the word into 2 smaller pieces at the largest gap.
* Recognize the pieces and stick the results back together.
**********************************************************************/
// Splits the word in two at the widest inter-blob gap, recognizes each
// half recursively, then stitches the results (and any blamer bundle)
// back together with join_words().
void Tesseract::split_and_recog_word(WERD_RES *word) {
  // Scan for the widest horizontal gap between adjacent blobs: the blob
  // to the right of that gap starts the second piece.
  int widest_gap = -MAX_INT32;
  int cut_index = 0;
  const int num_blobs = word->chopped_word->NumBlobs();
  for (int i = 1; i < num_blobs; ++i) {
    const TBOX left_box = word->chopped_word->blobs[i - 1]->bounding_box();
    const TBOX right_box = word->chopped_word->blobs[i]->bounding_box();
    const int gap = right_box.left() - left_box.right();
    if (gap > widest_gap) {
      widest_gap = gap;
      cut_index = i;
    }
  }
  ASSERT_HOST(cut_index > 0);
  WERD_RES *second_half = NULL;
  BlamerBundle *saved_blame = NULL;
  split_word(word, cut_index, &second_half, &saved_blame);
  // Recognize each piece independently, then reassemble.
  recog_word_recursive(word);
  recog_word_recursive(second_half);
  join_words(word, second_half, saved_blame);
}
/**********************************************************************
* split_word
*
* Split a given WERD_RES in place into two smaller words for recognition.
* split_pt is the index of the first blob to go in the second word.
* The underlying word is left alone, only the TWERD (and subsequent data)
* are split up. orig_blamer_bundle is set to the original blamer bundle,
* and will now be owned by the caller. New blamer bundles are forged for the
* two pieces.
**********************************************************************/
// Split a given WERD_RES in place into two smaller words for recognition.
// split_pt is the index of the first blob to go in the second word.
// The underlying word is left alone, only the TWERD (and subsequent data)
// are split up. orig_blamer_bundle is set to the original blamer bundle,
// and will now be owned by the caller. New blamer bundles are forged for the
// two pieces.
// NOTE(review): blob pointers are moved (not copied) between the two
// TWERDs, and ClearResults() is called only after both chopped_words are
// detached — the statement order below is load-bearing.
void Tesseract::split_word(WERD_RES *word,
                           int split_pt,
                           WERD_RES **right_piece,
                           BlamerBundle **orig_blamer_bundle) const {
  ASSERT_HOST(split_pt > 0 && split_pt < word->chopped_word->NumBlobs());
  // Save a copy of the blamer bundle so we can try to reconstruct it below.
  BlamerBundle *orig_bb =
      word->blamer_bundle ? new BlamerBundle(*word->blamer_bundle) : NULL;
  WERD_RES *word2 = new WERD_RES(*word);
  // blow away the copied chopped_word, as we want to work with
  // the blobs from the input chopped_word so seam_arrays can be merged.
  TWERD *chopped = word->chopped_word;
  TWERD *chopped2 = new TWERD;
  // Move blobs [split_pt, end) into the new TWERD; pointers are shared
  // until truncate() below removes them from the first word.
  chopped2->blobs.reserve(chopped->NumBlobs() - split_pt);
  for (int i = split_pt; i < chopped->NumBlobs(); ++i) {
    chopped2->blobs.push_back(chopped->blobs[i]);
  }
  chopped->blobs.truncate(split_pt);
  // Detach both chopped_words before ClearResults() so neither is deleted.
  word->chopped_word = NULL;
  delete word2->chopped_word;
  word2->chopped_word = NULL;
  const UNICHARSET &unicharset = *word->uch_set;
  word->ClearResults();
  word2->ClearResults();
  word->chopped_word = chopped;
  word2->chopped_word = chopped2;
  word->SetupBasicsFromChoppedWord(unicharset);
  word2->SetupBasicsFromChoppedWord(unicharset);
  // Try to adjust the blamer bundle.
  if (orig_bb != NULL) {
    // TODO(rays) Looks like a leak to me.
    // orig_bb should take, rather than copy.
    word->blamer_bundle = new BlamerBundle();
    word2->blamer_bundle = new BlamerBundle();
    // Split the saved truth data at the gap between the two pieces.
    orig_bb->SplitBundle(chopped->blobs.back()->bounding_box().right(),
                         word2->chopped_word->blobs[0]->bounding_box().left(),
                         wordrec_debug_blamer,
                         word->blamer_bundle, word2->blamer_bundle);
  }
  // Ownership of word2 and orig_bb transfers to the caller.
  *right_piece = word2;
  *orig_blamer_bundle = orig_bb;
}
/**********************************************************************
* join_words
*
* The opposite of split_word():
* join word2 (including any recognized data / seam array / etc)
* onto the right of word and then delete word2.
* Also, if orig_bb is provided, stitch it back into word.
**********************************************************************/
void Tesseract::join_words(WERD_RES *word,
                           WERD_RES *word2,
                           BlamerBundle *orig_bb) const {
  // Remember the boxes bracketing the join point before the blob lists merge.
  TBOX prev_box = word->chopped_word->blobs.back()->bounding_box();
  TBOX blob_box = word2->chopped_word->blobs[0]->bounding_box();
  // Tack the word2 outputs onto the end of the word outputs.
  word->chopped_word->blobs += word2->chopped_word->blobs;
  word->rebuild_word->blobs += word2->rebuild_word->blobs;
  // The blob pointers are now owned by word; clear word2's vectors so its
  // destructor does not double-delete them.
  word2->chopped_word->blobs.clear();
  word2->rebuild_word->blobs.clear();
  // Midpoint of the inter-word gap: the anchor for the joining seam.
  TPOINT split_pt;
  split_pt.x = (prev_box.right() + blob_box.left()) / 2;
  split_pt.y = (prev_box.top() + prev_box.bottom() +
                blob_box.top() + blob_box.bottom()) / 4;
  // Move the word2 seams onto the end of the word1 seam_array.
  // Since the seam list is one element short, an empty seam marking the
  // end of the last blob in the first word is needed first.
  word->seam_array.push_back(new SEAM(0.0f, split_pt, NULL, NULL, NULL));
  word->seam_array += word2->seam_array;
  word2->seam_array.truncate(0);
  // Fix widths and gaps.
  word->blob_widths += word2->blob_widths;
  word->blob_gaps += word2->blob_gaps;
  // Fix the ratings matrix.
  int rat1 = word->ratings->dimension();
  int rat2 = word2->ratings->dimension();
  word->ratings->AttachOnCorner(word2->ratings);
  ASSERT_HOST(word->ratings->dimension() == rat1 + rat2);
  word->best_state += word2->best_state;
  // Append the word choices.
  *word->raw_choice += *word2->raw_choice;
  // How many alt choices from each should we try to get?
  const int kAltsPerPiece = 2;
  // When do we start throwing away extra alt choices?
  const int kTooManyAltChoices = 100;
  // Construct the cartesian product of the best_choices of word(1) and word2.
  WERD_CHOICE_LIST joined_choices;
  WERD_CHOICE_IT jc_it(&joined_choices);
  WERD_CHOICE_IT bc1_it(&word->best_choices);
  WERD_CHOICE_IT bc2_it(&word2->best_choices);
  int num_word1_choices = word->best_choices.length();
  int total_joined_choices = num_word1_choices;
  // Nota Bene: For the main loop here, we operate only on the 2nd and greater
  // word2 choices, and put them in the joined_choices list. The 1st word2
  // choice gets added to the original word1 choices in-place after we have
  // finished with them.
  int bc2_index = 1;
  for (bc2_it.forward(); !bc2_it.at_first(); bc2_it.forward(), ++bc2_index) {
    // Once the list is large, keep only pairings of top alternates.
    if (total_joined_choices >= kTooManyAltChoices &&
        bc2_index > kAltsPerPiece)
      break;
    int bc1_index = 0;
    for (bc1_it.move_to_first(); bc1_index < num_word1_choices;
         ++bc1_index, bc1_it.forward()) {
      if (total_joined_choices >= kTooManyAltChoices &&
          bc1_index > kAltsPerPiece)
        break;
      // Concatenate this word1 choice with this word2 choice.
      WERD_CHOICE *wc = new WERD_CHOICE(*bc1_it.data());
      *wc += *bc2_it.data();
      jc_it.add_after_then_move(wc);
      ++total_joined_choices;
    }
  }
  // Now that we've filled in as many alternates as we want, paste the best
  // choice for word2 onto the original word alt_choices.
  bc1_it.move_to_first();
  bc2_it.move_to_first();
  for (bc1_it.mark_cycle_pt(); !bc1_it.cycled_list(); bc1_it.forward()) {
    *bc1_it.data() += *bc2_it.data();
  }
  // Append the cartesian-product alternates after the updated originals.
  bc1_it.move_to_last();
  bc1_it.add_list_after(&joined_choices);
  // Restore the pointer to original blamer bundle and combine blamer
  // information recorded in the splits.
  if (orig_bb != NULL) {
    orig_bb->JoinBlames(*word->blamer_bundle, *word2->blamer_bundle,
                        wordrec_debug_blamer);
    delete word->blamer_bundle;
    word->blamer_bundle = orig_bb;
  }
  // Rebuild the box_word and size the reject map to the joined result.
  word->SetupBoxWord();
  word->reject_map.initialise(word->box_word->length());
  delete word2;
}
} // namespace tesseract
| C++ |
/**********************************************************************
* File: paragraphs.h
* Description: Paragraph Detection data structures.
* Author: David Eger
* Created: 25 February 2011
*
* (C) Copyright 2011, Google Inc.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
#ifndef TESSERACT_CCMAIN_PARAGRAPHS_H_
#define TESSERACT_CCMAIN_PARAGRAPHS_H_
#include "rect.h"
#include "ocrpara.h"
#include "genericvector.h"
#include "strngs.h"
class WERD;
class UNICHARSET;
namespace tesseract {
class MutableIterator;
// This structure captures all information needed about a text line for the
// purposes of paragraph detection. It is meant to be exceedingly light-weight
// so that we can easily test paragraph detection independent of the rest of
// Tesseract.
// Lightweight per-textline record consumed by the paragraph detector.
// All fields are plain data filled in from Tesseract output; the class has
// no behavior of its own.
class RowInfo {
 public:
  // Constant data derived from Tesseract output.
  STRING text;        // the full UTF-8 text of the line.
  bool ltr;           // whether the majority of the text is left-to-right
  // TODO(eger) make this more fine-grained.
  bool has_leaders;   // does the line contain leader dots (.....)?
  bool has_drop_cap;  // does the line have a drop cap?
  int pix_ldistance;  // distance to the left pblock boundary in pixels
  int pix_rdistance;  // distance to the right pblock boundary in pixels
  float pix_xheight;  // guessed xheight for the line
  int average_interword_space;  // average space between words in pixels.
  int num_words;      // number of words on this line.
  TBOX lword_box;     // in normalized (horiz text rows) space
  TBOX rword_box;     // in normalized (horiz text rows) space
  STRING lword_text;  // the UTF-8 text of the leftmost werd
  STRING rword_text;  // the UTF-8 text of the rightmost werd
  // The text of a paragraph typically starts with the start of an idea and
  // ends with the end of an idea. Here we define paragraph as something that
  // may have a first line indent and a body indent which may be different.
  // Typical words that start an idea are:
  //   1. Words in western scripts that start with
  //      a capital letter, for example "The"
  //   2. Bulleted or numbered list items, for
  //      example "2."
  // Typical words which end an idea are words ending in punctuation marks. In
  // this vocabulary, each list item is represented as a paragraph.
  bool lword_indicates_list_item;  // leftmost word looks like a list marker
  bool lword_likely_starts_idea;   // leftmost word plausibly begins an idea
  bool lword_likely_ends_idea;     // leftmost word plausibly ends an idea
  bool rword_indicates_list_item;  // rightmost word looks like a list marker
  bool rword_likely_starts_idea;   // rightmost word plausibly begins an idea
  bool rword_likely_ends_idea;     // rightmost word plausibly ends an idea
};
// Main entry point for Paragraph Detection Algorithm.
//
// Given a set of equally spaced textlines (described by row_infos),
// Split them into paragraphs. See http://goto/paragraphstalk
//
// Output:
// row_owners - one pointer for each row, to the paragraph it belongs to.
// paragraphs - this is the actual list of PARA objects.
// models - the list of paragraph models referenced by the PARA objects.
// caller is responsible for deleting the models.
void DetectParagraphs(int debug_level,
GenericVector<RowInfo> *row_infos,
GenericVector<PARA *> *row_owners,
PARA_LIST *paragraphs,
GenericVector<ParagraphModel *> *models);
// Given a MutableIterator to the start of a block, run DetectParagraphs on
// that block and commit the results to the underlying ROW and BLOCK structs,
// saving the ParagraphModels in models. Caller owns the models.
// We use unicharset during the function to answer questions such as "is the
// first letter of this word upper case?"
void DetectParagraphs(int debug_level,
bool after_text_recognition,
const MutableIterator *block_start,
GenericVector<ParagraphModel *> *models);
} // namespace
#endif // TESSERACT_CCMAIN_PARAGRAPHS_H_
| C++ |
/******************************************************************
* File: superscript.cpp
* Description: Correction pass to fix superscripts and subscripts.
* Author: David Eger
* Created: Mon Mar 12 14:05:00 PDT 2012
*
* (C) Copyright 2012, Google, Inc.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
#include "normalis.h"
#include "tesseractclass.h"
// Returns the total count of chopped-blob pieces covered by the first
// num_unichars unichars of word, as recorded in word->best_state
// (one piece-count entry per unichar).
static int LeadingUnicharsToChopped(WERD_RES *word, int num_unichars) {
  int total_pieces = 0;
  int index = 0;
  while (index < num_unichars) {
    total_pieces += word->best_state[index];
    ++index;
  }
  return total_pieces;
}
// Returns the total count of chopped-blob pieces covered by the last
// num_unichars unichars of word, as recorded in word->best_state
// (one piece-count entry per unichar), walking backwards from the end.
static int TrailingUnicharsToChopped(WERD_RES *word, int num_unichars) {
  const int last_index = word->best_state.size() - 1;
  int total_pieces = 0;
  for (int offset = 0; offset < num_unichars; ++offset) {
    total_pieces += word->best_state[last_index - offset];
  }
  return total_pieces;
}
namespace tesseract {
/**
* Given a recognized blob, see if a contiguous collection of sub-pieces
* (chopped blobs) starting at its left might qualify as being a subscript
* or superscript letter based only on y position. Also do this for the
* right side.
*/
void YOutlierPieces(WERD_RES *word, int rebuilt_blob_index,
                    int super_y_bottom, int sub_y_top,
                    ScriptPos *leading_pos, int *num_leading_outliers,
                    ScriptPos *trailing_pos, int *num_trailing_outliers) {
  // Any of the output pointers may be NULL; redirect to throwaway locals so
  // the body can write unconditionally.
  ScriptPos sp_unused1, sp_unused2;
  int unused1, unused2;
  if (!leading_pos) leading_pos = &sp_unused1;
  if (!num_leading_outliers) num_leading_outliers = &unused1;
  if (!trailing_pos) trailing_pos = &sp_unused2;
  if (!num_trailing_outliers) num_trailing_outliers = &unused2;
  *num_leading_outliers = *num_trailing_outliers = 0;
  *leading_pos = *trailing_pos = SP_NORMAL;
  // Map the rebuilt blob index to its run of chopped sub-pieces using
  // best_state, which records pieces-per-rebuilt-blob.
  int chopped_start = LeadingUnicharsToChopped(word, rebuilt_blob_index);
  int num_chopped_pieces = word->best_state[rebuilt_blob_index];
  ScriptPos last_pos = SP_NORMAL;
  int trailing_outliers = 0;
  for (int i = 0; i < num_chopped_pieces; i++) {
    TBOX box = word->chopped_word->blobs[chopped_start + i]->bounding_box();
    // Classify each piece purely by vertical position.
    ScriptPos pos = SP_NORMAL;
    if (box.bottom() >= super_y_bottom) {
      pos = SP_SUPERSCRIPT;
    } else if (box.top() <= sub_y_top) {
      pos = SP_SUBSCRIPT;
    }
    if (pos == SP_NORMAL) {
      // If every piece seen so far was an outlier, the run just ended is the
      // leading outlier run.
      if (trailing_outliers == i) {
        *num_leading_outliers = trailing_outliers;
        *leading_pos = last_pos;
      }
      trailing_outliers = 0;
    } else {
      // Extend the current outlier run, or restart it when the outlier kind
      // (super vs sub) flips.
      if (pos == last_pos) {
        trailing_outliers++;
      } else {
        trailing_outliers = 1;
      }
    }
    last_pos = pos;
  }
  // Whatever outlier run is still open at the end is the trailing run.
  *num_trailing_outliers = trailing_outliers;
  *trailing_pos = last_pos;
}
/**
* Attempt to split off any high (or low) bits at the ends of the word with poor
* certainty and recognize them separately. If the certainty gets much better
 * and other sanity checks pass, accept.
*
* This superscript fix is meant to be called in the second pass of recognition
* when we have tried once and already have a preliminary answer for word.
*
* @return Whether we modified the given word.
*/
// Attempts to split off poorly-certain high/low characters at the ends of
// word and re-recognize them as superscripts/subscripts. Returns true and
// updates word in place if the re-recognition is believable.
bool Tesseract::SubAndSuperscriptFix(WERD_RES *word) {
  // Bail on failed words, repeated-character words, or missing results.
  if (word->tess_failed || word->word->flag(W_REP_CHAR) ||
      !word->best_choice) {
    return false;
  }
  int num_leading, num_trailing;
  ScriptPos sp_leading, sp_trailing;
  float leading_certainty, trailing_certainty;
  float avg_certainty, unlikely_threshold;
  // Calculate the number of whole suspicious characters at the edges.
  GetSubAndSuperscriptCandidates(
      word, &num_leading, &sp_leading, &leading_certainty,
      &num_trailing, &sp_trailing, &trailing_certainty,
      &avg_certainty, &unlikely_threshold);
  const char *leading_pos = sp_leading == SP_SUBSCRIPT ? "sub" : "super";
  const char *trailing_pos = sp_trailing == SP_SUBSCRIPT ? "sub" : "super";
  int num_blobs = word->best_choice->length();
  // Calculate the remainder (partial characters) at the edges.
  // This accounts for us having classified the best version of
  // a word as [speaker?'] when it was instead [speaker.^{21}]
  // (that is we accidentally thought the 2 was attached to the period).
  int num_remainder_leading = 0, num_remainder_trailing = 0;
  if (num_leading + num_trailing < num_blobs && unlikely_threshold < 0.0) {
    int super_y_bottom =
        kBlnBaselineOffset + kBlnXHeight * superscript_min_y_bottom;
    int sub_y_top =
        kBlnBaselineOffset + kBlnXHeight * subscript_max_y_top;
    // Examine the last non-trailing character for partial outlier pieces.
    int last_word_char = num_blobs - 1 - num_trailing;
    float last_char_certainty = word->best_choice->certainty(last_word_char);
    if (word->best_choice->unichar_id(last_word_char) != 0 &&
        last_char_certainty <= unlikely_threshold) {
      ScriptPos rpos;
      YOutlierPieces(word, last_word_char, super_y_bottom, sub_y_top,
                     NULL, NULL, &rpos, &num_remainder_trailing);
      // Only accept the remainder if its direction agrees with the whole
      // trailing blobs already found.
      if (num_trailing > 0 && rpos != sp_trailing) num_remainder_trailing = 0;
      if (num_remainder_trailing > 0 &&
          last_char_certainty < trailing_certainty) {
        trailing_certainty = last_char_certainty;
      }
    }
    // Don't consume the same blob for both a leading and trailing remainder.
    bool another_blob_available = (num_remainder_trailing == 0) ||
        num_leading + num_trailing + 1 < num_blobs;
    // Bug fix: certainty() returns a float; the previous int declaration
    // truncated the (negative) certainty toward zero, corrupting the
    // comparison against unlikely_threshold below.
    float first_char_certainty = word->best_choice->certainty(num_leading);
    if (another_blob_available &&
        word->best_choice->unichar_id(num_leading) != 0 &&
        first_char_certainty <= unlikely_threshold) {
      ScriptPos lpos;
      YOutlierPieces(word, num_leading, super_y_bottom, sub_y_top,
                     &lpos, &num_remainder_leading, NULL, NULL);
      // As above, the remainder must match the leading outlier direction.
      if (num_leading > 0 && lpos != sp_leading) num_remainder_leading = 0;
      if (num_remainder_leading > 0 &&
          first_char_certainty < leading_certainty) {
        leading_certainty = first_char_certainty;
      }
    }
  }
  // If nothing to do, bail now.
  if (num_leading + num_trailing +
      num_remainder_leading + num_remainder_trailing == 0) {
    return false;
  }
  if (superscript_debug >= 1) {
    tprintf("Candidate for superscript detection: %s (",
            word->best_choice->unichar_string().string());
    if (num_leading || num_remainder_leading) {
      tprintf("%d.%d %s-leading ", num_leading, num_remainder_leading,
              leading_pos);
    }
    if (num_trailing || num_remainder_trailing) {
      tprintf("%d.%d %s-trailing ", num_trailing, num_remainder_trailing,
              trailing_pos);
    }
    tprintf(")\n");
  }
  if (superscript_debug >= 3) {
    word->best_choice->print();
  }
  if (superscript_debug >= 2) {
    tprintf(" Certainties -- Average: %.2f  Unlikely thresh: %.2f  ",
            avg_certainty, unlikely_threshold);
    if (num_leading)
      tprintf("Orig. leading (min): %.2f  ", leading_certainty);
    if (num_trailing)
      tprintf("Orig. trailing (min): %.2f  ", trailing_certainty);
    tprintf("\n");
  }
  // We've now calculated the number of rebuilt blobs we want to carve off.
  // However, split_word() works from TBLOBs in chopped_word, so we need to
  // convert to those.
  int num_chopped_leading =
      LeadingUnicharsToChopped(word, num_leading) + num_remainder_leading;
  int num_chopped_trailing =
      TrailingUnicharsToChopped(word, num_trailing) + num_remainder_trailing;
  int retry_leading = 0;
  int retry_trailing = 0;
  bool is_good = false;
  WERD_RES *revised = TrySuperscriptSplits(
      num_chopped_leading, leading_certainty, sp_leading,
      num_chopped_trailing, trailing_certainty, sp_trailing,
      word, &is_good, &retry_leading, &retry_trailing);
  if (is_good) {
    word->ConsumeWordResults(revised);
  } else if (retry_leading || retry_trailing) {
    // The first attempt failed, but suggested smaller splits to retry.
    int retry_chopped_leading =
        LeadingUnicharsToChopped(revised, retry_leading);
    int retry_chopped_trailing =
        TrailingUnicharsToChopped(revised, retry_trailing);
    WERD_RES *revised2 = TrySuperscriptSplits(
        retry_chopped_leading, leading_certainty, sp_leading,
        retry_chopped_trailing, trailing_certainty, sp_trailing,
        revised, &is_good, &retry_leading, &retry_trailing);
    if (is_good) {
      word->ConsumeWordResults(revised2);
    }
    delete revised2;
  }
  delete revised;
  return is_good;
}
/**
* Determine how many characters (rebuilt blobs) on each end of a given word
* might plausibly be superscripts so SubAndSuperscriptFix can try to
* re-recognize them. Even if we find no whole blobs at either end,
* we will set *unlikely_threshold to a certainty that might be used to
* select "bad enough" outlier characters. If *unlikely_threshold is set to 0,
* though, there's really no hope.
*
* @param[in] word The word to examine.
* @param[out] num_rebuilt_leading the number of rebuilt blobs at the start
* of the word which are all up or down and
* seem badly classified.
* @param[out] leading_pos "super" or "sub" (for debugging)
* @param[out] leading_certainty the worst certainty in the leading blobs.
* @param[out] num_rebuilt_trailing the number of rebuilt blobs at the end
* of the word which are all up or down and
* seem badly classified.
* @param[out] trailing_pos "super" or "sub" (for debugging)
* @param[out] trailing_certainty the worst certainty in the trailing blobs.
* @param[out] avg_certainty the average certainty of "normal" blobs in
* the word.
* @param[out] unlikely_threshold the threshold (on certainty) we used to
* select "bad enough" outlier characters.
*/
void Tesseract::GetSubAndSuperscriptCandidates(const WERD_RES *word,
                                               int *num_rebuilt_leading,
                                               ScriptPos *leading_pos,
                                               float *leading_certainty,
                                               int *num_rebuilt_trailing,
                                               ScriptPos *trailing_pos,
                                               float *trailing_certainty,
                                               float *avg_certainty,
                                               float *unlikely_threshold) {
  // Initialize all outputs so early returns leave them well-defined.
  *avg_certainty = *unlikely_threshold = 0.0f;
  *num_rebuilt_leading = *num_rebuilt_trailing = 0;
  *leading_certainty = *trailing_certainty = 0.0f;
  // Vertical thresholds (in baseline-normalized coordinates): above
  // super_y_bottom => superscript candidate, below sub_y_top => subscript.
  int super_y_bottom =
      kBlnBaselineOffset + kBlnXHeight * superscript_min_y_bottom;
  int sub_y_top =
      kBlnBaselineOffset + kBlnXHeight * subscript_max_y_top;
  // Step one: Get an average certainty for "normally placed" characters.
  // Counts here are of blobs in the rebuild_word / unichars in best_choice.
  *leading_pos = *trailing_pos = SP_NORMAL;
  int leading_outliers = 0;
  int trailing_outliers = 0;
  int num_normal = 0;
  float normal_certainty_total = 0.0f;
  float worst_normal_certainty = 0.0f;
  ScriptPos last_pos = SP_NORMAL;
  int num_blobs = word->rebuild_word->NumBlobs();
  for (int b = 0; b < num_blobs; ++b) {
    TBOX box = word->rebuild_word->blobs[b]->bounding_box();
    ScriptPos pos = SP_NORMAL;
    if (box.bottom() >= super_y_bottom) {
      pos = SP_SUPERSCRIPT;
    } else if (box.top() <= sub_y_top) {
      pos = SP_SUBSCRIPT;
    }
    if (pos == SP_NORMAL) {
      // Accumulate certainty statistics over normally-placed characters.
      if (word->best_choice->unichar_id(b) != 0) {
        float char_certainty = word->best_choice->certainty(b);
        if (char_certainty < worst_normal_certainty) {
          worst_normal_certainty = char_certainty;
        }
        num_normal++;
        normal_certainty_total += char_certainty;
      }
      // If every blob so far was an outlier, that run is the leading run.
      if (trailing_outliers == b) {
        leading_outliers = trailing_outliers;
        *leading_pos = last_pos;
      }
      trailing_outliers = 0;
    } else {
      // Extend the outlier run, or restart it when the direction flips.
      if (last_pos == pos) {
        trailing_outliers++;
      } else {
        trailing_outliers = 1;
      }
    }
    last_pos = pos;
  }
  *trailing_pos = last_pos;
  if (num_normal >= 3) {  // throw out the worst as an outlier.
    num_normal--;
    normal_certainty_total -= worst_normal_certainty;
  }
  if (num_normal > 0) {
    *avg_certainty = normal_certainty_total / num_normal;
    *unlikely_threshold = superscript_worse_certainty * (*avg_certainty);
  }
  // No statistics or no outlier runs: nothing to propose.
  if (num_normal == 0 ||
      (leading_outliers == 0 && trailing_outliers == 0)) {
    return;
  }
  // Step two: Try to split off bits of the word that are both outliers
  //           and have much lower certainty than average
  // Calculate num_leading and leading_certainty.
  for (*leading_certainty = 0.0f, *num_rebuilt_leading = 0;
       *num_rebuilt_leading < leading_outliers;
       (*num_rebuilt_leading)++) {
    float char_certainty = word->best_choice->certainty(*num_rebuilt_leading);
    // Stop at the first leading outlier that is not "bad enough".
    if (char_certainty > *unlikely_threshold) {
      break;
    }
    // Track the minimum (worst) certainty among the accepted blobs.
    if (char_certainty < *leading_certainty) {
      *leading_certainty = char_certainty;
    }
  }
  // Calculate num_trailing and trailing_certainty.
  for (*trailing_certainty = 0.0f, *num_rebuilt_trailing = 0;
       *num_rebuilt_trailing < trailing_outliers;
       (*num_rebuilt_trailing)++) {
    int blob_idx = num_blobs - 1 - *num_rebuilt_trailing;
    float char_certainty = word->best_choice->certainty(blob_idx);
    if (char_certainty > *unlikely_threshold) {
      break;
    }
    if (char_certainty < *trailing_certainty) {
      *trailing_certainty = char_certainty;
    }
  }
}
/**
* Try splitting off the given number of (chopped) blobs from the front and
* back of the given word and recognizing the pieces.
*
* @param[in] num_chopped_leading how many chopped blobs from the left
* end of the word to chop off and try recognizing as a
* superscript (or subscript)
* @param[in] leading_certainty the (minimum) certainty had by the
* characters in the original leading section.
* @param[in] leading_pos "super" or "sub" (for debugging)
* @param[in] num_chopped_trailing how many chopped blobs from the right
* end of the word to chop off and try recognizing as a
* superscript (or subscript)
* @param[in] trailing_certainty the (minimum) certainty had by the
* characters in the original trailing section.
* @param[in] trailing_pos "super" or "sub" (for debugging)
* @param[in] word the word to try to chop up.
* @param[out] is_good do we believe our result?
* @param[out] retry_rebuild_leading, retry_rebuild_trailing
* If non-zero, and !is_good, then the caller may have luck trying
* to split the returned word with this number of (rebuilt) leading
* and trailing blobs / unichars.
* @return A word which is the result of re-recognizing as asked.
*/
WERD_RES *Tesseract::TrySuperscriptSplits(
    int num_chopped_leading, float leading_certainty, ScriptPos leading_pos,
    int num_chopped_trailing, float trailing_certainty,
    ScriptPos trailing_pos,
    WERD_RES *word,
    bool *is_good,
    int *retry_rebuild_leading, int *retry_rebuild_trailing) {
  int num_chopped = word->chopped_word->NumBlobs();
  *retry_rebuild_leading = *retry_rebuild_trailing = 0;
  // Chop apart the word into up to three pieces.
  // bb0/bb1 receive the original blamer bundles detached by split_word();
  // they are stitched back in by join_words() below.
  BlamerBundle *bb0 = NULL;
  BlamerBundle *bb1 = NULL;
  WERD_RES *prefix = NULL;
  WERD_RES *core = NULL;
  WERD_RES *suffix = NULL;
  if (num_chopped_leading > 0) {
    prefix = new WERD_RES(*word);
    split_word(prefix, num_chopped_leading, &core, &bb0);
  } else {
    core = new WERD_RES(*word);
  }
  if (num_chopped_trailing > 0) {
    // split_pt is relative to core, which no longer contains the prefix.
    int split_pt = num_chopped - num_chopped_trailing - num_chopped_leading;
    split_word(core, split_pt, &suffix, &bb1);
  }
  // Recognize the pieces in turn.
  // Save the multipliers so the y-position penalties can be restored.
  int saved_cp_multiplier = classify_class_pruner_multiplier;
  int saved_im_multiplier = classify_integer_matcher_multiplier;
  if (prefix) {
    // Turn off Tesseract's y-position penalties for the leading superscript.
    classify_class_pruner_multiplier.set_value(0);
    classify_integer_matcher_multiplier.set_value(0);
    // Adjust our expectations about the baseline for this prefix.
    if (superscript_debug >= 3) {
      tprintf(" recognizing first %d chopped blobs\n", num_chopped_leading);
    }
    recog_word_recursive(prefix);
    if (superscript_debug >= 2) {
      tprintf(" The leading bits look like %s %s\n",
              ScriptPosToString(leading_pos),
              prefix->best_choice->unichar_string().string());
    }
    // Restore the normal y-position penalties.
    classify_class_pruner_multiplier.set_value(saved_cp_multiplier);
    classify_integer_matcher_multiplier.set_value(saved_im_multiplier);
  }
  if (superscript_debug >= 3) {
    tprintf(" recognizing middle %d chopped blobs\n",
            num_chopped - num_chopped_leading - num_chopped_trailing);
  }
  if (suffix) {
    // Turn off Tesseract's y-position penalties for the trailing superscript.
    classify_class_pruner_multiplier.set_value(0);
    classify_integer_matcher_multiplier.set_value(0);
    if (superscript_debug >= 3) {
      tprintf(" recognizing last %d chopped blobs\n", num_chopped_trailing);
    }
    recog_word_recursive(suffix);
    if (superscript_debug >= 2) {
      tprintf(" The trailing bits look like %s %s\n",
              ScriptPosToString(trailing_pos),
              suffix->best_choice->unichar_string().string());
    }
    // Restore the normal y-position penalties.
    classify_class_pruner_multiplier.set_value(saved_cp_multiplier);
    classify_integer_matcher_multiplier.set_value(saved_im_multiplier);
  }
  // Evaluate whether we think the results are believably better
  // than what we already had.
  bool good_prefix = !prefix || BelievableSuperscript(
      superscript_debug >= 1, *prefix,
      superscript_bettered_certainty * leading_certainty,
      retry_rebuild_leading, NULL);
  bool good_suffix = !suffix || BelievableSuperscript(
      superscript_debug >= 1, *suffix,
      superscript_bettered_certainty * trailing_certainty,
      NULL, retry_rebuild_trailing);
  *is_good = good_prefix && good_suffix;
  if (!*is_good && !*retry_rebuild_leading && !*retry_rebuild_trailing) {
    // None of it is any good. Quit now.
    // NOTE(review): bb0/bb1 (the detached blamer bundles) appear not to be
    // freed on this path — looks like a leak; confirm against WERD_RES
    // ownership rules.
    delete core;
    delete prefix;
    delete suffix;
    return NULL;
  }
  recog_word_recursive(core);
  // Now paste the results together into core.
  if (suffix) {
    suffix->SetAllScriptPositions(trailing_pos);
    join_words(core, suffix, bb1);
  }
  if (prefix) {
    prefix->SetAllScriptPositions(leading_pos);
    join_words(prefix, core, bb0);
    // The joined result lives in prefix; hand it back as core.
    core = prefix;
    prefix = NULL;
  }
  if (superscript_debug >= 1) {
    tprintf("%s superscript fix: %s\n", *is_good ? "ACCEPT" : "REJECT",
            core->best_choice->unichar_string().string());
  }
  return core;
}
/**
* Return whether this is believable superscript or subscript text.
*
* We insist that:
* + there are no punctuation marks.
* + there are no italics.
* + no normal-sized character is smaller than superscript_scaledown_ratio
* of what it ought to be, and
* + each character is at least as certain as certainty_threshold.
*
* @param[in] debug If true, spew debug output
* @param[in] word The word whose best_choice we're evaluating
* @param[in] certainty_threshold If any of the characters have less
* certainty than this, reject.
* @param[out] left_ok How many left-side characters were ok?
* @param[out] right_ok How many right-side characters were ok?
* @return Whether the complete best choice is believable as a superscript.
*/
bool Tesseract::BelievableSuperscript(bool debug,
                                      const WERD_RES &word,
                                      float certainty_threshold,
                                      int *left_ok,
                                      int *right_ok) const {
  // Track runs of acceptable characters from the left (initial_ok_run_count)
  // and the currently-open run ending at the right (ok_run_count).
  int initial_ok_run_count = 0;
  int ok_run_count = 0;
  float worst_certainty = 0.0f;
  const WERD_CHOICE &wc = *word.best_choice;
  const UnicityTable<FontInfo>& fontinfo_table = get_fontinfo_table();
  for (int i = 0; i < wc.length(); i++) {
    TBLOB *blob = word.rebuild_word->blobs[i];
    UNICHAR_ID unichar_id = wc.unichar_id(i);
    float char_certainty = wc.certainty(i);
    bool bad_certainty = char_certainty < certainty_threshold;
    bool is_punc = wc.unicharset()->get_ispunctuation(unichar_id);
    // Word-level italic flag; refined below from the per-blob choice.
    bool is_italic = word.fontinfo && word.fontinfo->is_italic();
    BLOB_CHOICE *choice = word.GetBlobChoice(i);
    if (choice && fontinfo_table.size() > 0) {
      // Get better information from the specific choice, if available.
      int font_id1 = choice->fontinfo_id();
      bool font1_is_italic = font_id1 >= 0
          ? fontinfo_table.get(font_id1).is_italic() : false;
      int font_id2 = choice->fontinfo_id2();
      // Italic only if both candidate fonts agree (or the second is unset).
      is_italic = font1_is_italic &&
          (font_id2 < 0 || fontinfo_table.get(font_id2).is_italic());
    }
    float height_fraction = 1.0f;
    float char_height = blob->bounding_box().height();
    float normal_height = char_height;
    if (wc.unicharset()->top_bottom_useful()) {
      // Estimate this character's expected height from the unicharset's
      // top/bottom statistics.
      int min_bot, max_bot, min_top, max_top;
      wc.unicharset()->get_top_bottom(unichar_id,
                                      &min_bot, &max_bot,
                                      &min_top, &max_top);
      float hi_height = max_top - max_bot;
      float lo_height = min_top - min_bot;
      normal_height = (hi_height + lo_height) / 2;
      if (normal_height >= kBlnXHeight) {
        // Only ding characters that we have decent information for because
        // they're supposed to be normal sized, not tiny specks or dashes.
        height_fraction = char_height / normal_height;
      }
    }
    bool bad_height = height_fraction < superscript_scaledown_ratio;
    if (debug) {
      if (is_italic) {
        tprintf(" Rejecting: superscript is italic.\n");
      }
      if (is_punc) {
        tprintf(" Rejecting: punctuation present.\n");
      }
      const char *char_str = wc.unicharset()->id_to_unichar(unichar_id);
      if (bad_certainty) {
        tprintf(" Rejecting: don't believe character %s with certainty %.2f "
                "which is less than threshold %.2f\n", char_str,
                char_certainty, certainty_threshold);
      }
      if (bad_height) {
        tprintf(" Rejecting: character %s seems too small @ %.2f versus "
                "expected %.2f\n", char_str, char_height, normal_height);
      }
    }
    if (bad_certainty || bad_height || is_punc || is_italic) {
      // If every character so far was ok, the run just ended is the
      // left-side ok run.
      if (ok_run_count == i) {
        initial_ok_run_count = ok_run_count;
      }
      ok_run_count = 0;
    } else {
      ok_run_count++;
    }
    if (char_certainty < worst_certainty) {
      worst_certainty = char_certainty;
    }
  }
  // Believable only if every single character passed.
  bool all_ok = ok_run_count == wc.length();
  if (all_ok && debug) {
    tprintf(" Accept: worst revised certainty is %.2f\n", worst_certainty);
  }
  if (!all_ok) {
    // Report how many edge characters were ok, for the caller's retry logic.
    if (left_ok) *left_ok = initial_ok_run_count;
    if (right_ok) *right_ok = ok_run_count;
  }
  return all_ok;
}
} // namespace tesseract
| C++ |
/**********************************************************************
* File: cube_reco_context.h
* Description: Declaration of the Cube Recognition Context Class
* Author: Ahmad Abdulkader
* Created: 2007
*
* (C) Copyright 2008, Google Inc.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
// The CubeRecoContext class abstracts the Cube OCR Engine. Typically a process
// (or a thread) would create one CubeRecoContext object per language.
// The CubeRecoContext object also provides methods to get and set the
// different attribues of the Cube OCR Engine.
#ifndef CUBE_RECO_CONTEXT_H
#define CUBE_RECO_CONTEXT_H
#include <string>
#include "neural_net.h"
#include "lang_model.h"
#include "classifier_base.h"
#include "feature_base.h"
#include "char_set.h"
#include "word_size_model.h"
#include "char_bigrams.h"
#include "word_unigrams.h"
namespace tesseract {
class Tesseract;
class TessdataManager;
// Per-language context for the Cube OCR engine. Bundles the character set,
// classifier, language model, size model and n-gram models for one language
// and exposes accessors plus runtime recognition flags.
class CubeRecoContext {
 public:
  // Reading order enum type
  enum ReadOrder {
    L2R,  // left-to-right
    R2L   // right-to-left
  };
  // Instantiate using a Tesseract object
  CubeRecoContext(Tesseract *tess_obj);
  ~CubeRecoContext();
  // accessor functions for the owned components
  inline const string & Lang() const { return lang_; }
  inline CharSet *CharacterSet() const { return char_set_; }
  const UNICHARSET *TessUnicharset() const { return tess_unicharset_; }
  inline CharClassifier *Classifier() const { return char_classifier_; }
  inline WordSizeModel *SizeModel() const { return word_size_model_; }
  inline CharBigrams *Bigrams() const { return char_bigrams_; }
  inline WordUnigrams *WordUnigramsObj() const { return word_unigrams_; }
  inline TuningParams *Params() const { return params_; }
  inline LangModel *LangMod() const { return lang_mod_; }
  // the reading order of the language: R2L only for Arabic ("ara")
  inline ReadOrder ReadingOrder() const {
    return ((lang_ == "ara") ? R2L : L2R);
  }
  // does the language support case: false for Arabic and Hindi
  inline bool HasCase() const {
    return (lang_ != "ara" && lang_ != "hin");
  }
  // is the script cursive: true only for Arabic
  inline bool Cursive() const {
    return (lang_ == "ara");
  }
  // does the language have italic fonts: false for Arabic and Hindi
  inline bool HasItalics() const {
    return (lang_ != "ara" && lang_ != "hin");
  }
  // is the script contextual: true only for Arabic
  inline bool Contextual() const {
    return (lang_ == "ara");
  }
  // RecoContext runtime flags accessor functions. OOD/Numeric/WordList/Punc
  // forward to the language model; CaseSensitive to the char classifier.
  inline bool SizeNormalization() const { return size_normalization_; }
  inline bool NoisyInput() const { return noisy_input_; }
  inline bool OOD() const { return lang_mod_->OOD(); }
  inline bool Numeric() const { return lang_mod_->Numeric(); }
  inline bool WordList() const { return lang_mod_->WordList(); }
  inline bool Punc() const { return lang_mod_->Punc(); }
  inline bool CaseSensitive() const {
    return char_classifier_->CaseSensitive();
  }
  // Runtime flag setters, mirroring the getters above.
  inline void SetSizeNormalization(bool size_normalization) {
    size_normalization_ = size_normalization;
  }
  inline void SetNoisyInput(bool noisy_input) {
    noisy_input_ = noisy_input;
  }
  inline void SetOOD(bool ood_enabled) {
    lang_mod_->SetOOD(ood_enabled);
  }
  inline void SetNumeric(bool numeric_enabled) {
    lang_mod_->SetNumeric(numeric_enabled);
  }
  inline void SetWordList(bool word_list_enabled) {
    lang_mod_->SetWordList(word_list_enabled);
  }
  inline void SetPunc(bool punc_enabled) {
    lang_mod_->SetPunc(punc_enabled);
  }
  inline void SetCaseSensitive(bool case_sensitive) {
    char_classifier_->SetCaseSensitive(case_sensitive);
  }
  inline tesseract::Tesseract *TesseractObject() const {
    return tess_obj_;
  }
  // Returns the path of the data files
  bool GetDataFilePath(string *path) const;
  // Creates a CubeRecoContext object using a tesseract object. Data
  // files are loaded via the tessdata_manager, and the tesseract
  // unicharset is provided in order to map Cube's unicharset to
  // Tesseract's in the case where the two unicharsets differ.
  static CubeRecoContext *Create(Tesseract *tess_obj,
                                 TessdataManager *tessdata_manager,
                                 UNICHARSET *tess_unicharset);
 private:
  bool loaded_;  // presumably set by Load(); see .cpp -- TODO confirm
  string lang_;  // language id string, e.g. "ara", "hin"
  CharSet *char_set_;
  UNICHARSET *tess_unicharset_;
  WordSizeModel *word_size_model_;
  CharClassifier *char_classifier_;
  CharBigrams *char_bigrams_;
  WordUnigrams *word_unigrams_;
  TuningParams *params_;
  LangModel *lang_mod_;
  Tesseract *tess_obj_;  // CubeRecoContext does not own this pointer
  bool size_normalization_;
  bool noisy_input_;
  // Loads and initialized all the necessary components of a
  // CubeRecoContext. See .cpp for more details.
  bool Load(TessdataManager *tessdata_manager,
            UNICHARSET *tess_unicharset);
};
}
#endif // CUBE_RECO_CONTEXT_H
| C++ |
/**********************************************************************
* File: pagewalk.cpp (Formerly walkers.c)
* Description: Block list processors
* Author: Phil Cheatle
* Created: Thu Oct 10 16:25:24 BST 1991
*
* (C) Copyright 1991, Hewlett-Packard Ltd.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
#include "pageres.h"
#include "tesseractclass.h"
/**
* process_selected_words()
*
* Walk the current block list applying the specified word processor function
* to each word that overlaps the selection_box.
*/
namespace tesseract {
// Walks the page results, invoking word_processor on every word whose
// bounding box overlaps selection_box. Stops early if the processor
// returns FALSE.
void Tesseract::process_selected_words(
    PAGE_RES* page_res, // blocks to check
    TBOX & selection_box,
    BOOL8(tesseract::Tesseract::*word_processor)(PAGE_RES_IT* pr_it)) {
  PAGE_RES_IT it(page_res);
  while (it.word() != NULL) {
    WERD* current_word = it.word()->word;
    // Only process words intersecting the selection rectangle.
    if (current_word->bounding_box().overlap(selection_box) &&
        !(this->*word_processor)(&it)) {
      return;  // Processor asked us to stop.
    }
    it.forward();
  }
}
} // namespace tesseract
| C++ |
///////////////////////////////////////////////////////////////////////
// File: resultiterator.cpp
// Description: Iterator for tesseract results that is capable of
// iterating in proper reading order over Bi Directional
// (e.g. mixed Hebrew and English) text.
// Author: David Eger
// Created: Fri May 27 13:58:06 PST 2011
//
// (C) Copyright 2011, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#include "resultiterator.h"
#include "allheaders.h"
#include "pageres.h"
#include "strngs.h"
#include "tesseractclass.h"
#include "unicharset.h"
#include "unicodes.h"
namespace tesseract {
// Constructs a reading-order iterator from a strictly left-to-right one,
// determines the current paragraph's direction, and positions itself at
// the logical start of the textline.
ResultIterator::ResultIterator(const LTRResultIterator &resit)
    : LTRResultIterator(resit) {
  at_beginning_of_minor_run_ = false;
  in_minor_direction_ = false;
  current_paragraph_is_ltr_ = CurrentParagraphIsLtr();
  MoveToLogicalStartOfTextline();
}
// Factory: returns a new, caller-owned ResultIterator positioned at the
// logical start of resit's textline.
ResultIterator *ResultIterator::StartOfParagraph(
    const LTRResultIterator &resit) {
  ResultIterator *result = new ResultIterator(resit);
  return result;
}
// Returns the cached direction of the current paragraph (true == LTR),
// as computed by CurrentParagraphIsLtr().
bool ResultIterator::ParagraphIsLtr() const {
  return current_paragraph_is_ltr_;
}
// Heuristically determines whether the current paragraph reads
// left-to-right. Returns true for LTR, and true when there is no word
// at all (in which case the answer doesn't matter).
bool ResultIterator::CurrentParagraphIsLtr() const {
  if (!it_->word())
    return true;  // doesn't matter.
  LTRResultIterator it(*this);
  it.RestartParagraph();
  // Try to figure out the ltr-ness of the paragraph. The rules below
  // make more sense in the context of a difficult paragraph example.
  // Here we denote {ltr characters, RTL CHARACTERS}:
  //
  // "don't go in there!" DAIS EH
  // EHT OTNI DEPMUJ FELSMIH NEHT DNA
  // .GNIDLIUB GNINRUB
  //
  // On the first line, the left-most word is LTR and the rightmost word
  // is RTL. Thus, we are better off taking the majority direction for
  // the whole paragraph contents. So instead of "the leftmost word is LTR"
  // indicating an LTR paragraph, we use a heuristic about what RTL paragraphs
  // would not do: Typically an RTL paragraph would *not* start with an LTR
  // word. So our heuristics are as follows:
  //
  // (1) If the first text line has an RTL word in the left-most position
  //     it is RTL.
  // (2) If the first text line has an LTR word in the right-most position
  //     it is LTR.
  // (3) If neither of the above is true, take the majority count for the
  //     paragraph -- if there are more rtl words, it is RTL. If there
  //     are more LTR words, it's LTR.
  bool leftmost_rtl = it.WordDirection() == DIR_RIGHT_TO_LEFT;
  bool rightmost_ltr = it.WordDirection() == DIR_LEFT_TO_RIGHT;
  int num_ltr, num_rtl;
  num_rtl = leftmost_rtl ? 1 : 0;
  num_ltr = (it.WordDirection() == DIR_LEFT_TO_RIGHT) ? 1 : 0;
  // Scan the remainder of the first text line, tracking the direction of
  // the last (rightmost) word and tallying strong directions.
  for (it.Next(RIL_WORD);
       !it.Empty(RIL_WORD) && !it.IsAtBeginningOf(RIL_TEXTLINE);
       it.Next(RIL_WORD)) {
    StrongScriptDirection dir = it.WordDirection();
    rightmost_ltr = (dir == DIR_LEFT_TO_RIGHT);
    num_rtl += (dir == DIR_RIGHT_TO_LEFT) ? 1 : 0;
    num_ltr += rightmost_ltr ? 1 : 0;
  }
  if (leftmost_rtl)
    return false;
  if (rightmost_ltr)
    return true;
  // First line is ambiguous. Take statistics on the whole paragraph.
  if (!it.Empty(RIL_WORD) && !it.IsAtBeginningOf(RIL_PARA)) do {
    StrongScriptDirection dir = it.WordDirection();
    num_rtl += (dir == DIR_RIGHT_TO_LEFT) ? 1 : 0;
    num_ltr += (dir == DIR_LEFT_TO_RIGHT) ? 1 : 0;
  } while (it.Next(RIL_WORD) && !it.IsAtBeginningOf(RIL_PARA));
  return num_ltr >= num_rtl;
}
// Negative marker values interleaved with word indices in the reading-order
// lists produced by CalculateTextlineOrder: kMinorRunStart/kMinorRunEnd
// bracket a run of minor-direction words; kComplexWord follows a
// mixed-direction (DIR_MIX) word.
const int ResultIterator::kMinorRunStart = -1;
const int ResultIterator::kMinorRunEnd = -2;
const int ResultIterator::kComplexWord = -3;
// Computes the logical (reading) order of the current word's blobs and
// fills blob_indices with the blob indices in that order. Implements a
// simplified per-word bidi resolution: mark European-number sequences,
// collapse everything to L/R runs, then emit the runs in reading order.
void ResultIterator::CalculateBlobOrder(
    GenericVector<int> *blob_indices) const {
  bool context_is_ltr = current_paragraph_is_ltr_ ^ in_minor_direction_;
  blob_indices->clear();
  if (Empty(RIL_WORD)) return;
  if (context_is_ltr || it_->word()->UnicharsInReadingOrder()) {
    // Easy! just return the blobs in order;
    for (int i = 0; i < word_length_; i++)
      blob_indices->push_back(i);
    return;
  }
  // The blobs are in left-to-right order, but the current reading context
  // is right-to-left.
  const int U_LTR = UNICHARSET::U_LEFT_TO_RIGHT;
  const int U_RTL = UNICHARSET::U_RIGHT_TO_LEFT;
  const int U_EURO_NUM = UNICHARSET::U_EUROPEAN_NUMBER;
  const int U_EURO_NUM_SEP = UNICHARSET::U_EUROPEAN_NUMBER_SEPARATOR;
  const int U_EURO_NUM_TERM = UNICHARSET::U_EUROPEAN_NUMBER_TERMINATOR;
  const int U_COMMON_NUM_SEP = UNICHARSET::U_COMMON_NUMBER_SEPARATOR;
  const int U_OTHER_NEUTRAL = UNICHARSET::U_OTHER_NEUTRAL;
  // Step 1: Scan for and mark European Number sequences
  // [:ET:]*[:EN:]+(([:ES:]|[:CS:])?[:EN:]+)*[:ET:]*
  GenericVector<int> letter_types;
  for (int i = 0; i < word_length_; i++) {
    letter_types.push_back(it_->word()->SymbolDirection(i));
  }
  // Convert a single separator sandwiched between two EN's into an EN.
  for (int i = 0; i + 2 < word_length_; i++) {
    if (letter_types[i] == U_EURO_NUM && letter_types[i + 2] == U_EURO_NUM &&
        (letter_types[i + 1] == U_EURO_NUM_SEP ||
         letter_types[i + 1] == U_COMMON_NUM_SEP)) {
      letter_types[i + 1] = U_EURO_NUM;
    }
  }
  // Scan for sequences of European Number Terminators around ENs and convert
  // them to ENs.
  for (int i = 0; i < word_length_; i++) {
    if (letter_types[i] == U_EURO_NUM_TERM) {
      int j = i + 1;
      while (j < word_length_ && letter_types[j] == U_EURO_NUM_TERM) { j++; }
      if (j < word_length_ && letter_types[j] == U_EURO_NUM) {
        // The sequence [i..j] should be converted to all European Numbers.
        for (int k = i; k < j; k++) letter_types[k] = U_EURO_NUM;
      }
      j = i - 1;
      while (j > -1 && letter_types[j] == U_EURO_NUM_TERM) { j--; }
      if (j > -1 && letter_types[j] == U_EURO_NUM) {
        // The sequence [j..i] should be converted to all European Numbers.
        for (int k = j; k <= i; k++) letter_types[k] = U_EURO_NUM;
      }
    }
  }
  // Step 2: Convert all remaining types to either L or R.
  // Sequences ([:L:]|[:EN:])+ (([:CS:]|[:ON:])+ ([:L:]|[:EN:])+)* -> L.
  // All other are R.
  for (int i = 0; i < word_length_;) {
    int ti = letter_types[i];
    if (ti == U_LTR || ti == U_EURO_NUM) {
      // Left to right sequence; scan to the end of it.
      int last_good = i;
      for (int j = i + 1; j < word_length_; j++) {
        int tj = letter_types[j];
        if (tj == U_LTR || tj == U_EURO_NUM) {
          last_good = j;
        } else if (tj == U_COMMON_NUM_SEP || tj == U_OTHER_NEUTRAL) {
          // do nothing.
        } else {
          break;
        }
      }
      // [i..last_good] is the L sequence
      for (int k = i; k <= last_good; k++) letter_types[k] = U_LTR;
      i = last_good + 1;
    } else {
      letter_types[i] = U_RTL;
      i++;
    }
  }
  // At this point, letter_types is entirely U_LTR or U_RTL.
  // Emit right-to-left: RTL letters one at a time, each maximal LTR run
  // as a left-to-right unit.
  for (int i = word_length_ - 1; i >= 0;) {
    if (letter_types[i] == U_RTL) {
      blob_indices->push_back(i);
      i--;
    } else {
      // left to right sequence. scan to the beginning.
      int j = i - 1;
      for (; j >= 0 && letter_types[j] != U_RTL; j--) { } // pass
      // Now (j, i] is LTR
      for (int k = j + 1; k <= i; k++) blob_indices->push_back(k);
      i = j;
    }
  }
  ASSERT_HOST(blob_indices->size() == word_length_);
}
static void PrintScriptDirs(const GenericVector<StrongScriptDirection> &dirs) {
for (int i = 0; i < dirs.size(); i++) {
switch (dirs[i]) {
case DIR_NEUTRAL: tprintf ("N "); break;
case DIR_LEFT_TO_RIGHT: tprintf("L "); break;
case DIR_RIGHT_TO_LEFT: tprintf("R "); break;
case DIR_MIX: tprintf("Z "); break;
default: tprintf("? "); break;
}
}
tprintf("\n");
}
void ResultIterator::CalculateTextlineOrder(
bool paragraph_is_ltr,
const LTRResultIterator &resit,
GenericVectorEqEq<int> *word_indices) const {
GenericVector<StrongScriptDirection> directions;
CalculateTextlineOrder(paragraph_is_ltr, resit, &directions, word_indices);
}
void ResultIterator::CalculateTextlineOrder(
bool paragraph_is_ltr,
const LTRResultIterator &resit,
GenericVector<StrongScriptDirection> *dirs_arg,
GenericVectorEqEq<int> *word_indices) const {
GenericVector<StrongScriptDirection> dirs;
GenericVector<StrongScriptDirection> *directions;
directions = (dirs_arg != NULL) ? dirs_arg : &dirs;
directions->truncate(0);
// A LTRResultIterator goes strictly left-to-right word order.
LTRResultIterator ltr_it(resit);
ltr_it.RestartRow();
if (ltr_it.Empty(RIL_WORD)) return;
do {
directions->push_back(ltr_it.WordDirection());
} while (ltr_it.Next(RIL_WORD) && !ltr_it.IsAtBeginningOf(RIL_TEXTLINE));
word_indices->truncate(0);
CalculateTextlineOrder(paragraph_is_ltr, *directions, word_indices);
}
// Computes the logical reading order of one textline given the
// left-to-right sequence of its words' strong directions. reading_order
// receives LTR word indices interleaved with the negative markers
// kMinorRunStart/kMinorRunEnd (bracketing minor-direction runs, emitted
// in reverse) and kComplexWord (after mixed-direction words).
void ResultIterator::CalculateTextlineOrder(
    bool paragraph_is_ltr,
    const GenericVector<StrongScriptDirection> &word_dirs,
    GenericVectorEqEq<int> *reading_order) {
  reading_order->truncate(0);
  if (word_dirs.size() == 0) return;
  // Take all of the runs of minor direction words and insert them
  // in reverse order.
  int minor_direction, major_direction, major_step, start, end;
  if (paragraph_is_ltr) {
    start = 0;
    end = word_dirs.size();
    major_step = 1;
    major_direction = DIR_LEFT_TO_RIGHT;
    minor_direction = DIR_RIGHT_TO_LEFT;
  } else {
    start = word_dirs.size() - 1;
    end = -1;
    major_step = -1;
    major_direction = DIR_RIGHT_TO_LEFT;
    minor_direction = DIR_LEFT_TO_RIGHT;
    // Special rule: if there are neutral words at the right most side
    // of a line adjacent to a left-to-right word in the middle of the
    // line, we interpret the end of the line as a single LTR sequence.
    if (word_dirs[start] == DIR_NEUTRAL) {
      int neutral_end = start;
      while (neutral_end > 0 && word_dirs[neutral_end] == DIR_NEUTRAL) {
        neutral_end--;
      }
      if (neutral_end >= 0 && word_dirs[neutral_end] == DIR_LEFT_TO_RIGHT) {
        // LTR followed by neutrals.
        // Scan for the beginning of the minor left-to-right run.
        int left = neutral_end;
        for (int i = left; i >= 0 && word_dirs[i] != DIR_RIGHT_TO_LEFT; i--) {
          if (word_dirs[i] == DIR_LEFT_TO_RIGHT) left = i;
        }
        reading_order->push_back(kMinorRunStart);
        for (int i = left; i < word_dirs.size(); i++) {
          reading_order->push_back(i);
          if (word_dirs[i] == DIR_MIX) reading_order->push_back(kComplexWord);
        }
        reading_order->push_back(kMinorRunEnd);
        start = left - 1;
      }
    }
  }
  // Walk the line in the major direction, emitting words directly; when a
  // minor-direction word is hit, find the whole minor run and emit it in
  // reverse, bracketed by the run markers.
  for (int i = start; i != end;) {
    if (word_dirs[i] == minor_direction) {
      int j = i;
      while (j != end && word_dirs[j] != major_direction)
        j += major_step;
      if (j == end) j -= major_step;
      while (j != i && word_dirs[j] != minor_direction)
        j -= major_step;
      // [j..i] is a minor direction run.
      reading_order->push_back(kMinorRunStart);
      for (int k = j; k != i; k -= major_step) {
        reading_order->push_back(k);
      }
      reading_order->push_back(i);
      reading_order->push_back(kMinorRunEnd);
      i = j + major_step;
    } else {
      reading_order->push_back(i);
      if (word_dirs[i] == DIR_MIX) reading_order->push_back(kComplexWord);
      i += major_step;
    }
  }
}
int ResultIterator::LTRWordIndex() const {
int this_word_index = 0;
LTRResultIterator textline(*this);
textline.RestartRow();
while (!textline.PositionedAtSameWord(it_)) {
this_word_index++;
textline.Next(RIL_WORD);
}
return this_word_index;
}
// Positions the iterator on the logically first blob of the current word
// (which for an RTL context need not be blob 0).
void ResultIterator::MoveToLogicalStartOfWord() {
  if (word_length_ == 0) {
    BeginWord(0);
    return;
  }
  GenericVector<int> blob_order;
  CalculateBlobOrder(&blob_order);
  // Nothing to do when already positioned at the logical start.
  const bool already_there = blob_order.size() == 0 || blob_order[0] == 0;
  if (!already_there) {
    BeginWord(blob_order[0]);
  }
}
bool ResultIterator::IsAtFinalSymbolOfWord() const {
if (!it_->word()) return true;
GenericVector<int> blob_order;
CalculateBlobOrder(&blob_order);
return blob_order.size() == 0 || blob_order.back() == blob_index_;
}
bool ResultIterator::IsAtFirstSymbolOfWord() const {
if (!it_->word()) return true;
GenericVector<int> blob_order;
CalculateBlobOrder(&blob_order);
return blob_order.size() == 0 || blob_order[0] == blob_index_;
}
// Appends any Unicode direction mark (LRM/RLM) that should follow the
// current word, based on the markers the ordering algorithm placed after
// this word's index in the textline order.
void ResultIterator::AppendSuffixMarks(STRING *text) const {
  if (!it_->word()) return;
  bool reading_direction_is_ltr =
      current_paragraph_is_ltr_ ^ in_minor_direction_;
  // scan forward to see what meta-information the word ordering algorithm
  // left us.
  // If this word is at the *end* of a minor run, insert the other
  // direction's mark; else if this was a complex word, insert the
  // current reading order's mark.
  GenericVectorEqEq<int> textline_order;
  CalculateTextlineOrder(current_paragraph_is_ltr_,
                         *this, &textline_order);
  int this_word_index = LTRWordIndex();
  int i = textline_order.get_index(this_word_index);
  if (i < 0) return;
  // Remember the last negative marker (if any) between this word's entry
  // and the next real word index.
  int last_non_word_mark = 0;
  for (i++; i < textline_order.size() && textline_order[i] < 0; i++) {
    last_non_word_mark = textline_order[i];
  }
  if (last_non_word_mark == kComplexWord) {
    *text += reading_direction_is_ltr ? kLRM : kRLM;
  } else if (last_non_word_mark == kMinorRunEnd) {
    if (current_paragraph_is_ltr_) {
      *text += kLRM;
    } else {
      *text += kRLM;
    }
  }
}
// Repositions the iterator on the logically first word of the current
// textline (for an RTL line this is not the leftmost word), updating
// in_minor_direction_ and at_beginning_of_minor_run_ along the way.
void ResultIterator::MoveToLogicalStartOfTextline() {
  GenericVectorEqEq<int> word_indices;
  RestartRow();
  CalculateTextlineOrder(current_paragraph_is_ltr_,
                         dynamic_cast<const LTRResultIterator&>(*this),
                         &word_indices);
  // Skip leading run markers, tracking the minor-direction state.
  int i = 0;
  for (; i < word_indices.size() && word_indices[i] < 0; i++) {
    if (word_indices[i] == kMinorRunStart) in_minor_direction_ = true;
    else if (word_indices[i] == kMinorRunEnd) in_minor_direction_ = false;
  }
  if (in_minor_direction_) at_beginning_of_minor_run_ = true;
  if (i >= word_indices.size()) return;
  // Step from the leftmost word to the logically first one.
  int first_word_index = word_indices[i];
  for (int j = 0; j < first_word_index; j++) {
    PageIterator::Next(RIL_WORD);
  }
  MoveToLogicalStartOfWord();
}
// Resets the iterator to the start of the page, re-deriving the paragraph
// direction and moving to the logical start of the first textline.
void ResultIterator::Begin() {
  LTRResultIterator::Begin();
  in_minor_direction_ = false;
  at_beginning_of_minor_run_ = false;
  current_paragraph_is_ltr_ = CurrentParagraphIsLtr();
  MoveToLogicalStartOfTextline();
}
// Advances to the next element at the given level, following logical
// (bidi) reading order rather than Tesseract's strict visual order.
// Returns false once the end of the page is reached. Note the deliberate
// case fall-through from RIL_SYMBOL into RIL_WORD.
bool ResultIterator::Next(PageIteratorLevel level) {
  if (it_->block() == NULL) return false;  // already at end!
  switch (level) {
    case RIL_BLOCK:  // explicit fall-through
    case RIL_PARA:   // explicit fall-through
    case RIL_TEXTLINE:
      if (!PageIterator::Next(level)) return false;
      if (IsWithinFirstTextlineOfParagraph()) {
        // if we've advanced to a new paragraph,
        // recalculate current_paragraph_is_ltr_
        current_paragraph_is_ltr_ = CurrentParagraphIsLtr();
      }
      in_minor_direction_ = false;
      MoveToLogicalStartOfTextline();
      return it_->block() != NULL;
    case RIL_SYMBOL:
    {
      // Locate the current blob in the word's logical order and try to
      // step to the following one.
      GenericVector<int> blob_order;
      CalculateBlobOrder(&blob_order);
      int next_blob = 0;
      while (next_blob < blob_order.size() &&
             blob_index_ != blob_order[next_blob])
        next_blob++;
      next_blob++;
      if (next_blob < blob_order.size()) {
        // we're in the same word; simply advance one blob.
        BeginWord(blob_order[next_blob]);
        at_beginning_of_minor_run_ = false;
        return true;
      }
      level = RIL_WORD;  // we've fallen through to the next word.
    }
    case RIL_WORD:  // explicit fall-through.
    {
      if (it_->word() == NULL) return Next(RIL_BLOCK);
      GenericVectorEqEq<int> word_indices;
      int this_word_index = LTRWordIndex();
      CalculateTextlineOrder(current_paragraph_is_ltr_,
                             *this,
                             &word_indices);
      // Last real (non-marker) entry of the logical order.
      int final_real_index = word_indices.size() - 1;
      while (final_real_index > 0 && word_indices[final_real_index] < 0)
        final_real_index--;
      for (int i = 0; i < final_real_index; i++) {
        if (word_indices[i] == this_word_index) {
          // Skip run markers to the next real word, updating the
          // minor-direction state as we pass them.
          int j = i + 1;
          for (; j < final_real_index && word_indices[j] < 0; j++) {
            if (word_indices[j] == kMinorRunStart) in_minor_direction_ = true;
            if (word_indices[j] == kMinorRunEnd) in_minor_direction_ = false;
          }
          at_beginning_of_minor_run_ = (word_indices[j - 1] == kMinorRunStart);
          // awesome, we move to word_indices[j]
          if (BidiDebug(3)) {
            tprintf("Next(RIL_WORD): %d -> %d\n",
                    this_word_index, word_indices[j]);
          }
          PageIterator::RestartRow();
          for (int k = 0; k < word_indices[j]; k++) {
            PageIterator::Next(RIL_WORD);
          }
          MoveToLogicalStartOfWord();
          return true;
        }
      }
      if (BidiDebug(3)) {
        tprintf("Next(RIL_WORD): %d -> EOL\n", this_word_index);
      }
      // we're going off the end of the text line.
      return Next(RIL_TEXTLINE);
    }
  }
  ASSERT_HOST(false);  // shouldn't happen.
  return false;
}
// Returns whether the iterator is at the logical first element of the
// given level, in bidi reading order.
bool ResultIterator::IsAtBeginningOf(PageIteratorLevel level) const {
  if (it_->block() == NULL) return false;  // Already at the end!
  if (it_->word() == NULL) return true;  // In an image block.
  if (level == RIL_SYMBOL) return true;  // Always at beginning of a symbol.
  bool at_word_start = IsAtFirstSymbolOfWord();
  if (level == RIL_WORD) return at_word_start;
  ResultIterator line_start(*this);
  // move to the first word in the line...
  line_start.MoveToLogicalStartOfTextline();
  bool at_textline_start = at_word_start && *line_start.it_ == *it_;
  if (level == RIL_TEXTLINE) return at_textline_start;
  // now we move to the left-most word...
  line_start.RestartRow();
  // A block boundary is where the block pointer changes between rows.
  bool at_block_start = at_textline_start &&
      line_start.it_->block() != line_start.it_->prev_block();
  if (level == RIL_BLOCK) return at_block_start;
  // A paragraph starts at a block start, or where the row's para pointer
  // differs from the previous row's.
  bool at_para_start = at_block_start ||
      (at_textline_start &&
       line_start.it_->row()->row->para() !=
           line_start.it_->prev_row()->row->para());
  if (level == RIL_PARA) return at_para_start;
  ASSERT_HOST(false);  // shouldn't happen.
  return false;
}
/**
* NOTE! This is an exact copy of PageIterator::IsAtFinalElement with the
* change that the variable next is now a ResultIterator instead of a
* PageIterator.
*/
// Returns whether the current element is the last of its kind at the given
// level. (Exact copy of PageIterator::IsAtFinalElement except that `next`
// is a ResultIterator, so stepping follows logical reading order.)
bool ResultIterator::IsAtFinalElement(PageIteratorLevel level,
                                      PageIteratorLevel element) const {
  if (Empty(element)) return true;  // Already at the end!
  // We are final iff stepping forward by element lands us at the end of
  // the page, or at the beginning of *every* level in [level, element).
  // With more than one level of difference we could e.g. advance one
  // symbol and still be at the first word on a line, so each intermediate
  // level must be checked too.
  ResultIterator next(*this);
  next.Next(element);
  if (next.Empty(element)) return true;  // Reached the end of the page.
  for (int lvl = element - 1; lvl >= level; --lvl) {
    if (!next.IsAtBeginningOf(static_cast<PageIteratorLevel>(lvl))) {
      return false;
    }
  }
  return true;
}
/**
* Returns the null terminated UTF-8 encoded text string for the current
* object at the given level. Use delete [] to free after use.
*/
// Returns the null terminated UTF-8 encoded text for the current object at
// the given level, in logical reading order, as a caller-owned buffer
// (free with delete []). Returns NULL at the end of the page.
char* ResultIterator::GetUTF8Text(PageIteratorLevel level) const {
  if (it_->word() == NULL) return NULL;  // Already at the end!
  STRING text;
  switch (level) {
    case RIL_BLOCK:
      {
        // Append every paragraph of the current block.
        ResultIterator pp(*this);
        do {
          pp.AppendUTF8ParagraphText(&text);
        } while (pp.Next(RIL_PARA) && pp.it_->block() == it_->block());
      }
      break;
    case RIL_PARA:
      AppendUTF8ParagraphText(&text);
      break;
    case RIL_TEXTLINE:
      {
        // Work on a copy so this (const) iterator is not advanced.
        ResultIterator it(*this);
        it.MoveToLogicalStartOfTextline();
        it.IterateAndAppendUTF8TextlineText(&text);
      }
      break;
    case RIL_WORD:
      AppendUTF8WordText(&text);
      break;
    case RIL_SYMBOL:
      {
        bool reading_direction_is_ltr =
            current_paragraph_is_ltr_ ^ in_minor_direction_;
        if (at_beginning_of_minor_run_) {
          text += reading_direction_is_ltr ? kLRM : kRLM;
        }
        // Append the symbol after the optional direction mark. This was
        // previously `text = ...`, which silently discarded the LRM/RLM
        // prefix added just above (making the preceding if a dead store),
        // matching the mark handling in AppendUTF8WordText.
        text += it_->word()->BestUTF8(blob_index_, !reading_direction_is_ltr);
        if (IsAtFinalSymbolOfWord()) AppendSuffixMarks(&text);
      }
      break;
  }
  // Copy into a caller-owned buffer; length includes the trailing NUL.
  int length = text.length() + 1;
  char* result = new char[length];
  strncpy(result, text.string(), length);
  return result;
}
// Appends the current word's text to *text in logical symbol order,
// including a leading LRM/RLM at the start of a minor run and any
// trailing direction marks.
void ResultIterator::AppendUTF8WordText(STRING *text) const {
  if (it_->word() == NULL) return;
  ASSERT_HOST(it_->word()->best_choice != NULL);
  bool reading_direction_is_ltr =
      current_paragraph_is_ltr_ ^ in_minor_direction_;
  if (at_beginning_of_minor_run_) {
    *text += reading_direction_is_ltr ? kLRM : kRLM;
  }
  GenericVector<int> blob_order;
  CalculateBlobOrder(&blob_order);
  const int num_blobs = blob_order.size();
  for (int b = 0; b < num_blobs; b++) {
    *text += it_->word()->BestUTF8(blob_order[b], !reading_direction_is_ltr);
  }
  AppendSuffixMarks(text);
}
// Appends the current textline's text (in logical order) to *text,
// advancing this iterator past the line. Words are joined by single
// spaces; the line is terminated by line_separator_ and, at a paragraph
// boundary or end of page, additionally by paragraph_separator_.
void ResultIterator::IterateAndAppendUTF8TextlineText(STRING *text) {
  if (Empty(RIL_WORD)) {
    Next(RIL_WORD);
    return;
  }
  if (BidiDebug(1)) {
    // Dump per-word script directions and the computed logical order.
    GenericVectorEqEq<int> textline_order;
    GenericVector<StrongScriptDirection> dirs;
    CalculateTextlineOrder(current_paragraph_is_ltr_,
                           *this, &dirs, &textline_order);
    tprintf("Strong Script dirs [%p/P=%s]: ", it_->row(),
            current_paragraph_is_ltr_ ? "ltr" : "rtl");
    PrintScriptDirs(dirs);
    tprintf("Logical textline order [%p/P=%s]: ", it_->row(),
            current_paragraph_is_ltr_ ? "ltr" : "rtl");
    for (int i = 0; i < textline_order.size(); i++) {
      tprintf("%d ", textline_order[i]);
    }
    tprintf("\n");
  }
  int words_appended = 0;
  do {
    AppendUTF8WordText(text);
    words_appended++;
    *text += " ";
  } while (Next(RIL_WORD) && !IsAtBeginningOf(RIL_TEXTLINE));
  if (BidiDebug(1)) {
    tprintf("%d words printed\n", words_appended);
  }
  // Drop the trailing space added after the last word.
  text->truncate_at(text->length() - 1);
  *text += line_separator_;
  // If we just finished a paragraph, add an extra newline.
  if (it_->block() == NULL || IsAtBeginningOf(RIL_PARA))
    *text += paragraph_separator_;
}
// Appends the whole current paragraph's text to *text, one logical
// textline at a time, without moving this (const) iterator.
void ResultIterator::AppendUTF8ParagraphText(STRING *text) const {
  ResultIterator line(*this);
  line.RestartParagraph();
  line.MoveToLogicalStartOfTextline();
  if (line.Empty(RIL_WORD)) return;
  do {
    line.IterateAndAppendUTF8TextlineText(text);
  } while (line.it_->block() != NULL && !line.IsAtBeginningOf(RIL_PARA));
}
bool ResultIterator::BidiDebug(int min_level) const {
int debug_level = 1;
IntParam *p = ParamUtils::FindParam<IntParam>(
"bidi_debug", GlobalParams()->int_params,
tesseract_->params()->int_params);
if (p != NULL) debug_level = (inT32)(*p);
return debug_level >= min_level;
}
} // namespace tesseract.
| C++ |
///////////////////////////////////////////////////////////////////////
// File: par_control.cpp
// Description: Control code for parallel implementation.
// Author: Ray Smith
// Created: Mon Nov 04 13:23:15 PST 2013
//
// (C) Copyright 2013, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#include "tesseractclass.h"
namespace tesseract {
// Bundles one blob with the Tesseract instance that should classify it and
// the ratings-matrix cell that will receive the resulting choices. Holds
// raw pointers only; owns nothing (no destructor).
struct BlobData {
  BlobData() : blob(NULL), choices(NULL) {}
  // index is the blob's position in word.chopped_word; the target cell is
  // the diagonal entry (index, index) of the word's ratings matrix.
  BlobData(int index, Tesseract* tess, const WERD_RES& word)
    : blob(word.chopped_word->blobs[index]),
      tesseract(tess),
      choices(&(*word.ratings)(index, index)) {}
  TBLOB* blob;                 // blob to classify; not owned
  Tesseract* tesseract;        // instance used to classify it; not owned
  BLOB_CHOICE_LIST** choices;  // ratings-matrix cell to fill; not owned
};
// Pre-classifies the blobs of every given word whose ratings matrix exists
// but is still empty, filling the diagonal cells with blob choices. Runs
// the classification loop in parallel (OpenMP) when tessedit_parallelize
// is greater than 1, serially otherwise.
void Tesseract::PrerecAllWordsPar(const GenericVector<WordData>& words) {
  // Prepare all the blobs.
  GenericVector<BlobData> blobs;
  for (int w = 0; w < words.size(); ++w) {
    // Only words with an allocated-but-unfilled ratings matrix.
    if (words[w].word->ratings != NULL &&
        words[w].word->ratings->get(0, 0) == NULL) {
      for (int s = 0; s < words[w].lang_words.size(); ++s) {
        // Each language word is classified by its own sub-language
        // instance; indices beyond sub_langs_ fall back to this instance.
        Tesseract* sub = s < sub_langs_.size() ? sub_langs_[s] : this;
        const WERD_RES& word = *words[w].lang_words[s];
        for (int b = 0; b < word.chopped_word->NumBlobs(); ++b) {
          blobs.push_back(BlobData(b, sub, word));
        }
      }
    }
  }
  // Pre-classify all the blobs.
  if (tessedit_parallelize > 1) {
    // NOTE: thread count is hard-coded to 10 here.
    #pragma omp parallel for num_threads(10)
    for (int b = 0; b < blobs.size(); ++b) {
      *blobs[b].choices =
          blobs[b].tesseract->classify_blob(blobs[b].blob, "par", White, NULL);
    }
  } else {
    // TODO(AMD) parallelize this.
    for (int b = 0; b < blobs.size(); ++b) {
      *blobs[b].choices =
          blobs[b].tesseract->classify_blob(blobs[b].blob, "par", White, NULL);
    }
  }
}
} // namespace tesseract.
| C++ |
///////////////////////////////////////////////////////////////////////
// File: mutableiterator.h
// Description: Iterator for tesseract results providing access to
// both high-level API and Tesseract internal data structures.
// Author: David Eger
// Created: Thu Feb 24 19:01:06 PST 2011
//
// (C) Copyright 2011, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#ifndef TESSERACT_CCMAIN_MUTABLEITERATOR_H__
#define TESSERACT_CCMAIN_MUTABLEITERATOR_H__
#include "resultiterator.h"
class BLOB_CHOICE_IT;
namespace tesseract {
class Tesseract;
// Class to iterate over tesseract results, providing access to all levels
// of the page hierarchy, without including any tesseract headers or having
// to handle any tesseract structures.
// WARNING! This class points to data held within the TessBaseAPI class, and
// therefore can only be used while the TessBaseAPI class still exists and
// has not been subjected to a call of Init, SetImage, Recognize, Clear, End
// DetectOS, or anything else that changes the internal PAGE_RES.
// See apitypes.h for the definition of PageIteratorLevel.
// See also base class PageIterator, which contains the bulk of the interface.
// ResultIterator adds text-specific methods for access to OCR output.
// MutableIterator adds access to internal data structures.
// ResultIterator that additionally exposes Tesseract's internal
// PAGE_RES_IT, for callers that need direct access to internal structures.
class MutableIterator : public ResultIterator {
 public:
  // See argument descriptions in ResultIterator()
  MutableIterator(PAGE_RES* page_res, Tesseract* tesseract,
                  int scale, int scaled_yres,
                  int rect_left, int rect_top,
                  int rect_width, int rect_height)
      : ResultIterator(
          LTRResultIterator(page_res, tesseract, scale, scaled_yres, rect_left,
                            rect_top, rect_width, rect_height)) {}
  virtual ~MutableIterator() {}
  // See PageIterator and ResultIterator for most calls.
  // Return access to Tesseract internals. The returned iterator points into
  // data owned elsewhere (see the class warning above about TessBaseAPI
  // lifetime); the caller must not delete it.
  const PAGE_RES_IT *PageResIt() const { return it_; }
};
} // namespace tesseract.
#endif // TESSERACT_CCMAIN_MUTABLEITERATOR_H__
| C++ |
///////////////////////////////////////////////////////////////////////
// File: recogtraining.cpp
// Description: Functions for ambiguity and parameter training.
// Author: Daria Antonova
// Created: Mon Aug 13 11:26:43 PDT 2009
//
// (C) Copyright 2009, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#include "tesseractclass.h"
#include "boxread.h"
#include "control.h"
#include "cutil.h"
#include "host.h"
#include "ratngs.h"
#include "reject.h"
#include "stopper.h"
namespace tesseract {
// Maximum allowed difference between corresponding edge coordinates for a
// box-file box and a Tesseract-computed box to be considered a match.
const inT16 kMaxBoxEdgeDiff = 2;
// Sets flags necessary for recognition in the training mode.
// Opens and returns the pointer to the output file.
// Sets flags necessary for recognition in the training mode.
// Derives the output file name from fname by replacing its extension with
// ".txt", opens it for appending, and returns the open FILE*.
FILE *Tesseract::init_recog_training(const STRING &fname) {
  if (tessedit_ambigs_training) {
    // Ambiguity training wants raw recognizer behavior: no adaption, no
    // document dictionary, and all segmentations explored.
    tessedit_tess_adaption_mode.set_value(0);   // turn off adaption
    tessedit_enable_doc_dict.set_value(0);      // turn off document dictionary
    getDict().stopper_no_acceptable_choices.set_value(1);
  }
  STRING output_fname = fname;
  // Strip everything from the last '.' onwards, if any.
  const char *lastdot = strrchr(output_fname.string(), '.');
  if (lastdot != NULL) {
    output_fname[lastdot - output_fname.string()] = '\0';
  }
  output_fname += ".txt";
  return open_file(output_fname.string(), "a+");
}
// Copies the bounding box from page_res_it->word() to the given TBOX.
// Copies the bounding box from page_res_it->word() to the given TBOX,
// skipping forward over any non-word entries first. Returns false when no
// word remains on the page.
bool read_t(PAGE_RES_IT *page_res_it, TBOX *tbox) {
  while (page_res_it->block() != NULL && page_res_it->word() == NULL)
    page_res_it->forward();
  if (page_res_it->word() == NULL) {
    return false;
  }
  *tbox = page_res_it->word()->word->bounding_box();
  // If tbox->left() is negative, the training image has vertical text and
  // all the coordinates of bounding boxes of page_res are rotated by 90
  // degrees in a counterclockwise direction. We need to rotate the TBOX back
  // in order to compare with the TBOXes of box files.
  if (tbox->left() < 0) {
    tbox->rotate(FCOORD(0.0, -1.0));
  }
  return true;
}
// This function takes tif/box pair of files and runs recognition on the image,
// while making sure that the word bounds that tesseract identified roughly
// match to those specified by the input box file. For each word (ngram in a
// single bounding box from the input box file) it outputs the ocred result,
// the correct label, rating and certainty.
// NOTE(review): monitor is never referenced in this body.
void Tesseract::recog_training_segmented(const STRING &fname,
                                         PAGE_RES *page_res,
                                         volatile ETEXT_DESC *monitor,
                                         FILE *output_file) {
  // Derive the box filename by replacing fname's extension with .box.
  STRING box_fname = fname;
  const char *lastdot = strrchr(box_fname.string(), '.');
  if (lastdot != NULL) box_fname[lastdot - box_fname.string()] = '\0';
  box_fname += ".box";
  // read_next_box() will close box_file
  // NOTE(review): the comment above contradicts the fclose(box_file) below --
  // confirm ownership to rule out a double close.
  FILE *box_file = open_file(box_fname.string(), "r");
  PAGE_RES_IT page_res_it;
  page_res_it.page_res = page_res;
  page_res_it.restart_page();
  STRING label;
  // Process all the words on this page.
  TBOX tbox;  // tesseract-identified box
  TBOX bbox;  // box from the box file
  bool keep_going;
  int line_number = 0;
  int examined_words = 0;
  do {
    // Advance both streams in lock-step. Note that &= does not short-circuit,
    // so ReadNextBox consumes a box even when read_t() already failed.
    keep_going = read_t(&page_res_it, &tbox);
    keep_going &= ReadNextBox(applybox_page, &line_number, box_file, &label,
                              &bbox);
    // Align bottom left points of the TBOXes.
    while (keep_going &&
           !NearlyEqual<int>(tbox.bottom(), bbox.bottom(), kMaxBoxEdgeDiff)) {
      // Advance whichever stream is trailing vertically.
      if (bbox.bottom() < tbox.bottom()) {
        page_res_it.forward();
        keep_going = read_t(&page_res_it, &tbox);
      } else {
        keep_going = ReadNextBox(applybox_page, &line_number, box_file, &label,
                                 &bbox);
      }
    }
    while (keep_going &&
           !NearlyEqual<int>(tbox.left(), bbox.left(), kMaxBoxEdgeDiff)) {
      // Advance whichever stream is trailing horizontally.
      if (bbox.left() > tbox.left()) {
        page_res_it.forward();
        keep_going = read_t(&page_res_it, &tbox);
      } else {
        keep_going = ReadNextBox(applybox_page, &line_number, box_file, &label,
                                 &bbox);
      }
    }
    // OCR the word if top right points of the TBOXes are similar.
    if (keep_going &&
        NearlyEqual<int>(tbox.right(), bbox.right(), kMaxBoxEdgeDiff) &&
        NearlyEqual<int>(tbox.top(), bbox.top(), kMaxBoxEdgeDiff)) {
      ambigs_classify_and_output(label.string(), &page_res_it, output_file);
      examined_words++;
    }
    page_res_it.forward();
  } while (keep_going);
  fclose(box_file);
  // Set up scripts on all of the words that did not get sent to
  // ambigs_classify_and_output. They all should have, but if all the
  // werd_res's don't get uch_sets, tesseract will crash when you try
  // to iterate over them. :-(
  int total_words = 0;
  for (page_res_it.restart_page(); page_res_it.block() != NULL;
       page_res_it.forward()) {
    if (page_res_it.word()) {
      if (page_res_it.word()->uch_set == NULL)
        page_res_it.word()->SetupFake(unicharset);
      total_words++;
    }
  }
  // Warn when a large fraction of the page failed box/word alignment.
  if (examined_words < 0.85 * total_words) {
    tprintf("TODO(antonova): clean up recog_training_segmented; "
            " It examined only a small fraction of the ambigs image.\n");
  }
  tprintf("recog_training_segmented: examined %d / %d words.\n",
          examined_words, total_words);
}
// Helper prints the given set of blob choices.
// Output format: the concatenated unichars of the path, then the label, the
// summed rating and the minimum certainty, tab-separated on one line.
static void PrintPath(int length, const BLOB_CHOICE** blob_choices,
                      const UNICHARSET& unicharset,
                      const char *label, FILE *output_file) {
  float rating = 0.0f;
  float certainty = 0.0f;
  for (int i = 0; i < length; ++i) {
    const BLOB_CHOICE* blob_choice = blob_choices[i];
    fprintf(output_file, "%s",
            unicharset.id_to_unichar(blob_choice->unichar_id()));
    rating += blob_choice->rating();
    // Track the worst (minimum) certainty along the path. Starting from 0.0f
    // assumes certainties are non-positive -- TODO(review): confirm against
    // BLOB_CHOICE::certainty() semantics.
    if (certainty > blob_choice->certainty())
      certainty = blob_choice->certainty();
  }
  fprintf(output_file, "\t%s\t%.4f\t%.4f\n",
          label, rating, certainty);
}
// Helper recursively prints all paths through the ratings matrix, starting
// at column col.
static void PrintMatrixPaths(int col, int dim,
                             const MATRIX& ratings,
                             int length, const BLOB_CHOICE** blob_choices,
                             const UNICHARSET& unicharset,
                             const char *label, FILE *output_file) {
  // ratings.get(col, row) is the choice list for a span of blobs starting at
  // col (see MATRIX); the bandwidth bounds how long a span can be.
  for (int row = col; row < dim && row - col < ratings.bandwidth(); ++row) {
    if (ratings.get(col, row) != NOT_CLASSIFIED) {
      BLOB_CHOICE_IT bc_it(ratings.get(col, row));
      for (bc_it.mark_cycle_pt(); !bc_it.cycled_list(); bc_it.forward()) {
        // Extend the current path with this choice, then either recurse on
        // the remaining columns or print the completed path.
        blob_choices[length] = bc_it.data();
        if (row + 1 < dim) {
          PrintMatrixPaths(row + 1, dim, ratings, length + 1, blob_choices,
                           unicharset, label, output_file);
        } else {
          PrintPath(length + 1, blob_choices, unicharset, label, output_file);
        }
      }
    }
  }
}
// Runs classify_word_pass1() on the current word. Outputs Tesseract's
// raw choice as a result of the classification. For words labeled with a
// single unichar also outputs all alternatives from blob_choices of the
// best choice.
void Tesseract::ambigs_classify_and_output(const char *label,
                                           PAGE_RES_IT* pr_it,
                                           FILE *output_file) {
  // Classify word.
  fflush(stdout);
  WordData word_data(*pr_it);
  SetupWordPassN(1, &word_data);
  classify_word_and_language(&Tesseract::classify_word_pass1,
                             pr_it, &word_data);
  WERD_RES* werd_res = word_data.word;
  WERD_CHOICE *best_choice = werd_res->best_choice;
  ASSERT_HOST(best_choice != NULL);
  // Compute the number of unichars in the label.
  // encoding is only used here to reject labels that cannot be encoded.
  GenericVector<UNICHAR_ID> encoding;
  if (!unicharset.encode_string(label, true, &encoding, NULL, NULL)) {
    tprintf("Not outputting illegal unichar %s\n", label);
    return;
  }
  // Dump all paths through the ratings matrix (which is normally small).
  // blob_choices is scratch space for the recursion; sized by the matrix
  // dimension, which bounds the maximum path length.
  int dim = werd_res->ratings->dimension();
  const BLOB_CHOICE** blob_choices = new const BLOB_CHOICE*[dim];
  PrintMatrixPaths(0, dim, *werd_res->ratings, 0, blob_choices,
                   unicharset, label, output_file);
  delete [] blob_choices;
}
} // namespace tesseract
| C++ |
/**********************************************************************
* File: paragraphs.h
* Description: Paragraph Detection internal data structures.
* Author: David Eger
* Created: 11 March 2011
*
* (C) Copyright 2011, Google Inc.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
#ifndef TESSERACT_CCMAIN_PARAGRAPHS_INTERNAL_H_
#define TESSERACT_CCMAIN_PARAGRAPHS_INTERNAL_H_
#include "paragraphs.h"
#ifdef _MSC_VER
#include <string>
#else
#include "strings.h"
#endif
// NO CODE OUTSIDE OF paragraphs.cpp AND TESTS SHOULD NEED TO ACCESS
// DATA STRUCTURES OR FUNCTIONS IN THIS FILE.
class WERD_CHOICE;
namespace tesseract {
// Return whether the given word is likely to be a list item start word.
bool AsciiLikelyListItem(const STRING &word);
// Return the first Unicode Codepoint from werd[pos].
int UnicodeFor(const UNICHARSET *u, const WERD_CHOICE *werd, int pos);
// Set right word attributes given either a unicharset and werd or a utf8
// string.
void RightWordAttributes(const UNICHARSET *unicharset, const WERD_CHOICE *werd,
const STRING &utf8,
bool *is_list, bool *starts_idea, bool *ends_idea);
// Set left word attributes given either a unicharset and werd or a utf8 string.
void LeftWordAttributes(const UNICHARSET *unicharset, const WERD_CHOICE *werd,
const STRING &utf8,
bool *is_list, bool *starts_idea, bool *ends_idea);
// Classification of a single text line with respect to paragraph structure.
enum LineType {
  LT_START = 'S',     // First line of a paragraph.
  LT_BODY = 'C',      // Continuation line of a paragraph.
  LT_UNKNOWN = 'U',   // No clues.
  LT_MULTIPLE = 'M',  // Matches for both LT_START and LT_BODY.
};
// The first paragraph in a page of body text is often un-indented.
// This is a typographic convention which is common to indicate either that:
// (1) The paragraph is the continuation of a previous paragraph, or
// (2) The paragraph is the first paragraph in a chapter.
//
// I refer to such paragraphs as "crown"s, and the output of the paragraph
// detection algorithm attempts to give them the same paragraph model as
// the rest of the body text.
//
// Nonetheless, while building hypotheses, it is useful to mark the lines
// of crown paragraphs temporarily as crowns, either aligned left or right.
extern const ParagraphModel *kCrownLeft;
extern const ParagraphModel *kCrownRight;
// A model is "strong" when it is a real ParagraphModel, i.e. neither NULL
// nor one of the temporary crown markers.
inline bool StrongModel(const ParagraphModel *model) {
  if (model == NULL)
    return false;
  return !(model == kCrownLeft || model == kCrownRight);
}
// One hypothesis about a row: a line type, optionally tied to a specific
// paragraph model.
struct LineHypothesis {
  LineHypothesis() : ty(LT_UNKNOWN), model(NULL) {}
  LineHypothesis(LineType line_type, const ParagraphModel *m)
      : ty(line_type), model(m) {}
  LineHypothesis(const LineHypothesis &other)
      : ty(other.ty), model(other.model) {}
  // Hypotheses are equal only when both the type and the model agree.
  bool operator==(const LineHypothesis &other) const {
    return ty == other.ty && model == other.model;
  }
  LineType ty;
  const ParagraphModel *model;  // Not owned; may be NULL or a crown marker.
};
class ParagraphTheory; // Forward Declaration
typedef GenericVectorEqEq<const ParagraphModel *> SetOfModels;
// Row Scratch Registers are data generated by the paragraph detection
// algorithm based on a RowInfo input.
class RowScratchRegisters {
 public:
  // We presume row will outlive us.
  void Init(const RowInfo &row);
  // Line type consistent with the current hypotheses (all models); see
  // paragraphs.cpp for the exact aggregation rules.
  LineType GetLineType() const;
  // Line type considering only hypotheses about the given model.
  LineType GetLineType(const ParagraphModel *model) const;
  // Mark this as a start line type, sans model. This is useful for the
  // initial marking of probable body lines or paragraph start lines.
  void SetStartLine();
  // Mark this as a body line type, sans model. This is useful for the
  // initial marking of probably body lines or paragraph start lines.
  void SetBodyLine();
  // Record that this row fits as a paragraph start line in the given model,
  void AddStartLine(const ParagraphModel *model);
  // Record that this row fits as a paragraph body line in the given model,
  void AddBodyLine(const ParagraphModel *model);
  // Clear all hypotheses about this line.
  void SetUnknown() { hypotheses_.truncate(0); }
  // Append all hypotheses of strong models that match this row as a start.
  void StartHypotheses(SetOfModels *models) const;
  // Append all hypotheses of strong models matching this row.
  void StrongHypotheses(SetOfModels *models) const;
  // Append all hypotheses for this row.
  void NonNullHypotheses(SetOfModels *models) const;
  // Discard any hypotheses whose model is not in the given list.
  void DiscardNonMatchingHypotheses(const SetOfModels &models);
  // If we have only one hypothesis and that is that this line is a paragraph
  // start line of a certain model, return that model. Else return NULL.
  const ParagraphModel *UniqueStartHypothesis() const;
  // If we have only one hypothesis and that is that this line is a paragraph
  // body line of a certain model, return that model. Else return NULL.
  const ParagraphModel *UniqueBodyHypothesis() const;
  // Return the indentation for the side opposite of the aligned side.
  int OffsideIndent(tesseract::ParagraphJustification just) const {
    switch (just) {
      case tesseract::JUSTIFICATION_RIGHT: return lindent_;
      case tesseract::JUSTIFICATION_LEFT: return rindent_;
      // Unknown/centered justification: use the larger indent.
      default: return lindent_ > rindent_ ? lindent_ : rindent_;
    }
  }
  // Return the indentation for the side the text is aligned to.
  int AlignsideIndent(tesseract::ParagraphJustification just) const {
    switch (just) {
      case tesseract::JUSTIFICATION_RIGHT: return rindent_;
      case tesseract::JUSTIFICATION_LEFT: return lindent_;
      // Unknown/centered justification: use the larger indent.
      default: return lindent_ > rindent_ ? lindent_ : rindent_;
    }
  }
  // Append header fields to a vector of row headings.
  static void AppendDebugHeaderFields(GenericVector<STRING> *header);
  // Append data for this row to a vector of debug strings.
  void AppendDebugInfo(const ParagraphTheory &theory,
                       GenericVector<STRING> *dbg) const;
  // The input row measurements (not owned); presumably set by Init().
  const RowInfo *ri_;
  // These four constants form a horizontal box model for the white space
  // on the edges of each line. At each point in the algorithm, the following
  // shall hold:
  //   ri_->pix_ldistance = lmargin_ + lindent_
  //   ri_->pix_rdistance = rindent_ + rmargin_
  int lmargin_;
  int lindent_;
  int rindent_;
  int rmargin_;
 private:
  // Hypotheses of either LT_START or LT_BODY
  GenericVectorEqEq<LineHypothesis> hypotheses_;
};
// A collection of convenience functions for wrapping the set of
// Paragraph Models we believe correctly model the paragraphs in the image.
class ParagraphTheory {
 public:
  // We presume models will outlive us, and that models will take ownership
  // of any ParagraphModel *'s we add.
  explicit ParagraphTheory(GenericVector<ParagraphModel *> *models)
      : models_(models) {}
  GenericVector<ParagraphModel *> &models() { return *models_; }
  const GenericVector<ParagraphModel *> &models() const { return *models_; }
  // Return an existing model if one that is Comparable() can be found.
  // Else, allocate a new copy of model to save and return a pointer to it.
  const ParagraphModel *AddModel(const ParagraphModel &model);
  // Discard any models we've made that are not in the list of used models.
  void DiscardUnusedModels(const SetOfModels &used_models);
  // Return the set of all non-centered models.
  void NonCenteredModels(SetOfModels *models);
  // If any of the non-centered paragraph models we know about fit
  // rows[start, end), return it. Else NULL.
  const ParagraphModel *Fits(const GenericVector<RowScratchRegisters> *rows,
                             int start, int end) const;
  // Index of the given model within models(); see paragraphs.cpp for the
  // not-found convention.
  int IndexOf(const ParagraphModel *model) const;
 private:
  GenericVector<ParagraphModel *> *models_;  // Not owned.
  // The subset of *models_ this theory allocated itself (candidates for
  // DiscardUnusedModels).
  GenericVectorEqEq<ParagraphModel *> models_we_added_;
};
// Returns whether rows[row] is plausible as the first line of a paragraph
// described by model (presumed semantics; defined in paragraphs.cpp).
bool ValidFirstLine(const GenericVector<RowScratchRegisters> *rows,
                    int row, const ParagraphModel *model);
// Returns whether rows[row] is plausible as a body (continuation) line of a
// paragraph described by model (presumed semantics; defined in paragraphs.cpp).
bool ValidBodyLine(const GenericVector<RowScratchRegisters> *rows,
                   int row, const ParagraphModel *model);
// Returns whether rows[a] and rows[b] are compatible as lines of the same
// crown paragraph under model (presumed semantics; defined in paragraphs.cpp).
bool CrownCompatible(const GenericVector<RowScratchRegisters> *rows,
                     int a, int b, const ParagraphModel *model);
// A class for smearing Paragraph Model hypotheses to surrounding rows.
// The idea here is that StrongEvidenceClassify first marks only exceedingly
// obvious start and body rows and constructs models of them. Thereafter,
// we may have left over unmarked lines (mostly end-of-paragraph lines) which
// were too short to have much confidence about, but which fit the models we've
// constructed perfectly and which we ought to mark. This class is used to
// "smear" our models over the text.
class ParagraphModelSmearer {
 public:
  ParagraphModelSmearer(GenericVector<RowScratchRegisters> *rows,
                        int row_start, int row_end,
                        ParagraphTheory *theory);
  // Smear forward paragraph models from existing row markings to subsequent
  // text lines if they fit, and mark any thereafter still unmodeled rows
  // with any model in the theory that fits them.
  void Smear();
 private:
  // Record in open_models_ for rows [start_row, end_row) the list of models
  // currently open at each row.
  // A model is still open in a row if some previous row has said model as a
  // start hypothesis, and all rows since (including this row) would fit as
  // either a body or start line in that model.
  void CalculateOpenModels(int row_start, int row_end);
  // The +1 offset exists because open_models_ covers
  // rows[row_start_ - 1, row_end_] (see the comment on open_models_ below).
  SetOfModels &OpenModels(int row) {
    return open_models_[row - row_start_ + 1];
  }
  ParagraphTheory *theory_;  // Not owned.
  GenericVector<RowScratchRegisters> *rows_;  // Not owned.
  int row_start_;
  int row_end_;
  // open_models_ corresponds to rows[start_row_ - 1, end_row_]
  //
  // open_models_:  Contains models which there was an active (open) paragraph
  //                as of the previous line and for which the left and right
  //                indents admit the possibility that this text line continues
  //                to fit the same model.
  // TODO(eger): Think about whether we can get rid of "Open" models and just
  //             use the current hypotheses on RowScratchRegisters.
  GenericVector<SetOfModels> open_models_;
};
// Clear all hypotheses about lines [start, end) and reset the margins to the
// percentile (0..100) value of the left and right row edges for this run of
// rows.
void RecomputeMarginsAndClearHypotheses(
GenericVector<RowScratchRegisters> *rows, int start, int end,
int percentile);
// Return the median inter-word space in rows[row_start, row_end).
int InterwordSpace(const GenericVector<RowScratchRegisters> &rows,
int row_start, int row_end);
// Return whether the first word on the after line can fit in the space at
// the end of the before line (knowing which way the text is aligned and read).
bool FirstWordWouldHaveFit(const RowScratchRegisters &before,
const RowScratchRegisters &after,
tesseract::ParagraphJustification justification);
// Return whether the first word on the after line can fit in the space at
// the end of the before line (not knowing the text alignment).
bool FirstWordWouldHaveFit(const RowScratchRegisters &before,
const RowScratchRegisters &after);
// Do rows[start, end) form a single instance of the given paragraph model?
bool RowsFitModel(const GenericVector<RowScratchRegisters> *rows,
int start, int end, const ParagraphModel *model);
// Do the text and geometry of two rows support a paragraph break between them?
bool LikelyParagraphStart(const RowScratchRegisters &before,
const RowScratchRegisters &after,
tesseract::ParagraphJustification j);
// Given a set of row_owners pointing to PARAs or NULL (no paragraph known),
// normalize each row_owner to point to an actual PARA, and output the
// paragraphs in order onto paragraphs.
void CanonicalizeDetectionResults(
GenericVector<PARA *> *row_owners,
PARA_LIST *paragraphs);
} // namespace
#endif // TESSERACT_CCMAIN_PARAGRAPHS_INTERNAL_H_
| C++ |
///////////////////////////////////////////////////////////////////////
// File: pageiterator.cpp
// Description: Iterator for tesseract page structure that avoids using
// tesseract internal data structures.
// Author: Ray Smith
// Created: Fri Feb 26 14:32:09 PST 2010
//
// (C) Copyright 2010, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#include "pageiterator.h"
#include "allheaders.h"
#include "helpers.h"
#include "pageres.h"
#include "tesseractclass.h"
namespace tesseract {
// Constructs an iterator over page_res, positioned at the start of the page.
// scale/scaled_yres describe the working image relative to the original;
// rect_* record the rectangle of the original image that was analyzed, so
// results can be mapped back to original-image coordinates.
PageIterator::PageIterator(PAGE_RES* page_res, Tesseract* tesseract,
                           int scale, int scaled_yres,
                           int rect_left, int rect_top,
                           int rect_width, int rect_height)
  : page_res_(page_res), tesseract_(tesseract),
    word_(NULL), word_length_(0), blob_index_(0), cblob_it_(NULL),
    scale_(scale), scaled_yres_(scaled_yres),
    rect_left_(rect_left), rect_top_(rect_top),
    rect_width_(rect_width), rect_height_(rect_height) {
  it_ = new PAGE_RES_IT(page_res);
  // Class-qualified call so a derived override is not invoked from within
  // the constructor.
  PageIterator::Begin();
}
PageIterator::~PageIterator() {
  delete it_;
  delete cblob_it_;  // delete of NULL is a no-op, so no check is needed.
}
/**
 * PageIterators may be copied! This makes it possible to iterate over
 * all the objects at a lower level, while maintaining an iterator to
 * objects at a higher level.
 */
PageIterator::PageIterator(const PageIterator& src)
  : page_res_(src.page_res_), tesseract_(src.tesseract_),
    word_(NULL), word_length_(src.word_length_),
    blob_index_(src.blob_index_), cblob_it_(NULL),
    scale_(src.scale_), scaled_yres_(src.scaled_yres_),
    rect_left_(src.rect_left_), rect_top_(src.rect_top_),
    rect_width_(src.rect_width_), rect_height_(src.rect_height_) {
  // Deep-copy the underlying iterator; word_ and cblob_it_ are rebuilt by
  // BeginWord rather than copied.
  it_ = new PAGE_RES_IT(*src.it_);
  BeginWord(src.blob_index_);
}
/**
 * Assignment operator. Copies src's position and geometry, deep-copying the
 * underlying PAGE_RES_IT so the two iterators advance independently.
 */
const PageIterator& PageIterator::operator=(const PageIterator& src) {
  // Guard against self-assignment: without it, it_ is deleted below and then
  // *src.it_ (the same object) is read after free.
  if (this == &src) return *this;
  page_res_ = src.page_res_;
  tesseract_ = src.tesseract_;
  scale_ = src.scale_;
  scaled_yres_ = src.scaled_yres_;
  rect_left_ = src.rect_left_;
  rect_top_ = src.rect_top_;
  rect_width_ = src.rect_width_;
  rect_height_ = src.rect_height_;
  delete it_;  // delete of NULL is a no-op, so no check is needed.
  it_ = new PAGE_RES_IT(*src.it_);
  // BeginWord rebuilds word_/word_length_/blob_index_ for the new position.
  BeginWord(src.blob_index_);
  return *this;
}
// Returns true if this iterator refers to the same word position as *other.
// NOTE(review): the first clause compares the member pointer it_ with other,
// so it only holds when both are NULL; since the constructors always allocate
// it_, that clause appears to be dead/defensive code -- confirm.
bool PageIterator::PositionedAtSameWord(const PAGE_RES_IT* other) const {
  return (it_ == NULL && it_ == other) ||
         ((other != NULL) && (it_ != NULL) && (*it_ == *other));
}
// ============= Moving around within the page ============.
/** Resets the iterator to point to the start of the page. */
void PageIterator::Begin() {
  // restart_page_with_empties includes empty blocks/rows, so non-text
  // regions are visited too.
  it_->restart_page_with_empties();
  BeginWord(0);
}
// Moves the iterator back to the start of the paragraph containing the
// current position.
void PageIterator::RestartParagraph() {
  if (it_->block() == NULL) return;  // At end of the document.
  // Walk paragraph-by-paragraph from the page start; para trails one
  // paragraph behind next_para, stopping when next_para would pass the
  // current position. para is then the start of the containing paragraph.
  PAGE_RES_IT para(page_res_);
  PAGE_RES_IT next_para(para);
  next_para.forward_paragraph();
  while (next_para.cmp(*it_) <= 0) {
    para = next_para;
    next_para.forward_paragraph();
  }
  *it_ = para;
  BeginWord(0);
}
bool PageIterator::IsWithinFirstTextlineOfParagraph() const {
PageIterator p_start(*this);
p_start.RestartParagraph();
return p_start.it_->row() == it_->row();
}
// Moves the iterator back to the beginning of the current text line.
void PageIterator::RestartRow() {
  it_->restart_row();
  BeginWord(0);
}
/**
 * Moves to the start of the next object at the given level in the
 * page hierarchy, and returns false if the end of the page was reached.
 * NOTE (CHANGED!) that ALL PageIteratorLevel level values will visit each
 * non-text block at least once.
 * Think of non text blocks as containing a single para, with at least one
 * line, with a single imaginary word, containing a single symbol.
 * The bounding boxes mark out any polygonal nature of the block, and
 * PTIsTextType(BLockType()) is false for non-text blocks.
 * Calls to Next with different levels may be freely intermixed.
 * This function iterates words in right-to-left scripts correctly, if
 * the appropriate language has been loaded into Tesseract.
 */
bool PageIterator::Next(PageIteratorLevel level) {
  if (it_->block() == NULL) return false;  // Already at the end!
  // A block with no words is a non-text block: any level of Next steps
  // over the whole block.
  if (it_->word() == NULL)
    level = RIL_BLOCK;
  switch (level) {
    case RIL_BLOCK:
      it_->forward_block();
      break;
    case RIL_PARA:
      it_->forward_paragraph();
      break;
    case RIL_TEXTLINE:
      // Step word-by-word until the row changes.
      for (it_->forward_with_empties(); it_->row() == it_->prev_row();
           it_->forward_with_empties());
      break;
    case RIL_WORD:
      it_->forward_with_empties();
      break;
    case RIL_SYMBOL:
      if (cblob_it_ != NULL)
        cblob_it_->forward();
      ++blob_index_;
      if (blob_index_ >= word_length_)
        it_->forward_with_empties();  // Ran off the word: move to the next.
      else
        return true;  // Still inside the same word: word state unchanged.
      break;
  }
  // Re-establish word_/word_length_/blob_index_ for the new position.
  BeginWord(0);
  return it_->block() != NULL;
}
/**
 * Returns true if the iterator is at the start of an object at the given
 * level. Possible uses include determining if a call to Next(RIL_WORD)
 * moved to the start of a RIL_PARA.
 */
bool PageIterator::IsAtBeginningOf(PageIteratorLevel level) const {
  if (it_->block() == NULL) return false;  // Already at the end!
  if (it_->word() == NULL) return true;    // In an image block.
  switch (level) {
    case RIL_BLOCK:
      // First symbol of the first word of a new block.
      return blob_index_ == 0 && it_->block() != it_->prev_block();
    case RIL_PARA:
      // Either a new block, or the same block with the row's paragraph
      // differing from the previous row's.
      return blob_index_ == 0 &&
          (it_->block() != it_->prev_block() ||
           it_->row()->row->para() != it_->prev_row()->row->para());
    case RIL_TEXTLINE:
      return blob_index_ == 0 && it_->row() != it_->prev_row();
    case RIL_WORD:
      return blob_index_ == 0;
    case RIL_SYMBOL:
      return true;  // Any position is the beginning of some symbol.
  }
  return false;
}
/**
 * Returns whether the iterator is positioned at the last element in a
 * given level. (e.g. the last word in a line, the last line in a block)
 */
bool PageIterator::IsAtFinalElement(PageIteratorLevel level,
                                    PageIteratorLevel element) const {
  if (Empty(element)) return true;  // Already at the end!
  // The result is true if we step forward by element and find we are
  // at the end of the page or at beginning of *all* levels in:
  // [level, element).
  // When there is more than one level difference between element and level,
  // we could for instance move forward one symbol and still be at the first
  // word on a line, so we also have to be at the first symbol in a word.
  PageIterator next(*this);
  next.Next(element);
  if (next.Empty(element)) return true;  // Reached the end of the page.
  // Check each level between element (exclusive) and level (inclusive).
  while (element > level) {
    element = static_cast<PageIteratorLevel>(element - 1);
    if (!next.IsAtBeginningOf(element))
      return false;
  }
  return true;
}
/**
 * Three-way comparison of iterator positions:
 *   -1 when *this is before other, 0 when equal, 1 when after.
 */
int PageIterator::Cmp(const PageIterator &other) const {
  // Word-level position dominates; the blob index breaks ties within a word.
  const int word_cmp = it_->cmp(*other.it_);
  if (word_cmp != 0)
    return word_cmp;
  if (blob_index_ == other.blob_index_)
    return 0;
  return blob_index_ < other.blob_index_ ? -1 : 1;
}
// ============= Accessing data ==============.
// Coordinate system:
// Integer coordinates are at the cracks between the pixels.
// The top-left corner of the top-left pixel in the image is at (0,0).
// The bottom-right corner of the bottom-right pixel in the image is at
// (width, height).
// Every bounding box goes from the top-left of the top-left contained
// pixel to the bottom-right of the bottom-right contained pixel, so
// the bounding box of the single top-left pixel in the image is:
// (0,0)->(1,1).
// If an image rectangle has been set in the API, then returned coordinates
// relate to the original (full) image, rather than the rectangle.
/**
 * Returns the bounding rectangle of the current object at the given level in
 * the coordinates of the working image that is pix_binary().
 * See comment on coordinate system above.
 * Returns false if there is no such object at the current position.
 */
bool PageIterator::BoundingBoxInternal(PageIteratorLevel level,
                                       int* left, int* top,
                                       int* right, int* bottom) const {
  if (Empty(level))
    return false;
  TBOX box;
  PARA *para = NULL;
  switch (level) {
    case RIL_BLOCK:
      box = it_->block()->block->bounding_box();
      break;
    case RIL_PARA:
      para = it_->row()->row->para();
      // explicit fall-through: start from the row box, widened to the whole
      // paragraph below.
    case RIL_TEXTLINE:
      box = it_->row()->row->bounding_box();
      break;
    case RIL_WORD:
      box = it_->word()->word->bounding_box();
      break;
    case RIL_SYMBOL:
      if (cblob_it_ == NULL)
        box = it_->word()->box_word->BlobBox(blob_index_);
      else
        box = cblob_it_->data()->bounding_box();
  }
  if (level == RIL_PARA) {
    // Union the boxes of every row in this block that belongs to para.
    PageIterator other = *this;
    other.Begin();
    do {
      if (other.it_->block() &&
          other.it_->block()->block == it_->block()->block &&
          other.it_->row() && other.it_->row()->row &&
          other.it_->row()->row->para() == para) {
        box = box.bounding_union(other.it_->row()->row->bounding_box());
      }
    } while (other.Next(RIL_TEXTLINE));
  }
  // Undo the block rotation, except for box_word symbol boxes, which are
  // presumably already in image orientation -- TODO(review): confirm.
  if (level != RIL_SYMBOL || cblob_it_ != NULL)
    box.rotate(it_->block()->block->re_rotation());
  // Now we have a box in tesseract coordinates relative to the image
  // rectangle, we have to convert the coords to a top-down system.
  const int pix_height = pixGetHeight(tesseract_->pix_binary());
  const int pix_width = pixGetWidth(tesseract_->pix_binary());
  *left = ClipToRange(static_cast<int>(box.left()), 0, pix_width);
  *top = ClipToRange(pix_height - box.top(), 0, pix_height);
  *right = ClipToRange(static_cast<int>(box.right()), *left, pix_width);
  *bottom = ClipToRange(pix_height - box.bottom(), *top, pix_height);
  return true;
}
/**
 * Returns the bounding rectangle of the current object at the given level in
 * coordinates of the original image.
 * See comment on coordinate system above.
 * Returns false if there is no such object at the current position.
 */
bool PageIterator::BoundingBox(PageIteratorLevel level,
                               int* left, int* top,
                               int* right, int* bottom) const {
  // Delegate to the padded overload with zero padding.
  return BoundingBox(level, 0, left, top, right, bottom);
}
// As BoundingBox above, but expands the result by padding pixels on every
// side, clipped to the analysis rectangle of the original image.
bool PageIterator::BoundingBox(PageIteratorLevel level, const int padding,
                               int* left, int* top,
                               int* right, int* bottom) const {
  if (!BoundingBoxInternal(level, left, top, right, bottom))
    return false;
  // Convert to the coordinate system of the original image.
  // left/top round down while right/bottom round up (+ scale_ - 1), so the
  // scaled box always covers the working-image box.
  *left = ClipToRange(*left / scale_ + rect_left_ - padding,
                      rect_left_, rect_left_ + rect_width_);
  *top = ClipToRange(*top / scale_ + rect_top_ - padding,
                     rect_top_, rect_top_ + rect_height_);
  *right = ClipToRange((*right + scale_ - 1) / scale_ + rect_left_ + padding,
                       *left, rect_left_ + rect_width_);
  *bottom = ClipToRange((*bottom + scale_ - 1) / scale_ + rect_top_ + padding,
                        *top, rect_top_ + rect_height_);
  return true;
}
/** Returns true when there is no object to return at the given level. */
bool PageIterator::Empty(PageIteratorLevel level) const {
  if (it_->block() == NULL)
    return true;  // Past the last block: nothing at any level.
  // A block without words is a non-text (image) block; only RIL_BLOCK
  // queries can yield something for it.
  if (level != RIL_BLOCK && it_->word() == NULL)
    return true;
  // At symbol level the blob index must still lie inside the word.
  return level == RIL_SYMBOL && blob_index_ >= word_length_;
}
/** Returns the type of the current block. See apitypes.h for PolyBlockType. */
PolyBlockType PageIterator::BlockType() const {
  if (it_->block() == NULL || it_->block()->block == NULL)
    return PT_UNKNOWN;  // Already at the end!
  if (it_->block()->block->poly_block() == NULL)
    return PT_FLOWING_TEXT;  // No layout analysis used - assume text.
  // The poly_block records the type found by layout analysis.
  return it_->block()->block->poly_block()->isA();
}
/** Returns the polygon outline of the current block. The returned Pta must
 *  be ptaDestroy-ed after use. */
Pta* PageIterator::BlockPolygon() const {
  if (it_->block() == NULL || it_->block()->block == NULL)
    return NULL;  // Already at the end!
  if (it_->block()->block->poly_block() == NULL)
    return NULL;  // No layout analysis used - no polygon.
  ICOORDELT_IT it(it_->block()->block->poly_block()->points());
  Pta* pta = ptaCreate(it.length());
  // NOTE(review): num_pts is incremented but never read.
  int num_pts = 0;
  for (it.mark_cycle_pt(); !it.cycled_list(); it.forward(), ++num_pts) {
    ICOORD* pt = it.data();
    // Convert to top-down coords within the input image: scale back to
    // original resolution and flip the y axis.
    float x = static_cast<float>(pt->x()) / scale_ + rect_left_;
    float y = rect_top_ + rect_height_ - static_cast<float>(pt->y()) / scale_;
    ptaAddPt(pta, x, y);
  }
  return pta;
}
/**
 * Returns a binary image of the current object at the given level.
 * The position and size match the return from BoundingBoxInternal, and so this
 * could be upscaled with respect to the original input image.
 * Use pixDestroy to delete the image after use.
 * The following methods are used to generate the images:
 * RIL_BLOCK: mask the page image with the block polygon.
 * RIL_TEXTLINE: Clip the rectangle of the line box from the page image.
 * TODO(rays) fix this to generate and use a line polygon.
 * RIL_WORD: Clip the rectangle of the word box from the page image.
 * RIL_SYMBOL: Render the symbol outline to an image for cblobs (prior
 * to recognition) or the bounding box otherwise.
 * A reconstruction of the original image (using xor to check for double
 * representation) should be reasonably accurate,
 * apart from removed noise, at the block level. Below the block level, the
 * reconstruction will be missing images and line separators.
 * At the symbol level, kerned characters will be invade the bounding box
 * if rendered after recognition, making an xor reconstruction inaccurate, but
 * an or construction better. Before recognition, symbol-level reconstruction
 * should be good, even with xor, since the images come from the connected
 * components.
 */
Pix* PageIterator::GetBinaryImage(PageIteratorLevel level) const {
  int left, top, right, bottom;
  if (!BoundingBoxInternal(level, &left, &top, &right, &bottom))
    return NULL;
  Pix* pix = NULL;
  switch (level) {
    case RIL_BLOCK:
    case RIL_PARA:
      int bleft, btop, bright, bbottom;
      BoundingBoxInternal(RIL_BLOCK, &bleft, &btop, &bright, &bbottom);
      pix = it_->block()->block->render_mask();
      // AND the mask and the image.
      pixRasterop(pix, 0, 0, pixGetWidth(pix), pixGetHeight(pix),
                  PIX_SRC & PIX_DST, tesseract_->pix_binary(),
                  bleft, btop);
      if (level == RIL_PARA) {
        // RIL_PARA needs further attention:
        // clip the paragraph from the block mask. The box is expressed in
        // block-local coordinates (offset by bleft/btop).
        Box* box = boxCreate(left - bleft, top - btop,
                             right - left, bottom - top);
        Pix* pix2 = pixClipRectangle(pix, box, NULL);
        boxDestroy(&box);
        pixDestroy(&pix);
        pix = pix2;
      }
      break;
    case RIL_TEXTLINE:
    case RIL_WORD:
    case RIL_SYMBOL:
      // Pre-recognition symbols with a real outline are rendered directly.
      if (level == RIL_SYMBOL && cblob_it_ != NULL &&
          cblob_it_->data()->area() != 0)
        return cblob_it_->data()->render();
      // Just clip from the bounding box.
      Box* box = boxCreate(left, top, right - left, bottom - top);
      pix = pixClipRectangle(tesseract_->pix_binary(), box, NULL);
      boxDestroy(&box);
      break;
  }
  return pix;
}
/**
* Returns an image of the current object at the given level in greyscale
* if available in the input. To guarantee a binary image use BinaryImage.
* NOTE that in order to give the best possible image, the bounds are
* expanded slightly over the binary connected component, by the supplied
* padding, so the top-left position of the returned image is returned
* in (left,top). These will most likely not match the coordinates
* returned by BoundingBox.
* If you do not supply an original image, you will get a binary one.
* Use pixDestroy to delete the image after use.
*/
Pix* PageIterator::GetImage(PageIteratorLevel level, int padding,
Pix* original_img,
int* left, int* top) const {
// The (padded) top-left page coordinate of the clip is returned via
// left/top; it will generally differ from BoundingBox's coordinates.
int right, bottom;
if (!BoundingBox(level, left, top, &right, &bottom))
return NULL;
// Without an original image only a binary rendition is possible.
if (original_img == NULL)
return GetBinaryImage(level);
// Expand the box.
*left = MAX(*left - padding, 0);
*top = MAX(*top - padding, 0);
right = MIN(right + padding, rect_width_);
bottom = MIN(bottom + padding, rect_height_);
Box* box = boxCreate(*left, *top, right - *left, bottom - *top);
Pix* grey_pix = pixClipRectangle(original_img, box, NULL);
boxDestroy(&box);
if (level == RIL_BLOCK) {
// Whiten everything outside the block's polygon so neighbouring
// blocks do not bleed into the clip: render the block mask, copy it
// into a clip-sized mask offset by the padding, grow it by the
// padding amount, invert, and paint the uncovered pixels white.
Pix* mask = it_->block()->block->render_mask();
Pix* expanded_mask = pixCreate(right - *left, bottom - *top, 1);
pixRasterop(expanded_mask, padding, padding,
pixGetWidth(mask), pixGetHeight(mask),
PIX_SRC, mask, 0, 0);
pixDestroy(&mask);
pixDilateBrick(expanded_mask, expanded_mask, 2*padding + 1, 2*padding + 1);
pixInvert(expanded_mask, expanded_mask);
pixSetMasked(grey_pix, expanded_mask, MAX_UINT32);
pixDestroy(&expanded_mask);
}
// Caller owns the returned Pix and must release it with pixDestroy.
return grey_pix;
}
/**
* Returns the baseline of the current object at the given level.
* The baseline is the line that passes through (x1, y1) and (x2, y2).
* WARNING: with vertical text, baselines may be vertical!
*/
bool PageIterator::Baseline(PageIteratorLevel level,
                            int* x1, int* y1, int* x2, int* y2) const {
  // No current word: the iterator has run off the end of the page.
  if (it_->word() == NULL) return false;
  ROW* row = it_->row()->row;
  WERD* word = it_->word()->word;
  // Word and symbol levels use the word's box; coarser levels use the row's.
  const bool use_word_box = (level == RIL_WORD || level == RIL_SYMBOL);
  TBOX bounds = use_word_box ? word->bounding_box() : row->bounding_box();
  const int x_left = bounds.left();
  const int x_right = bounds.right();
  // Sample the baseline quadratic at both box edges, rounding to nearest.
  ICOORD start_pt(x_left, static_cast<inT16>(row->base_line(x_left) + 0.5));
  ICOORD end_pt(x_right, static_cast<inT16>(row->base_line(x_right) + 0.5));
  // Rotate to image coordinates and convert to global image coords.
  start_pt.rotate(it_->block()->block->re_rotation());
  end_pt.rotate(it_->block()->block->re_rotation());
  *x1 = start_pt.x() / scale_ + rect_left_;
  *y1 = (rect_height_ - start_pt.y()) / scale_ + rect_top_;
  *x2 = end_pt.x() / scale_ + rect_left_;
  *y2 = (rect_height_ - end_pt.y()) / scale_ + rect_top_;
  return true;
}
void PageIterator::Orientation(tesseract::Orientation *orientation,
                               tesseract::WritingDirection *writing_direction,
                               tesseract::TextlineOrder *textline_order,
                               float *deskew_angle) const {
  BLOCK* block = it_->block()->block;

  // Orientation: recover the page-up direction in image space by undoing
  // the classify rotation and re-applying the block's image rotation.
  FCOORD up_in_image(0.0, 1.0);
  up_in_image.unrotate(block->classify_rotation());
  up_in_image.rotate(block->re_rotation());
  if (up_in_image.x() == 0.0F) {
    *orientation = (up_in_image.y() > 0.0F) ? ORIENTATION_PAGE_UP
                                            : ORIENTATION_PAGE_DOWN;
  } else if (up_in_image.x() > 0.0F) {
    *orientation = ORIENTATION_PAGE_RIGHT;
  } else {
    *orientation = ORIENTATION_PAGE_LEFT;
  }

  // Writing direction: vertical text reads top-to-bottom; otherwise the
  // block's right-to-left flag decides.
  const bool is_vertical_text = (block->classify_rotation().x() == 0.0);
  const bool right_to_left = block->right_to_left();
  if (is_vertical_text) {
    *writing_direction = WRITING_DIRECTION_TOP_TO_BOTTOM;
  } else if (right_to_left) {
    *writing_direction = WRITING_DIRECTION_RIGHT_TO_LEFT;
  } else {
    *writing_direction = WRITING_DIRECTION_LEFT_TO_RIGHT;
  }

  // Textline order. Mongolian detection is not implemented.
  const bool is_mongolian = false;  // TODO(eger): fix me
  if (!is_vertical_text) {
    *textline_order = TEXTLINE_ORDER_TOP_TO_BOTTOM;
  } else if (is_mongolian) {
    *textline_order = TEXTLINE_ORDER_LEFT_TO_RIGHT;
  } else {
    *textline_order = TEXTLINE_ORDER_RIGHT_TO_LEFT;
  }

  // Deskew angle: negative of the block's skew direction (skew is the
  // true-horizontal direction for textlines).
  FCOORD skew = block->skew();
  *deskew_angle = -skew.angle();
}
void PageIterator::ParagraphInfo(tesseract::ParagraphJustification *just,
                                 bool *is_list_item,
                                 bool *is_crown,
                                 int *first_line_indent) const {
  // Default until a fully-modelled paragraph is confirmed available.
  *just = tesseract::JUSTIFICATION_UNKNOWN;
  if (it_->row() == NULL || it_->row()->row == NULL) return;
  PARA *paragraph = it_->row()->row->para();
  if (paragraph == NULL || paragraph->model == NULL) return;
  *is_list_item = paragraph->is_list_item;
  *is_crown = paragraph->is_very_first_or_continuation;
  // Indent of the first line relative to the paragraph body.
  *first_line_indent =
      paragraph->model->first_indent() - paragraph->model->body_indent();
}
/**
* Sets up the internal data for iterating the blobs of a new word, then
* moves the iterator to the given offset.
*/
void PageIterator::BeginWord(int offset) {
WERD_RES* word_res = it_->word();
if (word_res == NULL) {
// This is a non-text block, so there is no word.
word_length_ = 0;
blob_index_ = 0;
word_ = NULL;
return;
}
if (word_res->best_choice != NULL) {
// Recognition has been done, so we are using the box_word, which
// is already baseline denormalized.
word_length_ = word_res->best_choice->length();
if (word_res->box_word != NULL) {
if (word_res->box_word->length() != word_length_) {
// Diagnostic output before the hard assert below fires.
tprintf("Corrupted word! best_choice[len=%d] = %s, box_word[len=%d]: ",
word_length_, word_res->best_choice->unichar_string().string(),
word_res->box_word->length());
word_res->box_word->bounding_box().print();
}
ASSERT_HOST(word_res->box_word->length() == word_length_);
}
word_ = NULL;
// We will be iterating the box_word, so the cblob iterator (used only
// pre-recognition) is no longer needed.
if (cblob_it_ != NULL) {
delete cblob_it_;
cblob_it_ = NULL;
}
} else {
// No recognition yet, so a "symbol" is a cblob.
word_ = word_res->word;
ASSERT_HOST(word_->cblob_list() != NULL);
word_length_ = word_->cblob_list()->length();
// Lazily create the cblob iterator and point it at this word's blobs.
if (cblob_it_ == NULL) cblob_it_ = new C_BLOB_IT;
cblob_it_->set_to_list(word_->cblob_list());
}
// Advance to the requested blob offset (the cblob iterator, when in use,
// is kept in step with blob_index_).
for (blob_index_ = 0; blob_index_ < offset; ++blob_index_) {
if (cblob_it_ != NULL)
cblob_it_->forward();
}
}
bool PageIterator::SetWordBlamerBundle(BlamerBundle *blamer_bundle) {
  // Attach the bundle to the current word, if there is one.
  WERD_RES* word = it_->word();
  if (word == NULL) return false;  // Past the last word: nothing to attach to.
  word->blamer_bundle = blamer_bundle;
  return true;
}
} // namespace tesseract.
| C++ |
/**********************************************************************
* File: werdit.cpp (Formerly wordit.c)
* Description: An iterator for passing over all the words in a document.
* Author: Ray Smith
* Created: Mon Apr 27 08:51:22 BST 1992
*
* (C) Copyright 1992, Hewlett-Packard Ltd.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
#include "werdit.h"
/**********************************************************************
* make_pseudo_word
*
* Make all the blobs inside a selection into a single word.
* The returned PAGE_RES_IT* it points to the new word. After use, call
* it->DeleteCurrentWord() to delete the fake word, and then
* delete it to get rid of the iterator itself.
**********************************************************************/
PAGE_RES_IT* make_pseudo_word(PAGE_RES* page_res, const TBOX& selection_box) {
  PAGE_RES_IT pr_it(page_res);
  C_BLOB_LIST new_blobs;            // Blobs gathered from the selection.
  C_BLOB_IT new_blob_it(&new_blobs);

  for (WERD_RES* word_res = pr_it.word(); word_res != NULL;
       word_res = pr_it.forward()) {
    WERD* word = word_res->word;
    if (!word->bounding_box().overlap(selection_box)) continue;
    // Deep-copy every blob of this word that intersects the selection.
    C_BLOB_IT blob_it(word->cblob_list());
    for (blob_it.mark_cycle_pt(); !blob_it.cycled_list(); blob_it.forward()) {
      C_BLOB* blob = blob_it.data();
      if (blob->bounding_box().overlap(selection_box)) {
        new_blob_it.add_after_then_move(C_BLOB::deep_copy(blob));
      }
    }
    if (new_blobs.empty()) continue;
    // Wrap the gathered blobs in a fake word, splice a clone in after the
    // current word, and hand back a fresh iterator positioned on it.
    WERD* pseudo_word = new WERD(&new_blobs, 1, NULL);
    word_res = pr_it.InsertSimpleCloneWord(*word_res, pseudo_word);
    PAGE_RES_IT* it = new PAGE_RES_IT(page_res);
    while (it->word() != word_res && it->word() != NULL) it->forward();
    ASSERT_HOST(it->word() == word_res);
    return it;
  }
  return NULL;
}
| C++ |
/**********************************************************************
* File: adaptions.cpp (Formerly adaptions.c)
* Description: Functions used to adapt to blobs already confidently
* identified
* Author: Chris Newton
* Created: Thu Oct 7 10:17:28 BST 1993
*
* (C) Copyright 1992, Hewlett-Packard Ltd.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
#ifdef _MSC_VER
#pragma warning(disable:4244) // Conversion warnings
#pragma warning(disable:4305) // int/float warnings
#endif
#ifdef __UNIX__
#include <assert.h>
#endif
#include <ctype.h>
#include <string.h>
#include "tessbox.h"
#include "tessvars.h"
#include "memry.h"
#include "reject.h"
#include "control.h"
#include "stopper.h"
#include "tesseractclass.h"
// Include automatically generated configuration file if running autoconf.
#ifdef HAVE_CONFIG_H
#include "config_auto.h"
#endif
namespace tesseract {
BOOL8 Tesseract::word_adaptable(  //should we adapt?
                                WERD_RES *word,
                                uinT16 mode) {
  if (tessedit_adaption_debug) {
    // Fixed: best_choice may be NULL (the string argument was guarded but
    // rating()/certainty() were dereferenced unconditionally, crashing
    // whenever this debug flag was on for an unrecognized word).
    tprintf("Running word_adaptable() for %s rating %.4f certainty %.4f\n",
            word->best_choice == NULL ? "" :
                word->best_choice->unichar_string().string(),
            word->best_choice == NULL ? 0.0 : word->best_choice->rating(),
            word->best_choice == NULL ? 0.0 : word->best_choice->certainty());
  }

  BOOL8 status = FALSE;
  BITS16 flags(mode);  // Each bit of mode enables one of the checks below.

  enum MODES
  {
    ADAPTABLE_WERD,
    ACCEPTABLE_WERD,
    CHECK_DAWGS,
    CHECK_SPACES,
    CHECK_ONE_ELL_CONFLICT,
    CHECK_AMBIG_WERD
  };

  /*
  0: NO adaption
  */
  if (mode == 0) {
    if (tessedit_adaption_debug) tprintf("adaption disabled\n");
    return FALSE;
  }

  if (flags.bit (ADAPTABLE_WERD)) {
    status |= word->tess_would_adapt;  // result of Classify::AdaptableWord()
    if (tessedit_adaption_debug && !status) {
      tprintf("tess_would_adapt bit is false\n");
    }
  }

  if (flags.bit (ACCEPTABLE_WERD)) {
    status |= word->tess_accepted;
    if (tessedit_adaption_debug && !status) {
      tprintf("tess_accepted bit is false\n");
    }
  }

  if (!status) {                  // If not set then
    return FALSE;                 // ignore other checks
  }

  // The remaining checks all read best_choice; they are only reached when
  // tess_would_adapt/tess_accepted was set, which presumably implies a
  // best_choice exists (TODO confirm against callers).
  if (flags.bit (CHECK_DAWGS) &&
    (word->best_choice->permuter () != SYSTEM_DAWG_PERM) &&
    (word->best_choice->permuter () != FREQ_DAWG_PERM) &&
    (word->best_choice->permuter () != USER_DAWG_PERM) &&
    (word->best_choice->permuter () != NUMBER_PERM)) {
    if (tessedit_adaption_debug) tprintf("word not in dawgs\n");
    return FALSE;
  }

  if (flags.bit (CHECK_ONE_ELL_CONFLICT) && one_ell_conflict (word, FALSE)) {
    if (tessedit_adaption_debug) tprintf("word has ell conflict\n");
    return FALSE;
  }

  if (flags.bit (CHECK_SPACES) &&
    (strchr(word->best_choice->unichar_string().string(), ' ') != NULL)) {
    if (tessedit_adaption_debug) tprintf("word contains spaces\n");
    return FALSE;
  }

  if (flags.bit (CHECK_AMBIG_WERD) &&
      word->best_choice->dangerous_ambig_found()) {
    if (tessedit_adaption_debug) tprintf("word is ambiguous\n");
    return FALSE;
  }

  if (tessedit_adaption_debug) {
    tprintf("returning status %d\n", status);
  }
  return status;
}
} // namespace tesseract
| C++ |
/**********************************************************************
* File: tessbox.cpp (Formerly tessbox.c)
* Description: Black boxed Tess for developing a resaljet.
* Author: Ray Smith
* Created: Thu Apr 23 11:03:36 BST 1992
*
* (C) Copyright 1992, Hewlett-Packard Ltd.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
#ifdef _MSC_VER
#pragma warning(disable:4244) // Conversion warnings
#endif
#include "mfoutline.h"
#include "tessbox.h"
#include "tesseractclass.h"
#define EXTERN
/**
* @name tess_segment_pass_n
*
* Segment a word using the pass_n conditions of the tess segmenter.
* @param pass_n pass number
* @param word word to do
*/
namespace tesseract {
void Tesseract::tess_segment_pass_n(int pass_n, WERD_RES *word) {
  int saved_enable_assoc = 0;
  int saved_chop_enable = 0;
  // Don't-chop words are recognized with association and chopping
  // temporarily disabled; the previous settings are restored afterwards.
  if (word->word->flag(W_DONT_CHOP)) {
    saved_enable_assoc = wordrec_enable_assoc;
    saved_chop_enable = chop_enable;
    wordrec_enable_assoc.set_value(0);
    chop_enable.set_value(0);
  }
  if (pass_n == 1) {
    set_pass1();
  } else {
    set_pass2();
  }
  recog_word(word);
  // Guarantee a best_choice even when recognition produced nothing.
  if (word->best_choice == NULL)
    word->SetupFake(*word->uch_set);
  if (word->word->flag(W_DONT_CHOP)) {
    wordrec_enable_assoc.set_value(saved_enable_assoc);
    chop_enable.set_value(saved_chop_enable);
  }
}
/**
* @name tess_acceptable_word
*
* @return true if the word is regarded as "good enough".
* @param word_choice after context
* @param raw_choice before context
*/
bool Tesseract::tess_acceptable_word(WERD_RES* word) {
  // The accept/reject decision is delegated entirely to the dictionary.
  Dict& dict = getDict();
  return dict.AcceptableResult(word);
}
/**
* @name tess_add_doc_word
*
* Add the given word to the document dictionary
*/
void Tesseract::tess_add_doc_word(WERD_CHOICE *word_choice) {
  // Record the choice in the document dictionary via the Dict object.
  Dict& dict = getDict();
  dict.add_document_word(*word_choice);
}
} // namespace tesseract
| C++ |
/**********************************************************************
* File: tesseract_cube_combiner.h
* Description: Declaration of the Tesseract & Cube results combiner Class
* Author: Ahmad Abdulkader
* Created: 2008
*
* (C) Copyright 2008, Google Inc.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
// The TesseractCubeCombiner class provides the functionality of combining
// the recognition results of Tesseract and Cube at the word level
#include <algorithm>
#include <string>
#include <vector>
#include <wctype.h>
#include "tesseract_cube_combiner.h"
#include "cube_object.h"
#include "cube_reco_context.h"
#include "cube_utils.h"
#include "neural_net.h"
#include "tesseractclass.h"
#include "word_altlist.h"
namespace tesseract {
// The combiner net is loaded lazily via LoadCombinerNet().
TesseractCubeCombiner::TesseractCubeCombiner(CubeRecoContext *cube_cntxt)
    : cube_cntxt_(cube_cntxt),
      combiner_net_(NULL) {
}
TesseractCubeCombiner::~TesseractCubeCombiner() {
  // delete on a NULL pointer is a no-op, so no guard is needed.
  delete combiner_net_;
  combiner_net_ = NULL;
}
// Loads the combiner neural net from <data_path><lang>.tesseract_cube.nn.
// Returns false if the file is missing, unreadable, or structurally
// invalid (the net must have exactly 2 outputs).
bool TesseractCubeCombiner::LoadCombinerNet() {
  ASSERT_HOST(cube_cntxt_);
  // Compute the path of the combiner net
  string data_path;
  cube_cntxt_->GetDataFilePath(&data_path);
  string net_file_name = data_path + cube_cntxt_->Lang() +
      ".tesseract_cube.nn";

  // Return false (quietly) if the file does not exist.
  FILE *fp = fopen(net_file_name.c_str(), "rb");
  if (fp == NULL)
    return false;
  else
    fclose(fp);

  // Load and validate net
  combiner_net_ = NeuralNet::FromFile(net_file_name);
  if (combiner_net_ == NULL) {
    // Fixed: message was missing its terminating newline, unlike the
    // sibling diagnostic below.
    tprintf("Could not read combiner net file %s\n", net_file_name.c_str());
    return false;
  } else if (combiner_net_->out_cnt() != 2) {
    tprintf("Invalid combiner net file %s! Output count != 2\n",
            net_file_name.c_str());
    delete combiner_net_;
    combiner_net_ = NULL;
    return false;
  }
  return true;
}
// Normalize a UTF-8 string. Converts the UTF-8 string to UTF32 and optionally
// strips punc and/or normalizes case and then converts back
string TesseractCubeCombiner::NormalizeString(const string &str,
                                              bool remove_punc,
                                              bool norm_case) {
  // Work in UTF-32 so that each element is exactly one character.
  string_32 str32;
  CubeUtils::UTF8ToUTF32(str.c_str(), &str32);

  // Filter/fold character by character.
  string_32 filtered32;
  for (int idx = 0; idx < str32.length(); idx++) {
    char_32 ch = str32[idx];
    // Drop punctuation when stripping is requested.
    if (remove_punc && iswpunct(ch) != 0) continue;
    // Lower-case alphabetic characters when case folding is requested.
    if (norm_case && iswalpha(ch)) {
      ch = towlower(ch);
    }
    filtered32.push_back(ch);
  }

  // Convert back to UTF-8.
  string result;
  CubeUtils::UTF32ToUTF8(filtered32.c_str(), &result);
  return result;
}
// Compares 2 strings optionally ignoring punctuation
int TesseractCubeCombiner::CompareStrings(const string &str1,
const string &str2,
bool ignore_punc,
bool ignore_case) {
if (!ignore_punc && !ignore_case) {
return str1.compare(str2);
}
string norm_str1 = NormalizeString(str1, ignore_punc, ignore_case);
string norm_str2 = NormalizeString(str2, ignore_punc, ignore_case);
return norm_str1.compare(norm_str2);
}
// Check if a string is a valid Tess dict word or not
bool TesseractCubeCombiner::ValidWord(const string &str) {
  // A strictly positive return from valid_word() means a dictionary hit.
  int word_rating =
      cube_cntxt_->TesseractObject()->getDict().valid_word(str.c_str());
  return word_rating > 0;
}
// Public method for computing the combiner features. The agreement
// output parameter will be true if both answers are identical,
// and false otherwise.
bool TesseractCubeCombiner::ComputeCombinerFeatures(const string &tess_str,
                                                    int tess_confidence,
                                                    CubeObject *cube_obj,
                                                    WordAltList *cube_alt_list,
                                                    vector<double> *features,
                                                    bool *agreement) {
  features->clear();
  *agreement = false;
  if (cube_alt_list == NULL || cube_alt_list->AltCount() <= 0)
    return false;

  // Get Cube's best string; return false if empty
  char_32 *cube_best_str32 = cube_alt_list->Alt(0);
  if (cube_best_str32 == NULL || CubeUtils::StrLen(cube_best_str32) < 1)
    return false;
  string cube_best_str;
  int cube_best_cost = cube_alt_list->AltCost(0);
  int cube_best_bigram_cost = 0;
  bool cube_best_bigram_cost_valid = true;
  if (cube_cntxt_->Bigrams())
    cube_best_bigram_cost = cube_cntxt_->Bigrams()->
        Cost(cube_best_str32, cube_cntxt_->CharacterSet());
  else
    cube_best_bigram_cost_valid = false;
  CubeUtils::UTF32ToUTF8(cube_best_str32, &cube_best_str);

  // Get Tesseract's UTF32 string
  string_32 tess_str32;
  CubeUtils::UTF8ToUTF32(tess_str.c_str(), &tess_str32);

  // Compute agreement flag
  *agreement = (tess_str.compare(cube_best_str) == 0);

  // Get Cube's second best string; if empty, return false
  // Fixed: initialize the pointer so it can never be read uninitialized.
  char_32 *cube_next_best_str32 = NULL;
  string cube_next_best_str;
  int cube_next_best_cost = WORST_COST;
  if (cube_alt_list->AltCount() > 1) {
    cube_next_best_str32 = cube_alt_list->Alt(1);
    if (cube_next_best_str32 == NULL ||
        CubeUtils::StrLen(cube_next_best_str32) == 0) {
      return false;
    }
    cube_next_best_cost = cube_alt_list->AltCost(1);
    CubeUtils::UTF32ToUTF8(cube_next_best_str32, &cube_next_best_str);
  }

  // Rank of Tesseract's top result in Cube's alternate list
  int tess_rank = 0;
  for (tess_rank = 0; tess_rank < cube_alt_list->AltCount(); tess_rank++) {
    string alt_str;
    CubeUtils::UTF32ToUTF8(cube_alt_list->Alt(tess_rank), &alt_str);
    if (alt_str == tess_str)
      break;
  }

  // Cube's cost for tesseract's result. Note that this modifies the
  // state of cube_obj, including its alternate list by calling RecognizeWord()
  int tess_cost = cube_obj->WordCost(tess_str.c_str());

  // Cube's bigram cost of Tesseract's string
  int tess_bigram_cost = 0;
  // Fixed: was declared int; it is a flag, matching
  // cube_best_bigram_cost_valid above.
  bool tess_bigram_cost_valid = true;
  if (cube_cntxt_->Bigrams())
    tess_bigram_cost = cube_cntxt_->Bigrams()->
        Cost(tess_str32.c_str(), cube_cntxt_->CharacterSet());
  else
    tess_bigram_cost_valid = false;

  // Tesseract confidence
  features->push_back(tess_confidence);
  // Cube cost of Tesseract string
  features->push_back(tess_cost);
  // Cube Rank of Tesseract string
  features->push_back(tess_rank);
  // length of Tesseract OCR string
  features->push_back(tess_str.length());
  // Tesseract OCR string in dictionary
  features->push_back(ValidWord(tess_str));
  if (tess_bigram_cost_valid) {
    // bigram cost of Tesseract string
    features->push_back(tess_bigram_cost);
  }
  // Cube tess_cost of Cube best string
  features->push_back(cube_best_cost);
  // Cube tess_cost of Cube next best string
  features->push_back(cube_next_best_cost);
  // length of Cube string
  features->push_back(cube_best_str.length());
  // Cube string in dictionary
  features->push_back(ValidWord(cube_best_str));
  if (cube_best_bigram_cost_valid) {
    // bigram cost of Cube string
    features->push_back(cube_best_bigram_cost);
  }
  // case-insensitive string comparison, including punctuation
  int compare_nocase_punc = CompareStrings(cube_best_str,
                                           tess_str, false, true);
  features->push_back(compare_nocase_punc == 0);
  // case-sensitive string comparison, ignoring punctuation
  int compare_case_nopunc = CompareStrings(cube_best_str,
                                           tess_str, true, false);
  features->push_back(compare_case_nopunc == 0);
  // case-insensitive string comparison, ignoring punctuation
  int compare_nocase_nopunc = CompareStrings(cube_best_str,
                                             tess_str, true, true);
  features->push_back(compare_nocase_nopunc == 0);
  return true;
}
// The CubeObject parameter is used for 2 purposes: 1) to retrieve
// cube's alt list, and 2) to compute cube's word cost for the
// tesseract result. The call to CubeObject::WordCost() modifies
// the object's alternate list, so previous state will be lost.
float TesseractCubeCombiner::CombineResults(WERD_RES *tess_res,
                                            CubeObject *cube_obj) {
  // Without a combiner net or a cube object there is nothing to combine:
  // tesseract wins with probability 1.0.
  if (combiner_net_ == NULL || cube_obj == NULL) {
    tprintf("Cube WARNING (TesseractCubeCombiner::CombineResults): "
            "Cube objects not initialized; defaulting to Tesseract\n");
    return 1.0;
  }

  // Use the CubeObject's cached alternate list, recognizing on demand.
  // An empty list also means tesseract wins with probability 1.0.
  WordAltList *alt_list = cube_obj->AlternateList();
  if (alt_list == NULL)
    alt_list = cube_obj->RecognizeWord();
  if (alt_list == NULL || alt_list->AltCount() <= 0) {
    tprintf("Cube WARNING (TesseractCubeCombiner::CombineResults): "
            "Cube returned no results; defaulting to Tesseract\n");
    return 1.0;
  }
  return CombineResults(tess_res, cube_obj, alt_list);
}
// The alt_list parameter is expected to have been extracted from the
// CubeObject that recognized the word to be combined. The cube_obj
// parameter passed may be either same instance or a separate instance to
// be used only by the combiner. In both cases, its alternate
// list will be modified by an internal call to RecognizeWord().
float TesseractCubeCombiner::CombineResults(WERD_RES *tess_res,
                                            CubeObject *cube_obj,
                                            WordAltList *cube_alt_list) {
  // Missing combiner, cube object, or cube results: tesseract wins with
  // probability 1.0.
  if (combiner_net_ == NULL || cube_obj == NULL ||
      cube_alt_list == NULL || cube_alt_list->AltCount() <= 0) {
    tprintf("Cube WARNING (TesseractCubeCombiner::CombineResults): "
            "Cube result cannot be retrieved; defaulting to Tesseract\n");
    return 1.0;
  }

  // Tesseract's result string and its certainty mapped to a confidence:
  // certainty [-20.0, 0.0] -> confidence [1, 100].
  string tess_str = tess_res->best_choice->unichar_string().string();
  int tess_confidence = MIN(100, MAX(1, static_cast<int>(
      100 + (5 * tess_res->best_choice->certainty()))));

  // Build the combiner feature vector. A failure, or full agreement
  // between the two engines, also means tesseract wins with 1.0.
  vector<double> features;
  bool agreement;
  bool combiner_success = ComputeCombinerFeatures(tess_str, tess_confidence,
                                                  cube_obj, cube_alt_list,
                                                  &features, &agreement);
  if (!combiner_success || agreement)
    return 1.0;

  // Run the net; output index 1 is the probability of the tesseract class.
  double net_out[2];
  if (!combiner_net_->FeedForward(&features[0], net_out))
    return 1.0;
  return net_out[1];
}
}
| C++ |
/******************************************************************
* File: control.cpp (Formerly control.c)
* Description: Module-independent matcher controller.
* Author: Ray Smith
* Created: Thu Apr 23 11:09:58 BST 1992
* ReHacked: Tue Sep 22 08:42:49 BST 1992 Phil Cheatle
*
* (C) Copyright 1992, Hewlett-Packard Ltd.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
#include <string.h>
#include <math.h>
#ifdef __UNIX__
#include <assert.h>
#include <unistd.h>
#include <errno.h>
#endif
#include <ctype.h>
#include "ocrclass.h"
#include "werdit.h"
#include "drawfx.h"
#include "tessbox.h"
#include "tessvars.h"
#include "pgedit.h"
#include "reject.h"
#include "fixspace.h"
#include "docqual.h"
#include "control.h"
#include "output.h"
#include "callcpp.h"
#include "globals.h"
#include "sorthelper.h"
#include "tesseractclass.h"
// Include automatically generated configuration file if running autoconf.
#ifdef HAVE_CONFIG_H
#include "config_auto.h"
#endif
#define MIN_FONT_ROW_COUNT 8
#define MAX_XHEIGHT_DIFF 3
const char* const kBackUpConfigFile = "tempconfigdata.config";
// Multiple of x-height to make a repeated word have spaces in it.
const double kRepcharGapThreshold = 0.5;
// Min believable x-height for any text when refitting as a fraction of
// original x-height
const double kMinRefitXHeightFraction = 0.5;
/**
* recog_pseudo_word
*
* Make a word from the selected blobs and run Tess on them.
*
* @param page_res recognise blobs
* @param selection_box within this box
*/
namespace tesseract {
void Tesseract::recog_pseudo_word(PAGE_RES* page_res,
                                  TBOX &selection_box) {
  // Build a fake word from the blobs in the selection, recognize it
  // interactively, then dispose of both the fake word and the iterator.
  PAGE_RES_IT* it = make_pseudo_word(page_res, selection_box);
  if (it == NULL) return;
  recog_interactive(it);
  it->DeleteCurrentWord();
  delete it;
}
/**
* recog_interactive
*
* Recognize a single word in interactive mode.
*
* @param block block
* @param row row of word
* @param word_res word to recognise
*/
BOOL8 Tesseract::recog_interactive(PAGE_RES_IT* pr_it) {
  // Run a pass-2 style recognition on the single word at pr_it.
  WordData word_data(*pr_it);
  SetupWordPassN(2, &word_data);
  classify_word_and_language(&Tesseract::classify_word_pass2, pr_it,
                             &word_data);
  if (tessedit_debug_quality_metrics) {
    // Report the quality metrics of the freshly recognized word.
    inT16 char_qual = 0;
    inT16 good_char_qual = 0;
    WERD_RES* word_res = pr_it->word();
    word_char_quality(word_res, pr_it->row()->row, &char_qual, &good_char_qual);
    tprintf("\n%d chars; word_blob_quality: %d; outline_errs: %d; "
            "char_quality: %d; good_char_quality: %d\n",
            word_res->reject_map.length(),
            word_blob_quality(word_res, pr_it->row()->row),
            word_outline_errs(word_res), char_qual, good_char_qual);
  }
  return TRUE;
}
// Helper function to check for a target word and handle it appropriately.
// Inspired by Jetsoft's requirement to process only single words on pass2
// and beyond.
// If word_config is not null:
// If the word_box and target_word_box overlap, read the word_config file
// else reset to previous config data.
// return true.
// else
// If the word_box and target_word_box overlap or pass <= 1, return true.
// Note that this function uses a fixed temporary file for storing the previous
// configs, so it is neither thread-safe, nor process-safe, but the assumption
// is that it will only be used for one debug window at a time.
//
// Since this function is used for debugging (and not to change OCR results)
// set only debug params from the word config file.
bool Tesseract::ProcessTargetWord(const TBOX& word_box,
const TBOX& target_word_box,
const char* word_config,
int pass) {
if (word_config != NULL) {
if (word_box.major_overlap(target_word_box)) {
if (backup_config_file_ == NULL) {
backup_config_file_ = kBackUpConfigFile;
FILE* config_fp = fopen(backup_config_file_, "wb");
ParamUtils::PrintParams(config_fp, params());
fclose(config_fp);
ParamUtils::ReadParamsFile(word_config,
SET_PARAM_CONSTRAINT_DEBUG_ONLY,
params());
}
} else {
if (backup_config_file_ != NULL) {
ParamUtils::ReadParamsFile(backup_config_file_,
SET_PARAM_CONSTRAINT_DEBUG_ONLY,
params());
backup_config_file_ = NULL;
}
}
} else if (pass > 1 && !word_box.major_overlap(target_word_box)) {
return false;
}
return true;
}
// If tesseract is to be run, sets the words up ready for it.
void Tesseract::SetupAllWordsPassN(int pass_n,
const TBOX* target_word_box,
const char* word_config,
PAGE_RES* page_res,
GenericVector<WordData>* words) {
// Prepare all the words.
PAGE_RES_IT page_res_it(page_res);
for (page_res_it.restart_page(); page_res_it.word() != NULL;
page_res_it.forward()) {
if (target_word_box == NULL ||
ProcessTargetWord(page_res_it.word()->word->bounding_box(),
*target_word_box, word_config, 1)) {
words->push_back(WordData(page_res_it));
}
}
// Setup all the words for recognition with polygonal approximation.
for (int w = 0; w < words->size(); ++w) {
SetupWordPassN(pass_n, &(*words)[w]);
if (w > 0) (*words)[w].prev_word = &(*words)[w - 1];
}
}
// Sets up the single word ready for whichever engine is to be run.
void Tesseract::SetupWordPassN(int pass_n, WordData* word) {
// Words already marked done are left untouched on pass 2.
if (pass_n == 1 || !word->word->done) {
if (pass_n == 1) {
word->word->SetupForRecognition(unicharset, this, BestPix(),
tessedit_ocr_engine_mode, NULL,
classify_bln_numeric_mode,
textord_use_cjk_fp_model,
poly_allow_detailed_fx,
word->row, word->block);
} else if (pass_n == 2) {
// TODO(rays) Should we do this on pass1 too?
word->word->caps_height = 0.0;
// Inherit the row's x-height when the word does not have its own.
if (word->word->x_height == 0.0f)
word->word->x_height = word->row->x_height();
}
// NOTE: <= is deliberate; see the comment on the extra iteration below.
for (int s = 0; s <= sub_langs_.size(); ++s) {
// The sub_langs_.size() entry is for the master language.
Tesseract* lang_t = s < sub_langs_.size() ? sub_langs_[s] : this;
// Each language gets its own retry copy of the word.
WERD_RES* word_res = new WERD_RES;
word_res->InitForRetryRecognition(*word->word);
word->lang_words.push_back(word_res);
// Cube doesn't get setup for pass2.
if (pass_n == 1 || lang_t->tessedit_ocr_engine_mode != OEM_CUBE_ONLY) {
word_res->SetupForRecognition(
lang_t->unicharset, lang_t, BestPix(),
lang_t->tessedit_ocr_engine_mode, NULL,
lang_t->classify_bln_numeric_mode,
lang_t->textord_use_cjk_fp_model,
lang_t->poly_allow_detailed_fx, word->row, word->block);
}
}
}
}
// Runs word recognition on all the words.
bool Tesseract::RecogAllWordsPassN(int pass_n, ETEXT_DESC* monitor,
                                   PAGE_RES_IT* pr_it,
                                   GenericVector<WordData>* words) {
  // TODO(rays) Before this loop can be parallelized (it would yield a massive
  // speed-up) all remaining member globals need to be converted to local/heap
  // (eg set_pass1 and set_pass2) and an intermediate adaption pass needs to be
  // added. The results will be significantly different with adaption on, and
  // deterioration will need investigation.
  pr_it->restart_page();
  for (int w = 0; w < words->size(); ++w) {
    WordData* word = &(*words)[w];
    if (w > 0) word->prev_word = &(*words)[w - 1];
    if (monitor != NULL) {
      monitor->ocr_alive = TRUE;
      // Pass 1 covers the 30-80% span of the progress bar, pass 2+ 80-90%.
      if (pass_n == 1)
        monitor->progress = 30 + 50 * w / words->size();
      else
        monitor->progress = 80 + 10 * w / words->size();
      if (monitor->deadline_exceeded() ||
          (monitor->cancel != NULL && (*monitor->cancel)(monitor->cancel_this,
                                                         words->size()))) {
        // Timeout. Fake out the rest of the words.
        for (; w < words->size(); ++w) {
          (*words)[w].word->SetupFake(unicharset);
        }
        return false;
      }
    }
    if (word->word->tess_failed) {
      int s;
      for (s = 0; s < word->lang_words.size() &&
           word->lang_words[s]->tess_failed; ++s) {}
      // If all are failed, skip it. Image words are skipped by this test.
      // Fixed: the original tested s > size(), which is unreachable because
      // the loop above stops at s == size(), so the skip never fired.
      if (s == word->lang_words.size()) continue;
    }
    // Sync pr_it with the wth WordData.
    while (pr_it->word() != NULL && pr_it->word() != word->word)
      pr_it->forward();
    ASSERT_HOST(pr_it->word() != NULL);
    WordRecognizer recognizer = pass_n == 1 ? &Tesseract::classify_word_pass1
                                            : &Tesseract::classify_word_pass2;
    classify_word_and_language(recognizer, pr_it, word);
    if (tessedit_dump_choices) {
      tprintf("Pass%d: %s [%s]\n", pass_n,
              word->word->best_choice->unichar_string().string(),
              word->word->best_choice->debug_string().string());
    }
    pr_it->forward();
  }
  return true;
}
/**
* recog_all_words()
*
* Walk the page_res, recognizing all the words.
* If monitor is not null, it is used as a progress monitor/timeout/cancel.
* If dopasses is 0, all recognition passes are run,
* 1 just pass 1, 2 passes2 and higher.
* If target_word_box is not null, special things are done to words that
* overlap the target_word_box:
* if word_config is not null, the word config file is read for just the
* target word(s), otherwise, on pass 2 and beyond ONLY the target words
* are processed (Jetsoft modification.)
* Returns false if we cancelled prematurely.
*
* @param page_res page structure
* @param monitor progress monitor
* @param word_config word_config file
* @param target_word_box specifies just to extract a rectangle
* @param dopasses 0 - all, 1 just pass 1, 2 passes 2 and higher
*/
bool Tesseract::recog_all_words(PAGE_RES* page_res,
                                ETEXT_DESC* monitor,
                                const TBOX* target_word_box,
                                const char* word_config,
                                int dopasses) {
  PAGE_RES_IT page_res_it(page_res);

  // Minimal-rejection mode: force the test-adaption and minimal-rejection
  // variables on before any pass runs.
  if (tessedit_minimal_rej_pass1) {
    tessedit_test_adaption.set_value (TRUE);
    tessedit_minimal_rejection.set_value (TRUE);
  }

  if (dopasses==0 || dopasses==1) {
    page_res_it.restart_page();
    // ****************** Pass 1 *******************

    // Clear adaptive classifier at the beginning of the page if it is full.
    // This is done only at the beginning of the page to ensure that the
    // classifier is not reset at an arbitrary point while processing the page,
    // which would cripple Passes 2+ if the reset happens towards the end of
    // Pass 1 on a page with very difficult text.
    // TODO(daria): preemptively clear the classifier if it is almost full.
    if (AdaptiveClassifierIsFull()) ResetAdaptiveClassifierInternal();
    // Now check the sub-langs as well.
    for (int i = 0; i < sub_langs_.size(); ++i) {
      if (sub_langs_[i]->AdaptiveClassifierIsFull())
        sub_langs_[i]->ResetAdaptiveClassifierInternal();
    }
    // Set up all words ready for recognition, so that if parallelism is on
    // all the input and output classes are ready to run the classifier.
    GenericVector<WordData> words;
    SetupAllWordsPassN(1, target_word_box, word_config, page_res, &words);
    if (tessedit_parallelize) {
      PrerecAllWordsPar(words);
    }

    // Reset the page-level quality statistics before accumulating them
    // during pass 1 and the rejection passes.
    stats_.word_count = words.size();

    stats_.dict_words = 0;
    stats_.doc_blob_quality = 0;
    stats_.doc_outline_errs = 0;
    stats_.doc_char_quality = 0;
    stats_.good_char_count = 0;
    stats_.doc_good_char_quality = 0;

    most_recently_used_ = this;
    // Run pass 1 word recognition.
    if (!RecogAllWordsPassN(1, monitor, &page_res_it, &words)) return false;
    // Pass 1 post-processing.
    for (page_res_it.restart_page(); page_res_it.word() != NULL;
         page_res_it.forward()) {
      // Leader (repeated-character) words get their classifications
      // homogenized instead of the normal bookkeeping below.
      if (page_res_it.word()->word->flag(W_REP_CHAR)) {
        fix_rep_char(&page_res_it);
        continue;
      }

      // Count dict words.
      if (page_res_it.word()->best_choice->permuter() == USER_DAWG_PERM)
        ++(stats_.dict_words);

      // Update misadaption log (we only need to do it on pass 1, since
      // adaption only happens on this pass).
      if (page_res_it.word()->blamer_bundle != NULL &&
          page_res_it.word()->blamer_bundle->misadaption_debug().length() > 0) {
        page_res->misadaption_log.push_back(
            page_res_it.word()->blamer_bundle->misadaption_debug());
      }
    }
  }

  if (dopasses == 1) return true;

  // ****************** Pass 2 *******************
  // Pass 2 only runs when adaption is enabled (and not in test-adaption
  // mode) and at least one language uses the Tesseract engine.
  if (tessedit_tess_adaption_mode != 0x0 && !tessedit_test_adaption &&
      AnyTessLang()) {
    page_res_it.restart_page();
    GenericVector<WordData> words;
    SetupAllWordsPassN(2, target_word_box, word_config, page_res, &words);
    if (tessedit_parallelize) {
      PrerecAllWordsPar(words);
    }
    most_recently_used_ = this;
    // Run pass 2 word recognition.
    if (!RecogAllWordsPassN(2, monitor, &page_res_it, &words)) return false;
  }

  // The next passes can only be run if tesseract has been used, as cube
  // doesn't set all the necessary outputs in WERD_RES.
  if (AnyTessLang()) {
    // ****************** Pass 3 *******************
    // Fix fuzzy spaces.
    set_global_loc_code(LOC_FUZZY_SPACE);

    if (!tessedit_test_adaption && tessedit_fix_fuzzy_spaces
        && !tessedit_word_for_word && !right_to_left())
      fix_fuzzy_spaces(monitor, stats_.word_count, page_res);

    // ****************** Pass 4 *******************
    if (tessedit_enable_dict_correction) dictionary_correction_pass(page_res);
    if (tessedit_enable_bigram_correction) bigram_correction_pass(page_res);

    // ****************** Pass 5,6 *******************
    rejection_passes(page_res, monitor, target_word_box, word_config);

    // ****************** Pass 7 *******************
    // Cube combiner.
    // If cube is loaded and its combiner is present, run it.
    if (tessedit_ocr_engine_mode == OEM_TESSERACT_CUBE_COMBINED) {
      run_cube_combiner(page_res);
    }

    // ****************** Pass 8 *******************
    font_recognition_pass(page_res);

    // ****************** Pass 9 *******************
    // Check the correctness of the final results.
    blamer_pass(page_res);
    script_pos_pass(page_res);
  }

  // Write results pass.
  set_global_loc_code(LOC_WRITE_RESULTS);
  // This is now redundant, but retained commented so show how to obtain
  // bounding boxes and style information.

  // changed by jetsoft
  // needed for dll to output memory structure
  if ((dopasses == 0 || dopasses == 2) && (monitor || tessedit_write_unlv))
    output_pass(page_res_it, target_word_box);
  // end jetsoft
  PageSegMode pageseg_mode = static_cast<PageSegMode>(
      static_cast<int>(tessedit_pageseg_mode));
  textord_.CleanupSingleRowResult(pageseg_mode, page_res);

  // Remove empty words, as these mess up the result iterators.
  for (page_res_it.restart_page(); page_res_it.word() != NULL;
       page_res_it.forward()) {
    WERD_RES* word = page_res_it.word();
    if (word->best_choice == NULL || word->best_choice->length() == 0)
      page_res_it.DeleteCurrentWord();
  }

  if (monitor != NULL) {
    monitor->progress = 100;
  }
  return true;
}
// Pass 4 helper: attempts to improve pairs of adjacent words using a bigram
// model. For each pair of consecutive words that share the same unicharset,
// if the current best choices do not form a valid bigram, the alternative
// choice lists of both words are searched for the best-rated pair that does,
// and the best choices are replaced when a genuinely different pair is found.
// Debug output is controlled by tessedit_bigram_debug.
void Tesseract::bigram_correction_pass(PAGE_RES *page_res) {
  PAGE_RES_IT word_it(page_res);

  WERD_RES *w_prev = NULL;
  WERD_RES *w = word_it.word();
  while (1) {
    w_prev = w;
    while (word_it.forward() != NULL &&
           (!word_it.word() || word_it.word()->part_of_combo)) {
      // advance word_it, skipping over parts of combos
    }
    if (!word_it.word()) break;
    w = word_it.word();
    // Only consider pairs that share a unicharset (same language model).
    if (!w || !w_prev || w->uch_set != w_prev->uch_set) {
      continue;
    }
    if (w_prev->word->flag(W_REP_CHAR) || w->word->flag(W_REP_CHAR)) {
      if (tessedit_bigram_debug) {
        tprintf("Skipping because one of the words is W_REP_CHAR\n");
      }
      continue;
    }
    // Two words sharing the same language model, excellent!
    GenericVector<WERD_CHOICE *> overrides_word1;
    GenericVector<WERD_CHOICE *> overrides_word2;

    STRING orig_w1_str = w_prev->best_choice->unichar_string();
    STRING orig_w2_str = w->best_choice->unichar_string();
    // Strip superscripts from both best choices before the bigram lookup.
    WERD_CHOICE prev_best(w->uch_set);
    {
      int w1start, w1end;
      w_prev->best_choice->GetNonSuperscriptSpan(&w1start, &w1end);
      prev_best = w_prev->best_choice->shallow_copy(w1start, w1end);
    }
    WERD_CHOICE this_best(w->uch_set);
    {
      int w2start, w2end;
      w->best_choice->GetNonSuperscriptSpan(&w2start, &w2end);
      this_best = w->best_choice->shallow_copy(w2start, w2end);
    }

    if (w->tesseract->getDict().valid_bigram(prev_best, this_best)) {
      if (tessedit_bigram_debug) {
        tprintf("Top choice \"%s %s\" verified by bigram model.\n",
                orig_w1_str.string(), orig_w2_str.string());
      }
      continue;
    }
    if (tessedit_bigram_debug > 2) {
      tprintf("Examining alt choices for \"%s %s\".\n",
              orig_w1_str.string(), orig_w2_str.string());
    }
    if (tessedit_bigram_debug > 1) {
      if (!w_prev->best_choices.singleton()) {
        w_prev->PrintBestChoices();
      }
      if (!w->best_choices.singleton()) {
        w->PrintBestChoices();
      }
    }
    // Search every (choice1, choice2) pair for valid bigrams, tracking the
    // pair with the lowest combined rating (lower rating = better).
    float best_rating = 0.0;
    int best_idx = 0;
    WERD_CHOICE_IT prev_it(&w_prev->best_choices);
    for (prev_it.mark_cycle_pt(); !prev_it.cycled_list(); prev_it.forward()) {
      WERD_CHOICE *p1 = prev_it.data();
      WERD_CHOICE strip1(w->uch_set);
      {
        int p1start, p1end;
        p1->GetNonSuperscriptSpan(&p1start, &p1end);
        strip1 = p1->shallow_copy(p1start, p1end);
      }
      WERD_CHOICE_IT w_it(&w->best_choices);
      for (w_it.mark_cycle_pt(); !w_it.cycled_list(); w_it.forward()) {
        WERD_CHOICE *p2 = w_it.data();
        WERD_CHOICE strip2(w->uch_set);
        {
          int p2start, p2end;
          p2->GetNonSuperscriptSpan(&p2start, &p2end);
          strip2 = p2->shallow_copy(p2start, p2end);
        }
        if (w->tesseract->getDict().valid_bigram(strip1, strip2)) {
          overrides_word1.push_back(p1);
          overrides_word2.push_back(p2);
          // The first match always initializes best_rating.
          if (overrides_word1.size() == 1 ||
              p1->rating() + p2->rating() < best_rating) {
            best_rating = p1->rating() + p2->rating();
            best_idx = overrides_word1.size() - 1;
          }
        }
      }
    }
    if (overrides_word1.size() >= 1) {
      // Excellent, we have some bigram matches.
      if (EqualIgnoringCaseAndTerminalPunct(*w_prev->best_choice,
                                            *overrides_word1[best_idx]) &&
          EqualIgnoringCaseAndTerminalPunct(*w->best_choice,
                                            *overrides_word2[best_idx])) {
        if (tessedit_bigram_debug > 1) {
          tprintf("Top choice \"%s %s\" verified (sans case) by bigram "
                  "model.\n", orig_w1_str.string(), orig_w2_str.string());
        }
        continue;
      }
      STRING new_w1_str = overrides_word1[best_idx]->unichar_string();
      STRING new_w2_str = overrides_word2[best_idx]->unichar_string();
      if (new_w1_str != orig_w1_str) {
        w_prev->ReplaceBestChoice(overrides_word1[best_idx]);
      }
      if (new_w2_str != orig_w2_str) {
        w->ReplaceBestChoice(overrides_word2[best_idx]);
      }
      if (tessedit_bigram_debug > 0) {
        STRING choices_description;
        int num_bigram_choices
            = overrides_word1.size() * overrides_word2.size();
        if (num_bigram_choices == 1) {
          choices_description = "This was the unique bigram choice.";
        } else {
          if (tessedit_bigram_debug > 1) {
            STRING bigrams_list;
            const int kMaxChoicesToPrint = 20;
            for (int i = 0; i < overrides_word1.size() &&
                 i < kMaxChoicesToPrint; i++) {
              if (i > 0) { bigrams_list += ", "; }
              WERD_CHOICE *p1 = overrides_word1[i];
              WERD_CHOICE *p2 = overrides_word2[i];
              bigrams_list += p1->unichar_string() + " " + p2->unichar_string();
            }
            // BUG FIX: the truncation marker used to be appended inside the
            // loop under "i == kMaxChoicesToPrint", which can never hold
            // since the loop is bounded by i < kMaxChoicesToPrint. Append
            // it after the loop when the list was actually truncated.
            if (overrides_word1.size() > kMaxChoicesToPrint) {
              bigrams_list += " ...";
            }
            choices_description = "There were many choices: {";
            choices_description += bigrams_list;
            choices_description += "}";
          } else {
            choices_description.add_str_int("There were ", num_bigram_choices);
            choices_description += " compatible bigrams.";
          }
        }
        tprintf("Replaced \"%s %s\" with \"%s %s\" with bigram model. %s\n",
                orig_w1_str.string(), orig_w2_str.string(),
                new_w1_str.string(), new_w2_str.string(),
                choices_description.string());
      }
    }
  }
}
// Passes 5 and 6: gathers per-word quality/rejection statistics over the
// whole page (pass 5), then applies document/block-level quality-based
// rejection (pass 6). Both passes are skipped in test-adaption mode.
void Tesseract::rejection_passes(PAGE_RES* page_res,
                                 ETEXT_DESC* monitor,
                                 const TBOX* target_word_box,
                                 const char* word_config) {
  PAGE_RES_IT page_res_it(page_res);
  // ****************** Pass 5 *******************
  // Gather statistics on rejects.
  int word_index = 0;
  while (!tessedit_test_adaption && page_res_it.word() != NULL) {
    set_global_loc_code(LOC_MM_ADAPT);
    WERD_RES* word = page_res_it.word();
    word_index++;
    if (monitor != NULL) {
      monitor->ocr_alive = TRUE;
      // This pass covers the final 95%-100% of the progress range.
      monitor->progress = 95 + 5 * word_index / stats_.word_count;
    }
    if (word->rebuild_word == NULL) {
      // Word was not processed by tesseract.
      page_res_it.forward();
      continue;
    }
    check_debug_pt(word, 70);

    // changed by jetsoft
    // specific to its needs to extract one word when need
    if (target_word_box &&
        !ProcessTargetWord(word->word->bounding_box(),
                           *target_word_box, word_config, 4)) {
      page_res_it.forward();
      continue;
    }
    // end jetsoft

    page_res_it.rej_stat_word();
    // Accumulate per-word quality metrics into the page-level stats_.
    int chars_in_word = word->reject_map.length();
    int rejects_in_word = word->reject_map.reject_count();

    int blob_quality = word_blob_quality(word, page_res_it.row()->row);
    stats_.doc_blob_quality += blob_quality;
    int outline_errs = word_outline_errs(word);
    stats_.doc_outline_errs += outline_errs;
    inT16 all_char_quality;
    inT16 accepted_all_char_quality;
    word_char_quality(word, page_res_it.row()->row,
                      &all_char_quality, &accepted_all_char_quality);
    stats_.doc_char_quality += all_char_quality;
    uinT8 permuter_type = word->best_choice->permuter();
    // Only dictionary words contribute to the "good" character counts.
    if ((permuter_type == SYSTEM_DAWG_PERM) ||
        (permuter_type == FREQ_DAWG_PERM) ||
        (permuter_type == USER_DAWG_PERM)) {
      stats_.good_char_count += chars_in_word - rejects_in_word;
      stats_.doc_good_char_quality += accepted_all_char_quality;
    }
    check_debug_pt(word, 80);
    // Reject the whole word when it has no good blobs and at least as many
    // outline errors as characters.
    if (tessedit_reject_bad_qual_wds &&
        (blob_quality == 0) && (outline_errs >= chars_in_word))
      word->reject_map.rej_word_bad_quality();
    check_debug_pt(word, 90);
    page_res_it.forward();
  }

  if (tessedit_debug_quality_metrics) {
    tprintf
      ("QUALITY: num_chs= %d  num_rejs= %d %5.3f blob_qual= %d %5.3f"
       " outline_errs= %d %5.3f char_qual= %d %5.3f good_ch_qual= %d %5.3f\n",
       page_res->char_count, page_res->rej_count,
       page_res->rej_count / static_cast<float>(page_res->char_count),
       stats_.doc_blob_quality,
       stats_.doc_blob_quality / static_cast<float>(page_res->char_count),
       stats_.doc_outline_errs,
       stats_.doc_outline_errs / static_cast<float>(page_res->char_count),
       stats_.doc_char_quality,
       stats_.doc_char_quality / static_cast<float>(page_res->char_count),
       stats_.doc_good_char_quality,
       (stats_.good_char_count > 0) ?
       (stats_.doc_good_char_quality /
        static_cast<float>(stats_.good_char_count)) : 0.0);
  }
  // A document is "good quality" only if all four page-level ratios pass
  // their configured thresholds.
  BOOL8 good_quality_doc =
    ((page_res->rej_count / static_cast<float>(page_res->char_count)) <=
     quality_rej_pc) &&
    (stats_.doc_blob_quality / static_cast<float>(page_res->char_count) >=
     quality_blob_pc) &&
    (stats_.doc_outline_errs / static_cast<float>(page_res->char_count) <=
     quality_outline_pc) &&
    (stats_.doc_char_quality / static_cast<float>(page_res->char_count) >=
     quality_char_pc);

  // ****************** Pass 6 *******************
  // Do whole document or whole block rejection pass
  if (!tessedit_test_adaption) {
    set_global_loc_code(LOC_DOC_BLK_REJ);
    quality_based_rejection(page_res_it, good_quality_doc);
  }
}
// Pass 9 helper: gives every word a last chance to have its error blamed
// on a specific stage, tallies the blame reasons page-wide, and prints the
// tally (plus the misadaption log, when non-empty).
// No-op unless wordrec_run_blamer is set.
void Tesseract::blamer_pass(PAGE_RES* page_res) {
  if (!wordrec_run_blamer) return;
  // Assign last-chance blame and tally the reason for each word.
  PAGE_RES_IT it(page_res);
  for (it.restart_page(); it.word() != NULL; it.forward()) {
    WERD_RES* word_res = it.word();
    BlamerBundle::LastChanceBlame(wordrec_debug_blamer, word_res);
    ++page_res->blame_reasons[
        word_res->blamer_bundle->incorrect_result_reason()];
  }
  // Print one line per possible reason with its count.
  tprintf("Blame reasons:\n");
  for (int reason = 0; reason < IRR_NUM_REASONS; ++reason) {
    tprintf("%s %d\n",
            BlamerBundle::IncorrectReasonName(
                static_cast<IncorrectResultReason>(reason)),
            page_res->blame_reasons[reason]);
  }
  // Dump the accumulated misadaption messages, if there are any.
  if (page_res->misadaption_log.length() > 0) {
    tprintf("Misadaption log:\n");
    for (int entry = 0; entry < page_res->misadaption_log.length(); ++entry) {
      tprintf("%s\n", page_res->misadaption_log[entry].string());
    }
  }
}
// Sets script positions and detects smallcaps on all output words.
void Tesseract::script_pos_pass(PAGE_RES* page_res) {
  PAGE_RES_IT page_res_it(page_res);
  for (page_res_it.restart_page(); page_res_it.word() != NULL;
       page_res_it.forward()) {
    WERD_RES* word = page_res_it.word();
    if (word->word->flag(W_REP_CHAR)) {
      // NOTE(review): this explicit forward() combines with the loop's own
      // forward() to advance TWO words after a W_REP_CHAR word, so the word
      // immediately following a leader also skips this pass — confirm that
      // is intended.
      page_res_it.forward();
      continue;
    }
    float x_height = page_res_it.block()->block->x_height();
    float word_x_height = word->x_height;
    // Clamp an out-of-range word x-height to the midpoint of the best
    // choice's allowed range.
    if (word_x_height < word->best_choice->min_x_height() ||
        word_x_height > word->best_choice->max_x_height()) {
      word_x_height = (word->best_choice->min_x_height() +
          word->best_choice->max_x_height()) / 2.0f;
    }
    // Test for small caps. Word capheight must be close to block xheight,
    // and word must contain no lower case letters, and at least one upper case.
    double small_cap_xheight = x_height * kXHeightCapRatio;
    double small_cap_delta = (x_height - small_cap_xheight) / 2.0;
    if (word->uch_set->script_has_xheight() &&
        small_cap_xheight - small_cap_delta <= word_x_height &&
        word_x_height <= small_cap_xheight + small_cap_delta) {
      // Scan for upper/lower.
      int num_upper = 0;
      int num_lower = 0;
      for (int i = 0; i < word->best_choice->length(); ++i) {
        if (word->uch_set->get_isupper(word->best_choice->unichar_id(i)))
          ++num_upper;
        else if (word->uch_set->get_islower(word->best_choice->unichar_id(i)))
          ++num_lower;
      }
      if (num_upper > 0 && num_lower == 0)
        word->small_caps = true;
    }
    word->SetScriptPositions();
  }
}
// Factored helper: folds the word at the given index into the running
// evaluation (rating sum, worst certainty, bad/dictionary flags) and
// reports its right edge plus the left edge of the following word.
// An out-of-range index marks the run as bad with no valid permuter.
static void EvaluateWord(const PointerVector<WERD_RES>& words, int index,
                         float* rating, float* certainty, bool* bad,
                         bool* valid_permuter, int* right, int* next_left) {
  // Defaults for the edges: an empty/overrun slot never overlaps anything.
  *right = -MAX_INT32;
  *next_left = MAX_INT32;
  if (index >= words.size()) {
    *valid_permuter = false;
    *bad = true;
    return;
  }
  WERD_CHOICE* choice = words[index]->best_choice;
  if (choice == NULL) {
    // Missing best choice poisons the whole run.
    *bad = true;
  } else {
    *rating += choice->rating();
    *certainty = MIN(*certainty, choice->certainty());
    if (!Dict::valid_word_permuter(choice->permuter(), false))
      *valid_permuter = false;
  }
  *right = words[index]->word->bounding_box().right();
  if (index + 1 < words.size())
    *next_left = words[index + 1]->word->bounding_box().left();
}
// Helper chooses the best combination of words, transferring good ones from
// new_words to best_words. To win, a new word must have (better rating and
// certainty) or (better permuter status and rating within rating ratio and
// certainty within certainty margin) than current best.
// All the new_words are consumed (moved to best_words or deleted.)
// The return value is the number of new_words used minus the number of
// best_words that remain in the output.
static int SelectBestWords(double rating_ratio,
                           double certainty_margin,
                           bool debug,
                           PointerVector<WERD_RES>* new_words,
                           PointerVector<WERD_RES>* best_words) {
  // Process the smallest groups of words that have an overlapping word
  // boundary at the end.
  GenericVector<WERD_RES*> out_words;
  // Index into each word vector (best, new).
  int b = 0, n = 0;
  int num_best = 0, num_new = 0;
  while (b < best_words->size() || n < new_words->size()) {
    // Start of the current run in each.
    int start_b = b, start_n = n;
    // Rating of the current run in each.
    float b_rating = 0.0f, n_rating = 0.0f;
    // Certainty of the current run in each.
    float b_certainty = 0.0f, n_certainty = 0.0f;
    // True if any word is missing its best choice.
    bool b_bad = false, n_bad = false;
    // True if all words have a valid permuter.
    bool b_valid_permuter = true, n_valid_permuter = true;

    // Grow both runs until their word boundaries line up: the runs end at
    // the first position where neither sequence's next word overlaps the
    // other's boundary.
    while (b < best_words->size() || n < new_words->size()) {
      int b_right = -MAX_INT32;
      int next_b_left = MAX_INT32;
      EvaluateWord(*best_words, b, &b_rating, &b_certainty, &b_bad,
                   &b_valid_permuter, &b_right, &next_b_left);
      int n_right = -MAX_INT32;
      int next_n_left = MAX_INT32;
      EvaluateWord(*new_words, n, &n_rating, &n_certainty, &n_bad,
                   &n_valid_permuter, &n_right, &next_n_left);
      if (MAX(b_right, n_right) < MIN(next_b_left, next_n_left)) {
        // The word breaks overlap. [start_b,b] and [start_n, n] match.
        break;
      }
      // Keep searching for the matching word break.
      if ((b_right < n_right && b < best_words->size()) ||
          n == new_words->size())
        ++b;
      else
        ++n;
    }
    // Decide which run wins: new must be strictly better, or have a valid
    // dictionary permuter with rating/certainty within the given margins.
    bool new_better = false;
    if (!n_bad && (b_bad || (n_certainty > b_certainty &&
                             n_rating < b_rating) ||
                   (!b_valid_permuter && n_valid_permuter &&
                    n_rating < b_rating * rating_ratio &&
                    n_certainty > b_certainty - certainty_margin))) {
      // New is better.
      for (int i = start_n; i <= n; ++i) {
        out_words.push_back((*new_words)[i]);
        (*new_words)[i] = NULL;
        ++num_new;
      }
      new_better = true;
    } else if (!b_bad) {
      // Current best is better.
      for (int i = start_b; i <= b; ++i) {
        out_words.push_back((*best_words)[i]);
        (*best_words)[i] = NULL;
        ++num_best;
      }
    }
    int end_b = b < best_words->size() ? b + 1 : b;
    int end_n = n < new_words->size() ? n + 1 : n;
    if (debug) {
      tprintf("%d new words %s than %d old words: r: %g v %g c: %g v %g"
              " valid dict: %d v %d\n",
              end_n - start_n, new_better ? "better" : "worse",
              end_b - start_b, n_rating, b_rating,
              n_certainty, b_certainty, n_valid_permuter, b_valid_permuter);
    }
    // Move on to the next group.
    b = end_b;
    n = end_n;
  }
  // Transfer from out_words to best_words.
  best_words->clear();
  for (int i = 0; i < out_words.size(); ++i)
    best_words->push_back(out_words[i]);
  return num_new - num_best;
}
// Helper to recognize the word using the given (language-specific) tesseract.
// Returns positive if this recognizer found more new best words than the
// number kept from best_words.
int Tesseract::RetryWithLanguage(const WordData& word_data,
                                 WordRecognizer recognizer,
                                 WERD_RES** in_word,
                                 PointerVector<WERD_RES>* best_words) {
  bool debugging = classify_debug_level || cube_debug_level;
  if (debugging) {
    tprintf("Trying word using lang %s, oem %d\n",
            lang.string(), static_cast<int>(tessedit_ocr_engine_mode));
  }
  // Run the recognizer on the word.
  PointerVector<WERD_RES> new_words;
  (this->*recognizer)(word_data, in_word, &new_words);
  if (new_words.empty()) {
    // The classifier left its result in the input word rather than
    // producing fresh output, so take ownership of the input instead.
    new_words.push_back(*in_word);
    *in_word = NULL;
  }
  if (debugging) {
    for (int w = 0; w < new_words.size(); ++w) {
      new_words[w]->DebugTopChoice("Lang result");
    }
  }
  // Initial version is a bit of a hack based on better certainty and rating
  // (to reduce false positives from cube) or a dictionary vs non-dictionary
  // word.
  return SelectBestWords(classify_max_rating_ratio,
                         classify_max_certainty_margin,
                         debugging, &new_words, best_words);
}
// Helper returns true if all the words are acceptable.
static bool WordsAcceptable(const PointerVector<WERD_RES>& words) {
for (int w = 0; w < words.size(); ++w) {
if (words[w]->tess_failed || !words[w]->tess_accepted) return false;
}
return true;
}
// Generic function for classifying a word. Can be used either for pass1 or
// pass2 according to the function passed to recognizer.
// word_data holds the word to be recognized, and its block and row, and
// pr_it points to the word as well, in case we are running LSTM and it wants
// to output multiple words.
// Recognizes in the current language, and if successful that is all.
// If recognition was not successful, tries all available languages until
// it gets a successful result or runs out of languages. Keeps the best result.
void Tesseract::classify_word_and_language(WordRecognizer recognizer,
                                           PAGE_RES_IT* pr_it,
                                           WordData* word_data) {
  // Best result so far.
  PointerVector<WERD_RES> best_words;
  // Points to the best result. May be word or in lang_words.
  WERD_RES* word = word_data->word;
  clock_t start_t = clock();
  if (classify_debug_level || cube_debug_level) {
    tprintf("%s word with lang %s at:",
            word->done ? "Already done" : "Processing",
            most_recently_used_->lang.string());
    word->word->bounding_box().print();
  }
  if (word->done) {
    // If done on pass1, leave it as-is.
    if (!word->tess_failed)
      most_recently_used_ = word->tesseract;
    return;
  }
  // Map most_recently_used_ to its index in lang_words; the entry at
  // sub_langs_.size() belongs to the master language (this).
  int sub = sub_langs_.size();
  if (most_recently_used_ != this) {
    // Get the index of the most_recently_used_.
    for (sub = 0; sub < sub_langs_.size() &&
         most_recently_used_ != sub_langs_[sub]; ++sub) {}
  }
  // Try the most recently successful language first, on locality grounds.
  most_recently_used_->RetryWithLanguage(
      *word_data, recognizer, &word_data->lang_words[sub], &best_words);
  Tesseract* best_lang_tess = most_recently_used_;
  if (!WordsAcceptable(best_words)) {
    // Try all the other languages to see if they are any better.
    if (most_recently_used_ != this &&
        this->RetryWithLanguage(*word_data, recognizer,
                                &word_data->lang_words[sub_langs_.size()],
                                &best_words) > 0) {
      best_lang_tess = this;
    }
    for (int i = 0; !WordsAcceptable(best_words) && i < sub_langs_.size();
         ++i) {
      if (most_recently_used_ != sub_langs_[i] &&
          sub_langs_[i]->RetryWithLanguage(*word_data, recognizer,
                                           &word_data->lang_words[i],
                                           &best_words) > 0) {
        best_lang_tess = sub_langs_[i];
      }
    }
  }
  most_recently_used_ = best_lang_tess;
  if (!best_words.empty()) {
    if (best_words.size() == 1 && !best_words[0]->combination) {
      // Move the best single result to the main word.
      word_data->word->ConsumeWordResults(best_words[0]);
    } else {
      // Words came from LSTM, and must be moved to the PAGE_RES properly.
      word_data->word = best_words.back();
      pr_it->ReplaceCurrentWord(&best_words);
    }
    ASSERT_HOST(word_data->word->box_word != NULL);
  } else {
    tprintf("no best words!!\n");
  }
  clock_t ocr_t = clock();
  if (tessedit_timing_debug) {
    tprintf("%s (ocr took %.2f sec)\n",
            word->best_choice->unichar_string().string(),
            static_cast<double>(ocr_t-start_t)/CLOCKS_PER_SEC);
  }
}
/**
 * classify_word_pass1
 *
 * Baseline normalize the word and pass it to Tess.
 * Runs the pass-1 match, then (when the word qualifies) trains the
 * adaptive classifier on it and adds it to the document dictionary.
 */
void Tesseract::classify_word_pass1(const WordData& word_data,
                                    WERD_RES** in_word,
                                    PointerVector<WERD_RES>* out_words) {
  ROW* row = word_data.row;
  BLOCK* block = word_data.block;
  prev_word_best_choice_ = word_data.prev_word != NULL
      ? word_data.prev_word->word->best_choice : NULL;
  // If we only intend to run cube - run it and return.
  if (tessedit_ocr_engine_mode == OEM_CUBE_ONLY) {
    cube_word_pass1(block, row, *in_word);
    return;
  }
  WERD_RES* word = *in_word;
  match_word_pass_n(1, word, row, block);
  if (!word->tess_failed && !word->word->flag(W_REP_CHAR)) {
    word->tess_would_adapt = AdaptableWord(word);
    bool adapt_ok = word_adaptable(word, tessedit_tess_adaption_mode);

    if (adapt_ok) {
      // Send word to adaptive classifier for training.
      word->BestChoiceToCorrectText();
      LearnWord(NULL, word);
      // Mark misadaptions if running blamer.
      if (word->blamer_bundle != NULL) {
        word->blamer_bundle->SetMisAdaptionDebug(word->best_choice,
                                                 wordrec_debug_blamer);
      }
    }

    // Unambiguous words may be added to the document dictionary.
    if (tessedit_enable_doc_dict && !word->IsAmbiguous())
      tess_add_doc_word(word->best_choice);
  }
}
// Helper to report the result of the xheight fix: prints the old and new
// best choice (with their reject maps), whether each x-height was guessed
// or certain, whether doubt remains, and whether the new word was accepted.
// Debug output only; no state is modified.
void Tesseract::ReportXhtFixResult(bool accept_new_word, float new_x_ht,
                                   WERD_RES* word, WERD_RES* new_word) {
  tprintf("New XHT Match:%s = %s ",
          word->best_choice->unichar_string().string(),
          word->best_choice->debug_string().string());
  word->reject_map.print(debug_fp);
  tprintf(" -> %s = %s ",
          new_word->best_choice->unichar_string().string(),
          new_word->best_choice->debug_string().string());
  new_word->reject_map.print(debug_fp);
  tprintf(" %s->%s %s %s\n",
          word->guessed_x_ht ? "GUESS" : "CERT",
          new_word->guessed_x_ht ? "GUESS" : "CERT",
          new_x_ht > 0.1 ? "STILL DOUBT" : "OK",
          accept_new_word ? "ACCEPTED" : "");
}
// Run the x-height fix-up, based on min/max top/bottom information in
// unicharset.
// Returns true if the word was changed.
// See the comment in fixxht.cpp for a description of the overall process.
bool Tesseract::TrainedXheightFix(WERD_RES *word, BLOCK* block, ROW *row) {
  bool accept_new_x_ht = false;
  // Nothing to fix if no character tops misfit the current x-height.
  int original_misfits = CountMisfitTops(word);
  if (original_misfits == 0)
    return false;
  float new_x_ht = ComputeCompatibleXheight(word);
  if (new_x_ht >= kMinRefitXHeightFraction * word->x_height) {
    // Build a trial copy of the word with the candidate x-height and
    // re-recognize it as in pass 2.
    WERD_RES new_x_ht_word(word->word);
    if (word->blamer_bundle != NULL) {
      new_x_ht_word.blamer_bundle = new BlamerBundle();
      new_x_ht_word.blamer_bundle->CopyTruth(*(word->blamer_bundle));
    }
    new_x_ht_word.x_height = new_x_ht;
    new_x_ht_word.caps_height = 0.0;
    new_x_ht_word.SetupForRecognition(
          unicharset, this, BestPix(), tessedit_ocr_engine_mode, NULL,
          classify_bln_numeric_mode, textord_use_cjk_fp_model,
          poly_allow_detailed_fx, row, block);
    match_word_pass_n(2, &new_x_ht_word, row, block);
    if (!new_x_ht_word.tess_failed) {
      int new_misfits = CountMisfitTops(&new_x_ht_word);
      if (debug_x_ht_level >= 1) {
        tprintf("Old misfits=%d with x-height %f, new=%d with x-height %f\n",
                original_misfits, word->x_height,
                new_misfits, new_x_ht);
        tprintf("Old rating= %f, certainty=%f, new=%f, %f\n",
                word->best_choice->rating(), word->best_choice->certainty(),
                new_x_ht_word.best_choice->rating(),
                new_x_ht_word.best_choice->certainty());
      }
      // The misfits must improve and either the rating or certainty.
      accept_new_x_ht = new_misfits < original_misfits &&
                        (new_x_ht_word.best_choice->certainty() >
                            word->best_choice->certainty() ||
                         new_x_ht_word.best_choice->rating() <
                            word->best_choice->rating());
      if (debug_x_ht_level >= 1) {
        ReportXhtFixResult(accept_new_x_ht, new_x_ht, word, &new_x_ht_word);
      }
    }
    if (accept_new_x_ht) {
      // Adopt the trial recognition as the word's result.
      word->ConsumeWordResults(&new_x_ht_word);
      return true;
    }
  }
  return false;
}
/**
 * classify_word_pass2
 *
 * Control what to do with the word in pass 2.
 * Re-matches words that are not yet done, applies the sub/superscript fix,
 * and attempts the trained x-height fix where the script supports it.
 */
void Tesseract::classify_word_pass2(const WordData& word_data,
                                    WERD_RES** in_word,
                                    PointerVector<WERD_RES>* out_words) {
  // Return if we do not want to run Tesseract.
  if (tessedit_ocr_engine_mode != OEM_TESSERACT_ONLY &&
      tessedit_ocr_engine_mode != OEM_TESSERACT_CUBE_COMBINED &&
      word_data.word->best_choice != NULL)
    return;
  if (tessedit_ocr_engine_mode == OEM_CUBE_ONLY) {
    return;
  }
  ROW* row = word_data.row;
  BLOCK* block = word_data.block;
  WERD_RES* word = *in_word;
  prev_word_best_choice_ = word_data.prev_word != NULL
      ? word_data.prev_word->word->best_choice : NULL;

  set_global_subloc_code(SUBLOC_NORM);
  check_debug_pt(word, 30);
  if (!word->done) {
    word->caps_height = 0.0;
    // Fall back to the row x-height when the word has none of its own.
    if (word->x_height == 0.0f)
      word->x_height = row->x_height();
    match_word_pass_n(2, word, row, block);
    check_debug_pt(word, 40);
  }

  SubAndSuperscriptFix(word);

  if (!word->tess_failed && !word->word->flag(W_REP_CHAR)) {
    // Only attempt the x-height refit for unrotated scripts that provide
    // usable top/bottom information.
    if (unicharset.top_bottom_useful() && unicharset.script_has_xheight() &&
        block->classify_rotation().y() == 0.0f) {
      // Use the tops and bottoms since they are available.
      TrainedXheightFix(word, block, row);
    }

    set_global_subloc_code(SUBLOC_NORM);
  }
#ifndef GRAPHICS_DISABLED
  if (tessedit_display_outwords) {
    if (fx_win == NULL)
      create_fx_win();
    clear_fx_win();
    word->rebuild_word->plot(fx_win);
    TBOX wbox = word->rebuild_word->bounding_box();
    fx_win->ZoomToRectangle(wbox.left(), wbox.top(),
                            wbox.right(), wbox.bottom());
    ScrollView::Update();
  }
#endif
  set_global_subloc_code(SUBLOC_NORM);
  check_debug_pt(word, 50);
}
/**
 * match_word_pass_n
 *
 * Baseline normalize the word and pass it to Tess for the given pass
 * (1 or 2). On success, fixes quotes (and optionally hyphens), records
 * acceptability, and builds the reject map; always sets the word fonts.
 * No-op if the word has already failed.
 */
void Tesseract::match_word_pass_n(int pass_n, WERD_RES *word,
                                  ROW *row, BLOCK* block) {
  if (word->tess_failed) return;
  tess_segment_pass_n(pass_n, word);

  if (!word->tess_failed) {
    if (!word->word->flag (W_REP_CHAR)) {
       word->fix_quotes();
      if (tessedit_fix_hyphens)
        word->fix_hyphens();
      /* Dont trust fix_quotes! - though I think I've fixed the bug */
      // Sanity check: the choice and blob lengths must still agree after
      // the quote/hyphen fixes.
      if (word->best_choice->length() != word->box_word->length()) {
        tprintf("POST FIX_QUOTES FAIL String:\"%s\"; Strlen=%d;"
                " #Blobs=%d\n",
                word->best_choice->debug_string().string(),
                word->best_choice->length(),
                word->box_word->length());
      }
      word->tess_accepted = tess_acceptable_word(word);

      // Also sets word->done flag
      make_reject_map(word, row, pass_n);
    }
  }
  set_word_fonts(word);

  ASSERT_HOST(word->raw_choice != NULL);
}
// Helper to return the best rated BLOB_CHOICE in the whole word that matches
// the given char_id, or NULL if none can be found.
static BLOB_CHOICE* FindBestMatchingChoice(UNICHAR_ID char_id,
                                           WERD_RES* word_res) {
  BLOB_CHOICE* best = NULL;
  const int num_positions = word_res->best_choice->length();
  // Examine every blob position and keep the lowest-rated (=best) match.
  for (int pos = 0; pos < num_positions; ++pos) {
    BLOB_CHOICE* match = FindMatchingChoice(char_id,
                                            word_res->GetBlobChoices(pos));
    if (match == NULL) continue;
    if (best == NULL || match->rating() < best->rating())
      best = match;
  }
  return best;
}
// Helper to insert blob_choice in each location in the leader word if there is
// no matching BLOB_CHOICE there already, and correct any incorrect results
// in the best_choice.
static void CorrectRepcharChoices(BLOB_CHOICE* blob_choice,
                                  WERD_RES* word_res) {
  WERD_CHOICE* word = word_res->best_choice;
  // Make sure every blob position offers the chosen character.
  for (int i = 0; i < word_res->best_choice->length(); ++i) {
    if (FindMatchingChoice(blob_choice->unichar_id(),
                           word_res->GetBlobChoices(i)) != NULL) {
      continue;
    }
    BLOB_CHOICE_IT choice_it(word_res->GetBlobChoices(i));
    choice_it.add_before_stay_put(new BLOB_CHOICE(*blob_choice));
  }
  // Force every position of the best choice to the chosen character.
  for (int i = 0; i < word->length(); ++i) {
    if (word->unichar_id(i) != blob_choice->unichar_id())
      word->set_unichar_id(blob_choice->unichar_id(), i);
  }
}
/**
 * fix_rep_char()
 * The word is a repeated char. (Leader.) Find the repeated char character.
 * Correct the classifications where some of the characters disagree with
 * the majority, then accept the word.
 */
void Tesseract::fix_rep_char(PAGE_RES_IT* page_res_it) {
  WERD_RES *word_res = page_res_it->word();
  const WERD_CHOICE &word = *(word_res->best_choice);

  // Find the frequency of each unique character in the word.
  SortHelper<UNICHAR_ID> rep_ch(word.length());
  for (int i = 0; i < word.length(); ++i) {
    rep_ch.Add(word.unichar_id(i), 1);
  }

  // Find the most frequent result.
  UNICHAR_ID maxch_id = INVALID_UNICHAR_ID;  // most common char
  int max_count = rep_ch.MaxCount(&maxch_id);
  // Find the best exemplar of a classifier result for maxch_id.
  BLOB_CHOICE* best_choice = FindBestMatchingChoice(maxch_id, word_res);
  if (best_choice == NULL) {
    tprintf("Failed to find a choice for %s, occurring %d times\n",
            word_res->uch_set->debug_str(maxch_id).string(), max_count);
    return;
  }
  word_res->done = TRUE;

  // NOTE: a dead block that measured inter-blob gaps here was removed: its
  // results (gap/gap_count) were never used, and it dereferenced the first
  // blob of the word without guarding against an empty blob list.

  // Just correct existing classification.
  CorrectRepcharChoices(best_choice, word_res);
  word_res->reject_map.initialise(word.length());
}
/**
 * acceptable_word_string()
 *
 * Classify the UTF-8 string s (lengths[i] gives the byte length of the ith
 * character) into an ACCEPTABLE_WERD_TYPE: all upper case, lower case
 * (optionally with an initial cap, one internal hyphen, or a trailing "'s"),
 * or an upper/lower-case abbreviation (letters alternating with '.').
 * Up to one leading and two trailing punctuation characters are tolerated.
 * Anything else, or any word of more than 20 characters, is AC_UNACCEPTABLE.
 */
ACCEPTABLE_WERD_TYPE Tesseract::acceptable_word_string(
    const UNICHARSET& char_set, const char *s, const char *lengths) {
  int i = 0;       // character index
  int offset = 0;  // byte offset of character i within s
  int leading_punct_count;
  int upper_count = 0;
  int hyphen_pos = -1;
  ACCEPTABLE_WERD_TYPE word_type = AC_UNACCEPTABLE;

  if (strlen (lengths) > 20)
    return word_type;

  /* Single Leading punctuation char*/

  if (s[offset] != '\0' && STRING(chs_leading_punct).contains(s[offset]))
    offset += lengths[i++];
  leading_punct_count = i;

  /* Initial cap */
  while (s[offset] != '\0' && char_set.get_isupper(s + offset, lengths[i])) {
    offset += lengths[i++];
    upper_count++;
  }
  if (upper_count > 1) {
    word_type = AC_UPPER_CASE;
  } else {
    /* Lower case word, possibly with an initial cap */
    while (s[offset] != '\0' && char_set.get_islower(s + offset, lengths[i])) {
      offset += lengths[i++];
    }
    if (i - leading_punct_count < quality_min_initial_alphas_reqd)
      goto not_a_word;
    /*
      Allow a single hyphen in a lower case word
      - don't trust upper case - I've seen several cases of "H" -> "I-I"
    */
    if (lengths[i] == 1 && s[offset] == '-') {
      hyphen_pos = i;
      offset += lengths[i++];
      if (s[offset] != '\0') {
        while ((s[offset] != '\0') &&
               char_set.get_islower(s + offset, lengths[i])) {
          offset += lengths[i++];
        }
        // Require at least 2 lower case characters after the hyphen.
        if (i < hyphen_pos + 3)
          goto not_a_word;
      }
    } else {
      /* Allow "'s" in NON hyphenated lower case words */
      if (lengths[i] == 1 && (s[offset] == '\'') &&
          lengths[i + 1] == 1 && (s[offset + lengths[i]] == 's')) {
        offset += lengths[i++];
        offset += lengths[i++];
      }
    }
    if (upper_count > 0)
      word_type = AC_INITIAL_CAP;
    else
      word_type = AC_LOWER_CASE;
  }

  /* Up to two different, constrained trailing punctuation chars */
  if (lengths[i] == 1 && s[offset] != '\0' &&
      STRING(chs_trailing_punct1).contains(s[offset]))
    offset += lengths[i++];
  // Second trailing punct must differ from the first.
  if (lengths[i] == 1 && s[offset] != '\0' && i > 0 &&
      s[offset - lengths[i - 1]] != s[offset] &&
      STRING(chs_trailing_punct2).contains (s[offset]))
    offset += lengths[i++];

  // Any leftover characters mean the word shape was not acceptable.
  if (s[offset] != '\0')
    word_type = AC_UNACCEPTABLE;

  not_a_word:

  if (word_type == AC_UNACCEPTABLE) {
    /* Look for abbreviation string */
    i = 0;
    offset = 0;
    if (s[0] != '\0' && char_set.get_isupper(s, lengths[0])) {
      word_type = AC_UC_ABBREV;
      while (s[offset] != '\0' &&
             char_set.get_isupper(s + offset, lengths[i]) &&
             lengths[i + 1] == 1 && s[offset + lengths[i]] == '.') {
        offset += lengths[i++];
        offset += lengths[i++];
      }
    }
    else if (s[0] != '\0' && char_set.get_islower(s, lengths[0])) {
      word_type = AC_LC_ABBREV;
      while (s[offset] != '\0' &&
             char_set.get_islower(s + offset, lengths[i]) &&
             lengths[i + 1] == 1 && s[offset + lengths[i]] == '.') {
        offset += lengths[i++];
        offset += lengths[i++];
      }
    }
    if (s[offset] != '\0')
      word_type = AC_UNACCEPTABLE;
  }

  return word_type;
}
/**
 * check_debug_pt()
 *
 * If test_pt is set and the word's bounding box contains the test point
 * (test_pt_x, test_pt_y), switch on rejection/x-height debugging, print a
 * report for the given checkpoint location, and return TRUE.
 * A negative location returns TRUE without printing (for breakpoint use).
 * Returns FALSE (and disables the debug flags) for all other words.
 */
BOOL8 Tesseract::check_debug_pt(WERD_RES *word, int location) {
  BOOL8 show_map_detail = FALSE;
  inT16 i;

  if (!test_pt)
    return FALSE;

  // Debug flags are off by default; only the word under the test point
  // gets them switched on below.
  tessedit_rejection_debug.set_value (FALSE);
  debug_x_ht_level.set_value (0);

  if (word->word->bounding_box ().contains (FCOORD (test_pt_x, test_pt_y))) {
    if (location < 0)
      return TRUE;  // For breakpoint use
    tessedit_rejection_debug.set_value (TRUE);
    debug_x_ht_level.set_value (20);
    tprintf ("\n\nTESTWD::");
    // Each location code identifies the pipeline stage doing the check.
    switch (location) {
      case 0:
        tprintf ("classify_word_pass1 start\n");
        word->word->print();
        break;
      case 10:
        tprintf ("make_reject_map: initial map");
        break;
      case 20:
        tprintf ("make_reject_map: after NN");
        break;
      case 30:
        tprintf ("classify_word_pass2 - START");
        break;
      case 40:
        tprintf ("classify_word_pass2 - Pre Xht");
        break;
      case 50:
        tprintf ("classify_word_pass2 - END");
        show_map_detail = TRUE;
        break;
      case 60:
        tprintf ("fixspace");
        break;
      case 70:
        tprintf ("MM pass START");
        break;
      case 80:
        tprintf ("MM pass END");
        break;
      case 90:
        tprintf ("After Poor quality rejection");
        break;
      case 100:
        tprintf ("unrej_good_quality_words - START");
        break;
      case 110:
        tprintf ("unrej_good_quality_words - END");
        break;
      case 120:
        tprintf ("Write results pass");
        show_map_detail = TRUE;
        break;
    }
    if (word->best_choice != NULL) {
      tprintf(" \"%s\" ", word->best_choice->unichar_string().string());
      word->reject_map.print(debug_fp);
      tprintf("\n");
      // For the verbose checkpoints, dump the reject map entry per char.
      if (show_map_detail) {
        tprintf("\"%s\"\n", word->best_choice->unichar_string().string());
        for (i = 0; word->best_choice->unichar_string()[i] != '\0'; i++) {
          tprintf("**** \"%c\" ****\n", word->best_choice->unichar_string()[i]);
          word->reject_map[i].full_print(debug_fp);
        }
      }
    } else {
      tprintf("null best choice\n");
    }
    tprintf ("Tess Accepted: %s\n", word->tess_accepted ? "TRUE" : "FALSE");
    tprintf ("Done flag: %s\n\n", word->done ? "TRUE" : "FALSE");
    return TRUE;
  } else {
    return FALSE;
  }
}
/**
 * find_modal_font
 *
 * Find the modal font, report it with its (clipped) pile count, and remove
 * it from the stats so the next call yields the runner-up.
 */
static void find_modal_font(                  //good chars in word
                            STATS *fonts,     //font stats
                            inT16 *font_out,  //output font
                            inT8 *font_count  //output count
                           ) {
  if (fonts->get_total () > 0) {
    const inT16 modal = (inT16) fonts->mode ();
    *font_out = modal;
    const inT32 pile = fonts->pile_count (modal);
    // Clip the pile count to what an inT8 can hold.
    *font_count = pile < MAX_INT8 ? pile : MAX_INT8;
    // Subtract the reported count so repeated calls walk down the modes.
    fonts->add (modal, -*font_count);
  } else {
    // No samples at all: report "no font".
    *font_out = -1;
    *font_count = 0;
  }
}
/**
 * set_word_fonts
 *
 * Get the fonts for the word: each blob whose chosen classifier result
 * matches the word's best choice votes for its first-choice font (2 points)
 * and second-choice font (1 point). The two modal fonts, their counts, and
 * derived italic/bold scores are stored on the word.
 */
void Tesseract::set_word_fonts(WERD_RES *word) {
  // Don't try to set the word fonts for a cube word, as the configs
  // will be meaningless.
  if (word->chopped_word == NULL) return;
  ASSERT_HOST(word->best_choice != NULL);

  inT32 index;                             // char id index
  // character iterator
  BLOB_CHOICE_IT choice_it;                // choice iterator
  int fontinfo_size = get_fontinfo_table().size();
  int fontset_size = get_fontset_table().size();
  // Without font tables there is nothing to vote on.
  if (fontinfo_size == 0 || fontset_size == 0) return;
  STATS fonts(0, fontinfo_size);           // font counters

  word->italic = 0;
  word->bold = 0;
  if (!word->best_choice_fontinfo_ids.empty()) {
    word->best_choice_fontinfo_ids.clear();
  }
  // Compute the modal font for the word
  for (index = 0; index < word->best_choice->length(); ++index) {
    UNICHAR_ID word_ch_id = word->best_choice->unichar_id(index);
    choice_it.set_to_list(word->GetBlobChoices(index));
    if (tessedit_debug_fonts) {
      tprintf("Examining fonts in %s\n",
              word->best_choice->debug_string().string());
    }
    // Find the choice at this position that matches the best-choice char;
    // only that choice's fonts are counted.
    for (choice_it.mark_cycle_pt(); !choice_it.cycled_list();
         choice_it.forward()) {
      UNICHAR_ID blob_ch_id = choice_it.data()->unichar_id();
      if (blob_ch_id == word_ch_id) {
        if (tessedit_debug_fonts) {
          tprintf("%s font %s (%d) font2 %s (%d)\n",
                  word->uch_set->id_to_unichar(blob_ch_id),
                  choice_it.data()->fontinfo_id() < 0 ? "unknown" :
                  fontinfo_table_.get(choice_it.data()->fontinfo_id()).name,
                  choice_it.data()->fontinfo_id(),
                  choice_it.data()->fontinfo_id2() < 0 ? "unknown" :
                  fontinfo_table_.get(choice_it.data()->fontinfo_id2()).name,
                  choice_it.data()->fontinfo_id2());
        }
        // 1st choice font gets 2 pts, 2nd choice 1 pt.
        if (choice_it.data()->fontinfo_id() >= 0) {
          fonts.add(choice_it.data()->fontinfo_id(), 2);
        }
        if (choice_it.data()->fontinfo_id2() >= 0) {
          fonts.add(choice_it.data()->fontinfo_id2(), 1);
        }
        break;
      }
    }
  }
  // First call extracts the modal font, second call the runner-up.
  inT16 font_id1, font_id2;
  find_modal_font(&fonts, &font_id1, &word->fontinfo_id_count);
  find_modal_font(&fonts, &font_id2, &word->fontinfo_id2_count);
  word->fontinfo = font_id1 >= 0 ? &fontinfo_table_.get(font_id1) : NULL;
  word->fontinfo2 = font_id2 >= 0 ? &fontinfo_table_.get(font_id2) : NULL;
  // All the blobs get the word's best choice font.
  for (int i = 0; i < word->best_choice->length(); ++i) {
    word->best_choice_fontinfo_ids.push_back(font_id1);
  }
  if (word->fontinfo_id_count > 0) {
    FontInfo fi = fontinfo_table_.get(font_id1);
    if (tessedit_debug_fonts) {
      if (word->fontinfo_id2_count > 0) {
        tprintf("Word modal font=%s, score=%d, 2nd choice %s/%d\n",
                fi.name, word->fontinfo_id_count,
                fontinfo_table_.get(font_id2).name,
                word->fontinfo_id2_count);
      } else {
        tprintf("Word modal font=%s, score=%d. No 2nd choice\n",
                fi.name, word->fontinfo_id_count);
      }
    }
    // 1st choices got 2 pts, so we need to halve the score for the mode.
    word->italic = (fi.is_italic() ? 1 : -1) * (word->fontinfo_id_count + 1) / 2;
    word->bold = (fi.is_bold() ? 1 : -1) * (word->fontinfo_id_count + 1) / 2;
  }
}
/**
 * font_recognition_pass
 *
 * Smooth the fonts for the document: find the document-wide modal font and
 * assign it (with count 1 and derived italic/bold) to every word whose own
 * font evidence is weak.
 */
void Tesseract::font_recognition_pass(PAGE_RES* page_res) {
  PAGE_RES_IT page_res_it(page_res);
  WERD_RES *word;                       // current word
  STATS doc_fonts(0, font_table_size_);  // font counters

  // Gather font id statistics.
  for (page_res_it.restart_page(); page_res_it.word() != NULL;
       page_res_it.forward()) {
    word = page_res_it.word();
    if (word->fontinfo != NULL) {
      doc_fonts.add(word->fontinfo->universal_id, word->fontinfo_id_count);
    }
    if (word->fontinfo2 != NULL) {
      doc_fonts.add(word->fontinfo2->universal_id, word->fontinfo_id2_count);
    }
  }
  inT16 doc_font;      // modal font
  inT8 doc_font_count;  // modal font
  find_modal_font(&doc_fonts, &doc_font, &doc_font_count);
  if (doc_font_count == 0)
    return;
  // Get the modal font pointer: find any word that voted for it and reuse
  // that word's FontInfo pointer.
  const FontInfo* modal_font = NULL;
  for (page_res_it.restart_page(); page_res_it.word() != NULL;
       page_res_it.forward()) {
    word = page_res_it.word();
    if (word->fontinfo != NULL && word->fontinfo->universal_id == doc_font) {
      modal_font = word->fontinfo;
      break;
    }
    if (word->fontinfo2 != NULL && word->fontinfo2->universal_id == doc_font) {
      modal_font = word->fontinfo2;
      break;
    }
  }
  ASSERT_HOST(modal_font != NULL);

  // Assign modal font to weak words.
  for (page_res_it.restart_page(); page_res_it.word() != NULL;
       page_res_it.forward()) {
    word = page_res_it.word();
    int length = word->best_choice->length();

    // 1st choices got 2 pts, so we need to halve the score for the mode.
    int count = (word->fontinfo_id_count + 1) / 2;
    // "Weak" = fewer than all chars agreeing (or < 3/4 for longer words).
    if (!(count == length || (length > 3 && count >= length * 3 / 4))) {
      word->fontinfo = modal_font;
      // Counts only get 1 as it came from the doc.
      word->fontinfo_id_count = 1;
      word->italic = modal_font->is_italic() ? 1 : -1;
      word->bold = modal_font->is_bold() ? 1 : -1;
    }
  }
}
// If a word has multiple alternates check if the best choice is in the
// dictionary. If not, replace it with an alternate that exists in the
// dictionary.
void Tesseract::dictionary_correction_pass(PAGE_RES *page_res) {
  PAGE_RES_IT word_it(page_res);
  for (WERD_RES* word = word_it.word(); word != NULL;
       word = word_it.forward()) {
    // A singleton has no alternates to consider.
    if (word->best_choices.singleton())
      continue;
    WERD_CHOICE* best = word->best_choice;
    // Nothing to do if the best choice is already a dictionary word.
    if (word->tesseract->getDict().valid_word(*best) != 0)
      continue;
    // Scan the alternates for the first one the dictionary accepts.
    WERD_CHOICE_IT alt_it(&word->best_choices);
    for (alt_it.mark_cycle_pt(); !alt_it.cycled_list(); alt_it.forward()) {
      WERD_CHOICE* candidate = alt_it.data();
      if (!word->tesseract->getDict().valid_word(*candidate))
        continue;
      // The alternate choice is in the dictionary.
      if (tessedit_bigram_debug) {
        tprintf("Dictionary correction replaces best choice '%s' with '%s'\n",
                best->unichar_string().string(),
                candidate->unichar_string().string());
      }
      // Promote the dictionary alternate to be the best choice.
      word->ReplaceBestChoice(candidate);
      break;
    }
  }
}
} // namespace tesseract
| C++ |
/******************************************************************
* File: fixspace.cpp (Formerly fixspace.c)
* Description: Implements a pass over the page res, exploring the alternative
* spacing possibilities, trying to use context to improve the
* word spacing
* Author: Phil Cheatle
* Created: Thu Oct 21 11:38:43 BST 1993
*
* (C) Copyright 1993, Hewlett-Packard Ltd.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
#include <ctype.h>
#include "reject.h"
#include "statistc.h"
#include "control.h"
#include "fixspace.h"
#include "genblob.h"
#include "tessvars.h"
#include "tessbox.h"
#include "globals.h"
#include "tesseractclass.h"
#define PERFECT_WERDS 999
#define MAXSPACING 128 /*max expected spacing in pix */
namespace tesseract {
/**
 * @name fix_fuzzy_spaces()
 * Walk over the page finding sequences of words joined by fuzzy spaces. Extract
 * them as a sublist, process the sublist to find the optimal arrangement of
 * spaces then replace the sublist in the ROW_RES.
 *
 * @param monitor progress monitor (may be NULL); supports cancellation
 * @param word_count count of words in doc, used to scale progress
 * @param[out] page_res page results updated in place
 */
void Tesseract::fix_fuzzy_spaces(ETEXT_DESC *monitor,
                                 inT32 word_count,
                                 PAGE_RES *page_res) {
  BLOCK_RES_IT block_res_it;
  ROW_RES_IT row_res_it;
  WERD_RES_IT word_res_it_from;
  WERD_RES_IT word_res_it_to;
  WERD_RES *word_res;
  WERD_RES_LIST fuzzy_space_words;
  inT16 new_length;
  BOOL8 prevent_null_wd_fixsp;   // DON'T process blobless wds
  inT32 word_index;              // current word

  block_res_it.set_to_list(&page_res->block_res_list);
  word_index = 0;
  for (block_res_it.mark_cycle_pt(); !block_res_it.cycled_list();
       block_res_it.forward()) {
    row_res_it.set_to_list(&block_res_it.data()->row_res_list);
    for (row_res_it.mark_cycle_pt(); !row_res_it.cycled_list();
         row_res_it.forward()) {
      word_res_it_from.set_to_list(&row_res_it.data()->word_res_list);
      while (!word_res_it_from.at_last()) {
        word_res = word_res_it_from.data();
        // Advance over words NOT followed by a fuzzy space, fixing each
        // one in isolation as we go.
        while (!word_res_it_from.at_last() &&
               !(word_res->combination ||
                 word_res_it_from.data_relative(1)->word->flag(W_FUZZY_NON) ||
                 word_res_it_from.data_relative(1)->word->flag(W_FUZZY_SP))) {
          fix_sp_fp_word(word_res_it_from, row_res_it.data()->row,
                         block_res_it.data()->block);
          word_res = word_res_it_from.forward();
          word_index++;
          // Keep the caller informed and allow early cancellation.
          if (monitor != NULL) {
            monitor->ocr_alive = TRUE;
            monitor->progress = 90 + 5 * word_index / word_count;
            if (monitor->deadline_exceeded() ||
                (monitor->cancel != NULL &&
                 (*monitor->cancel)(monitor->cancel_this, stats_.dict_words)))
              return;
          }
        }
        if (!word_res_it_from.at_last()) {
          // word_res_it_from now sits on the first word of a fuzzy-joined
          // run; extend word_res_it_to to cover the whole run.
          word_res_it_to = word_res_it_from;
          prevent_null_wd_fixsp =
              word_res->word->cblob_list()->empty();
          if (check_debug_pt(word_res, 60))
            debug_fix_space_level.set_value(10);
          word_res_it_to.forward();
          word_index++;
          if (monitor != NULL) {
            monitor->ocr_alive = TRUE;
            monitor->progress = 90 + 5 * word_index / word_count;
            if (monitor->deadline_exceeded() ||
                (monitor->cancel != NULL &&
                 (*monitor->cancel)(monitor->cancel_this, stats_.dict_words)))
              return;
          }
          while (!word_res_it_to.at_last () &&
                 (word_res_it_to.data_relative(1)->word->flag(W_FUZZY_NON) ||
                  word_res_it_to.data_relative(1)->word->flag(W_FUZZY_SP))) {
            if (check_debug_pt(word_res, 60))
              debug_fix_space_level.set_value(10);
            // Any blobless word in the run disables fix-space processing.
            if (word_res->word->cblob_list()->empty())
              prevent_null_wd_fixsp = TRUE;
            word_res = word_res_it_to.forward();
          }
          if (check_debug_pt(word_res, 60))
            debug_fix_space_level.set_value(10);
          if (word_res->word->cblob_list()->empty())
            prevent_null_wd_fixsp = TRUE;
          if (prevent_null_wd_fixsp) {
            word_res_it_from = word_res_it_to;
          } else {
            // Detach [from, to], search for the best space arrangement,
            // then splice the result back into the row and skip past it.
            fuzzy_space_words.assign_to_sublist(&word_res_it_from,
                                                &word_res_it_to);
            fix_fuzzy_space_list(fuzzy_space_words,
                                 row_res_it.data()->row,
                                 block_res_it.data()->block);
            new_length = fuzzy_space_words.length();
            word_res_it_from.add_list_before(&fuzzy_space_words);
            for (;
                 !word_res_it_from.at_last() && new_length > 0;
                 new_length--) {
              word_res_it_from.forward();
            }
          }
          if (test_pt)
            debug_fix_space_level.set_value(0);
        }
        fix_sp_fp_word(word_res_it_from, row_res_it.data()->row,
                       block_res_it.data()->block);
        // Last word in row
      }
    }
  }
}
// Search the space permutations of best_perm for the one with the highest
// eval_word_spacing() score, leaving the winner in best_perm.
void Tesseract::fix_fuzzy_space_list(WERD_RES_LIST &best_perm,
                                     ROW *row,
                                     BLOCK* block) {
  WERD_RES_LIST current_perm;
  BOOL8 improved = FALSE;
  // Score the arrangement we were given as the one to beat.
  inT16 best_score = eval_word_spacing(best_perm);
  dump_words(best_perm, best_score, 1, improved);
  if (best_score != PERFECT_WERDS)
    initialise_search(best_perm, current_perm);
  // Keep trying permutations until one is perfect or we run out.
  while ((best_score != PERFECT_WERDS) && !current_perm.empty()) {
    match_current_words(current_perm, row, block);
    const inT16 current_score = eval_word_spacing(current_perm);
    dump_words(current_perm, current_score, 2, improved);
    if (current_score > best_score) {
      // New best: replace best_perm with a deep copy of the current one.
      best_perm.clear();
      best_perm.deep_copy(&current_perm, &WERD_RES::deep_copy);
      best_score = current_score;
      improved = TRUE;
    }
    if (current_score < PERFECT_WERDS)
      transform_to_next_perm(current_perm);
  }
  dump_words(best_perm, best_score, 3, improved);
}
} // namespace tesseract
// Seed new_list with deep copies of the non-combination words in src_list;
// every copy starts out as a plain (non-combo) word.
void initialise_search(WERD_RES_LIST &src_list, WERD_RES_LIST &new_list) {
  WERD_RES_IT src_it(&src_list);
  WERD_RES_IT dest_it(&new_list);
  for (src_it.mark_cycle_pt(); !src_it.cycled_list(); src_it.forward()) {
    WERD_RES *src_wd = src_it.data();
    if (src_wd->combination) continue;
    WERD_RES *copy_wd = WERD_RES::deep_copy(src_wd);
    copy_wd->combination = FALSE;
    copy_wd->part_of_combo = FALSE;
    dest_it.add_after_then_move(copy_wd);
  }
}
namespace tesseract {
// Run pass-2 classification on every unrecognized, non-combo word in words.
void Tesseract::match_current_words(WERD_RES_LIST &words, ROW *row,
                                    BLOCK* block) {
  WERD_RES_IT it(&words);
  // Since we are not using PAGE_RES to iterate over words, we need to update
  // prev_word_best_choice_ before calling classify_word_pass2().
  prev_word_best_choice_ = NULL;
  for (it.mark_cycle_pt(); !it.cycled_list(); it.forward()) {
    WERD_RES *current = it.data();
    // Only classify top-level words that have no result yet.
    if (!current->part_of_combo && current->box_word == NULL) {
      WordData word_data(block, row, current);
      SetupWordPassN(2, &word_data);
      classify_word_and_language(&Tesseract::classify_word_pass2, NULL,
                                 &word_data);
    }
    prev_word_best_choice_ = current->best_choice;
  }
}
/**
 * @name eval_word_spacing()
 * The basic measure is the number of characters in contextually confirmed
 * words. (I.e the word is done)
 * If all words are contextually confirmed the evaluation is deemed perfect.
 *
 * Some fiddles are done to handle "1"s as these are VERY frequent causes of
 * fuzzy spaces. The problem with the basic measure is that "561 63" would score
 * the same as "56163", though given our knowledge that the space is fuzzy, and
 * that there is a "1" next to the fuzzy space, we need to ensure that "56163"
 * is preferred.
 *
 * The solution is to NOT COUNT the score of any word which has a digit at one
 * end and a "1Il" as the character the other side of the space.
 *
 * Conversely, any character next to a "1" within a word is counted as a
 * positive score. Thus "561 63" would score 4 (3 chars in a numeric word plus
 * 1 side of the "1" joined). "56163" would score 7 - all chars in a numeric
 * word + 2 sides of a "1" joined.
 *
 * The joined 1 rule is applied to any word REGARDLESS of contextual
 * confirmation. Thus "PS7a71 3/7a" scores 1 (neither word is contextually
 * confirmed. The only score is from the joined 1. "PS7a713/7a" scores 2.
 *
 */
inT16 Tesseract::eval_word_spacing(WERD_RES_LIST &word_res_list) {
  WERD_RES_IT word_res_it(&word_res_list);
  inT16 total_score = 0;
  inT16 word_count = 0;
  inT16 done_word_count = 0;
  inT16 word_len;
  inT16 i;
  inT16 offset;
  WERD_RES *word;                 // current word
  // A word's score is only banked once the NEXT word's first character is
  // known, hence the prev_* state below.
  inT16 prev_word_score = 0;
  BOOL8 prev_word_done = FALSE;
  BOOL8 prev_char_1 = FALSE;      // prev ch a "1/I/l"?
  BOOL8 prev_char_digit = FALSE;  // prev ch 2..9 or 0
  BOOL8 current_char_1 = FALSE;
  BOOL8 current_word_ok_so_far;
  STRING punct_chars = "!\"`',.:;";
  BOOL8 prev_char_punct = FALSE;
  BOOL8 current_char_punct = FALSE;
  BOOL8 word_done = FALSE;

  do {
    word = word_res_it.data();
    word_done = fixspace_thinks_word_done(word);
    word_count++;
    if (word->tess_failed) {
      // A failed word scores nothing but still banks the previous word.
      total_score += prev_word_score;
      if (prev_word_done)
        done_word_count++;
      prev_word_score = 0;
      prev_char_1 = FALSE;
      prev_char_digit = FALSE;
      prev_word_done = FALSE;
    } else {
      /*
        Can we add the prev word score and potentially count this word?
        Yes IF it didn't end in a 1 when the first char of this word is a digit
        AND it didn't end in a digit when the first char of this word is a 1
      */
      word_len = word->reject_map.length();
      current_word_ok_so_far = FALSE;
      if (!((prev_char_1 && digit_or_numeric_punct(word, 0)) ||
            (prev_char_digit && (
                (word_done &&
                 word->best_choice->unichar_lengths().string()[0] == 1 &&
                 word->best_choice->unichar_string()[0] == '1') ||
                (!word_done && STRING(conflict_set_I_l_1).contains(
                    word->best_choice->unichar_string()[0])))))) {
        total_score += prev_word_score;
        if (prev_word_done)
          done_word_count++;
        current_word_ok_so_far = word_done;
      }

      if (current_word_ok_so_far) {
        prev_word_done = TRUE;
        prev_word_score = word_len;
      } else {
        prev_word_done = FALSE;
        prev_word_score = 0;
      }

      /* Add 1 to total score for every joined 1 regardless of context and
         rejtn */
      for (i = 0, prev_char_1 = FALSE; i < word_len; i++) {
        current_char_1 = word->best_choice->unichar_string()[i] == '1';
        if (prev_char_1 || (current_char_1 && (i > 0)))
          total_score++;
        prev_char_1 = current_char_1;
      }

      /* Add 1 to total score for every joined punctuation regardless of context
        and rejtn */
      if (tessedit_prefer_joined_punct) {
        for (i = 0, offset = 0, prev_char_punct = FALSE; i < word_len;
             offset += word->best_choice->unichar_lengths()[i++]) {
          current_char_punct =
              punct_chars.contains(word->best_choice->unichar_string()[offset]);
          if (prev_char_punct || (current_char_punct && i > 0))
            total_score++;
          prev_char_punct = current_char_punct;
        }
      }
      // Remember what this word ended with for the next iteration:
      // first walk offset to the last character's byte position.
      prev_char_digit = digit_or_numeric_punct(word, word_len - 1);
      for (i = 0, offset = 0; i < word_len - 1;
           offset += word->best_choice->unichar_lengths()[i++]);
      prev_char_1 =
          ((word_done && (word->best_choice->unichar_string()[offset] == '1'))
           || (!word_done && STRING(conflict_set_I_l_1).contains(
                   word->best_choice->unichar_string()[offset])));
    }
    /* Find next word */
    do {
      word_res_it.forward();
    } while (word_res_it.data()->part_of_combo);
  } while (!word_res_it.at_first());
  // Bank the final word's score.
  total_score += prev_word_score;
  if (prev_word_done)
    done_word_count++;
  if (done_word_count == word_count)
    return PERFECT_WERDS;
  else
    return total_score;
}
// Returns TRUE if the character at char_position in the word's best choice
// is a digit, or is numeric punctuation in a NUMBER_PERM word.
BOOL8 Tesseract::digit_or_numeric_punct(WERD_RES *word, int char_position) {
  const WERD_CHOICE &choice = *word->best_choice;
  // Walk the UTF-8 string to the byte offset of char_position.
  int offset = 0;
  int i = 0;
  while (i < char_position)
    offset += choice.unichar_lengths()[i++];
  if (word->uch_set->get_isdigit(choice.unichar_string().string() + offset,
                                 choice.unichar_lengths()[i]))
    return TRUE;
  return choice.permuter() == NUMBER_PERM &&
         STRING(numeric_punctuation).contains(
             choice.unichar_string().string()[offset]);
}
} // namespace tesseract
/**
 * @name transform_to_next_perm()
 * Examines the current word list to find the smallest word gap size. Then walks
 * the word list closing any gaps of this size by either inserted new
 * combination words, or extending existing ones.
 *
 * The routine COULD be limited to stop it building words longer than N blobs.
 *
 * If there are no more gaps then it DELETES the entire list and returns the
 * empty list to cause termination.
 */
void transform_to_next_perm(WERD_RES_LIST &words) {
  WERD_RES_IT word_it(&words);
  WERD_RES_IT prev_word_it(&words);
  WERD_RES *word;
  WERD_RES *prev_word;
  WERD_RES *combo;
  WERD *copy_word;
  inT16 prev_right = -MAX_INT16;  // right edge of previous word, or sentinel
  TBOX box;
  inT16 gap;
  inT16 min_gap = MAX_INT16;

  // Pass 1: find the smallest gap between adjacent top-level words.
  for (word_it.mark_cycle_pt(); !word_it.cycled_list(); word_it.forward()) {
    word = word_it.data();
    if (!word->part_of_combo) {
      box = word->word->bounding_box();
      if (prev_right > -MAX_INT16) {
        gap = box.left() - prev_right;
        if (gap < min_gap)
          min_gap = gap;
      }
      prev_right = box.right();
    }
  }
  if (min_gap < MAX_INT16) {
    // Pass 2: close every gap of that minimum size.
    prev_right = -MAX_INT16;        // back to start
    word_it.set_to_list(&words);
    // Note: we can't use cycle_pt due to inserted combos at start of list.
    for (; (prev_right == -MAX_INT16) || !word_it.at_first();
         word_it.forward()) {
      word = word_it.data();
      if (!word->part_of_combo) {
        box = word->word->bounding_box();
        if (prev_right > -MAX_INT16) {
          gap = box.left() - prev_right;
          if (gap <= min_gap) {
            // Join this word onto the previous one (or its combo).
            prev_word = prev_word_it.data();
            if (prev_word->combination) {
              combo = prev_word;
            } else {
              /* Make a new combination and insert before
               * the first word being joined. */
              copy_word = new WERD;
              *copy_word = *(prev_word->word);
              // deep copy
              combo = new WERD_RES(copy_word);
              combo->combination = TRUE;
              combo->x_height = prev_word->x_height;
              prev_word->part_of_combo = TRUE;
              prev_word_it.add_before_then_move(combo);
            }
            combo->word->set_flag(W_EOL, word->word->flag(W_EOL));
            if (word->combination) {
              combo->word->join_on(word->word);
              // Move blobs to combo
              // old combo no longer needed
              delete word_it.extract();
            } else {
              // Copy current wd to combo
              combo->copy_on(word);
              word->part_of_combo = TRUE;
            }
            // The enlarged combo needs re-recognition.
            combo->done = FALSE;
            combo->ClearResults();
          } else {
            prev_word_it = word_it;  // catch up
          }
        }
        prev_right = box.right();
      }
    }
  } else {
    words.clear();  // signal termination
  }
}
namespace tesseract {
void Tesseract::dump_words(WERD_RES_LIST &perm, inT16 score,
inT16 mode, BOOL8 improved) {
WERD_RES_IT word_res_it(&perm);
if (debug_fix_space_level > 0) {
if (mode == 1) {
stats_.dump_words_str = "";
for (word_res_it.mark_cycle_pt(); !word_res_it.cycled_list();
word_res_it.forward()) {
if (!word_res_it.data()->part_of_combo) {
stats_.dump_words_str +=
word_res_it.data()->best_choice->unichar_string();
stats_.dump_words_str += ' ';
}
}
}
if (debug_fix_space_level > 1) {
switch (mode) {
case 1:
tprintf("EXTRACTED (%d): \"", score);
break;
case 2:
tprintf("TESTED (%d): \"", score);
break;
case 3:
tprintf("RETURNED (%d): \"", score);
break;
}
for (word_res_it.mark_cycle_pt(); !word_res_it.cycled_list();
word_res_it.forward()) {
if (!word_res_it.data()->part_of_combo) {
tprintf("%s/%1d ",
word_res_it.data()->best_choice->unichar_string().string(),
(int)word_res_it.data()->best_choice->permuter());
}
}
tprintf("\"\n");
} else if (improved) {
tprintf("FIX SPACING \"%s\" => \"", stats_.dump_words_str.string());
for (word_res_it.mark_cycle_pt(); !word_res_it.cycled_list();
word_res_it.forward()) {
if (!word_res_it.data()->part_of_combo) {
tprintf("%s/%1d ",
word_res_it.data()->best_choice->unichar_string().string(),
(int)word_res_it.data()->best_choice->permuter());
}
}
tprintf("\"\n");
}
}
}
// Returns TRUE if the word counts as contextually confirmed for the
// fix-space scoring.
BOOL8 Tesseract::fixspace_thinks_word_done(WERD_RES *word) {
  if (word->done)
    return TRUE;

  /*
    Use all the standard pass 2 conditions for mode 5 in set_done() in
    reject.c BUT DON'T REJECT IF THE WERD IS AMBIGUOUS - FOR SPACING WE DON'T
    CARE WHETHER WE HAVE of/at on/an etc.
  */
  if (fixsp_done_mode <= 0)
    return FALSE;
  // Acceptance depends on the configured done mode.
  if (!(word->tess_accepted ||
        (fixsp_done_mode == 2 && word->reject_map.reject_count() == 0) ||
        fixsp_done_mode == 3))
    return FALSE;
  // Words containing a space are never "done".
  if (strchr(word->best_choice->unichar_string().string(), ' ') != NULL)
    return FALSE;
  // Finally the word must come from a dictionary or number permuter.
  const int permuter = word->best_choice->permuter();
  return permuter == SYSTEM_DAWG_PERM || permuter == FREQ_DAWG_PERM ||
         permuter == USER_DAWG_PERM || permuter == NUMBER_PERM;
}
/**
 * @name fix_sp_fp_word()
 * Test the current word to see if it can be split by deleting noise blobs. If
 * so, do the business.
 * Return with the iterator pointing to the same place if the word is unchanged,
 * or the last of the replacement words.
 */
void Tesseract::fix_sp_fp_word(WERD_RES_IT &word_res_it, ROW *row,
                               BLOCK* block) {
  WERD_RES *word_res = word_res_it.data();
  // Only plain fixed-pitch (W_DONT_CHOP) words are candidates.
  if (word_res->word->flag(W_REP_CHAR) ||
      word_res->combination ||
      word_res->part_of_combo ||
      !word_res->word->flag(W_DONT_CHOP))
    return;

  float junk;
  if (worst_noise_blob(word_res, &junk) < 0)
    return;  // No blob looks like removable noise.

  if (debug_fix_space_level > 1) {
    tprintf("FP fixspace working on \"%s\"\n",
            word_res->best_choice->unichar_string().string());
  }
  word_res->word->rej_cblob_list()->sort(c_blob_comparator);

  // Move the word into a private list, split it there, then splice the
  // replacement words back in and leave the iterator on the last of them.
  WERD_RES_LIST sub_word_list;
  WERD_RES_IT sub_word_list_it(&sub_word_list);
  sub_word_list_it.add_after_stay_put(word_res_it.extract());
  fix_noisy_space_list(sub_word_list, row, block);
  inT16 new_length = sub_word_list.length();
  word_res_it.add_list_before(&sub_word_list);
  for (; !word_res_it.at_last() && new_length > 1; new_length--) {
    word_res_it.forward();
  }
}
// Repeatedly break out the noisiest blob of the (single-word) list and keep
// whichever resulting split scores best under fp_eval_word_spacing().
void Tesseract::fix_noisy_space_list(WERD_RES_LIST &best_perm, ROW *row,
                                     BLOCK* block) {
  WERD_RES_IT best_perm_it(&best_perm);
  WERD_RES_LIST current_perm;
  WERD_RES_IT current_perm_it(&current_perm);
  BOOL8 improved = FALSE;
  // Score the unsplit word as the one to beat.
  inT16 best_score = fp_eval_word_spacing(best_perm);  // default score
  dump_words(best_perm, best_score, 1, improved);

  WERD_RES *seed_word = best_perm_it.data();
  // Even deep_copy doesn't copy the underlying WERD unless its combination
  // flag is true!.
  seed_word->combination = TRUE;  // Kludge to force deep copy
  current_perm_it.add_to_end(WERD_RES::deep_copy(seed_word));
  seed_word->combination = FALSE;  // Undo kludge

  break_noisiest_blob_word(current_perm);

  while (best_score != PERFECT_WERDS && !current_perm.empty()) {
    match_current_words(current_perm, row, block);
    const inT16 current_score = fp_eval_word_spacing(current_perm);
    dump_words(current_perm, current_score, 2, improved);
    if (current_score > best_score) {
      // New best: keep a deep copy of the current split.
      best_perm.clear();
      best_perm.deep_copy(&current_perm, &WERD_RES::deep_copy);
      best_score = current_score;
      improved = TRUE;
    }
    if (current_score < PERFECT_WERDS) {
      break_noisiest_blob_word(current_perm);
    }
  }
  dump_words(best_perm, best_score, 3, improved);
}
/**
 * break_noisiest_blob_word()
 * Find the word with the blob which looks like the worst noise.
 * Break the word into two, deleting the noise blob.
 * If no word has a suitable noise blob, the list is cleared to signal
 * termination of the search.
 */
void Tesseract::break_noisiest_blob_word(WERD_RES_LIST &words) {
  WERD_RES_IT word_it(&words);
  WERD_RES_IT worst_word_it;
  float worst_noise_score = 9999;
  int worst_blob_index = -1;     // Noisiest blob of noisiest wd
  int blob_index;                // of wds noisiest blob
  float noise_score;             // of wds noisiest blob
  WERD_RES *word_res;
  C_BLOB_IT blob_it;
  C_BLOB_IT rej_cblob_it;
  C_BLOB_LIST new_blob_list;
  C_BLOB_IT new_blob_it;
  C_BLOB_IT new_rej_cblob_it;
  WERD *new_word;
  inT16 start_of_noise_blob;
  inT16 i;

  // Find the lowest (= noisiest) noise score over all words.
  for (word_it.mark_cycle_pt(); !word_it.cycled_list(); word_it.forward()) {
    blob_index = worst_noise_blob(word_it.data(), &noise_score);
    if (blob_index > -1 && worst_noise_score > noise_score) {
      worst_noise_score = noise_score;
      worst_blob_index = blob_index;
      worst_word_it = word_it;
    }
  }
  if (worst_blob_index < 0) {
    words.clear();  // signal termination
    return;
  }

  /* Now split the worst_word_it */

  word_res = worst_word_it.data();

  /* Move blobs before noise blob to a new bloblist */

  new_blob_it.set_to_list(&new_blob_list);
  blob_it.set_to_list(word_res->word->cblob_list());
  for (i = 0; i < worst_blob_index; i++, blob_it.forward()) {
    new_blob_it.add_after_then_move(blob_it.extract());
  }
  start_of_noise_blob = blob_it.data()->bounding_box().left();
  delete blob_it.extract();  // throw out noise blob

  // The left half becomes a fresh word; the original keeps the right half.
  new_word = new WERD(&new_blob_list, word_res->word);
  new_word->set_flag(W_EOL, FALSE);
  word_res->word->set_flag(W_BOL, FALSE);
  word_res->word->set_blanks(1);  // After break

  // Rejected blobs left of the break move to the new word too.
  new_rej_cblob_it.set_to_list(new_word->rej_cblob_list());
  rej_cblob_it.set_to_list(word_res->word->rej_cblob_list());
  for (;
       (!rej_cblob_it.empty() &&
        (rej_cblob_it.data()->bounding_box().left() < start_of_noise_blob));
       rej_cblob_it.forward()) {
    new_rej_cblob_it.add_after_then_move(rej_cblob_it.extract());
  }

  WERD_RES* new_word_res = new WERD_RES(new_word);
  new_word_res->combination = TRUE;
  worst_word_it.add_before_then_move(new_word_res);

  // The shrunken original needs re-recognition.
  word_res->ClearResults();
}
/**
 * worst_noise_blob()
 *
 * Returns the index of the blob with the lowest noise score (the most
 * noise-like blob) that is flanked by at least fixsp_non_noise_limit
 * non-noise blobs on each side, writing its score to *worst_noise_score.
 * Returns -1 if the word cannot or should not be split.
 */
inT16 Tesseract::worst_noise_blob(WERD_RES *word_res,
                                  float *worst_noise_score) {
  float noise_score[512];
  int i;
  int min_noise_blob;             // 1st contender
  int max_noise_blob;             // last contender
  int non_noise_count;
  int worst_noise_blob;           // Worst blob
  float small_limit = kBlnXHeight * fixsp_small_outlines_size;
  float non_noise_limit = kBlnXHeight * 0.8;

  if (word_res->rebuild_word == NULL)
    return -1;  // Can't handle cube words.

  // Normalised.
  int blob_count = word_res->box_word->length();
  ASSERT_HOST(blob_count <= 512);
  if (blob_count < 5)
    return -1;  // too short to split

  /* Get the noise scores for all blobs */

#ifndef SECURE_NAMES
  if (debug_fix_space_level > 5)
    tprintf("FP fixspace Noise metrics for \"%s\": ",
            word_res->best_choice->unichar_string().string());
#endif

  for (i = 0; i < blob_count && i < word_res->rebuild_word->NumBlobs(); i++) {
    TBLOB* blob = word_res->rebuild_word->blobs[i];
    // Accepted blobs are treated as definitely-not-noise.
    if (word_res->reject_map[i].accepted())
      noise_score[i] = non_noise_limit;
    else
      noise_score[i] = blob_noise_score(blob);

    if (debug_fix_space_level > 5)
      tprintf("%1.1f ", noise_score[i]);
  }
  if (debug_fix_space_level > 5)
    tprintf("\n");

  /* Now find the worst one which is far enough away from the end of the word */

  // Require fixsp_non_noise_limit non-noise blobs before a candidate...
  non_noise_count = 0;
  for (i = 0; i < blob_count && non_noise_count < fixsp_non_noise_limit; i++) {
    if (noise_score[i] >= non_noise_limit) {
      non_noise_count++;
    }
  }
  if (non_noise_count < fixsp_non_noise_limit)
    return -1;

  min_noise_blob = i;

  // ...and fixsp_non_noise_limit non-noise blobs after it.
  non_noise_count = 0;
  for (i = blob_count - 1; i >= 0 && non_noise_count < fixsp_non_noise_limit;
       i--) {
    if (noise_score[i] >= non_noise_limit) {
      non_noise_count++;
    }
  }
  if (non_noise_count < fixsp_non_noise_limit)
    return -1;

  max_noise_blob = i;

  if (min_noise_blob > max_noise_blob)
    return -1;

  // Only scores below small_limit count as noise at all.
  *worst_noise_score = small_limit;
  worst_noise_blob = -1;
  for (i = min_noise_blob; i <= max_noise_blob; i++) {
    if (noise_score[i] < *worst_noise_score) {
      worst_noise_blob = i;
      *worst_noise_score = noise_score[i];
    }
  }
  return worst_noise_blob;
}
// Scores how noise-like a blob is: the score is the largest bounding-box
// dimension over all of its outlines, doubled when the blob has more than 5
// outlines and halved when the blob sits unusually high or low relative to
// the baseline. Smaller scores indicate noisier blobs.
float Tesseract::blob_noise_score(TBLOB *blob) {
  inT16 num_outlines = 0;
  inT16 biggest_dim = 0;  // largest outline dimension seen so far

  for (TESSLINE* outline = blob->outlines; outline != NULL;
       outline = outline->next) {
    ++num_outlines;
    TBOX ol_box = outline->bounding_box();
    inT16 dim = (ol_box.height() > ol_box.width()) ? ol_box.height()
                                                   : ol_box.width();
    if (dim > biggest_dim)
      biggest_dim = dim;
  }

  if (num_outlines > 5) {
    // penalise LOTS of blobs
    biggest_dim *= 2;
  }

  TBOX blob_box = blob->bounding_box();
  if (blob_box.bottom() > kBlnBaselineOffset * 4 ||
      blob_box.top() < kBlnBaselineOffset / 2) {
    // Relax the score if the blob is unusually high or low on the line.
    biggest_dim /= 2;
  }
  return biggest_dim;
}
} // namespace tesseract
// Prints fix-space debug information for a single word: its bounding box,
// best-choice string, blob counts, and reject map (optionally per-character
// detail), plus the tess_accepted and done flags.
void fixspace_dbg(WERD_RES *word) {
  TBOX box = word->word->bounding_box();
  // Compile-time switch: flip to TRUE to dump the per-character reject map.
  BOOL8 show_map_detail = FALSE;
  inT16 i;

  box.print();
  tprintf(" \"%s\" ", word->best_choice->unichar_string().string());
  tprintf("Blob count: %d (word); %d/%d (rebuild word)\n",
          word->word->cblob_list()->length(),
          word->rebuild_word->NumBlobs(),
          word->box_word->length());
  word->reject_map.print(debug_fp);
  tprintf("\n");
  if (show_map_detail) {
    tprintf("\"%s\"\n", word->best_choice->unichar_string().string());
    for (i = 0; word->best_choice->unichar_string()[i] != '\0'; i++) {
      tprintf("**** \"%c\" ****\n", word->best_choice->unichar_string()[i]);
      word->reject_map[i].full_print(debug_fp);
    }
  }

  tprintf("Tess Accepted: %s\n", word->tess_accepted ? "TRUE" : "FALSE");
  tprintf("Done flag: %s\n\n", word->done ? "TRUE" : "FALSE");
}
/**
* fp_eval_word_spacing()
* Evaluation function for fixed pitch word lists.
*
* Basically, count the number of "nice" characters - those which are in tess
* acceptable words or in dict words and are not rejected.
* Penalise any potential noise chars
*/
namespace tesseract {
inT16 Tesseract::fp_eval_word_spacing(WERD_RES_LIST &word_res_list) {
  WERD_RES_IT word_it(&word_res_list);
  WERD_RES *word;
  inT16 word_length;
  inT16 score = 0;
  inT16 i;
  float small_limit = kBlnXHeight * fixsp_small_outlines_size;

  for (word_it.mark_cycle_pt(); !word_it.cycled_list(); word_it.forward()) {
    word = word_it.data();
    if (word->rebuild_word == NULL)
      continue;  // Can't handle cube words.
    word_length = word->reject_map.length();
    // Only score words that are trusted: done, tess-accepted, or validated
    // by one of the dictionaries.
    if (word->done ||
        word->tess_accepted ||
        word->best_choice->permuter() == SYSTEM_DAWG_PERM ||
        word->best_choice->permuter() == FREQ_DAWG_PERM ||
        word->best_choice->permuter() == USER_DAWG_PERM ||
        safe_dict_word(word) > 0) {
      int num_blobs = word->rebuild_word->NumBlobs();
      UNICHAR_ID space = word->uch_set->unichar_to_id(" ");
      for (i = 0; i < word->best_choice->length() && i < num_blobs; ++i) {
        TBLOB* blob = word->rebuild_word->blobs[i];
        // Spaces and tiny (probably-noise) blobs cost a point; accepted
        // characters earn one.
        if (word->best_choice->unichar_id(i) == space ||
            blob_noise_score(blob) < small_limit) {
          score -= 1;  // penalise possibly erroneous non-space
        } else if (word->reject_map[i].accepted()) {
          score++;
        }
      }
    }
  }
  // Never return a negative score.
  if (score < 0)
    score = 0;
  return score;
}

}  // namespace tesseract
// (End of fix-space implementation; the equation detector follows.)
///////////////////////////////////////////////////////////////////////
// File: equationdetect.cpp
// Description: Helper classes to detect equations.
// Author: Zongyi (Joe) Liu (joeliu@google.com)
// Created: Fri Aug 31 11:13:01 PST 2011
//
// (C) Copyright 2011, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#ifdef _MSC_VER
#pragma warning(disable:4244) // Conversion warnings
#include <mathfix.h>
#endif
#ifdef __MINGW32__
#include <limits.h>
#endif
#include <float.h>
// Include automatically generated configuration file if running autoconf.
#ifdef HAVE_CONFIG_H
#include "config_auto.h"
#endif
#include "equationdetect.h"
#include "bbgrid.h"
#include "classify.h"
#include "colpartition.h"
#include "colpartitiongrid.h"
#include "colpartitionset.h"
#include "helpers.h"
#include "ratngs.h"
#include "tesseractclass.h"
// Config variables.
// Debug switches: when set, the corresponding detection stage writes an
// intermediate image to disk (see GetOutputTiffName call sites).
BOOL_VAR(equationdetect_save_bi_image, false, "Save input bi image");
BOOL_VAR(equationdetect_save_spt_image, false, "Save special character image");
BOOL_VAR(equationdetect_save_seed_image, false, "Save the seed image");
BOOL_VAR(equationdetect_save_merged_image, false, "Save the merged image");
namespace tesseract {
///////////////////////////////////////////////////////////////////////////
// Utility ColParition sort functions.
///////////////////////////////////////////////////////////////////////////
static int SortCPByTopReverse(const void* p1, const void* p2) {
const ColPartition* cp1 = *reinterpret_cast<ColPartition* const*>(p1);
const ColPartition* cp2 = *reinterpret_cast<ColPartition* const*>(p2);
ASSERT_HOST(cp1 != NULL && cp2 != NULL);
const TBOX &box1(cp1->bounding_box()), &box2(cp2->bounding_box());
return box2.top() - box1.top();
}
static int SortCPByBottom(const void* p1, const void* p2) {
const ColPartition* cp1 = *reinterpret_cast<ColPartition* const*>(p1);
const ColPartition* cp2 = *reinterpret_cast<ColPartition* const*>(p2);
ASSERT_HOST(cp1 != NULL && cp2 != NULL);
const TBOX &box1(cp1->bounding_box()), &box2(cp2->bounding_box());
return box1.bottom() - box2.bottom();
}
static int SortCPByHeight(const void* p1, const void* p2) {
const ColPartition* cp1 = *reinterpret_cast<ColPartition* const*>(p1);
const ColPartition* cp2 = *reinterpret_cast<ColPartition* const*>(p2);
ASSERT_HOST(cp1 != NULL && cp2 != NULL);
const TBOX &box1(cp1->bounding_box()), &box2(cp2->bounding_box());
return box1.height() - box2.height();
}
// TODO(joeliu): we may want to parameterize these constants.
// High and low math/digit blob density thresholds used when selecting seed
// partitions in IdentifySeedParts.
const float kMathDigitDensityTh1 = 0.25;
const float kMathDigitDensityTh2 = 0.1;
// Italic and "unclear" blob density thresholds (consumers not visible in
// this chunk; presumably CheckSeedDensity — TODO confirm).
const float kMathItalicDensityTh = 0.5;
const float kUnclearDensityTh = 0.25;
// Minimum blob count expected for a seed partition (presumably used by
// CheckSeedBlobsCount — TODO confirm).
const int kSeedBlobsCountTh = 10;
// Minimum number of indented text lines a candidate must align with before
// it is rejected as ordinary indented text (see IdentifySeedParts,
// CheckForSeed2).
const int kLeftIndentAlignmentCountTh = 1;
// Returns true if PolyBlockType is of text type or equation type.
inline bool IsTextOrEquationType(PolyBlockType type) {
  if (type == PT_EQUATION) {
    return true;
  }
  return PTIsTextType(type);
}
// Returns true if the indent type includes a left indent.
inline bool IsLeftIndented(const EquationDetect::IndentType type) {
  switch (type) {
    case EquationDetect::LEFT_INDENT:
    case EquationDetect::BOTH_INDENT:
      return true;
    default:
      return false;
  }
}
// Returns true if the indent type includes a right indent.
inline bool IsRightIndented(const EquationDetect::IndentType type) {
  switch (type) {
    case EquationDetect::RIGHT_INDENT:
    case EquationDetect::BOTH_INDENT:
      return true;
    default:
      return false;
  }
}
// Constructs the detector and tries to load the dedicated equation-language
// Tesseract instance from equ_datapath. A NULL equ_name falls back to "equ".
// On load failure a warning is printed and equ_tesseract_ stays NULL, which
// disables detection (see FindEquationParts).
EquationDetect::EquationDetect(const char* equ_datapath,
                               const char* equ_name) {
  if (equ_name == NULL) {
    equ_name = "equ";  // default equation language name
  }
  lang_tesseract_ = NULL;
  resolution_ = 0;
  page_count_ = 0;
  cps_super_bbox_ = NULL;

  // Construct equ_tesseract_.
  equ_tesseract_ = new Tesseract();
  if (equ_tesseract_->init_tesseract(equ_datapath, equ_name,
                                     OEM_TESSERACT_ONLY)) {
    tprintf("Warning: equation region detection requested,"
            " but %s failed to load from %s\n", equ_name, equ_datapath);
    delete equ_tesseract_;
    equ_tesseract_ = NULL;
  }
}
// Releases the owned equation Tesseract instance and the cached super
// bounding box. delete on a NULL pointer is a no-op, so no guards needed.
EquationDetect::~EquationDetect() {
  delete equ_tesseract_;
  delete cps_super_bbox_;
}
// Stores a non-owning pointer to the language Tesseract instance used for
// classification; the caller retains ownership.
void EquationDetect::SetLangTesseract(Tesseract* lang_tesseract) {
  lang_tesseract_ = lang_tesseract;
}
// Sets the page resolution (dpi) used for distance thresholds.
void EquationDetect::SetResolution(const int resolution) {
  resolution_ = resolution;
}
// Resets the special-text type of every blob in to_block (both the regular
// and the large blob lists) to BSTT_NONE. Returns 0 on success, -1 if
// to_block is NULL.
int EquationDetect::LabelSpecialText(TO_BLOCK* to_block) {
  if (to_block == NULL) {
    tprintf("Warning: input to_block is NULL!\n");
    return -1;
  }

  BLOBNBOX_LIST* lists[] = { &(to_block->blobs), &(to_block->large_blobs) };
  for (int list_index = 0; list_index < 2; ++list_index) {
    BLOBNBOX_IT it(lists[list_index]);
    for (it.mark_cycle_pt(); !it.cycled_list(); it.forward()) {
      it.data()->set_special_text_type(BSTT_NONE);
    }
  }

  return 0;
}
// Classifies one blob as math / digit / italic / unclear / none by running
// both the equation classifier and the language classifier on a normalized
// copy and comparing their best certainties. Blobs shorter than height_th
// (when height_th > 0) are marked BSTT_NONE without classification.
// Fix: guard lang_choice against NULL before dereferencing it in the italic
// check — ratings_lang can be empty, leaving lang_choice NULL.
void EquationDetect::IdentifySpecialText(
    BLOBNBOX *blobnbox, const int height_th) {
  ASSERT_HOST(blobnbox != NULL);
  if (blobnbox->bounding_box().height() < height_th && height_th > 0) {
    // For small blob, we simply set to BSTT_NONE.
    blobnbox->set_special_text_type(BSTT_NONE);
    return;
  }

  BLOB_CHOICE_LIST ratings_equ, ratings_lang;
  C_BLOB* blob = blobnbox->cblob();
  // TODO(joeliu/rays) Fix this. We may have to normalize separately for
  // each classifier here, as they may require different PolygonalCopy.
  TBLOB* tblob = TBLOB::PolygonalCopy(false, blob);
  const TBOX& box = tblob->bounding_box();

  // Normalize the blob. Set the origin to the place we want to be the
  // bottom-middle, and scaling is to make the height the x-height.
  float scaling = static_cast<float>(kBlnXHeight) / box.height();
  float x_orig = (box.left() + box.right()) / 2.0f, y_orig = box.bottom();
  TBLOB* normed_blob = new TBLOB(*tblob);
  normed_blob->Normalize(NULL, NULL, NULL, x_orig, y_orig, scaling, scaling,
                         0.0f, static_cast<float>(kBlnBaselineOffset),
                         false, NULL);
  equ_tesseract_->AdaptiveClassifier(normed_blob, &ratings_equ);
  lang_tesseract_->AdaptiveClassifier(normed_blob, &ratings_lang);
  delete normed_blob;
  delete tblob;

  // Get the best choice from ratings_lang and ratings_equ. As the choices in
  // each list have already been sorted by certainty, we simply use the first.
  BLOB_CHOICE *lang_choice = NULL, *equ_choice = NULL;
  if (ratings_lang.length() > 0) {
    BLOB_CHOICE_IT choice_it(&ratings_lang);
    lang_choice = choice_it.data();
  }
  if (ratings_equ.length() > 0) {
    BLOB_CHOICE_IT choice_it(&ratings_equ);
    equ_choice = choice_it.data();
  }
  float lang_score = lang_choice ? lang_choice->certainty() : -FLT_MAX;
  float equ_score = equ_choice ? equ_choice->certainty() : -FLT_MAX;

  const float kConfScoreTh = -5.0f, kConfDiffTh = 1.8;
  // The scores here are negative, so the max/min == fabs(min/max).
  float diff = fabs(lang_score - equ_score);
  BlobSpecialTextType type = BSTT_NONE;

  // Classification.
  if (fmax(lang_score, equ_score) < kConfScoreTh) {
    // If both scores are very small, then mark it as unclear.
    type = BSTT_UNCLEAR;
  } else if (diff > kConfDiffTh && equ_score > lang_score) {
    // If equ_score is significantly higher, then we classify this character
    // as math symbol.
    type = BSTT_MATH;
  } else if (lang_choice) {
    // For other cases: lang_score is similar or significantly higher.
    type = EstimateTypeForUnichar(
        lang_tesseract_->unicharset, lang_choice->unichar_id());
  }

  // Defensive NULL check: do not look up font info when ratings_lang was
  // empty (lang_choice == NULL); fall through to setting the plain type.
  if (type == BSTT_NONE && lang_choice != NULL &&
      lang_tesseract_->get_fontinfo_table().get(
          lang_choice->fontinfo_id()).is_italic()) {
    // For text symbol, we still check if it is italic.
    blobnbox->set_special_text_type(BSTT_ITALIC);
  } else {
    blobnbox->set_special_text_type(type);
  }
}
// Estimates the special-text type for a classified unichar: alphabetic
// characters map to BSTT_NONE, most punctuation to BSTT_MATH (except a
// fixed exclusion list), digits and digit-like characters to BSTT_DIGIT,
// and everything else to BSTT_MATH.
BlobSpecialTextType EquationDetect::EstimateTypeForUnichar(
    const UNICHARSET& unicharset, const UNICHAR_ID id) const {
  STRING s = unicharset.id_to_unichar(id);
  if (unicharset.get_isalpha(id)) {
    return BSTT_NONE;
  }

  if (unicharset.get_ispunctuation(id)) {
    // Exclude some special texts that are likely to be confused as math symbol.
    // Built lazily on first call and cached in a function-local static; the
    // sentinel "" terminates the list.
    static GenericVector<UNICHAR_ID> ids_to_exclude;
    if (ids_to_exclude.empty()) {
      static const STRING kCharsToEx[] = {"'", "`", "\"", "\\", ",", ".",
          "〈", "〉", "《", "》", "」", "「", ""};
      int i = 0;
      while (kCharsToEx[i] != "") {
        ids_to_exclude.push_back(
            unicharset.unichar_to_id(kCharsToEx[i++].string()));
      }
      ids_to_exclude.sort();
    }
    return ids_to_exclude.bool_binary_search(id) ? BSTT_NONE : BSTT_MATH;
  }

  // Check if it is digit. In addition to the isdigit attribute, we also check
  // if this character belongs to those likely to be confused with a digit.
  static const STRING kDigitsChars = "|";
  if (unicharset.get_isdigit(id) ||
      (s.length() == 1 && kDigitsChars.contains(s[0]))) {
    return BSTT_DIGIT;
  } else {
    return BSTT_MATH;
  }
}
// Runs special-text classification over every text/equation partition in
// part_grid_. For each partition it first marks mergeable blobs as
// BSTT_SKIP, derives a height threshold (2/3 of the median non-skip blob
// height), then classifies each remaining blob. Classifier tuning knobs are
// temporarily altered and restored afterwards.
void EquationDetect::IdentifySpecialText() {
  // Set configuration for Tesseract::AdaptiveClassifier.
  equ_tesseract_->tess_cn_matching.set_value(true);  // turn it on
  equ_tesseract_->tess_bn_matching.set_value(false);

  // Set the multiplier to zero for lang_tesseract_ to improve the accuracy.
  // Save the current values so they can be restored below.
  int classify_class_pruner = lang_tesseract_->classify_class_pruner_multiplier;
  int classify_integer_matcher =
      lang_tesseract_->classify_integer_matcher_multiplier;
  lang_tesseract_->classify_class_pruner_multiplier.set_value(0);
  lang_tesseract_->classify_integer_matcher_multiplier.set_value(0);

  ColPartitionGridSearch gsearch(part_grid_);
  ColPartition *part = NULL;
  gsearch.StartFullSearch();
  while ((part = gsearch.NextFullSearch()) != NULL) {
    if (!IsTextOrEquationType(part->type())) {
      continue;
    }
    IdentifyBlobsToSkip(part);
    BLOBNBOX_C_IT bbox_it(part->boxes());
    // Compute the height threshold.
    GenericVector<int> blob_heights;
    for (bbox_it.mark_cycle_pt (); !bbox_it.cycled_list();
         bbox_it.forward()) {
      if (bbox_it.data()->special_text_type() != BSTT_SKIP) {
        blob_heights.push_back(bbox_it.data()->bounding_box().height());
      }
    }
    blob_heights.sort();
    // NOTE(review): blob_heights is indexed at size()/2 without an empty
    // check — assumes every partition keeps at least one non-skip blob;
    // confirm IdentifyBlobsToSkip guarantees this.
    int height_th = blob_heights[blob_heights.size() / 2] / 3 * 2;
    for (bbox_it.mark_cycle_pt (); !bbox_it.cycled_list();
         bbox_it.forward()) {
      if (bbox_it.data()->special_text_type() != BSTT_SKIP) {
        IdentifySpecialText(bbox_it.data(), height_th);
      }
    }
  }

  // Set the multiplier values back.
  lang_tesseract_->classify_class_pruner_multiplier.set_value(
      classify_class_pruner);
  lang_tesseract_->classify_integer_matcher_multiplier.set_value(
      classify_integer_matcher);

  if (equationdetect_save_spt_image) {  // For debug.
    STRING outfile;
    GetOutputTiffName("_spt", &outfile);
    PaintSpecialTexts(outfile);
  }
}
// Marks blobs in part that should be merged with a horizontally overlapping
// neighbor of similar size as BSTT_SKIP, so later classification ignores
// them (e.g. pieces of one broken glyph).
void EquationDetect::IdentifyBlobsToSkip(ColPartition* part) {
  ASSERT_HOST(part);
  BLOBNBOX_C_IT blob_it(part->boxes());

  for (blob_it.mark_cycle_pt(); !blob_it.cycled_list(); blob_it.forward()) {
    // At this moment, no blob should have been joined.
    ASSERT_HOST(!blob_it.data()->joined_to_prev());
  }
  for (blob_it.mark_cycle_pt(); !blob_it.cycled_list(); blob_it.forward()) {
    BLOBNBOX* blob = blob_it.data();
    if (blob->joined_to_prev() || blob->special_text_type() == BSTT_SKIP) {
      continue;
    }
    TBOX blob_box = blob->bounding_box();

    // Search if any blob can be merged into blob. If found, then we mark all
    // these blobs as BSTT_SKIP.
    BLOBNBOX_C_IT blob_it2 = blob_it;
    bool found = false;
    while (!blob_it2.at_last()) {
      BLOBNBOX* nextblob = blob_it2.forward();
      const TBOX& nextblob_box = nextblob->bounding_box();
      // Blobs are ordered by left edge, so once the next blob starts right
      // of the current box there can be no further overlap.
      if (nextblob_box.left() >= blob_box.right()) {
        break;
      }
      const float kWidthR = 0.4, kHeightR = 0.3;
      bool xoverlap = blob_box.major_x_overlap(nextblob_box),
          yoverlap = blob_box.y_overlap(nextblob_box);
      // Size-similarity ratios in [0, 1]; 1 means identical dimensions.
      float widthR = static_cast<float>(
          MIN(nextblob_box.width(), blob_box.width())) /
          MAX(nextblob_box.width(), blob_box.width());
      float heightR = static_cast<float>(
          MIN(nextblob_box.height(), blob_box.height())) /
          MAX(nextblob_box.height(), blob_box.height());

      if (xoverlap && yoverlap && widthR > kWidthR && heightR > kHeightR) {
        // Found one, set nextblob type and recompute blob_box.
        found = true;
        nextblob->set_special_text_type(BSTT_SKIP);
        blob_box += nextblob_box;
      }
    }
    if (found) {
      blob->set_special_text_type(BSTT_SKIP);
    }
  }
}
// Main entry point of the detector: runs the multi-pass pipeline (special
// text labeling, overlap merging, seed identification, seed expansion,
// satellite merging) over part_grid. Returns 0 on success, -1 when either
// required Tesseract instance or an argument is missing.
int EquationDetect::FindEquationParts(
    ColPartitionGrid* part_grid, ColPartitionSet** best_columns) {
  if (!equ_tesseract_ || !lang_tesseract_) {
    tprintf("Warning: equ_tesseract_/lang_tesseract_ is NULL!\n");
    return -1;
  }
  if (!part_grid || !best_columns) {
    tprintf("part_grid/best_columns is NULL!!\n");
    return -1;
  }
  cp_seeds_.clear();
  part_grid_ = part_grid;
  best_columns_ = best_columns;
  resolution_ = lang_tesseract_->source_resolution();
  STRING outfile;
  page_count_++;

  if (equationdetect_save_bi_image) {
    GetOutputTiffName("_bi", &outfile);
    pixWrite(outfile.string(), lang_tesseract_->pix_binary(), IFF_TIFF_G4);
  }

  // Pass 0: Compute special text type for blobs.
  IdentifySpecialText();

  // Pass 1: Merge parts by overlap.
  MergePartsByLocation();

  // Pass 2: compute the math blob density and find the seed partition.
  IdentifySeedParts();

  // We still need separate seed into block seed and inline seed partition.
  IdentifyInlineParts();

  if (equationdetect_save_seed_image) {
    GetOutputTiffName("_seed", &outfile);
    PaintColParts(outfile);
  }

  // Pass 3: expand block equation seeds. Loop until no seed expands further.
  while (!cp_seeds_.empty()) {
    GenericVector<ColPartition*> seeds_expanded;
    for (int i = 0; i < cp_seeds_.size(); ++i) {
      if (ExpandSeed(cp_seeds_[i])) {
        // If this seed is expanded, then we add it into seeds_expanded. Note
        // this seed has been removed from part_grid_ if it is expanded.
        seeds_expanded.push_back(cp_seeds_[i]);
      }
    }
    // Add seeds_expanded back into part_grid_ and reset cp_seeds_.
    for (int i = 0; i < seeds_expanded.size(); ++i) {
      InsertPartAfterAbsorb(seeds_expanded[i]);
    }
    cp_seeds_ = seeds_expanded;
  }

  // Pass 4: find math block satellite text partitions and merge them.
  ProcessMathBlockSatelliteParts();

  if (equationdetect_save_merged_image) {  // For debug.
    GetOutputTiffName("_merged", &outfile);
    PaintColParts(outfile);
  }

  return 0;
}
// Repeatedly merges text/equation partitions that overlap each other (as
// decided by SearchByOverlap) until a full sweep of the grid produces no
// further merges. Merged partitions are re-inserted via
// InsertPartAfterAbsorb so their column attributes are recomputed.
void EquationDetect::MergePartsByLocation() {
  while (true) {
    ColPartition* part = NULL;
    // partitions that have been updated.
    GenericVector<ColPartition*> parts_updated;

    ColPartitionGridSearch gsearch(part_grid_);
    gsearch.StartFullSearch();
    while ((part = gsearch.NextFullSearch()) != NULL) {
      if (!IsTextOrEquationType(part->type())) {
        continue;
      }
      GenericVector<ColPartition*> parts_to_merge;
      SearchByOverlap(part, &parts_to_merge);
      if (parts_to_merge.empty()) {
        continue;
      }

      // Merge parts_to_merge with part, and remove them from part_grid_.
      part_grid_->RemoveBBox(part);
      for (int i = 0; i < parts_to_merge.size(); ++i) {
        ASSERT_HOST(parts_to_merge[i] != NULL && parts_to_merge[i] != part);
        part->Absorb(parts_to_merge[i], NULL);
      }
      // The grid changed under the iterator; re-sync it before continuing.
      gsearch.RepositionIterator();

      parts_updated.push_back(part);
    }

    if (parts_updated.empty()) {  // Exit the loop
      break;
    }

    // Re-insert parts_updated into part_grid_.
    for (int i = 0; i < parts_updated.size(); ++i) {
      InsertPartAfterAbsorb(parts_updated[i]);
    }
  }
}
// Collects into parts_overlap the partitions near seed that overlap it
// enough to be merged: either near-total overlap in both directions, or
// (when seed is already an equation) moderate overlap in one direction.
// Matched partitions are removed from the grid search as they are found.
void EquationDetect::SearchByOverlap(
    ColPartition* seed,
    GenericVector<ColPartition*>* parts_overlap) {
  ASSERT_HOST(seed != NULL && parts_overlap != NULL);
  if (!IsTextOrEquationType(seed->type())) {
    return;
  }
  ColPartitionGridSearch search(part_grid_);
  const TBOX& seed_box(seed->bounding_box());
  const int kRadNeighborCells = 30;
  // Radial search centered on the seed's bounding box.
  search.StartRadSearch((seed_box.left() + seed_box.right()) / 2,
                        (seed_box.top() + seed_box.bottom()) / 2,
                        kRadNeighborCells);
  search.SetUniqueMode(true);

  // Search iteratively.
  ColPartition *part;
  GenericVector<ColPartition*> parts;
  const float kLargeOverlapTh = 0.95;
  const float kEquXOverlap = 0.4, kEquYOverlap = 0.5;
  while ((part = search.NextRadSearch()) != NULL) {
    if (part == seed || !IsTextOrEquationType(part->type())) {
      continue;
    }
    const TBOX& part_box(part->bounding_box());
    bool merge = false;

    float x_overlap_fraction = part_box.x_overlap_fraction(seed_box),
        y_overlap_fraction = part_box.y_overlap_fraction(seed_box);

    // If part is large overlapped with seed, then set merge to true.
    if (x_overlap_fraction >= kLargeOverlapTh &&
        y_overlap_fraction >= kLargeOverlapTh) {
      merge = true;
    } else if (seed->type() == PT_EQUATION &&
               IsTextOrEquationType(part->type())) {
      // Equation seeds absorb neighbors on weaker one-axis overlap.
      if ((x_overlap_fraction > kEquXOverlap && y_overlap_fraction > 0.0) ||
          (x_overlap_fraction > 0.0 && y_overlap_fraction > kEquYOverlap)) {
        merge = true;
      }
    }

    if (merge) {  // Remove the part from search and put it into parts.
      search.RemoveBBox();
      parts_overlap->push_back(part);
    }
  }
}
// Re-inserts a partition into part_grid_ after it absorbed others:
// recomputes its column attributes via SetPartitionType while preserving
// its previous type/flow/blob-type classification.
void EquationDetect::InsertPartAfterAbsorb(ColPartition* part) {
  ASSERT_HOST(part);

  // Before insert part back into part_grid_, we will need re-compute some
  // of its attributes such as first_column_, last_column_. However, we still
  // want to preserve its type.
  BlobTextFlowType flow_type = part->flow();
  PolyBlockType part_type = part->type();
  BlobRegionType blob_type = part->blob_type();

  // Call SetPartitionType to re-compute the attributes of part.
  const TBOX& part_box(part->bounding_box());
  int grid_x, grid_y;
  part_grid_->GridCoords(
      part_box.left(), part_box.bottom(), &grid_x, &grid_y);
  part->SetPartitionType(resolution_, best_columns_[grid_y]);

  // Reset the types back.
  part->set_type(part_type);
  part->set_blob_type(blob_type);
  part->set_flow(flow_type);
  part->SetBlobTypes();

  // Insert into part_grid_.
  part_grid_->InsertBBox(true, true, part);
}
// Scans all text/equation partitions, splitting them into two candidate
// pools by math/digit blob density (seeds1: high density, seeds2: lower
// density but left-indented), while also collecting layout features of
// plain text partitions (left edges of indented text, foreground density).
// Candidates that pass the foreground-density and indent-alignment checks
// become PT_EQUATION seeds in cp_seeds_; failed seeds1 candidates become
// PT_INLINE_EQUATION.
void EquationDetect::IdentifySeedParts() {
  ColPartitionGridSearch gsearch(part_grid_);
  ColPartition *part = NULL;
  gsearch.StartFullSearch();

  GenericVector<ColPartition*> seeds1, seeds2;
  // The left coordinates of indented text partitions.
  GenericVector<int> indented_texts_left;
  // The foreground density of text partitions.
  GenericVector<float> texts_foreground_density;
  while ((part = gsearch.NextFullSearch()) != NULL) {
    if (!IsTextOrEquationType(part->type())) {
      continue;
    }
    part->ComputeSpecialBlobsDensity();
    bool blobs_check = CheckSeedBlobsCount(part);
    const int kTextBlobsTh = 20;

    if (CheckSeedDensity(kMathDigitDensityTh1, kMathDigitDensityTh2, part) &&
        blobs_check) {
      // Passed high density threshold test, save into seeds1.
      seeds1.push_back(part);
    } else {
      IndentType indent = IsIndented(part);
      if (IsLeftIndented(indent) && blobs_check &&
          CheckSeedDensity(kMathDigitDensityTh2, kMathDigitDensityTh2, part)) {
        // Passed low density threshold test and is indented, save into seeds2.
        seeds2.push_back(part);
      } else if (!IsRightIndented(indent) &&
                 part->boxes_count() > kTextBlobsTh) {
        // This is likely to be a text part, save the features.
        const TBOX& box = part->bounding_box();
        if (IsLeftIndented(indent)) {
          indented_texts_left.push_back(box.left());
        }
        texts_foreground_density.push_back(ComputeForegroundDensity(box));
      }
    }
  }

  // Sort the features collected from text regions.
  indented_texts_left.sort();
  texts_foreground_density.sort();
  float foreground_density_th = 0.15;  // Default value.
  if (!texts_foreground_density.empty()) {
    // Use the median of the texts_foreground_density.
    foreground_density_th = 0.8 * texts_foreground_density[
        texts_foreground_density.size() / 2];
  }

  for (int i = 0; i < seeds1.size(); ++i) {
    const TBOX& box = seeds1[i]->bounding_box();
    // A seed aligned with many indented text lines is probably an indented
    // paragraph, not a displayed equation.
    if (CheckSeedFgDensity(foreground_density_th, seeds1[i]) &&
        !(IsLeftIndented(IsIndented(seeds1[i])) &&
          CountAlignment(indented_texts_left, box.left()) >=
          kLeftIndentAlignmentCountTh)) {
      // Mark as PT_EQUATION type.
      seeds1[i]->set_type(PT_EQUATION);
      cp_seeds_.push_back(seeds1[i]);
    } else {  // Mark as PT_INLINE_EQUATION type.
      seeds1[i]->set_type(PT_INLINE_EQUATION);
    }
  }

  for (int i = 0; i < seeds2.size(); ++i) {
    if (CheckForSeed2(indented_texts_left, foreground_density_th, seeds2[i])) {
      seeds2[i]->set_type(PT_EQUATION);
      cp_seeds_.push_back(seeds2[i]);
    }
  }
}
// Returns the fraction of foreground (ink) pixels inside tbox, measured on
// the binary page image. With Leptonica older than 1.69 the needed API is
// missing, so 1.0f is returned, which effectively disables seed detection.
float EquationDetect::ComputeForegroundDensity(const TBOX& tbox) {
#if LIBLEPT_MINOR_VERSION < 69 && LIBLEPT_MAJOR_VERSION <= 1
  // This will disable the detector because no seed will be identified.
  return 1.0f;
#else
  Pix *pix_bi = lang_tesseract_->pix_binary();
  int pix_height = pixGetHeight(pix_bi);
  // TBOX uses a bottom-left origin while Pix uses top-left, hence the
  // pix_height - top() conversion of the y coordinate.
  Box* box = boxCreate(tbox.left(), pix_height - tbox.top(),
                       tbox.width(), tbox.height());
  Pix *pix_sub = pixClipRectangle(pix_bi, box, NULL);
  l_float32 fract;
  pixForegroundFraction(pix_sub, &fract);
  pixDestroy(&pix_sub);
  boxDestroy(&box);

  return fract;
#endif
}
// Returns true if enough (>= 30%) of the horizontal pieces of part have a
// foreground density below density_th.
// Fix: guard against an empty split result — SplitCPHorLite can bail out
// (e.g. zero median width), and the original computed 0.0/0 (NaN) before
// comparing; NaN >= th is false, so returning false here preserves the
// original's effective behavior while avoiding the bogus division.
bool EquationDetect::CheckSeedFgDensity(const float density_th,
                                        ColPartition* part) {
  ASSERT_HOST(part);

  // Split part horizontally, and check the density of each sub part.
  GenericVector<TBOX> sub_boxes;
  SplitCPHorLite(part, &sub_boxes);
  if (sub_boxes.empty()) {
    return false;  // Nothing to measure: the seed fails the density test.
  }
  float parts_passed = 0.0;
  for (int i = 0; i < sub_boxes.size(); ++i) {
    float density = ComputeForegroundDensity(sub_boxes[i]);
    if (density < density_th) {
      parts_passed++;
    }
  }

  // If most sub parts passed, then we return true.
  const float kSeedPartRatioTh = 0.3;
  return parts_passed / sub_boxes.size() >= kSeedPartRatioTh;
}
// Splits part horizontally at gaps wider than 3x its median blob width.
// The resulting pieces (which do not own their blobs) are appended to
// parts_splitted; any pointers previously held by parts_splitted are
// deleted first. Does nothing when part has no width or no blobs.
void EquationDetect::SplitCPHor(ColPartition* part,
     GenericVector<ColPartition*>* parts_splitted) {
  ASSERT_HOST(part && parts_splitted);
  if (part->median_width() == 0 || part->boxes_count() == 0) {
    return;
  }

  // Make a copy of part, and reset parts_splitted.
  ColPartition* right_part = part->CopyButDontOwnBlobs();
  parts_splitted->delete_data_pointers();
  parts_splitted->clear();

  const double kThreshold = part->median_width() * 3.0;
  bool found_split = true;
  while (found_split) {
    found_split = false;
    BLOBNBOX_C_IT box_it(right_part->boxes());
    // Blobs are sorted left side first. If blobs overlap,
    // the previous blob may have a "more right" right side.
    // Account for this by always keeping the largest "right"
    // so far.
    int previous_right = MIN_INT32;

    // Look for the next split in the partition.
    for (box_it.mark_cycle_pt(); !box_it.cycled_list(); box_it.forward()) {
      const TBOX& box = box_it.data()->bounding_box();
      if (previous_right != MIN_INT32 &&
          box.left() - previous_right > kThreshold) {
        // We have a split position. Split the partition in two pieces.
        // Insert the left piece in the grid and keep processing the right.
        int mid_x = (box.left() + previous_right) / 2;
        ColPartition* left_part = right_part;
        right_part = left_part->SplitAt(mid_x);

        parts_splitted->push_back(left_part);
        left_part->ComputeSpecialBlobsDensity();
        found_split = true;
        break;
      }

      // The right side of the previous blobs.
      previous_right = MAX(previous_right, box.right());
    }
  }

  // Add the last piece.
  right_part->ComputeSpecialBlobsDensity();
  parts_splitted->push_back(right_part);
}
// Lightweight variant of SplitCPHor: computes only the bounding boxes of
// the horizontal pieces of part (split at gaps wider than 3x the median
// blob width) without constructing new ColPartitions. Leaves splitted_boxes
// empty when part has zero median width.
void EquationDetect::SplitCPHorLite(ColPartition* part,
     GenericVector<TBOX>* splitted_boxes) {
  ASSERT_HOST(part && splitted_boxes);
  splitted_boxes->clear();
  if (part->median_width() == 0) {
    return;
  }

  const double kThreshold = part->median_width() * 3.0;

  // Blobs are sorted left side first. If blobs overlap,
  // the previous blob may have a "more right" right side.
  // Account for this by always keeping the largest "right"
  // so far.
  TBOX union_box;
  int previous_right = MIN_INT32;
  BLOBNBOX_C_IT box_it(part->boxes());
  for (box_it.mark_cycle_pt(); !box_it.cycled_list(); box_it.forward()) {
    const TBOX& box = box_it.data()->bounding_box();
    if (previous_right != MIN_INT32 &&
        box.left() - previous_right > kThreshold) {
      // We have a split position: flush the accumulated union box and start
      // a new piece.
      splitted_boxes->push_back(union_box);
      previous_right = MIN_INT32;
    }
    if (previous_right == MIN_INT32) {
      union_box = box;
    } else {
      union_box += box;
    }
    // The right side of the previous blobs.
    previous_right = MAX(previous_right, box.right());
  }

  // Add the last piece.
  if (previous_right != MIN_INT32) {
    splitted_boxes->push_back(union_box);
  }
}
bool EquationDetect::CheckForSeed2(
const GenericVector<int>& indented_texts_left,
const float foreground_density_th,
ColPartition* part) {
ASSERT_HOST(part);
const TBOX& box = part->bounding_box();
// Check if it is aligned with any indented_texts_left.
if (!indented_texts_left.empty() &&
CountAlignment(indented_texts_left, box.left()) >=
kLeftIndentAlignmentCountTh) {
return false;
}
// Check the foreground density.
if (ComputeForegroundDensity(box) > foreground_density_th) {
return false;
}
return true;
}
// Counts the entries of sorted_vec that lie within ~3% of the page
// resolution of val, scanning outwards from the binary-search position.
int EquationDetect::CountAlignment(
    const GenericVector<int>& sorted_vec, const int val) const {
  if (sorted_vec.empty()) {
    return 0;
  }
  const int kDistTh = static_cast<int>(roundf(0.03 * resolution_));
  int pos = sorted_vec.binary_search(val), count = 0;

  // Search left side. Note: index is decremented inside the condition, so
  // each element is tested exactly once before stepping left.
  int index = pos;
  while (index >= 0 && abs(val - sorted_vec[index--]) < kDistTh) {
    count++;
  }

  // Search right side. No abs() here — elements right of pos are >= val in
  // a sorted vector, so the difference is non-negative.
  index = pos + 1;
  while (index < sorted_vec.size() && sorted_vec[index++] - val < kDistTh) {
    count++;
  }

  return count;
}
// Demotes equation seeds that are really inline equations: first by
// horizontal layout (margin/neighbor analysis), then by vertical
// line-spacing evidence scanned in both directions.
void EquationDetect::IdentifyInlineParts() {
  ComputeCPsSuperBBox();
  IdentifyInlinePartsHorizontal();
  int textparts_linespacing = EstimateTextPartLineSpacing();
  IdentifyInlinePartsVertical(true, textparts_linespacing);
  IdentifyInlinePartsVertical(false, textparts_linespacing);
}
// Recomputes cps_super_bbox_, the union of the bounding boxes of every
// partition currently in part_grid_. Any previous box is discarded.
void EquationDetect::ComputeCPsSuperBBox() {
  delete cps_super_bbox_;  // delete on NULL is a safe no-op
  cps_super_bbox_ = new TBOX();

  ColPartitionGridSearch gsearch(part_grid_);
  gsearch.StartFullSearch();
  ColPartition* part = NULL;
  while ((part = gsearch.NextFullSearch()) != NULL) {
    (*cps_super_bbox_) += part->bounding_box();
  }
}
// Horizontal inline-equation pass: a seed that is flush with the left text
// margin (or sits in the right half of the page) and has a wider
// non-equation neighbor on its open side is reclassified as
// PT_INLINE_EQUATION; all other seeds are kept in cp_seeds_.
void EquationDetect::IdentifyInlinePartsHorizontal() {
  ASSERT_HOST(cps_super_bbox_);
  GenericVector<ColPartition*> new_seeds;
  // Thresholds scale with the page resolution: half an inch of margin
  // difference, one inch of maximum gap to a neighbor.
  const int kMarginDiffTh = IntCastRounded(
      0.5 * lang_tesseract_->source_resolution());
  const int kGapTh = static_cast<int>(roundf(
      1.0 * lang_tesseract_->source_resolution()));
  ColPartitionGridSearch search(part_grid_);
  search.SetUniqueMode(true);
  // The center x coordinate of the cp_super_bbox_.
  int cps_cx = cps_super_bbox_->left() + cps_super_bbox_->width() / 2;
  for (int i = 0; i < cp_seeds_.size(); ++i) {
    ColPartition* part = cp_seeds_[i];
    const TBOX& part_box(part->bounding_box());
    int left_margin = part_box.left() - cps_super_bbox_->left(),
        right_margin = cps_super_bbox_->right() - part_box.right();
    bool right_to_left;
    if (left_margin + kMarginDiffTh < right_margin &&
        left_margin < kMarginDiffTh) {
      // part is left aligned, so we search if it has any right neighbor.
      search.StartSideSearch(
          part_box.right(), part_box.top(), part_box.bottom());
      right_to_left = false;
    } else if (left_margin > cps_cx) {
      // part locates on the right half on image, so search if it has any left
      // neighbor.
      search.StartSideSearch(
          part_box.left(), part_box.top(), part_box.bottom());
      right_to_left = true;
    } else {  // part is not an inline equation.
      new_seeds.push_back(part);
      continue;
    }
    ColPartition* neighbor = NULL;
    bool side_neighbor_found = false;
    while ((neighbor = search.NextSideSearch(right_to_left)) != NULL) {
      const TBOX& neighbor_box(neighbor->bounding_box());
      // A usable neighbor is close, on the same text line (major y overlap),
      // and not stacked on top of the seed (no major x overlap).
      if (!IsTextOrEquationType(neighbor->type()) ||
          part_box.x_gap(neighbor_box) > kGapTh ||
          !part_box.major_y_overlap(neighbor_box) ||
          part_box.major_x_overlap(neighbor_box)) {
        continue;
      }
      // We have found one. Set the side_neighbor_found flag.
      side_neighbor_found = true;
      break;
    }
    if (!side_neighbor_found) {  // Mark part as PT_INLINE_EQUATION.
      part->set_type(PT_INLINE_EQUATION);
    } else {
      // Check the geometric feature of neighbor.
      const TBOX& neighbor_box(neighbor->bounding_box());
      if (neighbor_box.width() > part_box.width() &&
          neighbor->type() != PT_EQUATION) {  // Mark as PT_INLINE_EQUATION.
        part->set_type(PT_INLINE_EQUATION);
      } else {  // part is not an inline equation type.
        new_seeds.push_back(part);
      }
    }
  }

  // Reset the cp_seeds_ using the new_seeds.
  cp_seeds_ = new_seeds;
}
// Estimates the line spacing of the text partitions in part_grid_.
// Collects the vertical gaps between consecutive text-typed partitions that
// overlap in x but not in y (i.e. stacked lines), then returns the mean of
// the smaller half of the sorted gaps. Returns -1 when fewer than 8 gaps
// were collected (not enough data for a reliable estimate).
int EquationDetect::EstimateTextPartLineSpacing() {
  ColPartitionGridSearch gsearch(part_grid_);
  // Get the y gap between text partitions;
  ColPartition *current = NULL, *prev = NULL;
  gsearch.StartFullSearch();
  GenericVector<int> ygaps;
  while ((current = gsearch.NextFullSearch()) != NULL) {
    if (!PTIsTextType(current->type())) {
      continue;
    }
    if (prev != NULL) {
      // Fixed: "&current_box" had been garbled into "¤t_box" by an
      // HTML-entity mangling of "&curren"; restore the reference declaration.
      const TBOX &current_box = current->bounding_box();
      const TBOX &prev_box = prev->bounding_box();
      // prev and current should be x major overlap and non y overlap.
      if (current_box.major_x_overlap(prev_box) &&
          !current_box.y_overlap(prev_box)) {
        int gap = current_box.y_gap(prev_box);
        if (gap < MIN(current_box.height(), prev_box.height())) {
          // The gap should be smaller than the height of the bounding boxes.
          ygaps.push_back(gap);
        }
      }
    }
    prev = current;
  }
  if (ygaps.size() < 8) { // We do not have enough data.
    return -1;
  }
  // Compute the line spacing from ygaps: use the mean of the first half.
  ygaps.sort();
  int spacing = 0, count;
  for (count = 0; count < ygaps.size() / 2; count++) {
    spacing += ygaps[count];
  }
  return spacing / count;
}
void EquationDetect::IdentifyInlinePartsVertical(
const bool top_to_bottom, const int textparts_linespacing) {
if (cp_seeds_.empty()) {
return;
}
// Sort cp_seeds_.
if (top_to_bottom) { // From top to bottom.
cp_seeds_.sort(&SortCPByTopReverse);
} else { // From bottom to top.
cp_seeds_.sort(&SortCPByBottom);
}
GenericVector<ColPartition*> new_seeds;
for (int i = 0; i < cp_seeds_.size(); ++i) {
ColPartition* part = cp_seeds_[i];
// If we sort cp_seeds_ from top to bottom, then for each cp_seeds_, we look
// for its top neighbors, so that if two/more inline regions are connected
// to each other, then we will identify the top one, and then use it to
// identify the bottom one.
if (IsInline(!top_to_bottom, textparts_linespacing, part)) {
part->set_type(PT_INLINE_EQUATION);
} else {
new_seeds.push_back(part);
}
}
cp_seeds_ = new_seeds;
}
// Returns true if part looks like an inline equation: it has a nearby
// vertical text neighbor (below when search_bottom is true, above
// otherwise) whose y gap is within the estimated line spacing, that
// overlaps part in x, and whose height is comparable to part's.
bool EquationDetect::IsInline(const bool search_bottom,
                              const int textparts_linespacing,
                              ColPartition* part) {
  ASSERT_HOST(part != NULL);
  // Look for its nearest vertical neighbor that hardly overlaps in y but
  // largely overlaps in x.
  ColPartitionGridSearch search(part_grid_);
  ColPartition *neighbor = NULL;
  const TBOX& part_box(part->bounding_box());
  const float kYGapRatioTh = 1.0;
  if (search_bottom) {
    search.StartVerticalSearch(part_box.left(), part_box.right(),
                               part_box.bottom());
  } else {
    search.StartVerticalSearch(part_box.left(), part_box.right(),
                               part_box.top());
  }
  search.SetUniqueMode(true);
  while ((neighbor = search.NextVerticalSearch(search_bottom)) != NULL) {
    const TBOX& neighbor_box(neighbor->bounding_box());
    // Neighbors are returned in increasing distance, so once the y gap
    // exceeds the smaller box height we can stop searching entirely.
    if (part_box.y_gap(neighbor_box) > kYGapRatioTh *
        MIN(part_box.height(), neighbor_box.height())) {
      // Finished searching.
      break;
    }
    if (!PTIsTextType(neighbor->type())) {
      continue;
    }
    // Check if neighbor and part is inline similar.
    const float kHeightRatioTh = 0.5;
    // Use the measured text line spacing when available; otherwise fall back
    // to a resolution-scaled default.
    const int kYGapTh = textparts_linespacing > 0 ?
        textparts_linespacing + static_cast<int>(roundf(0.02 * resolution_)):
        static_cast<int>(roundf(0.05 * resolution_)); // Default value.
    if (part_box.x_overlap(neighbor_box) && // Location feature.
        part_box.y_gap(neighbor_box) <= kYGapTh && // Line spacing.
        // Geo feature.
        static_cast<float>(MIN(part_box.height(), neighbor_box.height())) /
        MAX(part_box.height(), neighbor_box.height()) > kHeightRatioTh) {
      return true;
    }
  }
  return false;
}
// Returns true if part contains enough blobs to qualify as an equation seed:
// the total blob count, the math blob count, and the combined math+digit
// blob count must all exceed their respective thresholds.
bool EquationDetect::CheckSeedBlobsCount(ColPartition* part) {
  if (part == NULL) {
    return false;
  }
  const int kSeedMathBlobsCount = 2;
  const int kSeedMathDigitBlobsCount = 5;
  const int total_blobs = part->boxes_count();
  const int math_count = part->SpecialBlobsCount(BSTT_MATH);
  const int digit_count = part->SpecialBlobsCount(BSTT_DIGIT);
  // All three counts must pass their thresholds for part to be a seed.
  return total_blobs >= kSeedBlobsCountTh &&
         math_count > kSeedMathBlobsCount &&
         math_count + digit_count > kSeedMathDigitBlobsCount;
}
// Returns true if part's special-blob densities qualify it as a seed:
// either the math+digit density alone is above math_density_high, or it is
// above math_density_low AND the math+digit+italic density is above
// kMathItalicDensityTh.
bool EquationDetect::CheckSeedDensity(
    const float math_density_high,
    const float math_density_low,
    const ColPartition* part) const {
  ASSERT_HOST(part);
  const float math_digit_density = part->SpecialBlobsDensity(BSTT_MATH) +
      part->SpecialBlobsDensity(BSTT_DIGIT);
  const float italic_density = part->SpecialBlobsDensity(BSTT_ITALIC);
  // Accept a high math/digit density on its own, or a moderate math/digit
  // density backed up by enough italic blobs.
  return math_digit_density > math_density_high ||
         (math_digit_density + italic_density > kMathItalicDensityTh &&
          math_digit_density > math_density_low);
}
// Determines whether part is indented relative to its vertical neighbors by
// performing a radius search around its center and comparing the left/right
// edges of nearby text partitions. Returns LEFT_INDENT, RIGHT_INDENT,
// BOTH_INDENT, or NO_INDENT accordingly; bails out with NO_INDENT if a close
// horizontal neighbor suggests part is an over-segmentation fragment.
EquationDetect::IndentType EquationDetect::IsIndented(ColPartition* part) {
  ASSERT_HOST(part);
  ColPartitionGridSearch search(part_grid_);
  ColPartition *neighbor = NULL;
  const TBOX& part_box(part->bounding_box());
  // Thresholds scale with the image resolution.
  const int kXGapTh = static_cast<int>(roundf(0.5 * resolution_));
  const int kRadiusTh = static_cast<int>(roundf(3.0 * resolution_));
  const int kYGapTh = static_cast<int>(roundf(0.5 * resolution_));
  // Here we use a simple approximation algorithm: from the center of part, We
  // perform the radius search, and check if we can find a neighboring parition
  // that locates on the top/bottom left of part.
  search.StartRadSearch((part_box.left() + part_box.right()) / 2,
      (part_box.top() + part_box.bottom()) / 2, kRadiusTh);
  search.SetUniqueMode(true);
  bool left_indented = false, right_indented = false;
  // Stop early once both indents have been established.
  while ((neighbor = search.NextRadSearch()) != NULL &&
         (!left_indented || !right_indented)) {
    if (neighbor == part) {
      continue;
    }
    const TBOX& neighbor_box(neighbor->bounding_box());
    if (part_box.major_y_overlap(neighbor_box) &&
        part_box.x_gap(neighbor_box) < kXGapTh) {
      // When this happens, it is likely part is a fragment of an
      // over-segmented colpartition. So we return false.
      return NO_INDENT;
    }
    if (!IsTextOrEquationType(neighbor->type())) {
      continue;
    }
    // The neighbor should be above/below part, and overlap in x direction.
    if (!part_box.x_overlap(neighbor_box) || part_box.y_overlap(neighbor_box)) {
      continue;
    }
    if (part_box.y_gap(neighbor_box) < kYGapTh) {
      // Positive gaps mean part's edge is inset relative to the neighbor's.
      int left_gap = part_box.left() - neighbor_box.left();
      int right_gap = neighbor_box.right() - part_box.right();
      if (left_gap > kXGapTh) {
        left_indented = true;
      }
      if (right_gap > kXGapTh) {
        right_indented = true;
      }
    }
  }
  if (left_indented && right_indented) {
    return BOTH_INDENT;
  }
  if (left_indented) {
    return LEFT_INDENT;
  }
  if (right_indented) {
    return RIGHT_INDENT;
  }
  return NO_INDENT;
}
// Grows seed by absorbing neighboring partitions found by the four
// directional expansion searches plus the overlap search. Returns true if
// anything was merged. Note: seed is removed from part_grid_ before the
// merges (its bounding box changes) and is NOT re-inserted here — the
// caller is responsible for putting it back.
bool EquationDetect::ExpandSeed(ColPartition* seed) {
  if (seed == NULL || // This seed has been absorbed by other seeds.
      seed->IsVerticalType()) { // We skip vertical type right now.
    return false;
  }
  // Expand in four directions.
  GenericVector<ColPartition*> parts_to_merge;
  ExpandSeedHorizontal(true, seed, &parts_to_merge);
  ExpandSeedHorizontal(false, seed, &parts_to_merge);
  ExpandSeedVertical(true, seed, &parts_to_merge);
  ExpandSeedVertical(false, seed, &parts_to_merge);
  SearchByOverlap(seed, &parts_to_merge);
  if (parts_to_merge.empty()) { // We don't find any partition to merge.
    return false;
  }
  // Merge all partitions in parts_to_merge with seed. We first remove seed
  // from part_grid_ as its bounding box is going to expand. Then we add it
  // back after it aborbs all parts_to_merge parititions.
  part_grid_->RemoveBBox(seed);
  for (int i = 0; i < parts_to_merge.size(); ++i) {
    ColPartition* part = parts_to_merge[i];
    if (part->type() == PT_EQUATION) {
      // If part is in cp_seeds_, then we mark it as NULL so that we won't
      // process it again.
      for (int j = 0; j < cp_seeds_.size(); ++j) {
        if (part == cp_seeds_[j]) {
          cp_seeds_[j] = NULL;
          break;
        }
      }
    }
    // part has already been removed from part_grid_ in function
    // ExpandSeedHorizontal/ExpandSeedVertical.
    seed->Absorb(part, NULL);
  }
  return true;
}
// Searches left (search_left true) or right of seed for partitions to merge
// into it: nearby equation partitions with sufficient y overlap, or small
// near text neighbors passing the density check. Matching partitions are
// removed from part_grid_ and appended to parts_to_merge.
void EquationDetect::ExpandSeedHorizontal(
    const bool search_left,
    ColPartition* seed,
    GenericVector<ColPartition*>* parts_to_merge) {
  ASSERT_HOST(seed != NULL && parts_to_merge != NULL);
  const float kYOverlapTh = 0.6;
  const int kXGapTh = static_cast<int>(roundf(0.2 * resolution_));
  ColPartitionGridSearch search(part_grid_);
  const TBOX& seed_box(seed->bounding_box());
  int x = search_left ? seed_box.left() : seed_box.right();
  search.StartSideSearch(x, seed_box.bottom(), seed_box.top());
  search.SetUniqueMode(true);
  // Search iteratively.
  ColPartition *part = NULL;
  while ((part = search.NextSideSearch(search_left)) != NULL) {
    if (part == seed) {
      continue;
    }
    const TBOX& part_box(part->bounding_box());
    // Results come back in increasing distance, so the first part beyond
    // kXGapTh ends the search.
    if (part_box.x_gap(seed_box) > kXGapTh) { // Out of scope.
      break;
    }
    // Check part location.
    if ((part_box.left() >= seed_box.left() && search_left) ||
        (part_box.right() <= seed_box.right() && !search_left)) {
      continue;
    }
    if (part->type() != PT_EQUATION) { // Non-equation type.
      // Skip PT_LINLINE_EQUATION and non text type.
      if (part->type() == PT_INLINE_EQUATION ||
          (!IsTextOrEquationType(part->type()) &&
           part->blob_type() != BRT_HLINE)) {
        continue;
      }
      // For other types, it should be the near small neighbor of seed.
      if (!IsNearSmallNeighbor(seed_box, part_box) ||
          !CheckSeedNeighborDensity(part)) {
        continue;
      }
    } else { // Equation type, check the y overlap.
      if (part_box.y_overlap_fraction(seed_box) < kYOverlapTh &&
          seed_box.y_overlap_fraction(part_box) < kYOverlapTh) {
        continue;
      }
    }
    // Passed the check, delete it from search and add into parts_to_merge.
    search.RemoveBBox();
    parts_to_merge->push_back(part);
  }
}
// Searches below (search_bottom true) or above seed for partitions to merge
// into it, analogous to ExpandSeedHorizontal but over the full width of
// cps_super_bbox_. Candidates that fail the checks ("skipped" parts) set a
// vertical limit: accepted candidates lying beyond a skipped part in the
// search direction are discarded, so the merge never jumps over rejected
// text. Accepted partitions are removed from part_grid_ and appended to
// parts_to_merge.
void EquationDetect::ExpandSeedVertical(
    const bool search_bottom,
    ColPartition* seed,
    GenericVector<ColPartition*>* parts_to_merge) {
  ASSERT_HOST(seed != NULL && parts_to_merge != NULL &&
              cps_super_bbox_ != NULL);
  const float kXOverlapTh = 0.4;
  const int kYGapTh = static_cast<int>(roundf(0.2 * resolution_));
  ColPartitionGridSearch search(part_grid_);
  const TBOX& seed_box(seed->bounding_box());
  int y = search_bottom ? seed_box.bottom() : seed_box.top();
  search.StartVerticalSearch(
      cps_super_bbox_->left(), cps_super_bbox_->right(), y);
  search.SetUniqueMode(true);
  // Search iteratively.
  ColPartition *part = NULL;
  GenericVector<ColPartition*> parts;
  // Track the extent of skipped (rejected) non-equation parts, used below
  // to filter out candidates that lie beyond them.
  int skipped_min_top = INT_MAX, skipped_max_bottom = -1;
  while ((part = search.NextVerticalSearch(search_bottom)) != NULL) {
    if (part == seed) {
      continue;
    }
    const TBOX& part_box(part->bounding_box());
    if (part_box.y_gap(seed_box) > kYGapTh) { // Out of scope.
      break;
    }
    // Check part location.
    if ((part_box.bottom() >= seed_box.bottom() && search_bottom) ||
        (part_box.top() <= seed_box.top() && !search_bottom)) {
      continue;
    }
    bool skip_part = false;
    if (part->type() != PT_EQUATION) { // Non-equation type.
      // Skip PT_LINLINE_EQUATION and non text type.
      if (part->type() == PT_INLINE_EQUATION ||
          (!IsTextOrEquationType(part->type()) &&
           part->blob_type() != BRT_HLINE)) {
        skip_part = true;
      } else if (!IsNearSmallNeighbor(seed_box, part_box) ||
          !CheckSeedNeighborDensity(part)) {
        // For other types, it should be the near small neighbor of seed.
        skip_part = true;
      }
    } else { // Equation type, check the x overlap.
      if (part_box.x_overlap_fraction(seed_box) < kXOverlapTh &&
          seed_box.x_overlap_fraction(part_box) < kXOverlapTh) {
        skip_part = true;
      }
    }
    if (skip_part) {
      if (part->type() != PT_EQUATION) {
        if (skipped_min_top > part_box.top()) {
          skipped_min_top = part_box.top();
        }
        if (skipped_max_bottom < part_box.bottom()) {
          skipped_max_bottom = part_box.bottom();
        }
      }
    } else {
      parts.push_back(part);
    }
  }
  // For every part in parts, we need verify it is not above skipped_min_top
  // when search top, or not below skipped_max_bottom when search bottom. I.e.,
  // we will skip a part if it looks like:
  //             search bottom      |         search top
  // seed:     ******************   | part:    **********
  // skipped: xxx                   | skipped:  xxx
  // part:    **********            | seed:    ***********
  for (int i = 0; i < parts.size(); i++) {
    const TBOX& part_box(parts[i]->bounding_box());
    if ((search_bottom && part_box.top() <= skipped_max_bottom) ||
        (!search_bottom && part_box.bottom() >= skipped_min_top)) {
      continue;
    }
    // Add parts[i] into parts_to_merge, and delete it from part_grid_.
    parts_to_merge->push_back(parts[i]);
    part_grid_->RemoveBBox(parts[i]);
  }
}
// Returns true if part_box is a "near small neighbor" of seed_box: it must
// be no taller and no wider than seed_box, and lie close to it either
// vertically (major x overlap with a small y gap) or horizontally (major y
// overlap with a small x gap).
bool EquationDetect::IsNearSmallNeighbor(const TBOX& seed_box,
                                         const TBOX& part_box) const {
  const int kXGapTh = static_cast<int>(roundf(0.25 * resolution_));
  const int kYGapTh = static_cast<int>(roundf(0.05 * resolution_));
  // Geometric check: part must not exceed seed in either dimension.
  if (part_box.height() > seed_box.height() ||
      part_box.width() > seed_box.width()) {
    return false;
  }
  // Proximity check: accept a close neighbor above/below or to either side.
  const bool near_vertically = part_box.major_x_overlap(seed_box) &&
      part_box.y_gap(seed_box) <= kYGapTh;
  const bool near_horizontally = part_box.major_y_overlap(seed_box) &&
      part_box.x_gap(seed_box) <= kXGapTh;
  return near_vertically || near_horizontally;
}
bool EquationDetect::CheckSeedNeighborDensity(const ColPartition* part) const {
ASSERT_HOST(part);
if (part->boxes_count() < kSeedBlobsCountTh) {
// Too few blobs, skip the check.
return true;
}
// We check the math blobs density and the unclear blobs density.
if (part->SpecialBlobsDensity(BSTT_MATH) +
part->SpecialBlobsDensity(BSTT_DIGIT) > kMathDigitDensityTh1 ||
part->SpecialBlobsDensity(BSTT_UNCLEAR) > kUnclearDensityTh) {
return true;
}
return false;
}
// Finds flowing/heading text partitions that are "satellites" of displayed
// math blocks (short text lines sandwiched next to equation blocks) and
// merges them into those blocks, re-typing them as PT_EQUATION. The median
// text height is used to restrict candidates to lines no taller than typical
// body text.
void EquationDetect::ProcessMathBlockSatelliteParts() {
  // Iterate over part_grid_, and find all parts that are text type but not
  // equation type.
  ColPartition *part = NULL;
  GenericVector<ColPartition*> text_parts;
  ColPartitionGridSearch gsearch(part_grid_);
  gsearch.StartFullSearch();
  while ((part = gsearch.NextFullSearch()) != NULL) {
    if (part->type() == PT_FLOWING_TEXT || part->type() == PT_HEADING_TEXT) {
      text_parts.push_back(part);
    }
  }
  if (text_parts.empty()) {
    return;
  }
  // Compute the medium height of the text_parts.
  text_parts.sort(&SortCPByHeight);
  const TBOX& text_box = text_parts[text_parts.size() / 2]->bounding_box();
  int med_height = text_box.height();
  // For an even count, average the two middle heights.
  if (text_parts.size() % 2 == 0 && text_parts.size() > 1) {
    const TBOX& text_box =
        text_parts[text_parts.size() / 2 - 1]->bounding_box();
    med_height = static_cast<int>(roundf(
        0.5 * (text_box.height() + med_height)));
  }
  // Iterate every text_parts and check if it is a math block satellite.
  for (int i = 0; i < text_parts.size(); ++i) {
    const TBOX& text_box(text_parts[i]->bounding_box());
    if (text_box.height() > med_height) {
      continue;
    }
    GenericVector<ColPartition*> math_blocks;
    if (!IsMathBlockSatellite(text_parts[i], &math_blocks)) {
      continue;
    }
    // Found. merge text_parts[i] with math_blocks.
    part_grid_->RemoveBBox(text_parts[i]);
    text_parts[i]->set_type(PT_EQUATION);
    for (int j = 0; j < math_blocks.size(); ++j) {
      part_grid_->RemoveBBox(math_blocks[j]);
      text_parts[i]->Absorb(math_blocks[j], NULL);
    }
    InsertPartAfterAbsorb(text_parts[i]);
  }
}
// Decides whether part is a satellite of neighboring math (equation) blocks.
// Finds the nearest text/equation neighbor above and below part; part
// qualifies only if it lies within the neighbors' horizontal extent and the
// closer neighbor is a near equation block. Qualifying neighbors are
// returned in math_blocks (closer one first). Returns true on success.
bool EquationDetect::IsMathBlockSatellite(
    ColPartition* part, GenericVector<ColPartition*>* math_blocks) {
  ASSERT_HOST(part != NULL && math_blocks != NULL);
  math_blocks->clear();
  const TBOX& part_box(part->bounding_box());
  // Find the top/bottom nearest neighbor of part.
  ColPartition *neighbors[2];
  int y_gaps[2] = {INT_MAX, INT_MAX};
  // The horizontal boundary of the neighbors.
  int neighbors_left = INT_MAX, neighbors_right = 0;
  // Index 0 searches upward, index 1 searches downward.
  for (int i = 0; i < 2; ++i) {
    neighbors[i] = SearchNNVertical(i != 0, part);
    if (neighbors[i]) {
      const TBOX& neighbor_box = neighbors[i]->bounding_box();
      y_gaps[i] = neighbor_box.y_gap(part_box);
      if (neighbor_box.left() < neighbors_left) {
        neighbors_left = neighbor_box.left();
      }
      if (neighbor_box.right() > neighbors_right) {
        neighbors_right = neighbor_box.right();
      }
    }
  }
  if (neighbors[0] == neighbors[1]) {
    // This happens when part is inside neighbor.
    neighbors[1] = NULL;
    y_gaps[1] = INT_MAX;
  }
  // Check if part is within [neighbors_left, neighbors_right].
  if (part_box.left() < neighbors_left || part_box.right() > neighbors_right) {
    return false;
  }
  // Get the index of the near one in neighbors.
  int index = y_gaps[0] < y_gaps[1] ? 0 : 1;
  // Check the near one.
  if (IsNearMathNeighbor(y_gaps[index], neighbors[index])) {
    math_blocks->push_back(neighbors[index]);
  } else {
    // If the near one failed the check, then we skip checking the far one.
    return false;
  }
  // Check the far one.
  index = 1 - index;
  if (IsNearMathNeighbor(y_gaps[index], neighbors[index])) {
    math_blocks->push_back(neighbors[index]);
  }
  return true;
}
// Returns the nearest text/equation-typed vertical neighbor of part in the
// requested direction (below when search_bottom is true, above otherwise),
// or NULL if none is found within half the image resolution. Candidates
// must have major x overlap with part and lie strictly on the search side.
ColPartition* EquationDetect::SearchNNVertical(
    const bool search_bottom, const ColPartition* part) {
  ASSERT_HOST(part);
  ColPartition *nearest_neighbor = NULL, *neighbor = NULL;
  const int kYGapTh = static_cast<int>(roundf(resolution_ * 0.5));
  ColPartitionGridSearch search(part_grid_);
  search.SetUniqueMode(true);
  const TBOX& part_box(part->bounding_box());
  int y = search_bottom ? part_box.bottom() : part_box.top();
  search.StartVerticalSearch(part_box.left(), part_box.right(), y);
  int min_y_gap = INT_MAX;
  while ((neighbor = search.NextVerticalSearch(search_bottom)) != NULL) {
    if (neighbor == part || !IsTextOrEquationType(neighbor->type())) {
      continue;
    }
    const TBOX& neighbor_box(neighbor->bounding_box());
    int y_gap = neighbor_box.y_gap(part_box);
    // Results arrive in increasing distance, so the first gap beyond the
    // threshold terminates the search.
    if (y_gap > kYGapTh) { // Out of scope.
      break;
    }
    if (!neighbor_box.major_x_overlap(part_box) ||
        (search_bottom && neighbor_box.bottom() > part_box.bottom()) ||
        (!search_bottom && neighbor_box.top() < part_box.top())) {
      continue;
    }
    if (y_gap < min_y_gap) {
      min_y_gap = y_gap;
      nearest_neighbor = neighbor;
    }
  }
  return nearest_neighbor;
}
bool EquationDetect::IsNearMathNeighbor(
const int y_gap, const ColPartition *neighbor) const {
if (!neighbor) {
return false;
}
const int kYGapTh = static_cast<int>(roundf(resolution_ * 0.1));
return neighbor->type() == PT_EQUATION && y_gap <= kYGapTh;
}
void EquationDetect::GetOutputTiffName(const char* name,
STRING* image_name) const {
ASSERT_HOST(image_name && name);
char page[50];
snprintf(page, sizeof(page), "%04d", page_count_);
*image_name = STRING(lang_tesseract_->imagebasename) + page + name + ".tif";
}
// Debug helper: renders the special-text classification of every blob in
// every partition onto a 32-bit copy of the binary page image and writes
// the result to outfile as an LZW-compressed tiff.
void EquationDetect::PaintSpecialTexts(const STRING& outfile) const {
  Pix *pix = NULL, *pixBi = lang_tesseract_->pix_binary();
  // Convert to 32 bpp so RenderSpecialText can draw in color.
  pix = pixConvertTo32(pixBi);
  ColPartitionGridSearch gsearch(part_grid_);
  ColPartition* part = NULL;
  gsearch.StartFullSearch();
  while ((part = gsearch.NextFullSearch()) != NULL) {
    BLOBNBOX_C_IT blob_it(part->boxes());
    for (blob_it.mark_cycle_pt(); !blob_it.cycled_list(); blob_it.forward()) {
      RenderSpecialText(pix, blob_it.data());
    }
  }
  pixWrite(outfile.string(), pix, IFF_TIFF_LZW);
  pixDestroy(&pix);
}
// Debug helper: draws the bounding box of every partition onto a 32-bit
// copy of the best page image — red for equations, green for inline
// equations, blue for everything else — and writes it to outfile.
void EquationDetect::PaintColParts(const STRING& outfile) const {
  Pix *pix = pixConvertTo32(lang_tesseract_->BestPix());
  ColPartitionGridSearch gsearch(part_grid_);
  gsearch.StartFullSearch();
  ColPartition* part = NULL;
  while ((part = gsearch.NextFullSearch()) != NULL) {
    const TBOX& tbox = part->bounding_box();
    // TBOX y grows upward; leptonica's y grows downward, hence the flip.
    Box *box = boxCreate(tbox.left(), pixGetHeight(pix) - tbox.top(),
                         tbox.width(), tbox.height());
    if (part->type() == PT_EQUATION) {
      pixRenderBoxArb(pix, box, 5, 255, 0, 0);
    } else if (part->type() == PT_INLINE_EQUATION) {
      pixRenderBoxArb(pix, box, 5, 0, 255, 0);
    } else {
      pixRenderBoxArb(pix, box, 5, 0, 0, 255);
    }
    boxDestroy(&box);
  }
  pixWrite(outfile.string(), pix, IFF_TIFF_LZW);
  pixDestroy(&pix);
}
// Debug helper: prints part's bounding box (in top-down image coordinates),
// its blob count, and its density for each special blob text type.
void EquationDetect::PrintSpecialBlobsDensity(const ColPartition* part) const {
  ASSERT_HOST(part);
  TBOX box(part->bounding_box());
  // Flip y to the top-down convention used by the image.
  int h = pixGetHeight(lang_tesseract_->BestPix());
  tprintf("Printing special blobs density values for ColParition (t=%d,b=%d) ",
          h - box.top(), h - box.bottom());
  box.print();
  tprintf("blobs count = %d, density = ", part->boxes_count());
  for (int i = 0; i < BSTT_COUNT; ++i) {
    BlobSpecialTextType type = static_cast<BlobSpecialTextType>(i);
    tprintf("%d:%f ", i, part->SpecialBlobsDensity(type));
  }
  tprintf("\n");
}
}; // namespace tesseract
| C++ |
// Copyright 2011 Google Inc. All Rights Reserved.
// Author: rays@google.com (Ray Smith)
///////////////////////////////////////////////////////////////////////
// File: cubeclassifier.h
// Description: Cube implementation of a ShapeClassifier.
// Author: Ray Smith
// Created: Wed Nov 23 10:36:32 PST 2011
//
// (C) Copyright 2011, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#ifndef THIRD_PARTY_TESSERACT_CCMAIN_CUBECLASSIFIER_H_
#define THIRD_PARTY_TESSERACT_CCMAIN_CUBECLASSIFIER_H_
#include "shapeclassifier.h"
namespace tesseract {
class Classify;
class CubeRecoContext;
class ShapeTable;
class TessClassifier;
class Tesseract;
class TrainingSample;
struct UnicharRating;
// Cube implementation of a ShapeClassifier.
class CubeClassifier : public ShapeClassifier {
public:
explicit CubeClassifier(Tesseract* tesseract);
virtual ~CubeClassifier();
// Classifies the given [training] sample, writing to results.
// See ShapeClassifier for a full description.
virtual int UnicharClassifySample(const TrainingSample& sample, Pix* page_pix,
int debug, UNICHAR_ID keep_this,
GenericVector<UnicharRating>* results);
// Provides access to the ShapeTable that this classifier works with.
virtual const ShapeTable* GetShapeTable() const;
private:
// Cube objects.
CubeRecoContext* cube_cntxt_;
const ShapeTable& shape_table_;
};
// Combination of Tesseract class pruner with scoring by cube.
class CubeTessClassifier : public ShapeClassifier {
 public:
  explicit CubeTessClassifier(Tesseract* tesseract);
  virtual ~CubeTessClassifier();

  // Classifies the given [training] sample, writing to results.
  // See ShapeClassifier for a full description.
  virtual int UnicharClassifySample(const TrainingSample& sample, Pix* page_pix,
                                    int debug, UNICHAR_ID keep_this,
                                    GenericVector<UnicharRating>* results);
  // Provides access to the ShapeTable that this classifier works with.
  virtual const ShapeTable* GetShapeTable() const;

 private:
  // Cube objects.
  // Cube recognition context. NOTE(review): ownership is established in the
  // ctor/dtor (not visible here) — confirm before assuming it is owned.
  CubeRecoContext* cube_cntxt_;
  // Reference (non-owning by construction) to an externally owned ShapeTable.
  const ShapeTable& shape_table_;
  // Tesseract class pruner used before cube rescoring. NOTE(review):
  // ownership not visible in this declaration — confirm in the .cpp.
  TessClassifier* pruner_;
};
} // namespace tesseract
#endif /* THIRD_PARTY_TESSERACT_CCMAIN_CUBECLASSIFIER_H_ */
| C++ |
/**********************************************************************
* File: pagesegmain.cpp
* Description: Top-level page segmenter for Tesseract.
* Author: Ray Smith
* Created: Thu Sep 25 17:12:01 PDT 2008
*
* (C) Copyright 2008, Google Inc.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
#ifdef _WIN32
#ifndef __GNUC__
#include <windows.h>
#endif // __GNUC__
#ifndef unlink
#include <io.h>
#endif
#else
#include <unistd.h>
#endif // _WIN32
#ifdef _MSC_VER
#pragma warning(disable:4244) // Conversion warnings
#endif
// Include automatically generated configuration file if running autoconf.
#ifdef HAVE_CONFIG_H
#include "config_auto.h"
#endif
#include "allheaders.h"
#include "blobbox.h"
#include "blread.h"
#include "colfind.h"
#include "equationdetect.h"
#include "imagefind.h"
#include "linefind.h"
#include "makerow.h"
#include "osdetect.h"
#include "tabvector.h"
#include "tesseractclass.h"
#include "tessvars.h"
#include "textord.h"
#include "tordmain.h"
#include "wordseg.h"
namespace tesseract {
/// Minimum believable resolution.
const int kMinCredibleResolution = 70;
/// Default resolution used if input in not believable.
const int kDefaultResolution = 300;
// Max erosions to perform in removing an enclosing circle.
const int kMaxCircleErosions = 8;
// Helper to remove an enclosing circle from an image.
// If there isn't one, then the image will most likely get badly mangled.
// The returned pix must be pixDestroyed after use. NULL may be returned
// if the image doesn't meet the trivial conditions that it uses to determine
// success.
// Helper to remove an enclosing circle from an image.
// If there isn't one, then the image will most likely get badly mangled.
// The returned pix must be pixDestroyed after use. NULL may be returned
// if the image doesn't meet the trivial conditions that it uses to determine
// success.
static Pix* RemoveEnclosingCircle(Pix* pixs) {
  // Flood-fill from the border of the inverted image to get a mask of
  // everything connected to the border (the circle and outside).
  Pix* pixsi = pixInvert(NULL, pixs);
  Pix* pixc = pixCreateTemplate(pixs);
  pixSetOrClearBorder(pixc, 1, 1, 1, 1, PIX_SET);
  pixSeedfillBinary(pixc, pixc, pixsi, 4);
  pixInvert(pixc, pixc);
  pixDestroy(&pixsi);
  Pix* pixt = pixAnd(NULL, pixs, pixc);
  l_int32 max_count;
  pixCountConnComp(pixt, 8, &max_count);
  // The count has to go up before we start looking for the minimum.
  l_int32 min_count = MAX_INT32;
  Pix* pixout = NULL;
  // Repeatedly erode the mask; the connected-component count first rises as
  // the circle fragments, then falls as real content is eaten. Keep the
  // image at the minimum count — the circle gone, the content intact.
  for (int i = 1; i < kMaxCircleErosions; i++) {
    pixDestroy(&pixt);
    pixErodeBrick(pixc, pixc, 3, 3);
    pixt = pixAnd(NULL, pixs, pixc);
    l_int32 count;
    pixCountConnComp(pixt, 8, &count);
    if (i == 1 || count > max_count) {
      max_count = count;
      min_count = count;
    } else if (i > 1 && count < min_count) {
      min_count = count;
      pixDestroy(&pixout);
      pixout = pixCopy(NULL, pixt); // Save the best.
    } else if (count >= min_count) {
      break; // We have passed by the best.
    }
  }
  pixDestroy(&pixt);
  pixDestroy(&pixc);
  return pixout;
}
/**
* Segment the page according to the current value of tessedit_pageseg_mode.
* pix_binary_ is used as the source image and should not be NULL.
* On return the blocks list owns all the constructed page layout.
*/
int Tesseract::SegmentPage(const STRING* input_file, BLOCK_LIST* blocks,
                           Tesseract* osd_tess, OSResults* osr) {
  ASSERT_HOST(pix_binary_ != NULL);
  int width = pixGetWidth(pix_binary_);
  int height = pixGetHeight(pix_binary_);
  // Get page segmentation mode.
  PageSegMode pageseg_mode = static_cast<PageSegMode>(
      static_cast<int>(tessedit_pageseg_mode));
  // If a UNLV zone file can be found, use that instead of segmentation.
  if (!PSM_COL_FIND_ENABLED(pageseg_mode) &&
      input_file != NULL && input_file->length() > 0) {
    STRING name = *input_file;
    // Strip the extension: the zone file shares the image's base name.
    const char* lastdot = strrchr(name.string(), '.');
    if (lastdot != NULL)
      name[lastdot - name.string()] = '\0';
    read_unlv_file(name, width, height, blocks);
  }
  if (blocks->empty()) {
    // No UNLV file present. Work according to the PageSegMode.
    // First make a single block covering the whole image.
    BLOCK_IT block_it(blocks);
    BLOCK* block = new BLOCK("", TRUE, 0, 0, 0, 0, width, height);
    block->set_right_to_left(right_to_left());
    block_it.add_to_end(block);
  } else {
    // UNLV file present. Use PSM_SINGLE_BLOCK.
    pageseg_mode = PSM_SINGLE_BLOCK;
  }
  int auto_page_seg_ret_val = 0;
  TO_BLOCK_LIST to_blocks;
  if (PSM_OSD_ENABLED(pageseg_mode) || PSM_BLOCK_FIND_ENABLED(pageseg_mode) ||
      PSM_SPARSE(pageseg_mode)) {
    auto_page_seg_ret_val =
        AutoPageSeg(pageseg_mode, blocks, &to_blocks, osd_tess, osr);
    if (pageseg_mode == PSM_OSD_ONLY)
      return auto_page_seg_ret_val;
    // To create blobs from the image region bounds uncomment this line:
    //  to_blocks.clear();  // Uncomment to go back to the old mode.
  } else {
    // No auto segmentation: assume an unrotated, unskewed page.
    deskew_ = FCOORD(1.0f, 0.0f);
    reskew_ = FCOORD(1.0f, 0.0f);
    if (pageseg_mode == PSM_CIRCLE_WORD) {
      Pix* pixcleaned = RemoveEnclosingCircle(pix_binary_);
      if (pixcleaned != NULL) {
        pixDestroy(&pix_binary_);
        pix_binary_ = pixcleaned;
      }
    }
  }
  if (auto_page_seg_ret_val < 0) {
    return -1;
  }
  if (blocks->empty()) {
    if (textord_debug_tabfind)
      tprintf("Empty page\n");
    return 0; // AutoPageSeg found an empty page.
  }
  bool splitting =
      pageseg_devanagari_split_strategy != ShiroRekhaSplitter::NO_SPLIT;
  bool cjk_mode = textord_use_cjk_fp_model;
  // Hand the blocks to textord to find rows/words within them.
  textord_.TextordPage(pageseg_mode, reskew_, width, height, pix_binary_,
                       pix_thresholds_, pix_grey_, splitting || cjk_mode,
                       blocks, &to_blocks);
  return auto_page_seg_ret_val;
}
// Helper writes a grey image to a file for use by scrollviewer.
// Normally for speed we don't display the image in the layout debug windows.
// If textord_debug_images is true, we draw the image as a background to some
// of the debug windows. printable determines whether these
// images are optimized for printing instead of screen display.
// Helper writes a grey image to a file for use by scrollviewer.
// Normally for speed we don't display the image in the layout debug windows.
// If textord_debug_images is true, we draw the image as a background to some
// of the debug windows. printable determines whether these
// images are optimized for printing instead of screen display.
static void WriteDebugBackgroundImage(bool printable, Pix* pix_binary) {
  const int width = pixGetWidth(pix_binary);
  const int height = pixGetHeight(pix_binary);
  Pix* grey_pix = pixCreate(width, height, 8);
  if (printable) {
    // Printable: light grey foreground on a white background.
    pixSetAll(grey_pix);
    pixSetMasked(grey_pix, pix_binary, 192);
  } else {
    // Screen: black foreground on dark grey so overlay colors stand out.
    pixSetAllArbitrary(grey_pix, 64);
    pixSetMasked(grey_pix, pix_binary, 0);
  }
  AlignedBlob::IncrementDebugPix();
  pixWrite(AlignedBlob::textord_debug_pix().string(), grey_pix, IFF_PNG);
  pixDestroy(&grey_pix);
}
/**
* Auto page segmentation. Divide the page image into blocks of uniform
* text linespacing and images.
*
* Resolution (in ppi) is derived from the input image.
*
* The output goes in the blocks list with corresponding TO_BLOCKs in the
* to_blocks list.
*
* If single_column is true, then no attempt is made to divide the image
* into columns, but multiple blocks are still made if the text is of
* non-uniform linespacing.
*
* If osd (orientation and script detection) is true then that is performed
* as well. If only_osd is true, then only orientation and script detection is
* performed. If osd is desired, (osd or only_osd) then osr_tess must be
* another Tesseract that was initialized especially for osd, and the results
* will be output into osr (orientation and script result).
*/
int Tesseract::AutoPageSeg(PageSegMode pageseg_mode,
                           BLOCK_LIST* blocks, TO_BLOCK_LIST* to_blocks,
                           Tesseract* osd_tess, OSResults* osr) {
  if (textord_debug_images) {
    WriteDebugBackgroundImage(textord_debug_printable, pix_binary_);
  }
  Pix* photomask_pix = NULL;
  Pix* musicmask_pix = NULL;
  // The blocks made by the ColumnFinder. Moved to blocks before return.
  BLOCK_LIST found_blocks;
  TO_BLOCK_LIST temp_blocks;
  // Decode the requested behavior from the page segmentation mode.
  bool single_column = !PSM_COL_FIND_ENABLED(pageseg_mode);
  bool osd_enabled = PSM_OSD_ENABLED(pageseg_mode);
  bool osd_only = pageseg_mode == PSM_OSD_ONLY;
  ColumnFinder* finder = SetupPageSegAndDetectOrientation(
      single_column, osd_enabled, osd_only, blocks, osd_tess, osr,
      &temp_blocks, &photomask_pix, &musicmask_pix);
  int result = 0;
  // finder is NULL in osd-only mode or on failure; skip block finding then.
  if (finder != NULL) {
    TO_BLOCK_IT to_block_it(&temp_blocks);
    TO_BLOCK* to_block = to_block_it.data();
    if (musicmask_pix != NULL) {
      // TODO(rays) pass the musicmask_pix into FindBlocks and mark music
      // blocks separately. For now combine with photomask_pix.
      pixOr(photomask_pix, photomask_pix, musicmask_pix);
    }
    if (equ_detect_) {
      finder->SetEquationDetect(equ_detect_);
    }
    result = finder->FindBlocks(pageseg_mode, scaled_color_, scaled_factor_,
                                to_block, photomask_pix,
                                pix_thresholds_, pix_grey_,
                                &found_blocks, to_blocks);
    if (result >= 0)
      finder->GetDeskewVectors(&deskew_, &reskew_);
    delete finder;
  }
  pixDestroy(&photomask_pix);
  pixDestroy(&musicmask_pix);
  if (result < 0) return result;

  blocks->clear();
  BLOCK_IT block_it(blocks);
  // Move the found blocks to the input/output blocks.
  block_it.add_list_after(&found_blocks);
  if (textord_debug_images) {
    // The debug image is no longer needed so delete it.
    unlink(AlignedBlob::textord_debug_pix().string());
  }
  return result;
}
// Helper adds all the scripts from sid_set converted to ids from osd_set to
// allowed_ids.
// Helper adds all the scripts from sid_set, converted to ids in osd_set's
// id space, to allowed_ids. The null script is skipped.
static void AddAllScriptsConverted(const UNICHARSET& sid_set,
                                   const UNICHARSET& osd_set,
                                   GenericVector<int>* allowed_ids) {
  const int num_scripts = sid_set.get_script_table_size();
  for (int sid = 0; sid < num_scripts; ++sid) {
    if (sid == sid_set.null_sid()) continue;  // The null script is not real.
    // Script ids differ between unicharsets, so translate via the name.
    const char* script_name = sid_set.get_script_from_script_id(sid);
    allowed_ids->push_back(osd_set.get_script_id_from_name(script_name));
  }
}
/**
* Sets up auto page segmentation, determines the orientation, and corrects it.
* Somewhat arbitrary chunk of functionality, factored out of AutoPageSeg to
* facilitate testing.
* photo_mask_pix is a pointer to a NULL pointer that will be filled on return
* with the leptonica photo mask, which must be pixDestroyed by the caller.
* to_blocks is an empty list that will be filled with (usually a single)
* block that is used during layout analysis. This ugly API is required
* because of the possibility of a unlv zone file.
* TODO(rays) clean this up.
* See AutoPageSeg for other arguments.
* The returned ColumnFinder must be deleted after use.
*/
ColumnFinder* Tesseract::SetupPageSegAndDetectOrientation(
    bool single_column, bool osd, bool only_osd,
    BLOCK_LIST* blocks, Tesseract* osd_tess, OSResults* osr,
    TO_BLOCK_LIST* to_blocks, Pix** photo_mask_pix, Pix** music_mask_pix) {
  int vertical_x = 0;
  int vertical_y = 1;
  TabVector_LIST v_lines;
  TabVector_LIST h_lines;
  ICOORD bleft(0, 0);
  ASSERT_HOST(pix_binary_ != NULL);
  if (tessedit_dump_pageseg_images) {
    pixWrite("tessinput.png", pix_binary_, IFF_PNG);
  }
  // Leptonica is used to find the rule/separator lines in the input.
  LineFinder::FindAndRemoveLines(source_resolution_,
                                 textord_tabfind_show_vlines, pix_binary_,
                                 &vertical_x, &vertical_y, music_mask_pix,
                                 &v_lines, &h_lines);
  if (tessedit_dump_pageseg_images)
    pixWrite("tessnolines.png", pix_binary_, IFF_PNG);
  // Leptonica is used to find a mask of the photo regions in the input.
  *photo_mask_pix = ImageFind::FindImages(pix_binary_);
  if (tessedit_dump_pageseg_images)
    pixWrite("tessnoimages.png", pix_binary_, IFF_PNG);
  if (single_column)
    v_lines.clear();
  // The rest of the algorithm uses the usual connected components.
  textord_.find_components(pix_binary_, blocks, to_blocks);
  TO_BLOCK_IT to_block_it(to_blocks);
  // There must be exactly one input block.
  // TODO(rays) handle new textline finding with a UNLV zone file.
  ASSERT_HOST(to_blocks->singleton());
  TO_BLOCK* to_block = to_block_it.data();
  TBOX blkbox = to_block->block->bounding_box();
  ColumnFinder* finder = NULL;
  if (to_block->line_size >= 2) {
    finder = new ColumnFinder(static_cast<int>(to_block->line_size),
                              blkbox.botleft(), blkbox.topright(),
                              source_resolution_, textord_use_cjk_fp_model,
                              textord_tabfind_aligned_gap_fraction,
                              &v_lines, &h_lines, vertical_x, vertical_y);
    finder->SetupAndFilterNoise(*photo_mask_pix, to_block);
    if (equ_detect_) {
      equ_detect_->LabelSpecialText(to_block);
    }
    BLOBNBOX_CLIST osd_blobs;
    // osd_orientation is the number of 90 degree rotations to make the
    // characters upright. (See osdetect.h for precise definition.)
    // We want the text lines horizontal, (vertical text indicates vertical
    // textlines) which may conflict (eg vertically written CJK).
    int osd_orientation = 0;
    bool vertical_text = textord_tabfind_force_vertical_text;
    if (!vertical_text && textord_tabfind_vertical_text) {
      vertical_text =
          finder->IsVerticallyAlignedText(textord_tabfind_vertical_text_ratio,
                                          to_block, &osd_blobs);
    }
    if (osd && osd_tess != NULL && osr != NULL) {
      GenericVector<int> osd_scripts;
      if (osd_tess != this) {
        // We are running osd as part of layout analysis, so constrain the
        // scripts to those allowed by *this.
        AddAllScriptsConverted(unicharset, osd_tess->unicharset, &osd_scripts);
        for (int s = 0; s < sub_langs_.size(); ++s) {
          AddAllScriptsConverted(sub_langs_[s]->unicharset,
                                 osd_tess->unicharset, &osd_scripts);
        }
      }
      os_detect_blobs(&osd_scripts, &osd_blobs, osr, osd_tess);
      if (only_osd) {
        delete finder;
        return NULL;
      }
      osd_orientation = osr->best_result.orientation_id;
      double osd_score = osr->orientations[osd_orientation];
      // osd_margin ends up as the gap between the best orientation score and
      // its closest competitor, capped at 2x the minimum margin.
      double osd_margin = min_orientation_margin * 2;
      for (int i = 0; i < 4; ++i) {
        if (i != osd_orientation &&
            osd_score - osr->orientations[i] < osd_margin) {
          osd_margin = osd_score - osr->orientations[i];
        }
      }
      int best_script_id = osr->best_result.script_id;
      const char* best_script_str =
          osd_tess->unicharset.get_script_from_script_id(best_script_id);
      bool cjk = best_script_id == osd_tess->unicharset.han_sid() ||
          best_script_id == osd_tess->unicharset.hiragana_sid() ||
          best_script_id == osd_tess->unicharset.katakana_sid() ||
          strcmp("Japanese", best_script_str) == 0 ||
          strcmp("Korean", best_script_str) == 0 ||
          strcmp("Hangul", best_script_str) == 0;
      if (cjk) {
        finder->set_cjk_script(true);
      }
      if (osd_margin < min_orientation_margin) {
        // The margin is weak.
        if (!cjk && !vertical_text && osd_orientation == 2) {
          // upside down latin text is improbable with such a weak margin.
          tprintf("OSD: Weak margin (%.2f), horiz textlines, not CJK: "
                  "Don't rotate.\n", osd_margin);
          osd_orientation = 0;
        } else {
          // BUGFIX: the arguments were previously passed as
          // (osd_blobs.length(), osd_margin, osd_orientation), which fed an
          // int to %.2f and a double to %d (undefined behavior). Pass them
          // in the order the format string expects.
          tprintf("OSD: Weak margin (%.2f) for %d blob text block, "
                  "but using orientation anyway: %d\n",
                  osd_margin, osd_blobs.length(), osd_orientation);
        }
      }
    }
    osd_blobs.shallow_clear();
    finder->CorrectOrientation(to_block, vertical_text, osd_orientation);
  }
  return finder;
}
} // namespace tesseract.
| C++ |
///////////////////////////////////////////////////////////////////////
// File: ltrresultiterator.cpp
// Description: Iterator for tesseract results in strict left-to-right
// order that avoids using tesseract internal data structures.
// Author: Ray Smith
// Created: Fri Feb 26 14:32:09 PST 2010
//
// (C) Copyright 2010, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#include "ltrresultiterator.h"
#include "allheaders.h"
#include "pageres.h"
#include "strngs.h"
#include "tesseractclass.h"
namespace tesseract {
// Constructs an iterator over page_res. All scale/rectangle parameters are
// forwarded unchanged to the PageIterator base (see pageiterator.h for
// their meaning). Line and paragraph separators default to "\n" and may be
// changed later via SetLineSeparator/SetParagraphSeparator.
LTRResultIterator::LTRResultIterator(PAGE_RES* page_res, Tesseract* tesseract,
                                     int scale, int scaled_yres,
                                     int rect_left, int rect_top,
                                     int rect_width, int rect_height)
    : PageIterator(page_res, tesseract, scale, scaled_yres,
                   rect_left, rect_top, rect_width, rect_height),
      line_separator_("\n"),
      paragraph_separator_("\n") {
}
// Nothing to release: this class owns no resources beyond the base class.
LTRResultIterator::~LTRResultIterator() {
}
// Returns the null terminated UTF-8 encoded text string for the current
// object at the given level. Use delete [] to free after use.
char* LTRResultIterator::GetUTF8Text(PageIteratorLevel level) const {
  if (it_->word() == NULL) return NULL;  // Already at the end!
  STRING text;
  // Work on a local copy so accumulating text does not advance *this.
  PAGE_RES_IT res_it(*it_);
  WERD_CHOICE* best_choice = res_it.word()->best_choice;
  ASSERT_HOST(best_choice != NULL);
  if (level == RIL_SYMBOL) {
    text = res_it.word()->BestUTF8(blob_index_, false);
  } else if (level == RIL_WORD) {
    text = best_choice->unichar_string();
  } else {
    // For textline/para/block levels, concatenate word strings, joining
    // words with spaces and lines/paragraphs with the configured separators.
    bool eol = false;  // end of line?
    bool eop = false;  // end of paragraph?
    do {  // for each paragraph in a block
      do {  // for each text line in a paragraph
        do {  // for each word in a text line
          best_choice = res_it.word()->best_choice;
          ASSERT_HOST(best_choice != NULL);
          text += best_choice->unichar_string();
          text += " ";
          res_it.forward();
          eol = res_it.row() != res_it.prev_row();
        } while (!eol);
        // Drop the trailing space and substitute the line separator.
        text.truncate_at(text.length() - 1);
        text += line_separator_;
        eop = res_it.block() != res_it.prev_block() ||
            res_it.row()->row->para() != res_it.prev_row()->row->para();
      } while (level != RIL_TEXTLINE && !eop);
      if (eop) text += paragraph_separator_;
    } while (level == RIL_BLOCK && res_it.block() == res_it.prev_block());
  }
  // Copy into a caller-owned buffer; length includes the terminating NUL.
  int length = text.length() + 1;
  char* result = new char[length];
  strncpy(result, text.string(), length);
  return result;
}
// Set the string inserted at the end of each text line. "\n" by default.
void LTRResultIterator::SetLineSeparator(const char *new_line) {
  // NOTE(review): this appears to store the caller's pointer rather than a
  // copy — confirm line_separator_'s type in the header; if it is a raw
  // pointer, the string must outlive this iterator.
  line_separator_ = new_line;
}
// Set the string inserted at the end of each paragraph. "\n" by default.
void LTRResultIterator::SetParagraphSeparator(const char *new_para) {
  // Same lifetime caveat as SetLineSeparator applies to new_para.
  paragraph_separator_ = new_para;
}
// Returns the mean confidence of the current object at the given level.
// The number should be interpreted as a percent probability. (0.0f-100.0f)
float LTRResultIterator::Confidence(PageIteratorLevel level) const {
  if (it_->word() == NULL) return 0.0f;  // Already at the end!
  float mean_certainty = 0.0f;
  int certainty_count = 0;
  // Work on a local copy so averaging does not advance *this.
  PAGE_RES_IT res_it(*it_);
  WERD_CHOICE* best_choice = res_it.word()->best_choice;
  ASSERT_HOST(best_choice != NULL);
  switch (level) {
    case RIL_BLOCK:
      // Average the per-word certainties over the remainder of the block.
      do {
        best_choice = res_it.word()->best_choice;
        ASSERT_HOST(best_choice != NULL);
        mean_certainty += best_choice->certainty();
        ++certainty_count;
        res_it.forward();
      } while (res_it.block() == res_it.prev_block());
      break;
    case RIL_PARA:
      // Average over words while still in the same block and paragraph.
      do {
        best_choice = res_it.word()->best_choice;
        ASSERT_HOST(best_choice != NULL);
        mean_certainty += best_choice->certainty();
        ++certainty_count;
        res_it.forward();
      } while (res_it.block() == res_it.prev_block() &&
               res_it.row()->row->para() == res_it.prev_row()->row->para());
      break;
    case RIL_TEXTLINE:
      // Average over words on the same row.
      do {
        best_choice = res_it.word()->best_choice;
        ASSERT_HOST(best_choice != NULL);
        mean_certainty += best_choice->certainty();
        ++certainty_count;
        res_it.forward();
      } while (res_it.row() == res_it.prev_row());
      break;
    case RIL_WORD:
      mean_certainty += best_choice->certainty();
      ++certainty_count;
      break;
    case RIL_SYMBOL:
      // Last case: no break needed, but note the per-blob certainty here.
      mean_certainty += best_choice->certainty(blob_index_);
      ++certainty_count;
  }
  if (certainty_count > 0) {
    mean_certainty /= certainty_count;
    // Map certainty onto a clamped 0-100 "percent" scale.
    float confidence = 100 + 5 * mean_certainty;
    if (confidence < 0.0f) confidence = 0.0f;
    if (confidence > 100.0f) confidence = 100.0f;
    return confidence;
  }
  return 0.0f;
}
// Returns the font attributes of the current word. If iterating at a higher
// level object than words, eg textlines, then this will return the
// attributes of the first word in that textline.
// The actual return value is a string representing a font name. It points
// to an internal table and SHOULD NOT BE DELETED. Lifespan is the same as
// the iterator itself, ie rendered invalid by various members of
// TessBaseAPI, including Init, SetImage, End or deleting the TessBaseAPI.
// Pointsize is returned in printers points (1/72 inch.)
const char* LTRResultIterator::WordFontAttributes(bool* is_bold,
                                                  bool* is_italic,
                                                  bool* is_underlined,
                                                  bool* is_monospace,
                                                  bool* is_serif,
                                                  bool* is_smallcaps,
                                                  int* pointsize,
                                                  int* font_id) const {
  if (it_->word() == NULL) return NULL;  // Already at the end!
  if (it_->word()->fontinfo == NULL) {
    *font_id = -1;
    return NULL;  // No font information.
  }
  const FontInfo& font_info = *it_->word()->fontinfo;
  *font_id = font_info.universal_id;
  *is_bold = font_info.is_bold();
  *is_italic = font_info.is_italic();
  *is_underlined = false;  // TODO(rays) fix this!
  *is_monospace = font_info.is_fixed_pitch();
  *is_serif = font_info.is_serif();
  *is_smallcaps = it_->word()->small_caps;
  // Estimate the full line height from x-height plus ascender/descender
  // extents of the containing row.
  float row_height = it_->row()->row->x_height() +
      it_->row()->row->ascenders() - it_->row()->row->descenders();
  // Convert from pixels to printers points (rounded to nearest int);
  // 0 when the effective resolution is unknown.
  *pointsize = scaled_yres_ > 0
      ? static_cast<int>(row_height * kPointsPerInch / scaled_yres_ + 0.5)
      : 0;
  return font_info.name;
}
// Returns the name of the language used to recognize this word.
const char* LTRResultIterator::WordRecognitionLanguage() const {
if (it_->word() == NULL || it_->word()->tesseract == NULL) return NULL;
return it_->word()->tesseract->lang.string();
}
// Return the overall directionality of this word.
// Classifies the word's overall directionality from the presence of RTL
// and LTR characters: exactly one kind present gives that direction, both
// give DIR_MIX, neither gives DIR_NEUTRAL.
StrongScriptDirection LTRResultIterator::WordDirection() const {
  WERD_RES* word_res = it_->word();
  if (word_res == NULL) return DIR_NEUTRAL;
  bool rtl = word_res->AnyRtlCharsInWord();
  bool ltr = word_res->AnyLtrCharsInWord();
  if (rtl == ltr) {
    // Both present -> mixed; neither present -> neutral.
    return rtl ? DIR_MIX : DIR_NEUTRAL;
  }
  return rtl ? DIR_RIGHT_TO_LEFT : DIR_LEFT_TO_RIGHT;
}
// Returns true if the current word was found in a dictionary.
// Returns true if the current word's best choice came from one of the
// dictionary permuters (system, frequent-word, or user dictionary).
bool LTRResultIterator::WordIsFromDictionary() const {
  if (it_->word() == NULL) return false;  // Already at the end!
  switch (it_->word()->best_choice->permuter()) {
    case SYSTEM_DAWG_PERM:
    case FREQ_DAWG_PERM:
    case USER_DAWG_PERM:
      return true;
    default:
      return false;
  }
}
// Returns true if the current word is numeric.
bool LTRResultIterator::WordIsNumeric() const {
if (it_->word() == NULL) return false; // Already at the end!
int permuter = it_->word()->best_choice->permuter();
return permuter == NUMBER_PERM;
}
// Returns true if the word contains blamer information.
bool LTRResultIterator::HasBlamerInfo() const {
return it_->word() != NULL && it_->word()->blamer_bundle != NULL &&
it_->word()->blamer_bundle->HasDebugInfo();
}
// Returns the pointer to ParamsTrainingBundle stored in the BlamerBundle
// of the current word.
const void *LTRResultIterator::GetParamsTrainingBundle() const {
return (it_->word() != NULL && it_->word()->blamer_bundle != NULL) ?
&(it_->word()->blamer_bundle->params_training_bundle()) : NULL;
}
// Returns the pointer to the string with blamer information for this word.
// Assumes that the word's blamer_bundle is not NULL.
const char *LTRResultIterator::GetBlamerDebug() const {
  // Precondition (per header comment): it_->word() and its blamer_bundle
  // are non-NULL; callers should check HasBlamerInfo() first.
  return it_->word()->blamer_bundle->debug().string();
}
// Returns the pointer to the string with misadaption information for this word.
// Assumes that the word's blamer_bundle is not NULL.
const char *LTRResultIterator::GetBlamerMisadaptionDebug() const {
  // Precondition: the current word and its blamer_bundle are non-NULL.
  return it_->word()->blamer_bundle->misadaption_debug().string();
}
// Returns true if a truth string was recorded for the current word.
bool LTRResultIterator::HasTruthString() const {
if (it_->word() == NULL) return false; // Already at the end!
if (it_->word()->blamer_bundle == NULL ||
it_->word()->blamer_bundle->NoTruth()) {
return false; // no truth information for this word
}
return true;
}
// Returns true if the given string is equivalent to the truth string for
// the current word.
bool LTRResultIterator::EquivalentToTruth(const char *str) const {
  if (!HasTruthString()) return false;
  ASSERT_HOST(it_->word()->uch_set != NULL);
  // Build a WERD_CHOICE over the word's unicharset so the comparison is
  // done unichar-by-unichar rather than on raw bytes.
  WERD_CHOICE str_wd(str, *(it_->word()->uch_set));
  return it_->word()->blamer_bundle->ChoiceIsCorrect(&str_wd);
}
// Returns the null terminated UTF-8 encoded truth string for the current word.
// Use delete [] to free after use.
// Returns the null terminated UTF-8 encoded truth string for the current
// word, or NULL if none is recorded. Caller frees with delete [].
char* LTRResultIterator::WordTruthUTF8Text() const {
  if (!HasTruthString()) return NULL;
  STRING truth_text = it_->word()->blamer_bundle->TruthString();
  // Copy the exact byte count, including the terminating NUL.
  int bytes = truth_text.length() + 1;
  char* result = new char[bytes];
  memcpy(result, truth_text.string(), bytes);
  return result;
}
// Returns the null terminated UTF-8 encoded normalized OCR string for the
// current word. Use delete [] to free after use.
char* LTRResultIterator::WordNormedUTF8Text() const {
  if (it_->word() == NULL) return NULL;  // Already at the end!
  STRING ocr_text;
  WERD_CHOICE* best_choice = it_->word()->best_choice;
  const UNICHARSET *unicharset = it_->word()->uch_set;
  ASSERT_HOST(best_choice != NULL);
  // Concatenate the normalized form of each unichar in the best choice.
  for (int i = 0; i < best_choice->length(); ++i) {
    ocr_text += unicharset->get_normed_unichar(best_choice->unichar_id(i));
  }
  // Copy into a caller-owned buffer; length includes the terminating NUL.
  int length = ocr_text.length() + 1;
  char* result = new char[length];
  strncpy(result, ocr_text.string(), length);
  return result;
}
// Returns a pointer to serialized choice lattice.
// Fills lattice_size with the number of bytes in lattice data.
const char *LTRResultIterator::WordLattice(int *lattice_size) const {
if (it_->word() == NULL) return NULL; // Already at the end!
if (it_->word()->blamer_bundle == NULL) return NULL;
*lattice_size = it_->word()->blamer_bundle->lattice_size();
return it_->word()->blamer_bundle->lattice_data();
}
// Returns true if the current symbol is a superscript.
// If iterating at a higher level object than symbols, eg words, then
// this will return the attributes of the first symbol in that word.
// Returns true if the current symbol's position flag is superscript.
// Only meaningful when iterating recognition results (cblob_it_ == NULL).
bool LTRResultIterator::SymbolIsSuperscript() const {
  if (cblob_it_ != NULL || it_->word() == NULL) return false;
  return it_->word()->best_choice->BlobPosition(blob_index_) ==
      SP_SUPERSCRIPT;
}
// Returns true if the current symbol is a subscript.
// If iterating at a higher level object than symbols, eg words, then
// this will return the attributes of the first symbol in that word.
// Returns true if the current symbol's position flag is subscript.
// Only meaningful when iterating recognition results (cblob_it_ == NULL).
bool LTRResultIterator::SymbolIsSubscript() const {
  if (cblob_it_ != NULL || it_->word() == NULL) return false;
  return it_->word()->best_choice->BlobPosition(blob_index_) == SP_SUBSCRIPT;
}
// Returns true if the current symbol is a dropcap.
// If iterating at a higher level object than symbols, eg words, then
// this will return the attributes of the first symbol in that word.
// Returns true if the current symbol's position flag is dropcap.
// Only meaningful when iterating recognition results (cblob_it_ == NULL).
bool LTRResultIterator::SymbolIsDropcap() const {
  if (cblob_it_ != NULL || it_->word() == NULL) return false;
  return it_->word()->best_choice->BlobPosition(blob_index_) == SP_DROPCAP;
}
// Builds a choice iterator over the alternatives for the symbol that
// result_it currently points at. If the word has no ratings or the choice
// list is empty, choice_it_ stays NULL and Next()/accessors degrade
// gracefully.
ChoiceIterator::ChoiceIterator(const LTRResultIterator& result_it) {
  ASSERT_HOST(result_it.it_->word() != NULL);
  word_res_ = result_it.it_->word();
  BLOB_CHOICE_LIST* choices = NULL;
  if (word_res_->ratings != NULL)
    choices = word_res_->GetBlobChoices(result_it.blob_index_);
  if (choices != NULL && !choices->empty()) {
    choice_it_ = new BLOB_CHOICE_IT(choices);
    choice_it_->mark_cycle_pt();
  } else {
    choice_it_ = NULL;
  }
}
// choice_it_ may be NULL; delete of NULL is a safe no-op.
ChoiceIterator::~ChoiceIterator() {
  delete choice_it_;
}
// Moves to the next choice for the symbol and returns false if there
// are none left.
bool ChoiceIterator::Next() {
  if (choice_it_ == NULL)
    return false;
  choice_it_->forward();
  // cycled_list() becomes true once we wrap past the marked cycle point,
  // i.e. all choices have been visited.
  return !choice_it_->cycled_list();
}
// Returns the null terminated UTF-8 encoded text string for the current
// choice. Do NOT use delete [] to free after use.
// Returns the UTF-8 text of the current choice, looked up in the word's
// unicharset. The returned pointer is owned by the unicharset; do NOT
// delete [] it.
const char* ChoiceIterator::GetUTF8Text() const {
  if (choice_it_ == NULL) return NULL;
  return word_res_->uch_set->id_to_unichar_ext(
      choice_it_->data()->unichar_id());
}
// Returns the confidence of the current choice.
// The number should be interpreted as a percent probability. (0.0f-100.0f)
// Returns the confidence of the current choice as a percent probability,
// clamped to [0.0f, 100.0f].
float ChoiceIterator::Confidence() const {
  if (choice_it_ == NULL) return 0.0f;
  // Certainties are typically non-positive; scale onto a percent range.
  float confidence = 100.0f + 5.0f * choice_it_->data()->certainty();
  if (confidence > 100.0f) return 100.0f;
  if (confidence < 0.0f) return 0.0f;
  return confidence;
}
} // namespace tesseract.
| C++ |
/**********************************************************************
* File: tesseract_cube_combiner.h
* Description: Declaration of the Tesseract & Cube results combiner Class
* Author: Ahmad Abdulkader
* Created: 2008
*
* (C) Copyright 2008, Google Inc.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
// The TesseractCubeCombiner class provides the functionality of combining
// the recognition results of Tesseract and Cube at the word level
#ifndef TESSERACT_CCMAIN_TESSERACT_CUBE_COMBINER_H
#define TESSERACT_CCMAIN_TESSERACT_CUBE_COMBINER_H
#include <string>
#include <vector>
#include "pageres.h"
#ifdef _WIN32
#include <windows.h>
using namespace std;
#endif
#ifdef USE_STD_NAMESPACE
using std::string;
using std::vector;
#endif
namespace tesseract {
class CubeObject;
class NeuralNet;
class CubeRecoContext;
class WordAltList;
class TesseractCubeCombiner {
 public:
  // cube_cntxt supplies language identification and data paths; the
  // combiner does not take ownership of it.
  explicit TesseractCubeCombiner(CubeRecoContext *cube_cntxt);
  virtual ~TesseractCubeCombiner();
  // There are 2 public methods for combining the results of tesseract
  // and cube. Both return the probability that the Tesseract result is
  // correct. The difference between the two interfaces is in how the
  // passed-in CubeObject is used.
  // The CubeObject parameter is used for 2 purposes: 1) to retrieve
  // cube's alt list, and 2) to compute cube's word cost for the
  // tesseract result. Both uses may modify the state of the
  // CubeObject (including the BeamSearch state) with a call to
  // RecognizeWord().
  float CombineResults(WERD_RES *tess_res, CubeObject *cube_obj);
  // The alt_list parameter is expected to have been extracted from the
  // CubeObject that recognized the word to be combined. The cube_obj
  // parameter passed in is a separate instance to be used only by
  // the combiner.
  float CombineResults(WERD_RES *tess_res, CubeObject *cube_obj,
                       WordAltList *alt_list);
  // Public method for computing the combiner features. The agreement
  // output parameter will be true if both answers are identical,
  // false otherwise. Modifies the cube_alt_list, so no assumptions
  // should be made about its state upon return.
  bool ComputeCombinerFeatures(const string &tess_res,
                               int tess_confidence,
                               CubeObject *cube_obj,
                               WordAltList *cube_alt_list,
                               vector<double> *features,
                               bool *agreement);
  // Is the word valid according to Tesseract's language model
  bool ValidWord(const string &str);
  // Loads the combiner neural network from file, using cube_cntxt_
  // to find path.
  bool LoadCombinerNet();
 private:
  // Normalize a UTF-8 string. Converts the UTF-8 string to UTF32 and optionally
  // strips punc and/or normalizes case and then converts back
  string NormalizeString(const string &str, bool remove_punc, bool norm_case);
  // Compares 2 strings after optionally normalizing them and or stripping
  // punctuation
  int CompareStrings(const string &str1, const string &str2, bool ignore_punc,
                     bool norm_case);
  // Neither pointer below is owned by this class.
  NeuralNet *combiner_net_;  // pointer to the combiner NeuralNet object
  CubeRecoContext *cube_cntxt_;  // used for language ID and data paths
};
}
#endif // TESSERACT_CCMAIN_TESSERACT_CUBE_COMBINER_H
| C++ |
///////////////////////////////////////////////////////////////////////
// File: pageiterator.h
// Description: Iterator for tesseract page structure that avoids using
// tesseract internal data structures.
// Author: Ray Smith
// Created: Fri Feb 26 11:01:06 PST 2010
//
// (C) Copyright 2010, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#ifndef TESSERACT_CCMAIN_PAGEITERATOR_H__
#define TESSERACT_CCMAIN_PAGEITERATOR_H__
#include "publictypes.h"
#include "platform.h"
struct BlamerBundle;
class C_BLOB_IT;
class PAGE_RES;
class PAGE_RES_IT;
class WERD;
struct Pix;
struct Pta;
namespace tesseract {
class Tesseract;
/**
* Class to iterate over tesseract page structure, providing access to all
* levels of the page hierarchy, without including any tesseract headers or
* having to handle any tesseract structures.
* WARNING! This class points to data held within the TessBaseAPI class, and
* therefore can only be used while the TessBaseAPI class still exists and
* has not been subjected to a call of Init, SetImage, Recognize, Clear, End
* DetectOS, or anything else that changes the internal PAGE_RES.
* See apitypes.h for the definition of PageIteratorLevel.
* See also ResultIterator, derived from PageIterator, which adds in the
* ability to access OCR output with text-specific methods.
*/
class TESS_API PageIterator {
public:
/**
* page_res and tesseract come directly from the BaseAPI.
* The rectangle parameters are copied indirectly from the Thresholder,
* via the BaseAPI. They represent the coordinates of some rectangle in an
* original image (in top-left-origin coordinates) and therefore the top-left
* needs to be added to any output boxes in order to specify coordinates
* in the original image. See TessBaseAPI::SetRectangle.
* The scale and scaled_yres are in case the Thresholder scaled the image
* rectangle prior to thresholding. Any coordinates in tesseract's image
* must be divided by scale before adding (rect_left, rect_top).
* The scaled_yres indicates the effective resolution of the binary image
* that tesseract has been given by the Thresholder.
* After the constructor, Begin has already been called.
*/
PageIterator(PAGE_RES* page_res, Tesseract* tesseract,
int scale, int scaled_yres,
int rect_left, int rect_top,
int rect_width, int rect_height);
virtual ~PageIterator();
/**
* Page/ResultIterators may be copied! This makes it possible to iterate over
* all the objects at a lower level, while maintaining an iterator to
* objects at a higher level. These constructors DO NOT CALL Begin, so
* iterations will continue from the location of src.
*/
PageIterator(const PageIterator& src);
const PageIterator& operator=(const PageIterator& src);
/** Are we positioned at the same location as other? */
bool PositionedAtSameWord(const PAGE_RES_IT* other) const;
// ============= Moving around within the page ============.
/**
* Moves the iterator to point to the start of the page to begin an
* iteration.
*/
virtual void Begin();
/**
* Moves the iterator to the beginning of the paragraph.
* This class implements this functionality by moving it to the zero indexed
* blob of the first (leftmost) word on the first row of the paragraph.
*/
virtual void RestartParagraph();
/**
* Return whether this iterator points anywhere in the first textline of a
* paragraph.
*/
bool IsWithinFirstTextlineOfParagraph() const;
/**
* Moves the iterator to the beginning of the text line.
* This class implements this functionality by moving it to the zero indexed
* blob of the first (leftmost) word of the row.
*/
virtual void RestartRow();
/**
* Moves to the start of the next object at the given level in the
* page hierarchy, and returns false if the end of the page was reached.
* NOTE that RIL_SYMBOL will skip non-text blocks, but all other
* PageIteratorLevel level values will visit each non-text block once.
* Think of non text blocks as containing a single para, with a single line,
* with a single imaginary word.
* Calls to Next with different levels may be freely intermixed.
* This function iterates words in right-to-left scripts correctly, if
* the appropriate language has been loaded into Tesseract.
*/
virtual bool Next(PageIteratorLevel level);
/**
* Returns true if the iterator is at the start of an object at the given
* level.
*
* For instance, suppose an iterator it is pointed to the first symbol of the
* first word of the third line of the second paragraph of the first block in
* a page, then:
* it.IsAtBeginningOf(RIL_BLOCK) = false
* it.IsAtBeginningOf(RIL_PARA) = false
* it.IsAtBeginningOf(RIL_TEXTLINE) = true
* it.IsAtBeginningOf(RIL_WORD) = true
* it.IsAtBeginningOf(RIL_SYMBOL) = true
*/
virtual bool IsAtBeginningOf(PageIteratorLevel level) const;
/**
* Returns whether the iterator is positioned at the last element in a
* given level. (e.g. the last word in a line, the last line in a block)
*
* Here's some two-paragraph example
* text. It starts off innocuously
* enough but quickly turns bizarre.
* The author inserts a cornucopia
* of words to guard against confused
* references.
*
* Now take an iterator it pointed to the start of "bizarre."
* it.IsAtFinalElement(RIL_PARA, RIL_SYMBOL) = false
* it.IsAtFinalElement(RIL_PARA, RIL_WORD) = true
* it.IsAtFinalElement(RIL_BLOCK, RIL_WORD) = false
*/
virtual bool IsAtFinalElement(PageIteratorLevel level,
PageIteratorLevel element) const;
/**
* Returns whether this iterator is positioned
* before other: -1
* equal to other: 0
* after other: 1
*/
int Cmp(const PageIterator &other) const;
// ============= Accessing data ==============.
// Coordinate system:
// Integer coordinates are at the cracks between the pixels.
// The top-left corner of the top-left pixel in the image is at (0,0).
// The bottom-right corner of the bottom-right pixel in the image is at
// (width, height).
// Every bounding box goes from the top-left of the top-left contained
// pixel to the bottom-right of the bottom-right contained pixel, so
// the bounding box of the single top-left pixel in the image is:
// (0,0)->(1,1).
// If an image rectangle has been set in the API, then returned coordinates
// relate to the original (full) image, rather than the rectangle.
/**
* Returns the bounding rectangle of the current object at the given level.
* See comment on coordinate system above.
* Returns false if there is no such object at the current position.
* The returned bounding box is guaranteed to match the size and position
* of the image returned by GetBinaryImage, but may clip foreground pixels
* from a grey image. The padding argument to GetImage can be used to expand
* the image to include more foreground pixels. See GetImage below.
*/
bool BoundingBox(PageIteratorLevel level,
int* left, int* top, int* right, int* bottom) const;
bool BoundingBox(PageIteratorLevel level, const int padding,
int* left, int* top, int* right, int* bottom) const;
/**
* Returns the bounding rectangle of the object in a coordinate system of the
* working image rectangle having its origin at (rect_left_, rect_top_) with
* respect to the original image and is scaled by a factor scale_.
*/
bool BoundingBoxInternal(PageIteratorLevel level,
int* left, int* top, int* right, int* bottom) const;
/** Returns whether there is no object of a given level. */
bool Empty(PageIteratorLevel level) const;
/**
* Returns the type of the current block. See apitypes.h for
* PolyBlockType.
*/
PolyBlockType BlockType() const;
/**
* Returns the polygon outline of the current block. The returned Pta must
* be ptaDestroy-ed after use. Note that the returned Pta lists the vertices
* of the polygon, and the last edge is the line segment between the last
* point and the first point. NULL will be returned if the iterator is
* at the end of the document or layout analysis was not used.
*/
Pta* BlockPolygon() const;
/**
* Returns a binary image of the current object at the given level.
* The position and size match the return from BoundingBoxInternal, and so
* this could be upscaled with respect to the original input image.
* Use pixDestroy to delete the image after use.
*/
Pix* GetBinaryImage(PageIteratorLevel level) const;
/**
* Returns an image of the current object at the given level in greyscale
* if available in the input. To guarantee a binary image use BinaryImage.
* NOTE that in order to give the best possible image, the bounds are
* expanded slightly over the binary connected component, by the supplied
* padding, so the top-left position of the returned image is returned
* in (left,top). These will most likely not match the coordinates
* returned by BoundingBox.
* If you do not supply an original image, you will get a binary one.
* Use pixDestroy to delete the image after use.
*/
Pix* GetImage(PageIteratorLevel level, int padding, Pix* original_img,
int* left, int* top) const;
/**
* Returns the baseline of the current object at the given level.
* The baseline is the line that passes through (x1, y1) and (x2, y2).
* WARNING: with vertical text, baselines may be vertical!
* Returns false if there is no baseline at the current position.
*/
bool Baseline(PageIteratorLevel level,
int* x1, int* y1, int* x2, int* y2) const;
/**
* Returns orientation for the block the iterator points to.
* orientation, writing_direction, textline_order: see publictypes.h
* deskew_angle: after rotating the block so the text orientation is
* upright, how many radians does one have to rotate the
* block anti-clockwise for it to be level?
* -Pi/4 <= deskew_angle <= Pi/4
*/
void Orientation(tesseract::Orientation *orientation,
tesseract::WritingDirection *writing_direction,
tesseract::TextlineOrder *textline_order,
float *deskew_angle) const;
/**
* Returns information about the current paragraph, if available.
*
* justification -
* LEFT if ragged right, or fully justified and script is left-to-right.
* RIGHT if ragged left, or fully justified and script is right-to-left.
* unknown if it looks like source code or we have very few lines.
* is_list_item -
* true if we believe this is a member of an ordered or unordered list.
* is_crown -
* true if the first line of the paragraph is aligned with the other
* lines of the paragraph even though subsequent paragraphs have first
* line indents. This typically indicates that this is the continuation
* of a previous paragraph or that it is the very first paragraph in
* the chapter.
* first_line_indent -
* For LEFT aligned paragraphs, the first text line of paragraphs of
* this kind are indented this many pixels from the left edge of the
* rest of the paragraph.
* for RIGHT aligned paragraphs, the first text line of paragraphs of
* this kind are indented this many pixels from the right edge of the
* rest of the paragraph.
* NOTE 1: This value may be negative.
* NOTE 2: if *is_crown == true, the first line of this paragraph is
* actually flush, and first_line_indent is set to the "common"
* first_line_indent for subsequent paragraphs in this block
* of text.
*/
void ParagraphInfo(tesseract::ParagraphJustification *justification,
bool *is_list_item,
bool *is_crown,
int *first_line_indent) const;
// If the current WERD_RES (it_->word()) is not NULL, sets the BlamerBundle
// of the current word to the given pointer (takes ownership of the pointer)
// and returns true.
// Can only be used when iterating on the word level.
bool SetWordBlamerBundle(BlamerBundle *blamer_bundle);
protected:
/**
* Sets up the internal data for iterating the blobs of a new word, then
* moves the iterator to the given offset.
*/
TESS_LOCAL void BeginWord(int offset);
/** Pointer to the page_res owned by the API. */
PAGE_RES* page_res_;
/** Pointer to the Tesseract object owned by the API. */
Tesseract* tesseract_;
/**
* The iterator to the page_res_. Owned by this ResultIterator.
* A pointer just to avoid dragging in Tesseract includes.
*/
PAGE_RES_IT* it_;
/**
* The current input WERD being iterated. If there is an output from OCR,
* then word_ is NULL. Owned by the API
*/
WERD* word_;
/** The length of the current word_. */
int word_length_;
/** The current blob index within the word. */
int blob_index_;
/**
* Iterator to the blobs within the word. If NULL, then we are iterating
* OCR results in the box_word.
* Owned by this ResultIterator.
*/
C_BLOB_IT* cblob_it_;
/** Parameters saved from the Thresholder. Needed to rebuild coordinates.*/
int scale_;
int scaled_yres_;
int rect_left_;
int rect_top_;
int rect_width_;
int rect_height_;
};
} // namespace tesseract.
#endif // TESSERACT_CCMAIN_PAGEITERATOR_H__
| C++ |
/******************************************************************
* File: output.cpp (Formerly output.c)
* Description: Output pass
* Author: Phil Cheatle
* Created: Thu Aug 4 10:56:08 BST 1994
*
* (C) Copyright 1994, Hewlett-Packard Ltd.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
#ifdef _MSC_VER
#pragma warning(disable:4244) // Conversion warnings
#endif
#include <string.h>
#include <ctype.h>
#ifdef __UNIX__
#include <assert.h>
#include <unistd.h>
#include <errno.h>
#endif
#include "helpers.h"
#include "tessvars.h"
#include "control.h"
#include "reject.h"
#include "docqual.h"
#include "output.h"
#include "globals.h"
#include "tesseractclass.h"
#define EPAPER_EXT ".ep"
#define PAGE_YSIZE 3508
#define CTRL_INSET '\024' //dc4=text inset
#define CTRL_FONT '\016' //so=font change
#define CTRL_DEFAULT '\017' //si=default font
#define CTRL_SHIFT '\022' //dc2=x shift
#define CTRL_TAB '\011' //tab
#define CTRL_NEWLINE '\012' //newline
#define CTRL_HARDLINE '\015' //cr
/**********************************************************************
 * pixels_to_pts
 *
 * Convert an integer number of pixels to the nearest integer
 * number of points (1 point = 1/72 inch) at the given resolution.
 **********************************************************************/
inT32 pixels_to_pts(                 //convert coords
                    inT32 pixels,    //pixel count to convert
                    inT32 pix_res    //resolution in pixels per inch
                   ) {
  // Scale by 72 points per inch, then round half-up to an integer.
  float points = pixels * 72.0 / pix_res;
  return (inT32) (points + 0.5);
}
namespace tesseract {
/**
 * output_pass
 *
 * Final output pass over the page: walks every recognized word in order
 * and hands it to write_results(), computing the newline type to emit
 * after each word. If target_word_box is non-NULL, only words whose
 * bounding-box centre lies inside that box are output (used when
 * re-recognizing a single word region).
 */
void Tesseract::output_pass(  //Tess output pass //send to api
                            PAGE_RES_IT &page_res_it,
                            const TBOX *target_word_box) {
  BLOCK_RES *block_of_last_word;
  BOOL8 force_eol;               //During output
  BLOCK *nextblock;              //block of next word
  WERD *nextword;                //next word

  page_res_it.restart_page ();
  block_of_last_word = NULL;
  while (page_res_it.word () != NULL) {
    check_debug_pt (page_res_it.word (), 120);

    // When restricted to a target box, skip words whose centre point
    // falls outside it.
    if (target_word_box)
    {
      TBOX current_word_box=page_res_it.word ()->word->bounding_box();
      FCOORD center_pt((current_word_box.right()+current_word_box.left())/2,(current_word_box.bottom()+current_word_box.top())/2);
      if (!target_word_box->contains(center_pt))
      {
        page_res_it.forward ();
        continue;
      }
    }
    if (tessedit_write_block_separators &&
        block_of_last_word != page_res_it.block ()) {
      block_of_last_word = page_res_it.block ();
    }

    // Force an end-of-line when this is the last word of a block (with
    // block separators enabled) or the last word on the page.
    force_eol = (tessedit_write_block_separators &&
                 (page_res_it.block () != page_res_it.next_block ())) ||
                (page_res_it.next_word () == NULL);

    if (page_res_it.next_word () != NULL)
      nextword = page_res_it.next_word ()->word;
    else
      nextword = NULL;
    if (page_res_it.next_block () != NULL)
      nextblock = page_res_it.next_block ()->block;
    else
      nextblock = NULL;
    //regardless of tilde crunching
    write_results(page_res_it,
                  determine_newline_type(page_res_it.word()->word,
                                         page_res_it.block()->block,
                                         nextword, nextblock), force_eol);
    page_res_it.forward();
  }
}
/*************************************************************************
 * write_results()
 *
 * All recognition and rejection has now been done. Generate the following:
 * .txt file - giving the final best choices with NO highlighting
 * .raw file - giving the tesseract top choice output for each word
 * .map file - showing how the .txt file has been rejected in the .ep file
 * epchoice list - a list of one element per word, containing the text for the
 * epaper. Reject strings are inserted.
 * inset list - a list of bounding boxes of reject insets - indexed by the
 * reject strings in the epchoice text.
 *
 * Also updates the stats_ fields that carry tilde-crunch and newline state
 * across successive calls, so runs of crunched (suppressed) words collapse
 * into a single reject marker.
 *************************************************************************/
void Tesseract::write_results(PAGE_RES_IT &page_res_it,
                              char newline_type,   // type of newline
                              BOOL8 force_eol) {   // override tilde crunch?
  WERD_RES *word = page_res_it.word();
  const UNICHARSET &uchset = *word->uch_set;
  int i;
  BOOL8 need_reject = FALSE;
  UNICHAR_ID space = uchset.unichar_to_id(" ");

  // TILDE-CRUNCHED (suppressed) WORDS: decide whether a reject marker is
  // needed in place of the word, update the carry-over state, and return.
  if ((word->unlv_crunch_mode != CR_NONE ||
       word->best_choice->length() == 0) &&
      !tessedit_zero_kelvin_rejection && !tessedit_word_for_word) {
    if ((word->unlv_crunch_mode != CR_DELETE) &&
        (!stats_.tilde_crunch_written ||
         ((word->unlv_crunch_mode == CR_KEEP_SPACE) &&
          (word->word->space () > 0) &&
          !word->word->flag (W_FUZZY_NON) &&
          !word->word->flag (W_FUZZY_SP)))) {
      // A definite (non-fuzzy) space before this word breaks any run of
      // tildes carried over from the previous word.
      if (!word->word->flag (W_BOL) &&
          (word->word->space () > 0) &&
          !word->word->flag (W_FUZZY_NON) &&
          !word->word->flag (W_FUZZY_SP)) {
        stats_.last_char_was_tilde = false;
      }
      need_reject = TRUE;
    }
    if ((need_reject && !stats_.last_char_was_tilde) ||
        (force_eol && stats_.write_results_empty_block)) {
      /* Write a reject char - mark as rejected unless zero_rejection mode */
      stats_.last_char_was_tilde = TRUE;
      stats_.tilde_crunch_written = true;
      stats_.last_char_was_newline = false;
      stats_.write_results_empty_block = false;
    }
    // End the (suppressed) line if this word ends it, or on forced EOL.
    if ((word->word->flag (W_EOL) && !stats_.last_char_was_newline) || force_eol) {
      stats_.tilde_crunch_written = false;
      stats_.last_char_was_newline = true;
      stats_.last_char_was_tilde = false;
    }
    if (force_eol)
      stats_.write_results_empty_block = true;
    return;
  }

  /* NORMAL PROCESSING of non tilde crunched words */
  stats_.tilde_crunch_written = false;
  if (newline_type)
    stats_.last_char_was_newline = true;
  else
    stats_.last_char_was_newline = false;
  stats_.write_results_empty_block = force_eol;  // about to write a real word

  if (unlv_tilde_crunching &&
      stats_.last_char_was_tilde &&
      (word->word->space() == 0) &&
      !(word->word->flag(W_REP_CHAR) && tessedit_write_rep_codes) &&
      (word->best_choice->unichar_id(0) == space)) {
    /* Prevent adjacent tilde across words - we know that adjacent tildes within
       words have been removed */
    word->MergeAdjacentBlobs(0);
  }
  // Record whether this word leaves a trailing "tilde" state for the next
  // call; a newline or rep-char word always clears it.
  if (newline_type ||
      (word->word->flag (W_REP_CHAR) && tessedit_write_rep_codes))
    stats_.last_char_was_tilde = false;
  else {
    if (word->reject_map.length () > 0) {
      if (word->best_choice->unichar_id(word->reject_map.length() - 1) == space)
        stats_.last_char_was_tilde = true;
      else
        stats_.last_char_was_tilde = false;
    }
    else if (word->word->space () > 0)
      stats_.last_char_was_tilde = false;
    /* else it is unchanged as there are no output chars */
  }

  // reject_map must stay in 1:1 correspondence with the best choice.
  ASSERT_HOST (word->best_choice->length() == word->reject_map.length());

  set_unlv_suspects(word);
  check_debug_pt (word, 120);
  if (tessedit_rejection_debug) {
    tprintf ("Dict word: \"%s\": %d\n",
             word->best_choice->debug_string().string(),
             dict_word(*(word->best_choice)));
  }
  // Optional blanket overrides of the rejection mechanisms (except for
  // rep-char words that will be written as repetition codes).
  if (!word->word->flag(W_REP_CHAR) || !tessedit_write_rep_codes) {
    if (tessedit_zero_rejection) {
      /* OVERRIDE ALL REJECTION MECHANISMS - ONLY REJECT TESS FAILURES */
      for (i = 0; i < word->best_choice->length(); ++i) {
        if (word->reject_map[i].rejected())
          word->reject_map[i].setrej_minimal_rej_accept();
      }
    }
    if (tessedit_minimal_rejection) {
      /* OVERRIDE ALL REJECTION MECHANISMS - ONLY REJECT TESS FAILURES */
      for (i = 0; i < word->best_choice->length(); ++i) {
        if ((word->best_choice->unichar_id(i) != space) &&
            word->reject_map[i].rejected())
          word->reject_map[i].setrej_minimal_rej_accept();
      }
    }
  }
}
} // namespace tesseract
/**********************************************************************
 * determine_newline_type
 *
 * Decide whether the line break after this word is a soft (wrapping)
 * newline or a deliberate hard newline.
 * Returns FALSE (0) if the word is not at the end of a line.
 **********************************************************************/
char determine_newline_type(                   //test line ends
                            WERD *word,        //word to do
                            BLOCK *block,      //current block
                            WERD *next_word,   //next word
                            BLOCK *next_block  //block of next word
                           ) {
  if (!word->flag (W_EOL))
    return FALSE;                              //not end of line
  // Last line of the block or of the document: always a plain newline.
  if (next_word == NULL || next_block == NULL || block != next_block)
    return CTRL_NEWLINE;
  // A tabbed-in next word means the break was deliberate.
  if (next_word->space () > 0)
    return CTRL_HARDLINE;

  TBOX this_box = word->bounding_box ();
  TBOX following_box = next_word->bounding_box ();
  TBOX enclosing_box = block->bounding_box ();

  // Unused space between this word and the right edge of the block,
  // less the block's inter-word space allowance.
  inT16 eol_gap = enclosing_box.right () - this_box.right ();
  eol_gap -= (inT32) block->space ();
  inT16 next_width = following_box.right () - following_box.left ();
  // If the next word would have fitted on this line, the break must have
  // been intentional: report a hard newline.
  return eol_gap > next_width ? CTRL_HARDLINE : CTRL_NEWLINE;
}
/*************************************************************************
* get_rep_char()
* Return the first accepted character from the repetition string. This is the
* character which is repeated - as determined earlier by fix_rep_char()
*************************************************************************/
namespace tesseract {
UNICHAR_ID Tesseract::get_rep_char(WERD_RES *word) {  // what char is repeated?
  // Scan past the leading run of rejected characters.
  int index = 0;
  while (index < word->reject_map.length() &&
         word->reject_map[index].rejected())
    ++index;
  if (index >= word->reject_map.length()) {
    // Every character was rejected: fall back to the "unrecognised" glyph.
    return word->uch_set->unichar_to_id(unrecognised_char.string());
  }
  return word->best_choice->unichar_id(index);
}
/*************************************************************************
 * SUSPECT LEVELS
 *
 * 0 - dont reject ANYTHING
 * 1,2 - partial rejection
 * 3 - BEST
 *
 * NOTE: to reject JUST tess failures in the .map file set suspect_level 3 and
 * tessedit_minimal_rejection.
 *
 * Downgrades (accepts) entries in word_res->reject_map according to the
 * configured suspect_level; levels 1 and 2 unreject progressively more
 * characters, level 0 unrejects everything, level 3+ leaves the map alone.
 *************************************************************************/
void Tesseract::set_unlv_suspects(WERD_RES *word_res) {
  int len = word_res->reject_map.length();
  const WERD_CHOICE &word = *(word_res->best_choice);
  const UNICHARSET &uchset = *word.unicharset();
  int i;
  float rating_per_ch;

  if (suspect_level == 0) {
    // Level 0: accept every previously-rejected character.
    for (i = 0; i < len; i++) {
      if (word_res->reject_map[i].rejected())
        word_res->reject_map[i].setrej_minimal_rej_accept();
    }
    return;
  }

  if (suspect_level >= 3)
    return;                      //Use defaults

  /* NOW FOR LEVELS 1 and 2 Find some stuff to unreject*/
  if (safe_dict_word(word_res) &&
      (count_alphas(word) > suspect_short_words)) {
    /* Unreject alphas in dictionary words */
    for (i = 0; i < len; ++i) {
      if (word_res->reject_map[i].rejected() &&
          uchset.get_isalpha(word.unichar_id(i)))
        word_res->reject_map[i].setrej_minimal_rej_accept();
    }
  }

  // Mean rating per character gates the remaining unrejection rules.
  rating_per_ch = word.rating() / word_res->reject_map.length();

  if (rating_per_ch >= suspect_rating_per_ch)
    return;                      //Dont touch bad ratings

  if ((word_res->tess_accepted) || (rating_per_ch < suspect_accept_rating)) {
    /* Unreject any Tess Acceptable word - but NOT tess reject chs*/
    for (i = 0; i < len; ++i) {
      if (word_res->reject_map[i].rejected() &&
          (!uchset.eq(word.unichar_id(i), " ")))
        word_res->reject_map[i].setrej_minimal_rej_accept();
    }
  }

  // Doc/block/row-level blanket rejections are always unrejected at
  // levels 1 and 2.
  for (i = 0; i < len; i++) {
    if (word_res->reject_map[i].rejected()) {
      if (word_res->reject_map[i].flag(R_DOC_REJ))
        word_res->reject_map[i].setrej_minimal_rej_accept();
      if (word_res->reject_map[i].flag(R_BLOCK_REJ))
        word_res->reject_map[i].setrej_minimal_rej_accept();
      if (word_res->reject_map[i].flag(R_ROW_REJ))
        word_res->reject_map[i].setrej_minimal_rej_accept();
    }
  }

  if (suspect_level == 2)
    return;

  // Everything below applies to level 1 only.
  if (!suspect_constrain_1Il ||
      (word_res->reject_map.length() <= suspect_short_words)) {
    // Unreject 1/I/l-confusion rejections (and matrix-match rejections
    // when 1Il constraining is off).
    for (i = 0; i < len; i++) {
      if (word_res->reject_map[i].rejected()) {
        if ((word_res->reject_map[i].flag(R_1IL_CONFLICT) ||
             word_res->reject_map[i].flag(R_POSTNN_1IL)))
          word_res->reject_map[i].setrej_minimal_rej_accept();

        if (!suspect_constrain_1Il &&
            word_res->reject_map[i].flag(R_MM_REJECT))
          word_res->reject_map[i].setrej_minimal_rej_accept();
      }
    }
  }

  // Acceptable-looking word or number strings longer than
  // suspect_short_words get most non-permanent rejections unrejected.
  if (acceptable_word_string(*word_res->uch_set,
                             word.unichar_string().string(),
                             word.unichar_lengths().string()) !=
          AC_UNACCEPTABLE ||
      acceptable_number_string(word.unichar_string().string(),
                               word.unichar_lengths().string())) {
    if (word_res->reject_map.length() > suspect_short_words) {
      for (i = 0; i < len; i++) {
        if (word_res->reject_map[i].rejected() &&
            (!word_res->reject_map[i].perm_rejected() ||
             word_res->reject_map[i].flag (R_1IL_CONFLICT) ||
             word_res->reject_map[i].flag (R_POSTNN_1IL) ||
             word_res->reject_map[i].flag (R_MM_REJECT))) {
          word_res->reject_map[i].setrej_minimal_rej_accept();
        }
      }
    }
  }
}
// Count the unichars in the word that the unicharset classifies as
// alphabetic.
inT16 Tesseract::count_alphas(const WERD_CHOICE &word) {
  int num_alphas = 0;
  int word_len = word.length();
  for (int index = 0; index < word_len; ++index) {
    if (word.unicharset()->get_isalpha(word.unichar_id(index)))
      ++num_alphas;
  }
  return num_alphas;
}
// Count the unichars in the word that the unicharset classifies as either
// alphabetic or a digit.
inT16 Tesseract::count_alphanums(const WERD_CHOICE &word) {
  int num_alphanums = 0;
  int word_len = word.length();
  for (int index = 0; index < word_len; ++index) {
    UNICHAR_ID id = word.unichar_id(index);
    if (word.unicharset()->get_isalpha(id) ||
        word.unicharset()->get_isdigit(id))
      ++num_alphanums;
  }
  return num_alphanums;
}
/*************************************************************************
 * acceptable_number_string()
 *
 * Returns TRUE if the word looks like a plausible number: an optional
 * leading "(", then an optional "$", ".", "+" or "-", then digits
 * optionally separated by single ".", "," or "-" characters, optionally
 * terminated by a final "%" or ")" (or the two-character tail "%)").
 *
 * s       - null-terminated UTF-8 string of the word.
 * lengths - parallel array giving the byte length of each unichar in s;
 *           s is advanced by *lengths per unichar.
 *
 * BUGFIX: the lengths pointer is now advanced in lockstep with s when the
 * optional prefix characters are skipped. Previously only s was advanced,
 * leaving the two parallel arrays misaligned, so any multi-byte unichar
 * later in the word was measured with a stale (1-byte) length - digits
 * could be misclassified and s mis-advanced. Pure single-byte input is
 * unaffected (the skipped entries were always length 1).
 *************************************************************************/
BOOL8 Tesseract::acceptable_number_string(const char *s,
                                          const char *lengths) {
  BOOL8 prev_digit = FALSE;

  // Optional leading open-bracket.
  if (*lengths == 1 && *s == '(') {
    s++;
    lengths++;  // keep the parallel length array aligned with s
  }

  // Optional leading currency/decimal-point/sign character.
  if (*lengths == 1 &&
      ((*s == '$') || (*s == '.') || (*s == '+') || (*s == '-'))) {
    s++;
    lengths++;  // keep the parallel length array aligned with s
  }

  for (; *s != '\0'; s += *(lengths++)) {
    if (unicharset.get_isdigit(s, *lengths))
      prev_digit = TRUE;
    // A single separator char is only allowed directly after a digit.
    else if (prev_digit &&
             (*lengths == 1 && ((*s == '.') || (*s == ',') || (*s == '-'))))
      prev_digit = FALSE;
    // Allow a trailing "%" or ")" as the very last character.
    else if (prev_digit && *lengths == 1 &&
             (*(s + *lengths) == '\0') && ((*s == '%') || (*s == ')')))
      return TRUE;
    // Allow the two-character tail "%)".
    else if (prev_digit &&
             *lengths == 1 && (*s == '%') &&
             (*(lengths + 1) == 1 && *(s + *lengths) == ')') &&
             (*(s + *lengths + *(lengths + 1)) == '\0'))
      return TRUE;
    else
      return FALSE;
  }
  return TRUE;
}
} // namespace tesseract
| C++ |
///////////////////////////////////////////////////////////////////////
// File: pgedit.h
// Description: Page structure file editor
// Author: Joern Wanke
// Created: Wed Jul 18 10:05:01 PDT 2007
//
// (C) Copyright 2007, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#ifndef PGEDIT_H
#define PGEDIT_H
#include "ocrblock.h"
#include "ocrrow.h"
#include "werd.h"
#include "rect.h"
#include "params.h"
#include "tesseractclass.h"
class ScrollView;
class SVMenuNode;
struct SVEvent;
// A small event handler class to process incoming events to
// this window.
class PGEventHandler : public SVEventHandler {
  public:
    // Takes a non-owning pointer to the Tesseract instance whose page
    // data the editor window displays.
    PGEventHandler(tesseract::Tesseract* tess) : tess_(tess) {
    }
    // Callback invoked by the ScrollView framework for each user event.
    void Notify(const SVEvent* sve);
  private:
    tesseract::Tesseract* tess_;  // not owned
};
extern BLOCK_LIST *current_block_list;
extern STRING_VAR_H (editor_image_win_name, "EditorImage",
"Editor image window name");
extern INT_VAR_H (editor_image_xpos, 590, "Editor image X Pos");
extern INT_VAR_H (editor_image_ypos, 10, "Editor image Y Pos");
extern INT_VAR_H (editor_image_height, 680, "Editor image height");
extern INT_VAR_H (editor_image_width, 655, "Editor image width");
extern INT_VAR_H (editor_image_word_bb_color, BLUE,
"Word bounding box colour");
extern INT_VAR_H (editor_image_blob_bb_color, YELLOW,
"Blob bounding box colour");
extern INT_VAR_H (editor_image_text_color, WHITE, "Correct text colour");
extern STRING_VAR_H (editor_dbwin_name, "EditorDBWin",
"Editor debug window name");
extern INT_VAR_H (editor_dbwin_xpos, 50, "Editor debug window X Pos");
extern INT_VAR_H (editor_dbwin_ypos, 500, "Editor debug window Y Pos");
extern INT_VAR_H (editor_dbwin_height, 24, "Editor debug window height");
extern INT_VAR_H (editor_dbwin_width, 80, "Editor debug window width");
extern STRING_VAR_H (editor_word_name, "BlnWords",
"BL normalised word window");
extern INT_VAR_H (editor_word_xpos, 60, "Word window X Pos");
extern INT_VAR_H (editor_word_ypos, 510, "Word window Y Pos");
extern INT_VAR_H (editor_word_height, 240, "Word window height");
extern INT_VAR_H (editor_word_width, 655, "Word window width");
extern double_VAR_H (editor_smd_scale_factor, 1.0, "Scaling for smd image");
// Returns (lazily creating) the baseline-normalised word window.
ScrollView* bln_word_window_handle();  //return handle
// Creates the main editor image window with the given dimensions.
void build_image_window(int width, int height);
// Draws horizontal guide lines for baseline-normalised text into window.
void display_bln_lines(ScrollView window,
                       ScrollView::Color colour,
                       float scale_factor,
                       float y_offset,
                       float minx,
                       float maxx);
                                 //function to call
void pgeditor_msg(  //message display
                  const char *msg);
void pgeditor_show_point(  //display coords
                         SVEvent *event);
                                 //put bln word in box
// Displays the word under (x, y) of page_res in the bln word window.
void show_point(PAGE_RES* page_res, float x, float y);
#endif
| C++ |
/**********************************************************************
* File: tessedit.cpp (Formerly tessedit.c)
* Description: Main program for merge of tess and editor.
* Author: Ray Smith
* Created: Tue Jan 07 15:21:46 GMT 1992
*
* (C) Copyright 1992, Hewlett-Packard Ltd.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
#include "stderr.h"
#include "basedir.h"
#include "tessvars.h"
#include "control.h"
#include "reject.h"
#include "pageres.h"
#include "nwmain.h"
#include "pgedit.h"
#include "tprintf.h"
#include "tessedit.h"
#include "stopper.h"
#include "intmatcher.h"
#include "chop.h"
#include "efio.h"
#include "danerror.h"
#include "globals.h"
#include "tesseractclass.h"
#include "params.h"
#define VARDIR "configs/" /*variables files */
//config under api
#define API_CONFIG "configs/api_config"
ETEXT_DESC *global_monitor = NULL; // progress monitor
namespace tesseract {
// Read a "config" file containing a set of variable, value pairs.
// Searches the standard places: tessdata/configs, tessdata/tessconfigs
// and also accepts a relative or absolute path name.
void Tesseract::read_config_file(const char *filename,
                                 SetParamConstraint constraint) {
  // Probe tessdata/configs first, then tessdata/tessconfigs; if the file
  // exists in neither, fall back to treating filename as a direct path.
  STRING path = datadir;
  path += "configs/";
  path += filename;
  FILE* fp = fopen(path.string(), "rb");
  if (fp == NULL) {
    path = datadir;
    path += "tessconfigs/";
    path += filename;
    fp = fopen(path.string(), "rb");
    if (fp == NULL)
      path = filename;
  }
  // The probes only test for existence; the actual parsing reopens the file.
  if (fp != NULL)
    fclose(fp);
  ParamUtils::ReadParamsFile(path.string(), constraint, this->params());
}
// Returns false if a unicharset file for the specified language was not found
// or was invalid.
// This function initializes TessdataManager. After TessdataManager is
// no longer needed, TessdataManager::End() should be called.
//
// This function sets tessedit_oem_mode to the given OcrEngineMode oem, unless
// it is OEM_DEFAULT, in which case the value of the variable will be obtained
// from the language-specific config file (stored in [lang].traineddata), from
// the config files specified on the command line or left as the default
// OEM_TESSERACT_ONLY if none of the configs specify this variable.
bool Tesseract::init_tesseract_lang_data(
    const char *arg0, const char *textbase, const char *language,
    OcrEngineMode oem, char **configs, int configs_size,
    const GenericVector<STRING> *vars_vec,
    const GenericVector<STRING> *vars_values,
    bool set_only_non_debug_params) {
  // Set the basename, compute the data directory.
  main_setup(arg0, textbase);

  // Set the language data path prefix; "eng" is the default language.
  lang = language != NULL ? language : "eng";
  language_data_path_prefix = datadir;
  language_data_path_prefix += lang;
  language_data_path_prefix += ".";

  // Initialize TessdataManager from [lang].traineddata.
  STRING tessdata_path = language_data_path_prefix + kTrainedDataSuffix;
  if (!tessdata_manager.Init(tessdata_path.string(),
                             tessdata_manager_debug_level)) {
    return false;
  }

  // If a language specific config file (lang.config) exists, load it in.
  if (tessdata_manager.SeekToStart(TESSDATA_LANG_CONFIG)) {
    ParamUtils::ReadParamsFromFp(
        tessdata_manager.GetDataFilePtr(),
        tessdata_manager.GetEndOffset(TESSDATA_LANG_CONFIG),
        SET_PARAM_CONSTRAINT_NONE, this->params());
    if (tessdata_manager_debug_level) {
      tprintf("Loaded language config file\n");
    }
  }

  SetParamConstraint set_params_constraint = set_only_non_debug_params ?
      SET_PARAM_CONSTRAINT_NON_DEBUG_ONLY : SET_PARAM_CONSTRAINT_NONE;
  // Load tesseract variables from config files. This is done after loading
  // language-specific variables from [lang].traineddata file, so that custom
  // config files can override values in [lang].traineddata file.
  for (int i = 0; i < configs_size; ++i) {
    read_config_file(configs[i], set_params_constraint);
  }

  // Set params specified in vars_vec (done after setting params from config
  // files, so that params in vars_vec can override those from files).
  if (vars_vec != NULL && vars_values != NULL) {
    for (int i = 0; i < vars_vec->size(); ++i) {
      if (!ParamUtils::SetParam((*vars_vec)[i].string(),
                                (*vars_values)[i].string(),
                                set_params_constraint, this->params())) {
        tprintf("Error setting param %s\n", (*vars_vec)[i].string());
        exit(1);
      }
    }
  }

  // Optionally dump the final parameter values (debug/training aid).
  if (((STRING &)tessedit_write_params_to_file).length() > 0) {
    FILE *params_file = fopen(tessedit_write_params_to_file.string(), "wb");
    if (params_file != NULL) {
      ParamUtils::PrintParams(params_file, this->params());
      fclose(params_file);
      if (tessdata_manager_debug_level > 0) {
        tprintf("Wrote parameters to %s\n",
                tessedit_write_params_to_file.string());
      }
    } else {
      tprintf("Failed to open %s for writing params.\n",
              tessedit_write_params_to_file.string());
    }
  }

  // Determine which ocr engine(s) should be loaded and used for recognition.
  if (oem != OEM_DEFAULT) tessedit_ocr_engine_mode.set_value(oem);
  if (tessdata_manager_debug_level) {
    tprintf("Loading Tesseract/Cube with tessedit_ocr_engine_mode %d\n",
            static_cast<int>(tessedit_ocr_engine_mode));
  }

  // If we are only loading the config file (and so not planning on doing any
  // recognition) then there's nothing else do here.
  if (tessedit_init_config_only) {
    if (tessdata_manager_debug_level) {
      tprintf("Returning after loading config file\n");
    }
    return true;
  }

  // Load the unicharset
  if (!tessdata_manager.SeekToStart(TESSDATA_UNICHARSET) ||
      !unicharset.load_from_file(tessdata_manager.GetDataFilePtr())) {
    return false;
  }
  if (unicharset.size() > MAX_NUM_CLASSES) {
    tprintf("Error: Size of unicharset is greater than MAX_NUM_CLASSES\n");
    return false;
  }
  if (tessdata_manager_debug_level) tprintf("Loaded unicharset\n");
  right_to_left_ = unicharset.major_right_to_left();

  // Setup initial unichar ambigs table and read universal ambigs.
  // encoder_unicharset snapshots the unicharset BEFORE ambigs may alter it.
  UNICHARSET encoder_unicharset;
  encoder_unicharset.CopyFrom(unicharset);
  unichar_ambigs.InitUnicharAmbigs(unicharset, use_ambigs_for_adaption);
  unichar_ambigs.LoadUniversal(encoder_unicharset, &unicharset);

  if (!tessedit_ambigs_training &&
      tessdata_manager.SeekToStart(TESSDATA_AMBIGS)) {
    TFile ambigs_file;
    ambigs_file.Open(tessdata_manager.GetDataFilePtr(),
                     tessdata_manager.GetEndOffset(TESSDATA_AMBIGS) + 1);
    unichar_ambigs.LoadUnicharAmbigs(
        encoder_unicharset,
        &ambigs_file,
        ambigs_debug_level, use_ambigs_for_adaption, &unicharset);
    if (tessdata_manager_debug_level) tprintf("Loaded ambigs\n");
  }

  // Load Cube objects if necessary.
  if (tessedit_ocr_engine_mode == OEM_CUBE_ONLY) {
    ASSERT_HOST(init_cube_objects(false, &tessdata_manager));
    if (tessdata_manager_debug_level)
      tprintf("Loaded Cube w/out combiner\n");
  } else if (tessedit_ocr_engine_mode == OEM_TESSERACT_CUBE_COMBINED) {
    ASSERT_HOST(init_cube_objects(true, &tessdata_manager));
    if (tessdata_manager_debug_level)
      tprintf("Loaded Cube with combiner\n");
  }

  // Init ParamsModel.
  // Load pass1 and pass2 weights (for now these two sets are the same, but in
  // the future separate sets of weights can be generated).
  for (int p = ParamsModel::PTRAIN_PASS1;
      p < ParamsModel::PTRAIN_NUM_PASSES; ++p) {
    language_model_->getParamsModel().SetPass(
        static_cast<ParamsModel::PassEnum>(p));
    if (tessdata_manager.SeekToStart(TESSDATA_PARAMS_MODEL)) {
      if (!language_model_->getParamsModel().LoadFromFp(
          lang.string(), tessdata_manager.GetDataFilePtr(),
          tessdata_manager.GetEndOffset(TESSDATA_PARAMS_MODEL))) {
        return false;
      }
    }
  }
  if (tessdata_manager_debug_level) language_model_->getParamsModel().Print();

  return true;
}
// Helper returns true if the given string is in the vector of strings.
static bool IsStrInList(const STRING& str,
                        const GenericVector<STRING>& str_list) {
  // Linear scan: the language lists are always small.
  int list_size = str_list.size();
  for (int index = 0; index < list_size; ++index) {
    if (str_list[index] == str)
      return true;
  }
  return false;
}
// Parse a string of the form [~]<lang>[+[~]<lang>]*.
// Langs with no prefix get appended to to_load, provided they
// are not in there already.
// Langs with ~ prefix get appended to not_to_load, provided they are not in
// there already.
void Tesseract::ParseLanguageString(const char* lang_str,
                                    GenericVector<STRING>* to_load,
                                    GenericVector<STRING>* not_to_load) {
  STRING remains(lang_str);
  // Repeatedly peel the leading language code off the front of remains.
  while (remains.length() > 0) {
    // Find the start of the lang code and which vector to add to.
    const char* start = remains.string();
    while (*start == '+')
      ++start;
    GenericVector<STRING>* target = to_load;
    if (*start == '~') {
      target = not_to_load;
      ++start;
    }
    // Find the index of the end of the lang code in string start.
    int end = strlen(start);
    const char* plus = strchr(start, '+');
    if (plus != NULL && plus - start < end)
      end = plus - start;
    STRING lang_code(start);
    lang_code.truncate_at(end);
    // NOTE: start points into remains' own buffer, so the tail is copied
    // into next BEFORE being assigned back into remains.
    STRING next(start + end);
    remains = next;
    // Check whether lang_code is already in the target vector and add.
    if (!IsStrInList(lang_code, *target)) {
      if (tessdata_manager_debug_level)
        tprintf("Adding language '%s' to list\n", lang_code.string());
      target->push_back(lang_code);
    }
  }
}
// Initialize for potentially a set of languages defined by the language
// string and recursively any additional languages required by any language
// traineddata file (via tessedit_load_sublangs in its config) that is loaded.
// See init_tesseract_internal for args.
// Returns 0 on success, -1 if no language at all could be loaded.
int Tesseract::init_tesseract(
    const char *arg0, const char *textbase, const char *language,
    OcrEngineMode oem, char **configs, int configs_size,
    const GenericVector<STRING> *vars_vec,
    const GenericVector<STRING> *vars_values,
    bool set_only_non_debug_params) {
  GenericVector<STRING> langs_to_load;
  GenericVector<STRING> langs_not_to_load;
  ParseLanguageString(language, &langs_to_load, &langs_not_to_load);

  // Discard any previously loaded secondary languages.
  sub_langs_.delete_data_pointers();
  sub_langs_.clear();
  // Find the first loadable lang and load into this.
  // Add any languages that this language requires
  bool loaded_primary = false;
  // Load the rest into sub_langs_.
  // NOTE: langs_to_load may grow while we iterate - each loaded language
  // can request further sub-languages via tessedit_load_sublangs - so the
  // loop bound is re-evaluated every iteration.
  for (int lang_index = 0; lang_index < langs_to_load.size(); ++lang_index) {
    if (!IsStrInList(langs_to_load[lang_index], langs_not_to_load)) {
      const char *lang_str = langs_to_load[lang_index].string();
      Tesseract *tess_to_init;
      if (!loaded_primary) {
        // The first successfully loaded language is loaded into *this.
        tess_to_init = this;
      } else {
        tess_to_init = new Tesseract;
      }

      int result = tess_to_init->init_tesseract_internal(
          arg0, textbase, lang_str, oem, configs, configs_size,
          vars_vec, vars_values, set_only_non_debug_params);

      if (!loaded_primary) {
        if (result < 0) {
          tprintf("Failed loading language '%s'\n", lang_str);
        } else {
          if (tessdata_manager_debug_level)
            tprintf("Loaded language '%s' as main language\n", lang_str);
          ParseLanguageString(tess_to_init->tessedit_load_sublangs.string(),
                              &langs_to_load, &langs_not_to_load);
          loaded_primary = true;
        }
      } else {
        if (result < 0) {
          tprintf("Failed loading language '%s'\n", lang_str);
          delete tess_to_init;
        } else {
          if (tessdata_manager_debug_level)
            tprintf("Loaded language '%s' as secondary language\n", lang_str);
          // sub_langs_ takes ownership of the new Tesseract instance.
          sub_langs_.push_back(tess_to_init);
          // Add any languages that this language requires
          ParseLanguageString(tess_to_init->tessedit_load_sublangs.string(),
                              &langs_to_load, &langs_not_to_load);
        }
      }
    }
  }
  if (!loaded_primary) {
    tprintf("Tesseract couldn't load any languages!\n");
    return -1;  // Couldn't load any language!
  }

  if (!sub_langs_.empty()) {
    // In multilingual mode word ratings have to be directly comparable,
    // so use the same language model weights for all languages:
    // use the primary language's params model if
    // tessedit_use_primary_params_model is set,
    // otherwise use default language model weights.
    if (tessedit_use_primary_params_model) {
      for (int s = 0; s < sub_langs_.size(); ++s) {
        sub_langs_[s]->language_model_->getParamsModel().Copy(
            this->language_model_->getParamsModel());
      }
      tprintf("Using params model of the primary language\n");
      if (tessdata_manager_debug_level) {
        this->language_model_->getParamsModel().Print();
      }
    } else {
      this->language_model_->getParamsModel().Clear();
      for (int s = 0; s < sub_langs_.size(); ++s) {
        sub_langs_[s]->language_model_->getParamsModel().Clear();
      }
      if (tessdata_manager_debug_level)
        tprintf("Using default language params\n");
    }
  }

  SetupUniversalFontIds();
  return 0;
}
// Common initialization for a single language.
// arg0 is the datapath for the tessdata directory, which could be the
// path of the tessdata directory with no trailing /, or (if tessdata
// lives in the same directory as the executable, the path of the executable,
// hence the name arg0.
// textbase is an optional output file basename (used only for training)
// language is the language code to load.
// oem controls which engine(s) will operate on the image
// configs (argv) is an array of config filenames to load variables from.
// May be NULL.
// configs_size (argc) is the number of elements in configs.
// vars_vec is an optional vector of variables to set.
// vars_values is an optional corresponding vector of values for the variables
// in vars_vec.
// If set_only_init_params is true, then only the initialization variables
// will be set.
// Common initialization for a single language.
// arg0 is the datapath for the tessdata directory (or the executable path if
// tessdata lives beside the executable). textbase is an optional output file
// basename (training only). language is the language code to load, oem the
// engine selection, configs/configs_size (argv/argc) optional config files,
// vars_vec/vars_values optional variable overrides. When
// set_only_non_debug_params is true only non-debug variables are set.
// Returns 0 on success, -1 if the language data could not be loaded.
int Tesseract::init_tesseract_internal(
    const char *arg0, const char *textbase, const char *language,
    OcrEngineMode oem, char **configs, int configs_size,
    const GenericVector<STRING> *vars_vec,
    const GenericVector<STRING> *vars_values,
    bool set_only_non_debug_params) {
  // Nothing can proceed without the traineddata components and variables.
  bool lang_data_ok =
      init_tesseract_lang_data(arg0, textbase, language, oem, configs,
                               configs_size, vars_vec, vars_values,
                               set_only_non_debug_params);
  if (!lang_data_ok)
    return -1;
  if (tessedit_init_config_only) {
    // Config-only initialization: no recognition structures are needed.
    tessdata_manager.End();
    return 0;
  }
  // If only Cube will be used, skip loading Tesseract classifier's
  // pre-trained templates.
  bool load_classifier =
      tessedit_ocr_engine_mode == OEM_TESSERACT_ONLY ||
      tessedit_ocr_engine_mode == OEM_TESSERACT_CUBE_COMBINED;
  // If only Cube will be used and it has its own unicharset, skip
  // initializing the permuter and loading the Tesseract dawgs.
  bool load_dict =
      !(tessedit_ocr_engine_mode == OEM_CUBE_ONLY &&
        tessdata_manager.SeekToStart(TESSDATA_CUBE_UNICHARSET));
  program_editup(textbase, load_classifier, load_dict);
  tessdata_manager.End();
  return 0;  // Normal exit.
}
// Helper builds the all_fonts table by adding new fonts from new_fonts.
static void CollectFonts(const UnicityTable<FontInfo>& new_fonts,
                         UnicityTable<FontInfo>* all_fonts) {
  // Append every font; UnicityTable de-duplicates as we push, so the result
  // is the union of all fonts seen so far.
  const int num_new = new_fonts.size();
  for (int font_index = 0; font_index < num_new; ++font_index)
    all_fonts->push_back(new_fonts.get(font_index));
}
// Helper assigns an id to lang_fonts using the index in all_fonts table.
static void AssignIds(const UnicityTable<FontInfo>& all_fonts,
UnicityTable<FontInfo>* lang_fonts) {
for (int i = 0; i < lang_fonts->size(); ++i) {
int index = all_fonts.get_id(lang_fonts->get(i));
lang_fonts->get_mutable(i)->universal_id = index;
}
}
// Set the universal_id member of each font to be unique among all
// instances of the same font loaded.
void Tesseract::SetupUniversalFontIds() {
  // Bitwise copying of FontInfo is acceptable here: all_fonts is temporary
  // and no delete callback is installed, so nothing is freed twice.
  UnicityTable<FontInfo> all_fonts;
  all_fonts.set_compare_callback(NewPermanentTessCallback(CompareFontInfo));
  // Merge the primary language's fonts with every sub-language's fonts into
  // one de-duplicated table whose indices define the universal ids.
  CollectFonts(get_fontinfo_table(), &all_fonts);
  for (int lang = 0; lang < sub_langs_.size(); ++lang)
    CollectFonts(sub_langs_[lang]->get_fontinfo_table(), &all_fonts);
  // Write the universal ids back into each per-language font table.
  AssignIds(all_fonts, &get_fontinfo_table());
  for (int lang = 0; lang < sub_langs_.size(); ++lang)
    AssignIds(all_fonts, &sub_langs_[lang]->get_fontinfo_table());
  font_table_size_ = all_fonts.size();
}
// init the LM component
// Initialize only the language-model component. Always uses the Tesseract
// engine and no configs/variable overrides. Returns 0 on success, -1 if the
// language data could not be loaded.
int Tesseract::init_tesseract_lm(const char *arg0,
                                 const char *textbase,
                                 const char *language) {
  bool loaded = init_tesseract_lang_data(arg0, textbase, language,
                                         OEM_TESSERACT_ONLY,
                                         NULL, 0, NULL, NULL, false);
  if (!loaded)
    return -1;
  getDict().Load(Dict::GlobalDawgCache());
  tessdata_manager.End();
  return 0;
}
// Tears down recognition state via end_recog(); the instance must be
// re-initialized before it can be used for recognition again.
void Tesseract::end_tesseract() {
  end_recog();
}
/* Define command type identifiers */
// Command identifiers dispatched by this translation unit's UI plumbing.
enum CMD_EVENTS
{
  ACTION_1_CMD_EVENT,   // generic command slot 1
  RECOG_WERDS,          // recognize selected words
  RECOG_PSEUDO,         // recognize selected blobs as a pseudo word
  ACTION_2_CMD_EVENT    // generic command slot 2
};
} // namespace tesseract
| C++ |
/**********************************************************************
* File: pgedit.cpp (Formerly pgeditor.c)
* Description: Page structure file editor
* Author: Phil Cheatle
* Created: Thu Oct 10 16:25:24 BST 1991
*
*(C) Copyright 1991, Hewlett-Packard Ltd.
** Licensed under the Apache License, Version 2.0(the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http:// www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
#ifdef _MSC_VER
#pragma warning(disable:4244) // Conversion warnings
#endif
// Include automatically generated configuration file if running autoconf.
#ifdef HAVE_CONFIG_H
#include "config_auto.h"
#endif
#include "pgedit.h"
#include <ctype.h>
#include <math.h>
#include <stdio.h>
#include "blread.h"
#include "control.h"
#include "paramsd.h"
#include "pageres.h"
#include "tordmain.h"
#include "scrollview.h"
#include "svmnode.h"
#include "statistc.h"
#include "tesseractclass.h"
#include "werdit.h"
#ifndef GRAPHICS_DISABLED
#define ASC_HEIGHT (2 * kBlnBaselineOffset + kBlnXHeight)
#define X_HEIGHT (kBlnBaselineOffset + kBlnXHeight)
#define BL_HEIGHT kBlnBaselineOffset
#define DESC_HEIGHT 0
#define MAXSPACING 128 /*max expected spacing in pix */
const ERRCODE EMPTYBLOCKLIST = "No blocks to edit";
// Menu/command identifiers for the page editor. The integer sent by the
// ScrollView menu is cast back to this enum in process_cmd_win_event().
enum CMD_EVENTS
{
  NULL_CMD_EVENT,               // no-op
  CHANGE_DISP_CMD_EVENT,        // mode: change per-word display flags
  DUMP_WERD_CMD_EVENT,          // mode: dump word data to debug output
  SHOW_POINT_CMD_EVENT,         // mode: show coordinates under the cursor
  SHOW_BLN_WERD_CMD_EVENT,      // mode: show baseline-normalized word
  DEBUG_WERD_CMD_EVENT,         // mode: recognize words with a config file
  BLAMER_CMD_EVENT,             // toggle: blamer info display
  BOUNDING_BOX_CMD_EVENT,       // toggle: word bounding boxes
  CORRECT_TEXT_CMD_EVENT,       // toggle: correct-text display
  POLYGONAL_CMD_EVENT,          // toggle: polygonal approximation
  BL_NORM_CMD_EVENT,            // toggle: baseline-normalized polygons
  BITMAP_CMD_EVENT,             // toggle: edge-step display
  IMAGE_CMD_EVENT,              // toggle: show the page image
  BLOCKS_CMD_EVENT,             // toggle: show block outlines
  BASELINES_CMD_EVENT,          // toggle: show row baselines
  UNIFORM_DISP_CMD_EVENT,       // redisplay all words with uniform flags
  REFRESH_CMD_EVENT,            // redisplay the page
  QUIT_CMD_EVENT,               // leave the editor
  RECOG_WERDS,                  // mode: recognize selected words
  RECOG_PSEUDO,                 // mode: recognize selected blobs
  SHOW_BLOB_FEATURES,           // mode: display classifier features
  SHOW_SUBSCRIPT_CMD_EVENT,     // coloration: highlight subscripts
  SHOW_SUPERSCRIPT_CMD_EVENT,   // coloration: highlight superscripts
  SHOW_ITALIC_CMD_EVENT,        // coloration: highlight italics
  SHOW_BOLD_CMD_EVENT,          // coloration: highlight bold
  SHOW_UNDERLINE_CMD_EVENT,     // coloration: underline (unsupported, see TODO)
  SHOW_FIXEDPITCH_CMD_EVENT,    // coloration: highlight fixed pitch
  SHOW_SERIF_CMD_EVENT,         // coloration: highlight serifs
  SHOW_SMALLCAPS_CMD_EVENT,     // coloration: highlight small caps
  SHOW_DROPCAPS_CMD_EVENT,      // coloration: highlight drop caps
};
// Which word/blob property word_display() highlights in red when the display
// is not in normal rainbow mode.
enum ColorationMode {
  CM_RAINBOW,       // normal display, no property highlighting
  CM_SUBSCRIPT,     // highlight subscript blobs
  CM_SUPERSCRIPT,   // highlight superscript blobs
  CM_ITALIC,        // highlight italic words
  CM_BOLD,          // highlight bold words
  CM_UNDERLINE,     // highlight underlined words (currently unsupported)
  CM_FIXEDPITCH,    // highlight fixed-pitch words
  CM_SERIF,         // highlight serif words
  CM_SMALLCAPS,     // highlight small-caps words
  CM_DROPCAPS       // highlight drop-cap blobs
};
/*
*
* Some global data
*
*/
ScrollView* image_win;                   // main page-image window
ParamsEditor* pe;                        // params editor attached to image_win
bool stillRunning = false;               // editor loop flag; cleared on SVET_EXIT
#ifdef __UNIX__
FILE *debug_window = NULL;               // opened on demand
#endif
ScrollView* bln_word_window = NULL;      // baseline norm words
CMD_EVENTS mode = CHANGE_DISP_CMD_EVENT; // selected words op
bool recog_done = false;                 // recog_all_words was called
// These variables should remain global, since they are only used for the
// debug mode (in which only a single Tesseract thread/instance will exist).
BITS16 word_display_mode;                       // active DF_* display flags
static ColorationMode color_mode = CM_RAINBOW;  // highlight mode, word_display()
BOOL8 display_image = FALSE;             // overlay the page image?
BOOL8 display_blocks = FALSE;            // overlay block outlines?
BOOL8 display_baselines = FALSE;         // overlay row baselines?
PAGE_RES *current_page_res = NULL;       // page currently being edited
// Window names/positions/sizes are user-tunable parameters.
STRING_VAR(editor_image_win_name, "EditorImage",
           "Editor image window name");
INT_VAR(editor_image_xpos, 590, "Editor image X Pos");
INT_VAR(editor_image_ypos, 10, "Editor image Y Pos");
INT_VAR(editor_image_menuheight, 50, "Add to image height for menu bar");
INT_VAR(editor_image_word_bb_color, ScrollView::BLUE,
        "Word bounding box colour");
INT_VAR(editor_image_blob_bb_color, ScrollView::YELLOW,
        "Blob bounding box colour");
INT_VAR(editor_image_text_color, ScrollView::WHITE,
        "Correct text colour");
STRING_VAR(editor_dbwin_name, "EditorDBWin",
           "Editor debug window name");
INT_VAR(editor_dbwin_xpos, 50, "Editor debug window X Pos");
INT_VAR(editor_dbwin_ypos, 500, "Editor debug window Y Pos");
INT_VAR(editor_dbwin_height, 24, "Editor debug window height");
INT_VAR(editor_dbwin_width, 80, "Editor debug window width");
STRING_VAR(editor_word_name, "BlnWords", "BL normalized word window");
INT_VAR(editor_word_xpos, 60, "Word window X Pos");
INT_VAR(editor_word_ypos, 510, "Word window Y Pos");
INT_VAR(editor_word_height, 240, "Word window height");
INT_VAR(editor_word_width, 655, "Word window width");
STRING_VAR(editor_debug_config_file, "", "Config file to apply to single words");
// Event handler for the baseline-normalized word window: forgets the global
// window handle when the window is destroyed (so it is lazily recreated on
// next use) and maps clicks to show_point().
class BlnEventHandler : public SVEventHandler {
 public:
  void Notify(const SVEvent* sv_event) {
    if (sv_event->type == SVET_DESTROY)
      bln_word_window = NULL;  // window gone; bln_word_window_handle() remakes it
    else if (sv_event->type == SVET_CLICK)
      show_point(current_page_res, sv_event->x, sv_event->y);
  }
};
/**
* bln_word_window_handle()
*
* @return a WINDOW for the word window, creating it if necessary
*/
// Returns the (singleton) baseline-normalized word window, creating it and
// attaching its event handler on first use.
ScrollView* bln_word_window_handle() {  // return handle
  if (bln_word_window != NULL)
    return bln_word_window;  // already open
  pgeditor_msg("Creating BLN word window...");
  bln_word_window = new ScrollView(editor_word_name.string(),
                                   editor_word_xpos, editor_word_ypos,
                                   editor_word_width, editor_word_height,
                                   4000, 4000, true);
  bln_word_window->AddEventHandler(new BlnEventHandler());
  pgeditor_msg("Creating BLN word window...Done");
  return bln_word_window;
}
/**
* build_image_window()
*
* Destroy the existing image window if there is one. Work out how big the
* new window needs to be. Create it and re-display.
*/
// Destroys any existing image window and creates a fresh one sized for the
// given page dimensions (plus room for the menu bar).
void build_image_window(int width, int height) {
  delete image_win;  // deleting NULL is a harmless no-op
  image_win = new ScrollView(editor_image_win_name.string(),
                             editor_image_xpos, editor_image_ypos,
                             width + 1,
                             height + editor_image_menuheight + 1,
                             width, height, true);
}
/**
* display_bln_lines()
*
* Display normalized baseline, x-height, ascender limit and descender limit
*/
// Draws the four baseline-normalization reference lines (descender limit,
// baseline, x-height, ascender limit) across [minx, maxx], scaled and
// offset vertically.
void display_bln_lines(ScrollView* window,
                       ScrollView::Color colour,
                       float scale_factor,
                       float y_offset,
                       float minx,
                       float maxx) {
  const int heights[] = {DESC_HEIGHT, BL_HEIGHT, X_HEIGHT, ASC_HEIGHT};
  window->Pen(colour);
  for (int i = 0; i < 4; ++i) {
    const float y = y_offset + scale_factor * heights[i];
    window->Line(minx, y, maxx, y);
  }
}
/**
* notify()
*
* Event handler that processes incoming events, either forwarding
* them to process_cmd_win_event or process_image_event.
*
*/
// Routes incoming ScrollView events: popups go to the ParamsEditor, exit
// stops the editor loop, menu events go to process_cmd_win_event() with any
// boolean parameter encoded as 'T'/'F', and everything else is treated as an
// image-window action.
void PGEventHandler::Notify(const SVEvent* event) {
  switch (event->type) {
    case SVET_POPUP:
      pe->Notify(event);  // handled by ParamsEditor
      break;
    case SVET_EXIT:
      stillRunning = false;
      break;
    case SVET_MENU: {
      char bool_flag = '0';  // default when the parameter is not a boolean
      if (strcmp(event->parameter, "true") == 0)
        bool_flag = 'T';
      else if (strcmp(event->parameter, "false") == 0)
        bool_flag = 'F';
      tess_->process_cmd_win_event(event->command_id, &bool_flag);
      break;
    }
    default:
      tess_->process_image_event(*event);
      break;
  }
}
/**
* build_menu()
*
* Construct the menu tree used by the command window
*/
namespace tesseract {
// Builds the command-window menu tree. Ownership of the returned root node
// passes to the caller.
SVMenuNode *Tesseract::build_menu_new() {
  SVMenuNode* root = new SVMenuNode();
  // MODES: what a subsequent selection in the image window will do.
  SVMenuNode* menu = root->AddChild("MODES");
  menu->AddChild("Change Display", CHANGE_DISP_CMD_EVENT);
  menu->AddChild("Dump Word", DUMP_WERD_CMD_EVENT);
  menu->AddChild("Show Point", SHOW_POINT_CMD_EVENT);
  menu->AddChild("Show BL Norm Word", SHOW_BLN_WERD_CMD_EVENT);
  menu->AddChild("Config Words", DEBUG_WERD_CMD_EVENT);
  menu->AddChild("Recog Words", RECOG_WERDS);
  menu->AddChild("Recog Blobs", RECOG_PSEUDO);
  menu->AddChild("Show Blob Features", SHOW_BLOB_FEATURES);
  // DISPLAY: boolean toggles and coloration choices for drawing words.
  menu = root->AddChild("DISPLAY");
  menu->AddChild("Blamer", BLAMER_CMD_EVENT, FALSE);
  menu->AddChild("Bounding Boxes", BOUNDING_BOX_CMD_EVENT, FALSE);
  menu->AddChild("Correct Text", CORRECT_TEXT_CMD_EVENT, FALSE);
  menu->AddChild("Polygonal Approx", POLYGONAL_CMD_EVENT, FALSE);
  menu->AddChild("Baseline Normalized", BL_NORM_CMD_EVENT, FALSE);
  menu->AddChild("Edge Steps", BITMAP_CMD_EVENT, TRUE);
  menu->AddChild("Subscripts", SHOW_SUBSCRIPT_CMD_EVENT);
  menu->AddChild("Superscripts", SHOW_SUPERSCRIPT_CMD_EVENT);
  menu->AddChild("Italics", SHOW_ITALIC_CMD_EVENT);
  menu->AddChild("Bold", SHOW_BOLD_CMD_EVENT);
  menu->AddChild("Underline", SHOW_UNDERLINE_CMD_EVENT);
  menu->AddChild("FixedPitch", SHOW_FIXEDPITCH_CMD_EVENT);
  menu->AddChild("Serifs", SHOW_SERIF_CMD_EVENT);
  menu->AddChild("SmallCaps", SHOW_SMALLCAPS_CMD_EVENT);
  menu->AddChild("DropCaps", SHOW_DROPCAPS_CMD_EVENT);
  // OTHER: whole-page commands.
  menu = root->AddChild("OTHER");
  menu->AddChild("Quit", QUIT_CMD_EVENT);
  menu->AddChild("Show Image", IMAGE_CMD_EVENT, FALSE);
  menu->AddChild("ShowBlock Outlines", BLOCKS_CMD_EVENT, FALSE);
  menu->AddChild("Show Baselines", BASELINES_CMD_EVENT, FALSE);
  menu->AddChild("Uniform Display", UNIFORM_DISP_CMD_EVENT);
  menu->AddChild("Refresh Display", REFRESH_CMD_EVENT);
  return root;
}
/**
* do_re_display()
*
* Redisplay page
*/
// Redraws the whole page: clears the window, optionally paints the binary
// page image, then applies word_painter to every word. Row baselines and
// block outlines are overlaid once per new row/block when their global
// display flags are on.
void Tesseract::do_re_display(
    BOOL8 (tesseract::Tesseract::*word_painter)(PAGE_RES_IT* pr_it)) {
  int block_count = 1;  // label passed to each block plot
  image_win->Clear();
  if (display_image != 0) {
    image_win->Image(pix_binary_, 0, 0);
  }
  PAGE_RES_IT pr_it(current_page_res);
  for (WERD_RES* word = pr_it.word(); word != NULL; word = pr_it.forward()) {
    (this->*word_painter)(&pr_it);
    // prev_row()/prev_block() change exactly when the iterator crosses into
    // a new row/block, so each baseline/outline is drawn only once.
    if (display_baselines && pr_it.row() != pr_it.prev_row())
      pr_it.row()->row->plot_baseline(image_win, ScrollView::GREEN);
    if (display_blocks && pr_it.block() != pr_it.prev_block())
      pr_it.block()->block->plot(image_win, block_count++, ScrollView::RED);
  }
  image_win->Update();
}
/**
* pgeditor_main()
*
* Top level editor operation:
* Setup a new window and an according event handler
*
*/
// Top-level editor entry: builds the image window, menus and event handler,
// draws the page, then blocks until the window is destroyed.
void Tesseract::pgeditor_main(int width, int height, PAGE_RES *page_res) {
  current_page_res = page_res;
  if (current_page_res->block_res_list.empty())
    return;  // nothing to edit
  recog_done = false;
  stillRunning = true;
  build_image_window(width, height);
  // Start with edge-step display enabled so words are visible immediately.
  word_display_mode.turn_on_bit(DF_EDGE_STEP);
  do_re_display(&tesseract::Tesseract::word_set_display);
#ifndef GRAPHICS_DISABLED
  pe = new ParamsEditor(this, image_win);
#endif
  PGEventHandler pgEventHandler(this);
  image_win->AddEventHandler(&pgEventHandler);
  image_win->AddMessageBox();
  SVMenuNode* svMenuRoot = build_menu_new();
  svMenuRoot->BuildMenu(image_win);
  image_win->SetVisible(true);
  // Block until the window is closed, then detach the stack-allocated
  // handler so no later event can reference it.
  image_win->AwaitEvent(SVET_DESTROY);
  image_win->AddEventHandler(NULL);
}
} // namespace tesseract
/**
* pgeditor_msg()
*
* Display a message - in the command window if there is one, or to stdout
*/
// Displays a status message in the image window's message area.
void pgeditor_msg( // message display
    const char *msg) {
  image_win->AddMessage(msg);
}
/**
* pgeditor_show_point()
*
* Display the coordinates of a point in the command window
*/
// Reports the coordinates of an event's point in the message area.
void pgeditor_show_point( // display coords
    SVEvent *event) {
  image_win->AddMessage("Pointing at(%d, %d)", event->x, event->y);
}
/**
* process_cmd_win_event()
*
* Process a command returned from the command window
* (Just call the appropriate command handler)
*/
namespace tesseract {
// Dispatches a command-window menu event. Returns TRUE only for
// QUIT_CMD_EVENT, signalling the caller to exit.
BOOL8 Tesseract::process_cmd_win_event( // UI command semantics
    inT32 cmd_event,  // which menu item?
    char *new_value   // any prompt data
    ) {
  char msg[160];
  BOOL8 exit = FALSE;
  // Every command resets to normal coloration; the SHOW_* cases below pick a
  // highlight mode again before redisplaying.
  color_mode = CM_RAINBOW;
  // Run recognition on the full page if needed: the highlight modes and the
  // blamer display need recognition results before they can color anything.
  switch (cmd_event) {
    case BLAMER_CMD_EVENT:
    case SHOW_SUBSCRIPT_CMD_EVENT:
    case SHOW_SUPERSCRIPT_CMD_EVENT:
    case SHOW_ITALIC_CMD_EVENT:
    case SHOW_BOLD_CMD_EVENT:
    case SHOW_UNDERLINE_CMD_EVENT:
    case SHOW_FIXEDPITCH_CMD_EVENT:
    case SHOW_SERIF_CMD_EVENT:
    case SHOW_SMALLCAPS_CMD_EVENT:
    case SHOW_DROPCAPS_CMD_EVENT:
      if (!recog_done) {
        recog_all_words(current_page_res, NULL, NULL, NULL, 0);
        recog_done = true;
      }
      break;
    default:
      break;
  }
  switch (cmd_event) {
    case NULL_CMD_EVENT:
      break;
    // Mode selections: remember what the next image selection will do.
    case CHANGE_DISP_CMD_EVENT:
    case DUMP_WERD_CMD_EVENT:
    case SHOW_POINT_CMD_EVENT:
    case SHOW_BLN_WERD_CMD_EVENT:
    case RECOG_WERDS:
    case RECOG_PSEUDO:
    case SHOW_BLOB_FEATURES:
      mode = (CMD_EVENTS) cmd_event;
      break;
    case DEBUG_WERD_CMD_EVENT:
      // Prompt for the config file to apply to selected words.
      mode = DEBUG_WERD_CMD_EVENT;
      word_config_ = image_win->ShowInputDialog("Config File Name");
      break;
    // Display-flag toggles: new_value[0] is 'T' or 'F' (set by
    // PGEventHandler::Notify from the menu's "true"/"false" parameter).
    case BOUNDING_BOX_CMD_EVENT:
      if (new_value[0] == 'T')
        word_display_mode.turn_on_bit(DF_BOX);
      else
        word_display_mode.turn_off_bit(DF_BOX);
      mode = CHANGE_DISP_CMD_EVENT;
      break;
    case BLAMER_CMD_EVENT:
      if (new_value[0] == 'T')
        word_display_mode.turn_on_bit(DF_BLAMER);
      else
        word_display_mode.turn_off_bit(DF_BLAMER);
      do_re_display(&tesseract::Tesseract::word_display);
      mode = CHANGE_DISP_CMD_EVENT;
      break;
    case CORRECT_TEXT_CMD_EVENT:
      if (new_value[0] == 'T')
        word_display_mode.turn_on_bit(DF_TEXT);
      else
        word_display_mode.turn_off_bit(DF_TEXT);
      mode = CHANGE_DISP_CMD_EVENT;
      break;
    case POLYGONAL_CMD_EVENT:
      if (new_value[0] == 'T')
        word_display_mode.turn_on_bit(DF_POLYGONAL);
      else
        word_display_mode.turn_off_bit(DF_POLYGONAL);
      mode = CHANGE_DISP_CMD_EVENT;
      break;
    case BL_NORM_CMD_EVENT:
      if (new_value[0] == 'T')
        word_display_mode.turn_on_bit(DF_BN_POLYGONAL);
      else
        word_display_mode.turn_off_bit(DF_BN_POLYGONAL);
      mode = CHANGE_DISP_CMD_EVENT;
      break;
    case BITMAP_CMD_EVENT:
      if (new_value[0] == 'T')
        word_display_mode.turn_on_bit(DF_EDGE_STEP);
      else
        word_display_mode.turn_off_bit(DF_EDGE_STEP);
      mode = CHANGE_DISP_CMD_EVENT;
      break;
    case UNIFORM_DISP_CMD_EVENT:
      do_re_display(&tesseract::Tesseract::word_set_display);
      break;
    // Page-level overlays: set the global flag and redraw.
    case IMAGE_CMD_EVENT:
      display_image = (new_value[0] == 'T');
      do_re_display(&tesseract::Tesseract::word_display);
      break;
    case BLOCKS_CMD_EVENT:
      display_blocks = (new_value[0] == 'T');
      do_re_display(&tesseract::Tesseract::word_display);
      break;
    case BASELINES_CMD_EVENT:
      display_baselines = (new_value[0] == 'T');
      do_re_display(&tesseract::Tesseract::word_display);
      break;
    // Coloration modes: pick a highlight property and redraw.
    case SHOW_SUBSCRIPT_CMD_EVENT:
      color_mode = CM_SUBSCRIPT;
      do_re_display(&tesseract::Tesseract::word_display);
      break;
    case SHOW_SUPERSCRIPT_CMD_EVENT:
      color_mode = CM_SUPERSCRIPT;
      do_re_display(&tesseract::Tesseract::word_display);
      break;
    case SHOW_ITALIC_CMD_EVENT:
      color_mode = CM_ITALIC;
      do_re_display(&tesseract::Tesseract::word_display);
      break;
    case SHOW_BOLD_CMD_EVENT:
      color_mode = CM_BOLD;
      do_re_display(&tesseract::Tesseract::word_display);
      break;
    case SHOW_UNDERLINE_CMD_EVENT:
      color_mode = CM_UNDERLINE;
      do_re_display(&tesseract::Tesseract::word_display);
      break;
    case SHOW_FIXEDPITCH_CMD_EVENT:
      color_mode = CM_FIXEDPITCH;
      do_re_display(&tesseract::Tesseract::word_display);
      break;
    case SHOW_SERIF_CMD_EVENT:
      color_mode = CM_SERIF;
      do_re_display(&tesseract::Tesseract::word_display);
      break;
    case SHOW_SMALLCAPS_CMD_EVENT:
      color_mode = CM_SMALLCAPS;
      do_re_display(&tesseract::Tesseract::word_display);
      break;
    case SHOW_DROPCAPS_CMD_EVENT:
      color_mode = CM_DROPCAPS;
      do_re_display(&tesseract::Tesseract::word_display);
      break;
    case REFRESH_CMD_EVENT:
      do_re_display(&tesseract::Tesseract::word_display);
      break;
    case QUIT_CMD_EVENT:
      exit = TRUE;
      ScrollView::Exit();
      break;
    default:
      sprintf(msg, "Unrecognised event " INT32FORMAT "(%s)",
              cmd_event, new_value);
      image_win->AddMessage(msg);
      break;
  }
  return exit;
}
/**
* process_image_event()
*
* User has done something in the image window - mouse down or up. Work out
* what it is and do something with it.
* If DOWN - just remember where it was.
* If UP - for each word in the selected area do the operation defined by
* the current mode.
*/
// Handles an action in the image window. Only SVET_SELECTION events are
// acted on: the selection rectangle arrives as origin (x, y) plus size, so
// the far corner becomes the remembered "down" point and the origin the
// "up" point; the current mode's operation is then applied to every word
// inside the resulting box.
// Cleanup: the original code re-tested event.type inside its own case label
// (always true, dead condition) and fell through into an empty default; the
// dead check is removed and the break made explicit. Behavior is unchanged.
void Tesseract::process_image_event( // action in image win
    const SVEvent &event) {
  // `down` must remain static: the debug editor uses a single Tesseract
  // instance, and the position has to survive between events.
  static ICOORD down;
  ICOORD up;
  TBOX selection_box;
  char msg[80];
  switch (event.type) {
    case SVET_SELECTION:
      down.set_x(event.x + event.x_size);
      down.set_y(event.y + event.y_size);
      if (mode == SHOW_POINT_CMD_EVENT)
        show_point(current_page_res, event.x, event.y);
      up.set_x(event.x);
      up.set_y(event.y);
      selection_box = TBOX(down, up);
      // Apply the current mode's operation to the selected words.
      switch (mode) {
        case CHANGE_DISP_CMD_EVENT:
          process_selected_words(
              current_page_res,
              selection_box,
              &tesseract::Tesseract::word_blank_and_set_display);
          break;
        case DUMP_WERD_CMD_EVENT:
          process_selected_words(current_page_res,
                                 selection_box,
                                 &tesseract::Tesseract::word_dumper);
          break;
        case SHOW_BLN_WERD_CMD_EVENT:
          process_selected_words(current_page_res,
                                 selection_box,
                                 &tesseract::Tesseract::word_bln_display);
          break;
        case DEBUG_WERD_CMD_EVENT:
          debug_word(current_page_res, selection_box);
          break;
        case SHOW_POINT_CMD_EVENT:
          break;  // already handled above
        case RECOG_WERDS:
          image_win->AddMessage("Recogging selected words");
          this->process_selected_words(current_page_res,
                                       selection_box,
                                       &Tesseract::recog_interactive);
          break;
        case RECOG_PSEUDO:
          image_win->AddMessage("Recogging selected blobs");
          recog_pseudo_word(current_page_res, selection_box);
          break;
        case SHOW_BLOB_FEATURES:
          blob_feature_display(current_page_res, selection_box);
          break;
        default:
          sprintf(msg, "Mode %d not yet implemented", mode);
          image_win->AddMessage(msg);
          break;
      }
      break;
    default:
      break;  // other event types are ignored here
  }
}
/**
* debug_word
*
* Process the whole image, but load word_config_ for the selected word(s).
*/
// Processes the whole page, applying word_config_ only to the words inside
// selection_box. The adaptive classifier is reset first so the debug run is
// not influenced by earlier adaptation.
void Tesseract::debug_word(PAGE_RES* page_res, const TBOX &selection_box) {
  ResetAdaptiveClassifier();
  recog_all_words(page_res, NULL, &selection_box, word_config_.string(), 0);
}
} // namespace tesseract
/**
* show_point()
*
* Show coords of point, blob bounding box, word bounding box and offset from
* row baseline
*/
// Shows the coordinates of the point, plus the baseline height, word
// bounding box and blob bounding boxes of everything containing it.
// Fix: the message is assembled in a fixed 160-byte buffer inside a loop
// over every word (and blob) containing the point; the old sprintf calls
// could overflow it. All writes now use snprintf with the remaining space,
// clamping the running length so truncation is safe instead of UB.
void show_point(PAGE_RES* page_res, float x, float y) {
  FCOORD pt(x, y);
  PAGE_RES_IT pr_it(page_res);
  char msg[160];
  const int kMsgSize = sizeof(msg);
  int len = snprintf(msg, kMsgSize, "Pt:(%0.3f, %0.3f) ", x, y);
  if (len >= kMsgSize) len = kMsgSize - 1;  // snprintf returns would-be length
  for (WERD_RES* word = pr_it.word(); word != NULL; word = pr_it.forward()) {
    // Report the baseline height of each new row containing the point.
    if (pr_it.row() != pr_it.prev_row() &&
        pr_it.row()->row->bounding_box().contains(pt)) {
      len += snprintf(msg + len, kMsgSize - len, "BL(x)=%0.3f ",
                      pr_it.row()->row->base_line(x));
      if (len >= kMsgSize) len = kMsgSize - 1;
    }
    if (word->word->bounding_box().contains(pt)) {
      TBOX box = word->word->bounding_box();
      len += snprintf(msg + len, kMsgSize - len, "Wd(%d, %d)/(%d, %d) ",
                      box.left(), box.bottom(),
                      box.right(), box.top());
      if (len >= kMsgSize) len = kMsgSize - 1;
      // Also report every blob of this word whose box contains the point.
      C_BLOB_IT cblob_it(word->word->cblob_list());
      for (cblob_it.mark_cycle_pt();
           !cblob_it.cycled_list();
           cblob_it.forward()) {
        C_BLOB* cblob = cblob_it.data();
        box = cblob->bounding_box();
        if (box.contains(pt)) {
          len += snprintf(msg + len, kMsgSize - len,
                          "CBlb(%d, %d)/(%d, %d) ",
                          box.left(), box.bottom(),
                          box.right(), box.top());
          if (len >= kMsgSize) len = kMsgSize - 1;
        }
      }
    }
  }
  image_win->AddMessage(msg);
}
/**********************************************************************
* WERD PROCESSOR FUNCTIONS
* ========================
*
* These routines are invoked by one or more of:
* process_all_words()
* process_selected_words()
* or
* process_all_words_it()
* process_selected_words_it()
* for each word to be processed
**********************************************************************/
/**
* word_blank_and_set_display() Word processor
*
* Blank display of word then redisplay word according to current display mode
* settings
*/
#endif // GRAPHICS_DISABLED
namespace tesseract {
#ifndef GRAPHICS_DISABLED
// Word processor: blanks the word's area by over-painting its bounding box
// in black, then redraws it with the current display-mode flags.
BOOL8 Tesseract::word_blank_and_set_display(PAGE_RES_IT* pr_it) {
  pr_it->word()->word->bounding_box().plot(image_win, ScrollView::BLACK,
                                           ScrollView::BLACK);
  return word_set_display(pr_it);
}
/**
* word_bln_display()
*
* Normalize word and display in word window
*/
// Word processor: normalizes the word if necessary and displays it in the
// baseline-normalized word window. Always returns TRUE.
BOOL8 Tesseract::word_bln_display(PAGE_RES_IT* pr_it) {
  WERD_RES* word_res = pr_it->word();
  if (word_res->chopped_word == NULL) {
    // Setup word normalization parameters.
    word_res->SetupForRecognition(unicharset, this, BestPix(),
                                  tessedit_ocr_engine_mode, NULL,
                                  classify_bln_numeric_mode,
                                  textord_use_cjk_fp_model,
                                  poly_allow_detailed_fx,
                                  pr_it->row()->row, pr_it->block()->block);
  }
  bln_word_window_handle()->Clear();
  // Draw the reference lines, then each blob in a rotating color.
  display_bln_lines(bln_word_window_handle(), ScrollView::CYAN,
                    1.0, 0.0f, -1000.0f, 1000.0f);
  C_BLOB_IT it(word_res->word->cblob_list());
  ScrollView::Color color = WERD::NextColor(ScrollView::BLACK);
  for (it.mark_cycle_pt(); !it.cycled_list(); it.forward()) {
    it.data()->plot_normed(word_res->denorm, color, ScrollView::BROWN,
                           bln_word_window_handle());
    color = WERD::NextColor(color);
  }
  bln_word_window_handle()->Update();
  return TRUE;
}
/**
* word_display() Word Processor
*
* Display a word according to its display modes
*/
// Word processor: draws a word according to its display flags, or — when a
// coloration mode is active and recognition results exist — paints each
// blob's box GREEN/RED depending on the property selected by color_mode.
BOOL8 Tesseract::word_display(PAGE_RES_IT* pr_it) {
  WERD_RES* word_res = pr_it->word();
  WERD* word = word_res->word;
  TBOX word_bb;                    // word bounding box
  int word_height;                 // ht of word BB
  BOOL8 displayed_something = FALSE;
  float shift;                     // from bot left
  C_BLOB_IT c_it;                  // cblob iterator
  // Coloration display: needs box_word (i.e. recognition results); falls
  // through to the normal flag-driven display when they are absent.
  if (color_mode != CM_RAINBOW && word_res->box_word != NULL) {
    BoxWord* box_word = word_res->box_word;
    WERD_CHOICE* best_choice = word_res->best_choice;
    int length = box_word->length();
    if (word_res->fontinfo == NULL) return false;  // no font info to color by
    const FontInfo& font_info = *word_res->fontinfo;
    for (int i = 0; i < length; ++i) {
      // Default GREEN; turn RED where the selected property holds.
      ScrollView::Color color = ScrollView::GREEN;
      switch (color_mode) {
        case CM_SUBSCRIPT:
          if (best_choice->BlobPosition(i) == SP_SUBSCRIPT)
            color = ScrollView::RED;
          break;
        case CM_SUPERSCRIPT:
          if (best_choice->BlobPosition(i) == SP_SUPERSCRIPT)
            color = ScrollView::RED;
          break;
        case CM_ITALIC:
          if (font_info.is_italic())
            color = ScrollView::RED;
          break;
        case CM_BOLD:
          if (font_info.is_bold())
            color = ScrollView::RED;
          break;
        case CM_FIXEDPITCH:
          if (font_info.is_fixed_pitch())
            color = ScrollView::RED;
          break;
        case CM_SERIF:
          if (font_info.is_serif())
            color = ScrollView::RED;
          break;
        case CM_SMALLCAPS:
          if (word_res->small_caps)
            color = ScrollView::RED;
          break;
        case CM_DROPCAPS:
          if (best_choice->BlobPosition(i) == SP_DROPCAP)
            color = ScrollView::RED;
          break;
        // TODO(rays) underline is currently completely unsupported.
        case CM_UNDERLINE:
        default:
          break;
      }
      image_win->Pen(color);
      TBOX box = box_word->BlobBox(i);
      image_win->Rectangle(box.left(), box.bottom(), box.right(), box.top());
    }
    return true;
  }
  /*
    Note the double coercions of(COLOUR)((inT32)editor_image_word_bb_color)
    etc. are to keep the compiler happy.
  */
  // display bounding box
  if (word->display_flag(DF_BOX)) {
    word->bounding_box().plot(image_win,
                              (ScrollView::Color)((inT32)
                                  editor_image_word_bb_color),
                              (ScrollView::Color)((inT32)
                                  editor_image_word_bb_color));
    ScrollView::Color c = (ScrollView::Color)
        ((inT32) editor_image_blob_bb_color);
    image_win->Pen(c);
    c_it.set_to_list(word->cblob_list());
    for (c_it.mark_cycle_pt(); !c_it.cycled_list(); c_it.forward())
      c_it.data()->bounding_box().plot(image_win);
    displayed_something = TRUE;
  }
  // display edge steps
  if (word->display_flag(DF_EDGE_STEP)) {  // edgesteps available
    word->plot(image_win);                 // rainbow colors
    displayed_something = TRUE;
  }
  // display poly approx
  if (word->display_flag(DF_POLYGONAL)) {
    // need to convert
    TWERD* tword = TWERD::PolygonalCopy(poly_allow_detailed_fx, word);
    tword->plot(image_win);
    delete tword;
    displayed_something = TRUE;
  }
  // Display correct text and blamer information.
  STRING text;
  STRING blame;
  if (word->display_flag(DF_TEXT) && word->text() != NULL) {
    text = word->text();
  }
  // Blamer display: show "truth -> best choice" plus the incorrect-result
  // reason, for words the blamer does not consider correct.
  if (word->display_flag(DF_BLAMER) &&
      !(word_res->blamer_bundle != NULL &&
        word_res->blamer_bundle->incorrect_result_reason() == IRR_CORRECT)) {
    text = "";
    const BlamerBundle *blamer_bundle = word_res->blamer_bundle;
    if (blamer_bundle == NULL) {
      text += "NULL";
    } else {
      text = blamer_bundle->TruthString();
    }
    text += " -> ";
    STRING best_choice_str;
    if (word_res->best_choice == NULL) {
      best_choice_str = "NULL";
    } else {
      word_res->best_choice->string_and_lengths(&best_choice_str, NULL);
    }
    text += best_choice_str;
    IncorrectResultReason reason = (blamer_bundle == NULL) ?
        IRR_PAGE_LAYOUT : blamer_bundle->incorrect_result_reason();
    ASSERT_HOST(reason < IRR_NUM_REASONS)
    blame += " [";
    blame += BlamerBundle::IncorrectReasonName(reason);
    blame += "]";
  }
  // Draw the text (and blame line, if any) near the word, with the text
  // height capped at 20 pixels.
  if (text.length() > 0) {
    word_bb = word->bounding_box();
    image_win->Pen(ScrollView::RED);
    word_height = word_bb.height();
    int text_height = 0.50 * word_height;
    if (text_height > 20) text_height = 20;
    image_win->TextAttributes("Arial", text_height, false, false, false);
    shift = (word_height < word_bb.width()) ? 0.25 * word_height : 0.0f;
    image_win->Text(word_bb.left() + shift,
                    word_bb.bottom() + 0.25 * word_height, text.string());
    if (blame.length() > 0) {
      image_win->Text(word_bb.left() + shift,
                      word_bb.bottom() + 0.25 * word_height - text_height,
                      blame.string());
    }
    displayed_something = TRUE;
  }
  if (!displayed_something)  // display BBox anyway
    word->bounding_box().plot(image_win,
                              (ScrollView::Color)((inT32)
                                  editor_image_word_bb_color),
                              (ScrollView::Color)((inT32)
                                  editor_image_word_bb_color));
  return TRUE;
}
#endif // GRAPHICS_DISABLED
/**
* word_dumper()
*
* Dump members to the debug window
*/
// Word processor: dumps the block, row and word structures for the word
// under the iterator to the debug output. Always returns TRUE.
BOOL8 Tesseract::word_dumper(PAGE_RES_IT* pr_it) {
  if (pr_it->block()->block != NULL) {
    tprintf("\nBlock data...\n");
    pr_it->block()->block->print(NULL, FALSE);
  }
  tprintf("\nRow data...\n");
  pr_it->row()->row->print(NULL);
  tprintf("\nWord data...\n");
  WERD_RES* word_res = pr_it->word();
  word_res->word->print();
  // When blamer debugging is on, also show why the word was blamed.
  if (word_res->blamer_bundle != NULL && wordrec_debug_blamer &&
      word_res->blamer_bundle->incorrect_result_reason() != IRR_CORRECT) {
    tprintf("Current blamer debug: %s\n",
            word_res->blamer_bundle->debug().string());
  }
  return TRUE;
}
#ifndef GRAPHICS_DISABLED
/**
* word_set_display() Word processor
*
* Display word according to current display mode settings
*/
// Word processor: copies each global display-mode bit onto the word's own
// display flags, then draws it via word_display().
BOOL8 Tesseract::word_set_display(PAGE_RES_IT* pr_it) {
  WERD* word = pr_it->word()->word;
  word->set_display_flag(DF_BOX, word_display_mode.bit(DF_BOX));
  word->set_display_flag(DF_TEXT, word_display_mode.bit(DF_TEXT));
  word->set_display_flag(DF_POLYGONAL, word_display_mode.bit(DF_POLYGONAL));
  word->set_display_flag(DF_EDGE_STEP, word_display_mode.bit(DF_EDGE_STEP));
  word->set_display_flag(DF_BN_POLYGONAL,
                         word_display_mode.bit(DF_BN_POLYGONAL));
  word->set_display_flag(DF_BLAMER, word_display_mode.bit(DF_BLAMER));
  return word_display(pr_it);
}
// page_res is non-const because the iterator doesn't know if you are going
// to change the items it points to! Really a const here though.
// Builds a temporary pseudo word from the blobs inside selection_box,
// extracts its classifier features and renders them in two feature-space
// windows (baseline- and character-normalized). The pseudo word is removed
// from the page again before returning.
void Tesseract::blob_feature_display(PAGE_RES* page_res,
                                     const TBOX& selection_box) {
  // NULL iterator means nothing was selected.
  PAGE_RES_IT* it = make_pseudo_word(page_res, selection_box);
  if (it != NULL) {
    WERD_RES* word_res = it->word();
    word_res->x_height = it->row()->row->x_height();
    word_res->SetupForRecognition(unicharset, this, BestPix(),
                                  tessedit_ocr_engine_mode, NULL,
                                  classify_bln_numeric_mode,
                                  textord_use_cjk_fp_model,
                                  poly_allow_detailed_fx,
                                  it->row()->row, it->block()->block);
    // Extract features from the first blob of the pseudo word.
    TWERD* bln_word = word_res->chopped_word;
    TBLOB* bln_blob = bln_word->blobs[0];
    INT_FX_RESULT_STRUCT fx_info;
    GenericVector<INT_FEATURE_STRUCT> bl_features;
    GenericVector<INT_FEATURE_STRUCT> cn_features;
    Classify::ExtractFeatures(*bln_blob, classify_nonlinear_norm, &bl_features,
                              &cn_features, &fx_info, NULL);
    // Display baseline features.
    ScrollView* bl_win = CreateFeatureSpaceWindow("BL Features", 512, 0);
    ClearFeatureSpaceWindow(baseline, bl_win);
    for (int f = 0; f < bl_features.size(); ++f)
      RenderIntFeature(bl_win, &bl_features[f], ScrollView::GREEN);
    bl_win->Update();
    // Display cn features.
    ScrollView* cn_win = CreateFeatureSpaceWindow("CN Features", 512, 0);
    ClearFeatureSpaceWindow(character, cn_win);
    for (int f = 0; f < cn_features.size(); ++f)
      RenderIntFeature(cn_win, &cn_features[f], ScrollView::GREEN);
    cn_win->Update();
    // The pseudo word is temporary: remove it from the page results.
    it->DeleteCurrentWord();
    delete it;
  }
}
#endif // GRAPHICS_DISABLED
} // namespace tesseract
| C++ |
/******************************************************************
* File: cube_control.cpp
* Description: Tesseract class methods for invoking cube convolutional
* neural network word recognizer.
* Author: Raquel Romano
* Created: September 2009
*
**********************************************************************/
// Include automatically generated configuration file if running autoconf.
#ifdef HAVE_CONFIG_H
#include "config_auto.h"
#endif
#include "allheaders.h"
#include "cube_object.h"
#include "cube_reco_context.h"
#include "tesseractclass.h"
#include "tesseract_cube_combiner.h"
namespace tesseract {
/**********************************************************************
* convert_prob_to_tess_certainty
*
* Normalize a probability in the range [0.0, 1.0] to a tesseract
* certainty in the range [-20.0, 0.0]
**********************************************************************/
// Linearly maps a probability in [0.0, 1.0] onto tesseract's certainty
// range [-20.0, 0.0]: probability 1.0 becomes certainty 0.0 and
// probability 0.0 becomes -20.0.
static float convert_prob_to_tess_certainty(float prob) {
  // Arithmetic intentionally stays in double (as the original literals
  // forced) so results are bit-identical.
  const double kCertaintyScale = 20.0;
  return (prob - 1.0) * kCertaintyScale;
}
/**********************************************************************
* char_box_to_tbox
*
* Create a TBOX from a character bounding box. If nonzero, the
* x_offset accounts for any additional padding of the word box that
* should be taken into account.
*
**********************************************************************/
// Builds a TBOX (bottom-up page coordinates) from a leptonica character
// box expressed in word-local, top-down coordinates. x_offset, if nonzero,
// removes padding that was added to the word image before recognition.
TBOX char_box_to_tbox(Box* char_box, TBOX word_box, int x_offset) {
  l_int32 box_left;
  l_int32 box_top;
  l_int32 box_width;
  l_int32 box_height;
  boxGetGeometry(char_box, &box_left, &box_top, &box_width, &box_height);
  // Shift the left edge into page coordinates, undoing the padding offset.
  const l_int32 left = box_left + word_box.left() - x_offset;
  const l_int32 right = left + box_width;
  // Flip y: leptonica measures top from the image top, TBOX from the bottom.
  const l_int32 top = word_box.bottom() + word_box.height() - box_top;
  const l_int32 bottom = top - box_height;
  return TBOX(left, bottom, right, top);
}
/**********************************************************************
* extract_cube_state
*
* Extract CharSamp objects and character bounding boxes from the
 * CubeObject's state. The caller should free both structures.
*
**********************************************************************/
// Extracts the CharSamp array and character bounding boxes from the
// CubeObject's most recent recognition state. Returns false (without
// touching the outputs) if the cube object or either of its search
// objects is missing, or if backtracking produces no samples.
bool Tesseract::extract_cube_state(CubeObject* cube_obj,
                                   int* num_chars,
                                   Boxa** char_boxes,
                                   CharSamp*** char_samples) {
  // Guard: nothing to extract from a missing cube object.
  if (!cube_obj) {
    if (cube_debug_level > 0) {
      tprintf("Cube WARNING (extract_cube_state): Invalid cube object "
              "passed to extract_cube_state\n");
    }
    return false;
  }
  // Note that the CubeObject accessors return either the deslanted or
  // regular variant of the search object / beam search object, whichever
  // was used in the last call to Recognize().
  CubeSearchObject* cube_search_obj = cube_obj->SrchObj();
  if (!cube_search_obj) {
    if (cube_debug_level > 0) {
      tprintf("Cube WARNING (Extract_cube_state): Could not retrieve "
              "cube's search object in extract_cube_state.\n");
    }
    return false;
  }
  BeamSearch *beam_search_obj = cube_obj->BeamObj();
  if (!beam_search_obj) {
    if (cube_debug_level > 0) {
      tprintf("Cube WARNING (Extract_cube_state): Could not retrieve "
              "cube's beam search object in extract_cube_state.\n");
    }
    return false;
  }
  // Get the character samples and bounding boxes by backtracking
  // through the best (presorted) beam search path.
  int best_node_index = beam_search_obj->BestPresortedNodeIndex();
  *char_samples = beam_search_obj->BackTrack(
      cube_search_obj, best_node_index, num_chars, NULL, char_boxes);
  if (!*char_samples)
    return false;
  return true;
}
/**********************************************************************
* create_cube_box_word
*
* Fill the given BoxWord with boxes from character bounding
* boxes. The char_boxes have local coordinates w.r.t. the
* word bounding box, i.e., the left-most character bbox of each word
* has (0,0) left-top coord, but the BoxWord must be defined in page
* coordinates.
**********************************************************************/
// Fills box_word with page-coordinate boxes built from cube's word-local
// character boxes. Returns false only if box_word is NULL.
bool Tesseract::create_cube_box_word(Boxa *char_boxes,
                                     int num_chars,
                                     TBOX word_box,
                                     BoxWord* box_word) {
  if (!box_word) {
    if (cube_debug_level > 0) {
      tprintf("Cube WARNING (create_cube_box_word): Invalid box_word.\n");
    }
    return false;
  }
  // The word image may have been padded before recognition, so the
  // left-most character box need not start at x == 0. Find its x position
  // so it can be subtracted out below.
  int leftmost_x = -1;
  for (int i = 0; i < num_chars; ++i) {
    Box* box = boxaGetBox(char_boxes, i, L_CLONE);
    if (leftmost_x < 0 || box->x < leftmost_x)
      leftmost_x = box->x;
    boxDestroy(&box);
  }
  // Convert each local character box into page coordinates and insert it.
  for (int i = 0; i < num_chars; ++i) {
    Box* box = boxaGetBox(char_boxes, i, L_CLONE);
    TBOX char_tbox = char_box_to_tbox(box, word_box, leftmost_x);
    boxDestroy(&box);
    box_word->InsertBox(i, char_tbox);
  }
  return true;
}
/**********************************************************************
* init_cube_objects
*
* Instantiates Tesseract object's CubeRecoContext and TesseractCubeCombiner.
* Returns false if cube context could not be created or if load_combiner is
* true, but the combiner could not be loaded.
**********************************************************************/
bool Tesseract::init_cube_objects(bool load_combiner,
TessdataManager *tessdata_manager) {
ASSERT_HOST(cube_cntxt_ == NULL);
ASSERT_HOST(tess_cube_combiner_ == NULL);
// Create the cube context object
cube_cntxt_ = CubeRecoContext::Create(this, tessdata_manager, &unicharset);
if (cube_cntxt_ == NULL) {
if (cube_debug_level > 0) {
tprintf("Cube WARNING (Tesseract::init_cube_objects()): Failed to "
"instantiate CubeRecoContext\n");
}
return false;
}
// Create the combiner object and load the combiner net for target languages.
if (load_combiner) {
tess_cube_combiner_ = new tesseract::TesseractCubeCombiner(cube_cntxt_);
if (!tess_cube_combiner_ || !tess_cube_combiner_->LoadCombinerNet()) {
delete cube_cntxt_;
cube_cntxt_ = NULL;
if (tess_cube_combiner_ != NULL) {
delete tess_cube_combiner_;
tess_cube_combiner_ = NULL;
}
if (cube_debug_level > 0)
tprintf("Cube ERROR (Failed to instantiate TesseractCubeCombiner\n");
return false;
}
}
return true;
}
/**********************************************************************
* run_cube_combiner
*
* Iterates through tesseract's results and calls cube on each word,
* combining the results with the existing tesseract result.
**********************************************************************/
// Iterates through tesseract's word results and, for each word whose
// tesseract certainty falls below the combiner-run threshold, recognizes
// the word with cube and combines the two results in place.
void Tesseract::run_cube_combiner(PAGE_RES *page_res) {
  if (page_res == NULL || tess_cube_combiner_ == NULL)
    return;
  // The threshold does not depend on the word, so compute it once instead
  // of every iteration. Kept as float: the previous int declaration
  // truncated the converted certainty (e.g. -10.5 -> -10), subtly shifting
  // the cutoff.
  const float combiner_run_thresh = convert_prob_to_tess_certainty(
      cube_cntxt_->Params()->CombinerRunThresh());
  PAGE_RES_IT page_res_it(page_res);
  // Iterate through the word results and call cube on each word.
  for (page_res_it.restart_page(); page_res_it.word() != NULL;
       page_res_it.forward()) {
    BLOCK* block = page_res_it.block()->block;
    if (block->poly_block() != NULL && !block->poly_block()->IsText())
      continue;  // Don't deal with non-text blocks.
    WERD_RES* word = page_res_it.word();
    // Skip cube entirely if tesseract's certainty is greater than threshold.
    if (word->best_choice->certainty() >= combiner_run_thresh) {
      continue;
    }
    // Use the same language as Tesseract used for the word.
    Tesseract* lang_tess = word->tesseract;
    // Setup a trial WERD_RES in which to classify with cube.
    WERD_RES cube_word;
    cube_word.InitForRetryRecognition(*word);
    cube_word.SetupForRecognition(lang_tess->unicharset, this, BestPix(),
                                  OEM_CUBE_ONLY,
                                  NULL, false, false, false,
                                  page_res_it.row()->row,
                                  page_res_it.block()->block);
    CubeObject *cube_obj = lang_tess->cube_recognize_word(
        page_res_it.block()->block, &cube_word);
    if (cube_obj != NULL)
      lang_tess->cube_combine_word(cube_obj, &cube_word, word);
    delete cube_obj;
  }
}
/**********************************************************************
* cube_word_pass1
*
* Recognizes a single word using (only) cube. Compatible with
* Tesseract's classify_word_pass1/classify_word_pass2.
**********************************************************************/
// Recognizes a single word using only cube; signature-compatible with
// classify_word_pass1/classify_word_pass2. The row parameter is unused.
void Tesseract::cube_word_pass1(BLOCK* block, ROW *row, WERD_RES *word) {
  // The returned CubeObject is only needed by the combiner, which is not
  // involved here, so discard it immediately (delete of NULL is a no-op).
  delete cube_recognize_word(block, word);
}
/**********************************************************************
* cube_recognize_word
*
* Cube recognizer to recognize a single word as with classify_word_pass1
* but also returns the cube object in case the combiner is needed.
**********************************************************************/
// Recognizes a single word with cube, as classify_word_pass1 would, and
// returns the CubeObject so the combiner can reuse it. Returns NULL (and
// sets up a fake result on word) when cube cannot process the word.
CubeObject* Tesseract::cube_recognize_word(BLOCK* block, WERD_RES* word) {
  if (!cube_binary_ || !cube_cntxt_) {
    // Message fixed: it previously claimed to come from run_cube().
    if (cube_debug_level > 0 && !cube_binary_)
      tprintf("Tesseract::cube_recognize_word(): NULL binary image.\n");
    word->SetupFake(unicharset);
    return NULL;
  }
  TBOX word_box = word->word->bounding_box();
  if (block != NULL && (block->re_rotation().x() != 1.0f ||
                        block->re_rotation().y() != 0.0f)) {
    // TODO(rays) We have to rotate the bounding box to get the true coords.
    // This will be achieved in the future via DENORM.
    // In the mean time, cube can't process this word.
    if (cube_debug_level > 0) {
      tprintf("Cube can't process rotated word at:");
      word_box.print();
    }
    word->SetupFake(unicharset);
    return NULL;
  }
  // Cube works in top-down image coordinates, hence the flipped y origin.
  CubeObject* cube_obj = new tesseract::CubeObject(
      cube_cntxt_, cube_binary_, word_box.left(),
      pixGetHeight(cube_binary_) - word_box.top(),
      word_box.width(), word_box.height());
  if (!cube_recognize(cube_obj, block, word)) {
    delete cube_obj;
    return NULL;
  }
  return cube_obj;
}
/**********************************************************************
* cube_combine_word
*
* Combines the cube and tesseract results for a single word, leaving the
* result in tess_word.
**********************************************************************/
// Combines the cube and tesseract results for a single word, leaving the
// winning result in tess_word.
void Tesseract::cube_combine_word(CubeObject* cube_obj, WERD_RES* cube_word,
                                  WERD_RES* tess_word) {
  // Ask the combiner classifier how strongly it favors tesseract's answer.
  float combiner_prob = tess_cube_combiner_->CombineResults(tess_word,
                                                            cube_obj);
  // If combiner probability is greater than tess/cube combiner
  // classifier threshold, i.e. tesseract wins, then just return the
  // tesseract result unchanged, as the combiner knows nothing about how
  // correct the answer is. If cube and tesseract agree, then improve the
  // scores before returning.
  WERD_CHOICE* tess_best = tess_word->best_choice;
  WERD_CHOICE* cube_best = cube_word->best_choice;
  if (cube_debug_level || classify_debug_level) {
    tprintf("Combiner prob = %g vs threshold %g\n",
            combiner_prob, cube_cntxt_->Params()->CombinerClassifierThresh());
  }
  if (combiner_prob >=
      cube_cntxt_->Params()->CombinerClassifierThresh()) {
    if (tess_best->unichar_string() == cube_best->unichar_string()) {
      // Cube and tess agree, so improve the scores.
      tess_best->set_rating(tess_best->rating() / 2);
      tess_best->set_certainty(tess_best->certainty() / 2);
    }
    return;
  }
  // Cube wins.
  // It is better for the language combiner to have all tesseract scores,
  // so put them in the cube result.
  cube_best->set_rating(tess_best->rating());
  cube_best->set_certainty(tess_best->certainty());
  if (cube_debug_level || classify_debug_level) {
    tprintf("Cube INFO: tesseract result replaced by cube: %s -> %s\n",
            tess_best->unichar_string().string(),
            cube_best->unichar_string().string());
  }
  // Move cube's result (choice, box_word, etc.) into tess_word.
  tess_word->ConsumeWordResults(cube_word);
}
/**********************************************************************
* cube_recognize
*
* Call cube on the current word, and write the result to word.
* Sets up a fake result and returns false if something goes wrong.
**********************************************************************/
// Calls cube on the given word and writes the result into word.
// On any failure a fake result is set up on word and false is returned.
bool Tesseract::cube_recognize(CubeObject *cube_obj, BLOCK* block,
                               WERD_RES *word) {
  // Run cube on the word image.
  WordAltList *cube_alt_list = cube_obj->RecognizeWord();
  if (!cube_alt_list || cube_alt_list->AltCount() <= 0) {
    if (cube_debug_level > 0) {
      tprintf("Cube returned nothing for word at:");
      word->word->bounding_box().print();
    }
    word->SetupFake(unicharset);
    return false;
  }
  // Get cube's best result and its probability, mapped to tesseract's
  // certainty range.
  char_32 *cube_best_32 = cube_alt_list->Alt(0);
  double cube_prob = CubeUtils::Cost2Prob(cube_alt_list->AltCost(0));
  float cube_certainty = convert_prob_to_tess_certainty(cube_prob);
  string cube_best_str;
  CubeUtils::UTF32ToUTF8(cube_best_32, &cube_best_str);
  // Retrieve Cube's character bounding boxes and CharSamples,
  // corresponding to the most recent call to RecognizeWord().
  Boxa *char_boxes = NULL;
  CharSamp **char_samples = NULL;
  int num_chars;
  // BUG FIX: the failure path must be taken regardless of the debug level.
  // Previously the early-return was guarded by "&& cube_debug_level > 0",
  // so with debugging off an extraction failure fell through and
  // dereferenced the NULL char_boxes/char_samples below.
  if (!extract_cube_state(cube_obj, &num_chars, &char_boxes, &char_samples)) {
    if (cube_debug_level > 0) {
      tprintf("Cube WARNING (Tesseract::cube_recognize): Cannot extract "
              "cube state.\n");
    }
    word->SetupFake(unicharset);
    return false;
  }
  // Convert cube's character bounding boxes to a BoxWord.
  BoxWord cube_box_word;
  TBOX tess_word_box = word->word->bounding_box();
  if (word->denorm.block() != NULL)
    tess_word_box.rotate(word->denorm.block()->re_rotation());
  bool box_word_success = create_cube_box_word(char_boxes, num_chars,
                                               tess_word_box,
                                               &cube_box_word);
  boxaDestroy(&char_boxes);
  if (!box_word_success) {
    if (cube_debug_level > 0) {
      tprintf("Cube WARNING (Tesseract::cube_recognize): Could not "
              "create cube BoxWord\n");
    }
    // Free the samples array (previously leaked on this path).
    delete [] char_samples;
    word->SetupFake(unicharset);
    return false;
  }
  // Fill tesseract result's fields with cube results.
  fill_werd_res(cube_box_word, cube_best_str.c_str(), word);
  // Create cube's best choice: one fake BLOB_CHOICE per output character.
  BLOB_CHOICE** choices = new BLOB_CHOICE*[num_chars];
  for (int i = 0; i < num_chars; ++i) {
    UNICHAR_ID uch_id =
        cube_cntxt_->CharacterSet()->UnicharID(char_samples[i]->StrLabel());
    choices[i] = new BLOB_CHOICE(uch_id, -cube_certainty, cube_certainty,
                                 -1, -1, 0, 0, 0, 0, BCC_STATIC_CLASSIFIER);
  }
  word->FakeClassifyWord(num_chars, choices);
  // within a word, cube recognizes the word in reading order.
  word->best_choice->set_unichars_in_script_order(true);
  delete [] choices;
  // NOTE(review): only the array is freed, not the CharSamp objects it
  // points to -- presumably they are owned by the beam search; confirm.
  delete [] char_samples;
  // Some sanity checks
  ASSERT_HOST(word->best_choice->length() == word->reject_map.length());
  if (cube_debug_level || classify_debug_level) {
    tprintf("Cube result: %s r=%g, c=%g\n",
            word->best_choice->unichar_string().string(),
            word->best_choice->rating(),
            word->best_choice->certainty());
  }
  return true;
}
/**********************************************************************
* fill_werd_res
*
* Fill Tesseract's word result fields with cube's.
*
**********************************************************************/
// Copies cube's box_word and best string into tess_werd_res, marking the
// word as recognized (not failed) and done.
void Tesseract::fill_werd_res(const BoxWord& cube_box_word,
                              const char* cube_best_str,
                              WERD_RES* tess_werd_res) {
  // Replace tesseract's box_word with a copy of cube's, clipped back to
  // the original word image.
  delete tess_werd_res->box_word;
  tess_werd_res->box_word = new BoxWord(cube_box_word);
  tess_werd_res->box_word->ClipToOriginalWord(tess_werd_res->denorm.block(),
                                              tess_werd_res->word);
  // Fill text and remaining fields
  tess_werd_res->word->set_text(cube_best_str);
  tess_werd_res->tess_failed = FALSE;
  tess_werd_res->tess_accepted = tess_acceptable_word(tess_werd_res);
  // There is no output word, so we can't call AdaptableWord, but then I don't
  // think we need to. Fudge the result with accepted.
  tess_werd_res->tess_would_adapt = tess_werd_res->tess_accepted;
  // Set word to done, i.e., ignore all of tesseract's tests for rejection
  tess_werd_res->done = tess_werd_res->tess_accepted;
}
} // namespace tesseract
| C++ |
/**********************************************************************
* File: fixxht.cpp (Formerly fixxht.c)
* Description: Improve x_ht and look out for case inconsistencies
* Author: Phil Cheatle
* Created: Thu Aug 5 14:11:08 BST 1993
*
* (C) Copyright 1992, Hewlett-Packard Ltd.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
#include <string.h>
#include <ctype.h>
#include "params.h"
#include "float2int.h"
#include "tesseractclass.h"
namespace tesseract {
// Fixxht overview.
// Premise: Initial estimate of x-height is adequate most of the time, but
// occasionally it is incorrect. Most notable causes of failure are:
// 1. Small caps, where the top of the caps is the same as the body text
// xheight. For small caps words the xheight needs to be reduced to correctly
// recognize the caps in the small caps word.
// 2. All xheight lines, such as summer. Here the initial estimate will have
// guessed that the blob tops are caps and will have placed the xheight too low.
// 3. Noise/logos beside words, or changes in font size on a line. Such
// things can blow the statistics and cause an incorrect estimate.
//
// Algorithm.
// Compare the vertical position (top only) of alphanumerics in a word with
// the range of positions in training data (in the unicharset).
// See CountMisfitTops. If any characters disagree sufficiently with the
// initial xheight estimate, then recalculate the xheight, re-run OCR on
// the word, and if the number of vertical misfits goes down, along with
// either the word rating or certainty, then keep the new xheight.
// The new xheight is calculated as follows (see ComputeCompatibleXheight):
// For each alphanumeric character that has a vertically misplaced top
// (a misfit), yet its bottom is within the acceptable range (ie it is not
// likely a sub-or super-script) calculate the range of acceptable xheight
// positions from its range of tops, and give each value in the range a
// number of votes equal to the distance of its top from its acceptance range.
// The x-height position with the median of the votes becomes the new
// x-height. This assumes that most characters will be correctly recognized
// even if the x-height is incorrect. This is not a terrible assumption, but
// it is not great. An improvement would be to use a classifier that does
// not care about vertical position or scaling at all.
// If the max-min top of a unicharset char is bigger than kMaxCharTopRange
// then the char top cannot be used to judge misfits or suggest a new top.
const int kMaxCharTopRange = 48;
// Returns the number of misfit blob tops in this word.
// Returns the number of alphanumeric blobs in word_res whose top lies
// outside the trained top range for the recognized class (a "misfit"),
// allowing x_ht_acceptance_tolerance of slack on either side.
int Tesseract::CountMisfitTops(WERD_RES *word_res) {
  int misfit_count = 0;
  const int num_blobs = word_res->rebuild_word->NumBlobs();
  for (int b = 0; b < num_blobs; ++b) {
    TBLOB* blob = word_res->rebuild_word->blobs[b];
    const UNICHAR_ID class_id = word_res->best_choice->unichar_id(b);
    // Only alphanumerics carry usable top statistics.
    if (!unicharset.get_isalpha(class_id) && !unicharset.get_isdigit(class_id))
      continue;
    int top = blob->bounding_box().top();
    if (top >= INT_FEAT_RANGE)
      top = INT_FEAT_RANGE - 1;  // Clip to normalized feature space.
    int min_bottom, max_bottom, min_top, max_top;
    unicharset.get_top_bottom(class_id, &min_bottom, &max_bottom,
                              &min_top, &max_top);
    // A wild trained top range cannot be used to judge a misfit.
    if (max_top - min_top > kMaxCharTopRange)
      continue;
    const bool is_misfit = top < min_top - x_ht_acceptance_tolerance ||
                           top > max_top + x_ht_acceptance_tolerance;
    if (is_misfit)
      ++misfit_count;
    if (debug_x_ht_level >= 1) {
      tprintf("Class %s is %s with top %d vs limits of %d->%d, +/-%d\n",
              unicharset.id_to_unichar(class_id),
              is_misfit ? "Misfit" : "OK", top, min_top, max_top,
              static_cast<int>(x_ht_acceptance_tolerance));
    }
  }
  return misfit_count;
}
// Returns a new x-height maximally compatible with the result in word_res.
// See comment above for overall algorithm.
float Tesseract::ComputeCompatibleXheight(WERD_RES *word_res) {
  // Histogram of candidate x-height positions; each candidate is weighted
  // by how badly the contributing blob's top misfits its trained range.
  STATS top_stats(0, MAX_UINT8);
  int num_blobs = word_res->rebuild_word->NumBlobs();
  for (int blob_id = 0; blob_id < num_blobs; ++blob_id) {
    TBLOB* blob = word_res->rebuild_word->blobs[blob_id];
    UNICHAR_ID class_id = word_res->best_choice->unichar_id(blob_id);
    // Only alphanumerics contribute; other classes have unreliable tops.
    if (unicharset.get_isalpha(class_id) || unicharset.get_isdigit(class_id)) {
      int top = blob->bounding_box().top();
      // Clip the top to the limit of normalized feature space.
      if (top >= INT_FEAT_RANGE)
        top = INT_FEAT_RANGE - 1;
      int bottom = blob->bounding_box().bottom();
      int min_bottom, max_bottom, min_top, max_top;
      unicharset.get_top_bottom(class_id, &min_bottom, &max_bottom,
                                &min_top, &max_top);
      // Chars with a wild top range would mess up the result so ignore them.
      if (max_top - min_top > kMaxCharTopRange)
        continue;
      // Distance by which the actual top lies outside the
      // tolerance-extended [min_top, max_top] band; <= 0 means it fits.
      int misfit_dist = MAX((min_top - x_ht_acceptance_tolerance) - top,
                            top - (max_top + x_ht_acceptance_tolerance));
      int height = top - kBlnBaselineOffset;
      if (debug_x_ht_level >= 20) {
        tprintf("Class %s: height=%d, bottom=%d,%d top=%d,%d, actual=%d,%d : ",
                unicharset.id_to_unichar(class_id),
                height, min_bottom, max_bottom, min_top, max_top,
                bottom, top);
      }
      // Use only chars that fit in the expected bottom range, and where
      // the range of tops is sensibly near the xheight.
      if (min_bottom <= bottom + x_ht_acceptance_tolerance &&
          bottom - x_ht_acceptance_tolerance <= max_bottom &&
          min_top > kBlnBaselineOffset &&
          max_top - kBlnBaselineOffset >= kBlnXHeight &&
          misfit_dist > 0) {
        // Compute the x-height position using proportionality between the
        // actual height and expected height.
        int min_xht = DivRounded(height * kBlnXHeight,
                                 max_top - kBlnBaselineOffset);
        int max_xht = DivRounded(height * kBlnXHeight,
                                 min_top - kBlnBaselineOffset);
        if (debug_x_ht_level >= 20) {
          tprintf(" xht range min=%d, max=%d\n",
                  min_xht, max_xht);
        }
        // The range of expected heights gets a vote equal to the distance
        // of the actual top from the expected top.
        for (int y = min_xht; y <= max_xht; ++y)
          top_stats.add(y, misfit_dist);
      } else if (debug_x_ht_level >= 20) {
        tprintf(" already OK\n");
      }
    }
  }
  // No usable evidence: 0.0f signals "keep the current x-height".
  if (top_stats.get_total() == 0)
    return 0.0f;
  // The new xheight is just the median vote, which is then scaled out
  // of BLN space back to pixel space to get the x-height in pixel space.
  float new_xht = top_stats.median();
  if (debug_x_ht_level >= 20) {
    tprintf("Median xht=%f\n", new_xht);
    tprintf("Mode20:A: New x-height = %f (norm), %f (orig)\n",
            new_xht, new_xht / word_res->denorm.y_scale());
  }
  // The xheight must change by at least x_ht_min_change to be used.
  if (fabs(new_xht - kBlnXHeight) >= x_ht_min_change)
    return new_xht / word_res->denorm.y_scale();
  else
    return 0.0f;
}
} // namespace tesseract
| C++ |
///////////////////////////////////////////////////////////////////////
// File: resultiterator.h
// Description: Iterator for tesseract results that is capable of
// iterating in proper reading order over Bi Directional
// (e.g. mixed Hebrew and English) text.
// Author: David Eger
// Created: Fri May 27 13:58:06 PST 2011
//
// (C) Copyright 2011, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#ifndef TESSERACT_CCMAIN_RESULT_ITERATOR_H__
#define TESSERACT_CCMAIN_RESULT_ITERATOR_H__
#include "platform.h"
#include "ltrresultiterator.h"
template <typename T> class GenericVector;
template <typename T> class GenericVectorEqEq;
class BLOB_CHOICE_IT;
class WERD_RES;
class STRING;
namespace tesseract {
class Tesseract;
/**
 * Iterator over tesseract results that yields text in proper reading order,
 * including Bi-Directional (e.g. mixed Hebrew and English) text.
 */
class TESS_API ResultIterator : public LTRResultIterator {
 public:
  /** Returns a new iterator rewound to the start of resit's paragraph. */
  static ResultIterator *StartOfParagraph(const LTRResultIterator &resit);
  /**
   * ResultIterator is copy constructible!
   * The default copy constructor works just fine for us.
   */
  virtual ~ResultIterator() {}
  // ============= Moving around within the page ============.
  /**
   * Moves the iterator to point to the start of the page to begin
   * an iteration.
   */
  virtual void Begin();
  /**
   * Moves to the start of the next object at the given level in the
   * page hierarchy in the appropriate reading order and returns false if
   * the end of the page was reached.
   * NOTE that RIL_SYMBOL will skip non-text blocks, but all other
   * PageIteratorLevel level values will visit each non-text block once.
   * Think of non text blocks as containing a single para, with a single line,
   * with a single imaginary word.
   * Calls to Next with different levels may be freely intermixed.
   * This function iterates words in right-to-left scripts correctly, if
   * the appropriate language has been loaded into Tesseract.
   */
  virtual bool Next(PageIteratorLevel level);
  /**
   * IsAtBeginningOf() returns whether we're at the logical beginning of the
   * given level. (as opposed to ResultIterator's left-to-right top-to-bottom
   * order). Otherwise, this acts the same as PageIterator::IsAtBeginningOf().
   * For a full description, see pageiterator.h
   */
  virtual bool IsAtBeginningOf(PageIteratorLevel level) const;
  /**
   * Implement PageIterator's IsAtFinalElement correctly in a BiDi context.
   * For instance, IsAtFinalElement(RIL_PARA, RIL_WORD) returns whether we
   * point at the last word in a paragraph. See PageIterator for full comment.
   */
  virtual bool IsAtFinalElement(PageIteratorLevel level,
                                PageIteratorLevel element) const;
  // ============= Accessing data ==============.
  /**
   * Returns the null terminated UTF-8 encoded text string for the current
   * object at the given level. Use delete [] to free after use.
   */
  virtual char* GetUTF8Text(PageIteratorLevel level) const;
  /**
   * Return whether the current paragraph's dominant reading direction
   * is left-to-right (as opposed to right-to-left).
   */
  bool ParagraphIsLtr() const;
  // ============= Exposed only for testing =============.
  /**
   * Yields the reading order as a sequence of indices and (optional)
   * meta-marks for a set of words (given left-to-right).
   * The meta marks are passed as negative values:
   * kMinorRunStart Start of minor direction text.
   * kMinorRunEnd End of minor direction text.
   * kComplexWord The next indexed word contains both left-to-right and
   * right-to-left characters and was treated as neutral.
   *
   * For example, suppose we have five words in a text line,
   * indexed [0,1,2,3,4] from the leftmost side of the text line.
   * The following are all believable reading_orders:
   *
   * Left-to-Right (in ltr paragraph):
   * { 0, 1, 2, 3, 4 }
   * Left-to-Right (in rtl paragraph):
   * { kMinorRunStart, 0, 1, 2, 3, 4, kMinorRunEnd }
   * Right-to-Left (in rtl paragraph):
   * { 4, 3, 2, 1, 0 }
   * Left-to-Right except for an RTL phrase in words 2, 3 in an ltr paragraph:
   * { 0, 1, kMinorRunStart, 3, 2, kMinorRunEnd, 4 }
   */
  static void CalculateTextlineOrder(
      bool paragraph_is_ltr,
      const GenericVector<StrongScriptDirection> &word_dirs,
      GenericVectorEqEq<int> *reading_order);
  /** Negative meta-mark values used in reading orders; see above. */
  static const int kMinorRunStart;
  static const int kMinorRunEnd;
  static const int kComplexWord;
 protected:
  /**
   * We presume the data associated with the given iterator will outlive us.
   * NB: This is not public because it does something that is non-obvious:
   * it resets to the beginning of the paragraph instead of staying wherever
   * resit might have pointed.
   */
  TESS_LOCAL explicit ResultIterator(const LTRResultIterator &resit);
 private:
  /**
   * Calculates the current paragraph's dominant writing direction.
   * Typically, members should use current_paragraph_ltr_ instead.
   */
  bool CurrentParagraphIsLtr() const;
  /**
   * Returns word indices as measured from resit->RestartRow() = index 0
   * for the reading order of words within a textline given an iterator
   * into the middle of the text line.
   * In addition to non-negative word indices, the following negative values
   * may be inserted:
   * kMinorRunStart Start of minor direction text.
   * kMinorRunEnd End of minor direction text.
   * kComplexWord The previous word contains both left-to-right and
   * right-to-left characters and was treated as neutral.
   */
  void CalculateTextlineOrder(bool paragraph_is_ltr,
                              const LTRResultIterator &resit,
                              GenericVectorEqEq<int> *indices) const;
  /** Same as above, but the caller's ssd gets filled in if ssd != NULL. */
  void CalculateTextlineOrder(bool paragraph_is_ltr,
                              const LTRResultIterator &resit,
                              GenericVector<StrongScriptDirection> *ssd,
                              GenericVectorEqEq<int> *indices) const;
  /**
   * What is the index of the current word in a strict left-to-right reading
   * of the row?
   */
  int LTRWordIndex() const;
  /**
   * Given an iterator pointing at a word, returns the logical reading order
   * of blob indices for the word.
   */
  void CalculateBlobOrder(GenericVector<int> *blob_indices) const;
  /** Precondition: current_paragraph_is_ltr_ is set. */
  void MoveToLogicalStartOfTextline();
  /**
   * Precondition: current_paragraph_is_ltr_ and in_minor_direction_
   * are set.
   */
  void MoveToLogicalStartOfWord();
  /** Are we pointing at the final (reading order) symbol of the word? */
  bool IsAtFinalSymbolOfWord() const;
  /** Are we pointing at the first (reading order) symbol of the word? */
  bool IsAtFirstSymbolOfWord() const;
  /**
   * Append any extra marks that should be appended to this word when printed.
   * Mostly, these are Unicode BiDi control characters.
   */
  void AppendSuffixMarks(STRING *text) const;
  /** Appends the current word in reading order to the given buffer.*/
  void AppendUTF8WordText(STRING *text) const;
  /**
   * Appends the text of the current text line, *assuming this iterator is
   * positioned at the beginning of the text line* This function
   * updates the iterator to point to the first position past the text line.
   * Each textline is terminated in a single newline character.
   * If the textline ends a paragraph, it gets a second terminal newline.
   */
  void IterateAndAppendUTF8TextlineText(STRING *text);
  /**
   * Appends the text of the current paragraph in reading order
   * to the given buffer.
   * Each textline is terminated in a single newline character, and the
   * paragraph gets an extra newline at the end.
   */
  void AppendUTF8ParagraphText(STRING *text) const;
  /** Returns whether the bidi_debug flag is set to at least min_level. */
  bool BidiDebug(int min_level) const;
  /** Cached dominant reading direction of the current paragraph. */
  bool current_paragraph_is_ltr_;
  /**
   * Is the currently pointed-at character at the beginning of
   * a minor-direction run?
   */
  bool at_beginning_of_minor_run_;
  /** Is the currently pointed-at character in a minor-direction sequence? */
  bool in_minor_direction_;
};
} // namespace tesseract.
#endif // TESSERACT_CCMAIN_RESULT_ITERATOR_H__
| C++ |
/**********************************************************************
* File: applybox.cpp (Formerly applybox.c)
* Description: Re segment rows according to box file data
* Author: Phil Cheatle
* Created: Wed Nov 24 09:11:23 GMT 1993
*
* (C) Copyright 1993, Hewlett-Packard Ltd.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
#ifdef _MSC_VER
#pragma warning(disable:4244) // Conversion warnings
#endif
#include <ctype.h>
#include <string.h>
#ifdef __UNIX__
#include <assert.h>
#include <errno.h>
#endif
#include "allheaders.h"
#include "boxread.h"
#include "chopper.h"
#include "pageres.h"
#include "unichar.h"
#include "unicharset.h"
#include "tesseractclass.h"
#include "genericvector.h"
// Max number of blobs to classify together in FindSegmentation.
const int kMaxGroupSize = 4;
// Max fraction of median allowed as deviation in xheight before switching
// to median.
const double kMaxXHeightDeviationFraction = 0.125;
/*************************************************************************
* The box file is assumed to contain box definitions, one per line, of the
* following format for blob-level boxes:
* <UTF8 str> <left> <bottom> <right> <top> <page id>
* and for word/line-level boxes:
* WordStr <left> <bottom> <right> <top> <page id> #<space-delimited word str>
* NOTES:
* The boxes use tesseract coordinates, i.e. 0,0 is at BOTTOM-LEFT.
*
* <page id> is 0-based, and the page number is used for multipage input (tiff).
*
* In the blob-level form, each line represents a recognizable unit, which may
* be several UTF-8 bytes, but there is a bounding box around each recognizable
* unit, and no classifier is needed to train in this mode (bootstrapping.)
*
* In the word/line-level form, the line begins with the literal "WordStr", and
* the bounding box bounds either a whole line or a whole word. The recognizable
* units in the word/line are listed after the # at the end of the line and
* are space delimited, ignoring any original spaces on the line.
* Eg.
* word -> #w o r d
* multi word line -> #m u l t i w o r d l i n e
* The recognizable units must be space-delimited in order to allow multiple
* unicodes to be used for a single recognizable unit, eg Hindi.
* In this mode, the classifier must have been pre-trained with the desired
* character set, or it will not be able to find the character segmentations.
*************************************************************************/
namespace tesseract {
// Blanks the text of every word on the page so resegmentation starts
// from a clean slate.
static void clear_any_old_text(BLOCK_LIST *block_list) {
  BLOCK_IT b_it(block_list);
  for (b_it.mark_cycle_pt(); !b_it.cycled_list(); b_it.forward()) {
    ROW_IT r_it(b_it.data()->row_list());
    for (r_it.mark_cycle_pt(); !r_it.cycled_list(); r_it.forward()) {
      WERD_IT w_it(r_it.data()->word_list());
      for (w_it.mark_cycle_pt(); !w_it.cycled_list(); w_it.forward()) {
        w_it.data()->set_text("");
      }
    }
  }
}
// Applies the box file based on the image name fname, and resegments
// the words in the block_list (page), with:
// blob-mode: one blob per line in the box file, words as input.
// word/line-mode: one blob per space-delimited unit after the #, and one word
// per line in the box file. (See comment above for box file format.)
// If find_segmentation is true, (word/line mode) then the classifier is used
// to re-segment words/lines to match the space-delimited truth string for
// each box. With find_segmentation false, no classifier is needed, but the
// chopper can still be used to correctly segment touching characters with
// the help of the input boxes.
// In the returned PAGE_RES, the WERD_RES are setup as they would be returned
// from normal classification, ie. with a word, chopped_word, rebuild_word,
// seam_array, denorm, box_word, and best_state, but NO best_choice or
// raw_choice, as they would require a UNICHARSET, which we aim to avoid.
// Instead, the correct_text member of WERD_RES is set, and this may be later
// converted to a best_choice using CorrectClassifyWords. CorrectClassifyWords
// is not required before calling ApplyBoxTraining.
// Returns NULL if the box file cannot be read.
PAGE_RES* Tesseract::ApplyBoxes(const STRING& fname,
                                bool find_segmentation,
                                BLOCK_LIST *block_list) {
  GenericVector<TBOX> boxes;
  GenericVector<STRING> texts, full_texts;
  if (!ReadAllBoxes(applybox_page, true, fname, &boxes, &texts, &full_texts,
                    NULL)) {
    return NULL;  // Can't do it.
  }
  const int box_count = boxes.size();
  int box_failures = 0;
  // Append empty sentinels so boxes[i + 1] is always valid in the loop.
  boxes.push_back(TBOX());
  texts.push_back(STRING());
  full_texts.push_back(STRING());
  // In word mode, we use the boxes to make a word for each box, but
  // in blob mode we use the existing words and maximally chop them first.
  PAGE_RES* page_res =
      find_segmentation ? NULL : SetupApplyBoxes(boxes, block_list);
  clear_any_old_text(block_list);
  for (int i = 0; i < boxes.size() - 1; i++) {
    bool found = false;
    if (page_res != NULL) {
      // Blob mode: claim blobs for box i, disputing with its neighbors.
      const TBOX* prev = (i > 0) ? &boxes[i - 1] : NULL;
      found = ResegmentCharBox(page_res, prev, boxes[i], boxes[i + 1],
                               full_texts[i].string());
    } else {
      found = ResegmentWordBox(block_list, boxes[i], boxes[i + 1],
                               texts[i].string());
    }
    if (!found) {
      box_failures++;
      ReportFailedBox(i, boxes[i], texts[i].string(),
                      "FAILURE! Couldn't find a matching blob");
    }
  }
  if (page_res == NULL) {
    // In word/line mode, we now maximally chop all the words and resegment
    // them with the classifier.
    page_res = SetupApplyBoxes(boxes, block_list);
    ReSegmentByClassification(page_res);
  }
  if (applybox_debug > 0) {
    tprintf("APPLY_BOXES:\n");
    tprintf("   Boxes read from boxfile:  %6d\n", box_count);
    if (box_failures > 0)
      tprintf("   Boxes failed resegmentation: %6d\n", box_failures);
  }
  TidyUp(page_res);
  return page_res;
}
// Helper computes the median x-height over all rows in the image.
static double MedianXHeight(BLOCK_LIST *block_list) {
  BLOCK_IT b_it(block_list);
  // The histogram's upper bound comes from the first block's height.
  STATS heights(0, b_it.data()->bounding_box().height());
  for (b_it.mark_cycle_pt(); !b_it.cycled_list(); b_it.forward()) {
    ROW_IT r_it(b_it.data()->row_list());
    for (r_it.mark_cycle_pt(); !r_it.cycled_list(); r_it.forward()) {
      heights.add(IntCastRounded(r_it.data()->x_height()), 1);
    }
  }
  return heights.median();
}
// Any row xheight that is significantly different from the median is set
// to the median, to stop outlier rows distorting later normalization.
void Tesseract::PreenXHeights(BLOCK_LIST *block_list) {
  const double median_xheight = MedianXHeight(block_list);
  // Rows deviating by more than this fraction of the median are clamped.
  const double max_deviation = kMaxXHeightDeviationFraction * median_xheight;
  BLOCK_IT b_it(block_list);
  for (b_it.mark_cycle_pt(); !b_it.cycled_list(); b_it.forward()) {
    ROW_IT r_it(b_it.data()->row_list());
    for (r_it.mark_cycle_pt(); !r_it.cycled_list(); r_it.forward()) {
      ROW* row = r_it.data();
      const float diff = fabs(row->x_height() - median_xheight);
      if (diff > max_deviation) {
        if (applybox_debug) {
          tprintf("row xheight=%g, but median xheight = %g\n",
                  row->x_height(), median_xheight);
        }
        row->set_x_height(static_cast<float>(median_xheight));
      }
    }
  }
}
// Builds a PAGE_RES from the block_list in the way required for ApplyBoxes:
// All fuzzy spaces are removed, and all the words are maximally chopped.
PAGE_RES* Tesseract::SetupApplyBoxes(const GenericVector<TBOX>& boxes,
                                     BLOCK_LIST *block_list) {
  // Pull outlier row x-heights to the page median first.
  PreenXHeights(block_list);
  // Strip all fuzzy space markers to simplify the PAGE_RES.
  BLOCK_IT b_it(block_list);
  for (b_it.mark_cycle_pt(); !b_it.cycled_list(); b_it.forward()) {
    BLOCK* block = b_it.data();
    ROW_IT r_it(block->row_list());
    for (r_it.mark_cycle_pt(); !r_it.cycled_list(); r_it.forward()) {
      ROW* row = r_it.data();
      WERD_IT w_it(row->word_list());
      for (w_it.mark_cycle_pt(); !w_it.cycled_list(); w_it.forward()) {
        WERD* word = w_it.data();
        if (word->cblob_list()->empty()) {
          // Blob-less words contribute nothing; delete them outright.
          delete w_it.extract();
        } else {
          word->set_flag(W_FUZZY_SP, false);
          word->set_flag(W_FUZZY_NON, false);
        }
      }
    }
  }
  PAGE_RES* page_res = new PAGE_RES(false, block_list, NULL);
  PAGE_RES_IT pr_it(page_res);
  WERD_RES* word_res;
  // Chop every word as finely as possible so that boxes can later claim
  // sub-blob pieces.
  while ((word_res = pr_it.word()) != NULL) {
    MaximallyChopWord(boxes, pr_it.block()->block,
                      pr_it.row()->row, word_res);
    pr_it.forward();
  }
  return page_res;
}
// Tests the chopper by exhaustively running chop_one_blob.
// The word_res will contain filled chopped_word, seam_array, denorm,
// box_word and best_state for the maximally chopped word.
void Tesseract::MaximallyChopWord(const GenericVector<TBOX>& boxes,
                                  BLOCK* block, ROW* row,
                                  WERD_RES* word_res) {
  if (!word_res->SetupForRecognition(unicharset, this, BestPix(),
                                     tessedit_ocr_engine_mode, NULL,
                                     classify_bln_numeric_mode,
                                     textord_use_cjk_fp_model,
                                     poly_allow_detailed_fx,
                                     row, block)) {
    // Setup failed: keep the unchopped word as the rebuild result.
    word_res->CloneChoppedToRebuild();
    return;
  }
  if (chop_debug) {
    tprintf("Maximally chopping word at:");
    word_res->word->bounding_box().print();
  }
  GenericVector<BLOB_CHOICE*> blob_choices;
  ASSERT_HOST(!word_res->chopped_word->blobs.empty());
  // Seed one fake choice per existing blob, with strictly decreasing ratings.
  float rating = static_cast<float>(MAX_INT8);
  for (int i = 0; i < word_res->chopped_word->NumBlobs(); ++i) {
    // The rating and certainty are not quite arbitrary. Since
    // select_blob_to_chop uses the worst certainty to choose, they all have
    // to be different, so starting with MAX_INT8, subtract 1/8 for each blob
    // in here, and then divide by e each time they are chopped, which
    // should guarantee a set of unequal values for the whole tree of blobs
    // produced, however much chopping is required. The chops are thus only
    // limited by the ability of the chopper to find suitable chop points,
    // and not by the value of the certainties.
    BLOB_CHOICE* choice =
        new BLOB_CHOICE(0, rating, -rating, -1, -1, 0, 0, 0, 0, BCC_FAKE);
    blob_choices.push_back(choice);
    rating -= 0.125f;
  }
  const double e = exp(1.0);  // The base of natural logs.
  int blob_number;
  int right_chop_index = 0;
  if (!assume_fixed_pitch_char_segment) {
    // We only chop if the language is not fixed pitch like CJK.
    SEAM* seam = NULL;
    while ((seam = chop_one_blob(boxes, blob_choices, word_res,
                                 &blob_number)) != NULL) {
      word_res->InsertSeam(blob_number, seam);
      // Each chop replaces one choice with two: halve (by e) the left
      // rating and give the right piece a slightly worse one.
      BLOB_CHOICE* left_choice = blob_choices[blob_number];
      rating = left_choice->rating() / e;
      left_choice->set_rating(rating);
      left_choice->set_certainty(-rating);
      // combine confidence w/ serial #
      BLOB_CHOICE* right_choice = new BLOB_CHOICE(++right_chop_index,
                                                  rating - 0.125f, -rating,
                                                  -1, -1, 0, 0, 0, 0, BCC_FAKE);
      blob_choices.insert(right_choice, blob_number + 1);
    }
  }
  word_res->CloneChoppedToRebuild();
  word_res->FakeClassifyWord(blob_choices.size(), &blob_choices[0]);
}
// Helper to compute the dispute resolution metric.
// Disputed blob resolution. The aim is to give the blob to the most
// appropriate boxfile box. Most of the time it is obvious, but if
// two boxfile boxes overlap significantly it is not. If a small boxfile
// box takes most of the blob, and a large boxfile box does too, then
// we want the small boxfile box to get it, but if the small box
// is much smaller than the blob, we don't want it to get it.
// Details of the disputed blob resolution:
// Given a box with area A, and a blob with area B, with overlap area C,
// then the miss metric is (A-C)(B-C)/(AB) and the box with minimum
// miss metric gets the blob. The metric lies in [0, 1]: 0 = perfect
// mutual fit, 1 = no overlap at all.
static double BoxMissMetric(const TBOX& box1, const TBOX& box2) {
  const int overlap_area = box1.intersection(box2).area();
  const double a = box1.area();
  const double b = box2.area();
  // A degenerate (empty) box has zero area, which would make the metric
  // 0/0 = NaN, and NaN compares false against everything, silently
  // winning every dispute. Treat such a box as the worst possible match.
  if (a <= 0.0 || b <= 0.0)
    return 1.0;
  return ((a - overlap_area) / a) * ((b - overlap_area) / b);
}
// Gather consecutive blobs that match the given box into the best_state
// and corresponding correct_text.
// Fights over which box owns which blobs are settled by pre-chopping and
// applying the blobs to box or next_box with the least non-overlap.
// Returns false if the box was in error, which can only be caused by
// failing to find an appropriate blob for a box.
// This means that occasionally, blobs may be incorrectly segmented if the
// chopper fails to find a suitable chop point.
bool Tesseract::ResegmentCharBox(PAGE_RES* page_res, const TBOX *prev_box,
                                 const TBOX& box, const TBOX& next_box,
                                 const char* correct_text) {
  if (applybox_debug > 1) {
    tprintf("\nAPPLY_BOX: in ResegmentCharBox() for %s\n", correct_text);
  }
  PAGE_RES_IT page_res_it(page_res);
  WERD_RES* word_res;
  for (word_res = page_res_it.word(); word_res != NULL;
       word_res = page_res_it.forward()) {
    // Only words whose bounding box substantially overlaps the target
    // box can contain its blobs.
    if (!word_res->box_word->bounding_box().major_overlap(box))
      continue;
    if (applybox_debug > 1) {
      tprintf("Checking word box:");
      word_res->box_word->bounding_box().print();
    }
    int word_len = word_res->box_word->length();
    for (int i = 0; i < word_len; ++i) {
      // Accumulate the run of consecutive blobs starting at i that the
      // box can claim; char_box is their union.
      TBOX char_box = TBOX();
      int blob_count = 0;
      for (blob_count = 0; i + blob_count < word_len; ++blob_count) {
        TBOX blob_box = word_res->box_word->BlobBox(i + blob_count);
        if (!blob_box.major_overlap(box))
          break;
        if (word_res->correct_text[i + blob_count].length() > 0)
          break;  // Blob is claimed already.
        // Settle disputes with next_box via the miss metric: the blob
        // goes to whichever box covers it more completely.
        double current_box_miss_metric = BoxMissMetric(blob_box, box);
        double next_box_miss_metric = BoxMissMetric(blob_box, next_box);
        if (applybox_debug > 2) {
          tprintf("Checking blob:");
          blob_box.print();
          tprintf("Current miss metric = %g, next = %g\n",
                  current_box_miss_metric, next_box_miss_metric);
        }
        if (current_box_miss_metric > next_box_miss_metric)
          break;  // Blob is a better match for next box.
        char_box += blob_box;
      }
      if (blob_count > 0) {
        if (applybox_debug > 1) {
          tprintf("Index [%d, %d) seem good.\n", i, i + blob_count);
        }
        // Reject if the claimed blobs don't nearly fill the box AND the
        // box overlaps a neighbor by more than 3 pixels: the blobs
        // probably belong to the neighbor.
        if (!char_box.almost_equal(box, 3) &&
            (box.x_gap(next_box) < -3 ||
             (prev_box != NULL && prev_box->x_gap(box) < -3))) {
          return false;
        }
        // We refine just the box_word, best_state and correct_text here.
        // The rebuild_word is made in TidyUp.
        // blob_count blobs are put together to match the box. Merge the
        // box_word boxes, save the blob_count in the state and the text.
        word_res->box_word->MergeBoxes(i, i + blob_count);
        word_res->best_state[i] = blob_count;
        word_res->correct_text[i] = correct_text;
        if (applybox_debug > 2) {
          tprintf("%d Blobs match: blob box:", blob_count);
          word_res->box_word->BlobBox(i).print();
          tprintf("Matches box:");
          box.print();
          tprintf("With next box:");
          next_box.print();
        }
        // Eliminate best_state and correct_text entries for the consumed
        // blobs.
        for (int j = 1; j < blob_count; ++j) {
          word_res->best_state.remove(i + 1);
          word_res->correct_text.remove(i + 1);
        }
        // Assume that no box spans multiple source words, so we are done with
        // this box.
        if (applybox_debug > 1) {
          tprintf("Best state = ");
          for (int j = 0; j < word_res->best_state.size(); ++j) {
            tprintf("%d ", word_res->best_state[j]);
          }
          tprintf("\n");
          tprintf("Correct text = [[ ");
          for (int j = 0; j < word_res->correct_text.size(); ++j) {
            tprintf("%s ", word_res->correct_text[j].string());
          }
          tprintf("]]\n");
        }
        return true;
      }
    }
  }
  if (applybox_debug > 0) {
    tprintf("FAIL!\n");
  }
  return false;  // Failure.
}
// Consume all source blobs that strongly overlap the given box,
// putting them into a new word, with the correct_text label.
// Fights over which box owns which blobs are settled by
// applying the blobs to box or next_box with the least non-overlap.
// Returns false if the box was in error, which can only be caused by
// failing to find an overlapping blob for a box.
bool Tesseract::ResegmentWordBox(BLOCK_LIST *block_list,
                                 const TBOX& box, const TBOX& next_box,
                                 const char* correct_text) {
  if (applybox_debug > 1) {
    tprintf("\nAPPLY_BOX: in ResegmentWordBox() for %s\n", correct_text);
  }
  WERD* new_word = NULL;
  BLOCK_IT b_it(block_list);
  for (b_it.mark_cycle_pt(); !b_it.cycled_list(); b_it.forward()) {
    BLOCK* block = b_it.data();
    // Skip blocks, rows and words whose bounds cannot contain the box.
    if (!box.major_overlap(block->bounding_box()))
      continue;
    ROW_IT r_it(block->row_list());
    for (r_it.mark_cycle_pt(); !r_it.cycled_list(); r_it.forward()) {
      ROW* row = r_it.data();
      if (!box.major_overlap(row->bounding_box()))
        continue;
      WERD_IT w_it(row->word_list());
      for (w_it.mark_cycle_pt(); !w_it.cycled_list(); w_it.forward()) {
        WERD* word = w_it.data();
        if (applybox_debug > 2) {
          tprintf("Checking word:");
          word->bounding_box().print();
        }
        if (word->text() != NULL && word->text()[0] != '\0')
          continue;  // Ignore words that are already done.
        if (!box.major_overlap(word->bounding_box()))
          continue;
        C_BLOB_IT blob_it(word->cblob_list());
        for (blob_it.mark_cycle_pt(); !blob_it.cycled_list();
             blob_it.forward()) {
          C_BLOB* blob = blob_it.data();
          TBOX blob_box = blob->bounding_box();
          if (!blob_box.major_overlap(box))
            continue;
          // Settle disputes with next_box via the miss metric: the blob
          // goes to whichever box covers it more completely.
          double current_box_miss_metric = BoxMissMetric(blob_box, box);
          double next_box_miss_metric = BoxMissMetric(blob_box, next_box);
          if (applybox_debug > 2) {
            tprintf("Checking blob:");
            blob_box.print();
            tprintf("Current miss metric = %g, next = %g\n",
                    current_box_miss_metric, next_box_miss_metric);
          }
          if (current_box_miss_metric > next_box_miss_metric)
            continue;  // Blob is a better match for next box.
          if (applybox_debug > 2) {
            tprintf("Blob match: blob:");
            blob_box.print();
            tprintf("Matches box:");
            box.print();
            tprintf("With next box:");
            next_box.print();
          }
          if (new_word == NULL) {
            // Make a new word with a single blob.
            new_word = word->shallow_copy();
            new_word->set_text(correct_text);
            w_it.add_to_end(new_word);
          }
          // Move (not copy) the blob from the old word into the new one.
          C_BLOB_IT new_blob_it(new_word->cblob_list());
          new_blob_it.add_to_end(blob_it.extract());
        }
      }
    }
  }
  if (new_word == NULL && applybox_debug > 0) tprintf("FAIL!\n");
  return new_word != NULL;
}
// Resegments the words by running the classifier in an attempt to find the
// correct segmentation that produces the required string.
// Words that cannot be matched to their truth string are deleted.
void Tesseract::ReSegmentByClassification(PAGE_RES* page_res) {
  PAGE_RES_IT pr_it(page_res);
  while (pr_it.word() != NULL) {
    WERD_RES* word_res = pr_it.word();
    const char* text = word_res->word->text();
    // Words with no truth text cannot be resegmented.
    if (text != NULL && text[0] != '\0') {
      // Convert the correct text to a vector of UNICHAR_ID.
      GenericVector<UNICHAR_ID> target_text;
      if (!ConvertStringToUnichars(text, &target_text)) {
        tprintf("APPLY_BOX: FAILURE: can't find class_id for '%s'\n", text);
        pr_it.DeleteCurrentWord();
      } else if (!FindSegmentation(target_text, word_res)) {
        tprintf("APPLY_BOX: FAILURE: can't find segmentation for '%s'\n",
                text);
        pr_it.DeleteCurrentWord();
      }
    }
    pr_it.forward();
  }
}
// Converts the space-delimited string of utf8 text to a vector of UNICHAR_ID.
// Returns false if an invalid UNICHAR_ID is encountered.
bool Tesseract::ConvertStringToUnichars(const char* utf8,
                                        GenericVector<UNICHAR_ID>* class_ids) {
  while (*utf8 != '\0') {
    // A token runs to the next space, or to the end of the string.
    const char* token_end = strchr(utf8, ' ');
    int token_len;
    if (token_end != NULL)
      token_len = token_end - utf8;
    else
      token_len = static_cast<int>(strlen(utf8));
    UNICHAR_ID class_id = unicharset.unichar_to_id(utf8, token_len);
    if (class_id == INVALID_UNICHAR_ID) {
      return false;
    }
    class_ids->push_back(class_id);
    // Advance past the token and any run of delimiting spaces.
    utf8 += token_len;
    while (*utf8 == ' ')
      ++utf8;
  }
  return true;
}
// Resegments the word to achieve the target_text from the classifier.
// Returns false if the re-segmentation fails.
// Uses brute-force combination of up to kMaxGroupSize adjacent blobs, and
// applies a full search on the classifier results to find the best classified
// segmentation. As a compromise to obtain better recall, 1-1 ambiguity
// substitutions ARE used.
bool Tesseract::FindSegmentation(const GenericVector<UNICHAR_ID>& target_text,
                                 WERD_RES* word_res) {
  // Classify all required combinations of blobs and save results in choices.
  // choices[i][j - 1] holds the classification of blobs [i, i + j).
  int word_length = word_res->box_word->length();
  GenericVector<BLOB_CHOICE_LIST*>* choices =
      new GenericVector<BLOB_CHOICE_LIST*>[word_length];
  for (int i = 0; i < word_length; ++i) {
    for (int j = 1; j <= kMaxGroupSize && i + j <= word_length; ++j) {
      BLOB_CHOICE_LIST* match_result = classify_piece(
          word_res->seam_array, i, i + j - 1, "Applybox",
          word_res->chopped_word, word_res->blamer_bundle);
      if (applybox_debug > 2) {
        tprintf("%d+%d:", i, j);
        print_ratings_list("Segment:", match_result, unicharset);
      }
      choices[i].push_back(match_result);
    }
  }
  // Search the segmentation graph for the target text. Must be an exact
  // match. Using wildcards makes it difficult to find the correct
  // segmentation even when it is there.
  word_res->best_state.clear();
  GenericVector<int> search_segmentation;
  float best_rating = 0.0f;
  SearchForText(choices, 0, word_length, target_text, 0, 0.0f,
                &search_segmentation, &best_rating, &word_res->best_state);
  // Free the classification results; this function owns them.
  for (int i = 0; i < word_length; ++i)
    choices[i].delete_data_pointers();
  delete [] choices;
  if (word_res->best_state.empty()) {
    // Build the original segmentation and if it is the same length as the
    // truth, assume it will do.
    int blob_count = 1;
    for (int s = 0; s < word_res->seam_array.size(); ++s) {
      SEAM* seam = word_res->seam_array[s];
      if (seam->split1 == NULL) {
        // A seam without a split marks a boundary between original blobs.
        word_res->best_state.push_back(blob_count);
        blob_count = 1;
      } else {
        ++blob_count;
      }
    }
    word_res->best_state.push_back(blob_count);
    if (word_res->best_state.size() != target_text.size()) {
      word_res->best_state.clear();  // No good. Original segmentation bad size.
      return false;
    }
  }
  // Record the truth text, one entry per segmented unit.
  word_res->correct_text.clear();
  for (int i = 0; i < target_text.size(); ++i) {
    word_res->correct_text.push_back(
        STRING(unicharset.id_to_unichar(target_text[i])));
  }
  return true;
}
// Recursive helper to find a match to the target_text (from text_index
// position) in the choices (from choices_pos position).
// Choices is an array of GenericVectors, of length choices_length, with each
// element representing a starting position in the word, and the
// GenericVector holding classification results for a sequence of consecutive
// blobs, with index 0 being a single blob, index 1 being 2 blobs etc.
// rating accumulates the cost of the path so far; the lowest-rated complete
// match is recorded in best_rating/best_segmentation.
void Tesseract::SearchForText(const GenericVector<BLOB_CHOICE_LIST*>* choices,
                              int choices_pos, int choices_length,
                              const GenericVector<UNICHAR_ID>& target_text,
                              int text_index,
                              float rating, GenericVector<int>* segmentation,
                              float* best_rating,
                              GenericVector<int>* best_segmentation) {
  const UnicharAmbigsVector& table = getDict().getUnicharAmbigs().dang_ambigs();
  // Try every group length (1..kMaxGroupSize blobs) available here.
  for (int length = 1; length <= choices[choices_pos].size(); ++length) {
    // Rating of matching choice or worst choice if no match.
    float choice_rating = 0.0f;
    // Find the corresponding best BLOB_CHOICE.
    BLOB_CHOICE_IT choice_it(choices[choices_pos][length - 1]);
    for (choice_it.mark_cycle_pt(); !choice_it.cycled_list();
         choice_it.forward()) {
      BLOB_CHOICE* choice = choice_it.data();
      choice_rating = choice->rating();
      UNICHAR_ID class_id = choice->unichar_id();
      if (class_id == target_text[text_index]) {
        break;
      }
      // Search ambigs table.
      if (class_id < table.size() && table[class_id] != NULL) {
        AmbigSpec_IT spec_it(table[class_id]);
        for (spec_it.mark_cycle_pt(); !spec_it.cycled_list();
             spec_it.forward()) {
          const AmbigSpec *ambig_spec = spec_it.data();
          // We'll only do 1-1.
          if (ambig_spec->wrong_ngram[1] == INVALID_UNICHAR_ID &&
              ambig_spec->correct_ngram_id == target_text[text_index])
            break;
        }
        if (!spec_it.cycled_list())
          break;  // Found an ambig.
      }
    }
    if (choice_it.cycled_list())
      continue;  // No match.
    segmentation->push_back(length);
    if (choices_pos + length == choices_length &&
        text_index + 1 == target_text.size()) {
      // This is a complete match. If the rating is good record a new best.
      if (applybox_debug > 2) {
        tprintf("Complete match, rating = %g, best=%g, seglength=%d, best=%d\n",
                rating + choice_rating, *best_rating, segmentation->size(),
                best_segmentation->size());
      }
      if (best_segmentation->empty() || rating + choice_rating < *best_rating) {
        *best_segmentation = *segmentation;
        *best_rating = rating + choice_rating;
      }
    } else if (choices_pos + length < choices_length &&
               text_index + 1 < target_text.size()) {
      // Partial match: recurse on the remaining blobs and text.
      if (applybox_debug > 3) {
        tprintf("Match found for %d=%s:%s, at %d+%d, recursing...\n",
                target_text[text_index],
                unicharset.id_to_unichar(target_text[text_index]),
                choice_it.data()->unichar_id() == target_text[text_index]
                     ? "Match" : "Ambig",
                choices_pos, length);
      }
      SearchForText(choices, choices_pos + length, choices_length, target_text,
                    text_index + 1, rating + choice_rating, segmentation,
                    best_rating, best_segmentation);
      if (applybox_debug > 3) {
        tprintf("End recursion for %d=%s\n", target_text[text_index],
                unicharset.id_to_unichar(target_text[text_index]));
      }
    }
    // Backtrack: undo this group length before trying the next one.
    segmentation->truncate(segmentation->size() - 1);
  }
}
// Counts up the labelled words and the blobs within.
// Deletes all unused or emptied words, counting the unused ones.
// Resets W_BOL and W_EOL flags correctly.
// Builds the rebuild_word and rebuilds the box_word and the best_choice.
void Tesseract::TidyUp(PAGE_RES* page_res) {
  int ok_blob_count = 0;
  int bad_blob_count = 0;
  int ok_word_count = 0;
  int unlabelled_words = 0;
  PAGE_RES_IT pr_it(page_res);
  WERD_RES* word_res;
  for (; (word_res = pr_it.word()) != NULL; pr_it.forward()) {
    int ok_in_word = 0;
    int blob_count = word_res->correct_text.size();
    WERD_CHOICE* word_choice = new WERD_CHOICE(word_res->uch_set, blob_count);
    word_choice->set_permuter(TOP_CHOICE_PERM);
    for (int c = 0; c < blob_count; ++c) {
      if (word_res->correct_text[c].length() > 0) {
        ++ok_in_word;
      }
      // Since we only need a fake word_res->best_choice, the actual
      // unichar_ids do not matter. Which is fortunate, since TidyUp()
      // can be called while training Tesseract, at the stage where
      // unicharset is not meaningful yet.
      word_choice->append_unichar_id_space_allocated(
          INVALID_UNICHAR_ID, word_res->best_state[c], 1.0f, -1.0f);
    }
    if (ok_in_word > 0) {
      ok_blob_count += ok_in_word;
      bad_blob_count += word_res->correct_text.size() - ok_in_word;
      // BUGFIX: ok_word_count was never incremented, so the debug
      // summary below always reported 0 words.
      ++ok_word_count;
      word_res->LogNewRawChoice(word_choice);
      word_res->LogNewCookedChoice(1, false, word_choice);
    } else {
      ++unlabelled_words;
      if (applybox_debug > 0) {
        tprintf("APPLY_BOXES: Unlabelled word at :");
        word_res->word->bounding_box().print();
      }
      pr_it.DeleteCurrentWord();
      delete word_choice;
    }
  }
  pr_it.restart_page();
  // Second pass: rebuild geometry and begin/end-of-line flags now that
  // unlabelled words have been deleted.
  for (; (word_res = pr_it.word()) != NULL; pr_it.forward()) {
    // Denormalize back to a BoxWord.
    word_res->RebuildBestState();
    word_res->SetupBoxWord();
    word_res->word->set_flag(W_BOL, pr_it.prev_row() != pr_it.row());
    word_res->word->set_flag(W_EOL, pr_it.next_row() != pr_it.row());
  }
  if (applybox_debug > 0) {
    tprintf("   Found %d good blobs.\n", ok_blob_count);
    if (bad_blob_count > 0) {
      tprintf("   Leaving %d unlabelled blobs in %d words.\n",
              bad_blob_count, ok_word_count);
    }
    if (unlabelled_words > 0)
      tprintf("   %d remaining unlabelled words deleted.\n", unlabelled_words);
  }
}
// Logs a bad box by line in the box file and box coords.
void Tesseract::ReportFailedBox(int boxfile_lineno, TBOX box,
                                const char *box_ch, const char *err_msg) {
  // boxfile_lineno is 0-based internally; print 1-based for humans.
  tprintf("APPLY_BOXES: boxfile line %d/%s ((%d,%d),(%d,%d)): %s\n",
          boxfile_lineno + 1, box_ch,
          box.left(), box.bottom(), box.right(), box.top(), err_msg);
}
// Creates a fake best_choice entry in each WERD_RES with the correct text.
void Tesseract::CorrectClassifyWords(PAGE_RES* page_res) {
  PAGE_RES_IT pr_it(page_res);
  while (pr_it.word() != NULL) {
    WERD_RES* word_res = pr_it.word();
    const int num_units = word_res->correct_text.size();
    WERD_CHOICE* fake_choice = new WERD_CHOICE(word_res->uch_set, num_units);
    for (int i = 0; i < num_units; ++i) {
      // The part before the first space is the real ground truth; the
      // rest is the bounding box location and page number.
      GenericVector<STRING> tokens;
      word_res->correct_text[i].split(' ', &tokens);
      UNICHAR_ID char_id = unicharset.unichar_to_id(tokens[0].string());
      fake_choice->append_unichar_id_space_allocated(
          char_id, word_res->best_state[i], 0.0f, 0.0f);
    }
    word_res->ClearWordChoices();
    word_res->LogNewRawChoice(fake_choice);
    word_res->LogNewCookedChoice(1, false, fake_choice);
    pr_it.forward();
  }
}
// Calls LearnWord to extract features for labelled blobs within each word.
// Features are written to the given filename.
void Tesseract::ApplyBoxTraining(const STRING& filename, PAGE_RES* page_res) {
  PAGE_RES_IT pr_it(page_res);
  int word_count = 0;
  while (pr_it.word() != NULL) {
    LearnWord(filename.string(), pr_it.word());
    ++word_count;
    pr_it.forward();
  }
  tprintf("Generated training data for %d words\n", word_count);
}
} // namespace tesseract
| C++ |
///////////////////////////////////////////////////////////////////////
// File: osdetect.h
// Description: Orientation and script detection.
// Author: Samuel Charron
// Ranjith Unnikrishnan
//
// (C) Copyright 2008, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#ifndef TESSERACT_CCMAIN_OSDETECT_H__
#define TESSERACT_CCMAIN_OSDETECT_H__
#include "strngs.h"
#include "unicharset.h"
// Forward declarations keep this header light; the full definitions live
// in their own headers.
class TO_BLOCK_LIST;
class BLOBNBOX;
class BLOB_CHOICE_LIST;
class BLOBNBOX_CLIST;
namespace tesseract {
class Tesseract;
}
// Max number of scripts in ICU + "NULL" + Japanese and Korean + Fraktur
const int kMaxNumberOfScripts = 116 + 1 + 2 + 1;
// Best orientation and script estimate for a page, with a confidence
// score for each.
struct OSBestResult {
  // Default result: orientation id 0, script id 0, zero confidences.
  OSBestResult() {
    orientation_id = 0;
    script_id = 0;
    sconfidence = 0.0;
    oconfidence = 0.0;
  }
  int orientation_id;   // Index into the 4 possible 90-degree orientations.
  int script_id;        // Index of the winning script.
  float sconfidence;    // Confidence in script_id.
  float oconfidence;    // Confidence in orientation_id.
};
// Accumulated orientation and per-script scores for a page, over the four
// possible 90-degree orientations.
struct OSResults {
  OSResults() : unicharset(NULL) {
    // Zero all orientation and per-script score accumulators.
    for (int i = 0; i < 4; ++i) {
      for (int j = 0; j < kMaxNumberOfScripts; ++j)
        scripts_na[i][j] = 0;
      orientations[i] = 0;
    }
  }
  // Recomputes the best orientation from the accumulated scores.
  // NOTE(review): semantics inferred from the name only - confirm in
  // osdetect.cpp.
  void update_best_orientation();
  // Set the estimate of the orientation to the given id.
  void set_best_orientation(int orientation_id);
  // Update/Compute the best estimate of the script assuming the given
  // orientation id.
  void update_best_script(int orientation_id);
  // Return the index of the script with the highest score for this orientation.
  TESS_API int get_best_script(int orientation_id) const;
  // Accumulate scores with given OSResults instance and update the best script.
  void accumulate(const OSResults& osr);
  // Print statistics.
  void print_scores(void) const;
  void print_scores(int orientation_id) const;
  // Array holding scores for each orientation id [0,3].
  // Orientation ids [0..3] map to [0, 270, 180, 90] degree orientations of the
  // page respectively, where the values refer to the amount of clockwise
  // rotation to be applied to the page for the text to be upright and readable.
  float orientations[4];
  // Script confidence scores for each of 4 possible orientations.
  float scripts_na[4][kMaxNumberOfScripts];
  // Not owned (the default destructor does not delete it); NULL until set.
  UNICHARSET* unicharset;
  OSBestResult best_result;
};
// Accumulates per-blob classifier results into an OSResults to estimate
// the page orientation.
class OrientationDetector {
 public:
  // results receives the accumulated scores. If allowed_scripts is
  // non-NULL, presumably only those script ids are considered -
  // NOTE(review): confirm against the implementation in osdetect.cpp.
  OrientationDetector(const GenericVector<int>* allowed_scripts,
                      OSResults* results);
  // Folds the classifier choices for one blob into the results.
  bool detect_blob(BLOB_CHOICE_LIST* scores);
  // Returns the current best orientation id.
  int get_orientation();
 private:
  OSResults* osr_;  // Not owned.
  tesseract::Tesseract* tess_;  // Not owned.
  const GenericVector<int>* allowed_scripts_;  // Not owned; may be NULL.
};
// Accumulates per-blob classifier results into an OSResults to estimate
// the dominant script of the page.
class ScriptDetector {
 public:
  // osr receives the accumulated scores. If allowed_scripts is non-NULL,
  // presumably only those script ids are scored - NOTE(review): confirm
  // against the implementation in osdetect.cpp.
  ScriptDetector(const GenericVector<int>* allowed_scripts,
                 OSResults* osr, tesseract::Tesseract* tess);
  // Folds the classifier choices for one blob into the results.
  void detect_blob(BLOB_CHOICE_LIST* scores);
  // Returns whether detection can stop for the given orientation.
  // NOTE(review): early-stop semantics inferred from the name - confirm.
  bool must_stop(int orientation);
 private:
  OSResults* osr_;  // Not owned.
  // Script name constants (defined in osdetect.cpp).
  static const char* korean_script_;
  static const char* japanese_script_;
  static const char* fraktur_script_;
  // Cached ids for scripts that get special-case handling.
  int korean_id_;
  int japanese_id_;
  int katakana_id_;
  int hiragana_id_;
  int han_id_;
  int hangul_id_;
  int latin_id_;
  int fraktur_id_;
  tesseract::Tesseract* tess_;  // Not owned.
  const GenericVector<int>* allowed_scripts_;  // Not owned; may be NULL.
};
// Runs orientation and script detection on the named image file, filling
// the OSResults. NOTE(review): the int return-value semantics are not
// visible in this header - see osdetect.cpp.
int orientation_and_script_detection(STRING& filename,
                                     OSResults*,
                                     tesseract::Tesseract*);
// Runs orientation/script detection on pre-extracted text blocks.
int os_detect(TO_BLOCK_LIST* port_blocks,
              OSResults* osr,
              tesseract::Tesseract* tess);
// Runs orientation/script detection on a list of blobs, optionally
// restricted to the given allowed_scripts.
int os_detect_blobs(const GenericVector<int>* allowed_scripts,
                    BLOBNBOX_CLIST* blob_list,
                    OSResults* osr,
                    tesseract::Tesseract* tess);
// Processes a single blob through the given orientation and script
// detectors, accumulating into the OSResults.
bool os_detect_blob(BLOBNBOX* bbox, OrientationDetector* o,
                    ScriptDetector* s, OSResults*,
                    tesseract::Tesseract* tess);
// Helper method to convert an orientation index to its value in degrees.
// The value represents the amount of clockwise rotation in degrees that must be
// applied for the text to be upright (readable).
TESS_API const int OrientationIdToValue(const int& id);
#endif // TESSERACT_CCMAIN_OSDETECT_H__
| C++ |
///////////////////////////////////////////////////////////////////////
// File: thresholder.h
// Description: Base API for thresholding images in tesseract.
// Author: Ray Smith
// Created: Mon May 12 11:00:15 PDT 2008
//
// (C) Copyright 2008, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#ifndef TESSERACT_CCMAIN_THRESHOLDER_H__
#define TESSERACT_CCMAIN_THRESHOLDER_H__
#include "platform.h"
#include "publictypes.h"
struct Pix;
namespace tesseract {
/// Base class for all tesseract image thresholding classes.
/// Specific classes can add new thresholding methods by
/// overriding ThresholdToPix.
/// Each instance deals with a single image, but the design is intended to
/// be useful for multiple calls to SetRectangle and ThresholdTo* if
/// desired.
class TESS_API ImageThresholder {
 public:
  ImageThresholder();
  virtual ~ImageThresholder();
  /// Destroy the Pix if there is one, freeing memory.
  virtual void Clear();
  /// Return true if no image has been set.
  bool IsEmpty() const;
  /// SetImage makes a copy of all the image data, so it may be deleted
  /// immediately after this call.
  /// Greyscale of 8 and color of 24 or 32 bits per pixel may be given.
  /// Palette color images will not work properly and must be converted to
  /// 24 bit.
  /// Binary images of 1 bit per pixel may also be given but they must be
  /// byte packed with the MSB of the first byte being the first pixel, and a
  /// one pixel is WHITE. For binary images set bytes_per_pixel=0.
  void SetImage(const unsigned char* imagedata, int width, int height,
                int bytes_per_pixel, int bytes_per_line);
  /// Store the coordinates of the rectangle to process for later use.
  /// Doesn't actually do any thresholding.
  void SetRectangle(int left, int top, int width, int height);
  /// Get enough parameters to be able to rebuild bounding boxes in the
  /// original image (not just within the rectangle).
  /// Left and top are enough with top-down coordinates, but
  /// the height of the rectangle and the image are needed for bottom-up.
  virtual void GetImageSizes(int* left, int* top, int* width, int* height,
                             int* imagewidth, int* imageheight);
  /// Return true if the source image is color.
  bool IsColor() const {
    return pix_channels_ >= 3;
  }
  /// Returns true if the source image is binary.
  bool IsBinary() const {
    return pix_channels_ == 0;
  }
  /// Returns the scale factor (see scale_), 1 meaning no scaling.
  int GetScaleFactor() const {
    return scale_;
  }
  // Set the resolution of the source image in pixels per inch.
  // This should be called right after SetImage(), and will let us return
  // appropriate font sizes for the text.
  void SetSourceYResolution(int ppi) {
    yres_ = ppi;
    estimated_res_ = ppi;
  }
  /// Returns the source y resolution in pixels per inch.
  int GetSourceYResolution() const {
    return yres_;
  }
  /// Returns the source y resolution multiplied by the scale factor.
  int GetScaledYResolution() const {
    return scale_ * yres_;
  }
  // Set the resolution of the source image in pixels per inch, as estimated
  // by the thresholder from the text size found during thresholding.
  // This value will be used to set internal size thresholds during recognition
  // and will not influence the output "point size." The default value is
  // the same as the source resolution. (yres_)
  void SetEstimatedResolution(int ppi) {
    estimated_res_ = ppi;
  }
  // Returns the estimated resolution, including any active scaling.
  // This value will be used to set internal size thresholds during recognition.
  int GetScaledEstimatedResolution() const {
    return scale_ * estimated_res_;
  }
  /// Pix vs raw, which to use? Pix is the preferred input for efficiency,
  /// since raw buffers are copied.
  /// SetImage for Pix clones its input, so the source pix may be pixDestroyed
  /// immediately after, but may not go away until after the Thresholder has
  /// finished with it.
  void SetImage(const Pix* pix);
  /// Threshold the source image as efficiently as possible to the output Pix.
  /// Creates a Pix and sets pix to point to the resulting pointer.
  /// Caller must use pixDestroy to free the created Pix.
  virtual void ThresholdToPix(PageSegMode pageseg_mode, Pix** pix);
  // Gets a pix that contains an 8 bit threshold value at each pixel. The
  // returned pix may be an integer reduction of the binary image such that
  // the scale factor may be inferred from the ratio of the sizes, even down
  // to the extreme of a 1x1 pixel thresholds image.
  // Ideally the 8 bit threshold should be the exact threshold used to generate
  // the binary image in ThresholdToPix, but this is not a hard constraint.
  // Returns NULL if the input is binary. PixDestroy after use.
  virtual Pix* GetPixRectThresholds();
  /// Get a clone/copy of the source image rectangle.
  /// The returned Pix must be pixDestroyed.
  /// This function will be used in the future by the page layout analysis, and
  /// the layout analysis that uses it will only be available with Leptonica,
  /// so there is no raw equivalent.
  Pix* GetPixRect();
  // Get a clone/copy of the source image rectangle, reduced to greyscale,
  // and at the same resolution as the output binary.
  // The returned Pix must be pixDestroyed.
  // Provided to the classifier to extract features from the greyscale image.
  virtual Pix* GetPixRectGrey();
 protected:
  // ----------------------------------------------------------------------
  // Utility functions that may be useful components for other thresholders.
  /// Common initialization shared between SetImage methods.
  virtual void Init();
  /// Return true if we are processing the full image.
  bool IsFullImage() const {
    return rect_left_ == 0 && rect_top_ == 0 &&
           rect_width_ == image_width_ && rect_height_ == image_height_;
  }
  // Otsu thresholds the rectangle, taking the rectangle from *this.
  void OtsuThresholdRectToPix(Pix* src_pix, Pix** out_pix) const;
  /// Threshold the rectangle, taking everything except the src_pix
  /// from the class, using thresholds/hi_values to the output pix.
  /// NOTE that num_channels is the size of the thresholds and hi_values
  /// arrays and also the bytes per pixel in src_pix.
  void ThresholdRectToPix(Pix* src_pix, int num_channels,
                          const int* thresholds, const int* hi_values,
                          Pix** pix) const;
 protected:
  /// Clone or other copy of the source Pix.
  /// The pix will always be PixDestroy()ed on destruction of the class.
  Pix* pix_;
  int image_width_;    ///< Width of source pix_.
  int image_height_;   ///< Height of source pix_.
  int pix_channels_;   ///< Number of 8-bit channels in pix_.
  int pix_wpl_;        ///< Words per line of pix_.
  // Limits of image rectangle to be processed.
  int scale_;          ///< Scale factor from original image.
  int yres_;           ///< y pixels/inch in source image.
  int estimated_res_;  ///< Resolution estimate from text size.
  int rect_left_;      ///< Left edge of rectangle to process.
  int rect_top_;       ///< Top edge of rectangle to process.
  int rect_width_;     ///< Width of rectangle to process.
  int rect_height_;    ///< Height of rectangle to process.
};
} // namespace tesseract.
#endif // TESSERACT_CCMAIN_THRESHOLDER_H__
| C++ |
///////////////////////////////////////////////////////////////////////
// File: ltrresultiterator.h
// Description: Iterator for tesseract results in strict left-to-right
// order that avoids using tesseract internal data structures.
// Author: Ray Smith
// Created: Fri Feb 26 11:01:06 PST 2010
//
// (C) Copyright 2010, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#ifndef TESSERACT_CCMAIN_LTR_RESULT_ITERATOR_H__
#define TESSERACT_CCMAIN_LTR_RESULT_ITERATOR_H__
#include "platform.h"
#include "pageiterator.h"
#include "unichar.h"
class BLOB_CHOICE_IT;
class WERD_RES;
namespace tesseract {
class Tesseract;
// Class to iterate over tesseract results, providing access to all levels
// of the page hierarchy, without including any tesseract headers or having
// to handle any tesseract structures.
// WARNING! This class points to data held within the TessBaseAPI class, and
// therefore can only be used while the TessBaseAPI class still exists and
// has not been subjected to a call of Init, SetImage, Recognize, Clear, End,
// DetectOS, or anything else that changes the internal PAGE_RES.
// See apitypes.h for the definition of PageIteratorLevel.
// See also base class PageIterator, which contains the bulk of the interface.
// LTRResultIterator adds text-specific methods for access to OCR output.
class TESS_API LTRResultIterator : public PageIterator {
  friend class ChoiceIterator;
 public:
  // page_res and tesseract come directly from the BaseAPI.
  // The rectangle parameters are copied indirectly from the Thresholder,
  // via the BaseAPI. They represent the coordinates of some rectangle in an
  // original image (in top-left-origin coordinates) and therefore the top-left
  // needs to be added to any output boxes in order to specify coordinates
  // in the original image. See TessBaseAPI::SetRectangle.
  // The scale and scaled_yres are in case the Thresholder scaled the image
  // rectangle prior to thresholding. Any coordinates in tesseract's image
  // must be divided by scale before adding (rect_left, rect_top).
  // The scaled_yres indicates the effective resolution of the binary image
  // that tesseract has been given by the Thresholder.
  // After the constructor, Begin has already been called.
  LTRResultIterator(PAGE_RES* page_res, Tesseract* tesseract,
                    int scale, int scaled_yres,
                    int rect_left, int rect_top,
                    int rect_width, int rect_height);
  virtual ~LTRResultIterator();
  // LTRResultIterators may be copied! This makes it possible to iterate over
  // all the objects at a lower level, while maintaining an iterator to
  // objects at a higher level. These constructors DO NOT CALL Begin, so
  // iterations will continue from the location of src.
  // TODO: For now the copy constructor and operator= only need the base class
  // versions, but if new data members are added, don't forget to add them!
  // ============= Moving around within the page ============.
  // See PageIterator.
  // ============= Accessing data ==============.
  // Returns the null terminated UTF-8 encoded text string for the current
  // object at the given level. Use delete [] to free after use.
  char* GetUTF8Text(PageIteratorLevel level) const;
  // Set the string inserted at the end of each text line. "\n" by default.
  void SetLineSeparator(const char *new_line);
  // Set the string inserted at the end of each paragraph. "\n" by default.
  void SetParagraphSeparator(const char *new_para);
  // Returns the mean confidence of the current object at the given level.
  // The number should be interpreted as a percent probability. (0.0f-100.0f)
  float Confidence(PageIteratorLevel level) const;
  // ============= Functions that refer to words only ============.
  // Returns the font attributes of the current word. If iterating at a higher
  // level object than words, eg textlines, then this will return the
  // attributes of the first word in that textline.
  // The actual return value is a string representing a font name. It points
  // to an internal table and SHOULD NOT BE DELETED. Lifespan is the same as
  // the iterator itself, ie rendered invalid by various members of
  // TessBaseAPI, including Init, SetImage, End or deleting the TessBaseAPI.
  // Pointsize is returned in printers points (1/72 inch.)
  const char* WordFontAttributes(bool* is_bold,
                                 bool* is_italic,
                                 bool* is_underlined,
                                 bool* is_monospace,
                                 bool* is_serif,
                                 bool* is_smallcaps,
                                 int* pointsize,
                                 int* font_id) const;
  // Return the name of the language used to recognize this word.
  // On error, NULL. Do not delete this pointer.
  const char* WordRecognitionLanguage() const;
  // Return the overall directionality of this word.
  StrongScriptDirection WordDirection() const;
  // Returns true if the current word was found in a dictionary.
  bool WordIsFromDictionary() const;
  // Returns true if the current word is numeric.
  bool WordIsNumeric() const;
  // Returns true if the word contains blamer information.
  bool HasBlamerInfo() const;
  // Returns the pointer to ParamsTrainingBundle stored in the BlamerBundle
  // of the current word.
  const void *GetParamsTrainingBundle() const;
  // Returns a pointer to the string with blamer information for this word.
  // Assumes that the word's blamer_bundle is not NULL.
  const char *GetBlamerDebug() const;
  // Returns a pointer to the string with misadaption information for this word.
  // Assumes that the word's blamer_bundle is not NULL.
  const char *GetBlamerMisadaptionDebug() const;
  // Returns true if a truth string was recorded for the current word.
  bool HasTruthString() const;
  // Returns true if the given string is equivalent to the truth string for
  // the current word.
  bool EquivalentToTruth(const char *str) const;
  // Returns a null terminated UTF-8 encoded truth string for the current word.
  // Use delete [] to free after use.
  char* WordTruthUTF8Text() const;
  // Returns a null terminated UTF-8 encoded normalized OCR string for the
  // current word. Use delete [] to free after use.
  char* WordNormedUTF8Text() const;
  // Returns a pointer to serialized choice lattice.
  // Fills lattice_size with the number of bytes in lattice data.
  const char *WordLattice(int *lattice_size) const;
  // ============= Functions that refer to symbols only ============.
  // Returns true if the current symbol is a superscript.
  // If iterating at a higher level object than symbols, eg words, then
  // this will return the attributes of the first symbol in that word.
  bool SymbolIsSuperscript() const;
  // Returns true if the current symbol is a subscript.
  // If iterating at a higher level object than symbols, eg words, then
  // this will return the attributes of the first symbol in that word.
  bool SymbolIsSubscript() const;
  // Returns true if the current symbol is a dropcap.
  // If iterating at a higher level object than symbols, eg words, then
  // this will return the attributes of the first symbol in that word.
  bool SymbolIsDropcap() const;
 protected:
  // Separator appended after each text line by GetUTF8Text ("\n" by default;
  // see SetLineSeparator). Not owned.
  const char *line_separator_;
  // Separator appended after each paragraph ("\n" by default;
  // see SetParagraphSeparator). Not owned.
  const char *paragraph_separator_;
};
// Class to iterate over the classifier choices for a single RIL_SYMBOL.
class ChoiceIterator {
 public:
  // Construction is from a LTRResultIterator that points to the symbol of
  // interest. The ChoiceIterator allows a one-shot iteration over the
  // choices for this symbol and after that it is useless.
  explicit ChoiceIterator(const LTRResultIterator& result_it);
  ~ChoiceIterator();
  // Moves to the next choice for the symbol and returns false if there
  // are none left.
  bool Next();
  // ============= Accessing data ==============.
  // Returns the null terminated UTF-8 encoded text string for the current
  // choice.
  // NOTE: Unlike LTRResultIterator::GetUTF8Text, the return points to an
  // internal structure and should NOT be delete[]ed to free after use.
  const char* GetUTF8Text() const;
  // Returns the confidence of the current choice.
  // The number should be interpreted as a percent probability. (0.0f-100.0f)
  float Confidence() const;
 private:
  // Pointer to the WERD_RES object owned by the API.
  WERD_RES* word_res_;
  // Iterator over the blob choices. Owned by this class (see destructor).
  BLOB_CHOICE_IT* choice_it_;
};
} // namespace tesseract.
#endif // TESSERACT_CCMAIN_LTR_RESULT_ITERATOR_H__
| C++ |
/**********************************************************************
* File: reject.cpp (Formerly reject.c)
* Description: Rejection functions used in tessedit
* Author: Phil Cheatle
* Created: Wed Sep 23 16:50:21 BST 1992
*
* (C) Copyright 1992, Hewlett-Packard Ltd.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
#ifdef _MSC_VER
#pragma warning(disable:4244) // Conversion warnings
#pragma warning(disable:4305) // int/float warnings
#endif
#include "tessvars.h"
#ifdef __UNIX__
#include <assert.h>
#include <errno.h>
#endif
#include "scanutils.h"
#include <ctype.h>
#include <string.h>
#include "genericvector.h"
#include "reject.h"
#include "control.h"
#include "docqual.h"
#include "globaloc.h" // For err_exit.
#include "globals.h"
#include "helpers.h"
#include "tesseractclass.h"
// Include automatically generated configuration file if running autoconf.
#ifdef HAVE_CONFIG_H
#include "config_auto.h"
#endif
// Instantiate the CLIST container wrapper (declaration + implementation)
// for the STRING type via the CLISTIZE macros.
CLISTIZEH (STRING) CLISTIZE (STRING)
/*************************************************************************
* set_done()
*
* Set the done flag based on the word acceptability criteria
*************************************************************************/
namespace tesseract {
// Sets word->done according to the acceptability criteria: the word must
// have been accepted by tess, contain no unrecognised blobs (spaces),
// survive the I/l/1 conflict check on pass 1, and either come from a
// dictionary/number permuter or be free of dangerous ambiguities.
void Tesseract::set_done(WERD_RES *word, inT16 pass) {
  const WERD_CHOICE *choice = word->best_choice;
  // Accepted by tess and free of unrecognised blobs?
  word->done = word->tess_accepted &&
      strchr(choice->unichar_string().string(), ' ') == NULL;
  const bool is_ambig = choice->dangerous_ambig_found();
  const bool from_dict = choice->permuter() == SYSTEM_DAWG_PERM ||
                         choice->permuter() == FREQ_DAWG_PERM ||
                         choice->permuter() == USER_DAWG_PERM;
  // On pass 1, non-dictionary or ambiguous words with a potential I/l/1
  // confusion are not done yet.
  if (word->done && pass == 1 && (!from_dict || is_ambig) &&
      one_ell_conflict(word, FALSE)) {
    if (tessedit_rejection_debug) tprintf("one_ell_conflict detected\n");
    word->done = FALSE;
  }
  // Words that are neither dictionary nor pure-number, or that carry a
  // dangerous ambiguity, are not done either.
  if (word->done &&
      ((!from_dict && choice->permuter() != NUMBER_PERM) || is_ambig)) {
    if (tessedit_rejection_debug) tprintf("non-dict or ambig word detected\n");
    word->done = FALSE;
  }
  if (tessedit_rejection_debug) {
    tprintf("set_done(): done=%d\n", word->done);
    choice->print("");
  }
}
/*************************************************************************
* make_reject_map()
*
* Sets the done flag to indicate whether the result is acceptable.
*
* Sets a reject map for the word.
*************************************************************************/
void Tesseract::make_reject_map(WERD_RES *word, ROW *row, inT16 pass) {
  int i;
  int offset;
  flip_0O(word);
  check_debug_pt(word, -1);  // For trap only
  set_done(word, pass);  // Set acceptance
  // One reject-map entry per unichar in the best choice.
  word->reject_map.initialise(word->best_choice->unichar_lengths().length());
  reject_blanks(word);
  /*
  0: Rays original heuristic - the baseline
  */
  if (tessedit_reject_mode == 0) {
    if (!word->done)
      reject_poor_matches(word);
  } else if (tessedit_reject_mode == 5) {
    /*
    5: Reject I/1/l from words where there is no strong contextual confirmation;
      the whole of any unacceptable words (incl PERM rej of dubious 1/I/ls);
      and the whole of any words which are very small
    */
    if (kBlnXHeight / word->denorm.y_scale() <= min_sane_x_ht_pixels) {
      // x-height in image pixels is too small to trust.
      word->reject_map.rej_word_small_xht();
    } else {
      one_ell_conflict(word, TRUE);
      /*
      Originally the code here just used the done flag. Now I have duplicated
      and unpacked the conditions for setting the done flag so that each
      mechanism can be turned on or off independently. This works WITHOUT
      affecting the done flag setting.
      */
      if (rej_use_tess_accepted && !word->tess_accepted)
        word->reject_map.rej_word_not_tess_accepted ();
      if (rej_use_tess_blanks &&
          (strchr (word->best_choice->unichar_string().string (), ' ') != NULL))
        word->reject_map.rej_word_contains_blanks ();
      WERD_CHOICE* best_choice = word->best_choice;
      if (rej_use_good_perm) {
        if ((best_choice->permuter() == SYSTEM_DAWG_PERM ||
             best_choice->permuter() == FREQ_DAWG_PERM ||
             best_choice->permuter() == USER_DAWG_PERM) &&
            (!rej_use_sensible_wd ||
             acceptable_word_string(*word->uch_set,
                                    best_choice->unichar_string().string(),
                                    best_choice->unichar_lengths().string()) !=
                 AC_UNACCEPTABLE)) {
          // PASSED TEST
        } else if (best_choice->permuter() == NUMBER_PERM) {
          if (rej_alphas_in_number_perm) {
            // Reject accepted alpha characters inside a numeric word.
            for (i = 0, offset = 0;
                 best_choice->unichar_string()[offset] != '\0';
                 offset += best_choice->unichar_lengths()[i++]) {
              if (word->reject_map[i].accepted() &&
                  word->uch_set->get_isalpha(
                      best_choice->unichar_string().string() + offset,
                      best_choice->unichar_lengths()[i]))
                word->reject_map[i].setrej_bad_permuter();
              // rej alpha
            }
          }
        } else {
          word->reject_map.rej_word_bad_permuter();
        }
      }
      /* Ambig word rejection was here once !!*/
    }
  } else {
    tprintf("BAD tessedit_reject_mode\n");
    err_exit();
  }
  if (tessedit_image_border > -1)
    reject_edge_blobs(word);
  check_debug_pt (word, 10);
  if (tessedit_rejection_debug) {
    tprintf("Permuter Type = %d\n", word->best_choice->permuter ());
    tprintf("Certainty: %f Rating: %f\n",
            word->best_choice->certainty (), word->best_choice->rating ());
    tprintf("Dict word: %d\n", dict_word(*(word->best_choice)));
  }
  flip_hyphens(word);
  check_debug_pt(word, 20);
}
} // namespace tesseract
// Marks every blank (unrecognised blob) in the best choice as a tess
// failure in the word's reject map.
void reject_blanks(WERD_RES *word) {
  const char *str = word->best_choice->unichar_string().string();
  const char *lens = word->best_choice->unichar_lengths().string();
  int char_index = 0;
  for (int byte_pos = 0; str[byte_pos] != '\0';
       byte_pos += lens[char_index], char_index += 1) {
    // A space in the output means tess could not classify the blob.
    if (str[byte_pos] == ' ')
      word->reject_map[char_index].setrej_tess_failure();
  }
}
namespace tesseract {
// Rejects every character of the word that belongs to the I/l/1 conflict
// set (conflict_set_I_l_1), marking it in the reject map.
void Tesseract::reject_I_1_L(WERD_RES *word) {
  const char *str = word->best_choice->unichar_string().string();
  const char *lens = word->best_choice->unichar_lengths().string();
  int char_index = 0;
  for (int byte_pos = 0; str[byte_pos] != '\0';
       byte_pos += lens[char_index], char_index += 1) {
    if (STRING(conflict_set_I_l_1).contains(str[byte_pos])) {
      // rej 1Il conflict
      word->reject_map[char_index].setrej_1Il_conflict();
    }
  }
}
}  // namespace tesseract
// Rejects characters whose certainty falls below the threshold computed by
// compute_reject_threshold(); spaces (unrecognised blobs) are marked as
// tess failures instead.
void reject_poor_matches(WERD_RES *word) {
  const WERD_CHOICE *choice = word->best_choice;
  const float threshold = compute_reject_threshold(choice);
  const int num_chars = choice->length();
  for (int i = 0; i < num_chars; ++i) {
    if (choice->unichar_id(i) == UNICHAR_SPACE) {
      word->reject_map[i].setrej_tess_failure();
    } else if (choice->certainty(i) < threshold) {
      word->reject_map[i].setrej_poor_match();
    }
  }
}
/**********************************************************************
 * compute_reject_threshold
 *
 * Set a rejection threshold for this word.
 * Initially this is a trivial function which looks for the largest
 * gap in the certainty value.
 **********************************************************************/
float compute_reject_threshold(WERD_CHOICE* word) {
  float bestgap = 0.0f;             // biggest gap
  float gapstart;                   // bottom of gap
  int blob_count = word->length();
  // Guard against an empty word: ratings[0] below would be out of bounds.
  if (blob_count == 0)
    return 0.0f;
  // Collect the per-blob certainties and sort them ascending.
  GenericVector<float> ratings;
  ratings.init_to_size(blob_count, 0.0f);
  for (int i = 0; i < blob_count; ++i) {
    ratings[i] = word->certainty(i);
  }
  ratings.sort();
  gapstart = ratings[0] - 1;        // all reject if none better
  // With at least 3 blobs, place the threshold in the middle of the
  // largest gap between consecutive sorted certainties.
  if (blob_count >= 3) {
    for (int index = 0; index < blob_count - 1; index++) {
      if (ratings[index + 1] - ratings[index] > bestgap) {
        bestgap = ratings[index + 1] - ratings[index];
        // find biggest
        gapstart = ratings[index];
      }
    }
  }
  return gapstart + bestgap / 2;
}
/*************************************************************************
* reject_edge_blobs()
*
* If the word is perilously close to the edge of the image, reject those blobs
* in the word which are too close to the edge as they could be clipped.
*************************************************************************/
namespace tesseract {
// If the word's bounding box comes within tessedit_image_border of the
// image edge, rejects each blob whose box is itself that close to the
// edge, as it could be clipped.
void Tesseract::reject_edge_blobs(WERD_RES *word) {
  const TBOX word_box = word->word->bounding_box();
  // Use the box_word as it is already denormed back to image coordinates.
  const int blobcount = word->box_word->length();
  const bool word_near_edge =
      word_box.left() < tessedit_image_border ||
      word_box.bottom() < tessedit_image_border ||
      word_box.right() + tessedit_image_border > ImageWidth() - 1 ||
      word_box.top() + tessedit_image_border > ImageHeight() - 1;
  if (!word_near_edge)
    return;  // Whole word is comfortably inside the border.
  ASSERT_HOST(word->reject_map.length() == blobcount);
  for (int blobindex = 0; blobindex < blobcount; blobindex++) {
    const TBOX blob_box = word->box_word->BlobBox(blobindex);
    const bool blob_near_edge =
        blob_box.left() < tessedit_image_border ||
        blob_box.bottom() < tessedit_image_border ||
        blob_box.right() + tessedit_image_border > ImageWidth() - 1 ||
        blob_box.top() + tessedit_image_border > ImageHeight() - 1;
    if (blob_near_edge) {
      word->reject_map[blobindex].setrej_edge_char();  // Close to edge
    }
  }
}
/**********************************************************************
 * one_ell_conflict()
 *
 * Identify words where there is a potential I/l/1 error.
 * - A bundle of contextual heuristics!
 *
 * Returns TRUE if a potential conflict is found; if update_map is TRUE
 * the offending characters are also rejected in word_res->reject_map.
 * NOTE: the best_choice string is temporarily mutated in place to test
 * I<->l alternatives against the dictionary, and always restored.
 **********************************************************************/
BOOL8 Tesseract::one_ell_conflict(WERD_RES *word_res, BOOL8 update_map) {
  const char *word;
  const char *lengths;
  inT16 word_len;                // its length
  inT16 first_alphanum_index_;
  inT16 first_alphanum_offset_;
  inT16 i;
  inT16 offset;
  BOOL8 non_conflict_set_char;   // non conf set a/n?
  BOOL8 conflict = FALSE;
  BOOL8 allow_1s;
  ACCEPTABLE_WERD_TYPE word_type;
  BOOL8 dict_perm_type;
  BOOL8 dict_word_ok;
  int dict_word_type;
  word = word_res->best_choice->unichar_string().string ();
  lengths = word_res->best_choice->unichar_lengths().string();
  word_len = strlen (lengths);
  /*
  If there are no occurrences of the conflict set characters then the word
  is OK.
  */
  if (strpbrk (word, conflict_set_I_l_1.string ()) == NULL)
    return FALSE;
  /*
  There is a conflict if there are NO other (confirmed) alphanumerics apart
  from those in the conflict set.
  */
  for (i = 0, offset = 0, non_conflict_set_char = FALSE;
       (i < word_len) && !non_conflict_set_char; offset += lengths[i++])
    non_conflict_set_char =
        (word_res->uch_set->get_isalpha(word + offset, lengths[i]) ||
         word_res->uch_set->get_isdigit(word + offset, lengths[i])) &&
        !STRING (conflict_set_I_l_1).contains (word[offset]);
  if (!non_conflict_set_char) {
    if (update_map)
      reject_I_1_L(word_res);
    return TRUE;
  }
  /*
  If the word is accepted by a dawg permuter, and the first alpha character
  is "I" or "l", check to see if the alternative is also a dawg word. If it
  is, then there is a potential error otherwise the word is ok.
  */
  dict_perm_type = (word_res->best_choice->permuter () == SYSTEM_DAWG_PERM) ||
      (word_res->best_choice->permuter () == USER_DAWG_PERM) ||
      (rej_trust_doc_dawg &&
       (word_res->best_choice->permuter () == DOC_DAWG_PERM)) ||
      (word_res->best_choice->permuter () == FREQ_DAWG_PERM);
  dict_word_type = dict_word(*(word_res->best_choice));
  dict_word_ok = (dict_word_type > 0) &&
      (rej_trust_doc_dawg || (dict_word_type != DOC_DAWG_PERM));
  if ((rej_1Il_use_dict_word && dict_word_ok) ||
      (rej_1Il_trust_permuter_type && dict_perm_type) ||
      (dict_perm_type && dict_word_ok)) {
    first_alphanum_index_ = first_alphanum_index (word, lengths);
    first_alphanum_offset_ = first_alphanum_offset (word, lengths);
    if (lengths[first_alphanum_index_] == 1 &&
        word[first_alphanum_offset_] == 'I') {
      // Temporarily flip leading 'I' to 'l'; if the alternative is also a
      // dictionary word, the word is genuinely ambiguous.
      word_res->best_choice->unichar_string()[first_alphanum_offset_] = 'l';
      if (safe_dict_word(word_res) > 0) {
        word_res->best_choice->unichar_string()[first_alphanum_offset_] = 'I';
        if (update_map)
          word_res->reject_map[first_alphanum_index_].
              setrej_1Il_conflict();
        return TRUE;
      }
      else {
        word_res->best_choice->unichar_string()[first_alphanum_offset_] = 'I';
        return FALSE;
      }
    }
    if (lengths[first_alphanum_index_] == 1 &&
        word[first_alphanum_offset_] == 'l') {
      // Same check with leading 'l' flipped to 'I'.
      word_res->best_choice->unichar_string()[first_alphanum_offset_] = 'I';
      if (safe_dict_word(word_res) > 0) {
        word_res->best_choice->unichar_string()[first_alphanum_offset_] = 'l';
        if (update_map)
          word_res->reject_map[first_alphanum_index_].
              setrej_1Il_conflict();
        return TRUE;
      }
      else {
        word_res->best_choice->unichar_string()[first_alphanum_offset_] = 'l';
        return FALSE;
      }
    }
    return FALSE;
  }
  /*
  NEW 1Il code. The old code relied on permuter types too much. In fact,
  tess will use TOP_CHOICE permute for good things like "palette".
  In this code the string is examined independently to see if it looks like
  a well formed word.
  */
  /*
  REGARDLESS OF PERMUTER, see if flipping a leading I/l generates a
  dictionary word.
  */
  first_alphanum_index_ = first_alphanum_index (word, lengths);
  first_alphanum_offset_ = first_alphanum_offset (word, lengths);
  if (lengths[first_alphanum_index_] == 1 &&
      word[first_alphanum_offset_] == 'l') {
    // If 'l'->'I' makes a dictionary word, accept the flipped reading.
    word_res->best_choice->unichar_string()[first_alphanum_offset_] = 'I';
    if (safe_dict_word(word_res) > 0)
      return FALSE;
    else
      word_res->best_choice->unichar_string()[first_alphanum_offset_] = 'l';
  }
  else if (lengths[first_alphanum_index_] == 1 &&
           word[first_alphanum_offset_] == 'I') {
    word_res->best_choice->unichar_string()[first_alphanum_offset_] = 'l';
    if (safe_dict_word(word_res) > 0)
      return FALSE;
    else
      word_res->best_choice->unichar_string()[first_alphanum_offset_] = 'I';
  }
  /*
  For strings containing digits:
    If there are no alphas OR the numeric permuter liked the word,
      reject any non 1 conflict chs
    Else reject all conflict chs
  */
  if (word_contains_non_1_digit (word, lengths)) {
    allow_1s = (alpha_count (word, lengths) == 0) ||
        (word_res->best_choice->permuter () == NUMBER_PERM);
    inT16 offset;
    conflict = FALSE;
    for (i = 0, offset = 0; word[offset] != '\0';
         offset += word_res->best_choice->unichar_lengths()[i++]) {
      if ((!allow_1s || (word[offset] != '1')) &&
          STRING (conflict_set_I_l_1).contains (word[offset])) {
        if (update_map)
          word_res->reject_map[i].setrej_1Il_conflict ();
        conflict = TRUE;
      }
    }
    return conflict;
  }
  /*
  For anything else. See if it conforms to an acceptable word type. If so,
  treat accordingly.
  */
  word_type = acceptable_word_string(*word_res->uch_set, word, lengths);
  if ((word_type == AC_LOWER_CASE) || (word_type == AC_INITIAL_CAP)) {
    first_alphanum_index_ = first_alphanum_index (word, lengths);
    first_alphanum_offset_ = first_alphanum_offset (word, lengths);
    if (STRING (conflict_set_I_l_1).contains (word[first_alphanum_offset_])) {
      if (update_map)
        word_res->reject_map[first_alphanum_index_].
            setrej_1Il_conflict ();
      return TRUE;
    }
    else
      return FALSE;
  }
  else if (word_type == AC_UPPER_CASE) {
    return FALSE;
  }
  else {
    if (update_map)
      reject_I_1_L(word_res);
    return TRUE;
  }
}
// Returns the character index (not byte offset) of the first alphanumeric
// unichar in the word, or -1 if there is none.
inT16 Tesseract::first_alphanum_index(const char *word,
                                      const char *word_lengths) {
  inT16 index = 0;
  inT16 pos = 0;
  while (word[pos] != '\0') {
    if (unicharset.get_isalpha(word + pos, word_lengths[index]) ||
        unicharset.get_isdigit(word + pos, word_lengths[index]))
      return index;
    pos += word_lengths[index++];
  }
  return -1;
}
// Returns the byte offset (not character index) of the first alphanumeric
// unichar in the word, or -1 if there is none.
inT16 Tesseract::first_alphanum_offset(const char *word,
                                       const char *word_lengths) {
  inT16 index = 0;
  inT16 pos = 0;
  while (word[pos] != '\0') {
    if (unicharset.get_isalpha(word + pos, word_lengths[index]) ||
        unicharset.get_isdigit(word + pos, word_lengths[index]))
      return pos;
    pos += word_lengths[index++];
  }
  return -1;
}
// Counts the alphabetic unichars in the word.
inT16 Tesseract::alpha_count(const char *word,
                             const char *word_lengths) {
  inT16 total = 0;
  inT16 index = 0;
  inT16 pos = 0;
  while (word[pos] != '\0') {
    if (unicharset.get_isalpha (word + pos, word_lengths[index]))
      ++total;
    pos += word_lengths[index++];
  }
  return total;
}
// Returns TRUE if the word contains any digit other than a single-byte '1'.
BOOL8 Tesseract::word_contains_non_1_digit(const char *word,
                                           const char *word_lengths) {
  inT16 index = 0;
  for (inT16 pos = 0; word[pos] != '\0'; pos += word_lengths[index++]) {
    const bool is_digit =
        unicharset.get_isdigit (word + pos, word_lengths[index]);
    const bool is_single_one =
        word_lengths[index] == 1 && word[pos] == '1';
    if (is_digit && !is_single_one)
      return TRUE;
  }
  return FALSE;
}
/*************************************************************************
 * dont_allow_1Il()
 * Don't unreject LONE accepted 1Il conflict set chars
 *************************************************************************/
void Tesseract::dont_allow_1Il(WERD_RES *word) {
  int i = 0;
  int offset;
  int word_len = word->reject_map.length();
  const char *s = word->best_choice->unichar_string().string();
  const char *lengths = word->best_choice->unichar_lengths().string();
  BOOL8 accepted_1Il = FALSE;
  // First pass: if any accepted character is a non-conflict alphanumeric,
  // the acceptances are trustworthy and nothing needs rejecting. Otherwise
  // note whether any conflict-set character was accepted.
  for (i = 0, offset = 0; i < word_len;
       offset += word->best_choice->unichar_lengths()[i++]) {
    if (word->reject_map[i].accepted()) {
      if (STRING(conflict_set_I_l_1).contains(s[offset])) {
        accepted_1Il = TRUE;
      } else {
        if (word->uch_set->get_isalpha(s + offset, lengths[i]) ||
            word->uch_set->get_isdigit(s + offset, lengths[i]))
          return;  // >=1 non 1Il ch accepted
      }
    }
  }
  if (!accepted_1Il)
    return;  // Nothing to worry about
  // Second pass: reject every accepted conflict-set character, since no
  // other alphanumeric acceptance backs them up.
  for (i = 0, offset = 0; i < word_len;
       offset += word->best_choice->unichar_lengths()[i++]) {
    if (STRING(conflict_set_I_l_1).contains(s[offset]) &&
        word->reject_map[i].accepted())
      word->reject_map[i].setrej_postNN_1Il();
  }
}
// Counts the accepted characters of the best choice that are letters
// or digits.
inT16 Tesseract::count_alphanums(WERD_RES *word_res) {
  const WERD_CHOICE *choice = word_res->best_choice;
  int num_alphanums = 0;
  const int len = word_res->reject_map.length();
  for (int i = 0; i < len; ++i) {
    if (!word_res->reject_map[i].accepted())
      continue;  // Rejected characters do not count.
    if (word_res->uch_set->get_isalpha(choice->unichar_id(i)) ||
        word_res->uch_set->get_isdigit(choice->unichar_id(i)))
      ++num_alphanums;
  }
  return num_alphanums;
}
// Rejects the whole word when the fraction of already-rejected characters
// reaches rej_whole_of_mostly_reject_word_fract.
void Tesseract::reject_mostly_rejects(WERD_RES *word) {
  const float reject_fraction =
      static_cast<float>(word->reject_map.reject_count()) /
      word->reject_map.length();
  if (reject_fraction >= rej_whole_of_mostly_reject_word_fract)
    word->reject_map.rej_word_mostly_rej();
}
// Returns TRUE for a word that is a repetition of a single allowed
// non-alphanumeric character (from ok_repeated_ch_non_alphanum_wds) and
// whose every character is of accepted quality.
BOOL8 Tesseract::repeated_nonalphanum_wd(WERD_RES *word, ROW *row) {
  // Single-character words do not qualify.
  if (word->best_choice->unichar_lengths().length() <= 1)
    return FALSE;
  // The first char must be one of the allowed repeatable characters.
  if (!STRING(ok_repeated_ch_non_alphanum_wds).
      contains(word->best_choice->unichar_string()[0]))
    return FALSE;
  // Every remaining char must be identical to the first.
  UNICHAR_ID first_id = word->best_choice->unichar_id(0);
  for (int i = 1; i < word->best_choice->length(); ++i) {
    if (word->best_choice->unichar_id(i) != first_id)
      return FALSE;
  }
  // Finally the whole word must be of accepted quality.
  inT16 char_quality;
  inT16 accepted_char_quality;
  word_char_quality(word, row, &char_quality, &accepted_char_quality);
  return (word->best_choice->unichar_lengths().length() == char_quality &&
          char_quality == accepted_char_quality) ? TRUE : FALSE;
}
// Returns the dict_word permuter type of the best choice, except that
// document-dawg matches are reported as 0 (not considered "safe").
inT16 Tesseract::safe_dict_word(const WERD_RES *werd_res) {
  const int perm = werd_res->tesseract->dict_word(*werd_res->best_choice);
  return (perm == DOC_DAWG_PERM) ? 0 : perm;
}
// Note: After running this function word_res->ratings
// might not contain the right BLOB_CHOICE corresponding to each character
// in word_res->best_choice.
//
// Uses blob geometry (width/height aspect ratio) to flip '.' to '-' and to
// accept/suspect existing '-' results, controlled by the
// tessedit_lower_flip_hyphen / tessedit_upper_flip_hyphen thresholds.
void Tesseract::flip_hyphens(WERD_RES *word_res) {
  WERD_CHOICE *best_choice = word_res->best_choice;
  int i;
  int prev_right = -9999;
  int next_left;
  TBOX out_box;
  float aspect_ratio;
  // A lower threshold <= 1 disables hyphen flipping entirely.
  if (tessedit_lower_flip_hyphen <= 1)
    return;
  int num_blobs = word_res->rebuild_word->NumBlobs();
  UNICHAR_ID unichar_dash = word_res->uch_set->unichar_to_id("-");
  for (i = 0; i < best_choice->length() && i < num_blobs; ++i) {
    TBLOB* blob = word_res->rebuild_word->blobs[i];
    out_box = blob->bounding_box();
    // Left edge of the following blob (sentinel for the last blob), used to
    // verify this blob does not touch its right neighbour.
    if (i + 1 == num_blobs)
      next_left = 9999;
    else
      next_left = word_res->rebuild_word->blobs[i + 1]->bounding_box().left();
    // Dont touch small or touching blobs - it is too dangerous.
    if ((out_box.width() > 8 * word_res->denorm.x_scale()) &&
        (out_box.left() > prev_right) && (out_box.right() < next_left)) {
      aspect_ratio = out_box.width() / (float) out_box.height();
      if (word_res->uch_set->eq(best_choice->unichar_id(i), ".")) {
        // A very wide "." is really a hyphen - flip it if '-' is available.
        if (aspect_ratio >= tessedit_upper_flip_hyphen &&
            word_res->uch_set->contains_unichar_id(unichar_dash) &&
            word_res->uch_set->get_enabled(unichar_dash)) {
          /* Certain HYPHEN */
          best_choice->set_unichar_id(unichar_dash, i);
          if (word_res->reject_map[i].rejected())
            word_res->reject_map[i].setrej_hyphen_accept();
        }
        if ((aspect_ratio > tessedit_lower_flip_hyphen) &&
            word_res->reject_map[i].accepted())
          // Suspected HYPHEN - moderately wide dot: mark it suspect.
          word_res->reject_map[i].setrej_hyphen ();
      }
      else if (best_choice->unichar_id(i) == unichar_dash) {
        // Wide enough to really be a hyphen: un-reject it.
        if ((aspect_ratio >= tessedit_upper_flip_hyphen) &&
            (word_res->reject_map[i].rejected()))
          word_res->reject_map[i].setrej_hyphen_accept();
        // Certain HYPHEN
        // Too narrow to be a hyphen: mark it suspect.
        if ((aspect_ratio <= tessedit_lower_flip_hyphen) &&
            (word_res->reject_map[i].accepted()))
          // Suspected HYPHEN
          word_res->reject_map[i].setrej_hyphen();
      }
    }
    prev_right = out_box.right();
  }
}
// Note: After running this function word_res->ratings
// might not contain the right BLOB_CHOICE corresponding to each character
// in word_res->best_choice.
//
// Contextually flips 0 (zero) and O (capital o) in the best choice using
// the classes of the neighbouring characters (non-O uppercase letters vs
// non-0 digits) to decide which of the two is more plausible.  Controlled
// by the tessedit_flip_0O parameter.
void Tesseract::flip_0O(WERD_RES *word_res) {
  WERD_CHOICE *best_choice = word_res->best_choice;
  int i;
  TBOX out_box;
  if (!tessedit_flip_0O)
    return;
  int num_blobs = word_res->rebuild_word->NumBlobs();
  // First pass: if any uppercase/digit blob extends outside the normal
  // x-height band, the word may contain sub/superscripts where flipping
  // would be unsafe, so bail out.
  for (i = 0; i < best_choice->length() && i < num_blobs; ++i) {
    TBLOB* blob = word_res->rebuild_word->blobs[i];
    if (word_res->uch_set->get_isupper(best_choice->unichar_id(i)) ||
        word_res->uch_set->get_isdigit(best_choice->unichar_id(i))) {
      out_box = blob->bounding_box();
      if ((out_box.top() < kBlnBaselineOffset + kBlnXHeight) ||
          (out_box.bottom() > kBlnBaselineOffset + kBlnXHeight / 4))
        return;  // Beware words with sub/superscripts.
    }
  }
  UNICHAR_ID unichar_0 = word_res->uch_set->unichar_to_id("0");
  UNICHAR_ID unichar_O = word_res->uch_set->unichar_to_id("O");
  if (unichar_0 == INVALID_UNICHAR_ID ||
      !word_res->uch_set->get_enabled(unichar_0) ||
      unichar_O == INVALID_UNICHAR_ID ||
      !word_res->uch_set->get_enabled(unichar_O)) {
    return;  // 0 or O are not present/enabled in unicharset
  }
  // Second pass: pattern-match each 0/O against its neighbours.  The loop
  // starts at i = 1 so unichar_id(i-1) is always valid; some branches also
  // advance i to skip characters they have already fixed.
  for (i = 1; i < best_choice->length(); ++i) {
    if (best_choice->unichar_id(i) == unichar_0 ||
        best_choice->unichar_id(i) == unichar_O) {
      /* A0A : single 0/O between two non-O uppercase letters -> O. */
      if ((i+1) < best_choice->length() &&
          non_O_upper(*word_res->uch_set, best_choice->unichar_id(i-1)) &&
          non_O_upper(*word_res->uch_set, best_choice->unichar_id(i+1))) {
        best_choice->set_unichar_id(unichar_O, i);
      }
      /* A00A : pair of 0/O between non-O uppercase letters -> OO.
         Only the first is set here; i++ lets the next iteration's A0A
         rule (or the fall-through) handle the second one. */
      if (non_O_upper(*word_res->uch_set, best_choice->unichar_id(i-1)) &&
          (i+1) < best_choice->length() &&
          (best_choice->unichar_id(i+1) == unichar_0 ||
           best_choice->unichar_id(i+1) == unichar_O) &&
          (i+2) < best_choice->length() &&
          non_O_upper(*word_res->uch_set, best_choice->unichar_id(i+2))) {
        best_choice->set_unichar_id(unichar_O, i);
        i++;
      }
      /* AA0<non digit or end of word> : trailing 0 after uppercase -> O
         (but not before 'l'/'I', which are themselves ambiguous). */
      if ((i > 1) &&
          non_O_upper(*word_res->uch_set, best_choice->unichar_id(i-2)) &&
          non_O_upper(*word_res->uch_set, best_choice->unichar_id(i-1)) &&
          (((i+1) < best_choice->length() &&
           !word_res->uch_set->get_isdigit(best_choice->unichar_id(i+1)) &&
           !word_res->uch_set->eq(best_choice->unichar_id(i+1), "l") &&
           !word_res->uch_set->eq(best_choice->unichar_id(i+1), "I")) ||
           (i == best_choice->length() - 1))) {
        best_choice->set_unichar_id(unichar_O, i);
      }
      /* 9O9 : single 0/O between two non-0 digits -> 0. */
      if (non_0_digit(*word_res->uch_set, best_choice->unichar_id(i-1)) &&
          (i+1) < best_choice->length() &&
          non_0_digit(*word_res->uch_set, best_choice->unichar_id(i+1))) {
        best_choice->set_unichar_id(unichar_0, i);
      }
      /* 9OOO : run of three 0/O after a non-0 digit -> 000 (skip 2). */
      if (non_0_digit(*word_res->uch_set, best_choice->unichar_id(i-1)) &&
          (i+2) < best_choice->length() &&
          (best_choice->unichar_id(i+1) == unichar_0 ||
           best_choice->unichar_id(i+1) == unichar_O) &&
          (best_choice->unichar_id(i+2) == unichar_0 ||
           best_choice->unichar_id(i+2) == unichar_O)) {
        best_choice->set_unichar_id(unichar_0, i);
        best_choice->set_unichar_id(unichar_0, i+1);
        best_choice->set_unichar_id(unichar_0, i+2);
        i += 2;
      }
      /* 9OO<non upper> : pair of 0/O after a digit, not before an
         uppercase letter -> 00 (skip 1). */
      if (non_0_digit(*word_res->uch_set, best_choice->unichar_id(i-1)) &&
          (i+2) < best_choice->length() &&
          (best_choice->unichar_id(i+1) == unichar_0 ||
           best_choice->unichar_id(i+1) == unichar_O) &&
          !word_res->uch_set->get_isupper(best_choice->unichar_id(i+2))) {
        best_choice->set_unichar_id(unichar_0, i);
        best_choice->set_unichar_id(unichar_0, i+1);
        i++;
      }
      /* 9O<non upper> : single 0/O after a digit, not before an
         uppercase letter -> 0. */
      if (non_0_digit(*word_res->uch_set, best_choice->unichar_id(i-1)) &&
          (i+1) < best_choice->length() &&
          !word_res->uch_set->get_isupper(best_choice->unichar_id(i+1))) {
        best_choice->set_unichar_id(unichar_0, i);
      }
      /* 9[.,]OOO.. : digits (or O) before a separator imply a number:
         convert the whole following run of 0/O to 0. */
      if ((i > 1) &&
          (word_res->uch_set->eq(best_choice->unichar_id(i-1), ".") ||
           word_res->uch_set->eq(best_choice->unichar_id(i-1), ",")) &&
          (word_res->uch_set->get_isdigit(best_choice->unichar_id(i-2)) ||
           best_choice->unichar_id(i-2) == unichar_O)) {
        if (best_choice->unichar_id(i-2) == unichar_O) {
          best_choice->set_unichar_id(unichar_0, i-2);
        }
        while (i < best_choice->length() &&
               (best_choice->unichar_id(i) == unichar_O ||
                best_choice->unichar_id(i) == unichar_0)) {
          best_choice->set_unichar_id(unichar_0, i);
          i++;
        }
        i--;  // Compensate for the outer loop's ++i.
      }
    }
  }
}
// Returns TRUE for any uppercase letter other than 'O'.
BOOL8 Tesseract::non_O_upper(const UNICHARSET& ch_set, UNICHAR_ID unichar_id) {
  if (!ch_set.get_isupper(unichar_id))
    return FALSE;
  return !ch_set.eq(unichar_id, "O");
}
// Returns TRUE for any digit other than '0'.
BOOL8 Tesseract::non_0_digit(const UNICHARSET& ch_set, UNICHAR_ID unichar_id) {
  if (!ch_set.get_isdigit(unichar_id))
    return FALSE;
  return !ch_set.eq(unichar_id, "0");
}
} // namespace tesseract
| C++ |
/**********************************************************************
* File: cube_reco_context.cpp
* Description: Implementation of the Cube Recognition Context Class
* Author: Ahmad Abdulkader
* Created: 2007
*
* (C) Copyright 2008, Google Inc.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
#include <string>
#include <limits.h>
#include "cube_reco_context.h"
#include "classifier_factory.h"
#include "cube_tuning_params.h"
#include "dict.h"
#include "feature_bmp.h"
#include "tessdatamanager.h"
#include "tesseractclass.h"
#include "tess_lang_model.h"
namespace tesseract {
// Instantiate a CubeRecoContext object using a Tesseract object.
// CubeRecoContext will not take ownership of tess_obj, but will
// record the pointer to it and will make use of various Tesseract
// components (language model, flags, etc). Thus the caller should
// keep tess_obj alive so long as the instantiated CubeRecoContext is used.
// All owned component pointers start out NULL and are populated by Load().
CubeRecoContext::CubeRecoContext(Tesseract *tess_obj)
    : tess_obj_(tess_obj),
      lang_(""),
      loaded_(false),
      lang_mod_(NULL),
      params_(NULL),
      char_classifier_(NULL),
      char_set_(NULL),
      word_size_model_(NULL),
      char_bigrams_(NULL),
      word_unigrams_(NULL),
      noisy_input_(false),
      size_normalization_(false) {
}
// Frees all components owned by this context.  delete of a NULL pointer
// is a no-op, so the previous per-member NULL checks were redundant, and
// re-assigning NULL to members inside a destructor has no effect.
CubeRecoContext::~CubeRecoContext() {
  delete char_classifier_;
  delete word_size_model_;
  delete char_set_;
  delete char_bigrams_;
  delete word_unigrams_;
  delete lang_mod_;
  delete params_;
}
// Returns the path of the data files, taken from the Tesseract object's
// datadir member.  Always succeeds (returns true).
// NOTE(review): the old comment claimed a TESSDATA_PREFIX lookup happens
// here; the lookup presumably happens when datadir is populated elsewhere.
bool CubeRecoContext::GetDataFilePath(string *path) const {
  *path = tess_obj_->datadir.string();
  return true;
}
// The object initialization function that loads all the necessary
// components of a RecoContext.  TessdataManager is used to load the
// data from [lang].traineddata file.  If TESSDATA_CUBE_UNICHARSET
// component is present, Cube will be instantiated with the unicharset
// specified in this component and the corresponding dictionary
// (TESSDATA_CUBE_SYSTEM_DAWG), and will map Cube's unicharset to
// Tesseract's.  Otherwise, TessdataManager will assume that Cube will
// be using Tesseract's unicharset and dawgs, and will load the
// unicharset from the TESSDATA_UNICHARSET component and will load the
// dawgs from TESSDATA_*_DAWG components.
// Returns false (with a message on stderr) if any required component
// fails to load; on success sets loaded_ and returns true.
bool CubeRecoContext::Load(TessdataManager *tessdata_manager,
                           UNICHARSET *tess_unicharset) {
  ASSERT_HOST(tess_obj_ != NULL);
  tess_unicharset_ = tess_unicharset;
  string data_file_path;
  // Get the data file path.
  if (GetDataFilePath(&data_file_path) == false) {
    fprintf(stderr, "Unable to get data file path\n");
    return false;
  }
  // Get the language from the Tesseract object.
  lang_ = tess_obj_->lang.string();
  // Create the char set (required).
  if ((char_set_ =
       CharSet::Create(tessdata_manager, tess_unicharset)) == NULL) {
    fprintf(stderr, "Cube ERROR (CubeRecoContext::Load): unable to load "
            "CharSet\n");
    return false;
  }
  // Create the language model (required).
  string lm_file_name = data_file_path + lang_ + ".cube.lm";
  string lm_params;
  if (!CubeUtils::ReadFileToString(lm_file_name, &lm_params)) {
    fprintf(stderr, "Cube ERROR (CubeRecoContext::Load): unable to read cube "
            "language model params from %s\n", lm_file_name.c_str());
    return false;
  }
  // Note: operator new throws on failure rather than returning NULL, so
  // the old NULL check on this result was dead code and has been removed.
  lang_mod_ = new TessLangModel(lm_params, data_file_path,
                                tess_obj_->getDict().load_system_dawg,
                                tessdata_manager, this);
  // Create the optional char bigrams object (NULL is tolerated).
  char_bigrams_ = CharBigrams::Create(data_file_path, lang_);
  // Create the optional word unigrams object (NULL is tolerated).
  word_unigrams_ = WordUnigrams::Create(data_file_path, lang_);
  // Create the optional size model (NULL is tolerated).
  word_size_model_ = WordSizeModel::Create(data_file_path, lang_,
                                           char_set_, Contextual());
  // Load tuning params (required).
  params_ = CubeTuningParams::Create(data_file_path, lang_);
  if (params_ == NULL) {
    fprintf(stderr, "Cube ERROR (CubeRecoContext::Load): unable to read "
            "CubeTuningParams from %s\n", data_file_path.c_str());
    return false;
  }
  // Create the char classifier (required).
  char_classifier_ = CharClassifierFactory::Create(data_file_path, lang_,
                                                   lang_mod_, char_set_,
                                                   params_);
  if (char_classifier_ == NULL) {
    fprintf(stderr, "Cube ERROR (CubeRecoContext::Load): unable to load "
            "CharClassifierFactory object from %s\n", data_file_path.c_str());
    return false;
  }
  loaded_ = true;
  return true;
}
// Creates and fully loads a CubeRecoContext using a tesseract object.
// Returns NULL (with a message on stderr) if loading fails; otherwise
// the caller owns the returned object.
CubeRecoContext * CubeRecoContext::Create(Tesseract *tess_obj,
                                          TessdataManager *tessdata_manager,
                                          UNICHARSET *tess_unicharset) {
  // Create the object.  operator new throws on failure rather than
  // returning NULL, so the old NULL check here was dead code.
  CubeRecoContext *cntxt = new CubeRecoContext(tess_obj);
  // Load the necessary components.
  if (cntxt->Load(tessdata_manager, tess_unicharset) == false) {
    fprintf(stderr, "Cube ERROR (CubeRecoContext::Create): unable to init "
            "CubeRecoContext object\n");
    delete cntxt;
    return NULL;
  }
  // success
  return cntxt;
}
} // namespace tesseract
| C++ |
///////////////////////////////////////////////////////////////////////
// File: tesseractclass.cpp
// Description: The Tesseract class. It holds/owns everything needed
// to run Tesseract on a single language, and also a set of
// sub-Tesseracts to run sub-languages. For thread safety, *every*
// variable that was previously global or static (except for
// constant data, and some visual debugging flags) has been moved
// in here, directly, or indirectly.
// This makes it safe to run multiple Tesseracts in different
// threads in parallel, and keeps the different language
// instances separate.
// Some global functions remain, but they are isolated re-entrant
// functions that operate on their arguments. Functions that work
// on variable data have been moved to an appropriate class based
// mostly on the directory hierarchy. For more information see
// slide 6 of "2ArchitectureAndDataStructures" in
// https://drive.google.com/file/d/0B7l10Bj_LprhbUlIUFlCdGtDYkE/edit?usp=sharing
// Some global data and related functions still exist in the
// training-related code, but they don't interfere with normal
// recognition operation.
// Author: Ray Smith
// Created: Fri Mar 07 08:17:01 PST 2008
//
// (C) Copyright 2008, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#include "tesseractclass.h"
#include "allheaders.h"
#include "cube_reco_context.h"
#include "edgblob.h"
#include "equationdetect.h"
#include "globals.h"
#include "tesseract_cube_combiner.h"
// Include automatically generated configuration file if running autoconf.
#ifdef HAVE_CONFIG_H
#include "config_auto.h"
#endif
namespace tesseract {
Tesseract::Tesseract()
: BOOL_MEMBER(tessedit_resegment_from_boxes, false,
"Take segmentation and labeling from box file",
this->params()),
BOOL_MEMBER(tessedit_resegment_from_line_boxes, false,
"Conversion of word/line box file to char box file",
this->params()),
BOOL_MEMBER(tessedit_train_from_boxes, false,
"Generate training data from boxed chars", this->params()),
BOOL_MEMBER(tessedit_make_boxes_from_boxes, false,
"Generate more boxes from boxed chars", this->params()),
BOOL_MEMBER(tessedit_dump_pageseg_images, false,
"Dump intermediate images made during page segmentation",
this->params()),
// The default for pageseg_mode is the old behaviour, so as not to
// upset anything that relies on that.
INT_MEMBER(tessedit_pageseg_mode, PSM_SINGLE_BLOCK,
"Page seg mode: 0=osd only, 1=auto+osd, 2=auto, 3=col, 4=block,"
" 5=line, 6=word, 7=char"
" (Values from PageSegMode enum in publictypes.h)",
this->params()),
INT_INIT_MEMBER(tessedit_ocr_engine_mode, tesseract::OEM_TESSERACT_ONLY,
"Which OCR engine(s) to run (Tesseract, Cube, both)."
" Defaults to loading and running only Tesseract"
" (no Cube,no combiner)."
" Values from OcrEngineMode enum in tesseractclass.h)",
this->params()),
STRING_MEMBER(tessedit_char_blacklist, "",
"Blacklist of chars not to recognize", this->params()),
STRING_MEMBER(tessedit_char_whitelist, "",
"Whitelist of chars to recognize", this->params()),
STRING_MEMBER(tessedit_char_unblacklist, "",
"List of chars to override tessedit_char_blacklist",
this->params()),
BOOL_MEMBER(tessedit_ambigs_training, false,
"Perform training for ambiguities", this->params()),
INT_MEMBER(pageseg_devanagari_split_strategy,
tesseract::ShiroRekhaSplitter::NO_SPLIT,
"Whether to use the top-line splitting process for Devanagari "
"documents while performing page-segmentation.", this->params()),
INT_MEMBER(ocr_devanagari_split_strategy,
tesseract::ShiroRekhaSplitter::NO_SPLIT,
"Whether to use the top-line splitting process for Devanagari "
"documents while performing ocr.", this->params()),
STRING_MEMBER(tessedit_write_params_to_file, "",
"Write all parameters to the given file.", this->params()),
BOOL_MEMBER(tessedit_adaption_debug, false, "Generate and print debug"
" information for adaption", this->params()),
INT_MEMBER(bidi_debug, 0, "Debug level for BiDi", this->params()),
INT_MEMBER(applybox_debug, 1, "Debug level", this->params()),
INT_MEMBER(applybox_page, 0,
"Page number to apply boxes from", this->params()),
STRING_MEMBER(applybox_exposure_pattern, ".exp", "Exposure value follows"
" this pattern in the image filename. The name of the image"
" files are expected to be in the form"
" [lang].[fontname].exp[num].tif", this->params()),
BOOL_MEMBER(applybox_learn_chars_and_char_frags_mode, false,
"Learn both character fragments (as is done in the"
" special low exposure mode) as well as unfragmented"
" characters.", this->params()),
BOOL_MEMBER(applybox_learn_ngrams_mode, false, "Each bounding box"
" is assumed to contain ngrams. Only learn the ngrams"
" whose outlines overlap horizontally.", this->params()),
BOOL_MEMBER(tessedit_display_outwords, false,
"Draw output words", this->params()),
BOOL_MEMBER(tessedit_dump_choices, false,
"Dump char choices", this->params()),
BOOL_MEMBER(tessedit_timing_debug, false, "Print timing stats",
this->params()),
BOOL_MEMBER(tessedit_fix_fuzzy_spaces, true,
"Try to improve fuzzy spaces", this->params()),
BOOL_MEMBER(tessedit_unrej_any_wd, false,
"Dont bother with word plausibility", this->params()),
BOOL_MEMBER(tessedit_fix_hyphens, true,
"Crunch double hyphens?", this->params()),
BOOL_MEMBER(tessedit_redo_xheight, true,
"Check/Correct x-height", this->params()),
BOOL_MEMBER(tessedit_enable_doc_dict, true,
"Add words to the document dictionary", this->params()),
BOOL_MEMBER(tessedit_debug_fonts, false,
"Output font info per char", this->params()),
BOOL_MEMBER(tessedit_debug_block_rejection, false,
"Block and Row stats", this->params()),
BOOL_MEMBER(tessedit_enable_bigram_correction, true,
"Enable correction based on the word bigram dictionary.",
this->params()),
BOOL_MEMBER(tessedit_enable_dict_correction, false,
"Enable single word correction based on the dictionary.",
this->params()),
INT_MEMBER(tessedit_bigram_debug, 0,
"Amount of debug output for bigram correction.",
this->params()),
INT_MEMBER(debug_x_ht_level, 0, "Reestimate debug", this->params()),
BOOL_MEMBER(debug_acceptable_wds, false,
"Dump word pass/fail chk", this->params()),
STRING_MEMBER(chs_leading_punct, "('`\"",
"Leading punctuation", this->params()),
STRING_MEMBER(chs_trailing_punct1, ").,;:?!",
"1st Trailing punctuation", this->params()),
STRING_MEMBER(chs_trailing_punct2, ")'`\"",
"2nd Trailing punctuation", this->params()),
double_MEMBER(quality_rej_pc, 0.08,
"good_quality_doc lte rejection limit", this->params()),
double_MEMBER(quality_blob_pc, 0.0,
"good_quality_doc gte good blobs limit", this->params()),
double_MEMBER(quality_outline_pc, 1.0,
"good_quality_doc lte outline error limit", this->params()),
double_MEMBER(quality_char_pc, 0.95,
"good_quality_doc gte good char limit", this->params()),
INT_MEMBER(quality_min_initial_alphas_reqd, 2,
"alphas in a good word", this->params()),
INT_MEMBER(tessedit_tess_adaption_mode, 0x27,
"Adaptation decision algorithm for tess", this->params()),
BOOL_MEMBER(tessedit_minimal_rej_pass1, false,
"Do minimal rejection on pass 1 output", this->params()),
BOOL_MEMBER(tessedit_test_adaption, false,
"Test adaption criteria", this->params()),
BOOL_MEMBER(tessedit_matcher_log, false,
"Log matcher activity", this->params()),
INT_MEMBER(tessedit_test_adaption_mode, 3,
"Adaptation decision algorithm for tess", this->params()),
BOOL_MEMBER(test_pt, false, "Test for point", this->params()),
double_MEMBER(test_pt_x, 99999.99, "xcoord", this->params()),
double_MEMBER(test_pt_y, 99999.99, "ycoord", this->params()),
INT_MEMBER(paragraph_debug_level, 0, "Print paragraph debug info.",
this->params()),
BOOL_MEMBER(paragraph_text_based, true,
"Run paragraph detection on the post-text-recognition "
"(more accurate)", this->params()),
INT_MEMBER(cube_debug_level, 0, "Print cube debug info.", this->params()),
STRING_MEMBER(outlines_odd, "%| ", "Non standard number of outlines",
this->params()),
STRING_MEMBER(outlines_2, "ij!?%\":;",
"Non standard number of outlines", this->params()),
BOOL_MEMBER(docqual_excuse_outline_errs, false,
"Allow outline errs in unrejection?", this->params()),
BOOL_MEMBER(tessedit_good_quality_unrej, true,
"Reduce rejection on good docs", this->params()),
BOOL_MEMBER(tessedit_use_reject_spaces, true,
"Reject spaces?", this->params()),
double_MEMBER(tessedit_reject_doc_percent, 65.00,
"%rej allowed before rej whole doc", this->params()),
double_MEMBER(tessedit_reject_block_percent, 45.00,
"%rej allowed before rej whole block", this->params()),
double_MEMBER(tessedit_reject_row_percent, 40.00,
"%rej allowed before rej whole row", this->params()),
double_MEMBER(tessedit_whole_wd_rej_row_percent, 70.00,
"Number of row rejects in whole word rejects"
"which prevents whole row rejection", this->params()),
BOOL_MEMBER(tessedit_preserve_blk_rej_perfect_wds, true,
"Only rej partially rejected words in block rejection",
this->params()),
BOOL_MEMBER(tessedit_preserve_row_rej_perfect_wds, true,
"Only rej partially rejected words in row rejection",
this->params()),
BOOL_MEMBER(tessedit_dont_blkrej_good_wds, false,
"Use word segmentation quality metric", this->params()),
BOOL_MEMBER(tessedit_dont_rowrej_good_wds, false,
"Use word segmentation quality metric", this->params()),
INT_MEMBER(tessedit_preserve_min_wd_len, 2,
"Only preserve wds longer than this", this->params()),
BOOL_MEMBER(tessedit_row_rej_good_docs, true,
"Apply row rejection to good docs", this->params()),
double_MEMBER(tessedit_good_doc_still_rowrej_wd, 1.1,
"rej good doc wd if more than this fraction rejected",
this->params()),
BOOL_MEMBER(tessedit_reject_bad_qual_wds, true,
"Reject all bad quality wds", this->params()),
BOOL_MEMBER(tessedit_debug_doc_rejection, false,
"Page stats", this->params()),
BOOL_MEMBER(tessedit_debug_quality_metrics, false,
"Output data to debug file", this->params()),
BOOL_MEMBER(bland_unrej, false,
"unrej potential with no chekcs", this->params()),
double_MEMBER(quality_rowrej_pc, 1.1,
"good_quality_doc gte good char limit", this->params()),
BOOL_MEMBER(unlv_tilde_crunching, true,
"Mark v.bad words for tilde crunch", this->params()),
BOOL_MEMBER(hocr_font_info, false,
"Add font info to hocr output", this->params()),
BOOL_MEMBER(crunch_early_merge_tess_fails, true,
"Before word crunch?", this->params()),
BOOL_MEMBER(crunch_early_convert_bad_unlv_chs, false,
"Take out ~^ early?", this->params()),
double_MEMBER(crunch_terrible_rating, 80.0,
"crunch rating lt this", this->params()),
BOOL_MEMBER(crunch_terrible_garbage, true, "As it says", this->params()),
double_MEMBER(crunch_poor_garbage_cert, -9.0,
"crunch garbage cert lt this", this->params()),
double_MEMBER(crunch_poor_garbage_rate, 60,
"crunch garbage rating lt this", this->params()),
double_MEMBER(crunch_pot_poor_rate, 40,
"POTENTIAL crunch rating lt this", this->params()),
double_MEMBER(crunch_pot_poor_cert, -8.0,
"POTENTIAL crunch cert lt this", this->params()),
BOOL_MEMBER(crunch_pot_garbage, true,
"POTENTIAL crunch garbage", this->params()),
double_MEMBER(crunch_del_rating, 60,
"POTENTIAL crunch rating lt this", this->params()),
double_MEMBER(crunch_del_cert, -10.0,
"POTENTIAL crunch cert lt this", this->params()),
double_MEMBER(crunch_del_min_ht, 0.7,
"Del if word ht lt xht x this", this->params()),
double_MEMBER(crunch_del_max_ht, 3.0,
"Del if word ht gt xht x this", this->params()),
double_MEMBER(crunch_del_min_width, 3.0,
"Del if word width lt xht x this", this->params()),
double_MEMBER(crunch_del_high_word, 1.5,
"Del if word gt xht x this above bl", this->params()),
double_MEMBER(crunch_del_low_word, 0.5,
"Del if word gt xht x this below bl", this->params()),
double_MEMBER(crunch_small_outlines_size, 0.6,
"Small if lt xht x this", this->params()),
INT_MEMBER(crunch_rating_max, 10,
"For adj length in rating per ch", this->params()),
INT_MEMBER(crunch_pot_indicators, 1,
"How many potential indicators needed", this->params()),
BOOL_MEMBER(crunch_leave_ok_strings, true,
"Dont touch sensible strings", this->params()),
BOOL_MEMBER(crunch_accept_ok, true,
"Use acceptability in okstring", this->params()),
BOOL_MEMBER(crunch_leave_accept_strings, false,
"Dont pot crunch sensible strings", this->params()),
BOOL_MEMBER(crunch_include_numerals, false,
"Fiddle alpha figures", this->params()),
INT_MEMBER(crunch_leave_lc_strings, 4,
"Dont crunch words with long lower case strings",
this->params()),
INT_MEMBER(crunch_leave_uc_strings, 4,
"Dont crunch words with long lower case strings",
this->params()),
INT_MEMBER(crunch_long_repetitions, 3,
"Crunch words with long repetitions", this->params()),
INT_MEMBER(crunch_debug, 0, "As it says", this->params()),
INT_MEMBER(fixsp_non_noise_limit, 1,
"How many non-noise blbs either side?", this->params()),
double_MEMBER(fixsp_small_outlines_size, 0.28,
"Small if lt xht x this", this->params()),
BOOL_MEMBER(tessedit_prefer_joined_punct, false,
"Reward punctation joins", this->params()),
INT_MEMBER(fixsp_done_mode, 1,
"What constitues done for spacing", this->params()),
INT_MEMBER(debug_fix_space_level, 0,
"Contextual fixspace debug", this->params()),
STRING_MEMBER(numeric_punctuation, ".,",
"Punct. chs expected WITHIN numbers", this->params()),
INT_MEMBER(x_ht_acceptance_tolerance, 8,
"Max allowed deviation of blob top outside of font data",
this->params()),
INT_MEMBER(x_ht_min_change, 8,
"Min change in xht before actually trying it", this->params()),
INT_MEMBER(superscript_debug, 0, "Debug level for sub & superscript fixer",
this->params()),
double_MEMBER(superscript_worse_certainty, 2.0, "How many times worse "
"certainty does a superscript position glyph need to be for "
"us to try classifying it as a char with a different "
"baseline?", this->params()),
double_MEMBER(superscript_bettered_certainty, 0.97, "What reduction in "
"badness do we think sufficient to choose a superscript "
"over what we'd thought. For example, a value of 0.6 means "
"we want to reduce badness of certainty by at least 40%",
this->params()),
double_MEMBER(superscript_scaledown_ratio, 0.4,
"A superscript scaled down more than this is unbelievably "
"small. For example, 0.3 means we expect the font size to "
"be no smaller than 30% of the text line font size.",
this->params()),
double_MEMBER(subscript_max_y_top, 0.5,
"Maximum top of a character measured as a multiple of "
"x-height above the baseline for us to reconsider whether "
"it's a subscript.", this->params()),
double_MEMBER(superscript_min_y_bottom, 0.3,
"Minimum bottom of a character measured as a multiple of "
"x-height above the baseline for us to reconsider whether "
"it's a superscript.", this->params()),
BOOL_MEMBER(tessedit_write_block_separators, false,
"Write block separators in output", this->params()),
BOOL_MEMBER(tessedit_write_rep_codes, false,
"Write repetition char code", this->params()),
BOOL_MEMBER(tessedit_write_unlv, false,
"Write .unlv output file", this->params()),
BOOL_MEMBER(tessedit_create_txt, true,
"Write .txt output file", this->params()),
BOOL_MEMBER(tessedit_create_hocr, false,
"Write .html hOCR output file", this->params()),
BOOL_MEMBER(tessedit_create_pdf, false,
"Write .pdf output file", this->params()),
STRING_MEMBER(unrecognised_char, "|",
"Output char for unidentified blobs", this->params()),
INT_MEMBER(suspect_level, 99, "Suspect marker level", this->params()),
INT_MEMBER(suspect_space_level, 100,
"Min suspect level for rejecting spaces", this->params()),
INT_MEMBER(suspect_short_words, 2,
"Dont Suspect dict wds longer than this", this->params()),
BOOL_MEMBER(suspect_constrain_1Il, false,
"UNLV keep 1Il chars rejected", this->params()),
double_MEMBER(suspect_rating_per_ch, 999.9,
"Dont touch bad rating limit", this->params()),
double_MEMBER(suspect_accept_rating, -999.9,
"Accept good rating limit", this->params()),
BOOL_MEMBER(tessedit_minimal_rejection, false,
"Only reject tess failures", this->params()),
BOOL_MEMBER(tessedit_zero_rejection, false,
"Dont reject ANYTHING", this->params()),
BOOL_MEMBER(tessedit_word_for_word, false,
"Make output have exactly one word per WERD", this->params()),
BOOL_MEMBER(tessedit_zero_kelvin_rejection, false,
"Dont reject ANYTHING AT ALL", this->params()),
BOOL_MEMBER(tessedit_consistent_reps, true,
"Force all rep chars the same", this->params()),
INT_MEMBER(tessedit_reject_mode, 0, "Rejection algorithm", this->params()),
BOOL_MEMBER(tessedit_rejection_debug, false,
"Adaption debug", this->params()),
BOOL_MEMBER(tessedit_flip_0O, true,
"Contextual 0O O0 flips", this->params()),
double_MEMBER(tessedit_lower_flip_hyphen, 1.5,
"Aspect ratio dot/hyphen test", this->params()),
double_MEMBER(tessedit_upper_flip_hyphen, 1.8,
"Aspect ratio dot/hyphen test", this->params()),
BOOL_MEMBER(rej_trust_doc_dawg, false,
"Use DOC dawg in 11l conf. detector", this->params()),
BOOL_MEMBER(rej_1Il_use_dict_word, false,
"Use dictword test", this->params()),
BOOL_MEMBER(rej_1Il_trust_permuter_type, true,
"Dont double check", this->params()),
BOOL_MEMBER(rej_use_tess_accepted, true,
"Individual rejection control", this->params()),
BOOL_MEMBER(rej_use_tess_blanks, true,
"Individual rejection control", this->params()),
BOOL_MEMBER(rej_use_good_perm, true,
"Individual rejection control", this->params()),
BOOL_MEMBER(rej_use_sensible_wd, false,
"Extend permuter check", this->params()),
BOOL_MEMBER(rej_alphas_in_number_perm, false,
"Extend permuter check", this->params()),
double_MEMBER(rej_whole_of_mostly_reject_word_fract, 0.85,
"if >this fract", this->params()),
INT_MEMBER(tessedit_image_border, 2,
"Rej blbs near image edge limit", this->params()),
STRING_MEMBER(ok_repeated_ch_non_alphanum_wds, "-?*\075",
"Allow NN to unrej", this->params()),
STRING_MEMBER(conflict_set_I_l_1, "Il1[]",
"Il1 conflict set", this->params()),
INT_MEMBER(min_sane_x_ht_pixels, 8,
"Reject any x-ht lt or eq than this", this->params()),
BOOL_MEMBER(tessedit_create_boxfile, false,
"Output text with boxes", this->params()),
INT_MEMBER(tessedit_page_number, -1, "-1 -> All pages"
" , else specifc page to process", this->params()),
BOOL_MEMBER(tessedit_write_images, false,
"Capture the image from the IPE", this->params()),
BOOL_MEMBER(interactive_display_mode, false, "Run interactively?",
this->params()),
STRING_MEMBER(file_type, ".tif", "Filename extension", this->params()),
BOOL_MEMBER(tessedit_override_permuter, true,
"According to dict_word", this->params()),
INT_MEMBER(tessdata_manager_debug_level, 0, "Debug level for"
" TessdataManager functions.", this->params()),
STRING_MEMBER(tessedit_load_sublangs, "",
"List of languages to load with this one", this->params()),
BOOL_MEMBER(tessedit_use_primary_params_model, false,
"In multilingual mode use params model of the"
" primary language", this->params()),
double_MEMBER(min_orientation_margin, 7.0,
"Min acceptable orientation margin", this->params()),
BOOL_MEMBER(textord_tabfind_show_vlines, false, "Debug line finding",
this->params()),
BOOL_MEMBER(textord_use_cjk_fp_model, FALSE, "Use CJK fixed pitch model",
this->params()),
BOOL_MEMBER(poly_allow_detailed_fx, false,
"Allow feature extractors to see the original outline",
this->params()),
BOOL_INIT_MEMBER(tessedit_init_config_only, false,
"Only initialize with the config file. Useful if the "
"instance is not going to be used for OCR but say only "
"for layout analysis.", this->params()),
BOOL_MEMBER(textord_equation_detect, false, "Turn on equation detector",
this->params()),
BOOL_MEMBER(textord_tabfind_vertical_text, true,
"Enable vertical detection", this->params()),
BOOL_MEMBER(textord_tabfind_force_vertical_text, false,
"Force using vertical text page mode", this->params()),
double_MEMBER(textord_tabfind_vertical_text_ratio, 0.5,
"Fraction of textlines deemed vertical to use vertical page "
"mode", this->params()),
double_MEMBER(textord_tabfind_aligned_gap_fraction, 0.75,
"Fraction of height used as a minimum gap for aligned blobs.",
this->params()),
INT_MEMBER(tessedit_parallelize, 0, "Run in parallel where possible",
this->params()),
// The following parameters were deprecated and removed from their original
// locations. The parameters are temporarily kept here to give Tesseract
// users a chance to updated their [lang].traineddata and config files
// without introducing failures during Tesseract initialization.
// TODO(ocr-team): remove these parameters from the code once we are
// reasonably sure that Tesseract users have updated their data files.
//
// BEGIN DEPRECATED PARAMETERS
BOOL_MEMBER(textord_tabfind_vertical_horizontal_mix, true,
"find horizontal lines such as headers in vertical page mode",
this->params()),
INT_MEMBER(tessedit_ok_mode, 5,
"Acceptance decision algorithm", this->params()),
BOOL_INIT_MEMBER(load_fixed_length_dawgs, true, "Load fixed length dawgs"
" (e.g. for non-space delimited languages)",
this->params()),
INT_MEMBER(segment_debug, 0, "Debug the whole segmentation process",
this->params()),
BOOL_MEMBER(permute_debug, 0, "Debug char permutation process",
this->params()),
double_MEMBER(bestrate_pruning_factor, 2.0, "Multiplying factor of"
" current best rate to prune other hypotheses",
this->params()),
BOOL_MEMBER(permute_script_word, 0,
"Turn on word script consistency permuter",
this->params()),
BOOL_MEMBER(segment_segcost_rating, 0,
"incorporate segmentation cost in word rating?",
this->params()),
double_MEMBER(segment_reward_script, 0.95,
"Score multipler for script consistency within a word. "
"Being a 'reward' factor, it should be <= 1. "
"Smaller value implies bigger reward.",
this->params()),
BOOL_MEMBER(permute_fixed_length_dawg, 0,
"Turn on fixed-length phrasebook search permuter",
this->params()),
BOOL_MEMBER(permute_chartype_word, 0,
"Turn on character type (property) consistency permuter",
this->params()),
double_MEMBER(segment_reward_chartype, 0.97,
"Score multipler for char type consistency within a word. ",
this->params()),
double_MEMBER(segment_reward_ngram_best_choice, 0.99,
"Score multipler for ngram permuter's best choice"
" (only used in the Han script path).",
this->params()),
BOOL_MEMBER(ngram_permuter_activated, false,
"Activate character-level n-gram-based permuter",
this->params()),
BOOL_MEMBER(permute_only_top, false, "Run only the top choice permuter",
this->params()),
INT_MEMBER(language_model_fixed_length_choices_depth, 3,
"Depth of blob choice lists to explore"
" when fixed length dawgs are on",
this->params()),
BOOL_MEMBER(use_new_state_cost, FALSE,
"use new state cost heuristics for segmentation state"
" evaluation", this->params()),
double_MEMBER(heuristic_segcost_rating_base, 1.25,
"base factor for adding segmentation cost into word rating."
"It's a multiplying factor, the larger the value above 1, "
"the bigger the effect of segmentation cost.",
this->params()),
double_MEMBER(heuristic_weight_rating, 1.0,
"weight associated with char rating in combined cost of"
"state", this->params()),
double_MEMBER(heuristic_weight_width, 1000.0,
"weight associated with width evidence in combined cost of"
" state", this->params()),
double_MEMBER(heuristic_weight_seamcut, 0.0,
"weight associated with seam cut in combined cost of state",
this->params()),
double_MEMBER(heuristic_max_char_wh_ratio, 2.0,
"max char width-to-height ratio allowed in segmentation",
this->params()),
BOOL_MEMBER(enable_new_segsearch, true,
"Enable new segmentation search path.", this->params()),
double_MEMBER(segsearch_max_fixed_pitch_char_wh_ratio, 2.0,
"Maximum character width-to-height ratio for"
" fixed-pitch fonts",
this->params()),
// END DEPRECATED PARAMETERS
backup_config_file_(NULL),
pix_binary_(NULL),
cube_binary_(NULL),
pix_grey_(NULL),
pix_thresholds_(NULL),
source_resolution_(0),
textord_(this),
right_to_left_(false),
scaled_color_(NULL),
scaled_factor_(-1),
deskew_(1.0f, 0.0f),
reskew_(1.0f, 0.0f),
most_recently_used_(this),
font_table_size_(0),
cube_cntxt_(NULL),
tess_cube_combiner_(NULL),
equ_detect_(NULL) {
}
Tesseract::~Tesseract() {
  Clear();
  end_tesseract();
  sub_langs_.delete_data_pointers();
  // Delete cube objects. delete on a NULL pointer is a no-op, so the old
  // explicit NULL checks were redundant; the pointers are still reset so any
  // accidental use during teardown fails fast instead of double-deleting.
  delete cube_cntxt_;
  cube_cntxt_ = NULL;
  delete tess_cube_combiner_;
  tess_cube_combiner_ = NULL;
}
// Clear as much used memory as possible without resetting the adaptive
// classifier or losing any other classifier data: releases all page-level
// images, resets the skew transforms and splitter, then recurses into every
// loaded sub-language instance.
void Tesseract::Clear() {
  // Leptonica's pixDestroy() handles a NULL pix and NULLs out the pointer.
  pixDestroy(&pix_binary_);
  pixDestroy(&cube_binary_);
  pixDestroy(&pix_grey_);
  pixDestroy(&pix_thresholds_);
  pixDestroy(&scaled_color_);
  // Reset skew back to the identity rotation (cos=1, sin=0), matching the
  // constructor's initial values.
  deskew_ = FCOORD(1.0f, 0.0f);
  reskew_ = FCOORD(1.0f, 0.0f);
  splitter_.Clear();
  scaled_factor_ = -1;
  for (int i = 0; i < sub_langs_.size(); ++i)
    sub_langs_[i]->Clear();
}
// Install the equation detector and register this Tesseract instance with it.
// NOTE(review): detector must be non-NULL — it is dereferenced immediately.
// The destructor does not delete equ_detect_, so ownership appears to remain
// with the caller — TODO confirm.
void Tesseract::SetEquationDetect(EquationDetect* detector) {
  equ_detect_ = detector;
  equ_detect_->SetLangTesseract(this);
}
// Clear all memory of adaption for this and all subclassifiers.
void Tesseract::ResetAdaptiveClassifier() {
  // Reset the primary language first, then every loaded sub-language.
  ResetAdaptiveClassifierInternal();
  const int num_subs = sub_langs_.size();
  for (int lang = 0; lang < num_subs; ++lang)
    sub_langs_[lang]->ResetAdaptiveClassifierInternal();
}
// Clear the document dictionary for this and all subclassifiers.
void Tesseract::ResetDocumentDictionary() {
  // Primary language first, then each sub-language's dictionary.
  getDict().ResetDocumentDictionary();
  const int num_subs = sub_langs_.size();
  for (int lang = 0; lang < num_subs; ++lang)
    sub_langs_[lang]->getDict().ResetDocumentDictionary();
}
// Apply the character blacklist/whitelist/unblacklist parameters to the
// primary unicharset and to every sub-language unicharset, so character
// filtering is consistent across all loaded classifiers.
void Tesseract::SetBlackAndWhitelist() {
  // Set the white and blacklists (if any)
  unicharset.set_black_and_whitelist(tessedit_char_blacklist.string(),
                                     tessedit_char_whitelist.string(),
                                     tessedit_char_unblacklist.string());
  // Black and white lists should apply to all loaded classifiers.
  for (int i = 0; i < sub_langs_.size(); ++i) {
    sub_langs_[i]->unicharset.set_black_and_whitelist(
        tessedit_char_blacklist.string(), tessedit_char_whitelist.string(),
        tessedit_char_unblacklist.string());
  }
}
// Perform steps to prepare underlying binary image/other data structures for
// page segmentation: snapshots the binary image for cube, propagates the
// binary/cube images to every sub-language, then runs the shiro-rekha
// (Devanagari top-line) splitter using the strongest strategy requested by
// any loaded language.
void Tesseract::PrepareForPageseg() {
  textord_.set_use_cjk_fp_model(textord_use_cjk_fp_model);
  // pixClone() takes a new reference to the same image rather than copying,
  // so each holder must pixDestroy() its own pointer first.
  pixDestroy(&cube_binary_);
  cube_binary_ = pixClone(pix_binary());
  // Find the max splitter strategy over all langs.
  ShiroRekhaSplitter::SplitStrategy max_pageseg_strategy =
      static_cast<ShiroRekhaSplitter::SplitStrategy>(
      static_cast<inT32>(pageseg_devanagari_split_strategy));
  for (int i = 0; i < sub_langs_.size(); ++i) {
    ShiroRekhaSplitter::SplitStrategy pageseg_strategy =
        static_cast<ShiroRekhaSplitter::SplitStrategy>(
        static_cast<inT32>(sub_langs_[i]->pageseg_devanagari_split_strategy));
    if (pageseg_strategy > max_pageseg_strategy)
      max_pageseg_strategy = pageseg_strategy;
    // Clone the cube image to all the sub langs too.
    pixDestroy(&sub_langs_[i]->cube_binary_);
    sub_langs_[i]->cube_binary_ = pixClone(pix_binary());
    pixDestroy(&sub_langs_[i]->pix_binary_);
    sub_langs_[i]->pix_binary_ = pixClone(pix_binary());
  }
  // Perform shiro-rekha (top-line) splitting and replace the current image by
  // the newly splitted image.
  splitter_.set_orig_pix(pix_binary());
  splitter_.set_pageseg_split_strategy(max_pageseg_strategy);
  if (splitter_.Split(true)) {
    ASSERT_HOST(splitter_.splitted_image());
    pixDestroy(&pix_binary_);
    pix_binary_ = pixClone(splitter_.splitted_image());
  }
}
// Perform steps to prepare underlying binary image/other data structures for
// OCR. The current segmentation is required by this method.
// Note that this method resets pix_binary_ to the original binarized image,
// which may be different from the image actually used for OCR depending on the
// value of devanagari_ocr_split_strategy.
void Tesseract::PrepareForTessOCR(BLOCK_LIST* block_list,
                                  Tesseract* osd_tess, OSResults* osr) {
  // Find the max splitter strategy over all langs.
  ShiroRekhaSplitter::SplitStrategy max_ocr_strategy =
      static_cast<ShiroRekhaSplitter::SplitStrategy>(
      static_cast<inT32>(ocr_devanagari_split_strategy));
  for (int i = 0; i < sub_langs_.size(); ++i) {
    ShiroRekhaSplitter::SplitStrategy ocr_strategy =
        static_cast<ShiroRekhaSplitter::SplitStrategy>(
        static_cast<inT32>(sub_langs_[i]->ocr_devanagari_split_strategy));
    if (ocr_strategy > max_ocr_strategy)
      max_ocr_strategy = ocr_strategy;
  }
  // Utilize the segmentation information available.
  splitter_.set_segmentation_block_list(block_list);
  splitter_.set_ocr_split_strategy(max_ocr_strategy);
  // Run the splitter for OCR
  bool split_for_ocr = splitter_.Split(false);
  // Restore pix_binary to the binarized original pix for future reference.
  ASSERT_HOST(splitter_.orig_pix());
  pixDestroy(&pix_binary_);
  pix_binary_ = pixClone(splitter_.orig_pix());
  // If the pageseg and ocr strategies are different, refresh the block list
  // (from the last SegmentImage call) with blobs from the real image to be used
  // for OCR.
  if (splitter_.HasDifferentSplitStrategies()) {
    // Build a single page-sized block covering the whole image so that
    // extract_edges() can regenerate blobs from the OCR-time image.
    BLOCK block("", TRUE, 0, 0, 0, 0, pixGetWidth(pix_binary_),
                pixGetHeight(pix_binary_));
    Pix* pix_for_ocr = split_for_ocr ? splitter_.splitted_image() :
        splitter_.orig_pix();
    extract_edges(pix_for_ocr, &block);
    splitter_.RefreshSegmentationWithNewBlobs(block.blob_list());
  }
  // The splitter isn't needed any more after this, so save memory by clearing.
  splitter_.Clear();
}
} // namespace tesseract
| C++ |
///////////////////////////////////////////////////////////////////////
// File: paramsd.cpp
// Description: Tesseract parameter Editor
// Author: Joern Wanke
// Created: Wed Jul 18 10:05:01 PDT 2007
//
// (C) Copyright 2007, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
//
// The parameters editor is used to edit all the parameters used within
// tesseract from the ui.
#ifdef _WIN32
#else
#include <stdlib.h>
#include <stdio.h>
#endif
#include <map>
#include <string>
// Include automatically generated configuration file if running autoconf.
#ifdef HAVE_CONFIG_H
#include "config_auto.h"
#endif
#ifndef GRAPHICS_DISABLED
#include "paramsd.h"
#include "params.h"
#include "scrollview.h"
#include "svmnode.h"
#define VARDIR "configs/" /*parameters files */
#define MAX_ITEMS_IN_SUBMENU 30
// The following variables should remain static globals, since they
// are used by debug editor, which uses a single Tesseract instance.
//
// Contains the mappings from unique VC ids to their actual pointers.
static std::map<int, ParamContent*> vcMap;
static int nrParams = 0;
static int writeCommands[2];
ELISTIZE(ParamContent)
// Wraps a string parameter: tags the type, stores the pointer, claims the
// next unique id and registers this instance in the global id->content map.
ParamContent::ParamContent(tesseract::StringParam* it) {
  param_type_ = VT_STRING;
  sIt = it;
  my_id_ = nrParams++;
  vcMap[my_id_] = this;
}
// Wraps an integer parameter: tags the type, stores the pointer, claims the
// next unique id and registers this instance in the global id->content map.
ParamContent::ParamContent(tesseract::IntParam* it) {
  param_type_ = VT_INTEGER;
  iIt = it;
  my_id_ = nrParams++;
  vcMap[my_id_] = this;
}
// Wraps a boolean parameter: tags the type, stores the pointer, claims the
// next unique id and registers this instance in the global id->content map.
ParamContent::ParamContent(tesseract::BoolParam* it) {
  param_type_ = VT_BOOLEAN;
  bIt = it;
  my_id_ = nrParams++;
  vcMap[my_id_] = this;
}
// Wraps a double parameter: tags the type, stores the pointer, claims the
// next unique id and registers this instance in the global id->content map.
ParamContent::ParamContent(tesseract::DoubleParam* it) {
  param_type_ = VT_DOUBLE;
  dIt = it;
  my_id_ = nrParams++;
  vcMap[my_id_] = this;
}
// Gets a VC object identified by its ID.
// Returns NULL if no ParamContent was ever registered under this id.
ParamContent* ParamContent::GetParamContentById(int id) {
  // Use find() rather than operator[]: operator[] default-inserts a NULL
  // entry for every unknown id, silently growing vcMap on bad lookups
  // (and WriteParams() would then print through that NULL entry).
  std::map<int, ParamContent*>::const_iterator it = vcMap.find(id);
  return (it == vcMap.end()) ? NULL : it->second;
}
// Copy the first N words from the source string to the target string.
// Words are delimited by "_". The copy includes the trailing delimiter of
// the last copied word when one exists; t must be large enough to hold
// strlen(s)+1 bytes.
void ParamsEditor::GetFirstWords(
                 const char *s,  // source string
                 int n,          // number of words
                 char *t         // target string
                ) {
  int full_length = strlen(s);
  int reqd_len = 0;              // No. of chars required
  const char *next_word = s;

  while ((n > 0) && reqd_len < full_length) {
    reqd_len += strcspn(next_word, "_") + 1;
    // Jump to the start of the next word: reqd_len is an absolute offset
    // from s. (The previous code did next_word += reqd_len, advancing by the
    // *cumulative* length and overshooting from the third word onwards,
    // which produced wrong prefixes for n >= 3.)
    next_word = s + reqd_len;
    n--;
  }
  // reqd_len counts a delimiter after every word, so it can exceed the
  // string length when the last word has no trailing '_'; clamp to avoid
  // copying past the end of s.
  if (reqd_len > full_length)
    reqd_len = full_length;
  strncpy(t, s, reqd_len);
  t[reqd_len] = '\0';            // ensure null terminal
}
// Getter for the name. Dispatches on the stored parameter type; the error
// string is returned only if param_type_ holds an unexpected value.
const char* ParamContent::GetName() const {
  switch (param_type_) {
    case VT_INTEGER: return iIt->name_str();
    case VT_BOOLEAN: return bIt->name_str();
    case VT_DOUBLE:  return dIt->name_str();
    case VT_STRING:  return sIt->name_str();
    default:         return "ERROR: ParamContent::GetName()";
  }
}
// Getter for the description. Dispatches on the stored parameter type;
// returns NULL only if param_type_ holds an unexpected value.
const char* ParamContent::GetDescription() const {
  switch (param_type_) {
    case VT_INTEGER: return iIt->info_str();
    case VT_BOOLEAN: return bIt->info_str();
    case VT_DOUBLE:  return dIt->info_str();
    case VT_STRING:  return sIt->info_str();
    default:         return NULL;
  }
}
// Getter for the value, rendered as a STRING regardless of the underlying
// parameter type. Integers and booleans are formatted as decimal numbers;
// a string parameter whose underlying char* is NULL is rendered as "Null".
STRING ParamContent::GetValue() const {
  STRING result;
  if (param_type_ == VT_INTEGER) {
    result.add_str_int("", *iIt);
  } else if (param_type_ == VT_BOOLEAN) {
    // Booleans print as 0/1 via the integer formatter.
    result.add_str_int("", *bIt);
  } else if (param_type_ == VT_DOUBLE) {
    result.add_str_double("", *dIt);
  } else if (param_type_ == VT_STRING) {
    // The cast converts the StringParam to its STRING value so the raw
    // char* can be probed for NULL before assigning.
    if (((STRING) * (sIt)).string() != NULL) {
      result = sIt->string();
    } else {
      result = "Null";
    }
  }
  return result;
}
// Setter for the value. Parses the incoming text according to the stored
// parameter type and writes it through to the underlying Param object,
// marking this entry as changed.
void ParamContent::SetValue(const char* val) {
  // TODO (wanke) Test if the values actually are properly converted.
  // (Quickly visible impacts?)
  changed_ = TRUE;
  switch (param_type_) {
    case VT_INTEGER:
      iIt->set_value(atoi(val));
      break;
    case VT_BOOLEAN:
      bIt->set_value(atoi(val));
      break;
    case VT_DOUBLE:
      dIt->set_value(strtod(val, NULL));
      break;
    case VT_STRING:
      sIt->set_value(val);
      break;
    default:
      break;
  }
}
// Gets the up to the first 3 prefixes from s (split by _).
// For example, tesseract_foo_bar will be split into tesseract,foo and bar.
void ParamsEditor::GetPrefixes(const char* s, STRING* level_one,
                               STRING* level_two,
                               STRING* level_three) {
  // Stack scratch buffer: parameter names are short, so this avoids a heap
  // allocation (and any leak window) per call. Kept at the historical 1024
  // bytes, well beyond any Tesseract parameter name length.
  char buf[1024];
  GetFirstWords(s, 1, buf);
  *level_one = buf;
  GetFirstWords(s, 2, buf);
  *level_two = buf;
  GetFirstWords(s, 3, buf);
  *level_three = buf;
}
// Compare two VC objects by their name.
int ParamContent::Compare(const void* v1, const void* v2) {
const ParamContent* one =
*reinterpret_cast<const ParamContent* const *>(v1);
const ParamContent* two =
*reinterpret_cast<const ParamContent* const *>(v2);
return strcmp(one->GetName(), two->GetName());
}
// Find all editable parameters used within tesseract and create a
// SVMenuNode tree from it. Parameters whose prefix is unique go into an
// "OTHER" menu; shared prefixes become submenus (and sub-submenus when a
// submenu would exceed MAX_ITEMS_IN_SUBMENU entries).
// TODO (wanke): This is actually sort of hackish.
SVMenuNode* ParamsEditor::BuildListOfAllLeaves(tesseract::Tesseract *tess) {
  SVMenuNode* mr = new SVMenuNode();
  ParamContent_LIST vclist;
  ParamContent_IT vc_it(&vclist);
  // Amount counts the number of entries for a specific prefix.
  // Keyed by std::string, NOT const char*: the prefix text is produced in
  // loop-local STRINGs, so char* keys would compare by address and dangle
  // once each STRING is destroyed, fragmenting every count. std::string
  // copies the text at insertion, making the counts correct.
  // TODO(rays) get rid of the use of std::map.
  std::map<std::string, int> amount;

  // Add all parameters (global ones, then this instance's) to a list.
  int v, i;
  int num_iterations = (tess->params() == NULL) ? 1 : 2;
  for (v = 0; v < num_iterations; ++v) {
    tesseract::ParamsVectors *vec = (v == 0) ? GlobalParams() : tess->params();
    for (i = 0; i < vec->int_params.size(); ++i) {
      vc_it.add_after_then_move(new ParamContent(vec->int_params[i]));
    }
    for (i = 0; i < vec->bool_params.size(); ++i) {
      vc_it.add_after_then_move(new ParamContent(vec->bool_params[i]));
    }
    for (i = 0; i < vec->string_params.size(); ++i) {
      vc_it.add_after_then_move(new ParamContent(vec->string_params[i]));
    }
    for (i = 0; i < vec->double_params.size(); ++i) {
      vc_it.add_after_then_move(new ParamContent(vec->double_params[i]));
    }
  }

  // Count the # of entries starting with a specific prefix.
  for (vc_it.mark_cycle_pt(); !vc_it.cycled_list(); vc_it.forward()) {
    ParamContent* vc = vc_it.data();
    STRING tag;
    STRING tag2;
    STRING tag3;
    GetPrefixes(vc->GetName(), &tag, &tag2, &tag3);
    amount[tag.string()]++;
    amount[tag2.string()]++;
    amount[tag3.string()]++;
  }

  vclist.sort(ParamContent::Compare);  // Sort the list alphabetically.
  SVMenuNode* other = mr->AddChild("OTHER");

  // go through the list again and this time create the menu structure.
  vc_it.move_to_first();
  for (vc_it.mark_cycle_pt(); !vc_it.cycled_list(); vc_it.forward()) {
    ParamContent* vc = vc_it.data();
    STRING tag;
    STRING tag2;
    STRING tag3;
    GetPrefixes(vc->GetName(), &tag, &tag2, &tag3);

    if (amount[tag.string()] == 1) {
      // Unique prefix: a submenu would hold only this entry.
      other->AddChild(vc->GetName(), vc->GetId(), vc->GetValue().string(),
                      vc->GetDescription());
    } else {  // More than one would use this submenu -> create submenu.
      SVMenuNode* sv = mr->AddChild(tag.string());
      if ((amount[tag.string()] <= MAX_ITEMS_IN_SUBMENU) ||
          (amount[tag2.string()] <= 1)) {
        sv->AddChild(vc->GetName(), vc->GetId(),
                     vc->GetValue().string(), vc->GetDescription());
      } else {  // Make subsubmenus.
        SVMenuNode* sv2 = sv->AddChild(tag2.string());
        sv2->AddChild(vc->GetName(), vc->GetId(),
                      vc->GetValue().string(), vc->GetDescription());
      }
    }
  }
  return mr;
}
// Event listener. Waits for SVET_POPUP events and processes them.
void ParamsEditor::Notify(const SVEvent* sve) {
if (sve->type == SVET_POPUP) { // only catch SVET_POPUP!
char* param = sve->parameter;
if (sve->command_id == writeCommands[0]) {
WriteParams(param, false);
} else if (sve->command_id == writeCommands[1]) {
WriteParams(param, true);
} else {
ParamContent* vc = ParamContent::GetParamContentById(
sve->command_id);
vc->SetValue(param);
sv_window_->AddMessage("Setting %s to %s",
vc->GetName(), vc->GetValue().string());
}
}
}
// Integrate the parameters editor as popupmenu into the existing scrollview
// window (usually the pg editor). If sv == null, create a new empty
// empty window and attach the parameters editor to that window (ugly).
ParamsEditor::ParamsEditor(tesseract::Tesseract* tess,
                           ScrollView* sv) {
  if (sv == NULL) {
    const char* name = "ParamEditorMAIN";
    sv = new ScrollView(name, 1, 1, 200, 200, 300, 200);
  }

  sv_window_ = sv;

  //Only one event handler per window.
  //sv->AddEventHandler((SVEventHandler*) this);

  SVMenuNode* svMenuRoot = BuildListOfAllLeaves(tess);

  // Default output path for the generated config file: <datadir>/configs/edited
  STRING paramfile;
  paramfile = tess->datadir;
  paramfile += VARDIR;             // parameters dir
  paramfile += "edited";           // actual name

  SVMenuNode* std_menu = svMenuRoot->AddChild ("Build Config File");

  // The write commands get ids above every registered parameter id
  // (parameters use 0..nrParams-1), so Notify() can tell them apart.
  writeCommands[0] = nrParams+1;
  std_menu->AddChild("All Parameters", writeCommands[0],
                     paramfile.string(), "Config file name?");

  writeCommands[1] = nrParams+2;
  std_menu->AddChild ("changed_ Parameters Only", writeCommands[1],
                      paramfile.string(), "Config file name?");

  svMenuRoot->BuildMenu(sv, false);
}
// Write all (changed_) parameters to a config file.
// Prompts before overwriting an existing file. Each parameter is written as
// "name value # description", one per line.
void ParamsEditor::WriteParams(char *filename,
                               bool changes_only) {
  FILE *fp;                      // config file handle
  // if file exists, ask the user before clobbering it.
  if ((fp = fopen (filename, "rb")) != NULL) {
    fclose(fp);
    // Size the prompt buffer from the filename: the previous fixed 255-byte
    // buffer overflowed the stack for long paths.
    char* msg_str = new char[strlen(filename) + 32];
    sprintf(msg_str, "Overwrite file " "%s" "? (Y/N)", filename);
    int a = sv_window_->ShowYesNoDialog(msg_str);
    delete[] msg_str;
    if (a == 'n') { return; }  // dont write
  }

  fp = fopen (filename, "wb");  // can we write to it?
  if (fp == NULL) {
    sv_window_->AddMessage("Cant write to file " "%s" "", filename);
    return;
  }

  // Dump every registered parameter, or only those the user edited.
  for (std::map<int, ParamContent*>::iterator iter = vcMap.begin();
       iter != vcMap.end();
       ++iter) {
    ParamContent* cur = iter->second;
    if (!changes_only || cur->HasChanged()) {
      fprintf(fp, "%-25s %-12s # %s\n",
              cur->GetName(), cur->GetValue().string(), cur->GetDescription());
    }
  }
  fclose(fp);
}
#endif
| C++ |
///////////////////////////////////////////////////////////////////////
// File: tesseractclass.h
// Description: The Tesseract class. It holds/owns everything needed
// to run Tesseract on a single language, and also a set of
// sub-Tesseracts to run sub-languages. For thread safety, *every*
// global variable goes in here, directly, or indirectly.
// This makes it safe to run multiple Tesseracts in different
// threads in parallel, and keeps the different language
// instances separate.
// Author: Ray Smith
// Created: Fri Mar 07 08:17:01 PST 2008
//
// (C) Copyright 2008, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#ifndef TESSERACT_CCMAIN_TESSERACTCLASS_H__
#define TESSERACT_CCMAIN_TESSERACTCLASS_H__
#include "allheaders.h"
#include "control.h"
#include "docqual.h"
#include "devanagari_processing.h"
#include "genericvector.h"
#include "params.h"
#include "ocrclass.h"
#include "textord.h"
#include "wordrec.h"
class BLOB_CHOICE_LIST_CLIST;
class BLOCK_LIST;
class CharSamp;
struct OSResults;
class PAGE_RES;
class PAGE_RES_IT;
struct Pix;
class ROW;
class SVMenuNode;
class TBOX;
class TO_BLOCK_LIST;
class WERD;
class WERD_CHOICE;
class WERD_RES;
// Top-level class for all tesseract global instance data.
// This class either holds or points to all data used by an instance
// of Tesseract, including the memory allocator. When this is
// complete, Tesseract will be thread-safe. UNTIL THEN, IT IS NOT!
//
// NOTE to developers: Do not create cyclic dependencies through this class!
// The directory dependency tree must remain a tree! The keep this clean,
// lower-level code (eg in ccutil, the bottom level) must never need to
// know about the content of a higher-level directory.
// The following scheme will grant the easiest access to lower-level
// global members without creating a cyclic dependency:
//
// Class Hierarchy (^ = inheritance):
//
// CCUtil (ccutil/ccutil.h)
// ^ Members include: UNICHARSET
// CUtil (cutil/cutil_class.h)
// ^ Members include: TBLOB*, TEXTBLOCK*
// CCStruct (ccstruct/ccstruct.h)
// ^ Members include: Image
// Classify (classify/classify.h)
// ^ Members include: Dict
// WordRec (wordrec/wordrec.h)
// ^ Members include: WERD*, DENORM*
// Tesseract (ccmain/tesseractclass.h)
// Members include: Pix*, CubeRecoContext*,
// TesseractCubeCombiner*
//
// Other important classes:
//
// TessBaseAPI (api/baseapi.h)
// Members include: BLOCK_LIST*, PAGE_RES*,
// Tesseract*, ImageThresholder*
// Dict (dict/dict.h)
// Members include: Image* (private)
//
// NOTE: that each level contains members that correspond to global
// data that is defined (and used) at that level, not necessarily where
// the type is defined so for instance:
// BOOL_VAR_H(textord_show_blobs, false, "Display unsorted blobs");
// goes inside the Textord class, not the cc_util class.
namespace tesseract {
class ColumnFinder;
class CubeLineObject;
class CubeObject;
class CubeRecoContext;
class EquationDetect;
class Tesseract;
class TesseractCubeCombiner;
// A collection of various variables for statistics and debugging.
struct TesseractStats {
  // Zeroes every counter; the write_results() flags start in the
  // "nothing emitted yet" state.
  TesseractStats()
    : adaption_word_number(0),
      doc_blob_quality(0),
      doc_outline_errs(0),
      doc_char_quality(0),
      good_char_count(0),
      doc_good_char_quality(0),
      word_count(0),
      dict_words(0),
      tilde_crunch_written(false),
      last_char_was_newline(true),
      last_char_was_tilde(false),
      write_results_empty_block(true) {}

  // Running word number used during adaption — exact semantics defined by
  // the adaption code, TODO confirm.
  inT32 adaption_word_number;
  // Document-level quality tallies; units/semantics are defined by the
  // quality-assessment code (see docqual) — TODO confirm.
  inT16 doc_blob_quality;
  inT16 doc_outline_errs;
  inT16 doc_char_quality;
  inT16 good_char_count;
  inT16 doc_good_char_quality;
  inT32 word_count;  // count of word in the document
  inT32 dict_words;  // number of dictionary words in the document
  STRING dump_words_str;  // accumulator used by dump_words()
  // Flags used by write_results()
  bool tilde_crunch_written;
  bool last_char_was_newline;
  bool last_char_was_tilde;
  bool write_results_empty_block;
};
// Struct to hold all the pointers to relevant data for processing a word.
struct WordData {
  WordData() : word(NULL), row(NULL), block(NULL), prev_word(NULL) {}
  // Captures word/row/block from the current position of a page iterator.
  explicit WordData(const PAGE_RES_IT& page_res_it)
    : word(page_res_it.word()), row(page_res_it.row()->row),
      block(page_res_it.block()->block), prev_word(NULL) {}
  WordData(BLOCK* block_in, ROW* row_in, WERD_RES* word_res)
    : word(word_res), row(row_in), block(block_in), prev_word(NULL) {}

  WERD_RES* word;       // the word being processed (non-owning — TODO confirm)
  ROW* row;             // row containing the word
  BLOCK* block;         // block containing the row
  WordData* prev_word;  // presumably the preceding word in reading order — TODO confirm
  // Per-language recognition results for this word — TODO confirm usage.
  PointerVector<WERD_RES> lang_words;
};
// Definition of a Tesseract WordRecognizer. The WordData provides the context
// of row/block, in_word holds an initialized, possibly pre-classified word,
// that the recognizer may or may not consume (but if so it sets *in_word=NULL)
// and produces one or more output words in out_words, which may be the
// consumed in_word, or may be generated independently.
// This api allows both a conventional tesseract classifier to work, or a
// line-level classifier that generates multiple words from a merged input.
typedef void (Tesseract::*WordRecognizer)(const WordData& word_data,
WERD_RES** in_word,
PointerVector<WERD_RES>* out_words);
class Tesseract : public Wordrec {
public:
Tesseract();
~Tesseract();
// Clear as much used memory as possible without resetting the adaptive
// classifier or losing any other classifier data.
void Clear();
// Clear all memory of adaption for this and all subclassifiers.
void ResetAdaptiveClassifier();
// Clear the document dictionary for this and all subclassifiers.
void ResetDocumentDictionary();
// Set the equation detector.
void SetEquationDetect(EquationDetect* detector);
// Simple accessors.
const FCOORD& reskew() const {
return reskew_;
}
  // Destroy any existing pix and return a pointer to the pointer.
  // Calls Clear(), which destroys pix_binary_ (along with the other page
  // images), so the caller receives the address of a NULLed Pix* ready to
  // be assigned a new image.
  Pix** mutable_pix_binary() {
    Clear();
    return &pix_binary_;
  }
Pix* pix_binary() const {
return pix_binary_;
}
Pix* pix_grey() const {
return pix_grey_;
}
void set_pix_grey(Pix* grey_pix) {
pixDestroy(&pix_grey_);
pix_grey_ = grey_pix;
}
  // Returns a pointer to a Pix representing the best available image of the
  // page. The image will be 8-bit grey if the input was grey or color. Note
  // that in grey 0 is black and 255 is white. If the input was binary, then
  // the returned Pix will be binary. Note that here black is 1 and white is 0.
  // To tell the difference pixGetDepth() will return 8 or 1.
  // In either case, the return value is a borrowed Pix, and should not be
  // deleted or pixDestroyed.
  Pix* BestPix() const {
    // Prefer the grey image when one was stored; fall back to the binary.
    return pix_grey_ != NULL ? pix_grey_ : pix_binary_;
  }
void set_pix_thresholds(Pix* thresholds) {
pixDestroy(&pix_thresholds_);
pix_thresholds_ = thresholds;
}
int source_resolution() const {
return source_resolution_;
}
void set_source_resolution(int ppi) {
source_resolution_ = ppi;
}
int ImageWidth() const {
return pixGetWidth(pix_binary_);
}
int ImageHeight() const {
return pixGetHeight(pix_binary_);
}
Pix* scaled_color() const {
return scaled_color_;
}
int scaled_factor() const {
return scaled_factor_;
}
void SetScaledColor(int factor, Pix* color) {
scaled_factor_ = factor;
scaled_color_ = color;
}
const Textord& textord() const {
return textord_;
}
Textord* mutable_textord() {
return &textord_;
}
bool right_to_left() const {
return right_to_left_;
}
int num_sub_langs() const {
return sub_langs_.size();
}
Tesseract* get_sub_lang(int index) const {
return sub_langs_[index];
}
// Returns true if any language uses Tesseract (as opposed to cube).
bool AnyTessLang() const {
if (tessedit_ocr_engine_mode != OEM_CUBE_ONLY) return true;
for (int i = 0; i < sub_langs_.size(); ++i) {
if (sub_langs_[i]->tessedit_ocr_engine_mode != OEM_CUBE_ONLY)
return true;
}
return false;
}
void SetBlackAndWhitelist();
// Perform steps to prepare underlying binary image/other data structures for
// page segmentation. Uses the strategy specified in the global variable
// pageseg_devanagari_split_strategy for perform splitting while preparing for
// page segmentation.
void PrepareForPageseg();
// Perform steps to prepare underlying binary image/other data structures for
// Tesseract OCR. The current segmentation is required by this method.
// Uses the strategy specified in the global variable
// ocr_devanagari_split_strategy for performing splitting while preparing for
// Tesseract ocr.
void PrepareForTessOCR(BLOCK_LIST* block_list,
Tesseract* osd_tess, OSResults* osr);
int SegmentPage(const STRING* input_file, BLOCK_LIST* blocks,
Tesseract* osd_tess, OSResults* osr);
void SetupWordScripts(BLOCK_LIST* blocks);
int AutoPageSeg(PageSegMode pageseg_mode,
BLOCK_LIST* blocks, TO_BLOCK_LIST* to_blocks,
Tesseract* osd_tess, OSResults* osr);
ColumnFinder* SetupPageSegAndDetectOrientation(
bool single_column, bool osd, bool only_osd,
BLOCK_LIST* blocks, Tesseract* osd_tess, OSResults* osr,
TO_BLOCK_LIST* to_blocks, Pix** photo_mask_pix, Pix** music_mask_pix);
// par_control.cpp
void PrerecAllWordsPar(const GenericVector<WordData>& words);
//// control.h /////////////////////////////////////////////////////////
bool ProcessTargetWord(const TBOX& word_box, const TBOX& target_word_box,
const char* word_config, int pass);
// Sets up the words ready for whichever engine is to be run
void SetupAllWordsPassN(int pass_n,
const TBOX* target_word_box,
const char* word_config,
PAGE_RES* page_res,
GenericVector<WordData>* words);
// Sets up the single word ready for whichever engine is to be run.
void SetupWordPassN(int pass_n, WordData* word);
// Runs word recognition on all the words.
bool RecogAllWordsPassN(int pass_n, ETEXT_DESC* monitor,
PAGE_RES_IT* pr_it,
GenericVector<WordData>* words);
bool recog_all_words(PAGE_RES* page_res,
ETEXT_DESC* monitor,
const TBOX* target_word_box,
const char* word_config,
int dopasses);
void rejection_passes(PAGE_RES* page_res,
ETEXT_DESC* monitor,
const TBOX* target_word_box,
const char* word_config);
void bigram_correction_pass(PAGE_RES *page_res);
void blamer_pass(PAGE_RES* page_res);
// Sets script positions and detects smallcaps on all output words.
void script_pos_pass(PAGE_RES* page_res);
// Helper to recognize the word using the given (language-specific) tesseract.
// Returns positive if this recognizer found more new best words than the
// number kept from best_words.
int RetryWithLanguage(const WordData& word_data,
WordRecognizer recognizer,
WERD_RES** in_word,
PointerVector<WERD_RES>* best_words);
void classify_word_and_language(WordRecognizer recognizer,
PAGE_RES_IT* pr_it,
WordData* word_data);
void classify_word_pass1(const WordData& word_data,
WERD_RES** in_word,
PointerVector<WERD_RES>* out_words);
void recog_pseudo_word(PAGE_RES* page_res, // blocks to check
TBOX &selection_box);
void fix_rep_char(PAGE_RES_IT* page_res_it);
ACCEPTABLE_WERD_TYPE acceptable_word_string(const UNICHARSET& char_set,
const char *s,
const char *lengths);
void match_word_pass_n(int pass_n, WERD_RES *word, ROW *row, BLOCK* block);
void classify_word_pass2(const WordData& word_data,
WERD_RES** in_word,
PointerVector<WERD_RES>* out_words);
void ReportXhtFixResult(bool accept_new_word, float new_x_ht,
WERD_RES* word, WERD_RES* new_word);
bool RunOldFixXht(WERD_RES *word, BLOCK* block, ROW *row);
bool TrainedXheightFix(WERD_RES *word, BLOCK* block, ROW *row);
BOOL8 recog_interactive(PAGE_RES_IT* pr_it);
// Set fonts of this word.
void set_word_fonts(WERD_RES *word);
void font_recognition_pass(PAGE_RES* page_res);
void dictionary_correction_pass(PAGE_RES* page_res);
BOOL8 check_debug_pt(WERD_RES *word, int location);
//// superscript.cpp ////////////////////////////////////////////////////
bool SubAndSuperscriptFix(WERD_RES *word_res);
void GetSubAndSuperscriptCandidates(const WERD_RES *word,
int *num_rebuilt_leading,
ScriptPos *leading_pos,
float *leading_certainty,
int *num_rebuilt_trailing,
ScriptPos *trailing_pos,
float *trailing_certainty,
float *avg_certainty,
float *unlikely_threshold);
WERD_RES *TrySuperscriptSplits(int num_chopped_leading,
float leading_certainty,
ScriptPos leading_pos,
int num_chopped_trailing,
float trailing_certainty,
ScriptPos trailing_pos,
WERD_RES *word,
bool *is_good,
int *retry_leading,
int *retry_trailing);
bool BelievableSuperscript(bool debug,
const WERD_RES &word,
float certainty_threshold,
int *left_ok,
int *right_ok) const;
//// cube_control.cpp ///////////////////////////////////////////////////
bool init_cube_objects(bool load_combiner,
TessdataManager *tessdata_manager);
// Iterates through tesseract's results and calls cube on each word,
// combining the results with the existing tesseract result.
void run_cube_combiner(PAGE_RES *page_res);
// Recognizes a single word using (only) cube. Compatible with
// Tesseract's classify_word_pass1/classify_word_pass2.
void cube_word_pass1(BLOCK* block, ROW *row, WERD_RES *word);
// Cube recognizer to recognize a single word as with classify_word_pass1
// but also returns the cube object in case the combiner is needed.
CubeObject* cube_recognize_word(BLOCK* block, WERD_RES* word);
// Combines the cube and tesseract results for a single word, leaving the
// result in tess_word.
void cube_combine_word(CubeObject* cube_obj, WERD_RES* cube_word,
WERD_RES* tess_word);
// Call cube on the current word, and write the result to word.
// Sets up a fake result and returns false if something goes wrong.
bool cube_recognize(CubeObject *cube_obj, BLOCK* block, WERD_RES *word);
void fill_werd_res(const BoxWord& cube_box_word,
const char* cube_best_str,
WERD_RES* tess_werd_res);
bool extract_cube_state(CubeObject* cube_obj, int* num_chars,
Boxa** char_boxes, CharSamp*** char_samples);
bool create_cube_box_word(Boxa *char_boxes, int num_chars,
TBOX word_box, BoxWord* box_word);
//// output.h //////////////////////////////////////////////////////////
void output_pass(PAGE_RES_IT &page_res_it, const TBOX *target_word_box);
void write_results(PAGE_RES_IT &page_res_it, // full info
char newline_type, // type of newline
BOOL8 force_eol // override tilde crunch?
);
void set_unlv_suspects(WERD_RES *word);
UNICHAR_ID get_rep_char(WERD_RES *word); // what char is repeated?
BOOL8 acceptable_number_string(const char *s,
const char *lengths);
inT16 count_alphanums(const WERD_CHOICE &word);
inT16 count_alphas(const WERD_CHOICE &word);
//// tessedit.h ////////////////////////////////////////////////////////
void read_config_file(const char *filename, SetParamConstraint constraint);
// Initialize for potentially a set of languages defined by the language
// string and recursively any additional languages required by any language
// traineddata file (via tessedit_load_sublangs in its config) that is loaded.
// See init_tesseract_internal for args.
int init_tesseract(const char *arg0,
const char *textbase,
const char *language,
OcrEngineMode oem,
char **configs,
int configs_size,
const GenericVector<STRING> *vars_vec,
const GenericVector<STRING> *vars_values,
bool set_only_init_params);
// Convenience overload: initialize for the given datapath/language/engine
// mode with no output basename, no config files, no variable overrides,
// and all (not just init-only) parameters settable.
int init_tesseract(const char *datapath,
                   const char *language,
                   OcrEngineMode oem) {
  return init_tesseract(datapath, /*textbase=*/NULL, language, oem,
                        /*configs=*/NULL, /*configs_size=*/0,
                        /*vars_vec=*/NULL, /*vars_values=*/NULL,
                        /*set_only_init_params=*/false);
}
// Common initialization for a single language.
// arg0 is the datapath for the tessdata directory, which could be the
// path of the tessdata directory with no trailing /, or (if tessdata
// lives in the same directory as the executable) the path of the executable,
// hence the name arg0.
// textbase is an optional output file basename (used only for training)
// language is the language code to load.
// oem controls which engine(s) will operate on the image
// configs (argv) is an array of config filenames to load variables from.
// May be NULL.
// configs_size (argc) is the number of elements in configs.
// vars_vec is an optional vector of variables to set.
// vars_values is an optional corresponding vector of values for the variables
// in vars_vec.
// If set_only_init_params is true, then only the initialization variables
// will be set.
int init_tesseract_internal(const char *arg0,
const char *textbase,
const char *language,
OcrEngineMode oem,
char **configs,
int configs_size,
const GenericVector<STRING> *vars_vec,
const GenericVector<STRING> *vars_values,
bool set_only_init_params);
// Set the universal_id member of each font to be unique among all
// instances of the same font loaded.
void SetupUniversalFontIds();
int init_tesseract_lm(const char *arg0,
const char *textbase,
const char *language);
void recognize_page(STRING& image_name);
void end_tesseract();
bool init_tesseract_lang_data(const char *arg0,
const char *textbase,
const char *language,
OcrEngineMode oem,
char **configs,
int configs_size,
const GenericVector<STRING> *vars_vec,
const GenericVector<STRING> *vars_values,
bool set_only_init_params);
void ParseLanguageString(const char* lang_str,
GenericVector<STRING>* to_load,
GenericVector<STRING>* not_to_load);
//// pgedit.h //////////////////////////////////////////////////////////
SVMenuNode *build_menu_new();
#ifndef GRAPHICS_DISABLED
void pgeditor_main(int width, int height, PAGE_RES* page_res);
#endif // GRAPHICS_DISABLED
void process_image_event( // action in image win
const SVEvent &event);
BOOL8 process_cmd_win_event( // UI command semantics
inT32 cmd_event, // which menu item?
char *new_value // any prompt data
);
void debug_word(PAGE_RES* page_res, const TBOX &selection_box);
void do_re_display(
BOOL8 (tesseract::Tesseract::*word_painter)(PAGE_RES_IT* pr_it));
BOOL8 word_display(PAGE_RES_IT* pr_it);
BOOL8 word_bln_display(PAGE_RES_IT* pr_it);
BOOL8 word_blank_and_set_display(PAGE_RES_IT* pr_its);
BOOL8 word_set_display(PAGE_RES_IT* pr_it);
// #ifndef GRAPHICS_DISABLED
BOOL8 word_dumper(PAGE_RES_IT* pr_it);
// #endif // GRAPHICS_DISABLED
void blob_feature_display(PAGE_RES* page_res, const TBOX& selection_box);
//// reject.h //////////////////////////////////////////////////////////
// make rej map for word
void make_reject_map(WERD_RES *word, ROW *row, inT16 pass);
BOOL8 one_ell_conflict(WERD_RES *word_res, BOOL8 update_map);
inT16 first_alphanum_index(const char *word,
const char *word_lengths);
inT16 first_alphanum_offset(const char *word,
const char *word_lengths);
inT16 alpha_count(const char *word,
const char *word_lengths);
BOOL8 word_contains_non_1_digit(const char *word,
const char *word_lengths);
void dont_allow_1Il(WERD_RES *word);
inT16 count_alphanums( //how many alphanums
WERD_RES *word);
void flip_0O(WERD_RES *word);
BOOL8 non_0_digit(const UNICHARSET& ch_set, UNICHAR_ID unichar_id);
BOOL8 non_O_upper(const UNICHARSET& ch_set, UNICHAR_ID unichar_id);
BOOL8 repeated_nonalphanum_wd(WERD_RES *word, ROW *row);
void nn_match_word( //Match a word
WERD_RES *word,
ROW *row);
void nn_recover_rejects(WERD_RES *word, ROW *row);
void set_done( //set done flag
WERD_RES *word,
inT16 pass);
inT16 safe_dict_word(const WERD_RES *werd_res); // is best_choice in dict?
void flip_hyphens(WERD_RES *word);
void reject_I_1_L(WERD_RES *word);
void reject_edge_blobs(WERD_RES *word);
void reject_mostly_rejects(WERD_RES *word);
//// adaptions.h ///////////////////////////////////////////////////////
BOOL8 word_adaptable( //should we adapt?
WERD_RES *word,
uinT16 mode);
//// tfacepp.cpp ///////////////////////////////////////////////////////
void recog_word_recursive(WERD_RES* word);
void recog_word(WERD_RES *word);
void split_and_recog_word(WERD_RES* word);
void split_word(WERD_RES *word,
int split_pt,
WERD_RES **right_piece,
BlamerBundle **orig_blamer_bundle) const;
void join_words(WERD_RES *word,
WERD_RES *word2,
BlamerBundle *orig_bb) const;
//// fixspace.cpp ///////////////////////////////////////////////////////
BOOL8 digit_or_numeric_punct(WERD_RES *word, int char_position);
inT16 eval_word_spacing(WERD_RES_LIST &word_res_list);
void match_current_words(WERD_RES_LIST &words, ROW *row, BLOCK* block);
inT16 fp_eval_word_spacing(WERD_RES_LIST &word_res_list);
void fix_noisy_space_list(WERD_RES_LIST &best_perm, ROW *row, BLOCK* block);
void fix_fuzzy_space_list(WERD_RES_LIST &best_perm, ROW *row, BLOCK* block);
void fix_sp_fp_word(WERD_RES_IT &word_res_it, ROW *row, BLOCK* block);
void fix_fuzzy_spaces( //find fuzzy words
ETEXT_DESC *monitor, //progress monitor
inT32 word_count, //count of words in doc
PAGE_RES *page_res);
void dump_words(WERD_RES_LIST &perm, inT16 score,
inT16 mode, BOOL8 improved);
BOOL8 fixspace_thinks_word_done(WERD_RES *word);
inT16 worst_noise_blob(WERD_RES *word_res, float *worst_noise_score);
float blob_noise_score(TBLOB *blob);
void break_noisiest_blob_word(WERD_RES_LIST &words);
//// docqual.cpp ////////////////////////////////////////////////////////
GARBAGE_LEVEL garbage_word(WERD_RES *word, BOOL8 ok_dict_word);
BOOL8 potential_word_crunch(WERD_RES *word,
GARBAGE_LEVEL garbage_level,
BOOL8 ok_dict_word);
void tilde_crunch(PAGE_RES_IT &page_res_it);
void unrej_good_quality_words( //unreject potential
PAGE_RES_IT &page_res_it);
void doc_and_block_rejection( //reject big chunks
PAGE_RES_IT &page_res_it,
BOOL8 good_quality_doc);
void quality_based_rejection(PAGE_RES_IT &page_res_it,
BOOL8 good_quality_doc);
void convert_bad_unlv_chs(WERD_RES *word_res);
void tilde_delete(PAGE_RES_IT &page_res_it);
inT16 word_blob_quality(WERD_RES *word, ROW *row);
void word_char_quality(WERD_RES *word, ROW *row, inT16 *match_count,
inT16 *accepted_match_count);
void unrej_good_chs(WERD_RES *word, ROW *row);
inT16 count_outline_errs(char c, inT16 outline_count);
inT16 word_outline_errs(WERD_RES *word);
BOOL8 terrible_word_crunch(WERD_RES *word, GARBAGE_LEVEL garbage_level);
CRUNCH_MODE word_deletable(WERD_RES *word, inT16 &delete_mode);
inT16 failure_count(WERD_RES *word);
BOOL8 noise_outlines(TWERD *word);
//// pagewalk.cpp ///////////////////////////////////////////////////////
void
process_selected_words (
PAGE_RES* page_res, // blocks to check
//function to call
TBOX & selection_box,
BOOL8 (tesseract::Tesseract::*word_processor)(PAGE_RES_IT* pr_it));
//// tessbox.cpp ///////////////////////////////////////////////////////
void tess_add_doc_word( //test acceptability
WERD_CHOICE *word_choice //after context
);
void tess_segment_pass_n(int pass_n, WERD_RES *word);
bool tess_acceptable_word(WERD_RES *word);
//// applybox.cpp //////////////////////////////////////////////////////
// Applies the box file based on the image name fname, and resegments
// the words in the block_list (page), with:
// blob-mode: one blob per line in the box file, words as input.
// word/line-mode: one blob per space-delimited unit after the #, and one word
// per line in the box file. (See comment above for box file format.)
// If find_segmentation is true, (word/line mode) then the classifier is used
// to re-segment words/lines to match the space-delimited truth string for
// each box. In this case, the input box may be for a word or even a whole
// text line, and the output words will contain multiple blobs corresponding
// to the space-delimited input string.
// With find_segmentation false, no classifier is needed, but the chopper
// can still be used to correctly segment touching characters with the help
// of the input boxes.
// In the returned PAGE_RES, the WERD_RES are setup as they would be returned
// from normal classification, ie. with a word, chopped_word, rebuild_word,
// seam_array, denorm, box_word, and best_state, but NO best_choice or
// raw_choice, as they would require a UNICHARSET, which we aim to avoid.
// Instead, the correct_text member of WERD_RES is set, and this may be later
// converted to a best_choice using CorrectClassifyWords. CorrectClassifyWords
// is not required before calling ApplyBoxTraining.
PAGE_RES* ApplyBoxes(const STRING& fname, bool find_segmentation,
BLOCK_LIST *block_list);
// Any row xheight that is significantly different from the median is set
// to the median.
void PreenXHeights(BLOCK_LIST *block_list);
// Builds a PAGE_RES from the block_list in the way required for ApplyBoxes:
// All fuzzy spaces are removed, and all the words are maximally chopped.
PAGE_RES* SetupApplyBoxes(const GenericVector<TBOX>& boxes,
BLOCK_LIST *block_list);
// Tests the chopper by exhaustively running chop_one_blob.
// The word_res will contain filled chopped_word, seam_array, denorm,
// box_word and best_state for the maximally chopped word.
void MaximallyChopWord(const GenericVector<TBOX>& boxes,
BLOCK* block, ROW* row, WERD_RES* word_res);
// Gather consecutive blobs that match the given box into the best_state
// and corresponding correct_text.
// Fights over which box owns which blobs are settled by pre-chopping and
// applying the blobs to box or next_box with the least non-overlap.
// Returns false if the box was in error, which can only be caused by
// failing to find an appropriate blob for a box.
// This means that occasionally, blobs may be incorrectly segmented if the
// chopper fails to find a suitable chop point.
bool ResegmentCharBox(PAGE_RES* page_res, const TBOX *prev_box,
const TBOX& box, const TBOX& next_box,
const char* correct_text);
// Consume all source blobs that strongly overlap the given box,
// putting them into a new word, with the correct_text label.
// Fights over which box owns which blobs are settled by
// applying the blobs to box or next_box with the least non-overlap.
// Returns false if the box was in error, which can only be caused by
// failing to find an overlapping blob for a box.
bool ResegmentWordBox(BLOCK_LIST *block_list,
const TBOX& box, const TBOX& next_box,
const char* correct_text);
// Resegments the words by running the classifier in an attempt to find the
// correct segmentation that produces the required string.
void ReSegmentByClassification(PAGE_RES* page_res);
// Converts the space-delimited string of utf8 text to a vector of UNICHAR_ID.
// Returns false if an invalid UNICHAR_ID is encountered.
bool ConvertStringToUnichars(const char* utf8,
GenericVector<UNICHAR_ID>* class_ids);
// Resegments the word to achieve the target_text from the classifier.
// Returns false if the re-segmentation fails.
// Uses brute-force combination of up to kMaxGroupSize adjacent blobs, and
// applies a full search on the classifier results to find the best classified
// segmentation. As a compromise to obtain better recall, 1-1 ambiguity
// substitutions ARE used.
bool FindSegmentation(const GenericVector<UNICHAR_ID>& target_text,
WERD_RES* word_res);
// Recursive helper to find a match to the target_text (from text_index
// position) in the choices (from choices_pos position).
// Choices is an array of GenericVectors, of length choices_length, with each
// element representing a starting position in the word, and the
// GenericVector holding classification results for a sequence of consecutive
// blobs, with index 0 being a single blob, index 1 being 2 blobs etc.
void SearchForText(const GenericVector<BLOB_CHOICE_LIST*>* choices,
int choices_pos, int choices_length,
const GenericVector<UNICHAR_ID>& target_text,
int text_index,
float rating, GenericVector<int>* segmentation,
float* best_rating, GenericVector<int>* best_segmentation);
// Counts up the labelled words and the blobs within.
// Deletes all unused or emptied words, counting the unused ones.
// Resets W_BOL and W_EOL flags correctly.
// Builds the rebuild_word and rebuilds the box_word.
void TidyUp(PAGE_RES* page_res);
// Logs a bad box by line in the box file and box coords.
void ReportFailedBox(int boxfile_lineno, TBOX box, const char *box_ch,
const char *err_msg);
// Creates a fake best_choice entry in each WERD_RES with the correct text.
void CorrectClassifyWords(PAGE_RES* page_res);
// Call LearnWord to extract features for labelled blobs within each word.
// Features are written to the given filename.
void ApplyBoxTraining(const STRING& filename, PAGE_RES* page_res);
//// fixxht.cpp ///////////////////////////////////////////////////////
// Returns the number of misfit blob tops in this word.
int CountMisfitTops(WERD_RES *word_res);
// Returns a new x-height in pixels (original image coords) that is
// maximally compatible with the result in word_res.
// Returns 0.0f if no x-height is found that is better than the current
// estimate.
float ComputeCompatibleXheight(WERD_RES *word_res);
//// Data members ///////////////////////////////////////////////////////
// TODO(ocr-team): Find and remove obsolete parameters.
BOOL_VAR_H(tessedit_resegment_from_boxes, false,
"Take segmentation and labeling from box file");
BOOL_VAR_H(tessedit_resegment_from_line_boxes, false,
"Conversion of word/line box file to char box file");
BOOL_VAR_H(tessedit_train_from_boxes, false,
"Generate training data from boxed chars");
BOOL_VAR_H(tessedit_make_boxes_from_boxes, false,
"Generate more boxes from boxed chars");
BOOL_VAR_H(tessedit_dump_pageseg_images, false,
"Dump intermediate images made during page segmentation");
INT_VAR_H(tessedit_pageseg_mode, PSM_SINGLE_BLOCK,
"Page seg mode: 0=osd only, 1=auto+osd, 2=auto, 3=col, 4=block,"
" 5=line, 6=word, 7=char"
" (Values from PageSegMode enum in publictypes.h)");
INT_VAR_H(tessedit_ocr_engine_mode, tesseract::OEM_TESSERACT_ONLY,
"Which OCR engine(s) to run (Tesseract, Cube, both). Defaults"
" to loading and running only Tesseract (no Cube, no combiner)."
" (Values from OcrEngineMode enum in tesseractclass.h)");
STRING_VAR_H(tessedit_char_blacklist, "",
"Blacklist of chars not to recognize");
STRING_VAR_H(tessedit_char_whitelist, "",
"Whitelist of chars to recognize");
STRING_VAR_H(tessedit_char_unblacklist, "",
"List of chars to override tessedit_char_blacklist");
BOOL_VAR_H(tessedit_ambigs_training, false,
"Perform training for ambiguities");
INT_VAR_H(pageseg_devanagari_split_strategy,
tesseract::ShiroRekhaSplitter::NO_SPLIT,
"Whether to use the top-line splitting process for Devanagari "
"documents while performing page-segmentation.");
INT_VAR_H(ocr_devanagari_split_strategy,
tesseract::ShiroRekhaSplitter::NO_SPLIT,
"Whether to use the top-line splitting process for Devanagari "
"documents while performing ocr.");
STRING_VAR_H(tessedit_write_params_to_file, "",
"Write all parameters to the given file.");
BOOL_VAR_H(tessedit_adaption_debug, false,
"Generate and print debug information for adaption");
INT_VAR_H(bidi_debug, 0, "Debug level for BiDi");
INT_VAR_H(applybox_debug, 1, "Debug level");
INT_VAR_H(applybox_page, 0, "Page number to apply boxes from");
STRING_VAR_H(applybox_exposure_pattern, ".exp",
"Exposure value follows this pattern in the image"
" filename. The name of the image files are expected"
" to be in the form [lang].[fontname].exp[num].tif");
BOOL_VAR_H(applybox_learn_chars_and_char_frags_mode, false,
"Learn both character fragments (as is done in the"
" special low exposure mode) as well as unfragmented"
" characters.");
BOOL_VAR_H(applybox_learn_ngrams_mode, false,
"Each bounding box is assumed to contain ngrams. Only"
" learn the ngrams whose outlines overlap horizontally.");
BOOL_VAR_H(tessedit_display_outwords, false, "Draw output words");
BOOL_VAR_H(tessedit_dump_choices, false, "Dump char choices");
BOOL_VAR_H(tessedit_timing_debug, false, "Print timing stats");
BOOL_VAR_H(tessedit_fix_fuzzy_spaces, true,
"Try to improve fuzzy spaces");
BOOL_VAR_H(tessedit_unrej_any_wd, false,
"Dont bother with word plausibility");
BOOL_VAR_H(tessedit_fix_hyphens, true, "Crunch double hyphens?");
BOOL_VAR_H(tessedit_redo_xheight, true, "Check/Correct x-height");
BOOL_VAR_H(tessedit_enable_doc_dict, true,
"Add words to the document dictionary");
BOOL_VAR_H(tessedit_debug_fonts, false, "Output font info per char");
BOOL_VAR_H(tessedit_debug_block_rejection, false, "Block and Row stats");
BOOL_VAR_H(tessedit_enable_bigram_correction, true,
"Enable correction based on the word bigram dictionary.");
BOOL_VAR_H(tessedit_enable_dict_correction, false,
"Enable single word correction based on the dictionary.");
INT_VAR_H(tessedit_bigram_debug, 0, "Amount of debug output for bigram "
"correction.");
INT_VAR_H(debug_x_ht_level, 0, "Reestimate debug");
BOOL_VAR_H(debug_acceptable_wds, false, "Dump word pass/fail chk");
STRING_VAR_H(chs_leading_punct, "('`\"", "Leading punctuation");
STRING_VAR_H(chs_trailing_punct1, ").,;:?!", "1st Trailing punctuation");
STRING_VAR_H(chs_trailing_punct2, ")'`\"", "2nd Trailing punctuation");
double_VAR_H(quality_rej_pc, 0.08, "good_quality_doc lte rejection limit");
double_VAR_H(quality_blob_pc, 0.0, "good_quality_doc gte good blobs limit");
double_VAR_H(quality_outline_pc, 1.0,
"good_quality_doc lte outline error limit");
double_VAR_H(quality_char_pc, 0.95, "good_quality_doc gte good char limit");
INT_VAR_H(quality_min_initial_alphas_reqd, 2, "alphas in a good word");
INT_VAR_H(tessedit_tess_adaption_mode, 0x27,
"Adaptation decision algorithm for tess");
BOOL_VAR_H(tessedit_minimal_rej_pass1, false,
"Do minimal rejection on pass 1 output");
BOOL_VAR_H(tessedit_test_adaption, false, "Test adaption criteria");
BOOL_VAR_H(tessedit_matcher_log, false, "Log matcher activity");
INT_VAR_H(tessedit_test_adaption_mode, 3,
"Adaptation decision algorithm for tess");
BOOL_VAR_H(test_pt, false, "Test for point");
double_VAR_H(test_pt_x, 99999.99, "xcoord");
double_VAR_H(test_pt_y, 99999.99, "ycoord");
INT_VAR_H(paragraph_debug_level, 0, "Print paragraph debug info.");
BOOL_VAR_H(paragraph_text_based, true,
"Run paragraph detection on the post-text-recognition "
"(more accurate)");
INT_VAR_H(cube_debug_level, 1, "Print cube debug info.");
STRING_VAR_H(outlines_odd, "%| ", "Non standard number of outlines");
STRING_VAR_H(outlines_2, "ij!?%\":;", "Non standard number of outlines");
BOOL_VAR_H(docqual_excuse_outline_errs, false,
"Allow outline errs in unrejection?");
BOOL_VAR_H(tessedit_good_quality_unrej, true,
"Reduce rejection on good docs");
BOOL_VAR_H(tessedit_use_reject_spaces, true, "Reject spaces?");
double_VAR_H(tessedit_reject_doc_percent, 65.00,
"%rej allowed before rej whole doc");
double_VAR_H(tessedit_reject_block_percent, 45.00,
"%rej allowed before rej whole block");
double_VAR_H(tessedit_reject_row_percent, 40.00,
"%rej allowed before rej whole row");
double_VAR_H(tessedit_whole_wd_rej_row_percent, 70.00,
"Number of row rejects in whole word rejects"
"which prevents whole row rejection");
BOOL_VAR_H(tessedit_preserve_blk_rej_perfect_wds, true,
"Only rej partially rejected words in block rejection");
BOOL_VAR_H(tessedit_preserve_row_rej_perfect_wds, true,
"Only rej partially rejected words in row rejection");
BOOL_VAR_H(tessedit_dont_blkrej_good_wds, false,
"Use word segmentation quality metric");
BOOL_VAR_H(tessedit_dont_rowrej_good_wds, false,
"Use word segmentation quality metric");
INT_VAR_H(tessedit_preserve_min_wd_len, 2,
"Only preserve wds longer than this");
BOOL_VAR_H(tessedit_row_rej_good_docs, true,
"Apply row rejection to good docs");
double_VAR_H(tessedit_good_doc_still_rowrej_wd, 1.1,
"rej good doc wd if more than this fraction rejected");
BOOL_VAR_H(tessedit_reject_bad_qual_wds, true,
"Reject all bad quality wds");
BOOL_VAR_H(tessedit_debug_doc_rejection, false, "Page stats");
BOOL_VAR_H(tessedit_debug_quality_metrics, false,
"Output data to debug file");
BOOL_VAR_H(bland_unrej, false, "unrej potential with no chekcs");
double_VAR_H(quality_rowrej_pc, 1.1,
"good_quality_doc gte good char limit");
BOOL_VAR_H(unlv_tilde_crunching, true,
"Mark v.bad words for tilde crunch");
BOOL_VAR_H(hocr_font_info, false,
"Add font info to hocr output");
BOOL_VAR_H(crunch_early_merge_tess_fails, true, "Before word crunch?");
BOOL_VAR_H(crunch_early_convert_bad_unlv_chs, false, "Take out ~^ early?");
double_VAR_H(crunch_terrible_rating, 80.0, "crunch rating lt this");
BOOL_VAR_H(crunch_terrible_garbage, true, "As it says");
double_VAR_H(crunch_poor_garbage_cert, -9.0,
"crunch garbage cert lt this");
double_VAR_H(crunch_poor_garbage_rate, 60, "crunch garbage rating lt this");
double_VAR_H(crunch_pot_poor_rate, 40, "POTENTIAL crunch rating lt this");
double_VAR_H(crunch_pot_poor_cert, -8.0, "POTENTIAL crunch cert lt this");
BOOL_VAR_H(crunch_pot_garbage, true, "POTENTIAL crunch garbage");
double_VAR_H(crunch_del_rating, 60, "POTENTIAL crunch rating lt this");
double_VAR_H(crunch_del_cert, -10.0, "POTENTIAL crunch cert lt this");
double_VAR_H(crunch_del_min_ht, 0.7, "Del if word ht lt xht x this");
double_VAR_H(crunch_del_max_ht, 3.0, "Del if word ht gt xht x this");
double_VAR_H(crunch_del_min_width, 3.0, "Del if word width lt xht x this");
double_VAR_H(crunch_del_high_word, 1.5,
"Del if word gt xht x this above bl");
double_VAR_H(crunch_del_low_word, 0.5, "Del if word gt xht x this below bl");
double_VAR_H(crunch_small_outlines_size, 0.6, "Small if lt xht x this");
INT_VAR_H(crunch_rating_max, 10, "For adj length in rating per ch");
INT_VAR_H(crunch_pot_indicators, 1, "How many potential indicators needed");
BOOL_VAR_H(crunch_leave_ok_strings, true, "Dont touch sensible strings");
BOOL_VAR_H(crunch_accept_ok, true, "Use acceptability in okstring");
BOOL_VAR_H(crunch_leave_accept_strings, false,
"Dont pot crunch sensible strings");
BOOL_VAR_H(crunch_include_numerals, false, "Fiddle alpha figures");
INT_VAR_H(crunch_leave_lc_strings, 4,
"Dont crunch words with long lower case strings");
INT_VAR_H(crunch_leave_uc_strings, 4,
"Dont crunch words with long lower case strings");
INT_VAR_H(crunch_long_repetitions, 3, "Crunch words with long repetitions");
INT_VAR_H(crunch_debug, 0, "As it says");
INT_VAR_H(fixsp_non_noise_limit, 1,
"How many non-noise blbs either side?");
double_VAR_H(fixsp_small_outlines_size, 0.28, "Small if lt xht x this");
BOOL_VAR_H(tessedit_prefer_joined_punct, false, "Reward punctation joins");
INT_VAR_H(fixsp_done_mode, 1, "What constitues done for spacing");
INT_VAR_H(debug_fix_space_level, 0, "Contextual fixspace debug");
STRING_VAR_H(numeric_punctuation, ".,",
"Punct. chs expected WITHIN numbers");
INT_VAR_H(x_ht_acceptance_tolerance, 8,
"Max allowed deviation of blob top outside of font data");
INT_VAR_H(x_ht_min_change, 8, "Min change in xht before actually trying it");
INT_VAR_H(superscript_debug, 0, "Debug level for sub & superscript fixer");
double_VAR_H(superscript_worse_certainty, 2.0, "How many times worse "
"certainty does a superscript position glyph need to be for us "
"to try classifying it as a char with a different baseline?");
double_VAR_H(superscript_bettered_certainty, 0.97, "What reduction in "
"badness do we think sufficient to choose a superscript over "
"what we'd thought. For example, a value of 0.6 means we want "
"to reduce badness of certainty by 40%");
double_VAR_H(superscript_scaledown_ratio, 0.4,
"A superscript scaled down more than this is unbelievably "
"small. For example, 0.3 means we expect the font size to "
"be no smaller than 30% of the text line font size.");
double_VAR_H(subscript_max_y_top, 0.5,
"Maximum top of a character measured as a multiple of x-height "
"above the baseline for us to reconsider whether it's a "
"subscript.");
double_VAR_H(superscript_min_y_bottom, 0.3,
"Minimum bottom of a character measured as a multiple of "
"x-height above the baseline for us to reconsider whether it's "
"a superscript.");
BOOL_VAR_H(tessedit_write_block_separators, false,
"Write block separators in output");
BOOL_VAR_H(tessedit_write_rep_codes, false,
"Write repetition char code");
BOOL_VAR_H(tessedit_write_unlv, false, "Write .unlv output file");
BOOL_VAR_H(tessedit_create_txt, true, "Write .txt output file");
BOOL_VAR_H(tessedit_create_hocr, false, "Write .html hOCR output file");
BOOL_VAR_H(tessedit_create_pdf, false, "Write .pdf output file");
STRING_VAR_H(unrecognised_char, "|",
"Output char for unidentified blobs");
INT_VAR_H(suspect_level, 99, "Suspect marker level");
INT_VAR_H(suspect_space_level, 100,
"Min suspect level for rejecting spaces");
INT_VAR_H(suspect_short_words, 2,
"Dont Suspect dict wds longer than this");
BOOL_VAR_H(suspect_constrain_1Il, false, "UNLV keep 1Il chars rejected");
double_VAR_H(suspect_rating_per_ch, 999.9, "Dont touch bad rating limit");
double_VAR_H(suspect_accept_rating, -999.9, "Accept good rating limit");
BOOL_VAR_H(tessedit_minimal_rejection, false, "Only reject tess failures");
BOOL_VAR_H(tessedit_zero_rejection, false, "Dont reject ANYTHING");
BOOL_VAR_H(tessedit_word_for_word, false,
"Make output have exactly one word per WERD");
BOOL_VAR_H(tessedit_zero_kelvin_rejection, false,
"Dont reject ANYTHING AT ALL");
BOOL_VAR_H(tessedit_consistent_reps, true, "Force all rep chars the same");
INT_VAR_H(tessedit_reject_mode, 0, "Rejection algorithm");
BOOL_VAR_H(tessedit_rejection_debug, false, "Adaption debug");
BOOL_VAR_H(tessedit_flip_0O, true, "Contextual 0O O0 flips");
double_VAR_H(tessedit_lower_flip_hyphen, 1.5,
"Aspect ratio dot/hyphen test");
double_VAR_H(tessedit_upper_flip_hyphen, 1.8,
"Aspect ratio dot/hyphen test");
BOOL_VAR_H(rej_trust_doc_dawg, false, "Use DOC dawg in 11l conf. detector");
BOOL_VAR_H(rej_1Il_use_dict_word, false, "Use dictword test");
BOOL_VAR_H(rej_1Il_trust_permuter_type, true, "Dont double check");
BOOL_VAR_H(rej_use_tess_accepted, true, "Individual rejection control");
BOOL_VAR_H(rej_use_tess_blanks, true, "Individual rejection control");
BOOL_VAR_H(rej_use_good_perm, true, "Individual rejection control");
BOOL_VAR_H(rej_use_sensible_wd, false, "Extend permuter check");
BOOL_VAR_H(rej_alphas_in_number_perm, false, "Extend permuter check");
double_VAR_H(rej_whole_of_mostly_reject_word_fract, 0.85, "if >this fract");
INT_VAR_H(tessedit_image_border, 2, "Rej blbs near image edge limit");
STRING_VAR_H(ok_repeated_ch_non_alphanum_wds, "-?*\075",
"Allow NN to unrej");
STRING_VAR_H(conflict_set_I_l_1, "Il1[]", "Il1 conflict set");
INT_VAR_H(min_sane_x_ht_pixels, 8, "Reject any x-ht lt or eq than this");
BOOL_VAR_H(tessedit_create_boxfile, false, "Output text with boxes");
INT_VAR_H(tessedit_page_number, -1,
"-1 -> All pages, else specifc page to process");
BOOL_VAR_H(tessedit_write_images, false, "Capture the image from the IPE");
BOOL_VAR_H(interactive_display_mode, false, "Run interactively?");
STRING_VAR_H(file_type, ".tif", "Filename extension");
BOOL_VAR_H(tessedit_override_permuter, true, "According to dict_word");
INT_VAR_H(tessdata_manager_debug_level, 0,
"Debug level for TessdataManager functions.");
STRING_VAR_H(tessedit_load_sublangs, "",
"List of languages to load with this one");
BOOL_VAR_H(tessedit_use_primary_params_model, false,
"In multilingual mode use params model of the primary language");
// Min acceptable orientation margin (difference in scores between top and 2nd
// choice in OSResults::orientations) to believe the page orientation.
double_VAR_H(min_orientation_margin, 7.0,
"Min acceptable orientation margin");
BOOL_VAR_H(textord_tabfind_show_vlines, false, "Debug line finding");
BOOL_VAR_H(textord_use_cjk_fp_model, FALSE, "Use CJK fixed pitch model");
BOOL_VAR_H(poly_allow_detailed_fx, false,
"Allow feature extractors to see the original outline");
BOOL_VAR_H(tessedit_init_config_only, false,
"Only initialize with the config file. Useful if the instance is "
"not going to be used for OCR but say only for layout analysis.");
BOOL_VAR_H(textord_equation_detect, false, "Turn on equation detector");
BOOL_VAR_H(textord_tabfind_vertical_text, true, "Enable vertical detection");
BOOL_VAR_H(textord_tabfind_force_vertical_text, false,
"Force using vertical text page mode");
double_VAR_H(textord_tabfind_vertical_text_ratio, 0.5,
"Fraction of textlines deemed vertical to use vertical page "
"mode");
double_VAR_H(textord_tabfind_aligned_gap_fraction, 0.75,
"Fraction of height used as a minimum gap for aligned blobs.");
INT_VAR_H(tessedit_parallelize, 0, "Run in parallel where possible");
// The following parameters were deprecated and removed from their original
// locations. The parameters are temporarily kept here to give Tesseract
// users a chance to update their [lang].traineddata and config files
// without introducing failures during Tesseract initialization.
// TODO(ocr-team): remove these parameters from the code once we are
// reasonably sure that Tesseract users have updated their data files.
//
// BEGIN DEPRECATED PARAMETERS
BOOL_VAR_H(textord_tabfind_vertical_horizontal_mix, true,
"find horizontal lines such as headers in vertical page mode");
INT_VAR_H(tessedit_ok_mode, 5, "Acceptance decision algorithm");
BOOL_VAR_H(load_fixed_length_dawgs, true, "Load fixed length"
" dawgs (e.g. for non-space delimited languages)");
INT_VAR_H(segment_debug, 0, "Debug the whole segmentation process");
BOOL_VAR_H(permute_debug, 0, "char permutation debug");
double_VAR_H(bestrate_pruning_factor, 2.0, "Multiplying factor of"
" current best rate to prune other hypotheses");
BOOL_VAR_H(permute_script_word, 0,
"Turn on word script consistency permuter");
BOOL_VAR_H(segment_segcost_rating, 0,
"incorporate segmentation cost in word rating?");
double_VAR_H(segment_reward_script, 0.95,
"Score multipler for script consistency within a word. "
"Being a 'reward' factor, it should be <= 1. "
"Smaller value implies bigger reward.");
BOOL_VAR_H(permute_fixed_length_dawg, 0,
"Turn on fixed-length phrasebook search permuter");
BOOL_VAR_H(permute_chartype_word, 0,
"Turn on character type (property) consistency permuter");
double_VAR_H(segment_reward_chartype, 0.97,
"Score multipler for char type consistency within a word. ");
double_VAR_H(segment_reward_ngram_best_choice, 0.99,
"Score multipler for ngram permuter's best choice"
" (only used in the Han script path).");
BOOL_VAR_H(ngram_permuter_activated, false,
"Activate character-level n-gram-based permuter");
BOOL_VAR_H(permute_only_top, false, "Run only the top choice permuter");
INT_VAR_H(language_model_fixed_length_choices_depth, 3,
"Depth of blob choice lists to explore"
" when fixed length dawgs are on");
BOOL_VAR_H(use_new_state_cost, FALSE,
"use new state cost heuristics for segmentation state evaluation");
double_VAR_H(heuristic_segcost_rating_base, 1.25,
"base factor for adding segmentation cost into word rating."
"It's a multiplying factor, the larger the value above 1, "
"the bigger the effect of segmentation cost.");
double_VAR_H(heuristic_weight_rating, 1,
"weight associated with char rating in combined cost of state");
double_VAR_H(heuristic_weight_width, 1000.0,
"weight associated with width evidence in combined cost of"
" state");
double_VAR_H(heuristic_weight_seamcut, 0,
"weight associated with seam cut in combined cost of state");
double_VAR_H(heuristic_max_char_wh_ratio, 2.0,
"max char width-to-height ratio allowed in segmentation");
BOOL_VAR_H(enable_new_segsearch, false,
"Enable new segmentation search path.");
double_VAR_H(segsearch_max_fixed_pitch_char_wh_ratio, 2.0,
"Maximum character width-to-height ratio for"
"fixed pitch fonts");
// END DEPRECATED PARAMETERS
//// ambigsrecog.cpp /////////////////////////////////////////////////////////
FILE *init_recog_training(const STRING &fname);
void recog_training_segmented(const STRING &fname,
PAGE_RES *page_res,
volatile ETEXT_DESC *monitor,
FILE *output_file);
void ambigs_classify_and_output(const char *label,
PAGE_RES_IT* pr_it,
FILE *output_file);
// Accessor for the Cube recognition context pointer (cube_cntxt_) owned by
// this instance. NOTE(review): may be NULL if Cube was never initialized --
// callers should check before use.
inline CubeRecoContext *GetCubeRecoContext() { return cube_cntxt_; }
private:
// The filename of a backup config file. If not null, then we currently
// have a temporary debug config file loaded, and backup_config_file_
// will be loaded, and set to null when debug is complete.
const char* backup_config_file_;
// The filename of a config file to read when processing a debug word.
STRING word_config_;
// Image used for input to layout analysis and tesseract recognition.
// May be modified by the ShiroRekhaSplitter to eliminate the top-line.
Pix* pix_binary_;
// Unmodified image used for input to cube. Always valid.
Pix* cube_binary_;
// Grey-level input image if the input was not binary, otherwise NULL.
Pix* pix_grey_;
// Thresholds that were used to generate the thresholded image from grey.
Pix* pix_thresholds_;
// Input image resolution after any scaling. The resolution is not well
// transmitted by operations on Pix, so we keep an independent record here.
int source_resolution_;
// The shiro-rekha splitter object which is used to split top-lines in
// Devanagari words to provide a better word and grapheme segmentation.
ShiroRekhaSplitter splitter_;
// Page segmentation/layout
Textord textord_;
// True if the primary language uses right_to_left reading order.
bool right_to_left_;
Pix* scaled_color_;
int scaled_factor_;
FCOORD deskew_;
FCOORD reskew_;
TesseractStats stats_;
// Sub-languages to be tried in addition to this.
GenericVector<Tesseract*> sub_langs_;
// Most recently used Tesseract out of this and sub_langs_. The default
// language for the next word.
Tesseract* most_recently_used_;
// The size of the font table, ie max possible font id + 1.
int font_table_size_;
// Cube objects.
CubeRecoContext* cube_cntxt_;
TesseractCubeCombiner *tess_cube_combiner_;
// Equation detector. Note: this pointer is NOT owned by the class.
EquationDetect* equ_detect_;
};
} // namespace tesseract
#endif // TESSERACT_CCMAIN_TESSERACTCLASS_H__
| C++ |
///////////////////////////////////////////////////////////////////////
// File: thresholder.cpp
// Description: Base API for thresholding images in tesseract.
// Author: Ray Smith
// Created: Mon May 12 11:28:15 PDT 2008
//
// (C) Copyright 2008, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#include "allheaders.h"
#include "thresholder.h"
#include <string.h>
#include "otsuthr.h"
#include "openclwrapper.h"
namespace tesseract {
// Constructs an empty thresholder: no source image, zero channels/words per
// line, unit scale, and a default y-resolution estimate of 300 dpi.
// The processing rectangle starts out empty.
ImageThresholder::ImageThresholder()
    : pix_(NULL),
      image_width_(0), image_height_(0),
      pix_channels_(0), pix_wpl_(0),
      scale_(1), yres_(300), estimated_res_(300) {
  SetRectangle(0, 0, 0, 0);
}
// Releases the held Pix (if any) via Clear().
ImageThresholder::~ImageThresholder() {
  Clear();
}
// Destroy the Pix if there is one, freeing memory.
// pixDestroy is documented as a safe no-op on a NULL pix and resets the
// pointer to NULL, so no guard is needed here.
void ImageThresholder::Clear() {
  pixDestroy(&pix_);
}
// Return true if no image has been set.
// An image counts as "set" once SetImage has stored a Pix in pix_.
bool ImageThresholder::IsEmpty() const {
  return pix_ == NULL;
}
// SetImage makes a copy of all the image data, so it may be deleted
// immediately after this call.
// Greyscale of 8 and color of 24 or 32 bits per pixel may be given.
// Palette color images will not work properly and must be converted to
// 24 bit.
// Binary images of 1 bit per pixel may also be given but they must be
// byte packed with the MSB of the first byte being the first pixel, and a
// one pixel is WHITE. For binary images set bytes_per_pixel=0.
void ImageThresholder::SetImage(const unsigned char* imagedata,
                                int width, int height,
                                int bytes_per_pixel, int bytes_per_line) {
  int bpp = bytes_per_pixel * 8;
  // bytes_per_pixel == 0 is the documented convention for 1 bit/pixel input.
  if (bpp == 0) bpp = 1;
  // Leptonica has no 24-bit depth, so 24bpp input is stored in a 32-bit Pix.
  Pix* pix = pixCreate(width, height, bpp == 24 ? 32 : bpp);
  l_uint32* data = pixGetData(pix);
  int wpl = pixGetWpl(pix);
  switch (bpp) {
  case 1:
    // Input uses 1 = white (MSB first); a Pix uses 1 = black, so invert.
    for (int y = 0; y < height; ++y, data += wpl, imagedata += bytes_per_line) {
      for (int x = 0; x < width; ++x) {
        if (imagedata[x / 8] & (0x80 >> (x % 8)))
          CLEAR_DATA_BIT(data, x);
        else
          SET_DATA_BIT(data, x);
      }
    }
    break;
  case 8:
    // Greyscale just copies the bytes in the right order.
    for (int y = 0; y < height; ++y, data += wpl, imagedata += bytes_per_line) {
      for (int x = 0; x < width; ++x)
        SET_DATA_BYTE(data, x, imagedata[x]);
    }
    break;
  case 24:
    // Put the colors in the correct places in the line buffer.
    // data advances one word per pixel; at 32-bit depth wpl == width, so
    // this stays in step with the row boundary.
    for (int y = 0; y < height; ++y, imagedata += bytes_per_line) {
      for (int x = 0; x < width; ++x, ++data) {
        SET_DATA_BYTE(data, COLOR_RED, imagedata[3 * x]);
        SET_DATA_BYTE(data, COLOR_GREEN, imagedata[3 * x + 1]);
        SET_DATA_BYTE(data, COLOR_BLUE, imagedata[3 * x + 2]);
      }
    }
    break;
  case 32:
    // Maintain byte order consistency across different endianness.
    for (int y = 0; y < height; ++y, imagedata += bytes_per_line, data += wpl) {
      for (int x = 0; x < width; ++x) {
        data[x] = (imagedata[x * 4] << 24) | (imagedata[x * 4 + 1] << 16) |
                  (imagedata[x * 4 + 2] << 8) | imagedata[x * 4 + 3];
      }
    }
    break;
  default:
    tprintf("Cannot convert RAW image to Pix with bpp = %d\n", bpp);
  }
  // Raw buffers carry no resolution; assume a typical 300 dpi scan.
  pixSetYRes(pix, 300);
  SetImage(pix);
  pixDestroy(&pix);
}
// Remember the region of interest for later thresholding calls.
// Purely bookkeeping: no image processing happens here.
void ImageThresholder::SetRectangle(int left, int top, int width, int height) {
  rect_top_ = top;
  rect_left_ = left;
  rect_height_ = height;
  rect_width_ = width;
}
// Report the stored rectangle (left/top/width/height) plus the full image
// dimensions, so callers can rebuild bounding boxes in original-image
// coordinates, in either top-down or bottom-up form.
void ImageThresholder::GetImageSizes(int* left, int* top,
                                     int* width, int* height,
                                     int* imagewidth, int* imageheight) {
  *imagewidth = image_width_;
  *imageheight = image_height_;
  *left = rect_left_;
  *width = rect_width_;
  *top = rect_top_;
  *height = rect_height_;
}
// Pix vs raw, which to use? Pix is the preferred input for efficiency,
// since raw buffers are copied.
// SetImage for Pix clones its input, so the source pix may be pixDestroyed
// immediately after, but may not go away until after the Thresholder has
// finished with it.
void ImageThresholder::SetImage(const Pix* pix) {
  // Drop any previously-set image first.
  if (pix_ != NULL)
    pixDestroy(&pix_);
  Pix* src = const_cast<Pix*>(pix);
  int depth;
  pixGetDimensions(src, &image_width_, &image_height_, &depth);
  // Convert the image as necessary so it is one of binary, plain RGB, or
  // 8 bit with no colormap.
  if (depth > 1 && depth < 8) {
    // 2/4 bpp greyscale: promote to 8 bpp.
    pix_ = pixConvertTo8(src, false);
  } else if (pixGetColormap(src)) {
    // Palette image: expand based on the colormap contents.
    pix_ = pixRemoveColormap(src, REMOVE_CMAP_BASED_ON_SRC);
  } else {
    // Already usable: share the input without copying pixel data.
    pix_ = pixClone(src);
  }
  // Re-read the depth: the conversions above may have changed it.
  depth = pixGetDepth(pix_);
  pix_channels_ = depth / 8;  // 0 = binary, 1 = grey, 4 = 32-bit RGB words.
  pix_wpl_ = pixGetWpl(pix_);
  scale_ = 1;
  estimated_res_ = yres_ = pixGetYRes(src);
  Init();
}
// Threshold the source image as efficiently as possible to the output Pix.
// Creates a Pix and sets pix to point to the resulting pointer.
// Caller must use pixDestroy to free the created Pix.
// (pageseg_mode is currently unused by this implementation.)
void ImageThresholder::ThresholdToPix(PageSegMode pageseg_mode, Pix** pix) {
  if (pix_channels_ != 0) {
    // Grey or color input: binarize with per-channel Otsu thresholds.
    OtsuThresholdRectToPix(pix_, pix);
  } else {
    // Already binary: a clone/crop of the rectangle is all that is needed.
    *pix = GetPixRect();
  }
}
// Gets a pix that contains an 8 bit threshold value at each pixel. The
// returned pix may be an integer reduction of the binary image such that
// the scale factor may be inferred from the ratio of the sizes, even down
// to the extreme of a 1x1 pixel thresholds image.
// Ideally the 8 bit threshold should be the exact threshold used to generate
// the binary image in ThresholdToPix, but this is not a hard constraint.
// Returns NULL if the input is binary. PixDestroy after use.
Pix* ImageThresholder::GetPixRectThresholds() {
  if (IsBinary()) return NULL;
  Pix* pix_grey = GetPixRectGrey();
  int width = pixGetWidth(pix_grey);
  int height = pixGetHeight(pix_grey);
  int* thresholds;
  int* hi_values;
  // Per-channel Otsu thresholds; only channel 0 is used below.
  OtsuThreshold(pix_grey, 0, 0, width, height, &thresholds, &hi_values);
  pixDestroy(&pix_grey);
  // This implementation returns a full-size image holding one global value,
  // falling back to 128 when Otsu produced no usable threshold.
  Pix* pix_thresholds = pixCreate(width, height, 8);
  int threshold = thresholds[0] > 0 ? thresholds[0] : 128;
  pixSetAllArbitrary(pix_thresholds, threshold);
  delete [] thresholds;
  delete [] hi_values;
  return pix_thresholds;
}
// Common initialization shared between SetImage methods.
// Resets the processing rectangle to cover the entire image.
void ImageThresholder::Init() {
  SetRectangle(0, 0, image_width_, image_height_);
}
// Get a clone/copy of the source image rectangle.
// The returned Pix must be pixDestroyed.
// This function will be used in the future by the page layout analysis, and
// the layout analysis that uses it will only be available with Leptonica,
// so there is no raw equivalent.
Pix* ImageThresholder::GetPixRect() {
  if (IsFullImage())
    return pixClone(pix_);  // Whole image: share, don't copy.
  // Otherwise copy out just the rectangle of interest.
  Box* region = boxCreate(rect_left_, rect_top_, rect_width_, rect_height_);
  Pix* cropped = pixClipRectangle(pix_, region, NULL);
  boxDestroy(&region);
  return cropped;
}
// Get a clone/copy of the source image rectangle, reduced to greyscale,
// and at the same resolution as the output binary.
// The returned Pix must be pixDestroyed.
// Provided to the classifier to extract features from the greyscale image.
Pix* ImageThresholder::GetPixRectGrey() {
  Pix* rect = GetPixRect();
  int depth = pixGetDepth(rect);
  if (depth == 8)
    return rect;  // Already greyscale.
  // Binary gets promoted; RGB gets collapsed to luminance.
  Pix* grey;
  if (depth < 8)
    grey = pixConvertTo8(rect, false);
  else
    grey = pixConvertRGBToLuminance(rect);
  pixDestroy(&rect);
  return grey;
}
// Otsu thresholds the rectangle, taking the rectangle from *this.
// Computes per-channel Otsu thresholds, then binarizes with either the
// OpenCL device or the CPU fallback ThresholdRectToPix.
void ImageThresholder::OtsuThresholdRectToPix(Pix* src_pix,
                                              Pix** out_pix) const {
  PERF_COUNT_START("OtsuThresholdRectToPix")
  int* thresholds;
  int* hi_values;
  int num_channels = OtsuThreshold(src_pix, rect_left_, rect_top_, rect_width_,
                                   rect_height_, &thresholds, &hi_values);
  // only use opencl if compiled w/ OpenCL and selected device is opencl
#ifdef USE_OPENCL
  OpenclDevice od;
  // The OCL path only handles 1- or 4-channel data with the rectangle
  // anchored at the image origin.
  if ((num_channels == 4 || num_channels == 1) &&
      od.selectedDeviceIsOpenCL() && rect_top_ == 0 && rect_left_ == 0 ) {
    od.ThresholdRectToPixOCL((const unsigned char*)pixGetData(src_pix),
                             num_channels, pixGetWpl(src_pix) * 4,
                             thresholds, hi_values, out_pix /*pix_OCL*/,
                             rect_height_, rect_width_, rect_top_, rect_left_);
  } else {
#endif
    ThresholdRectToPix(src_pix, num_channels, thresholds, hi_values, out_pix);
#ifdef USE_OPENCL
  }
#endif
  // Release the arrays allocated by OtsuThreshold.
  delete [] thresholds;
  delete [] hi_values;
  PERF_COUNT_END
}
/// Threshold the rectangle, taking everything except the src_pix
/// from the class, using thresholds/hi_values to the output pix.
/// NOTE that num_channels is the size of the thresholds and hi_values
/// arrays and also the bytes per pixel in src_pix.
void ImageThresholder::ThresholdRectToPix(Pix* src_pix,
                                          int num_channels,
                                          const int* thresholds,
                                          const int* hi_values,
                                          Pix** pix) const {
  PERF_COUNT_START("ThresholdRectToPix")
  *pix = pixCreate(rect_width_, rect_height_, 1);
  uinT32* pixdata = pixGetData(*pix);
  int wpl = pixGetWpl(*pix);
  int src_wpl = pixGetWpl(src_pix);
  uinT32* srcdata = pixGetData(src_pix);
  for (int y = 0; y < rect_height_; ++y) {
    const uinT32* linedata = srcdata + (y + rect_top_) * src_wpl;
    uinT32* pixline = pixdata + y * wpl;
    for (int x = 0; x < rect_width_; ++x) {
      bool white_result = true;
      // A pixel becomes foreground (black) if any channel with
      // hi_values[ch] >= 0 falls on the "dark" side of its threshold;
      // hi_values[ch] selects which side counts as dark.
      for (int ch = 0; ch < num_channels; ++ch) {
        // GET_DATA_BYTE wants a non-const pointer, hence the const_cast.
        int pixel = GET_DATA_BYTE(const_cast<void*>(
            reinterpret_cast<const void *>(linedata)),
                                  (x + rect_left_) * num_channels + ch);
        if (hi_values[ch] >= 0 &&
            (pixel > thresholds[ch]) == (hi_values[ch] == 0)) {
          white_result = false;
          break;
        }
      }
      // In a 1 bpp Pix, a clear bit is white and a set bit is black.
      if (white_result)
        CLEAR_DATA_BIT(pixline, x);
      else
        SET_DATA_BIT(pixline, x);
    }
  }
  PERF_COUNT_END
}
} // namespace tesseract.
| C++ |
/**********************************************************************
* File: paragraphs.cpp
* Description: Paragraph detection for tesseract.
* Author: David Eger
* Created: 25 February 2011
*
* (C) Copyright 2011, Google Inc.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
#ifdef _MSC_VER
#define __func__ __FUNCTION__
#endif
#include <ctype.h>
#include "genericvector.h"
#include "helpers.h"
#include "mutableiterator.h"
#include "ocrpara.h"
#include "pageres.h"
#include "paragraphs.h"
#include "paragraphs_internal.h"
#include "publictypes.h"
#include "ratngs.h"
#include "rect.h"
#include "statistc.h"
#include "strngs.h"
#include "tprintf.h"
#include "unicharset.h"
#include "unicodes.h"
namespace tesseract {
// Special "weak" ParagraphModels.
// These are sentinel pointer values, not pointers to real objects: the code
// compares against them by address only (see AppendDebugInfo's "CrL"/"CrR"
// labels). They must never be dereferenced.
const ParagraphModel *kCrownLeft
    = reinterpret_cast<ParagraphModel *>(0xDEAD111F);
const ParagraphModel *kCrownRight
    = reinterpret_cast<ParagraphModel *>(0xDEAD888F);
// Given the width of a typical inter-word space in pixels, return the
// tolerance (80% of that space) within which left and right paragraph
// alignments are still considered aligned.
static int Epsilon(int space_pix) {
  return (space_pix * 4) / 5;
}
// Validates the half-open interval [row_start, row_end) against rows and a
// minimum row count. Logs and returns false on bad or too-small ranges.
static bool AcceptableRowArgs(
    int debug_level, int min_num_rows, const char *function_name,
    const GenericVector<RowScratchRegisters> *rows,
    int row_start, int row_end) {
  if (row_start < 0 || row_end > rows->size() || row_start > row_end) {
    tprintf("Invalid arguments rows[%d, %d) while rows is of size %d.\n",
            row_start, row_end, rows->size());
    return false;
  }
  if (row_end - row_start < min_num_rows) {
    // A too-short range is normal; only worth mentioning when debugging.
    if (debug_level > 1) {
      tprintf("# Too few rows[%d, %d) for %s.\n",
              row_start, row_end, function_name);
    }
    return false;
  }
  return true;
}
// =============================== Debug Code ================================
// Render num as its base-10 string representation.
static STRING StrOf(int num) {
  char decimal[30];
  snprintf(decimal, sizeof(decimal), "%d", num);
  return STRING(decimal);
}
// Given a row-major matrix of unicode text and a column separator, print
// a formatted table. For ASCII, we get good column alignment.
static void PrintTable(const GenericVector<GenericVector<STRING> > &rows,
                       const STRING &colsep) {
  // First pass: find the widest cell (in codepoints) for each column.
  GenericVector<int> max_col_widths;
  for (int r = 0; r < rows.size(); r++) {
    int num_columns = rows[r].size();
    for (int c = 0; c < num_columns; c++) {
      // Count UTF-8 lead bytes (anything but a 10xxxxxx continuation byte)
      // so a multi-byte character occupies one column position.
      int num_unicodes = 0;
      for (int i = 0; i < rows[r][c].size(); i++) {
        if ((rows[r][c][i] & 0xC0) != 0x80) num_unicodes++;
      }
      if (c >= max_col_widths.size()) {
        max_col_widths.push_back(num_unicodes);
      } else {
        if (num_unicodes > max_col_widths[c])
          max_col_widths[c] = num_unicodes;
      }
    }
  }
  // Build a left-justified "%-<width>s" format pattern per column.
  GenericVector<STRING> col_width_patterns;
  for (int c = 0; c < max_col_widths.size(); c++) {
    col_width_patterns.push_back(
        STRING("%-") + StrOf(max_col_widths[c]) + "s");
  }
  // Second pass: print every row, padding each cell to its column width.
  for (int r = 0; r < rows.size(); r++) {
    for (int c = 0; c < rows[r].size(); c++) {
      if (c > 0)
        tprintf("%s", colsep.string());
      tprintf(col_width_patterns[c].string(), rows[r][c].string());
    }
    tprintf("\n");
  }
}
// When rtlify is true, wrap word in RLE...PDF embedding marks so it is
// rendered right-to-left; otherwise return the word unchanged.
STRING RtlEmbed(const STRING &word, bool rtlify) {
  if (!rtlify)
    return word;
  return STRING(kRLE) + word + STRING(kPDF);
}
// Print the current thoughts of the paragraph detector.
static void PrintDetectorState(const ParagraphTheory &theory,
                               const GenericVector<RowScratchRegisters> &rows) {
  GenericVector<GenericVector<STRING> > output;
  // Header row of the table.
  output.push_back(GenericVector<STRING>());
  output.back().push_back("#row");
  output.back().push_back("space");
  output.back().push_back("..");
  output.back().push_back("lword[widthSEL]");
  output.back().push_back("rword[widthSEL]");
  RowScratchRegisters::AppendDebugHeaderFields(&output.back());
  output.back().push_back("text");
  // One table row per text row. The left/right word summaries use
  // upper-case letters for set flags: S/s = starts idea, E/e = ends idea,
  // L/l = looks like a list item.
  for (int i = 0; i < rows.size(); i++) {
    output.push_back(GenericVector<STRING>());
    GenericVector<STRING> &row = output.back();
    const RowInfo& ri = *rows[i].ri_;
    row.push_back(StrOf(i));
    row.push_back(StrOf(ri.average_interword_space));
    row.push_back(ri.has_leaders ? ".." : " ");
    row.push_back(RtlEmbed(ri.lword_text, !ri.ltr) +
                  "[" + StrOf(ri.lword_box.width()) +
                  (ri.lword_likely_starts_idea ? "S" : "s") +
                  (ri.lword_likely_ends_idea ? "E" : "e") +
                  (ri.lword_indicates_list_item ? "L" : "l") +
                  "]");
    row.push_back(RtlEmbed(ri.rword_text, !ri.ltr) +
                  "[" + StrOf(ri.rword_box.width()) +
                  (ri.rword_likely_starts_idea ? "S" : "s") +
                  (ri.rword_likely_ends_idea ? "E" : "e") +
                  (ri.rword_indicates_list_item ? "L" : "l") +
                  "]");
    rows[i].AppendDebugInfo(theory, &row);
    row.push_back(RtlEmbed(ri.text, !ri.ltr));
  }
  PrintTable(output, " ");
  // Finally, list the paragraph models the theory currently holds.
  tprintf("Active Paragraph Models:\n");
  for (int m = 0; m < theory.models().size(); m++) {
    tprintf(" %d: %s\n", m + 1, theory.models()[m]->ToString().string());
  }
}
// Print a named snapshot of the detector state, but only when the caller
// asked for debug output.
static void DebugDump(
    bool should_print,
    const STRING &phase,
    const ParagraphTheory &theory,
    const GenericVector<RowScratchRegisters> &rows) {
  if (should_print) {
    tprintf("# %s\n", phase.string());
    PrintDetectorState(theory, rows);
  }
}
// Print out the text for rows[row_start, row_end)
// Debug helper; output is framed by separator lines and goes via tprintf.
static void PrintRowRange(const GenericVector<RowScratchRegisters> &rows,
                          int row_start, int row_end) {
  tprintf("======================================\n");
  for (int row = row_start; row < row_end; row++) {
    tprintf("%s\n", rows[row].ri_->text.string());
  }
  tprintf("======================================\n");
}
// ============= Brain Dead Language Model (ASCII Version) ===================
// True iff ch is an unaccented ASCII Latin letter (deliberately
// locale-independent, unlike isalpha).
bool IsLatinLetter(int ch) {
  const bool lower = ch >= 'a' && ch <= 'z';
  const bool upper = ch >= 'A' && ch <= 'Z';
  return lower || upper;
}
// True for letters that OCR commonly confuses with digits (o/O for 0,
// l/I for 1).
bool IsDigitLike(int ch) {
  switch (ch) {
    case 'o':
    case 'O':
    case 'l':
    case 'I':
      return true;
    default:
      return false;
  }
}
// True if ch is punctuation that can open a quotation or bracketed span.
bool IsOpeningPunct(int ch) {
  const char *kOpeners = "'\"({[";
  return strchr(kOpeners, ch) != NULL;
}
// True if ch is punctuation that can close a sentence or bracketed span.
bool IsTerminalPunct(int ch) {
  const char *kClosers = ":'\".?!]})";
  return strchr(kClosers, ch) != NULL;
}
// Return a pointer after consuming as much text as qualifies as roman numeral.
const char *SkipChars(const char *str, const char *toskip) {
while (*str != '\0' && strchr(toskip, *str)) { str++; }
return str;
}
const char *SkipChars(const char *str, bool (*skip)(int)) {
while (*str != '\0' && skip(*str)) { str++; }
return str;
}
const char *SkipOne(const char *str, const char *toskip) {
if (*str != '\0' && strchr(toskip, *str)) return str + 1;
return str;
}
// Return whether it is very likely that this is a numeral marker that could
// start a list item. Some examples include:
//   A I iii. VI (2) 3.5. [C-4]
// Accepts up to three segments, each optionally wrapped in opening/closing
// brackets, where a segment is a roman numeral, a run of digits, or a single
// latin letter, and all but the last segment ends in a separator.
bool LikelyListNumeral(const STRING &word) {
  const char *kRomans = "ivxlmdIVXLMD";
  // BUG FIX: the digit set previously read "012345789" -- '6' was missing,
  // so any list numeral containing a 6 (e.g. "6." or "(16)") was rejected.
  const char *kDigits = "0123456789";
  const char *kOpen = "[{(";
  const char *kSep = ":;-.,";
  const char *kClose = "]})";
  int num_segments = 0;
  const char *pos = word.string();
  while (*pos != '\0' && num_segments < 3) {
    // skip up to two open parens.
    const char *numeral_start = SkipOne(SkipOne(pos, kOpen), kOpen);
    const char *numeral_end = SkipChars(numeral_start, kRomans);
    if (numeral_end != numeral_start) {
      // Got Roman Numeral. Great.
    } else {
      numeral_end = SkipChars(numeral_start, kDigits);
      if (numeral_end == numeral_start) {
        // If there's a single latin letter, we can use that.
        numeral_end = SkipChars(numeral_start, IsLatinLetter);
        if (numeral_end - numeral_start != 1)
          break;
      }
    }
    // We got some sort of numeral.
    num_segments++;
    // Skip any trailing parens or punctuation.
    pos = SkipChars(SkipChars(numeral_end, kClose), kSep);
    if (pos == numeral_end)
      break;
  }
  // The word qualifies only if the whole of it was consumed.
  return *pos == '\0';
}
// True for single-character words that are plausible bullet marks.
bool LikelyListMark(const STRING &word) {
  const char *kListMarks = "0Oo*.,+.";
  if (word.size() != 1)
    return false;
  return strchr(kListMarks, word[0]) != NULL;
}
// True if word plausibly introduces a list item: either a one-character
// bullet mark or a numeral-style marker.
bool AsciiLikelyListItem(const STRING &word) {
  if (LikelyListMark(word))
    return true;
  return LikelyListNumeral(word);
}
// ========== Brain Dead Language Model (Tesseract Version) ================
// Return the first Unicode Codepoint from werd[pos].
// Returns 0 when either argument is missing.
// NOTE(review): the guard permits pos == werd->length(), which looks one
// past the last element -- confirm WERD_CHOICE::unichar_id tolerates that.
int UnicodeFor(const UNICHARSET *u, const WERD_CHOICE *werd, int pos) {
  if (!u || !werd || pos > werd->length())
    return 0;
  return UNICHAR(u->id_to_unichar(werd->unichar_id(pos)), -1).first_uni();
}
// A useful helper class for finding the first j >= i so that word[j]
// does not have given character type.
class UnicodeSpanSkipper {
 public:
  UnicodeSpanSkipper(const UNICHARSET *unicharset, const WERD_CHOICE *word)
      : u_(unicharset), word_(word) { wordlen_ = word->length(); }
  // Given an input position, return the first position >= pos not punc.
  int SkipPunc(int pos);
  // Given an input position, return the first position >= pos not digit.
  int SkipDigits(int pos);
  // Given an input position, return the first position >= pos not roman.
  int SkipRomans(int pos);
  // Given an input position, return the first position >= pos not alpha.
  int SkipAlpha(int pos);
 private:
  const UNICHARSET *u_;      // Character set used for classification (not owned).
  const WERD_CHOICE *word_;  // The word being scanned (not owned).
  int wordlen_;              // Cached word_->length().
};
int UnicodeSpanSkipper::SkipPunc(int pos) {
  while (pos < wordlen_ && u_->get_ispunctuation(word_->unichar_id(pos))) pos++;
  return pos;
}
// Digits also include the OCR look-alikes o/O/l/I (see IsDigitLike) so that
// misrecognized numerals still count as digit spans.
int UnicodeSpanSkipper::SkipDigits(int pos) {
  while (pos < wordlen_ && (u_->get_isdigit(word_->unichar_id(pos)) ||
                            IsDigitLike(UnicodeFor(u_, word_, pos)))) pos++;
  return pos;
}
int UnicodeSpanSkipper::SkipRomans(int pos) {
  const char *kRomans = "ivxlmdIVXLMD";
  while (pos < wordlen_) {
    int ch = UnicodeFor(u_, word_, pos);
    // Codepoints >= 0xF0 cannot be ASCII roman-numeral letters; stop there.
    if (ch >= 0xF0 || strchr(kRomans, ch) == 0) break;
    pos++;
  }
  return pos;
}
int UnicodeSpanSkipper::SkipAlpha(int pos) {
  while (pos < wordlen_ && u_->get_isalpha(word_->unichar_id(pos))) pos++;
  return pos;
}
// True if codepoint ch is commonly used as a standalone bullet/list mark.
bool LikelyListMarkUnicode(int ch) {
  if (ch < 0x80) {
    // ASCII range: defer to the ASCII single-character list-mark test.
    STRING single_ch;
    single_ch += ch;
    return LikelyListMark(single_ch);
  }
  switch (ch) {
    // TODO(eger) expand this list of unicodes as needed.
    case 0x00B0:  // degree sign
    case 0x2022:  // bullet
    case 0x25E6:  // white bullet
    case 0x00B7:  // middle dot
    case 0x25A1:  // white square
    case 0x25A0:  // black square
    case 0x25AA:  // black small square
    case 0x2B1D:  // black very small square
    case 0x25BA:  // black right-pointing pointer
    case 0x25CF:  // black circle
    case 0x25CB:  // white circle
      return true;
    default:
      break;  // fall through
  }
  return false;
}
// Return whether it is very likely that this is a numeral marker that could
// start a list item. Some examples include:
//   A I iii. VI (2) 3.5. [C-4]
// Unicode-aware analogue of LikelyListNumeral: consumes up to three
// punctuation-wrapped numeral segments and requires the whole word to match.
bool UniLikelyListItem(const UNICHARSET *u, const WERD_CHOICE *werd) {
  // A single bullet-like codepoint qualifies on its own.
  if (werd->length() == 1 && LikelyListMarkUnicode(UnicodeFor(u, werd, 0)))
    return true;
  UnicodeSpanSkipper m(u, werd);
  int num_segments = 0;
  int pos = 0;
  while (pos < werd->length() && num_segments < 3) {
    // Allow at most one leading punctuation character per segment.
    int numeral_start = m.SkipPunc(pos);
    if (numeral_start > pos + 1) break;
    int numeral_end = m.SkipRomans(numeral_start);
    if (numeral_end == numeral_start) {
      numeral_end = m.SkipDigits(numeral_start);
      if (numeral_end == numeral_start) {
        // If there's a single latin letter, we can use that.
        numeral_end = m.SkipAlpha(numeral_start);
        if (numeral_end - numeral_start != 1)
          break;
      }
    }
    // We got some sort of numeral.
    num_segments++;
    // Skip any trailing punctuation.
    pos = m.SkipPunc(numeral_end);
    if (pos == numeral_end)
      break;
  }
  // The word qualifies only if every character was consumed.
  return pos == werd->length();
}
// ========= Brain Dead Language Model (combined entry points) ================
// Given the leftmost word of a line either as a Tesseract unicharset + werd
// or a utf8 string, set the following attributes for it:
//   is_list - this word might be a list number or bullet.
//   starts_idea - this word is likely to start a sentence.
//   ends_idea - this word is likely to end a sentence.
// When both representations are available, the unicharset path is used;
// otherwise the utf8 text is treated as mostly-ASCII.
void LeftWordAttributes(const UNICHARSET *unicharset, const WERD_CHOICE *werd,
                        const STRING &utf8,
                        bool *is_list, bool *starts_idea, bool *ends_idea) {
  *is_list = false;
  *starts_idea = false;
  *ends_idea = false;
  if (utf8.size() == 0 || (werd != NULL && werd->length() == 0)) {  // Empty
    *ends_idea = true;
    return;
  }
  if (unicharset && werd) {  // We have a proper werd and unicharset so use it.
    if (UniLikelyListItem(unicharset, werd)) {
      *is_list = true;
      *starts_idea = true;
      *ends_idea = true;
    }
    if (unicharset->get_isupper(werd->unichar_id(0))) {
      *starts_idea = true;
    }
    if (unicharset->get_ispunctuation(werd->unichar_id(0))) {
      *starts_idea = true;
      *ends_idea = true;
    }
  } else {  // Assume utf8 is mostly ASCII
    if (AsciiLikelyListItem(utf8)) {
      *is_list = true;
      *starts_idea = true;
    }
    // Only the first byte of the utf8 text is inspected here.
    int start_letter = utf8[0];
    if (IsOpeningPunct(start_letter)) {
      *starts_idea = true;
    }
    if (IsTerminalPunct(start_letter)) {
      *ends_idea = true;
    }
    if (start_letter >= 'A' && start_letter <= 'Z') {
      *starts_idea = true;
    }
  }
}
// Given the rightmost word of a line either as a Tesseract unicharset + werd
// or a utf8 string, set the following attributes for it:
//   is_list - this word might be a list number or bullet.
//   starts_idea - this word is likely to start a sentence.
//   ends_idea - this word is likely to end a sentence.
// Counterpart of LeftWordAttributes: here the *last* character determines
// ends_idea.
void RightWordAttributes(const UNICHARSET *unicharset, const WERD_CHOICE *werd,
                         const STRING &utf8,
                         bool *is_list, bool *starts_idea, bool *ends_idea) {
  *is_list = false;
  *starts_idea = false;
  *ends_idea = false;
  if (utf8.size() == 0 || (werd != NULL && werd->length() == 0)) {  // Empty
    *ends_idea = true;
    return;
  }
  if (unicharset && werd) {  // We have a proper werd and unicharset so use it.
    if (UniLikelyListItem(unicharset, werd)) {
      *is_list = true;
      *starts_idea = true;
    }
    UNICHAR_ID last_letter = werd->unichar_id(werd->length() - 1);
    if (unicharset->get_ispunctuation(last_letter)) {
      *ends_idea = true;
    }
  } else {  // Assume utf8 is mostly ASCII
    if (AsciiLikelyListItem(utf8)) {
      *is_list = true;
      *starts_idea = true;
    }
    // Only the last byte of the utf8 text is inspected here.
    int last_letter = utf8[utf8.size() - 1];
    if (IsOpeningPunct(last_letter) || IsTerminalPunct(last_letter)) {
      *ends_idea = true;
    }
  }
}
// =============== Implementation of RowScratchRegisters =====================
/* static */
// Append the column headers matching the fields AppendDebugInfo emits.
void RowScratchRegisters::AppendDebugHeaderFields(
    GenericVector<STRING> *header) {
  header->push_back("[lmarg,lind;rind,rmarg]");
  header->push_back("model");
}
// Append to dbg a "[lmargin,lindent;rindent,rmargin]" geometry field and a
// compact summary of this row's line type and hypothesis models.
void RowScratchRegisters::AppendDebugInfo(const ParagraphTheory &theory,
                                          GenericVector<STRING> *dbg) const {
  char s[30];
  snprintf(s, sizeof(s), "[%3d,%3d;%3d,%3d]",
           lmargin_, lindent_, rindent_, rmargin_);
  dbg->push_back(s);
  STRING model_string;
  model_string += static_cast<char>(GetLineType());
  model_string += ":";
  // List each hypothesis' model: 1-based theory index for strong models,
  // "CrL"/"CrR" for the crown sentinel pointers, "0" when none.
  int model_numbers = 0;
  for (int h = 0; h < hypotheses_.size(); h++) {
    if (hypotheses_[h].model == NULL)
      continue;
    if (model_numbers > 0)
      model_string += ",";
    if (StrongModel(hypotheses_[h].model)) {
      model_string += StrOf(1 + theory.IndexOf(hypotheses_[h].model));
    } else if (hypotheses_[h].model == kCrownLeft) {
      model_string += "CrL";
    } else if (hypotheses_[h].model == kCrownRight) {
      model_string += "CrR";
    }
    model_numbers++;
  }
  if (model_numbers == 0)
    model_string += "0";
  dbg->push_back(model_string);
}
// Attach this scratch register to row and initialize indents from the row's
// measured pixel distances; margins start at zero.
void RowScratchRegisters::Init(const RowInfo &row) {
  ri_ = &row;
  lmargin_ = 0;
  lindent_ = row.pix_ldistance;
  rmargin_ = 0;
  rindent_ = row.pix_rdistance;
}
// Summarize this row's hypotheses: LT_UNKNOWN when there are none,
// LT_MULTIPLE when both START and BODY are present, otherwise the single
// kind that occurs.
LineType RowScratchRegisters::GetLineType() const {
  if (hypotheses_.empty())
    return LT_UNKNOWN;
  bool has_start = false;
  bool has_body = false;
  for (int i = 0; i < hypotheses_.size(); i++) {
    switch (hypotheses_[i].ty) {
      case LT_START: has_start = true; break;
      case LT_BODY: has_body = true; break;
      default:
        tprintf("Encountered bad value in hypothesis list: %c\n",
                hypotheses_[i].ty);
        break;
    }
  }
  if (has_start && has_body)
    return LT_MULTIPLE;
  return has_start ? LT_START : LT_BODY;
}
// Same summary as above, but counting only hypotheses attributed to the
// given model.
LineType RowScratchRegisters::GetLineType(const ParagraphModel *model) const {
  if (hypotheses_.empty())
    return LT_UNKNOWN;
  bool has_start = false;
  bool has_body = false;
  for (int i = 0; i < hypotheses_.size(); i++) {
    if (hypotheses_[i].model != model)
      continue;
    switch (hypotheses_[i].ty) {
      case LT_START: has_start = true; break;
      case LT_BODY: has_body = true; break;
      default:
        tprintf("Encountered bad value in hypothesis list: %c\n",
                hypotheses_[i].ty);
        break;
    }
  }
  if (has_start && has_body)
    return LT_MULTIPLE;
  return has_start ? LT_START : LT_BODY;
}
void RowScratchRegisters::SetStartLine() {
LineType current_lt = GetLineType();
if (current_lt != LT_UNKNOWN && current_lt != LT_START) {
tprintf("Trying to set a line to be START when it's already BODY.\n");
}
if (current_lt == LT_UNKNOWN || current_lt == LT_BODY) {
hypotheses_.push_back_new(LineHypothesis(LT_START, NULL));
}
}
void RowScratchRegisters::SetBodyLine() {
LineType current_lt = GetLineType();
if (current_lt != LT_UNKNOWN && current_lt != LT_BODY) {
tprintf("Trying to set a line to be BODY when it's already START.\n");
}
if (current_lt == LT_UNKNOWN || current_lt == LT_START) {
hypotheses_.push_back_new(LineHypothesis(LT_BODY, NULL));
}
}
// Record a START hypothesis under the given model; any model-less START
// hypothesis is now subsumed by it and gets dropped.
void RowScratchRegisters::AddStartLine(const ParagraphModel *model) {
  hypotheses_.push_back_new(LineHypothesis(LT_START, model));
  const int weak = hypotheses_.get_index(LineHypothesis(LT_START, NULL));
  if (weak >= 0)
    hypotheses_.remove(weak);
}
// Record a BODY hypothesis under the given model; any model-less BODY
// hypothesis is now subsumed by it and gets dropped.
void RowScratchRegisters::AddBodyLine(const ParagraphModel *model) {
  hypotheses_.push_back_new(LineHypothesis(LT_BODY, model));
  const int weak = hypotheses_.get_index(LineHypothesis(LT_BODY, NULL));
  if (weak >= 0)
    hypotheses_.remove(weak);
}
// Append to *models every strong model for which this row carries a
// START hypothesis.
void RowScratchRegisters::StartHypotheses(SetOfModels *models) const {
  for (int i = 0; i < hypotheses_.size(); i++) {
    if (hypotheses_[i].ty != LT_START)
      continue;
    if (StrongModel(hypotheses_[i].model))
      models->push_back_new(hypotheses_[i].model);
  }
}
// Append to *models every strong model hypothesized for this row,
// whether as a start or a body line.
void RowScratchRegisters::StrongHypotheses(SetOfModels *models) const {
  for (int i = 0; i < hypotheses_.size(); i++) {
    const ParagraphModel *m = hypotheses_[i].model;
    if (StrongModel(m))
      models->push_back_new(m);
  }
}
// Append to *models every non-NULL model hypothesized for this row.
void RowScratchRegisters::NonNullHypotheses(SetOfModels *models) const {
  for (int i = 0; i < hypotheses_.size(); i++) {
    const ParagraphModel *m = hypotheses_[i].model;
    if (m != NULL)
      models->push_back_new(m);
  }
}
// If this row has exactly one hypothesis and it is a START, return its
// model (which may be NULL); otherwise return NULL.
const ParagraphModel *RowScratchRegisters::UniqueStartHypothesis() const {
  if (hypotheses_.size() == 1 && hypotheses_[0].ty == LT_START)
    return hypotheses_[0].model;
  return NULL;
}
// If this row has exactly one hypothesis and it is a BODY, return its
// model (which may be NULL); otherwise return NULL.
const ParagraphModel *RowScratchRegisters::UniqueBodyHypothesis() const {
  if (hypotheses_.size() == 1 && hypotheses_[0].ty == LT_BODY)
    return hypotheses_[0].model;
  return NULL;
}
// Discard any hypotheses whose model is not in the given list.
// An empty list is treated as "keep everything".
void RowScratchRegisters::DiscardNonMatchingHypotheses(
    const SetOfModels &models) {
  if (models.empty())
    return;
  // Walk backwards so remove() does not disturb indices not yet visited.
  for (int i = hypotheses_.size() - 1; i >= 0; i--) {
    if (!models.contains(hypotheses_[i].model))
      hypotheses_.remove(i);
  }
}
// ============ Geometry based Paragraph Detection Algorithm =================
// One cluster of indent values: a representative center and how many
// samples fell into the cluster.
struct Cluster {
  Cluster() : center(0), count(0) {}
  Cluster(int cen, int num) : center(cen), count(num) {}
  int center;  // The center of the cluster.
  int count;  // The number of entries within the cluster.
};
// Greedily clusters a bag of ints added via Add(): after sorting, a run of
// values lying within max_cluster_width of the run's first value becomes
// one Cluster (see GetClusters(), defined below).
class SimpleClusterer {
 public:
  explicit SimpleClusterer(int max_cluster_width)
      : max_cluster_width_(max_cluster_width) {}
  // Record one sample value.
  void Add(int value) { values_.push_back(value); }
  // Number of samples added so far.
  int size() const { return values_.size(); }
  // Compute the clusters over all values added so far into *clusters.
  void GetClusters(GenericVector<Cluster> *clusters);
 private:
  int max_cluster_width_;  // Maximum spread (from run start) of one cluster.
  GenericVectorEqEq<int> values_;  // All samples passed to Add().
};
// Return the index of the cluster whose center lies nearest to value.
// Ties keep the earlier (lower-index) cluster.
int ClosestCluster(const GenericVector<Cluster> &clusters, int value) {
  int best = 0;
  // Starting at 1 is equivalent to the exhaustive scan: comparing index 0
  // against itself could never improve on it.
  for (int i = 1; i < clusters.size(); i++) {
    if (abs(value - clusters[i].center) < abs(value - clusters[best].center))
      best = i;
  }
  return best;
}
// Sort the collected values, then greedily group each maximal run of
// values lying within max_cluster_width_ of the run's smallest member.
// Each run is emitted as a Cluster centered midway between its extremes.
void SimpleClusterer::GetClusters(GenericVector<Cluster> *clusters) {
  clusters->clear();
  values_.sort();
  int i = 0;
  while (i < values_.size()) {
    const int first = i;
    const int lo = values_[first];
    int hi = lo;
    for (i++; i < values_.size() && values_[i] <= lo + max_cluster_width_;
         i++) {
      hi = values_[i];
    }
    clusters->push_back(Cluster((lo + hi) / 2, i - first));
  }
}
// Calculate left- and right-indent tab stop values seen in
// rows[row_start, row_end) given a tolerance of tolerance.
//
// Outputs *left_tabs and *right_tabs: clusters of common left and right
// indent positions, with infrequent outliers filtered out (and re-added
// when dropping them would leave too little to work with).
void CalculateTabStops(GenericVector<RowScratchRegisters> *rows,
                       int row_start, int row_end,
                       int tolerance,
                       GenericVector<Cluster> *left_tabs,
                       GenericVector<Cluster> *right_tabs) {
  if (!AcceptableRowArgs(0, 1, __func__, rows, row_start, row_end))
    return;
  // First pass: toss all left and right indents into clusterers.
  SimpleClusterer initial_lefts(tolerance);
  SimpleClusterer initial_rights(tolerance);
  GenericVector<Cluster> initial_left_tabs;
  GenericVector<Cluster> initial_right_tabs;
  for (int i = row_start; i < row_end; i++) {
    initial_lefts.Add((*rows)[i].lindent_);
    initial_rights.Add((*rows)[i].rindent_);
  }
  initial_lefts.GetClusters(&initial_left_tabs);
  initial_rights.GetClusters(&initial_right_tabs);
  // Second pass: cluster only lines that are not "stray"
  //   An example of a stray line is a page number -- a line whose start
  //   and end tab-stops are far outside the typical start and end tab-stops
  //   for the block.
  //   Put another way, we only cluster data from lines whose start or end
  //   tab stop is frequent.
  SimpleClusterer lefts(tolerance);
  SimpleClusterer rights(tolerance);
  // Outlier elimination.  We might want to switch this to test outlier-ness
  // based on how strange a position an outlier is in instead of or in addition
  // to how rare it is.  These outliers get re-added if we end up having too
  // few tab stops to work with, however.
  // The frequency cutoff scales with the number of rows available.
  int infrequent_enough_to_ignore = 0;
  if (row_end - row_start >= 8) infrequent_enough_to_ignore = 1;
  if (row_end - row_start >= 20) infrequent_enough_to_ignore = 2;
  for (int i = row_start; i < row_end; i++) {
    int lidx = ClosestCluster(initial_left_tabs, (*rows)[i].lindent_);
    int ridx = ClosestCluster(initial_right_tabs, (*rows)[i].rindent_);
    if (initial_left_tabs[lidx].count > infrequent_enough_to_ignore ||
        initial_right_tabs[ridx].count > infrequent_enough_to_ignore) {
      lefts.Add((*rows)[i].lindent_);
      rights.Add((*rows)[i].rindent_);
    }
  }
  // Preliminary clustering; used only to decide whether the omitted
  // outliers need to be restored below.
  lefts.GetClusters(left_tabs);
  rights.GetClusters(right_tabs);
  if ((left_tabs->size() == 1 && right_tabs->size() >= 4) ||
      (right_tabs->size() == 1 && left_tabs->size() >= 4)) {
    // One side is really ragged, and the other only has one tab stop,
    // so those "insignificant outliers" are probably important, actually.
    // This often happens on a page of an index.  Add back in the ones
    // we omitted in the first pass.
    for (int i = row_start; i < row_end; i++) {
      int lidx = ClosestCluster(initial_left_tabs, (*rows)[i].lindent_);
      int ridx = ClosestCluster(initial_right_tabs, (*rows)[i].rindent_);
      if (!(initial_left_tabs[lidx].count > infrequent_enough_to_ignore ||
            initial_right_tabs[ridx].count > infrequent_enough_to_ignore)) {
        lefts.Add((*rows)[i].lindent_);
        rights.Add((*rows)[i].rindent_);
      }
    }
  }
  // Final clustering over whatever ended up in the clusterers.
  lefts.GetClusters(left_tabs);
  rights.GetClusters(right_tabs);
  // If one side is almost a two-indent aligned side, and the other clearly
  // isn't, try to prune out the least frequent tab stop from that side.
  if (left_tabs->size() == 3 && right_tabs->size() >= 4) {
    int to_prune = -1;
    for (int i = left_tabs->size() - 1; i >= 0; i--) {
      if (to_prune < 0 ||
          (*left_tabs)[i].count < (*left_tabs)[to_prune].count) {
        to_prune = i;
      }
    }
    if (to_prune >= 0 &&
        (*left_tabs)[to_prune].count <= infrequent_enough_to_ignore) {
      left_tabs->remove(to_prune);
    }
  }
  if (right_tabs->size() == 3 && left_tabs->size() >= 4) {
    int to_prune = -1;
    for (int i = right_tabs->size() - 1; i >= 0; i--) {
      if (to_prune < 0 ||
          (*right_tabs)[i].count < (*right_tabs)[to_prune].count) {
        to_prune = i;
      }
    }
    if (to_prune >= 0 &&
        (*right_tabs)[to_prune].count <= infrequent_enough_to_ignore) {
      right_tabs->remove(to_prune);
    }
  }
}
// Given a paragraph model mark rows[row_start, row_end) as said model
// start or body lines.
//
// Case 1: model->first_indent_ != model->body_indent_
// Differentiating the paragraph start lines from the paragraph body lines in
// this case is easy, we just see how far each line is indented.
//
// Case 2: model->first_indent_ == model->body_indent_
// Here, we find end-of-paragraph lines by looking for "short lines."
// What constitutes a "short line" changes depending on whether the text
// is ragged-right[left] or fully justified (aligned left and right).
//
// Case 2a: Ragged Right (or Left) text. (eop_threshold == 0)
//   We have a new paragraph if the first word of this line would have
//   fit at the end of the previous line.
//
// Case 2b: Fully Justified. (eop_threshold > 0)
// We mark a line as short (end of paragraph) if the offside indent
// is greater than eop_threshold.
void MarkRowsWithModel(GenericVector<RowScratchRegisters> *rows,
                       int row_start, int row_end,
                       const ParagraphModel *model,
                       bool ltr,  // NOTE(review): currently unused here.
                       int eop_threshold) {
  if (!AcceptableRowArgs(0, 0, __func__, rows, row_start, row_end))
    return;
  for (int row = row_start; row < row_end; row++) {
    bool valid_first = ValidFirstLine(rows, row, model);
    bool valid_body = ValidBodyLine(rows, row, model);
    if (valid_first && !valid_body) {
      // Indentation alone identifies a paragraph start (Case 1).
      (*rows)[row].AddStartLine(model);
    } else if (valid_body && !valid_first) {
      // Indentation alone identifies a body line (Case 1).
      (*rows)[row].AddBodyLine(model);
    } else if (valid_body && valid_first) {
      // Ambiguous (Case 2): decide based on whether the previous line
      // looks like the end of a paragraph.
      bool after_eop = (row == row_start);
      if (row > row_start) {
        if (eop_threshold > 0) {
          // Case 2b (fully justified): a "short" previous line -- offside
          // indent beyond eop_threshold -- marks a paragraph end.
          if (model->justification() == JUSTIFICATION_LEFT) {
            after_eop = (*rows)[row - 1].rindent_ > eop_threshold;
          } else {
            after_eop = (*rows)[row - 1].lindent_ > eop_threshold;
          }
        } else {
          // Case 2a (ragged): the previous line ended early enough that
          // this line's first word would have fit there.
          after_eop = FirstWordWouldHaveFit((*rows)[row - 1], (*rows)[row],
                                            model->justification());
        }
      }
      if (after_eop) {
        (*rows)[row].AddStartLine(model);
      } else {
        (*rows)[row].AddBodyLine(model);
      }
    } else {
      // Do nothing. Stray row.
    }
  }
}
// GeometricClassifierState holds all of the information we'll use while
// trying to determine a paragraph model for the text lines in a block of
// text:
// + the rows under consideration [row_start, row_end)
// + the common left- and right-indent tab stops
// + does the block start out left-to-right or right-to-left
// Further, this struct holds the data we amass for the (single) ParagraphModel
// we'll assign to the text lines (assuming we get that far).
struct GeometricClassifierState {
GeometricClassifierState(int dbg_level,
GenericVector<RowScratchRegisters> *r,
int r_start, int r_end)
: debug_level(dbg_level), rows(r), row_start(r_start), row_end(r_end),
margin(0) {
tolerance = InterwordSpace(*r, r_start, r_end);
CalculateTabStops(r, r_start, r_end, tolerance,
&left_tabs, &right_tabs);
if (debug_level >= 3) {
tprintf("Geometry: TabStop cluster tolerance = %d; "
"%d left tabs; %d right tabs\n",
tolerance, left_tabs.size(), right_tabs.size());
}
ltr = (*r)[r_start].ri_->ltr;
}
void AssumeLeftJustification() {
just = tesseract::JUSTIFICATION_LEFT;
margin = (*rows)[row_start].lmargin_;
}
void AssumeRightJustification() {
just = tesseract::JUSTIFICATION_RIGHT;
margin = (*rows)[row_start].rmargin_;
}
// Align tabs are the tab stops the text is aligned to.
const GenericVector<Cluster> &AlignTabs() const {
if (just == tesseract::JUSTIFICATION_RIGHT) return right_tabs;
return left_tabs;
}
// Offside tabs are the tab stops opposite the tabs used to align the text.
//
// Note that for a left-to-right text which is aligned to the right such as
// this function comment, the offside tabs are the horizontal tab stops
// marking the beginning of ("Note", "this" and "marking").
const GenericVector<Cluster> &OffsideTabs() const {
if (just == tesseract::JUSTIFICATION_RIGHT) return left_tabs;
return right_tabs;
}
// Return whether the i'th row extends from the leftmost left tab stop
// to the right most right tab stop.
bool IsFullRow(int i) const {
return ClosestCluster(left_tabs, (*rows)[i].lindent_) == 0 &&
ClosestCluster(right_tabs, (*rows)[i].rindent_) == 0;
}
int AlignsideTabIndex(int row_idx) const {
return ClosestCluster(AlignTabs(), (*rows)[row_idx].AlignsideIndent(just));
}
// Given what we know about the paragraph justification (just), would the
// first word of row_b have fit at the end of row_a?
bool FirstWordWouldHaveFit(int row_a, int row_b) {
return ::tesseract::FirstWordWouldHaveFit(
(*rows)[row_a], (*rows)[row_b], just);
}
void PrintRows() const { PrintRowRange(*rows, row_start, row_end); }
void Fail(int min_debug_level, const char *why) const {
if (debug_level < min_debug_level) return;
tprintf("# %s\n", why);
PrintRows();
}
ParagraphModel Model() const {
return ParagraphModel(just, margin, first_indent, body_indent, tolerance);
}
// We print out messages with a debug level at least as great as debug_level.
int debug_level;
// The Geometric Classifier was asked to find a single paragraph model
// to fit the text rows (*rows)[row_start, row_end)
GenericVector<RowScratchRegisters> *rows;
int row_start;
int row_end;
// The amount by which we expect the text edge can vary and still be aligned.
int tolerance;
// Is the script in this text block left-to-right?
// HORRIBLE ROUGH APPROXIMATION. TODO(eger): Improve
bool ltr;
// These left and right tab stops were determined to be the common tab
// stops for the given text.
GenericVector<Cluster> left_tabs;
GenericVector<Cluster> right_tabs;
// These are parameters we must determine to create a ParagraphModel.
tesseract::ParagraphJustification just;
int margin;
int first_indent;
int body_indent;
// eop_threshold > 0 if the text is fully justified. See MarkRowsWithModel()
int eop_threshold;
};
// Given a section of text where strong textual clues did not help identifying
// paragraph breaks, and for which the left and right indents have exactly
// three tab stops between them, attempt to find the paragraph breaks based
// solely on the outline of the text and whether the script is left-to-right.
//
// Algorithm Detail:
// The selected rows are in the form of a rectangle except
// for some number of "short lines" of the same length:
//
// (A1) xxxxxxxxxxxxx (B1) xxxxxxxxxxxx
// xxxxxxxxxxx xxxxxxxxxx # A "short" line.
// xxxxxxxxxxxxx xxxxxxxxxxxx
// xxxxxxxxxxxxx xxxxxxxxxxxx
//
// We have a slightly different situation if the only short
// line is at the end of the excerpt.
//
// (A2) xxxxxxxxxxxxx (B2) xxxxxxxxxxxx
// xxxxxxxxxxxxx xxxxxxxxxxxx
// xxxxxxxxxxxxx xxxxxxxxxxxx
// xxxxxxxxxxx xxxxxxxxxx # A "short" line.
//
// We'll interpret these as follows based on the reasoning in the comment for
// GeometricClassify():
// [script direction: first indent, body indent]
// (A1) LtR: 2,0 RtL: 0,0 (B1) LtR: 0,0 RtL: 2,0
// (A2) LtR: 2,0 RtL: CrR (B2) LtR: CrL RtL: 2,0
void GeometricClassifyThreeTabStopTextBlock(
    int debug_level,
    GeometricClassifierState &s,
    ParagraphTheory *theory) {
  // Count "full" rows (rows spanning leftmost to rightmost tab stops)
  // and note whether the final row is one of them.
  int num_rows = s.row_end - s.row_start;
  int num_full_rows = 0;
  int last_row_full = 0;
  for (int i = s.row_start; i < s.row_end; i++) {
    if (s.IsFullRow(i)) {
      num_full_rows++;
      if (i == s.row_end - 1) last_row_full++;
    }
  }

  // Mostly-short rows give us no clear rectangle-with-short-lines shape.
  if (num_full_rows < 0.7 * num_rows) {
    s.Fail(1, "Not enough full lines to know which lines start paras.");
    return;
  }

  // eop_threshold gets set if we're fully justified; see MarkRowsWithModel()
  s.eop_threshold = 0;

  // Alignment is guessed from the script direction alone.
  if (s.ltr) {
    s.AssumeLeftJustification();
  } else {
    s.AssumeRightJustification();
  }

  if (debug_level > 0) {
    tprintf("# Not enough variety for clear outline classification. "
            "Guessing these are %s aligned based on script.\n",
            s.ltr ? "left" : "right");
    s.PrintRows();
  }

  if (s.AlignTabs().size() == 2) {  // case A1 or A2
    // Two tab stops on the aligned side: the outer one is the first-line
    // indent, the inner one the body indent.
    s.first_indent = s.AlignTabs()[1].center;
    s.body_indent = s.AlignTabs()[0].center;
  } else {                          // case B1 or B2
    if (num_rows - 1 == num_full_rows - last_row_full) {
      // case B2 -- the only short line is the last one, so this looks
      // like a single flush paragraph: crown it rather than model it.
      const ParagraphModel *model = s.ltr ? kCrownLeft : kCrownRight;
      (*s.rows)[s.row_start].AddStartLine(model);
      for (int i = s.row_start + 1; i < s.row_end; i++) {
        (*s.rows)[i].AddBodyLine(model);
      }
      return;
    } else {
      // case B1 -- short lines end paragraphs; treat the text as fully
      // justified with an end-of-paragraph threshold halfway between the
      // two offside tab stops.
      s.first_indent = s.body_indent = s.AlignTabs()[0].center;
      s.eop_threshold = (s.OffsideTabs()[0].center +
                         s.OffsideTabs()[1].center) / 2;
    }
  }
  const ParagraphModel *model = theory->AddModel(s.Model());
  MarkRowsWithModel(s.rows, s.row_start, s.row_end, model,
                    s.ltr, s.eop_threshold);
  return;
}
// This function is called if strong textual clues were not available, but
// the caller hopes that the paragraph breaks will be super obvious just
// by the outline of the text.
//
// The particularly difficult case is figuring out what's going on if you
// don't have enough short paragraph end lines to tell us what's going on.
//
// For instance, let's say you have the following outline:
//
// (A1) xxxxxxxxxxxxxxxxxxxxxx
// xxxxxxxxxxxxxxxxxxxx
// xxxxxxxxxxxxxxxxxxxxxx
// xxxxxxxxxxxxxxxxxxxxxx
//
// Even if we know that the text is left-to-right and so will probably be
// left-aligned, both of the following are possible texts:
//
// (A1a) 1. Here our list item
// with two full lines.
// 2. Here a second item.
// 3. Here our third one.
//
// (A1b) so ends paragraph one.
// Here starts another
// paragraph we want to
// read. This continues
//
// These examples are obvious from the text and should have been caught
// by the StrongEvidenceClassify pass. However, for languages where we don't
// have capital letters to go on (e.g. Hebrew, Arabic, Hindi, Chinese),
// it's worth guessing that (A1b) is the correct interpretation if there are
// far more "full" lines than "short" lines.
void GeometricClassify(int debug_level,
                       GenericVector<RowScratchRegisters> *rows,
                       int row_start, int row_end,
                       ParagraphTheory *theory) {
  if (!AcceptableRowArgs(debug_level, 4, __func__, rows, row_start, row_end))
    return;
  if (debug_level > 1) {
    tprintf("###############################################\n");
    tprintf("##### GeometricClassify( rows[%d:%d) )   ####\n",
            row_start, row_end);
    tprintf("###############################################\n");
  }
  RecomputeMarginsAndClearHypotheses(rows, row_start, row_end, 10);

  GeometricClassifierState s(debug_level, rows, row_start, row_end);
  // Bail out when the tab-stop structure is too rich or too poor to
  // support a single simple model.
  if (s.left_tabs.size() > 2 && s.right_tabs.size() > 2) {
    s.Fail(2, "Too much variety for simple outline classification.");
    return;
  }
  if (s.left_tabs.size() <= 1 && s.right_tabs.size() <= 1) {
    s.Fail(1, "Not enough variety for simple outline classification.");
    return;
  }
  if (s.left_tabs.size() + s.right_tabs.size() == 3) {
    GeometricClassifyThreeTabStopTextBlock(debug_level, s, theory);
    return;
  }

  // At this point, we know that one side has at least two tab stops, and the
  // other side has one or two tab stops.
  // Left to determine:
  //   (1) Which is the body indent and which is the first line indent?
  //   (2) Is the text fully justified?

  // If one side happens to have three or more tab stops, assume that side
  // is opposite of the aligned side.
  if (s.right_tabs.size() > 2) {
    s.AssumeLeftJustification();
  } else if (s.left_tabs.size() > 2) {
    s.AssumeRightJustification();
  } else if (s.ltr) {  // guess based on script direction
    s.AssumeLeftJustification();
  } else {
    s.AssumeRightJustification();
  }

  if (s.AlignTabs().size() == 2) {
    // For each tab stop on the aligned side, how many of them appear
    // to be paragraph start lines?  [first lines]
    int firsts[2] = {0, 0};
    // Count the first line as a likely paragraph start line.
    firsts[s.AlignsideTabIndex(s.row_start)]++;
    // For each line, if the first word would have fit on the previous
    // line count it as a likely paragraph start line.
    bool jam_packed = true;
    for (int i = s.row_start + 1; i < s.row_end; i++) {
      if (s.FirstWordWouldHaveFit(i - 1, i)) {
        firsts[s.AlignsideTabIndex(i)]++;
        jam_packed = false;
      }
    }
    // Make an extra accounting for the last line of the paragraph just
    // in case it's the only short line in the block.  That is, take its
    // first word as typical and see if this looks like the *last* line
    // of a paragraph.  If so, mark the *other* indent as probably a first.
    if (jam_packed && s.FirstWordWouldHaveFit(s.row_end - 1, s.row_end - 1)) {
      firsts[1 - s.AlignsideTabIndex(s.row_end - 1)]++;
    }
    // Compare the two tab stops by the percentage of their lines that
    // look like paragraph starts.
    int percent0firsts, percent1firsts;
    percent0firsts = (100 * firsts[0]) / s.AlignTabs()[0].count;
    percent1firsts = (100 * firsts[1]) / s.AlignTabs()[1].count;

    // TODO(eger): Tune these constants if necessary.
    if ((percent0firsts < 20 && 30 < percent1firsts) ||
        percent0firsts + 30 < percent1firsts) {
      s.first_indent = s.AlignTabs()[1].center;
      s.body_indent = s.AlignTabs()[0].center;
    } else if ((percent1firsts < 20 && 30 < percent0firsts) ||
               percent1firsts + 30 < percent0firsts) {
      s.first_indent = s.AlignTabs()[0].center;
      s.body_indent = s.AlignTabs()[1].center;
    } else {
      // Ambiguous! Probably lineated (poetry)
      if (debug_level > 1) {
        tprintf("# Cannot determine %s indent likely to start paragraphs.\n",
                s.just == tesseract::JUSTIFICATION_LEFT ? "left" : "right");
        tprintf("# Indent of %d looks like a first line %d%% of the time.\n",
                s.AlignTabs()[0].center, percent0firsts);
        tprintf("# Indent of %d looks like a first line %d%% of the time.\n",
                s.AlignTabs()[1].center, percent1firsts);
        s.PrintRows();
      }
      return;
    }
  } else {
    // There's only one tab stop for the "aligned to" side.
    s.first_indent = s.body_indent = s.AlignTabs()[0].center;
  }

  // At this point, we have our model.
  const ParagraphModel *model = theory->AddModel(s.Model());

  // Now all we have to do is figure out if the text is fully justified or not.
  // eop_threshold: default to fully justified unless we see evidence below.
  //    See description on MarkRowsWithModel()
  s.eop_threshold =
      (s.OffsideTabs()[0].center + s.OffsideTabs()[1].center) / 2;
  // If the text is not fully justified, re-set the eop_threshold to 0.
  if (s.AlignTabs().size() == 2) {
    // Paragraphs with a paragraph-start indent.
    for (int i = s.row_start; i < s.row_end - 1; i++) {
      if (ValidFirstLine(s.rows, i + 1, model) &&
          !NearlyEqual(s.OffsideTabs()[0].center,
                       (*s.rows)[i].OffsideIndent(s.just), s.tolerance)) {
        // We found a non-end-of-paragraph short line: not fully justified.
        s.eop_threshold = 0;
        break;
      }
    }
  } else {
    // Paragraphs with no paragraph-start indent.
    for (int i = s.row_start; i < s.row_end - 1; i++) {
      if (!s.FirstWordWouldHaveFit(i, i + 1) &&
          !NearlyEqual(s.OffsideTabs()[0].center,
                       (*s.rows)[i].OffsideIndent(s.just), s.tolerance)) {
        // We found a non-end-of-paragraph short line: not fully justified.
        s.eop_threshold = 0;
        break;
      }
    }
  }
  MarkRowsWithModel(rows, row_start, row_end, model, s.ltr, s.eop_threshold);
}
// =============== Implementation of ParagraphTheory =====================
// Return the canonical pointer for model: reuse a registered model that is
// Comparable to it if one exists; otherwise copy model onto the heap,
// register it, and remember that this theory owns the copy.
const ParagraphModel *ParagraphTheory::AddModel(const ParagraphModel &model) {
  for (int i = 0; i < models_->size(); i++) {
    ParagraphModel *existing = (*models_)[i];
    if (existing->Comparable(model))
      return existing;
  }
  ParagraphModel *copy = new ParagraphModel(model);
  models_->push_back(copy);
  models_we_added_.push_back_new(copy);
  return copy;
}
// Delete every model this theory added that does not appear in
// used_models, removing it from both bookkeeping lists.
void ParagraphTheory::DiscardUnusedModels(const SetOfModels &used_models) {
  // Iterate backwards so remove() is safe while scanning.
  for (int i = models_->size() - 1; i >= 0; i--) {
    ParagraphModel *m = (*models_)[i];
    const bool ours = models_we_added_.contains(m);
    if (ours && !used_models.contains(m)) {
      models_->remove(i);
      models_we_added_.remove(models_we_added_.get_index(m));
      delete m;
    }
  }
}
// Examine rows[start, end) and try to determine if an existing non-centered
// paragraph model would fit them perfectly. If so, return a pointer to it.
// If not, return NULL.
const ParagraphModel *ParagraphTheory::Fits(
    const GenericVector<RowScratchRegisters> *rows, int start, int end) const {
  // Centered models are excluded; return the first remaining model that
  // fits all of rows[start, end), or NULL when none does.
  for (int i = 0; i < models_->size(); i++) {
    const ParagraphModel *candidate = (*models_)[i];
    if (candidate->justification() == JUSTIFICATION_CENTER)
      continue;
    if (RowsFitModel(rows, start, end, candidate))
      return candidate;
  }
  return NULL;
}
// Append to *models every known model that is not center-justified.
void ParagraphTheory::NonCenteredModels(SetOfModels *models) {
  for (int i = 0; i < models_->size(); i++) {
    const ParagraphModel *candidate = (*models_)[i];
    if (candidate->justification() != JUSTIFICATION_CENTER)
      models->push_back_new(candidate);
  }
}
// Return the position of model in the model list, or -1 if absent.
int ParagraphTheory::IndexOf(const ParagraphModel *model) const {
  const int num_models = models_->size();
  for (int i = 0; i < num_models; i++) {
    if ((*models_)[i] == model)
      return i;
  }
  return -1;
}
bool ValidFirstLine(const GenericVector<RowScratchRegisters> *rows,
int row, const ParagraphModel *model) {
if (!StrongModel(model)) {
tprintf("ValidFirstLine() should only be called with strong models!\n");
}
return StrongModel(model) &&
model->ValidFirstLine(
(*rows)[row].lmargin_, (*rows)[row].lindent_,
(*rows)[row].rindent_, (*rows)[row].rmargin_);
}
bool ValidBodyLine(const GenericVector<RowScratchRegisters> *rows,
int row, const ParagraphModel *model) {
if (!StrongModel(model)) {
tprintf("ValidBodyLine() should only be called with strong models!\n");
}
return StrongModel(model) &&
model->ValidBodyLine(
(*rows)[row].lmargin_, (*rows)[row].lindent_,
(*rows)[row].rindent_, (*rows)[row].rmargin_);
}
bool CrownCompatible(const GenericVector<RowScratchRegisters> *rows,
int a, int b, const ParagraphModel *model) {
if (model != kCrownRight && model != kCrownLeft) {
tprintf("CrownCompatible() should only be called with crown models!\n");
return false;
}
RowScratchRegisters &row_a = (*rows)[a];
RowScratchRegisters &row_b = (*rows)[b];
if (model == kCrownRight) {
return NearlyEqual(row_a.rindent_ + row_a.rmargin_,
row_b.rindent_ + row_b.rmargin_,
Epsilon(row_a.ri_->average_interword_space));
}
return NearlyEqual(row_a.lindent_ + row_a.lmargin_,
row_b.lindent_ + row_b.lmargin_,
Epsilon(row_a.ri_->average_interword_space));
}
// =============== Implementation of ParagraphModelSmearer ====================
// Set up a smearer over rows [row_start, row_end).  On bad arguments the
// range collapses to empty; otherwise one (initially empty) model set is
// allocated per row, plus sentinel entries before and after the range.
ParagraphModelSmearer::ParagraphModelSmearer(
    GenericVector<RowScratchRegisters> *rows,
    int row_start, int row_end, ParagraphTheory *theory)
        : theory_(theory), rows_(rows), row_start_(row_start),
          row_end_(row_end) {
  if (!AcceptableRowArgs(0, 0, __func__, rows, row_start, row_end)) {
    row_start_ = 0;
    row_end_ = 0;
    return;
  }
  SetOfModels empty_set;
  for (int i = row_start - 1; i <= row_end; i++) {
    open_models_.push_back(empty_set);
  }
}
// see paragraphs_internal.h
// Recompute, for each row in [row_start, row_end), which models remain
// "open" (recently started and still plausible) entering the next row.
void ParagraphModelSmearer::CalculateOpenModels(int row_start, int row_end) {
  SetOfModels no_models;
  // Clamp the requested range to the rows this smearer was built for.
  if (row_start < row_start_) row_start = row_start_;
  if (row_end > row_end_) row_end = row_end_;

  for (int row = (row_start > 0) ? row_start - 1 : row_start; row < row_end;
       row++) {
    if ((*rows_)[row].ri_->num_words == 0) {
      // An empty row closes everything: nothing stays open past it.
      OpenModels(row + 1) = no_models;
    } else {
      // Models opened by this row are its START hypotheses plus whatever
      // was already open entering it.
      SetOfModels &opened = OpenModels(row);
      (*rows_)[row].StartHypotheses(&opened);

      // Which models survive the transition from row to row + 1?
      SetOfModels still_open;
      for (int m = 0; m < opened.size(); m++) {
        if (ValidFirstLine(rows_, row, opened[m]) ||
            ValidBodyLine(rows_, row, opened[m])) {
          // This is basic filtering; we check likely paragraph starty-ness
          // down below in Smear() -- you know, whether the first word would
          // have fit and such.
          still_open.push_back_new(opened[m]);
        }
      }
      OpenModels(row + 1) = still_open;
    }
  }
}
// see paragraphs_internal.h
// Propagate ("smear") recently-used paragraph models onto rows whose line
// type is still uncertain, updating hypotheses row by row.
void ParagraphModelSmearer::Smear() {
  CalculateOpenModels(row_start_, row_end_);

  // For each row which we're unsure about (that is, it is LT_UNKNOWN or
  // we have multiple LT_START hypotheses), see if there's a model that
  // was recently used (an "open" model) which might model it well.
  for (int i = row_start_; i < row_end_; i++) {
    RowScratchRegisters &row = (*rows_)[i];
    if (row.ri_->num_words == 0)
      continue;

    // Step One:
    //   Figure out if there are "open" models which are left-aligned or
    //   right-aligned.  This is important for determining whether the
    //   "first" word in a row would fit at the "end" of the previous row.
    bool left_align_open = false;
    bool right_align_open = false;
    for (int m = 0; m < OpenModels(i).size(); m++) {
      switch (OpenModels(i)[m]->justification()) {
        case JUSTIFICATION_LEFT: left_align_open = true; break;
        case JUSTIFICATION_RIGHT: right_align_open = true; break;
        default: left_align_open = right_align_open = true;
      }
    }

    // Step Two:
    //   Use that knowledge to figure out if this row is likely to
    //   start a paragraph.
    bool likely_start;
    if (i == 0) {
      likely_start = true;
    } else {
      if ((left_align_open && right_align_open) ||
          (!left_align_open && !right_align_open)) {
        likely_start = LikelyParagraphStart((*rows_)[i - 1], row,
                                            JUSTIFICATION_LEFT) ||
                       LikelyParagraphStart((*rows_)[i - 1], row,
                                            JUSTIFICATION_RIGHT);
      } else if (left_align_open) {
        likely_start = LikelyParagraphStart((*rows_)[i - 1], row,
                                            JUSTIFICATION_LEFT);
      } else {
        likely_start = LikelyParagraphStart((*rows_)[i - 1], row,
                                            JUSTIFICATION_RIGHT);
      }
    }

    // Step Three:
    //   If this text line seems like an obvious first line of an
    //   open model, or an obvious continuation of an existing
    //   modelled paragraph, mark it up.
    if (likely_start) {
      // Add Start Hypotheses for all Open models that fit.
      for (int m = 0; m < OpenModels(i).size(); m++) {
        if (ValidFirstLine(rows_, i, OpenModels(i)[m])) {
          row.AddStartLine(OpenModels(i)[m]);
        }
      }
    } else {
      // Add relevant body line hypotheses.
      SetOfModels last_line_models;
      if (i > 0) {
        (*rows_)[i - 1].StrongHypotheses(&last_line_models);
      } else {
        theory_->NonCenteredModels(&last_line_models);
      }
      for (int m = 0; m < last_line_models.size(); m++) {
        const ParagraphModel *model = last_line_models[m];
        if (ValidBodyLine(rows_, i, model))
          row.AddBodyLine(model);
      }
    }

    // Step Four:
    //   If we're still quite unsure about this line, go through all
    //   models in our theory and see if this row could be the start
    //   of any of our models.
    if (row.GetLineType() == LT_UNKNOWN ||
        (row.GetLineType() == LT_START && !row.UniqueStartHypothesis())) {
      SetOfModels all_models;
      theory_->NonCenteredModels(&all_models);
      for (int m = 0; m < all_models.size(); m++) {
        if (ValidFirstLine(rows_, i, all_models[m])) {
          row.AddStartLine(all_models[m]);
        }
      }
    }

    // Step Five:
    //   Since we may have updated the hypotheses about this row, we need
    //   to recalculate the Open models for the rest of rows[i + 1, row_end)
    if (row.GetLineType() != LT_UNKNOWN) {
      CalculateOpenModels(i + 1, row_end_);
    }
  }
}
// ================ Main Paragraph Detection Algorithm =======================
// Find out what ParagraphModels are actually used, and discard any
// that are not.
void DiscardUnusedModels(const GenericVector<RowScratchRegisters> &rows,
ParagraphTheory *theory) {
SetOfModels used_models;
for (int i = 0; i < rows.size(); i++) {
rows[i].StrongHypotheses(&used_models);
}
theory->DiscardUnusedModels(used_models);
}
// DowngradeWeakestToCrowns:
// Forget any flush-{left, right} models unless we see two or more
// of them in sequence.
//
// In pass 3, we start to classify even flush-left paragraphs (paragraphs
// where the first line and body indent are the same) as having proper Models.
// This is generally dangerous, since if you start imagining that flush-left
// is a typical paragraph model when it is not, it will lead you to chop normal
// indented paragraphs in the middle whenever a sentence happens to start on a
// new line (see "This" above). What to do?
// What we do is to take any paragraph which is flush left and is not
// preceded by another paragraph of the same model and convert it to a "Crown"
// paragraph. This is a weak pseudo-ParagraphModel which is a placeholder
// for later. It means that the paragraph is flush, but it would be desirable
// to mark it as the same model as following text if it fits.  This downgrade,
// FlushLeft -> CrownLeft -> Model of following paragraph, means that we
// avoid making flush left Paragraph Models whenever we see a top-of-the-page
// half-of-a-paragraph, and instead we mark it the same as normal body text.
//
// Implementation:
//
// Comb backwards through the row scratch registers, and turn any
// sequences of body lines of equivalent type abutted against the beginning
// or a body or start line of a different type into a crown paragraph.
void DowngradeWeakestToCrowns(int debug_level,
                              ParagraphTheory *theory,
                              GenericVector<RowScratchRegisters> *rows) {
  int start;
  // Comb backwards through the rows, one candidate run per iteration.
  for (int end = rows->size(); end > 0; end = start) {
    // Search back for a body line of a unique type.
    const ParagraphModel *model = NULL;
    while (end > 0 &&
           (model = (*rows)[end - 1].UniqueBodyHypothesis()) == NULL) {
      end--;
    }
    if (end == 0) break;
    start = end - 1;
    while (start >= 0 && (*rows)[start].UniqueBodyHypothesis() == model) {
      start--;  // walk back to the first line that is not the same body type.
    }
    // A flush start line (first indent == body indent) directly above the
    // run belongs to the same candidate run.
    if (start >= 0 && (*rows)[start].UniqueStartHypothesis() == model &&
        StrongModel(model) &&
        NearlyEqual(model->first_indent(), model->body_indent(),
                    model->tolerance())) {
        start--;
    }
    start++;
    // Now rows[start, end) is a sequence of unique body hypotheses of model.
    if (StrongModel(model) && model->justification() == JUSTIFICATION_CENTER)
      continue;
    if (!StrongModel(model)) {
      // Extend an existing crown run upward over edge-compatible rows.
      while (start > 0 &&
             CrownCompatible(rows, start - 1, start, model))
        start--;
    }
    // Crownify when the run has no valid first line of its model above it
    // (i.e. it abuts the top of the block or a different paragraph type).
    if (start == 0 ||
        (!StrongModel(model)) ||
        (StrongModel(model) && !ValidFirstLine(rows, start - 1, model))) {
      // crownify rows[start, end)
      const ParagraphModel *crown_model = model;
      if (StrongModel(model)) {
        if (model->justification() == JUSTIFICATION_LEFT)
          crown_model = kCrownLeft;
        else
          crown_model = kCrownRight;
      }
      (*rows)[start].SetUnknown();
      (*rows)[start].AddStartLine(crown_model);
      for (int row = start + 1; row < end; row++) {
        (*rows)[row].SetUnknown();
        (*rows)[row].AddBodyLine(crown_model);
      }
    }
  }
  DiscardUnusedModels(*rows, theory);
}
// Clear all hypotheses about lines [start, end) and reset margins.
//
// The empty space between the left of a row and the block boundary (and
// similarly for the right) is split into two pieces: margin and indent.
// In initial processing, we assume the block is tight and the margin for
// all lines is set to zero. However, if our first pass does not yield
// models for everything, it may be due to an inset paragraph like a
// block-quote. In that case, we make a second pass over that unmarked
// section of the page and reset the "margin" portion of the empty space
// to the common amount of space at the ends of the lines under consid-
// eration. This would be equivalent to percentile set to 0. However,
// sometimes we have a single character sticking out in the right margin
// of a text block (like the 'r' in 'for' on line 3 above), and we can
// really just ignore it as an outlier. To express this, we allow the
// user to specify the percentile (0..100) of indent values to use as
// the common margin for each row in the run of rows[start, end).
void RecomputeMarginsAndClearHypotheses(
    GenericVector<RowScratchRegisters> *rows, int start, int end,
    int percentile) {
  if (!AcceptableRowArgs(0, 0, __func__, rows, start, end))
    return;
  // Pass 1: clear hypotheses and find the range of total left/right empty
  // space (margin + indent) over the non-empty rows.
  int lmin, lmax, rmin, rmax;
  lmin = lmax = (*rows)[start].lmargin_ + (*rows)[start].lindent_;
  rmin = rmax = (*rows)[start].rmargin_ + (*rows)[start].rindent_;
  for (int i = start; i < end; i++) {
    RowScratchRegisters &sr = (*rows)[i];
    sr.SetUnknown();
    if (sr.ri_->num_words == 0)
      continue;
    UpdateRange(sr.lmargin_ + sr.lindent_, &lmin, &lmax);
    UpdateRange(sr.rmargin_ + sr.rindent_, &rmin, &rmax);
  }
  // Pass 2: histogram the empty-space values so we can take a percentile.
  STATS lefts(lmin, lmax + 1);
  STATS rights(rmin, rmax + 1);
  for (int i = start; i < end; i++) {
    RowScratchRegisters &sr = (*rows)[i];
    if (sr.ri_->num_words == 0)
      continue;
    lefts.add(sr.lmargin_ + sr.lindent_, 1);
    rights.add(sr.rmargin_ + sr.rindent_, 1);
  }
  // The chosen percentile of empty space becomes the common "margin";
  // outliers (e.g. a single word poking into the margin) are ignored.
  int ignorable_left = lefts.ile(ClipToRange(percentile, 0, 100) / 100.0);
  int ignorable_right = rights.ile(ClipToRange(percentile, 0, 100) / 100.0);
  // Pass 3: shift each row's margin/indent split so margin == the common
  // amount; margin + indent stays constant for every row.
  for (int i = start; i < end; i++) {
    RowScratchRegisters &sr = (*rows)[i];
    int ldelta = ignorable_left - sr.lmargin_;
    sr.lmargin_ += ldelta;
    sr.lindent_ -= ldelta;
    int rdelta = ignorable_right - sr.rmargin_;
    sr.rmargin_ += rdelta;
    sr.rindent_ -= rdelta;
  }
}
// Return the median inter-word space in rows[row_start, row_end).
int InterwordSpace(const GenericVector<RowScratchRegisters> &rows,
int row_start, int row_end) {
if (row_end < row_start + 1) return 1;
int word_height = (rows[row_start].ri_->lword_box.height() +
rows[row_end - 1].ri_->lword_box.height()) / 2;
int word_width = (rows[row_start].ri_->lword_box.width() +
rows[row_end - 1].ri_->lword_box.width()) / 2;
STATS spacing_widths(0, 5 + word_width);
for (int i = row_start; i < row_end; i++) {
if (rows[i].ri_->num_words > 1) {
spacing_widths.add(rows[i].ri_->average_interword_space, 1);
}
}
int minimum_reasonable_space = word_height / 3;
if (minimum_reasonable_space < 2)
minimum_reasonable_space = 2;
int median = spacing_widths.median();
return (median > minimum_reasonable_space)
? median : minimum_reasonable_space;
}
// Return whether the first word on the after line can fit in the space at
// the end of the before line (knowing which way the text is aligned and read).
// Return whether the first word on the after line can fit in the space at
// the end of the before line (knowing which way the text is aligned and read).
bool FirstWordWouldHaveFit(const RowScratchRegisters &before,
                           const RowScratchRegisters &after,
                           tesseract::ParagraphJustification justification) {
  // Empty lines trivially "fit".
  if (before.ri_->num_words == 0 || after.ri_->num_words == 0)
    return true;
  if (justification == JUSTIFICATION_UNKNOWN) {
    tprintf("Don't call FirstWordWouldHaveFit(r, s, JUSTIFICATION_UNKNOWN).\n");
  }
  // For centered text both indents are free space; otherwise only the
  // off-side (ragged) indent is.
  int available_space =
      (justification == JUSTIFICATION_CENTER)
          ? before.lindent_ + before.rindent_
          : before.OffsideIndent(justification);
  available_space -= before.ri_->average_interword_space;
  // The "first" word of the after line depends on reading direction.
  int first_word_width = before.ri_->ltr ? after.ri_->lword_box.width()
                                         : after.ri_->rword_box.width();
  return first_word_width < available_space;
}
// Return whether the first word on the after line can fit in the space at
// the end of the before line (not knowing which way the text goes) in a left
// or right alignment.
bool FirstWordWouldHaveFit(const RowScratchRegisters &before,
                           const RowScratchRegisters &after) {
  // Empty lines trivially "fit".
  if (before.ri_->num_words == 0 || after.ri_->num_words == 0)
    return true;
  // Without a known justification, be generous: take whichever side of the
  // before line has more free space.
  int slack = before.lindent_ > before.rindent_ ? before.lindent_
                                                : before.rindent_;
  slack -= before.ri_->average_interword_space;
  if (before.ri_->ltr)
    return after.ri_->lword_box.width() < slack;
  return after.ri_->rword_box.width() < slack;
}
// Return whether the text content supports a paragraph break between the
// two rows: the trailing word of 'before' looks like the end of an idea and
// the leading word of 'after' looks like the start of one.
bool TextSupportsBreak(const RowScratchRegisters &before,
                       const RowScratchRegisters &after) {
  // In LTR text the line ends at its right word and starts at its left
  // word; RTL is mirrored.
  if (before.ri_->ltr)
    return before.ri_->rword_likely_ends_idea &&
           after.ri_->lword_likely_starts_idea;
  return before.ri_->lword_likely_ends_idea &&
         after.ri_->rword_likely_starts_idea;
}
// Return whether 'after' plausibly begins a new paragraph following
// 'before' (no known justification).
bool LikelyParagraphStart(const RowScratchRegisters &before,
                          const RowScratchRegisters &after) {
  // An empty preceding line always permits a paragraph start; otherwise
  // require both geometric room and textual evidence for a break.
  if (before.ri_->num_words == 0)
    return true;
  return FirstWordWouldHaveFit(before, after) &&
         TextSupportsBreak(before, after);
}
// Return whether 'after' plausibly begins a new paragraph following
// 'before', given the justification of the text.
bool LikelyParagraphStart(const RowScratchRegisters &before,
                          const RowScratchRegisters &after,
                          tesseract::ParagraphJustification j) {
  // An empty preceding line always permits a paragraph start; otherwise
  // require both geometric room and textual evidence for a break.
  if (before.ri_->num_words == 0)
    return true;
  return FirstWordWouldHaveFit(before, after, j) &&
         TextSupportsBreak(before, after);
}
// Examine rows[start, end) and try to determine what sort of ParagraphModel
// would fit them as a single paragraph.
// If we can't produce a unique model, justification_ = JUSTIFICATION_UNKNOWN.
// If the rows given could be a consistent start to a paragraph, set *consistent
// true.
ParagraphModel InternalParagraphModelByOutline(
    const GenericVector<RowScratchRegisters> *rows,
    int start, int end, int tolerance, bool *consistent) {
  *consistent = true;
  // Bug fix: validate the [start, end) range *before* touching any rows.
  // The original counted ltr lines (dereferencing (*rows)[i]) ahead of this
  // check, which could index out of range for unacceptable arguments.
  if (!AcceptableRowArgs(0, 2, __func__, rows, start, end))
    return ParagraphModel();
  // Decide the dominant reading direction by majority vote of the rows.
  int ltr_line_count = 0;
  for (int i = start; i < end; i++) {
    ltr_line_count += static_cast<int>((*rows)[i].ri_->ltr);
  }
  bool ltr = (ltr_line_count >= (end - start) / 2);
  // Ensure the caller only passed us a region with a common rmargin and
  // lmargin.
  int lmargin = (*rows)[start].lmargin_;
  int rmargin = (*rows)[start].rmargin_;
  // Track the spread of left indents, right indents, and their difference
  // (a proxy for the center offset) over the body lines rows[start+1, end).
  int lmin, lmax, rmin, rmax, cmin, cmax;
  lmin = lmax = (*rows)[start + 1].lindent_;
  rmin = rmax = (*rows)[start + 1].rindent_;
  cmin = cmax = 0;
  for (int i = start + 1; i < end; i++) {
    if ((*rows)[i].lmargin_ != lmargin || (*rows)[i].rmargin_ != rmargin) {
      tprintf("Margins don't match! Software error.\n");
      *consistent = false;
      return ParagraphModel();
    }
    UpdateRange((*rows)[i].lindent_, &lmin, &lmax);
    UpdateRange((*rows)[i].rindent_, &rmin, &rmax);
    UpdateRange((*rows)[i].rindent_ - (*rows)[i].lindent_, &cmin, &cmax);
  }
  int ldiff = lmax - lmin;
  int rdiff = rmax - rmin;
  int cdiff = cmax - cmin;
  if (rdiff > tolerance && ldiff > tolerance) {
    // Both edges are ragged; the only consistent explanation is centered
    // text, and then only if the center offsets are stable enough.
    if (cdiff < tolerance * 2) {
      if (end - start < 3)
        return ParagraphModel();
      return ParagraphModel(JUSTIFICATION_CENTER, 0, 0, 0, tolerance);
    }
    *consistent = false;
    return ParagraphModel();
  }
  if (end - start < 3)  // Don't return a model for two line paras.
    return ParagraphModel();
  // These booleans keep us from saying something is aligned left when the body
  // left variance is too large.
  bool body_admits_left_alignment = ldiff < tolerance;
  bool body_admits_right_alignment = rdiff < tolerance;
  ParagraphModel left_model =
      ParagraphModel(JUSTIFICATION_LEFT, lmargin, (*rows)[start].lindent_,
                     (lmin + lmax) / 2, tolerance);
  ParagraphModel right_model =
      ParagraphModel(JUSTIFICATION_RIGHT, rmargin, (*rows)[start].rindent_,
                     (rmin + rmax) / 2, tolerance);
  // These booleans keep us from having an indent on the "wrong side" for the
  // first line.
  bool text_admits_left_alignment = ltr || left_model.is_flush();
  bool text_admits_right_alignment = !ltr || right_model.is_flush();
  // At least one of the edges is less than tolerance in variance.
  // If the other is obviously ragged, it can't be the one aligned to.
  // [Note the last line is included in this raggedness.]
  if (tolerance < rdiff) {
    if (body_admits_left_alignment && text_admits_left_alignment)
      return left_model;
    *consistent = false;
    return ParagraphModel();
  }
  if (tolerance < ldiff) {
    if (body_admits_right_alignment && text_admits_right_alignment)
      return right_model;
    *consistent = false;
    return ParagraphModel();
  }
  // At this point, we know the body text doesn't vary much on either side.
  // If the first line juts out oddly in one direction or the other,
  // that likely indicates the side aligned to.
  int first_left = (*rows)[start].lindent_;
  int first_right = (*rows)[start].rindent_;
  if (ltr && body_admits_left_alignment &&
      (first_left < lmin || first_left > lmax))
    return left_model;
  if (!ltr && body_admits_right_alignment &&
      (first_right < rmin || first_right > rmax))
    return right_model;
  *consistent = false;
  return ParagraphModel();
}
// Examine rows[start, end) and try to determine what sort of ParagraphModel
// would fit them as a single paragraph. If nothing fits,
// justification_ = JUSTIFICATION_UNKNOWN and print the paragraph to debug
// output if we're debugging.
// Examine rows[start, end) and try to determine what sort of ParagraphModel
// would fit them as a single paragraph; print the paragraph to debug output
// when no model can be determined and debugging is on.
ParagraphModel ParagraphModelByOutline(
    int debug_level,
    const GenericVector<RowScratchRegisters> *rows,
    int start, int end, int tolerance) {
  bool consistent_unused;
  ParagraphModel model = InternalParagraphModelByOutline(
      rows, start, end, tolerance, &consistent_unused);
  if (model.justification() == JUSTIFICATION_UNKNOWN && debug_level >= 2) {
    tprintf("Could not determine a model for this paragraph:\n");
    PrintRowRange(*rows, start, end);
  }
  return model;
}
// Do rows[start, end) form a single instance of the given paragraph model?
bool RowsFitModel(const GenericVector<RowScratchRegisters> *rows,
int start, int end, const ParagraphModel *model) {
if (!AcceptableRowArgs(0, 1, __func__, rows, start, end))
return false;
if (!ValidFirstLine(rows, start, model)) return false;
for (int i = start + 1 ; i < end; i++) {
if (!ValidBodyLine(rows, i, model)) return false;
}
return true;
}
// Examine rows[row_start, row_end) as an independent section of text,
// and mark rows that are exceptionally clear as start-of-paragraph
// and paragraph-body lines.
//
// We presume that any lines surrounding rows[row_start, row_end) may
// have wildly different paragraph models, so we don't key any data off
// of those lines.
//
// We only take the very strongest signals, as we don't want to get
// confused and mark up centered text, poetry, or source code as
// clearly part of a typical paragraph.
void MarkStrongEvidence(GenericVector<RowScratchRegisters> *rows,
                        int row_start, int row_end) {
  // NOTE(review): this indexes rows[row_start + 1] unconditionally below, so
  // it assumes row_end - row_start >= 2 — presumably guaranteed by the
  // AcceptableRowArgs check in the caller (StrongEvidenceClassify); confirm.
  // Record patently obvious body text.
  for (int i = row_start + 1; i < row_end; i++) {
    const RowScratchRegisters &prev = (*rows)[i - 1];
    RowScratchRegisters &curr = (*rows)[i];
    tesseract::ParagraphJustification typical_justification =
        prev.ri_->ltr ? JUSTIFICATION_LEFT : JUSTIFICATION_RIGHT;
    // A line whose first word neither looks like an idea start nor would
    // have fit on the previous line is almost certainly a body line.
    if (!curr.ri_->rword_likely_starts_idea &&
        !curr.ri_->lword_likely_starts_idea &&
        !FirstWordWouldHaveFit(prev, curr, typical_justification)) {
      curr.SetBodyLine();
    }
  }
  // Record patently obvious start paragraph lines.
  //
  // It's an extremely good signal of the start of a paragraph that
  // the first word would have fit on the end of the previous line.
  // However, applying just that signal would have us mark random
  // start lines of lineated text (poetry and source code) and some
  // centered headings as paragraph start lines.  Therefore, we use
  // a second qualification for a paragraph start: Not only should
  // the first word of this line have fit on the previous line,
  // but also, this line should go full to the right of the block,
  // disallowing a subsequent word from having fit on this line.
  // First row:
  {
    RowScratchRegisters &curr = (*rows)[row_start];
    RowScratchRegisters &next = (*rows)[row_start + 1];
    tesseract::ParagraphJustification j =
        curr.ri_->ltr ? JUSTIFICATION_LEFT : JUSTIFICATION_RIGHT;
    // With no previous line to compare against, require only that this
    // line is "full" (next's first word wouldn't fit) and looks like the
    // start of an idea.
    if (curr.GetLineType() == LT_UNKNOWN &&
        !FirstWordWouldHaveFit(curr, next, j) &&
        (curr.ri_->lword_likely_starts_idea ||
         curr.ri_->rword_likely_starts_idea)) {
      curr.SetStartLine();
    }
  }
  // Middle rows
  for (int i = row_start + 1; i < row_end - 1; i++) {
    RowScratchRegisters &prev = (*rows)[i - 1];
    RowScratchRegisters &curr = (*rows)[i];
    RowScratchRegisters &next = (*rows)[i + 1];
    tesseract::ParagraphJustification j =
        curr.ri_->ltr ? JUSTIFICATION_LEFT : JUSTIFICATION_RIGHT;
    // Full line + plausible break after prev = clear paragraph start.
    if (curr.GetLineType() == LT_UNKNOWN &&
        !FirstWordWouldHaveFit(curr, next, j) &&
        LikelyParagraphStart(prev, curr, j)) {
      curr.SetStartLine();
    }
  }
  // Last row
  {  // the short circuit at the top means we have at least two lines.
    RowScratchRegisters &prev = (*rows)[row_end - 2];
    RowScratchRegisters &curr = (*rows)[row_end - 1];
    tesseract::ParagraphJustification j =
        curr.ri_->ltr ? JUSTIFICATION_LEFT : JUSTIFICATION_RIGHT;
    // There is no next row, so the line is compared against itself for the
    // "full line" test.
    if (curr.GetLineType() == LT_UNKNOWN &&
        !FirstWordWouldHaveFit(curr, curr, j) &&
        LikelyParagraphStart(prev, curr, j)) {
      curr.SetStartLine();
    }
  }
}
// Look for sequences of a start line followed by some body lines in
// rows[row_start, row_end) and create ParagraphModels for them if
// they seem coherent.
void ModelStrongEvidence(int debug_level,
                         GenericVector<RowScratchRegisters> *rows,
                         int row_start, int row_end,
                         bool allow_flush_models,
                         ParagraphTheory *theory) {
  if (!AcceptableRowArgs(debug_level, 2, __func__, rows, row_start, row_end))
    return;
  int start = row_start;
  while (start < row_end) {
    // Skip ahead to the next marked start line.
    while (start < row_end && (*rows)[start].GetLineType() != LT_START)
      start++;
    if (start >= row_end - 1)
      break;
    // Tolerance is derived from the interword space of the first body line.
    int tolerance = Epsilon((*rows)[start + 1].ri_->average_interword_space);
    int end = start;
    ParagraphModel last_model;
    bool next_consistent;
    // Greedily extend [start, end) one row at a time for as long as the
    // extended range still fits a single, stable paragraph model.
    do {
      ++end;
      // rows[row, end) was consistent.
      // If rows[row, end + 1) is not consistent,
      //   just model rows[row, end)
      if (end < row_end - 1) {
        RowScratchRegisters &next = (*rows)[end];
        LineType lt = next.GetLineType();
        // The next row is acceptable if it is a known body line, or an
        // unknown line whose first word would not have fit on this line.
        next_consistent = lt == LT_BODY ||
            (lt == LT_UNKNOWN &&
             !FirstWordWouldHaveFit((*rows)[end - 1], (*rows)[end]));
      } else {
        next_consistent = false;
      }
      if (next_consistent) {
        ParagraphModel next_model = InternalParagraphModelByOutline(
            rows, start, end + 1, tolerance, &next_consistent);
        // Don't let the model's justification flip away from the reading
        // direction it already settled on.
        if (((*rows)[start].ri_->ltr &&
             last_model.justification() == JUSTIFICATION_LEFT &&
             next_model.justification() != JUSTIFICATION_LEFT) ||
            (!(*rows)[start].ri_->ltr &&
             last_model.justification() == JUSTIFICATION_RIGHT &&
             next_model.justification() != JUSTIFICATION_RIGHT)) {
          next_consistent = false;
        }
        last_model = next_model;
      } else {
        next_consistent = false;
      }
    } while (next_consistent && end < row_end);
    // At this point, rows[start, end) looked like it could have been a
    // single paragraph.  If we can make a good ParagraphModel for it,
    // do so and mark this sequence with that model.
    if (end > start + 1) {
      // emit a new paragraph if we have more than one line.
      const ParagraphModel *model = NULL;
      ParagraphModel new_model = ParagraphModelByOutline(
          debug_level, rows, start, end,
          Epsilon(InterwordSpace(*rows, start, end)));
      if (new_model.justification() == JUSTIFICATION_UNKNOWN) {
        // couldn't create a good model, oh well.
      } else if (new_model.is_flush()) {
        if (end == start + 2) {
          // It's very likely we just got two paragraph starts in a row.
          end = start + 1;
        } else if (start == row_start) {
          // Mark this as a Crown.
          if (new_model.justification() == JUSTIFICATION_LEFT) {
            model = kCrownLeft;
          } else {
            model = kCrownRight;
          }
        } else if (allow_flush_models) {
          model = theory->AddModel(new_model);
        }
      } else {
        model = theory->AddModel(new_model);
      }
      // Mark the run with the chosen model: one start line + body lines.
      if (model) {
        (*rows)[start].AddStartLine(model);
        for (int i = start + 1; i < end; i++) {
          (*rows)[i].AddBodyLine(model);
        }
      }
    }
    start = end;
  }
}
// We examine rows[row_start, row_end) and do the following:
// (1) Clear all existing hypotheses for the rows being considered.
// (2) Mark up any rows as exceptionally likely to be paragraph starts
// or paragraph body lines as such using both geometric and textual
// clues.
// (3) Form models for any sequence of start + continuation lines.
// (4) Smear the paragraph models to cover surrounding text.
void StrongEvidenceClassify(int debug_level,
                            GenericVector<RowScratchRegisters> *rows,
                            int row_start, int row_end,
                            ParagraphTheory *theory) {
  if (!AcceptableRowArgs(debug_level, 2, __func__, rows, row_start, row_end))
    return;
  if (debug_level > 1) {
    tprintf("#############################################\n");
    tprintf("# StrongEvidenceClassify( rows[%d:%d) )\n", row_start, row_end);
    tprintf("#############################################\n");
  }
  // Use the 10th percentile of line-end whitespace as the common margin so
  // a single outlier word does not skew the whole block.
  RecomputeMarginsAndClearHypotheses(rows, row_start, row_end, 10);
  MarkStrongEvidence(rows, row_start, row_end);
  DebugDump(debug_level > 2, "Initial strong signals.", *theory, *rows);
  // Create paragraph models.
  ModelStrongEvidence(debug_level, rows, row_start, row_end, false, theory);
  // Fix: debug label previously read "Unsmeared hypotheses.s." (typo).
  DebugDump(debug_level > 2, "Unsmeared hypotheses.", *theory, *rows);
  // At this point, some rows are marked up as paragraphs with model numbers,
  // and some rows are marked up as either LT_START or LT_BODY.  Now let's
  // smear any good paragraph hypotheses forward and backward.
  ParagraphModelSmearer smearer(rows, row_start, row_end, theory);
  smearer.Smear();
}
// Mark lines inside runs of leader-dot lines (e.g. a table of contents) as
// their own single-line paragraphs: any line whose neighbors above and below
// also contain leaders gets a justification-free start-line model.
void SeparateSimpleLeaderLines(GenericVector<RowScratchRegisters> *rows,
                               int row_start, int row_end,
                               ParagraphTheory *theory) {
  for (int row = row_start + 1; row < row_end - 1; row++) {
    bool leaders_above = (*rows)[row - 1].ri_->has_leaders;
    bool leaders_here = (*rows)[row].ri_->has_leaders;
    bool leaders_below = (*rows)[row + 1].ri_->has_leaders;
    if (leaders_above && leaders_here && leaders_below) {
      const ParagraphModel *model = theory->AddModel(
          ParagraphModel(JUSTIFICATION_UNKNOWN, 0, 0, 0, 0));
      (*rows)[row].AddStartLine(model);
    }
  }
}
// Collect sequences of unique hypotheses in row registers and create proper
// paragraphs for them, referencing the paragraphs in row_owners.
void ConvertHypothesizedModelRunsToParagraphs(
    int debug_level,
    const GenericVector<RowScratchRegisters> &rows,
    GenericVector<PARA *> *row_owners,
    ParagraphTheory *theory) {
  int end = rows.size();
  int start;
  // Walk bottom-up; each iteration claims rows[start, end) as one paragraph
  // (or skips a single row) and continues above it.
  for (; end > 0; end = start) {
    start = end - 1;
    const ParagraphModel *model = NULL;
    // TODO(eger): Be smarter about dealing with multiple hypotheses.
    bool single_line_paragraph = false;
    SetOfModels models;
    rows[start].NonNullHypotheses(&models);
    if (models.size() > 0) {
      model = models[0];
      if (rows[start].GetLineType(model) != LT_BODY)
        single_line_paragraph = true;
    }
    if (model && !single_line_paragraph) {
      // walk back looking for more body lines and then a start line.
      while (--start > 0 && rows[start].GetLineType(model) == LT_BODY) {
        // do nothing
      }
      // Reject the run if it is not capped by a start line of the model.
      // (The start < 0 guard is kept for safety but cannot trigger: the
      // loop condition --start > 0 never leaves start below zero.)
      if (start < 0 || rows[start].GetLineType(model) != LT_START) {
        model = NULL;
      }
    }
    if (model == NULL) {
      continue;
    }
    // rows[start, end) should be a paragraph.
    PARA *p = new PARA();
    if (model == kCrownLeft || model == kCrownRight) {
      p->is_very_first_or_continuation = true;
      // Crown paragraph.
      //   If we can find an existing ParagraphModel that fits, use it,
      //   else create a new one.
      for (int row = end; row < rows.size(); row++) {
        if ((*row_owners)[row] &&
            (ValidBodyLine(&rows, start, (*row_owners)[row]->model) &&
                (start == 0 ||
                 ValidFirstLine(&rows, start, (*row_owners)[row]->model)))) {
          model = (*row_owners)[row]->model;
          break;
        }
      }
      if (model == kCrownLeft) {
        // No subsequent model fits, so cons one up.
        model = theory->AddModel(ParagraphModel(
            JUSTIFICATION_LEFT, rows[start].lmargin_ + rows[start].lindent_,
            0, 0, Epsilon(rows[start].ri_->average_interword_space)));
      } else if (model == kCrownRight) {
        // No subsequent model fits, so cons one up.
        // Bug fix: the right-side offset mirrors the left case above, so it
        // is rmargin_ + rindent_; the original added rmargin_ to itself.
        model = theory->AddModel(ParagraphModel(
            JUSTIFICATION_RIGHT, rows[start].rmargin_ + rows[start].rindent_,
            0, 0, Epsilon(rows[start].ri_->average_interword_space)));
      }
    }
    // Re-mark the run with the final model and fill in the PARA.
    rows[start].SetUnknown();
    rows[start].AddStartLine(model);
    for (int i = start + 1; i < end; i++) {
      rows[i].SetUnknown();
      rows[i].AddBodyLine(model);
    }
    p->model = model;
    p->has_drop_cap = rows[start].ri_->has_drop_cap;
    p->is_list_item =
        model->justification() == JUSTIFICATION_RIGHT
            ? rows[start].ri_->rword_indicates_list_item
            : rows[start].ri_->lword_indicates_list_item;
    for (int row = start; row < end; row++) {
      if ((*row_owners)[row] != NULL) {
        tprintf("Memory leak! ConvertHypothesizeModelRunsToParagraphs() called "
                "more than once!\n");
      }
      (*row_owners)[row] = p;
    }
  }
}
// A span of row indices [begin, end] collected by LeftoverSegments; the
// bounds are inclusive while being built and converted to a half-open
// interval before being returned.
struct Interval {
  Interval() : begin(0), end(0) {}
  Interval(int first, int last) : begin(first), end(last) {}
  int begin;
  int end;
};
// Return whether rows[row] appears to be stranded, meaning that the evidence
// for this row is very weak due to context. For instance, two lines of source
// code may happen to be indented at the same tab vector as body text starts,
// leading us to think they are two start-of-paragraph lines. This is not
// optimal. However, we also don't want to mark a sequence of short dialog
// as "weak," so our heuristic is:
// (1) If a line is surrounded by lines of unknown type, it's weak.
// (2) If two lines in a row are start lines for a given paragraph type, but
// after that the same paragraph type does not continue, they're weak.
bool RowIsStranded(const GenericVector<RowScratchRegisters> &rows, int row) {
  SetOfModels row_models;
  rows[row].StrongHypotheses(&row_models);
  // For each strong hypothesis on this row, measure the run of neighboring
  // rows that share it.  The row is stranded only if no hypothesis has a
  // long-enough run.
  for (int m = 0; m < row_models.size(); m++) {
    // NOTE(review): this converts the LineType enum straight to bool, so
    // all_starts is true for any non-zero line type — presumably the intent
    // was something like GetLineType(row_models[m]) == LT_START; confirm
    // against the enum's values before changing.
    bool all_starts = rows[row].GetLineType();
    int run_length = 1;
    bool continues = true;
    // Extend the run downward (toward row 0).
    for (int i = row - 1; i >= 0 && continues; i--) {
      // NOTE(review): 'models' is filled but never read here (and in the
      // loop below) — appears to be dead code.
      SetOfModels models;
      rows[i].NonNullHypotheses(&models);
      switch (rows[i].GetLineType(row_models[m])) {
        case LT_START: run_length++; break;
        case LT_MULTIPLE:  // explicit fall-through
        case LT_BODY: run_length++; all_starts = false; break;
        case LT_UNKNOWN:  // explicit fall-through
        default: continues = false;
      }
    }
    continues = true;
    // Extend the run upward (toward the end of the block).
    for (int i = row + 1; i < rows.size() && continues; i++) {
      SetOfModels models;
      rows[i].NonNullHypotheses(&models);
      switch (rows[i].GetLineType(row_models[m])) {
        case LT_START: run_length++; break;
        case LT_MULTIPLE:  // explicit fall-through
        case LT_BODY: run_length++; all_starts = false; break;
        case LT_UNKNOWN:  // explicit fall-through
        default: continues = false;
      }
    }
    // A run of 3+ rows, or 2+ rows including body lines, is solid evidence.
    if (run_length > 2 || (!all_starts && run_length > 1)) return false;
  }
  return true;
}
// Go through rows[row_start, row_end) and gather up sequences that need better
// classification.
// + Sequences of non-empty rows without hypotheses.
// + Crown paragraphs not immediately followed by a strongly modeled line.
// + Single line paragraphs surrounded by text that doesn't match the
// model.
void LeftoverSegments(const GenericVector<RowScratchRegisters> &rows,
                      GenericVector<Interval> *to_fix,
                      int row_start, int row_end) {
  to_fix->clear();
  for (int i = row_start; i < row_end; i++) {
    bool needs_fixing = false;
    SetOfModels models;
    SetOfModels models_w_crowns;
    rows[i].StrongHypotheses(&models);
    rows[i].NonNullHypotheses(&models_w_crowns);
    if (models.empty() && models_w_crowns.size() > 0) {
      // Crown paragraph.  Is it followed by a modeled line?
      // Scan forward until we hit either an unmodeled row (the crown is
      // dangling -> fix) or a strongly modeled row (the crown is fine).
      for (int end = i + 1; end < rows.size(); end++) {
        SetOfModels end_models;
        SetOfModels strong_end_models;
        rows[end].NonNullHypotheses(&end_models);
        rows[end].StrongHypotheses(&strong_end_models);
        if (end_models.size() == 0) {
          needs_fixing = true;
          break;
        } else if (strong_end_models.size() > 0) {
          needs_fixing = false;
          break;
        }
      }
    } else if (models.empty() && rows[i].ri_->num_words > 0) {
      // No models at all.
      needs_fixing = true;
    }
    // A strongly modeled row may still be a stranded outlier.
    if (!needs_fixing && !models.empty()) {
      needs_fixing = RowIsStranded(rows, i);
    }
    if (needs_fixing) {
      // Merge adjacent rows-to-fix into a single interval.
      if (!to_fix->empty() && to_fix->back().end == i - 1)
        to_fix->back().end = i;
      else
        to_fix->push_back(Interval(i, i));
    }
  }
  // Convert inclusive intervals to half-open intervals.
  for (int i = 0; i < to_fix->size(); i++) {
    (*to_fix)[i].end = (*to_fix)[i].end + 1;
  }
}
// Given a set of row_owners pointing to PARAs or NULL (no paragraph known),
// normalize each row_owner to point to an actual PARA, and output the
// paragraphs in order onto paragraphs.
void CanonicalizeDetectionResults(
    GenericVector<PARA *> *row_owners,
    PARA_LIST *paragraphs) {
  GenericVector<PARA *> &rows = *row_owners;
  paragraphs->clear();
  PARA_IT out(paragraphs);
  // Tracks the PARA most recently created for a run of NULL owners, so
  // consecutive unowned rows share one paragraph.
  PARA *formerly_null = NULL;
  for (int i = 0; i < rows.size(); i++) {
    if (rows[i] == NULL) {
      // Start a fresh PARA only at the beginning of a NULL run; rows in the
      // middle of the run reuse it.
      if (i == 0 || rows[i - 1] != formerly_null) {
        rows[i] = formerly_null = new PARA();
      } else {
        rows[i] = formerly_null;
        continue;
      }
    } else if (i > 0 && rows[i - 1] == rows[i]) {
      // Same paragraph as the previous row; already emitted.
      continue;
    }
    // First row of a new paragraph: append it to the output list in order.
    out.add_after_then_move(rows[i]);
  }
}
// Main entry point for Paragraph Detection Algorithm.
//
// Given a set of equally spaced textlines (described by row_infos),
// Split them into paragraphs.
//
// Output:
// row_owners - one pointer for each row, to the paragraph it belongs to.
// paragraphs - this is the actual list of PARA objects.
// models - the list of paragraph models referenced by the PARA objects.
// caller is responsible for deleting the models.
void DetectParagraphs(int debug_level,
                      GenericVector<RowInfo> *row_infos,
                      GenericVector<PARA *> *row_owners,
                      PARA_LIST *paragraphs,
                      GenericVector<ParagraphModel *> *models) {
  GenericVector<RowScratchRegisters> rows;
  ParagraphTheory theory(models);
  // Initialize row_owners to be a bunch of NULL pointers.
  row_owners->init_to_size(row_infos->size(), NULL);
  // Set up row scratch registers for the main algorithm.
  rows.init_to_size(row_infos->size(), RowScratchRegisters());
  for (int i = 0; i < row_infos->size(); i++) {
    rows[i].Init((*row_infos)[i]);
  }
  // Pass 1:
  //   Detect sequences of lines that all contain leader dots (.....)
  //   These are likely Tables of Contents.  If there are three text lines in
  //   a row with leader dots, it's pretty safe to say the middle one should
  //   be a paragraph of its own.
  SeparateSimpleLeaderLines(&rows, 0, rows.size(), &theory);
  DebugDump(debug_level > 1, "End of Pass 1", theory, rows);
  GenericVector<Interval> leftovers;
  LeftoverSegments(rows, &leftovers, 0, rows.size());
  for (int i = 0; i < leftovers.size(); i++) {
    // Pass 2a:
    //   Find any strongly evidenced start-of-paragraph lines.  If they're
    //   followed by two lines that look like body lines, make a paragraph
    //   model for that and see if that model applies throughout the text
    //   (that is, "smear" it).
    StrongEvidenceClassify(debug_level, &rows,
                           leftovers[i].begin, leftovers[i].end, &theory);
    // Pass 2b:
    //   If we had any luck in pass 2a, we got part of the page and didn't
    //   know how to classify a few runs of rows.  Take the segments that
    //   didn't find a model and reprocess them individually.
    GenericVector<Interval> leftovers2;
    LeftoverSegments(rows, &leftovers2, leftovers[i].begin, leftovers[i].end);
    // Pass 2a helped iff it shrank the leftover region (more than one
    // remaining segment, or one segment smaller than the whole range).
    bool pass2a_was_useful = leftovers2.size() > 1 ||
        (leftovers2.size() == 1 &&
         (leftovers2[0].begin != 0 || leftovers2[0].end != rows.size()));
    if (pass2a_was_useful) {
      for (int j = 0; j < leftovers2.size(); j++) {
        StrongEvidenceClassify(debug_level, &rows,
                               leftovers2[j].begin, leftovers2[j].end,
                               &theory);
      }
    }
  }
  DebugDump(debug_level > 1, "End of Pass 2", theory, rows);
  // Pass 3:
  //   These are the dregs for which we didn't have enough strong textual
  //   and geometric clues to form matching models for.  Let's see if
  //   the geometric clues are simple enough that we could just use those.
  LeftoverSegments(rows, &leftovers, 0, rows.size());
  for (int i = 0; i < leftovers.size(); i++) {
    GeometricClassify(debug_level, &rows,
                      leftovers[i].begin, leftovers[i].end, &theory);
  }
  // Undo any flush models for which there's little evidence.
  DowngradeWeakestToCrowns(debug_level, &theory, &rows);
  DebugDump(debug_level > 1, "End of Pass 3", theory, rows);
  // Pass 4:
  //   Take everything that's still not marked up well and clear all markings.
  LeftoverSegments(rows, &leftovers, 0, rows.size());
  for (int i = 0; i < leftovers.size(); i++) {
    for (int j = leftovers[i].begin; j < leftovers[i].end; j++) {
      rows[j].SetUnknown();
    }
  }
  DebugDump(debug_level > 1, "End of Pass 4", theory, rows);
  // Convert all of the unique hypothesis runs to PARAs.
  ConvertHypothesizedModelRunsToParagraphs(debug_level, rows, row_owners,
                                           &theory);
  DebugDump(debug_level > 0, "Final Paragraph Segmentation", theory, rows);
  // Finally, clean up any dangling NULL row paragraph parents.
  CanonicalizeDetectionResults(row_owners, paragraphs);
}
// ============ Code interfacing with the rest of Tesseract ==================
void InitializeTextAndBoxesPreRecognition(const MutableIterator &it,
                                          RowInfo *info) {
  // Set up text, lword_text, and rword_text (mostly for debug printing).
  // Before recognition no real text exists, so each symbol contributes a
  // placeholder 'x' and words are separated by spaces.
  STRING fake_text;
  PageIterator pit(static_cast<const PageIterator&>(it));
  bool first_word = true;
  if (!pit.Empty(RIL_WORD)) {
    do {
      fake_text += "x";
      if (first_word) info->lword_text += "x";
      info->rword_text += "x";
      // At a word boundary (but not the line's end): emit the separator and
      // restart rword_text so it ends up holding only the last word.
      if (pit.IsAtFinalElement(RIL_WORD, RIL_SYMBOL) &&
          !pit.IsAtFinalElement(RIL_TEXTLINE, RIL_SYMBOL)) {
        fake_text += " ";
        info->rword_text = "";
        first_word = false;
      }
    } while (!pit.IsAtFinalElement(RIL_TEXTLINE, RIL_SYMBOL) &&
             pit.Next(RIL_SYMBOL));
  }
  if (fake_text.size() == 0) return;
  // Prepend leading spaces proportional to the left indent so debug output
  // visually reflects the line's position.
  int lspaces = info->pix_ldistance / info->average_interword_space;
  for (int i = 0; i < lspaces; i++) {
    info->text += ' ';
  }
  info->text += fake_text;
  // Set up lword_box, rword_box, and num_words.
  PAGE_RES_IT page_res_it = *it.PageResIt();
  WERD_RES *word_res = page_res_it.restart_row();
  ROW_RES *this_row = page_res_it.row();
  WERD_RES *lword = NULL;
  WERD_RES *rword = NULL;
  info->num_words = 0;
  do {
    if (word_res) {
      if (!lword) lword = word_res;
      if (rword != word_res) info->num_words++;
      rword = word_res;
    }
    word_res = page_res_it.forward();
  } while (page_res_it.row() == this_row);
  // NOTE(review): if this row yields no WERD_RES, lword/rword stay NULL and
  // these dereferences would crash — presumably the fake_text.size() check
  // above guarantees at least one word exists; confirm.
  info->lword_box = lword->word->bounding_box();
  info->rword_box = rword->word->bounding_box();
}
// Given a Tesseract Iterator pointing to a text line, fill in the paragraph
// detector RowInfo with all relevant information from the row.
void InitializeRowInfo(bool after_recognition,
                       const MutableIterator &it,
                       RowInfo *info) {
  // Geometric fields come from the ROW when one is available; otherwise
  // fall back to harmless defaults.
  if (it.PageResIt()->row() != NULL) {
    ROW *row = it.PageResIt()->row()->row;
    info->pix_ldistance = row->lmargin();
    info->pix_rdistance = row->rmargin();
    info->average_interword_space =
        row->space() > 0 ? row->space() : MAX(row->x_height(), 1);
    info->pix_xheight = row->x_height();
    info->has_leaders = false;
    info->has_drop_cap = row->has_drop_cap();
    info->ltr = true;  // set below depending on word scripts
  } else {
    info->pix_ldistance = info->pix_rdistance = 0;
    info->average_interword_space = 1;
    info->pix_xheight = 1.0;
    info->has_leaders = false;
    info->has_drop_cap = false;
    info->ltr = true;
  }
  // Reset all textual-evidence fields before (re)computing them.
  info->num_words = 0;
  info->lword_indicates_list_item = false;
  info->lword_likely_starts_idea = false;
  info->lword_likely_ends_idea = false;
  info->rword_indicates_list_item = false;
  info->rword_likely_starts_idea = false;
  info->rword_likely_ends_idea = false;
  info->has_leaders = false;
  // NOTE(review): assigns the int 1 where true is used elsewhere in this
  // function — harmless, but inconsistent.
  info->ltr = 1;
  if (!after_recognition) {
    // Without recognized text, fall back to placeholder text and boxes.
    InitializeTextAndBoxesPreRecognition(it, info);
    return;
  }
  info->text = "";
  char *text = it.GetUTF8Text(RIL_TEXTLINE);
  int trailing_ws_idx = strlen(text);  // strip trailing space
  while (trailing_ws_idx > 0 &&
         // isspace() only takes ASCII
         ((text[trailing_ws_idx - 1] & 0x80) == 0) &&
         isspace(text[trailing_ws_idx - 1]))
    trailing_ws_idx--;
  if (trailing_ws_idx > 0) {
    // Prepend spaces proportional to the left indent, then the line's text
    // without its trailing whitespace.
    int lspaces = info->pix_ldistance / info->average_interword_space;
    for (int i = 0; i < lspaces; i++)
      info->text += ' ';
    for (int i = 0; i < trailing_ws_idx; i++)
      info->text += text[i];
  }
  delete []text;
  if (info->text.size() == 0) {
    return;
  }
  // Walk the recognized words of this row to collect direction votes,
  // leader counts, and the first/last words.
  PAGE_RES_IT page_res_it = *it.PageResIt();
  GenericVector<WERD_RES *> werds;
  WERD_RES *word_res = page_res_it.restart_row();
  ROW_RES *this_row = page_res_it.row();
  int num_leaders = 0;
  int ltr = 0;
  int rtl = 0;
  do {
    if (word_res && word_res->best_choice->unichar_string().length() > 0) {
      werds.push_back(word_res);
      ltr += word_res->AnyLtrCharsInWord() ? 1 : 0;
      rtl += word_res->AnyRtlCharsInWord() ? 1 : 0;
      if (word_res->word->flag(W_REP_CHAR)) num_leaders++;
    }
    word_res = page_res_it.forward();
  } while (page_res_it.row() == this_row);
  info->ltr = ltr >= rtl;
  info->has_leaders = num_leaders > 3;
  info->num_words = werds.size();
  if (werds.size() > 0) {
    WERD_RES *lword = werds[0], *rword = werds[werds.size() - 1];
    info->lword_text = lword->best_choice->unichar_string().string();
    info->rword_text = rword->best_choice->unichar_string().string();
    info->lword_box = lword->word->bounding_box();
    info->rword_box = rword->word->bounding_box();
    // Derive list-item / idea-boundary signals from the edge words.
    LeftWordAttributes(lword->uch_set, lword->best_choice,
                       info->lword_text,
                       &info->lword_indicates_list_item,
                       &info->lword_likely_starts_idea,
                       &info->lword_likely_ends_idea);
    RightWordAttributes(rword->uch_set, rword->best_choice,
                        info->rword_text,
                        &info->rword_indicates_list_item,
                        &info->rword_likely_starts_idea,
                        &info->rword_likely_ends_idea);
  }
}
// This is called after rows have been identified and words are recognized.
// Much of this could be implemented before word recognition, but text helps
// to identify bulleted lists and gives good signals for sentence boundaries.
void DetectParagraphs(int debug_level,
                      bool after_text_recognition,
                      const MutableIterator *block_start,
                      GenericVector<ParagraphModel *> *models) {
  // Clear out any preconceived notions.
  if (block_start->Empty(RIL_TEXTLINE)) {
    return;
  }
  BLOCK *block = block_start->PageResIt()->block()->block;
  block->para_list()->clear();
  bool is_image_block = block->poly_block() && !block->poly_block()->IsText();

  // Convert the Tesseract structures to RowInfos
  // for the paragraph detection algorithm.
  MutableIterator row(*block_start);
  if (row.Empty(RIL_TEXTLINE))
    return;  // end of input already.

  GenericVector<RowInfo> row_infos;
  do {
    if (!row.PageResIt()->row())
      continue;  // empty row.
    // Clear any stale paragraph assignment before re-detection.
    row.PageResIt()->row()->row->set_para(NULL);
    row_infos.push_back(RowInfo());
    RowInfo &ri = row_infos.back();
    InitializeRowInfo(after_text_recognition, row, &ri);
    // NOTE: the "continue" above still advances to the next line because the
    // loop condition below calls row.Next() as a side effect.
  } while (!row.IsAtFinalElement(RIL_BLOCK, RIL_TEXTLINE) &&
           row.Next(RIL_TEXTLINE));

  // If we're called before text recognition, we might not have
  // tight block bounding boxes, so trim by the minimum on each side.
  if (row_infos.size() > 0) {
    int min_lmargin = row_infos[0].pix_ldistance;
    int min_rmargin = row_infos[0].pix_rdistance;
    for (int i = 1; i < row_infos.size(); i++) {
      if (row_infos[i].pix_ldistance < min_lmargin)
        min_lmargin = row_infos[i].pix_ldistance;
      if (row_infos[i].pix_rdistance < min_rmargin)
        min_rmargin = row_infos[i].pix_rdistance;
    }
    if (min_lmargin > 0 || min_rmargin > 0) {
      for (int i = 0; i < row_infos.size(); i++) {
        row_infos[i].pix_ldistance -= min_lmargin;
        row_infos[i].pix_rdistance -= min_rmargin;
      }
    }
  }

  // Run the paragraph detection algorithm.
  GenericVector<PARA *> row_owners;
  GenericVector<PARA *> the_paragraphs;
  if (!is_image_block) {
    DetectParagraphs(debug_level, &row_infos, &row_owners, block->para_list(),
                     models);
  } else {
    // Image blocks get one trivial (canonical) paragraph segmentation.
    row_owners.init_to_size(row_infos.size(), NULL);
    CanonicalizeDetectionResults(&row_owners, block->para_list());
  }

  // Now stitch in the row_owners into the rows.
  row = *block_start;
  for (int i = 0; i < row_owners.size(); i++) {
    // Lines with no underlying ROW_RES were skipped above; skip them here too
    // so row_owners stays aligned with the rows that got a RowInfo.
    while (!row.PageResIt()->row())
      row.Next(RIL_TEXTLINE);
    row.PageResIt()->row()->row->set_para(row_owners[i]);
    row.Next(RIL_TEXTLINE);
  }
}
} // namespace
| C++ |
/******************************************************************
* File: docqual.cpp (Formerly docqual.c)
* Description: Document Quality Metrics
* Author: Phil Cheatle
* Created: Mon May 9 11:27:28 BST 1994
*
* (C) Copyright 1994, Hewlett-Packard Ltd.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
#ifdef _MSC_VER
#pragma warning(disable:4244) // Conversion warnings
#endif
#include <ctype.h>
#include "docqual.h"
#include "reject.h"
#include "tesscallback.h"
#include "tessvars.h"
#include "globals.h"
#include "tesseractclass.h"
namespace tesseract{
// A little class to provide the callbacks as we have no pre-bound args.
// Instances are passed to bln_boxes->ProcessMatchedBlobs(), which invokes
// the chosen member once per blob index that matches between the original
// word and the rebuilt word.
struct DocQualCallbacks {
  explicit DocQualCallbacks(WERD_RES* word0)
    : word(word0), match_count(0), accepted_match_count(0) {}

  // Tally every matching blob, regardless of its reject status.
  void CountMatchingBlobs(int index) {
    ++match_count;
  }

  // Tally every matching blob, and separately those whose reject-map entry
  // is currently accepted.
  void CountAcceptedBlobs(int index) {
    if (word->reject_map[index].accepted())
      ++accepted_match_count;
    ++match_count;
  }

  // Promote an "accept if good quality" reject-map entry to accepted.
  void AcceptIfGoodQuality(int index) {
    if (word->reject_map[index].accept_if_good_quality())
      word->reject_map[index].setrej_quality_accept();
  }

  WERD_RES* word;               // Not owned.
  inT16 match_count;            // Total matched blobs seen so far.
  inT16 accepted_match_count;   // Matched blobs whose map entry was accepted.
};
/*************************************************************************
 * word_blob_quality()
 * How many blobs in the box_word are identical to those of the inword?
 * ASSUME blobs in both initial word and box_word are in ascending order of
 * left hand blob edge.
 *************************************************************************/
inT16 Tesseract::word_blob_quality(WERD_RES *word, ROW *row) {
  // With no box data or no rebuilt word there is nothing to compare.
  if (word->bln_boxes == NULL) return 0;
  if (word->rebuild_word == NULL) return 0;
  if (word->rebuild_word->blobs.empty()) return 0;

  DocQualCallbacks counter(word);
  word->bln_boxes->ProcessMatchedBlobs(
      *word->rebuild_word,
      NewPermanentTessCallback(&counter,
                               &DocQualCallbacks::CountMatchingBlobs));
  return counter.match_count;
}
// Sum, over every blob of the rebuilt word, the outline-count error of the
// character recognized at the corresponding position in the best choice.
inT16 Tesseract::word_outline_errs(WERD_RES *word) {
  inT16 total_errs = 0;
  if (word->rebuild_word == NULL)
    return total_errs;
  // Blob index b tracks the character position in the best-choice string.
  for (int b = 0; b < word->rebuild_word->NumBlobs(); ++b) {
    total_errs +=
        count_outline_errs(word->best_choice->unichar_string()[b],
                           word->rebuild_word->blobs[b]->NumOutlines());
  }
  return total_errs;
}
/*************************************************************************
 * word_char_quality()
 * Combination of blob quality and outline quality - how many good chars are
 * there? - I.e chars which pass the blob AND outline tests.
 * On the no-data early exit the output counts are left untouched.
 *************************************************************************/
void Tesseract::word_char_quality(WERD_RES *word,
                                  ROW *row,
                                  inT16 *match_count,
                                  inT16 *accepted_match_count) {
  const bool no_data = word->bln_boxes == NULL ||
                       word->rebuild_word == NULL ||
                       word->rebuild_word->blobs.empty();
  if (no_data)
    return;
  DocQualCallbacks counter(word);
  word->bln_boxes->ProcessMatchedBlobs(
      *word->rebuild_word,
      NewPermanentTessCallback(&counter,
                               &DocQualCallbacks::CountAcceptedBlobs));
  *match_count = counter.match_count;
  *accepted_match_count = counter.accepted_match_count;
}
/*************************************************************************
 * unrej_good_chs()
 * Unreject POTENTIAL rejects if the blob passes the blob and outline checks
 *************************************************************************/
void Tesseract::unrej_good_chs(WERD_RES *word, ROW *row) {
  if (word->bln_boxes == NULL || word->rebuild_word == NULL ||
      word->rebuild_word->blobs.empty()) {
    return;  // No blob data available to judge quality with.
  }
  DocQualCallbacks quality_cb(word);
  word->bln_boxes->ProcessMatchedBlobs(
      *word->rebuild_word,
      NewPermanentTessCallback(&quality_cb,
                               &DocQualCallbacks::AcceptIfGoodQuality));
}
// Return how far the observed outline count of character c deviates from its
// expected count. Characters listed in outlines_odd are never penalized.
inT16 Tesseract::count_outline_errs(char c, inT16 outline_count) {
  if (STRING(outlines_odd).contains(c))
    return 0;  // Dont use this char
  // Characters in outlines_2 are expected to have two outlines; anything
  // else is expected to have exactly one.
  const int expected = STRING(outlines_2).contains(c) ? 2 : 1;
  return abs(outline_count - expected);
}
// Apply document-level quality heuristics: optionally unreject words in a
// good-quality document, then reject whole blocks/rows that look bad, and
// finally run the UNLV tilde crunching passes when enabled.
void Tesseract::quality_based_rejection(PAGE_RES_IT &page_res_it,
                                        BOOL8 good_quality_doc) {
  if (tessedit_good_quality_unrej && good_quality_doc) {
    unrej_good_quality_words(page_res_it);
  }
  doc_and_block_rejection(page_res_it, good_quality_doc);
  if (unlv_tilde_crunching) {
    tilde_crunch(page_res_it);
    tilde_delete(page_res_it);
  }
}
/*************************************************************************
 * unrej_good_quality_words()
 * Accept potential rejects in words which pass the following checks:
 *    - Contains a potential reject
 *    - Word looks like a sensible alpha word.
 *    - Word segmentation is the same as the original image
 *    - All characters have the expected number of outlines
 * NOTE - the rejection counts are recalculated after unrejection
 *      - CANT do it in a single pass without a bit of fiddling
 *      - keep it simple but inefficient
 *************************************************************************/
void Tesseract::unrej_good_quality_words(  //unreject potential
                                         PAGE_RES_IT &page_res_it) {
  WERD_RES *word;
  ROW_RES *current_row;
  BLOCK_RES *current_block;
  int i;

  // Pass 1: unreject. Each branch below advances the iterator itself.
  page_res_it.restart_page ();
  while (page_res_it.word () != NULL) {
    check_debug_pt (page_res_it.word (), 100);
    if (bland_unrej) {
      // Blanket mode: unreject every "accept if good quality" entry without
      // looking at row statistics or word acceptability.
      word = page_res_it.word ();
      for (i = 0; i < word->reject_map.length (); i++) {
        if (word->reject_map[i].accept_if_good_quality ())
          word->reject_map[i].setrej_quality_accept ();
      }
      page_res_it.forward ();
    }
    else if ((page_res_it.row ()->char_count > 0) &&
      ((page_res_it.row ()->rej_count /
      (float) page_res_it.row ()->char_count) <=
      quality_rowrej_pc)) {
      // Row has a low enough reject fraction: unreject words that look
      // recoverable and (unless overridden) acceptable.
      word = page_res_it.word ();
      if (word->reject_map.quality_recoverable_rejects() &&
          (tessedit_unrej_any_wd ||
           acceptable_word_string(*word->uch_set,
                                  word->best_choice->unichar_string().string(),
                                  word->best_choice->unichar_lengths().string())
               != AC_UNACCEPTABLE)) {
        unrej_good_chs(word, page_res_it.row ()->row);
      }
      page_res_it.forward ();
    }
    else {
      /* Skip to end of dodgy row */
      current_row = page_res_it.row ();
      while ((page_res_it.word () != NULL) &&
        (page_res_it.row () == current_row))
        page_res_it.forward ();
    }
    check_debug_pt (page_res_it.word (), 110);
  }

  // Pass 2: recalculate page/block/row rejection statistics from scratch,
  // since the unrejection above invalidated the cached counts.
  page_res_it.restart_page ();
  page_res_it.page_res->char_count = 0;
  page_res_it.page_res->rej_count = 0;
  current_block = NULL;
  current_row = NULL;
  while (page_res_it.word () != NULL) {
    if (current_block != page_res_it.block ()) {
      current_block = page_res_it.block ();
      current_block->char_count = 0;
      current_block->rej_count = 0;
    }
    if (current_row != page_res_it.row ()) {
      current_row = page_res_it.row ();
      current_row->char_count = 0;
      current_row->rej_count = 0;
      current_row->whole_word_rej_count = 0;
    }
    page_res_it.rej_stat_word ();
    page_res_it.forward ();
  }
}
/*************************************************************************
 * doc_and_block_rejection()
 *
 * If the page has too many rejects - reject all of it.
 * If any block has too many rejects - reject all words in the block
 * Otherwise, walk each block's rows and reject whole rows whose reject
 * fraction is too high (subject to several preservation options).
 *************************************************************************/
void Tesseract::doc_and_block_rejection(  //reject big chunks
                                        PAGE_RES_IT &page_res_it,
                                        BOOL8 good_quality_doc) {
  inT16 block_no = 0;
  inT16 row_no = 0;
  BLOCK_RES *current_block;
  ROW_RES *current_row;

  BOOL8 rej_word;
  BOOL8 prev_word_rejected;
  inT16 char_quality = 0;
  inT16 accepted_char_quality;

  // Page-level check: reject everything if the overall reject percentage
  // exceeds the document threshold.
  if (page_res_it.page_res->rej_count * 100.0 /
      page_res_it.page_res->char_count > tessedit_reject_doc_percent) {
    reject_whole_page(page_res_it);
    if (tessedit_debug_doc_rejection) {
      tprintf("REJECT ALL #chars: %d #Rejects: %d; \n",
              page_res_it.page_res->char_count,
              page_res_it.page_res->rej_count);
    }
  } else {
    if (tessedit_debug_doc_rejection) {
      tprintf("NO PAGE REJECTION  #chars: %d  # Rejects: %d; \n",
              page_res_it.page_res->char_count,
              page_res_it.page_res->rej_count);
    }

    /* Walk blocks testing for block rejection */

    page_res_it.restart_page();
    WERD_RES* word;
    while ((word = page_res_it.word()) != NULL) {
      current_block = page_res_it.block();
      block_no = current_block->block->index();
      if (current_block->char_count > 0 &&
          (current_block->rej_count * 100.0 / current_block->char_count) >
           tessedit_reject_block_percent) {
        if (tessedit_debug_block_rejection) {
          tprintf("REJECTING BLOCK %d  #chars: %d;  #Rejects: %d\n",
                  block_no, current_block->char_count,
                  current_block->rej_count);
        }
        prev_word_rejected = FALSE;
        // Reject every word in the block, optionally sparing words that
        // look perfect or pass the acceptability + char-quality tests.
        while ((word = page_res_it.word()) != NULL &&
               (page_res_it.block() == current_block)) {
          if (tessedit_preserve_blk_rej_perfect_wds) {
            rej_word = word->reject_map.reject_count() > 0 ||
                word->reject_map.length () < tessedit_preserve_min_wd_len;
            if (rej_word && tessedit_dont_blkrej_good_wds &&
                word->reject_map.length() >= tessedit_preserve_min_wd_len &&
                acceptable_word_string(
                    *word->uch_set,
                    word->best_choice->unichar_string().string(),
                    word->best_choice->unichar_lengths().string()) !=
                AC_UNACCEPTABLE) {
              word_char_quality(word, page_res_it.row()->row,
                                &char_quality,
                                &accepted_char_quality);
              rej_word = char_quality !=  word->reject_map.length();
            }
          } else {
            rej_word = TRUE;
          }
          if (rej_word) {
            /*
              Reject spacing if both current and prev words are rejected.
              NOTE - this is NOT restricted to FUZZY spaces. - When tried this
              generated more space errors.
            */
            if (tessedit_use_reject_spaces &&
                prev_word_rejected &&
                page_res_it.prev_row() == page_res_it.row() &&
                word->word->space() == 1)
              word->reject_spaces = TRUE;
            word->reject_map.rej_word_block_rej();
          }
          prev_word_rejected = rej_word;
          page_res_it.forward();
        }
      } else {
        if (tessedit_debug_block_rejection) {
          tprintf("NOT REJECTING BLOCK %d #chars: %d  # Rejects: %d; \n",
                  block_no, page_res_it.block()->char_count,
                  page_res_it.block()->rej_count);
        }

        /* Walk rows in block testing for row rejection */
        row_no = 0;
        while (page_res_it.word() != NULL &&
               page_res_it.block() == current_block) {
          current_row = page_res_it.row();
          row_no++;
          /* Reject whole row if:
            fraction of chars on row which are rejected exceed a limit AND
            fraction rejects which occur in WHOLE WERD rejects is LESS THAN a
            limit
          */
          if (current_row->char_count > 0 &&
              (current_row->rej_count * 100.0 / current_row->char_count) >
              tessedit_reject_row_percent &&
              (current_row->whole_word_rej_count * 100.0 /
                  current_row->rej_count) <
              tessedit_whole_wd_rej_row_percent) {
            if (tessedit_debug_block_rejection) {
              tprintf("REJECTING ROW %d  #chars: %d;  #Rejects: %d\n",
                      row_no, current_row->char_count,
                      current_row->rej_count);
            }
            prev_word_rejected = FALSE;
            while ((word = page_res_it.word()) != NULL &&
                   page_res_it.row () == current_row) {
              /* Preserve words on good docs unless they are mostly rejected*/
              if (!tessedit_row_rej_good_docs && good_quality_doc) {
                rej_word = word->reject_map.reject_count() /
                    static_cast<float>(word->reject_map.length()) >
                    tessedit_good_doc_still_rowrej_wd;
              } else if (tessedit_preserve_row_rej_perfect_wds) {
                /* Preserve perfect words anyway */
                rej_word = word->reject_map.reject_count() > 0 ||
                    word->reject_map.length () < tessedit_preserve_min_wd_len;
                if (rej_word && tessedit_dont_rowrej_good_wds &&
                    word->reject_map.length() >= tessedit_preserve_min_wd_len &&
                    acceptable_word_string(*word->uch_set,
                        word->best_choice->unichar_string().string(),
                        word->best_choice->unichar_lengths().string()) !=
                            AC_UNACCEPTABLE) {
                  word_char_quality(word, page_res_it.row()->row,
                                    &char_quality,
                                    &accepted_char_quality);
                  rej_word = char_quality != word->reject_map.length();
                }
              } else {
                rej_word = TRUE;
              }
              if (rej_word) {
                /*
                  Reject spacing if both current and prev words are rejected.
                  NOTE - this is NOT restricted to FUZZY spaces. - When tried
                  this generated more space errors.
                */
                if (tessedit_use_reject_spaces &&
                    prev_word_rejected &&
                    page_res_it.prev_row() == page_res_it.row() &&
                    word->word->space () == 1)
                  word->reject_spaces = TRUE;
                word->reject_map.rej_word_row_rej();
              }
              prev_word_rejected = rej_word;
              page_res_it.forward();
            }
          } else {
            if (tessedit_debug_block_rejection) {
              tprintf("NOT REJECTING ROW %d #chars: %d  # Rejects: %d; \n",
                      row_no, current_row->char_count, current_row->rej_count);
            }
            // Row is acceptable: skip over the rest of its words.
            while (page_res_it.word() != NULL &&
                   page_res_it.row() == current_row)
              page_res_it.forward();
          }
        }
      }
    }
  }
}
} // namespace tesseract
/*************************************************************************
 * reject_whole_page()
 * Dont believe any of it - set the reject map to 00..00 in all words
 *
 *************************************************************************/
void reject_whole_page(PAGE_RES_IT &page_res_it) {
  page_res_it.restart_page();
  // Stamp every word's reject map with the document-level rejection code.
  WERD_RES *word_res = page_res_it.word();
  while (word_res != NULL) {
    word_res->reject_map.rej_word_doc_rej();
    page_res_it.forward();
    word_res = page_res_it.word();
  }
  // Record that nothing on the page survived.
  page_res_it.page_res->rejected = TRUE;
}
namespace tesseract {
// First pass of UNLV tilde crunching: walk every word on the page and mark
// hopeless ("garbage") words with CR_KEEP_SPACE. A run of merely-suspicious
// ("potential") words is only crunched retrospectively once a truly terrible
// word confirms the surrounding area is bad.
void Tesseract::tilde_crunch(PAGE_RES_IT &page_res_it) {
  WERD_RES *word;
  GARBAGE_LEVEL garbage_level;
  PAGE_RES_IT copy_it;                  // Start of a potential-crunch run.
  BOOL8 prev_potential_marked = FALSE;  // copy_it is only valid when TRUE.
  BOOL8 found_terrible_word = FALSE;
  BOOL8 ok_dict_word;

  page_res_it.restart_page();
  while (page_res_it.word() != NULL) {
    POLY_BLOCK* pb = page_res_it.block()->block->poly_block();
    if (pb != NULL && !pb->IsText()) {
      // Non-text blocks are left alone.
      page_res_it.forward();
      continue;
    }
    word = page_res_it.word();
    if (crunch_early_convert_bad_unlv_chs)
      convert_bad_unlv_chs(word);
    if (crunch_early_merge_tess_fails)
      word->merge_tess_fails();

    if (word->reject_map.accept_count () != 0) {
      // An accepted character means the word is plausible: reset state.
      found_terrible_word = FALSE;
      //Forget earlier potential crunches
      prev_potential_marked = FALSE;
    }
    else {
      ok_dict_word = safe_dict_word(word);
      garbage_level = garbage_word (word, ok_dict_word);

      if ((garbage_level != G_NEVER_CRUNCH) &&
        (terrible_word_crunch (word, garbage_level))) {
        if (crunch_debug > 0) {
          tprintf ("T CRUNCHING: \"%s\"\n",
            word->best_choice->unichar_string().string());
        }
        word->unlv_crunch_mode = CR_KEEP_SPACE;
        if (prev_potential_marked) {
          // A terrible word confirms the earlier potential run: crunch
          // every word from the marked start up to (excluding) this word.
          while (copy_it.word () != word) {
            if (crunch_debug > 0) {
              tprintf ("P1 CRUNCHING: \"%s\"\n",
                copy_it.word()->best_choice->unichar_string().string());
            }
            copy_it.word ()->unlv_crunch_mode = CR_KEEP_SPACE;
            copy_it.forward ();
          }
          prev_potential_marked = FALSE;
        }
        found_terrible_word = TRUE;
      }
      else if ((garbage_level != G_NEVER_CRUNCH) &&
        (potential_word_crunch (word,
        garbage_level, ok_dict_word))) {
        if (found_terrible_word) {
          // Already inside a confirmed-bad area: crunch immediately.
          if (crunch_debug > 0) {
            tprintf ("P2 CRUNCHING: \"%s\"\n",
              word->best_choice->unichar_string().string());
          }
          word->unlv_crunch_mode = CR_KEEP_SPACE;
        }
        else if (!prev_potential_marked) {
          // Remember where this potential run started, in case a terrible
          // word later confirms it.
          copy_it = page_res_it;
          prev_potential_marked = TRUE;
          if (crunch_debug > 1) {
            tprintf ("P3 CRUNCHING: \"%s\"\n",
              word->best_choice->unichar_string().string());
          }
        }
      }
      else {
        found_terrible_word = FALSE;
        //Forget earlier potential crunches
        prev_potential_marked = FALSE;
        if (crunch_debug > 2) {
          tprintf ("NO CRUNCH: \"%s\"\n",
            word->best_choice->unichar_string().string());
        }
      }
    }
    page_res_it.forward ();
  }
}
// Decide whether a word is bad enough to crunch outright. The numeric
// reason code (1-5) exists only for the debug trace.
BOOL8 Tesseract::terrible_word_crunch(WERD_RES *word,
                                      GARBAGE_LEVEL garbage_level) {
  int crunch_mode = 0;

  const int str_len = word->best_choice->unichar_string().length();
  if (str_len == 0 ||
      strspn(word->best_choice->unichar_string().string(), " ") == str_len) {
    crunch_mode = 1;  // Empty, or nothing but spaces.
  } else {
    // Rating is normalized by length, capped at crunch_rating_max chars.
    int adjusted_len = word->reject_map.length();
    if (adjusted_len > crunch_rating_max)
      adjusted_len = crunch_rating_max;
    const float rating_per_ch = word->best_choice->rating() / adjusted_len;

    if (rating_per_ch > crunch_terrible_rating)
      crunch_mode = 2;
    else if (crunch_terrible_garbage && garbage_level == G_TERRIBLE)
      crunch_mode = 3;
    else if (word->best_choice->certainty() < crunch_poor_garbage_cert &&
             garbage_level != G_OK)
      crunch_mode = 4;
    else if (rating_per_ch > crunch_poor_garbage_rate &&
             garbage_level != G_OK)
      crunch_mode = 5;
  }

  if (crunch_mode == 0)
    return FALSE;
  if (crunch_debug > 2) {
    tprintf ("Terrible_word_crunch (%d) on \"%s\"\n",
      crunch_mode, word->best_choice->unichar_string().string());
  }
  return TRUE;
}
// Count "poor word" indicators (bad rating, bad certainty, garbage level)
// and report TRUE when enough of them fire to make the word a crunch
// candidate.
BOOL8 Tesseract::potential_word_crunch(WERD_RES *word,
                                       GARBAGE_LEVEL garbage_level,
                                       BOOL8 ok_dict_word) {
  const char *str = word->best_choice->unichar_string().string();
  const char *lengths = word->best_choice->unichar_lengths().string();
  int poor_indicator_count = 0;

  // A word escapes the certainty indicator when we preserve acceptable
  // strings and it is either very short, acceptable, or a safe dict word.
  const BOOL8 word_crunchable =
      !crunch_leave_accept_strings ||
      word->reject_map.length() < 3 ||
      (acceptable_word_string(*word->uch_set, str, lengths) ==
           AC_UNACCEPTABLE &&
       !ok_dict_word);

  int adjusted_len = word->reject_map.length();
  if (adjusted_len > 10)
    adjusted_len = 10;
  const float rating_per_ch = word->best_choice->rating() / adjusted_len;

  if (rating_per_ch > crunch_pot_poor_rate) {
    if (crunch_debug > 2) {
      tprintf("Potential poor rating on \"%s\"\n", str);
    }
    poor_indicator_count++;
  }

  if (word_crunchable &&
      word->best_choice->certainty() < crunch_pot_poor_cert) {
    if (crunch_debug > 2) {
      tprintf("Potential poor cert on \"%s\"\n", str);
    }
    poor_indicator_count++;
  }

  if (garbage_level != G_OK) {
    if (crunch_debug > 2) {
      tprintf("Potential garbage on \"%s\"\n", str);
    }
    poor_indicator_count++;
  }
  return poor_indicator_count >= crunch_pot_indicators;
}
// Second pass of UNLV tilde crunching: convert crunched words at the ends of
// rows into deletions. Deletable words at the beginning of a line are
// deleted immediately; deletable words mid-line are only marked, and the
// mark is committed when a deletable word reaches the end of the line.
void Tesseract::tilde_delete(PAGE_RES_IT &page_res_it) {
  WERD_RES *word;
  PAGE_RES_IT copy_it;                // Start of a marked mid-line run.
  BOOL8 deleting_from_bol = FALSE;
  BOOL8 marked_delete_point = FALSE;  // copy_it is only valid when TRUE.
  inT16 debug_delete_mode;            // Reason code for debug output only.
  CRUNCH_MODE delete_mode;
  inT16 x_debug_delete_mode;
  CRUNCH_MODE x_delete_mode;

  page_res_it.restart_page();
  while (page_res_it.word() != NULL) {
    word = page_res_it.word();

    delete_mode = word_deletable (word, debug_delete_mode);
    if (delete_mode != CR_NONE) {
      if (word->word->flag (W_BOL) || deleting_from_bol) {
        // Leading run of deletable words: delete as we go.
        if (crunch_debug > 0) {
          tprintf ("BOL CRUNCH DELETING(%d): \"%s\"\n",
            debug_delete_mode,
            word->best_choice->unichar_string().string());
        }
        word->unlv_crunch_mode = delete_mode;
        deleting_from_bol = TRUE;
      } else if (word->word->flag(W_EOL)) {
        // Deletable word ends the line: commit any marked mid-line run,
        // re-evaluating each marked word for its own delete mode.
        if (marked_delete_point) {
          while (copy_it.word() != word) {
            x_delete_mode = word_deletable (copy_it.word (),
              x_debug_delete_mode);
            if (crunch_debug > 0) {
              tprintf ("EOL CRUNCH DELETING(%d): \"%s\"\n",
                x_debug_delete_mode,
                copy_it.word()->best_choice->unichar_string().string());
            }
            copy_it.word ()->unlv_crunch_mode = x_delete_mode;
            copy_it.forward ();
          }
        }
        if (crunch_debug > 0) {
          tprintf ("EOL CRUNCH DELETING(%d): \"%s\"\n",
            debug_delete_mode,
            word->best_choice->unichar_string().string());
        }
        word->unlv_crunch_mode = delete_mode;
        deleting_from_bol = FALSE;
        marked_delete_point = FALSE;
      }
      else {
        // Deletable word mid-line: remember where the run started.
        if (!marked_delete_point) {
          copy_it = page_res_it;
          marked_delete_point = TRUE;
        }
      }
    }
    else {
      // Word survives: any pending run is abandoned.
      deleting_from_bol = FALSE;
      //Forget earlier potential crunches
      marked_delete_point = FALSE;
    }
    /*
      The following step has been left till now as the tess fails are used to
      determine if the word is deletable.
    */
    if (!crunch_early_merge_tess_fails)
      word->merge_tess_fails();
    page_res_it.forward ();
  }
}
// Replace UNLV-reserved characters in the best choice: '~' becomes '-' and
// '^' becomes ' '. Any replaced character that was accepted is re-marked
// as an UNLV rejection in the reject map.
void Tesseract::convert_bad_unlv_chs(WERD_RES *word_res) {
  const UNICHAR_ID id_dash = word_res->uch_set->unichar_to_id("-");
  const UNICHAR_ID id_space = word_res->uch_set->unichar_to_id(" ");
  const UNICHAR_ID id_tilde = word_res->uch_set->unichar_to_id("~");
  const UNICHAR_ID id_pow = word_res->uch_set->unichar_to_id("^");
  for (int i = 0; i < word_res->reject_map.length(); ++i) {
    const UNICHAR_ID original_id = word_res->best_choice->unichar_id(i);
    if (original_id == id_tilde) {
      word_res->best_choice->set_unichar_id(id_dash, i);
      if (word_res->reject_map[i].accepted())
        word_res->reject_map[i].setrej_unlv_rej();
    } else if (original_id == id_pow) {
      word_res->best_choice->set_unichar_id(id_space, i);
      if (word_res->reject_map[i].accepted())
        word_res->reject_map[i].setrej_unlv_rej();
    }
  }
}
/*************************************************************************
 * garbage_word()
 * Classify how garbage-like a word's best choice string is:
 *   G_NEVER_CRUNCH - mostly sensible alphas; must never be crunched
 *   G_OK           - looks like a real word/number
 *   G_DODGY        - suspicious mix of junk characters
 *   G_TERRIBLE     - dominated by tess failures / bad characters
 * A single scan over the string drives a small state machine that tracks
 * runs of upper/lower/digit characters, isolated characters, repeated
 * letters and tess-failure markers (spaces).
 *************************************************************************/
GARBAGE_LEVEL Tesseract::garbage_word(WERD_RES *word, BOOL8 ok_dict_word) {
  enum STATES
  {
    JUNK,
    FIRST_UPPER,
    FIRST_LOWER,
    FIRST_NUM,
    SUBSEQUENT_UPPER,
    SUBSEQUENT_LOWER,
    SUBSEQUENT_NUM
  };
  const char *str = word->best_choice->unichar_string().string();
  const char *lengths = word->best_choice->unichar_lengths().string();
  // BUGFIX: scan with separate cursors so that str/lengths still point at
  // the start of the word for the whole-string tests after the loop.
  // Previously the loop advanced str/lengths themselves, so the later
  // acceptable_word_string() and strpbrk() calls always saw the empty tail
  // of the string (strpbrk(str, " ") == NULL was trivially true).
  const char *ch = str;
  const char *ch_len = lengths;
  STATES state = JUNK;
  int len = 0;
  int isolated_digits = 0;
  int isolated_alphas = 0;
  int bad_char_count = 0;
  int tess_rejs = 0;
  int dodgy_chars = 0;
  int ok_chars;
  UNICHAR_ID last_char = -1;
  int alpha_repetition_count = 0;
  int longest_alpha_repetition_count = 0;
  int longest_lower_run_len = 0;
  int lower_string_count = 0;
  int longest_upper_run_len = 0;
  int upper_string_count = 0;
  int total_alpha_count = 0;
  int total_digit_count = 0;

  for (; *ch != '\0'; ch += *(ch_len++)) {
    len++;
    if (word->uch_set->get_isupper (ch, *ch_len)) {
      total_alpha_count++;
      switch (state) {
        case SUBSEQUENT_UPPER:
        case FIRST_UPPER:
          state = SUBSEQUENT_UPPER;
          upper_string_count++;
          if (longest_upper_run_len < upper_string_count)
            longest_upper_run_len = upper_string_count;
          if (last_char == word->uch_set->unichar_to_id(ch, *ch_len)) {
            alpha_repetition_count++;
            if (longest_alpha_repetition_count < alpha_repetition_count) {
              longest_alpha_repetition_count = alpha_repetition_count;
            }
          }
          else {
            last_char = word->uch_set->unichar_to_id(ch, *ch_len);
            alpha_repetition_count = 1;
          }
          break;
        case FIRST_NUM:
          isolated_digits++;
          // Deliberate fall through: the digit run ended with one digit.
        default:
          state = FIRST_UPPER;
          last_char = word->uch_set->unichar_to_id(ch, *ch_len);
          alpha_repetition_count = 1;
          upper_string_count = 1;
          break;
      }
    }
    else if (word->uch_set->get_islower (ch, *ch_len)) {
      total_alpha_count++;
      switch (state) {
        case SUBSEQUENT_LOWER:
        case FIRST_LOWER:
          state = SUBSEQUENT_LOWER;
          lower_string_count++;
          if (longest_lower_run_len < lower_string_count)
            longest_lower_run_len = lower_string_count;
          if (last_char == word->uch_set->unichar_to_id(ch, *ch_len)) {
            alpha_repetition_count++;
            if (longest_alpha_repetition_count < alpha_repetition_count) {
              longest_alpha_repetition_count = alpha_repetition_count;
            }
          }
          else {
            last_char = word->uch_set->unichar_to_id(ch, *ch_len);
            alpha_repetition_count = 1;
          }
          break;
        case FIRST_NUM:
          isolated_digits++;
          // Deliberate fall through: the digit run ended with one digit.
        default:
          state = FIRST_LOWER;
          last_char = word->uch_set->unichar_to_id(ch, *ch_len);
          alpha_repetition_count = 1;
          lower_string_count = 1;
          break;
      }
    }
    else if (word->uch_set->get_isdigit (ch, *ch_len)) {
      total_digit_count++;
      switch (state) {
        case FIRST_NUM:
          state = SUBSEQUENT_NUM;
          // Deliberate fall through.
        case SUBSEQUENT_NUM:
          break;
        case FIRST_UPPER:
        case FIRST_LOWER:
          isolated_alphas++;
          // Deliberate fall through: the alpha run ended with one char.
        default:
          state = FIRST_NUM;
          break;
      }
    }
    else {
      // A single-byte space in the best choice marks a tess failure.
      if (*ch_len == 1 && *ch == ' ')
        tess_rejs++;
      else
        bad_char_count++;
      switch (state) {
        case FIRST_NUM:
          isolated_digits++;
          break;
        case FIRST_UPPER:
        case FIRST_LOWER:
          isolated_alphas++;
          // Deliberate fall through.
        default:
          break;
      }
      state = JUNK;
    }
  }

  // Close off a trailing one-character run.
  switch (state) {
    case FIRST_NUM:
      isolated_digits++;
      break;
    case FIRST_UPPER:
    case FIRST_LOWER:
      isolated_alphas++;
      // Deliberate fall through.
    default:
      break;
  }

  if (crunch_include_numerals) {
    total_alpha_count += total_digit_count - isolated_digits;
  }

  // Mostly-alpha words without long repetitions may be exempt from
  // crunching altogether.
  if (crunch_leave_ok_strings && len >= 4 &&
      2 * (total_alpha_count - isolated_alphas) > len &&
      longest_alpha_repetition_count < crunch_long_repetitions) {
    if ((crunch_accept_ok &&
         acceptable_word_string(*word->uch_set, str, lengths) !=
             AC_UNACCEPTABLE) ||
        longest_lower_run_len > crunch_leave_lc_strings ||
        longest_upper_run_len > crunch_leave_uc_strings)
      return G_NEVER_CRUNCH;
  }
  // Space-free dictionary/number/acceptable words are fine.
  if (word->reject_map.length() > 1 &&
      strpbrk(str, " ") == NULL &&
      (word->best_choice->permuter() == SYSTEM_DAWG_PERM ||
       word->best_choice->permuter() == FREQ_DAWG_PERM ||
       word->best_choice->permuter() == USER_DAWG_PERM ||
       word->best_choice->permuter() == NUMBER_PERM ||
       acceptable_word_string(*word->uch_set, str, lengths) !=
           AC_UNACCEPTABLE || ok_dict_word))
    return G_OK;

  ok_chars = len - bad_char_count - isolated_digits -
      isolated_alphas - tess_rejs;

  if (crunch_debug > 3) {
    tprintf("garbage_word: \"%s\"\n",
            word->best_choice->unichar_string().string());
    tprintf("LEN: %d bad: %d iso_N: %d iso_A: %d rej: %d\n",
            len,
            bad_char_count, isolated_digits, isolated_alphas, tess_rejs);
  }
  if (bad_char_count == 0 &&
      tess_rejs == 0 &&
      (len > isolated_digits + isolated_alphas || len <= 2))
    return G_OK;

  if (tess_rejs > ok_chars ||
      (tess_rejs > 0 && (bad_char_count + tess_rejs) * 2 > len))
    return G_TERRIBLE;

  if (len > 4) {
    dodgy_chars = 2 * tess_rejs + bad_char_count + isolated_digits +
        isolated_alphas;
    if (dodgy_chars > 5 || (dodgy_chars / (float) len) > 0.5)
      return G_DODGY;
    else
      return G_OK;
  } else {
    dodgy_chars = 2 * tess_rejs + bad_char_count;
    if ((len == 4 && dodgy_chars > 2) ||
        (len == 3 && dodgy_chars > 2) || dodgy_chars >= len)
      return G_DODGY;
    else
      return G_OK;
  }
}
/*************************************************************************
 * word_deletable()
 *     DELETE WERDS AT ENDS OF ROWS IF
 *        Word is crunched &&
 *        ( string length = 0                                          OR
 *          > 50% of chars are "|" (before merging)                    OR
 *          certainty < -10                                            OR
 *          rating /char > 60                                          OR
 *          TOP of word is more than 0.5 xht BELOW baseline            OR
 *          BOTTOM of word is more than 0.5 xht ABOVE xht              OR
 *          length of word < 3xht                                      OR
 *          height of word < 0.7 xht                                   OR
 *          height of word > 3.0 xht                                   OR
 *          >75% of the outline BBs have longest dimension < 0.5xht
 * Returns the crunch mode to apply; delete_mode receives a numeric
 * reason code used only for debug output.
 *************************************************************************/
CRUNCH_MODE Tesseract::word_deletable(WERD_RES *word, inT16 &delete_mode) {
  int word_len = word->reject_map.length ();
  float rating_per_ch;
  TBOX box;                      //BB of word

  if (word->unlv_crunch_mode == CR_NONE) {
    // Only words already marked by tilde_crunch are candidates.
    delete_mode = 0;
    return CR_NONE;
  }

  if (word_len == 0) {
    delete_mode = 1;
    return CR_DELETE;
  }

  if (word->rebuild_word != NULL) {
    // Cube leaves rebuild_word NULL.
    box = word->rebuild_word->bounding_box();
    if (box.height () < crunch_del_min_ht * kBlnXHeight) {
      delete_mode = 4;
      return CR_DELETE;
    }

    if (noise_outlines(word->rebuild_word)) {
      delete_mode = 5;
      return CR_DELETE;
    }
  }
  // NOTE(review): when rebuild_word is NULL, "box" below is still the
  // default-constructed (empty) box, so the top/bottom/height/width tests
  // run on it - confirm this is the intended behavior for Cube words.

  if ((failure_count (word) * 1.5) > word_len) {
    delete_mode = 2;
    return CR_LOOSE_SPACE;
  }

  if (word->best_choice->certainty () < crunch_del_cert) {
    delete_mode = 7;
    return CR_LOOSE_SPACE;
  }

  rating_per_ch = word->best_choice->rating () / word_len;

  if (rating_per_ch > crunch_del_rating) {
    delete_mode = 8;
    return CR_LOOSE_SPACE;
  }

  if (box.top () < kBlnBaselineOffset - crunch_del_low_word * kBlnXHeight) {
    delete_mode = 9;
    return CR_LOOSE_SPACE;
  }

  if (box.bottom () >
  kBlnBaselineOffset + crunch_del_high_word * kBlnXHeight) {
    delete_mode = 10;
    return CR_LOOSE_SPACE;
  }

  if (box.height () > crunch_del_max_ht * kBlnXHeight) {
    delete_mode = 11;
    return CR_LOOSE_SPACE;
  }

  if (box.width () < crunch_del_min_width * kBlnXHeight) {
    delete_mode = 3;
    return CR_LOOSE_SPACE;
  }

  delete_mode = 0;
  return CR_NONE;
}
// Count the tess-failure markers (space characters) in the word's best
// choice string.
inT16 Tesseract::failure_count(WERD_RES *word) {
  int space_count = 0;
  for (const char *p = word->best_choice->unichar_string().string();
       *p != '\0'; ++p) {
    if (*p == ' ')
      ++space_count;
  }
  return space_count;
}
// TRUE when every outline of the word is "small": its longest bounding-box
// dimension is below a fraction of the x-height.
BOOL8 Tesseract::noise_outlines(TWERD *word) {
  const float small_limit = kBlnXHeight * crunch_small_outlines_size;
  inT16 outline_count = 0;
  inT16 small_outline_count = 0;

  for (int b = 0; b < word->NumBlobs(); ++b) {
    for (TESSLINE* ol = word->blobs[b]->outlines; ol != NULL; ol = ol->next) {
      outline_count++;
      const TBOX ol_box = ol->bounding_box();
      const inT16 longest_dim =
          (ol_box.height() > ol_box.width()) ? ol_box.height() : ol_box.width();
      if (longest_dim < small_limit)
        small_outline_count++;
    }
  }
  // Holds trivially for a word with no outlines at all.
  return small_outline_count >= outline_count;
}
} // namespace tesseract
| C++ |
///////////////////////////////////////////////////////////////////////
// File: equationdetect.h
// Description: The equation detection class that inherits equationdetectbase.
// Author: Zongyi (Joe) Liu (joeliu@google.com)
// Created: Fri Aug 31 11:13:01 PST 2011
//
// (C) Copyright 2011, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#ifndef TESSERACT_CCMAIN_EQUATIONDETECT_H__
#define TESSERACT_CCMAIN_EQUATIONDETECT_H__
#include "blobbox.h"
#include "equationdetectbase.h"
#include "genericvector.h"
#include "unichar.h"
class BLOBNBOX;
class BLOB_CHOICE;
class BLOB_CHOICE_LIST;
class TO_BLOCK_LIST;
class TBOX;
class UNICHARSET;
namespace tesseract {
class Tesseract;
class ColPartition;
class ColPartitionGrid;
class ColPartitionSet;
// Equation detector: scans the page's ColPartitionGrid for regions likely to
// contain mathematical equations, labels them (PT_EQUATION and inline
// variants), and merges over-segmented pieces before the final search.
class EquationDetect : public EquationDetectBase {
 public:
  EquationDetect(const char* equ_datapath,
                 const char* equ_language);
  ~EquationDetect();
  // Classification of a partition's indentation relative to the text column.
  enum IndentType {
    NO_INDENT,
    LEFT_INDENT,
    RIGHT_INDENT,
    BOTH_INDENT,
    INDENT_TYPE_COUNT
  };
  // Reset the lang_tesseract_ pointer. This function should be called before we
  // do any detector work.
  void SetLangTesseract(Tesseract* lang_tesseract);
  // Iterate over the blobs inside to_block, and set the blobs that we want to
  // process to BSTT_NONE. (By default, they should be BSTT_SKIP). The function
  // returns 0 upon success.
  int LabelSpecialText(TO_BLOCK* to_block);
  // Find possible equation partitions from part_grid. Should be called
  // after the special_text_type of blobs are set.
  // It returns 0 upon success.
  int FindEquationParts(ColPartitionGrid* part_grid,
                        ColPartitionSet** best_columns);
  // Reset the resolution of the processing image. TEST only function.
  void SetResolution(const int resolution);
 protected:
  // Identify the special text type for one blob, and update its field. When
  // height_th is set (> 0), we will label the blob as BSTT_NONE if its height
  // is less than height_th.
  void IdentifySpecialText(BLOBNBOX *blob, const int height_th);
  // Estimate the type for one unichar.
  BlobSpecialTextType EstimateTypeForUnichar(
      const UNICHARSET& unicharset, const UNICHAR_ID id) const;
  // Compute special text type for each blob in part_grid_.
  void IdentifySpecialText();
  // Identify blobs that we want to skip during special blob type
  // classification.
  void IdentifyBlobsToSkip(ColPartition* part);
  // The ColPartitions in part_grid_ may be over-segmented, particularly in the
  // block equation regions. So we would like to identify these partitions and
  // merge them before we do the searching.
  void MergePartsByLocation();
  // Starting from the seed center, we do radius search. And for partitions that
  // have large overlaps with seed, we remove them from part_grid_ and add into
  // parts_overlap. Note: this function may update the part_grid_, so if the
  // caller is also running ColPartitionGridSearch, use the RepositionIterator
  // to continue.
  void SearchByOverlap(ColPartition* seed,
                       GenericVector<ColPartition*>* parts_overlap);
  // Insert part back into part_grid_, after it absorbs some other parts.
  void InsertPartAfterAbsorb(ColPartition* part);
  // Identify the colpartitions in part_grid_, label them as PT_EQUATION, and
  // save them into cp_seeds_.
  void IdentifySeedParts();
  // Check the blobs count for a seed region candidate.
  bool CheckSeedBlobsCount(ColPartition* part);
  // Compute the foreground pixel density for a tbox area.
  float ComputeForegroundDensity(const TBOX& tbox);
  // Check if part qualifies for the seed2 label: low math density and left
  // indented. We are using two checks:
  // 1. If its left is aligned with any coordinates in indented_texts_left,
  // which we assume have been sorted.
  // 2. If its foreground density is over foreground_density_th.
  bool CheckForSeed2(
      const GenericVector<int>& indented_texts_left,
      const float foreground_density_th,
      ColPartition* part);
  // Count the number of values in sorted_vec that are close to val, used to
  // check if a partition is aligned with text partitions.
  int CountAlignment(
      const GenericVector<int>& sorted_vec, const int val) const;
  // Check for a seed candidate using the foreground pixel density. And we
  // return true if the density is below a certain threshold, because characters
  // in equation regions usually are apart with more white spaces.
  bool CheckSeedFgDensity(const float density_th, ColPartition* part);
  // A light version of SplitCPHor: instead of really doing the part split, we
  // simply compute the union bounding box of each split part.
  void SplitCPHorLite(ColPartition* part, GenericVector<TBOX>* splitted_boxes);
  // Split the part (horizontally), and save the split result into
  // parts_splitted. Note that it is caller's responsibility to release the
  // memory owned by parts_splitted. On the other hand, the part is unchanged
  // during this process and still owns the blobs, so do NOT call DeleteBoxes
  // when freeing the colpartitions in parts_splitted.
  void SplitCPHor(ColPartition* part,
                  GenericVector<ColPartition*>* parts_splitted);
  // Check the density for a seed candidate (part) using its math density and
  // italic density, returns true if the check passed.
  bool CheckSeedDensity(const float math_density_high,
                        const float math_density_low,
                        const ColPartition* part) const;
  // Check if part is indented.
  IndentType IsIndented(ColPartition* part);
  // Identify inline partitions from cp_seeds_, and re-label them.
  void IdentifyInlineParts();
  // Compute the super bounding box for all colpartitions inside part_grid_.
  void ComputeCPsSuperBBox();
  // Identify inline partitions from cp_seeds_ using the horizontal search.
  void IdentifyInlinePartsHorizontal();
  // Estimate the line spacing between two text partitions. Returns -1 if not
  // enough data.
  int EstimateTextPartLineSpacing();
  // Identify inline partitions from cp_seeds_ using vertical search.
  void IdentifyInlinePartsVertical(const bool top_to_bottom,
                                   const int textPartsLineSpacing);
  // Check if part is an inline equation zone. This should be called after we
  // identified the seed regions.
  bool IsInline(const bool search_bottom,
                const int textPartsLineSpacing,
                ColPartition* part);
  // For a given seed partition, we search the part_grid_ and see if there is
  // any partition that can be merged with it. It returns true if the seed has
  // been expanded.
  bool ExpandSeed(ColPartition* seed);
  // Starting from the seed position, we search the part_grid_
  // horizontally/vertically, find all partitions that can be
  // merged with seed, remove them from part_grid_, and put them into
  // parts_to_merge.
  void ExpandSeedHorizontal(const bool search_left,
                            ColPartition* seed,
                            GenericVector<ColPartition*>* parts_to_merge);
  void ExpandSeedVertical(const bool search_bottom,
                          ColPartition* seed,
                          GenericVector<ColPartition*>* parts_to_merge);
  // Check if a part_box is the small neighbor of seed_box.
  bool IsNearSmallNeighbor(const TBOX& seed_box,
                           const TBOX& part_box) const;
  // Perform the density check for part, which we assume is nearing a seed
  // partition. It returns true if the check passed.
  bool CheckSeedNeighborDensity(const ColPartition* part) const;
  // After identifying the math blocks, we do one more scanning on all text
  // partitions, and check if any of them is the satellite of:
  // math blocks: here a p is the satellite of q if:
  // 1. q is the nearest vertical neighbor of p, and
  // 2. y_gap(p, q) is less than a threshold, and
  // 3. x_overlap(p, q) is over a threshold.
  // Note that p can be the satellite of two blocks: its top neighbor and
  // bottom neighbor.
  void ProcessMathBlockSatelliteParts();
  // Check if part is the satellite of one/two math blocks. If it is, we return
  // true, and save the blocks into math_blocks.
  bool IsMathBlockSatellite(
      ColPartition* part, GenericVector<ColPartition*>* math_blocks);
  // Search the nearest neighbor of part in one vertical direction as defined in
  // search_bottom. It returns the neighbor found that has major x overlap with
  // it, or NULL when not found.
  ColPartition* SearchNNVertical(const bool search_bottom,
                                 const ColPartition* part);
  // Check if the neighbor with vertical distance of y_gap is a near and math
  // block partition.
  bool IsNearMathNeighbor(const int y_gap, const ColPartition *neighbor) const;
  // Generate the tiff file name for output/debug file.
  void GetOutputTiffName(const char* name, STRING* image_name) const;
  // Debugger function that renders ColPartitions on the input image, where:
  // parts labeled as PT_EQUATION will be painted in red, PT_INLINE_EQUATION
  // will be painted in green, and other parts will be painted in blue.
  void PaintColParts(const STRING& outfile) const;
  // Debugger function that renders the blobs in part_grid_ over the input
  // image.
  void PaintSpecialTexts(const STRING& outfile) const;
  // Debugger function that prints the math blob density values for a
  // ColPartition object.
  void PrintSpecialBlobsDensity(const ColPartition* part) const;
  // The tesseract engine initialized from equation training data.
  Tesseract* equ_tesseract_;
  // The tesseract engine used for OCR. This pointer is passed in by the caller,
  // so do NOT destroy it in this class.
  Tesseract* lang_tesseract_;
  // The ColPartitionGrid that we are processing. This pointer is passed in from
  // the caller, so do NOT destroy it in the class.
  ColPartitionGrid* part_grid_;
  // A simple array of pointers to the best assigned column division at
  // each grid y coordinate. This pointer is passed in from the caller, so do
  // NOT destroy it in the class.
  ColPartitionSet** best_columns_;
  // The super bounding box of all cps in the part_grid_.
  TBOX* cps_super_bbox_;
  // The seed ColPartitions for equation regions.
  GenericVector<ColPartition*> cp_seeds_;
  // The resolution (dpi) of the processing image.
  int resolution_;
  // The number of pages we have processed.
  int page_count_;
};
} // namespace tesseract
#endif  // TESSERACT_CCMAIN_EQUATIONDETECT_H__
| C++ |
/**********************************************************************
* File: tessvars.cpp (Formerly tessvars.c)
* Description: Variables and other globals for tessedit.
* Author: Ray Smith
* Created: Mon Apr 13 13:13:23 BST 1992
*
* (C) Copyright 1992, Hewlett-Packard Ltd.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
#include <stdio.h>
#include "tessvars.h"
// Global destination stream for tessedit debug output.
FILE *debug_fp = stderr; // write debug stuff here
| C++ |
///////////////////////////////////////////////////////////////////////
// File: paramsd.cpp
// Description: Tesseract parameter editor
// Author: Joern Wanke
// Created: Wed Jul 18 10:05:01 PDT 2007
//
// (C) Copyright 2007, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
//
// Tesseract parameter editor is used to edit all the parameters used
// within tesseract from the ui.
#ifndef GRAPHICS_DISABLED
#ifndef VARABLED_H
#define VARABLED_H
#include "elst.h"
#include "scrollview.h"
#include "params.h"
#include "tesseractclass.h"
class SVMenuNode;
// A list of all possible parameter types used. Mirrors tesseract's
// IntParam/BoolParam/StringParam/DoubleParam classes (see the ParamContent
// constructors below).
enum ParamType {
  VT_INTEGER,
  VT_BOOLEAN,
  VT_STRING,
  VT_DOUBLE
};
// A rather hackish helper structure which can take any kind of parameter input
// (defined by ParamType) and do a couple of common operations on them, like
// comparisons or getting its value. It is used in the context of the
// ParamsEditor as a bridge from the internal tesseract parameters to the
// ones displayed by the ScrollView server.
class ParamContent : public ELIST_LINK {
 public:
  // Compare two VC objects by their name.
  static int Compare(const void* v1, const void* v2);
  // Gets a VC object identified by its ID.
  static ParamContent* GetParamContentById(int id);
  // Constructors for the various ParamTypes.
  ParamContent() {
  }
  explicit ParamContent(tesseract::StringParam* it);
  explicit ParamContent(tesseract::IntParam* it);
  explicit ParamContent(tesseract::BoolParam* it);
  explicit ParamContent(tesseract::DoubleParam* it);
  // Getters and Setters.
  void SetValue(const char* val);
  STRING GetValue() const;
  const char* GetName() const;
  const char* GetDescription() const;
  int GetId() { return my_id_; }
  bool HasChanged() { return changed_; }
 private:
  // The unique ID of this VC object.
  int my_id_;
  // Whether the parameter was changed_ and thus needs to be rewritten.
  bool changed_;
  // The actual ParamType of this VC object.
  ParamType param_type_;
  // Pointers to the wrapped parameter; only the one matching param_type_
  // is meaningful.
  tesseract::StringParam* sIt;
  tesseract::IntParam* iIt;
  tesseract::BoolParam* bIt;
  tesseract::DoubleParam* dIt;
};
ELISTIZEH(ParamContent)
// The parameters editor enables the user to edit all the parameters used within
// tesseract. It can be invoked on its own, but is supposed to be invoked by
// the program editor.
class ParamsEditor : public SVEventHandler {
 public:
  // Integrate the parameters editor as popupmenu into the existing scrollview
  // window (usually the pg editor). If sv == null, create a new empty
  // window and attach the parameter editor to that window (ugly).
  explicit ParamsEditor(tesseract::Tesseract*, ScrollView* sv = NULL);
  // Event listener. Waits for SVET_POPUP events and processes them.
  void Notify(const SVEvent* sve);
 private:
  // Gets the up to the first 3 prefixes from s (split by _).
  // For example, tesseract_foo_bar will be split into tesseract, foo and bar.
  void GetPrefixes(const char* s, STRING* level_one,
                   STRING* level_two, STRING* level_three);
  // Gets the first n words (split by _) and puts them in t.
  // For example, tesseract_foo_bar with n=2 will yield tesseract_foo_.
  void GetFirstWords(const char *s,  // source string
                     int n,          // number of words
                     char *t);       // target string
  // Find all editable parameters used within tesseract and create a
  // SVMenuNode tree from it.
  SVMenuNode *BuildListOfAllLeaves(tesseract::Tesseract *tess);
  // Write all (changed_) parameters to a config file.
  void WriteParams(char* filename, bool changes_only);
  // The window the editor is attached to (created in the constructor if the
  // caller passed none).
  ScrollView* sv_window_;
};
#endif
#endif
| C++ |
///////////////////////////////////////////////////////////////////////
// File: osdetect.cpp
// Description: Orientation and script detection.
// Author: Samuel Charron
// Ranjith Unnikrishnan
//
// (C) Copyright 2008, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#include "osdetect.h"
#include "blobbox.h"
#include "blread.h"
#include "colfind.h"
#include "fontinfo.h"
#include "imagefind.h"
#include "linefind.h"
#include "oldlist.h"
#include "qrsequence.h"
#include "ratngs.h"
#include "strngs.h"
#include "tabvector.h"
#include "tesseractclass.h"
#include "textord.h"
// Minimum number of blobs to score before an early exit is allowed.
const int kMinCharactersToTry = 50;
// Hard cap on the number of blobs evaluated per page.
const int kMaxCharactersToTry = 5 * kMinCharactersToTry;
// Blobs whose aspect ratio exceeds this are treated as ambiguous and skipped.
const float kSizeRatioToReject = 2.0;
// Blobs shorter than this many pixels are filtered out.
const int kMinAcceptableBlobHeight = 10;
const float kOrientationAcceptRatio = 1.3;
// First/second script score ratio at which the script is considered decided;
// used to normalize sconfidence in update_best_script().
const float kScriptAcceptRatio = 1.3;
// Fractions of a Han match credited to the Korean/Japanese pseudo-scripts,
// since Han characters occur in both.
const float kHanRatioInKorean = 0.7;
const float kHanRatioInJapanese = 0.3;
// Certainty margin within which a second script match makes a blob ambiguous.
const float kNonAmbiguousMargin = 1.0;
// General scripts
static const char* han_script = "Han";
static const char* latin_script = "Latin";
static const char* katakana_script = "Katakana";
static const char* hiragana_script = "Hiragana";
static const char* hangul_script = "Hangul";
// Pseudo-script names (registered in the unicharset by ScriptDetector).
const char* ScriptDetector::korean_script_ = "Korean";
const char* ScriptDetector::japanese_script_ = "Japanese";
const char* ScriptDetector::fraktur_script_ = "Fraktur";
// Minimum believable resolution.
const int kMinCredibleResolution = 70;
// Default resolution used if input is not believable.
const int kDefaultResolution = 300;
// Finds the two highest of the four orientation scores, records the winner in
// best_result.orientation_id, and stores the winner-minus-runner-up margin in
// best_result.oconfidence.
void OSResults::update_best_orientation() {
  int best_id = 0;
  float best_score = orientations[0];
  float runner_up = orientations[1];
  if (runner_up > best_score) {
    best_id = 1;
    best_score = orientations[1];
    runner_up = orientations[0];
  }
  for (int i = 2; i < 4; ++i) {
    const float score = orientations[i];
    if (score > best_score) {
      runner_up = best_score;
      best_score = score;
      best_id = i;
    } else if (score > runner_up) {
      runner_up = score;
    }
  }
  best_result.orientation_id = best_id;
  // Confidence is the gap between the top two orientation scores.
  best_result.oconfidence = best_score - runner_up;
}
// Forces the best orientation to the given id, with zero confidence since it
// was not derived from the scores.
void OSResults::set_best_orientation(int orientation_id) {
  best_result.oconfidence = 0;
  best_result.orientation_id = orientation_id;
}
// Picks the highest-scoring script for the given orientation and stores it in
// best_result, along with a confidence normalized so that 1.0 corresponds to
// the first/second score ratio reaching kScriptAcceptRatio.
// NOTE(review): if the runner-up score is 0 the division below yields
// inf/NaN in sconfidence — presumably tolerated by callers; confirm before
// relying on sconfidence being finite.
void OSResults::update_best_script(int orientation) {
  // We skip index 0 to ignore the "Common" script.
  float first = scripts_na[orientation][1];
  float second = scripts_na[orientation][2];
  best_result.script_id = 1;
  if (scripts_na[orientation][1] < scripts_na[orientation][2]) {
    first = scripts_na[orientation][2];
    second = scripts_na[orientation][1];
    best_result.script_id = 2;
  }
  // Scan the remaining scripts, tracking the top two scores.
  for (int i = 3; i < kMaxNumberOfScripts; ++i) {
    if (scripts_na[orientation][i] > first) {
      best_result.script_id = i;
      second = first;
      first = scripts_na[orientation][i];
    } else if (scripts_na[orientation][i] > second) {
      second = scripts_na[orientation][i];
    }
  }
  best_result.sconfidence =
      (first / second - 1.0) / (kScriptAcceptRatio - 1.0);
}
int OSResults::get_best_script(int orientation_id) const {
int max_id = -1;
for (int j = 0; j < kMaxNumberOfScripts; ++j) {
const char *script = unicharset->get_script_from_script_id(j);
if (strcmp(script, "Common") && strcmp(script, "NULL")) {
if (max_id == -1 ||
scripts_na[orientation_id][j] > scripts_na[orientation_id][max_id])
max_id = j;
}
}
return max_id;
}
// Print the script scores for all possible orientations, delegating the
// per-orientation listing to print_scores(int).
void OSResults::print_scores(void) const {
  for (int i = 0; i < 4; ++i) {
    tprintf("Orientation id #%d", i);
    print_scores(i);
  }
}
// Print the script scores for the given candidate orientation.
// Only scripts with a non-zero score are listed.
void OSResults::print_scores(int orientation_id) const {
  for (int j = 0; j < kMaxNumberOfScripts; ++j) {
    if (scripts_na[orientation_id][j]) {
      tprintf("%12s\t: %f\n", unicharset->get_script_from_script_id(j),
              scripts_na[orientation_id][j]);
    }
  }
}
// Accumulate scores with given OSResults instance and update the best script.
void OSResults::accumulate(const OSResults& osr) {
for (int i = 0; i < 4; ++i) {
orientations[i] += osr.orientations[i];
for (int j = 0; j < kMaxNumberOfScripts; ++j)
scripts_na[i][j] += osr.scripts_na[i][j];
}
unicharset = osr.unicharset;
update_best_orientation();
update_best_script(best_result.orientation_id);
}
// Detect and erase horizontal/vertical lines and picture regions from the
// image, so that non-text blobs are removed from consideration.
// blocks/to_blocks receive the connected components found on the cleaned
// image.
void remove_nontext_regions(tesseract::Tesseract *tess, BLOCK_LIST *blocks,
                            TO_BLOCK_LIST *to_blocks) {
  Pix *pix = tess->pix_binary();
  ASSERT_HOST(pix != NULL);
  int vertical_x = 0;
  int vertical_y = 1;
  tesseract::TabVector_LIST v_lines;
  tesseract::TabVector_LIST h_lines;
  // Clamp the reported resolution to the file-scope minimum. (A previous
  // function-local constant shadowed the identical file-scope
  // kMinCredibleResolution; the shadow has been removed.)
  int resolution = (kMinCredibleResolution > pixGetXRes(pix)) ?
      kMinCredibleResolution : pixGetXRes(pix);
  // Find and erase rule lines.
  tesseract::LineFinder::FindAndRemoveLines(resolution, false, pix,
                                            &vertical_x, &vertical_y,
                                            NULL, &v_lines, &h_lines);
  // Find picture regions and subtract them from the binary image.
  Pix* im_pix = tesseract::ImageFind::FindImages(pix);
  if (im_pix != NULL) {
    pixSubtract(pix, pix, im_pix);
    pixDestroy(&im_pix);
  }
  // Re-extract connected components from the cleaned image.
  tess->mutable_textord()->find_components(tess->pix_binary(),
                                           blocks, to_blocks);
}
// Find connected components in the page and process a subset until finished or
// a stopping criterion is met.
// Returns the number of blobs used in making the estimate. 0 implies failure.
int orientation_and_script_detection(STRING& filename,
                                     OSResults* osr,
                                     tesseract::Tesseract* tess) {
  STRING name = filename;        // truncated name
  const char *lastdot;           // of name
  TBOX page_box;
  // Strip the extension to get the base name used to look for an optional
  // unlv zone file.
  lastdot = strrchr (name.string (), '.');
  if (lastdot != NULL)
    name[lastdot-name.string()] = '\0';
  ASSERT_HOST(tess->pix_binary() != NULL)
  int width = pixGetWidth(tess->pix_binary());
  int height = pixGetHeight(tess->pix_binary());
  BLOCK_LIST blocks;
  if (!read_unlv_file(name, width, height, &blocks))
    FullPageBlock(width, height, &blocks);
  // Try to remove non-text regions from consideration.
  // (An unused local TO_BLOCK_LIST land_blocks was removed.)
  TO_BLOCK_LIST port_blocks;
  remove_nontext_regions(tess, &blocks, &port_blocks);
  if (port_blocks.empty()) {
    // Page segmentation did not succeed, so we need to find_components first.
    tess->mutable_textord()->find_components(tess->pix_binary(),
                                             &blocks, &port_blocks);
  } else {
    page_box.set_left(0);
    page_box.set_bottom(0);
    page_box.set_right(width);
    page_box.set_top(height);
    // Filter_blobs sets up the TO_BLOCKs the same as find_components does.
    tess->mutable_textord()->filter_blobs(page_box.topright(),
                                          &port_blocks, true);
  }
  return os_detect(&port_blocks, osr, tess);
}
// Filter and sample the blobs.
// Returns a non-zero number of blobs if the page was successfully processed, or
// zero if the page had too few characters to be reliable
int os_detect(TO_BLOCK_LIST* port_blocks, OSResults* osr,
              tesseract::Tesseract* tess) {
  int blobs_total = 0;
  TO_BLOCK_IT block_it;
  block_it.set_to_list(port_blocks);
  // Candidate blobs from all text blocks are gathered into filtered_list.
  BLOBNBOX_CLIST filtered_list;
  BLOBNBOX_C_IT filtered_it(&filtered_list);
  for (block_it.mark_cycle_pt(); !block_it.cycled_list();
       block_it.forward ()) {
    TO_BLOCK* to_block = block_it.data();
    // Skip blocks already classified as non-text.
    if (to_block->block->poly_block() &&
        !to_block->block->poly_block()->IsText()) continue;
    BLOBNBOX_IT bbox_it;
    bbox_it.set_to_list(&to_block->blobs);
    for (bbox_it.mark_cycle_pt (); !bbox_it.cycled_list ();
         bbox_it.forward ()) {
      BLOBNBOX* bbox = bbox_it.data();
      C_BLOB* blob = bbox->cblob();
      TBOX box = blob->bounding_box();
      ++blobs_total;
      float y_x = fabs((box.height() * 1.0) / box.width());
      float x_y = 1.0f / y_x;
      // Select a >= 1.0 ratio
      float ratio = x_y > y_x ? x_y : y_x;
      // Blob is ambiguous
      if (ratio > kSizeRatioToReject) continue;
      if (box.height() < kMinAcceptableBlobHeight) continue;
      filtered_it.add_to_end(bbox);
    }
  }
  return os_detect_blobs(NULL, &filtered_list, osr, tess);
}
// Detect orientation and script from a list of blobs.
// Returns a non-zero number of blobs if the list was successfully processed, or
// zero if the list had too few characters to be reliable.
// If allowed_scripts is non-null and non-empty, it is a list of scripts that
// constrains both orientation and script detection to consider only scripts
// from the list.
int os_detect_blobs(const GenericVector<int>* allowed_scripts,
                    BLOBNBOX_CLIST* blob_list, OSResults* osr,
                    tesseract::Tesseract* tess) {
  // Fall back to a local results object when the caller passed none.
  OSResults osr_;
  if (osr == NULL)
    osr = &osr_;
  osr->unicharset = &tess->unicharset;
  OrientationDetector o(allowed_scripts, osr);
  ScriptDetector s(allowed_scripts, osr, tess);
  BLOBNBOX_C_IT filtered_it(blob_list);
  int real_max = MIN(filtered_it.length(), kMaxCharactersToTry);
  // tprintf("Total blobs found = %d\n", blobs_total);
  // tprintf("Number of blobs post-filtering = %d\n", filtered_it.length());
  // tprintf("Number of blobs to try = %d\n", real_max);
  // If there are too few characters, skip this page entirely.
  if (real_max < kMinCharactersToTry / 2) {
    tprintf("Too few characters. Skipping this page\n");
    return 0;
  }
  // Copy the blob pointers into an array so they can be visited in the
  // pseudo-random order produced by QRSequenceGenerator.
  BLOBNBOX** blobs = new BLOBNBOX*[filtered_it.length()];
  int number_of_blobs = 0;
  for (filtered_it.mark_cycle_pt (); !filtered_it.cycled_list ();
       filtered_it.forward ()) {
    blobs[number_of_blobs++] = (BLOBNBOX*)filtered_it.data();
  }
  QRSequenceGenerator sequence(number_of_blobs);
  int num_blobs_evaluated = 0;
  for (int i = 0; i < real_max; ++i) {
    // Stop early once the detectors are confident, but only after a minimum
    // number of blobs has been scored.
    if (os_detect_blob(blobs[sequence.GetVal()], &o, &s, osr, tess)
        && i > kMinCharactersToTry) {
      break;
    }
    ++num_blobs_evaluated;
  }
  delete [] blobs;
  // Make sure the best_result is up-to-date
  int orientation = o.get_orientation();
  osr->update_best_script(orientation);
  return num_blobs_evaluated;
}
// Processes a single blob to estimate script and orientation.
// Return true if estimate of orientation and script satisfies stopping
// criteria.
bool os_detect_blob(BLOBNBOX* bbox, OrientationDetector* o,
                    ScriptDetector* s, OSResults* osr,
                    tesseract::Tesseract* tess) {
  tess->tess_cn_matching.set_value(true);  // turn it on
  tess->tess_bn_matching.set_value(false);
  C_BLOB* blob = bbox->cblob();
  TBLOB* tblob = TBLOB::PolygonalCopy(tess->poly_allow_detailed_fx, blob);
  TBOX box = tblob->bounding_box();
  // current_rotation accumulates 90-degree steps as the loop advances.
  FCOORD current_rotation(1.0f, 0.0f);
  FCOORD rotation90(0.0f, 1.0f);
  BLOB_CHOICE_LIST ratings[4];
  // Test the 4 orientations
  for (int i = 0; i < 4; ++i) {
    // Normalize the blob. Set the origin to the place we want to be the
    // bottom-middle after rotation.
    // Scaling is to make the rotated height the x-height.
    float scaling = static_cast<float>(kBlnXHeight) / box.height();
    float x_origin = (box.left() + box.right()) / 2.0f;
    float y_origin = (box.bottom() + box.top()) / 2.0f;
    if (i == 0 || i == 2) {
      // Rotation is 0 or 180.
      y_origin = i == 0 ? box.bottom() : box.top();
    } else {
      // Rotation is 90 or 270.
      scaling = static_cast<float>(kBlnXHeight) / box.width();
      x_origin = i == 1 ? box.left() : box.right();
    }
    TBLOB* rotated_blob = new TBLOB(*tblob);
    // Fixed: the second argument had been corrupted to "¤t_rotation"
    // (HTML-entity mangling of "&curren"); it must be &current_rotation.
    rotated_blob->Normalize(NULL, &current_rotation, NULL,
                            x_origin, y_origin, scaling, scaling,
                            0.0f, static_cast<float>(kBlnBaselineOffset),
                            false, NULL);
    tess->AdaptiveClassifier(rotated_blob, ratings + i);
    delete rotated_blob;
    current_rotation.rotate(rotation90);
  }
  delete tblob;
  // Feed all four rating lists to both detectors; stop only when both agree.
  bool stop = o->detect_blob(ratings);
  s->detect_blob(ratings);
  int orientation = o->get_orientation();
  stop = s->must_stop(orientation) && stop;
  return stop;
}
// Remembers the output accumulator and the optional script whitelist; owns
// neither pointer.
OrientationDetector::OrientationDetector(
    const GenericVector<int>* allowed_scripts, OSResults* osr) {
  allowed_scripts_ = allowed_scripts;
  osr_ = osr;
}
// Score the given blob and return true if it is now sure of the orientation
// after adding this block.
// scores points at four BLOB_CHOICE_LISTs, one per candidate orientation.
bool OrientationDetector::detect_blob(BLOB_CHOICE_LIST* scores) {
  float blob_o_score[4] = {0.0f, 0.0f, 0.0f, 0.0f};
  float total_blob_o_score = 0.0f;
  for (int i = 0; i < 4; ++i) {
    BLOB_CHOICE_IT choice_it(scores + i);
    if (!choice_it.empty()) {
      BLOB_CHOICE* choice = NULL;
      if (allowed_scripts_ != NULL && !allowed_scripts_->empty()) {
        // Find the top choice in an allowed script.
        for (choice_it.mark_cycle_pt(); !choice_it.cycled_list() &&
             choice == NULL; choice_it.forward()) {
          int choice_script = choice_it.data()->script_id();
          int s = 0;
          for (s = 0; s < allowed_scripts_->size(); ++s) {
            if ((*allowed_scripts_)[s] == choice_script) {
              choice = choice_it.data();
              break;
            }
          }
        }
      } else {
        // No whitelist: simply take the top choice.
        choice = choice_it.data();
      }
      if (choice != NULL) {
        // The certainty score ranges between [-20,0]. This is converted here to
        // [0,1], with 1 indicating best match.
        blob_o_score[i] = 1 + 0.05 * choice->certainty();
        total_blob_o_score += blob_o_score[i];
      }
    }
  }
  // No orientation produced a usable choice; nothing to accumulate.
  if (total_blob_o_score == 0.0) return false;
  // Fill in any blanks with the worst score of the others. This is better than
  // picking an arbitrary probability for it and way better than -inf.
  float worst_score = 0.0f;
  int num_good_scores = 0;
  for (int i = 0; i < 4; ++i) {
    if (blob_o_score[i] > 0.0f) {
      ++num_good_scores;
      if (worst_score == 0.0f || blob_o_score[i] < worst_score)
        worst_score = blob_o_score[i];
    }
  }
  if (num_good_scores == 1) {
    // Lower worst if there is only one.
    worst_score /= 2.0f;
  }
  for (int i = 0; i < 4; ++i) {
    if (blob_o_score[i] == 0.0f) {
      blob_o_score[i] = worst_score;
      total_blob_o_score += worst_score;
    }
  }
  // Normalize the orientation scores for the blob and use them to
  // update the aggregated orientation score (as log-probabilities).
  for (int i = 0; total_blob_o_score != 0 && i < 4; ++i) {
    osr_->orientations[i] += log(blob_o_score[i] / total_blob_o_score);
  }
  // TODO(ranjith) Add an early exit test, based on min_orientation_margin,
  // as used in pagesegmain.cpp.
  return false;
}
// Recomputes and returns the current best orientation id (0..3).
int OrientationDetector::get_orientation() {
  osr_->update_best_orientation();
  return osr_->best_result.orientation_id;
}
// Registers (or looks up) the ids of the scripts this detector scores,
// including the Korean/Japanese/Fraktur pseudo-scripts, in the engine's
// unicharset. Owns none of the passed-in pointers.
ScriptDetector::ScriptDetector(const GenericVector<int>* allowed_scripts,
                               OSResults* osr, tesseract::Tesseract* tess) {
  osr_ = osr;
  tess_ = tess;
  allowed_scripts_ = allowed_scripts;
  katakana_id_ = tess_->unicharset.add_script(katakana_script);
  hiragana_id_ = tess_->unicharset.add_script(hiragana_script);
  han_id_ = tess_->unicharset.add_script(han_script);
  hangul_id_ = tess_->unicharset.add_script(hangul_script);
  japanese_id_ = tess_->unicharset.add_script(japanese_script_);
  korean_id_ = tess_->unicharset.add_script(korean_script_);
  latin_id_ = tess_->unicharset.add_script(latin_script);
  fraktur_id_ = tess_->unicharset.add_script(fraktur_script_);
}
// Score the given blob and return true if it is now sure of the script after
// adding this blob. For each of the 4 orientations, the first unambiguous
// script match gets credit in osr_->scripts_na, with Fraktur split out of
// Latin and Han credited partially to the Korean/Japanese pseudo-scripts.
void ScriptDetector::detect_blob(BLOB_CHOICE_LIST* scores) {
  bool done[kMaxNumberOfScripts];
  for (int i = 0; i < 4; ++i) {
    // Only the first (best) choice per script is considered.
    for (int j = 0; j < kMaxNumberOfScripts; ++j)
      done[j] = false;
    BLOB_CHOICE_IT choice_it;
    choice_it.set_to_list(scores + i);
    float prev_score = -1;
    int script_count = 0;
    int prev_id = -1;
    int prev_fontinfo_id = -1;
    const char* prev_unichar = "";
    const char* unichar = "";
    for (choice_it.mark_cycle_pt(); !choice_it.cycled_list();
         choice_it.forward()) {
      BLOB_CHOICE* choice = choice_it.data();
      int id = choice->script_id();
      if (allowed_scripts_ != NULL && !allowed_scripts_->empty()) {
        // Check that the choice is in an allowed script.
        int s = 0;
        for (s = 0; s < allowed_scripts_->size(); ++s) {
          if ((*allowed_scripts_)[s] == id) break;
        }
        if (s == allowed_scripts_->size()) continue;  // Not found in list.
      }
      // Script already processed before.
      if (done[id]) continue;
      done[id] = true;
      unichar = tess_->unicharset.id_to_unichar(choice->unichar_id());
      // Save data from the first match
      if (prev_score < 0) {
        prev_score = -choice->certainty();
        script_count = 1;
        prev_id = id;
        prev_unichar = unichar;
        prev_fontinfo_id = choice->fontinfo_id();
      } else if (-choice->certainty() < prev_score + kNonAmbiguousMargin) {
        // A second script within the margin makes the blob ambiguous.
        ++script_count;
      }
      // Digits match in many scripts; do not let them vote.
      if (strlen(prev_unichar) == 1)
        if (unichar[0] >= '0' && unichar[0] <= '9')
          break;
      // if script_count is >= 2, character is ambiguous, skip other matches
      // since they are useless.
      if (script_count >= 2)
        break;
    }
    // Character is non ambiguous
    if (script_count == 1) {
      // Update the score of the winning script
      osr_->scripts_na[i][prev_id] += 1.0;
      // Workaround for Fraktur
      if (prev_id == latin_id_) {
        if (prev_fontinfo_id >= 0) {
          const tesseract::FontInfo &fi =
              tess_->get_fontinfo_table().get(prev_fontinfo_id);
          //printf("Font: %s i:%i b:%i f:%i s:%i k:%i (%s)\n", fi.name,
          //       fi.is_italic(), fi.is_bold(), fi.is_fixed_pitch(),
          //       fi.is_serif(), fi.is_fraktur(),
          //       prev_unichar);
          // Re-assign the credit from Latin to the Fraktur pseudo-script.
          if (fi.is_fraktur()) {
            osr_->scripts_na[i][prev_id] -= 1.0;
            osr_->scripts_na[i][fraktur_id_] += 1.0;
          }
        }
      }
      // Update Japanese / Korean pseudo-scripts
      if (prev_id == katakana_id_)
        osr_->scripts_na[i][japanese_id_] += 1.0;
      if (prev_id == hiragana_id_)
        osr_->scripts_na[i][japanese_id_] += 1.0;
      if (prev_id == hangul_id_)
        osr_->scripts_na[i][korean_id_] += 1.0;
      if (prev_id == han_id_) {
        osr_->scripts_na[i][korean_id_] += kHanRatioInKorean;
        osr_->scripts_na[i][japanese_id_] += kHanRatioInJapanese;
      }
    }
  }  // iterate over each orientation
}
// Refreshes the best script for the given orientation and returns true once
// the normalized script confidence exceeds 1 (top score beats the runner-up
// by more than kScriptAcceptRatio).
bool ScriptDetector::must_stop(int orientation) {
  osr_->update_best_script(orientation);
  return osr_->best_result.sconfidence > 1;
}
// Helper method to convert an orientation index to its value in degrees.
// The value represents the amount of clockwise rotation in degrees that must
// be applied for the text to be upright (readable). Returns -1 for any id
// outside [0, 3].
const int OrientationIdToValue(const int& id) {
  static const int kRotationDegrees[4] = {0, 270, 180, 90};
  if (id < 0 || id > 3)
    return -1;
  return kRotationDegrees[id];
}
| C++ |
// Copyright 2011 Google Inc. All Rights Reserved.
// Author: rays@google.com (Ray Smith)
///////////////////////////////////////////////////////////////////////
// File: cubeclassifier.cpp
// Description: Cube implementation of a ShapeClassifier.
// Author: Ray Smith
// Created: Wed Nov 23 10:39:45 PST 2011
//
// (C) Copyright 2011, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#include "cubeclassifier.h"
#include "char_altlist.h"
#include "char_set.h"
#include "cube_object.h"
#include "cube_reco_context.h"
#include "tessclassifier.h"
#include "tesseractclass.h"
#include "trainingsample.h"
#include "unicharset.h"
namespace tesseract {
// Constructs a CubeClassifier on top of an existing Tesseract instance,
// reusing its Cube recognition context and its shape table.
CubeClassifier::CubeClassifier(tesseract::Tesseract* tesseract)
    : cube_cntxt_(tesseract->GetCubeRecoContext()),
      shape_table_(*tesseract->shape_table()) {
}
// Destructor. Note that cube_cntxt_ is not owned by this class.
CubeClassifier::~CubeClassifier() {
}
// Classifies the given [training] sample, writing to results.
// See ShapeClassifier for a full description.
int CubeClassifier::UnicharClassifySample(
const TrainingSample& sample, Pix* page_pix, int debug,
UNICHAR_ID keep_this, GenericVector<UnicharRating>* results) {
results->clear();
if (page_pix == NULL) return 0;
ASSERT_HOST(cube_cntxt_ != NULL);
const TBOX& char_box = sample.bounding_box();
CubeObject* cube_obj = new tesseract::CubeObject(
cube_cntxt_, page_pix, char_box.left(),
pixGetHeight(page_pix) - char_box.top(),
char_box.width(), char_box.height());
CharAltList* alt_list = cube_obj->RecognizeChar();
if (alt_list != NULL) {
alt_list->Sort();
CharSet* char_set = cube_cntxt_->CharacterSet();
for (int i = 0; i < alt_list->AltCount(); ++i) {
// Convert cube representation to a shape_id.
int alt_id = alt_list->Alt(i);
int unichar_id = char_set->UnicharID(char_set->ClassString(alt_id));
if (unichar_id >= 0)
results->push_back(UnicharRating(unichar_id, alt_list->AltProb(i)));
}
delete alt_list;
}
delete cube_obj;
return results->size();
}
// Provides access to the ShapeTable that this classifier works with.
// The returned pointer refers to the table copied from the Tesseract
// instance at construction time.
const ShapeTable* CubeClassifier::GetShapeTable() const {
  return &shape_table_;
}
// Constructs a combined classifier: a Tesseract-based pruner produces the
// candidate set, which is then re-rated by Cube. The pruner is owned by
// this class; the Cube context is borrowed from the Tesseract instance.
CubeTessClassifier::CubeTessClassifier(tesseract::Tesseract* tesseract)
    : cube_cntxt_(tesseract->GetCubeRecoContext()),
      shape_table_(*tesseract->shape_table()),
      pruner_(new TessClassifier(true, tesseract)) {
}
// Destructor. Releases the owned pruner classifier.
CubeTessClassifier::~CubeTessClassifier() {
  delete pruner_;
}
// Classifies the given [training] sample, writing to results.
// See ShapeClassifier for a full description.
int CubeTessClassifier::UnicharClassifySample(
const TrainingSample& sample, Pix* page_pix, int debug,
UNICHAR_ID keep_this, GenericVector<UnicharRating>* results) {
int num_results = pruner_->UnicharClassifySample(sample, page_pix, debug,
keep_this, results);
if (page_pix == NULL) return num_results;
ASSERT_HOST(cube_cntxt_ != NULL);
const TBOX& char_box = sample.bounding_box();
CubeObject* cube_obj = new tesseract::CubeObject(
cube_cntxt_, page_pix, char_box.left(),
pixGetHeight(page_pix) - char_box.top(),
char_box.width(), char_box.height());
CharAltList* alt_list = cube_obj->RecognizeChar();
CharSet* char_set = cube_cntxt_->CharacterSet();
if (alt_list != NULL) {
for (int r = 0; r < num_results; ++r) {
// Get the best cube probability of the unichar in the result.
double best_prob = 0.0;
for (int i = 0; i < alt_list->AltCount(); ++i) {
int alt_id = alt_list->Alt(i);
int unichar_id = char_set->UnicharID(char_set->ClassString(alt_id));
if (unichar_id == (*results)[r].unichar_id &&
alt_list->AltProb(i) > best_prob) {
best_prob = alt_list->AltProb(i);
}
}
(*results)[r].rating = best_prob;
}
delete alt_list;
// Re-sort by rating.
results->sort(&UnicharRating::SortDescendingRating);
}
delete cube_obj;
return results->size();
}
// Provides access to the ShapeTable that this classifier works with.
// The returned pointer refers to the table copied from the Tesseract
// instance at construction time.
const ShapeTable* CubeTessClassifier::GetShapeTable() const {
  return &shape_table_;
}
} // namespace tesseract
| C++ |
///////////////////////////////////////////////////////////////////////
// File: svmnode.h
// Description: ScrollView Menu Node
// Author: Joern Wanke
// Created: Thu Nov 29 2007
//
// (C) Copyright 2007, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
//
// A SVMenuNode is an entity which contains the mapping from a menu entry on
// the server side to the corresponding associated commands on the client.
// It is designed to be a tree structure with a root node, which can then be
// used to generate the appropriate messages to the server to display the
// menu structure there.
// A SVMenuNode can both be used in the context of popup menus as well as
// menu bars.
#ifndef TESSERACT_VIEWER_SVMNODE_H__
#define TESSERACT_VIEWER_SVMNODE_H__
#include "strngs.h"
class ScrollView;
class SVMenuNode {
 public:
  // Creating the (empty) root menu node.
  SVMenuNode();
  // Destructor for every node.
  ~SVMenuNode();
  // Create a new sub menu node with just a caption. This is used to create
  // nodes which act as parent nodes to other nodes (e.g. submenus).
  SVMenuNode* AddChild(const char* txt);
  // Create a "normal" menu node which is associated with a command event.
  void AddChild(const char* txt, int command_event);
  // Create a flag (check box) menu node with initial toggle value tv.
  void AddChild(const char* txt, int command_event, int tv);
  // Create a menu node with an associated value (which might be changed
  // through the gui).
  void AddChild(const char* txt, int command_event, const char* val);
  // Create a menu node with an associated value and description.
  void AddChild(const char* txt, int command_event,
                const char* val, const char* desc);
  // Build a menu structure for the server and send the necessary messages.
  // Should be called on the root node. If menu_bar is true, a menu_bar menu
  // is built (e.g. on top of the window), if it is false a popup menu is
  // built which gets shown by right clicking on the window.
  void BuildMenu(ScrollView *sv, bool menu_bar = true);
 private:
  // Constructor holding the actual node data.
  SVMenuNode(int command_event, const char* txt, int tv,
             bool check_box_entry, const char* val, const char* desc);
  // Adds a new menu node to the current node.
  void AddChild(SVMenuNode* svmn);
  // The parent node of this node.
  SVMenuNode* parent_;
  // The first child of this node.
  SVMenuNode* child_;
  // The next "sibling" of this node (e.g. same parent).
  SVMenuNode* next_;
  // Whether this menu node actually is a flag.
  bool is_check_box_entry_;
  // The command event associated with a specific menu node. Should be unique.
  int cmd_event_;
  // The caption associated with a specific menu node.
  STRING text_;
  // The value of the flag (if this menu node is a flag).
  bool toggle_value_;
  // The value of the menu node. (optional)
  STRING value_;
  // A description of the value. (optional)
  STRING description_;
};
#endif // TESSERACT_VIEWER_SVMNODE_H__
| C++ |
///////////////////////////////////////////////////////////////////////
// File: scrollview.h
// Description: ScrollView
// Author: Joern Wanke
// Created: Thu Nov 29 2007
//
// (C) Copyright 2007, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
//
// ScrollView is designed as an UI which can be run remotely. This is the
// client code for it, the server part is written in java. The client consists
// mainly of 2 parts:
// The "core" ScrollView which sets up the remote connection,
// takes care of event handling etc.
// The other part of ScrollView consists of predefined API calls through LUA,
// which can basically be used to get a zoomable canvas in which it is possible
// to draw lines, text etc.
// Technically, thanks to LUA, it's even possible to bypass the here defined LUA
// API calls at all and generate a java user interface from scratch (or
// basically generate any kind of java program, possibly even dangerous ones).
#ifndef TESSERACT_VIEWER_SCROLLVIEW_H__
#define TESSERACT_VIEWER_SCROLLVIEW_H__
// TODO(rays) Move ScrollView into the tesseract namespace.
#ifndef OCR_SCROLLVIEW_H__
#include <stdio.h>
class ScrollView;
class SVNetwork;
class SVMutex;
class SVSemaphore;
struct SVPolyLineBuffer;
// All event types the ScrollView server can report to the client.
enum SVEventType {
  SVET_DESTROY,    // Window has been destroyed by user.
  SVET_EXIT,       // User has destroyed the last window by clicking on the 'X'.
  SVET_CLICK,      // Left button pressed.
  SVET_SELECTION,  // Left button selection.
  SVET_INPUT,      // There is some input (single key or a whole string).
  SVET_MOUSE,      // The mouse has moved with a button pressed.
  SVET_MOTION,     // The mouse has moved with no button pressed.
  SVET_HOVER,      // The mouse has stayed still for a second.
  SVET_POPUP,      // A command selected through a popup menu.
  SVET_MENU,       // A command selected through the menubar.
  SVET_ANY,        // Any of the above.
  SVET_COUNT       // Array sizing.
};
// An SVEvent is the record of a single event (click, selection, menu
// command, input, ...) transferred from the ScrollView server to the client.
struct SVEvent {
  ~SVEvent() { delete [] parameter; }  // Owns its parameter string.
  SVEvent* copy();
  SVEventType type;    // What kind of event.
  ScrollView* window;  // Window event relates to.
  int x;               // Coords of click or selection.
  int y;
  int x_size;          // Size of selection.
  int y_size;
  int command_id;      // The ID of the possibly associated event (e.g. MENU)
  char* parameter;     // Any string that might have been passed as argument.
  int counter;         // Used to detect which kind of event to process next.
  SVEvent() {
    window = NULL;
    parameter = NULL;
  }
  // NOTE(review): copy constructor and assignment are declared here;
  // duplication appears to be intended to go through copy() instead.
  SVEvent(const SVEvent&);
  SVEvent& operator=(const SVEvent&);
};
// The SVEventHandler class is used for Event handling: If you register your
// class as SVEventHandler to a ScrollView Window, the SVEventHandler will be
// called whenever an appropriate event occurs.
class SVEventHandler {
  public:
    virtual ~SVEventHandler() {}
// Gets called by the SV Window. Does nothing by default; overwrite this
// to implement the desired behaviour.
    virtual void Notify(const SVEvent* sve) { }
};
// The ScrollView class provides the external API to the scrollviewer process.
// The scrollviewer process manages windows and displays images, graphics and
// text while allowing the user to zoom and scroll the windows arbitrarily.
// Each ScrollView class instance represents one window, and stuff is drawn in
// the window through method calls on the class. The constructor is used to
// create the class instance (and the window).
class ScrollView {
 public:
// Color enum for pens and brushes.
  enum Color {
    NONE,
    BLACK,
    WHITE,
    RED,
    YELLOW,
    GREEN,
    CYAN,
    BLUE,
    MAGENTA,
    AQUAMARINE,
    DARK_SLATE_BLUE,
    LIGHT_BLUE,
    MEDIUM_BLUE,
    MIDNIGHT_BLUE,
    NAVY_BLUE,
    SKY_BLUE,
    SLATE_BLUE,
    STEEL_BLUE,
    CORAL,
    BROWN,
    SANDY_BROWN,
    GOLD,
    GOLDENROD,
    DARK_GREEN,
    DARK_OLIVE_GREEN,
    FOREST_GREEN,
    LIME_GREEN,
    PALE_GREEN,
    YELLOW_GREEN,
    LIGHT_GREY,
    DARK_SLATE_GREY,
    DIM_GREY,
    GREY,
    KHAKI,
    MAROON,
    ORANGE,
    ORCHID,
    PINK,
    PLUM,
    INDIAN_RED,
    ORANGE_RED,
    VIOLET_RED,
    SALMON,
    TAN,
    TURQUOISE,
    DARK_TURQUOISE,
    VIOLET,
    WHEAT,
    GREEN_YELLOW // Make sure this one is last.
};
  ~ScrollView();
#ifndef GRAPHICS_DISABLED
// Create a window. The pixel size of the window may be 0,0, in which case
// a default size is selected based on the size of your canvas.
// The canvas may not be 0,0 in size!
  ScrollView(const char* name, int x_pos, int y_pos, int x_size, int y_size,
             int x_canvas_size, int y_canvas_size);
// With a flag whether the y axis is reversed.
  ScrollView(const char* name, int x_pos, int y_pos, int x_size, int y_size,
             int x_canvas_size, int y_canvas_size, bool y_axis_reversed);
// Connect to a server other than localhost.
  ScrollView(const char* name, int x_pos, int y_pos, int x_size, int y_size,
             int x_canvas_size, int y_canvas_size, bool y_axis_reversed,
             const char* server_name);
/*******************************************************************************
* Event handling
* To register as listener, the class has to derive from the SVEventHandler
* class, which consists of a notifyMe(SVEvent*) function that should be
* overwritten to process the event the way you want.
*******************************************************************************/
// Add an Event Listener to this ScrollView Window.
  void AddEventHandler(SVEventHandler* listener);
// Block until an event of the given type is received.
  SVEvent* AwaitEvent(SVEventType type);
// Block until any event on any window is received.
  SVEvent* AwaitEventAnyWindow();
/*******************************************************************************
* Getters and Setters
*******************************************************************************/
// Returns the title of the window.
  const char* GetName() { return window_name_; }
// Returns the unique ID of the window.
  int GetId() { return window_id_; }
/*******************************************************************************
* API functions for LUA calls
* the implementations for these can be found in svapi.cc
* (keep in mind that the window is actually created through the ScrollView
* constructor, so this is not listed here)
*******************************************************************************/
// Draw a Pix on (x,y).
  void Image(struct Pix* image, int x_pos, int y_pos);
// Flush buffers and update display.
  static void Update();
// Exit the program.
  static void Exit();
// Update the contents of a specific window.
  void UpdateWindow();
// Erase all content from the window, but do not destroy it.
  void Clear();
// Set pen color with an enum.
  void Pen(Color color);
// Set pen color to RGB (0-255).
  void Pen(int red, int green, int blue);
// Set pen color to RGBA (0-255).
  void Pen(int red, int green, int blue, int alpha);
// Set brush color with an enum.
  void Brush(Color color);
// Set brush color to RGB (0-255).
  void Brush(int red, int green, int blue);
// Set brush color to RGBA (0-255).
  void Brush(int red, int green, int blue, int alpha);
// Set attributes for future text, like font name (e.g.
// "Times New Roman"), font size etc..
// Note: The underlined flag is currently not supported
  void TextAttributes(const char* font, int pixel_size,
                      bool bold, bool italic, bool underlined);
// Draw line from (x1,y1) to (x2,y2) with the current pencolor.
  void Line(int x1, int y1, int x2, int y2);
// Set the stroke width of the pen.
  void Stroke(float width);
// Draw a rectangle given upper left corner and lower right corner.
// The current pencolor is used as outline, the brushcolor to fill the shape.
  void Rectangle(int x1, int y1, int x2, int y2);
// Draw an ellipse centered on (x,y).
// The current pencolor is used as outline, the brushcolor to fill the shape.
  void Ellipse(int x, int y, int width, int height);
// Draw text with the current pencolor
  void Text(int x, int y, const char* mystring);
// Draw an image from a local filename. This should be faster than createImage.
// WARNING: This only works on a local machine. This also only works image
// types supported by java (like bmp,jpeg,gif,png) since the image is opened by
// the server.
  void Image(const char* image, int x_pos, int y_pos);
// Set the current position to draw from (x,y). In conjunction with...
  void SetCursor(int x, int y);
// ...this function, which draws a line from the current to (x,y) and then
// sets the new position to the new (x,y), this can be used to easily draw
// polygons using vertices
  void DrawTo(int x, int y);
// Set the SVWindow visible/invisible.
  void SetVisible(bool visible);
// Set the SVWindow always on top or not always on top.
  void AlwaysOnTop(bool b);
// Shows a modal dialog with "msg" as question and returns 'y' or 'n'.
  int ShowYesNoDialog(const char* msg);
// Shows a modal dialog with "msg" as question and returns a char* string.
// Constraint: As return, only words (e.g. no whitespaces etc.) are allowed.
  char* ShowInputDialog(const char* msg);
// Adds a messagebox to the SVWindow. This way, it can show the messages...
  void AddMessageBox();
// ...which can be added by this command.
// This is intended as an "debug" output window.
  void AddMessage(const char* format, ...);
// Zoom the window to the rectangle given upper left corner and
// lower right corner.
  void ZoomToRectangle(int x1, int y1, int x2, int y2);
// Custom messages (manipulating java code directly) can be sent through this.
// Send a message to the server and attach the Id of the corresponding window.
// Note: This should only be called if you know what you are doing, since
// you are fiddling with the Java objects on the server directly. Calling
// this just for fun will likely break your application!
// It is public so you can actually make use of the LUA functionalities, but
// be careful!
  void SendMsg(const char* msg, ...);
// Custom messages (manipulating java code directly) can be sent through this.
// Send a message to the server without adding the
// window id. Used for global events like Exit().
// Note: This should only be called if you know what you are doing, since
// you are fiddling with the Java objects on the server directly. Calling
// this just for fun will likely break your application!
// It is public so you can actually make use of the LUA functionalities, but
// be careful!
  static void SendRawMessage(const char* msg);
/*******************************************************************************
* Add new menu entries to parent. If parent is "", the entry gets added to the
* main menubar (toplevel).
*******************************************************************************/
// This adds a new submenu to the menubar.
  void MenuItem(const char* parent, const char* name);
// This adds a new (normal) menu entry with an associated eventID, which should
// be unique among menubar eventIDs.
  void MenuItem(const char* parent, const char* name, int cmdEvent);
  // This adds a new checkbox entry, which might initially be flagged.
  void MenuItem(const char* parent, const char* name,
                int cmdEvent, bool flagged);
// This adds a new popup submenu to the popup menu. If parent is "", the entry
// gets added at "toplevel" popupmenu.
  void PopupItem(const char* parent, const char* name);
// This adds a new popup entry with the associated eventID, which should be
// unique among popup eventIDs.
// If value and desc are given, on a click the server will ask you to modify
// the value and return the new value.
  void PopupItem(const char* parent, const char* name,
                 int cmdEvent, const char* value, const char* desc);
// Returns the correct Y coordinate for a window, depending on whether it might
// have to be flipped (by ySize).
  int TranslateYCoordinate(int y);
 private:
// Transfers a binary Image.
  void TransferBinaryImage(struct Pix* image);
// Transfers a gray scale Image.
  void TransferGrayImage(struct Pix* image);
// Transfers a 32-Bit Image.
  void Transfer32bppImage(struct Pix* image);
// Sets up ScrollView, depending on the variables from the constructor.
  void Initialize(const char* name, int x_pos, int y_pos, int x_size,
                  int y_size, int x_canvas_size, int y_canvas_size,
                  bool y_axis_reversed, const char* server_name);
// Send the current buffered polygon (if any) and clear it.
  void SendPolygon();
// Start the message receiving thread.
  static void* MessageReceiver(void* a);
// Place an event into the event_table (synchronized).
  void SetEvent(SVEvent* svevent);
// Wake up the semaphore.
  void Signal();
// Returns the unique, shared network stream.
  static SVNetwork* GetStream() { return stream_; }
// Starts a new event handler. Called whenever a new window is created.
  static void* StartEventHandler(void* sv);
// Escapes the ' character with a \, so it can be processed by LUA.
  char* AddEscapeChars(const char* input);
  // The event handler for this window.
  SVEventHandler* event_handler_;
  // The name of the window.
  const char* window_name_;
  // The id of the window.
  int window_id_;
  // The points of the currently under-construction polyline.
  SVPolyLineBuffer* points_;
  // Whether the axis is reversed.
  bool y_axis_is_reversed_;
  // Set to true only after the event handler has terminated.
  bool event_handler_ended_;
  // If the y axis is reversed, flip all y values by ySize.
  int y_size_;
  // # of created windows (used to assign an id to each ScrollView* for svmap).
  static int nr_created_windows_;
  // Serial number of sent images to ensure that the viewer knows they
  // are distinct.
  static int image_index_;
  // The stream through which the c++ client is connected to the server.
  static SVNetwork* stream_;
  // Table of all the currently queued events.
  SVEvent* event_table_[SVET_COUNT];
  // Mutex to access the event_table_ in a synchronized fashion.
  SVMutex* mutex_;
  // Semaphore to the thread belonging to this window.
  SVSemaphore* semaphore_;
#endif // GRAPHICS_DISABLED
};
#endif // OCR_SCROLLVIEW_H__
#endif // TESSERACT_VIEWER_SCROLLVIEW_H__
| C++ |
///////////////////////////////////////////////////////////////////////
// File: svmnode.cpp
// Description: ScrollView Menu Node
// Author: Joern Wanke
// Created: Thu Nov 29 2007
//
// (C) Copyright 2007, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
//
// A SVMenuNode is an entity which contains the mapping from a menu entry on
// the server side to the corresponding associated commands on the client.
// It is designed to be a tree structure with a root node, which can then be
// used to generate the appropriate messages to the server to display the
// menu structure there.
// A SVMenuNode can both be used in the context of popup menus as well as
// menu bars.
#include <string.h>
#include <iostream>
#include <cstring>
#include "svmnode.h"
// Include automatically generated configuration file if running autoconf.
#ifdef HAVE_CONFIG_H
#include "config_auto.h"
#endif
#ifndef GRAPHICS_DISABLED
#include "scrollview.h"
// Create the empty root menu node, with just a caption. All other nodes
// should be added to this node or to one of its submenus.
SVMenuNode::SVMenuNode() {
  parent_ = NULL;
  child_ = NULL;
  next_ = NULL;
  cmd_event_ = -1;
  toggle_value_ = false;
  is_check_box_entry_ = false;
}
// Destructor for every node. Note: child/sibling subtrees are deleted by
// BuildMenu(), not here.
SVMenuNode::~SVMenuNode() {
}
// Create a new sub menu node with just a caption. This is used to create
// nodes which act as parent nodes to other nodes (e.g. submenus).
SVMenuNode* SVMenuNode::AddChild(const char* txt) {
  SVMenuNode* submenu = new SVMenuNode(-1, txt, false, false, NULL, NULL);
  AddChild(submenu);
  return submenu;
}
// Create a "normal" menu node which is associated with a command event.
void SVMenuNode::AddChild(const char* txt, int command_event) {
this->AddChild(new SVMenuNode(command_event, txt, false, false, NULL, NULL));
}
// Create a menu node with an associated value (which might be changed
// through the gui).
void SVMenuNode::AddChild(const char* txt, int command_event,
const char* val) {
this->AddChild(new SVMenuNode(command_event, txt, false, false, val, NULL));
}
// Create a menu node with an associated value and description_.
void SVMenuNode::AddChild(const char* txt, int command_event, const char* val,
const char* desc) {
this->AddChild(new SVMenuNode(command_event, txt, false, false, val, desc));
}
// Create a flag menu node.
void SVMenuNode::AddChild(const char* txt, int command_event, int tv) {
this->AddChild(new SVMenuNode(command_event, txt, tv, true, NULL, NULL));
}
// Private constructor used by the various AddChild overloads to set all
// node data at once. The string members are set via the initializer list;
// the link pointers start out detached.
SVMenuNode::SVMenuNode(int command_event, const char* txt,
                       int tv, bool check_box_entry, const char* val,
                       const char* desc)
    : text_(txt), value_(val), description_(desc) {
  parent_ = NULL;
  child_ = NULL;
  next_ = NULL;
  cmd_event_ = command_event;
  is_check_box_entry_ = check_box_entry;
  toggle_value_ = (tv != 0);
}
// Add a child node to this menu node, appending it to the end of the
// sibling chain.
void SVMenuNode::AddChild(SVMenuNode* svmn) {
  svmn->parent_ = this;
  // Walk the chain of next_ links until the empty slot is found.
  SVMenuNode** link = &child_;
  while (*link != NULL) {
    link = &((*link)->next_);
  }
  *link = svmn;
}
// Build a menu structure for the server and send the necessary messages.
// Should be called on the root node. If menu_bar is true, a menu_bar menu
// is built (e.g. on top of the window), if it is false a popup menu is
// built which gets shown by right clicking on the window.
// Deletes itself afterwards.
void SVMenuNode::BuildMenu(ScrollView* sv, bool menu_bar) {
  // The root node (parent_ == NULL) emits no entry itself; only its
  // descendants are sent to the server.
  if ((parent_ != NULL) && (menu_bar)) {
    if (is_check_box_entry_) {
      sv->MenuItem(parent_->text_.string(), text_.string(), cmd_event_,
                   toggle_value_);
    } else {
      sv->MenuItem(parent_->text_.string(), text_.string(), cmd_event_); }
  } else if ((parent_ != NULL) && (!menu_bar)) {
    if (description_.length() > 0) {
      // Popup entry with an editable value and description.
      sv->PopupItem(parent_->text_.string(), text_.string(), cmd_event_,
                    value_.string(), description_.string());
    } else {
      // NOTE(review): this branch drops cmd_event_ and adds a plain popup
      // submenu entry - confirm that is the intended behavior.
      sv->PopupItem(parent_->text_.string(), text_.string());
    }
  }
  // Recurse depth-first: children first, then siblings, deleting each
  // subtree after its messages have been sent.
  if (child_ != NULL) {
    child_->BuildMenu(sv, menu_bar); delete child_;
  }
  if (next_ != NULL) {
    next_->BuildMenu(sv, menu_bar); delete next_;
  }
}
#endif // GRAPHICS_DISABLED
| C++ |
// Copyright 2007 Google Inc. All Rights Reserved.
//
// Author: Joern Wanke
//
// Simple drawing program to illustrate ScrollView capabilities.
//
// Functionality:
// - The menubar is used to select from different sample styles of input.
// - With the RMB it is possible to change the RGB values in different
// popup menus.
// - A LMB click either draws point-to-point, point or text.
// - A LMB dragging either draws a line, a rectangle or ellipse.
// Include automatically generated configuration file if running autoconf.
#ifdef HAVE_CONFIG_H
#include "config_auto.h"
#endif
#ifndef GRAPHICS_DISABLED
#include "scrollview.h"
#include "svmnode.h"
#include <stdlib.h>
#include <iostream>
// The current color values we use, initially white (== ScrollView::WHITE).
// Index 0 = red, 1 = green, 2 = blue; each channel is in [0, 255].
int rgb[3] = { 255, 255, 255 };
// SVPaint is both the owner of the demo window and its event handler: it
// reacts to menu/popup commands and draws into the window on clicks/drags.
class SVPaint : public SVEventHandler {
 public:
  explicit SVPaint(const char* server_name);
// This is the main event handling function that we need to overwrite, defined
// in SVEventHandler.
  void Notify(const SVEvent* sv_event);
 private:
// The handlers take care of the SVET_POPUP, SVET_MENU, SVET_CLICK and
// SVET_SELECTION events.
  void PopupHandler(const SVEvent* sv_event);
  void MenuBarHandler(const SVEvent* sv_event);
  void ClickHandler(const SVEvent* sv_event);
  void SelectionHandler(const SVEvent* sv_event);
// Convenience functions to build little menus.
  SVMenuNode* BuildPopupMenu();
  SVMenuNode* BuildMenuBar();
// Our window.
  ScrollView* window_;
// The mode we are in when an SVET_CLICK or an SVET_SELECTION event occurs.
  int click_mode_;
  int drag_mode_;
// In the point-to-point drawing mode, we need to set a start-point the first
// time we call it (e.g. call SetCursor).
  bool has_start_point_;
};
// Build a sample popup menu with one editable entry per color channel.
SVMenuNode* SVPaint::BuildPopupMenu() {
  // Empty root node; its children become the popup entries.
  // Initial color is white, so all three channels start at "255".
  // Arguments: caption, command id, initial value, shown description.
  SVMenuNode* menu_root = new SVMenuNode();
  menu_root->AddChild("R", 1, "255", "Red Color Value?");
  menu_root->AddChild("G", 2, "255", "Green Color Value?");
  menu_root->AddChild("B", 3, "255", "Blue Color Value?");
  return menu_root;
}
// Build a sample menu bar with "Clicking" and "Dragging" mode submenus.
SVMenuNode* SVPaint::BuildMenuBar() {
  SVMenuNode* menu_root = new SVMenuNode();  // Empty root node.
  // "Clicking" submenu: modes 1-3 for single-click events.
  SVMenuNode* click_menu = menu_root->AddChild("Clicking");
  // "Dragging" submenu: modes 4-6 for click-and-drag selections.
  SVMenuNode* drag_menu = menu_root->AddChild("Dragging");
  // Entries are (caption, command id).
  click_menu->AddChild("Point to Point Drawing", 1);
  click_menu->AddChild("Point Drawing", 2);
  click_menu->AddChild("Text Drawing", 3);
  drag_menu->AddChild("Line Drawing", 4);
  drag_menu->AddChild("Rectangle Drawing", 5);
  drag_menu->AddChild("Ellipse Drawing", 6);
  return menu_root;
}
// Takes care of the SVET_POPUP events.
// In our case, SVET_POPUP is used to set RGB values.
void SVPaint::PopupHandler(const SVEvent* sv_event) {
  // Popup command ids 1..3 map directly onto the R, G, B slots, so no
  // per-channel branching is needed.
  const int channel = sv_event->command_id - 1;
  rgb[channel] = atoi(sv_event->parameter);
  window_->Pen(rgb[0], rgb[1], rgb[2]);
}
// Takes care of the SVET_MENU events.
// In our case, we change either the click_mode_ (commands 1-3)
// or the drag_mode_ (commands 4-6).
void SVPaint::MenuBarHandler(const SVEvent* sv_event) {
  const int cmd = sv_event->command_id;
  if (cmd >= 1 && cmd <= 3) {
    click_mode_ = cmd;
    // A new click mode invalidates any pending point-to-point start point.
    has_start_point_ = false;
  } else {
    drag_mode_ = cmd;
  }
}
// Takes care of the SVET_CLICK events.
// Depending on the click_mode_ we are in, either do Point-to-Point drawing,
// point drawing, or draw text.
void SVPaint::ClickHandler(const SVEvent* sv_event) {
  const int x = sv_event->x;
  const int y = sv_event->y;
  switch (click_mode_) {
    case 1: {  // Point-to-point drawing.
      if (!has_start_point_) {
        // First click only establishes the start point.
        has_start_point_ = true;
        window_->SetCursor(x, y);
      } else {
        window_->DrawTo(x, y);
      }
      break;
    }
    case 2: {  // Point drawing, simulated by a 1-pixel line.
      window_->Line(x, y, x, y);
      break;
    }
    case 3: {  // Text drawing.
      // Show a modal input dialog, draw the input, then free the string
      // returned by the dialog.
      char* input = window_->ShowInputDialog("Text:");
      window_->Text(x, y, input);
      delete [] input;
      break;
    }
  }
}
// Takes care of the SVET_SELECTION events.
// Depending on the drag_mode_ we are in, either draw a line, a rectangle or
// an ellipse.
void SVPaint::SelectionHandler(const SVEvent* sv_event) {
  const int end_x = sv_event->x;
  const int end_y = sv_event->y;
  // FIXME inversed x_size, y_size
  const int start_x = end_x - sv_event->x_size;
  const int start_y = end_y - sv_event->y_size;
  switch (drag_mode_) {
    case 4:  // Line.
      window_->Line(end_x, end_y, start_x, start_y);
      break;
    case 5:  // Rectangle.
      window_->Rectangle(end_x, end_y, start_x, start_y);
      break;
    case 6:  // Ellipse.
      window_->Ellipse(start_x, start_y, sv_event->x_size, sv_event->y_size);
      break;
  }
}
// The event handling function from ScrollView which we have to overwrite.
// We handle CLICK, SELECTION, MENU and POPUP and throw away all other events.
void SVPaint::Notify(const SVEvent* sv_event) {
  switch (sv_event->type) {
    case SVET_CLICK:     ClickHandler(sv_event);     break;
    case SVET_SELECTION: SelectionHandler(sv_event); break;
    case SVET_MENU:      MenuBarHandler(sv_event);   break;
    case SVET_POPUP:     PopupHandler(sv_event);     break;
    default:             break;  // All other events are ignored.
  }
}
// Builds a new window, initializes the variables and event handler and builds
// the menu.
SVPaint::SVPaint(const char *server_name) {
  window_ = new ScrollView("ScrollView Paint Example", // window caption
                           0, 0, // x,y window position
                           500, 500, // window size
                           500, 500, // canvas size
                           false, // whether the Y axis is inversed.
                                  // this is included due to legacy
                                  // reasons for tesseract and enables
                                  // us to have (0,0) as the LOWER left
                                  // of the coordinate system.
                           server_name); // the server address.
  // Set the start modes to point-to-point and line drawing.
  click_mode_ = 1;
  drag_mode_ = 4;
  has_start_point_ = false;
  // Build our menus and add them to the window. The flag illustrates whether
  // this is a menu bar. Note: BuildMenu deletes each subtree after sending it.
  SVMenuNode* popup_menu = BuildPopupMenu();
  popup_menu->BuildMenu(window_,false);
  SVMenuNode* bar_menu = BuildMenuBar();
  bar_menu->BuildMenu(window_,true);
  // Set the initial color values to White (could also be done by
  // passing (rgb[0], rgb[1], rgb[2]).
  window_->Pen(ScrollView::WHITE);
  window_->Brush(ScrollView::WHITE);
  // Adds the event handler to the window. This actually ensures that Notify
  // gets called when events occur.
  window_->AddEventHandler(this);
  // Set the window visible (calling this is important to actually render
  // everything. Without this call, the window would also be drawn, but the
  // menu bars would be missing.
  window_->SetVisible(true);
  // Rest this thread until its window is destroyed.
  // Note that a special eventhandling thread was created when constructing
  // the window. Due to this, the application will not deadlock here.
  window_->AwaitEvent(SVET_DESTROY);
  // We now have 3 Threads running:
  // (1) The MessageReceiver thread which fetches messages and distributes them
  // (2) The EventHandler thread which handles all events for window_
  // (3) The main thread which waits on window_ for a DESTROY event (blocked)
}
// If a parameter is given, we try to connect to the given server.
// This enables us to test the remote capabilities of ScrollView.
int main(int argc, char** argv) {
  // Default to a local server unless an address was given on the command line.
  const char* server_name = (argc > 1) ? argv[1] : "localhost";
  SVPaint svp(server_name);
}
#endif // GRAPHICS_DISABLED
| C++ |
///////////////////////////////////////////////////////////////////////
// File: scrollview.cc
// Description: ScrollView
// Author: Joern Wanke
// Created: Thu Nov 29 2007
//
// (C) Copyright 2007, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
//
#include <stdarg.h>
#include <limits.h>
#include <string.h>
#include <map>
#include <utility>
#include <algorithm>
#include <vector>
#include <string>
#include <cstring>
#include <climits>
// Include automatically generated configuration file if running autoconf.
#ifdef HAVE_CONFIG_H
#include "config_auto.h"
#endif
#include "scrollview.h"
#ifdef _MSC_VER
#pragma warning(disable:4786) // Don't give stupid warnings for stl
#pragma warning(disable:4018) // signed/unsigned warnings
#pragma warning(disable:4530) // exception warnings
#endif
const int kSvPort = 8461;
const int kMaxMsgSize = 4096;
const int kMaxIntPairSize = 45; // Holds %d,%d, for upto 64 bit.
#include "svutil.h"
#include "allheaders.h"
struct SVPolyLineBuffer {
bool empty; // Independent indicator to allow SendMsg to call SendPolygon.
std::vector<int> xcoords;
std::vector<int> ycoords;
};
// A map between the window IDs and their corresponding pointers.
static std::map<int, ScrollView*> svmap;
static SVMutex* svmap_mu;
// A map of all semaphores waiting for a specific event on a specific window.
static std::map<std::pair<ScrollView*, SVEventType>,
std::pair<SVSemaphore*, SVEvent*> > waiting_for_events;
static SVMutex* waiting_for_events_mu;
// Returns a deep copy of this event, including a freshly allocated copy of
// the parameter string. The caller owns the returned object.
SVEvent* SVEvent::copy() {
  SVEvent* any = new SVEvent;
  any->command_id = command_id;
  any->counter = counter;
  // Single strlen + memcpy of the terminated string replaces the original
  // strncpy + manual termination, which scanned the string three times.
  // NOTE(review): assumes parameter is non-NULL here, as the original did.
  const size_t param_len = strlen(parameter);
  any->parameter = new char[param_len + 1];
  memcpy(any->parameter, parameter, param_len + 1);
  any->type = type;
  any->x = x;
  any->y = y;
  any->x_size = x_size;
  any->y_size = y_size;
  any->window = window;
  return any;
}
#ifndef GRAPHICS_DISABLED
/// This is the main loop which handles the ScrollView-logic from the server
/// to the client. It basically loops through messages, parses them to events
/// and distributes it to the waiting handlers.
/// It is run from a different thread and synchronizes via SVSync.
void* ScrollView::MessageReceiver(void* a) {
  int counter_event_id = 0; // ongoing counter
  char* message = NULL;
  // Wait until a new message appears in the input stream_.
  do {
    message = ScrollView::GetStream()->Receive();
  } while (message == NULL);
// This is the main loop which iterates until the server is dead (strlen = -1).
// It basically parses for 3 different messagetypes and then distributes the
// events accordingly.
  while (1) {
    // The new event we create.
    SVEvent* cur = new SVEvent;
    // The ID of the corresponding window.
    int window_id;
    int ev_type;
    int n;
    // Fill the new SVEvent properly. %n records where the numeric header
    // ends so the remainder of the message becomes the parameter string.
    // NOTE(review): n is left uninitialized if the header fails to parse —
    // presumably the server always sends a well-formed header; verify.
    sscanf(message, "%d,%d,%d,%d,%d,%d,%d,%n", &window_id, &ev_type, &cur->x,
           &cur->y, &cur->x_size, &cur->y_size, &cur->command_id, &n);
    char* p = (message + n);
    svmap_mu->Lock();
    cur->window = svmap[window_id];
    if (cur->window != NULL) {
      // Deep-copy the parameter tail and strip the trailing newline.
      cur->parameter = new char[strlen(p) + 1];
      strncpy(cur->parameter, p, strlen(p) + 1);
      if (strlen(p) > 0) {  // remove the last \n
        cur->parameter[strlen(p)] = '\0';
      }
      cur->type = static_cast<SVEventType>(ev_type);
      // Correct selection coordinates so x,y is the min pt and size is +ve.
      if (cur->x_size > 0)
        cur->x -= cur->x_size;
      else
        cur->x_size = -cur->x_size;
      if (cur->y_size > 0)
        cur->y -= cur->y_size;
      else
        cur->y_size = -cur->y_size;
      // Returned y will be the bottom-left if y is reversed.
      if (cur->window->y_axis_is_reversed_)
        cur->y = cur->window->TranslateYCoordinate(cur->y + cur->y_size);
      cur->counter = counter_event_id;
      // Increase by 2 since we will also create an SVET_ANY event from cur,
      // which will have a counter_id of cur + 1 (and thus gets processed
      // after cur).
      counter_event_id += 2;
      // In case of an SVET_EXIT event, quit the whole application.
      if (ev_type == SVET_EXIT) { ScrollView::Exit(); }
      // Place two copies of it in the table for the window.
      cur->window->SetEvent(cur);
      // Check if any of the threads currently waiting want it.
      std::pair<ScrollView*, SVEventType> awaiting_list(cur->window,
                                                        cur->type);
      std::pair<ScrollView*, SVEventType> awaiting_list_any(cur->window,
                                                            SVET_ANY);
      std::pair<ScrollView*, SVEventType> awaiting_list_any_window((ScrollView*)0,
                                                            SVET_ANY);
      // Hand the event to at most one waiter, most specific match first.
      waiting_for_events_mu->Lock();
      if (waiting_for_events.count(awaiting_list) > 0) {
        waiting_for_events[awaiting_list].second = cur;
        waiting_for_events[awaiting_list].first->Signal();
      } else if (waiting_for_events.count(awaiting_list_any) > 0) {
        waiting_for_events[awaiting_list_any].second = cur;
        waiting_for_events[awaiting_list_any].first->Signal();
      } else if (waiting_for_events.count(awaiting_list_any_window) > 0) {
        waiting_for_events[awaiting_list_any_window].second = cur;
        waiting_for_events[awaiting_list_any_window].first->Signal();
      } else {
        // No one wanted it, so delete it.
        delete cur;
      }
      waiting_for_events_mu->Unlock();
      // Signal the corresponding semaphore twice (for both copies).
      ScrollView* sv = svmap[window_id];
      if (sv != NULL) {
        sv->Signal();
        sv->Signal();
      }
    } else {
      delete cur;  // Applied to no window.
    }
    svmap_mu->Unlock();
    // Wait until a new message appears in the input stream_.
    do {
      message = ScrollView::GetStream()->Receive();
    } while (message == NULL);
  }
  return 0;
}
// Table to implement the color index values in the old system.
// Each row is {R, G, B, A}, indexed by the ScrollView::Color enum; the
// array is sized so GREEN_YELLOW is the last valid index.
int table_colors[ScrollView::GREEN_YELLOW+1][4]= {
  {0, 0, 0, 0},                  // NONE (transparent)
  {0, 0, 0, 255},                // BLACK.
  {255, 255, 255, 255},          // WHITE.
  {255, 0, 0, 255},              // RED.
  {255, 255, 0, 255},            // YELLOW.
  {0, 255, 0, 255},              // GREEN.
  {0, 255, 255, 255},            // CYAN.
  {0, 0, 255, 255},              // BLUE.
  {255, 0, 255, 255},            // MAGENTA.
  {0, 128, 255, 255},            // AQUAMARINE.
  {0, 0, 64, 255},               // DARK_SLATE_BLUE.
  {128, 128, 255, 255},          // LIGHT_BLUE.
  {64, 64, 255, 255},            // MEDIUM_BLUE.
  {0, 0, 32, 255},               // MIDNIGHT_BLUE.
  {0, 0, 128, 255},              // NAVY_BLUE.
  {192, 192, 255, 255},          // SKY_BLUE.
  {64, 64, 128, 255},            // SLATE_BLUE.
  {32, 32, 64, 255},             // STEEL_BLUE.
  {255, 128, 128, 255},          // CORAL.
  {128, 64, 0, 255},             // BROWN.
  {128, 128, 0, 255},            // SANDY_BROWN.
  {192, 192, 0, 255},            // GOLD.
  {192, 192, 128, 255},          // GOLDENROD.
  {0, 64, 0, 255},               // DARK_GREEN.
  {32, 64, 0, 255},              // DARK_OLIVE_GREEN.
  {64, 128, 0, 255},             // FOREST_GREEN.
  {128, 255, 0, 255},            // LIME_GREEN.
  {192, 255, 192, 255},          // PALE_GREEN.
  {192, 255, 0, 255},            // YELLOW_GREEN.
  {192, 192, 192, 255},          // LIGHT_GREY.
  {64, 64, 128, 255},            // DARK_SLATE_GREY.
  {64, 64, 64, 255},             // DIM_GREY.
  {128, 128, 128, 255},          // GREY.
  {64, 192, 0, 255},             // KHAKI.
  {255, 0, 192, 255},            // MAROON.
  {255, 128, 0, 255},            // ORANGE.
  {255, 128, 64, 255},           // ORCHID.
  {255, 192, 192, 255},          // PINK.
  {128, 0, 128, 255},            // PLUM.
  {255, 0, 64, 255},             // INDIAN_RED.
  {255, 64, 0, 255},             // ORANGE_RED.
  {255, 0, 192, 255},            // VIOLET_RED.
  {255, 192, 128, 255},          // SALMON.
  {128, 128, 0, 255},            // TAN.
  {0, 255, 255, 255},            // TURQUOISE.
  {0, 128, 128, 255},            // DARK_TURQUOISE.
  {192, 0, 255, 255},            // VIOLET.
  {128, 128, 0, 255},            // WHEAT.
  {128, 255, 0, 255}             // GREEN_YELLOW
};
/*******************************************************************************
* Scrollview implementation.
*******************************************************************************/
// Single network connection shared by all windows; created lazily by the
// first call to Initialize().
SVNetwork* ScrollView::stream_ = NULL;
// Monotonic counter used to hand out unique window ids.
int ScrollView::nr_created_windows_ = 0;
// Counter for sent images; not referenced within this file chunk —
// presumably used by image-sending code declared in the header (verify).
int ScrollView::image_index_ = 0;
/// Calls Initialize with all arguments given (full control over position,
/// size, canvas size, y-axis orientation and server address).
ScrollView::ScrollView(const char* name, int x_pos, int y_pos, int x_size,
                       int y_size, int x_canvas_size, int y_canvas_size,
                       bool y_axis_reversed, const char* server_name) {
  Initialize(name, x_pos, y_pos, x_size, y_size, x_canvas_size, y_canvas_size,
             y_axis_reversed, server_name);}
/// Calls Initialize with default argument for server_name_ ("localhost").
ScrollView::ScrollView(const char* name, int x_pos, int y_pos, int x_size,
                       int y_size, int x_canvas_size, int y_canvas_size,
                       bool y_axis_reversed) {
  Initialize(name, x_pos, y_pos, x_size, y_size, x_canvas_size, y_canvas_size,
             y_axis_reversed, "localhost");
}
/// Calls Initialize with default arguments for server_name_ ("localhost")
/// and y_axis_reversed (false).
ScrollView::ScrollView(const char* name, int x_pos, int y_pos, int x_size,
                       int y_size, int x_canvas_size, int y_canvas_size) {
  Initialize(name, x_pos, y_pos, x_size, y_size, x_canvas_size, y_canvas_size,
             false, "localhost");
}
/// Sets up a ScrollView window, depending on the constructor variables.
/// On the first call this also creates the process-wide network connection,
/// the global mutexes and the message-receiver thread, then registers this
/// window in the global svmap and starts its event-handler thread.
void ScrollView::Initialize(const char* name, int x_pos, int y_pos, int x_size,
                            int y_size, int x_canvas_size, int y_canvas_size,
                            bool y_axis_reversed, const char* server_name) {
  // If this is the first ScrollView Window which gets created, there is no
  // network connection yet and we have to set it up in a different thread.
  // NOTE(review): this first-call setup is not itself thread-safe —
  // presumably the first window is always created from a single thread.
  if (stream_ == NULL) {
    nr_created_windows_ = 0;
    stream_ = new SVNetwork(server_name, kSvPort);
    waiting_for_events_mu = new SVMutex();
    svmap_mu = new SVMutex();
    // Bind the Java-side ScrollView class for later Lua calls.
    SendRawMessage(
        "svmain = luajava.bindClass('com.google.scrollview.ScrollView')\n");
    SVSync::StartThread(MessageReceiver, NULL);
  }
  // Set up the variables on the clientside.
  nr_created_windows_++;
  event_handler_ = NULL;
  event_handler_ended_ = false;
  y_axis_is_reversed_ = y_axis_reversed;
  y_size_ = y_canvas_size;  // Used by TranslateYCoordinate().
  window_name_ = name;
  window_id_ = nr_created_windows_;
  // Set up polygon buffering.
  points_ = new SVPolyLineBuffer;
  points_->empty = true;
  svmap_mu->Lock();
  svmap[window_id_] = this;
  svmap_mu->Unlock();
  for (int i = 0; i < SVET_COUNT; i++) {
    event_table_[i] = NULL;
  }
  mutex_ = new SVMutex();
  semaphore_ = new SVSemaphore();
  // Set up an actual Window on the client side via a Lua instantiation
  // message ("w<id> = ...").
  char message[kMaxMsgSize];
  snprintf(message, sizeof(message),
           "w%u = luajava.newInstance('com.google.scrollview.ui"
           ".SVWindow','%s',%u,%u,%u,%u,%u,%u,%u)\n",
           window_id_, window_name_, window_id_,
           x_pos, y_pos, x_size, y_size, x_canvas_size, y_canvas_size);
  SendRawMessage(message);
  SVSync::StartThread(StartEventHandler, this);
}
/// Sits and waits for events on this window. Runs in its own thread: each
/// time the window's semaphore is signalled it picks the oldest pending
/// event from event_table_, forwards it to the registered event handler and
/// deletes it. Terminates after processing an SVET_DESTROY event.
void* ScrollView::StartEventHandler(void* a) {
  ScrollView* sv = reinterpret_cast<ScrollView*>(a);
  SVEvent* new_event;
  do {
    stream_->Flush();
    sv->semaphore_->Wait();
    new_event = NULL;
    int serial = -1;
    int k = -1;
    sv->mutex_->Lock();
    // Check every table entry if he is is valid and not already processed.
    // The entry with the smallest counter is the oldest pending event.
    for (int i = 0; i < SVET_COUNT; i++) {
      if (sv->event_table_[i] != NULL &&
          (serial < 0 || sv->event_table_[i]->counter < serial)) {
        new_event = sv->event_table_[i];
        serial = sv->event_table_[i]->counter;
        k = i;
      }
    }
    // If we didnt find anything we had an old alarm and just sleep again.
    if (new_event != NULL) {
      sv->event_table_[k] = NULL;
      sv->mutex_->Unlock();
      if (sv->event_handler_ != NULL) { sv->event_handler_->Notify(new_event); }
      if (new_event->type == SVET_DESTROY) {
        // Signal the destructor that it is safe to terminate.
        sv->event_handler_ended_ = true;
        sv = NULL;  // Ends the loop below.
      }
      delete new_event;  // Delete the pointer after it has been processed.
    } else { sv->mutex_->Unlock(); }
  // The thread should run as long as its associated window is alive.
  } while (sv != NULL);
  return 0;
}
#endif // GRAPHICS_DISABLED
// Destroys the remote window (if still registered), waits for the event
// handler thread to acknowledge the SVET_DESTROY, then frees all
// client-side resources.
ScrollView::~ScrollView() {
#ifndef GRAPHICS_DISABLED
  svmap_mu->Lock();
  if (svmap[window_id_] != NULL) {
    svmap_mu->Unlock();
    // So the event handling thread can quit.
    SendMsg("destroy()");
    SVEvent* sve = AwaitEvent(SVET_DESTROY);
    delete sve;
    svmap_mu->Lock();
    svmap[window_id_] = NULL;
    svmap_mu->Unlock();
    // The event handler thread for this window *must* receive the
    // destroy event and set its pointer to this to NULL before we allow
    // the destructor to exit.
    while (!event_handler_ended_)
      Update();
  } else {
    svmap_mu->Unlock();
  }
  delete mutex_;
  delete semaphore_;
  delete points_;
  // Drop any events that were queued but never consumed.
  for (int i = 0; i < SVET_COUNT; i++) {
    delete event_table_[i];
  }
#endif  // GRAPHICS_DISABLED
}
#ifndef GRAPHICS_DISABLED
/// Send a printf-style message to the server, attaching the window id
/// ("w<id>:<message>\n"). Flushes any buffered polygon first so drawing
/// commands keep their order. Messages longer than kMaxMsgSize are truncated.
void ScrollView::SendMsg(const char* format, ...) {
  if (!points_->empty)
    SendPolygon();
  va_list args;
  char message[kMaxMsgSize];
  va_start(args, format);  // variable list
  vsnprintf(message, kMaxMsgSize, format, args);
  va_end(args);
  char form[kMaxMsgSize];
  snprintf(form, kMaxMsgSize, "w%u:%s\n", window_id_, message);
  stream_->Send(form);
}
/// Send a message to the server without a
/// window id. Used for global events like exit(). The caller must supply
/// any required trailing newline itself.
void ScrollView::SendRawMessage(const char* msg) {
  stream_->Send(msg);
}
/// Add an Event Listener to this ScrollView Window. Only one listener is
/// kept; a new call replaces the previous one.
void ScrollView::AddEventHandler(SVEventHandler* listener) {
  event_handler_ = listener;
}
// Wakes the event-handler thread waiting in StartEventHandler().
void ScrollView::Signal() {
  semaphore_->Signal();
}
// Stores two copies of the given event in the per-window event table: one
// under its specific type and one under SVET_ANY (with counter + 1 so the
// specific copy is processed first). Older unprocessed events of the same
// slots are replaced.
void ScrollView::SetEvent(SVEvent* svevent) {
  // Copy event
  SVEvent* any = svevent->copy();
  SVEvent* specific = svevent->copy();
  any->counter = specific->counter + 1;
  // Place both events into the queue.
  mutex_->Lock();
  // Delete the old objects..
  if (event_table_[specific->type] != NULL) {
    delete event_table_[specific->type]; }
  if (event_table_[SVET_ANY] != NULL) {
    delete event_table_[SVET_ANY]; }
  // ...and put the new ones in the table.
  event_table_[specific->type] = specific;
  event_table_[SVET_ANY] = any;
  mutex_->Unlock();
}
/// Block until an event of the given type is received.
/// Registers a fresh semaphore in the global waiting_for_events map, which
/// MessageReceiver signals when a matching event arrives.
/// Note: The calling function is responsible for deleting the returned
/// SVEvent afterwards!
SVEvent* ScrollView::AwaitEvent(SVEventType type) {
  // Initialize the waiting semaphore.
  SVSemaphore* sem = new SVSemaphore();
  std::pair<ScrollView*, SVEventType> ea(this, type);
  waiting_for_events_mu->Lock();
  waiting_for_events[ea] = std::pair<SVSemaphore*, SVEvent*> (sem, (SVEvent*)0);
  waiting_for_events_mu->Unlock();
  // Wait on it, but first flush so the request actually reaches the server.
  stream_->Flush();
  sem->Wait();
  // Process the event we got woken up for (its in waiting_for_events pair).
  waiting_for_events_mu->Lock();
  SVEvent* ret = waiting_for_events[ea].second;
  waiting_for_events.erase(ea);
  delete sem;
  waiting_for_events_mu->Unlock();
  return ret;
}
// Block until any event on any window is received.
// Mirrors AwaitEvent() but registers under the (NULL window, SVET_ANY) key
// so MessageReceiver signals us for events on any window.
// The caller owns the returned SVEvent.
SVEvent* ScrollView::AwaitEventAnyWindow() {
  // Initialize the waiting semaphore.
  SVSemaphore* sem = new SVSemaphore();
  std::pair<ScrollView*, SVEventType> ea((ScrollView*)0, SVET_ANY);
  waiting_for_events_mu->Lock();
  waiting_for_events[ea] = std::pair<SVSemaphore*, SVEvent*> (sem, (SVEvent*)0);
  waiting_for_events_mu->Unlock();
  // Wait on it.
  stream_->Flush();
  sem->Wait();
  // Process the event we got woken up for (its in waiting_for_events pair).
  waiting_for_events_mu->Lock();
  SVEvent* ret = waiting_for_events[ea].second;
  waiting_for_events.erase(ea);
  // Fix: the semaphore was leaked here; AwaitEvent() deletes it at the
  // same point, so do the same.
  delete sem;
  waiting_for_events_mu->Unlock();
  return ret;
}
// Send the current buffered polygon (if any) and clear it.
// A buffer of 2 points is sent as a single line; 3+ points as a polyline;
// a single point (two SetCursors in a row) is discarded.
void ScrollView::SendPolygon() {
  if (!points_->empty) {
    points_->empty = true;  // Allows us to use SendMsg.
    int length = points_->xcoords.size();
    // length == 1 corresponds to 2 SetCursors in a row and only the
    // last setCursor has any effect.
    if (length == 2) {
      // An isolated line!
      SendMsg("drawLine(%d,%d,%d,%d)",
              points_->xcoords[0], points_->ycoords[0],
              points_->xcoords[1], points_->ycoords[1]);
    } else if (length > 2) {
      // A polyline: announce the point count, stream the raw coordinate
      // pairs, then trigger the draw.
      SendMsg("createPolyline(%d)", length);
      char coordpair[kMaxIntPairSize];
      std::string decimal_coords;
      for (int i = 0; i < length; ++i) {
        snprintf(coordpair, kMaxIntPairSize, "%d,%d,",
                 points_->xcoords[i], points_->ycoords[i]);
        decimal_coords += coordpair;
      }
      decimal_coords += '\n';
      SendRawMessage(decimal_coords.c_str());
      SendMsg("drawPolyline()");
    }
    points_->xcoords.clear();
    points_->ycoords.clear();
  }
}
/*******************************************************************************
* LUA "API" functions.
*******************************************************************************/
// Sets the position from which to draw to (x,y). Flushes any pending
// polyline first so the new point starts a fresh one.
void ScrollView::SetCursor(int x, int y) {
  SendPolygon();
  DrawTo(x, y);
}
// Draws from the current position to (x,y) and sets the new position to it.
// Only appends to the polyline buffer; nothing is sent until SendPolygon().
void ScrollView::DrawTo(int x, int y) {
  points_->xcoords.push_back(x);
  points_->ycoords.push_back(TranslateYCoordinate(y));
  points_->empty = false;
}
// Draw a line using the current pen color.
// If either endpoint coincides with the current buffered position, the line
// is appended to the pending polyline instead of starting a new one.
void ScrollView::Line(int x1, int y1, int x2, int y2) {
  const bool has_last_point = !points_->xcoords.empty();
  if (has_last_point && points_->xcoords.back() == x1 &&
      points_->ycoords.back() == TranslateYCoordinate(y1)) {
    // Already at (x1, y1): extend the polyline to (x2, y2).
    DrawTo(x2, y2);
  } else if (has_last_point && points_->xcoords.back() == x2 &&
             points_->ycoords.back() == TranslateYCoordinate(y2)) {
    // Already at (x2, y2): extend the polyline to (x1, y1).
    DrawTo(x1, y1);
  } else {
    // Disconnected from the buffer: start a new line.
    SetCursor(x1, y1);
    DrawTo(x2, y2);
  }
}
// Set the visibility of the window. Must be called once after construction
// for the window (including menu bars) to actually render.
void ScrollView::SetVisible(bool visible) {
  SendMsg("setVisible(%s)", visible ? "true" : "false");
}
// Set the alwaysOnTop flag on the remote window.
void ScrollView::AlwaysOnTop(bool b) {
  SendMsg("setAlwaysOnTop(%s)", b ? "true" : "false");
}
// Adds a printf-style message entry to the message box.
// The formatted text is prefixed with the window id and escaped so it
// survives the Lua string literal on the server side.
void ScrollView::AddMessage(const char* format, ...) {
  va_list args;
  char message[kMaxMsgSize];
  char form[kMaxMsgSize];
  va_start(args, format);  // variable list
  vsnprintf(message, kMaxMsgSize, format, args);
  va_end(args);
  snprintf(form, kMaxMsgSize, "w%u:%s", window_id_, message);
  char* esc = AddEscapeChars(form);
  SendMsg("addMessage(\"%s\")", esc);
  delete[] esc;
}
// Set a messagebox on the remote window (target of AddMessage calls).
void ScrollView::AddMessageBox() {
  SendMsg("addMessageBox()");
}
// Exit the client completely (and notify the server of it).
// Note: terminates the whole process via exit(0); it does not return.
void ScrollView::Exit() {
  SendRawMessage("svmain:exit()");
  exit(0);
}
// Clear the canvas of this window.
void ScrollView::Clear() {
  SendMsg("clear()");
}
// Set the stroke width used for subsequent drawing commands.
void ScrollView::Stroke(float width) {
  SendMsg("setStrokeWidth(%f)", width);
}
// Draw a rectangle using the current pen color.
// The rectangle is filled with the current brush color.
// Degenerate (zero-size) rectangles are dropped client-side.
void ScrollView::Rectangle(int x1, int y1, int x2, int y2) {
  if (x1 == x2 && y1 == y2)
    return;  // Scrollviewer locks up.
  SendMsg("drawRectangle(%d,%d,%d,%d)",
    x1, TranslateYCoordinate(y1), x2, TranslateYCoordinate(y2));
}
// Draw an ellipse using the current pen color, anchored at (x1, y1).
// The ellipse is filled with the current brush color.
void ScrollView::Ellipse(int x1, int y1, int width, int height) {
  SendMsg("drawEllipse(%d,%d,%u,%u)",
    x1, TranslateYCoordinate(y1), width, height);
}
// Set the pen color to the given RGB values (fully opaque).
void ScrollView::Pen(int red, int green, int blue) {
  SendMsg("pen(%d,%d,%d)", red, green, blue);
}
// Set the pen color to the given RGBA values (alpha in [0, 255]).
void ScrollView::Pen(int red, int green, int blue, int alpha) {
  SendMsg("pen(%d,%d,%d,%d)", red, green, blue, alpha);
}
// Set the brush (fill) color to the given RGB values (fully opaque).
void ScrollView::Brush(int red, int green, int blue) {
  SendMsg("brush(%d,%d,%d)", red, green, blue);
}
// Set the brush (fill) color to the given RGBA values (alpha in [0, 255]).
void ScrollView::Brush(int red, int green, int blue, int alpha) {
  SendMsg("brush(%d,%d,%d,%d)", red, green, blue, alpha);
}
// Set the attributes for future Text(..) calls: font family, pixel size and
// bold/italic/underline flags.
void ScrollView::TextAttributes(const char* font, int pixel_size,
                                bool bold, bool italic, bool underlined) {
  // Lua expects the booleans spelled out.
  const char* b = bold ? "true" : "false";
  const char* i = italic ? "true" : "false";
  const char* u = underlined ? "true" : "false";
  SendMsg("textAttributes('%s',%u,%s,%s,%s)", font, pixel_size,
    b, i, u);
}
// Draw text at the given coordinates using the current text attributes.
void ScrollView::Text(int x, int y, const char* mystring) {
  SendMsg("drawText(%d,%d,'%s')", x, TranslateYCoordinate(y), mystring);
}
// Open and draw an image given a name at (x,y). The file is opened on the
// server side, so the path must be accessible there.
void ScrollView::Image(const char* image, int x_pos, int y_pos) {
  SendMsg("openImage('%s')", image);
  SendMsg("drawImage('%s',%d,%d)",
                image, x_pos, TranslateYCoordinate(y_pos));
}
// Add new checkbox menu entry to the menu bar. flag gives the initial
// checked state; cmdEvent is the command id reported when it is toggled.
void ScrollView::MenuItem(const char* parent, const char* name,
                          int cmdEvent, bool flag) {
  if (parent == NULL) { parent = ""; }
  const char* checked = flag ? "true" : "false";
  SendMsg("addMenuBarItem('%s','%s',%d,%s)", parent, name, cmdEvent, checked);
}
// Add new menu entry to the menu bar; cmdEvent is the command id reported
// when it is selected. A NULL parent means top level.
void ScrollView::MenuItem(const char* parent, const char* name, int cmdEvent) {
  if (parent == NULL) { parent = ""; }
  SendMsg("addMenuBarItem('%s','%s',%d)", parent, name, cmdEvent);
}
// Add new submenu to the menu bar. A NULL parent means top level.
void ScrollView::MenuItem(const char* parent, const char* name) {
  if (parent == NULL) { parent = ""; }
  SendMsg("addMenuBarItem('%s','%s')", parent, name);
}
// Add new submenu to the popup menu. A NULL parent means top level.
void ScrollView::PopupItem(const char* parent, const char* name) {
  if (parent == NULL) { parent = ""; }
  SendMsg("addPopupMenuItem('%s','%s')", parent, name);
}
// Add new submenu entry to the popup menu with an editable value and a
// description. value and desc are escaped for the Lua string literal.
void ScrollView::PopupItem(const char* parent, const char* name,
                           int cmdEvent, const char* value, const char* desc) {
  if (parent == NULL) { parent = ""; }
  char* esc = AddEscapeChars(value);
  char* esc2 = AddEscapeChars(desc);
  SendMsg("addPopupMenuItem('%s','%s',%d,'%s','%s')", parent, name,
          cmdEvent, esc, esc2);
  delete[] esc;
  delete[] esc2;
}
// Send an update (repaint) message for this single window.
void ScrollView::UpdateWindow() {
  SendMsg("update()");
}
// Note: this is an update to all windows currently registered in svmap.
void ScrollView::Update() {
  svmap_mu->Lock();
  for (std::map<int, ScrollView*>::iterator iter = svmap.begin();
       iter != svmap.end(); ++iter) {
    // Destroyed windows stay in the map as NULL entries; skip them.
    if (iter->second != NULL)
      iter->second->UpdateWindow();
  }
  svmap_mu->Unlock();
}
// Set the pen color, using an enum value (e.g. ScrollView::ORANGE) looked
// up in the RGBA table.
void ScrollView::Pen(Color color) {
  const int* rgba = table_colors[color];
  Pen(rgba[0], rgba[1], rgba[2], rgba[3]);
}
// Set the brush color, using an enum value (e.g. ScrollView::ORANGE) looked
// up in the RGBA table.
void ScrollView::Brush(Color color) {
  const int* rgba = table_colors[color];
  Brush(rgba[0], rgba[1], rgba[2], rgba[3]);
}
// Shows a modal Input Dialog which can return any kind of String.
// Blocks until the user answers (an SVET_INPUT event arrives); all other
// events are thrown away meanwhile. The caller owns the returned string
// (delete[] it).
char* ScrollView::ShowInputDialog(const char* msg) {
  SendMsg("showInputDialog(\"%s\")", msg);
  // wait till an input event (all others are thrown away)
  SVEvent* ev = AwaitEvent(SVET_INPUT);
  // Single strlen + memcpy of the terminated string replaces the original
  // strncpy + manual termination (three strlen scans).
  const size_t len = strlen(ev->parameter);
  char* p = new char[len + 1];
  memcpy(p, ev->parameter, len + 1);
  delete ev;
  return p;
}
// Shows a modal Yes/No Dialog which will return 'y' or 'n' (the first
// character of the reply event's parameter).
int ScrollView::ShowYesNoDialog(const char* msg) {
  SendMsg("showYesNoDialog(\"%s\")", msg);
  SVEvent* ev;
  // Wait till an input event (all others are thrown away)
  ev = AwaitEvent(SVET_INPUT);
  int a = ev->parameter[0];
  delete ev;
  return a;
}
// Zoom the window to the rectangle given upper left corner and
// lower right corner. Corners are normalized with MIN/MAX so the arguments
// may be given in any order.
void ScrollView::ZoomToRectangle(int x1, int y1, int x2, int y2) {
  y1 = TranslateYCoordinate(y1);
  y2 = TranslateYCoordinate(y2);
  SendMsg("zoomRectangle(%d,%d,%d,%d)",
          MIN(x1, x2), MIN(y1, y2), MAX(x1, x2), MAX(y1, y2));
}
// Send an image of type Pix: the image is PNG-encoded in memory, announced
// with readImage(), then streamed as standard base64 ('=' padding comes
// from pre-filling the output buffer).
void ScrollView::Image(struct Pix* image, int x_pos, int y_pos) {
  l_uint8* data;
  size_t size;
  pixWriteMem(&data, &size, image, IFF_PNG);
  // 4 output chars per 3 input bytes, rounded up.
  int base64_len = (size + 2) / 3 * 4;
  y_pos = TranslateYCoordinate(y_pos);
  SendMsg("readImage(%d,%d,%d)", x_pos, y_pos, base64_len);
  // Base64 encode the data.
  const char kBase64Table[64] = {
    'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H',
    'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P',
    'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X',
    'Y', 'Z', 'a', 'b', 'c', 'd', 'e', 'f',
    'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n',
    'o', 'p', 'q', 'r', 's', 't', 'u', 'v',
    'w', 'x', 'y', 'z', '0', '1', '2', '3',
    '4', '5', '6', '7', '8', '9', '+', '/',
  };
  char* base64 = new char[base64_len + 1];
  memset(base64, '=', base64_len);  // Unwritten tail becomes the padding.
  base64[base64_len] = '\0';
  int remainder = 0;   // Leftover bits from the previous byte, pre-shifted.
  int bits_left = 0;   // Number of leftover bits carried in remainder.
  int code_len = 0;
  // Fix: loop index is size_t to match size (the original int index was a
  // signed/unsigned comparison).
  for (size_t i = 0; i < size; ++i) {
    int code = (data[i] >> (bits_left + 2)) | remainder;
    base64[code_len++] = kBase64Table[code & 63];
    bits_left += 2;
    remainder = data[i] << (6 - bits_left);
    if (bits_left == 6) {
      base64[code_len++] = kBase64Table[remainder & 63];
      bits_left = 0;
      remainder = 0;
    }
  }
  if (bits_left > 0)
    base64[code_len++] = kBase64Table[remainder & 63];
  SendRawMessage(base64);
  delete [] base64;
  free(data);
}
// Escapes the ' character with a \, so it can be processed by LUA.
// Note: The caller will have to make sure he deletes the newly allocated
// item (with delete[]).
char* ScrollView::AddEscapeChars(const char* input) {
  const size_t input_len = strlen(input);
  // Fix: size the buffer for the worst case (every char is a quote, each
  // becoming two output chars) instead of the fixed kMaxMsgSize buffer,
  // which could overflow on long inputs.
  char* message = new char[2 * input_len + 1];
  const char* nextptr = strchr(input, '\'');
  const char* lastptr = input;
  int pos = 0;
  while (nextptr != NULL) {
    // Copy everything before the quote, then insert the backslash.
    strncpy(message+pos, lastptr, nextptr-lastptr);
    pos += nextptr - lastptr;
    message[pos] = '\\';
    pos += 1;
    // lastptr still points AT the quote, so the quote itself is emitted by
    // the next copy, producing \' in the output.
    lastptr = nextptr;
    nextptr = strchr(nextptr+1, '\'');
  }
  // Copy the tail (from the last quote, if any, to the end) and terminate.
  strncpy(message+pos, lastptr, strlen(lastptr));
  message[pos+strlen(lastptr)] = '\0';
  return message;
}
// Inverse the Y axis if the coordinates are actually inversed: maps y into
// the canvas coordinate system using the canvas height y_size_.
int ScrollView::TranslateYCoordinate(int y) {
  return y_axis_is_reversed_ ? y_size_ - y : y;
}
#endif // GRAPHICS_DISABLED
| C++ |
///////////////////////////////////////////////////////////////////////
// File: svutil.cpp
// Description: ScrollView Utilities
// Author: Joern Wanke
// Created: Thu Nov 29 2007
//
// (C) Copyright 2007, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
//
// SVUtil contains the SVSync and SVNetwork classes, which are used for
// thread/process creation & synchronization and network connection.
#include <stdio.h>
#ifdef _WIN32
struct addrinfo {
struct sockaddr* ai_addr;
int ai_addrlen;
int ai_family;
int ai_socktype;
int ai_protocol;
};
#else
#include <arpa/inet.h>
#include <netinet/in.h>
#include <pthread.h>
#include <semaphore.h>
#include <signal.h>
#include <stdlib.h>
#include <string.h>
#include <netdb.h>
#include <sys/socket.h>
#ifdef __linux__
#include <sys/prctl.h>
#endif
#include <unistd.h>
#endif
#include <cstdlib>
#include <cstring>
#include <iostream>
#include <string>
// Include automatically generated configuration file if running autoconf.
#ifdef HAVE_CONFIG_H
#include "config_auto.h"
#endif
#ifndef GRAPHICS_DISABLED
#include "svutil.h"
const int kBufferSize = 65536;
const int kMaxMsgSize = 4096;
// Signals the calling thread to exit. No-op on Windows (the ExitThread
// call is commented out).
void SVSync::ExitThread() {
#ifdef _WIN32
  // ExitThread(0);
#else
  pthread_exit(0);
#endif
}
// Starts a new detached process running `executable` with the space-
// separated `args`. On Windows this uses CreateProcess; elsewhere it forks
// and in the child splits args on spaces (honoring double quotes) before
// execvp'ing.
void SVSync::StartProcess(const char* executable, const char* args) {
  std::string proc;
  proc.append(executable);
  proc.append(" ");
  proc.append(args);
  std::cout << "Starting " << proc << std::endl;
#ifdef _WIN32
  STARTUPINFO start_info;
  PROCESS_INFORMATION proc_info;
  GetStartupInfo(&start_info);
  if (!CreateProcess(NULL, const_cast<char*>(proc.c_str()), NULL, NULL, FALSE,
                CREATE_NO_WINDOW | DETACHED_PROCESS, NULL, NULL,
                &start_info, &proc_info))
    return;
#else
  int pid = fork();
  if (pid != 0) {   // The father process returns
  } else {
#ifdef __linux__
    // Make sure the java process terminates on exit, since its
    // broken socket detection seems to be useless.
    prctl(PR_SET_PDEATHSIG, 2, 0, 0, 0);
#endif
    // First pass: count the spaces to get an upper bound on the number of
    // argv slots needed.
    char* mutable_args = strdup(args);
    int argc = 1;
    for (int i = 0; mutable_args[i]; ++i) {
      if (mutable_args[i] == ' ') {
        ++argc;
      }
    }
    char** argv = new char*[argc + 2];
    argv[0] = strdup(executable);
    argv[1] = mutable_args;
    argc = 2;
    // Second pass: split in place on unquoted spaces; double quotes are
    // replaced by spaces and toggle quote mode.
    bool inquote = false;
    for (int i = 0; mutable_args[i]; ++i) {
      if (!inquote && mutable_args[i] == ' ') {
        mutable_args[i] = '\0';
        argv[argc++] = mutable_args + i + 1;
      } else if (mutable_args[i] == '"') {
        inquote = !inquote;
        mutable_args[i] = ' ';
      }
    }
    argv[argc] = NULL;
    execvp(executable, argv);
  }
#endif
}
// Creates a counting semaphore with an initial count of zero.
// On Apple a named semaphore with a random name is used because unnamed
// POSIX semaphores (sem_init) are not supported there.
SVSemaphore::SVSemaphore() {
#ifdef _WIN32
  semaphore_ = CreateSemaphore(0, 0, 10, 0);
#elif defined(__APPLE__)
  char name[50];
  snprintf(name, sizeof(name), "%d", random());
  sem_unlink(name);
  semaphore_ = sem_open(name, O_CREAT , S_IWUSR, 0);
  if (semaphore_ == SEM_FAILED) {
    perror("sem_open");
  }
#else
  sem_init(&semaphore_, 0, 0);
#endif
}
// Increments the semaphore, waking one waiter if any.
void SVSemaphore::Signal() {
#ifdef _WIN32
  ReleaseSemaphore(semaphore_, 1, NULL);
#elif defined(__APPLE__)
  sem_post(semaphore_);
#else
  sem_post(&semaphore_);
#endif
}
// Blocks until the semaphore count is positive, then decrements it.
void SVSemaphore::Wait() {
#ifdef _WIN32
  WaitForSingleObject(semaphore_, INFINITE);
#elif defined(__APPLE__)
  sem_wait(semaphore_);
#else
  sem_wait(&semaphore_);
#endif
}
// Creates an unlocked mutex.
SVMutex::SVMutex() {
#ifdef _WIN32
  mutex_ = CreateMutex(0, FALSE, 0);
#else
  pthread_mutex_init(&mutex_, NULL);
#endif
}
// Blocks until the mutex is acquired.
void SVMutex::Lock() {
#ifdef _WIN32
  WaitForSingleObject(mutex_, INFINITE);
#else
  pthread_mutex_lock(&mutex_);
#endif
}
// Releases the mutex. Must only be called by the thread holding it.
void SVMutex::Unlock() {
#ifdef _WIN32
  ReleaseMutex(mutex_);
#else
  pthread_mutex_unlock(&mutex_);
#endif
}
// Create new detached-style thread running func(arg). The thread handle is
// not retained, so the caller cannot join it.
void SVSync::StartThread(void *(*func)(void*), void* arg) {
#ifdef _WIN32
  LPTHREAD_START_ROUTINE f = (LPTHREAD_START_ROUTINE) func;
  DWORD threadid;
  HANDLE newthread = CreateThread(
  NULL,          // default security attributes
  0,             // use default stack size
  f,             // thread function
  arg,           // argument to thread function
  0,             // use default creation flags
  &threadid);    // returns the thread identifier
#else
  pthread_t helper;
  pthread_create(&helper, NULL, func, arg);
#endif
}
// Place a message in the outgoing message buffer. Nothing is written to the
// socket until Flush() is called.
void SVNetwork::Send(const char* msg) {
  mutex_send_->Lock();
  msg_buffer_out_.append(msg);
  mutex_send_->Unlock();
}
// Send the whole outgoing buffer over the socket, looping until it has all
// been written or the socket fails.
void SVNetwork::Flush() {
  mutex_send_->Lock();
  while (msg_buffer_out_.size() > 0) {
    int i = send(stream_, msg_buffer_out_.c_str(), msg_buffer_out_.length(), 0);
    // Fix: on error send() returns -1, which the original passed straight
    // to erase() — converting to a huge size_t and silently discarding the
    // whole buffer — and a 0 return would spin forever. Stop instead and
    // keep the unsent data for a later Flush().
    if (i <= 0) break;
    msg_buffer_out_.erase(0, i);
  }
  mutex_send_->Unlock();
}
// Receive a message from the server.
// This will always return one line of char* (denoted by \n), or NULL when
// the connection timed out, closed or errored. Lines already buffered from
// a previous recv() are returned first via the strtok state.
char* SVNetwork::Receive() {
  char* result = NULL;
#if defined(_WIN32) || defined(__CYGWIN__)
  if (has_content) { result = strtok (NULL, "\n"); }
#else
  if (buffer_ptr_ != NULL) { result = strtok_r(NULL, "\n", &buffer_ptr_); }
#endif
  // This means there is something left in the buffer and we return it.
  if (result != NULL) { return result;
  // Otherwise, we read from the stream_.
  } else {
    buffer_ptr_ = NULL;
    has_content = false;
    // The timeout length is not really important since we are looping anyway
    // until a new message is delivered.
    struct timeval tv;
    tv.tv_sec = 10;
    tv.tv_usec = 0;
    // Set the flags to return when the stream_ is ready to be read.
    fd_set readfds;
    FD_ZERO(&readfds);
    FD_SET(stream_, &readfds);
    int i = select(stream_+1, &readfds, NULL, NULL, &tv);
    // Timeout with nothing readable; caller will retry.
    if (i == 0) { return NULL; }
    // Read the message buffer.
    i = recv(stream_, msg_buffer_in_, kMaxMsgSize, 0);
    // Server quit (0) or error (-1).
    if (i <= 0) { return NULL; }
    msg_buffer_in_[i] = '\0';
    has_content = true;
#ifdef _WIN32
    return strtok(msg_buffer_in_, "\n");
#else
    // Setup a new string tokenizer.
    return strtok_r(msg_buffer_in_, "\n", &buffer_ptr_);
#endif
  }
}
// Close the connection to the server.
// Uses the platform-appropriate socket close call; the object remains
// alive but stream_ is no longer usable afterwards.
void SVNetwork::Close() {
#ifdef _WIN32
  closesocket(stream_);
#else
  close(stream_);
#endif
}
// The program to invoke to start ScrollView.
// On Windows the JVM is launched directly; elsewhere a shell wraps it so
// ScrollViewCommand() can redirect output and install a kill trap.
static const char* ScrollViewProg() {
#ifdef _WIN32
  return "java -Xms512m -Xmx1024m";
#else
  return "sh";
#endif
}
// The arguments to the program to invoke to start ScrollView.
// Substitutes scrollview_path for each of the four %s placeholders in the
// platform-specific command template and returns the resulting string.
static std::string ScrollViewCommand(std::string scrollview_path) {
  // The following ugly ifdef is to enable the output of the java runtime
  // to be sent down a black hole on non-windows to ignore all the
  // exceptions in piccolo. Ideally piccolo would be debugged to make
  // this unnecessary.
  // Also the path has to be separated by ; on windows and : otherwise.
#ifdef _WIN32
  // Classpath entries must all be separated by ';' on Windows.  (The
  // original template mixed ';' and ':', which broke loading of the
  // piccolo jars on Windows.)
  const char* cmd_template = "-Djava.library.path=%s -cp %s/ScrollView.jar;"
      "%s/piccolo2d-core-3.0.jar;%s/piccolo2d-extras-3.0.jar"
      " com.google.scrollview.ScrollView";
#else
  const char* cmd_template = "-c \"trap 'kill %%1' 0 1 2 ; java "
      "-Xms1024m -Xmx2048m -Djava.library.path=%s -cp %s/ScrollView.jar:"
      "%s/piccolo2d-core-3.0.jar:%s/piccolo2d-extras-3.0.jar"
      " com.google.scrollview.ScrollView"
      " & wait\"";
#endif
  // Four "%s" placeholders are replaced by the path; counting the template's
  // own "%s" characters slightly over-allocates, which is harmless.
  int cmdlen = strlen(cmd_template) + 4*strlen(scrollview_path.c_str()) + 1;
  char* cmd = new char[cmdlen];
  const char* sv_path = scrollview_path.c_str();
  snprintf(cmd, cmdlen, cmd_template, sv_path, sv_path, sv_path, sv_path);
  std::string command(cmd);
  delete [] cmd;
  return command;
}
// Platform-independent freeaddrinfo().
// On linux, addr_info came from the real getaddrinfo() and is released with
// freeaddrinfo().  Elsewhere it was hand-built by GetAddrInfoNonLinux(),
// so its pieces are deleted individually.
static void FreeAddrInfo(struct addrinfo* addr_info) {
#if defined(__linux__)
  freeaddrinfo(addr_info);
#else
  // ai_addr was allocated as a sockaddr_in in GetAddrInfoNonLinux(); delete
  // it through the type it was created with (deleting through sockaddr*
  // is formally undefined behavior).
  delete reinterpret_cast<struct sockaddr_in*>(addr_info->ai_addr);
  delete addr_info;
#endif
}
// Non-linux version of getaddrinfo().
// Hand-builds a single IPv4 addrinfo/sockaddr_in pair (released later by
// FreeAddrInfo()).  Returns 0 on success, -1 if the hostname cannot be
// resolved.
// NOTE(review): gethostbyname() is the legacy, IPv4-only resolver API;
// on Windows, WSAStartup is invoked on every lookup with no matching
// WSACleanup - presumably harmless for this long-lived process, but worth
// confirming.
#if !defined(__linux__)
static int GetAddrInfoNonLinux(const char* hostname, int port,
struct addrinfo** addr_info) {
// Get the host data depending on the OS.
struct sockaddr_in* address;
*addr_info = new struct addrinfo;
memset(*addr_info, 0, sizeof(struct addrinfo));
address = new struct sockaddr_in;
memset(address, 0, sizeof(struct sockaddr_in));
// Wire the sockaddr_in into the addrinfo as a generic sockaddr.
(*addr_info)->ai_addr = (struct sockaddr*) address;
(*addr_info)->ai_addrlen = sizeof(struct sockaddr);
(*addr_info)->ai_family = AF_INET;
(*addr_info)->ai_socktype = SOCK_STREAM;
struct hostent *name;
#ifdef _WIN32
// Winsock must be initialized before any resolver/socket call.
WSADATA wsaData;
WSAStartup(MAKEWORD(1, 1), &wsaData);
name = gethostbyname(hostname);
#else
name = gethostbyname(hostname);
#endif
if (name == NULL) {
// Resolution failed: release the partially-built structures.
FreeAddrInfo(*addr_info);
*addr_info = NULL;
return -1;
}
// Fill in the appropriate variables to be able to connect to the server.
address->sin_family = name->h_addrtype;
memcpy((char *) &address->sin_addr.s_addr,
name->h_addr_list[0], name->h_length);
address->sin_port = htons(port);
return 0;
}
#endif
// Platform independent version of getaddrinfo()
// Given a hostname:port, produce an addrinfo struct
// Returns 0 on success, non-zero on failure.  The result must be released
// with FreeAddrInfo(), which matches the allocation strategy per platform.
static int GetAddrInfo(const char* hostname, int port,
struct addrinfo** address) {
#if defined(__linux__)
// getaddrinfo takes the port as a decimal string.
char port_str[40];
snprintf(port_str, 40, "%d", port);
return getaddrinfo(hostname, port_str, NULL, address);
#else
return GetAddrInfoNonLinux(hostname, port, address);
#endif
}
// Set up a connection to a ScrollView on hostname:port.
// If no server is listening there, a local ScrollView server process is
// spawned and the constructor loops until it accepts the connection.
SVNetwork::SVNetwork(const char* hostname, int port) {
  mutex_send_ = new SVMutex();
  msg_buffer_in_ = new char[kMaxMsgSize + 1];
  msg_buffer_in_[0] = '\0';
  has_content = false;
  buffer_ptr_ = NULL;
  struct addrinfo *addr_info = NULL;
  if (GetAddrInfo(hostname, port, &addr_info) != 0 || addr_info == NULL) {
    std::cerr << "Error resolving name for ScrollView host "
              << std::string(hostname) << ":" << port << std::endl;
    // Previously execution continued and dereferenced the NULL addr_info,
    // crashing.  Leave the connection unusable instead.
    stream_ = -1;
    return;
  }
  stream_ = socket(addr_info->ai_family, addr_info->ai_socktype,
                   addr_info->ai_protocol);
  // If server is not there, we will start a new server as local child process.
  if (connect(stream_, addr_info->ai_addr, addr_info->ai_addrlen) < 0) {
    // Locate the ScrollView jars: $SCROLLVIEW_PATH env var, then the
    // compile-time SCROLLVIEW_PATH macro, then the current directory.
    const char* scrollview_path = getenv("SCROLLVIEW_PATH");
    if (scrollview_path == NULL) {
#ifdef SCROLLVIEW_PATH
#define _STR(a) #a
#define _XSTR(a) _STR(a)
      scrollview_path = _XSTR(SCROLLVIEW_PATH);
#undef _XSTR
#undef _STR
#else
      scrollview_path = ".";
#endif
    }
    const char *prog = ScrollViewProg();
    std::string command = ScrollViewCommand(scrollview_path);
    SVSync::StartProcess(prog, command.c_str());
    // Wait for server to show up.
    // Note: There is no exception handling in case the server never turns up.
    stream_ = socket(addr_info->ai_family, addr_info->ai_socktype,
                     addr_info->ai_protocol);
    while (connect(stream_, addr_info->ai_addr,
                   addr_info->ai_addrlen) < 0) {
      std::cout << "ScrollView: Waiting for server...\n";
#ifdef _WIN32
      Sleep(1000);
#else
      sleep(1);
#endif
      // A fresh socket per attempt: a failed connect() leaves the socket in
      // an unspecified state on some platforms.
      stream_ = socket(addr_info->ai_family, addr_info->ai_socktype,
                       addr_info->ai_protocol);
    }
  }
  FreeAddrInfo(addr_info);
}
// Releases the receive buffer and the send mutex.
// NOTE(review): the socket is not closed here; callers are expected to
// invoke Close() first.
SVNetwork::~SVNetwork() {
  delete[] msg_buffer_in_;
  delete mutex_send_;
}
#endif // GRAPHICS_DISABLED
| C++ |
///////////////////////////////////////////////////////////////////////
// File: svutil.h
// Description: ScrollView Utilities
// Author: Joern Wanke
// Created: Thu Nov 29 2007
//
// (C) Copyright 2007, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
//
// SVUtil contains the SVSync, SVSemaphore, SVMutex and SVNetwork
// classes, which are used for thread/process creation & synchronization
// and network connection.
#ifndef TESSERACT_VIEWER_SVUTIL_H__
#define TESSERACT_VIEWER_SVUTIL_H__
#ifdef _WIN32
#ifndef __GNUC__
#include <windows.h>
#define snprintf _snprintf
#if (_MSC_VER <= 1400)
#define vsnprintf _vsnprintf
#endif
#pragma warning(disable:4786)
#else
#include "platform.h"
#include <windows.h>
#endif
#else
#include <pthread.h>
#include <semaphore.h>
#endif
#include <string>
#ifndef MAX
// Fully parenthesize the arguments so expressions containing operators with
// lower precedence than '>' (e.g. MAX(a | b, c)) expand correctly.
// Beware: arguments are still evaluated twice - avoid side effects.
#define MAX(a, b) (((a) > (b)) ? (a) : (b))
#endif
#ifndef MIN
#define MIN(a, b) (((a) < (b)) ? (a) : (b))
#endif
/// The SVSync class provides functionality for Thread & Process Creation.
/// All members are static; the class is never instantiated.
class SVSync {
 public:
  /// Create a new detached thread running func(arg). No handle is returned,
  /// so the thread cannot be joined.
  static void StartThread(void *(*func)(void*), void* arg);
  /// Signals a thread to exit.
  static void ExitThread();
  /// Starts a new process running executable with the given argument string.
  static void StartProcess(const char* executable, const char* args);
};
/// A semaphore class which encapsulates the main signalling
/// and wait abilities of semaphores for windows and unix.
/// On Apple a named POSIX semaphore is used (sem_t*), since OS X does not
/// support unnamed semaphores; elsewhere an in-place sem_t or a Win32
/// HANDLE is used.
class SVSemaphore {
 public:
  /// Sets up a semaphore with an initial count of zero.
  SVSemaphore();
  /// Signal a semaphore (increment the count, waking one waiter).
  void Signal();
  /// Wait on a semaphore (block until the count is positive, then decrement).
  void Wait();
 private:
#ifdef _WIN32
  HANDLE semaphore_;
#elif defined(__APPLE__)
  sem_t *semaphore_;
#else
  sem_t semaphore_;
#endif
};
/// A mutex which encapsulates the main locking and unlocking
/// abilites of mutexes for windows and unix.
/// NOTE(review): there is no destructor, so the underlying OS object is
/// never destroyed - fine for the long-lived instances used here.
class SVMutex {
 public:
  /// Sets up a new, unlocked mutex.
  SVMutex();
  /// Locks on a mutex (blocks until available).
  void Lock();
  /// Unlocks on a mutex; must be called by the locking thread.
  void Unlock();
 private:
#ifdef _WIN32
  HANDLE mutex_;
#else
  pthread_mutex_t mutex_;
#endif
};
/// The SVNetwork class takes care of the remote connection for ScrollView
/// This means setting up and maintaining a remote connection, sending and
/// receiving messages and closing the connection.
/// It is designed to work on both Linux and Windows.
class SVNetwork {
 public:
  /// Set up a connection to hostname on port; spawns a local ScrollView
  /// server process if nothing is listening there.
  SVNetwork(const char* hostname, int port);
  /// Destructor. Frees buffers; does NOT close the socket (call Close()).
  ~SVNetwork();
  /// Put a message in the messagebuffer to the server and try to send it.
  void Send(const char* msg);
  /// Receive a message from the server.
  /// This will always return one line of char* (denoted by \n),
  /// or NULL on timeout or disconnect.
  char* Receive();
  /// Close the connection to the server.
  void Close();
  /// Flush the buffer (transmit everything queued by Send()).
  void Flush();
 private:
  /// The mutex for access to Send() and Flush().
  SVMutex* mutex_send_;
  /// The actual stream_ to the server (socket descriptor).
  int stream_;
  /// Stores the last received message-chunk from the server.
  char* msg_buffer_in_;
  /// Stores the messages which are supposed to go out.
  std::string msg_buffer_out_;
  /// True while unconsumed lines remain in msg_buffer_in_.
  bool has_content;  // Win32 (strtok)
  /// Where we are at in our msg_buffer_in_
  char* buffer_ptr_;  // Unix (strtok_r)
};
#endif // TESSERACT_VIEWER_SVUTIL_H__
| C++ |
/**
* @file
* @author Aapo Kyrola <akyrola@cs.cmu.edu>
* @version 1.0
*
* @section LICENSE
*
* Copyright [2012] [Aapo Kyrola, Guy Blelloch, Carlos Guestrin / Carnegie Mellon University]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @section DESCRIPTION
*
* The class manages vertex values (vertex data).
*/
/* Note: This class shares a lot of code with the degree_data.hpp. It might be
useful to have a common base class "sequential-file". */
#ifdef DYNAMICVERTEXDATA
#include "engine/auxdata/dynamicdata/vertex_data_dynamic.hpp"
#else
#ifndef DEF_GRAPHCHI_VERTEXDATA
#define DEF_GRAPHCHI_VERTEXDATA
#include <stdlib.h>
#include <string>
#include <assert.h>
#include "graphchi_types.hpp"
#include "api/chifilenames.hpp"
#include "io/stripedio.hpp"
#include "util/ioutil.hpp"
namespace graphchi {
/**
 * Manages fixed-size vertex values stored in a single flat file: one
 * VertexDataType record per vertex, addressed by vertex id.  A contiguous
 * window [vertex_st, vertex_en] is kept in memory at a time.
 */
template <typename VertexDataType>
class vertex_data_store {
 protected:
  stripedio * iomgr;  // I/O manager used for all reads/writes (not owned)

  /* Current range of vertices in memory */
  vid_t vertex_st;
  vid_t vertex_en;

  std::string filename;  // vertex data file for this graph
  int filedesc;          // session handle from the I/O manager
  VertexDataType * loaded_chunk;  // in-memory window, or NULL if none loaded

  // Opens an I/O session on the vertex data file.
  // NOTE(review): the base_filename parameter is unused; the member
  // 'filename' (set by the constructor) is opened instead.
  virtual void open_file(std::string base_filename) {
    filedesc = iomgr->open_session(filename.c_str(), false);
  }

 public:
  // Derives the data filename, sizes the file for nvertices records,
  // enables preloading and opens the I/O session.
  vertex_data_store(std::string base_filename, size_t nvertices, stripedio * iomgr) : iomgr(iomgr), loaded_chunk(NULL){
    vertex_st = vertex_en = 0;
    filename = filename_vertex_data<VertexDataType>(base_filename);
    check_size(nvertices);
    iomgr->allow_preloading(filename);
    open_file(filename);
  }

  virtual ~vertex_data_store() {
    // Close the session, drain pending writes, then release the window.
    iomgr->close_session(filedesc);
    iomgr->wait_for_writes();
    if (loaded_chunk != NULL) {
      iomgr->managed_release(filedesc, &loaded_chunk);
    }
  }

  // Ensures the backing file holds exactly nvertices records.
  void check_size(size_t nvertices) {
    checkarray_filesize<VertexDataType>(filename, nvertices);
  }

  // Truncates the file to zero, then regrows it to nvertices records,
  // zeroing all vertex values.
  void clear(size_t nvertices) {
    check_size(0);
    check_size(nvertices);
  }

  /**
   * Loads a chunk of vertex values
   * @param _vertex_st first vertex id
   * @param _vertex_en last vertex id, inclusive
   */
  virtual void load(vid_t _vertex_st, vid_t _vertex_en) {
    assert(_vertex_en >= _vertex_st);
    vertex_st = _vertex_st;
    vertex_en = _vertex_en;
    // Inclusive range: one record per vertex.
    size_t datasize = (vertex_en - vertex_st + 1)* sizeof(VertexDataType);
    size_t datastart = vertex_st * sizeof(VertexDataType);

    // Release the previous window before loading the new one.
    if (loaded_chunk != NULL) {
      iomgr->managed_release(filedesc, &loaded_chunk);
    }

    iomgr->managed_malloc(filedesc, &loaded_chunk, datasize, datastart);
    iomgr->managed_preada_now(filedesc, &loaded_chunk, datasize, datastart);
  }

  /**
   * Saves the current chunk of vertex values
   * @param async if true, the write is issued asynchronously
   */
  virtual void save(bool async=false) {
    assert(loaded_chunk != NULL);
    size_t datasize = (vertex_en - vertex_st + 1) * sizeof(VertexDataType);
    size_t datastart = vertex_st * sizeof(VertexDataType);
    if (async) {
      iomgr->managed_pwritea_async(filedesc, &loaded_chunk, datasize, datastart, false);
    } else {
      iomgr->managed_pwritea_now(filedesc, &loaded_chunk, datasize, datastart);
    }
  }

  /**
   * Returns id of the first vertex currently in memory. Fails if nothing loaded yet.
   */
  vid_t first_vertex_id() {
    assert(loaded_chunk != NULL);
    return vertex_st;
  }

  // Pointer to the value of vertexid; it must lie within the loaded window.
  VertexDataType * vertex_data_ptr(vid_t vertexid) {
    assert(vertexid >= vertex_st && vertexid <= vertex_en);
    return &loaded_chunk[vertexid - vertex_st];
  }
};
}
#endif
#endif
| C++ |
/**
* @file
* @author Aapo Kyrola <akyrola@cs.cmu.edu>
* @version 1.0
*
* @section LICENSE
*
* Copyright [2012] [Aapo Kyrola, Guy Blelloch, Carlos Guestrin / Carnegie Mellon University]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @section DESCRIPTION
*
* The class manages vertex values (vertex data) when the
* vertex data is dynamic. That is, the vertex data type must
* be a chivector.
*
* To enable dynamically sized data, vertex data must be stored in
* small (1 million-vertex) blocks.
*/
#ifndef DYNAMICVERTEXDATA
ERROR(DYNAMICVERTEXDATA NEEDS TO BE DEFINED)
#endif
#ifndef DEF_GRAPHCHI_VERTEXDATA
#define DEF_GRAPHCHI_VERTEXDATA
#include <stdlib.h>
#include <string>
#include <fcntl.h>
#include <errno.h>
#include <sys/stat.h>
#include <assert.h>
#include "graphchi_types.hpp"
#include "api/chifilenames.hpp"
#include "io/stripedio.hpp"
#include "util/ioutil.hpp"
#include "api/dynamicdata/chivector.hpp"
#include "shards/dynamicdata/dynamicblock.hpp"
namespace graphchi {
/**
 * Bookkeeping for one in-memory block of dynamic vertex data:
 * the block id, the I/O session descriptor, the raw compressed-format
 * buffer, and the dynamicdata_block wrapper that interprets it.
 */
template <typename VertexDataType>
struct vdblock_t {
  int blockid;    // index of this block (vertex id / verticesperblock)
  int fd;         // I/O manager session for the block file
  uint8_t* data;  // raw block contents (owned via the I/O manager)
  dynamicdata_block<VertexDataType> * dblock;  // typed view over data
  vdblock_t(int bid) : blockid(bid), data(NULL), dblock(NULL) {}
};
/**
 * Manages dynamically-sized vertex values (chivectors).  Data is stored in
 * fixed-population block files (verticesperblock vertices each) inside a
 * ".dynamic_blockdir" directory; the blocks covering the current vertex
 * window are kept in memory.
 */
template <typename VertexDataType>
class vertex_data_store {

    typedef vdblock_t<VertexDataType> vdblock;

protected:

    stripedio * iomgr;  // I/O manager used for all reads/writes (not owned)

    /* Current range of vertices in memory */
    vid_t vertex_st;
    vid_t vertex_en;

    std::string dirname;        // directory holding one file per block
    size_t verticesperblock;    // vertices stored per block file (1M)
    VertexDataType * loaded_chunk;      // unused here; kept for parity with
                                        // the static vertex_data_store
    std::vector<vdblock> loadedblocks;  // Blocks currently in memory

public:

    /**
     * @param base_filename graph file prefix; the block directory name is
     *        derived from it
     * @param nvertices number of vertices whose block files must exist
     * @param iomgr I/O manager (not owned)
     */
    vertex_data_store(std::string base_filename, size_t nvertices, stripedio * iomgr) : iomgr(iomgr), loaded_chunk(NULL){
        vertex_st = vertex_en = 0;
        verticesperblock = 1024 * 1024;
        dirname = filename_vertex_data<VertexDataType>(base_filename) + ".dynamic_blockdir";
        check_size(nvertices);
    }

    virtual ~vertex_data_store() {
        iomgr->wait_for_writes();
        releaseblocks();
    }

    /** Ensures a block file exists for every vertex up to nvertices. */
    void check_size(size_t nvertices) {
        // Guard: with nvertices == 0 the expression below would underflow
        // size_t and try to create billions of blocks.
        if (nvertices == 0) return;
        int nblocks = (int)((nvertices - 1) / verticesperblock + 1);
        for(int i=0; i < nblocks; i++) {
            init_block(i);
        }
    }

    /** Deletes the block files covering the first nvertices vertices. */
    void clear(size_t nvertices) {
        if (nvertices == 0) return;  // same underflow guard as check_size()
        int nblocks = (int)((nvertices - 1) / verticesperblock + 1);
        for(int i=0; i < nblocks; i++) {
            std::string bfilename = blockfilename(i);
            if (file_exists(bfilename)) {
                remove(bfilename.c_str());
            }
            delete_block_uncompressed_sizefile(bfilename);
        }
    }

private:

    // Path of the file storing block |blockid|.
    std::string blockfilename(int blockid) {
        std::stringstream ss;
        ss << dirname;
        ss << "/";
        ss << blockid;
        return ss.str();
    }

    // Frees memory and closes I/O sessions of all loaded blocks.
    void releaseblocks() {
        for(int i=0; i < (int)loadedblocks.size(); i++) {
            delete(loadedblocks[i].dblock);
            iomgr->managed_release(loadedblocks[i].fd, &loadedblocks[i].data);
            iomgr->close_session(loadedblocks[i].fd);
            loadedblocks[i].data = NULL;
            loadedblocks[i].dblock = NULL;
        }
        loadedblocks.clear();
    }

    // Creates block file |blockid| (zeroed size-words) if it does not exist.
    void init_block(int blockid) {
        std::string bfilename = blockfilename(blockid);
        if (!file_exists(bfilename)) {
            mkdir(dirname.c_str(), 0777);  // may already exist; error ignored
            size_t initsize = verticesperblock * sizeof(typename VertexDataType::sizeword_t);
            int f = open(bfilename.c_str(), O_RDWR | O_CREAT, S_IROTH | S_IWOTH | S_IWUSR | S_IRUSR);
            uint8_t * zeros = (uint8_t *) calloc(verticesperblock, sizeof(typename VertexDataType::sizeword_t));
            write_compressed(f, zeros, initsize);
            free(zeros);
            write_block_uncompressed_size(bfilename, initsize);
            close(f);
        }
    }

    // Reads block |blockid| into memory and wraps it in a dynamicdata_block.
    vdblock load_block(int blockid) {
        vdblock db(blockid);
        std::string blockfname = blockfilename(blockid);
        db.fd = iomgr->open_session(blockfname, false, true);
        int realsize = get_block_uncompressed_size(blockfname, -1);
        assert(realsize > 0);
        iomgr->managed_malloc(db.fd, &db.data, realsize, 0);
        iomgr->managed_preada_now(db.fd, &db.data, realsize, 0);
        db.dblock = new dynamicdata_block<VertexDataType>(verticesperblock, (uint8_t *)db.data, realsize);
        return db;
    }

    // Serializes a block and writes it back to its file in full.
    void write_block(vdblock &block) {
        int realsize;
        uint8_t * outdata;
        block.dblock->write(&outdata, realsize);
        std::string blockfname = blockfilename(block.blockid);
        iomgr->managed_pwritea_now(block.fd, &outdata, realsize, 0); /* Need to write whole block in the compressed regime */
        write_block_uncompressed_size(blockfname, realsize);
        free(outdata);
    }

public:

    /**
     * Loads a chunk of vertex values
     * @param _vertex_st first vertex id
     * @param _vertex_en last vertex id, inclusive
     */
    virtual void load(vid_t _vertex_st, vid_t _vertex_en) {
        assert(_vertex_en >= _vertex_st);
        vertex_st = _vertex_st;
        vertex_en = _vertex_en;
        releaseblocks();
        int min_blockid = vertex_st / verticesperblock;
        int max_blockid = vertex_en / verticesperblock;
        for(int i=min_blockid; i <= max_blockid; i++) {
            loadedblocks.push_back(load_block(i));
        }
    }

    /**
     * Saves the current chunk of vertex values.
     * Note: the async flag is accepted for interface compatibility with the
     * static store, but writes here are always synchronous.
     */
    virtual void save(bool async=false) {
        for(int i=0; i < (int)loadedblocks.size(); i++) {
            write_block(loadedblocks[i]);
        }
    }

    /**
     * Returns id of the first vertex currently in memory.
     */
    vid_t first_vertex_id() {
        return vertex_st;
    }

    // Pointer to the chivector of |vertexid|; its block must be load()ed.
    VertexDataType * vertex_data_ptr(vid_t vertexid) {
        int blockid = vertexid / verticesperblock;
        int firstloaded = loadedblocks[0].blockid;
        dynamicdata_block<VertexDataType> * dynblock = loadedblocks[blockid - firstloaded].dblock;
        return dynblock->edgevec(vertexid % verticesperblock);
    }
};
}
#endif
| C++ |
/**
* @file
* @author Aapo Kyrola <akyrola@cs.cmu.edu>
* @version 1.0
*
* @section LICENSE
*
* Copyright [2012] [Aapo Kyrola, Guy Blelloch, Carlos Guestrin / Carnegie Mellon University]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @section DESCRIPTION
*
* The class manages information about vertex degree, and allows
* sequential block access to the degree data file.
*/
#ifndef DEF_GRAPHCHI_DEGREE_DATA
#define DEF_GRAPHCHI_DEGREE_DATA
#include <fstream>
#include <assert.h>
#include <string>
#include <stdlib.h>
#include "graphchi_types.hpp"
#include "io/stripedio.hpp"
namespace graphchi {
// Per-vertex degree record as stored on disk (two ints per vertex).
struct degree {
    int indegree;   // number of incoming edges
    int outdegree;  // number of outgoing edges
};
/**
 * Manages the degree file: one 'degree' record per vertex, with a
 * contiguous window [vertex_st, vertex_en] held in memory at a time.
 */
class degree_data {
 protected:
  /* Current range of vertices in memory */
  vid_t vertex_st;
  vid_t vertex_en;

  stripedio * iomgr;  // I/O manager used for reads/writes (not owned)

  /* Current chunk in memory */
  degree * loaded_chunk;
  std::string filename;  // degree file for this graph
  int filedesc;          // session handle from the I/O manager

  // Derives the degree filename, enables preloading and opens a session.
  virtual void open_file(std::string base_filename) {
    filename = filename_degree_data(base_filename);
    iomgr->allow_preloading(filename);
    filedesc = iomgr->open_session(filename.c_str(), false);
  }

 public:
  /**
   * Constructor
   * @param base_filename base file prefix
   * @param iomgr I/O manager (not owned)
   */
  degree_data(std::string base_filename, stripedio * iomgr) : iomgr(iomgr), loaded_chunk(NULL) {
    vertex_st = vertex_en = 0;
    open_file(base_filename);
  }

  virtual ~degree_data() {
    // Release the loaded window (if any) before closing its session.
    if (loaded_chunk != NULL) {
      iomgr->managed_release(filedesc, &loaded_chunk);
    }
    iomgr->close_session(filedesc);
  }

  /**
   * Loads a chunk of vertex degrees
   * @param _vertex_st first vertex id
   * @param _vertex_en last vertex id, inclusive
   */
  virtual void load(vid_t _vertex_st, vid_t _vertex_en) {
    assert(_vertex_en >= _vertex_st);
    vertex_st = _vertex_st;
    vertex_en = _vertex_en;
    // Inclusive range: one degree record per vertex.
    size_t datasize = (vertex_en - vertex_st + 1) * sizeof(degree);
    size_t datastart = vertex_st * sizeof(degree);

    if (loaded_chunk != NULL) {
      iomgr->managed_release(filedesc, &loaded_chunk);
    }

    iomgr->managed_malloc(filedesc, &loaded_chunk, datasize, datastart);
    iomgr->managed_preada_now(filedesc, &loaded_chunk, datasize, datastart);
  }

  /**
   * Returns id of the first vertex currently in memory. Fails if nothing loaded yet.
   */
  vid_t first_vertex_id() {
    assert(loaded_chunk != NULL);
    return vertex_st;
  }

  // Sets both degree counts of vertexid (must lie in the loaded window).
  virtual void set_degree(vid_t vertexid, int indegree, int outdegree) {
    assert(vertexid >= vertex_st && vertexid <= vertex_en);
    loaded_chunk[vertexid - vertex_st].indegree = indegree;
    loaded_chunk[vertexid - vertex_st].outdegree = outdegree;
  }

  // Copies a whole degree record into the loaded window.
  virtual void set_degree(vid_t vertexid, degree d) {
    assert(vertexid >= vertex_st && vertexid <= vertex_en);
    loaded_chunk[vertexid - vertex_st] = d;
  }

  // Returns the degree record of vertexid (must lie in the loaded window).
  inline degree get_degree(vid_t vertexid) {
    assert(vertexid >= vertex_st && vertexid <= vertex_en);
    return loaded_chunk[vertexid - vertex_st];
  }

  // Writes the loaded window back to disk synchronously.
  void save() {
    size_t datasize = (vertex_en - vertex_st + 1) * sizeof(degree);
    size_t datastart = vertex_st * sizeof(degree);
    iomgr->managed_pwritea_now(filedesc, &loaded_chunk, datasize, datastart);
  }

  // Grows (or truncates) the file to cover vertex ids 0..maxid.
  void ensure_size(vid_t maxid) {
    iomgr->truncate(filedesc, (1 + maxid) * sizeof(degree));
  }
};
}
#endif
| C++ |
/**
* @file
* @author Aapo Kyrola <akyrola@cs.cmu.edu>
* @version 1.0
*
* @section LICENSE
*
* Copyright [2012] [Aapo Kyrola, Guy Blelloch, Carlos Guestrin / Carnegie Mellon University]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @section DESCRIPTION
*
* Engine for the alternative "functional" API for GraphChi.
* The functional engine first processes in-edges, then executes "updates",
* and then loads and updates out-edges.
*/
#ifndef GRAPHCHI_FUNCTIONALENGINE_DEF
#define GRAPHCHI_FUNCTIONALENGINE_DEF
#include "engine/graphchi_engine.hpp"
#include "logger/logger.hpp"
namespace graphchi {
/**
 * Engine for the "functional" API: in-edges are loaded from the memory
 * shard before updates, and out-edges are streamed from the sliding shards
 * after updates (broadcast phase).  Inherits the main loop from
 * graphchi_engine and overrides only the load steps.
 */
template <typename VertexDataType, typename EdgeDataType, typename fvertex_t>
class functional_engine : public graphchi_engine<VertexDataType, EdgeDataType, fvertex_t> {
 public:
  functional_engine(std::string base_filename, int nshards, bool selective_scheduling, metrics &_m) :
      graphchi_engine<VertexDataType, EdgeDataType, fvertex_t>(base_filename, nshards, selective_scheduling, _m){
    _m.set("engine", "functional");
  }

 protected:
  /* Override - load only memory shard (i.e inedges) */
  virtual void load_before_updates(std::vector<fvertex_t> &vertices) {
    logstream(LOG_DEBUG) << "Processing in-edges." << std::endl;
    /* Load memory shard */
    if (!this->memoryshard->loaded()) {
      this->memoryshard->load();
    }

    /* Load vertex edges from memory shard: in-edges only (true, false). */
    this->memoryshard->load_vertices(this->sub_interval_st, this->sub_interval_en, vertices, true, false);

    /* Load vertices */
    this->vertex_data_handler->load(this->sub_interval_st, this->sub_interval_en);

    /* Wait for all reads to complete */
    this->iomgr->wait_for_reads();
  }

  /* Override - do not allocate edge data; only set up vertex objects,
     mark which are scheduled, and account for the work (edge count). */
  virtual void init_vertices(std::vector<fvertex_t> &vertices, graphchi_edge<EdgeDataType> * &e) {
    size_t nvertices = vertices.size();

    /* Compute number of edges */
    size_t num_edges = this->num_edges_subinterval(this->sub_interval_st, this->sub_interval_en);

    /* Assign vertex edge array pointers */
    size_t ecounter = 0;
    for(int i=0; i < (int)nvertices; i++) {
      degree d = this->degree_handler->get_degree(this->sub_interval_st + i);
      int inc = d.indegree;
      int outc = d.outdegree;
      vertices[i] = fvertex_t(this->chicontext, this->sub_interval_st + i, inc, outc);
      if (this->scheduler != NULL) {
        // With selective scheduling, only scheduled vertices are updated.
        bool is_sched = this->scheduler->is_scheduled(this->sub_interval_st + i);
        if (is_sched) {
          vertices[i].scheduled = true;
          this->nupdates++;
          ecounter += inc + outc;
        }
      } else {
        // No scheduler: every vertex in the sub-interval is updated.
        this->nupdates++;
        vertices[i].scheduled = true;
        ecounter += inc + outc;
      }
    }
    this->work += num_edges;
  }

  /* Override - now load sliding shards, to write (broadcast) to out vertices */
  virtual void load_after_updates(std::vector<fvertex_t> &vertices) {
    logstream(LOG_DEBUG) << "Processing out-edges (broadcast)." << std::endl;
    omp_set_num_threads(this->load_threads);
#pragma omp parallel for schedule(dynamic, 1)
    for(int p=0; p < this->nshards; p++) {
      /* Stream forward other than the window partition */
      if (p != this->exec_interval) {
        this->sliding_shards[p]->read_next_vertices(vertices.size(), this->sub_interval_st, vertices,
                                                    this->scheduler != NULL && this->iter == 0);
      } else {
        this->memoryshard->load_vertices(this->sub_interval_st, this->sub_interval_en, vertices, false, true); // Inedges=false, outedges=true
      }
    }
  }
};  // End class
}; // End namespace
#endif
| C++ |
/**
* @file
* @author Aapo Kyrola <akyrola@cs.cmu.edu>
* @version 1.0
*
* @section LICENSE
*
* Copyright [2012] [Aapo Kyrola, Guy Blelloch, Carlos Guestrin / Carnegie Mellon University]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @section DESCRIPTION
*
* The basic GraphChi engine.
*/
#ifndef DEF_GRAPHCHI_GRAPHCHI_ENGINE
#define DEF_GRAPHCHI_GRAPHCHI_ENGINE
#include <iostream>
#include <fstream>
#include <sstream>
#include <cstdio>
#include <fcntl.h>
#include <unistd.h>
#include <assert.h>
#include <omp.h>
#include <vector>
#include <sys/time.h>
#include "api/chifilenames.hpp"
#include "api/graph_objects.hpp"
#include "api/graphchi_context.hpp"
#include "api/graphchi_program.hpp"
#include "engine/auxdata/degree_data.hpp"
#include "engine/auxdata/vertex_data.hpp"
#include "engine/bitset_scheduler.hpp"
#include "io/stripedio.hpp"
#include "logger/logger.hpp"
#include "metrics/metrics.hpp"
#include "shards/memoryshard.hpp"
#include "shards/slidingshard.hpp"
#include "util/pthread_tools.hpp"
namespace graphchi {
template <typename VertexDataType, typename EdgeDataType,
typename svertex_t = graphchi_vertex<VertexDataType, EdgeDataType> >
class graphchi_engine {
public:
typedef sliding_shard<VertexDataType, EdgeDataType, svertex_t> slidingshard_t;
typedef memory_shard<VertexDataType, EdgeDataType, svertex_t> memshard_t;

protected:
    std::string base_filename;  // graph file prefix
    int nshards;                // number of shards (auto-discovered if < 1)

    /* IO manager */
    stripedio * iomgr;

    /* Shards */
    std::vector<slidingshard_t *> sliding_shards;  // one per shard
    memshard_t * memoryshard;                      // shard of the current interval
    std::vector<std::pair<vid_t, vid_t> > intervals;  // vertex interval per shard

    /* Auxilliary data handlers */
    degree_data * degree_handler;
    vertex_data_store<VertexDataType> * vertex_data_handler;

    /* Computational context */
    graphchi_context chicontext;

    /* Scheduler (NULL unless selective scheduling is enabled) */
    bitset_scheduler * scheduler;

    /* Configuration */
    bool modifies_outedges;
    bool modifies_inedges;
    bool only_adjacency;      // skip edge-data files entirely
    bool use_selective_scheduling;
    bool enable_deterministic_parallelism;
    bool store_inedges;
    bool disable_vertexdata_storage;
    bool preload_commit;  // allow storing of modified edge data on preloaded data into memory
    size_t blocksize;     // I/O block size in bytes
    int membudget_mb;     // memory budget in megabytes
    int load_threads;
    int exec_threads;

    /* State */
    vid_t sub_interval_st;  // current sub-interval (inclusive bounds)
    vid_t sub_interval_en;
    int iter;               // current iteration
    int niters;             // total iterations to run
    int exec_interval;      // shard index currently being executed
    size_t nupdates;        // vertex updates performed so far
    size_t nedges;
    size_t work;            // work is the number of edges processed

    unsigned int maxwindow; // cap on vertices per sub-interval window
    mutex modification_lock;

    bool reset_vertexdata;

    /* Metrics */
    metrics &m;
// Logs the effective engine configuration at INFO level.
void print_config() {
    logstream(LOG_INFO) << "Engine configuration: " << std::endl;
    logstream(LOG_INFO) << " exec_threads = " << exec_threads << std::endl;
    logstream(LOG_INFO) << " load_threads = " << load_threads << std::endl;
    logstream(LOG_INFO) << " membudget_mb = " << membudget_mb << std::endl;
    logstream(LOG_INFO) << " blocksize = " << blocksize << std::endl;
    logstream(LOG_INFO) << " scheduler = " << use_selective_scheduling << std::endl;
}
public:
    /**
     * Initialize GraphChi engine
     * @param _base_filename prefix of the graph files
     * @param _nshards number of shards (if < 1, discovered from the
     *        'nshards' command-line option or by probing the files)
     * @param _selective_scheduling if true, uses selective scheduling
     * @param _m metrics collector
     */
    graphchi_engine(std::string _base_filename, int _nshards, bool _selective_scheduling, metrics &_m) : base_filename(_base_filename), nshards(_nshards), use_selective_scheduling(_selective_scheduling), m(_m) {
        /* Initialize IO */
        m.start_time("iomgr_init");
        iomgr = new stripedio(m);
        if (disable_preloading()) {
            iomgr->set_disable_preloading(true);
        }
        m.stop_time("iomgr_init");
#ifndef DYNAMICEDATA
        logstream(LOG_INFO) << "Initializing graphchi_engine. This engine expects " << sizeof(EdgeDataType)
            << "-byte edge data. " << std::endl;
#else
        logstream(LOG_INFO) << "Initializing graphchi_engine with dynamic edge-data. This engine expects " << sizeof(int)
            << "-byte edge data. " << std::endl;
#endif
        /* If number of shards is unspecified - discover */
        if (nshards < 1) {
            nshards = get_option_int("nshards", 0);
            if (nshards < 1) {
                logstream(LOG_WARNING) << "Number of shards was not specified (command-line argument 'nshards'). Trying to detect. " << std::endl;
                nshards = discover_shard_num();
            }
        }

        /* Initialize a plenty of fields */
        memoryshard = NULL;
        modifies_outedges = true;
        modifies_inedges = true;
        preload_commit = true;
        only_adjacency = false;
        reset_vertexdata = false;
        blocksize = get_option_long("blocksize", 4096 * 1024);
#ifndef DYNAMICEDATA
        // Round the block size up to a multiple of the edge record size so
        // edge records never straddle a block boundary.
        while (blocksize % sizeof(EdgeDataType) != 0) blocksize++;
#endif

        disable_vertexdata_storage = false;

        membudget_mb = get_option_int("membudget_mb", 1024);
        nupdates = 0;
        iter = 0;
        work = 0;
        nedges = 0;
        scheduler = NULL;
        store_inedges = true;
        degree_handler = NULL;
        vertex_data_handler = NULL;
        enable_deterministic_parallelism = true;
        load_threads = get_option_int("loadthreads", 2);
        exec_threads = get_option_int("execthreads", omp_get_max_threads());
        maxwindow = 40000000;

        /* Load graph shard interval information */
        _load_vertex_intervals();

        _m.set("file", _base_filename);
        _m.set("engine", "default");
        _m.set("nshards", (size_t)nshards);
    }
// Releases the auxiliary handlers, all shards, and finally the I/O manager
// (last, since the handlers and shards hold sessions on it).
virtual ~graphchi_engine() {
    if (degree_handler != NULL) delete degree_handler;
    if (vertex_data_handler != NULL) delete vertex_data_handler;
    if (memoryshard != NULL) {
        delete memoryshard;
        memoryshard = NULL;
    }
    for(int i=0; i < (int)sliding_shards.size(); i++) {
        if (sliding_shards[i] != NULL) {
            delete sliding_shards[i];
        }
        sliding_shards[i] = NULL;
    }
    degree_handler = NULL;
    vertex_data_handler = NULL;
    delete iomgr;
}
protected:
    // Factory for the degree handler; overridable by subclasses.
    virtual degree_data * create_degree_handler() {
        return new degree_data(base_filename, iomgr);
    }

    // Hook for subclasses to turn off I/O preloading; default keeps it on.
    virtual bool disable_preloading() {
        return false;
    }
/**
 * Try to find suitable shards by trying with different
 * shard numbers. Looks up to shard number 2000.
 * Aborts (assert(0)) if no matching shard files are found.
 */
int discover_shard_num() {
#ifndef DYNAMICEDATA
    int _nshards = find_shards<EdgeDataType>(base_filename);
#else
    // Dynamic edge data shards are keyed by int-sized records.
    int _nshards = find_shards<int>(base_filename);
#endif

    if (_nshards == 0) {
        logstream(LOG_ERROR) << "Could not find suitable shards - maybe you need to run sharder to create them?" << std::endl;
        logstream(LOG_ERROR) << "Was looking with filename [" << base_filename << "]" << std::endl;
        logstream(LOG_ERROR) << "You need to create the shards with edge data-type of size " << sizeof(EdgeDataType) << " bytes." << std::endl;
        logstream(LOG_ERROR) << "To specify the number of shards, use command-line parameter 'nshards'" << std::endl;
        assert(0);
    }
    return _nshards;
}
/// Creates one sliding shard object per shard file. Must be called only
/// once per engine lifetime (asserted). With static edge data the shard
/// files are also registered with the I/O manager as preload candidates.
virtual void initialize_sliding_shards() {
    assert(sliding_shards.size() == 0);
    for(int p=0; p < nshards; p++) {
#ifndef DYNAMICEDATA
        std::string edata_filename = filename_shard_edata<EdgeDataType>(base_filename, p, nshards);
        std::string adj_filename = filename_shard_adj(base_filename, p, nshards);
        /* Let the IO manager know that we will be reading these files, and
         it should decide whether to preload them or not.
         */
        iomgr->allow_preloading(edata_filename);
        iomgr->allow_preloading(adj_filename);
#else
        // Dynamic edge data: stored as int chunk-ids; no preloading.
        std::string edata_filename = filename_shard_edata<int>(base_filename, p, nshards);
        std::string adj_filename = filename_shard_adj(base_filename, p, nshards);
#endif
        sliding_shards.push_back(
                                 new slidingshard_t(iomgr, edata_filename,
                                                    adj_filename,
                                                    intervals[p].first,
                                                    intervals[p].second,
                                                    blocksize,
                                                    m,
                                                    !modifies_outedges,
                                                    only_adjacency));
        // Accumulate the total edge count while we have the shard at hand.
        if (!only_adjacency)
            nedges += sliding_shards[sliding_shards.size() - 1]->num_edges();
    }
}
/**
 * (Re)creates the selective scheduler if selective scheduling is enabled,
 * initially scheduling every vertex; otherwise leaves the scheduler unset.
 */
virtual void initialize_scheduler() {
    if (!use_selective_scheduling) {
        scheduler = NULL;
        return;
    }
    delete scheduler;   // safe when NULL
    scheduler = new bitset_scheduler((int) num_vertices());
    scheduler->add_task_to_all();
}
/**
* If the data is only in one shard, we can just
* keep running from memory.
*/
/// True when the whole graph fits in a single shard; in that case the
/// engine keeps everything in memory and runs all iterations at once.
bool is_inmemory_mode() {
    return nshards == 1;
}
/**
* Extends the window to fill the memory budget, but not over maxvid
*/
/// Chooses the end vertex of the next subinterval: grows the window from
/// fromvid until the estimated memory footprint would exceed membudget,
/// capped at maxvid (inclusive). In in-memory mode the budget is ignored.
/// @param iinterval index of the current execution interval (unused here)
/// @param fromvid   first vertex of the window
/// @param maxvid    hard upper bound for the window end
/// @param membudget memory budget in bytes
/// @return last vertex id of the window
virtual vid_t determine_next_window(vid_t iinterval, vid_t fromvid, vid_t maxvid, size_t membudget) {
    /* Load degrees */
    degree_handler->load(fromvid, maxvid);
    /* If is in-memory-mode, memory budget is not considered. */
    if (is_inmemory_mode()) {
        return maxvid;
    } else {
        size_t memreq = 0;
        int max_interval = maxvid - fromvid;
        for(int i=0; i < max_interval; i++) {
            degree deg = degree_handler->get_degree(fromvid + i);
            int inc = deg.indegree;
            int outc = deg.outdegree;
            // Raw data and object cost included
            memreq += sizeof(svertex_t) + (sizeof(EdgeDataType) + sizeof(vid_t) + sizeof(graphchi_edge<EdgeDataType>))*(outc + inc);
            if (memreq > membudget) {
                logstream(LOG_DEBUG) << "Memory budget exceeded with " << memreq << " bytes." << std::endl;
                // NOTE(review): if the very first vertex (i == 0) already
                // blows the budget this yields fromvid - 1, which the caller's
                // assert(sub_interval_en >= sub_interval_st) would trip —
                // presumably membudget always fits at least one vertex; verify.
                return fromvid + i - 1; // Previous was enough
            }
        }
        return maxvid;
    }
}
/**
* Calculates the exact number of edges
* required to load in the subinterval.
*/
/**
 * Computes the exact number of edges that must be loaded for the
 * subinterval [st, en] (inclusive). In-edges count only when stored;
 * with a scheduler, only scheduled vertices contribute.
 */
size_t num_edges_subinterval(vid_t st, vid_t en) {
    size_t total = 0;
    int nvertices = en - st + 1;
    for (int i = 0; i < nvertices; i++) {
        // Without a scheduler every vertex counts; otherwise only scheduled ones.
        if (scheduler == NULL || scheduler->is_scheduled(st + i)) {
            degree d = degree_handler->get_degree(st + i);
            total += d.indegree * store_inedges + d.outdegree;
        }
    }
    return total;
}
/**
 * Loads everything needed before updating a subinterval: the memory shard
 * (p == -1) with the vertices' in/out edges and their vertex values, and
 * the out-edges streamed from every other sliding shard. Runs the shard
 * loads in parallel with OpenMP, then waits for all outstanding reads.
 *
 * Fix: vertex data was loaded twice — once unconditionally and once
 * guarded by disable_vertexdata_storage. The unconditional load also ran
 * when vertex-data storage was disabled; it has been removed so the load
 * happens exactly once and respects the flag.
 */
virtual void load_before_updates(std::vector<svertex_t> &vertices) {
    omp_set_num_threads(load_threads);
#pragma omp parallel for schedule(dynamic, 1)
    for(int p=-1; p < nshards; p++) {
        if (p==(-1)) {
            /* Load memory shard */
            if (!memoryshard->loaded()) {
                memoryshard->load();
            }
            /* Load vertex edges from memory shard */
            memoryshard->load_vertices(sub_interval_st, sub_interval_en, vertices);
            /* Load vertex values (once, and only if storage is enabled) */
            if (!disable_vertexdata_storage) {
                vertex_data_handler->load(sub_interval_st, sub_interval_en);
            }
        } else {
            /* Load edges from a sliding shard */
            if (p != exec_interval) {
                sliding_shards[p]->read_next_vertices((int) vertices.size(), sub_interval_st, vertices,
                                                      scheduler != NULL && chicontext.iteration == 0);
            }
        }
    }
    /* Wait for all reads to complete */
    iomgr->wait_for_reads();
}
/// Executes userprogram.update() on each scheduled vertex of the current
/// subinterval. Two OpenMP sections run concurrently: one updates
/// parallel-safe vertices with a parallel-for; the other sequentially
/// updates vertices flagged not parallel-safe, preserving determinism
/// when enable_deterministic_parallelism is set.
void exec_updates(GraphChiProgram<VertexDataType, EdgeDataType, svertex_t> &userprogram,
                  std::vector<svertex_t> &vertices) {
    metrics_entry me = m.start_time();
    size_t nvertices = vertices.size();
    if (!enable_deterministic_parallelism) {
        // Determinism off: treat every vertex as safe for concurrent update.
        for(int i=0; i < (int)nvertices; i++) vertices[i].parallel_safe = true;
    }
    omp_set_num_threads(exec_threads);
#pragma omp parallel sections
    {
#pragma omp section
        {
            // Parallel path: safe vertices (or everything when single-threaded).
#pragma omp parallel for schedule(dynamic)
            for(int vid=sub_interval_st; vid <= (int)sub_interval_en; vid++) {
                svertex_t & v = vertices[vid - sub_interval_st];
                if (exec_threads == 1 || v.parallel_safe) {
                    if (!disable_vertexdata_storage)
                        v.dataptr = vertex_data_handler->vertex_data_ptr(vid);
                    if (v.scheduled)
                        userprogram.update(v, chicontext);
                }
            }
        }
#pragma omp section
        {
            // Serialized path: non-parallel-safe vertices, updated in order.
            if (exec_threads > 1 && enable_deterministic_parallelism) {
                int nonsafe_count = 0;
                for(int vid=sub_interval_st; vid <= (int)sub_interval_en; vid++) {
                    svertex_t & v = vertices[vid - sub_interval_st];
                    if (!v.parallel_safe && v.scheduled) {
                        if (!disable_vertexdata_storage)
                            v.dataptr = vertex_data_handler->vertex_data_ptr(vid);
                        userprogram.update(v, chicontext);
                        nonsafe_count++;
                    }
                }
                m.add("serialized-updates", nonsafe_count);
            }
        }
    }
    m.stop_time(me, "execute-updates");
}
/**
Special method for running all iterations with the same vertex-vector.
This is a hacky solution.
FIXME: this does not work well with deterministic parallelism. Needs a
a separate analysis phase to check which vertices can be run in parallel, and
then run it in chunks. Not difficult.
**/
/// In-memory mode driver: runs ALL iterations against the same in-memory
/// vertex vector (single-shard graphs only). Fires the per-iteration and
/// per-interval user callbacks itself, maintains nupdates/work counters,
/// and honors selective scheduling and chicontext.last_iteration.
void exec_updates_inmemory_mode(GraphChiProgram<VertexDataType, EdgeDataType, svertex_t> &userprogram,
                                std::vector<svertex_t> &vertices) {
    work = nupdates = 0;
    for(iter=0; iter<niters; iter++) {
        logstream(LOG_INFO) << "In-memory mode: Iteration " << iter << " starts." << std::endl;
        chicontext.iteration = iter;
        userprogram.before_iteration(iter, chicontext);
        userprogram.before_exec_interval(0, (int)num_vertices(), chicontext);
        if (use_selective_scheduling) {
            // Stop early once the scheduler has no freshly added tasks.
            if (iter > 0 && !scheduler->has_new_tasks) {
                logstream(LOG_INFO) << "No new tasks to run!" << std::endl;
                break;
            }
            // Mark scheduled vertices (every vertex on the first iteration).
            for(int i=0; i < (int)vertices.size(); i++) { // Could, should parallelize
                if (iter == 0 || scheduler->is_scheduled(i)) {
                    vertices[i].scheduled =  true;
                    nupdates++;
                    work += vertices[i].inc + vertices[i].outc;
                } else {
                    vertices[i].scheduled = false;
                }
            }
            scheduler->has_new_tasks = false; // Kind of misleading since scheduler may still have tasks - but no new tasks.
            scheduler->remove_tasks(0, (int)num_vertices());
        } else {
            nupdates += num_vertices();
            work += num_edges();
        }
        exec_updates(userprogram, vertices);
        load_after_updates(vertices);
        userprogram.after_exec_interval(0, (int)num_vertices(), chicontext);
        userprogram.after_iteration(iter, chicontext);
        // User programs may request early termination via last_iteration.
        if (chicontext.last_iteration > 0 && chicontext.last_iteration <= iter){
            logstream(LOG_INFO)<<"Stopping engine since last iteration was set to: " << chicontext.last_iteration << std::endl;
            break;
        }
    }
}
/// Allocates the flat edge buffer for the subinterval (with malloc — the
/// caller releases it) and constructs each svertex_t so that its in-edge
/// and out-edge arrays point into that buffer. Updates nupdates/work and
/// marks vertices scheduled.
virtual void init_vertices(std::vector<svertex_t> &vertices, graphchi_edge<EdgeDataType> * &edata) {
    size_t nvertices = vertices.size();
    /* Compute number of edges */
    size_t num_edges = num_edges_subinterval(sub_interval_st, sub_interval_en);
    /* Allocate edge buffer (malloc'd; ownership passes to the caller) */
    edata = (graphchi_edge<EdgeDataType>*) malloc(num_edges * sizeof(graphchi_edge<EdgeDataType>));
    /* Assign vertex edge array pointers */
    size_t ecounter = 0;
    for(int i=0; i < (int)nvertices; i++) {
        degree d = degree_handler->get_degree(sub_interval_st + i);
        int inc = d.indegree;
        int outc = d.outdegree;
        // In-edges occupy [ecounter, ecounter+inc) only when store_inedges;
        // out-edges follow immediately after.
        vertices[i] = svertex_t(sub_interval_st + i, &edata[ecounter],
                                &edata[ecounter + inc * store_inedges], inc, outc);
        if (scheduler != NULL) {
            bool is_sched = ( scheduler->is_scheduled(sub_interval_st + i));
            if (is_sched) {
                vertices[i].scheduled =  true;
                nupdates++;
                // ecounter advances only for scheduled vertices; unscheduled
                // ones get pointers into space that is never loaded/used.
                ecounter += inc * store_inedges + outc;
            }
        } else {
            nupdates++;
            vertices[i].scheduled =  true;
            ecounter += inc * store_inedges + outc;
        }
    }
    work += ecounter;
    assert(ecounter <= num_edges);
}
void save_vertices(std::vector<svertex_t> &vertices) {
if (disable_vertexdata_storage) return;
size_t nvertices = vertices.size();
bool modified_any_vertex = false;
for(int i=0; i < (int)nvertices; i++) {
if (vertices[i].modified) {
modified_any_vertex = true;
break;
}
}
if (modified_any_vertex) {
vertex_data_handler->save();
}
}
/// Hook invoked after updates of a subinterval; no-op in the base engine
/// (the functional engine overrides it to run a second load phase).
virtual void load_after_updates(std::vector<svertex_t> &vertices) {
    // Do nothing.
}
virtual void write_delta_log() {
// Write delta log
std::string deltafname = iomgr->multiplexprefix(0) + base_filename + ".deltalog";
FILE * df = fopen(deltafname.c_str(), (chicontext.iteration == 0 ? "w" : "a"));
fprintf(df, "%d,%lu,%lu,%lf\n", chicontext.iteration, nupdates, work, chicontext.get_delta());
fclose(df);
}
public:
/// Returns the inclusive vertex-id range [first, second] of the i'th shard interval.
virtual std::pair<vid_t, vid_t> get_interval(int i) {
    return intervals[i];
}

/**
 * Returns first vertex of i'th interval.
 */
vid_t get_interval_start(int i) {
    return get_interval(i).first;
}

/**
 * Returns last vertex (inclusive) of i'th interval.
 */
vid_t get_interval_end(int i) {
    return get_interval(i).second;
}
/// Total vertex count: the last interval's end id is the largest vertex id,
/// and ids are 0-based, hence the +1.
virtual size_t num_vertices() {
    return 1 + intervals[nshards - 1].second;
}

/// Access to the shared execution context (iteration, scheduler, deltas, ...).
graphchi_context &get_context() {
    return chicontext;
}

/// Number of shards the graph is partitioned into.
virtual int get_nshards() {
    return nshards;
}

/// Total number of vertex updates executed so far in this run.
size_t num_updates() {
    return nupdates;
}
/**
* Thread-safe version of num_edges
*/
// NOTE(review): in this base engine num_edges_safe() simply delegates to
// num_edges(); the dynamic engine overrides it with a lock-free counter.
virtual size_t num_edges_safe() {
    return num_edges();
}

/// Edges buffered in memory but not yet committed to shards; always 0 for
/// the static engine (the dynamic engine overrides).
virtual size_t num_buffered_edges() {
    return 0;
}
/**
* Counts the number of edges from shard sizes.
*/
/**
 * Total number of edges, summed from shard sizes at engine start.
 * Only valid after the sliding shards have been initialized, and only
 * when edge data is loaded (not in adjacency-only mode).
 */
virtual size_t num_edges() {
    if (sliding_shards.empty()) {
        logstream(LOG_ERROR) << "engine.num_edges() can be called only after engine has been started. To be fixed later. As a workaround, put the engine into a global variable, and query the number afterwards in begin_iteration(), for example." << std::endl;
        assert(false);
    }
    if (only_adjacency) {
        // TODO: fix.
        logstream(LOG_ERROR) << "Asked number of edges, but engine was run without edge-data." << std::endl;
        return 0;
    }
    return nedges;
}
/**
* Checks whether any vertex is scheduled in the given interval.
* If no scheduler is configured, returns always true.
*/
// TODO: support for a minimum fraction of scheduled vertices
/**
 * True if any vertex in [st, en] (inclusive) is scheduled, or always true
 * when no scheduler is configured.
 */
bool is_any_vertex_scheduled(vid_t st, vid_t en) {
    if (scheduler == NULL) return true;
    vid_t v = st;
    while (v <= en) {
        if (scheduler->is_scheduled(v)) return true;
        ++v;
    }
    return false;
}
/// Hook called at the start of every iteration; no-op here, subclasses override.
virtual void initialize_iter() {
    // Do nothing
}

/// One-time setup before the main loop: optionally zero out vertex data.
virtual void initialize_before_run() {
    if (reset_vertexdata) {
        vertex_data_handler->clear(num_vertices());
    }
}
/// Factory for the memory shard of the current exec_interval covering
/// [interval_st, interval_en]. With DYNAMICEDATA the on-disk edge data is
/// stored as int chunk-ids, hence the different template argument.
/// Caller owns the returned shard.
virtual memshard_t * create_memshard(vid_t interval_st, vid_t interval_en) {
#ifndef DYNAMICEDATA
    return new memshard_t(this->iomgr,
                          filename_shard_edata<EdgeDataType>(base_filename, exec_interval, nshards),
                          filename_shard_adj(base_filename, exec_interval, nshards),
                          interval_st,
                          interval_en,
                          blocksize,
                          m);
#else
    return new memshard_t(this->iomgr,
                          filename_shard_edata<int>(base_filename, exec_interval, nshards),
                          filename_shard_adj(base_filename, exec_interval, nshards),
                          interval_st,
                          interval_en,
                          blocksize,
                          m);
#endif
}
/**
* Run GraphChi program, specified as a template
* parameter.
* @param niters number of iterations
*/
/**
 * Run the GraphChi program for _niters iterations. Main loop structure:
 * for each iteration, for each shard interval, repeatedly pick a
 * subinterval that fits the memory budget, load its vertices and edges,
 * execute updates, and persist results.
 *
 * Fix: the per-subinterval edge buffer is allocated with malloc() in
 * init_vertices() but was released with 'delete', which is undefined
 * behavior; it is now released with free().
 *
 * @param userprogram the vertex update program to execute
 * @param _niters number of iterations
 */
void run(GraphChiProgram<VertexDataType, EdgeDataType, svertex_t> &userprogram, int _niters) {
    m.start_time("runtime");
    if (degree_handler == NULL)
        degree_handler = create_degree_handler();
    niters = _niters;
    logstream(LOG_INFO) << "GraphChi starting" << std::endl;
    logstream(LOG_INFO) << "Licensed under the Apache License 2.0" << std::endl;
    logstream(LOG_INFO) << "Copyright Aapo Kyrola et al., Carnegie Mellon University (2012)" << std::endl;
    if (vertex_data_handler == NULL)
        vertex_data_handler = new vertex_data_store<VertexDataType>(base_filename, num_vertices(), iomgr);
    initialize_before_run();
    /* Setup */
    if (sliding_shards.size() == 0) {
        initialize_sliding_shards();
    } else {
        logstream(LOG_DEBUG) << "Engine being restarted, do not reinitialize." << std::endl;
    }
    initialize_scheduler();
    omp_set_nested(1);  // Nested parallelism: sections contain a parallel-for.
    /* Install a 'mock'-scheduler to chicontext if scheduler
     is not used. */
    chicontext.scheduler = scheduler;
    if (scheduler == NULL) {
        // NOTE(review): this non_scheduler is never freed (leaks once per
        // run); chicontext may be read after run() so it is left alive.
        chicontext.scheduler = new non_scheduler();
    }
    /* Print configuration */
    print_config();
    /* Main loop */
    for(iter=0; iter < niters; iter++) {
        logstream(LOG_INFO) << "Start iteration: " << iter << std::endl;
        initialize_iter();
        /* Check vertex data file has the right size (number of vertices may change) */
        if (!disable_vertexdata_storage)
            vertex_data_handler->check_size(num_vertices());
        /* Keep the context object updated */
        chicontext.filename = base_filename;
        chicontext.iteration = iter;
        chicontext.num_iterations = niters;
        chicontext.nvertices = num_vertices();
        if (!only_adjacency) chicontext.nedges = num_edges();
        chicontext.execthreads = exec_threads;
        chicontext.reset_deltas(exec_threads);
        /* Call iteration-begin event handler */
        if (!is_inmemory_mode()) // In-memory mode fires these callbacks itself
            userprogram.before_iteration(iter, chicontext);
        /* Check scheduler. If no scheduled tasks, terminate. */
        if (use_selective_scheduling) {
            if (scheduler != NULL) {
                if (!scheduler->has_new_tasks) {
                    logstream(LOG_INFO) << "No new tasks to run!" << std::endl;
                    break;
                }
                scheduler->has_new_tasks = false; // Kind of misleading since scheduler may still have tasks - but no new tasks.
            }
        }
        /* Interval loop */
        for(exec_interval=0; exec_interval < nshards; ++exec_interval) {
            /* Determine interval limits */
            vid_t interval_st = get_interval_start(exec_interval);
            vid_t interval_en = get_interval_end(exec_interval);
            if (interval_st > interval_en) continue; // Can happen on very very small graphs.
            if (!is_inmemory_mode())
                userprogram.before_exec_interval(interval_st, interval_en, chicontext);
            /* Flush stream shard for the exec interval */
            sliding_shards[exec_interval]->flush();
            iomgr->wait_for_writes(); // Actually we would need to only wait for writes of given shard. TODO.
            /* Initialize memory shard */
            if (memoryshard != NULL) delete memoryshard;
            memoryshard = create_memshard(interval_st, interval_en);
            memoryshard->only_adjacency = only_adjacency;
            sub_interval_st = interval_st;
            logstream(LOG_INFO) << chicontext.runtime() << "s: Starting: "
            << sub_interval_st << " -- " << interval_en << std::endl;
            while (sub_interval_st <= interval_en) {
                modification_lock.lock();
                /* Determine the sub interval */
                sub_interval_en = determine_next_window(exec_interval,
                                                        sub_interval_st,
                                                        std::min(interval_en, sub_interval_st + maxwindow),
                                                        size_t(membudget_mb) * 1024 * 1024);
                assert(sub_interval_en >= sub_interval_st);
                logstream(LOG_INFO) << "Iteration " << iter << "/" << (niters - 1) << ", subinterval: " << sub_interval_st << " - " << sub_interval_en << std::endl;
                bool any_vertex_scheduled = is_any_vertex_scheduled(sub_interval_st, sub_interval_en);
                if (!any_vertex_scheduled) {
                    logstream(LOG_INFO) << "No vertices scheduled, skip." << std::endl;
                    sub_interval_st = sub_interval_en + 1;
                    modification_lock.unlock();
                    continue;
                }
                /* Initialize vertices */
                int nvertices = sub_interval_en - sub_interval_st + 1;
                graphchi_edge<EdgeDataType> * edata = NULL;
                std::vector<svertex_t> vertices(nvertices, svertex_t());
                init_vertices(vertices, edata);
                /* Now clear scheduler bits for the interval */
                if (scheduler != NULL)
                    scheduler->remove_tasks(sub_interval_st, sub_interval_en);
                /* Load data */
                load_before_updates(vertices);
                modification_lock.unlock();
                logstream(LOG_INFO) << "Start updates" << std::endl;
                /* Execute updates */
                if (!is_inmemory_mode()) {
                    exec_updates(userprogram, vertices);
                    /* Load phase after updates (used by the functional engine) */
                    load_after_updates(vertices);
                } else {
                    exec_updates_inmemory_mode(userprogram, vertices);
                }
                logstream(LOG_INFO) << "Finished updates" << std::endl;
                /* Save vertices */
                if (!disable_vertexdata_storage) {
                    save_vertices(vertices);
                }
                sub_interval_st = sub_interval_en + 1;
                /* Release edge buffer. TODO: reuse. */
                if (edata != NULL) {
                    free(edata);  // Fix: malloc'd in init_vertices(); 'delete' was UB.
                    edata = NULL;
                }
            } // while subintervals
            if (memoryshard->loaded() && !is_inmemory_mode()) {
                logstream(LOG_INFO) << "Commit memshard" << std::endl;
                memoryshard->commit(modifies_inedges, modifies_outedges);
                // Hand off the streaming position so the sliding shard resumes
                // exactly where the memory shard left off.
                sliding_shards[exec_interval]->set_offset(memoryshard->offset_for_stream_cont(), memoryshard->offset_vid_for_stream_cont(),
                                                          memoryshard->edata_ptr_for_stream_cont());
                delete memoryshard;
                memoryshard = NULL;
            }
            if (!is_inmemory_mode())
                userprogram.after_exec_interval(interval_st, interval_en, chicontext);
        } // For exec_interval
        if (!is_inmemory_mode()) // In-memory mode fires these callbacks itself
            userprogram.after_iteration(iter, chicontext);
        /* Move the sliding shard of the current interval to correct position and flush
         writes of all shards for next iteration. */
        for(int p=0; p<nshards; p++) {
            sliding_shards[p]->flush();
            sliding_shards[p]->set_offset(0, 0, 0);
        }
        iomgr->wait_for_writes();
        /* Write progress log */
        write_delta_log();
        /* Check if user has defined a last iteration */
        if (chicontext.last_iteration >= 0) {
            niters = chicontext.last_iteration + 1;
            logstream(LOG_DEBUG) << "Last iteration is now: " << (niters-1) << std::endl;
        }
        iteration_finished();
    } // Iterations
    // Commit preloaded shards
    if (preload_commit)
        iomgr->commit_preloaded();
    m.stop_time("runtime");
    m.set("updates", nupdates);
    m.set("work", work);
    m.set("nvertices", num_vertices());
    m.set("execthreads", (size_t)exec_threads);
    m.set("loadthreads", (size_t)load_threads);
#ifndef GRAPHCHI_DISABLE_COMPRESSION
    m.set("compression", 1);
#endif
    m.set("scheduler", (size_t)use_selective_scheduling);
    m.set("niters", niters);
    // Stop HTTP admin
}
/// Hook fired after each iteration completes; no-op here, subclasses override.
virtual void iteration_finished() {
    // Do nothing
}

/// Access to the striped I/O manager owned by the engine.
stripedio * get_iomanager() {
    return iomgr;
}
/// Declare whether the update function writes in-edge values; affects
/// whether the memory shard commits in-edge data back to disk.
virtual void set_modifies_inedges(bool b) {
    modifies_inedges = b;
}

/// Declare whether the update function writes out-edge values.
virtual void set_modifies_outedges(bool b) {
    modifies_outedges = b;
}

/// If true, only adjacency (graph structure) is loaded — no edge values.
virtual void set_only_adjacency(bool b) {
    only_adjacency = b;
}

/// If true (default), preloaded shard files are committed at the end of run().
virtual void set_preload_commit(bool b){
    preload_commit = b;
}
/**
 * Configure the blocksize used when loading shards.
 * Default is four megabytes (see the constructor's "blocksize" option).
 * @param blocksize_in_bytes the blocksize in bytes
 */
void set_blocksize(size_t blocksize_in_bytes) {
    blocksize = blocksize_in_bytes;
}
/**
 * Set the amount of memory available for loading graph
 * data. Default is 1024 megabytes (option "membudget_mb").
 * @param mbs amount of memory to be used.
 */
void set_membudget_mb(int mbs) {
    membudget_mb = mbs;
}

/// Number of threads used for loading shards (option "loadthreads", default 2).
void set_load_threads(int lt) {
    load_threads = lt;
}

/// Number of threads used for executing updates (option "execthreads").
void set_exec_threads(int et) {
    exec_threads = et;
}
/**
* Sets whether the engine is run in the deterministic
* mode. Default true.
*/
/**
 * Enables/disables deterministic parallel execution (default true).
 * With dynamic edge data determinism is mandatory — disabling it would
 * allow races that corrupt the edge-data structure, so the request is
 * rejected (asserted).
 *
 * Fix: corrected the "determinic" typo in the error message.
 */
void set_enable_deterministic_parallelism(bool b) {
#ifdef DYNAMICEDATA
    if (!b) {
        logstream(LOG_ERROR) << "With dynamic edge data, you cannot disable deterministic parallelism." << std::endl;
        logstream(LOG_ERROR) << "Otherwise race conditions would corrupt the structure of the data." << std::endl;
        assert(b);
        return;
    }
#endif
    enable_deterministic_parallelism = b;
}
public:
/// Turn off loading/saving of per-vertex values (structure-only programs).
void set_disable_vertexdata_storage() {
    this->disable_vertexdata_storage = true;
}

/// Re-enable loading/saving of per-vertex values (the default).
void set_enable_vertexdata_storage() {
    this->disable_vertexdata_storage = false;
}
/// Upper bound on the number of vertices in one subinterval window
/// (default 40,000,000, set in the constructor).
/// Fix: removed the stray ';' that followed the method body.
void set_maxwindow(unsigned int _maxwindow){
    maxwindow = _maxwindow;
}
protected:
/// Loads the per-shard vertex-interval boundaries from disk into 'intervals'.
virtual void _load_vertex_intervals() {
    load_vertex_intervals(base_filename, nshards, intervals);
}
protected:
mutex httplock;
std::map<std::string, std::string> json_params;
public:
/**
* Replace all shards with zero values in edges.
*/
/**
 * Overwrites every edge-data block of every shard with 'zerovalue'.
 * @param zerovalue the value written to every edge slot.
 *
 * Fixes: the inner fill loop reused (shadowed) the outer block index 'i' —
 * renamed to 'j' for clarity; the open() descriptor was used without being
 * checked — now asserted.
 */
template<typename ET>
void reinitialize_edge_data(ET zerovalue) {
    for(int p=0; p < nshards; p++) {
        std::string edatashardname =  filename_shard_edata<ET>(base_filename, p, nshards);
        std::string dirname = dirname_shard_edata_block(edatashardname, blocksize);
        size_t edatasize = get_shard_edata_filesize<ET>(edatashardname);
        logstream(LOG_INFO) << "Clearing data: " << edatashardname << " bytes: " << edatasize << std::endl;
        int nblocks = (edatasize / blocksize) + (edatasize % blocksize == 0 ? 0 : 1);
        for(int i=0; i < nblocks; i++) {
            std::string block_filename = filename_shard_edata_block(edatashardname, i, blocksize);
            int len = (int) std::min(edatasize - i * blocksize, blocksize);
            int f = open(block_filename.c_str(), O_RDWR | O_CREAT, S_IROTH | S_IWOTH | S_IWUSR | S_IRUSR);
            assert(f >= 0);  // Fix: fd was previously used unchecked.
            ET * buf = (ET *) malloc(len);
            for(int j=0; j < (int) (len / sizeof(ET)); j++) {
                buf[j] = zerovalue;
            }
            write_compressed(f, buf, len);
            free(buf);
            close(f);
#ifdef DYNAMICEDATA
            write_block_uncompressed_size(block_filename, len);
#endif
        }
    }
}
/**
 * If true, the vertex data is initialized (cleared to zero) before
 * the engine is started. Default false.
 */
void set_reset_vertexdata(bool reset) {
    reset_vertexdata = reset;
}
/**
* HTTP admin management
*/
/// Stores a key/value pair for the HTTP admin JSON view; thread-safe via httplock.
void set_json(std::string key, std::string value) {
    httplock.lock();
    json_params[key] = value;
    httplock.unlock();
}
/// Convenience overload: stringifies any streamable value and stores it
/// via the string set_json() (thread-safe).
template <typename T>
void set_json(std::string key, T val) {
    std::stringstream converter;
    converter << val;
    set_json(key, converter.str());
}
/// Builds a JSON status snapshot (for the HTTP admin): file, shard count,
/// iteration progress, current window, and per-shard info.
std::string get_info_json() {
    std::stringstream json;
    json << "{";
    json << "\"file\" : \"" << base_filename << "\",\n";
    json << "\"numOfShards\": " << nshards << ",\n";
    json << "\"iteration\": " << chicontext.iteration << ",\n";
    json << "\"numIterations\": " << chicontext.num_iterations << ",\n";
    json << "\"runTime\": " << chicontext.runtime() << ",\n";
    json << "\"updates\": " << nupdates << ",\n";
    json << "\"nvertices\": " << chicontext.nvertices << ",\n";
    json << "\"interval\":" << exec_interval << ",\n";
    json << "\"windowStart\":" << sub_interval_st << ",";
    json << "\"windowEnd\": " << sub_interval_en << ",";
    json << "\"shards\": [";
    // One object per shard, comma-separated.
    for(int p=0; p < (int)nshards; p++) {
        if (p>0) json << ",";
        json << "{";
        json << "\"p\": " << p << ", ";
        json << sliding_shards[p]->get_info_json();
        json << "}";
    }
    json << "]";
    json << "}";
    return json.str();
}
};
};
#endif
| C++ |
/**
* @file
* @author Aapo Kyrola <akyrola@cs.cmu.edu>
* @version 1.0
*
* @section LICENSE
*
* Copyright [2012] [Aapo Kyrola, Guy Blelloch, Carlos Guestrin / Carnegie Mellon University]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @section DESCRIPTION
*
* Bitset scheduler.
*/
#ifndef DEF_GRAPHCHI_BITSETSCHEDULER
#define DEF_GRAPHCHI_BITSETSCHEDULER
#include "graphchi_types.hpp"
#include "api/ischeduler.hpp"
#include "util/dense_bitset.hpp"
namespace graphchi {
/**
 * Selective scheduler backed by a dense bitset: one bit per vertex,
 * set = vertex is scheduled for update.
 *
 * Fix: has_new_tasks was never initialized, so reading it before the
 * first add_task()/add_task_to_all() was an indeterminate-value read;
 * it now starts as false.
 */
class bitset_scheduler : public ischeduler {
private:
    dense_bitset bitset;  // one bit per vertex

public:
    // Set whenever a task is added; consumers reset it to detect new work.
    bool has_new_tasks;

    bitset_scheduler(int nvertices) : bitset(nvertices), has_new_tasks(false) {
    }

    virtual ~bitset_scheduler() {}

    /// Schedule a single vertex.
    inline void add_task(vid_t vertex) {
        bitset.set_bit(vertex);
        has_new_tasks = true;
    }

    /// Grow/shrink the underlying bitset.
    void resize(vid_t maxsize) {
        bitset.resize(maxsize);
    }

    /// True if the vertex is currently scheduled.
    inline bool is_scheduled(vid_t vertex) {
        return bitset.get(vertex);
    }

    /// Unschedule a single vertex.
    inline void remove_task(vid_t vertex) {
        bitset.clear_bit(vertex);
    }

    /// Unschedule all vertices in [fromvertex, tovertex].
    void remove_tasks(vid_t fromvertex, vid_t tovertex) {
        bitset.clear_bits(fromvertex, tovertex);
    }

    /// Schedule every vertex.
    void add_task_to_all() {
        has_new_tasks = true;
        bitset.setall();
    }
};
}
#endif
| C++ |
/**
* @file
* @author Aapo Kyrola <akyrola@cs.cmu.edu>
* @version 1.0
*
* @section LICENSE
*
* Copyright [2012] [Aapo Kyrola, Guy Blelloch, Carlos Guestrin / Carnegie Mellon University]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @section DESCRIPTION
*
* Edge buffers used by the dynamic graph engine.
*/
#ifndef DEF_GRAPHCHI_EDGEBUFFERS
#define DEF_GRAPHCHI_EDGEBUFFERS
#include <stdlib.h>
#include <vector>
namespace graphchi {
/**
* Class for buffered edges. These are edges that
* are currently in memory, and waiting to be commited to disk.
*/
/// A newly created edge held in memory until committed to a shard.
/// The accounted_for_* flags presumably mark whether this edge has been
/// counted into the endpoint's out/in degree — TODO confirm against the
/// commit code (not visible here).
template <typename EdgeDataType>
struct created_edge {
    vid_t src;              // source vertex id
    vid_t dst;              // destination vertex id
    EdgeDataType data;      // edge value
    bool accounted_for_outc;
    bool accounted_for_inc;
    created_edge(vid_t src, vid_t dst, EdgeDataType _data) : src(src), dst(dst), data(_data), accounted_for_outc(false),
    accounted_for_inc(false) {}
};
#define EDGE_BUFFER_CHUNKSIZE 65536
/**
* Efficient chunked edge-buffer with very low memory-overhead (compared
* to just using a std-vector.
*/
template <typename ET>
class edge_buffer_flat {
unsigned int count;
std::vector<created_edge<ET> *> bufs;
public:
edge_buffer_flat() : count(0) {
}
~edge_buffer_flat() {
clear();
}
void clear() {
for(int i=0; i< (int)bufs.size(); i++) {
free(bufs[i]);
}
bufs.clear();
count = 0;
}
unsigned int size() {
return count;
}
created_edge<ET> * operator[](unsigned int i) {
return &bufs[i / EDGE_BUFFER_CHUNKSIZE][i % EDGE_BUFFER_CHUNKSIZE];
}
void add(vid_t src, vid_t dst, ET data) {
add(created_edge<ET>(src, dst, data));
}
void add(created_edge<ET> cedge) {
int idx = count++;
int bufidx = idx / EDGE_BUFFER_CHUNKSIZE;
if (bufidx == (int) bufs.size()) {
bufs.push_back((created_edge<ET>*)calloc(sizeof(created_edge<ET>), EDGE_BUFFER_CHUNKSIZE));
}
bufs[bufidx][idx % EDGE_BUFFER_CHUNKSIZE] = cedge;
}
private:
// Disable value copying
edge_buffer_flat(const edge_buffer_flat&);
edge_buffer_flat& operator=(const edge_buffer_flat&);
};
};
#endif
| C++ |
/**
* @file
* @author Aapo Kyrola <akyrola@cs.cmu.edu>
* @version 1.0
*
* @section LICENSE
*
* Copyright [2012] [Aapo Kyrola, Guy Blelloch, Carlos Guestrin / Carnegie Mellon University]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @section DESCRIPTION
*
* Engine for graphs that change. This is in alpha-stage now.
*/
#ifndef GRAPHCHI_DYNAMICGRAPHENGINE_DEF
#define GRAPHCHI_DYNAMICGRAPHENGINE_DEF
#include <stdlib.h>
#include <vector>
#include "engine/graphchi_engine.hpp"
#include "engine/dynamic_graphs/edgebuffers.hpp"
#include "logger/logger.hpp"
namespace graphchi {
/**
* The actual engine
*/
template <typename VertexDataType, typename EdgeDataType, typename svertex_t = graphchi_vertex<VertexDataType, EdgeDataType> >
class graphchi_dynamicgraph_engine : public graphchi_engine<VertexDataType, EdgeDataType, svertex_t> {
public:
typedef graphchi_engine<VertexDataType, EdgeDataType> base_engine;
typedef edge_buffer_flat<EdgeDataType> edge_buffer;
/**
 * Constructor: delegates to the base engine and initializes the
 * dynamic-graph bookkeeping counters.
 *
 * Fix: last_commit, orig_edges, edges_in_shards, max_vertex_id and
 * max_edge_buffer were left uninitialized but are read later
 * (orig_edges in initialize_sliding_shards(), last_commit in
 * num_buffered_edges()), which was an indeterminate-value read.
 */
graphchi_dynamicgraph_engine(std::string base_filename, int nshards, bool selective_scheduling, metrics &_m) :
graphchi_engine<VertexDataType, EdgeDataType, svertex_t>(base_filename, nshards, selective_scheduling, _m){
    _m.set("engine", "dynamicgraphs");
    added_edges = 0;
    maxshardsize = 200 * 1024 * 1024;
    last_commit = 0;
    orig_edges = 0;
    edges_in_shards = 0;
    max_vertex_id = 0;
    max_edge_buffer = 0;
}
protected:
    /**
     * Bookkeeping of buffered and deleted edges.
     */
    std::vector< std::vector< edge_buffer * > > new_edge_buffers;  // buffers of uncommitted edges, indexed [dst-shard][src-shard] (see init_buffers)
    std::vector<int> deletecounts;            // per-shard deletion counters — usage not visible here, confirm
    std::vector<std::string> shard_suffices;  // filename suffix per dynamic shard (see prepare_clean_slate / create_memshard)
    vid_t max_vertex_id;                      // presumably largest vertex id seen — confirm against add-edge code
    size_t max_edge_buffer;                   // edge-buffer capacity, from option "max_edgebuffer_mb" (init_buffers)
    size_t last_commit;                       // added_edges value at the last commit (see num_buffered_edges)
    size_t added_edges;                       // total edges added since engine start
    std::string state;                        // engine state label, e.g. "initialize-shards"
    size_t maxshardsize;                      // max shard size in bytes (200 MB, set in ctor) — confirm use in splitting code
    size_t edges_in_shards;                   // edges currently stored on disk (initialize_sliding_shards)
    size_t orig_edges;                        // edge count captured at first shard initialization
    /**
     * Concurrency control
     */
    mutex schedulerlock;
    mutex shardlock;                          // guards sliding_shards + new_edge_buffers (see num_edges)
/**
* Preloading will interfere with the operation.
*/
/// Preloading would interfere with shard rewriting — always disabled here.
virtual bool disable_preloading() {
    return true;
}
/**
* Create a dynamic version of the degree file.
*/
/// Copies the static degree file to a ".dynamic" version so degrees can be
/// mutated without touching the original, and returns a handler for the copy.
virtual degree_data * create_degree_handler() {
    /* FIXME: This is bad software design - we should not have a filename dependency here. */
    std::string orig_degree_file = filename_degree_data(this->base_filename);
    std::string dynsuffix = ".dynamic";
    std::string dynamic_degree_file = filename_degree_data(this->base_filename + dynsuffix);
    cp(orig_degree_file, dynamic_degree_file);
    return new degree_data(this->base_filename + dynsuffix, this->iomgr);
}
/**
 * Current edge count: edges stored in the shards plus all buffered
 * (uncommitted) edges. Takes shardlock since buffers may change concurrently.
 */
virtual size_t num_edges() {
    shardlock.lock();
    size_t total = 0;
    for (int shard = 0; shard < this->nshards; shard++) {
        total += this->sliding_shards[shard]->num_edges();
        for (size_t b = 0; b < new_edge_buffers[shard].size(); b++) {
            total += new_edge_buffers[shard][b]->size();
        }
    }
    shardlock.unlock();
    return total;
}
public:
    /// Lock-free edge count from counters (original edges + edges added since).
    size_t num_edges_safe() {
        return added_edges + orig_edges;
    }
    /// Edges added but not yet committed to shards.
    size_t num_buffered_edges() {
        return added_edges - last_commit;
    }
protected:
/// (Re)creates the nshards x nshards grid of edge buffers (indexed by
/// destination shard, then source shard) and migrates any edges from the
/// previous buffers into the new grid — needed when the shard count changes.
void init_buffers() {
    max_edge_buffer = get_option_long("max_edgebuffer_mb", 1000) * 1024 * 1024 / sizeof(created_edge<EdgeDataType>);
    // Save old so if there are existing edges, they can be moved
    std::vector< std::vector< edge_buffer * > > tmp_new_edge_buffers;
    for(int i=0; i < this->nshards; i++) {
        std::vector<edge_buffer *> shardbuffers = std::vector<edge_buffer *>();
        for(int j=0; j < this->nshards; j++) {
            shardbuffers.push_back(new edge_buffer());
        }
        tmp_new_edge_buffers.push_back(shardbuffers);
    }
    // Move old edges. This is not the fastest way... but takes only about 0.05 secs
    // on the twitter experiment
    int i = 0;  // migrated-edge counter (for the log line below)
    for(typename std::vector< std::vector< edge_buffer * > >::iterator oldit = new_edge_buffers.begin();
        oldit != new_edge_buffers.end(); ++oldit) {
        for(typename std::vector< edge_buffer *>::iterator bufit = oldit->begin(); bufit != oldit->end(); ++bufit) {
            edge_buffer &buffer_for_window = **bufit;
            for(unsigned int ebi = 0; ebi < buffer_for_window.size(); ebi++ ) {
                created_edge<EdgeDataType> * edge = buffer_for_window[ebi];
                // Re-bucket under the (possibly new) shard layout.
                int shard = get_shard_for(edge->dst);
                int srcshard = get_shard_for(edge->src);
                i++;
                tmp_new_edge_buffers[shard][srcshard]->add(*edge);
            }
            delete *bufit;
        }
    }
    std::cout << "TRANSFERRED " << i << " EDGES OVER." << std::endl;
    new_edge_buffers = tmp_new_edge_buffers;
}
/**
* In the beginning of run, we copy the shards into dynamic versions.
*/
// Should be changed to read the file in smaller chunks
/**
 * Copies origfile to dstfile in one read+write (the whole file is held
 * in memory at once — should be chunked for large files). If zeroout is
 * true, the destination is written as all-zero bytes of the same length.
 * @return the number of bytes copied.
 *
 * Fix: the source-file descriptor was used without checking open()'s
 * result (the destination fd already was checked); now asserted.
 */
// Should be changed to read the file in smaller chunks
size_t cp(std::string origfile, std::string dstfile, bool zeroout=false) {
    char * buf;
    int f = open(origfile.c_str(), O_RDONLY);
    assert(f >= 0);  // Fix: previously unchecked before readfull().
    size_t len = readfull(f, &buf);
    std::cout << "Length: " << len << std::endl;
    std::cout << origfile << " ----> " << dstfile << std::endl;
    close(f);
    remove(dstfile.c_str());
    int of = open(dstfile.c_str(), O_WRONLY | O_CREAT, S_IROTH | S_IWOTH | S_IWUSR | S_IRUSR);
    assert(of >= 0);
    if (zeroout) {
        memset(buf, 0, len);
    }
    writea(of, buf, len);
    assert(get_filesize(origfile) == get_filesize(dstfile));
    close(of);
    free(buf);
    return len;
}
// Copy the edata directory
void cpedata(std::string origfile, std::string dstfile, bool zeroout=false) {
cp(origfile + ".size", dstfile + ".size");
std::string dirname = dirname_shard_edata_block(dstfile, base_engine::blocksize);
mkdir(dirname.c_str(), 0777);
size_t edatasize = get_shard_edata_filesize<EdgeDataType>(origfile);
int nblocks = (int) ((edatasize / base_engine::blocksize) + (edatasize % base_engine::blocksize == 0 ? 0 : 1));
for(int i=0; i < nblocks; i++) {
std::string origblockname = filename_shard_edata_block(origfile, i, base_engine::blocksize);
std::string dstblockname = filename_shard_edata_block(dstfile, i, base_engine::blocksize);
cp(origblockname, dstblockname);
}
}
/// Creates the memory shard for the current exec interval, pointing at the
/// dynamic (".dyngraph" + suffix) shard files instead of the static ones.
virtual typename base_engine::memshard_t * create_memshard(vid_t interval_st, vid_t interval_en) {
    int p = this->exec_interval;
    std::string adj_filename = filename_shard_adj(this->base_filename, 0, 0) + ".dyngraph" + shard_suffices[p];
    std::string edata_filename = filename_shard_edata<EdgeDataType>(this->base_filename, 0, 0) + ".dyngraph" + shard_suffices[p];
    return new typename base_engine::memshard_t(this->iomgr,
                                                edata_filename,
                                                adj_filename,
                                                interval_st,
                                                interval_en,
                                                base_engine::blocksize,
                                                this->m);
}
/**
* Initialize streaming shards in the start of each iteration.
*/
/**
 * Initialize streaming shards in the start of each iteration.
 * Any shard slot that is NULL (first run, or invalidated by a commit that
 * changed intervals) is (re)created from its ".dyngraph" files.
 * Fix: the previous version duplicated the construction code in two nearly
 * identical branches; this version also tolerates nshards growing after a
 * shard split (the old non-empty branch indexed past the vector's end).
 */
virtual void initialize_sliding_shards() {
    state = "initialize-shards";
    shardlock.lock();
    // Ensure one slot per shard; missing entries are created below.
    if (this->sliding_shards.size() < (size_t) this->nshards) {
        this->sliding_shards.resize(this->nshards, NULL);
    }
    for(int p=0; p < this->nshards; p++) {
        if (this->sliding_shards[p] == NULL) {
            std::string adj_filename = filename_shard_adj(this->base_filename, 0, 0) + ".dyngraph" + shard_suffices[p];
            std::string edata_filename = filename_shard_edata<EdgeDataType>(this->base_filename, 0, 0) + ".dyngraph" + shard_suffices[p];
            this->sliding_shards[p] = new typename base_engine::slidingshard_t(this->iomgr, edata_filename,
                                                                               adj_filename,
                                                                               this->intervals[p].first,
                                                                               this->intervals[p].second,
                                                                               this->blocksize,
                                                                               this->m,
                                                                               !this->modifies_outedges,
                                                                               false);
        }
    }
    shardlock.unlock();
    edges_in_shards = num_edges();
    if (orig_edges == 0) orig_edges = edges_in_shards;
}
void prepare_clean_slate() {
logstream(LOG_INFO) << "Preparing clean slate..." << std::endl;
for(int shard=0; shard < this->nshards; shard++) {
shard_suffices.push_back(get_part_str(shard, this->nshards));
std::string edata_filename = filename_shard_edata<EdgeDataType>(this->base_filename, shard, this->nshards);
std::string adj_filename = filename_shard_adj(this->base_filename, shard, this->nshards);
std::string dest_adj = filename_shard_adj(this->base_filename, 0, 0) + ".dyngraph" + shard_suffices[shard];
std::string dest_edata = filename_shard_edata<EdgeDataType>(this->base_filename, 0, 0) + ".dyngraph" + shard_suffices[shard];
cpedata(edata_filename, dest_edata, true);
cp(adj_filename, dest_adj);
}
}
// Maps a vertex id to the index of the shard whose interval contains it.
// Falls back to the last shard when the id is beyond every interval
// (happens for freshly added vertices before intervals are extended).
int get_shard_for(vid_t dst) {
    int n = this->nshards;
    int result = n - 1; // default: last shard
    for(int s = 0; s < n; s++) {
        vid_t lo = this->intervals[s].first;
        vid_t hi = this->intervals[s].second;
        if (dst >= lo && dst <= hi) {
            result = s;
            break;
        }
    }
    return result;
}
public:
/**
 * Adds edge src->dst with payload edata into the in-memory edge buffers.
 * Returns true when the edge was handled (including the self-edge reject
 * case), false when the caller should sleep/retry: either the engine has
 * not completed its first iteration, or buffers are over capacity.
 * Buffer insertion and max-vertex-id bookkeeping happen under
 * modification_lock; scheduler resizing additionally takes schedulerlock.
 */
bool add_edge(vid_t src, vid_t dst, EdgeDataType edata) {
    if (src == dst) {
        // Self-edges are not supported; return true so callers do not retry.
        logstream(LOG_WARNING) << "WARNING : tried to add self-edge!" << std::endl;
        return true;
    }
    if (this->iter < 1) {
        // Engine not warmed up yet; back off and ask the caller to retry.
        logstream(LOG_WARNING) << "Tried to add edge before first iteration has passed" << std::endl;
        usleep(1000000);
        return false;
    }
    if (added_edges - last_commit > 1.2 * max_edge_buffer) {
        // Buffers more than 20% over the target size; throttle ingestion.
        logstream(LOG_INFO) << "Over 20% of max buffer... hold on...." << std::endl;
        usleep(1000000); // Sleep 1 sec
        return false;
    }
    this->modification_lock.lock();
    added_edges++;
    // Buffer is addressed by [destination-shard][source-shard].
    int shard = get_shard_for(dst);
    int srcshard = get_shard_for(src);
    /* Maintain max vertex id */
    vid_t prev_max_id = max_vertex_id;
    max_vertex_id = std::max(max_vertex_id, dst);
    max_vertex_id = std::max(max_vertex_id, src);
    // Extend degree and vertex data files
    if (max_vertex_id>prev_max_id) {
        this->degree_handler->ensure_size(this->max_vertex_id); // Expand the file
        // Expand scheduler
        if (this->scheduler != NULL) {
            schedulerlock.lock();
            this->scheduler->resize(1 + max_vertex_id);
            schedulerlock.unlock();
        }
    }
    // Add edge to buffers
    new_edge_buffers[shard][srcshard]->add(src, dst, edata);
    this->modification_lock.unlock();
    return true;
}
// Schedules vertex vid for update on the next pass, if a scheduler is in use.
void add_task(vid_t vid) {
    if (this->scheduler == NULL) return;
    this->modification_lock.lock();
    this->scheduler->add_task(vid);
    this->modification_lock.unlock();
}
protected:
/**
 * Attaches edges that still sit in the in-memory buffers (not yet committed
 * to disk) to the vertex objects of the current window [window_st, window_en],
 * so update functions observe them. Caller must hold the modification lock.
 * Fixes: the 'scheduled' condition was tested twice in both loops (dead
 * duplicate); the debug-only 'assert(edge->data < 1e20)' was removed because
 * it does not compile when EdgeDataType is a non-arithmetic struct type.
 */
void incorporate_buffered_edges(int window, vid_t window_st, vid_t window_en, std::vector<svertex_t> & vertices) {
    // Lock acquired
    int ncreated = 0;
    // First outedges: buffers destined to any shard, sourced from this window
    for(int shard=0; shard<this->nshards; shard++) {
        edge_buffer &buffer_for_window = *new_edge_buffers[shard][window];
        for(unsigned int ebi=0; ebi<buffer_for_window.size(); ebi++) {
            created_edge<EdgeDataType> * edge = buffer_for_window[ebi];
            if (edge->src >= window_st && edge->src <= window_en) {
                if (vertices[edge->src-window_st].scheduled) {
                    vertices[edge->src-window_st].add_outedge(edge->dst, &edge->data, false);
                    ncreated++;
                }
            }
        }
    }
    // Then inedges: buffers of this shard, from every source window
    for(int w=0; w<this->nshards; w++) {
        edge_buffer &buffer_for_window = *new_edge_buffers[window][w];
        for(unsigned int ebi=0; ebi<buffer_for_window.size(); ebi++) {
            created_edge<EdgeDataType> * edge = buffer_for_window[ebi];
            if (edge->dst >= window_st && edge->dst <= window_en) {
                if (vertices[edge->dst - window_st].scheduled) {
                    vertices[edge->dst - window_st].add_inedge(edge->src, &edge->data, false);
                    ncreated++;
                }
            }
        }
    }
    logstream(LOG_INFO) << "::: Used " << ncreated << " buffered edges." << std::endl;
}
bool incorporate_new_edge_degrees(int window, vid_t window_st, vid_t window_en) {
bool modified = false;
// First outedges
for(int shard=0; shard < this->nshards; shard++) {
edge_buffer &buffer_for_window = *new_edge_buffers[shard][window];
for(unsigned int ebi=0; ebi<buffer_for_window.size(); ebi++) {
created_edge<EdgeDataType> * edge = buffer_for_window[ebi];
if (edge->src >= window_st && edge->src <= window_en) {
if (!edge->accounted_for_outc) {
degree d = this->degree_handler->get_degree(edge->src);
d.outdegree++;
this->degree_handler->set_degree(edge->src, d);
modified = true;
edge->accounted_for_outc = true;
}
}
}
}
// Then inedges
for(int w=0; w < this->nshards; w++) {
edge_buffer &buffer_for_window = *new_edge_buffers[window][w];
for(unsigned int ebi=0; ebi<buffer_for_window.size(); ebi++) {
created_edge<EdgeDataType> * edge = buffer_for_window[ebi];
if (edge->dst >= window_st && edge->dst <= window_en) {
if (!edge->accounted_for_inc) {
degree d = this->degree_handler->get_degree(edge->dst);
d.indegree++;
this->degree_handler->set_degree(edge->dst, d);
edge->accounted_for_inc = true;
modified = true;
}
}
}
}
return modified;
}
/**
 * Persists updated degree counts after an update pass that may have deleted
 * edges (only active when SUPPORT_DELETIONS is defined; otherwise a no-op).
 * NOTE(review): the window_st parameter is currently unused — confirm
 * whether it was meant to offset into the vertices vector.
 */
void adjust_degrees_for_deleted(std::vector< svertex_t > &vertices, vid_t window_st) {
#ifdef SUPPORT_DELETIONS
    bool somechanged = false;
    for(int i=0; i < (int)vertices.size(); i++) {
        svertex_t &v = vertices[i];
        if (v.scheduled) {
            // Write back the vertex's current in/out counts; presumably these
            // already exclude edges deleted during the update — confirm.
            this->degree_handler->set_degree(v.id(), v.inc, v.outc);
            somechanged = somechanged || (v.deleted_inc + v.deleted_outc > 0);
            degree deg = this->degree_handler->get_degree(v.id());
            if (!(deg.indegree >=0 && deg.outdegree >= 0)) {
                std::cout << "Degree discrepancy: " << deg.indegree << " " << deg.outdegree << std::endl;
            }
            assert(deg.indegree >=0 && deg.outdegree >= 0);
        }
    }
    // Only hit the disk if at least one vertex actually lost edges.
    if (somechanged) {
        this->degree_handler->save();
    }
#endif
}
/**
 * Chooses the largest window end so that the vertices [fromvid, result] and
 * their edges fit in the memory budget. Degrees are loaded first, and degree
 * increments from still-buffered edges are folded in so the estimate covers
 * uncommitted edges as well.
 * NOTE(review): if even the first vertex exceeds the budget this returns
 * fromvid - 1; callers appear to rely on some progress being made — confirm.
 */
virtual vid_t determine_next_window(vid_t iinterval, vid_t fromvid, vid_t maxvid, size_t membudget) {
    /* Load degrees */
    this->degree_handler->load(fromvid, maxvid);
    // Persist degrees if buffered edges changed them.
    if (incorporate_new_edge_degrees(iinterval, fromvid, maxvid)) {
        this->degree_handler->save();
    }
    size_t memreq = 0;
    int max_interval = maxvid - fromvid;
    for(int i=0; i < max_interval; i++) {
        degree deg = this->degree_handler->get_degree(fromvid + i);
        int inc = deg.indegree;
        int outc = deg.outdegree;
        // Raw data and object cost included
        memreq += sizeof(svertex_t) + (sizeof(EdgeDataType) + sizeof(vid_t) +
            sizeof(graphchi_edge<EdgeDataType>))*(outc + inc);
        if (memreq > membudget) {
            return fromvid + i - 1; // Previous was enough
        }
    }
    return maxvid;
}
/**
 * Hook run before updating a window: delegates edge loading to the base
 * engine and, when deletions are enabled, tallies deleted in-edges for the
 * interval currently being executed.
 */
virtual void load_before_updates(std::vector<svertex_t> &vertices) {
    state = "load-edges";
    this->base_engine::load_before_updates(vertices);
#ifdef SUPPORT_DELETIONS
    size_t nv = vertices.size();
    for(size_t i=0; i < nv; i++) {
        deletecounts[this->exec_interval] += vertices[i].deleted_inc;
    }
#endif
    state = "execute-updates";
}
/* Initializes the window's vertex objects via the base engine, then attaches
   edges that are still in the in-memory buffers (not yet committed to disk)
   so updates see the freshest graph. */
virtual void init_vertices(std::vector<svertex_t> &vertices, graphchi_edge<EdgeDataType> * &edata) {
    base_engine::init_vertices(vertices, edata);
    incorporate_buffered_edges(this->exec_interval, this->sub_interval_st, this->sub_interval_en, vertices);
}
/**
 * Per-iteration setup: extend the last interval to cover any vertices added
 * since the previous iteration, grow vertex data storage, (re)create the
 * sliding shards, and reset the per-shard deleted-edge counters.
 */
virtual void initialize_iter() {
    this->intervals[this->nshards - 1].second = max_vertex_id;
    this->vertex_data_handler->check_size(max_vertex_id + 1);
    initialize_sliding_shards();
    /* Deleted edge tracking: one counter per shard, reset to zero. */
    deletecounts.assign(this->nshards, 0);
}
/* After each iteration (except the last): flush the sliding shards, rewind
   their read offsets for the next pass, wait for pending writes, and fold
   buffered graph modifications into the on-disk shards. */
virtual void iteration_finished() {
    if (this->iter < this->niters - 1) {
        // Flush and restart stream shards before committing edges
        for(int p=0; p < this->nshards; p++) {
            this->sliding_shards[p]->flush();
            this->sliding_shards[p]->set_offset(0, 0, 0);
        }
        this->iomgr->wait_for_writes();
        commit_graph_changes();
    }
}
/* One-time setup before the run: create private ".dyngraph" shard copies,
   allocate the edge buffers, derive the initial max vertex id, and zero the
   vertex data. orig_edges is reset so the first shard init records it. */
virtual void initialize_before_run() {
    prepare_clean_slate();
    init_buffers();
    max_vertex_id = (vid_t) (this->num_vertices() - 1);
    this->vertex_data_handler->clear(this->num_vertices());
    orig_edges = 0;
}
/* */
/* After a window's updates: let the base engine persist edge data, then
   write back degrees changed by edge deletions (if deletions are enabled). */
virtual void load_after_updates(std::vector<svertex_t> &vertices) {
    this->base_engine::load_after_updates(vertices);
    adjust_degrees_for_deleted(vertices, this->sub_interval_st);
}
public:
// Extends the run so it stops extra_iters iterations after the current one
// (used to wind down ingestion gracefully).
void finish_after_iters(int extra_iters) {
    this->chicontext.last_iteration = this->chicontext.iteration + extra_iters;
}
protected:
#define BBUF 32000000
/**
* Code for committing changes to disk.
*/
/**
 * Folds buffered edge insertions (and tracked deletions) into the on-disk
 * shards. Shards with too few buffered changes are left untouched; a shard
 * whose edge-data file exceeds maxshardsize is split into two new shards
 * (changing the vertex intervals). Finally the interval/suffix bookkeeping
 * is updated, stale shard files removed, and the edge buffers reset.
 * Takes modification_lock for the whole operation; shardlock around shard
 * object mutation.
 */
void commit_graph_changes() {
    // Count deleted
    size_t ndeleted = 0;
    for(size_t i=0; i < deletecounts.size(); i++) {
        ndeleted += deletecounts[i];
    }
    // TODO: remove ad hoc limits, move to configuration.
    // Perhaps do some cost estimation?
    logstream(LOG_DEBUG) << "Total deleted: " << ndeleted << " total edges: " << this->num_edges() << std::endl;
    // Skip the (expensive) commit unless buffers are ~80% full or at least
    // 10% of the edges have been deleted.
    if (added_edges - last_commit < max_edge_buffer * 0.8 && ndeleted < this->num_edges() * 0.1) {
        std::cout << "==============================" << std::endl;
        std::cout << "No time to commit yet.... Only " << (added_edges - last_commit) << " / " << max_edge_buffer
            << " in buffers" << std::endl;
        return;
    }
    bool rangeschanged = false;
    state = "commit-ingests";
    vid_t maxwindow = 4000000; // FIXME: HARDCODE
    size_t mem_budget = this->membudget_mb * 1024 * 1024;
    this->modification_lock.lock();
    // Clean up sliding shards
    // NOTE: there is a problem since this will waste
    // io-sessions
    std::vector<int> edgespershard;
    for(int p=0; p < this->nshards; p++) {
        edgespershard.push_back(this->sliding_shards[p]->num_edges());
    }
    std::vector<std::pair<vid_t, vid_t> > newranges;
    std::vector<std::string> newsuffices;
    char iterstr[128];
    sprintf(iterstr, "%d", this->iter);
    // A shard must hold at least half its "fair share" of the buffer (or
    // >=20% deletions) before it is rewritten.
    size_t min_buffer_in_shard_to_commit = max_edge_buffer / this->nshards / 2;
    std::vector<bool> was_commited(this->nshards, true);
    for(int shard=0; shard < this->nshards; shard++) {
        std::vector<edge_buffer*> &shard_buffer = new_edge_buffers[shard];
        // Check there are any new edges
        size_t bufedges = 0;
        for(int w=0; w < this->nshards; w++) {
            bufedges += shard_buffer[w]->size();
        }
        if (bufedges < min_buffer_in_shard_to_commit && deletecounts[shard] * 1.0 / edgespershard[shard] < 0.2) {
            // Not worth rewriting this shard yet: keep its range and suffix.
            logstream(LOG_DEBUG) << shard << ": not enough edges for shard: " << bufedges << " deleted:" << deletecounts[shard] << "/" << edgespershard[shard] << std::endl;
            newranges.push_back(this->intervals[shard]);
            newsuffices.push_back(shard_suffices[shard]);
            was_commited[shard] = false;
            continue;
        } else {
            logstream(LOG_DEBUG) << shard << ": going to rewrite, deleted:" << deletecounts[shard] << "/" << edgespershard[shard] << " bufedges: " << bufedges << std::endl;
            shardlock.lock();
            delete this->sliding_shards[shard];
            this->sliding_shards[shard] = NULL;
            shardlock.unlock();
        }
        std::string origshardfile = filename_shard_edata<EdgeDataType>(this->base_filename, 0, 0) + ".dyngraph" + shard_suffices[shard];
        std::string origadjfile = filename_shard_adj(this->base_filename, 0, 0) + ".dyngraph" + shard_suffices[shard];
        // Get file size
        off_t sz = get_shard_edata_filesize<EdgeDataType>(origshardfile);
        int outparts = ( sz >= (off_t) maxshardsize ? 2 : 1);
        vid_t splitpos = 0;
        std::cout << "Size: " << sz << " vs. maxshardsize: " << maxshardsize << std::endl;
        // NOTE(review): '>' here but '>=' above for outparts — if sz equals
        // maxshardsize exactly, outparts becomes 2 while splitpos stays 0.
        // Looks like a latent off-by-one; confirm.
        if (sz > (off_t)maxshardsize) {
            rangeschanged = true;
            // Compute number edges (not including ingested ones!)
            size_t halfedges = (sz / sizeof(EdgeDataType)) / 2;
            // Correct to include estimate of ingested ones
            for(int w=0; w < this->nshards; w++) {
                halfedges += new_edge_buffers[shard][w]->size() / 2;
            }
            size_t nedges = 0;
            vid_t st = this->intervals[shard].first;
            splitpos = st + (this->intervals[shard].second - st) / 2;
            bool found = false;
            // Scan degrees window by window until the cumulative in-degree
            // reaches half the edges: that vertex becomes the split point.
            while(st < this->intervals[shard].second) {
                vid_t en = std::min(st + maxwindow, this->intervals[shard].second);
                this->degree_handler->load(st, en);
                int nv = en - st + 1;
                for(int i=0; i<nv; i++) {
                    nedges += this->degree_handler->get_degree(st + i).indegree;
                    if (nedges >= halfedges) {
                        splitpos = i+st-1;
                        found = true;
                        break;
                    }
                }
                if (found) break;
                st = en+1;
            }
            assert(splitpos > this->intervals[shard].first && splitpos < this->intervals[shard].second);
        }
        for(int splits=0; splits<outparts; splits++) { // Note: this is not super-efficient because we do the operation twice in case of split
            typename base_engine::slidingshard_t * curshard =
                new typename base_engine::slidingshard_t(this->iomgr, origshardfile, origadjfile,
                    this->intervals[shard].first, this->intervals[shard].second,
                    base_engine::blocksize, this->m, true);
            std::string suffix = "";
            char partstr[128];
            sprintf(partstr, "%d", shard);
            if (splits == 0) {
                suffix = std::string(partstr);
            } else {
                suffix = std::string(partstr) + ".split";
            }
            // Iteration number in the suffix keeps rewritten files unique.
            suffix = suffix + ".i" + std::string(iterstr);
            newsuffices.push_back(suffix);
            std::string outfile_edata = filename_shard_edata<EdgeDataType>(this->base_filename, 0, 0) + ".dyngraph" + suffix;
            std::string outfile_edata_dirname = dirname_shard_edata_block(outfile_edata, base_engine::blocksize);
            mkdir(outfile_edata_dirname.c_str(), 0777);
            std::string outfile_adj = filename_shard_adj(this->base_filename, 0, 0) + ".dyngraph" + suffix;
            vid_t splitstart = this->intervals[shard].first;
            vid_t splitend = this->intervals[shard].second;
            if (shard == this->nshards - 1) splitend = max_vertex_id;
            // This is looking more and more hacky
            if (outparts == 2) {
                if (splits==0) splitend = splitpos;
                else splitstart = splitpos+1;
            }
            newranges.push_back(std::pair<vid_t,vid_t>(splitstart, splitend));
            // Create the adj file
            int f = open(outfile_adj.c_str(), O_WRONLY | O_CREAT, S_IROTH | S_IWOTH | S_IWUSR | S_IRUSR);
            int err = ftruncate(f, 0);
            if (err != 0) {
                logstream(LOG_ERROR) << "Error truncating " << outfile_adj << ", error: " << strerror(errno) << std::endl;
            }
            assert(err == 0);
            /* Create edge data file */
            int ef = open(outfile_edata.c_str(), O_WRONLY | O_CREAT, S_IROTH | S_IWOTH | S_IWUSR | S_IRUSR);
            err = ftruncate(ef, 0);
            if (err != 0) {
                logstream(LOG_ERROR) << "Error truncating " << outfile_edata << ", error: " << strerror(errno) << std::endl;
            }
            assert(err == 0);
            char * buf = (char*) malloc(BBUF);
            char * bufptr = buf;
            char * ebuf = (char*) malloc(BBUF);
            char * ebufptr = ebuf;
            size_t tot_edatabytes = 0;
            // Now create a new shard file window by window
            for(int window=0; window < this->nshards; window++) {
                vid_t range_st = this->intervals[window].first;
                vid_t range_en = this->intervals[window].second;
                if (window == this->nshards - 1) range_en = max_vertex_id;
                edge_buffer &buffer_for_window = *new_edge_buffers[shard][window];
                for(vid_t window_st=range_st; window_st<range_en; ) {
                    // Check how much we can read
                    vid_t window_en = determine_next_window(window, window_st,
                        std::min(range_en, window_st + (vid_t)maxwindow), mem_budget);
                    // Create vertices
                    int nvertices = window_en-window_st+1;
                    std::vector< svertex_t > vertices(nvertices, svertex_t());
                    /* Allocate edge data: to do this, need to compute sum of in & out edges */
                    graphchi_edge<EdgeDataType> * edata = NULL;
                    size_t num_edges=0;
                    for(int i=0; i<nvertices; i++) {
                        degree d = this->degree_handler->get_degree(i + window_st);
                        num_edges += d.indegree+d.outdegree;
                    }
                    size_t ecounter = 0;
                    edata = (graphchi_edge<EdgeDataType>*)malloc(num_edges * sizeof(graphchi_edge<EdgeDataType>));
                    for(int i=0; i<(int)nvertices; i++) {
                        // int inc = degrees[i].indegree;
                        degree d = this->degree_handler->get_degree(i + window_st);
                        int outc = d.outdegree;
                        // Only out-edges are materialized here (in-edge count 0).
                        vertices[i] = svertex_t(window_st+i, &edata[ecounter],
                            &edata[ecounter+0], 0, outc);
                        vertices[i].scheduled = true; // guarantee that shard will read it
                        ecounter += 0 + outc;
                    }
                    // Read vertices in
                    curshard->read_next_vertices(nvertices, window_st, vertices, false, true);
                    // Incorporate buffered edges
                    for(unsigned int ebi=0; ebi<buffer_for_window.size(); ebi++) {
                        created_edge<EdgeDataType> * edge = buffer_for_window[ebi];
                        if (edge->src >= window_st && edge->src <= window_en) {
                            vertices[edge->src-window_st].add_outedge(edge->dst, &edge->data, false);
                        }
                    }
                    this->iomgr->wait_for_reads();
                    // If we are splitting, need to adjust counts
                    std::vector<int> adjusted_counts(vertices.size(), 0);
                    for(int iv=0; iv< (int)vertices.size(); iv++) adjusted_counts[iv] = vertices[iv].outc;
                    if (outparts == 2) {
                        // do actual counts by removing the edges not in this split
                        for(int iv=0; iv< (int)vertices.size(); iv++) {
                            svertex_t &vertex = vertices[iv];
                            for(int i=0; i<vertex.outc; i++) {
                                if (!(vertex.outedge(i)->vertexid >= splitstart && vertex.outedge(i)->vertexid <= splitend)) {
                                    adjusted_counts[iv]--;
                                }
                            }
                        }
                    }
#ifdef SUPPORT_DELETIONS
                    // Adjust counts to remove deleted edges
                    for(int iv=0; iv< (int)vertices.size(); iv++) {
                        svertex_t &vertex = vertices[iv];
                        for(int i=0; i<vertex.outc; i++) {
                            if (is_deleted_edge_value(vertex.outedge(i)->get_data())) {
                                adjusted_counts[iv]--;
                                assert(false);
                            }
                        }
                    }
                    // Adjust degrees
                    // adjust_degrees_for_deleted(vertices, window_st); // Double counting problem, that is why commented out.
#endif
                    size_t ne = 0;
                    // Emit the adjacency format: a zero marker + run-length of
                    // consecutive empty vertices, or a count (0xff-escaped to
                    // 32 bits when >=255) followed by the neighbor ids.
                    for(vid_t curvid=window_st; curvid<=window_en;) {
                        int iv = curvid - window_st;
                        svertex_t &vertex = vertices[iv];
                        int count = adjusted_counts[iv];
                        if (count == 0) {
                            // Check how many next ones are zeros
                            int nz=0;
                            curvid++;
                            for(; curvid <= window_en && nz<254; curvid++) {
                                if (adjusted_counts[curvid - window_st] == 0) {
                                    nz++;
                                } else {
                                    break;
                                }
                            }
                            uint8_t nnz = (uint8_t)nz;
                            // Write zero
                            bwrite<uint8_t>(f, buf, bufptr, 0);
                            bwrite<uint8_t>(f, buf, bufptr, nnz);
                        } else {
                            if (count < 255) {
                                uint8_t x = (uint8_t)count;
                                bwrite<uint8_t>(f, buf, bufptr, x);
                            } else {
                                bwrite<uint8_t>(f, buf, bufptr, 0xff);
                                bwrite<uint32_t>(f, buf, bufptr, (uint32_t)count);
                            }
                            for(int i=0; i<vertex.outc; i++) {
                                // Keep only edges belonging to this split range.
                                if (vertex.outedge(i)->vertexid >= splitstart && vertex.outedge(i)->vertexid <= splitend) {
#ifdef SUPPORT_DELETIONS
                                    if (is_deleted_edge_value(vertex.outedge(i)->get_data())) {
                                        assert(false);
                                    }
#endif
                                    bwrite(f, buf, bufptr, vertex.outedge(i)->vertexid);
                                    bwrite_edata<EdgeDataType>(ebuf, ebufptr, vertex.outedge(i)->get_data(), tot_edatabytes, outfile_edata);
                                    ne++;
                                } else assert(outparts == 2);
                            }
                            curvid++;
                        }
                    }
                    free(edata);
                    window_st = window_en+1;
                }
            } // end window
            // Flush buffers
            writea(f, buf, bufptr-buf);
            edata_flush<EdgeDataType>(ebuf, ebufptr, outfile_edata, tot_edatabytes);
            // Write .size file for the edata directory
            std::string sizefilename = outfile_edata + ".size";
            std::ofstream ofs(sizefilename.c_str());
            ofs << tot_edatabytes;
            ofs.close();
            // Release
            free(buf);
            free(ebuf);
            delete curshard;
            close(f);
            close(ef);
            this->iomgr->wait_for_writes();
        } // splits
        // Delete old shard
        std::string old_file_adj = filename_shard_adj(this->base_filename, 0, 0) + ".dyngraph" + shard_suffices[shard];
        std::string old_file_edata = filename_shard_edata<EdgeDataType>(this->base_filename, 0, 0) + ".dyngraph" + shard_suffices[shard];
        std::string old_blockdir = dirname_shard_edata_block(old_file_edata, base_engine::blocksize);
        remove(old_file_adj.c_str());
        remove(old_blockdir.c_str());
        std::string old_sizefilename = old_file_edata + ".size";
        remove(old_sizefilename.c_str());
    }
    // Clear buffers (sanity-check every committed edge was degree-accounted)
    for(int shard=0; shard < this->nshards; shard++) {
        if (was_commited[shard]) {
            for (int win=0; win < this->nshards; win++) {
                edge_buffer &buffer_for_window = *new_edge_buffers[shard][win];
                for(unsigned int ebi=0; ebi<buffer_for_window.size(); ebi++) {
                    created_edge<EdgeDataType> * edge = buffer_for_window[ebi];
                    if (!edge->accounted_for_outc) {
                        std::cout << "Edge not accounted (out)! " << edge->src << " -- " << edge->dst << std::endl;
                    }
                    if (!edge->accounted_for_inc) {
                        std::cout << "Edge not accounted (in)! " << edge->src << " -- " << edge->dst << std::endl;
                    }
                    assert(edge->accounted_for_inc);
                    assert(edge->accounted_for_outc);
                }
                buffer_for_window.clear();
            }
        }
    }
    // Update number of shards:
    last_commit = added_edges;
    this->intervals = newranges;
    shard_suffices = newsuffices;
    this->nshards = (int) this->intervals.size();
    /* If the vertex intervals change, need to recreate the shard objects. */
    if (rangeschanged) {
        shardlock.lock();
        for (int i=0; i<(int)this->sliding_shards.size(); i++) {
            if (this->sliding_shards[i] != NULL) delete this->sliding_shards[i];
        }
        this->sliding_shards.clear();
        shardlock.unlock();
    }
    /* Write meta-file with the number of vertices */
    std::string numv_filename = base_engine::base_filename + ".numvertices";
    FILE * f = fopen(numv_filename.c_str(), "w");
    fprintf(f, "%lu\n", base_engine::num_vertices());
    fclose(f);
    init_buffers();
    this->modification_lock.unlock();
}
template <typename T>
void bwrite(int f, char * buf, char * &bufptr, T val) {
if (bufptr+sizeof(T)-buf>=BBUF) {
writea(f, buf, bufptr-buf);
bufptr = buf;
}
*((T*)bufptr) = val;
bufptr += sizeof(T);
}
/**
 * Writes the buffered edge-data bytes [buf, bufptr) as one compressed block
 * file of the sharded edge-data directory.
 * @param totbytes total edge-data bytes accounted so far, including this
 *        buffer's contents; used to derive the block id.
 */
template <typename T>
void edata_flush(char * buf, char * bufptr, std::string & shard_filename, size_t totbytes) {
    // (totbytes - sizeof(T)) is the byte offset where the last value starts;
    // its block is the one being flushed — assumes values never straddle
    // block boundaries (bwrite_edata flushes before that could happen).
    int blockid = (int) ((totbytes - sizeof(T)) / base_engine::blocksize);
    int len = (int) (bufptr - buf);
    assert(len <= (int)base_engine::blocksize);
    std::string block_filename = filename_shard_edata_block(shard_filename, blockid, base_engine::blocksize);
    int f = open(block_filename.c_str(), O_RDWR | O_CREAT, S_IROTH | S_IWOTH | S_IWUSR | S_IRUSR);
    write_compressed(f, buf, len);
    close(f);
}
/**
 * Buffered write of one edge-data value: if appending val would overflow the
 * current block-sized buffer, the full buffer is flushed to its block file
 * first; then val is appended and the running byte total updated.
 */
template <typename T>
void bwrite_edata(char * buf, char * &bufptr, T val, size_t & totbytes, std::string & shard_filename) {
    if ((int) (bufptr + sizeof(T) - buf) > (int)base_engine::blocksize) {
        // Flush BEFORE accounting val: edata_flush derives the block id from
        // totbytes of the values already in the buffer.
        edata_flush<T>(buf, bufptr, shard_filename, totbytes);
        bufptr = buf;
    }
    totbytes += sizeof(T);
    *((T*)bufptr) = val;
    bufptr += sizeof(T);
}
/**
* HTTP admin
*/
public:
/**
 * Produces a JSON snapshot of engine state for the HTTP admin interface.
 * Holds httplock for the duration and shardlock while reading shard states.
 */
std::string get_info_json() {
    std::stringstream json;
    this->httplock.lock();
    /**
     * FIXME: too much duplicate with graphchi_engine
     */
    json << "{";
    json << "\"state\" : \"" << state << "\",\n";
    json << "\"file\" : \"" << this->base_filename << "\",\n";
    json << "\"numOfShards\": " << this->nshards << ",\n";
    json << "\"iteration\": " << this->chicontext.iteration << ",\n";
    json << "\"numIterations\": " << this->chicontext.num_iterations << ",\n";
    json << "\"runTime\": " << this->chicontext.runtime() << ",\n";
    json << "\"updates\": " << this->nupdates << ",\n";
    json << "\"nvertices\": " << this->chicontext.nvertices << ",\n";
    json << "\"edges\": " << num_edges_safe() << ",\n";
    json << "\"edgesInBuffers\": " << added_edges << ",\n";
    json << "\"interval\":" << this->exec_interval << ",\n";
    json << "\"windowStart\":" << this->sub_interval_st << ",";
    json << "\"windowEnd\": " << this->sub_interval_en << ",";
    json << "\"shards\": [";
    shardlock.lock();
    for(int p=0; p < (int) this->sliding_shards.size(); p++) {
        if (p>0) json << ",";
        typename base_engine::slidingshard_t * shard = this->sliding_shards[p];
        if (shard != NULL) {
            json << "{";
            json << "\"p\": " << p << ", ";
            json << shard->get_info_json();
            json << "}";
        } else {
            // Shard objects are recreated lazily after an interval change.
            json << "{";
            json << "\"p\": " << p << ", ";
            json << "\"state\": \"recreated\"";
            json << "}";
        }
    }
    shardlock.unlock();
    json << "]";
    // Append user-supplied key/value parameters.
    std::map<std::string, std::string>::iterator it;
    for(it=this->json_params.begin(); it != this->json_params.end(); ++it) {
        json << ", \"" << it->first << "\":\"";
        json << it->second << "\"";
    }
    json << "}";
    this->httplock.unlock();
    return json.str();
}
}; // End class
}; // End namespace
#endif
| C++ |
/**
* @file
* @author Aapo Kyrola <akyrola@cs.cmu.edu>
* @version 1.0
*
* @section LICENSE
*
* Copyright [2012] [Aapo Kyrola, Guy Blelloch, Carlos Guestrin / Carnegie Mellon University]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @section DESCRIPTION
*
* Interface for metrics reporters.
*/
#ifndef DEF_GRAPHCHI_IMETRICS_REPORTER
#define DEF_GRAPHCHI_IMETRICS_REPORTER
#include <map>
namespace graphchi {
/* Abstract interface for metrics output sinks (console, file, HTML, ...). */
class imetrics_reporter {
public:
    // Fix: concrete reporters are deleted through this base class, which is
    // undefined behavior without a virtual destructor. (The duplicate
    // declaration in metrics.hpp already declares one.)
    virtual ~imetrics_reporter() {}
    // Emits all entries collected under the given metrics name/identifier.
    virtual void do_report(std::string name, std::string id, std::map<std::string, metrics_entry> & entries) = 0;
};
};
#endif
| C++ |
/**
* @file
* @author Aapo Kyrola <akyrola@cs.cmu.edu>
* @version 1.0
*
* @section LICENSE
*
* Copyright [2012] [Aapo Kyrola, Guy Blelloch, Carlos Guestrin / Carnegie Mellon University]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @section DESCRIPTION
*
* Metrics.
*/
#ifndef DEF_METRICS_HPP
#define DEF_METRICS_HPP
#include <assert.h>
#include <sys/time.h>

#include <algorithm>
#include <cstdio>
#include <cstring>
#include <limits>
#include <map>
#include <vector>

#include "util/pthread_tools.hpp"
#include "util/cmdopts.hpp"
namespace graphchi {
// Kinds of values a metrics entry can hold.
enum metrictype {REAL, INTEGER, TIME, STRING, VECTOR};

// Data structure for storing metric entries
// NOTE: This data structure is not very optimal, should
// of course use inheritance. But for this purpose,
// it works fine as the number of metrics entry is small.
struct metrics_entry {
    size_t count;       // number of samples folded into this entry
    double value;       // current / running value
    double minvalue;    // smallest sample seen
    double cumvalue;    // sum over all samples
    double maxvalue;    // largest sample seen
    metrictype valtype;
    std::string stringval;
    std::vector<double> v;   // per-index samples for VECTOR entries
    timeval start_time;
    double lasttime;    // duration of the most recent timer_start/stop pair

    // Fix: previously left every member uninitialized, so copies of
    // default-constructed entries carried indeterminate values.
    metrics_entry() : count(0), value(0), minvalue(0), cumvalue(0),
                      maxvalue(0), valtype(REAL), lasttime(0) {}
    // Entry seeded with a first sample.
    inline metrics_entry(double firstvalue, metrictype _valtype) {
        minvalue = firstvalue;
        maxvalue = firstvalue;
        value = firstvalue;
        valtype = _valtype;
        cumvalue = value;
        count = 1;
        lasttime = 0;
        if (valtype == VECTOR) v.push_back(firstvalue);
    };
    // String-valued entry.
    inline metrics_entry(std::string svalue) {
        valtype = STRING;
        stringval = svalue;
        count = 0;
        value = 0;
        minvalue = 0;
        maxvalue = 0;
        cumvalue = 0;
        lasttime = 0;
    }
    // Empty entry of the given type; min/max primed so the first sample wins.
    inline metrics_entry(metrictype _valtype) {
        valtype = _valtype;
        count = 0;
        cumvalue = 0;
        value = 0;
        lasttime = 0;
        minvalue = std::numeric_limits<double>::max();
        // Bug fix: was numeric_limits<double>::min(), which is the smallest
        // POSITIVE double (~2.2e-308), not a floor — negative samples added
        // via add_vector_entry() reported a wrong maximum. -max() is the
        // true lowest finite double (pre-C++11 equivalent of lowest()).
        maxvalue = -std::numeric_limits<double>::max();
    }
    // Widen [minvalue, maxvalue] to include sample v.
    inline void adj(double v) {
        if (count == 0) {
            minvalue = v;
            maxvalue = v;
        } else {
            minvalue = std::min(v,minvalue);
            maxvalue = std::max(v,maxvalue);
        }
    }
    // Accumulate one sample.
    inline void add(double x) {
        adj(x);
        value += x;
        cumvalue += x;
        ++count;
        if (valtype == VECTOR) {
            v.push_back(x);
        }
    }
    // Overwrite the current value (cumvalue keeps accumulating).
    inline void set(double v) {
        adj(v);
        value = v;
        cumvalue += v;
    }
    inline void set(std::string s) {
        stringval = s;
    }
    // Accumulate into slot i of a VECTOR entry, growing it on demand.
    inline void add_vector_entry(size_t i, double x) {
        if (v.size() < i + 1) v.resize(i + 1);
        count = v.size();
        value += x;
        cumvalue += x;
        v[i] += x;
        adj(v[i]);
    }
    // Overwrite slot i of a VECTOR entry and recompute min/max from scratch.
    inline void set_vector_entry(size_t i, double x) {
        if (v.size() < i + 1) v.resize(i + 1);
        count = v.size();
        value = value - v[i] + x;
        cumvalue = cumvalue - v[i] + x;
        v[i] = x;
        minvalue = x; maxvalue = x;
        for (size_t i = 0; i < v.size(); ++i) {
            adj(v[i]);
        }
    }
    inline void timer_start() {
        gettimeofday(&start_time, NULL);
    }
    // Stop the timer, record elapsed seconds in lasttime, and add() it.
    inline void timer_stop() {
        timeval end;
        gettimeofday(&end, NULL);
        lasttime = end.tv_sec - start_time.tv_sec + ((double)(end.tv_usec - start_time.tv_usec)) / 1.0E6;
        add(lasttime);
    }
};
/* Abstract interface for metrics output sinks (console, file, HTML, ...). */
class imetrics_reporter {
public:
    virtual ~imetrics_reporter() {}
    // Emits all entries collected under the given metrics name/identifier.
    virtual void do_report(std::string name, std::string id, std::map<std::string, metrics_entry> & entries) = 0;
};
/**
* Metrics instance for logging metrics of a single object type.
* Name of the metrics instance is set on construction.
*/
class metrics {
std::string name, ident;
std::map<std::string, metrics_entry> entries;
mutex mlock;
public:
inline metrics(std::string _name = "", std::string _id = "") : name(_name), ident (_id) {
this->set("app", _name);
}
inline void clear() {
entries.clear();
}
inline std::string iterkey(std::string key, int iter) {
char s[256];
sprintf(s, "%s.%d", key.c_str(), iter);
return std::string(s);
}
/**
* Add to an existing value or create new.
*/
inline void add(std::string key, double value, metrictype type = REAL) {
mlock.lock();
if (entries.count(key) == 0) {
entries[key] = metrics_entry(value, type);
} else {
entries[key].add(value);
}
mlock.unlock();
}
inline void add_to_vector(std::string key, double value) {
if (entries.count(key) == 0) {
entries[key] = metrics_entry(value, VECTOR);
} else {
entries[key].add(value);
}
}
inline void add_vector_entry(std::string key, size_t idx, double value) {
if (entries.count(key) == 0) {
entries[key] = metrics_entry(VECTOR);
}
entries[key].add_vector_entry(idx, value);
}
inline void set(std::string key, size_t value) {
set(key, (double)value, INTEGER);
}
inline void set(std::string key, int value) {
set(key, (double)value, INTEGER);
}
inline void set(std::string key, double value, metrictype type = REAL) {
if (entries.count(key) == 0) {
entries[key] = metrics_entry(value, type);
} else {
entries[key].set(value);
}
}
inline void set_integer(std::string key, size_t value) {
if (entries.count(key) == 0) {
entries[key] = metrics_entry((double)value, INTEGER);
} else {
entries[key].set((double)value);
}
}
inline void set(std::string key, std::string s) {
if (entries.count(key) == 0) {
entries[key] = metrics_entry(s);
} else {
entries[key].set(s);
}
}
inline void set_vector_entry_integer(std::string key, size_t idx, size_t value) {
set_vector_entry(key, idx, (double)(value));
}
inline void set_vector_entry(std::string key, size_t idx, double value) {
mlock.lock();
if (entries.count(key) == 0) {
entries[key] = metrics_entry(VECTOR);
}
entries[key].set_vector_entry(idx, value);
mlock.unlock();
}
inline void start_time(std::string key) {
mlock.lock();
if (entries.count(key) == 0) {
entries[key] = metrics_entry(TIME);
}
entries[key].timer_start();
mlock.unlock();
}
metrics_entry start_time() {
metrics_entry me(TIME);
me.timer_start();
return me;
}
inline void stop_time(metrics_entry me, std::string key, bool show=false) {
me.timer_stop();
mlock.lock();
if (entries.count(key) == 0) {
entries[key] = metrics_entry(TIME);
}
entries[key].add(me.lasttime); // not thread safe
if (show)
std::cout << key << ": " << me.lasttime << " secs." << std::endl;
mlock.unlock();
}
inline void stop_time(metrics_entry me, std::string key, int iternum, bool show=false) {
me.timer_stop();
mlock.lock();
double t = me.lasttime;
if (entries.count(key) == 0) {
entries[key] = metrics_entry(TIME);
}
entries[key].add(t); // not thread safe
if (show)
std::cout << key << ": " << me.lasttime << " secs." << std::endl;
char s[256];
sprintf(s, "%s.%d", key.c_str(), iternum);
std::string ikey(s);
if (entries.count(ikey) == 0) {
entries[ikey] = metrics_entry(TIME);
}
entries[ikey].add(t);
mlock.unlock();
}
inline void stop_time(std::string key, bool show = false) {
entries[key].timer_stop();
if (show)
std::cout << key << ": " << entries[key].lasttime << " secs." << std::endl;
}
inline metrics_entry get(std::string key) {
return entries[key];
}
// Hand every collected entry to `reporter`; anonymous metrics objects
// (empty name) are skipped.
void report(imetrics_reporter & reporter) {
    if (!name.empty()) {
        reporter.do_report(name, ident, entries);
    }
}
};
};
#endif
| C++ |
/**
* @file
* @author Aapo Kyrola <akyrola@cs.cmu.edu>
* @version 1.0
*
* @section LICENSE
*
* Copyright [2012] [Aapo Kyrola, Guy Blelloch, Carlos Guestrin / Carnegie Mellon University]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @section DESCRIPTION
*
* Simple metrics reporter that dumps metrics to
* standard output.
*/
#ifndef GRAPHCHI_BASIC_REPORTER
#define GRAPHCHI_BASIC_REPORTER
#include <iostream>
#include <map>
#include "metrics/metrics.hpp"
/**
* Simple metrics reporter that dumps metrics to
* standard output.
*/
namespace graphchi {
// Reporter that pretty-prints all collected metrics to stdout, grouped
// into numeric, timing, string and vector sections.
class basic_reporter : public imetrics_reporter {
public:
    virtual ~basic_reporter() {}

    // Print all entries for the metrics object `name` (instance `ident`).
    virtual void do_report(std::string name, std::string ident, std::map<std::string, metrics_entry> & entries) {
        // TODO: use reporters
        if (ident != name) {
            std::cout << std::endl << " === REPORT FOR " << name << "(" << ident << ") ===" << std::endl;
        } else {
            std::cout << std::endl << " === REPORT FOR " << name << " ===" << std::endl;
        }

        // First write numeral, then timings, then string entries.
        // Four passes over the map, one per value type; `c` counts entries
        // printed in a pass so the section header is emitted only once.
        for(int round=0; round<4; round++) {
            std::map<std::string, metrics_entry>::iterator it;
            int c = 0;
            for(it = entries.begin(); it != entries.end(); ++it) {
                metrics_entry ent = it->second;
                switch(ent.valtype) {
                    case REAL:
                    case INTEGER:
                        if (round == 0) {
                            if (c++ == 0) std::cout << "[Numeric]" << std::endl;
                            std::cout << it->first << ":\t\t";
                            if (ent.count > 1) {
                                // Aggregated entry: show summary statistics too.
                                std::cout << ent.value << "\t(count: " << ent.count << ", min: " << ent.minvalue <<
                                ", max: " << ent.maxvalue << ", avg: "
                                << ent.cumvalue/(double)ent.count << ")" << std::endl;
                            } else {
                                std::cout << ent.value << std::endl;
                            }
                        }
                        break;
                    case TIME:
                        if (round == 1) {
                            if (c++ == 0) std::cout << "[Timings]" << std::endl;
                            std::cout << it->first << ":\t\t";
                            if (ent.count>1) {
                                std::cout << ent.value << "s\t (count: " << ent.count << ", min: " << ent.minvalue <<
                                "s, " << "max: " << ent.maxvalue << ", avg: "
                                << ent.cumvalue/(double)ent.count << "s)" << std::endl;
                            } else {
                                std::cout << ent.value << " s" << std::endl;
                            }
                        }
                        break;
                    case STRING:
                        if (round == 2) {
                            if (c++ == 0) std::cout << "[Other]" << std::endl;
                            std::cout << it->first << ":\t";
                            std::cout << ent.stringval << std::endl;
                        }
                        break;
                    case VECTOR:
                        if (round == 3) {
                            // (Header label reused from the numeric section.)
                            if (c++ == 0) std::cout << "[Numeric]" << std::endl;
                            std::cout << it->first << ":\t\t";
                            if (ent.count > 1) {
                                std::cout << ent.value << "\t(count: " << ent.count << ", min: " << ent.minvalue <<
                                ", max: " << ent.maxvalue << ", avg: "
                                << ent.cumvalue/(double)ent.count << ")" << std::endl;
                            } else {
                                std::cout << ent.value << std::endl;
                            }
                            // Additionally dump the raw vector contents.
                            std::cout << it->first << ".values:\t\t";
                            for(size_t j=0; j<ent.v.size(); j++) std::cout << ent.v[j] << ",";
                            std::cout << std::endl;
                        }
                        break;
                }
            }
        }
    };
};
};
#endif
| C++ |
/**
* @file
* @author Aapo Kyrola <akyrola@cs.cmu.edu>
* @version 1.0
*
* @section LICENSE
*
* Copyright [2012] [Aapo Kyrola, Guy Blelloch, Carlos Guestrin / Carnegie Mellon University]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @section DESCRIPTION
*
* File metrics reporter.
*/
#ifndef DEF_GRAPHCHI_FILE_REPORTER
#define DEF_GRAPHCHI_FILE_REPORTER
#include <fstream>
#include <cstdio>
#include "metrics/metrics.hpp"
#include "util/cmdopts.hpp"
namespace graphchi {
// Reporter that writes metrics as key=value lines to a file, then closes it.
class file_reporter : public imetrics_reporter {
private:
    file_reporter() {}
    std::string filename;   // path of the metrics output file
    FILE * f;               // open output handle; NULL once the report has been written

public:
    // Opens (and truncates) `fname` for writing; aborts if it cannot be created.
    file_reporter(std::string fname) : filename(fname) {
        // Create new file
        f = fopen(fname.c_str(), "w");
        assert(f != NULL);
    }

    // Fix: close the handle if do_report() was never called (the original
    // destructor leaked the FILE*).
    virtual ~file_reporter() {
        if (f != NULL) {
            fclose(f);
            f = NULL;
        }
    }

    virtual void do_report(std::string name, std::string ident, std::map<std::string, metrics_entry> & entries) {
        // Fix: the file is closed at the end of the first report; a second
        // call must not write to (or re-close) a dangling FILE*.
        if (f == NULL) return;
        if (ident != name) {
            fprintf(f, "[%s:%s]\n", name.c_str(), ident.c_str());
        } else {
            fprintf(f, "[%s]\n", name.c_str());
        }
        std::map<std::string, metrics_entry>::iterator it;
        for(it = entries.begin(); it != entries.end(); ++it) {
            metrics_entry ent = it->second;
            switch(ent.valtype) {
                case INTEGER:
                    fprintf(f, "%s.%s=%ld\n", ident.c_str(), it->first.c_str(), (long int) (ent.value));
                    fprintf(f, "%s.%s.count=%lu\n", ident.c_str(), it->first.c_str(), ent.count);
                    fprintf(f, "%s.%s.min=%ld\n", ident.c_str(), it->first.c_str(), (long int) (ent.minvalue));
                    fprintf(f, "%s.%s.max=%ld\n", ident.c_str(), it->first.c_str(), (long int) (ent.maxvalue));
                    fprintf(f, "%s.%s.avg=%lf\n", ident.c_str(), it->first.c_str(), ent.cumvalue/ent.count);
                    break;
                case REAL:
                case TIME:
                    fprintf(f, "%s.%s=%lf\n", ident.c_str(), it->first.c_str(), (ent.value));
                    fprintf(f, "%s.%s.count=%lu\n", ident.c_str(), it->first.c_str(), ent.count);
                    fprintf(f, "%s.%s.min=%lf\n", ident.c_str(), it->first.c_str(), (ent.minvalue));
                    fprintf(f, "%s.%s.max=%lf\n", ident.c_str(), it->first.c_str(), (ent.maxvalue));
                    fprintf(f, "%s.%s.avg=%lf\n", ident.c_str(), it->first.c_str(), ent.cumvalue/ent.count);
                    break;
                case STRING:
                    fprintf(f, "%s.%s=%s\n", ident.c_str(), it->first.c_str(), it->second.stringval.c_str());
                    break;
                case VECTOR:
                    // Vector metrics are not persisted to file.
                    break;
            }
        }
        fflush(f);
        fclose(f);
        f = NULL;   // Fix: prevent double-fclose / write-after-close.
        // Following code used only for research purposes.
        if (get_option_int("metrics.insert_to_db", 0) == 1) {
            std::string cmd = "python2.7 benchtodb.py " + filename;
            int err = system(cmd.c_str());
            if (err != 0) {
                std::cout << "Error running the python script." << std::endl;
            }
        }
    }
};
};
#endif
| C++ |
/**
* @file
* @author Aapo Kyrola <akyrola@cs.cmu.edu>
* @version 1.0
*
* @section LICENSE
*
* Copyright [2012] [Aapo Kyrola, Guy Blelloch, Carlos Guestrin / Carnegie Mellon University]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @section DESCRIPTION
*
* Empty metrics reporter.
*/
#ifndef GRAPHLAB_NULL_REPORTER
#define GRAPHLAB_NULL_REPORTER
#include "metrics/metrics.hpp"
/**
* Simple metrics reporter that dumps metrics to
* standard output.
*/
namespace graphchi {
// Reporter that discards all metrics (useful to disable reporting).
class null_reporter : public imetrics_reporter {
public:
    // Fix: the destructor was only *declared* here and never defined
    // anywhere, causing an undefined-symbol link error whenever a
    // null_reporter was destroyed. Define it inline.
    virtual ~null_reporter() {}

    // Intentionally ignores all entries.
    virtual void do_report(std::string name, std::string ident, std::map<std::string, metrics_entry> & entries) {
    }
};
}
#endif
| C++ |
/**
* @file
* @author Aapo Kyrola <akyrola@cs.cmu.edu>
* @version 1.0
*
* @section LICENSE
*
* Copyright [2012] [Aapo Kyrola, Guy Blelloch, Carlos Guestrin / Carnegie Mellon University]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @section DESCRIPTION
*
* File metrics reporter.
*/
#ifndef GRAPHCHI_HTML_REPORTER
#define GRAPHCHI_HTML_REPORTER
#include <cstdio>
#include "metrics/metrics.hpp"
/**
* Simple metrics reporter that dumps metrics to HTML
*/
namespace graphchi {
class html_reporter : public imetrics_reporter {
private:
html_reporter() {}
std::string filename;
FILE * f;
public:
html_reporter(std::string fname) : filename(fname) {
// Create new file
f = fopen(fname.c_str(), "w");
assert(f != NULL);
fprintf(f, "<html><head><title>GraphCHI Metrics Report</title>");
fprintf(f, "<style>\n");
fprintf(f, "table { border: 1px solid #999999; font: normal 80%%/140%% arial, helvetica, sans-serif; color: #555; background: #fff;} td, th {border: 1px dotted #bbb; padding: .5em; width:100px} ");
fprintf(f, "</style></head><body>");
}
virtual ~html_reporter() {
fprintf(f, "</body></html>");
fclose(f);
}
virtual void do_report(std::string name, std::string ident, std::map<std::string, metrics_entry> & entries) {
if (ident != name) {
fprintf(f, "<h3>%s:%s</h3>\n", name.c_str(), ident.c_str());
} else {
fprintf(f, "<h3>%s</h3>\n", name.c_str());
}
// First write numeral, then timings, then string entries
for(int round=0; round<4; round++) {
std::map<std::string, metrics_entry>::iterator it;
int c = 0;
fprintf(f, "<!-- Round %d -->\n", round);
fprintf(f, "\n<p>");
for(it = entries.begin(); it != entries.end(); ++it) {
metrics_entry ent = it->second;
switch(ent.valtype) {
case INTEGER:
if (round == 0) {
if (c++ == 0)
fprintf(f, "<table><tr><th>Key</th><th>Value</th><th>Count</th><th>Min</th><th>Max</th><th>Average</th></tr>");
fprintf(f, "<tr><td>%s</td>\n", it->first.c_str());
fprintf(f, "<td>%ld</td>\n", (long int) ent.value);
if (ent.count > 1) {
fprintf(f, "<td>%ld</td>\n", (long int) ent.count);
fprintf(f, "<td>%ld</td>\n", (long int) ent.minvalue);
fprintf(f, "<td>%ld</td>\n", (long int) ent.maxvalue);
fprintf(f, "<td>%.3lf</td>\n", ent.cumvalue/(double)ent.count);
} else fprintf(f, "<td colspan=4> </td>");
fprintf(f, "</tr>");
}
break;
case REAL:
if (round == 0) {
if (c++ == 0)
fprintf(f, "<table><tr><th>Key</th><th>Value</th><th>Count</th><th>Min</th><th>Max</th><th>Average</th></tr>");
}
case TIME:
if (ent.valtype == TIME && round == 1) {
if (c++ == 0)
fprintf(f, "<table><tr><th>Key</th><th>Value (sec)</th><th>Count</th><th>Min (sec)</th><th>Max (sec)</th><th>Average (sec)</th></tr>\n");
}
if ((round == 0 && ent.valtype == REAL)||(round == 1 && ent.valtype == TIME)) {
fprintf(f, "<tr><td>%s</td>\n", it->first.c_str());
fprintf(f, "<td>%lf</td>\n", ent.value);
if (ent.count > 1) {
fprintf(f, "<td>%ld</td>\n", (long int) ent.count);
fprintf(f, "<td>%.3lf</td>\n", ent.minvalue);
fprintf(f, "<td>%.3lf</td>\n", ent.maxvalue);
fprintf(f, "<td>%.3lf</td>\n", ent.cumvalue/(double)ent.count);
} else fprintf(f, "<td colspan=4> </td>");
fprintf(f, "</tr>");
}
break;
case STRING:
if (round == 2) {
if (c++ == 0)
fprintf(f, "<table><tr><th>Key</th><th>Value</th></tr>\n");
fprintf(f, "<tr><td>%s</td><td width=400>%s</td>\n", it->first.c_str(), ent.stringval.c_str());
fprintf(f, "</tr>");
}
break;
case VECTOR:
if (round == 3) {
// TODO
}
break;
}
}
if (c>0) fprintf(f, "</table>");
fprintf(f, " </p>");
}
fflush(f);
};
};
};
#endif
| C++ |
/**
* @file
* @author Aapo Kyrola <akyrola@cs.cmu.edu>
* @version 1.0
*
* @section LICENSE
*
* Copyright [2012] [Aapo Kyrola, Guy Blelloch, Carlos Guestrin / Carnegie Mellon University]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @section DESCRIPTION
*
* Code that generates periodically engine status and performance
* related plots using python's matplotlib.
*
* Relatively hacky code.
*/
#ifndef DEF_GRAPHCHI_GNUPLOTTER
#define DEF_GRAPHCHI_GNUPLOTTER
#include <cstdio>
#include <stdlib.h>
#include <string>
#include "graphchi_basic_includes.hpp"
namespace graphchi {
// Number of edges in the graph when plotting was initialized (set by init_plots()).
static size_t initial_edges = 0;
static std::string plotdirectory();
// Directory holding all plot .dat files and the plotter.py script.
static std::string plotdirectory() {
    return "conf/adminhtml/plots/";}
static void init_plot(std::string plotname);
// Create (or truncate) the data file for `plotname` so a fresh run starts empty.
static void init_plot(std::string plotname) {
    std::string dataname = plotdirectory() + plotname + ".dat";
    FILE * df = fopen(dataname.c_str(), "w");
    if (df != NULL) {
        fclose(df);
    } else {
        // Fix: the original unconditionally called fclose(df), which is
        // undefined behavior when fopen() fails (e.g. missing directory).
        std::cout << "Could not create plot file: " << dataname << std::endl;
    }
    std::cout << "---------- Initialized ------------" << std::endl;
}
template <typename ENGINE>
void addval(ENGINE * engine, std::string plotname, double val) {
graphchi_context &context = engine->get_context();
std::string dataname = plotdirectory() + plotname + ".dat";
FILE * df = fopen(dataname.c_str(), "a");
assert(df != NULL);
fprintf(df, "%lf %lf\n", context.runtime(), val);
fclose(df);
}
static void drawplot(std::string plotname, size_t lookback_secs);
static void drawplot(std::string plotname, size_t lookback_secs) {
std::string plotfile = plotdirectory() + plotname + ".dat";
std::stringstream ss;
ss << "python2.6 ";
ss << plotdirectory() + "plotter.py " + plotfile + " lastsecs ";
ss << lookback_secs;
std::string cmd = ss.str();
logstream(LOG_DEBUG) << "Executing: " << cmd << std::endl;
system(cmd.c_str());
}
// Truncate all plot data files and remember the starting edge count.
template <typename ENGINE>
static void init_plots(ENGINE * engine) {
    const char * plotnames[] = {"edges", "bufedges", "updates", "ingests", "deltas"};
    for (int i = 0; i < 5; i++) {
        init_plot(plotnames[i]);
    }
    initial_edges = engine->num_edges_safe();
}
// Snapshots from a previous update_plotdata() call, used for rate computation.
static double last_update_time = 0;
static size_t last_edges = 0;
static size_t last_updates = 0;
// Total number of edges ingested so far (updated via set_ingested_edges()).
static size_t ingested_edges = 0;
static void set_ingested_edges(size_t n);
// Record the running total of ingested edges for the "ingests" plot.
static void set_ingested_edges(size_t n) {
    ingested_edges = n;
}
// Sample the engine's counters into the plot data files. Rates ("ingests",
// "updates") are computed per interval between consecutive calls.
template <typename ENGINE>
void update_plotdata(ENGINE * engine) {
    addval(engine, "edges", (double)engine->num_edges_safe());
    addval(engine, "bufedges", (double)engine->num_buffered_edges());
    double now = engine->get_context().runtime();
    double rt = now - last_update_time;
    if (last_update_time > 0 && rt > 0) {
        addval(engine, "ingests", ((double)ingested_edges - (double)last_edges) / rt);
        addval(engine, "updates", ((double)engine->num_updates() - (double)last_updates) / rt);
        addval(engine, "deltas", engine->get_context().last_deltasum);
    }
    // Fix: the original refreshed the `last_*` baselines only on the very
    // first call, so the plotted "rates" were cumulative averages since
    // start rather than the per-interval rates the `last_` names indicate.
    last_edges = ingested_edges;
    last_update_time = now;
    last_updates = engine->num_updates();
}
static void drawplots();
// Redraw every plot; each plot uses its own history window (seconds).
static void drawplots() {
    struct { const char * name; size_t secs; } plots[] = {
        {"edges", 1800}, {"bufedges", 1800}, {"updates", 300},
        {"ingests", 500}, {"deltas", 7200}
    };
    for (int i = 0; i < 5; i++) {
        drawplot(plots[i].name, plots[i].secs);
    }
}
}
#endif
| C++ |
/*
* chi_httpadmin.hpp
* graphchi_graphprocessing.π
*
* Created by Aapo Kyrola on 6/8/12.
* Copyright 2012 Carnegie Mellon University. All rights reserved.
*
*/
#ifndef CHI_HTTPADMIN_DEF
#define CHI_HTTPADMIN_DEF
#include <assert.h>
#include <string.h>
#include <time.h>
#include <stdarg.h>
#include <string>
#include "external/vpiotr-mongoose-cpp/mongoose.h"
extern "C" {
#include "external/vpiotr-mongoose-cpp/mongoose.c"
}
namespace graphchi {
// Interface for plugging extra HTTP endpoints into the admin server.
class custom_request_handler {
public:
    // Fix: handlers are stored and invoked polymorphically; without a
    // virtual destructor, deleting through this interface is undefined
    // behavior.
    virtual ~custom_request_handler() {}
    // Produce the response body for `req` (called only if responds_to() is true).
    virtual std::string handle(const char * req) = 0;
    // Return true if this handler wants to serve URI `req`.
    virtual bool responds_to(const char * req) = 0;
};
// Global registry of user-provided handlers, consulted by event_handler().
static std::vector<custom_request_handler *> reqhandlers;
// Register a handler; it is offered every request the built-in endpoints
// do not serve. Ownership stays with the caller.
static void register_http_request_handler(custom_request_handler * rh) {
    reqhandlers.push_back(rh);
}
// HTTP response header prefix used for all AJAX/JSONP replies.
static const char *ajax_reply_start =
"HTTP/1.1 200 OK\r\n"
"Cache: no-cache\r\n"
"Content-Type: application/x-javascript\r\n"
"\r\n";
// Mongoose configuration: serve conf/adminhtml on port 3333, single worker.
static const char *options[] = {
    "document_root", "conf/adminhtml",
    "listening_ports", "3333",
    "num_threads", "1",
    NULL
};
// Copy query-string variable `name` into `dst` (empty string if absent).
static void get_qsvar(const struct mg_request_info *request_info,
                      const char *name, char *dst, size_t dst_len) {
    if (dst_len > 0) dst[0] = '\0';  // defined result even when the variable is missing
    const char *qs = request_info->query_string;
    // Fix: the original computed strlen() NULL-safely but still passed a
    // possibly-NULL `qs` to mg_get_var.
    if (qs == NULL) return;
    mg_get_var(qs, strlen(qs), name, dst, dst_len);
}
// If a "callback" parameter is present in the query string, this is a
// JSONP call: emit the opening "callback(" wrapper and return 1.
// Returns 0 (and emits nothing) when "callback" is not specified.
static int handle_jsonp(struct mg_connection *conn,
                        const struct mg_request_info *request_info) {
    char cb[64];
    get_qsvar(request_info, "callback", cb, sizeof(cb));
    bool is_jsonp = (cb[0] != '\0');
    if (is_jsonp) {
        mg_printf(conn, "%s(", cb);
    }
    return is_jsonp ? 1 : 0;
}
// Write `json_info` to the connection, preceded by the AJAX header and
// wrapped in a JSONP callback when the request asked for one.
static void send(std::string json_info, struct mg_connection * conn,
                 const struct mg_request_info *request_info) {
    mg_printf(conn, "%s", ajax_reply_start);
    const char * cstr = json_info.c_str();
    int len = (int)strlen(cstr);
    int is_jsonp = handle_jsonp(conn, request_info);
    //mg_printf(conn, "%s", json_info.c_str());
    // Send read bytes to the client, exit the loop on error
    // NOTE(review): the loop breaks on any short write, so only a complete
    // single mg_write() succeeds — partial writes are treated as errors.
    int num_written = 0;
    while (len > 0) {
        if ((num_written = mg_write(conn, cstr, (size_t)len)) != len)
            break;
        len -= num_written;
        cstr += num_written;
    }
    if (is_jsonp) {
        mg_printf(conn, "%s", ")");
    }
}
// AJAX endpoint: reply with the engine's current status as JSON.
template <typename ENGINE>
static void ajax_send_message(struct mg_connection *conn,
                              const struct mg_request_info *request_info) {
    ENGINE * eng = (ENGINE*) request_info->user_data;
    send(eng->get_info_json(), conn, request_info);
}
// Mongoose event callback: serves /ajax/getinfo, then offers the URI to
// registered custom handlers; returns NULL so mongoose serves static
// files when nobody handled the request.
template <typename ENGINE>
static void *event_handler(enum mg_event event,
                           struct mg_connection *conn,
                           const struct mg_request_info *request_info) {
    void *processed = (void*) "yes";
    if (event == MG_NEW_REQUEST) {
        if (strcmp(request_info->uri, "/ajax/getinfo") == 0) {
            ajax_send_message<ENGINE>(conn, request_info);
        } else {
            bool found = false;
            for(std::vector<custom_request_handler *>::iterator it=reqhandlers.begin();
                it != reqhandlers.end(); ++it) {
                custom_request_handler * rh = *it;
                if (rh->responds_to(request_info->uri)) {
                    std::string response = rh->handle(request_info->uri);
                    send(response, conn, request_info);
                    found = true;
                    // Fix: stop after the first responder — the original kept
                    // iterating, letting several handlers write complete HTTP
                    // responses onto the same connection.
                    break;
                }
            }
            // No suitable handler found, mark as not processed. Mongoose will
            // try to serve the request.
            if (!found) processed = NULL;
        }
    } else {
        processed = NULL;
    }
    return processed;
}
// Start the embedded mongoose server; `engine` is handed to every request
// handler via user_data. Aborts if the server cannot start.
template <typename ENGINE>
void start_httpadmin(ENGINE * engine) {
    struct mg_context *ctx = mg_start(&event_handler<ENGINE>, (void*)engine, options);
    assert(ctx != NULL);
    std::cout << "Started HTTP admin server. " << std::endl;
}
};
#endif
| C++ |
/**
* Copyright (c) 2009 Carnegie Mellon University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an "AS
* IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language
* governing permissions and limitations under the License.
*
* For more about this software visit:
*
* http://www.graphlab.ml.cmu.edu
*
*/
/**
* @file logger.hpp
* Usage:
* First include logger.hpp. To logger, use the logger() function
* There are 2 output levels. A "soft" output level which is
* set by calling global_logger.set_log_level(), as well as a "hard" output
* level OUTPUTLEVEL which is set in the source code (logger.h).
*
* when you call "logger()" with a loglevel and if the loglevel is greater than
* both of the output levels, the string will be written.
* written to a logger file. Otherwise, logger() has no effect.
*
* The difference between the hard level and the soft level is that the
* soft level can be changed at runtime, while the hard level optimizes away
* logging calls at compile time.
*
* @author Yucheng Low (ylow)
*/
/**
* NOTICE: This file taken from GraphLab (as stated in the license above).
* I have merged the CPP and HPP files.
* @author Aapo Kyrola
*/
#ifndef GRAPHCHI_LOG_LOG_HPP
#define GRAPHCHI_LOG_LOG_HPP
#include <fstream>
#include <sstream>
#include <cstdlib>
#include <iostream>
#include <cassert>
#include <cstring>
#include <cstdarg>
#include <pthread.h>
/**
* \def LOG_FATAL
* Used for fatal and probably irrecoverable conditions
* \def LOG_ERROR
* Used for errors which are recoverable within the scope of the function
* \def LOG_WARNING
* Logs interesting conditions which are probably not fatal
* \def LOG_INFO
* Used for providing general useful information
* \def LOG_DEBUG
* Debugging purposes only
*/
#define LOG_NONE 5
#define LOG_FATAL 4
#define LOG_ERROR 3
#define LOG_WARNING 2
#define LOG_INFO 1
#define LOG_DEBUG 0
/**
* \def OUTPUTLEVEL
* The minimum level to logger at
* \def LOG_NONE
* OUTPUTLEVEL to LOG_NONE to disable logging
*/
#ifndef OUTPUTLEVEL
#define OUTPUTLEVEL LOG_DEBUG
#endif
/// If set, logs to screen will be printed in color
#define COLOROUTPUT
/**
* \def logger(lvl,fmt,...)
* extracts the filename, line number
* and function name and calls _log. It will be optimized
* away if LOG_NONE is set
* This relies on a few compiler macros. As far as I know, these
* macros are pretty standard among most other C++ compilers.
*/
#if OUTPUTLEVEL == LOG_NONE
// totally disable logging
#define logger(lvl,fmt,...)
#define logbuf(lvl,fmt,...)
#define logstream
#else
#define logger(lvl,fmt,...) \
(log_dispatch<(lvl >= OUTPUTLEVEL)>::exec(lvl,__FILE__, __func__ ,__LINE__,fmt,##__VA_ARGS__))
#define logbuf(lvl,buf,len) \
(log_dispatch<(lvl >= OUTPUTLEVEL)>::exec(lvl,__FILE__, \
__func__ ,__LINE__,buf,len))
#define logstream(lvl) \
(log_stream_dispatch<(lvl >= OUTPUTLEVEL)>::exec(lvl,__FILE__, __func__ ,__LINE__) )
#endif
// Level prefixes, indexed by level: LOG_DEBUG(0) .. LOG_FATAL(4).
static const char* messages[] = { "DEBUG: ",
    "INFO: ",
    "WARNING: ",
    "ERROR: ",
    "FATAL: "};
namespace logger_impl {
// Per-thread stream-logging state, stored via pthread thread-specific data.
struct streambuff_tls_entry {
    std::stringstream streambuffer;  // accumulates the current log line
    bool streamactive;               // false when the line's level is below the logger level
};
}
/**
logging class.
This writes to a file, and/or the system console.
*/
class file_logger{
public:
    /// If consolelog is true, subsequent logger output will be written to stderr
    void set_log_to_console(bool consolelog) {
        log_to_console = consolelog;
    }

    /// Returns the current logger file.
    std::string get_log_file(void) {
        return log_file;
    }

    /// Returns true if output is being written to stderr
    bool get_log_to_console() {
        return log_to_console;
    }

    /// Returns the current logger level
    int get_log_level() {
        return log_level;
    }

    /// Streams a value into the calling thread's log-line buffer.
    /// No-op if the thread has no active stream (see start_stream()).
    template <typename T>
    file_logger& operator<<(T a) {
        logger_impl::streambuff_tls_entry* streambufentry =
            reinterpret_cast<logger_impl::streambuff_tls_entry*>(pthread_getspecific(streambuffkey));
        if (streambufentry != NULL) {
            std::stringstream& streambuffer = streambufentry->streambuffer;
            bool& streamactive = streambufentry->streamactive;
            if (streamactive) streambuffer << a;
        }
        return *this;
    }

    /// C-string overload: additionally flushes when the string ends in '\n'.
    file_logger& operator<<(const char* a) {
        logger_impl::streambuff_tls_entry* streambufentry =
            reinterpret_cast<logger_impl::streambuff_tls_entry*>(pthread_getspecific(streambuffkey));
        if (streambufentry != NULL) {
            std::stringstream& streambuffer = streambufentry->streambuffer;
            bool& streamactive = streambufentry->streamactive;
            if (streamactive) {
                streambuffer << a;
                if (a[strlen(a)-1] == '\n') {
                    stream_flush();
                }
            }
        }
        return *this;
    }

    /// Manipulator overload: recognizes std::endl, flushing the line and
    /// throwing on LOG_FATAL streams.
    file_logger& operator<<(std::ostream& (*f)(std::ostream&)){
        logger_impl::streambuff_tls_entry* streambufentry =
            reinterpret_cast<logger_impl::streambuff_tls_entry*>(pthread_getspecific(streambuffkey));
        if (streambufentry != NULL) {
            std::stringstream& streambuffer = streambufentry->streambuffer;
            bool& streamactive = streambufentry->streamactive;
            typedef std::ostream& (*endltype)(std::ostream&);
            if (streamactive) {
                if (endltype(f) == endltype(std::endl)) {
                    streambuffer << "\n";
                    stream_flush();
                    if(streamloglevel == LOG_FATAL) {
                        throw "log fatal";
                        // exit(EXIT_FAILURE);
                    }
                }
            }
        }
        return *this;
    }

    /** Sets the current logger level. All logging commands below the current
     logger level will not be written. */
    void set_log_level(int new_log_level) {
        log_level = new_log_level;
    }

    /// pthread TLS destructor: frees the thread's stream-buffer entry.
    static void streambuffdestructor(void* v){
        logger_impl::streambuff_tls_entry* t =
            reinterpret_cast<logger_impl::streambuff_tls_entry*>(v);
        delete t;
    }

    /** Default constructor. By default, log_to_console is on,
     there is no logger file, and logger level is set to LOG_DEBUG.
     (Doc fix: the old comment claimed console off / LOG_WARNING,
     contradicting the code below.)
     */
    file_logger() {
        log_file = "";
        log_to_console = true;
        log_level = LOG_DEBUG;
        pthread_mutex_init(&mut, NULL);
        pthread_key_create(&streambuffkey, streambuffdestructor);
    }

    ~file_logger() {
        if (fout.good()) {
            fout.flush();
            fout.close();
        }
        pthread_mutex_destroy(&mut);
    }

    /** Closes the current logger file if one exists.
     If 'file' is not an empty string, it will be opened and
     all subsequent logger output will be written into 'file'.
     Any existing content of 'file' will be cleared.
     Return true on success and false on failure.
     */
    bool set_log_file(std::string file) {
        // close the file if it is open
        if (fout.good()) {
            fout.flush();
            fout.close();
            log_file = "";
        }
        // if file is not an empty string, open the new file
        if (file.length() > 0) {
            fout.open(file.c_str());
            if (fout.fail()) return false;
            log_file = file;
        }
        return true;
    }

#define RESET   0
#define BRIGHT  1
#define DIM     2
#define UNDERLINE   3
#define BLINK   4
#define REVERSE 7
#define HIDDEN  8

#define BLACK   0
#define RED     1
#define GREEN   2
#define YELLOW  3
#define BLUE    4
#define MAGENTA 5
#define CYAN    6
#define WHITE   7

    /// Emits the ANSI escape sequence selecting text attribute and
    /// foreground color. (Fix: writes directly instead of via a
    /// fixed-size sprintf buffer.)
    void textcolor(FILE* handle, int attr, int fg)
    {
        fprintf(handle, "%c[%d;%dm", 0x1B, attr, fg + 30);
    }

    /// Resets terminal colors to their defaults.
    void reset_color(FILE* handle)
    {
        fprintf(handle, "%c[0m", 0x1B);
    }

    /// printf-style backend used by the logger() macro.
    void _log(int lineloglevel,const char* file,const char* function,
              int line,const char* fmt, va_list ap ){
        // if the logger level fits
        // NOTE(review): the `<= 3` bound excludes LOG_FATAL (4) from this
        // path; kept as-is since changing it would alter observable output.
        if (lineloglevel >= 0 && lineloglevel <= 3 && lineloglevel >= log_level){
            // strip the directory part of the path (portable replacement for
            // the GNU "?:" extension used before)
            const char* slash = strrchr(file, '/');
            file = (slash != NULL) ? slash + 1 : file;
            char str[1024];
            // Reserve two bytes for the trailing "\n\0".
            const int cap = (int)sizeof(str) - 2;
            // Fix: snprintf/vsnprintf return the *would-be* length, which can
            // exceed the buffer; the original then wrote str[byteswritten]
            // out of bounds. Clamp after each call.
            int byteswritten = snprintf(str, cap, "%s%s(%s:%d): ",
                                        messages[lineloglevel],file,function,line);
            if (byteswritten < 0) byteswritten = 0;
            if (byteswritten >= cap) byteswritten = cap - 1;
            int bodylen = vsnprintf(str + byteswritten, cap - byteswritten, fmt, ap);
            if (bodylen > 0) byteswritten += bodylen;
            if (byteswritten >= cap) byteswritten = cap - 1;
            str[byteswritten] = '\n';
            str[byteswritten+1] = 0;
            // write the output
            if (fout.good()) {
                pthread_mutex_lock(&mut);
                fout << str;
                pthread_mutex_unlock(&mut);
            }
            if (log_to_console) {
#ifdef COLOROUTPUT
                if (lineloglevel == LOG_FATAL) {
                    textcolor(stderr, BRIGHT, RED);
                }
                else if (lineloglevel == LOG_ERROR) {
                    textcolor(stderr, BRIGHT, RED);
                }
                else if (lineloglevel == LOG_WARNING) {
                    textcolor(stderr, BRIGHT, GREEN);
                }
#endif
                std::cerr << str;
#ifdef COLOROUTPUT
                reset_color(stderr);
#endif
            }
        }
    }

    /// Raw-buffer backend used by the logbuf() macro.
    void _logbuf(int lineloglevel,const char* file,const char* function,
                 int line,const char* buf, int len) {
        // if the logger level fits
        if (lineloglevel >= 0 && lineloglevel <= 3 && lineloglevel >= log_level){
            // strip the directory part of the path
            const char* slash = strrchr(file, '/');
            file = (slash != NULL) ? slash + 1 : file;
            // length of the 'head' of the string
            size_t headerlen = snprintf(NULL,0,"%s%s(%s:%d): ",
                                        messages[lineloglevel],file,function,line);
            if (headerlen> 2047) {
                std::cerr << "Header length exceed buffer length!";
            }
            else {
                char str[2048];
                const char *newline="\n";
                // write the actual header
                int byteswritten = snprintf(str,2047,"%s%s(%s:%d): ",
                                            messages[lineloglevel],file,function,line);
                // Fix: snprintf reports the untruncated length; clamp so the
                // headerlen==2047 edge case does not log one byte past the
                // truncated content.
                if (byteswritten < 0) byteswritten = 0;
                if (byteswritten > 2046) byteswritten = 2046;
                _lograw(lineloglevel,str, byteswritten);
                _lograw(lineloglevel,buf, len);
                _lograw(lineloglevel,newline, (int)strlen(newline));
            }
        }
    }

    /// Writes `len` bytes to the log file (mutex-protected) and/or to a
    /// colorized stderr.
    void _lograw(int lineloglevel, const char* buf, int len) {
        if (fout.good()) {
            pthread_mutex_lock(&mut);
            fout.write(buf,len);
            pthread_mutex_unlock(&mut);
        }
        if (log_to_console) {
#ifdef COLOROUTPUT
            if (lineloglevel == LOG_FATAL) {
                textcolor(stderr, BRIGHT, RED);
            }
            else if (lineloglevel == LOG_ERROR) {
                textcolor(stderr, BRIGHT, RED);
            }
            else if (lineloglevel == LOG_WARNING) {
                textcolor(stderr, BRIGHT, GREEN);
            }
            else if (lineloglevel == LOG_DEBUG) {
                textcolor(stderr, BRIGHT, YELLOW);
            }
#endif
            std::cerr.write(buf,len);
#ifdef COLOROUTPUT
            reset_color(stderr);
#endif
        }
    }

    /// Begins a stream-style log line for the calling thread; used by the
    /// logstream() macro. The line is emitted on std::endl or a trailing '\n'.
    file_logger& start_stream(int lineloglevel,const char* file,const char* function, int line) {
        // get the stream buffer
        logger_impl::streambuff_tls_entry* streambufentry =
            reinterpret_cast<logger_impl::streambuff_tls_entry*>(pthread_getspecific(streambuffkey));
        // create the key if it doesn't exist
        if (streambufentry == NULL) {
            streambufentry = new logger_impl::streambuff_tls_entry;
            pthread_setspecific(streambuffkey, streambufentry);
        }
        std::stringstream& streambuffer = streambufentry->streambuffer;
        bool& streamactive = streambufentry->streamactive;

        // strip the directory part of the path
        const char* slash = strrchr(file, '/');
        file = (slash != NULL) ? slash + 1 : file;
        if (lineloglevel >= log_level){
            // Only prepend the header when starting a fresh line.
            if (streambuffer.str().length() == 0) {
                streambuffer << messages[lineloglevel] << file
                << "(" << function << ":" <<line<<"): ";
            }
            streamactive = true;
            streamloglevel = lineloglevel;
        }
        else {
            streamactive = false;
        }
        return *this;
    }

    /// Emits and clears the calling thread's buffered stream line.
    void stream_flush() {
        // get the stream buffer
        logger_impl::streambuff_tls_entry* streambufentry =
            reinterpret_cast<logger_impl::streambuff_tls_entry*>(pthread_getspecific(streambuffkey));
        if (streambufentry != NULL) {
            std::stringstream& streambuffer = streambufentry->streambuffer;
            streambuffer.flush();
            _lograw(streamloglevel,
                    streambuffer.str().c_str(),
                    (int)(streambuffer.str().length()));
            streambuffer.str("");
        }
    }
private:
    std::ofstream fout;           // log file stream (unopened when no file is set)
    std::string log_file;         // current log file name ("" if none)
    pthread_key_t streambuffkey;  // TLS key for per-thread stream buffers
    int streamloglevel;           // level of the stream line currently being built
    pthread_mutex_t mut;          // serializes writes to fout
    bool log_to_console;          // mirror output to stderr?
    int log_level;                // minimum level actually emitted
};
static file_logger& global_logger();
/**
Wrapper to generate 0 code if the output level is lower than the log level
*/
// Compile-time switch: the <false> specialization makes disabled log calls
// compile to nothing.
template <bool dostuff>
struct log_dispatch {};

template <>
struct log_dispatch<true> {
    // Forward a printf-style log call to the global logger.
    inline static void exec(int loglevel,const char* file,const char* function,
                            int line,const char* fmt, ... ) {
        va_list argp;
        va_start(argp, fmt);
        global_logger()._log(loglevel, file, function, line, fmt, argp);
        va_end(argp);
    }
};

template <>
struct log_dispatch<false> {
    // Disabled at compile time: intentionally a no-op.
    inline static void exec(int loglevel,const char* file,const char* function,
                            int line,const char* fmt, ... ) {}
};
// Stream sink that swallows everything; returned when stream logging is
// compiled out so `logstream(...) << x` still type-checks.
struct null_stream {
    template<typename T>
    inline null_stream operator<<(T t) { return null_stream(); }
    inline null_stream operator<<(const char* a) { return null_stream(); }
    inline null_stream operator<<(std::ostream& (*f)(std::ostream&)) { return null_stream(); }
};
// Compile-time switch for stream logging: <true> starts a real stream on
// the global logger, <false> hands back a discarding null_stream.
template <bool dostuff>
struct log_stream_dispatch {};

template <>
struct log_stream_dispatch<true> {
    inline static file_logger& exec(int lineloglevel,const char* file,const char* function, int line) {
        return global_logger().start_stream(lineloglevel, file, function, line);
    }
};

template <>
struct log_stream_dispatch<false> {
    inline static null_stream exec(int lineloglevel,const char* file,const char* function, int line) {
        return null_stream();
    }
};
void textcolor(FILE* handle, int attr, int fg);
void reset_color(FILE* handle);
// Process-wide logger instance (function-local static: constructed on
// first use).
static file_logger& global_logger() {
    static file_logger l;
    return l;
}
#endif
| C++ |
/**
* @file
* @author Aapo Kyrola <akyrola@cs.cmu.edu>
* @version 1.0
*
* @section LICENSE
*
* Copyright [2012] [Aapo Kyrola, Guy Blelloch, Carlos Guestrin / Carnegie Mellon University]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @section DESCRIPTION
*
* Simple smoketest for the dynamic graph graphchi engine.
*/
#include <string>
#include "graphchi_basic_includes.hpp"
#include "engine/dynamic_graphs/graphchi_dynamicgraph_engine.hpp"
using namespace graphchi;
/**
* Type definitions. Remember to create suitable graph shards using the
* Sharder-program.
*/
typedef vid_t VertexDataType;
typedef vid_t EdgeDataType;
/**
* Smoke test. On every iteration, each vertex sets its id to be
* id + iteration number. Vertices check whether their neighbors were
* set correctly. This assumes that the vertices are executed in round-robin order.
*/
struct SmokeTestProgram : public GraphChiProgram<VertexDataType, EdgeDataType> {
/**
 * Vertex update function.
 * Iteration 0: each vertex stamps its own id on all out-edges.
 * Later iterations: each in-edge is expected to carry
 *   neighbor_id + iteration - (neighbor_id > my_id)
 * The subtraction accounts for neighbors with a larger id that have not
 * yet been updated in this round-robin pass, so their edge still holds
 * the previous iteration's value.
 */
void update(graphchi_vertex<VertexDataType, EdgeDataType> &vertex, graphchi_context &gcontext) {
if (gcontext.iteration == 0) {
for(int i=0; i < vertex.num_outedges(); i++) {
vertex.outedge(i)->set_data(vertex.id());
}
} else {
for(int i=0; i < vertex.num_inedges(); i++) {
graphchi_edge<vid_t> * edge = vertex.inedge(i);
vid_t inedgedata = edge->get_data();
// Larger-id neighbors lag one iteration behind in round-robin order.
vid_t expected = edge->vertex_id() + gcontext.iteration - (edge->vertex_id() > vertex.id());
if (inedgedata != expected) {
assert(false);
}
}
// Publish this iteration's value for the next round of checks.
for(int i=0; i < vertex.num_outedges(); i++) {
vertex.outedge(i)->set_data(vertex.id() + gcontext.iteration);
}
}
// After the final iteration every vertex holds niters, which the
// post-run VertexDataChecker verifies.
vertex.set_data(gcontext.iteration + 1);
}
/**
 * Called before an iteration starts.
 */
void before_iteration(int iteration, graphchi_context &gcontext) {
}
/**
 * Called after an iteration has finished.
 */
void after_iteration(int iteration, graphchi_context &gcontext) {
}
/**
 * Called before an execution interval is started.
 */
void before_exec_interval(vid_t window_st, vid_t window_en, graphchi_context &gcontext) {
}
/**
 * Called after an execution interval has finished.
 */
void after_exec_interval(vid_t window_st, vid_t window_en, graphchi_context &gcontext) {
}
};
/**
* Vertex callback that checks the vertex data is ok.
*/
/**
 * Vertex callback that verifies each vertex's final value equals the
 * number of iterations run, accumulating the values for a global check.
 */
class VertexDataChecker : public VCallback<VertexDataType> {
    int iters;     // Number of iterations the engine executed.
public:
    size_t total;  // Sum of verified values: iters per vertex.
    VertexDataChecker(int iters) : iters(iters), total(0) {}
    void callback(vid_t vertex_id, VertexDataType &vecvalue) {
        // Cast avoids a signed/unsigned comparison (VertexDataType is
        // vid_t, an unsigned 32-bit type); matches the casts used by the
        // other smoketest checkers in this codebase.
        assert(vecvalue == (VertexDataType) iters);
        total += (size_t) iters;
    }
};
// Entry point: runs the smoke test on the dynamic-graph engine, then
// re-reads all vertex data from disk to verify the final values.
int main(int argc, const char ** argv) {
/* GraphChi initialization will read the command line
arguments and the configuration file. */
graphchi_init(argc, argv);
/* Metrics object for keeping track of performance counters
and other information. Currently required. */
metrics m("smoketest-dynamic-engine");
/* Basic arguments for application */
std::string filename = get_option_string("file"); // Base filename
int niters = get_option_int("niters", 4); // Number of iterations
bool scheduler = false; // Whether to use selective scheduling
/* Detect the number of shards or preprocess an input to create them */
int nshards = convert_if_notexists<EdgeDataType>(filename,
get_option_string("nshards", "auto"));
/* Run */
SmokeTestProgram program;
graphchi_dynamicgraph_engine<VertexDataType, EdgeDataType> engine(filename, nshards, scheduler, m);
engine.run(program, niters);
/* Check also the vertex data is ok: every vertex must equal niters */
VertexDataChecker vchecker(niters);
foreach_vertices(filename, 0, engine.num_vertices(), vchecker);
assert(vchecker.total == engine.num_vertices() * niters);
/* Report execution metrics */
metrics_report(m);
logstream(LOG_INFO) << "Dynamic Engine Smoketest passed successfully! Your system is working!" << std::endl;
return 0;
}
| C++ |
/**
* @file
* @author Aapo Kyrola <akyrola@cs.cmu.edu>
* @version 1.0
*
* @section LICENSE
*
* Copyright [2012] [Aapo Kyrola, Guy Blelloch, Carlos Guestrin / Carnegie Mellon University]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @section DESCRIPTION
*
* Simple smoke test for the bulk synchronous functional api.
*/
#define RANDOMRESETPROB 0.15
#include <string>
#include <fstream>
#include <cmath>
#include "util/cmdopts.hpp"
#include "api/graphchi_context.hpp"
#include "api/graph_objects.hpp"
#include "api/ischeduler.hpp"
#include "api/functional/functional_api.hpp"
#include "metrics/metrics.hpp"
#include "metrics/reps/basic_reporter.hpp"
#include "util/toplist.hpp"
using namespace graphchi;
// Functional-API smoke test kernel: every vertex's value is simply the
// current iteration number, so each gather must observe iteration-1 and
// each scatter sends iteration. Any ordering bug trips an assert.
struct smoketest_program : public functional_kernel<int, int> {
/* Initial value - on first iteration */
int initial_value(graphchi_context &info, vertex_info& myvertex) {
return 0;
}
/* Called before first "gather" */
int reset() {
return 0;
}
// Note: Unweighted version, edge value should also be passed
// "Gather": every neighbor must have published last iteration's value.
int op_neighborval(graphchi_context &info, vertex_info& myvertex, vid_t nbid, int nbval) {
assert(nbval == (int) info.iteration - 1);
return nbval;
}
// "Sum": all gathered values must agree (they are all iteration-1).
int plus(int curval, int toadd) {
assert(curval == 0 || toadd == curval);
return toadd;
}
// "Apply": the new vertex value is the iteration number itself.
int compute_vertexvalue(graphchi_context &ginfo, vertex_info& myvertex, int nbvalsum) {
return ginfo.iteration;
}
// "Scatter"
int value_to_neighbor(graphchi_context &info, vertex_info& myvertex, vid_t nbid, int myval) {
assert(myval == (int) info.iteration);
return myval;
}
};
// Entry point for the bulk-synchronous functional API smoke test.
int main(int argc, const char ** argv) {
graphchi_init(argc, argv);
metrics m("test-functional");
std::string filename = get_option_string("file");
int niters = get_option_int("niters", 5);
// NOTE(review): "mode" is parsed but never used below — presumably a
// leftover from a semisync/sync switch; confirm before removing.
std::string mode = get_option_string("mode", "semisync");
logstream(LOG_INFO) << "Running bulk sync smoke test." << std::endl;
run_functional_unweighted_synchronous<smoketest_program>(filename, niters, m);
logstream(LOG_INFO) << "Smoketest passed successfully! Your system is working!" << std::endl;
return 0;
}
| C++ |
/**
* @file
* @author Aapo Kyrola <akyrola@cs.cmu.edu>
* @version 1.0
*
* @section LICENSE
*
* Copyright [2012] [Aapo Kyrola, Guy Blelloch, Carlos Guestrin / Carnegie Mellon University]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @section DESCRIPTION
*
* Smoketest to test dynamically extended edge values.
*/
#define DYNAMICEDATA 1
#include <string>
#include "graphchi_basic_includes.hpp"
#include "api/dynamicdata/chivector.hpp"
using namespace graphchi;
/**
* Type definitions. Remember to create suitable graph shards using the
* Sharder-program.
*/
typedef vid_t VertexDataType;
typedef chivector<vid_t> EdgeDataType;
/**
* Smoke test.
*/
struct DynamicDataSmokeTestProgram : public GraphChiProgram<VertexDataType, EdgeDataType> {
/**
 * Vertex update function.
 * Iteration 0: each out-edge vector is reset to contain exactly the
 * vertex id. Iteration k>0: each in-edge vector must contain the
 * sequence [src, src+1, ..., src+k-1] (one element appended per prior
 * iteration), after which every out-edge gets id+k appended — verifying
 * that dynamically-sized edge data survives shard rewrites.
 */
void update(graphchi_vertex<VertexDataType, EdgeDataType > &vertex, graphchi_context &gcontext) {
if (gcontext.iteration == 0) {
for(int i=0; i < vertex.num_outedges(); i++) {
chivector<vid_t> * evector = vertex.outedge(i)->get_vector();
evector->clear();
assert(evector->size() == 0);
evector->add(vertex.id());
assert(evector->size() == 1);
assert(evector->get(0) == vertex.id());
}
} else {
for(int i=0; i < vertex.num_inedges(); i++) {
graphchi_edge<EdgeDataType> * edge = vertex.inedge(i);
chivector<vid_t> * evector = edge->get_vector();
// NOTE(review): size() vs int iteration is a signed/unsigned
// comparison here — depends on chivector::size()'s return type.
assert(evector->size() >= gcontext.iteration);
// Each prior iteration appended src + (its iteration index).
for(int j=0; j < evector->size(); j++) {
vid_t expected = edge->vertex_id() + j;
vid_t has = evector->get(j);
if (has != expected) {
std::cout << "Mismatch: " << has << " != " << expected << std::endl;
}
assert(has == expected);
}
}
for(int i=0; i < vertex.num_outedges(); i++) {
vertex.outedge(i)->get_vector()->add(vertex.id() + gcontext.iteration);
}
}
// Final vertex value is niters, checked post-run by VertexDataChecker.
vertex.set_data(gcontext.iteration + 1);
}
/**
 * Called before an iteration starts.
 */
void before_iteration(int iteration, graphchi_context &gcontext) {
}
/**
 * Called after an iteration has finished.
 */
void after_iteration(int iteration, graphchi_context &gcontext) {
}
/**
 * Called before an execution interval is started.
 */
void before_exec_interval(vid_t window_st, vid_t window_en, graphchi_context &gcontext) {
}
/**
 * Called after an execution interval has finished.
 */
void after_exec_interval(vid_t window_st, vid_t window_en, graphchi_context &gcontext) {
}
};
/**
* Vertex callback that checks the vertex data is ok.
*/
// Vertex callback verifying each vertex's final value equals the number of
// iterations; accumulates the total for the post-run global check.
class VertexDataChecker : public VCallback<VertexDataType> {
int iters;
public:
size_t total;
VertexDataChecker(int iters) : iters(iters), total(0) {}
void callback(vid_t vertex_id, VertexDataType &vecvalue) {
// Casts avoid signed/unsigned comparison warnings (vid_t is unsigned).
assert(vecvalue == (VertexDataType)iters);
total += (size_t) iters;
}
};
// Entry point: runs the dynamic-edge-data smoke test and verifies the
// resulting vertex values on disk.
int main(int argc, const char ** argv) {
/* GraphChi initialization will read the command line
arguments and the configuration file. */
graphchi_init(argc, argv);
/* Metrics object for keeping track of performance counters
and other information. Currently required. */
metrics m("dynamicdata-smoketest");
/* Basic arguments for application */
std::string filename = get_option_string("file"); // Base filename
int niters = get_option_int("niters", 4); // Number of iterations
bool scheduler = false; // Whether to use selective scheduling
/* Detect the number of shards or preprocess an input to create them */
int nshards = convert_if_notexists<vid_t>(filename, get_option_string("nshards", "auto"));
/* Run */
DynamicDataSmokeTestProgram program;
graphchi_engine<VertexDataType, EdgeDataType> engine(filename, nshards, scheduler, m);
engine.run(program, niters);
/* Check also the vertex data is ok: every vertex must equal niters */
VertexDataChecker vchecker(niters);
foreach_vertices(filename, 0, engine.num_vertices(), vchecker);
assert(vchecker.total == engine.num_vertices() * niters);
/* Report execution metrics */
metrics_report(m);
logstream(LOG_INFO) << "Smoketest passed successfully! Your system is working!" << std::endl;
return 0;
}
| C++ |
/**
* @file
* @author Aapo Kyrola <akyrola@cs.cmu.edu>
* @version 1.0
*
* @section LICENSE
*
* Copyright [2012] [Aapo Kyrola, Guy Blelloch, Carlos Guestrin / Carnegie Mellon University]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @section DESCRIPTION
*
* Simple smoketest for the graphchi engine.
*/
#include <string>
#include "graphchi_basic_includes.hpp"
using namespace graphchi;
/**
* Type definitions. Remember to create suitable graph shards using the
* Sharder-program.
*/
typedef vid_t VertexDataType;
typedef vid_t EdgeDataType;
/**
* Smoke test. On every iteration, each vertex sets its id to be
* id + iteration number. Vertices check whether their neighbors were
* set correctly. This assumes that the vertices are executed in round-robin order.
*/
struct SmokeTestProgram : public GraphChiProgram<VertexDataType, EdgeDataType> {
/**
 * Vertex update function.
 * Iteration 0: each vertex stamps its id on all out-edges.
 * Later iterations: each in-edge is expected to carry
 *   neighbor_id + iteration - (neighbor_id > my_id)
 * Larger-id neighbors have not yet been updated in the current
 * round-robin pass, hence the subtraction.
 */
void update(graphchi_vertex<VertexDataType, EdgeDataType> &vertex, graphchi_context &gcontext) {
if (gcontext.iteration == 0) {
for(int i=0; i < vertex.num_outedges(); i++) {
vertex.outedge(i)->set_data(vertex.id());
}
} else {
for(int i=0; i < vertex.num_inedges(); i++) {
graphchi_edge<vid_t> * edge = vertex.inedge(i);
vid_t inedgedata = edge->get_data();
// Larger-id neighbors lag one iteration behind in round-robin order.
vid_t expected = edge->vertex_id() + gcontext.iteration - (edge->vertex_id() > vertex.id());
if (inedgedata != expected) {
assert(false);
}
}
// Publish this iteration's value for the next round of checks.
for(int i=0; i < vertex.num_outedges(); i++) {
vertex.outedge(i)->set_data(vertex.id() + gcontext.iteration);
}
}
// After the final iteration every vertex holds niters.
vertex.set_data(gcontext.iteration + 1);
}
/**
 * Called before an iteration starts.
 */
void before_iteration(int iteration, graphchi_context &gcontext) {
}
/**
 * Called after an iteration has finished.
 */
void after_iteration(int iteration, graphchi_context &gcontext) {
}
/**
 * Called before an execution interval is started.
 */
void before_exec_interval(vid_t window_st, vid_t window_en, graphchi_context &gcontext) {
}
/**
 * Called after an execution interval has finished.
 */
void after_exec_interval(vid_t window_st, vid_t window_en, graphchi_context &gcontext) {
}
};
/**
* Vertex callback that checks the vertex data is ok.
*/
// Vertex callback verifying each vertex's final value equals the number of
// iterations; accumulates the total for the post-run global check.
class VertexDataChecker : public VCallback<VertexDataType> {
int iters;
public:
size_t total;
VertexDataChecker(int iters) : iters(iters), total(0) {}
void callback(vid_t vertex_id, VertexDataType &vecvalue) {
// Casts avoid signed/unsigned comparison warnings (vid_t is unsigned).
assert(vecvalue == (VertexDataType)iters);
total += (size_t) iters;
}
};
// Entry point: runs the standard-engine smoke test, then re-reads all
// vertex data from disk to verify the final values.
int main(int argc, const char ** argv) {
/* GraphChi initialization will read the command line
arguments and the configuration file. */
graphchi_init(argc, argv);
/* Metrics object for keeping track of performance counters
and other information. Currently required. */
metrics m("smoketest");
/* Basic arguments for application */
std::string filename = get_option_string("file"); // Base filename
int niters = get_option_int("niters", 4); // Number of iterations
bool scheduler = false; // Whether to use selective scheduling
/* Detect the number of shards or preprocess an input to create them */
int nshards = convert_if_notexists<EdgeDataType>(filename,
get_option_string("nshards", "auto"));
/* Run */
SmokeTestProgram program;
graphchi_engine<VertexDataType, EdgeDataType> engine(filename, nshards, scheduler, m);
engine.run(program, niters);
/* Check also the vertex data is ok: every vertex must equal niters */
VertexDataChecker vchecker(niters);
foreach_vertices(filename, 0, engine.num_vertices(), vchecker);
assert(vchecker.total == engine.num_vertices() * niters);
/* Report execution metrics */
metrics_report(m);
logstream(LOG_INFO) << "Smoketest passed successfully! Your system is working!" << std::endl;
return 0;
}
| C++ |
/**
* @file
* @author Aapo Kyrola <akyrola@cs.cmu.edu>
* @version 1.0
*
* @section LICENSE
*
* Copyright [2012] [Aapo Kyrola, Guy Blelloch, Carlos Guestrin / Carnegie Mellon University]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @section DESCRIPTION
*
* Creates a graph with edge data for each edge and loads it and checks
* the initial values were read correctly.
*/
#define DYNAMICEDATA 1
#define DYNAMICVERTEXDATA 1
#include <string>
#include "graphchi_basic_includes.hpp"
#include "api/dynamicdata/chivector.hpp"
using namespace graphchi;
/**
* Type definitions. Remember to create suitable graph shards using the
* Sharder-program.
*/
typedef chivector<size_t> VertexDataType;
typedef chivector<vid_t> EdgeDataType;
size_t checksum = 0;
size_t shouldbe = 0;
/**
* Smoke test.
*/
struct DynamicDataLoaderTestProgram : public GraphChiProgram<VertexDataType, EdgeDataType> {
// Guards the shared `checksum` accumulator against concurrent updates.
mutex lock;
/**
 * Vertex update function.
 * Verifies that the multivalue edge list generated by generatedata() was
 * sharded and loaded intact: each edge vector holds src+dst+k for
 * k = 0..numelems-1, where numelems is 3 when (src+dst)%3==1, else 1.
 * Also appends id-derived values to the vertex's own chivector and reads
 * them back immediately to exercise dynamic vertex data.
 */
void update(graphchi_vertex<VertexDataType, EdgeDataType > &vertex, graphchi_context &gcontext) {
for(int i=0; i < vertex.num_edges(); i++) {
chivector<vid_t> * evector = vertex.edge(i)->get_vector();
assert(evector != NULL);
// Each edge has three or one values in the chi vector
int numelems = ((vertex.id() + vertex.edge(i)->vertex_id()) % 3 == 1 ? 3 : 1);
for(int k=0; k < numelems ; k++) {
vid_t expected = vertex.id() + vertex.edge(i)->vertex_id() + k;
if (expected != evector->get(k)) {
logstream(LOG_ERROR) << "Vertex " << vertex.id() << ", edge dst: " << vertex.edge(i)->vertex_id() << std::endl;
logstream(LOG_ERROR) << "Mismatch (" << k << "): expected " << expected << " but had " << evector->get(k) << std::endl;
}
assert(evector->get(k) == expected);
}
// Accumulate first element under the lock; compared to `shouldbe`
// after the run (each edge is visited once from each endpoint).
lock.lock();
checksum += evector->get(0);
lock.unlock();
}
// Modify vertex data by adding values there */
chivector<size_t> * vvector = vertex.get_vector();
int numitems = vertex.id() % 10;
for(int i=0; i<numitems; i++) {
vvector->add(vertex.id() * 982192l + i); // Arbitrary
}
/* Check vertex data immediatelly */
for(int i=0; i<numitems; i++) {
size_t x = vvector->get(i);
size_t expected = vertex.id() * 982192l + i;
assert(x == expected);
}
}
/**
 * Called before an iteration starts.
 */
void before_iteration(int iteration, graphchi_context &gcontext) {
}
/**
 * Called after an iteration has finished.
 */
void after_iteration(int iteration, graphchi_context &gcontext) {
}
/**
 * Called before an execution interval is started.
 */
void before_exec_interval(vid_t window_st, vid_t window_en, graphchi_context &gcontext) {
}
/**
 * Called after an execution interval has finished.
 */
void after_exec_interval(vid_t window_st, vid_t window_en, graphchi_context &gcontext) {
}
};
void generatedata(std::string filename);
/**
 * Generates a synthetic multivalue edge-list file for the loader test.
 * Edges whose endpoint sum satisfies (i + dst) % 3 == 1 carry three
 * colon-separated values (i+dst, i+dst+1, i+dst+2); all others carry one.
 * `shouldbe` accumulates the expected checksum: each edge's first value
 * counted twice, once per endpoint, matching the update function's sum.
 */
void generatedata(std::string filename) {
    std::cout << "Generating data..." << std::endl;
    const char * fname = filename.c_str();
    FILE * f = fopen(fname, "w");
    if (f == NULL) {
        // Fail fast with a clear message instead of crashing inside
        // fprintf(NULL, ...) below.
        std::cerr << "ERROR: could not open " << filename << " for writing." << std::endl;
        assert(f != NULL);
        return;
    }
    set_conf("filetype", "edgelist");
    shouldbe = 0;
    int totalVertices = 200000; // 200 thousand vertices
    for(int i=0; i < totalVertices; i++) {
        int nedges = random() % 50; // 0..49 out-edges; 0 skips the vertex
        for(int j=0; j < nedges; j++) {
            // Spread destinations across the id space; self-loops skipped.
            int dst = (totalVertices / nedges) * j + i % nedges;
            if (dst != i) {
                if ((i + dst) % 3 == 1) {
                    fprintf(f, "%d\t%d\t%d:%d:%d\n", i, dst, i + dst, i + dst + 1, i + dst + 2);
                } else {
                    fprintf(f, "%d\t%d\t%d\n", i, dst, i + dst);
                }
                // Each edge will be counted from both endpoints at runtime.
                shouldbe += 2 * (i + dst);
            }
        }
    }
    fclose(f);
}
// Post-run callback: verifies each vertex's chivector holds exactly the
// (vertex_id % 10) values written during update(), in order.
class VertexValidator : public VCallback<chivector<size_t> > {
public:
virtual void callback(vid_t vertex_id, chivector<size_t> &vec) {
int numitems = vertex_id % 10;
// NOTE(review): size() vs int is a signed/unsigned comparison here —
// depends on chivector::size()'s return type; confirm before changing.
assert(vec.size() == numitems);
for(int j=0; j < numitems; j++) {
size_t x = vec.get(j);
assert(x == vertex_id * 982192l + (size_t)j);
}
}
};
// Entry point: generates a throwaway graph under /tmp, shards it, runs one
// iteration of the loader test, and verifies both the edge checksum and the
// per-vertex dynamic data before cleaning up the shards.
int main(int argc, const char ** argv) {
/* GraphChi initialization will read the command line
arguments and the configuration file. */
graphchi_init(argc, argv);
/* Metrics object for keeping track of performance counters
and other information. Currently required. */
metrics m("test-dynamicedata");
/* Basic arguments for application */
std::string filename = "/tmp/__chi_dyntest/testgraph"; // Base filename
mkdir("/tmp/__chi_dyntest", 0777);
int niters = 1; // Number of iterations
bool scheduler = false; // Whether to use selective scheduling
/* Generate data */
generatedata(filename);
set_conf("filetype", "multivalueedgelist");
// "3" shards here must match the delete_shards count below.
int nshards = convert_if_notexists<vid_t>(filename, "3");
checksum = 0;
/* Run */
DynamicDataLoaderTestProgram program;
graphchi_engine<VertexDataType, EdgeDataType> engine(filename, nshards, scheduler, m);
engine.set_reset_vertexdata(true);
engine.run(program, niters);
/* Check edge checksum against the value computed during generation */
std::cout << "Checksum: " << checksum << ", expecting: " << shouldbe << std::endl;
assert(shouldbe == checksum);
/* Check vertex values */
VertexValidator validator;
foreach_vertices(filename, 0, engine.num_vertices(), validator);
/* Clean up */
delete_shards<EdgeDataType>(filename, 3);
/* Report execution metrics */
metrics_report(m);
logstream(LOG_INFO) << "Test passed successfully! Your system is working!" << std::endl;
return 0;
}
| C++ |
/**
* @file
* @author Aapo Kyrola <akyrola@cs.cmu.edu>
* @version 1.0
*
* @section LICENSE
*
* Copyright [2012] [Aapo Kyrola, Guy Blelloch, Carlos Guestrin / Carnegie Mellon University]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @section DESCRIPTION
*
* Simple smoketest for the dynamic graph graphchi engine.
*/
#include <string>
#define SUPPORT_DELETIONS 1
#include "graphchi_basic_includes.hpp"
#include "engine/dynamic_graphs/graphchi_dynamicgraph_engine.hpp"
using namespace graphchi;
/**
* Type definitions. Remember to create suitable graph shards using the
* Sharder-program.
*/
typedef vid_t VertexDataType;
typedef vid_t EdgeDataType;
/**
* Smoke test. On every iteration, each vertex sets its id to be
* id + iteration number. Vertices check whether their neighbors were
* set correctly. This assumes that the vertices are executed in round-robin order.
* - Uses edges in inverse order to the first smoketest.
*/
struct SmokeTestProgram2 : public GraphChiProgram<VertexDataType, EdgeDataType> {
// Count of edges deleted during the current iteration; reset in
// before_iteration and bumped atomically from concurrent updates.
volatile size_t ndeleted;
/**
 * Vertex update function.
 * Mirror image of the first smoketest: values are written on IN-edges and
 * verified on OUT-edges, while roughly a quarter of the in-edges are
 * randomly deleted each iteration to exercise the dynamic engine's
 * deletion support. Between iterations the vertex value stores the count
 * of surviving in-edges so the next pass can verify deletions took effect;
 * only after the last iteration is it set to niters for the final check.
 */
void update(graphchi_vertex<VertexDataType, EdgeDataType> &vertex, graphchi_context &gcontext) {
int ninedges = 0;
if (gcontext.iteration == 0) {
for(int i=0; i < vertex.num_inedges(); i++) {
vertex.inedge(i)->set_data(vertex.id());
ninedges++;
}
} else {
// Keep track of the number of edegs to ensure that
// deletion works fine.
if (vertex.get_data() != vertex.num_inedges()) {
logstream(LOG_ERROR) << "Discrepancy in edge counts: " << vertex.get_data() << " != " << vertex.num_inedges() << std::endl;
}
assert(vertex.get_data() == vertex.num_inedges());
for(int i=0; i < vertex.num_outedges(); i++) {
graphchi_edge<vid_t> * edge = vertex.outedge(i);
vid_t outedgedata = edge->get_data();
// Larger-id neighbors lag one iteration behind in round-robin order.
vid_t expected = edge->vertex_id() + gcontext.iteration - (edge->vertex_id() > vertex.id());
// Deleted edges carry a sentinel value and are skipped.
if (!is_deleted_edge_value(edge->get_data())) {
if (outedgedata != expected) {
logstream(LOG_ERROR) << outedgedata << " != " << expected << std::endl;
assert(false);
}
}
}
for(int i=0; i < vertex.num_inedges(); i++) {
vertex.inedge(i)->set_data(vertex.id() + gcontext.iteration);
// Randomly delete ~25% of in-edges.
// NOTE(review): uses std::rand() while the rest of this codebase
// uses random() — presumably equivalent here; confirm.
if (std::rand() % 4 == 1) {
vertex.remove_inedge(i);
__sync_add_and_fetch(&ndeleted, 1);
} else {
ninedges++;
}
}
}
if (gcontext.iteration == gcontext.num_iterations - 1) {
// Final value checked by VertexDataChecker after the run.
vertex.set_data(gcontext.iteration + 1);
} else {
// Intermediate value: surviving in-edge count for next iteration.
vertex.set_data(ninedges);
}
}
/**
 * Called before an iteration starts.
 */
void before_iteration(int iteration, graphchi_context &gcontext) {
ndeleted = 0;
}
/**
 * Called after an iteration has finished.
 */
void after_iteration(int iteration, graphchi_context &gcontext) {
// Statistically some edges must have been deleted every iteration > 0.
if (gcontext.iteration > 0)
assert(ndeleted > 0);
logstream(LOG_INFO) << "Deleted: " << ndeleted << std::endl;
}
/**
 * Called before an execution interval is started.
 */
void before_exec_interval(vid_t window_st, vid_t window_en, graphchi_context &gcontext) {
}
/**
 * Called after an execution interval has finished.
 */
void after_exec_interval(vid_t window_st, vid_t window_en, graphchi_context &gcontext) {
}
};
/**
* Vertex callback that checks the vertex data is ok.
*/
/**
 * Vertex callback that verifies each vertex's final value equals the
 * number of iterations run, accumulating the values for a global check.
 */
class VertexDataChecker : public VCallback<VertexDataType> {
    int iters;     // Number of iterations the engine executed.
public:
    size_t total;  // Sum of verified values: iters per vertex.
    VertexDataChecker(int iters) : iters(iters), total(0) {}
    void callback(vid_t vertex_id, VertexDataType &vecvalue) {
        // Cast avoids a signed/unsigned comparison (VertexDataType is
        // vid_t, an unsigned 32-bit type); matches the casts used by the
        // other smoketest checkers in this codebase.
        assert(vecvalue == (VertexDataType) iters);
        total += (size_t) iters;
    }
};
// Entry point: runs the deletion-enabled smoke test on the dynamic-graph
// engine, then verifies the final vertex values on disk.
int main(int argc, const char ** argv) {
/* GraphChi initialization will read the command line
arguments and the configuration file. */
graphchi_init(argc, argv);
/* Metrics object for keeping track of performance counters
and other information. Currently required. */
metrics m("smoketest-dynamic-engine2");
/* Basic arguments for application */
std::string filename = get_option_string("file"); // Base filename
int niters = get_option_int("niters", 4); // Number of iterations
bool scheduler = false; // Whether to use selective scheduling
/* Detect the number of shards or preprocess an input to create them */
int nshards = convert_if_notexists<EdgeDataType>(filename,
get_option_string("nshards", "auto"));
/* Run */
SmokeTestProgram2 program;
graphchi_dynamicgraph_engine<VertexDataType, EdgeDataType> engine(filename, nshards, scheduler, m);
engine.run(program, niters);
/* Check also the vertex data is ok: every vertex must equal niters */
VertexDataChecker vchecker(niters);
foreach_vertices(filename, 0, engine.num_vertices(), vchecker);
assert(vchecker.total == engine.num_vertices() * niters);
/* Report execution metrics */
metrics_report(m);
logstream(LOG_INFO) << "Dynamic Engine Smoketest passed successfully! Your system is working!" << std::endl;
return 0;
}
| C++ |
/*
Copyright [2012] [Aapo Kyrola, Guy Blelloch, Carlos Guestrin / Carnegie Mellon University]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#ifndef DEF_GRAPHCHI_TYPES
#define DEF_GRAPHCHI_TYPES
#include <stdint.h>
namespace graphchi {
typedef uint32_t vid_t;
/**
* PairContainer encapsulates a pair of values of some type.
* Useful for bulk-synchronuos computation.
*/
/**
 * Holds two values of the same type and alternates their roles by
 * iteration parity. On even iterations `left` is the "old" slot read by
 * oldval() and set_newval() writes `right`; on odd iterations the roles
 * swap. This lets bulk-synchronous computations read the previous round's
 * value while producing the next one.
 */
template <typename ET>
struct PairContainer {
    ET left;
    ET right;
    // Value-initialize both slots.
    PairContainer() : left(ET()), right(ET()) {
    }
    // Slot written during the previous iteration.
    ET & oldval(int iter) {
        if (iter % 2 == 0) {
            return left;
        }
        return right;
    }
    // Store this iteration's result in the slot oldval() will return on
    // the following iteration.
    void set_newval(int iter, ET x) {
        ET & target = (iter % 2 == 0) ? right : left;
        target = x;
    }
};
}
#endif
| C++ |
/**
* @file
* @author Aapo Kyrola <akyrola@cs.cmu.edu>
* @version 1.0
*
* @section LICENSE
*
* Copyright [2012] [Aapo Kyrola, Guy Blelloch, Carlos Guestrin / Carnegie Mellon University]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @section DESCRIPTION
*
* Vertex and Edge objects.
*/
#ifndef DEF_GRAPHCHI_OBJECTS
#define DEF_GRAPHCHI_OBJECTS
#include <vector>
#include <assert.h>
#include <omp.h>
#include <string.h>
#include "graphchi_types.hpp"
#include "util/qsort.hpp"
namespace graphchi {
/**
* GNU COMPILER HACK TO PREVENT WARNINGS "Unused variable", if
* the particular app being compiled does not use a function.
*/
#ifdef __GNUC__
#define VARIABLE_IS_NOT_USED __attribute__ ((unused))
#else
#define VARIABLE_IS_NOT_USED
#endif
// Lightweight handle to one edge: the neighbor's id plus a pointer into the
// engine-owned edge-data buffer. Packed to minimize the per-edge memory
// footprint, since the engine materializes arrays of these.
template <typename EdgeDataType>
class graphchi_edge {
public:
vid_t vertexid; // Source or Target vertex id. Clear from context.
EdgeDataType * data_ptr; // Points into engine-managed edge data storage.
graphchi_edge() {}
graphchi_edge(vid_t _vertexid, EdgeDataType * edata_ptr) : vertexid(_vertexid), data_ptr(edata_ptr) {
}
#ifndef DYNAMICEDATA
// Fixed-size edge data: read/write the value through the pointer.
EdgeDataType get_data() {
return * data_ptr;
}
void set_data(EdgeDataType x) {
*data_ptr = x;
}
#else
// Dynamic edge data: expose the underlying chivector directly.
EdgeDataType * get_vector() { // EdgeDataType is a chivector
return data_ptr;
}
#endif
/**
 * Returns id of the endpoint of this edge.
 */
vid_t vertex_id() {
return vertexid;
}
} __attribute__((packed));
// Strict-weak ordering of edges by neighbor vertex id, for sorting edge
// arrays.
template <typename ET>
bool eptr_less(const graphchi_edge<ET> &a, const graphchi_edge<ET> &b) {
return a.vertexid < b.vertexid;
}
#ifdef SUPPORT_DELETIONS
/*
* Hacky support for edge deletions.
* Edges are deleted by setting the value of the edge to a special
* value that denotes it was deleted.
* In the future, a better system could be designed.
*/
// This is hacky: an edge is "deleted" by storing a type-specific sentinel
// in its data slot. One overload pair (declaration + definition) per
// supported edge type; the declarations carry VARIABLE_IS_NOT_USED so GCC
// does not warn when an app uses only a subset of them. (The previous
// version forward-declared the int overload twice and the bool overload
// not at all; normalized here to one matching declaration per overload.)
static inline bool VARIABLE_IS_NOT_USED is_deleted_edge_value(bool val);
static inline bool VARIABLE_IS_NOT_USED is_deleted_edge_value(bool val) {
    // For bool edges the stored value itself is the deletion flag.
    return val;
}
static inline bool VARIABLE_IS_NOT_USED is_deleted_edge_value(int val);
static inline bool VARIABLE_IS_NOT_USED is_deleted_edge_value(int val) {
    // Sentinel: all bits set (i.e. -1 as a signed int).
    return 0xffffffff == (unsigned int)val;
}
static inline bool VARIABLE_IS_NOT_USED is_deleted_edge_value(vid_t val);
static inline bool VARIABLE_IS_NOT_USED is_deleted_edge_value(vid_t val) {
    // Sentinel: maximum vid_t value.
    return 0xffffffffu == val;
}
static inline bool VARIABLE_IS_NOT_USED is_deleted_edge_value(float val);
static inline bool VARIABLE_IS_NOT_USED is_deleted_edge_value(float val) {
    // Sentinel: NaN (the only float that is neither < 0 nor > 0 here).
    return !(val < 0 || val > 0);
}
// Mark an edge deleted by writing the type's sentinel value.
static void VARIABLE_IS_NOT_USED remove_edgev(graphchi_edge<bool> * e);
static void VARIABLE_IS_NOT_USED remove_edgev(graphchi_edge<bool> * e) {
    e->set_data(true);
}
static void VARIABLE_IS_NOT_USED remove_edgev(graphchi_edge<vid_t> * e);
static void VARIABLE_IS_NOT_USED remove_edgev(graphchi_edge<vid_t> * e) {
    e->set_data(0xffffffff);
}
static void VARIABLE_IS_NOT_USED remove_edgev(graphchi_edge<int> * e);
static void VARIABLE_IS_NOT_USED remove_edgev(graphchi_edge<int> * e) {
    e->set_data(0xffffffff);
}
#endif
// Engine-internal vertex representation: tracks in/out edge counts, points
// at engine-owned edge arrays, and carries scheduling/modification flags.
// Applications use the graphchi_vertex subclass instead.
template <typename VertexDataType, typename EdgeDataType>
class internal_graphchi_vertex {
public: // Todo, use friend
int inc;
// volatile: out-edges may be appended from multiple threads (see the
// atomic increment in add_outedge).
volatile int outc;
vid_t vertexid;
protected:
graphchi_edge<EdgeDataType> * inedges_ptr;
graphchi_edge<EdgeDataType> * outedges_ptr;
public:
// Set when vertex data changed and must be written back to disk.
bool modified;
VertexDataType * dataptr;
/* Accessed directly by the engine */
bool scheduled;
bool parallel_safe;
#ifdef SUPPORT_DELETIONS
// Counts of edges skipped because they carry the deletion sentinel.
int deleted_inc;
int deleted_outc;
#endif
// NOTE(review): this default constructor leaves vertexid, the edge
// pointers and the flag members uninitialized — presumably callers
// always assign them before use; confirm.
internal_graphchi_vertex() : inc(0), outc(0) {
#ifdef SUPPORT_DELETIONS
deleted_outc = deleted_inc = 0;
#endif
dataptr = NULL;
}
// NOTE(review): indeg/outdeg are accepted but not stored here — the
// counts are rebuilt incrementally via add_inedge/add_outedge.
internal_graphchi_vertex(vid_t _id, graphchi_edge<EdgeDataType> * iptr,
graphchi_edge<EdgeDataType> * optr,
int indeg,
int outdeg) :
vertexid(_id), inedges_ptr(iptr), outedges_ptr(optr) {
inc = 0;
outc = 0;
scheduled = false;
modified = false;
parallel_safe = true;
dataptr = NULL;
#ifdef SUPPORT_DELETIONS
deleted_inc = 0;
deleted_outc = 0;
#endif
}
virtual ~internal_graphchi_vertex() {}
vid_t id() const {
return vertexid;
}
int num_inedges() const {
return inc;
}
int num_outedges() const {
return outc;
}
int num_edges() const {
return inc + outc;
}
// Optimization: as only memshard (not streaming shard) creates inedgers,
// we do not need atomic instructions here!
inline void add_inedge(vid_t src, EdgeDataType * ptr, bool special_edge) {
#ifdef SUPPORT_DELETIONS
// Deleted edges are counted separately and not exposed to the app.
if (inedges_ptr != NULL && is_deleted_edge_value(*ptr)) {
deleted_inc++;
return;
}
#endif
if (inedges_ptr != NULL)
inedges_ptr[inc] = graphchi_edge<EdgeDataType>(src, ptr);
inc++; // Note: do not move inside the brackets, since we need to still keep track of inc even if inedgeptr is null!
assert(src != vertexid);
/* if(inedges_ptr != NULL && inc > outedges_ptr - inedges_ptr) {
logstream(LOG_FATAL) << "Tried to add more in-edges as the stored in-degree of this vertex (" << src << "). Perhaps a preprocessing step had failed?" << std::endl;
assert(inc <= outedges_ptr - inedges_ptr);
} */ // Deleted, since does not work when we have separate in-edge and out-edge arrays
}
// Unlike add_inedge, out-edges may be added concurrently, hence the
// atomic fetch-and-add on outc.
inline void add_outedge(vid_t dst, EdgeDataType * ptr, bool special_edge) {
#ifdef SUPPORT_DELETIONS
if (outedges_ptr != NULL && is_deleted_edge_value(*ptr)) {
deleted_outc++;
return;
}
#endif
int i = __sync_add_and_fetch(&outc, 1);
if (outedges_ptr != NULL) outedges_ptr[i-1] = graphchi_edge<EdgeDataType>(dst, ptr);
assert(dst != vertexid);
}
};
/**
 * User-facing vertex type: extends the internal vertex with edge
 * accessors, vertex-value get/set, and (optionally) edge removal.
 * Both template parameters are the user's data types for vertex and
 * edge values.
 */
template <typename VertexDataType, typename EdgeDataType >
class graphchi_vertex : public internal_graphchi_vertex<VertexDataType, EdgeDataType> {
public:
graphchi_vertex() : internal_graphchi_vertex<VertexDataType, EdgeDataType>() { }
graphchi_vertex(vid_t _id,
graphchi_edge<EdgeDataType> * iptr,
graphchi_edge<EdgeDataType> * optr,
int indeg,
int outdeg) :
internal_graphchi_vertex<VertexDataType, EdgeDataType>(_id, iptr, optr, indeg, outdeg) {}
virtual ~graphchi_vertex() {}
/**
* Returns ith edge of a vertex, ignoring
* edge direction. Indices [0, inc) address in-edges,
* [inc, inc+outc) address out-edges.
*/
graphchi_edge<EdgeDataType> * edge(int i) {
if (i < this->inc) return inedge(i);
else return outedge(i - this->inc);
}
/// Returns the ith in-edge (asserts 0 <= i < num_inedges()).
graphchi_edge<EdgeDataType> * inedge(int i) {
assert(i >= 0 && i < this->inc);
return &this->inedges_ptr[i];
}
/// Returns the ith out-edge (asserts 0 <= i < num_outedges()).
graphchi_edge<EdgeDataType> * outedge(int i) {
assert(i >= 0 && i < this->outc);
return &this->outedges_ptr[i];
}
/**
* Returns a uniformly-ish chosen out-edge, or NULL if there are none.
* NOTE(review): uses random() % outc, which has slight modulo bias.
*/
graphchi_edge<EdgeDataType> * random_outedge() {
if (this->outc == 0) return NULL;
return outedge((int) (std::abs(random()) % this->outc));
}
/**
* Get the value of vertex
*/
#ifndef DYNAMICVERTEXDATA
VertexDataType get_data() {
return *(this->dataptr);
}
#else
// VertexDataType must be a chivector
VertexDataType * get_vector() {
this->modified = true; // Assume vector always modified... Temporary solution.
return this->dataptr;
}
#endif
/**
* Modify the vertex value. The new value will be
* stored on disk.
*/
virtual void set_data(VertexDataType d) {
*(this->dataptr) = d;
this->modified = true;
}
// TODO: rethink
// Whether edge objects themselves carry computation (false for the standard API).
static bool computational_edges() {
return false;
}
// Whether the engine needs to load out-edge values (true for the standard API).
static bool read_outedges() {
return true;
}
/**
* Sorts all the edges. Note: this will destroy information
* about the in/out direction of an edge. Do use only if you
* ignore the edge direction.
* Compacts in- and out-edges into one contiguous array first
* (needed when deletions left a gap between the two arrays).
*/
void VARIABLE_IS_NOT_USED sort_edges_indirect() {
// Check for deleted edges first...
if (this->inc != (this->outedges_ptr - this->inedges_ptr)) {
// Moving out-edges to start right after the last in-edge.
memmove(&this->inedges_ptr[this->inc], this->outedges_ptr, this->outc * sizeof(graphchi_edge<EdgeDataType>));
this->outedges_ptr = &this->inedges_ptr[this->inc];
}
quickSort(this->inedges_ptr, (int) (this->inc + this->outc), eptr_less<EdgeDataType>);
}
#ifdef SUPPORT_DELETIONS
void VARIABLE_IS_NOT_USED remove_edge(int i) {
remove_edgev(edge(i));
}
void VARIABLE_IS_NOT_USED remove_inedge(int i) {
remove_edgev(inedge(i));
}
void VARIABLE_IS_NOT_USED remove_outedge(int i) {
remove_edgev(outedge(i));
}
#endif
};
/**
* Experimental code
*/
// If the "special" flag bit is set in a raw edge id, the edge is "special".
// This is used to indicate - in the neighborhood model - that neighbor's
// value is cached in memory.
// NOTE(review): HIGHMASK evaluates to 0x40000000 (bit 30) and CLEARMASK to
// 0x3fffffff, so ids effectively use the low 30 bits; "highest order bit"
// holds only if vertex ids are restricted to 31 bits — confirm.
#define HIGHMASK (1 + (2147483647 >> 1))
#define CLEARMASK (2147483647 >> 1)
/// Strip the special-flag bit from a raw id; reports the flag via is_special.
inline vid_t translate_edge(vid_t rawid, bool &is_special) {
is_special = (rawid & HIGHMASK) != 0;
return rawid & CLEARMASK;
}
/// Return rawid with the special-flag bit set.
inline vid_t make_special(vid_t rawid) {
return rawid | HIGHMASK;
}
/// True if the special-flag bit is set in rawid.
inline bool is_special(vid_t rawid) {
return (rawid & HIGHMASK) != 0;
}
} // Namespace
#endif
| C++ |
/**
* @file
* @author Aapo Kyrola <akyrola@cs.cmu.edu>
* @version 1.0
*
* @section LICENSE
*
* Copyright [2012] [Aapo Kyrola, Guy Blelloch, Carlos Guestrin / Carnegie Mellon University]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @section DESCRIPTION
*
* GraphChiProgram must be subclassed by GraphChi user programs.
* They can define an update function (run for each vertex), and
* call backs for iteration and interval beginning and ending.
*/
#ifndef GRAPHCHI_PROGRAM_DEF
#define GRAPHCHI_PROGRAM_DEF
#include "api/graph_objects.hpp"
#include "api/graphchi_context.hpp"
namespace graphchi {
/**
 * Base class for GraphChi user programs. Subclasses implement update()
 * (run once per vertex per iteration) and may override the no-op hooks
 * that bracket iterations and execution intervals.
 */
template <typename VertexDataType_, typename EdgeDataType_,
          typename vertex_t = graphchi_vertex<VertexDataType_, EdgeDataType_> >
class GraphChiProgram {
public:
    typedef VertexDataType_ VertexDataType;
    typedef EdgeDataType_ EdgeDataType;

    virtual ~GraphChiProgram() {}

    /// Hook invoked once before each iteration begins. Default: no-op.
    virtual void before_iteration(int iter, graphchi_context &ctx) { }

    /// Hook invoked once after each iteration has finished. Default: no-op.
    virtual void after_iteration(int iter, graphchi_context &ctx) { }

    /// Hook invoked before the vertices of an execution interval are processed.
    virtual void before_exec_interval(vid_t window_st, vid_t window_en, graphchi_context &ctx) { }

    /// Hook invoked after the vertices of an execution interval have been processed.
    virtual void after_exec_interval(vid_t window_st, vid_t window_en, graphchi_context &ctx) { }

    /// Per-vertex update function; must be provided by the user program.
    virtual void update(vertex_t &v, graphchi_context &ctx) = 0;
};
}
#endif
| C++ |
/**
* @file
* @author Aapo Kyrola <akyrola@cs.cmu.edu>
* @version 1.0
*
* @section LICENSE
*
* Copyright [2012] [Aapo Kyrola, Guy Blelloch, Carlos Guestrin / Carnegie Mellon University]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @section DESCRIPTION
*
* Semi-synchronous implementation of the functional API.
*/
#ifndef GRAPHCHI_FUNCTIONAL_SEMISYNC_DEF
#define GRAPHCHI_FUNCTIONAL_SEMISYNC_DEF
#include <assert.h>
#include "api/graph_objects.hpp"
#include "api/graphchi_context.hpp"
#include "api/functional/functional_defs.hpp"
#include "metrics/metrics.hpp"
#include "graphchi_types.hpp"
namespace graphchi {
/**
 * Vertex type for the semi-synchronous functional API. Instead of storing
 * edge arrays, it folds in-edge values into `cumval` as they stream by
 * (add_inedge = "gather"+"sum") and writes the computed vertex value to
 * out-edges on the fly (add_outedge = "scatter").
 */
template <typename KERNEL>
class functional_vertex_unweighted_semisync : public graphchi_vertex<typename KERNEL::VertexDataType, typename KERNEL::EdgeDataType> {
public:
typedef typename KERNEL::VertexDataType VT;
typedef typename KERNEL::EdgeDataType ET;
VT cumval;                  // running accumulator of gathered neighbor values
KERNEL kernel;              // user kernel (stateless callbacks)
vertex_info vinfo;          // id/degree descriptor passed to kernel callbacks
graphchi_context * gcontext;
functional_vertex_unweighted_semisync() : graphchi_vertex<VT, ET> () {}
functional_vertex_unweighted_semisync(graphchi_context &ginfo, vid_t _id, int indeg, int outdeg) :
graphchi_vertex<VT, ET> (_id, NULL, NULL, indeg, outdeg) {
vinfo.indegree = indeg;
vinfo.outdegree = outdeg;
vinfo.vertexid = _id;
cumval = kernel.reset();
gcontext = &ginfo;
}
// The standard edge-array constructor is unsupported for functional vertices.
functional_vertex_unweighted_semisync(vid_t _id,
graphchi_edge<ET> * iptr,
graphchi_edge<ET> * optr,
int indeg,
int outdeg) {
assert(false); // This should never be called.
}
/// Iteration 0: initialize the vertex value from the kernel.
void first_iteration(graphchi_context &gcontext_) {
this->set_data(kernel.initial_value(gcontext_, vinfo));
}
// Optimization: as only memshard (not streaming shard) creates inedgers,
// we do not need atomic instructions here!
/// "Gather"+"sum": fold neighbor src's broadcast value into cumval (skipped on iteration 0).
inline void add_inedge(vid_t src, ET * ptr, bool special_edge) {
if (gcontext->iteration > 0) {
cumval = kernel.plus(cumval, kernel.op_neighborval(*gcontext, vinfo, src, *ptr));
}
}
/// "Apply": compute and store the new vertex value from the accumulated sum.
void ready(graphchi_context &gcontext_) {
this->set_data(kernel.compute_vertexvalue(gcontext_, vinfo, cumval));
}
/// "Scatter": write the value to broadcast to neighbor dst onto the out-edge.
inline void add_outedge(vid_t dst, ET * ptr, bool special_edge) {
*ptr = kernel.value_to_neighbor(*gcontext, vinfo, dst, this->get_data());
}
// NOTE(review): non-static here, but hides a *static* member of
// graphchi_vertex — presumably the engine calls it through an instance;
// confirm and consider making it static for consistency with read_outedges().
bool computational_edges() {
return true;
}
/* Outedges do not need to be read, they just need to be written */
static bool read_outedges() {
return false;
}
};
/**
 * Adapter that exposes a functional KERNEL as a standard GraphChiProgram
 * for the semi-synchronous functional engine.
 */
template <typename KERNEL>
class FunctionalProgramProxySemisync : public GraphChiProgram<typename KERNEL::VertexDataType, typename KERNEL::EdgeDataType, functional_vertex_unweighted_semisync<KERNEL> > {
public:
typedef typename KERNEL::VertexDataType VertexDataType;
typedef typename KERNEL::EdgeDataType EdgeDataType;
typedef functional_vertex_unweighted_semisync<KERNEL> fvertex_t;
/**
* Called before an iteration starts.
*/
void before_iteration(int iteration, graphchi_context &info) {
}
/**
* Called after an iteration has finished.
*/
void after_iteration(int iteration, graphchi_context &ginfo) {
}
/**
* Called before an execution interval is started.
*/
void before_exec_interval(vid_t window_st, vid_t window_en, graphchi_context &ginfo) {
}
/**
* Update function: iteration 0 seeds the vertex value; later iterations
* apply the kernel to the value gathered from in-edges.
* (The previous comment said "Pagerank update function" — copy-paste;
* this proxy is kernel-generic.)
*/
void update(fvertex_t &v, graphchi_context &ginfo) {
if (ginfo.iteration == 0) {
v.first_iteration(ginfo);
} else {
v.ready(ginfo);
}
}
};
}
#endif
| C++ |
/**
* @file
* @author Aapo Kyrola <akyrola@cs.cmu.edu>
* @version 1.0
*
* @section LICENSE
*
* Copyright [2012] [Aapo Kyrola, Guy Blelloch, Carlos Guestrin / Carnegie Mellon University]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @section DESCRIPTION
*
* Bulk-synchronous implementation of the functional API.
* This API can be used to implement Sparse-Matrix-Vector-Multiply programs.
*
* @section TODO
*
* There is too much common code with the semi-sync version. Consolidate!
*/
#ifndef GRAPHCHI_FUNCTIONAL_BULKSYNC_DEF
#define GRAPHCHI_FUNCTIONAL_BULKSYNC_DEF
#include <assert.h>
#include "api/graph_objects.hpp"
#include "api/graphchi_context.hpp"
#include "api/functional/functional_defs.hpp"
#include "metrics/metrics.hpp"
#include "graphchi_types.hpp"
namespace graphchi {
/**
 * Vertex type for the bulk-synchronous functional API. Each edge stores a
 * PairContainer with the value for the current and the previous iteration,
 * so "gather" reads old values while "scatter" writes new ones.
 */
template <typename KERNEL>
class functional_vertex_unweighted_bulksync : public graphchi_vertex<typename KERNEL::VertexDataType, PairContainer<typename KERNEL::EdgeDataType> > {
public:
typedef typename KERNEL::VertexDataType VT;
typedef PairContainer<typename KERNEL::EdgeDataType> ET;
KERNEL kernel;              // user kernel (stateless callbacks)
VT cumval;                  // running accumulator of gathered neighbor values
vertex_info vinfo;          // id/degree descriptor passed to kernel callbacks
graphchi_context * gcontext;
functional_vertex_unweighted_bulksync() : graphchi_vertex<VT, ET> () {}
functional_vertex_unweighted_bulksync(graphchi_context &ginfo, vid_t _id, int indeg, int outdeg) :
graphchi_vertex<VT, ET> (_id, NULL, NULL, indeg, outdeg) {
vinfo.indegree = indeg;
vinfo.outdegree = outdeg;
vinfo.vertexid = _id;
cumval = kernel.reset();
gcontext = &ginfo;
}
// The standard edge-array constructor is unsupported for functional vertices.
functional_vertex_unweighted_bulksync(vid_t _id,
graphchi_edge<ET> * iptr,
graphchi_edge<ET> * optr,
int indeg,
int outdeg) {
assert(false); // This should never be called.
}
/// Iteration 0: initialize the vertex value from the kernel.
void first_iteration(graphchi_context &ginfo) {
this->set_data(kernel.initial_value(ginfo, vinfo));
gcontext = &ginfo;
}
// Optimization: as only memshard (not streaming shard) creates inedgers,
// we do not need atomic instructions here!
/// "Gather"+"sum": fold neighbor src's *previous-iteration* value into cumval.
inline void add_inedge(vid_t src, ET * ptr, bool special_edge) {
if (gcontext->iteration > 0) {
cumval = kernel.plus(cumval, kernel.op_neighborval(*gcontext,
vinfo,
src,
ptr->oldval(gcontext->iteration)));
}
}
// "Apply": compute and store the new vertex value.
// NOTE(review): ignores its `ginfo` parameter and uses the stored gcontext
// (the semisync variant uses the parameter) — confirm this is intentional.
void ready(graphchi_context &ginfo) {
this->set_data(kernel.compute_vertexvalue(*gcontext, vinfo, cumval));
}
/// "Scatter": set the new-iteration slot of the pair, preserving the old value.
inline void add_outedge(vid_t dst, ET * ptr, bool special_edge) {
typename KERNEL::EdgeDataType newval =
kernel.value_to_neighbor(*gcontext, vinfo, dst, this->get_data());
ET paircont = *ptr;
paircont.set_newval(gcontext->iteration, newval);
*ptr = paircont;
}
// NOTE(review): non-static here, but hides a *static* member of
// graphchi_vertex — presumably called through an instance; confirm.
bool computational_edges() {
return true;
}
/**
* We also need to read the outedges, because we need
* to preserve the old value as well.
*/
static bool read_outedges() {
return true;
}
};
/**
 * Adapter that exposes a functional KERNEL as a standard GraphChiProgram
 * for the bulk-synchronous functional engine.
 */
template <typename KERNEL>
class FunctionalProgramProxyBulkSync : public GraphChiProgram<typename KERNEL::VertexDataType, PairContainer<typename KERNEL::EdgeDataType>, functional_vertex_unweighted_bulksync<KERNEL> > {
public:
typedef typename KERNEL::VertexDataType VertexDataType;
typedef PairContainer<typename KERNEL::EdgeDataType> EdgeDataType;
typedef functional_vertex_unweighted_bulksync<KERNEL> fvertex_t;
/**
* Called before an iteration starts.
*/
void before_iteration(int iteration, graphchi_context &info) {
}
/**
* Called after an iteration has finished.
*/
void after_iteration(int iteration, graphchi_context &ginfo) {
}
/**
* Called before an execution interval is started.
*/
void before_exec_interval(vid_t window_st, vid_t window_en, graphchi_context &ginfo) {
}
/**
* Update function: iteration 0 seeds the vertex value; later iterations
* apply the kernel to the value gathered from in-edges.
* (The previous comment said "Pagerank update function" — copy-paste;
* this proxy is kernel-generic.)
*/
void update(fvertex_t &v, graphchi_context &ginfo) {
if (ginfo.iteration == 0) {
v.first_iteration(ginfo);
} else {
v.ready(ginfo);
}
}
};
}
#endif
| C++ |
/**
* @file
* @author Aapo Kyrola <akyrola@cs.cmu.edu>
* @version 1.0
*
* @section LICENSE
*
* Copyright [2012] [Aapo Kyrola, Guy Blelloch, Carlos Guestrin / Carnegie Mellon University]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @section DESCRIPTION
*
* Functional API defs.
*/
#ifndef GRAPHCHI_FUNCTIONALDEFS_DEF
#define GRAPHCHI_FUNCTIONALDEFS_DEF
#include "api/graphchi_program.hpp"
namespace graphchi {
/**
 * Lightweight vertex descriptor handed to functional-kernel callbacks
 * in place of a full vertex object.
 */
struct vertex_info {
vid_t vertexid;   // vertex identifier
int indegree;     // number of in-edges
int outdegree;    // number of out-edges
};
};
#endif
| C++ |
/**
* @file
* @author Aapo Kyrola <akyrola@cs.cmu.edu>
* @version 1.0
*
* @section LICENSE
*
* Copyright [2012] [Aapo Kyrola, Guy Blelloch, Carlos Guestrin / Carnegie Mellon University]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @section DESCRIPTION
*
* Alternative "functional" API for GraphChi. The API is implemented as a
* layer on top of the standard API, but uses a specialized engine "functional_engine",
* which processes the graph data in different order. Namely, it first loads in-edges,
* then executes updates, and finally writes new values (broadcasts) to out-edges.
*/
#ifndef GRAPHCHI_FUNCTIONALAPI_DEF
#define GRAPHCHI_FUNCTIONALAPI_DEF
#include <assert.h>
#include "api/graph_objects.hpp"
#include "api/graphchi_context.hpp"
#include "engine/functional/functional_engine.hpp"
#include "metrics/metrics.hpp"
#include "graphchi_types.hpp"
#include "api/functional/functional_defs.hpp"
#include "api/functional/functional_semisync.hpp"
#include "api/functional/functional_bulksync.hpp"
#include "preprocessing/conversions.hpp"
namespace graphchi {
/**
 * Superclass for functional ("gather / sum / apply / scatter") kernels.
 * User kernels subclass this and implement each stage; the functional
 * vertex types invoke them while the engine streams edges.
 */
template <typename FVertexDataType, typename FEdgeDataType>
struct functional_kernel {
    typedef FVertexDataType VertexDataType;
    typedef FEdgeDataType EdgeDataType;
    functional_kernel() {}
    /* Fix: this type declares virtual member functions, so deleting a
       subclass through a functional_kernel* without a virtual destructor
       would be undefined behavior. */
    virtual ~functional_kernel() {}
    /* Initial value - on first iteration */
    virtual VertexDataType initial_value(graphchi_context &info, vertex_info& myvertex) = 0;
    /* Called before first "gather" to produce the empty accumulator value */
    virtual VertexDataType reset() = 0;
    // Note: Unweighted version, edge value should also be passed
    // "Gather": transform the value a neighbor broadcast on the in-edge.
    virtual EdgeDataType op_neighborval(graphchi_context &info, vertex_info& myvertex, vid_t nbid, EdgeDataType nbval)= 0;
    // "Sum": fold one gathered contribution into the running accumulator.
    virtual EdgeDataType plus(VertexDataType curval, EdgeDataType toadd) = 0;
    // "Apply": compute the new vertex value from the accumulated sum.
    virtual VertexDataType compute_vertexvalue(graphchi_context &ginfo, vertex_info& myvertex, EdgeDataType nbvalsum) = 0;
    // "Scatter": value to write on the out-edge toward neighbor nbid.
    virtual EdgeDataType value_to_neighbor(graphchi_context &info, vertex_info& myvertex, vid_t nbid, VertexDataType myval) = 0;
};
/**
 * Run a functional kernel with unweighted edges.
 * The semantics of this API are
 * less well-defined than the standard one, because this API is "semi-synchronous". That is,
 * inside a sub-interval, new values of neighbors are not observed, but
 * next sub-interval will observe the new values.
 *
 * See application "pagerank_functional" for an example.
 * @param KERNEL needs to be a class/struct that subclasses the functional_kernel
 * @param filename base filename
 * @param niters number of iterations to run
 * @param _m metrics object
 * (Note: the number of shards is not a parameter; it is determined below
 * by convert_if_notexists() from the "nshards" command-line option.)
 */
template <class KERNEL>
void run_functional_unweighted_semisynchronous(std::string filename, int niters, metrics &_m) {
FunctionalProgramProxySemisync<KERNEL> program;
/* Process input file - if not already preprocessed */
int nshards
= convert_if_notexists<typename FunctionalProgramProxySemisync<KERNEL>::EdgeDataType>(filename, get_option_string("nshards", "auto"));
functional_engine<typename FunctionalProgramProxySemisync<KERNEL>::VertexDataType,
typename FunctionalProgramProxySemisync<KERNEL>::EdgeDataType,
typename FunctionalProgramProxySemisync<KERNEL>::fvertex_t >
engine(filename, nshards, false, _m);
engine.set_modifies_inedges(false); // Important: in-edges are only read ("gather")
engine.set_modifies_outedges(true); // Important: out-edges are written ("scatter")
engine.run(program, niters);
}
/**
 * Run a functional kernel with unweighted edges in the bulk-synchronous model.
 * Note: shards need to have space to store two values for each edge.
 *
 * See application "pagerank_functional" for an example.
 * @param filename base filename
 * @param niters number of iterations to run
 * @param _m metrics object
 * (Note: the number of shards is not a parameter; it is determined below
 * by convert_if_notexists() from the "nshards" command-line option.)
 */
template <class KERNEL>
void run_functional_unweighted_synchronous(std::string filename, int niters, metrics &_m) {
FunctionalProgramProxyBulkSync<KERNEL> program;
int nshards
= convert_if_notexists<typename FunctionalProgramProxyBulkSync<KERNEL>::EdgeDataType>(filename, get_option_string("nshards", "auto"));
functional_engine<typename FunctionalProgramProxyBulkSync<KERNEL>::VertexDataType,
typename FunctionalProgramProxyBulkSync<KERNEL>::EdgeDataType,
typename FunctionalProgramProxyBulkSync<KERNEL>::fvertex_t >
engine(filename, nshards, false, _m);
engine.set_modifies_inedges(false); // Important: in-edges are only read ("gather")
engine.set_modifies_outedges(true); // Important: out-edges are written ("scatter")
engine.set_enable_deterministic_parallelism(false); // Bulk synchronous does not need consistency.
engine.run(program, niters);
}
}
#endif
| C++ |
/**
* @file
* @author Aapo Kyrola <akyrola@cs.cmu.edu>
* @version 1.0
*
* @section LICENSE
*
* Copyright [2012] [Aapo Kyrola, Guy Blelloch, Carlos Guestrin / Carnegie Mellon University]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @section DESCRIPTION
*
* Variable size typed vector (type must be a plain old datatype) that
* allows adding and removing of elements.
*/
#ifndef DEF_GRAPHCHI_CHIVECTOR
#define DEF_GRAPHCHI_CHIVECTOR
#include <vector>
#include <stdint.h>
namespace graphchi {
#define MINCAPACITY 2
/**
 * Pool the extension parts of chi-vectors.
 * NOTE(review): currently an empty placeholder — pooling is not implemented.
 */
template <typename T>
class extension_pool {
};
/**
 * Variable-size vector of a plain-old-data element type T.
 * The first `ncapacity` elements live in an externally owned buffer
 * (`data`, typically inside an edge/vertex data block and never freed
 * here); elements appended beyond that capacity spill into a
 * heap-allocated std::vector ("extensions") owned by this object.
 */
template <typename T>
class chivector {
    uint16_t nsize;        // current number of elements
    uint16_t ncapacity;    // capacity of the external inline buffer `data`
    T * data;              // externally owned inline storage (not freed here)
    std::vector<T> * extensions;  // overflow storage; TODO: use a more memory efficient system?
public:
    typedef T element_type_t;
    typedef uint32_t sizeword_t;

    /**
     * Default constructor: an empty vector with no inline buffer.
     * Fix: previously nsize, ncapacity and data were left uninitialized,
     * so a default-constructed chivector was in an indeterminate state;
     * they are now zero-initialized (all appends then go to `extensions`).
     */
    chivector() : nsize(0), ncapacity(0), data(NULL) {
        extensions = NULL;
    }

    /**
     * Wrap an existing buffer `dataptr` holding `sz` elements with room
     * for `cap` elements (cap >= sz is asserted).
     */
    chivector(uint16_t sz, uint16_t cap, T * dataptr) : data(dataptr) {
        nsize = sz;
        ncapacity = cap;
        assert(cap >= nsize);
        extensions = NULL;
    }

    ~chivector() {
        // Only the overflow vector is owned; `data` belongs to the caller.
        if (extensions != NULL) {
            delete extensions;
            extensions = NULL;
        }
    }

    /// Copy all elements, in order, into `dest` (must have room for size()).
    void write(T * dest) {
        int sz = (int) this->size();
        for(int i=0; i < sz; i++) {
            dest[i] = get(i); // TODO: use memcpy
        }
    }

    /// Number of elements currently stored.
    uint16_t size() {
        return nsize;
    }

    /// Capacity value used for serialization: at least MINCAPACITY,
    /// otherwise the current size (note: ignores ncapacity).
    uint16_t capacity() {
        return nsize > MINCAPACITY ? nsize : MINCAPACITY;
    }

    /// Append a value; spills to the overflow vector once the inline
    /// buffer is full.
    void add(T val) {
        nsize ++;
        if (nsize > ncapacity) {
            if (extensions == NULL) extensions = new std::vector<T>();
            extensions->push_back(val);
        } else {
            data[nsize - 1] = val;
        }
    }

    // idx should already exist in the array (overflow slots require that
    // add() has created `extensions`).
    void set(int idx, T val){
        if (idx >= ncapacity) {
            (*extensions)[idx - (int)ncapacity] = val;
        } else {
            data[idx] = val;
        }
    }

    // TODO: addmany()

    /// Read the element at idx (no bounds checking).
    T get(int idx) {
        if (idx >= ncapacity) {
            return (* extensions)[idx - (int)ncapacity];
        } else {
            return data[idx];
        }
    }

    void remove(int idx) {
        assert(false);  // not implemented
    }

    int find(T val) {
        assert(false);  // not implemented
        return -1;
    }

    /// Drop all elements (keeps the overflow vector's storage).
    void clear() {
        nsize = 0;
    }

    // TODO: iterators
};
}
#endif
| C++ |
/**
* Copyright (c) 2009 Carnegie Mellon University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an "AS
* IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language
* governing permissions and limitations under the License.
*
* For more about this software visit:
*
* http://www.graphlab.ml.cmu.edu
*
*/
#ifndef GRAPHLAB_RANDOM_HPP
#define GRAPHLAB_RANDOM_HPP
#include <cstdlib>
#include <stdint.h>
#include <vector>
#include <limits>
#include <algorithm>
#include <boost/random.hpp>
#include "util/pthread_tools.hpp"
using namespace graphchi;
namespace graphlab {
/**
* \ingroup random
* A collection of thread safe random number routines. Each thread
* is assigned its own generator however assigning a seed affects
* all current and future generators.
*/
namespace random {
///////////////////////////////////////////////////////////////////////
//// Underlying generator definition
namespace distributions {
/**
* The uniform distribution struct is used for partial function
* specialization. Generating uniform random real numbers is
* accomplished slightly differently than for integers.
* Therefore the base case is for integers and we then
* specialize the two real number types (floats and doubles).
*/
template<typename IntType>
struct uniform {
typedef boost::uniform_int<IntType> distribution_type;
template<typename RealRNG, typename DiscreteRNG>
static inline IntType sample(RealRNG& real_rng,
DiscreteRNG& discrete_rng,
const IntType& min, const IntType& max) {
// Integer case: draws from the discrete generator; range is inclusive [min, max].
return distribution_type(min, max)(discrete_rng);
}
};
template<>
struct uniform<double> {
typedef boost::uniform_real<double> distribution_type;
template<typename RealRNG, typename DiscreteRNG>
static inline double sample(RealRNG& real_rng,
DiscreteRNG& discrete_rng,
const double& min, const double& max) {
// Real case: draws from the real-valued generator; range is half-open [min, max).
return distribution_type(min, max)(real_rng);
}
};
template<>
struct uniform<float> {
typedef boost::uniform_real<float> distribution_type;
template<typename RealRNG, typename DiscreteRNG>
static inline float sample(RealRNG& real_rng,
DiscreteRNG& discrete_rng,
const float& min, const float& max) {
// Real case: draws from the real-valued generator; range is half-open [min, max).
return distribution_type(min, max)(real_rng);
}
};
}; // end of namespace distributions
/**
* The generator class is the base underlying type used to
* generate random numbers. User threads should use the functions
* provided in the random namespace.
*/
/**
 * The generator class is the base underlying random source. Every public
 * sampling method takes the internal mutex, so one generator instance can
 * be shared across threads (at the cost of lock contention); user threads
 * should normally use the free functions in the random namespace, which
 * dispatch to a per-thread generator.
 */
class generator {
public:
// base Generator types
typedef boost::lagged_fibonacci607 real_rng_type;
typedef boost::mt11213b discrete_rng_type;
typedef boost::rand48 fast_discrete_rng_type;
generator() {
time_seed();
}
//! Seed the generator using the default seed
inline void seed() {
mut.lock();
real_rng.seed();
discrete_rng.seed();
fast_discrete_rng.seed();
mut.unlock();
}
//! Seed the generator nondeterministically
void nondet_seed();
//! Seed the generator using the current time (second resolution, via time(NULL))
inline void time_seed() {
seed(time(NULL) );
}
//! Seed the random number generator based on a number
void seed(size_t number) {
mut.lock();
// Seed the fast generator directly, then derive the other two from it
// so all three streams are determined by `number`.
fast_discrete_rng.seed(number);
real_rng.seed(fast_discrete_rng);
discrete_rng.seed(fast_discrete_rng);
mut.unlock();
}
//! Seed the generator using another generator
void seed(generator& other){
mut.lock();
// Note: the fast rng is seeded from a *value drawn* from the other fast
// rng (the call operator), not from its state like the other two.
real_rng.seed(other.real_rng);
discrete_rng.seed(other.discrete_rng);
fast_discrete_rng.seed(other.fast_discrete_rng());
mut.unlock();
}
/**
* Generate a random number in the uniform real with range [min,
* max) or [min, max] if the number type is discrete.
*/
template<typename NumType>
inline NumType uniform(const NumType min, const NumType max) {
mut.lock();
const NumType result = distributions::uniform<NumType>::
sample(real_rng, discrete_rng, min, max);
mut.unlock();
return result;
} // end of uniform
/**
* Generate a random number in the uniform real with range [min,
* max) or [min, max] if the number type is discrete.
* Uses the faster (lower-quality) rand48 generator for discrete types.
*/
template<typename NumType>
inline NumType fast_uniform(const NumType min, const NumType max) {
mut.lock();
const NumType result = distributions::uniform<NumType>::
sample(real_rng, fast_discrete_rng, min, max);
mut.unlock();
return result;
} // end of fast_uniform
/**
* Generate a draw from a gamma distribution with shape parameter alpha.
* (Previous comment described a uniform range — copy-paste error.)
*/
inline double gamma(const double alpha = double(1)) {
boost::gamma_distribution<double> gamma_dist(alpha);
mut.lock();
const double result = gamma_dist(real_rng);
mut.unlock();
return result;
} // end of gamma
/**
* Generate a gaussian random variable with mean `mean` and standard
* deviation `stdev` (defaults: zero mean, unit standard deviation).
*/
inline double gaussian(const double mean = double(0),
const double stdev = double(1)) {
boost::normal_distribution<double> normal_dist(mean,stdev);
mut.lock();
const double result = normal_dist(real_rng);
mut.unlock();
return result;
} // end of gaussian
/**
* Alias for gaussian(): mean `mean`, standard deviation `stdev`.
*/
inline double normal(const double mean = double(0),
const double stdev = double(1)) {
return gaussian(mean, stdev);
} // end of normal
//! Draw true with probability p (note: boolean result stored via double).
inline bool bernoulli(const double p = double(0.5)) {
boost::bernoulli_distribution<double> dist(p);
mut.lock();
const double result(dist(discrete_rng));
mut.unlock();
return result;
} // end of bernoulli
//! Same as bernoulli() but uses the faster rand48 generator.
inline bool fast_bernoulli(const double p = double(0.5)) {
boost::bernoulli_distribution<double> dist(p);
mut.lock();
const double result(dist(fast_discrete_rng));
mut.unlock();
return result;
} // end of bernoulli
/**
* Draw a random number from a multinomial.
* `prb` holds non-negative unnormalized weights; normalization happens here.
*/
template<typename Double>
size_t multinomial(const std::vector<Double>& prb) {
ASSERT_GT(prb.size(),0);
if (prb.size() == 1) { return 0; }
Double sum(0);
for(size_t i = 0; i < prb.size(); ++i) {
ASSERT_GE(prb[i], 0); // Each entry must be P[i] >= 0
sum += prb[i];
}
ASSERT_GT(sum, 0); // Normalizer must be positive
// actually draw the random number (uniform() takes the lock itself)
const Double rnd(uniform<Double>(0,1));
size_t ind = 0;
for(Double cumsum(prb[ind]/sum);
rnd > cumsum && (ind+1) < prb.size();
cumsum += (prb[++ind]/sum));
return ind;
} // end of multinomial
/**
* Generate a draw from a multinomial using a CDF. This is
* slightly more efficient since normalization is not required
* and a binary search can be used.
*/
template<typename Double>
inline size_t multinomial_cdf(const std::vector<Double>& cdf) {
return std::upper_bound(cdf.begin(), cdf.end(),
uniform<Double>(0,1)) - cdf.begin();
} // end of multinomial_cdf
/**
* Construct a random permutation of {0, ..., nelems-1}.
*/
template<typename T>
inline std::vector<T> permutation(const size_t nelems) {
std::vector<T> perm(nelems);
for(T i = 0; i < nelems; ++i) perm[i] = i;
shuffle(perm);
return perm;
} // end of construct a permutation
/**
* Shuffle a standard vector
*/
template<typename T>
void shuffle(std::vector<T>& vec) { shuffle(vec.begin(), vec.end()); }
/**
* Shuffle a range using the begin and end iterators.
* The lock is held for the whole shuffle; the functor below samples the
* rng members directly (not via uniform()), so there is no recursive lock.
*/
template<typename Iterator>
void shuffle(Iterator begin, Iterator end) {
mut.lock();
shuffle_functor functor(*this);
std::random_shuffle(begin, end, functor);
mut.unlock();
} // end of shuffle
private:
//////////////////////////////////////////////////////
/// Data members
// Adapter giving std::random_shuffle an index source; accesses the rngs
// without locking, so it must only be used while `mut` is already held.
struct shuffle_functor {
generator& gen;
inline shuffle_functor(generator& gen) : gen(gen) { }
inline std::ptrdiff_t operator()(std::ptrdiff_t end) {
return distributions::uniform<ptrdiff_t>::
sample(gen.real_rng, gen.fast_discrete_rng, 0, end-1);
}
};
//! The real random number generator
real_rng_type real_rng;
//! The discrete random number generator
discrete_rng_type discrete_rng;
//! The fast discrete random number generator
fast_discrete_rng_type fast_discrete_rng;
//! lock used to access local members
mutex mut;
}; // end of class generator
/**
* \ingroup random
* Seed all generators using the default seed
*/
void seed();
/**
* \ingroup random
* Seed all generators using an integer
*/
void seed(size_t seed_value);
/**
* \ingroup random
* Seed all generators using a nondeterministic source
*/
void nondet_seed();
/**
* \ingroup random
* Seed all generators using the current time in microseconds
*/
void time_seed();
/**
* \ingroup random
* Get the local generator
*/
generator& get_source();
/**
* \ingroup random
* Generate a random number in the uniform real with range [min,
* max) or [min, max] if the number type is discrete.
*/
template<typename NumType>
inline NumType uniform(const NumType min, const NumType max) {
return get_source().uniform<NumType>(min, max);
} // end of uniform
/**
* \ingroup random
* Generate a random number in the uniform real with range [min,
* max) or [min, max] if the number type is discrete.
*/
template<typename NumType>
inline NumType fast_uniform(const NumType min, const NumType max) {
return get_source().fast_uniform<NumType>(min, max);
} // end of fast_uniform
/**
* \ingroup random
* Generate a random number between 0 and 1
*/
inline double rand01() { return uniform<double>(0, 1); }
/**
* \ingroup random
* Simulates the standard rand function as defined in cstdlib
*/
inline int rand() { return fast_uniform(0, RAND_MAX); }
/**
* \ingroup random
* Generate a random number from a gamma distribution.
*/
inline double gamma(const double alpha = double(1)) {
return get_source().gamma(alpha);
}
/**
* \ingroup random
* Generate a gaussian random variable with zero mean and unit
* standard deviation.
*/
inline double gaussian(const double mean = double(0),
const double stdev = double(1)) {
return get_source().gaussian(mean, stdev);
}
/**
* \ingroup random
* Generate a gaussian random variable with zero mean and unit
* standard deviation.
*/
inline double normal(const double mean = double(0),
const double stdev = double(1)) {
return get_source().normal(mean, stdev);
}
/**
* \ingroup random
* Draw a sample from a bernoulli distribution
*/
inline bool bernoulli(const double p = double(0.5)) {
return get_source().bernoulli(p);
}
/**
* \ingroup random
* Draw a sample form a bernoulli distribution using the faster generator
*/
inline bool fast_bernoulli(const double p = double(0.5)) {
return get_source().fast_bernoulli(p);
}
/**
* \ingroup random
* Generate a draw from a multinomial. This function
* automatically normalizes as well.
*/
template<typename Double>
inline size_t multinomial(const std::vector<Double>& prb) {
return get_source().multinomial(prb);
}
    /**
     * \ingroup random
     * Generate a draw from a cdf. The input is assumed to already be
     * a cumulative distribution (non-decreasing) — use pdf2cdf() to
     * build one from a pdf. Returns the sampled index.
     */
    template<typename Double>
    inline size_t multinomial_cdf(const std::vector<Double>& cdf) {
      return get_source().multinomial_cdf(cdf);
    }
    /**
     * \ingroup random
     * Construct a uniformly random permutation of {0, ..., nelems-1}.
     */
    template<typename T>
    inline std::vector<T> permutation(const size_t nelems) {
      return get_source().permutation<T>(nelems);
    }
    /**
     * \ingroup random
     * Shuffle a standard vector in place.
     */
    template<typename T>
    inline void shuffle(std::vector<T>& vec) {
      get_source().shuffle(vec);
    }
    /**
     * \ingroup random
     * Shuffle a range in place using the begin and end iterators.
     */
    template<typename Iterator>
    inline void shuffle(Iterator begin, Iterator end) {
      get_source().shuffle(begin, end);
    }
    /**
     * Converts a discrete PDF into a CDF, in place.
     * (Declaration only; defined elsewhere.)
     */
    void pdf2cdf(std::vector<double>& pdf);
}; // end of random
}; // end of graphlab
#endif
| C++ |
/**
* @file
* @author Aapo Kyrola <akyrola@cs.cmu.edu>
* @version 1.0
*
* @section LICENSE
*
* Copyright [2012] [Aapo Kyrola, Guy Blelloch, Carlos Guestrin / Carnegie Mellon University]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @section DESCRIPTION
*
* Wrapper classes for GraphLab v2.1 API.
*/
#ifndef DEF_GRAPHLAB_WRAPPERS
#define DEF_GRAPHLAB_WRAPPERS
#include "graphchi_basic_includes.hpp"
using namespace graphchi;
namespace graphlab {
struct IS_POD_TYPE { };
struct empty {};
    /* Which edges a vertex program processes during gather/scatter. */
    enum edge_dir_type {
        /**
         * \brief No edges implies that no edges are processed during the
         * corresponding gather or scatter phase, essentially skipping
         * that phase.
         */
        NO_EDGES = 0,
        /**
         * \brief In edges implies that only edges whose target is the center
         * vertex are processed during gather or scatter.
         */
        IN_EDGES = 1,
        /**
         * \brief Out edges implies that only edges whose source is the center
         * vertex are processed during gather or scatter.
         */
        OUT_EDGES = 2 ,
        /**
         * \brief All edges implies that all edges adjacent to the
         * center vertex are processed on gather or scatter. Note that
         * some neighbors may be encountered twice if there is both an in
         * and out edge to that neighbor.
         */
        ALL_EDGES = 3
    };
    /* Global vertex identifier type (GraphChi's vid_t). */
    typedef vid_t vertex_id_type;
    /**
     * GraphChi implementation of the GraphLab icontext interface.
     * All calls are serviced by a wrapped graphchi_context; several
     * distributed-only operations are stubbed out (see notes below).
     */
    template<typename GraphType,
             typename GatherType,
             typename MessageType>
    class icontext {
    public:
        // Type members ===========================================================
        /**
         * \brief the user graph type (typically \ref distributed_graph)
         */
        typedef GraphType graph_type;
        /**
         * \brief the opaque vertex_type defined in the ivertex_program::graph_type
         * (typically distributed_graph::vertex_type)
         */
        typedef typename graph_type::vertex_type vertex_type;
        /**
         * \brief the global vertex identifier (see
         * graphlab::vertex_id_type).
         */
        typedef typename graph_type::vertex_id_type vertex_id_type;
        /**
         * The message type specified by the user-defined vertex-program.
         * (see ivertex_program::message_type)
         */
        typedef MessageType message_type;
        /**
         * The type returned by the gather operation. (see
         * ivertex_program::gather_type)
         */
        typedef GatherType gather_type;
        /* GraphChi: the wrapped context that actually services all calls. */
        graphchi_context * gcontext;
    public:
        icontext(graphchi_context * gcontext) : gcontext(gcontext) {}
        /** \brief icontext destructor */
        virtual ~icontext() { }
        /**
         * \brief Get the total number of vertices in the graph.
         *
         * \return the total number of vertices in the entire graph.
         */
        virtual size_t num_vertices() const { return gcontext->nvertices; }
        /**
         * \brief Get the number of edges in the graph.
         *
         * Each direction counts as a separate edge.
         *
         * \return the total number of edges in the entire graph.
         */
        virtual size_t num_edges() const { assert(false); return 0; } // Not implemented yet
        /**
         * \brief Get the id of this process.
         *
         * The procid is a number between 0 and
         * \ref graphlab::icontext::num_procs
         *
         * \warning Each process may have many threads
         *
         * NOTE(GraphChi): execution is single-process, so this returns
         * the current OpenMP thread id rather than an MPI process id.
         *
         * @return the process of this machine.
         */
        virtual size_t procid() const { return (size_t) omp_get_thread_num(); }
        /**
         * \brief Returns a standard output object (like cout)
         * which only prints once even when running distributed.
         *
         * This returns a C++ standard output stream object
         * which maps directly to std::cout on machine with
         * process ID 0, and to empty output streams
         * on all other processes. Calling,
         * \code
         * context.cout() << "Hello World!";
         * \endcode
         * will therefore only print if the code is run on machine 0.
         * This is useful in the finalize operation in aggregators.
         *
         * NOTE(GraphChi): single-process, so this is simply std::cout.
         */
        virtual std::ostream& cout() const { return std::cout; }
        /**
         * \brief Returns a standard error object (like cerr)
         * which only prints once even when running distributed.
         *
         * This returns a C++ standard output stream object
         * which maps directly to std::cerr on machine with
         * process ID 0, and to empty output streams
         * on all other processes. Calling,
         * \code
         * context.cerr() << "Hello World!";
         * \endcode
         * will therefore only print if the code is run on machine 0.
         * This is useful in the finalize operation in aggregators.
         *
         * NOTE(GraphChi): single-process, so this is simply std::cerr.
         */
        virtual std::ostream& cerr() const { return std::cerr; }
        /**
         * \brief Get the number of processes in the current execution.
         *
         * This is typically the number of mpi jobs created:
         * \code
         * %> mpiexec -n 16 ./pagerank
         * \endcode
         * would imply that num_procs() returns 16.
         *
         * NOTE(GraphChi): returns the number of execution threads instead.
         *
         * @return the number of processes in the current execution
         */
        virtual size_t num_procs() const { return gcontext->execthreads; }
        /**
         * \brief Get the elapsed time in seconds since start was called.
         *
         * \return runtime in seconds
         */
        virtual float elapsed_seconds() const { return gcontext->runtime(); }
        /**
         * \brief Return the current iteration number (if supported).
         *
         * \return the current iteration number if supported or -1
         * otherwise.
         */
        virtual int iteration() const { return gcontext->iteration; }
        /**
         * \brief Signal the engine to stop executing additional update
         * functions.
         *
         * \warning The execution engine will stop *eventually* and
         * additional update functions may be executed prior to when the
         * engine stops. For-example the synchronous engine (see \ref
         * synchronous_engine) will complete the current super-step before
         * terminating.
         *
         * Implemented here by making the current iteration the last one.
         */
        virtual void stop() {
            gcontext->last_iteration = gcontext->iteration;
        }
        /**
         * \brief Signal a vertex with a particular message.
         *
         * This function is an essential part of the GraphLab abstraction
         * and is used to encode iterative computation. Typically a vertex
         * program will signal neighboring vertices during the scatter
         * phase. A vertex program may choose to signal neighbors on when
         * changes made during the previous phases break invariants or warrant
         * future computation on neighboring vertices.
         *
         * The signal function takes two arguments. The first is mandatory
         * and specifies which vertex to signal. The second argument is
         * optional and is used to send a message. If no message is
         * provided then the default message is used.
         *
         * NOTE(GraphChi): the message payload is ignored; only the task
         * is scheduled.
         *
         * \param vertex [in] The vertex to send the message to
         * \param message [in] The message to send, defaults to message_type().
         */
        virtual void signal(const vertex_type& vertex,
                            const message_type& message = message_type()) {
            gcontext->scheduler->add_task(vertex.id());
        }
        /**
         * \brief Send a message to a vertex ID.
         *
         * \warning This function will be slow since the current machine
         * do not know the location of the vertex ID. If possible use the
         * the icontext::signal call instead.
         *
         * NOTE(GraphChi): the message payload is ignored; only the task
         * is scheduled.
         *
         * \param gvid [in] the vertex id of the vertex to signal
         * \param message [in] the message to send to that vertex,
         * defaults to message_type().
         */
        virtual void signal_vid(vertex_id_type gvid,
                                const message_type& message = message_type()) {
            gcontext->scheduler->add_task(gvid);
        }
        /**
         * \brief Post a change to the cached sum for the vertex
         *
         * Often a vertex program will be signaled due to a change in one
         * or a few of its neighbors. However the gather operation will
         * be rerun on all neighbors potentially producing the same value
         * as previous invocations and wasting computation time. To
         * address this some engines support caching (see \ref
         * gather_caching for details) of the gather phase.
         *
         * When caching is enabled the engines save a copy of the previous
         * gather for each vertex. On subsequent calls to gather if their
         * is a cached gather then the gather phase is skipped and the
         * cached value is passed to the ivertex_program::apply function.
         * Therefore it is the responsibility of the vertex program to
         * update the cache values for neighboring vertices. This is
         * accomplished by using the icontext::post_delta function.
         * Posted deltas are atomically added to the cache.
         *
         * NOTE(GraphChi): gather caching is not implemented; calling
         * this aborts.
         *
         * \param vertex [in] the vertex whose cache we want to update
         * \param delta [in] the change that we want to *add* to the
         * current cache.
         *
         */
        virtual void post_delta(const vertex_type& vertex,
                                const gather_type& delta) {
            assert(false); // Not implemented
        }
        /**
         * \brief Invalidate the cached gather on the vertex.
         *
         * When caching is enabled clear_gather_cache clears the cache
         * entry forcing a complete invocation of the subsequent gather.
         *
         * NOTE(GraphChi): gather caching is not implemented; calling
         * this aborts.
         *
         * \param vertex [in] the vertex whose cache to clear.
         */
        virtual void clear_gather_cache(const vertex_type& vertex) {
            assert(false); // Not implemented
        }
    }; // end of icontext
/* Forward declaratinos */
template <typename GLVertexDataType, typename EdgeDataType>
struct GraphLabVertexWrapper;
template <typename GLVertexDataType, typename EdgeDataType>
struct GraphLabEdgeWrapper;
    /* Fake distributed graph type (this is often hard-coded
       in GraphLab vertex programs). Provides only the typedefs a
       GraphLab program expects from its graph_type. */
    template <typename vertex_data, typename edge_data>
    struct distributed_graph {
        typedef vertex_data vertex_data_type;
        typedef edge_data edge_data_type;
        typedef GraphLabVertexWrapper<vertex_data_type, edge_data_type> vertex_type;
        typedef GraphLabEdgeWrapper<vertex_data_type, edge_data_type> edge_type;
        typedef graphchi::vid_t vertex_id_type;
    };
    /* GraphChi's version of the ivertex_program: base class users derive
       from to implement a Gather-Apply-Scatter vertex program. */
    template<typename Graph,
             typename GatherType, typename MessageType = bool>
    struct ivertex_program {
        /* Type definitions */
        typedef typename Graph::vertex_data_type vertex_data_type;
        typedef typename Graph::edge_data_type edge_data_type;
        typedef GatherType gather_type;
        typedef MessageType message_type;
        typedef Graph graph_type;
        typedef typename graphchi::vid_t vertex_id_type;
        typedef GraphLabVertexWrapper<vertex_data_type, edge_data_type> vertex_type;
        typedef GraphLabEdgeWrapper<vertex_data_type, edge_data_type> edge_type;
        typedef icontext<graph_type, gather_type, message_type> icontext_type;
        typedef graphlab::edge_dir_type edge_dir_type;
        /** Optional hook run before gather on each update; default no-op. */
        virtual void init(icontext_type& context,
                          const vertex_type& vertex,
                          const message_type& msg) { /** NOP */ }
        /**
         * Returns the set of edges on which to run the gather function.
         * The default edge direction is the in edges.
         */
        virtual edge_dir_type gather_edges(icontext_type& context,
                                           const vertex_type& vertex) const {
            return IN_EDGES;
        }
        /**
         * Gather is called on all gather_edges() in parallel and returns
         * the gather_type which are added to compute the final output of
         * the gather.
         *
         * Must be overridden whenever gather_edges() returns anything
         * other than NO_EDGES: the default logs LOG_FATAL.
         */
        virtual gather_type gather(icontext_type& context,
                                   const vertex_type& vertex,
                                   edge_type& edge) const {
            logstream(LOG_FATAL) << "Gather not implemented!" << std::endl;
            return gather_type();
        };
        /**
         * The apply function is called once the gather has completed and
         * must be implemented by all vertex programs.
         */
        virtual void apply(icontext_type& context,
                           vertex_type& vertex,
                           const gather_type& total) = 0;
        /**
         * Returns the set of edges on which to run the scatter function.
         * The default edge direction is the out edges.
         */
        virtual edge_dir_type scatter_edges(icontext_type& context,
                                            const vertex_type& vertex) const {
            return OUT_EDGES;
        }
        /**
         * Scatter is called on all scatter_edges() in parallel after the
         * apply function has completed. The scatter function can post
         * deltas.
         *
         * Must be overridden whenever scatter_edges() returns anything
         * other than NO_EDGES: the default logs LOG_FATAL.
         */
        virtual void scatter(icontext_type& context, const vertex_type& vertex,
                             edge_type& edge) const {
            logstream(LOG_FATAL) << "Scatter not implemented!" << std::endl;
        };
    };
template <typename GLVertexDataType, typename EdgeDataType>
struct GraphLabVertexWrapper {
typedef graphchi_vertex<bool, EdgeDataType> VertexType; // Confusing!
typedef GLVertexDataType vertex_data_type;
typedef GraphLabVertexWrapper<GLVertexDataType, EdgeDataType> vertex_type;
graphchi::vid_t vertexId;
VertexType * vertex;
std::vector<GLVertexDataType> * vertexArray;
GraphLabVertexWrapper(graphchi::vid_t vertexId, VertexType * vertex,
std::vector<GLVertexDataType> * vertexArray): vertexId(vertexId),
vertex(vertex), vertexArray(vertexArray) { }
bool operator==(vertex_type& other) const {
return vertexId == other.vertexId;
}
/// \brief Returns a constant reference to the data on the vertex
const vertex_data_type& data() const {
return (*vertexArray)[vertexId];
}
/// \brief Returns a mutable reference to the data on the vertex
vertex_data_type& data() {
return (*vertexArray)[vertexId];
}
/// \brief Returns the number of in edges of the vertex
size_t num_in_edges() const {
if (vertex == NULL) {
logstream(LOG_ERROR) << "GraphChi does not support asking neighbor vertices in/out degrees." << std::endl;
return 0;
}
return vertex->num_edges();
}
/// \brief Returns the number of out edges of the vertex
size_t num_out_edges() const {
if (vertex == NULL) {
logstream(LOG_ERROR) << "GraphChi does not support asking neighbor vertices in/out degrees." << std::endl;
return 0;
}
return vertex->num_outedges();
}
/// \brief Returns the vertex ID of the vertex
graphchi::vid_t id() const {
return vertexId;
}
/**
* \brief Returns the local ID of the vertex
*/
graphchi::vid_t local_id() const {
return vertexId;
}
};
template <typename GLVertexDataType, typename EdgeDataType>
struct GraphLabEdgeWrapper {
typedef graphchi_vertex<bool, EdgeDataType> VertexType;
typedef GLVertexDataType vertex_data_type;
typedef EdgeDataType edge_data_type;
typedef GraphLabVertexWrapper<GLVertexDataType, EdgeDataType> vertex_type;
graphchi_edge<EdgeDataType> * edge;
VertexType * vertex;
std::vector<GLVertexDataType> * vertexArray;
bool is_inedge;
GraphLabEdgeWrapper(graphchi_edge<EdgeDataType> * edge, VertexType * vertex,
std::vector<GLVertexDataType> * vertexArray, bool is_inedge):
edge(edge), vertex(vertex), vertexArray(vertexArray), is_inedge(is_inedge) { }
public:
/**
* \brief Returns the source vertex of the edge.
* This function returns a vertex_object by value and as a
* consequence it is possible to use the resulting vertex object
* to access and *modify* the associated vertex data.
*
* Modification of vertex data obtained through an edge object
* is *usually not safe* and can lead to data corruption.
*
* \return The vertex object representing the source vertex.
*/
vertex_type source() const {
if (is_inedge) {
return GraphLabVertexWrapper<GLVertexDataType, EdgeDataType>(vertex->id(), vertex, vertexArray);
} else {
return GraphLabVertexWrapper<GLVertexDataType, EdgeDataType>(edge->vertex_id(), NULL, vertexArray);
}
}
/**
* \brief Returns the target vertex of the edge.
*
* This function returns a vertex_object by value and as a
* consequence it is possible to use the resulting vertex object
* to access and *modify* the associated vertex data.
*
* Modification of vertex data obtained through an edge object
* is *usually not safe* and can lead to data corruption.
*
* \return The vertex object representing the target vertex.
*/
vertex_type target() const {
if (!is_inedge) {
return GraphLabVertexWrapper<GLVertexDataType, EdgeDataType>(vertex->id(), vertex, vertexArray);
} else {
return GraphLabVertexWrapper<GLVertexDataType, EdgeDataType>(edge->vertex_id(), NULL, vertexArray);
}
}
/**
* \brief Returns a constant reference to the data on the edge
*/
const edge_data_type& data() const { return const_cast<edge_data_type&>(*edge->data_ptr); }
/**
* \brief Returns a mutable reference to the data on the edge
*/
edge_data_type& data() { return *(edge->data_ptr); }
}; // end of edge_type
    /**
     * Adapts a GraphLab GAS vertex program to a GraphChi update function:
     * each update() runs init -> gather -> apply -> scatter for one vertex.
     * Vertex data is held in an in-memory array (vertexInmemoryArray);
     * ownership of that array passes to the caller of
     * run_graphlab_vertexprogram below.
     */
    template <class GraphLabVertexProgram>
    struct GraphLabWrapper : public GraphChiProgram<bool, typename GraphLabVertexProgram::edge_data_type> {
        typedef bool VertexDataType; /* Temporary hack: as the vertices are stored in memory, no need to store on disk. */
        typedef typename GraphLabVertexProgram::vertex_data_type GLVertexDataType;
        typedef typename GraphLabVertexProgram::edge_data_type EdgeDataType;
        typedef typename GraphLabVertexProgram::gather_type gather_type;
        typedef typename GraphLabVertexProgram::graph_type graph_type;
        typedef typename GraphLabVertexProgram::message_type message_type;
        /* In-memory vertex data, indexed by vertex id; sized lazily on
           iteration 0 and returned to the caller (never freed here). */
        std::vector<GLVertexDataType> * vertexInmemoryArray;
        GraphLabWrapper() {
            vertexInmemoryArray = new std::vector<GLVertexDataType>();
        }
        /**
         * Called before an iteration starts.
         * On the first iteration, allocates one vertex-data slot per vertex.
         */
        virtual void before_iteration(int iteration, graphchi_context &gcontext) {
            if (gcontext.iteration == 0) {
                logstream(LOG_INFO) << "Initialize vertices in memory." << std::endl;
                vertexInmemoryArray->resize(gcontext.nvertices);
            }
        }
        /**
         * Called after an iteration has finished. (No-op.)
         */
        virtual void after_iteration(int iteration, graphchi_context &gcontext) {
        }
        /**
         * Called before an execution interval is started. (No-op.)
         */
        virtual void before_exec_interval(vid_t window_st, vid_t window_en, graphchi_context &gcontext) {
        }
        /**
         * Called after an execution interval has finished. (No-op.)
         */
        virtual void after_exec_interval(vid_t window_st, vid_t window_en, graphchi_context &gcontext) {
        }
        /**
         * Update function: runs one full GAS cycle for `vertex`.
         */
        void update(graphchi_vertex<bool, EdgeDataType> &vertex, graphchi_context &gcontext) {
            graphlab::icontext<graph_type, gather_type, message_type> glcontext(&gcontext);
            /* Create the vertex program */
            GraphLabVertexWrapper<GLVertexDataType, EdgeDataType> wrapperVertex(vertex.id(), &vertex, vertexInmemoryArray);
            GraphLabVertexProgram glVertexProgram;
            /* Init */
            glVertexProgram.init(glcontext, wrapperVertex, typename GraphLabVertexProgram::message_type());
            const GraphLabVertexProgram& const_vprog = glVertexProgram;
            /* Gather */
            edge_dir_type gather_direction = const_vprog.gather_edges(glcontext, wrapperVertex);
            /* NOTE(review): for POD gather types `sum` stays uninitialized when
               no edge is gathered (NO_EDGES or zero-degree vertex) — verify
               apply() does not read it in that case. */
            gather_type sum;
            int gathered = 0;
            switch (gather_direction) {
            case ALL_EDGES:
                /* fall through: ALL_EDGES gathers in-edges first, then out-edges */
            case IN_EDGES:
                for(int i=0; i < vertex.num_inedges(); i++) {
                    GraphLabEdgeWrapper<GLVertexDataType, EdgeDataType> edgeWrapper(vertex.inedge(i), &vertex, vertexInmemoryArray, true);
                    /* First gather initializes sum; later gathers accumulate with += */
                    if (gathered > 0) sum += const_vprog.gather(glcontext, wrapperVertex, edgeWrapper);
                    else sum = const_vprog.gather(glcontext, wrapperVertex, edgeWrapper);
                    gathered++;
                }
                if (gather_direction != ALL_EDGES)
                    break;
                /* fall through for ALL_EDGES */
            case OUT_EDGES:
                for(int i=0; i < vertex.num_outedges(); i++) {
                    GraphLabEdgeWrapper<GLVertexDataType, EdgeDataType> edgeWrapper(vertex.outedge(i), &vertex, vertexInmemoryArray, false);
                    if (gathered > 0) sum += const_vprog.gather(glcontext, wrapperVertex, edgeWrapper);
                    else sum = const_vprog.gather(glcontext, wrapperVertex, edgeWrapper);
                    gathered++;
                }
                break;
            case NO_EDGES:
                break;
            default:
                assert(false); // Huh?
            }
            /* Apply */
            glVertexProgram.apply(glcontext, wrapperVertex, sum);
            /* Scatter */
            edge_dir_type scatter_direction = const_vprog.scatter_edges(glcontext, wrapperVertex);
            switch(scatter_direction) {
            case ALL_EDGES:
                /* fall through: ALL_EDGES scatters in-edges first, then out-edges */
            case IN_EDGES:
                for(int i=0; i < vertex.num_inedges(); i++) {
                    GraphLabEdgeWrapper<GLVertexDataType, EdgeDataType> edgeWrapper(vertex.inedge(i), &vertex, vertexInmemoryArray, true);
                    const_vprog.scatter(glcontext, wrapperVertex, edgeWrapper);
                }
                if (scatter_direction != ALL_EDGES)
                    break;
                /* fall through for ALL_EDGES */
            case OUT_EDGES:
                for(int i=0; i < vertex.num_outedges(); i++) {
                    GraphLabEdgeWrapper<GLVertexDataType, EdgeDataType> edgeWrapper(vertex.outedge(i), &vertex, vertexInmemoryArray, false);
                    const_vprog.scatter(glcontext, wrapperVertex, edgeWrapper);
                }
                break;
            case NO_EDGES:
                break;
            default:
                assert(false); // Huh?
            }
            /* Done! */
        }
    }; // End GraphLabWrapper
    /**
     * Runs a GraphLab edge aggregator as a one-iteration GraphChi program:
     * map_function is applied to every edge, results are summed into
     * per-thread accumulators, and finalize_function receives the total
     * after the iteration.
     */
    template <typename GraphLabVertexProgram, typename ReductionType,
              typename EdgeMapType,
              typename FinalizerType>
    struct GraphLabEdgeAggregatorWrapper : public GraphChiProgram<bool, typename GraphLabVertexProgram::edge_data_type> {
        typedef bool VertexDataType; /* Temporary hack: as the vertices are stored in memory, no need to store on disk. */
        typedef typename GraphLabVertexProgram::vertex_data_type GLVertexDataType;
        typedef typename GraphLabVertexProgram::edge_data_type EdgeDataType;
        typedef typename GraphLabVertexProgram::edge_type edge_type;
        typedef typename GraphLabVertexProgram::gather_type gather_type;
        typedef typename GraphLabVertexProgram::graph_type graph_type;
        typedef typename GraphLabVertexProgram::message_type message_type;
        mutex m;
        /* Per-thread partial sums, merged into `aggr` after the iteration. */
        std::vector<ReductionType> localaggr;
        /* Final aggregate; valid after after_iteration() has run. */
        ReductionType aggr;
        /* Borrowed vertex-data array (owned by the caller, not freed here). */
        std::vector<GLVertexDataType> * vertexInmemoryArray;
        EdgeMapType map_function;
        FinalizerType finalize_function;
        GraphLabEdgeAggregatorWrapper(EdgeMapType map_function,
                                      FinalizerType finalize_function,
                                      std::vector<typename GraphLabVertexProgram::vertex_data_type> * vertices) : map_function(map_function),
            finalize_function(finalize_function) {
            vertexInmemoryArray = vertices;
        }
        /**
         * Called before an iteration starts: resets the aggregate and
         * allocates one accumulator per execution thread.
         */
        virtual void before_iteration(int iteration, graphchi_context &gcontext) {
            aggr = ReductionType();
            localaggr.resize(gcontext.execthreads);
        }
        /**
         * Called after an iteration has finished: merges the per-thread
         * accumulators and invokes the finalizer with the total.
         */
        virtual void after_iteration(int iteration, graphchi_context &gcontext) {
            logstream(LOG_INFO) << "Going to run edge-aggregator finalize." << std::endl;
            for(int i=0; i < (int)localaggr.size(); i++) {
                aggr += localaggr[i];
            }
            graphlab::icontext<graph_type, gather_type, message_type> glcontext(&gcontext);
            finalize_function(glcontext, aggr);
        }
        /**
         * Called before an execution interval is started. (No-op.)
         */
        virtual void before_exec_interval(vid_t window_st, vid_t window_en, graphchi_context &gcontext) {
        }
        /**
         * Called after an execution interval has finished. (No-op.)
         */
        virtual void after_exec_interval(vid_t window_st, vid_t window_en, graphchi_context &gcontext) {
        }
        /**
         * Update function: maps every edge of `vertex` and adds the results
         * into this thread's accumulator.
         *
         * NOTE(review): `ReductionType a;` is uninitialized for POD types —
         * verify ReductionType value-initializes itself. Also, is_inedge is
         * passed as `true` for every edge, so edgeWrapper.source()/target()
         * may be misleading for out-edges — confirm map_function does not
         * rely on edge orientation.
         */
        void update(graphchi_vertex<bool, EdgeDataType> &vertex, graphchi_context &gcontext) {
            graphlab::icontext<graph_type, gather_type, message_type> glcontext(&gcontext);
            ReductionType a;
            for(int i=0; i < vertex.num_edges(); i++) {
                const GraphLabEdgeWrapper<GLVertexDataType, EdgeDataType> edgeWrapper(vertex.edge(i), &vertex, vertexInmemoryArray, true);
                ReductionType mapped = map_function(glcontext, edgeWrapper);
                a += mapped;
            }
            localaggr[omp_get_thread_num()] += a;
        }
    }; // End edge-aggregator wrapper
/**
* Just definitions, we do not actually
support them.
*/
namespace messages {
/**
* The priority of two messages is the sum
*/
struct sum_priority : public graphlab::IS_POD_TYPE {
double value;
sum_priority(const double value = 0) : value(value) { }
double priority() const { return value; }
sum_priority& operator+=(const sum_priority& other) {
value += other.value;
return *this;
}
}; // end of sum_priority message
/**
* The priority of two messages is the max
*/
struct max_priority : public graphlab::IS_POD_TYPE {
double value;
max_priority(const double value = 0) : value(value) { }
double priority() const { return value; }
max_priority& operator+=(const max_priority& other) {
value = std::max(value, other.value);
return *this;
}
}; // end of max_priority message
}; // end of messages namespace
}; // End namespace graphlab
/**
 * Runs a GraphLab vertex program on a sharded graph with GraphChi.
 *
 * @param base_filename  base name of the preprocessed (sharded) graph
 * @param nshards        number of shards
 * @param niters         number of iterations to run
 * @param scheduler      whether to enable selective scheduling
 * @param _m             metrics collector
 * @param modifies_inedges / modifies_outedges  whether the program writes edge data
 * @return heap-allocated per-vertex data array, indexed by vertex id;
 *         ownership passes to the caller (caller must delete it).
 */
template <typename GraphLabVertexProgram>
std::vector<typename GraphLabVertexProgram::vertex_data_type> *
run_graphlab_vertexprogram(std::string base_filename, int nshards, int niters, bool scheduler, metrics & _m,
                           bool modifies_inedges=true, bool modifies_outedges=true) {
    typedef graphlab::GraphLabWrapper<GraphLabVertexProgram> GLWrapper;
    GLWrapper wrapperProgram;
    graphchi_engine<bool, typename GLWrapper::EdgeDataType> engine(base_filename, nshards, scheduler, _m);
    engine.set_modifies_inedges(modifies_inedges);
    engine.set_modifies_outedges(modifies_outedges);
    engine.run(wrapperProgram, niters);
    return wrapperProgram.vertexInmemoryArray;
}
/**
 * Runs a GraphLab edge aggregator over the whole graph (one read-only
 * pass): map_function is applied to each edge, results are summed, and
 * finalize_function receives the total.
 *
 * @param vertices  the in-memory vertex-data array (typically the pointer
 *                  returned by run_graphlab_vertexprogram); not freed here
 * @return the final aggregate
 */
template <typename GraphLabVertexProgram, typename ReductionType,
          typename EdgeMapType,
          typename FinalizerType>
ReductionType run_graphlab_edge_aggregator(std::string base_filename, int nshards,
                                           EdgeMapType map_function,
                                           FinalizerType finalize_function, std::vector<typename GraphLabVertexProgram::vertex_data_type> * vertices, metrics & _m) {
    typedef graphlab::GraphLabEdgeAggregatorWrapper<GraphLabVertexProgram, ReductionType, EdgeMapType, FinalizerType> GLEdgeAggrWrapper;
    logstream(LOG_INFO) << "Starting edge aggregator." << std::endl;
    GLEdgeAggrWrapper glAggregator(map_function, finalize_function, vertices);
    graphchi_engine<bool, typename GLEdgeAggrWrapper::EdgeDataType> engine(base_filename, nshards, true, _m);
    engine.set_modifies_inedges(false);
    engine.set_modifies_outedges(false);
    engine.run(glAggregator, 1);
    return glAggregator.aggr;
}
#endif
| C++ |
/**
* Copyright (c) 2009 Carnegie Mellon University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an "AS
* IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language
* governing permissions and limitations under the License.
*
* For more about this software visit:
*
* http://www.graphlab.ml.cmu.edu
*
*/
// Copyright (c) 2005, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// ---
// This file contains #include information about logging-related stuff.
// Pretty much everybody needs to #include this file so that they can
// log various happenings.
//
#ifndef _ASSERTIONS_H_
#define _ASSERTIONS_H_
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#ifdef HAVE_UNISTD_H
#include <unistd.h> // for write()
#endif
#include <string.h> // for strlen(), strcmp()
#include <assert.h>
#include <errno.h> // for errno
#include <sstream>
#include <cassert>
#include "logger/logger.hpp"
#include <boost/typeof/typeof.hpp>
/* Stub: GraphChi has no backtrace support, so this only logs an error
   instead of printing a stack trace. */
static void __print_back_trace() {
    logstream(LOG_ERROR) << "GraphChi does not currently have the _print_back_trace implementation!" << std::endl;
}
// On some systems (like freebsd), we can't call write() at all in a
// global constructor, perhaps because errno hasn't been set up.
// Calling the write syscall is safer (it doesn't set errno), so we
// prefer that. Note we don't care about errno for logging: we just
// do logging on a best-effort basis.
// Here this is routed through the logger's logbuf at LOG_FATAL level.
#define WRITE_TO_STDERR(buf, len) (logbuf(LOG_FATAL, buf, len))
// CHECK dies with a fatal error if condition is not true. It is *not*
// controlled by NDEBUG, so the check will be executed regardless of
// compilation mode. Therefore, it is safe to do things like:
//    CHECK(fp->Write(x) == 4)
// On failure it logs the failed condition, prints a (stub) backtrace,
// and throws a "assertion failure" C-string.
#define CHECK(condition)                                        \
      do {                                                      \
        if (__builtin_expect(!(condition), 0)) {                \
          logstream(LOG_ERROR)                                  \
            << "Check failed: " << #condition << std::endl;     \
          __print_back_trace();                                 \
          throw("assertion failure");                           \
        }                                                       \
      } while(0)
// This prints errno as well. errno is the posix defined last error
// number. See errno.h
// errno is captured into a local immediately, so the logging calls
// below cannot clobber it before it is formatted.
// Fixed: the saved value was stored in _PCHECK_err_no_ but strerror()
// was called with the undeclared name `err_no`, which failed to
// compile at every expansion site.
#define PCHECK(condition)                                       \
      do {                                                      \
        if (__builtin_expect(!(condition), 0)) {                \
          const int _PCHECK_err_no_ = errno;                    \
          logstream(LOG_ERROR)                                  \
            << "Check failed: " << #condition << ": "           \
            << strerror(_PCHECK_err_no_) << std::endl;          \
          __print_back_trace();                                 \
          throw("assertion failure");                           \
        }                                                       \
      } while(0)
// Helper macro for binary operators; prints the two values on error
// Don't use this macro directly in your code, use CHECK_EQ et al below
// WARNING: These don't compile correctly if one of the arguments is a pointer
// and the other is NULL. To work around this, simply static_cast NULL to the
// type of the desired pointer.
// NOTE: relies on the GNU `typeof` extension (gcc/clang); both operands
// are evaluated exactly once.
#define CHECK_OP(op, val1, val2)                                \
      do {                                                      \
        const typeof(val1) _CHECK_OP_v1_ = val1;                \
        const typeof(val2) _CHECK_OP_v2_ = (typeof(val2))val2;  \
        if (__builtin_expect(!((_CHECK_OP_v1_) op               \
                               (typeof(val1))(_CHECK_OP_v2_)), 0)) { \
          logstream(LOG_ERROR)                                  \
            << "Check failed: "                                 \
            << #val1 << #op << #val2                            \
            << "  ["                                            \
            << _CHECK_OP_v1_                                    \
            << ' ' << #op << ' '                                \
            << _CHECK_OP_v2_ << "]" << std::endl;               \
          __print_back_trace();                                 \
          throw("assertion failure");                           \
        }                                                       \
      } while(0)
// Comparison checks: always on (not disabled by NDEBUG); see CHECK_OP.
#define CHECK_EQ(val1, val2) CHECK_OP(==, val1, val2)
#define CHECK_NE(val1, val2) CHECK_OP(!=, val1, val2)
#define CHECK_LE(val1, val2) CHECK_OP(<=, val1, val2)
#define CHECK_LT(val1, val2) CHECK_OP(< , val1, val2)
#define CHECK_GE(val1, val2) CHECK_OP(>=, val1, val2)
#define CHECK_GT(val1, val2) CHECK_OP(> , val1, val2)
// Synonyms for CHECK_* that are used in some unittests.
#define EXPECT_EQ(val1, val2) CHECK_EQ(val1, val2)
#define EXPECT_NE(val1, val2) CHECK_NE(val1, val2)
#define EXPECT_LE(val1, val2) CHECK_LE(val1, val2)
#define EXPECT_LT(val1, val2) CHECK_LT(val1, val2)
#define EXPECT_GE(val1, val2) CHECK_GE(val1, val2)
#define EXPECT_GT(val1, val2) CHECK_GT(val1, val2)
#define ASSERT_EQ(val1, val2) EXPECT_EQ(val1, val2)
#define ASSERT_NE(val1, val2) EXPECT_NE(val1, val2)
#define ASSERT_LE(val1, val2) EXPECT_LE(val1, val2)
#define ASSERT_LT(val1, val2) EXPECT_LT(val1, val2)
#define ASSERT_GE(val1, val2) EXPECT_GE(val1, val2)
#define ASSERT_GT(val1, val2) EXPECT_GT(val1, val2)
// As are these variants.
#define EXPECT_TRUE(cond)     CHECK(cond)
#define EXPECT_FALSE(cond)    CHECK(!(cond))
#define EXPECT_STREQ(a, b)    CHECK(strcmp(a, b) == 0)
#define ASSERT_TRUE(cond)     EXPECT_TRUE(cond)
#define ASSERT_FALSE(cond)    EXPECT_FALSE(cond)
#define ASSERT_STREQ(a, b)    EXPECT_STREQ(a, b)
// Like CHECK, but additionally logs a printf-style message on failure.
#define ASSERT_MSG(condition, fmt, ...)                         \
      do {                                                      \
        if (__builtin_expect(!(condition), 0)) {                \
          logstream(LOG_ERROR)                                  \
            << "Check failed: " << #condition << ":\n";         \
          logger(LOG_ERROR, fmt, ##__VA_ARGS__);                \
          __print_back_trace();                                 \
          throw("assertion failure");                           \
        }                                                       \
      } while(0)
// Used for (libc) functions that return -1 and set errno
#define CHECK_ERR(invocation)  PCHECK((invocation) != -1)
// A few more checks that only happen in debug mode: these compile to
// nothing when NDEBUG is defined.
#ifdef NDEBUG
#define DCHECK_EQ(val1, val2)
#define DCHECK_NE(val1, val2)
#define DCHECK_LE(val1, val2)
#define DCHECK_LT(val1, val2)
#define DCHECK_GE(val1, val2)
#define DCHECK_GT(val1, val2)
#define DASSERT_TRUE(cond)
#define DASSERT_FALSE(cond)
#define DASSERT_MSG(condition, fmt, ...)
#else
#define DCHECK_EQ(val1, val2)  CHECK_EQ(val1, val2)
#define DCHECK_NE(val1, val2)  CHECK_NE(val1, val2)
#define DCHECK_LE(val1, val2)  CHECK_LE(val1, val2)
#define DCHECK_LT(val1, val2)  CHECK_LT(val1, val2)
#define DCHECK_GE(val1, val2)  CHECK_GE(val1, val2)
#define DCHECK_GT(val1, val2)  CHECK_GT(val1, val2)
#define DASSERT_TRUE(cond)     ASSERT_TRUE(cond)
#define DASSERT_FALSE(cond)    ASSERT_FALSE(cond)
#define DASSERT_MSG(condition, fmt, ...)                        \
      do {                                                      \
        if (__builtin_expect(!(condition), 0)) {                \
          logstream(LOG_ERROR)                                  \
            << "Check failed: " << #condition << ":\n";         \
          logger(LOG_ERROR, fmt, ##__VA_ARGS__);                \
          __print_back_trace();                                 \
          throw("assertion failure");                           \
        }                                                       \
      } while(0)
#endif
#ifdef ERROR
#undef ERROR // may conflict with ERROR macro on windows
#endif
#endif // _LOGGING_H_
| C++ |
/**
* @file
* @author Aapo Kyrola <akyrola@cs.cmu.edu>
* @version 1.0
*
* @section LICENSE
*
* Copyright [2012] [Aapo Kyrola, Guy Blelloch, Carlos Guestrin / Carnegie Mellon University]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @section DESCRIPTION
*
* Includes for the GraphChi - GraphLab v2.1 Gather-Apply-Scatter API.
*/
#ifndef DEF_GRAPHLAB_GAS_API_V2_1
#define DEF_GRAPHLAB_GAS_API_V2_1
#include "api/graphlab2_1_GAS_api/assertions.hpp"
#include "api/graphlab2_1_GAS_api/graphchi_graphlabv2_1.hpp"
#include <boost/foreach.hpp>
#include <stdint.h>
// if GNUC is available, this checks if the file which included
// macros_def.hpp is the same file which included macros_undef.hpp
#ifdef __GNUC__
#define GRAPHLAB_MACROS_INC_LEVEL __INCLUDE_LEVEL__
#endif
// prevent this file from being included before other graphlab headers
#ifdef GRAPHLAB_MACROS
#error "Repeated include of <macros_def.hpp>. This probably means that macros_def.hpp was not the last include, or some header file failed to include <macros_undef.hpp>"
#endif
// Sentinel: marks that the short-hand macros below are currently defined.
// macros_undef.hpp is expected to #undef it (and them) again.
#define GRAPHLAB_MACROS
/** A macro to disallow the copy constructor and operator= functions
 This should be used in the private: declarations for a class */
#define DISALLOW_COPY_AND_ASSIGN(TypeName) \
    TypeName(const TypeName&); \
    void operator=(const TypeName&);
// Shortcut macro definitions
//! see http://www.boost.org/doc/html/foreach.html
#define foreach BOOST_FOREACH
#define rev_foreach BOOST_REVERSE_FOREACH
#include "api/graphlab2_1_GAS_api/random.hpp"
#endif
| C++ |
/**
* Copyright (c) 2009 Carnegie Mellon University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an "AS
* IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language
* governing permissions and limitations under the License.
*
* For more about this software visit:
*
* http://www.graphlab.ml.cmu.edu
*
*/
#include <pthread.h>
#include <set>
#include <iostream>
#include <fstream>
#include <boost/random.hpp>
#include <boost/integer_traits.hpp>
#include "util/pthread_tools.hpp"
#include "api/graphlab2_1_GAS_api/graphlab.hpp"
namespace graphlab {
namespace random {
    /**
     * A truly nondeterministic generator backed by /dev/urandom.
     * Models the Boost UniformRandomNumberGenerator concept (min/max and
     * operator()); reads are serialized by an internal mutex, so one global
     * instance may be shared across threads.
     */
    class nondet_generator {
    public:
      /// Singleton accessor; opens /dev/urandom on first use.
      static nondet_generator& global() {
        static nondet_generator global_gen;
        return global_gen;
      }
      typedef size_t result_type;
      // Full range of result_type.  BOOST_PREVENT_MACRO_SUBSTITUTION guards
      // against platform min()/max() macros (e.g. Windows headers).
      BOOST_STATIC_CONSTANT(result_type, min_value =
                            boost::integer_traits<result_type>::const_min);
      BOOST_STATIC_CONSTANT(result_type, max_value =
                            boost::integer_traits<result_type>::const_max);
      result_type min BOOST_PREVENT_MACRO_SUBSTITUTION () const { return min_value; }
      result_type max BOOST_PREVENT_MACRO_SUBSTITUTION () const { return max_value; }
      // Opens the entropy source; asserts on failure (e.g. non-POSIX systems
      // without /dev/urandom).
      nondet_generator() {
        rnd_dev.open("/dev/urandom", std::ios::binary | std::ios::in);
        ASSERT_TRUE(rnd_dev.good());
      }
      // Close the random number generator
      ~nondet_generator() { rnd_dev.close(); }
      // read a size_t from the source
      result_type operator()() {
        // read a machine word into result
        result_type result(0);
        mut.lock();
        ASSERT_TRUE(rnd_dev.good());
        rnd_dev.read(reinterpret_cast<char*>(&result), sizeof(result_type));
        ASSERT_TRUE(rnd_dev.good());
        mut.unlock();
        // std::cout << result << std::endl;
        return result;
      }
    private:
      std::ifstream rnd_dev;  // handle to /dev/urandom
      mutex mut;              // serializes reads from the stream
    };
//nondet_generator global_nondet_rng;
/**
* This class represents a master registery of all active random
* number generators
*/
struct source_registry {
std::set<generator*> generators;
generator master;
mutex mut;
static source_registry& global() {
static source_registry registry;
return registry;
}
/**
* Seed all threads using the default seed
*/
void seed() {
mut.lock();
master.seed();
foreach(generator* generator, generators) {
ASSERT_TRUE(generator != NULL);
generator->seed(master);
}
mut.unlock();
}
/**
* Seed all threads using the default seed
*/
void nondet_seed() {
mut.lock();
master.nondet_seed();
foreach(generator* generator, generators) {
ASSERT_TRUE(generator != NULL);
generator->seed(master);
}
mut.unlock();
}
/**
* Seed all threads using the default seed
*/
void time_seed() {
mut.lock();
master.time_seed();
foreach(generator* generator, generators) {
ASSERT_TRUE(generator != NULL);
generator->seed(master);
}
mut.unlock();
}
/**
* Seed all threads with a fixed number
*/
void seed(const size_t number) {
mut.lock();
master.seed(number);
foreach(generator* generator, generators) {
ASSERT_TRUE(generator != NULL);
generator->seed(master);
}
mut.unlock();
}
/**
* Register a source with the registry and seed it based on the
* master.
*/
void register_generator(generator* tls_ptr) {
ASSERT_TRUE(tls_ptr != NULL);
mut.lock();
generators.insert(tls_ptr);
tls_ptr->seed(master);
// std::cout << "Generator created" << std::endl;
// __print_back_trace();
mut.unlock();
}
/**
* Unregister a source from the registry
*/
void unregister_source(generator* tls_ptr) {
mut.lock();
generators.erase(tls_ptr);
mut.unlock();
}
};
// source_registry registry;
//////////////////////////////////////////////////////////////
/// Pthread TLS code
/**
* this function is responsible for destroying the random number
* generators
*/
void destroy_tls_data(void* ptr) {
generator* tls_rnd_ptr =
reinterpret_cast<generator*>(ptr);
if(tls_rnd_ptr != NULL) {
source_registry::global().unregister_source(tls_rnd_ptr);
delete tls_rnd_ptr;
}
}
    /**
     * Simple struct used to construct the thread local storage at
     * startup.  Its constructor creates the pthread key and registers
     * destroy_tls_data as the per-thread destructor, so each thread's
     * generator is cleaned up automatically on thread exit.
     */
    struct tls_key_creator {
      pthread_key_t TLS_RANDOM_SOURCE_KEY;  // key for the per-thread generator
      tls_key_creator() : TLS_RANDOM_SOURCE_KEY(0) {
        pthread_key_create(&TLS_RANDOM_SOURCE_KEY,
                           destroy_tls_data);
      }
    };
    // This function is to be called prior to any access to the random
    // source
    // The function-local static guarantees the pthread key is created
    // exactly once, on first call (Meyers-singleton style).
    static pthread_key_t get_random_source_key() {
      static const tls_key_creator key;
      return key.TLS_RANDOM_SOURCE_KEY;
    }
    // This forces __init_keys__ to be called prior to main.
    // (The variable itself is never read; it exists only for its
    // initialization side effect of creating the TLS key.)
    static pthread_key_t __unused_init_keys__(get_random_source_key());
    // the combination of the two mechanisms above will force the
    // thread local store to be initialized
    // 1: before main
    // 2: before any use of random by global variables.
    // KNOWN_ISSUE: if a global variable (initialized before main)
    // spawns threads which then call random. Things explode.
/////////////////////////////////////////////////////////////
//// Implementation of header functions
generator& get_source() {
// get the thread local storage
generator* tls_rnd_ptr =
reinterpret_cast<generator*>
(pthread_getspecific(get_random_source_key()));
// Create a tls_random_source if none was provided
if(tls_rnd_ptr == NULL) {
tls_rnd_ptr = new generator();
assert(tls_rnd_ptr != NULL);
// This will seed it with the master rng
source_registry::global().register_generator(tls_rnd_ptr);
pthread_setspecific(get_random_source_key(),
tls_rnd_ptr);
}
// assert(tls_rnd_ptr != NULL);
return *tls_rnd_ptr;
} // end of get local random source
    /// Seed the master and all registered generators with the default seed.
    void seed() { source_registry::global().seed(); }
    /// Seed the master and all registered generators nondeterministically.
    void nondet_seed() { source_registry::global().nondet_seed(); }
    /// Seed the master and all registered generators from the current time.
    void time_seed() { source_registry::global().time_seed(); }
    /// Seed the master and all registered generators with a fixed value
    /// (use for reproducible runs).
    void seed(const size_t seed_value) {
      source_registry::global().seed(seed_value);
    }
    /**
     * Seed this generator's internal engines (real, discrete and fast
     * discrete) from the global nondeterministic /dev/urandom-backed
     * source, under this generator's mutex.
     */
    void generator::nondet_seed() {
      // Get the global nondeterministic random number generator.
      nondet_generator& nondet_rnd(nondet_generator::global());
      mut.lock();
      // Each engine receives an independent machine word of entropy.
      // std::cout << "initializing real rng" << std::endl;
      real_rng.seed(nondet_rnd());
      // std::cout << "initializing discrete rng" << std::endl;
      discrete_rng.seed(nondet_rnd());
      // std::cout << "initializing fast discrete rng" << std::endl;
      fast_discrete_rng.seed(nondet_rnd());
      mut.unlock();
    }
void pdf2cdf(std::vector<double>& pdf) {
double Z = 0;
for(size_t i = 0; i < pdf.size(); ++i) Z += pdf[i];
for(size_t i = 0; i < pdf.size(); ++i)
pdf[i] = pdf[i]/Z + ((i>0)? pdf[i-1] : 0);
} // end of pdf2cdf
}; // end of namespace random
};// end of namespace graphlab
| C++ |
/**
* @file
* @author Aapo Kyrola <akyrola@cs.cmu.edu>
* @version 1.0
*
* @section LICENSE
*
* Copyright [2012] [Aapo Kyrola, Guy Blelloch, Carlos Guestrin / Carnegie Mellon University]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @section DESCRIPTION
*
* Context object which contains information about the graph
* and on-going computation.
*/
#ifndef DEF_GRAPHCHI_CONTEXT
#define DEF_GRAPHCHI_CONTEXT
#include <vector>
#include <assert.h>
#include <omp.h>
#include <sys/time.h>
#include "graphchi_types.hpp"
#include "api/ischeduler.hpp"
namespace graphchi {
struct graphchi_context {
size_t nvertices;
size_t nedges;
ischeduler * scheduler;
int iteration;
int num_iterations;
int last_iteration;
int execthreads;
std::vector<double> deltas;
timeval start;
std::string filename;
double last_deltasum;
graphchi_context() : scheduler(NULL), iteration(0), last_iteration(-1) {
gettimeofday(&start, NULL);
last_deltasum = 0.0;
}
double runtime() {
timeval end;
gettimeofday(&end, NULL);
return end.tv_sec-start.tv_sec+ ((double)(end.tv_usec-start.tv_usec))/1.0E6;
}
/**
* Set a termination iteration.
*/
void set_last_iteration(int _last_iteration) {
last_iteration = _last_iteration;
}
void reset_deltas(int nthreads) {
deltas = std::vector<double>(nthreads, 0.0);
}
double get_delta() {
double d = 0.0;
for(int i=0; i < (int)deltas.size(); i++) {
d += deltas[i];
}
last_deltasum = d;
return d;
}
inline bool isnan(double x) {
return !(x<0 || x>=0);
}
/**
* Method for keeping track of the amount of change in computation.
* An update function may broadcast a numerical "delta" value that is
* automatically accumulated (in thread-safe way).
* @param delta
*/
void log_change(double delta) {
deltas[omp_get_thread_num()] += delta;
assert(delta >= 0);
assert(!isnan(delta)); /* Sanity check */
}
};
}
#endif
| C++ |
/**
* @file
* @author Aapo Kyrola <akyrola@cs.cmu.edu>
* @version 1.0
*
* @section LICENSE
*
* Copyright [2012] [Aapo Kyrola, Guy Blelloch, Carlos Guestrin / Carnegie Mellon University]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @section DESCRIPTION
*
* Scheduler interface.
*/
#ifndef DEF_GRAPHCHI_ISCHEDULER
#define DEF_GRAPHCHI_ISCHEDULER
#include "graphchi_types.hpp"
#include "logger/logger.hpp"
namespace graphchi {
    /**
     * Abstract scheduler interface: implementations decide which vertices
     * are updated on the next iteration.
     */
    class ischeduler {
    public:
        virtual ~ischeduler() {}
        /// Schedule vertex vid for a future update.
        virtual void add_task(vid_t vid) = 0;
        /// Unschedule the vertices in the range fromvertex..tovertex.
        /// NOTE(review): inclusive/exclusive bounds are not visible here --
        /// confirm against the concrete implementations.
        virtual void remove_tasks(vid_t fromvertex, vid_t tovertex) = 0;
        /// Schedule every vertex in the graph.
        virtual void add_task_to_all() = 0;
        /// Whether the given vertex is currently scheduled for update.
        virtual bool is_scheduled(vid_t vertex) = 0;
    };
/**
* Implementation of the scheduler which actually does nothing.
*/
class non_scheduler : public ischeduler {
int nwarnings;
public:
non_scheduler() : nwarnings(0) {}
virtual ~non_scheduler() {}
virtual void add_task(vid_t vid) {
if (nwarnings++ % 10000 == 0) {
logstream(LOG_WARNING) << "Tried to add task to scheduler, but scheduling was not enabled!" << std::endl;
}
}
virtual void remove_tasks(vid_t fromvertex, vid_t tovertex) { }
virtual void add_task_to_all() { }
virtual bool is_scheduled(vid_t vertex) { return true; }
};
}
#endif
| C++ |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.