code stringlengths 1 2.06M | language stringclasses 1 value |
|---|---|
/**********************************************************************
* File: tlog.cpp
* Description: Variant of printf with logging level controllable by a
* commandline flag.
* Author: Ranjith Unnikrishnan
* Created: Wed Nov 20 2013
*
* (C) Copyright 2013, Google Inc.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
#include "tlog.h"
INT_PARAM_FLAG(tlog_level, 0, "Minimum logging level for tlog() output");
| C++ |
/**********************************************************************
* File: fileio.cpp
* Description: File I/O utilities.
* Author: Samuel Charron
* Created: Tuesday, July 9, 2013
*
* (C) Copyright 2013, Google Inc.
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy
* of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required
* by applicable law or agreed to in writing, software distributed under the
* License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
* OF ANY KIND, either express or implied. See the License for the specific
* language governing permissions and limitations under the License.
*
**********************************************************************/
#ifdef _WIN32
#include <windows.h>
#ifndef unlink
#include <io.h>
#endif
#else
#include <glob.h>
#include <unistd.h>
#endif
#include <stdlib.h>
#include <cstdio>
#include <string>
#include "fileio.h"
#include "tprintf.h"
namespace tesseract {
///////////////////////////////////////////////////////////////////////////////
// File::
///////////////////////////////////////////////////////////////////////////////
// Opens filename with the given stdio mode. Returns NULL on failure,
// exactly as fopen does; no error is logged (see OpenOrDie for that).
FILE* File::Open(const string& filename, const string& mode) {
  return fopen(filename.c_str(), mode.c_str());
}
// Opens filename with the given mode, logging an error on failure.
// NOTE(review): despite the "OrDie" name this does not abort; it returns
// NULL on failure, so callers must still check the result.
FILE* File::OpenOrDie(const string& filename,
                      const string& mode) {
  FILE* stream = fopen(filename.c_str(), mode.c_str());
  if (stream == NULL) {
    tprintf("Unable to open '%s' in mode '%s'\n", filename.c_str(),
            mode.c_str());
  }
  return stream;
}
void File::WriteStringToFileOrDie(const string& str,
const string& filename) {
FILE* stream = fopen(filename.c_str(), "wb");
if (stream == NULL) {
tprintf("Unable to open '%s' for writing\n", filename.c_str());
return;
}
fputs(str.c_str(), stream);
ASSERT_HOST(fclose(stream) == 0);
}
// Returns true if filename exists and can be opened for reading.
// The probe stream is closed immediately; no handle is retained.
bool File::Readable(const string& filename) {
  FILE* stream = fopen(filename.c_str(), "rb");
  if (stream != NULL) {
    fclose(stream);
    return true;
  }
  return false;
}
bool File::ReadFileToString(const string& filename, string* out) {
FILE* stream = File::Open(filename.c_str(), "rb");
if (stream == NULL)
return false;
InputBuffer in(stream);
*out = "";
string temp;
while (in.ReadLine(&temp)) {
*out += temp;
*out += '\n';
}
return in.CloseFile();
}
// Reads the whole of filename into *out, aborting the process (via
// ASSERT_HOST_MSG) if the file cannot be read.
void File::ReadFileToStringOrDie(const string& filename, string* out) {
  ASSERT_HOST_MSG(ReadFileToString(filename, out),
                  "Failed to read file: %s\n", filename.c_str());
}
// Joins prefix and suffix with a '/' separator, unless prefix is empty
// or already ends in a slash, in which case they are simply concatenated.
string File::JoinPath(const string& prefix, const string& suffix) {
  if (prefix.empty() || prefix[prefix.size() - 1] == '/')
    return prefix + suffix;
  return prefix + "/" + suffix;
}
// Deletes pathname via unlink. Returns true on success; on failure logs
// an error and returns false.
bool File::Delete(const char* pathname) {
  if (unlink(pathname) == 0)
    return true;
  tprintf("ERROR: Unable to delete file %s\n", pathname);
  return false;
}
#ifdef _WIN32
bool File::DeleteMatchingFiles(const char* pattern) {
WIN32_FIND_DATA data;
BOOL result = TRUE;
HANDLE handle = FindFirstFile(pattern, &data);
bool all_deleted = true;
if (handle != INVALID_HANDLE_VALUE) {
for (; result; result = FindNextFile(handle, &data)) {
all_deleted &= File::Delete(data.cFileName);
}
FindClose(handle);
}
return all_deleted;
}
#else
// Deletes all files matching the given glob pattern (POSIX build).
// Returns true only if every matched file was deleted; an empty match
// set (glob returns non-zero) counts as success.
bool File::DeleteMatchingFiles(const char* pattern) {
  glob_t matches;
  bool all_deleted = true;
  if (glob(pattern, 0, NULL, &matches) == 0) {
    for (char** path = matches.gl_pathv; *path != NULL; ++path)
      all_deleted &= File::Delete(*path);
    globfree(&matches);
  }
  return all_deleted;
}
#endif
///////////////////////////////////////////////////////////////////////////////
// InputBuffer::
///////////////////////////////////////////////////////////////////////////////
// Wraps an already-open stream, taking ownership of it (the stream is
// closed by the destructor unless CloseFile() is called first).
// Measures the file size by seeking to the end and back.
InputBuffer::InputBuffer(FILE* stream)
    : stream_(stream) {
  fseek(stream_, 0, SEEK_END);
  filesize_ = ftell(stream_);
  fseek(stream_, 0, SEEK_SET);
}
// Same as the single-argument constructor; the unnamed size_t argument
// is accepted for interface compatibility but otherwise ignored.
InputBuffer::InputBuffer(FILE* stream, size_t)
    : stream_(stream) {
  fseek(stream_, 0, SEEK_END);
  filesize_ = ftell(stream_);
  fseek(stream_, 0, SEEK_SET);
}
// Closes the owned stream, unless it was already released by CloseFile()
// (which nulls stream_).
InputBuffer::~InputBuffer() {
  if (stream_ != NULL) {
    fclose(stream_);
  }
}
// Reads one line from the stream into *out, stripping a single trailing
// '\n' if present. Returns false at end of file (or on read error).
// NOTE(review): the fgets fallback reads at most BUFSIZ-1 bytes, so a
// longer physical line is returned as several successive "lines".
bool InputBuffer::ReadLine(string* out) {
  ASSERT_HOST(stream_ != NULL);
  char* line = NULL;
  int len = -1;
#ifndef HAVE_GETLINE
  // Portable fallback: fixed-size buffer via fgets.
  char line_buf[BUFSIZ];
  if ((line = fgets(line_buf, BUFSIZ, stream_)) != NULL) {
    len = strlen(line);
    // The line_buf[0] check guards the len - 1 index when strlen is 0,
    // which can happen if the stream contains a leading NUL byte.
    if (line_buf[0] != '\0' && line_buf[len - 1] == '\n')
      line_buf[len - 1] = '\0';
  } else {
    return false;
  }
  *out = string(line);
#else
  // getline allocates (and grows) the buffer itself; it must be freed.
  size_t line_size;
  len = getline(&line, &line_size, stream_);
  if (len < 0) {
    return false;
  }
  if (len >= 1 && line[len - 1] == '\n')
    line[len - 1] = '\0';
  *out = string(line);
  free(line);
#endif  // HAVE_GETLINE
  return true;
}
bool InputBuffer::CloseFile() {
int ret = fclose(stream_);
stream_ = NULL;
return ret == 0;
}
///////////////////////////////////////////////////////////////////////////////
// OutputBuffer::
///////////////////////////////////////////////////////////////////////////////
// Wraps an already-open stream for writing, taking ownership of it (the
// stream is closed by the destructor unless CloseFile() is called first).
OutputBuffer::OutputBuffer(FILE* stream)
    : stream_(stream) {
}
// Same as the single-argument constructor; the unnamed size_t argument
// is accepted for interface compatibility but otherwise ignored.
OutputBuffer::OutputBuffer(FILE* stream, size_t)
    : stream_(stream) {
}
// Closes the owned stream, unless it was already released by CloseFile()
// (which nulls stream_).
OutputBuffer::~OutputBuffer() {
  if (stream_ != NULL) {
    fclose(stream_);
  }
}
void OutputBuffer::WriteString(const string& str) {
fputs(str.c_str(), stream_);
}
bool OutputBuffer::CloseFile() {
int ret = fclose(stream_);
stream_ = NULL;
return ret == 0;
}
} // namespace tesseract
| C++ |
/******************************************************************************
** Filename: mftraining.c
** Purpose: Separates training pages into files for each character.
** Strips from files only the features and there parameters of
the feature type mf.
** Author: Dan Johnson
** Revisment: Christy Russon
** Environment: HPUX 6.5
** Library: HPUX 6.5
** History: Fri Aug 18 08:53:50 1989, DSJ, Created.
** 5/25/90, DSJ, Adapted to multiple feature types.
** Tuesday, May 17, 1998 Changes made to make feature specific and
** simplify structures. First step in simplifying training process.
**
** (c) Copyright Hewlett-Packard Company, 1988.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
******************************************************************************/
/**----------------------------------------------------------------------------
Include Files and Type Defines
----------------------------------------------------------------------------**/
#ifdef HAVE_CONFIG_H
#include "config_auto.h"
#endif
#include <string.h>
#include <stdio.h>
#define _USE_MATH_DEFINES
#include <math.h>
#ifdef _WIN32
#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif
#endif
#include "classify.h"
#include "cluster.h"
#include "clusttool.h"
#include "commontraining.h"
#include "danerror.h"
#include "efio.h"
#include "emalloc.h"
#include "featdefs.h"
#include "fontinfo.h"
#include "genericvector.h"
#include "indexmapbidi.h"
#include "intproto.h"
#include "mastertrainer.h"
#include "mergenf.h"
#include "mf.h"
#include "ndminx.h"
#include "ocrfeatures.h"
#include "oldlist.h"
#include "protos.h"
#include "shapetable.h"
#include "tessopt.h"
#include "tprintf.h"
#include "unicity_table.h"
using tesseract::Classify;
using tesseract::FontInfo;
using tesseract::FontSpacingInfo;
using tesseract::IndexMapBiDi;
using tesseract::MasterTrainer;
using tesseract::Shape;
using tesseract::ShapeTable;
#define PROGRAM_FEATURE_TYPE "mf"
// Max length of a fake shape label.
const int kMaxShapeLabelLength = 10;
DECLARE_STRING_PARAM_FLAG(test_ch);
/**----------------------------------------------------------------------------
Public Function Prototypes
----------------------------------------------------------------------------**/
int main (
int argc,
char **argv);
/*----------------------------------------------------------------------------
Public Code
-----------------------------------------------------------------------------*/
#ifndef GRAPHICS_DISABLED
// Displays the prototypes in protolist in a debug window, color-coded by
// status: green = significant, blue = no samples, magenta = merged,
// red = everything else. Each proto is drawn as a line segment centered
// on its mean position, with length/orientation from the feature vector,
// scaled by 256. Significant protos and unmerged red protos are also
// reported via tprintf.
static void DisplayProtoList(const char* ch, LIST protolist) {
  void* window = c_create_window("Char samples", 50, 200,
                                 520, 520, -130.0, 130.0, -130.0, 130.0);
  LIST proto = protolist;
  iterate(proto) {
    PROTOTYPE* prototype = reinterpret_cast<PROTOTYPE *>(first_node(proto));
    if (prototype->Significant)
      c_line_color_index(window, Green);
    else if (prototype->NumSamples == 0)
      c_line_color_index(window, Blue);
    else if (prototype->Merged)
      c_line_color_index(window, Magenta);
    else
      c_line_color_index(window, Red);
    float x = CenterX(prototype->Mean);
    float y = CenterY(prototype->Mean);
    // Orientation is scaled by 2*pi here, i.e. the stored value is
    // presumably a fraction of a full turn -- confirm in the feature code.
    double angle = OrientationOf(prototype->Mean) * 2 * M_PI;
    // Half-length components of the segment to draw about (x, y).
    float dx = static_cast<float>(LengthOf(prototype->Mean) * cos(angle) / 2);
    float dy = static_cast<float>(LengthOf(prototype->Mean) * sin(angle) / 2);
    c_move(window, (x - dx) * 256, (y - dy) * 256);
    c_draw(window, (x + dx) * 256, (y + dy) * 256);
    if (prototype->Significant)
      tprintf("Green proto at (%g,%g)+(%g,%g) %d samples\n",
              x, y, dx, dy, prototype->NumSamples);
    else if (prototype->NumSamples > 0 && !prototype->Merged)
      tprintf("Red proto at (%g,%g)+(%g,%g) %d samples\n",
              x, y, dx, dy, prototype->NumSamples);
  }
  c_make_current(window);
}
#endif // GRAPHICS_DISABLED
// Helper to run clustering on a single config.
// Mostly copied from the old mftraining, but with renamed variables.
// Clusters the samples of shape shape_id from the trainer, turns the
// resulting significant prototypes into a new config on the class
// labeled class_label (creating that class in mf_classes if absent),
// and returns the possibly-extended mf_classes list. Callers must use
// the returned list, as push may allocate a new head node.
static LIST ClusterOneConfig(int shape_id, const char* class_label,
                             LIST mf_classes,
                             const ShapeTable& shape_table,
                             MasterTrainer* trainer) {
  int num_samples;
  CLUSTERER *clusterer = trainer->SetupForClustering(shape_table,
                                                     feature_defs,
                                                     shape_id,
                                                     &num_samples);
  Config.MagicSamples = num_samples;
  LIST proto_list = ClusterSamples(clusterer, &Config);
  CleanUpUnusedData(proto_list);
  // Merge protos where reasonable to make more of them significant by
  // representing almost all samples of the class/font.
  MergeInsignificantProtos(proto_list, class_label, clusterer, &Config);
#ifndef GRAPHICS_DISABLED
  // Optionally show the protos of the character under test (--test_ch).
  if (strcmp(FLAGS_test_ch.c_str(), class_label) == 0)
    DisplayProtoList(FLAGS_test_ch.c_str(), proto_list);
#endif  // GRAPHICS_DISABLED
  // Delete the protos that will not be used in the inttemp output file.
  proto_list = RemoveInsignificantProtos(proto_list, true,
                                         false,
                                         clusterer->SampleSize);
  FreeClusterer(clusterer);
  MERGE_CLASS merge_class = FindClass(mf_classes, class_label);
  if (merge_class == NULL) {
    merge_class = NewLabeledClass(class_label);
    mf_classes = push(mf_classes, merge_class);
  }
  // Each shape becomes one config of the class.
  int config_id = AddConfigToClass(merge_class->Class);
  merge_class->Class->font_set.push_back(shape_id);
  LIST proto_it = proto_list;
  iterate(proto_it) {
    PROTOTYPE* prototype = reinterpret_cast<PROTOTYPE*>(first_node(proto_it));
    // See if proto can be approximated by existing proto.
    int p_id = FindClosestExistingProto(merge_class->Class,
                                        merge_class->NumMerged, prototype);
    if (p_id == NO_PROTO) {
      // Need to make a new proto, as it doesn't match anything.
      p_id = AddProtoToClass(merge_class->Class);
      MakeNewFromOld(ProtoIn(merge_class->Class, p_id), prototype);
      merge_class->NumMerged[p_id] = 1;
    } else {
      PROTO_STRUCT dummy_proto;
      MakeNewFromOld(&dummy_proto, prototype);
      // Merge with the similar proto, weighting the existing proto by
      // the number of protos already merged into it.
      ComputeMergedProto(ProtoIn(merge_class->Class, p_id), &dummy_proto,
                         static_cast<FLOAT32>(merge_class->NumMerged[p_id]),
                         1.0,
                         ProtoIn(merge_class->Class, p_id));
      merge_class->NumMerged[p_id]++;
    }
    AddProtoToConfig(p_id, merge_class->Class->Configurations[config_id]);
  }
  FreeProtoList(&proto_list);
  return mf_classes;
}
// Helper to setup the config map.
// Setup an index mapping from the shapes in the shape table to the classes
// that will be trained. In keeping with the original design, each shape
// with the same list of unichars becomes a different class and the configs
// represent the different combinations of fonts.
// Builds config_map over the shapes of shape_table, merging every shape
// whose unichar list equals that of an earlier shape, so each compact id
// corresponds to one distinct unichar list.
static void SetupConfigMap(ShapeTable* shape_table, IndexMapBiDi* config_map) {
  const int num_configs = shape_table->NumShapes();
  config_map->Init(num_configs, true);
  config_map->Setup();
  for (int c1 = 0; c1 < num_configs; ++c1) {
    // Skip ids that have already been merged into an earlier shape.
    if (config_map->SparseToCompact(c1) != c1)
      continue;
    Shape* shape1 = shape_table->MutableShape(c1);
    // Merge every later shape with an identical unichar list into c1.
    for (int c2 = c1 + 1; c2 < num_configs; ++c2) {
      if (shape_table->MutableShape(c2)->IsEqualUnichars(shape1))
        config_map->Merge(c1, c2);
    }
  }
  config_map->CompleteMerges();
}
/*---------------------------------------------------------------------------*/
int main (int argc, char **argv) {
/*
**  Parameters:
**    argc  number of command line arguments
**    argv  array of command line arguments
**  Globals: none
**  Operation:
**    This program reads in a text file consisting of feature
**    samples from a training page in the following format:
**
**      FontName UTF8-char-str xmin ymin xmax ymax page-number
**       NumberOfFeatureTypes(N)
**         FeatureTypeName1 NumberOfFeatures(M)
**            Feature1
**            ...
**            FeatureM
**         FeatureTypeName2 NumberOfFeatures(M)
**            Feature1
**            ...
**            FeatureM
**         ...
**         FeatureTypeNameN NumberOfFeatures(M)
**            Feature1
**            ...
**            FeatureM
**      FontName CharName ...
**
**    The result of this program is a binary inttemp file used by
**    the OCR engine.
**  Return: none
**  Exceptions: none
**  History:  Fri Aug 18 08:56:17 1989, DSJ, Created.
**            Mon May 18 1998, Christy Russson, Revistion started.
*/
  ParseArguments(&argc, &argv);
  ShapeTable* shape_table = NULL;
  STRING file_prefix;
  // Load the training data.
  MasterTrainer* trainer = tesseract::LoadTrainingData(argc, argv,
                                                       false,
                                                       &shape_table,
                                                       &file_prefix);
  if (trainer == NULL)
    return 1;  // Failed.
  // Setup an index mapping from the shapes in the shape table to the classes
  // that will be trained. In keeping with the original design, each shape
  // with the same list of unichars becomes a different class and the configs
  // represent the different combinations of fonts.
  IndexMapBiDi config_map;
  SetupConfigMap(shape_table, &config_map);
  WriteShapeTable(file_prefix, *shape_table);
  // If the shape_table is flat, then either we didn't run shape clustering, or
  // it did nothing, so we just output the trainer's unicharset.
  // Otherwise shape_set will hold a fake unicharset with an entry for each
  // shape in the shape table, and we will output that instead.
  UNICHARSET shape_set;
  const UNICHARSET* unicharset = &trainer->unicharset();
  // If we ran shapeclustering (and it worked) then at least one shape will
  // have multiple unichars, so we have to build a fake unicharset.
  if (shape_table->AnyMultipleUnichars()) {
    unicharset = &shape_set;
    // Now build a fake unicharset for the compact shape space to keep the
    // output modules happy that we are doing things correctly.
    int num_shapes = config_map.CompactSize();
    for (int s = 0; s < num_shapes; ++s) {
      char shape_label[kMaxShapeLabelLength + 1];
      // NOTE(review): the limit passed is kMaxShapeLabelLength rather than
      // sizeof(shape_label); harmless for "sh%04d" (6 chars) but would
      // truncate one char early if the label format ever grows.
      snprintf(shape_label, kMaxShapeLabelLength, "sh%04d", s);
      shape_set.unichar_insert(shape_label);
    }
  }
  // Now train each config separately.
  int num_configs = shape_table->NumShapes();
  LIST mf_classes = NIL_LIST;
  for (int s = 0; s < num_configs; ++s) {
    int unichar_id, font_id;
    if (unicharset == &shape_set) {
      // Using fake unichar_ids from the config_map/shape_set.
      unichar_id = config_map.SparseToCompact(s);
    } else {
      // Get the real unichar_id from the shape table/unicharset.
      shape_table->GetFirstUnicharAndFont(s, &unichar_id, &font_id);
    }
    const char* class_label = unicharset->id_to_unichar(unichar_id);
    mf_classes = ClusterOneConfig(s, class_label, mf_classes, *shape_table,
                                  trainer);
  }
  STRING inttemp_file = file_prefix;
  inttemp_file += "inttemp";
  STRING pffmtable_file = file_prefix;
  pffmtable_file += "pffmtable";
  CLASS_STRUCT* float_classes = SetUpForFloat2Int(*unicharset, mf_classes);
  // Now write the inttemp and pffmtable.
  trainer->WriteInttempAndPFFMTable(trainer->unicharset(), *unicharset,
                                    *shape_table, float_classes,
                                    inttemp_file.string(),
                                    pffmtable_file.string());
  delete [] float_classes;
  FreeLabeledClassList(mf_classes);
  delete trainer;
  delete shape_table;
  printf("Done!\n");
  if (!FLAGS_test_ch.empty()) {
    // If we are displaying debug window(s), wait for the user to look at them.
    printf("Hit return to exit...\n");
    while (getchar() != '\n');
  }
  return 0;
}  /* main */
| C++ |
/**********************************************************************
* File: boxchar.h
* Description: Simple class to associate a Tesseract classification unit with
* its bounding box so that the boxes can be rotated as the image
* is rotated for degradation. Also includes routines to output
* the character-tagged boxes to a boxfile.
* Author: Ray Smith
* Created: Mon Nov 18 2013
*
* (C) Copyright 2013, Google Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
**********************************************************************/
#ifndef TESSERACT_TRAINING_BOXCHAR_H_
#define TESSERACT_TRAINING_BOXCHAR_H_
#include <string>
#include <vector>
#include "allheaders.h" // from Leptonica
#ifdef USE_STD_NAMESPACE
using std::string;
using std::vector;
#endif
struct Box;
namespace tesseract {
// Associates a classification unit (UTF-8 string) with its bounding box
// and page, so boxes can be transformed along with the rendered image.
class BoxChar {
 public:
  // Takes a copy of the first len bytes of utf8_str; box_ is presumably
  // NULL until AddBox() is called -- confirm in boxchar.cpp.
  BoxChar(const char* utf8_str, int len);
  ~BoxChar();
  // Accessors.
  const string& ch() const { return ch_; }
  const Box* box() const { return box_; }
  const int& page() const { return page_; }
  // Set the box_ member.
  void AddBox(int x, int y, int width, int height);
  void set_page(int page) { page_ = page; }
  string* mutable_ch() { return &ch_; }
  Box* mutable_box() { return box_; }
  // Shifts the boxes by (xshift, yshift) -- presumably in image pixel
  // coordinates; confirm in boxchar.cpp.
  static void TranslateBoxes(int xshift, int yshift,
                             vector<BoxChar*>* boxes);
  // Rotate the vector of boxes between start and end by the given rotation.
  // The rotation is in radians clockwise about the given center.
  static void RotateBoxes(float rotation,
                          int xcenter,
                          int ycenter,
                          int start_box,
                          int end_box,
                          vector<BoxChar*>* boxes);
  // Create a tesseract box file from the vector of boxes. The image height
  // is needed to convert to tesseract coordinates.
  static void WriteTesseractBoxFile(const string& name, int height,
                                    const vector<BoxChar*>& boxes);

 private:
  string ch_;   // UTF-8 text of the classification unit.
  Box* box_;    // Leptonica box; owned (presumably freed in ~BoxChar --
                // confirm in boxchar.cpp). NOTE(review): copy ctor and
                // assignment are not disabled, so copying a BoxChar
                // would alias/double-free box_.
  int page_;    // Page number the unit was rendered on.
};
} // namespace tesseract
#endif // TESSERACT_TRAINING_BOXCHAR_H_
| C++ |
/**********************************************************************
* File: degradeimage.h
* Description: Function to degrade an image (usually of text) as if it
* has been printed and then scanned.
* Authors: Ray Smith
* Created: Tue Nov 19 2013
*
* (C) Copyright 2013, Google Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
**********************************************************************/
#ifndef TESSERACT_TRAINING_DEGRADEIMAGE_H_
#define TESSERACT_TRAINING_DEGRADEIMAGE_H_
struct Pix;
namespace tesseract {
class TRand;
// Degrade the pix as if by a print/copy/scan cycle with exposure > 0
// corresponding to darkening on the copier and <0 lighter and 0 not copied.
// If rotation is not NULL, the clockwise rotation in radians is saved there.
// The input pix must be 8 bit grey. (Binary with values 0 and 255 is OK.)
// The input image is destroyed and a different image returned.
// Returns the degraded image; presumably the caller owns the result and
// must pixDestroy it -- confirm in degradeimage.cpp.
struct Pix* DegradeImage(struct Pix* input, int exposure, TRand* randomizer,
                         float* rotation);
} // namespace tesseract
#endif // TESSERACT_TRAINING_DEGRADEIMAGE_H_
| C++ |
/**********************************************************************
* File: pango_font_info.cpp
* Description: Font-related objects and helper functions
* Author: Ranjith Unnikrishnan
* Created: Mon Nov 18 2013
*
* (C) Copyright 2013, Google Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
**********************************************************************/
// Include automatically generated configuration file if running autoconf.
#ifdef HAVE_CONFIG_H
#include "config_auto.h"
#endif
#ifdef MINGW
// workaround for stdlib.h and putenv
#undef __STRICT_ANSI__
#include "strcasestr.h"
#endif // MINGW
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <sys/param.h>
#include <algorithm>
#include "pango_font_info.h"
#include "commandlineflags.h"
#include "fileio.h"
#include "normstrngs.h"
#include "tlog.h"
#include "unichar.h"
#include "util.h"
#include "pango/pango.h"
#include "pango/pangocairo.h"
#include "pango/pangofc-font.h"
STRING_PARAM_FLAG(fonts_dir, "/auto/ocr-data/tesstraining/fonts",
"Overrides system default font location");
STRING_PARAM_FLAG(fontconfig_tmpdir, "/tmp",
"Overrides fontconfig default temporary dir");
BOOL_PARAM_FLAG(fontconfig_refresh_cache, false,
"Does a one-time deletion of cache files from the "
"fontconfig_tmpdir before initializing fontconfig.");
#ifndef USE_STD_NAMESPACE
#include "ocr/trainingdata/typesetting/legacy_fonts.h"
BOOL_PARAM_FLAG(use_only_legacy_fonts, false,
"Overrides --fonts_dir and sets the known universe of fonts to"
"the list in legacy_fonts.h");
#else
using std::pair;
#endif
namespace tesseract {
// Default assumed output resolution. Required only for providing font metrics
// in pixels.
const int kDefaultResolution = 300;
// Default-constructs with no font description at the default output
// resolution; Clear() establishes the remaining field defaults.
PangoFontInfo::PangoFontInfo() : desc_(NULL), resolution_(kDefaultResolution) {
  Clear();
}
// Constructs from a Pango font description string. On parse failure,
// logs an error and leaves the object in the cleared (default) state.
PangoFontInfo::PangoFontInfo(const string& desc)
    : desc_(NULL), resolution_(kDefaultResolution) {
  if (!ParseFontDescriptionName(desc)) {
    tprintf("ERROR: Could not parse %s\n", desc.c_str());
    Clear();
  }
}
void PangoFontInfo::Clear() {
font_size_ = 0;
is_bold_ = false;
is_italic_ = false;
is_smallcaps_ = false;
is_monospace_ = false;
family_name_.clear();
font_type_ = UNKNOWN;
if (desc_) {
pango_font_description_free(desc_);
desc_ = NULL;
}
}
// Returns the string form of the stored Pango description, or the empty
// string if no description has been parsed yet.
string PangoFontInfo::DescriptionName() const {
  if (!desc_) return "";
  char* desc_str = pango_font_description_to_string(desc_);
  string desc_name(desc_str);
  g_free(desc_str);  // Pango returns a newly allocated string.
  return desc_name;
}
// Initializes Fontconfig for use by writing a fake fonts.conf file into the
// FLAGS_fontconfigs_tmpdir directory, that points to the supplied
// FLAGS_fonts_dir, and then overrides the FONTCONFIG_PATH environment variable
// to point to this fonts.conf file.
// Initializes Fontconfig for use by writing a fake fonts.conf file into the
// FLAGS_fontconfig_tmpdir directory, that points to the supplied
// FLAGS_fonts_dir, and then overrides the FONTCONFIG_PATH environment
// variable to point to this fonts.conf file. Runs its body at most once;
// later calls (and calls with an empty --fonts_dir) are no-ops.
static void InitFontconfig() {
  static bool init_fontconfig = false;
  if (init_fontconfig || FLAGS_fonts_dir.empty()) {
    init_fontconfig = true;
    return;
  }
  if (FLAGS_fontconfig_refresh_cache) {
    tprintf("Deleting cache files from %s\n", FLAGS_fontconfig_tmpdir.c_str());
    File::DeleteMatchingFiles(File::JoinPath(
        FLAGS_fontconfig_tmpdir.c_str(), "*cache-2").c_str());
  }
  tprintf("Initializing fontconfig\n");
  const int MAX_FONTCONF_FILESIZE = 1024;
  char fonts_conf_template[MAX_FONTCONF_FILESIZE];
  snprintf(fonts_conf_template, MAX_FONTCONF_FILESIZE,
           "<?xml version=\"1.0\"?>\n"
           "<!DOCTYPE fontconfig SYSTEM \"fonts.dtd\">\n"
           "<fontconfig>\n"
           "<dir>%s</dir>\n"
           "<cachedir>%s</cachedir>\n"
           "<config></config>\n"
           "</fontconfig>", FLAGS_fonts_dir.c_str(),
           FLAGS_fontconfig_tmpdir.c_str());
  string fonts_conf_file = File::JoinPath(FLAGS_fontconfig_tmpdir.c_str(),
                                          "fonts.conf");
  File::WriteStringToFileOrDie(fonts_conf_template, fonts_conf_file);
#ifdef _WIN32
  // NOTE(review): putenv retains the pointer it is given, but 'env' is a
  // local whose buffer is freed when this function returns, so the
  // environment may end up pointing at freed memory. Consider _putenv_s
  // or static storage -- confirm on a Windows build.
  std::string env("FONTCONFIG_PATH=");
  env.append(FLAGS_fontconfig_tmpdir.c_str());
  putenv(env.c_str());
  putenv("LANG=en_US.utf8");
#else
  setenv("FONTCONFIG_PATH", FLAGS_fontconfig_tmpdir.c_str(), true);
  // Fix the locale so that the reported font names are consistent.
  setenv("LANG", "en_US.utf8", true);
#endif  // _WIN32
  init_fontconfig = true;
}
// Fills *families with the font map's family array and *n_families with
// its length, initializing fontconfig first. The caller frees the array
// with g_free (the families themselves are owned by Pango).
static void ListFontFamilies(PangoFontFamily*** families,
                             int* n_families) {
  InitFontconfig();
  PangoFontMap* font_map = pango_cairo_font_map_get_default();
  // Pango caches data here that the heap-leak checker would report.
  DISABLE_HEAP_LEAK_CHECK;
  pango_font_map_list_families(font_map, families, n_families);
}
// Inspects whether a given font family is monospace. If the font is not
// available, it cannot make a decision and returns false by default.
static bool IsMonospaceFontFamily(const char* family_name) {
PangoFontFamily** families = 0;
int n_families = 0;
bool is_monospace = false;
ListFontFamilies(&families, &n_families);
ASSERT_HOST(n_families > 0);
bool found = false;
for (int i = 0; i < n_families; ++i) {
if (!strcasecmp(family_name, pango_font_family_get_name(families[i]))) {
is_monospace = pango_font_family_is_monospace(families[i]);
found = true;
break;
}
}
if (!found) {
tlog(1, "Could not find monospace property of family %s\n", family_name);
}
g_free(families);
return is_monospace;
}
// Populates this object from the given Pango description (taking a copy
// of it). Returns false, leaving the object cleared, if the description
// has no family name.
bool PangoFontInfo::ParseFontDescription(const PangoFontDescription *desc) {
  Clear();
  const char* family = pango_font_description_get_family(desc);
  if (!family) {
    char* desc_str = pango_font_description_to_string(desc);
    tprintf("WARNING: Could not parse family name from description: '%s'\n",
            desc_str);
    g_free(desc_str);
    return false;
  }
  family_name_ = string(family);
  desc_ = pango_font_description_copy(desc);
  is_monospace_ = IsMonospaceFontFamily(family);
  // Set font size in points. Pango sizes are multiplied by PANGO_SCALE
  // unless marked absolute.
  font_size_ = pango_font_description_get_size(desc);
  if (!pango_font_description_get_size_is_absolute(desc)) {
    font_size_ /= PANGO_SCALE;
  }
  PangoStyle style = pango_font_description_get_style(desc);
  is_italic_ = (PANGO_STYLE_ITALIC == style ||
                PANGO_STYLE_OBLIQUE == style);
  is_smallcaps_ = (pango_font_description_get_variant(desc)
                   == PANGO_VARIANT_SMALL_CAPS);
  is_bold_ = (pango_font_description_get_weight(desc) >= PANGO_WEIGHT_BOLD);
  // We dont have a way to detect whether a font is of type Fraktur. The fonts
  // we currently use all have "Fraktur" in their family name, so we do a
  // fragile but functional check for that here.
  is_fraktur_ = (strcasestr(family, "Fraktur") != NULL);
  return true;
}
// Parses a Pango description string (e.g. "Verdana Bold 12") and
// populates this object via ParseFontDescription. Returns false on
// failure.
bool PangoFontInfo::ParseFontDescriptionName(const string& name) {
  PangoFontDescription *desc = pango_font_description_from_string(name.c_str());
  bool success = ParseFontDescription(desc);
  pango_font_description_free(desc);
  return success;
}
// Returns the PangoFont structure corresponding to the closest available font
// in the font map. Note that if the font is wholly missing, this could
// correspond to a completely different font family and face.
// Returns the PangoFont structure corresponding to the closest available
// font in the font map. Note that if the font is wholly missing, this could
// correspond to a completely different font family and face.
PangoFont* PangoFontInfo::ToPangoFont() const {
  InitFontconfig();
  PangoFontMap* font_map = pango_cairo_font_map_get_default();
  PangoContext* context = pango_context_new();
  pango_cairo_context_set_resolution(context, resolution_);
  pango_context_set_font_map(context, font_map);
  PangoFont* font = NULL;
  {
    // Font loading caches data that the heap-leak checker would report.
    DISABLE_HEAP_LEAK_CHECK;
    font = pango_font_map_load_font(font_map, context, desc_);
  }
  g_object_unref(context);
  return font;
}
// Returns true if the font has exact coverage (PANGO_COVERAGE_EXACT) for
// every non-whitespace, non-zero-width unicode in the first byte_length
// bytes of utf8_text; logs the first gap at tlog level 2 and returns
// false. NOTE(review): the PangoFont and PangoCoverage obtained here are
// never unreffed on any path -- presumably tolerated as cached one-off
// data; confirm against the heap-leak-check policy used elsewhere here.
bool PangoFontInfo::CoversUTF8Text(const char* utf8_text, int byte_length) const {
  PangoFont* font = ToPangoFont();
  PangoCoverage* coverage = pango_font_get_coverage(font, NULL);
  for (UNICHAR::const_iterator it = UNICHAR::begin(utf8_text, byte_length);
       it != UNICHAR::end(utf8_text, byte_length);
       ++it) {
    if (IsWhitespace(*it) || pango_is_zero_width(*it))
      continue;
    if (pango_coverage_get(coverage, *it) != PANGO_COVERAGE_EXACT) {
      char tmp[5];
      int len = it.get_utf8(tmp);
      tmp[len] = '\0';
      tlog(2, "'%s' (U+%x) not covered by font\n", tmp, *it);
      return false;
    }
  }
  return true;
}
// Removes, in place, every unicode in *utf8_text that the font has no
// exact coverage for (whitespace and zero-width characters are always
// kept, as are illegal utf-8 sequences). Returns the number of dropped
// characters.
int PangoFontInfo::DropUncoveredChars(string* utf8_text) const {
  PangoFont* font = ToPangoFont();
  PangoCoverage* coverage = pango_font_get_coverage(font, NULL);
  int num_dropped_chars = 0;
  // Maintain two iterators that point into the string. For space efficiency, we
  // will repeatedly copy one covered UTF8 character from one to the other, and
  // at the end resize the string to the right length.
  char* out = const_cast<char*>(utf8_text->c_str());
  const UNICHAR::const_iterator it_begin =
      UNICHAR::begin(utf8_text->c_str(), utf8_text->length());
  const UNICHAR::const_iterator it_end =
      UNICHAR::end(utf8_text->c_str(), utf8_text->length());
  for (UNICHAR::const_iterator it = it_begin; it != it_end; ++it) {
    // Skip bad utf-8.
    if (!it.is_legal())
      continue;  // One suitable error message will still be issued.
    if (!IsWhitespace(*it) && !pango_is_zero_width(*it) &&
        pango_coverage_get(coverage, *it) != PANGO_COVERAGE_EXACT) {
      // Uncovered: count and drop (do not copy to out).
      if (TLOG_IS_ON(2)) {
        char tmp[5];
        int len = it.get_utf8(tmp);
        tmp[len] = '\0';
        tlog(2, "'%s' (U+%x) not covered by font\n", tmp, *it);
      }
      ++num_dropped_chars;
      continue;
    }
    // Covered: copy the character's bytes down to the write cursor.
    strncpy(out, it.utf8_data(), it.utf8_len());
    out += it.utf8_len();
  }
  utf8_text->resize(out - utf8_text->c_str());
  return num_dropped_chars;
}
// Computes spacing metrics, in pixels, for rendering utf8_char in this
// font: *x_bearing receives the left-most ink bearing and *x_advance the
// total advance. Returns false (outputs untouched) if any unicode in the
// string has no glyph in the font.
bool PangoFontInfo::GetSpacingProperties(const string& utf8_char,
                                         int* x_bearing, int* x_advance) const {
  // Convert to equivalent PangoFont structure
  PangoFont* font = ToPangoFont();
  // Find the glyph index in the font for the supplied utf8 character.
  int total_advance = 0;
  int min_bearing = 0;
  // Handle multi-unicode strings by reporting the left-most position of the
  // x-bearing, and right-most position of the x-advance if the string were to
  // be rendered.
  const UNICHAR::const_iterator it_begin = UNICHAR::begin(utf8_char.c_str(),
                                                          utf8_char.length());
  const UNICHAR::const_iterator it_end = UNICHAR::end(utf8_char.c_str(),
                                                      utf8_char.length());
  for (UNICHAR::const_iterator it = it_begin; it != it_end; ++it) {
    PangoGlyph glyph_index = pango_fc_font_get_glyph(
        reinterpret_cast<PangoFcFont*>(font), *it);
    if (!glyph_index) {
      // Glyph for given unicode character doesn't exist in font.
      return false;
    }
    // Find the ink glyph extents for the glyph
    PangoRectangle ink_rect, logical_rect;
    pango_font_get_glyph_extents(font, glyph_index, &ink_rect, &logical_rect);
    pango_extents_to_pixels(&ink_rect, NULL);
    pango_extents_to_pixels(&logical_rect, NULL);
    // Bearing of this glyph measured from the start of the whole string.
    int bearing = total_advance + PANGO_LBEARING(ink_rect);
    if (it == it_begin || bearing < min_bearing) {
      min_bearing = bearing;
    }
    total_advance += PANGO_RBEARING(logical_rect);
  }
  *x_bearing = min_bearing;
  *x_advance = total_advance;
  return true;
}
bool PangoFontInfo::CanRenderString(const char* utf8_word, int len) const {
vector<string> graphemes;
return CanRenderString(utf8_word, len, &graphemes);
}
// Returns true if this font can render all of utf8_word without producing
// an unknown-glyph box or a dotted-circle (U+25CC) substitute glyph. If
// graphemes is non-NULL it is filled with the grapheme clusters Pango
// produced for the word, and cleared again if rendering fails.
bool PangoFontInfo::CanRenderString(const char* utf8_word, int len,
                                    vector<string>* graphemes) const {
  if (graphemes) graphemes->clear();
  // We check for font coverage of the text first, as otherwise Pango could
  // (undesirably) fall back to another font that does have the required
  // coverage.
  if (!CoversUTF8Text(utf8_word, len)) {
    return false;
  }
  // U+25CC dotted circle character that often (but not always) gets rendered
  // when there is an illegal grapheme sequence.
  const char32 kDottedCircleGlyph = 9676;
  bool bad_glyph = false;
  PangoFontMap* font_map = pango_cairo_font_map_get_default();
  PangoContext* context = pango_context_new();
  pango_context_set_font_map(context, font_map);
  PangoLayout* layout;
  {
    // Pango is not releasing the cached layout.
    DISABLE_HEAP_LEAK_CHECK;
    layout = pango_layout_new(context);
  }
  if (desc_) {
    pango_layout_set_font_description(layout, desc_);
  } else {
    PangoFontDescription *desc = pango_font_description_from_string(
        DescriptionName().c_str());
    pango_layout_set_font_description(layout, desc);
    pango_font_description_free(desc);
  }
  pango_layout_set_text(layout, utf8_word, len);
  PangoLayoutIter* run_iter = NULL;
  { // Fontconfig caches some information here that is not freed before exit.
    DISABLE_HEAP_LEAK_CHECK;
    run_iter = pango_layout_get_iter(layout);
  }
  // Walk the layout run by run, and each run cluster by cluster, looking for
  // any glyph that signals a rendering failure.
  do {
    PangoLayoutRun* run = pango_layout_iter_get_run_readonly(run_iter);
    if (!run) {
      // A NULL run marks the end of a line; skip to the next run.
      tlog(2, "Found end of line NULL run marker\n");
      continue;
    }
    PangoGlyph dotted_circle_glyph;
    PangoFont* font = run->item->analysis.font;
    // Glyph id of U+25CC in the font chosen for this particular run; used
    // below to detect illegal grapheme sequences.
    dotted_circle_glyph = pango_fc_font_get_glyph(
        reinterpret_cast<PangoFcFont*>(font), kDottedCircleGlyph);
    if (TLOG_IS_ON(2)) {
      PangoFontDescription* desc = pango_font_describe(font);
      char* desc_str = pango_font_description_to_string(desc);
      tlog(2, "Desc of font in run: %s\n", desc_str);
      g_free(desc_str);
      pango_font_description_free(desc);
    }
    PangoGlyphItemIter cluster_iter;
    gboolean have_cluster;
    for (have_cluster = pango_glyph_item_iter_init_start(&cluster_iter,
                                                         run, utf8_word);
         have_cluster && !bad_glyph;
         have_cluster = pango_glyph_item_iter_next_cluster(&cluster_iter)) {
      const int start_byte_index = cluster_iter.start_index;
      const int end_byte_index = cluster_iter.end_index;
      int start_glyph_index = cluster_iter.start_glyph;
      int end_glyph_index = cluster_iter.end_glyph;
      string cluster_text = string(utf8_word + start_byte_index,
                                   end_byte_index - start_byte_index);
      if (graphemes) graphemes->push_back(cluster_text);
      if (IsUTF8Whitespace(cluster_text.c_str())) {
        tlog(2, "Skipping whitespace\n");
        continue;
      }
      if (TLOG_IS_ON(2)) {
        printf("start_byte=%d end_byte=%d start_glyph=%d end_glyph=%d ",
               start_byte_index, end_byte_index,
               start_glyph_index, end_glyph_index);
      }
      // The glyph range may run backwards (end_glyph < start_glyph), so step
      // in whichever direction reaches end_glyph_index.
      for (int i = start_glyph_index,
               step = (end_glyph_index > start_glyph_index) ? 1 : -1;
           !bad_glyph && i != end_glyph_index; i+= step) {
        const bool unknown_glyph =
            (cluster_iter.glyph_item->glyphs->glyphs[i].glyph &
             PANGO_GLYPH_UNKNOWN_FLAG);
        const bool illegal_glyph =
            (cluster_iter.glyph_item->glyphs->glyphs[i].glyph ==
             dotted_circle_glyph);
        bad_glyph = unknown_glyph || illegal_glyph;
        if (TLOG_IS_ON(2)) {
          printf("(%d=%d)", cluster_iter.glyph_item->glyphs->glyphs[i].glyph,
                 bad_glyph ? 1 : 0);
        }
      }
      if (TLOG_IS_ON(2)) {
        printf(" '%s'\n", cluster_text.c_str());
      }
      if (bad_glyph)
        tlog(1, "Found illegal glyph!\n");
    }
  } while (!bad_glyph && pango_layout_iter_next_run(run_iter));
  pango_layout_iter_free(run_iter);
  g_object_unref(context);
  g_object_unref(layout);
  // On failure the partially-collected graphemes are meaningless; drop them.
  if (bad_glyph && graphemes) graphemes->clear();
  return !bad_glyph;
}
// ------------------------ FontUtils ------------------------------------
// Returns whether the specified font description is available in the fonts
// directory.
//
// The generated list of font families and faces includes "synthesized" font
// faces that are not truly loadable. Pango versions >=1.18 have a
// pango_font_face_is_synthesized method that can be used to prune the list.
// Until then, we are restricted to using a hack where we try to load the font
// from the font_map, and then check what we loaded to see if it has the
// description we expected. If it is not, then the font is deemed unavailable.
/* static */
// Returns true if a font exactly matching the given description name can be
// loaded. Pango may silently substitute a different font, so the loaded
// font's description is compared against the query.
bool FontUtils::IsAvailableFont(const char* input_query_desc) {
  string query_desc(input_query_desc);
  if (PANGO_VERSION <= 12005) {
    // Old Pango parses description names differently:
    // strip commas and any ' Medium' substring in the name.
    query_desc.erase(std::remove(query_desc.begin(), query_desc.end(), ','),
                     query_desc.end());
    const string kMediumStr = " Medium";
    std::size_t found = query_desc.find(kMediumStr);
    if (found != std::string::npos) {
      query_desc.erase(found, kMediumStr.length());
    }
  }
  PangoFontDescription *desc = pango_font_description_from_string(
      query_desc.c_str());
  PangoFont* selected_font = NULL;
  {
    InitFontconfig();
    PangoFontMap* font_map = pango_cairo_font_map_get_default();
    PangoContext* context = pango_context_new();
    pango_context_set_font_map(context, font_map);
    {
      DISABLE_HEAP_LEAK_CHECK;
      selected_font = pango_font_map_load_font(font_map, context, desc);
    }
    g_object_unref(context);
  }
  if (selected_font == NULL) {
    pango_font_description_free(desc);
    return false;
  }
  PangoFontDescription* selected_desc = pango_font_describe(selected_font);
  // The font is available only if what we loaded matches what we asked for.
  bool equal = pango_font_description_equal(desc, selected_desc);
  tlog(3, "query weight = %d \t selected weight =%d\n",
       pango_font_description_get_weight(desc),
       pango_font_description_get_weight(selected_desc));
  char* selected_desc_str = pango_font_description_to_string(selected_desc);
  // Bug fix: the second conversion was written as 's' instead of '%s', so
  // selected_desc_str was passed but never printed.
  tlog(2, "query_desc: '%s' Selected: '%s'\n", query_desc.c_str(),
       selected_desc_str);
  g_free(selected_desc_str);
  pango_font_description_free(selected_desc);
  g_object_unref(selected_font);
  pango_font_description_free(desc);
  return equal;
}
// Returns true for the generic family aliases ("Sans", "Serif", "Monospace")
// that do not name a concrete, loadable font family.
static bool ShouldIgnoreFontFamilyName(const char* query) {
  static const char* kIgnoredFamilyNames[]
      = { "Sans", "Serif", "Monospace", NULL };
  for (const char** name = kIgnoredFamilyNames; *name != NULL; ++name) {
    if (strcmp(*name, query) == 0) {
      return true;
    }
  }
  return false;
}
// Outputs description names of available fonts.
/* static */
const vector<string>& FontUtils::ListAvailableFonts() {
  // The result is cached in a function-local static: enumerating families
  // and probing each face with IsAvailableFont() is expensive, and the
  // installed font set is assumed not to change during a run.
  static vector<string> available_fonts_;  // cache list
  if (available_fonts_.size()) {
    return available_fonts_;
  }
#ifndef USE_STD_NAMESPACE
  if (FLAGS_use_only_legacy_fonts) {
    // Restrict view to list of fonts in legacy_fonts.h
    tprintf("Using list of legacy fonts only\n");
    const int kNumFontLists = 4;
    for (int i = 0; i < kNumFontLists; ++i) {
      for (int j = 0; kFontlists[i][j] != NULL; ++j) {
        available_fonts_.push_back(kFontlists[i][j]);
      }
    }
    return available_fonts_;
  }
#endif
  PangoFontFamily** families = 0;
  int n_families = 0;
  ListFontFamilies(&families, &n_families);
  for (int i = 0; i < n_families; ++i) {
    const char* family_name = pango_font_family_get_name(families[i]);
    tlog(2, "Listing family %s\n", family_name);
    // Skip generic aliases ("Sans" etc.) that are not concrete families.
    if (ShouldIgnoreFontFamilyName(family_name))
      continue;
    int n_faces;
    PangoFontFace** faces = NULL;
    pango_font_family_list_faces(families[i], &faces, &n_faces);
    for (int j = 0; j < n_faces; ++j) {
      PangoFontDescription* desc = pango_font_face_describe(faces[j]);
      char* desc_str = pango_font_description_to_string(desc);
      // Probe each face to weed out "synthesized" faces that cannot truly
      // be loaded (see the comment above IsAvailableFont).
      if (IsAvailableFont(desc_str)) {
        available_fonts_.push_back(desc_str);
      }
      pango_font_description_free(desc);
      g_free(desc_str);
    }
    g_free(faces);
  }
  g_free(families);
  sort(available_fonts_.begin(), available_fonts_.end());
  return available_fonts_;
}
// Expands a Pango coverage map into a per-codepoint bitmap: entry i is true
// iff codepoint i is interchange-valid and exactly covered by the font.
static void CharCoverageMapToBitmap(PangoCoverage* coverage,
                                    vector<bool>* unichar_bitmap) {
  const int kMinUnicodeValue = 33;
  const int kMaxUnicodeValue = 0x10FFFF;
  unichar_bitmap->resize(kMaxUnicodeValue + 1, false);
  // Mark off characters that the font can render.
  for (int code = kMinUnicodeValue; code <= kMaxUnicodeValue; ++code) {
    if (!IsInterchangeValid(code)) continue;
    (*unichar_bitmap)[code] =
        (pango_coverage_get(coverage, code) == PANGO_COVERAGE_EXACT);
  }
}
/* static */
// Variant that considers every available font.
void FontUtils::GetAllRenderableCharacters(vector<bool>* unichar_bitmap) {
  GetAllRenderableCharacters(ListAvailableFonts(), unichar_bitmap);
}
/* static */
void FontUtils::GetAllRenderableCharacters(const string& font_name,
vector<bool>* unichar_bitmap) {
PangoFontInfo font_info(font_name);
PangoCoverage* coverage = pango_font_get_coverage(
font_info.ToPangoFont(), NULL);
CharCoverageMapToBitmap(coverage, unichar_bitmap);
}
/* static */
// Fills *unichar_bitmap with the union of the codepoints renderable by any
// of the given fonts.
void FontUtils::GetAllRenderableCharacters(const vector<string>& fonts,
                                           vector<bool>* unichar_bitmap) {
  // Form the union of coverage maps from the fonts
  PangoCoverage* all_coverage = pango_coverage_new();
  // Cast to int: tlog is printf-style, and %d expects int, not size_t.
  tlog(1, "Processing %d fonts\n", static_cast<int>(fonts.size()));
  for (int i = 0; i < fonts.size(); ++i) {
    PangoFontInfo font_info(fonts[i]);
    PangoCoverage* coverage = pango_font_get_coverage(
        font_info.ToPangoFont(), NULL);
    // Mark off characters that any font can render.
    pango_coverage_max(all_coverage, coverage);
    // Leak fix: each per-font coverage map is caller-owned and was
    // previously never released.
    pango_coverage_unref(coverage);
  }
  CharCoverageMapToBitmap(all_coverage, unichar_bitmap);
  pango_coverage_unref(all_coverage);
}
// Utilities written to be backward compatible with StringRender
/* static */
int FontUtils::FontScore(const unordered_map<char32, inT64>& ch_map,
const string& fontname,
int* raw_score,
vector<bool>* ch_flags) {
PangoFontInfo font_info;
if (!font_info.ParseFontDescriptionName(fontname)) {
tprintf("ERROR: Could not parse %s\n", fontname.c_str());
}
PangoFont* font = font_info.ToPangoFont();
PangoCoverage* coverage = pango_font_get_coverage(font, NULL);
if (ch_flags) {
ch_flags->clear();
ch_flags->reserve(ch_map.size());
}
*raw_score = 0;
int ok_chars = 0;
for (unordered_map<char32, inT64>::const_iterator it = ch_map.begin();
it != ch_map.end(); ++it) {
bool covered = (IsWhitespace(it->first) ||
(pango_coverage_get(coverage, it->first)
== PANGO_COVERAGE_EXACT));
if (covered) {
++(*raw_score);
ok_chars += it->second;
}
if (ch_flags) {
ch_flags->push_back(covered);
}
}
return ok_chars;
}
/* static */
// Selects the fonts that best cover the character histogram ch_map.
// Appends (font name, per-character coverage flags) pairs to *fonts and
// returns the selected font names as a newline-separated string.
string FontUtils::BestFonts(const unordered_map<char32, inT64>& ch_map,
    vector<pair<const char*, vector<bool> > >* fonts) {
  const double kMinOKFraction = 0.99;
  // Weighted fraction of characters that must be renderable in a font to make
  // it OK even if the raw count is not good.
  const double kMinWeightedFraction = 0.99995;

  fonts->clear();
  vector<vector<bool> > font_flags;
  vector<int> font_scores;
  vector<int> raw_scores;
  int most_ok_chars = 0;
  int best_raw_score = 0;
  const vector<string>& font_names = FontUtils::ListAvailableFonts();
  // Pass 1: score every candidate font, remembering the best scores seen.
  for (int i = 0; i < font_names.size(); ++i) {
    vector<bool> ch_flags;
    int raw_score = 0;
    const int ok_chars =
        FontScore(ch_map, font_names[i], &raw_score, &ch_flags);
    if (ok_chars > most_ok_chars) most_ok_chars = ok_chars;
    if (raw_score > best_raw_score) best_raw_score = raw_score;
    font_flags.push_back(ch_flags);
    font_scores.push_back(ok_chars);
    raw_scores.push_back(raw_score);
  }
  // Now select the fonts with a score above a threshold fraction
  // of both the raw and weighted best scores. To prevent bogus fonts being
  // selected for CJK, we require a high fraction (kMinOKFraction = 0.99) of
  // BOTH weighted and raw scores.
  // In low character-count scripts, the issue is more getting enough fonts,
  // when only 1 or 2 might have all those rare dingbats etc in them, so we
  // allow a font with a very high weighted (coverage) score
  // (kMinWeightedFraction = 0.99995) to be used even if its raw score is poor.
  const int least_good_enough =
      static_cast<int>(most_ok_chars * kMinOKFraction);
  const int least_raw_enough =
      static_cast<int>(best_raw_score * kMinOKFraction);
  const int override_enough =
      static_cast<int>(most_ok_chars * kMinWeightedFraction);

  string font_list;
  // Pass 2: emit winners; log near-misses as runners-up.
  for (int i = 0; i < font_names.size(); ++i) {
    const int score = font_scores[i];
    const int raw_score = raw_scores[i];
    const bool weighted_ok = score >= least_good_enough;
    const bool raw_ok = raw_score >= least_raw_enough;
    if ((weighted_ok && raw_ok) || score >= override_enough) {
      fonts->push_back(make_pair(font_names[i].c_str(), font_flags[i]));
      tlog(1, "OK font %s = %.4f%%, raw = %d = %.2f%%\n",
           font_names[i].c_str(),
           100.0 * score / most_ok_chars,
           raw_score, 100.0 * raw_score / best_raw_score);
      font_list += font_names[i];
      font_list += "\n";
    } else if (weighted_ok || raw_ok) {
      tlog(1, "Runner-up font %s = %.4f%%, raw = %d = %.2f%%\n",
           font_names[i].c_str(),
           100.0 * score / most_ok_chars,
           raw_score, 100.0 * raw_score / best_raw_score);
    }
  }
  return font_list;
}
/* static */
bool FontUtils::SelectFont(const char* utf8_word, const int utf8_len,
string* font_name, vector<string>* graphemes) {
return SelectFont(utf8_word, utf8_len, ListAvailableFonts(), font_name,
graphemes);
}
/* static */
bool FontUtils::SelectFont(const char* utf8_word, const int utf8_len,
const vector<string>& all_fonts,
string* font_name, vector<string>* graphemes) {
if (font_name) font_name->clear();
if (graphemes) graphemes->clear();
for (int i = 0; i < all_fonts.size(); ++i) {
PangoFontInfo font;
vector<string> found_graphemes;
ASSERT_HOST_MSG(font.ParseFontDescriptionName(all_fonts[i]),
"Could not parse font desc name %s\n",
all_fonts[i].c_str());
if (font.CanRenderString(utf8_word, utf8_len, &found_graphemes)) {
if (graphemes) graphemes->swap(found_graphemes);
if (font_name) *font_name = all_fonts[i];
return true;
}
}
return false;
}
} // namespace tesseract
| C++ |
///////////////////////////////////////////////////////////////////////
// File: wordlist2dawg.cpp
// Description: Program to generate a DAWG from a word list file
// Author: Thomas Kielbus
// Created: Thu May 10 18:11:42 PDT 2007
//
// (C) Copyright 2006, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
// Given a file that contains a list of words (one word per line) this program
// generates the corresponding squished DAWG file.
#include <stdio.h>
#include "classify.h"
#include "dawg.h"
#include "dict.h"
#include "emalloc.h"
#include "freelist.h"
#include "helpers.h"
#include "serialis.h"
#include "trie.h"
#include "unicharset.h"
// Builds a squished DAWG from a word list (default), checks a word list
// against an existing DAWG (-t), or builds with an RTL reverse policy
// (-r <policy>).
int main(int argc, char** argv) {
  if (!(argc == 4 || (argc == 5 && strcmp(argv[1], "-t") == 0) ||
        (argc == 6 && strcmp(argv[1], "-r") == 0))) {
    printf("Usage: %s [-t | -r [reverse policy] ] word_list_file"
           " dawg_file unicharset_file\n", argv[0]);
    return 1;
  }
  tesseract::Classify *classify = new tesseract::Classify();
  int argv_index = 0;
  if (argc == 5) ++argv_index;  // Skip over the "-t" flag.
  tesseract::Trie::RTLReversePolicy reverse_policy =
      tesseract::Trie::RRP_DO_NO_REVERSE;
  if (argc == 6) {
    ++argv_index;  // Skip over the "-r" flag.
    int tmp_int;
    // Robustness fix: a non-numeric policy previously left tmp_int
    // uninitialized; now it is reported and the program exits.
    if (sscanf(argv[++argv_index], "%d", &tmp_int) != 1) {
      tprintf("Invalid reverse policy '%s'\n", argv[argv_index]);
      delete classify;
      return 1;
    }
    reverse_policy = static_cast<tesseract::Trie::RTLReversePolicy>(tmp_int);
    tprintf("Set reverse_policy to %s\n",
            tesseract::Trie::get_reverse_policy_name(reverse_policy));
  }
  // Note: the old "if (argc == 7) argv_index += 3;" was unreachable — the
  // usage check above restricts argc to 4, 5 or 6 — and has been removed.
  const char* wordlist_filename = argv[++argv_index];
  const char* dawg_filename = argv[++argv_index];
  const char* unicharset_file = argv[++argv_index];
  tprintf("Loading unicharset from '%s'\n", unicharset_file);
  if (!classify->getDict().getUnicharset().load_from_file(unicharset_file)) {
    tprintf("Failed to load unicharset from '%s'\n", unicharset_file);
    delete classify;
    return 1;
  }
  const UNICHARSET &unicharset = classify->getDict().getUnicharset();
  if (argc == 4 || argc == 6) {
    // Build mode: read the word list into a Trie, squish, and write it out.
    tesseract::Trie trie(
        // the first 3 arguments are not used in this case
        tesseract::DAWG_TYPE_WORD, "", SYSTEM_DAWG_PERM,
        unicharset.size(), classify->getDict().dawg_debug_level);
    tprintf("Reading word list from '%s'\n", wordlist_filename);
    if (!trie.read_and_add_word_list(wordlist_filename, unicharset,
                                     reverse_policy)) {
      tprintf("Failed to add word list from '%s'\n", wordlist_filename);
      delete classify;  // Was leaked by the previous exit(1).
      return 1;
    }
    tprintf("Reducing Trie to SquishedDawg\n");
    tesseract::SquishedDawg *dawg = trie.trie_to_dawg();
    if (dawg != NULL && dawg->NumEdges() > 0) {
      tprintf("Writing squished DAWG to '%s'\n", dawg_filename);
      dawg->write_squished_dawg(dawg_filename);
    } else {
      tprintf("Dawg is empty, skip producing the output file\n");
    }
    delete dawg;
  } else if (argc == 5) {
    // Check mode (-t): load an existing DAWG and verify the word list.
    tprintf("Loading dawg DAWG from '%s'\n", dawg_filename);
    tesseract::SquishedDawg words(
        dawg_filename,
        // these 3 arguments are not used in this case
        tesseract::DAWG_TYPE_WORD, "", SYSTEM_DAWG_PERM,
        classify->getDict().dawg_debug_level);
    tprintf("Checking word list from '%s'\n", wordlist_filename);
    words.check_for_words(wordlist_filename, unicharset, true);
  } else {  // should never get here
    tprintf("Invalid command-line options\n");
    delete classify;
    return 1;
  }
  delete classify;
  return 0;
}
| C++ |
/**********************************************************************
* File: stringrenderer.h
* Description: Class for rendering UTF-8 text to an image, and retrieving
* bounding boxes around each grapheme cluster.
*
* Instances are created using a font description string
* (eg. "Arial Italic 12"; see pango_font_info.h for the format)
* and the page dimensions. Other renderer properties such as
* spacing, ligaturization, as well a preprocessing behavior such
* as removal of unrenderable words and a special n-gram mode may
* be set using respective set_* methods.
*
* Author: Ranjith Unnikrishnan
* Created: Mon Nov 18 2013
*
* (C) Copyright 2013, Google Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
**********************************************************************/
#ifndef TESSERACT_TRAINING_STRINGRENDERER_H_
#define TESSERACT_TRAINING_STRINGRENDERER_H_
#include <string>
#include <vector>
#include "hashfn.h"
#include "host.h"
#include "pango_font_info.h"
#include "pango/pango-layout.h"
#include "pango/pangocairo.h"
struct Boxa;
struct Pix;
namespace tesseract {
class BoxChar;
class StringRenderer {
 public:
  StringRenderer(const string& font_desc, int page_width, int page_height);
  ~StringRenderer();

  // Renders the text with the chosen font and returns the byte offset upto
  // which the text could be rendered so as to fit the specified page
  // dimensions.
  int RenderToImage(const char* text, int text_length, Pix** pix);
  int RenderToGrayscaleImage(const char* text, int text_length, Pix** pix);
  int RenderToBinaryImage(const char* text, int text_length, int threshold,
                          Pix** pix);
  // Renders a line of text with all available fonts that were able to render
  // at least min_coverage fraction of the input text. Use 1.0 to require that
  // a font be able to render all the text.
  int RenderAllFontsToImage(double min_coverage, const char* text,
                            int text_length, string* font_used, Pix** pix);

  // Replaces the current font; returns false if desc could not be parsed.
  //   NOTE(review): return semantics inferred from the bool type — confirm
  //   against the implementation.
  bool set_font(const string& desc);
  // Extra spacing to apply between characters.
  void set_char_spacing(double char_spacing) {
    char_spacing_ = char_spacing;
  }
  // Extra spacing to apply between lines.
  void set_leading(int leading) {
    leading_ = leading;
  }
  void set_resolution(const int resolution);
  void set_vertical_text(bool vertical_text) {
    vertical_text_ = vertical_text;
  }
  void set_gravity_hint_strong(bool gravity_hint_strong) {
    gravity_hint_strong_ = gravity_hint_strong;
  }
  void set_render_fullwidth_latin(bool render_fullwidth_latin) {
    render_fullwidth_latin_ = render_fullwidth_latin;
  }
  // Sets the probability (value in [0, 1]) of starting to render a word with an
  // underline. This implementation consider words to be space-delimited
  // sequences of characters.
  void set_underline_start_prob(const double frac) {
    // Clamp into [0, 1] so callers cannot set an invalid probability.
    underline_start_prob_ = std::min(std::max(frac, 0.0), 1.0);
  }
  // Set the probability (value in [0, 1]) of continuing a started underline to
  // the next word.
  void set_underline_continuation_prob(const double frac) {
    underline_continuation_prob_ = std::min(std::max(frac, 0.0), 1.0);
  }
  void set_underline_style(const PangoUnderline style) {
    underline_style_ = style;
  }
  void set_page(int page) {
    page_ = page;
  }
  // Padding (pixels) added around each output bounding box.
  void set_box_padding(int val) {
    box_padding_ = val;
  }
  void set_drop_uncovered_chars(bool val) {
    drop_uncovered_chars_ = val;
  }
  void set_strip_unrenderable_words(bool val) {
    strip_unrenderable_words_ = val;
  }
  void set_output_word_boxes(bool val) {
    output_word_boxes_ = val;
  }
  // Before rendering the string, replace latin characters with their optional
  // ligatured forms (such as "fi", "ffi" etc.) if the font_ covers those
  // unicodes.
  void set_add_ligatures(bool add_ligatures) {
    add_ligatures_ = add_ligatures;
  }
  // Set the rgb value of the text ink. Values range in [0, 1.0]
  void set_pen_color(double r, double g, double b) {
    pen_color_[0] = r;
    pen_color_[1] = g;
    pen_color_[2] = b;
  }
  void set_h_margin(const int h_margin) {
    h_margin_ = h_margin;
  }
  void set_v_margin(const int v_margin) {
    v_margin_ = v_margin;
  }
  const PangoFontInfo& font() const {
    return font_;
  }
  const int h_margin() const {
    return h_margin_;
  }
  const int v_margin() const {
    return v_margin_;
  }

  // Get the boxchars of all clusters rendered thus far (or since the last call
  // to ClearBoxes()).
  const vector<BoxChar*>& GetBoxes() const;
  // Get the rendered page bounding boxes of all pages created thus far (or
  // since last call to ClearBoxes()).
  Boxa* GetPageBoxes() const;

  // Rotate the boxes on the most recent page by the given rotation.
  void RotatePageBoxes(float rotation);
  // Delete all boxes.
  void ClearBoxes();
  void WriteAllBoxes(const string& filename) const;
  // Removes space-delimited words from the string that are not renderable by
  // the current font and returns the count of such words.
  int StripUnrenderableWords(string* utf8_text) const;

  // Insert a Word Joiner symbol (U+2060) between adjacent characters, excluding
  // spaces and combining types, in each word before rendering to ensure words
  // are not broken across lines. The output boxchars will not contain the
  // joiner.
  static string InsertWordJoiners(const string& text);

  // Helper functions to convert fullwidth Latin and halfwidth Basic Latin.
  static string ConvertBasicLatinToFullwidthLatin(const string& text);
  static string ConvertFullwidthLatinToBasicLatin(const string& text);

 protected:
  // Init and free local renderer objects.
  void InitPangoCairo();
  void FreePangoCairo();
  // Set rendering properties.
  void SetLayoutProperties();
  void SetWordUnderlineAttributes(const string& page_text);
  // Compute bounding boxes around grapheme clusters.
  void ComputeClusterBoxes();
  void CorrectBoxPositionsToLayout(vector<BoxChar*>* boxchars);
  bool GetClusterStrings(vector<string>* cluster_text);
  int FindFirstPageBreakOffset(const char* text, int text_length);

  PangoFontInfo font_;
  // Page properties
  int page_width_, page_height_, h_margin_, v_margin_;
  // Text rendering properties
  int pen_color_[3];
  double char_spacing_;
  int leading_, resolution_;
  bool vertical_text_;
  bool gravity_hint_strong_;
  bool render_fullwidth_latin_;
  double underline_start_prob_;
  double underline_continuation_prob_;
  PangoUnderline underline_style_;
  // Text filtering options
  bool drop_uncovered_chars_;
  bool strip_unrenderable_words_;
  bool add_ligatures_;
  bool output_word_boxes_;
  // Pango and cairo specific objects
  cairo_surface_t* surface_;
  cairo_t* cr_;
  PangoLayout* layout_;
  // Internal state of current page number, updated on successive calls to
  // RenderToImage()
  int start_box_;
  int page_;
  // Boxes and associated text for all pages rendered with RenderToImage() since
  // the last call to ClearBoxes().
  vector<BoxChar*> boxchars_;
  int box_padding_;
  // Bounding boxes for pages since the last call to ClearBoxes().
  Boxa* page_boxes_;

  // Objects cached for subsequent calls to RenderAllFontsToImage()
  hash_map<char32, inT64> char_map_;  // Time-saving char histogram.
  int total_chars_;   // Number in the string to be rendered.
  int font_index_;    // Index of next font to use in font list.
  int last_offset_;   // Offset returned from last successful rendering

 private:
  // Non-copyable: declared but not defined.
  StringRenderer(const StringRenderer&);
  void operator=(const StringRenderer&);
};
} // namespace tesseract
#endif  // TESSERACT_TRAINING_STRINGRENDERER_H_
| C++ |
// Copyright 2011 Google Inc. All Rights Reserved.
// Author: rays@google.com (Ray Smith)
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Filename: shapeclustering.cpp
// Purpose: Generates a master shape table to merge similarly-shaped
// training data of whole, partial or multiple characters.
// Author: Ray Smith
#ifdef HAVE_CONFIG_H
#include "config_auto.h"
#endif
#ifndef USE_STD_NAMESPACE
#include "base/commandlineflags.h"
#endif
#include "commontraining.h"
#include "mastertrainer.h"
#include "params.h"
#include "strngs.h"
INT_PARAM_FLAG(display_cloud_font, -1,
"Display cloud of this font, canonical_class1");
INT_PARAM_FLAG(display_canonical_font, -1,
"Display canonical sample of this font, canonical_class2");
STRING_PARAM_FLAG(canonical_class1, "", "Class to show ambigs for");
STRING_PARAM_FLAG(canonical_class2, "", "Class to show ambigs for");
// Loads training data, if requested displays debug information, otherwise
// creates the master shape table by shape clustering and writes it to a file.
// If FLAGS_display_cloud_font is set, then the cloud features of
// FLAGS_canonical_class1/FLAGS_display_cloud_font are shown in green ON TOP
// OF the red canonical features of FLAGS_canonical_class2/
// FLAGS_display_canonical_font, so as to show which canonical features are
// NOT in the cloud.
// Otherwise, if FLAGS_canonical_class1 is set, prints a table of font-wise
// cluster distances between FLAGS_canonical_class1 and FLAGS_canonical_class2.
// Entry point: loads training data, optionally shows debug displays, and
// otherwise clusters shapes and writes the master shape table.
int main(int argc, char **argv) {
  ParseArguments(&argc, &argv);
  STRING file_prefix;
  tesseract::MasterTrainer* trainer = tesseract::LoadTrainingData(
      argc, argv, false, NULL, &file_prefix);
  if (!trainer)
    return 1;
  if (FLAGS_display_cloud_font >= 0) {
#ifndef GRAPHICS_DISABLED
    trainer->DisplaySamples(FLAGS_canonical_class1.c_str(),
                            FLAGS_display_cloud_font,
                            FLAGS_canonical_class2.c_str(),
                            FLAGS_display_canonical_font);
#endif  // GRAPHICS_DISABLED
    delete trainer;  // Leak fix: was not freed on this early-return path.
    return 0;
  } else if (!FLAGS_canonical_class1.empty()) {
    trainer->DebugCanonical(FLAGS_canonical_class1.c_str(),
                            FLAGS_canonical_class2.c_str());
    delete trainer;  // Leak fix: was not freed on this early-return path.
    return 0;
  }
  trainer->SetupMasterShapes();
  WriteShapeTable(file_prefix, trainer->master_shapes());
  delete trainer;
  return 0;
} /* main */
| C++ |
/**********************************************************************
* File: boxchar.cpp
* Description: Simple class to associate a Tesseract classification unit with
* its bounding box so that the boxes can be rotated as the image
* is rotated for degradation. Also includes routines to output
* the character-tagged boxes to a boxfile.
* Author: Ray Smith
* Created: Mon Nov 18 2013
*
* (C) Copyright 2013, Google Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
**********************************************************************/
#include "boxchar.h"
#include <stddef.h>
#include "fileio.h"
#include "ndminx.h"
namespace tesseract {
// Builds a BoxChar holding the given UTF-8 fragment; the bounding box is
// left unset until AddBox() is called.
BoxChar::BoxChar(const char* utf8_str, int len)
    : ch_(utf8_str, len), box_(NULL) {}
// Releases the leptonica box, if any (boxDestroy is a no-op on NULL).
BoxChar::~BoxChar() {
  boxDestroy(&box_);
}
// Attaches a newly allocated bounding box to this character.
// NOTE(review): a second call would leak the previous box — confirm callers
// invoke this at most once per BoxChar.
void BoxChar::AddBox(int x, int y, int width, int height) {
  box_ = boxCreate(x, y, width, height);
}
/* static */
void BoxChar::TranslateBoxes(int xshift, int yshift,
vector<BoxChar*>* boxes) {
for (int i = 0; i < boxes->size(); ++i) {
BOX* box = (*boxes)[i]->box_;
if (box != NULL) {
box->x += xshift;
box->y += yshift;
}
}
}
// Rotate the boxes in [start_box, end_box) by the given rotation.
// The rotation is in radians clockwise about the given center.
/* static */
void BoxChar::RotateBoxes(float rotation,
                          int xcenter,
                          int ycenter,
                          int start_box,
                          int end_box,
                          vector<BoxChar*>* boxes) {
  // Gather the non-null boxes into a Boxa so leptonica can rotate them in a
  // single call.
  Boxa* orig = boxaCreate(0);
  for (int i = start_box; i < end_box; ++i) {
    BOX* box = (*boxes)[i]->box_;
    if (box) boxaAddBox(orig, box, L_CLONE);
  }
  Boxa* rotated = boxaRotate(orig, xcenter, ycenter, rotation);
  boxaDestroy(&orig);
  // Write the rotated boxes back. box_ind advances only for entries that had
  // a box, which keeps the rotated array aligned with the source entries.
  for (int i = start_box, box_ind = 0; i < end_box; ++i) {
    if ((*boxes)[i]->box_) {
      boxDestroy(&((*boxes)[i]->box_));
      (*boxes)[i]->box_ = boxaGetBox(rotated, box_ind++, L_CLONE);
    }
  }
  boxaDestroy(&rotated);
}
// Maximum length of one formatted box-file output line.
const int kMaxLineLength = 1024;

// Helper appends a tab box to the string to indicate a newline. We can't use
// an actual newline as the file format is line-based text.
static void AppendTabBox(const Box* box, int height, int page, string* output) {
  char buffer[kMaxLineLength];
  // The synthetic box sits just to the right of the given box, 10px wide,
  // with y flipped so coordinates are measured up from the image bottom.
  int nbytes = snprintf(buffer, kMaxLineLength, "\t %d %d %d %d %d\n",
                        box->x + box->w, height - box->y - box->h,
                        box->x + box->w + 10, height - box->y, page);
  output->append(buffer, nbytes);
}
/* static */
// Writes the boxes to filename in Tesseract box-file format: one line per
// character, "<ch> left bottom right top page", with y measured up from the
// bottom of an image of the given height. Synthetic tab boxes mark detected
// newlines and synthetic space boxes fill the gaps between words.
void BoxChar::WriteTesseractBoxFile(const string& filename, int height,
                                    const vector<BoxChar*>& boxes) {
  string output;
  char buffer[kMaxLineLength];
  for (int i = 0; i < boxes.size(); ++i) {
    const Box* box = boxes[i]->box_;
    if (box != NULL) {
      // A box that starts left of where the previous one ended, on the same
      // page, signals a line wrap.
      if (i > 0 && boxes[i - 1]->box_ != NULL &&
          boxes[i - 1]->page_ == boxes[i]->page_ &&
          box->x + box->w < boxes[i - 1]->box_->x) {
        // We are on a newline. Output a tab character to indicate the newline.
        AppendTabBox(boxes[i - 1]->box_, height, boxes[i]->page_, &output);
      }
      int nbytes = snprintf(buffer, kMaxLineLength,
                            "%s %d %d %d %d %d\n",
                            boxes[i]->ch_.c_str(),
                            box->x, height - box->y - box->h,
                            box->x + box->w, height - box->y,
                            boxes[i]->page_);
      output.append(buffer, nbytes);
    } else if (i > 0 && boxes[i - 1]->box_ != NULL) {
      // A null box between two real boxes represents whitespace.
      int j = i + 1;
      // Find the next non-null box, as there may be multiple spaces.
      while (j < boxes.size() && boxes[j]->box_ == NULL) ++j;
      if (j < boxes.size() && boxes[i - 1]->page_ == boxes[j]->page_) {
        const Box* prev = boxes[i - 1]->box_;
        const Box* next = boxes[j]->box_;
        if (next->x + next->w < prev->x) {
          // We are on a newline. Output a tab character to indicate it.
          AppendTabBox(prev, height, boxes[j]->page_, &output);
        } else {
          // Space between words.
          // The space box spans the horizontal gap and the vertical union of
          // the neighboring boxes (in flipped, bottom-origin coordinates).
          int nbytes = snprintf(buffer, kMaxLineLength,
                                "  %d %d %d %d %d\n",
                                prev->x + prev->w,
                                height - MAX(prev->y + prev->h,
                                             next->y + next->h),
                                next->x, height - MIN(prev->y, next->y),
                                boxes[i - 1]->page_);
          output.append(buffer, nbytes);
        }
      }
    }
  }
  File::WriteStringToFileOrDie(output, filename);
}
} // namespace tesseract
| C++ |
/**********************************************************************
* File: icuerrorcode.h
* Description: Wrapper class for UErrorCode, with conversion operators for
* direct use in ICU C and C++ APIs.
* Author: Fredrik Roubert
* Created: Thu July 4 2013
*
* Features:
* - The constructor initializes the internal UErrorCode to U_ZERO_ERROR,
* removing one common source of errors.
* - Same use in C APIs taking a UErrorCode* (pointer) and C++ taking
* UErrorCode& (reference), via conversion operators.
* - Automatic checking for success when it goes out of scope. On failure,
* the destructor will log an error message and exit.
*
* Most of ICU will handle errors gracefully and provide sensible fallbacks.
* Using IcuErrorCode, it is therefore possible to write very compact code
* that does sensible things on failure and provides logging for debugging.
*
* Example:
* IcuErrorCode icuerrorcode;
* return collator.compareUTF8(a, b, icuerrorcode) == UCOL_EQUAL;
*
* (C) Copyright 2013, Google Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
**********************************************************************/
#ifndef TESSERACT_CCUTIL_ICUERRORCODE_H_
#define TESSERACT_CCUTIL_ICUERRORCODE_H_
#include "tprintf.h"
#include "unicode/errorcode.h" // From libicu
namespace tesseract {
class IcuErrorCode : public icu::ErrorCode {
 public:
  IcuErrorCode() {}
  // Checks for failure on destruction so an error cannot be silently
  // dropped when the code goes out of scope.
  virtual ~IcuErrorCode() {
    if (isFailure()) {
      handleFailure();
    }
  }

 protected:
  // Logs the ICU error name and terminates the process, using the
  // UErrorCode value as the exit status.
  virtual void handleFailure() const {
    tprintf("ICU ERROR: %s", errorName());
    exit(errorCode);
  }

 private:
  // Disallow implicit copying of object.
  IcuErrorCode(const IcuErrorCode&);
  void operator=(const IcuErrorCode&);
};
} // namespace tesseract
#endif // TESSERACT_CCUTIL_ICUERRORCODE_H_
| C++ |
/**********************************************************************
* File: pango_font_info.h
* Description: Font-related objects and helper functions
* Author: Ranjith Unnikrishnan
* Created: Mon Nov 18 2013
*
* (C) Copyright 2013, Google Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
**********************************************************************/
#ifndef TESSERACT_TRAINING_PANGO_FONT_INFO_H_
#define TESSERACT_TRAINING_PANGO_FONT_INFO_H_
#include <string>
#include <utility>
#include <vector>
#include "hashfn.h"
#include "host.h"
#include "util.h"
#include "pango/pango-font.h"
typedef signed int char32;
namespace tesseract {
// Data holder class for a font, intented to avoid having to work with Pango or
// FontConfig-specific objects directly.
class PangoFontInfo {
 public:
  // Broad stylistic category of a font.
  enum FontTypeEnum {
    UNKNOWN,
    SERIF,
    SANS_SERIF,
    DECORATIVE,
  };
  PangoFontInfo();
  // Initialize from parsing a font description name, defined as a string of
  // the format:
  //   "FamilyName [FaceName] [PointSize]"
  // where a missing FaceName implies the default regular face.
  // eg. "Arial Italic 12", "Verdana"
  //
  // FaceName is a combination of:
  //   [StyleName] [Variant] [Weight] [Stretch]
  // with (all optional) Pango-defined values of:
  // StyleName: Oblique, Italic
  // Variant  : Small-Caps
  // Weight   : Ultra-Light, Light, Medium, Semi-Bold, Bold, Ultra-Bold, Heavy
  // Stretch  : Ultra-Condensed, Extra-Condensed, Condensed, Semi-Condensed,
  //            Semi-Expanded, Expanded, Extra-Expanded, Ultra-Expanded.
  explicit PangoFontInfo(const string& name);
  bool ParseFontDescriptionName(const string& name);

  // Returns true if the font have codepoint coverage for the specified text.
  bool CoversUTF8Text(const char* utf8_text, int byte_length) const;
  // Modifies string to remove unicode points that are not covered by the
  // font. Returns the number of characters dropped.
  int DropUncoveredChars(string* utf8_text) const;
  // Returns true if the entire string can be rendered by the font with full
  // character coverage and no unknown glyph or dotted-circle glyph
  // substitutions on encountering a badly formed unicode sequence.
  // If true, returns individual graphemes. Any whitespace characters in the
  // original string are also included in the list.
  bool CanRenderString(const char* utf8_word, int len,
                       vector<string>* graphemes) const;
  bool CanRenderString(const char* utf8_word, int len) const;
  // Retrieves the x_bearing and x_advance for the given utf8 character in the
  // font. Returns false if the glyph for the character could not be found in
  // the font.
  // Ref: http://freetype.sourceforge.net/freetype2/docs/glyphs/glyphs-3.html
  bool GetSpacingProperties(const string& utf8_char,
                            int* x_bearing, int* x_advance) const;

  // Accessors.
  string DescriptionName() const;
  // Font Family name eg. "Arial"
  const string& family_name() const { return family_name_; }
  // Size in points (1/72"), rounded to the nearest integer.
  // Note: the top-level const that used to qualify these by-value return
  // types was dropped; it had no effect and produced -Wignored-qualifiers
  // warnings. Call sites are unaffected.
  int font_size() const { return font_size_; }
  bool is_bold() const { return is_bold_; }
  bool is_italic() const { return is_italic_; }
  bool is_smallcaps() const { return is_smallcaps_; }
  bool is_monospace() const { return is_monospace_; }
  bool is_fraktur() const { return is_fraktur_; }
  FontTypeEnum font_type() const { return font_type_; }

  int resolution() const { return resolution_; }
  void set_resolution(const int resolution) {
    resolution_ = resolution;
  }

 private:
  friend class FontUtils;
  void Clear();
  bool ParseFontDescription(const PangoFontDescription* desc);
  // Returns the PangoFont structure corresponding to the closest available
  // font in the font map.
  PangoFont* ToPangoFont() const;

  // Font properties set automatically from parsing the font description name.
  string family_name_;
  int font_size_;
  bool is_bold_;
  bool is_italic_;
  bool is_smallcaps_;
  bool is_monospace_;
  bool is_fraktur_;
  FontTypeEnum font_type_;
  // The Pango description that was used to initialize the instance.
  PangoFontDescription* desc_;
  // Default output resolution to assume for GetSpacingProperties() and any
  // other methods that returns pixel values.
  int resolution_;

 private:
  // Disallow copying (not implemented).
  PangoFontInfo(const PangoFontInfo&);
  void operator=(const PangoFontInfo&);
};
// Static utility methods for querying font availability and font-selection
// based on codepoint coverage.
class FontUtils {
 public:
  // Returns true if the font of the given description name is available in the
  // target directory specified by --fonts_dir
  static bool IsAvailableFont(const char* font_desc);
  // Outputs description names of available fonts.
  static const vector<string>& ListAvailableFonts();
  // Picks font among available fonts that covers and can render the given word,
  // and returns the font description name and the decomposition of the word to
  // graphemes. Returns false if no suitable font was found.
  static bool SelectFont(const char* utf8_word, const int utf8_len,
                         string* font_name, vector<string>* graphemes);
  // Picks font among all_fonts that covers and can render the given word,
  // and returns the font description name and the decomposition of the word to
  // graphemes. Returns false if no suitable font was found.
  static bool SelectFont(const char* utf8_word, const int utf8_len,
                         const vector<string>& all_fonts,
                         string* font_name, vector<string>* graphemes);
  // Returns a bitmask where the value of true at index 'n' implies that unicode
  // value 'n' is renderable by at least one available font.
  static void GetAllRenderableCharacters(vector<bool>* unichar_bitmap);
  // Variant of the above function that inspects only the provided font names.
  static void GetAllRenderableCharacters(const vector<string>& font_names,
                                         vector<bool>* unichar_bitmap);
  // Variant of the above that inspects a single named font.
  static void GetAllRenderableCharacters(const string& font_name,
                                         vector<bool>* unichar_bitmap);

  // NOTE: The following utilities were written to be backward compatible with
  // StringRender.

  // BestFonts returns a font name and a bit vector of the characters it
  // can render for the fonts that score within some fraction of the best
  // font on the characters in the given hash map.
  // In the flags vector, each flag is set according to whether the
  // corresponding character (in order of iterating ch_map) can be rendered.
  // The return string is a list of the acceptable fonts that were used.
  static string BestFonts(const unordered_map<char32, inT64>& ch_map,
      vector<std::pair<const char*, vector<bool> > >* font_flag);
  // FontScore returns the weighted renderability score of the given
  // hash map character table in the given font. The unweighted score
  // is also returned in raw_score.
  // The values in the bool vector ch_flags correspond to whether the
  // corresponding character (in order of iterating ch_map) can be rendered.
  static int FontScore(const unordered_map<char32, inT64>& ch_map,
                       const string& fontname, int* raw_score,
                       vector<bool>* ch_flags);
};
} // namespace tesseract
#endif // TESSERACT_TRAINING_PANGO_FONT_INFO_H_
| C++ |
/**********************************************************************
* File: getopt.c
* Description: Re-implementation of the unix code.
* Author: Ray Smith
* Created: Tue Nov 28 05:52:50 MST 1995
*
* (C) Copyright 1995, Hewlett-Packard Co.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
#include <string.h>
#include <stdio.h>
#include "tessopt.h"
// Index of the next element of argv to be processed. Zero-initialized;
// tessopt() resets it to 1 on first use.
int tessoptind;
// Value of the last option that took an argument: either the tail of the
// option string itself ("-ofile") or the following argv entry ("-o file").
char *tessoptarg;
/**********************************************************************
* tessopt
*
* parse command line args.
**********************************************************************/
// Minimal getopt()-style option parser. arglist holds the legal option
// characters; a character followed by ':' takes a value, which may be
// attached ("-ofile") or the next argv entry ("-o file"). Returns the
// option character, '?' for an unknown option, or EOF when the next
// argument is not an option. State is kept in tessoptind/tessoptarg.
int tessopt ( //parse args
inT32 argc, //arg count
char *argv[], //args
const char *arglist //string of arg chars
) {
  const char *arg; //arg char

  // First call: skip argv[0] (the program name).
  if (tessoptind == 0)
    tessoptind = 1;
  if (tessoptind < argc && argv[tessoptind][0] == '-') {
    // Look the option character up in the legal list. A leading ':' in
    // arglist is never a valid option character.
    arg = strchr (arglist, argv[tessoptind][1]);
    if (arg == NULL || *arg == ':')
      return '?'; //dud option
    tessoptind++;
    // Provisionally point at the next argv entry as the value. NOTE: if the
    // option is the last argument, this is argv[argc], which the C standard
    // guarantees to be NULL; callers must check tessoptarg.
    tessoptarg = argv[tessoptind];
    if (arg[1] == ':') {
      // Option takes a value.
      if (argv[tessoptind - 1][2] != '\0')
        //immediately after
        tessoptarg = argv[tessoptind - 1] + 2;
      else
        // Value was the separate next argument; consume it too.
        tessoptind++;
    }
    return *arg;
  }
  else
    return EOF;
}
| C++ |
/**********************************************************************
* File: ligature_table.h
* Description: Class for adding and removing optional latin ligatures,
* conditional on codepoint support by a specified font
* (if specified).
* Author: Ranjith Unnikrishnan
* Created: Mon Nov 18 2013
*
* (C) Copyright 2013, Google Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
**********************************************************************/
#ifndef TRAININGDATA_LIGATURE_TABLE_H_
#define TRAININGDATA_LIGATURE_TABLE_H_
#include <string>
#include "hashfn.h"
#include "util.h"
namespace tesseract {
class PangoFontInfo; // defined in pango_font_info.h
// Map to substitute strings for ligatures.
typedef hash_map<string, string, StringHash> LigHash;
class LigatureTable {
 public:
  // Get a static instance of this class.
  static LigatureTable* Get();
  // Convert the utf8 string so that ligaturizable sequences, such as "fi" get
  // replaced by the (utf8 code for) appropriate ligature characters. Only do so
  // if the corresponding ligature character is renderable in the current font.
  string AddLigatures(const string& str, const PangoFontInfo* font) const;
  // Remove all ligatures.
  string RemoveLigatures(const string& str) const;
  // Remove only custom ligatures (eg. "ct") encoded in the private-use-area.
  string RemoveCustomLigatures(const string& str) const;

  // Accessors for the normalized-text <-> ligature mappings.
  const LigHash& norm_to_lig_table() const {
    return norm_to_lig_table_;
  }
  const LigHash& lig_to_norm_table() const {
    return lig_to_norm_table_;
  }

 protected:
  LigatureTable();
  // Initialize the hash tables mapping between ligature strings and the
  // corresponding ligature characters.
  void Init();

  // Singleton instance returned by Get().
  static SmartPtr<LigatureTable> instance_;
  // Maps normalized sequences (eg. "fi") to ligature utf8 strings.
  LigHash norm_to_lig_table_;
  // Inverse mapping of norm_to_lig_table_.
  LigHash lig_to_norm_table_;
  // Min/max byte lengths of the table keys, used to bound substring
  // searches in AddLigatures().
  int min_lig_length_;
  int max_lig_length_;
  int min_norm_length_;
  int max_norm_length_;

 private:
  // Disallow copying (not implemented).
  LigatureTable(const LigatureTable&);
  void operator=(const LigatureTable&);
};
} // namespace tesseract
#endif  // TRAININGDATA_LIGATURE_TABLE_H_
| C++ |
/**********************************************************************
* File: ligature_table.cpp
* Description: Class for adding and removing optional latin ligatures,
* conditional on codepoint support by a specified font
* (if specified).
* Author: Ranjith Unnikrishnan
* Created: Mon Nov 18 2013
*
* (C) Copyright 2013, Google Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
**********************************************************************/
#include "ligature_table.h"
#include <utility>
#include "pango_font_info.h"
#include "tlog.h"
#include "unichar.h"
#include "unicharset.h"
#include "unicode/errorcode.h" // from libicu
#include "unicode/normlzr.h" // from libicu
#include "unicode/unistr.h" // from libicu
#include "unicode/utypes.h" // from libicu
namespace tesseract {
// Returns the UTF-8 byte sequence encoding the given UCS-4 codepoint.
static string EncodeAsUTF8(const char32 ch32) {
  const UNICHAR uch(ch32);
  return string(uch.utf8(), uch.utf8_len());
}
// Range of optional latin ligature characters in Unicode to build ligatures
// from. Note that this range does not contain the custom ligatures that we
// encode in the private use area. This is the Alphabetic Presentation Forms
// block (fi, fl, ffi, ...).
const int kMinLigature = 0xfb00;
const int kMaxLigature = 0xfb4f;
/* static */
// Lazily-created singleton; see LigatureTable::Get().
SmartPtr<LigatureTable> LigatureTable::instance_;
/* static */
// Returns the lazily-constructed singleton, building and initializing the
// ligature tables on first use.
LigatureTable* LigatureTable::Get() {
  if (instance_ != NULL) {
    return instance_.get();
  }
  LigatureTable* table = new LigatureTable();
  instance_.reset(table);
  table->Init();
  return instance_.get();
}
// All length bounds start at 0 meaning "not yet computed"; Init() fills
// them in as the tables are built.
LigatureTable::LigatureTable() : min_lig_length_(0), max_lig_length_(0),
                                 min_norm_length_(0), max_norm_length_(0) {}
// Builds the bidirectional ligature <-> normalized-text tables. Idempotent:
// does nothing if the tables are already populated.
void LigatureTable::Init() {
  if (norm_to_lig_table_.empty()) {
    for (char32 lig = kMinLigature; lig <= kMaxLigature; ++lig) {
      // For each char in the range, convert to utf8, nfkc normalize, and if
      // the strings are different put the both mappings in the hash_maps.
      string lig8 = EncodeAsUTF8(lig);
      icu::UnicodeString unicode_lig8(static_cast<UChar32>(lig));
      icu::UnicodeString normed8_result;
      icu::ErrorCode status;
      icu::Normalizer::normalize(unicode_lig8, UNORM_NFKC, 0, normed8_result,
                                 status);
      string normed8;
      normed8_result.toUTF8String(normed8);
      // The icu::Normalizer maps the "LONG S T" ligature to "st". Correct that
      // here manually so that AddLigatures() will work as desired.
      if (lig8 == "\uFB05")
        normed8 = "ſt";
      int lig_length = lig8.length();
      int norm_length = normed8.size();
      // Only multi-byte mappings that actually change the text are useful
      // as substitutions.
      if (normed8 != lig8 && lig_length > 1 && norm_length > 1) {
        norm_to_lig_table_[normed8] = lig8;
        lig_to_norm_table_[lig8] = normed8;
        // Track min/max key lengths to bound searches in AddLigatures().
        if (min_lig_length_ == 0 || lig_length < min_lig_length_)
          min_lig_length_ = lig_length;
        if (lig_length > max_lig_length_)
          max_lig_length_ = lig_length;
        if (min_norm_length_ == 0 || norm_length < min_norm_length_)
          min_norm_length_ = norm_length;
        if (norm_length > max_norm_length_)
          max_norm_length_ = norm_length;
      }
    }
    // Add custom extra ligatures.
    for (int i = 0; UNICHARSET::kCustomLigatures[i][0] != NULL; ++i) {
      norm_to_lig_table_[UNICHARSET::kCustomLigatures[i][0]] =
          UNICHARSET::kCustomLigatures[i][1];
      int norm_length = strlen(UNICHARSET::kCustomLigatures[i][0]);
      if (min_norm_length_ == 0 || norm_length < min_norm_length_)
        min_norm_length_ = norm_length;
      if (norm_length > max_norm_length_)
        max_norm_length_ = norm_length;
      lig_to_norm_table_[UNICHARSET::kCustomLigatures[i][1]] =
          UNICHARSET::kCustomLigatures[i][0];
    }
  }
}
// Replaces every ligature character in str with its normalized (multi-char)
// equivalent; all other characters are copied through unchanged.
string LigatureTable::RemoveLigatures(const string& str) const {
  string result;
  const char* bytes = str.c_str();
  const int length = str.length();
  char utf8_buf[5];
  UNICHAR::const_iterator it_end = UNICHAR::end(bytes, length);
  for (UNICHAR::const_iterator it = UNICHAR::begin(bytes, length);
       it != it_end; ++it) {
    const int nbytes = it.get_utf8(utf8_buf);
    utf8_buf[nbytes] = '\0';
    // If this character is a known ligature, substitute its expansion.
    LigHash::const_iterator entry = lig_to_norm_table_.find(utf8_buf);
    if (entry == lig_to_norm_table_.end()) {
      result += utf8_buf;
    } else {
      result += entry->second;
    }
  }
  return result;
}
// Replaces only the custom (private-use-area) ligatures in str with their
// normalized text from UNICHARSET::kCustomLigatures; everything else is
// copied through unchanged.
string LigatureTable::RemoveCustomLigatures(const string& str) const {
  string result;
  const char* bytes = str.c_str();
  const int length = str.length();
  char utf8_buf[5];
  UNICHAR::const_iterator it_end = UNICHAR::end(bytes, length);
  for (UNICHAR::const_iterator it = UNICHAR::begin(bytes, length);
       it != it_end; ++it) {
    const int nbytes = it.get_utf8(utf8_buf);
    utf8_buf[nbytes] = '\0';
    // Linear scan of the (small, NULL-terminated) custom ligature table.
    int match = -1;
    for (int k = 0; UNICHARSET::kCustomLigatures[k][0] != NULL; ++k) {
      if (strcmp(utf8_buf, UNICHARSET::kCustomLigatures[k][1]) == 0) {
        match = k;
        break;
      }
    }
    if (match >= 0) {
      result += UNICHARSET::kCustomLigatures[match][0];
    } else {
      result += utf8_buf;
    }
  }
  return result;
}
// Scans str for normalized sequences (eg. "fi", "ffi") that have a ligature
// equivalent, substituting the ligature when either no font is given or the
// font can render it. Longer candidates are tried first so "ffi" wins over
// "fi". Returns the converted string.
string LigatureTable::AddLigatures(const string& str,
                                   const PangoFontInfo* font) const {
  string result;
  int len = str.size();
  int step = 0;  // Number of input bytes consumed by the last iteration.
  int i = 0;
  // Stop early enough that at least min_norm_length_ bytes remain.
  for (i = 0; i < len - min_norm_length_ + 1; i += step) {
    step = 0;
    for (int liglen = max_norm_length_; liglen >= min_norm_length_; --liglen) {
      if (i + liglen <= len) {
        string lig_cand = str.substr(i, liglen);
        LigHash::const_iterator it = norm_to_lig_table_.find(lig_cand);
        if (it != norm_to_lig_table_.end()) {
          tlog(3, "Considering %s -> %s\n", lig_cand.c_str(),
               it->second.c_str());
          if (font) {
            // Test for renderability.
            if (!font->CanRenderString(it->second.data(), it->second.length()))
              continue;  // Not renderable
          }
          // Found a match so convert it.
          step = liglen;
          result += it->second;
          tlog(2, "Substituted %s -> %s\n", lig_cand.c_str(),
               it->second.c_str());
          break;
        }
      }
    }
    // No ligature starts at i: copy a single byte and advance.
    if (step == 0) {
      result += str[i];
      step = 1;
    }
  }
  // Copy the tail that is too short to contain any normalized sequence.
  result += str.substr(i, len - i);
  return result;
}
} // namespace tesseract
| C++ |
///////////////////////////////////////////////////////////////////////
// File: dawg2wordlist.cpp
// Description: Program to create a word list from a DAWG and unicharset.
// Author: David Eger
// Created: Thu 22 Dec 2011
//
// (C) Copyright 2011, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#include "dawg.h"
#include "host.h"
#include "tesscallback.h"
#include "trie.h"
#include "unicharset.h"
// Debug level passed to dictionary construction below.
const int kDictDebugLevel = 1;

// Loads a SquishedDawg from the given file. Returns NULL (after printing a
// message) if the file cannot be opened. The caller owns the returned Dawg.
// Note: the unicharset parameter is currently unused but kept for interface
// symmetry with WriteDawgAsWordlist.
tesseract::Dawg *LoadSquishedDawg(const UNICHARSET &unicharset,
                                  const char *filename) {
  // The previous local kDictDebugLevel shadowed the identical file-level
  // constant above; use the file-level one directly.
  FILE *dawg_file = fopen(filename, "rb");
  if (dawg_file == NULL) {
    tprintf("Could not open %s for reading.\n", filename);
    return NULL;
  }
  tprintf("Loading word list from %s\n", filename);
  tesseract::Dawg *retval = new tesseract::SquishedDawg(
      dawg_file, tesseract::DAWG_TYPE_WORD, "eng", SYSTEM_DAWG_PERM,
      kDictDebugLevel);
  tprintf("Word list loaded.\n");
  fclose(dawg_file);
  return retval;
}
// Writes words, one per line, to a caller-owned FILE. Does not take
// ownership of (or close) the file.
class WordOutputter {
 public:
  // explicit: a FILE* should never silently convert to a WordOutputter.
  explicit WordOutputter(FILE *file) : file_(file) {}
  // Appends word followed by a newline to the output file.
  void output_word(const char *word) { fprintf(file_, "%s\n", word); }

 private:
  FILE *file_;  // Not owned.
};
// Iterates all words of dawg and writes them, one per line, to outfile_name.
// Returns 0 if successful, nonzero on open/close failure.
int WriteDawgAsWordlist(const UNICHARSET &unicharset,
                        const tesseract::Dawg *dawg,
                        const char *outfile_name) {
  FILE *wordlist_file = fopen(outfile_name, "wb");
  if (wordlist_file == NULL) {
    tprintf("Could not open %s for writing.\n", outfile_name);
    return 1;
  }
  WordOutputter writer(wordlist_file);
  // Walk the dawg, funneling each word through the outputter.
  TessCallback1<const char *> *word_cb =
      NewPermanentTessCallback(&writer, &WordOutputter::output_word);
  dawg->iterate_words(unicharset, word_cb);
  delete word_cb;
  return fclose(wordlist_file);
}
int main(int argc, char *argv[]) {
if (argc != 4) {
tprintf("Print all the words in a given dawg.\n");
tprintf("Usage: %s <unicharset> <dawgfile> <wordlistfile>\n",
argv[0]);
return 1;
}
const char *unicharset_file = argv[1];
const char *dawg_file = argv[2];
const char *wordlist_file = argv[3];
UNICHARSET unicharset;
if (!unicharset.load_from_file(unicharset_file)) {
tprintf("Error loading unicharset from %s.\n", unicharset_file);
return 1;
}
tesseract::Dawg *dict = LoadSquishedDawg(unicharset, dawg_file);
if (dict == NULL) {
tprintf("Error loading dictionary from %s.\n", dawg_file);
return 1;
}
int retval = WriteDawgAsWordlist(unicharset, dict, wordlist_file);
delete dict;
return retval;
}
| C++ |
#include "commandlineflags.h"
#ifdef USE_STD_NAMESPACE
namespace tesseract {
bool IntFlagExists(const char* flag_name, inT32* value) {
STRING full_flag_name("FLAGS_");
full_flag_name += flag_name;
GenericVector<IntParam*> empty;
IntParam *p = ParamUtils::FindParam<IntParam>(
full_flag_name.string(), GlobalParams()->int_params, empty);
if (p == NULL) return false;
*value = (inT32)(*p);
return true;
}
bool DoubleFlagExists(const char* flag_name, double* value) {
STRING full_flag_name("FLAGS_");
full_flag_name += flag_name;
GenericVector<DoubleParam*> empty;
DoubleParam *p = ParamUtils::FindParam<DoubleParam>(
full_flag_name.string(), GlobalParams()->double_params, empty);
if (p == NULL) return false;
*value = static_cast<double>(*p);
return true;
}
bool BoolFlagExists(const char* flag_name, bool* value) {
STRING full_flag_name("FLAGS_");
full_flag_name += flag_name;
GenericVector<BoolParam*> empty;
BoolParam *p = ParamUtils::FindParam<BoolParam>(
full_flag_name.string(), GlobalParams()->bool_params, empty);
if (p == NULL) return false;
*value = (BOOL8)(*p);
return true;
}
bool StringFlagExists(const char* flag_name, const char** value) {
STRING full_flag_name("FLAGS_");
full_flag_name += flag_name;
GenericVector<StringParam*> empty;
StringParam *p = ParamUtils::FindParam<StringParam>(
full_flag_name.string(), GlobalParams()->string_params, empty);
*value = (p != NULL) ? p->string() : NULL;
return p != NULL;
}
void SetIntFlagValue(const char* flag_name, const inT32 new_val) {
STRING full_flag_name("FLAGS_");
full_flag_name += flag_name;
GenericVector<IntParam*> empty;
IntParam *p = ParamUtils::FindParam<IntParam>(
full_flag_name.string(), GlobalParams()->int_params, empty);
ASSERT_HOST(p != NULL);
p->set_value(new_val);
}
void SetDoubleFlagValue(const char* flag_name, const double new_val) {
STRING full_flag_name("FLAGS_");
full_flag_name += flag_name;
GenericVector<DoubleParam*> empty;
DoubleParam *p = ParamUtils::FindParam<DoubleParam>(
full_flag_name.string(), GlobalParams()->double_params, empty);
ASSERT_HOST(p != NULL);
p->set_value(new_val);
}
void SetBoolFlagValue(const char* flag_name, const bool new_val) {
STRING full_flag_name("FLAGS_");
full_flag_name += flag_name;
GenericVector<BoolParam*> empty;
BoolParam *p = ParamUtils::FindParam<BoolParam>(
full_flag_name.string(), GlobalParams()->bool_params, empty);
ASSERT_HOST(p != NULL);
p->set_value(new_val);
}
void SetStringFlagValue(const char* flag_name, const char* new_val) {
STRING full_flag_name("FLAGS_");
full_flag_name += flag_name;
GenericVector<StringParam*> empty;
StringParam *p = ParamUtils::FindParam<StringParam>(
full_flag_name.string(), GlobalParams()->string_params, empty);
ASSERT_HOST(p != NULL);
p->set_value(STRING(new_val));
}
// Parses a base-10 integer from str into *val. Returns true only if the
// whole, non-empty string was consumed.
bool SafeAtoi(const char* str, int* val) {
  char *endptr = NULL;
  *val = strtol(str, &endptr, 10);
  // endptr == str means no digits were consumed (eg. the empty string, which
  // the previous version wrongly accepted as 0); trailing characters after
  // the number are also rejected.
  return endptr != NULL && endptr != str && *endptr == '\0';
}
// Parses a floating-point value from str into *val. Returns true only if
// the whole, non-empty string was consumed.
bool SafeAtod(const char* str, double* val) {
  char *endptr = NULL;
  *val = strtod(str, &endptr);
  // endptr == str means nothing was parsed (eg. the empty string, which the
  // previous version wrongly accepted as 0.0); trailing garbage is rejected.
  return endptr != NULL && endptr != str && *endptr == '\0';
}
// Prints one usage line per registered parameter whose name carries the
// "FLAGS_" prefix (ie. parameters declared through the *_PARAM_FLAG
// macros), showing commandline name, help text, type and default value.
void PrintCommandLineFlags() {
  const char* kFlagNamePrefix = "FLAGS_";
  const int kFlagNamePrefixLen = strlen(kFlagNamePrefix);
  // Int flags.
  for (int i = 0; i < GlobalParams()->int_params.size(); ++i) {
    if (!strncmp(GlobalParams()->int_params[i]->name_str(),
                 kFlagNamePrefix, kFlagNamePrefixLen)) {
      tprintf(" --%s %s (type:int default:%d)\n",
              GlobalParams()->int_params[i]->name_str() + kFlagNamePrefixLen,
              GlobalParams()->int_params[i]->info_str(),
              inT32(*(GlobalParams()->int_params[i])));
    }
  }
  // Double flags.
  for (int i = 0; i < GlobalParams()->double_params.size(); ++i) {
    if (!strncmp(GlobalParams()->double_params[i]->name_str(),
                 kFlagNamePrefix, kFlagNamePrefixLen)) {
      tprintf(" --%s %s (type:double default:%g)\n",
              GlobalParams()->double_params[i]->name_str() + kFlagNamePrefixLen,
              GlobalParams()->double_params[i]->info_str(),
              static_cast<double>(*(GlobalParams()->double_params[i])));
    }
  }
  // Bool flags.
  for (int i = 0; i < GlobalParams()->bool_params.size(); ++i) {
    if (!strncmp(GlobalParams()->bool_params[i]->name_str(),
                 kFlagNamePrefix, kFlagNamePrefixLen)) {
      tprintf(" --%s %s (type:bool default:%s)\n",
              GlobalParams()->bool_params[i]->name_str() + kFlagNamePrefixLen,
              GlobalParams()->bool_params[i]->info_str(),
              (BOOL8(*(GlobalParams()->bool_params[i])) ? "true" : "false"));
    }
  }
  // String flags.
  for (int i = 0; i < GlobalParams()->string_params.size(); ++i) {
    if (!strncmp(GlobalParams()->string_params[i]->name_str(),
                 kFlagNamePrefix, kFlagNamePrefixLen)) {
      tprintf(" --%s %s (type:string default:%s)\n",
              GlobalParams()->string_params[i]->name_str() + kFlagNamePrefixLen,
              GlobalParams()->string_params[i]->info_str(),
              GlobalParams()->string_params[i]->string());
    }
  }
}
// Parses flags of the forms --flag=value and --flag value from argc/argv,
// setting the corresponding FLAGS_* global parameters, and stopping at the
// first argument that does not begin with a hyphen. Supports inT32, double,
// bool and string flags. --help/--helpshort print the usage string plus all
// registered flags and exit(0). Exits with an error message on a malformed
// argument or a non-existent flag. If remove_flags is true, *argc/*argv are
// advanced past the consumed flag arguments on return.
void ParseCommandLineFlags(const char* usage,
                           int* argc, char*** argv,
                           const bool remove_flags) {
  // Index of the first non-flag argument. Use a signed int to match the
  // type of *argc; the previous unsigned declaration caused sign-compare
  // warnings against *argc and the pointer arithmetic below.
  int i = 1;
  for (i = 1; i < *argc; ++i) {
    const char* current_arg = (*argv)[i];
    // If argument does not start with a hyphen then break.
    if (current_arg[0] != '-') {
      break;
    }
    // Position current_arg after startings hyphens. We treat a sequence of
    // consecutive hyphens of any length identically.
    while (*current_arg == '-') {
      ++current_arg;
    }
    // If this is asking for usage, print the help message and abort.
    if (!strcmp(current_arg, "help") ||
        !strcmp(current_arg, "helpshort")) {
      tprintf("USAGE: %s\n", usage);
      PrintCommandLineFlags();
      exit(0);
    }
    // Find the starting position of the value if it was specified in this
    // string.
    const char* equals_position = strchr(current_arg, '=');
    const char* rhs = NULL;
    if (equals_position != NULL) {
      rhs = equals_position + 1;
    }
    // Extract the flag name.
    STRING lhs;
    if (equals_position == NULL) {
      lhs = current_arg;
    } else {
      lhs.assign(current_arg, equals_position - current_arg);
    }
    if (!lhs.length()) {
      tprintf("ERROR: Bad argument: %s\n", (*argv)[i]);
      exit(1);
    }

    // Find the flag name in the list of global flags.
    // inT32 flag
    inT32 int_val;
    if (IntFlagExists(lhs.string(), &int_val)) {
      if (rhs != NULL) {
        if (!strlen(rhs)) {
          // Bad input of the format --int_flag=
          tprintf("ERROR: Bad argument: %s\n", (*argv)[i]);
          exit(1);
        }
        if (!SafeAtoi(rhs, &int_val)) {
          tprintf("ERROR: Could not parse int from %s in flag %s\n",
                  rhs, (*argv)[i]);
          exit(1);
        }
      } else {
        // We need to parse the next argument
        if (i + 1 >= *argc) {
          tprintf("ERROR: Could not find value argument for flag %s\n",
                  lhs.string());
          exit(1);
        } else {
          ++i;
          if (!SafeAtoi((*argv)[i], &int_val)) {
            tprintf("ERROR: Could not parse inT32 from %s\n", (*argv)[i]);
            exit(1);
          }
        }
      }
      SetIntFlagValue(lhs.string(), int_val);
      continue;
    }

    // double flag
    double double_val;
    if (DoubleFlagExists(lhs.string(), &double_val)) {
      if (rhs != NULL) {
        if (!strlen(rhs)) {
          // Bad input of the format --double_flag=
          tprintf("ERROR: Bad argument: %s\n", (*argv)[i]);
          exit(1);
        }
        if (!SafeAtod(rhs, &double_val)) {
          tprintf("ERROR: Could not parse double from %s in flag %s\n",
                  rhs, (*argv)[i]);
          exit(1);
        }
      } else {
        // We need to parse the next argument
        if (i + 1 >= *argc) {
          tprintf("ERROR: Could not find value argument for flag %s\n",
                  lhs.string());
          exit(1);
        } else {
          ++i;
          if (!SafeAtod((*argv)[i], &double_val)) {
            tprintf("ERROR: Could not parse double from %s\n", (*argv)[i]);
            exit(1);
          }
        }
      }
      SetDoubleFlagValue(lhs.string(), double_val);
      continue;
    }

    // Bool flag. Allow input forms --flag (equivalent to --flag=true),
    // --flag=false, --flag=true, --flag=0 and --flag=1
    bool bool_val;
    if (BoolFlagExists(lhs.string(), &bool_val)) {
      if (rhs == NULL) {
        // --flag form
        bool_val = true;
      } else {
        if (!strlen(rhs)) {
          // Bad input of the format --bool_flag=
          tprintf("ERROR: Bad argument: %s\n", (*argv)[i]);
          exit(1);
        }
        if (!strcmp(rhs, "false") || !strcmp(rhs, "0")) {
          bool_val = false;
        } else if (!strcmp(rhs, "true") || !strcmp(rhs, "1")) {
          bool_val = true;
        } else {
          tprintf("ERROR: Could not parse bool from flag %s\n", (*argv)[i]);
          exit(1);
        }
      }
      SetBoolFlagValue(lhs.string(), bool_val);
      continue;
    }

    // string flag
    const char* string_val;
    if (StringFlagExists(lhs.string(), &string_val)) {
      if (rhs != NULL) {
        string_val = rhs;
      } else {
        // Pick the next argument
        if (i + 1 >= *argc) {
          tprintf("ERROR: Could not find string value for flag %s\n",
                  lhs.string());
          exit(1);
        } else {
          string_val = (*argv)[++i];
        }
      }
      SetStringFlagValue(lhs.string(), string_val);
      continue;
    }

    // Flag was not found. Exit with an error message.
    tprintf("ERROR: Non-existent flag %s\n", (*argv)[i]);
    exit(1);
  }  // for each argv
  if (remove_flags) {
    // Keep the program name in place and advance past all consumed flags.
    (*argv)[i - 1] = (*argv)[0];
    (*argv) += (i - 1);
    (*argc) -= (i - 1);
  }
}
} // namespace tesseract
#else
#include "base/init_google.h"
namespace tesseract {
// When not building with USE_STD_NAMESPACE, flag parsing is delegated to
// InitGoogle(), which implements the same commandline semantics.
void ParseCommandLineFlags(const char* usage,
                           int* argc, char*** argv,
                           const bool remove_flags) {
  InitGoogle(usage, argc, argv, remove_flags);
}
} // namespace tesseract
#endif
| C++ |
// Copyright 2008 Google Inc. All Rights Reserved.
// Author: scharron@google.com (Samuel Charron)
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "commontraining.h"
#include "allheaders.h"
#include "ccutil.h"
#include "classify.h"
#include "cluster.h"
#include "clusttool.h"
#include "efio.h"
#include "emalloc.h"
#include "featdefs.h"
#include "fontinfo.h"
#include "freelist.h"
#include "globals.h"
#include "intfeaturespace.h"
#include "mastertrainer.h"
#include "mf.h"
#include "ndminx.h"
#include "oldlist.h"
#include "params.h"
#include "shapetable.h"
#include "tessdatamanager.h"
#include "tessopt.h"
#include "tprintf.h"
#include "unicity_table.h"
#include <math.h>
using tesseract::CCUtil;
using tesseract::FontInfo;
using tesseract::IntFeatureSpace;
using tesseract::ParamUtils;
using tesseract::ShapeTable;
// Global Variables.
// global variable to hold configuration parameters to control clustering
// -M 0.625 -B 0.05 -I 1.0 -C 1e-6.
CLUSTERCONFIG Config = { elliptical, 0.625, 0.05, 1.0, 1e-6, 0 };
// Feature definitions shared by the training tools; filled in by
// InitFeatureDefs (see LoadTrainingData below).
FEATURE_DEFS_STRUCT feature_defs;
// Receives parameters read from -configfile (see ParseArguments below).
CCUtil ccutil;
// Command-line flags common to the training programs.
INT_PARAM_FLAG(debug_level, 0, "Level of Trainer debugging");
INT_PARAM_FLAG(load_images, 0, "Load images with tr files");
STRING_PARAM_FLAG(configfile, "", "File to load more configs from");
STRING_PARAM_FLAG(D, "", "Directory to write output files to");
STRING_PARAM_FLAG(F, "font_properties", "File listing font properties");
STRING_PARAM_FLAG(X, "", "File listing font xheights");
STRING_PARAM_FLAG(U, "unicharset", "File to load unicharset from");
STRING_PARAM_FLAG(O, "", "File to write unicharset to");
STRING_PARAM_FLAG(T, "", "File to load trainer from");
STRING_PARAM_FLAG(output_trainer, "", "File to write trainer to");
STRING_PARAM_FLAG(test_ch, "", "UTF8 test character string");
// The clusterconfig_* flags default to the values in Config above; they are
// clamped to [0, 1] and copied back into Config by ParseArguments.
DOUBLE_PARAM_FLAG(clusterconfig_min_samples_fraction, Config.MinSamples,
                  "Min number of samples per proto as % of total");
DOUBLE_PARAM_FLAG(clusterconfig_max_illegal, Config.MaxIllegal,
                  "Max percentage of samples in a cluster which have more"
                  " than 1 feature in that cluster");
DOUBLE_PARAM_FLAG(clusterconfig_independence, Config.Independence,
                  "Desired independence between dimensions");
DOUBLE_PARAM_FLAG(clusterconfig_confidence, Config.Confidence,
                  "Desired confidence in prototypes created");
/*
** Parameters:
** argc number of command line arguments to parse
** argv command line arguments
** Globals:
** Config current clustering parameters
** Operation:
** This routine parses the command line arguments that were
** passed to the program and uses them to set relevant
** training-related global parameters
** Return: none
** Exceptions: Illegal options terminate the program.
*/
void ParseArguments(int* argc, char ***argv) {
  STRING usage;
  if (*argc) {
    usage += (*argv)[0];
  }
  usage += " [.tr files ...]";
  tesseract::ParseCommandLineFlags(usage.c_str(), argc, argv, true);
  // Record the index of the first non-flag argument to 1, since we set
  // remove_flags to true when parsing the flags.
  tessoptind = 1;
  // Set some global values based on the flags, clamping each to [0, 1].
  Config.MinSamples =
      MAX(0.0, MIN(1.0, double(FLAGS_clusterconfig_min_samples_fraction)));
  Config.MaxIllegal =
      MAX(0.0, MIN(1.0, double(FLAGS_clusterconfig_max_illegal)));
  Config.Independence =
      MAX(0.0, MIN(1.0, double(FLAGS_clusterconfig_independence)));
  Config.Confidence =
      MAX(0.0, MIN(1.0, double(FLAGS_clusterconfig_confidence)));
  // Set additional parameters from config file if specified.
  if (!FLAGS_configfile.empty()) {
    tesseract::ParamUtils::ReadParamsFile(
        FLAGS_configfile.c_str(),
        tesseract::SET_PARAM_CONSTRAINT_NON_INIT_ONLY,
        ccutil.params());
  }
}
namespace tesseract {
// Reads the shape table from <file_prefix><kShapeTableFileSuffix>.
// Returns a newly allocated ShapeTable owned by the caller, or NULL if the
// file is absent or unreadable.
ShapeTable* LoadShapeTable(const STRING& file_prefix) {
  STRING filename = file_prefix;
  filename += kShapeTableFileSuffix;
  FILE* fp = fopen(filename.string(), "rb");
  if (fp == NULL) {
    tprintf("Warning: No shape table file present: %s\n",
            filename.string());
    return NULL;
  }
  ShapeTable* shape_table = new ShapeTable;
  if (shape_table->DeSerialize(false, fp)) {
    tprintf("Read shape table %s of %d shapes\n",
            filename.string(), shape_table->NumShapes());
  } else {
    delete shape_table;
    shape_table = NULL;
    tprintf("Error: Failed to read shape table %s\n",
            filename.string());
  }
  fclose(fp);
  return shape_table;
}
// Helper to write the shape_table.
void WriteShapeTable(const STRING& file_prefix, const ShapeTable& shape_table) {
STRING shape_table_file = file_prefix;
shape_table_file += kShapeTableFileSuffix;
FILE* fp = fopen(shape_table_file.string(), "wb");
if (fp != NULL) {
if (!shape_table.Serialize(fp)) {
fprintf(stderr, "Error writing shape table: %s\n",
shape_table_file.string());
}
fclose(fp);
} else {
fprintf(stderr, "Error creating shape table: %s\n",
shape_table_file.string());
}
}
// Creates a MasterTrainer and loads the training data into it:
// Initializes feature_defs and IntegerFX.
// Loads the shape_table if shape_table != NULL.
// Loads initial unicharset from -U command-line option.
// If FLAGS_T is set, loads the majority of data from there, else:
//   Loads font info from -F option.
//   Loads xheights from -X option.
//   Loads samples from .tr files in remaining command-line args.
//   Deletes outliers and computes canonical samples.
//   If FLAGS_output_trainer is set, saves the trainer for future use.
// Computes canonical and cloud features.
// If shape_table is not NULL, but failed to load, make a fake flat one,
// as shape clustering was not run.
// Returns NULL on any load failure; the caller owns the returned trainer
// and, when shape_table != NULL, the returned *shape_table.
MasterTrainer* LoadTrainingData(int argc, const char* const * argv,
                                bool replication,
                                ShapeTable** shape_table,
                                STRING* file_prefix) {
  InitFeatureDefs(&feature_defs);
  InitIntegerFX();
  *file_prefix = "";
  if (!FLAGS_D.empty()) {
    *file_prefix += FLAGS_D.c_str();
    *file_prefix += "/";
  }
  // If we are shape clustering (NULL shape_table) or we successfully load
  // a shape_table written by a previous shape clustering, then
  // shape_analysis will be true, meaning that the MasterTrainer will replace
  // some members of the unicharset with their fragments.
  bool shape_analysis = false;
  if (shape_table != NULL) {
    *shape_table = LoadShapeTable(*file_prefix);
    if (*shape_table != NULL)
      shape_analysis = true;
  } else {
    shape_analysis = true;
  }
  MasterTrainer* trainer = new MasterTrainer(NM_CHAR_ANISOTROPIC,
                                             shape_analysis,
                                             replication,
                                             FLAGS_debug_level);
  IntFeatureSpace fs;
  fs.Init(kBoostXYBuckets, kBoostXYBuckets, kBoostDirBuckets);
  if (FLAGS_T.empty()) {
    trainer->LoadUnicharset(FLAGS_U.c_str());
    // Get basic font information from font_properties.
    if (!FLAGS_F.empty()) {
      if (!trainer->LoadFontInfo(FLAGS_F.c_str())) {
        delete trainer;
        return NULL;
      }
    }
    if (!FLAGS_X.empty()) {
      if (!trainer->LoadXHeights(FLAGS_X.c_str())) {
        delete trainer;
        return NULL;
      }
    }
    trainer->SetFeatureSpace(fs);
    const char* page_name;
    // Load training data from .tr files on the command line.
    while ((page_name = GetNextFilename(argc, argv)) != NULL) {
      tprintf("Reading %s ...\n", page_name);
      trainer->ReadTrainingSamples(page_name, feature_defs, false);
      // If there is a file with [lang].[fontname].exp[num].fontinfo present,
      // read font spacing information in to fontinfo_table.
      // Buffer size = (len - 2 for the dropped "tr") + 8 for "fontinfo"
      // + 1 for '\0' == len + 7, so the strcpy below exactly fits.
      int pagename_len = strlen(page_name);
      char *fontinfo_file_name = new char[pagename_len + 7];
      strncpy(fontinfo_file_name, page_name, pagename_len - 2);  // remove "tr"
      strcpy(fontinfo_file_name + pagename_len - 2, "fontinfo");  // +"fontinfo"
      trainer->AddSpacingInfo(fontinfo_file_name);
      delete[] fontinfo_file_name;
      // Load the images into memory if required by the classifier.
      if (FLAGS_load_images) {
        STRING image_name = page_name;
        // Chop off the tr and replace with tif. Extension must be tif!
        image_name.truncate_at(image_name.length() - 2);
        image_name += "tif";
        trainer->LoadPageImages(image_name.string());
      }
    }
    trainer->PostLoadCleanup();
    // Write the master trainer if required.
    if (!FLAGS_output_trainer.empty()) {
      FILE* fp = fopen(FLAGS_output_trainer.c_str(), "wb");
      if (fp == NULL) {
        tprintf("Can't create saved trainer data!\n");
      } else {
        trainer->Serialize(fp);
        fclose(fp);
      }
    }
  } else {
    // -T given: skip sample loading and restore a previously saved trainer.
    bool success = false;
    tprintf("Loading master trainer from file:%s\n",
            FLAGS_T.c_str());
    FILE* fp = fopen(FLAGS_T.c_str(), "rb");
    if (fp == NULL) {
      tprintf("Can't read file %s to initialize master trainer\n",
              FLAGS_T.c_str());
    } else {
      success = trainer->DeSerialize(false, fp);
      fclose(fp);
    }
    if (!success) {
      tprintf("Deserialize of master trainer failed!\n");
      delete trainer;
      return NULL;
    }
    trainer->SetFeatureSpace(fs);
  }
  trainer->PreTrainingSetup();
  // Optionally dump the (possibly modified) unicharset via -O.
  if (!FLAGS_O.empty() &&
      !trainer->unicharset().save_to_file(FLAGS_O.c_str())) {
    fprintf(stderr, "Failed to save unicharset to file %s\n", FLAGS_O.c_str());
    delete trainer;
    return NULL;
  }
  if (shape_table != NULL) {
    // If we previously failed to load a shapetable, then shape clustering
    // wasn't run so make a flat one now.
    if (*shape_table == NULL) {
      *shape_table = new ShapeTable;
      trainer->SetupFlatShapeTable(*shape_table);
      tprintf("Flat shape table summary: %s\n",
              (*shape_table)->SummaryStr().string());
    }
    (*shape_table)->set_unicharset(trainer->unicharset());
  }
  return trainer;
}
} // namespace tesseract.
/*---------------------------------------------------------------------------*/
// Returns the next non-option command line argument, or NULL when none
// remain. Uses and advances the global tessoptind, so it must be called only
// after ParseArguments has stripped all the option arguments.
const char *GetNextFilename(int argc, const char* const * argv) {
  return (tessoptind < argc) ? argv[tessoptind++] : NULL;
} /* GetNextFilename */
/*---------------------------------------------------------------------------*/
// Searches List (a list of labeled lists) for the entry whose label equals
// Label. Returns the matching labeled list, or NULL if no entry matches.
LABELEDLIST FindList(LIST List, char *Label) {
  iterate(List) {
    LABELEDLIST entry = (LABELEDLIST)first_node(List);
    if (strcmp(entry->Label, Label) == 0)
      return entry;
  }
  return NULL;
} /* FindList */
/*---------------------------------------------------------------------------*/
// Allocates a new, empty labeled list carrying a heap copy of Label.
// The caller owns the result (freed via FreeLabeledList).
LABELEDLIST NewLabeledList(const char *Label) {
  LABELEDLIST labeled = (LABELEDLIST)Emalloc(sizeof(LABELEDLISTNODE));
  labeled->Label = (char*)Emalloc(strlen(Label) + 1);
  strcpy(labeled->Label, Label);
  labeled->List = NIL_LIST;
  labeled->SampleCount = 0;
  labeled->font_sample_count = 0;
  return labeled;
} /* NewLabeledList */
/*---------------------------------------------------------------------------*/
// TODO(rays) This is now used only by cntraining. Convert cntraining to use
// the new method or get rid of it entirely.
// Reads training samples of type feature_name from file and organizes them
// into *training_samples, one LABELEDLIST per unichar label. Unichars not
// already in unicharset (when non-NULL) are inserted; exceeding
// MAX_NUM_CLASSES terminates the program. Per-class sample counts are capped
// at max_samples (<= 0 means unlimited; excess feature sets are freed).
// Feature sets of types other than feature_name are always freed.
void ReadTrainingSamples(const FEATURE_DEFS_STRUCT& feature_defs,
                         const char *feature_name, int max_samples,
                         UNICHARSET* unicharset,
                         FILE* file, LIST* training_samples) {
  char buffer[2048];
  char unichar[UNICHAR_LEN + 1];
  LABELEDLIST char_sample;
  FEATURE_SET feature_samples;
  CHAR_DESC char_desc;
  int i;
  int feature_type = ShortNameToFeatureType(feature_defs, feature_name);
  // Build a width-limited scan format ("%*s %<UNICHAR_LEN>s") so that an
  // over-long unichar token in a corrupt .tr file cannot overflow the
  // fixed-size unichar buffer.
  char scan_format[32];
  snprintf(scan_format, sizeof(scan_format), "%%*s %%%ds", UNICHAR_LEN);
  // Zero out the font_sample_count for all the classes.
  LIST it = *training_samples;
  iterate(it) {
    char_sample = reinterpret_cast<LABELEDLIST>(first_node(it));
    char_sample->font_sample_count = 0;
  }
  while (fgets(buffer, 2048, file) != NULL) {
    if (buffer[0] == '\n')
      continue;
    // Each sample line is "<fontname> <unichar>"; keep only the unichar.
    sscanf(buffer, scan_format, unichar);
    if (unicharset != NULL && !unicharset->contains_unichar(unichar)) {
      unicharset->unichar_insert(unichar);
      if (unicharset->size() > MAX_NUM_CLASSES) {
        tprintf("Error: Size of unicharset in training is "
                "greater than MAX_NUM_CLASSES\n");
        exit(1);
      }
    }
    char_sample = FindList(*training_samples, unichar);
    if (char_sample == NULL) {
      char_sample = NewLabeledList(unichar);
      *training_samples = push(*training_samples, char_sample);
    }
    char_desc = ReadCharDescription(feature_defs, file);
    feature_samples = char_desc->FeatureSets[feature_type];
    if (char_sample->font_sample_count < max_samples || max_samples <= 0) {
      char_sample->List = push(char_sample->List, feature_samples);
      char_sample->SampleCount++;
      char_sample->font_sample_count++;
    } else {
      FreeFeatureSet(feature_samples);
    }
    // Free the feature sets of every type we are not training on.
    for (i = 0; i < char_desc->NumFeatureSets; i++) {
      if (feature_type != i)
        FreeFeatureSet(char_desc->FeatureSets[i]);
    }
    free(char_desc);
  }
} // ReadTrainingSamples
/*---------------------------------------------------------------------------*/
// Deallocates every feature set held by every labeled list in CharList,
// then each labeled list itself, and finally the list skeleton.
void FreeTrainingSamples(LIST CharList) {
  iterate(CharList) {
    LABELEDLIST char_sample = (LABELEDLIST)first_node(CharList);
    LIST features = char_sample->List;
    iterate(features) {
      FreeFeatureSet((FEATURE_SET)first_node(features));
    }
    FreeLabeledList(char_sample);
  }
  destroy(CharList);
} /* FreeTrainingSamples */
/*---------------------------------------------------------------------------*/
// Frees the memory owned by LabeledList itself — its label string and its
// list skeleton — but NOT the items stored in the list.
void FreeLabeledList(LABELEDLIST LabeledList) {
  destroy(LabeledList->List);
  free(LabeledList->Label);
  free(LabeledList);
} /* FreeLabeledList */
/*---------------------------------------------------------------------------*/
// Builds a CLUSTERER for program_feature_type and enters one sample per
// feature found in char_sample's feature sets, numbering samples by the
// index of the feature set they came from. Returns the new clusterer,
// owned by the caller.
CLUSTERER *SetUpForClustering(const FEATURE_DEFS_STRUCT &FeatureDefs,
                              LABELEDLIST char_sample,
                              const char* program_feature_type) {
  int desc_index = ShortNameToFeatureType(FeatureDefs, program_feature_type);
  uinT16 num_params = FeatureDefs.FeatureDesc[desc_index]->NumParams;
  CLUSTERER *clusterer =
      MakeClusterer(num_params, FeatureDefs.FeatureDesc[desc_index]->ParamDesc);
  FLOAT32 *sample = NULL;  // Scratch buffer, allocated lazily on first use.
  inT32 char_id = 0;
  LIST feature_list = char_sample->List;
  iterate(feature_list) {
    FEATURE_SET feature_set = (FEATURE_SET)first_node(feature_list);
    for (int i = 0; i < feature_set->MaxNumFeatures; i++) {
      if (sample == NULL)
        sample = (FLOAT32 *)Emalloc(num_params * sizeof(FLOAT32));
      for (int j = 0; j < num_params; j++)
        sample[j] = feature_set->Features[i]->Params[j];
      MakeSample(clusterer, sample, char_id);
    }
    char_id++;
  }
  if (sample != NULL)
    free(sample);
  return clusterer;
} /* SetUpForClustering */
/*------------------------------------------------------------------------*/
// For each insignificant ("red"), unmerged prototype, finds the nearest
// unmerged prototype within distance 0.125. If that neighbor is also
// insignificant, the two are merged via MergeClusters into the neighbor;
// if the neighbor is significant ("green"), the red proto is just marked
// merged. Finally, any surviving red proto whose sample count reaches
// Config->MinSamples * NumChar is promoted to significant.
// Debug output is printed when label equals FLAGS_test_ch.
void MergeInsignificantProtos(LIST ProtoList, const char* label,
                              CLUSTERER *Clusterer, CLUSTERCONFIG *Config) {
  PROTOTYPE *Prototype;
  bool debug = strcmp(FLAGS_test_ch.c_str(), label) == 0;
  LIST pProtoList = ProtoList;
  iterate(pProtoList) {
    Prototype = (PROTOTYPE *) first_node (pProtoList);
    if (Prototype->Significant || Prototype->Merged)
      continue;
    // 0.125 is the maximum distance at which a merge may happen; anything
    // farther leaves best_match NULL.
    FLOAT32 best_dist = 0.125;
    PROTOTYPE* best_match = NULL;
    // Find the nearest alive prototype.
    LIST list_it = ProtoList;
    iterate(list_it) {
      PROTOTYPE* test_p = (PROTOTYPE *) first_node (list_it);
      if (test_p != Prototype && !test_p->Merged) {
        FLOAT32 dist = ComputeDistance(Clusterer->SampleSize,
                                       Clusterer->ParamDesc,
                                       Prototype->Mean, test_p->Mean);
        if (dist < best_dist) {
          best_match = test_p;
          best_dist = dist;
        }
      }
    }
    if (best_match != NULL && !best_match->Significant) {
      // Both red: best_match absorbs Prototype's samples via MergeClusters
      // and Prototype is retired.
      if (debug)
        tprintf("Merging red clusters (%d+%d) at %g,%g and %g,%g\n",
                best_match->NumSamples, Prototype->NumSamples,
                best_match->Mean[0], best_match->Mean[1],
                Prototype->Mean[0], Prototype->Mean[1]);
      best_match->NumSamples = MergeClusters(Clusterer->SampleSize,
                                             Clusterer->ParamDesc,
                                             best_match->NumSamples,
                                             Prototype->NumSamples,
                                             best_match->Mean,
                                             best_match->Mean, Prototype->Mean);
      Prototype->NumSamples = 0;
      Prototype->Merged = 1;
    } else if (best_match != NULL) {
      // Red proto matched a green one: mark it merged without transferring
      // its samples.
      if (debug)
        tprintf("Red proto at %g,%g matched a green one at %g,%g\n",
                Prototype->Mean[0], Prototype->Mean[1],
                best_match->Mean[0], best_match->Mean[1]);
      Prototype->Merged = 1;
    }
  }
  // Mark significant those that now have enough samples.
  int min_samples = (inT32) (Config->MinSamples * Clusterer->NumChar);
  pProtoList = ProtoList;
  iterate(pProtoList) {
    Prototype = (PROTOTYPE *) first_node (pProtoList);
    // Process insignificant protos that do not match a green one
    if (!Prototype->Significant && Prototype->NumSamples >= min_samples &&
        !Prototype->Merged) {
      if (debug)
        tprintf("Red proto at %g,%g becoming green\n",
                Prototype->Mean[0], Prototype->Mean[1]);
      Prototype->Significant = true;
    }
  }
} /* MergeInsignificantProtos */
/*-----------------------------------------------------------------------------*/
// Frees the Elliptical variance, magnitude and weight arrays attached to
// each prototype in ProtoList, NULLing each pointer so it cannot be freed
// twice.
void CleanUpUnusedData(LIST ProtoList) {
  iterate(ProtoList) {
    PROTOTYPE* proto = (PROTOTYPE *)first_node(ProtoList);
    if (proto->Variance.Elliptical != NULL) {
      memfree(proto->Variance.Elliptical);
      proto->Variance.Elliptical = NULL;
    }
    if (proto->Magnitude.Elliptical != NULL) {
      memfree(proto->Magnitude.Elliptical);
      proto->Magnitude.Elliptical = NULL;
    }
    if (proto->Weight.Elliptical != NULL) {
      memfree(proto->Weight.Elliptical);
      proto->Weight.Elliptical = NULL;
    }
  }
}
/*------------------------------------------------------------------------*/
LIST RemoveInsignificantProtos(
LIST ProtoList,
BOOL8 KeepSigProtos,
BOOL8 KeepInsigProtos,
int N)
{
LIST NewProtoList = NIL_LIST;
LIST pProtoList;
PROTOTYPE* Proto;
PROTOTYPE* NewProto;
int i;
pProtoList = ProtoList;
iterate(pProtoList)
{
Proto = (PROTOTYPE *) first_node (pProtoList);
if ((Proto->Significant && KeepSigProtos) ||
(!Proto->Significant && KeepInsigProtos))
{
NewProto = (PROTOTYPE *)Emalloc(sizeof(PROTOTYPE));
NewProto->Mean = (FLOAT32 *)Emalloc(N * sizeof(FLOAT32));
NewProto->Significant = Proto->Significant;
NewProto->Style = Proto->Style;
NewProto->NumSamples = Proto->NumSamples;
NewProto->Cluster = NULL;
NewProto->Distrib = NULL;
for (i=0; i < N; i++)
NewProto->Mean[i] = Proto->Mean[i];
if (Proto->Variance.Elliptical != NULL)
{
NewProto->Variance.Elliptical = (FLOAT32 *)Emalloc(N * sizeof(FLOAT32));
for (i=0; i < N; i++)
NewProto->Variance.Elliptical[i] = Proto->Variance.Elliptical[i];
}
else
NewProto->Variance.Elliptical = NULL;
//---------------------------------------------
if (Proto->Magnitude.Elliptical != NULL)
{
NewProto->Magnitude.Elliptical = (FLOAT32 *)Emalloc(N * sizeof(FLOAT32));
for (i=0; i < N; i++)
NewProto->Magnitude.Elliptical[i] = Proto->Magnitude.Elliptical[i];
}
else
NewProto->Magnitude.Elliptical = NULL;
//------------------------------------------------
if (Proto->Weight.Elliptical != NULL)
{
NewProto->Weight.Elliptical = (FLOAT32 *)Emalloc(N * sizeof(FLOAT32));
for (i=0; i < N; i++)
NewProto->Weight.Elliptical[i] = Proto->Weight.Elliptical[i];
}
else
NewProto->Weight.Elliptical = NULL;
NewProto->TotalMagnitude = Proto->TotalMagnitude;
NewProto->LogMagnitude = Proto->LogMagnitude;
NewProtoList = push_last(NewProtoList, NewProto);
}
}
FreeProtoList(&ProtoList);
return (NewProtoList);
} /* RemoveInsignificantProtos */
/*----------------------------------------------------------------------------*/
// Searches List (a list of merge classes) for the entry whose label equals
// Label. Returns the matching merge class, or NULL if none matches.
MERGE_CLASS FindClass(LIST List, const char *Label) {
  iterate(List) {
    MERGE_CLASS merge_class = (MERGE_CLASS)first_node(List);
    if (strcmp(merge_class->Label, Label) == 0)
      return merge_class;
  }
  return NULL;
} /* FindClass */
/*---------------------------------------------------------------------------*/
// Allocates a new merge class labeled with a heap copy of Label and holding
// a freshly created class of maximum proto/config capacity.
MERGE_CLASS NewLabeledClass(const char *Label) {
  MERGE_CLASS merge_class = new MERGE_CLASS_NODE;
  merge_class->Label = (char*)Emalloc(strlen(Label) + 1);
  strcpy(merge_class->Label, Label);
  merge_class->Class = NewClass(MAX_NUM_PROTOS, MAX_NUM_CONFIGS);
  return merge_class;
} /* NewLabeledClass */
/*-----------------------------------------------------------------------------*/
// Deallocates the label, the class data, and the node of every merge class
// in ClassList, then destroys the list skeleton itself.
void FreeLabeledClassList(LIST ClassList) {
  iterate(ClassList) {
    MERGE_CLASS merge_class = (MERGE_CLASS)first_node(ClassList);
    free(merge_class->Label);
    FreeClass(merge_class->Class);
    delete merge_class;
  }
  destroy(ClassList);
} /* FreeLabeledClassList */
/** SetUpForFloat2Int **************************************************/
// Converts the merge classes in LabeledClassList into an array of
// CLASS_STRUCT indexed by unichar id, deep-copying each proto and
// configuration. Each proto additionally gets (A, B, C) coefficients
// derived from its (X, Y, Angle) by Normalize.
// Caller owns the returned array of unicharset.size() elements.
CLASS_STRUCT* SetUpForFloat2Int(const UNICHARSET& unicharset,
                                LIST LabeledClassList) {
  MERGE_CLASS MergeClass;
  CLASS_TYPE Class;
  int NumProtos;
  int NumConfigs;
  int NumWords;
  int i, j;
  float Values[3];
  PROTO NewProto;
  PROTO OldProto;
  BIT_VECTOR NewConfig;
  BIT_VECTOR OldConfig;
  // printf("Float2Int ...\n");
  CLASS_STRUCT* float_classes = new CLASS_STRUCT[unicharset.size()];
  iterate(LabeledClassList)
  {
    UnicityTableEqEq<int> font_set;
    MergeClass = (MERGE_CLASS) first_node (LabeledClassList);
    // The destination slot is selected by the label's unichar id.
    Class = &float_classes[unicharset.unichar_to_id(MergeClass->Label)];
    NumProtos = MergeClass->Class->NumProtos;
    NumConfigs = MergeClass->Class->NumConfigs;
    font_set.move(&MergeClass->Class->font_set);
    Class->NumProtos = NumProtos;
    Class->MaxNumProtos = NumProtos;
    Class->Prototypes = (PROTO) Emalloc (sizeof(PROTO_STRUCT) * NumProtos);
    for(i=0; i < NumProtos; i++)
    {
      // Copy the proto, computing its (A, B, C) from (X, Y, Angle).
      NewProto = ProtoIn(Class, i);
      OldProto = ProtoIn(MergeClass->Class, i);
      Values[0] = OldProto->X;
      Values[1] = OldProto->Y;
      Values[2] = OldProto->Angle;
      Normalize(Values);
      NewProto->X = OldProto->X;
      NewProto->Y = OldProto->Y;
      NewProto->Length = OldProto->Length;
      NewProto->Angle = OldProto->Angle;
      NewProto->A = Values[0];
      NewProto->B = Values[1];
      NewProto->C = Values[2];
    }
    Class->NumConfigs = NumConfigs;
    Class->MaxNumConfigs = NumConfigs;
    Class->font_set.move(&font_set);
    Class->Configurations = (BIT_VECTOR*) Emalloc (sizeof(BIT_VECTOR) * NumConfigs);
    NumWords = WordsInVectorOfSize(NumProtos);
    for(i=0; i < NumConfigs; i++)
    {
      // Copy each configuration bit vector word by word.
      NewConfig = NewBitVector(NumProtos);
      OldConfig = MergeClass->Class->Configurations[i];
      for(j=0; j < NumWords; j++)
        NewConfig[j] = OldConfig[j];
      Class->Configurations[i] = NewConfig;
    }
  }
  return float_classes;
} // SetUpForFloat2Int
/*--------------------------------------------------------------------------*/
// Rewrites Values in place: on entry Values holds (x, y, angle) of a proto;
// on return it holds the (A, B, C) coefficients stored by SetUpForFloat2Int.
void Normalize(float *Values) {
  float slope = tan(Values[2] * 2 * PI);
  float intercept = Values[1] - slope * Values[0];
  float normalizer = 1 / sqrt(slope * slope + 1.0);
  Values[0] = slope * normalizer;
  Values[1] = -normalizer;
  Values[2] = intercept * normalizer;
} // Normalize
/*-------------------------------------------------------------------------*/
// Frees every labeled list in CharList and then the list skeleton itself.
// The prototypes held inside each labeled list are not freed here.
void FreeNormProtoList(LIST CharList) {
  iterate(CharList) {
    FreeLabeledList((LABELEDLIST)first_node(CharList));
  }
  destroy(CharList);
} // FreeNormProtoList
/*---------------------------------------------------------------------------*/
// Wraps the prototypes of ProtoList in a new labeled list named CharName
// and pushes that labeled list onto the front of *NormProtoList.
void AddToNormProtosList(LIST* NormProtoList, LIST ProtoList, char* CharName) {
  LABELEDLIST labeled = NewLabeledList(CharName);
  iterate(ProtoList) {
    PROTOTYPE* proto = (PROTOTYPE *)first_node(ProtoList);
    labeled->List = push(labeled->List, proto);
  }
  *NormProtoList = push(*NormProtoList, labeled);
}
/*---------------------------------------------------------------------------*/
// Counts the prototypes in ProtoList, including significant protos only when
// CountSigProtos is set and insignificant ones only when CountInsigProtos is.
int NumberOfProtos(LIST ProtoList, BOOL8 CountSigProtos,
                   BOOL8 CountInsigProtos) {
  int count = 0;
  iterate(ProtoList) {
    PROTOTYPE* proto = (PROTOTYPE *)first_node(ProtoList);
    if ((proto->Significant && CountSigProtos) ||
        (!proto->Significant && CountInsigProtos))
      count++;
  }
  return count;
}
| C++ |
///////////////////////////////////////////////////////////////////////
// File: unicharset_extractor.cpp
// Description: Unicode character/ligature set extractor.
// Author: Thomas Kielbus
// Created: Wed Jun 28 17:05:01 PDT 2006
//
// (C) Copyright 2006, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
// Given a list of box files on the command line, this program generates a file
// containing a unicharset, a list of all the characters used by Tesseract
//
// The file contains the size of the set on the first line, and then one
// unichar per line.
#include <stdio.h>
#if defined(HAVE_WCHAR_T) || defined(_WIN32) || defined(GOOGLE3)
#include <wchar.h>
#include <wctype.h>
#define USING_WCTYPE
#endif
#include <locale.h>
#include "boxread.h"
#include "rect.h"
#include "strngs.h"
#include "tessopt.h"
#include "unichar.h"
#include "unicharset.h"
// Name of the output file written into the chosen output directory.
static const char* const kUnicharsetFileName = "unicharset";
// Converts Unicode code point wc to its id in unicharset via the UTF-8
// encoding of the character.
UNICHAR_ID wc_to_unichar_id(const UNICHARSET &unicharset, int wc) {
  UNICHAR uch(wc);
  char *utf8 = uch.utf8_str();
  UNICHAR_ID id = unicharset.unichar_to_id(utf8);
  delete[] utf8;
  return id;
}
// Set character properties using wctype if we have it.
// Contributed by piggy@gmail.com.
// Modified by Ray to use UNICHAR for unicode conversion
// and to check for wctype using autoconf/presence of windows.
void set_properties(UNICHARSET *unicharset, const char* const c_string) {
#ifdef USING_WCTYPE
  // Look up the string's id and default its other_case to itself.
  UNICHAR_ID id = unicharset->unichar_to_id(c_string);
  unicharset->set_other_case(id, id);
  int step = UNICHAR::utf8_step(c_string);
  if (step == 0)
    return;  // Invalid utf-8.
  // Classify by the first Unicode code point of the string.
  UNICHAR ch(c_string, step);
  int wc = ch.first_uni();
  if (iswalpha(wc)) {
    unicharset->set_isalpha(id, 1);
    if (iswlower(wc)) {
      unicharset->set_islower(id, 1);
      unicharset->set_other_case(
          id, wc_to_unichar_id(*unicharset, towupper(wc)));
    }
    if (iswupper(wc)) {
      unicharset->set_isupper(id, 1);
      unicharset->set_other_case(
          id, wc_to_unichar_id(*unicharset, towlower(wc)));
    }
  }
  if (iswdigit(wc))
    unicharset->set_isdigit(id, 1);
  if (iswpunct(wc))
    unicharset->set_ispunctuation(id, 1);
#endif
}
// Entry point: extracts the set of unichars used by the box files given on
// the command line and writes it to <output_directory>/unicharset.
// Usage: unicharset_extractor [-D DIRECTORY] FILE...
// Returns 0 on success, -1 on any file error.
int main(int argc, char** argv) {
  int option;
  const char* output_directory = ".";
  STRING unicharset_file_name;
  // Special characters are now included by default.
  UNICHARSET unicharset;
  setlocale(LC_ALL, "");
  // Print usage
  if (argc <= 1) {
    printf("Usage: %s [-D DIRECTORY] FILE...\n", argv[0]);
    exit(1);
  }
  // Parse arguments
  while ((option = tessopt(argc, argv, "D" )) != EOF) {
    switch (option) {
      case 'D':
        // NOTE(review): the option spec "D" carries no argument marker, so
        // the directory is presumably picked up from tessoptarg with
        // tessoptind advanced by hand — confirm against tessopt's contract.
        output_directory = tessoptarg;
        ++tessoptind;
        break;
    }
  }
  // Save file name
  unicharset_file_name = output_directory;
  unicharset_file_name += "/";
  unicharset_file_name += kUnicharsetFileName;
  // Load box files
  for (; tessoptind < argc; ++tessoptind) {
    printf("Extracting unicharset from %s\n", argv[tessoptind]);
    FILE* box_file = fopen(argv[tessoptind], "rb");
    if (box_file == NULL) {
      printf("Cannot open box file %s\n", argv[tessoptind]);
      return -1;
    }
    TBOX box;
    STRING unichar_string;
    int line_number = 0;
    while (ReadNextBox(&line_number, box_file, &unichar_string, &box)) {
      unicharset.unichar_insert(unichar_string.string());
      set_properties(&unicharset, unichar_string.string());
    }
    // Close each box file; previously it was leaked once per input file.
    fclose(box_file);
  }
  // Write unicharset file
  if (unicharset.save_to_file(unicharset_file_name.string())) {
    printf("Wrote unicharset file %s.\n", unicharset_file_name.string());
  }
  else {
    printf("Cannot save unicharset file %s.\n", unicharset_file_name.string());
    return -1;
  }
  return 0;
}
| C++ |
///////////////////////////////////////////////////////////////////////
// File: combine_tessdata
// Description: Creates a unified traineddata file from several
// data files produced by the training process.
// Author: Daria Antonova
// Created: Wed Jun 03 11:26:43 PST 2009
//
// (C) Copyright 2009, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#include "tessdatamanager.h"
// Main program to combine/extract/overwrite tessdata components
// in [lang].traineddata files.
//
// To combine all the individual tessdata components (unicharset, DAWGs,
// classifier templates, ambiguities, language configs) located at, say,
// /home/$USER/temp/eng.* run:
//
// combine_tessdata /home/$USER/temp/eng.
//
// The result will be a combined tessdata file /home/$USER/temp/eng.traineddata
//
// Specify option -e if you would like to extract individual components
// from a combined traineddata file. For example, to extract language config
// file and the unicharset from tessdata/eng.traineddata run:
//
// combine_tessdata -e tessdata/eng.traineddata
// /home/$USER/temp/eng.config /home/$USER/temp/eng.unicharset
//
// The desired config file and unicharset will be written to
// /home/$USER/temp/eng.config /home/$USER/temp/eng.unicharset
//
// Specify option -o to overwrite individual components of the given
// [lang].traineddata file. For example, to overwrite language config
// and unichar ambiguities files in tessdata/eng.traineddata use:
//
// combine_tessdata -o tessdata/eng.traineddata
// /home/$USER/temp/eng.config /home/$USER/temp/eng.unicharambigs
//
// As a result, tessdata/eng.traineddata will contain the new language config
// and unichar ambigs, plus all the original DAWGs, classifier templates, etc.
//
// Note: the file names of the files to extract to and to overwrite from should
// have the appropriate file suffixes (extensions) indicating their tessdata
// component type (.unicharset for the unicharset, .unicharambigs for unichar
// ambigs, etc). See k*FileSuffix variable in ccutil/tessdatamanager.h.
//
// Specify option -u to unpack all the components to the specified path:
//
// combine_tessdata -u tessdata/eng.traineddata /home/$USER/temp/eng.
//
// This will create /home/$USER/temp/eng.* files with individual tessdata
// components from tessdata/eng.traineddata.
//
// Main program to combine/extract/overwrite/unpack tessdata components in
// [lang].traineddata files (see the mode documentation above).
// Returns 0 on success and nonzero on bad usage or failure, so scripts can
// check the exit status.
int main(int argc, char **argv) {
  if (argc == 2) {
    // Combine mode: argv[1] is the path prefix of the input components.
    printf("Combining tessdata files\n");
    STRING lang = argv[1];
    char* last = &argv[1][strlen(argv[1]) - 1];
    if (*last != '.')
      lang += '.';
    STRING output_file = lang;
    output_file += kTrainedDataSuffix;
    if (!tesseract::TessdataManager::CombineDataFiles(
        lang.string(), output_file.string())) {
      printf("Error combining tessdata files into %s\n",
             output_file.string());
      // Previously this error was reported but the exit status was still 0.
      return 1;
    }
    printf("Output %s created successfully.\n", output_file.string());
  } else if (argc >= 4 && (strcmp(argv[1], "-e") == 0 ||
                           strcmp(argv[1], "-u") == 0)) {
    // Extract mode (-e: named components) or unpack mode (-u: everything).
    // Initialize TessdataManager with the data in the given traineddata file.
    tesseract::TessdataManager tm;
    if (!tm.Init(argv[2], 0)) {
      printf("Failed to read %s\n", argv[2]);
      return 1;
    }
    printf("Extracting tessdata components from %s\n", argv[2]);
    if (strcmp(argv[1], "-e") == 0) {
      // Extract only the components named on the commandline.
      for (int i = 3; i < argc; ++i) {
        if (tm.ExtractToFile(argv[i])) {
          printf("Wrote %s\n", argv[i]);
        } else {
          printf("Not extracting %s, since this component"
                 " is not present\n", argv[i]);
        }
      }
    } else {  // extract all the components
      for (int i = 0; i < tesseract::TESSDATA_NUM_ENTRIES; ++i) {
        STRING filename = argv[3];
        char* last = &argv[3][strlen(argv[3]) - 1];
        if (*last != '.')
          filename += '.';
        filename += tesseract::kTessdataFileSuffixes[i];
        if (tm.ExtractToFile(filename.string())) {
          printf("Wrote %s\n", filename.string());
        }
      }
    }
    tm.End();
  } else if (argc >= 4 && strcmp(argv[1], "-o") == 0) {
    // Overwrite mode: replace components of argv[2] with the given files.
    // Rename the current traineddata file to a temporary name.
    const char *new_traineddata_filename = argv[2];
    STRING traineddata_filename = new_traineddata_filename;
    traineddata_filename += ".__tmp__";
    if (rename(new_traineddata_filename, traineddata_filename.string()) != 0) {
      tprintf("Failed to create a temporary file %s\n",
              traineddata_filename.string());
      exit(1);
    }
    // Initialize TessdataManager with the data in the given traineddata file.
    tesseract::TessdataManager tm;
    if (!tm.Init(traineddata_filename.string(), 0)) {
      // NOTE: the original file has already been renamed to .__tmp__ at this
      // point; it is left in place so the data is not lost.
      tprintf("Failed to read %s\n", traineddata_filename.string());
      exit(1);
    }
    // Write the updated traineddata file.
    tm.OverwriteComponents(new_traineddata_filename, argv + 3, argc - 3);
    tm.End();
  } else {
    printf("Usage for combining tessdata components:\n"
           " %s language_data_path_prefix\n"
           " (e.g. %s tessdata/eng.)\n\n", argv[0], argv[0]);
    printf("Usage for extracting tessdata components:\n"
           " %s -e traineddata_file [output_component_file...]\n"
           " (e.g. %s -e eng.traineddata eng.unicharset)\n\n",
           argv[0], argv[0]);
    printf("Usage for overwriting tessdata components:\n"
           " %s -o traineddata_file [input_component_file...]\n"
           " (e.g. %s -o eng.traineddata eng.unicharset)\n\n",
           argv[0], argv[0]);
    printf("Usage for unpacking all tessdata components:\n"
           " %s -u traineddata_file output_path_prefix\n"
           " (e.g. %s -u eng.traineddata tmp/eng.)\n", argv[0], argv[0]);
    return 1;
  }
  return 0;
}
| C++ |
/**********************************************************************
* File: util.h
* Description: Misc STL string utility functions.
* Author: Samuel Charron
* Created: Mon Nov 18 2013
*
* (C) Copyright 2013, Google Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
**********************************************************************/
#ifndef TESSERACT_TRAINING_UTIL_H_
#define TESSERACT_TRAINING_UTIL_H_
#include <stddef.h>
#include <stdlib.h>
#include <string>
#include <vector>
#ifdef USE_STD_NAMESPACE
using std::string;
using std::vector;
#endif
// StringHash is the hashing functor needed by the stl hash map.
#ifndef COMPILER_MSVC
// Hashing functor over std::string for the STL hash map.  Sums the
// characters, shifting each by its index (mod 24) so that permutations of
// the same characters hash to different values.
struct StringHash {
  size_t operator()(const string& s) const {
    size_t code = 0;
    const char* chars = s.c_str();
    int i = 0;
    while (chars[i] != 0) {
      code += chars[i] << (i % 24);
      ++i;
    }
    return code;
  }
};
#else // COMPILER_MSVC
// MSVC variant: the pre-unordered_map hash containers require a functor
// derived from stdext::hash_compare that supplies both the hash function
// and the key-equality predicate.
struct StringHash : public stdext::hash_compare <string> {
  // Hash: sum of characters, each shifted by its index (mod 24) — must
  // stay identical to the non-MSVC StringHash above.
  size_t operator()(const string& s) const {
    size_t hash_code = 0;
    const char* str = s.c_str();
    for (int ch = 0; str[ch] != 0; ++ch) {
      hash_code += str[ch] << (ch % 24);
    }
    return hash_code;
  }
  // Equality predicate required by stdext::hash_compare.
  bool operator()(const string& s1, const string& s2) const {
    return s1 == s2;
  }
};
#endif // !COMPILER_MSVC
#ifndef USE_STD_NAMESPACE
#include "base/heap-checker.h"
#define DISABLE_HEAP_LEAK_CHECK HeapLeakChecker::Disabler disabler
#else
#define DISABLE_HEAP_LEAK_CHECK {}
#endif
#endif // TESSERACT_TRAINING_UTIL_H_
| C++ |
/**********************************************************************
* File: commandlineflags.h
* Description: Header file for commandline flag parsing.
* Author: Ranjith Unnikrishnan
* Created: July 2013
*
* (C) Copyright 2013, Google Inc.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
#ifndef TESSERACT_TRAINING_COMMANDLINEFLAGS_H_
#define TESSERACT_TRAINING_COMMANDLINEFLAGS_H_

#ifdef USE_STD_NAMESPACE
// Standalone (open-source) build: emulate the commandline-flag macros on
// top of tesseract's Param mechanism from params.h, so each FLAGS_<name>
// variable is a tesseract Param.
#include <stdlib.h>
#include "tprintf.h"
#include "params.h"

// Define / declare an integer flag FLAGS_<name>.
#define INT_PARAM_FLAG(name, val, comment) \
  INT_VAR(FLAGS_##name, val, comment)
#define DECLARE_INT_PARAM_FLAG(name) \
  extern INT_VAR_H(FLAGS_##name, 0, "")
// Define / declare a double flag FLAGS_<name>.
#define DOUBLE_PARAM_FLAG(name, val, comment) \
  double_VAR(FLAGS_##name, val, comment)
#define DECLARE_DOUBLE_PARAM_FLAG(name) \
  extern double_VAR_H(FLAGS_##name, "", "")
// Define / declare a boolean flag FLAGS_<name>.
#define BOOL_PARAM_FLAG(name, val, comment) \
  BOOL_VAR(FLAGS_##name, val, comment)
#define DECLARE_BOOL_PARAM_FLAG(name) \
  extern BOOL_VAR_H(FLAGS_##name, 0, "")
// Define / declare a string flag FLAGS_<name>.
#define STRING_PARAM_FLAG(name, val, comment) \
  STRING_VAR(FLAGS_##name, val, comment)
#define DECLARE_STRING_PARAM_FLAG(name) \
  extern STRING_VAR_H(FLAGS_##name, "", "")
#else
// Otherwise forward directly to the base commandlineflags (gflags-style)
// DEFINE_*/DECLARE_* macros.
#include "base/commandlineflags.h"
#define INT_PARAM_FLAG(name, val, comment) \
  DEFINE_int32(name, val, comment)
#define DECLARE_INT_PARAM_FLAG(name) \
  DECLARE_int32(name)
#define DOUBLE_PARAM_FLAG(name, val, comment) \
  DEFINE_double(name, val, comment)
#define DECLARE_DOUBLE_PARAM_FLAG(name) \
  DECLARE_double(name)
#define BOOL_PARAM_FLAG(name, val, comment) \
  DEFINE_bool(name, val, comment)
#define DECLARE_BOOL_PARAM_FLAG(name) \
  DECLARE_bool(name)
#define STRING_PARAM_FLAG(name, val, comment) \
  DEFINE_string(name, val, comment)
#define DECLARE_STRING_PARAM_FLAG(name) \
  DECLARE_string(name)
#endif

namespace tesseract {
// Parse commandline flags and values. Prints the usage string and exits on
// input of --help or --helpshort.
//
// If remove_flags is true, the argv pointer is advanced so that (*argv)[1]
// points to the first non-flag argument, (*argv)[0] points to the same string
// as before, and argc is decremented to reflect the new shorter length of argv.
// eg. If the input *argv is
// { "program", "--foo=4", "--bar=true", "file1", "file2" } with *argc = 5, the
// output *argv is { "program", "file1", "file2" } with *argc = 3
void ParseCommandLineFlags(const char* usage, int* argc,
                           char*** argv, const bool remove_flags);
}  // namespace tesseract

#endif  // TESSERACT_TRAINING_COMMANDLINEFLAGS_H_
| C++ |
// Copyright 2011 Google Inc. All Rights Reserved.
// Author: rays@google.com (Ray Smith)
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Filename: classifier_tester.cpp
// Purpose: Tests a character classifier on data as formatted for training,
// but doesn't have to be the same as the training data.
// Author: Ray Smith
#include <stdio.h>
#ifndef USE_STD_NAMESPACE
#include "base/commandlineflags.h"
#endif
#include "baseapi.h"
#include "commontraining.h"
#include "cubeclassifier.h"
#include "mastertrainer.h"
#include "params.h"
#include "strngs.h"
#include "tessclassifier.h"
// Commandline flags selecting the classifier under test and where the
// traineddata lives.
STRING_PARAM_FLAG(classifier, "", "Classifier to test");
STRING_PARAM_FLAG(lang, "eng", "Language to test");
STRING_PARAM_FLAG(tessdata_dir, "", "Directory of traineddata files");
// Flags declared here but defined elsewhere in the training tools
// (presumably commontraining — TODO confirm).
DECLARE_INT_PARAM_FLAG(debug_level);
DECLARE_STRING_PARAM_FLAG(T);

// Identifiers for the classifiers that can be tested.
enum ClassifierName {
  CN_PRUNER,
  CN_FULL,
  CN_CUBE,
  CN_CUBETESS,
  CN_COUNT
};

// Commandline names of the classifiers.  Must be kept in the same order as
// the ClassifierName enum above, and NULL-terminated.
const char* names[] = {"pruner", "full", "cube", "cubetess", NULL };
// Decodes classifer_name into a ClassifierName, initializes a TessBaseAPI
// appropriate for that classifier, and returns a newly allocated
// ShapeClassifier wrapping it.
// On success *api holds the initialized API; both the returned classifier
// and *api are owned by the caller.
// On failure returns NULL; any partially-constructed API is deleted and
// *api is reset to NULL, so the caller may unconditionally delete *api.
// Note: unicharset is currently unused; kept for interface stability.
static tesseract::ShapeClassifier* InitializeClassifier(
    const char* classifer_name, const UNICHARSET& unicharset,
    int argc, char **argv,
    tesseract::TessBaseAPI** api) {
  // Decode the classifier string.
  ClassifierName classifier = CN_COUNT;
  for (int c = 0; c < CN_COUNT; ++c) {
    if (strcmp(classifer_name, names[c]) == 0) {
      classifier = static_cast<ClassifierName>(c);
      break;
    }
  }
  if (classifier == CN_COUNT) {
    fprintf(stderr, "Invalid classifier name:%s\n", FLAGS_classifier.c_str());
    *api = NULL;  // Previously left unassigned on this path.
    return NULL;
  }
  // We need to initialize tesseract to test.
  *api = new tesseract::TessBaseAPI;
  tesseract::OcrEngineMode engine_mode = tesseract::OEM_TESSERACT_ONLY;
  if (classifier == CN_CUBE || classifier == CN_CUBETESS)
    engine_mode = tesseract::OEM_TESSERACT_CUBE_COMBINED;
  // Renamed from 'tesseract' to avoid shadowing the namespace.
  tesseract::Tesseract* tess = NULL;
  tesseract::Classify* classify = NULL;
  if (classifier == CN_CUBE || classifier == CN_CUBETESS ||
      classifier == CN_PRUNER || classifier == CN_FULL) {
    (*api)->SetVariable("cube_debug_level", "2");
    if ((*api)->Init(FLAGS_tessdata_dir.c_str(), FLAGS_lang.c_str(),
                     engine_mode) < 0) {
      fprintf(stderr, "Tesseract initialization failed!\n");
      delete *api;  // Previously leaked on this path.
      *api = NULL;
      return NULL;
    }
    tess = const_cast<tesseract::Tesseract*>((*api)->tesseract());
    classify = reinterpret_cast<tesseract::Classify*>(tess);
    if (classify->shape_table() == NULL) {
      fprintf(stderr, "Tesseract must contain a ShapeTable!\n");
      delete *api;  // Previously leaked on this path.
      *api = NULL;
      return NULL;
    }
  }
  tesseract::ShapeClassifier* shape_classifier = NULL;
  // Apply any config files given via -T before constructing the classifier.
  if (!FLAGS_T.empty()) {
    const char* config_name;
    while ((config_name = GetNextFilename(argc, argv)) != NULL) {
      tprintf("Reading config file %s ...\n", config_name);
      (*api)->ReadConfigFile(config_name);
    }
  }
  if (classifier == CN_PRUNER) {
    shape_classifier = new tesseract::TessClassifier(true, classify);
  } else if (classifier == CN_FULL) {
    shape_classifier = new tesseract::TessClassifier(false, classify);
  } else if (classifier == CN_CUBE) {
    shape_classifier = new tesseract::CubeClassifier(tess);
  } else if (classifier == CN_CUBETESS) {
    shape_classifier = new tesseract::CubeTessClassifier(tess);
  } else {
    // Unreachable while every ClassifierName is handled above; kept as a
    // safety net for future additions.
    fprintf(stderr, "%s tester not yet implemented\n", classifer_name);
    delete *api;
    *api = NULL;
    return NULL;
  }
  tprintf("Testing classifier %s:\n", classifer_name);
  return shape_classifier;
}
// This program has complex setup requirements, so here is some help:
// Two different modes, tr files and serialized mastertrainer.
// From tr files:
// classifier_tester -U unicharset -F font_properties -X xheights
// -classifier x -lang lang [-output_trainer trainer] *.tr
// From a serialized trainer:
// classifier_tester -input_trainer trainer [-lang lang] -classifier x
//
// In the first case, the unicharset must be the unicharset from within
// the classifier under test, and the font_properties and xheights files must
// match the files used during training.
// In the second case, the trainer file must have been prepared from
// some previous run of shapeclustering, mftraining, or classifier_tester
// using the same conditions as above, ie matching unicharset/font_properties.
//
// Available values of classifier (x above) are:
// pruner : Tesseract class pruner only.
// full : Tesseract full classifier.
// cube : Cube classifier. (Not possible with an input trainer.)
// cubetess : Tesseract class pruner with rescoring by Cube. (Not possible
// with an input trainer.)
// Entry point: loads training samples (tr files or a serialized
// mastertrainer — see the setup notes above), builds the requested
// classifier, and reports its unichar top-1 error on the samples.
// Returns 0 on success, 1 if the classifier could not be initialized.
int main(int argc, char **argv) {
  ParseArguments(&argc, &argv);
  STRING file_prefix;
  tesseract::MasterTrainer* trainer = tesseract::LoadTrainingData(
      argc, argv, false, NULL, &file_prefix);
  // Initialize to NULL so the error path below can delete unconditionally
  // (InitializeClassifier may fail before assigning it).
  tesseract::TessBaseAPI* api = NULL;
  // Decode the classifier string.
  tesseract::ShapeClassifier* shape_classifier = InitializeClassifier(
      FLAGS_classifier.c_str(), trainer->unicharset(), argc, argv, &api);
  if (shape_classifier == NULL) {
    fprintf(stderr, "Classifier init failed!:%s\n", FLAGS_classifier.c_str());
    delete api;      // Previously leaked on this path.
    delete trainer;  // Previously leaked on this path.
    return 1;
  }
  // We want to test junk as well if it is available.
  // trainer->IncludeJunk();
  // We want to test with replicated samples too.
  trainer->ReplicateAndRandomizeSamplesIfRequired();
  // Report at debug level at least 3 so the error breakdown is printed.
  trainer->TestClassifierOnSamples(tesseract::CT_UNICHAR_TOP1_ERR,
                                   MAX(3, FLAGS_debug_level), false,
                                   shape_classifier, NULL);
  delete shape_classifier;
  delete api;
  delete trainer;
  return 0;
} /* main */
| C++ |
/**********************************************************************
* File: normstrngs.cpp
* Description: Utilities to normalize and manipulate UTF-32 and
* UTF-8 strings.
* Author: Ranjith Unnikrishnan
* Created: Thu July 4 2013
*
* (C) Copyright 2013, Google Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
**********************************************************************/
#include "normstrngs.h"
#include "icuerrorcode.h"
#include "unichar.h"
#include "unicode/normalizer2.h" // From libicu
#include "unicode/translit.h" // From libicu
#include "unicode/unorm2.h" // From libicu
namespace tesseract {
// Converts a null-terminated UTF-8 string to a vector of UTF-32 code
// points in *str32 (cleared first).
// Bug fix: UNICHAR::utf8_step returns 0 for a byte that does not start a
// valid UTF-8 sequence; the original loop then advanced by 0 and spun
// forever. Invalid bytes are now skipped one at a time.
void UTF8ToUTF32(const char* utf8_str, GenericVector<char32>* str32) {
  str32->clear();
  str32->reserve(strlen(utf8_str));
  int len = strlen(utf8_str);
  int step = 0;
  for (int ch = 0; ch < len; ch += step) {
    step = UNICHAR::utf8_step(utf8_str + ch);
    if (step > 0) {
      UNICHAR uni_ch(utf8_str + ch, step);
      (*str32) += uni_ch.first_uni();
    } else {
      // Not a valid start of a UTF-8 sequence: skip the offending byte so
      // the loop always makes progress.
      step = 1;
    }
  }
}
// Re-encodes a vector of UTF-32 code points as UTF-8 into *utf8_str.
// Code points that UNICHAR cannot encode are silently dropped.
void UTF32ToUTF8(const GenericVector<char32>& str32, STRING* utf8_str) {
  utf8_str->ensure(str32.length());
  utf8_str->assign("", 0);
  const int num_chars = str32.length();
  for (int idx = 0; idx < num_chars; ++idx) {
    UNICHAR uni_ch(str32[idx]);
    char* encoded = uni_ch.utf8_str();
    if (encoded == NULL) continue;
    (*utf8_str) += encoded;
    delete[] encoded;
  }
}
bool is_hyphen_punc(const char32 ch) {
static const int kNumHyphenPuncUnicodes = 13;
static const char32 kHyphenPuncUnicodes[kNumHyphenPuncUnicodes] = {
'-',
0x2010, 0x2011, 0x2012, 0x2013, 0x2014, 0x2015, // hyphen..horizontal bar
0x207b, // superscript minus
0x208b, // subscript minus
0x2212, // minus sign
0xfe58, // small em dash
0xfe63, // small hyphen-minus
0xff0d, // fullwidth hyphen-minus
};
for (int i = 0; i < kNumHyphenPuncUnicodes; ++i) {
if (kHyphenPuncUnicodes[i] == ch)
return true;
}
return false;
}
bool is_single_quote(const char32 ch) {
static const int kNumSingleQuoteUnicodes = 8;
static const char32 kSingleQuoteUnicodes[kNumSingleQuoteUnicodes] = {
'\'',
'`',
0x2018, // left single quotation mark (English, others)
0x2019, // right single quotation mark (Danish, Finnish, Swedish, Norw.)
// We may have to introduce a comma set with 0x201a
0x201B, // single high-reveresed-9 quotation mark (PropList.txt)
0x2032, // prime
0x300C, // left corner bracket (East Asian languages)
0xFF07, // fullwidth apostrophe
};
for (int i = 0; i < kNumSingleQuoteUnicodes; ++i) {
if (kSingleQuoteUnicodes[i] == ch)
return true;
}
return false;
}
bool is_double_quote(const char32 ch) {
static const int kNumDoubleQuoteUnicodes = 8;
static const char32 kDoubleQuoteUnicodes[kNumDoubleQuoteUnicodes] = {
'"',
0x201C, // left double quotation mark (English, others)
0x201D, // right double quotation mark (Danish, Finnish, Swedish, Norw.)
0x201F, // double high-reversed-9 quotation mark (PropList.txt)
0x2033, // double prime
0x301D, // reversed double prime quotation mark (East Asian langs, horiz.)
0x301E, // close double prime (East Asian languages written horizontally)
0xFF02, // fullwidth quotation mark
};
for (int i = 0; i < kNumDoubleQuoteUnicodes; ++i) {
if (kDoubleQuoteUnicodes[i] == ch)
return true;
}
return false;
}
// Normalizes every code point of the UTF-8 input via NormalizeChar32
// (NFKC plus OCR-specific folding) and returns the result as UTF-8.
STRING NormalizeUTF8String(const char* str8) {
  GenericVector<char32> in32;
  UTF8ToUTF32(str8, &in32);
  GenericVector<char32> out32;
  GenericVector<char32> norm;
  const int length = in32.length();
  for (int i = 0; i < length; ++i) {
    // NormalizeChar32 may expand one input code point into several.
    norm.clear();
    NormalizeChar32(in32[i], &norm);
    for (int j = 0; j < norm.length(); ++j)
      out32.push_back(norm[j]);
  }
  STRING result;
  UTF32ToUTF8(out32, &result);
  return result;
}
// Applies NFKC normalization to ch followed by OCR-specific folding
// (OCRNormalize), writing the resulting code point(s) to *str (cleared
// first).  If NFKC expansion would introduce a space, the normalization is
// abandoned and the original ch is emitted unchanged.
void NormalizeChar32(char32 ch, GenericVector<char32>* str) {
  IcuErrorCode error_code;
  // Per ICU documentation, getInstance returns an unowned singleton that
  // must not be deleted.
  const icu::Normalizer2* nfkc = icu::Normalizer2::getInstance(
      NULL, "nfkc", UNORM2_COMPOSE, error_code);
  error_code.assertSuccess();
  error_code.reset();
  icu::UnicodeString uch_str(static_cast<UChar32>(ch));
  icu::UnicodeString norm_str = nfkc->normalize(uch_str, error_code);
  error_code.assertSuccess();
  str->clear();
  // norm_str[i] iterates UTF-16 code units, not code points; assumed safe
  // here because the inputs of interest normalize within the BMP — TODO
  // confirm for supplementary-plane input.
  for (int i = 0; i < norm_str.length(); ++i) {
    // If any spaces were added by NFKC, pretend normalization is a nop.
    if (norm_str[i] == ' ') {
      str->clear();
      str->push_back(ch);
      break;
    } else {
      str->push_back(OCRNormalize(static_cast<char32>(norm_str[i])));
    }
  }
}
// Applies just the OCR-specific folding: all hyphen-like, single-quote-like
// and double-quote-like characters collapse to '-', '\'' and '"'
// respectively; everything else is returned unchanged.
char32 OCRNormalize(char32 ch) {
  if (is_hyphen_punc(ch)) return '-';
  if (is_single_quote(ch)) return '\'';
  if (is_double_quote(ch)) return '"';
  return ch;
}
// Returns true if the two code points fold to the same character under
// OCRNormalize.
bool IsOCREquivalent(char32 ch1, char32 ch2) {
  const char32 norm1 = OCRNormalize(ch1);
  const char32 norm2 = OCRNormalize(ch2);
  return norm1 == norm2;
}
// Returns true if ch is a valid Unicode scalar value: below the surrogate
// range [0xD800, 0xE000), or from 0xE000 up to the last code point
// U+10FFFF.
bool IsValidCodepoint(const char32 ch) {
  if (static_cast<uinT32>(ch) < 0xD800) return true;
  return ch >= 0xE000 && ch <= 0x10FFFF;
}
// Returns true if ch is whitespace per ICU's u_isUWhiteSpace.
// Asserts (fatally) if ch is not a valid Unicode scalar value.
bool IsWhitespace(const char32 ch) {
  ASSERT_HOST_MSG(IsValidCodepoint(ch),
                  "Invalid Unicode codepoint: 0x%x\n", ch);
  return u_isUWhiteSpace(static_cast<UChar32>(ch));
}
// Returns true if every code point of the UTF-8 string is whitespace.
bool IsUTF8Whitespace(const char* text) {
  const int len = static_cast<int>(strlen(text));
  return SpanUTF8Whitespace(text) == len;
}
int SpanUTF8Whitespace(const char* text) {
int n_white = 0;
for (UNICHAR::const_iterator it = UNICHAR::begin(text, strlen(text));
it != UNICHAR::end(text, strlen(text));
++it) {
if (!IsWhitespace(*it)) break;
n_white += it.utf8_len();
}
return n_white;
}
int SpanUTF8NotWhitespace(const char* text) {
int n_notwhite = 0;
for (UNICHAR::const_iterator it = UNICHAR::begin(text, strlen(text));
it != UNICHAR::end(text, strlen(text));
++it) {
if (IsWhitespace(*it)) break;
n_notwhite += it.utf8_len();
}
return n_notwhite;
}
// Returns true if ch is a valid scalar value that is legal for
// interchange: rejects the noncharacters U+FDD0..U+FDEF and the last two
// code points of every plane (U+xxFFFE / U+xxFFFF), and rejects ISO
// control characters other than newline, formfeed, tab and return.
bool IsInterchangeValid(const char32 ch) {
  if (!IsValidCodepoint(ch)) return false;
  if (ch >= 0xFDD0 && ch <= 0xFDEF) return false;  // Noncharacters.
  // The last two code points of each of the 17 planes are noncharacters;
  // for ch <= 0x10FFFF this mask test covers exactly those 34 values.
  if ((ch & 0xFFFE) == 0xFFFE) return false;
  if (ch == '\n' || ch == '\f' || ch == '\t' || ch == '\r') return true;
  return !u_isISOControl(static_cast<UChar32>(ch));
}
// Returns true if ch is 7-bit ASCII and legal for interchange (not an ISO
// control character, except newline, formfeed, tab and return).
bool IsInterchangeValid7BitAscii(const char32 ch) {
  if (!IsValidCodepoint(ch)) return false;
  // NOTE(review): the bound accepts 128 itself, which is outside 7-bit
  // ASCII; preserved as-is — confirm whether '< 128' was intended.
  if (ch > 128) return false;
  if (ch == '\n' || ch == '\f' || ch == '\t' || ch == '\r') return true;
  return !u_isISOControl(static_cast<UChar32>(ch));
}
// Converts a character of the fullwidth/halfwidth-forms block
// (U+FF00..U+FFEF), or the ideographic space U+3000, to its halfwidth
// equivalent via ICU's Fullwidth-Halfwidth transliterator; all other
// characters are returned unchanged.
char32 FullwidthToHalfwidth(const char32 ch) {
  // Return unchanged if not in the fullwidth-halfwidth Unicode block.
  if (ch < 0xFF00 || ch > 0xFFEF || !IsValidCodepoint(ch)) {
    // U+3000 (ideographic space) lies outside the block but still has a
    // halfwidth equivalent, so it deliberately falls through.
    if (ch != 0x3000) return ch;
  }
  // Special case for fullwidth left and right "white parentheses".
  if (ch == 0xFF5F) return 0x2985;
  if (ch == 0xFF60) return 0x2986;
  // Construct a full-to-half width transliterator.
  // NOTE(review): created and destroyed on every call; consider caching if
  // this ever shows up in a profile.
  IcuErrorCode error_code;
  icu::UnicodeString uch_str(static_cast<UChar32>(ch));
  const icu::Transliterator* fulltohalf = icu::Transliterator::createInstance(
      "Fullwidth-Halfwidth", UTRANS_FORWARD, error_code);
  error_code.assertSuccess();
  error_code.reset();
  fulltohalf->transliterate(uch_str);
  delete fulltohalf;
  ASSERT_HOST(uch_str.length() != 0);
  // uch_str[0] is a UTF-16 code unit; assumed the result is a single BMP
  // character — TODO confirm for all transliterator outputs.
  return uch_str[0];
}
} // namespace tesseract
| C++ |
/******************************************************************************
** Filename: MergeNF.c
** Purpose: Program for merging similar nano-feature protos
** Author: Dan Johnson
** History: Wed Nov 21 09:55:23 1990, DSJ, Created.
**
** (c) Copyright Hewlett-Packard Company, 1988.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
******************************************************************************/
#include "mergenf.h"
#include "host.h"
#include "efio.h"
#include "clusttool.h"
#include "cluster.h"
#include "oldlist.h"
#include "protos.h"
#include "ndminx.h"
#include "ocrfeatures.h"
#include "const.h"
#include "featdefs.h"
#include "intproto.h"
#include "params.h"
#include <stdio.h>
#include <string.h>
#include <math.h>
/*-------------------once in subfeat---------------------------------*/
// Tunable parameters for proto similarity scoring (used by
// SubfeatureEvidence and EvidenceOf below).
double_VAR(training_angle_match_scale, 1.0, "Angle Match Scale ...");
double_VAR(training_similarity_midpoint, 0.0075, "Similarity Midpoint ...");
double_VAR(training_similarity_curl, 2.0, "Similarity Curl ...");
/*-----------------------------once in fasttrain----------------------------------*/
// Tunable parameters for the fast-match bounding-box test (used by
// DummyFastMatch and ComputePaddedBoundingBox below).
double_VAR(training_tangent_bbox_pad, 0.5, "Tangent bounding box pad ...");
double_VAR(training_orthogonal_bbox_pad, 2.5, "Orthogonal bounding box pad ...");
double_VAR(training_angle_pad, 45.0, "Angle pad ...");
/**
* Compare protos p1 and p2 and return an estimate of the
* worst evidence rating that will result for any part of p1
* that is compared to p2. In other words, if p1 were broken
* into pico-features and each pico-feature was matched to p2,
* what is the worst evidence rating that will be achieved for
* any pico-feature.
*
* @param p1, p2 protos to be compared
*
* Globals: none
*
* @return Worst possible result when matching p1 to p2.
* @note Exceptions: none
* @note History: Mon Nov 26 08:27:53 1990, DSJ, Created.
*/
FLOAT32 CompareProtos(PROTO p1, PROTO p2) {
  FLOAT32 worst = WORST_EVIDENCE;

  /* if p1 and p2 are not close in length, don't let them match */
  if (fabs(p1->Length - p2->Length) > MAX_LENGTH_MISMATCH)
    return (0.0);

  /* create a dummy pico-feature to be used for comparisons */
  FEATURE probe = NewFeature(&PicoFeatDesc);
  probe->Params[PicoFeatDir] = p1->Angle;

  /* convert angle to radians */
  FLOAT32 angle = p1->Angle * 2.0 * PI;

  /* find distance from center of p1 to 1/2 picofeat from end */
  FLOAT32 half_len = p1->Length / 2.0 - GetPicoFeatureLength() / 2.0;
  if (half_len < 0) half_len = 0;

  /* place the probe at each end of p1 in turn and match it against p2;
     a miss at either end means the protos cannot match at all */
  const double dx = cos(angle) * half_len;
  const double dy = sin(angle) * half_len;
  for (int end = 0; end < 2; ++end) {
    probe->Params[PicoFeatX] = (end == 0) ? p1->X + dx : p1->X - dx;
    probe->Params[PicoFeatY] = (end == 0) ? p1->Y + dy : p1->Y - dy;
    if (!DummyFastMatch(probe, p2)) {
      FreeFeature(probe);
      return 0.0;
    }
    FLOAT32 evidence = SubfeatureEvidence(probe, p2);
    if (evidence < worst)
      worst = evidence;
  }

  FreeFeature(probe);
  return (worst);
} /* CompareProtos */
/**
* This routine computes a proto which is the weighted
* average of protos p1 and p2. The new proto is returned
* in MergedProto.
*
* @param p1, p2 protos to be merged
* @param w1, w2 weight of each proto
* @param MergedProto place to put resulting merged proto
*
* Globals: none
*
* @return none (results are returned in MergedProto)
* @note Exceptions: none
* @note History: Mon Nov 26 08:15:08 1990, DSJ, Created.
*/
void ComputeMergedProto(PROTO p1, PROTO p2,
                        FLOAT32 w1, FLOAT32 w2,
                        PROTO MergedProto) {
  /* normalize the weights so they sum to 1 */
  const FLOAT32 total_weight = w1 + w2;
  const FLOAT32 f1 = w1 / total_weight;
  const FLOAT32 f2 = w2 / total_weight;
  MergedProto->X = p1->X * f1 + p2->X * f2;
  MergedProto->Y = p1->Y * f1 + p2->Y * f2;
  MergedProto->Length = p1->Length * f1 + p2->Length * f2;
  MergedProto->Angle = p1->Angle * f1 + p2->Angle * f2;
  /* refresh the derived line coefficients of the merged proto */
  FillABC(MergedProto);
} /* ComputeMergedProto */
/**
* This routine searches thru all of the prototypes in
* Class and returns the id of the proto which would provide
* the best approximation of Prototype. If no close
* approximation can be found, NO_PROTO is returned.
*
* @param Class class to search for matching old proto in
* @param NumMerged # of protos merged into each proto of Class
* @param Prototype new proto to find match for
*
* Globals: none
*
* @return Id of closest proto in Class or NO_PROTO.
* @note Exceptions: none
* @note History: Sat Nov 24 11:42:58 1990, DSJ, Created.
*/
int FindClosestExistingProto(CLASS_TYPE Class, int NumMerged[],
                             PROTOTYPE *Prototype) {
  PROTO_STRUCT new_proto;
  PROTO_STRUCT merged;
  MakeNewFromOld(&new_proto, Prototype);

  int best_id = NO_PROTO;
  FLOAT32 best_match = WORST_MATCH_ALLOWED;
  for (int pid = 0; pid < Class->NumProtos; ++pid) {
    PROTO proto = ProtoIn(Class, pid);
    /* merge the candidate into this proto, weighted by how many protos
       were already merged into it, then score the fit both ways and take
       the pessimistic (smaller) score */
    ComputeMergedProto(proto, &new_proto,
                       (FLOAT32) NumMerged[pid], 1.0, &merged);
    const FLOAT32 old_match = CompareProtos(proto, &merged);
    const FLOAT32 new_match = CompareProtos(&new_proto, &merged);
    const FLOAT32 match = MIN(old_match, new_match);
    if (match > best_match) {
      best_id = pid;
      best_match = match;
    }
  }
  return best_id;
} /* FindClosestExistingProto */
/**
* This fills in the fields of the New proto based on the
* fields of the Old proto.
*
* @param New new proto to be filled in
* @param Old old proto to be converted
*
* Globals: none
*
* Exceptions: none
* History: Mon Nov 26 09:45:39 1990, DSJ, Created.
*/
void MakeNewFromOld(PROTO New, PROTOTYPE *Old) {
  /* copy the position, size and orientation out of the cluster mean */
  New->Angle = OrientationOf(Old->Mean);
  New->Length = LengthOf(Old->Mean);
  New->X = CenterX(Old->Mean);
  New->Y = CenterY(Old->Mean);
  /* derive the line coefficients implied by the fields above */
  FillABC(New);
} /* MakeNewFromOld */
/*-------------------once in subfeat---------------------------------*/
/**
* @name SubfeatureEvidence
*
* Compare a feature to a prototype. Print the result.
*/
FLOAT32 SubfeatureEvidence(FEATURE Feature, PROTO Proto) {
  /* angular difference, wrapped into [-0.5, 0.5] and scaled */
  float angle_diff = Proto->Angle - Feature->Params[PicoFeatDir];
  if (angle_diff < -0.5) angle_diff += 1.0;
  if (angle_diff > 0.5) angle_diff -= 1.0;
  angle_diff *= training_angle_match_scale;
  /* signed distance of the feature from the proto's line A*x + B*y + C */
  const float distance = Proto->A * Feature->Params[PicoFeatX] +
                         Proto->B * Feature->Params[PicoFeatY] +
                         Proto->C;
  return EvidenceOf(distance * distance + angle_diff * angle_diff);
}
/**
* @name EvidenceOf
*
* Return the new type of evidence number corresponding to this
* distance value. This number is no longer based on the chi squared
* approximation. The equation that represents the transform is:
* 1 / (1 + (sim / midpoint) ^ curl)
*/
double EvidenceOf(double Similarity) {
  double sim = Similarity / training_similarity_midpoint;
  /* special-case the common integer curls to avoid a pow() call */
  if (training_similarity_curl == 2)
    sim *= sim;
  else if (training_similarity_curl == 3)
    sim = sim * sim * sim;
  else
    sim = pow(sim, training_similarity_curl);
  return 1.0 / (1.0 + sim);
}
/**
* This routine returns TRUE if Feature would be matched
* by a fast match table built from Proto.
*
* @param Feature feature to be "fast matched" to proto
* @param Proto proto being "fast matched" against
*
* Globals:
* - training_tangent_bbox_pad bounding box pad tangent to proto
* - training_orthogonal_bbox_pad bounding box pad orthogonal to proto
*
* @return TRUE if feature could match Proto.
* @note Exceptions: none
* @note History: Wed Nov 14 17:19:58 1990, DSJ, Created.
*/
BOOL8 DummyFastMatch(FEATURE Feature, PROTO Proto) {
  /* reject quickly if the angular difference exceeds the pad */
  const FLOAT32 max_angle_error = training_angle_pad / 360.0;
  FLOAT32 angle_error = fabs(Proto->Angle - Feature->Params[PicoFeatDir]);
  if (angle_error > 0.5)
    angle_error = 1.0 - angle_error;
  if (angle_error > max_angle_error)
    return (FALSE);

  /* otherwise test against the proto's padded bounding box */
  FRECT bounding_box;
  const FLOAT32 pico_len = GetPicoFeatureLength();
  ComputePaddedBoundingBox(Proto,
                           training_tangent_bbox_pad * pico_len,
                           training_orthogonal_bbox_pad * pico_len,
                           &bounding_box);
  return PointInside(&bounding_box, Feature->Params[PicoFeatX],
                     Feature->Params[PicoFeatY]);
} /* DummyFastMatch */
/**
 * Computes a bounding box that encloses the specified proto plus padding.
 * The padding is given as separate distances in the directions tangential
 * and orthogonal to the proto segment.
 *
 * @param Proto proto to compute bounding box for
 * @param TangentPad amount of pad to add in direction of segment
 * @param OrthogonalPad amount of pad to add orthogonal to segment
 * @param[out] BoundingBox place to put results
 *
 * Globals: none
 *
 * @return none (results are returned in BoundingBox)
 */
void ComputePaddedBoundingBox (PROTO Proto, FLOAT32 TangentPad,
                               FLOAT32 OrthogonalPad, FRECT *BoundingBox) {
  // Half the padded segment length, measured from the proto center.
  const FLOAT32 half_length = Proto->Length / 2.0 + TangentPad;
  const FLOAT32 angle = Proto->Angle * 2.0 * PI;
  const FLOAT32 cos_angle = fabs(cos(angle));
  const FLOAT32 sin_angle = fabs(sin(angle));
  // X extent: whichever is larger of the segment's projection or the
  // orthogonal pad's projection onto the X axis.
  FLOAT32 pad = MAX (cos_angle * half_length, sin_angle * OrthogonalPad);
  BoundingBox->MinX = Proto->X - pad;
  BoundingBox->MaxX = Proto->X + pad;
  // Y extent: same reasoning with the projections swapped.
  pad = MAX(sin_angle * half_length, cos_angle * OrthogonalPad);
  BoundingBox->MinY = Proto->Y - pad;
  BoundingBox->MaxY = Proto->Y + pad;
} /* ComputePaddedBoundingBox */
/**
 * Returns TRUE if point (X,Y) lies inside Rectangle; points exactly on the
 * boundary count as inside.
 *
 * Globals: none
 *
 * @return TRUE if point (X,Y) is inside of Rectangle.
 */
BOOL8 PointInside(FRECT *Rectangle, FLOAT32 X, FLOAT32 Y) {
  const bool inside = X >= Rectangle->MinX && X <= Rectangle->MaxX &&
                      Y >= Rectangle->MinY && Y <= Rectangle->MaxY;
  return inside ? TRUE : FALSE;
} /* PointInside */
| C++ |
// Copyright 2008 Google Inc. All Rights Reserved.
// Author: scharron@google.com (Samuel Charron)
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef TESSERACT_TRAINING_COMMONTRAINING_H__
#define TESSERACT_TRAINING_COMMONTRAINING_H__
#include "cluster.h"
#include "commandlineflags.h"
#include "featdefs.h"
#include "intproto.h"
#include "oldlist.h"
namespace tesseract {
class Classify;
class MasterTrainer;
class ShapeTable;
}
//////////////////////////////////////////////////////////////////////////////
// Globals ///////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////
extern FEATURE_DEFS_STRUCT feature_defs;
// Must be defined in the file that "implements" commonTraining facilities.
extern CLUSTERCONFIG Config;
//////////////////////////////////////////////////////////////////////////////
// Structs ///////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////
// A list of training samples for a single labeled character class.
// NOTE(review): field meanings below are inferred from the names and the
// function declarations in this header; confirm against commontraining.cpp.
typedef struct
{
  char *Label;            // class label string
  int SampleCount;        // presumably the total number of samples in List
  int font_sample_count;  // presumably samples for the current font only
  LIST List;              // the training samples themselves
}
LABELEDLISTNODE, *LABELEDLIST;
// Per-class bookkeeping used while merging protos into a CLASS_TYPE.
typedef struct
{
  char* Label;                    // class label string
  int NumMerged[MAX_NUM_PROTOS];  // per-proto count of merged source protos
  CLASS_TYPE Class;               // the class being assembled
}MERGE_CLASS_NODE;
typedef MERGE_CLASS_NODE* MERGE_CLASS;
//////////////////////////////////////////////////////////////////////////////
// Functions /////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////
// Parses commandline arguments.
void ParseArguments(int* argc, char*** argv);
namespace tesseract {
// Helper loads shape table from the given file.
ShapeTable* LoadShapeTable(const STRING& file_prefix);
// Helper to write the shape_table.
void WriteShapeTable(const STRING& file_prefix, const ShapeTable& shape_table);
// Creates a MasterTraininer and loads the training data into it:
// Initializes feature_defs and IntegerFX.
// Loads the shape_table if shape_table != NULL.
// Loads initial unicharset from -U command-line option.
// If FLAGS_input_trainer is set, loads the majority of data from there, else:
// Loads font info from -F option.
// Loads xheights from -X option.
// Loads samples from .tr files in remaining command-line args.
// Deletes outliers and computes canonical samples.
// If FLAGS_output_trainer is set, saves the trainer for future use.
// Computes canonical and cloud features.
// If shape_table is not NULL, but failed to load, make a fake flat one,
// as shape clustering was not run.
MasterTrainer* LoadTrainingData(int argc, const char* const * argv,
                                bool replication,
                                ShapeTable** shape_table,
                                STRING* file_prefix);
}  // namespace tesseract.
// Returns the next filename from the remaining command-line arguments.
const char *GetNextFilename(int argc, const char* const * argv);
// Finds the labeled list with the given Label in List, if present.
LABELEDLIST FindList(
    LIST List,
    char *Label);
// Allocates a new, empty labeled list for the given Label.
LABELEDLIST NewLabeledList(
    const char *Label);
void ReadTrainingSamples(const FEATURE_DEFS_STRUCT& feature_defs,
                         const char *feature_name, int max_samples,
                         UNICHARSET* unicharset,
                         FILE* file, LIST* training_samples);
void WriteTrainingSamples(
    const FEATURE_DEFS_STRUCT &FeatureDefs,
    char *Directory,
    LIST CharList,
    const char *program_feature_type);
// Frees all training samples in CharList.
void FreeTrainingSamples(
    LIST CharList);
void FreeLabeledList(
    LABELEDLIST LabeledList);
void FreeLabeledClassList(
    LIST ClassListList);
CLUSTERER *SetUpForClustering(
    const FEATURE_DEFS_STRUCT &FeatureDefs,
    LABELEDLIST CharSample,
    const char *program_feature_type);
LIST RemoveInsignificantProtos(
    LIST ProtoList,
    BOOL8 KeepSigProtos,
    BOOL8 KeepInsigProtos,
    int N);
void CleanUpUnusedData(
    LIST ProtoList);
void MergeInsignificantProtos(
    LIST ProtoList,
    const char *label,
    CLUSTERER *Clusterer,
    CLUSTERCONFIG *Config);
MERGE_CLASS FindClass(
    LIST List,
    const char *Label);
MERGE_CLASS NewLabeledClass(
    const char *Label);
// NOTE: a redundant second declaration of FreeTrainingSamples (identical to
// the one above) was removed here.
CLASS_STRUCT* SetUpForFloat2Int(const UNICHARSET& unicharset,
                                LIST LabeledClassList);
void Normalize(
    float *Values);
void FreeNormProtoList(
    LIST CharList);
void AddToNormProtosList(
    LIST* NormProtoList,
    LIST ProtoList,
    char *CharName);
int NumberOfProtos(
    LIST ProtoList,
    BOOL8 CountSigProtos,
    BOOL8 CountInsigProtos);
void allocNormProtos();
#endif // TESSERACT_TRAINING_COMMONTRAINING_H__
| C++ |
// This program reads a unicharset file, puts the result in a UNICHARSET
// object, fills it with properties about the unichars it contains and writes
// the result back to a file.
#include <stdlib.h>
#include <string.h>
#include <string>
#include "commandlineflags.h"
#include "fileio.h"
#include "genericvector.h"
#include "icuerrorcode.h"
#include "normstrngs.h"
#include "strngs.h"
#include "unicharset.h"
#include "unicode/uchar.h" // from libicu
#include "unicode/uscript.h" // from libicu
// The directory that is searched for universal script unicharsets.
STRING_PARAM_FLAG(script_dir, "",
"Directory name for input script unicharsets/xheights");
// Flags from commontraining.cpp
DECLARE_STRING_PARAM_FLAG(U);
DECLARE_STRING_PARAM_FLAG(O);
DECLARE_STRING_PARAM_FLAG(X);
namespace tesseract {
// Helper sets the character attribute properties and sets up the script table.
// Does not set tops and bottoms.
static void SetupBasicProperties(UNICHARSET* unicharset) {
  for (int unichar_id = 0; unichar_id < unicharset->size(); ++unichar_id) {
    // Convert any custom ligatures.
    const char* unichar_str = unicharset->id_to_unichar(unichar_id);
    for (int i = 0; UNICHARSET::kCustomLigatures[i][0] != NULL; ++i) {
      if (!strcmp(UNICHARSET::kCustomLigatures[i][1], unichar_str)) {
        unichar_str = UNICHARSET::kCustomLigatures[i][0];
        break;
      }
    }
    // Convert the unichar to UTF32 representation
    GenericVector<char32> uni_vector;
    tesseract::UTF8ToUTF32(unichar_str, &uni_vector);
    // Assume that if the property is true for any character in the string,
    // then it holds for the whole "character".
    bool unichar_isalpha = false;
    bool unichar_islower = false;
    bool unichar_isupper = false;
    bool unichar_isdigit = false;
    bool unichar_ispunct = false;
    for (int i = 0; i < uni_vector.size(); ++i) {
      if (u_isalpha(uni_vector[i]))
        unichar_isalpha = true;
      if (u_islower(uni_vector[i]))
        unichar_islower = true;
      if (u_isupper(uni_vector[i]))
        unichar_isupper = true;
      if (u_isdigit(uni_vector[i]))
        unichar_isdigit = true;
      if (u_ispunct(uni_vector[i]))
        unichar_ispunct = true;
    }
    unicharset->set_isalpha(unichar_id, unichar_isalpha);
    unicharset->set_islower(unichar_id, unichar_islower);
    unicharset->set_isupper(unichar_id, unichar_isupper);
    unicharset->set_isdigit(unichar_id, unichar_isdigit);
    unicharset->set_ispunctuation(unichar_id, unichar_ispunct);
    // The script of the first code point is used for the whole unichar.
    tesseract::IcuErrorCode err;
    unicharset->set_script(unichar_id, uscript_getName(
        uscript_getScript(uni_vector[0], err)));
    const int num_code_points = uni_vector.size();
    // Obtain the lower/upper case if needed and record it in the properties.
    unicharset->set_other_case(unichar_id, unichar_id);
    if (unichar_islower || unichar_isupper) {
      GenericVector<char32> other_case(num_code_points, 0);
      for (int i = 0; i < num_code_points; ++i) {
        // TODO(daria): Ideally u_strToLower()/ustrToUpper() should be used.
        // However since they deal with UChars (so need a conversion function
        // from char32 or UTF8string) and require a meaningful locale string,
        // for now u_tolower()/u_toupper() are used.
        other_case[i] = unichar_islower ? u_toupper(uni_vector[i]) :
            u_tolower(uni_vector[i]);
      }
      STRING other_case_uch;
      tesseract::UTF32ToUTF8(other_case, &other_case_uch);
      UNICHAR_ID other_case_id =
          unicharset->unichar_to_id(other_case_uch.c_str());
      if (other_case_id != INVALID_UNICHAR_ID) {
        unicharset->set_other_case(unichar_id, other_case_id);
      } else {
        // The case-flipped string is not itself a unichar; the other_case
        // mapping stays at the identity value set above.
        tprintf("Other case %s of %s is not in unicharset\n",
                other_case_uch.c_str(), unichar_str);
      }
    }
    // Set RTL property and obtain mirror unichar ID from ICU.
    GenericVector<char32> mirrors(num_code_points, 0);
    for (int i = 0; i < num_code_points; ++i) {
      mirrors[i] = u_charMirror(uni_vector[i]);
      if (i == 0) {  // set directionality to that of the 1st code point
        unicharset->set_direction(unichar_id,
                                  static_cast<UNICHARSET::Direction>(
                                      u_charDirection(uni_vector[i])));
      }
    }
    STRING mirror_uch;
    tesseract::UTF32ToUTF8(mirrors, &mirror_uch);
    UNICHAR_ID mirror_uch_id = unicharset->unichar_to_id(mirror_uch.c_str());
    if (mirror_uch_id != INVALID_UNICHAR_ID) {
      unicharset->set_mirror(unichar_id, mirror_uch_id);
    } else {
      tprintf("Mirror %s of %s is not in unicharset\n",
              mirror_uch.c_str(), unichar_str);
    }
    // Record normalized version of this unichar.
    // Unichar id 0 and unichars whose normalization is empty keep their raw
    // form as the normed string.
    STRING normed_str = tesseract::NormalizeUTF8String(unichar_str);
    if (unichar_id != 0 && normed_str.length() > 0) {
      unicharset->set_normed(unichar_id, normed_str.c_str());
    } else {
      unicharset->set_normed(unichar_id, unichar_str);
    }
  }
  unicharset->post_load_setup();
}
// Helper to set the properties for an input unicharset file, writes to the
// output file. If an appropriate script unicharset can be found in the
// script_dir directory, then the tops and bottoms are expanded using the
// script unicharset.
// If non-empty, xheight data for the fonts are written to the xheights_file.
static void SetPropertiesForInputFile(const string& script_dir,
const string& input_unicharset_file,
const string& output_unicharset_file,
const string& output_xheights_file) {
UNICHARSET unicharset;
// Load the input unicharset
unicharset.load_from_file(input_unicharset_file.c_str());
tprintf("Loaded unicharset of size %d from file %s\n", unicharset.size(),
input_unicharset_file.c_str());
// Set unichar properties
tprintf("Setting unichar properties\n");
SetupBasicProperties(&unicharset);
string xheights_str;
for (int s = 0; s < unicharset.get_script_table_size(); ++s) {
// Load the unicharset for the script if available.
string filename = script_dir + "/" +
unicharset.get_script_from_script_id(s) + ".unicharset";
UNICHARSET script_set;
if (script_set.load_from_file(filename.c_str())) {
unicharset.SetPropertiesFromOther(script_set);
}
// Load the xheights for the script if available.
filename = script_dir + "/" + unicharset.get_script_from_script_id(s) +
".xheights";
string script_heights;
if (File::ReadFileToString(filename, &script_heights))
xheights_str += script_heights;
}
if (!output_xheights_file.empty())
File::WriteStringToFileOrDie(xheights_str, output_xheights_file);
// Write the output unicharset
tprintf("Writing unicharset to file %s\n", output_unicharset_file.c_str());
unicharset.save_to_file(output_unicharset_file.c_str());
}
} // namespace tesseract
// Entry point: validates the required flags, then sets the properties for the
// input unicharset (-U) and writes the result to the output path (-O).
int main(int argc, char** argv) {
  tesseract::ParseCommandLineFlags(argv[0], &argc, &argv, true);
  // Both the input (-U) and output (-O) unicharset paths are required.
  const bool have_unicharsets = !FLAGS_U.empty() && !FLAGS_O.empty();
  if (!have_unicharsets) {
    tprintf("Specify both input and output unicharsets!\n");
    exit(1);
  }
  // A script directory is required for per-script property expansion.
  if (FLAGS_script_dir.empty()) {
    tprintf("Must specify a script_dir!\n");
    exit(1);
  }
  tesseract::SetPropertiesForInputFile(FLAGS_script_dir.c_str(),
                                       FLAGS_U.c_str(), FLAGS_O.c_str(),
                                       FLAGS_X.c_str());
  return 0;
}
| C++ |
///////////////////////////////////////////////////////////////////////
// File: ambiguous_words.cpp
// Description: A program that takes a text file with a list of words as
// input (one per line) and outputs a file with the words
// that were found in the dictionary followed by the words
// that are ambiguous to them.
// Author: Rika Antonova
// Created: Fri Oct 21 11:26:43 PDT 2011
//
// (C) Copyright 2011, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
//
#include <stdio.h>
#include "baseapi.h"
#include "helpers.h"
#include "strngs.h"
#include "dict.h"
#include "tesseractclass.h"
// Reads a wordlist (one word per line), and for each word records, via
// Dict::NoDangerousAmbig(), the dictionary words that are ambiguous to it.
// The output file is set through the output_ambig_words_file variable.
int main(int argc, char** argv) {
  // Parse input arguments.
  if (argc != 4 && (argc != 6 || strcmp(argv[1], "-l") != 0)) {
    // Fixed typo: "ambiguious" -> "ambiguous".
    printf("Usage: %s [-l lang] tessdata_dir wordlist_file"
           " output_ambiguous_wordlist_file\n", argv[0]);
    return 1;
  }
  int argv_offset = 0;
  STRING lang;
  if (argc == 6) {
    lang = argv[2];
    argv_offset = 2;
  } else {
    lang = "eng";
  }
  const char *tessdata_dir = argv[++argv_offset];
  const char *input_file_str = argv[++argv_offset];
  const char *output_file_str = argv[++argv_offset];
  // Initialize Tesseract, directing ambiguity output to the requested file.
  tesseract::TessBaseAPI api;
  GenericVector<STRING> vars_vec;
  GenericVector<STRING> vars_values;
  vars_vec.push_back("output_ambig_words_file");
  vars_values.push_back(output_file_str);
  // Check the Init() result: on failure api.tesseract() would be unusable
  // and the getDict() call below would dereference an invalid object.
  if (api.Init(tessdata_dir, lang.string(), tesseract::OEM_TESSERACT_ONLY,
               NULL, 0, &vars_vec, &vars_values, false) != 0) {
    tprintf("Failed to initialize tesseract for language %s\n", lang.string());
    return 1;
  }
  tesseract::Dict &dict = api.tesseract()->getDict();
  FILE *input_file = fopen(input_file_str, "rb");
  if (input_file == NULL) {
    tprintf("Failed to open input wordlist file %s\n", input_file_str);
    exit(1);
  }
  char str[CHARS_PER_LINE];
  // Read word list and call Dict::NoDangerousAmbig() for each word
  // to record ambiguities in the output file.
  while (fgets(str, CHARS_PER_LINE, input_file) != NULL) {
    chomp_string(str);  // remove newline
    WERD_CHOICE word(str, dict.getUnicharset());
    dict.NoDangerousAmbig(&word, NULL, false, NULL);
  }
  // Clean up.
  fclose(input_file);
  return 0;
}
| C++ |
/**********************************************************************
* File: stringrenderer.cpp
* Description: Class for rendering UTF-8 text to an image, and retrieving
* bounding boxes around each grapheme cluster.
* Author: Ranjith Unnikrishnan
* Created: Mon Nov 18 2013
*
* (C) Copyright 2013, Google Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
**********************************************************************/
#include "stringrenderer.h"
#include <stdio.h>
#include <string.h>
#include <algorithm>
#include <map>
#include <utility>
#include <vector>
#include "allheaders.h" // from leptonica
#include "boxchar.h"
#include "ligature_table.h"
#include "normstrngs.h"
#include "pango/pango-font.h"
#include "pango/pango-glyph-item.h"
#include "tlog.h"
#include "unichar.h"
#include "unicode/uchar.h" // from libicu
#include "util.h"
#ifdef USE_STD_NAMESPACE
using std::map;
using std::max;
using std::min;
using std::swap;
#endif
namespace tesseract {
// Resolution (dpi) applied via set_resolution() when the caller does not
// override it (see the StringRenderer constructor).
static const int kDefaultOutputResolution = 300;
// Word joiner (U+2060) inserted after letters in ngram mode, as per
// recommendation in http://unicode.org/reports/tr14/ to avoid line-breaks at
// hyphens and other non-alpha characters.
static const char* kWordJoinerUTF8 = "\u2060";
static const char32 kWordJoiner = 0x2060;
// Returns true if the code point is a combining character: a non-spacing,
// enclosing, or combining-spacing mark per its ICU general category.
static bool IsCombiner(int ch) {
  switch (u_charType(ch)) {
    case U_NON_SPACING_MARK:
    case U_ENCLOSING_MARK:
    case U_COMBINING_SPACING_MARK:
      return true;
    default:
      return false;
  }
}
// Returns the UTF-8 encoding of a single UTF-32 code point.
static string EncodeAsUTF8(const char32 ch32) {
  const UNICHAR uni_ch(ch32);
  string utf8_str(uni_ch.utf8(), uni_ch.utf8_len());
  return utf8_str;
}
// Returns true with probability 'prob'. The degenerate probabilities 0 and 1
// are answered without consuming any randomness from 'rand'.
static bool RandBool(const double prob, TRand* rand) {
  if (prob == 1.0) return true;
  return prob != 0.0 && rand->UnsignedRand(1.0) < prob;
}
/* static */
// Converts a Cairo ARGB32 image surface into a 32-bit-per-pixel leptonica
// Pix of the same dimensions. Returns NULL (after logging) if the surface
// is not in ARGB32 format.
Pix* CairoARGB32ToPixFormat(cairo_surface_t *surface) {
  if (cairo_image_surface_get_format(surface) != CAIRO_FORMAT_ARGB32) {
    printf("Unexpected surface format %d\n",
           cairo_image_surface_get_format(surface));
    return NULL;
  }
  const int width = cairo_image_surface_get_width(surface);
  const int height = cairo_image_surface_get_height(surface);
  Pix* pix = pixCreate(width, height, 32);
  int byte_stride = cairo_image_surface_get_stride(surface);
  // Copy the surface row by row into the Pix raster (wpl words per row).
  // NOTE(review): the destination is offset by one byte and the final row
  // copies one byte less, which appears to realign Cairo's ARGB byte order
  // with the Pix pixel layout -- confirm against the leptonica pix docs.
  for (int i = 0; i < height; ++i) {
    memcpy(reinterpret_cast<unsigned char*>(pix->data + i * pix->wpl) + 1,
           cairo_image_surface_get_data(surface) + i * byte_stride,
           byte_stride - ((i == height - 1) ? 1 : 0));
  }
  return pix;
}
// Constructs a renderer that draws text in the font described by 'font_desc'
// onto pages of the given pixel dimensions. All optional rendering effects
// (underlines, ligatures, fullwidth conversion, word boxes) default to off,
// and the resolution defaults to kDefaultOutputResolution.
StringRenderer::StringRenderer(const string& font_desc, int page_width,
                               int page_height)
    : page_width_(page_width),
      page_height_(page_height),
      h_margin_(50),
      v_margin_(50),
      char_spacing_(0),
      leading_(0),
      vertical_text_(false),
      gravity_hint_strong_(false),
      render_fullwidth_latin_(false),
      underline_start_prob_(0),
      underline_continuation_prob_(0),
      underline_style_(PANGO_UNDERLINE_SINGLE),
      drop_uncovered_chars_(true),
      strip_unrenderable_words_(false),
      add_ligatures_(false),
      output_word_boxes_(false),
      surface_(NULL),
      cr_(NULL),
      layout_(NULL),
      start_box_(0),
      page_(0),
      box_padding_(0),
      total_chars_(0),
      font_index_(0),
      last_offset_(0) {
  // Default pen color is black.
  pen_color_[0] = 0.0;
  pen_color_[1] = 0.0;
  pen_color_[2] = 0.0;
  set_font(font_desc);
  set_resolution(kDefaultOutputResolution);
  page_boxes_ = NULL;
}
// Parses 'desc' as a font description name, applies the current resolution
// to the font, and returns whether the description parsed successfully.
bool StringRenderer::set_font(const string& desc) {
  const bool parsed_ok = font_.ParseFontDescriptionName(desc);
  font_.set_resolution(resolution_);
  return parsed_ok;
}
// Records the output resolution and propagates it to the current font.
void StringRenderer::set_resolution(const int resolution) {
  resolution_ = resolution;
  font_.set_resolution(resolution_);
}
StringRenderer::~StringRenderer() {
  ClearBoxes();
  FreePangoCairo();
}
// (Re)creates the Cairo surface/context and the Pango layout for a fresh
// page, releasing any previously allocated ones first, then applies the
// current layout properties.
void StringRenderer::InitPangoCairo() {
  FreePangoCairo();
  surface_ = cairo_image_surface_create(CAIRO_FORMAT_ARGB32, page_width_,
                                        page_height_);
  cr_ = cairo_create(surface_);
  {
    DISABLE_HEAP_LEAK_CHECK;
    layout_ = pango_cairo_create_layout(cr_);
  }
  if (vertical_text_) {
    // Rotate the base glyph orientation for vertical layout.
    PangoContext* context = pango_layout_get_context(layout_);
    pango_context_set_base_gravity(context, PANGO_GRAVITY_EAST);
    if (gravity_hint_strong_) {
      pango_context_set_gravity_hint(context, PANGO_GRAVITY_HINT_STRONG);
    }
    pango_layout_context_changed(layout_);
  }
  SetLayoutProperties();
}
// Applies the current font, resolution, wrap width, character spacing, and
// line spacing to the Pango layout.
void StringRenderer::SetLayoutProperties() {
  string font_desc = font_.DescriptionName();
  // Specify the font via a description name
  PangoFontDescription *desc =
      pango_font_description_from_string(font_desc.c_str());
  // Assign the font description to the layout
  pango_layout_set_font_description(layout_, desc);
  pango_font_description_free(desc);  // free the description
  pango_cairo_context_set_resolution(pango_layout_get_context(layout_),
                                     resolution_);
  int max_width = page_width_ - 2 * h_margin_;
  int max_height = page_height_ - 2 * v_margin_;
  tlog(3, "max_width = %d, max_height = %d\n", max_width, max_height);
  if (vertical_text_) {
    // Vertical text advances along the page height, so swap the limits.
    swap(max_width, max_height);
  }
  pango_layout_set_width(layout_, max_width * PANGO_SCALE);
  pango_layout_set_wrap(layout_, PANGO_WRAP_WORD);
  // Adjust character spacing
  PangoAttrList* attr_list = pango_attr_list_new();
  if (char_spacing_) {
    // Letter-spacing attribute covering the whole text range.
    PangoAttribute* spacing_attr = pango_attr_letter_spacing_new(
        static_cast<int>(char_spacing_ * PANGO_SCALE + 0.5));
    spacing_attr->start_index = 0;
    spacing_attr->end_index = static_cast<guint>(-1);
    pango_attr_list_change(attr_list, spacing_attr);
  }
  pango_layout_set_attributes(layout_, attr_list);
  pango_attr_list_unref(attr_list);
  // Adjust line spacing
  if (leading_) {
    pango_layout_set_spacing(layout_, leading_ * PANGO_SCALE);
  }
}
// Releases the Pango layout and the Cairo context/surface, if allocated.
// Torn down in reverse order of creation (layout, context, surface).
void StringRenderer::FreePangoCairo() {
  if (layout_) {
    g_object_unref(layout_);
    layout_ = NULL;
  }
  if (cr_) {
    cairo_destroy(cr_);
    cr_ = NULL;
  }
  if (surface_) {
    cairo_surface_destroy(surface_);
    surface_ = NULL;
  }
}
// Randomly adds underline attributes to whole words of page_text. An
// underline starts at a word with probability underline_start_prob_ and,
// once started, extends over each subsequent word with probability
// underline_continuation_prob_. No-op if underline_start_prob_ is zero.
void StringRenderer::SetWordUnderlineAttributes(const string& page_text) {
  if (underline_start_prob_ == 0) return;
  PangoAttrList* attr_list = pango_layout_get_attributes(layout_);
  const char* text = page_text.c_str();
  int offset = 0;
  TRand rand;
  bool started_underline = false;
  PangoAttribute* und_attr = nullptr;
  while (offset < page_text.length()) {
    // Skip any whitespace to the start of the next word.
    offset += SpanUTF8Whitespace(text + offset);
    if (offset == page_text.length()) break;
    int word_start = offset;
    int word_len = SpanUTF8NotWhitespace(text + offset);
    offset += word_len;
    if (started_underline) {
      // Should we continue the underline to the next word?
      if (RandBool(underline_continuation_prob_, &rand)) {
        // Continue the current underline to this word.
        und_attr->end_index = word_start + word_len;
      } else {
        // Otherwise end the current underline attribute at the end of the
        // previous word.
        pango_attr_list_insert(attr_list, und_attr);
        started_underline = false;
        und_attr = nullptr;
      }
    }
    if (!started_underline && RandBool(underline_start_prob_, &rand)) {
      // Start a new underline attribute
      und_attr = pango_attr_underline_new(underline_style_);
      und_attr->start_index = word_start;
      und_attr->end_index = word_start + word_len;
      started_underline = true;
    }
  }
  // Finish the current underline attribute at the end of the page.
  if (started_underline) {
    und_attr->end_index = page_text.length();
    pango_attr_list_insert(attr_list, und_attr);
  }
}
// Returns offset in utf8 bytes to first page.
// Lays out a bounded prefix of 'text' and walks the resulting lines until
// the accumulated line height exceeds the printable page height, returning
// the byte offset of the first line that overflows (or the whole buffer
// length if nothing overflows).
int StringRenderer::FindFirstPageBreakOffset(const char* text,
                                             int text_length) {
  if (!text_length) return 0;
  const int max_height = (page_height_ - 2 * v_margin_);
  const int max_width = (page_width_ - 2 * h_margin_);
  // In vertical mode lines advance horizontally, so the width is the limit.
  const int max_layout_height = vertical_text_ ? max_width : max_height;
  // Bound the number of code points given to Pango to keep layout cheap.
  UNICHAR::const_iterator it = UNICHAR::begin(text, text_length);
  const UNICHAR::const_iterator it_end = UNICHAR::end(text, text_length);
  const int kMaxUnicodeBufLength = 15000;
  for (int i = 0; i < kMaxUnicodeBufLength && it != it_end; ++it, ++i);
  int buf_length = it.utf8_data() - text;
  tlog(1, "len = %d buf_len = %d\n", text_length, buf_length);
  pango_layout_set_text(layout_, text, buf_length);
  PangoLayoutIter* line_iter = NULL;
  {  // Fontconfig caches some info here that is not freed before exit.
    DISABLE_HEAP_LEAK_CHECK;
    line_iter = pango_layout_get_iter(layout_);
  }
  bool first_page = true;
  int page_top = 0;
  int offset = buf_length;
  do {
    // Get bounding box of the current line
    PangoRectangle line_ink_rect;
    pango_layout_iter_get_line_extents(line_iter, &line_ink_rect, NULL);
    pango_extents_to_pixels(&line_ink_rect, NULL);
    PangoLayoutLine* line = pango_layout_iter_get_line_readonly(line_iter);
    if (first_page) {
      // Measure page height relative to the top of the first line.
      page_top = line_ink_rect.y;
      first_page = false;
    }
    int line_bottom = line_ink_rect.y + line_ink_rect.height;
    if (line_bottom - page_top > max_layout_height) {
      offset = line->start_index;
      tlog(1, "Found offset = %d\n", offset);
      break;
    }
  } while (pango_layout_iter_next_line(line_iter));
  pango_layout_iter_free(line_iter);
  return offset;
}
// Returns the character/word boxes accumulated across all rendered pages.
const vector<BoxChar*>& StringRenderer::GetBoxes() const {
  return boxchars_;
}
// Returns the per-page bounding boxes accumulated so far.
Boxa* StringRenderer::GetPageBoxes() const {
  return page_boxes_;
}
// Rotates the boxes of the current page (from start_box_ onward) about the
// page center by 'rotation' (units as expected by BoxChar::RotateBoxes).
void StringRenderer::RotatePageBoxes(float rotation) {
  BoxChar::RotateBoxes(rotation, page_width_ / 2, page_height_ / 2,
                       start_box_, boxchars_.size(), &boxchars_);
}
void StringRenderer::ClearBoxes() {
for (int i = 0; i < boxchars_.size(); ++i)
delete boxchars_[i];
boxchars_.clear();
boxaDestroy(&page_boxes_);
}
// Writes every accumulated box to 'filename' in Tesseract box-file format.
void StringRenderer::WriteAllBoxes(const string& filename) const {
  BoxChar::WriteTesseractBoxFile(filename, page_height_, boxchars_);
}
// Returns cluster strings in logical order.
// Walks the runs of the current layout with a PangoGlyphItemIter, collects
// the text of each grapheme cluster keyed by its byte offset (so the map
// yields logical order), and returns false if no clusters were found.
bool StringRenderer::GetClusterStrings(vector<string>* cluster_text) {
  map<int, string> start_byte_to_text;
  PangoLayoutIter* run_iter = pango_layout_get_iter(layout_);
  const char* full_text = pango_layout_get_text(layout_);
  do {
    PangoLayoutRun* run = pango_layout_iter_get_run_readonly(run_iter);
    if (!run) {
      // End of line NULL run marker
      tlog(2, "Found end of line marker\n");
      continue;
    }
    PangoGlyphItemIter cluster_iter;
    gboolean have_cluster;
    for (have_cluster = pango_glyph_item_iter_init_start(&cluster_iter,
                                                         run, full_text);
         have_cluster;
         have_cluster = pango_glyph_item_iter_next_cluster(&cluster_iter)) {
      const int start_byte_index = cluster_iter.start_index;
      const int end_byte_index = cluster_iter.end_index;
      string text = string(full_text + start_byte_index,
                           end_byte_index - start_byte_index);
      // Collapse any whitespace cluster to a single ASCII space.
      if (IsUTF8Whitespace(text.c_str())) {
        tlog(2, "Found whitespace\n");
        text = " ";
      }
      tlog(2, "start_byte=%d end_byte=%d : '%s'\n", start_byte_index,
           end_byte_index, text.c_str());
      if (add_ligatures_) {
        // Make sure the output box files have ligatured text in case the font
        // decided to use an unmapped glyph.
        text = LigatureTable::Get()->AddLigatures(text, NULL);
      }
      start_byte_to_text[start_byte_index] = text;
    }
  } while (pango_layout_iter_next_run(run_iter));
  pango_layout_iter_free(run_iter);
  // Emit the cluster strings in byte (logical) order.
  cluster_text->clear();
  for (map<int, string>::const_iterator it = start_byte_to_text.begin();
       it != start_byte_to_text.end(); ++it) {
    cluster_text->push_back(it->second);
  }
  return cluster_text->size();
}
// Merges an array of BoxChars into words based on the identification of
// BoxChars containing the space character as inter-word separators.
//
// Sometime two adjacent characters in the sequence may be detected as lying on
// different lines based on their spatial positions. This may be the result of a
// newline character at end of the last word on a line in the source text, or of
// a discretionary line-break created by Pango at intra-word locations like
// hyphens. When this is detected the word is split at that location into
// multiple BoxChars. Otherwise, each resulting BoxChar will contain a word and
// its bounding box.
static void MergeBoxCharsToWords(vector<BoxChar*>* boxchars) {
  vector<BoxChar*> result;
  bool started_word = false;
  for (int i = 0; i < boxchars->size(); ++i) {
    // Spaces and box-less chars pass through unchanged and end any word.
    if (boxchars->at(i)->ch() == " " ||
        boxchars->at(i)->box() == NULL) {
      result.push_back(boxchars->at(i));
      boxchars->at(i) = NULL;
      started_word = false;
      continue;
    }
    if (!started_word) {
      // Begin new word
      started_word = true;
      result.push_back(boxchars->at(i));
      boxchars->at(i) = NULL;
    } else {
      BoxChar* last_boxchar = result.back();
      // Compute bounding box union
      const Box* box = boxchars->at(i)->box();
      Box* last_box = last_boxchar->mutable_box();
      int left = min(last_box->x, box->x);
      int right = max(last_box->x + last_box->w, box->x + box->w);
      int top = min(last_box->y, box->y);
      int bottom = max(last_box->y + last_box->h, box->y + box->h);
      // Conclude that the word was broken to span multiple lines based on the
      // size of the merged bounding box in relation to those of the individual
      // characters seen so far.
      if (right - left > last_box->w + 5 * box->w) {
        tlog(1, "Found line break after '%s'", last_boxchar->ch().c_str());
        // Insert a fake interword space and start a new word with the current
        // boxchar.
        result.push_back(new BoxChar(" ", 1));
        result.push_back(boxchars->at(i));
        boxchars->at(i) = NULL;
        continue;
      }
      // Append to last word
      last_boxchar->mutable_ch()->append(boxchars->at(i)->ch());
      last_box->x = left;
      last_box->w = right - left;
      last_box->y = top;
      last_box->h = bottom - top;
      delete boxchars->at(i);
      boxchars->at(i) = NULL;
    }
  }
  boxchars->swap(result);
}
// Computes bounding boxes and logical-order text for every grapheme cluster
// of the current layout, appends the resulting BoxChars to boxchars_, and
// records the overall extent of the page in page_boxes_.
void StringRenderer::ComputeClusterBoxes() {
  const char* text = pango_layout_get_text(layout_);
  PangoLayoutIter* cluster_iter = pango_layout_get_iter(layout_);
  // Do a first pass to store cluster start indexes.
  vector<int> cluster_start_indices;
  do {
    cluster_start_indices.push_back(pango_layout_iter_get_index(cluster_iter));
    tlog(3, "Added %d\n", cluster_start_indices.back());
  } while (pango_layout_iter_next_cluster(cluster_iter));
  pango_layout_iter_free(cluster_iter);
  cluster_start_indices.push_back(strlen(text));
  tlog(3, "Added last index %d\n", cluster_start_indices.back());
  // Sort the indices and create a map from start to end indices.
  sort(cluster_start_indices.begin(), cluster_start_indices.end());
  map<int, int> cluster_start_to_end_index;
  for (int i = 0; i < cluster_start_indices.size() - 1; ++i) {
    cluster_start_to_end_index[cluster_start_indices[i]]
        = cluster_start_indices[i + 1];
  }
  // Iterate again to compute cluster boxes and their text with the obtained
  // cluster extent information.
  cluster_iter = pango_layout_get_iter(layout_);
  // Store BoxChars* sorted by their byte start positions
  map<int, BoxChar*> start_byte_to_box;
  do {
    PangoRectangle cluster_rect;
    pango_layout_iter_get_cluster_extents(cluster_iter, &cluster_rect,
                                          NULL);
    pango_extents_to_pixels(&cluster_rect, NULL);
    const int start_byte_index = pango_layout_iter_get_index(cluster_iter);
    const int end_byte_index = cluster_start_to_end_index[start_byte_index];
    string cluster_text = string(text + start_byte_index,
                                 end_byte_index - start_byte_index);
    if (cluster_text.size() && cluster_text[0] == '\n') {
      tlog(2, "Skipping newlines at start of text.\n");
      continue;
    }
    // Whitespace clusters become box-less placeholder BoxChars.
    if (!cluster_rect.width || !cluster_rect.height ||
        IsUTF8Whitespace(cluster_text.c_str())) {
      tlog(2, "Skipping whitespace with boxdim (%d,%d) '%s'\n",
           cluster_rect.width, cluster_rect.height, cluster_text.c_str());
      BoxChar* boxchar = new BoxChar(" ", 1);
      boxchar->set_page(page_);
      start_byte_to_box[start_byte_index] = boxchar;
      continue;
    }
    // Prepare a boxchar for addition at this byte position.
    tlog(2, "[%d %d], %d, %d : start_byte=%d end_byte=%d : '%s'\n",
         cluster_rect.x, cluster_rect.y,
         cluster_rect.width, cluster_rect.height,
         start_byte_index, end_byte_index,
         cluster_text.c_str());
    ASSERT_HOST_MSG(cluster_rect.width,
                    "cluster_text:%s start_byte_index:%d\n",
                    cluster_text.c_str(), start_byte_index);
    ASSERT_HOST_MSG(cluster_rect.height,
                    "cluster_text:%s start_byte_index:%d\n",
                    cluster_text.c_str(), start_byte_index);
    if (box_padding_) {
      // Expand the box on all sides, clipping at the page origin.
      cluster_rect.x = max(0, cluster_rect.x - box_padding_);
      cluster_rect.width += 2 * box_padding_;
      cluster_rect.y = max(0, cluster_rect.y - box_padding_);
      cluster_rect.height += 2 * box_padding_;
    }
    if (add_ligatures_) {
      // Make sure the output box files have ligatured text in case the font
      // decided to use an unmapped glyph.
      cluster_text = LigatureTable::Get()->AddLigatures(cluster_text, NULL);
    }
    BoxChar* boxchar = new BoxChar(cluster_text.c_str(), cluster_text.size());
    boxchar->set_page(page_);
    boxchar->AddBox(cluster_rect.x, cluster_rect.y,
                    cluster_rect.width, cluster_rect.height);
    start_byte_to_box[start_byte_index] = boxchar;
  } while (pango_layout_iter_next_cluster(cluster_iter));
  pango_layout_iter_free(cluster_iter);
  // There is a subtle bug in the cluster text reported by the PangoLayoutIter
  // on ligatured characters (eg. The word "Lam-Aliph" in arabic). To work
  // around this, we use text reported using the PangoGlyphIter which is
  // accurate.
  // TODO(ranjith): Revisit whether this is still needed in newer versions of
  // pango.
  vector<string> cluster_text;
  if (GetClusterStrings(&cluster_text)) {
    ASSERT_HOST(cluster_text.size() == start_byte_to_box.size());
    int ind = 0;
    for (map<int, BoxChar*>::iterator it = start_byte_to_box.begin();
         it != start_byte_to_box.end(); ++it, ++ind) {
      it->second->mutable_ch()->swap(cluster_text[ind]);
    }
  }
  // Append to the boxchars list in byte order.
  vector<BoxChar*> page_boxchars;
  page_boxchars.reserve(start_byte_to_box.size());
  string last_ch;
  for (map<int, BoxChar*>::const_iterator it = start_byte_to_box.begin();
       it != start_byte_to_box.end(); ++it) {
    if (it->second->ch() == kWordJoinerUTF8) {
      // Skip word joiner (U+2060) characters inserted in ngram mode; they
      // should not appear in the output boxes.
      delete it->second;
    } else {
      page_boxchars.push_back(it->second);
    }
  }
  CorrectBoxPositionsToLayout(&page_boxchars);
  if (render_fullwidth_latin_) {
    for (map<int, BoxChar*>::iterator it = start_byte_to_box.begin();
         it != start_byte_to_box.end(); ++it) {
      // Convert fullwidth Latin characters to their halfwidth forms.
      string half(ConvertFullwidthLatinToBasicLatin(it->second->ch()));
      it->second->mutable_ch()->swap(half);
    }
  }
  // Merge the character boxes into word boxes if we are rendering n-grams.
  if (output_word_boxes_) {
    MergeBoxCharsToWords(&page_boxchars);
  }
  boxchars_.insert(boxchars_.end(), page_boxchars.begin(), page_boxchars.end());
  // Compute the page bounding box
  // NOTE(review): if no boxchar on this page has a box, all_boxes remains
  // NULL below and is passed to boxaGetExtent, and a NULL page_box would be
  // added to page_boxes_ -- confirm callers guarantee at least one
  // non-whitespace cluster per page.
  Box* page_box = NULL;
  Boxa* all_boxes = NULL;
  for (int i = 0; i < page_boxchars.size(); ++i) {
    if (page_boxchars[i]->box() == NULL) continue;
    if (all_boxes == NULL)
      all_boxes = boxaCreate(0);
    boxaAddBox(all_boxes, page_boxchars[i]->mutable_box(), L_CLONE);
  }
  boxaGetExtent(all_boxes, NULL, NULL, &page_box);
  boxaDestroy(&all_boxes);
  if (page_boxes_ == NULL)
    page_boxes_ = boxaCreate(0);
  boxaAddBox(page_boxes_, page_box, L_INSERT);
}
// Maps box coordinates from layout space to page space, undoing the margin
// translation (and, for vertical text, the rotation) applied when rendering.
void StringRenderer::CorrectBoxPositionsToLayout(vector<BoxChar*>* boxchars) {
  if (!vertical_text_) {
    // Horizontal layout: only the margin translation needs undoing.
    BoxChar::TranslateBoxes(h_margin_, v_margin_, boxchars);
    return;
  }
  // Vertical layout was rendered rotated about the top-right margin corner,
  // so translate to that corner and rotate all boxes by the same angle.
  const double angle = -pango_gravity_to_rotation(
      pango_context_get_base_gravity(pango_layout_get_context(layout_)));
  BoxChar::TranslateBoxes(page_width_ - h_margin_, v_margin_, boxchars);
  BoxChar::RotateBoxes(angle, page_width_ - h_margin_, v_margin_, 0,
                       boxchars->size(), boxchars);
}
// Removes from *utf8_text every word (maximal run of non-whitespace) that the
// current font cannot fully render, leaving all whitespace untouched.
// Returns the number of words removed.
int StringRenderer::StripUnrenderableWords(string* utf8_text) const {
  string kept;
  const char* data = utf8_text->c_str();
  const int total_len = utf8_text->length();
  int dropped = 0;
  int pos = 0;
  while (pos < total_len) {
    // Whitespace is always copied through unchanged.
    const int ws_len = SpanUTF8Whitespace(data + pos);
    kept.append(data + pos, ws_len);
    pos += ws_len;
    if (pos == total_len) break;
    // Keep the following word only if the font can render all of it.
    const int word_len = SpanUTF8NotWhitespace(data + pos);
    if (font_.CanRenderString(data + pos, word_len)) {
      kept.append(data + pos, word_len);
    } else {
      ++dropped;
    }
    pos += word_len;
  }
  utf8_text->swap(kept);
  if (dropped > 0) {
    tprintf("Stripped %d unrenderable words\n", dropped);
  }
  return dropped;
}
int StringRenderer::RenderToGrayscaleImage(const char* text, int text_length,
Pix** pix) {
Pix *orig_pix = NULL;
int offset = RenderToImage(text, text_length, &orig_pix);
if (orig_pix) {
*pix = pixConvertTo8(orig_pix, false);
pixDestroy(&orig_pix);
}
return offset;
}
int StringRenderer::RenderToBinaryImage(const char* text, int text_length,
int threshold, Pix** pix) {
Pix *orig_pix = NULL;
int offset = RenderToImage(text, text_length, &orig_pix);
if (orig_pix) {
Pix* gray_pix = pixConvertTo8(orig_pix, false);
pixDestroy(&orig_pix);
*pix = pixThresholdToBinary(gray_pix, threshold);
pixDestroy(&gray_pix);
} else {
*pix = orig_pix;
}
return offset;
}
// Add word joiner (WJ) characters between adjacent non-space characters except
// immediately before a combiner.
/* static */
string StringRenderer::InsertWordJoiners(const string& text) {
  string result;
  const UNICHAR::const_iterator end_it =
      UNICHAR::end(text.c_str(), text.length());
  for (UNICHAR::const_iterator it = UNICHAR::begin(text.c_str(), text.length());
       it != end_it; ++it) {
    // Copy the current symbol through unchanged.
    result.append(it.utf8_data(), it.utf8_len());
    // Peek at the following symbol to decide whether a joiner belongs here.
    UNICHAR::const_iterator peek = it;
    ++peek;
    const bool at_boundary = (peek == end_it || *peek == ' ');
    const bool before_combiner = (peek != end_it) && IsCombiner(*peek);
    if (*it != ' ' && *it != '\n' && !at_boundary && !before_combiner) {
      result += kWordJoinerUTF8;
    }
  }
  return result;
}
// Convert halfwidth Basic Latin characters to their fullwidth forms.
string StringRenderer::ConvertBasicLatinToFullwidthLatin(const string& str) {
  string result;
  const UNICHAR::const_iterator end_it =
      UNICHAR::end(str.c_str(), str.length());
  for (UNICHAR::const_iterator it = UNICHAR::begin(str.c_str(), str.length());
       it != end_it; ++it) {
    const char32 ch = *it;
    // Only printable, non-space 7-bit ASCII has a fullwidth counterpart.
    if (IsInterchangeValid7BitAscii(ch) && isprint(ch) && !isspace(ch)) {
      // The fullwidth form lives at codepoint + 0xFEE0.
      const char32 fullwidth = ch + 0xFEE0;
      result.append(EncodeAsUTF8(fullwidth));
    } else {
      result.append(it.utf8_data(), it.utf8_len());
    }
  }
  return result;
}
// Convert fullwidth Latin characters to their halfwidth forms.
string StringRenderer::ConvertFullwidthLatinToBasicLatin(const string& str) {
  string result;
  const UNICHAR::const_iterator end_it =
      UNICHAR::end(str.c_str(), str.length());
  for (UNICHAR::const_iterator it = UNICHAR::begin(str.c_str(), str.length());
       it != end_it; ++it) {
    const char32 halfwidth = FullwidthToHalfwidth(*it);
    // Substitute only when the halfwidth form is printable, non-space
    // 7-bit ASCII; otherwise keep the original symbol verbatim.
    if (IsInterchangeValid7BitAscii(halfwidth) && isprint(halfwidth) &&
        !isspace(halfwidth)) {
      result.append(EncodeAsUTF8(halfwidth));
    } else {
      result.append(it.utf8_data(), it.utf8_len());
    }
  }
  return result;
}
// Returns offset to end of text substring rendered in this method.
// Renders at most one page of text (up to the first page break) onto the
// internal cairo surface. If pix is non-NULL, the surface is converted to a
// 32-bit Pix in *pix (any previous *pix is destroyed first). Cluster boxes
// are computed and the page counter advanced whether or not pix is given.
int StringRenderer::RenderToImage(const char* text, int text_length,
                                  Pix** pix) {
  if (pix && *pix) pixDestroy(pix);
  InitPangoCairo();
  const int page_offset = FindFirstPageBreakOffset(text, text_length);
  if (!page_offset) {
    // Nothing fits on a page; render nothing.
    return 0;
  }
  // Remember where this page's boxchars start within boxchars_.
  start_box_ = boxchars_.size();
  if (!vertical_text_) {
    // Translate by the specified margin
    cairo_translate(cr_, h_margin_, v_margin_);
  } else {
    // Vertical text rendering is achieved by a two-step process of first
    // performing regular horizontal layout with character orientation set to
    // EAST, and then translating and rotating the layout before rendering onto
    // the desired image surface. The settings required for the former step are
    // done within InitPangoCairo().
    //
    // Translate to the top-right margin of page
    cairo_translate(cr_, page_width_ - h_margin_, v_margin_);
    // Rotate the layout
    double rotation = - pango_gravity_to_rotation(
        pango_context_get_base_gravity(pango_layout_get_context(layout_)));
    tlog(2, "Rotating by %f radians\n", rotation);
    cairo_rotate(cr_, rotation);
    pango_cairo_update_layout(cr_, layout_);
  }
  // Apply the configured text transformations in order: fullwidth
  // conversion, unrenderable-word stripping, uncovered-char dropping,
  // ligation, then underline attributes.
  string page_text(text, page_offset);
  if (render_fullwidth_latin_) {
    // Convert Basic Latin to their fullwidth forms.
    page_text = ConvertBasicLatinToFullwidthLatin(page_text);
  }
  if (strip_unrenderable_words_) {
    StripUnrenderableWords(&page_text);
  }
  if (drop_uncovered_chars_ &&
      !font_.CoversUTF8Text(page_text.c_str(), page_text.length())) {
    int num_dropped = font_.DropUncoveredChars(&page_text);
    if (num_dropped) {
      tprintf("WARNING: Dropped %d uncovered characters\n", num_dropped);
    }
  }
  if (add_ligatures_) {
    // Add ligatures wherever possible, including custom ligatures.
    page_text = LigatureTable::Get()->AddLigatures(page_text, &font_);
  }
  if (underline_start_prob_ > 0) {
    SetWordUnderlineAttributes(page_text);
  }
  pango_layout_set_text(layout_, page_text.c_str(), page_text.length());
  if (pix) {
    // Set a white background for the target image surface.
    cairo_set_source_rgb(cr_, 1.0, 1.0, 1.0);  // sets drawing colour to white
    // Fill the surface with the active colour (if you don't do this, you will
    // be given a surface with a transparent background to draw on)
    cairo_paint(cr_);
    // Set the ink color to black
    cairo_set_source_rgb(cr_, pen_color_[0], pen_color_[1], pen_color_[2]);
    // If the target surface or transformation properties of the cairo instance
    // have changed, update the pango layout to reflect this
    pango_cairo_update_layout(cr_, layout_);
    {
      DISABLE_HEAP_LEAK_CHECK;  // for Fontconfig
      // Draw the pango layout onto the cairo surface
      pango_cairo_show_layout(cr_, layout_);
    }
    *pix = CairoARGB32ToPixFormat(surface_);
  }
  // Boxes are computed even when no output pix was requested.
  ComputeClusterBoxes();
  FreePangoCairo();
  // Update internal state variables.
  ++page_;
  return page_offset;
}
// Render a string to an image, returning it as an 8 bit pix. Behaves as
// RenderString, except that it ignores the font set at construction and works
// through all the fonts, returning 0 until they are exhausted, at which point
// it returns the value it should have returned all along, but no pix this time.
// Fonts that don't contain a given proportion of the characters in the string
// get skipped.
// Fonts that work each get rendered and the font name gets added
// to the image.
// NOTE that no boxes are produced by this function.
//
// Example usage: To render a null terminated char-array "txt"
//
// int offset = 0;
// do {
//   Pix *pix;
//   offset += renderer.RenderAllFontsToImage(min_proportion, txt + offset,
//                                            strlen(txt + offset), NULL, &pix);
//   ...
// } while (offset < strlen(text));
//
int StringRenderer::RenderAllFontsToImage(double min_coverage,
                                          const char* text, int text_length,
                                          string* font_used, Pix** image) {
  // Select a suitable font to render the title with.
  const char kTitleTemplate[] = "%s : %d hits = %.2f%%, raw = %d = %.2f%%";
  string title_font;
  if (!FontUtils::SelectFont(kTitleTemplate, strlen(kTitleTemplate),
                             &title_font, NULL)) {
    tprintf("WARNING: Could not find a font to render image title with!\n");
    title_font = "Arial";
  }
  title_font += " 8";
  tlog(1, "Selected title font: %s\n", title_font.c_str());
  if (font_used) font_used->clear();
  string orig_font = font_.DescriptionName();
  if (char_map_.empty()) {
    total_chars_ = 0;
    // Fill the hash table and use that for computing which fonts to use.
    for (UNICHAR::const_iterator it = UNICHAR::begin(text, text_length);
         it != UNICHAR::end(text, text_length); ++it) {
      ++total_chars_;
      ++char_map_[*it];
    }
    tprintf("Total chars = %d\n", total_chars_);
  }
  const vector<string>& all_fonts = FontUtils::ListAvailableFonts();
  // Resume the scan from where the previous call left off.
  for (int i = font_index_; i < all_fonts.size(); ++i) {
    ++font_index_;
    int raw_score = 0;
    int ok_chars = FontUtils::FontScore(char_map_, all_fonts[i], &raw_score,
                                        NULL);
    if (ok_chars > 0 && ok_chars >= total_chars_ * min_coverage) {
      set_font(all_fonts[i]);
      int offset = RenderToBinaryImage(text, text_length, 128, image);
      ClearBoxes();  // Get rid of them as they are garbage.
      const int kMaxTitleLength = 1024;
      char title[kMaxTitleLength];
      snprintf(title, kMaxTitleLength, kTitleTemplate,
               all_fonts[i].c_str(), ok_chars,
               100.0 * ok_chars / total_chars_, raw_score,
               100.0 * raw_score / char_map_.size());
      tprintf("%s\n", title);
      // This is a good font! Store the offset to return once we've tried all
      // the fonts.
      if (offset) {
        last_offset_ = offset;
        if (font_used) *font_used = all_fonts[i];
      }
      // Add the font to the image.
      set_font(title_font);
      v_margin_ /= 8;
      Pix* title_image = NULL;
      RenderToBinaryImage(title, strlen(title), 128, &title_image);
      // Either rendering may legitimately produce no pix (e.g. nothing fit
      // on a page); guard against passing NULL to pixOr, which would crash.
      if (*image != NULL && title_image != NULL) {
        pixOr(*image, *image, title_image);
      }
      pixDestroy(&title_image);
      v_margin_ *= 8;
      set_font(orig_font);
      // We return the real offset only after cycling through the list of fonts.
      return 0;
    } else {
      tprintf("Font %s failed with %d hits = %.2f%%\n",
              all_fonts[i].c_str(), ok_chars, 100.0 * ok_chars / total_chars_);
    }
  }
  *image = NULL;
  font_index_ = 0;
  char_map_.clear();
  return last_offset_;
}
} // namespace tesseract
| C++ |
// Copyright 2011 Google Inc. All Rights Reserved.
// Author: rays@google.com (Ray Smith)
///////////////////////////////////////////////////////////////////////
// File: shapeclassifier.h
// Description: Base interface class for classifiers that return a
// shape index.
// Author: Ray Smith
// Created: Tue Sep 13 11:26:32 PDT 2011
//
// (C) Copyright 2011, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#ifndef TESSERACT_CLASSIFY_SHAPECLASSIFIER_H_
#define TESSERACT_CLASSIFY_SHAPECLASSIFIER_H_
#include "unichar.h"
template <typename T> class GenericVector;
struct Pix;
class ScrollView;
class UNICHARSET;
namespace tesseract {
template <typename T> class PointerVector;
struct ShapeRating;
class ShapeTable;
class TrainingSample;
class TrainingSampleSet;
struct UnicharRating;
// Interface base class for classifiers that produce ShapeRating results.
class ShapeClassifier {
 public:
  // Virtual destructor so subclasses destroy correctly via a base pointer.
  virtual ~ShapeClassifier() {}
  // Classifies the given [training] sample, writing to results.
  // If page_pix is not NULL, the overriding function may call
  // sample.GetSamplePix(padding, page_pix) to get an image of the sample
  // padded (with real image data) by the given padding to extract features
  // from the image of the character. Other members of TrainingSample:
  // features(), micro_features(), cn_feature(), geo_feature() may be used
  // to get the appropriate tesseract features.
  // If debug is non-zero, then various degrees of classifier dependent debug
  // information is provided.
  // If keep_this (a UNICHAR_ID) is >= 0, then the results should always
  // contain keep_this, and (if possible) anything of intermediate confidence.
  // (Used for answering "Why didn't it get that right?" questions.) It must
  // be a UNICHAR_ID as the callers have no clue how to choose the best shape
  // that may contain a desired answer.
  // The return value is the number of classes saved in results.
  // NOTE that overriding functions MUST clear and sort the results by
  // descending rating unless the classifier is working with a team of such
  // classifiers.
  // NOTE: Neither overload of ClassifySample is pure, but at least one must
  // be overridden by a classifier in order for it to do anything.
  virtual int UnicharClassifySample(const TrainingSample& sample, Pix* page_pix,
                                    int debug, UNICHAR_ID keep_this,
                                    GenericVector<UnicharRating>* results);
 protected:
  // As UnicharClassifySample, but results are ShapeRatings (shape indices)
  // rather than per-unichar ratings.
  virtual int ClassifySample(const TrainingSample& sample, Pix* page_pix,
                             int debug, UNICHAR_ID keep_this,
                             GenericVector<ShapeRating>* results);
 public:
  // Returns the shape that contains unichar_id that has the best result.
  // If result is not NULL, it is set with the shape_id and rating.
  // Returns -1 if ClassifySample fails to provide any result containing
  // unichar_id. BestShapeForUnichar does not need to be overridden if
  // ClassifySample respects the keep_this rule.
  virtual int BestShapeForUnichar(const TrainingSample& sample, Pix* page_pix,
                                  UNICHAR_ID unichar_id, ShapeRating* result);
  // Provides access to the ShapeTable that this classifier works with.
  virtual const ShapeTable* GetShapeTable() const = 0;
  // Provides access to the UNICHARSET that this classifier works with.
  // Must be overridden IFF GetShapeTable() returns NULL.
  virtual const UNICHARSET& GetUnicharset() const;
  // Visual debugger classifies the given sample, displays the results and
  // solicits user input to display other classifications. Returns when
  // the user has finished with debugging the sample.
  // Probably doesn't need to be overridden if the subclass provides
  // DisplayClassifyAs.
  virtual void DebugDisplay(const TrainingSample& sample, Pix* page_pix,
                            UNICHAR_ID unichar_id);
  // Displays classification as the given unichar_id. Creates as many windows
  // as it feels fit, using index as a guide for placement. Adds any created
  // windows to the windows output and returns a new index that may be used
  // by any subsequent classifiers. Caller waits for the user to view and
  // then destroys the windows by clearing the vector.
  virtual int DisplayClassifyAs(const TrainingSample& sample, Pix* page_pix,
                                UNICHAR_ID unichar_id, int index,
                                PointerVector<ScrollView>* windows);
  // Prints debug information on the results. context is some introductory/title
  // message. The Unichar variant prints UnicharRatings; the other prints
  // ShapeRatings.
  virtual void UnicharPrintResults(
      const char* context, const GenericVector<UnicharRating>& results) const;
  virtual void PrintResults(const char* context,
                            const GenericVector<ShapeRating>& results) const;
 protected:
  // Removes any result that has all its unichars covered by a better choice,
  // regardless of font.
  void FilterDuplicateUnichars(GenericVector<ShapeRating>* results) const;
};
} // namespace tesseract.
#endif // TESSERACT_CLASSIFY_SHAPECLASSIFIER_H_
| C++ |
// Copyright 2010 Google Inc. All Rights Reserved.
// Author: rays@google.com (Ray Smith)
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#include "trainingsampleset.h"
#include "allheaders.h"
#include "boxread.h"
#include "fontinfo.h"
#include "indexmapbidi.h"
#include "intfeaturedist.h"
#include "intfeaturemap.h"
#include "intfeaturespace.h"
#include "shapetable.h"
#include "trainingsample.h"
#include "unicity_table.h"
namespace tesseract {
// Debugging aid: id of a single character to trace (-1 = disabled).
// NOTE(review): presumed from the name; confirm against usage elsewhere.
const int kTestChar = -1;  // 37;
// Max number of distances to compute the squared way, i.e. the largest
// product of font-list sizes for which all pairwise distances are computed.
const int kSquareLimit = 25;
// Prime numbers for subsampling distances.
const int kPrime1 = 17;
const int kPrime2 = 13;
// Min samples from which to start discarding outliers.
const int kMinOutlierSamples = 5;
// Default-constructs an empty font/class entry: no raw samples, no canonical
// sample selected yet (-1), and zero canonical distance.
TrainingSampleSet::FontClassInfo::FontClassInfo()
  : num_raw_samples(0), canonical_sample(-1), canonical_dist(0.0f) {
}
// Writes to the given file. Returns false in case of error.
bool TrainingSampleSet::FontClassInfo::Serialize(FILE* fp) const {
  // Fixed-size scalar fields first, then the variable-length sample list.
  if (fwrite(&num_raw_samples, sizeof(num_raw_samples), 1, fp) != 1 ||
      fwrite(&canonical_sample, sizeof(canonical_sample), 1, fp) != 1 ||
      fwrite(&canonical_dist, sizeof(canonical_dist), 1, fp) != 1) {
    return false;
  }
  return samples.Serialize(fp);
}
// Reads from the given file. Returns false in case of error.
// If swap is true, assumes a big/little-endian swap is needed.
bool TrainingSampleSet::FontClassInfo::DeSerialize(bool swap, FILE* fp) {
  // Scalar fields are read raw and byte-swapped afterwards if required.
  if (fread(&num_raw_samples, sizeof(num_raw_samples), 1, fp) != 1 ||
      fread(&canonical_sample, sizeof(canonical_sample), 1, fp) != 1 ||
      fread(&canonical_dist, sizeof(canonical_dist), 1, fp) != 1) {
    return false;
  }
  if (!samples.DeSerialize(swap, fp)) return false;
  if (swap) {
    // samples handled its own swapping above; fix up the scalars here.
    ReverseN(&num_raw_samples, sizeof(num_raw_samples));
    ReverseN(&canonical_sample, sizeof(canonical_sample));
    ReverseN(&canonical_dist, sizeof(canonical_dist));
  }
  return true;
}
// Constructs an empty sample set that borrows (does not own) the given
// font table, used later to map font ids to names.
TrainingSampleSet::TrainingSampleSet(const FontInfoTable& font_table)
  : num_raw_samples_(0), unicharset_size_(0),
    font_class_array_(NULL), fontinfo_table_(font_table) {
}
// Destructor frees the font/class 2-D array. The samples themselves are
// presumably owned and freed by samples_ — confirm its declared container
// type (PointerVector?) in the header.
TrainingSampleSet::~TrainingSampleSet() {
  delete font_class_array_;
}
// Writes to the given file. Returns false in case of error.
bool TrainingSampleSet::Serialize(FILE* fp) const {
if (!samples_.Serialize(fp)) return false;
if (!unicharset_.save_to_file(fp)) return false;
if (!font_id_map_.Serialize(fp)) return false;
inT8 not_null = font_class_array_ != NULL;
if (fwrite(¬_null, sizeof(not_null), 1, fp) != 1) return false;
if (not_null) {
if (!font_class_array_->SerializeClasses(fp)) return false;
}
return true;
}
// Reads from the given file. Returns false in case of error.
// If swap is true, assumes a big/little-endian swap is needed.
bool TrainingSampleSet::DeSerialize(bool swap, FILE* fp) {
if (!samples_.DeSerialize(swap, fp)) return false;
num_raw_samples_ = samples_.size();
if (!unicharset_.load_from_file(fp)) return false;
if (!font_id_map_.DeSerialize(swap, fp)) return false;
if (font_class_array_ != NULL) {
delete font_class_array_;
font_class_array_ = NULL;
}
inT8 not_null;
if (fread(¬_null, sizeof(not_null), 1, fp) != 1) return false;
if (not_null) {
FontClassInfo empty;
font_class_array_ = new GENERIC_2D_ARRAY<FontClassInfo >(1, 1 , empty);
if (!font_class_array_->DeSerializeClasses(swap, fp)) return false;
}
unicharset_size_ = unicharset_.size();
return true;
}
// Load an initial unicharset, or set one up if the file cannot be read.
void TrainingSampleSet::LoadUnicharset(const char* filename) {
  if (!unicharset_.load_from_file(filename)) {
    tprintf("Failed to load unicharset from file %s\n"
            "Building unicharset from scratch...\n",
            filename);
    unicharset_.clear();
    // Add special characters as they were removed by the clear.
    // Appending an empty unicharset restores them.
    UNICHARSET empty;
    unicharset_.AppendOtherUnicharset(empty);
  }
  // Cache the size; updated again whenever samples are added.
  unicharset_size_ = unicharset_.size();
}
// Adds a character sample to this sample set.
// If the unichar is not already in the local unicharset, it is added.
// Returns the unichar_id of the added sample, from the local unicharset.
int TrainingSampleSet::AddSample(const char* unichar, TrainingSample* sample) {
if (!unicharset_.contains_unichar(unichar)) {
unicharset_.unichar_insert(unichar);
if (unicharset_.size() > MAX_NUM_CLASSES) {
tprintf("Error: Size of unicharset in TrainingSampleSet::AddSample is "
"greater than MAX_NUM_CLASSES\n");
return -1;
}
}
UNICHAR_ID char_id = unicharset_.unichar_to_id(unichar);
AddSample(char_id, sample);
return char_id;
}
// Adds a character sample to this sample set with the given unichar_id,
// which must correspond to the local unicharset (in this).
// Stores the pointer in samples_ — presumably taking ownership; confirm
// samples_'s container type in the header.
void TrainingSampleSet::AddSample(int unichar_id, TrainingSample* sample) {
  sample->set_class_id(unichar_id);
  samples_.push_back(sample);
  // Keep the cached counts in sync with the containers.
  num_raw_samples_ = samples_.size();
  unicharset_size_ = unicharset_.size();
}
// Returns the number of samples for the given font,class pair.
// If randomize is true, returns the number of samples accessible
// with randomizing on. (Increases the number of samples if small.)
// OrganizeByFontAndClass must have been already called.
int TrainingSampleSet::NumClassSamples(int font_id, int class_id,
                                       bool randomize) const {
  ASSERT_HOST(font_class_array_ != NULL);
  // Out-of-range font or class ids simply have no samples.
  if (font_id < 0 || font_id >= font_id_map_.SparseSize() ||
      class_id < 0 || class_id >= unicharset_size_) {
    return 0;
  }
  const int font_index = font_id_map_.SparseToCompact(font_id);
  if (font_index < 0) return 0;  // The font has no samples.
  if (randomize)
    return (*font_class_array_)(font_index, class_id).samples.size();
  return (*font_class_array_)(font_index, class_id).num_raw_samples;
}
// Gets a sample by its index.
// The returned pointer is borrowed; the sample set retains the sample.
const TrainingSample* TrainingSampleSet::GetSample(int index) const {
  return samples_[index];
}
// Gets a sample by its font, class, index.
// Returns NULL if the font has no samples.
// OrganizeByFontAndClass must have been already called.
const TrainingSample* TrainingSampleSet::GetSample(int font_id, int class_id,
                                                   int index) const {
  ASSERT_HOST(font_class_array_ != NULL);
  const int compact_font = font_id_map_.SparseToCompact(font_id);
  if (compact_font < 0) return NULL;
  // The per-font/class entry holds indices into the flat samples_ array.
  const int global_index =
      (*font_class_array_)(compact_font, class_id).samples[index];
  return samples_[global_index];
}
// Get a sample by its font, class, index. Does not randomize.
// Returns NULL if the font has no samples.
// OrganizeByFontAndClass must have been already called.
TrainingSample* TrainingSampleSet::MutableSample(int font_id, int class_id,
                                                 int index) {
  ASSERT_HOST(font_class_array_ != NULL);
  const int compact_font = font_id_map_.SparseToCompact(font_id);
  if (compact_font < 0) return NULL;
  // The per-font/class entry holds indices into the flat samples_ array.
  const int global_index =
      (*font_class_array_)(compact_font, class_id).samples[index];
  return samples_[global_index];
}
// Returns a string debug representation of the given sample:
// font, unichar_str, bounding box, page.
// Format: "<font name> <boxfile line>" where the boxfile line comes from
// MakeBoxFileStr.
STRING TrainingSampleSet::SampleToString(const TrainingSample& sample) const {
  STRING boxfile_str;
  MakeBoxFileStr(unicharset_.id_to_unichar(sample.class_id()),
                 sample.bounding_box(), sample.page_num(), &boxfile_str);
  return STRING(fontinfo_table_.get(sample.font_id()).name) + " " + boxfile_str;
}
// Gets the combined set of features used by all the samples of the given
// font/class combination.
// Asserts if the font has no compact mapping (i.e. no samples).
const BitVector& TrainingSampleSet::GetCloudFeatures(
    int font_id, int class_id) const {
  int font_index = font_id_map_.SparseToCompact(font_id);
  ASSERT_HOST(font_index >= 0);
  return (*font_class_array_)(font_index, class_id).cloud_features;
}
// Gets the indexed features of the canonical sample of the given
// font/class combination.
// Asserts if the font has no compact mapping (i.e. no samples).
const GenericVector<int>& TrainingSampleSet::GetCanonicalFeatures(
    int font_id, int class_id) const {
  int font_index = font_id_map_.SparseToCompact(font_id);
  ASSERT_HOST(font_index >= 0);
  return (*font_class_array_)(font_index, class_id).canonical_features;
}
// Returns the distance between the given UniCharAndFonts pair.
// If matched_fonts, only matching fonts, are considered, unless that yields
// the empty set.
// OrganizeByFontAndClass must have been already called.
// Three strategies are used depending on the inputs:
//   1. matched_fonts: average ClusterDistance over font pairs with equal ids.
//   2. small font lists (product <= kSquareLimit): average over ALL pairs.
//   3. large lists: subsample pairs using a prime-stride walk so each pair
//      is distinct.
float TrainingSampleSet::UnicharDistance(const UnicharAndFonts& uf1,
                                         const UnicharAndFonts& uf2,
                                         bool matched_fonts,
                                         const IntFeatureMap& feature_map) {
  int num_fonts1 = uf1.font_ids.size();
  int c1 = uf1.unichar_id;
  int num_fonts2 = uf2.font_ids.size();
  int c2 = uf2.unichar_id;
  double dist_sum = 0.0;
  int dist_count = 0;
  bool debug = false;
  if (matched_fonts) {
    // Compute distances only where fonts match.
    for (int i = 0; i < num_fonts1; ++i) {
      int f1 = uf1.font_ids[i];
      for (int j = 0; j < num_fonts2; ++j) {
        int f2 = uf2.font_ids[j];
        if (f1 == f2) {
          dist_sum += ClusterDistance(f1, c1, f2, c2, feature_map);
          ++dist_count;
        }
      }
    }
  } else if (num_fonts1 * num_fonts2 <= kSquareLimit) {
    // Small enough sets to compute all the distances.
    for (int i = 0; i < num_fonts1; ++i) {
      int f1 = uf1.font_ids[i];
      for (int j = 0; j < num_fonts2; ++j) {
        int f2 = uf2.font_ids[j];
        dist_sum += ClusterDistance(f1, c1, f2, c2, feature_map);
        if (debug) {
          tprintf("Cluster dist %d %d %d %d = %g\n",
                  f1, c1, f2, c2,
                  ClusterDistance(f1, c1, f2, c2, feature_map));
        }
        ++dist_count;
      }
    }
  } else {
    // Subsample distances, using the largest set once, and stepping through
    // the smaller set so as to ensure that all the pairs are different.
    // The stride is a prime different from num_fonts2 so the walk cycles
    // through distinct indices.
    int increment = kPrime1 != num_fonts2 ? kPrime1 : kPrime2;
    int index = 0;
    int num_samples = MAX(num_fonts1, num_fonts2);
    for (int i = 0; i < num_samples; ++i, index += increment) {
      int f1 = uf1.font_ids[i % num_fonts1];
      int f2 = uf2.font_ids[index % num_fonts2];
      if (debug) {
        tprintf("Cluster dist %d %d %d %d = %g\n",
                f1, c1, f2, c2, ClusterDistance(f1, c1, f2, c2, feature_map));
      }
      dist_sum += ClusterDistance(f1, c1, f2, c2, feature_map);
      ++dist_count;
    }
  }
  if (dist_count == 0) {
    // No matching font pairs: retry without the matched-fonts restriction.
    if (matched_fonts)
      return UnicharDistance(uf1, uf2, false, feature_map);
    return 0.0f;
  }
  return dist_sum / dist_count;
}
// Returns the distance between the given pair of font/class pairs.
// Finds in cache or computes and caches.
// OrganizeByFontAndClass must have been already called.
// Three caches are used, all kept symmetric by writing both directions:
//   - same font: per-class vector indexed by the other class id.
//   - same class: per-font vector indexed by the other compact font index.
//   - both differ: linear-searched list of (class, font, distance) entries.
float TrainingSampleSet::ClusterDistance(int font_id1, int class_id1,
                                         int font_id2, int class_id2,
                                         const IntFeatureMap& feature_map) {
  ASSERT_HOST(font_class_array_ != NULL);
  int font_index1 = font_id_map_.SparseToCompact(font_id1);
  int font_index2 = font_id_map_.SparseToCompact(font_id2);
  if (font_index1 < 0 || font_index2 < 0)
    return 0.0f;
  FontClassInfo& fc_info = (*font_class_array_)(font_index1, class_id1);
  if (font_id1 == font_id2) {
    // Special case cache for speed.
    // Lazily allocated; -1 marks "not yet computed".
    if (fc_info.unichar_distance_cache.size() == 0)
      fc_info.unichar_distance_cache.init_to_size(unicharset_size_, -1.0f);
    if (fc_info.unichar_distance_cache[class_id2] < 0) {
      // Distance has to be calculated.
      float result = ComputeClusterDistance(font_id1, class_id1,
                                            font_id2, class_id2,
                                            feature_map);
      fc_info.unichar_distance_cache[class_id2] = result;
      // Copy to the symmetric cache entry.
      FontClassInfo& fc_info2 = (*font_class_array_)(font_index2, class_id2);
      if (fc_info2.unichar_distance_cache.size() == 0)
        fc_info2.unichar_distance_cache.init_to_size(unicharset_size_, -1.0f);
      fc_info2.unichar_distance_cache[class_id1] = result;
    }
    return fc_info.unichar_distance_cache[class_id2];
  } else if (class_id1 == class_id2) {
    // Another special-case cache for equal class-id.
    if (fc_info.font_distance_cache.size() == 0)
      fc_info.font_distance_cache.init_to_size(font_id_map_.CompactSize(),
                                               -1.0f);
    if (fc_info.font_distance_cache[font_index2] < 0) {
      // Distance has to be calculated.
      float result = ComputeClusterDistance(font_id1, class_id1,
                                            font_id2, class_id2,
                                            feature_map);
      fc_info.font_distance_cache[font_index2] = result;
      // Copy to the symmetric cache entry.
      FontClassInfo& fc_info2 = (*font_class_array_)(font_index2, class_id2);
      if (fc_info2.font_distance_cache.size() == 0)
        fc_info2.font_distance_cache.init_to_size(font_id_map_.CompactSize(),
                                                  -1.0f);
      fc_info2.font_distance_cache[font_index1] = result;
    }
    return fc_info.font_distance_cache[font_index2];
  }
  // Both font and class are different. Linear search for class_id2/font_id2
  // in what is a hopefully short list of distances.
  int cache_index = 0;
  while (cache_index < fc_info.distance_cache.size() &&
         (fc_info.distance_cache[cache_index].unichar_id != class_id2 ||
          fc_info.distance_cache[cache_index].font_id != font_id2))
    ++cache_index;
  if (cache_index == fc_info.distance_cache.size()) {
    // Distance has to be calculated.
    float result = ComputeClusterDistance(font_id1, class_id1,
                                          font_id2, class_id2,
                                          feature_map);
    FontClassDistance fc_dist = { class_id2, font_id2, result };
    fc_info.distance_cache.push_back(fc_dist);
    // Copy to the symmetric cache entry. We know it isn't there already, as
    // we always copy to the symmetric entry.
    FontClassInfo& fc_info2 = (*font_class_array_)(font_index2, class_id2);
    fc_dist.unichar_id = class_id1;
    fc_dist.font_id = font_id1;
    fc_info2.distance_cache.push_back(fc_dist);
  }
  return fc_info.distance_cache[cache_index].distance;
}
// Computes the distance between the given pair of font/class pairs as the
// fraction of each side's canonical features that are reliably separable
// from the other side's feature cloud (symmetrized by summing both
// directions and dividing by the total canonical feature count).
float TrainingSampleSet::ComputeClusterDistance(
    int font_id1, int class_id1, int font_id2, int class_id2,
    const IntFeatureMap& feature_map) const {
  int dist = ReliablySeparable(font_id1, class_id1, font_id2, class_id2,
                               feature_map, false);
  dist += ReliablySeparable(font_id2, class_id2, font_id1, class_id1,
                            feature_map, false);
  int denominator = GetCanonicalFeatures(font_id1, class_id1).size();
  denominator += GetCanonicalFeatures(font_id2, class_id2).size();
  // Guard against 0/0 = NaN when neither class has any canonical features;
  // a NaN here would poison the distance caches in ClusterDistance. Treat
  // such degenerate pairs as zero distance.
  if (denominator == 0) return 0.0f;
  return static_cast<float>(dist) / denominator;
}
// Helper to add a feature and its near neighbors to the good_features.
// levels indicates how many times to compute the offset features of what is
// already there. This is done by iteration rather than recursion.
static void AddNearFeatures(const IntFeatureMap& feature_map, int f, int levels,
                            GenericVector<int>* good_features) {
  good_features->push_back(f);
  // [frontier_start, frontier_end) delimits the features added in the
  // previous round; each round expands only those.
  int frontier_start = 0;
  int frontier_end = 1;
  for (int level = 0; level < levels; ++level) {
    for (int i = frontier_start; i < frontier_end; ++i) {
      const int feature = (*good_features)[i];
      // One offset step in each direction (skipping the null direction 0).
      for (int dir = -kNumOffsetMaps; dir <= kNumOffsetMaps; ++dir) {
        if (dir == 0) continue;
        const int neighbor = feature_map.OffsetFeature(feature, dir);
        if (neighbor >= 0) good_features->push_back(neighbor);
      }
    }
    frontier_start = frontier_end;
    frontier_end = good_features->size();
  }
}
// Returns the number of canonical features of font/class 2 for which
// neither the feature nor any of its near neighbors occurs in the cloud
// of font/class 1. Each such feature is a reliable separation between
// the classes, ASSUMING that the canonical sample is sufficiently
// representative that every sample has a feature near that particular
// feature. To check that this is so on the fly would be prohibitively
// expensive, but it might be possible to pre-qualify the canonical features
// to include only those for which this assumption is true.
// ComputeCanonicalFeatures and ComputeCloudFeatures must have been called
// first, or the results will be nonsense.
int TrainingSampleSet::ReliablySeparable(int font_id1, int class_id1,
int font_id2, int class_id2,
const IntFeatureMap& feature_map,
bool thorough) const {
int result = 0;
const TrainingSample* sample2 = GetCanonicalSample(font_id2, class_id2);
if (sample2 == NULL)
return 0; // There are no canonical features.
const GenericVector<int>& canonical2 = GetCanonicalFeatures(font_id2,
class_id2);
const BitVector& cloud1 = GetCloudFeatures(font_id1, class_id1);
if (cloud1.size() == 0)
return canonical2.size(); // There are no cloud features.
// Find a canonical2 feature that is not in cloud1.
for (int f = 0; f < canonical2.size(); ++f) {
int feature = canonical2[f];
if (cloud1[feature])
continue;
// Gather the near neighbours of f.
GenericVector<int> good_features;
AddNearFeatures(feature_map, feature, 1, &good_features);
// Check that none of the good_features are in the cloud.
int i;
for (i = 0; i < good_features.size(); ++i) {
int good_f = good_features[i];
if (cloud1[good_f]) {
break;
}
}
if (i < good_features.size())
continue; // Found one in the cloud.
++result;
}
return result;
}
// Returns the total index of the requested sample.
// OrganizeByFontAndClass must have been already called.
int TrainingSampleSet::GlobalSampleIndex(int font_id, int class_id,
                                         int index) const {
  ASSERT_HOST(font_class_array_ != NULL);
  const int font_index = font_id_map_.SparseToCompact(font_id);
  if (font_index < 0)
    return -1;  // Unknown font.
  return (*font_class_array_)(font_index, class_id).samples[index];
}
// Gets the canonical sample for the given font, class pair.
// ComputeCanonicalSamples must have been called first.
const TrainingSample* TrainingSampleSet::GetCanonicalSample(
    int font_id, int class_id) const {
  ASSERT_HOST(font_class_array_ != NULL);
  const int font_index = font_id_map_.SparseToCompact(font_id);
  if (font_index < 0)
    return NULL;  // Unknown font.
  const int sample_index =
      (*font_class_array_)(font_index, class_id).canonical_sample;
  if (sample_index < 0)
    return NULL;  // No canonical sample was computed for this class.
  return samples_[sample_index];
}
// Gets the max distance for the given canonical sample.
// ComputeCanonicalSamples must have been called first.
float TrainingSampleSet::GetCanonicalDist(int font_id, int class_id) const {
ASSERT_HOST(font_class_array_ != NULL);
int font_index = font_id_map_.SparseToCompact(font_id);
if (font_index < 0) return 0.0f;
if ((*font_class_array_)(font_index, class_id).canonical_sample >= 0)
return (*font_class_array_)(font_index, class_id).canonical_dist;
else
return 0.0f;
}
// Generates indexed features for all samples with the supplied feature_space.
void TrainingSampleSet::IndexFeatures(const IntFeatureSpace& feature_space) {
  const int num_samples = samples_.size();
  for (int i = 0; i < num_samples; ++i) {
    samples_[i]->IndexFeatures(feature_space);
  }
}
// Delete outlier samples with few features that are shared with others.
// IndexFeatures must have been called already.
// For each font/class pair, builds a histogram of feature usage across all
// its samples, then kills any sample where more than 1/3 of its features
// are unique to that sample (count == 1 in the histogram). If debug is
// true, renders each deleted sample (and its predecessor for comparison)
// into "outliers.png".
void TrainingSampleSet::DeleteOutliers(const IntFeatureSpace& feature_space,
                                       bool debug) {
  if (font_class_array_ == NULL)
    OrganizeByFontAndClass();
  Pixa* pixa = NULL;
  if (debug)
    pixa = pixaCreate(0);
  // Reused histogram buffer: feature index -> usage count within the class.
  GenericVector<int> feature_counts;
  int fs_size = feature_space.Size();
  int font_size = font_id_map_.CompactSize();
  for (int font_index = 0; font_index < font_size; ++font_index) {
    for (int c = 0; c < unicharset_size_; ++c) {
      // Create a histogram of the features used by all samples of this
      // font/class combination.
      feature_counts.init_to_size(fs_size, 0);
      FontClassInfo& fcinfo = (*font_class_array_)(font_index, c);
      int sample_count = fcinfo.samples.size();
      // Too few samples to judge what an outlier looks like.
      if (sample_count < kMinOutlierSamples)
        continue;
      for (int i = 0; i < sample_count; ++i) {
        int s = fcinfo.samples[i];
        const GenericVector<int>& features = samples_[s]->indexed_features();
        for (int f = 0; f < features.size(); ++f) {
          ++feature_counts[features[f]];
        }
      }
      // Second pass: classify each sample's features against the histogram.
      for (int i = 0; i < sample_count; ++i) {
        int s = fcinfo.samples[i];
        const TrainingSample& sample = *samples_[s];
        const GenericVector<int>& features = sample.indexed_features();
        // A feature that has a histogram count of 1 is only used by this
        // sample, making it 'bad'. All others are 'good'.
        int good_features = 0;
        int bad_features = 0;
        for (int f = 0; f < features.size(); ++f) {
          if (feature_counts[features[f]] > 1)
            ++good_features;
          else
            ++bad_features;
        }
        // If more than 1/3 features are bad, then this is an outlier.
        // (bad * 2 > good  <=>  bad > (good + bad) / 3.)
        // NOTE: this message is printed regardless of the debug flag.
        if (bad_features * 2 > good_features) {
          tprintf("Deleting outlier sample of %s, %d good, %d bad\n",
                  SampleToString(sample).string(),
                  good_features, bad_features);
          if (debug) {
            pixaAddPix(pixa, sample.RenderToPix(&unicharset_), L_INSERT);
            // Add the previous sample as well, so it is easier to see in
            // the output what is wrong with this sample.
            // NOTE(review): when i == 0 this reads samples[1]; safe only if
            // kMinOutlierSamples >= 2 — confirm against its definition.
            int t;
            if (i == 0)
              t = fcinfo.samples[1];
            else
              t = fcinfo.samples[i - 1];
            const TrainingSample &csample = *samples_[t];
            pixaAddPix(pixa, csample.RenderToPix(&unicharset_), L_INSERT);
          }
          // Mark the sample for deletion.
          KillSample(samples_[s]);
        }
      }
    }
  }
  // Truly delete all bad samples and renumber everything.
  DeleteDeadSamples();
  if (pixa != NULL) {
    // Tile the debug images into a single png and release Leptonica objects.
    Pix* pix = pixaDisplayTiledInRows(pixa, 1, 2600, 1.0, 0, 10, 10);
    pixaDestroy(&pixa);
    pixWrite("outliers.png", pix, IFF_PNG);
    pixDestroy(&pix);
  }
}
// Marks the given sample index for deletion.
// Deletion is actually completed by DeleteDeadSamples.
// NOTE(review): this flags the sample by negating sample_index, but
// DeleteableSample tests class_id() < 0 — verify the two mechanisms agree
// (e.g. set_sample_index may also affect class_id, or samples are culled
// some other way); otherwise killed samples may survive compaction.
void TrainingSampleSet::KillSample(TrainingSample* sample) {
  sample->set_sample_index(-1);
}
// Deletes all samples with zero features marked by KillSample.
// Compacts samples_ in place using DeleteableSample as the predicate for
// which entries to drop, then resets num_raw_samples_ to the new count.
void TrainingSampleSet::DeleteDeadSamples() {
  samples_.compact(
      NewPermanentTessCallback(this, &TrainingSampleSet::DeleteableSample));
  num_raw_samples_ = samples_.size();
  // Samples must be re-organized now we have deleted a few.
}
// Callback function returns true if the given sample is to be deleted, due
// to having a negative classid.
bool TrainingSampleSet::DeleteableSample(const TrainingSample* sample) {
  if (sample == NULL)
    return true;  // Null entries are always removed.
  return sample->class_id() < 0;
}
// Dumps the original (and, if present, mapped) features of the sample to
// stdout and returns a rendering of the sample. Caller owns the Pix.
static Pix* DebugSample(const UNICHARSET& unicharset,
                        TrainingSample* sample) {
  tprintf("\nOriginal features:\n");
  const int num_features = sample->num_features();
  for (int f = 0; f < num_features; ++f) {
    sample->features()[f].print();
  }
  if (sample->features_are_mapped()) {
    tprintf("\nMapped features:\n");
    const int num_mapped = sample->mapped_features().size();
    for (int f = 0; f < num_mapped; ++f) {
      tprintf("%d ", sample->mapped_features()[f]);
    }
    tprintf("\n");
  }
  return sample->RenderToPix(&unicharset);
}
// Construct an array to access the samples by font,class pair.
// Rebuilds font_id_map_ and font_class_array_ from scratch, then fills in
// the per-(font,class) sample index lists and records the raw-sample
// boundary used to separate real samples from later replications.
void TrainingSampleSet::OrganizeByFontAndClass() {
  // Font indexes are sparse, so we used a map to compact them, so we can
  // have an efficient 2-d array of fonts and character classes.
  SetupFontIdMap();
  int compact_font_size = font_id_map_.CompactSize();
  // Get a 2-d array of generic vectors.
  // delete of NULL is well-defined as a no-op, so no need to test first.
  delete font_class_array_;
  FontClassInfo empty;
  font_class_array_ = new GENERIC_2D_ARRAY<FontClassInfo>(
      compact_font_size, unicharset_size_, empty);
  for (int s = 0; s < samples_.size(); ++s) {
    int font_id = samples_[s]->font_id();
    int class_id = samples_[s]->class_id();
    // Diagnostic print before the assert so the bad ids are visible.
    if (font_id < 0 || font_id >= font_id_map_.SparseSize()) {
      tprintf("Font id = %d/%d, class id = %d/%d on sample %d\n",
              font_id, font_id_map_.SparseSize(), class_id, unicharset_size_,
              s);
    }
    ASSERT_HOST(font_id >= 0 && font_id < font_id_map_.SparseSize());
    ASSERT_HOST(class_id >= 0 && class_id < unicharset_size_);
    int font_index = font_id_map_.SparseToCompact(font_id);
    (*font_class_array_)(font_index, class_id).samples.push_back(s);
  }
  // Set the num_raw_samples member of the FontClassInfo, to set the boundary
  // between the raw samples and the replicated ones.
  for (int f = 0; f < compact_font_size; ++f) {
    for (int c = 0; c < unicharset_size_; ++c)
      (*font_class_array_)(f, c).num_raw_samples =
          (*font_class_array_)(f, c).samples.size();
  }
  // This is the global number of samples and also marks the boundary between
  // real and replicated samples.
  num_raw_samples_ = samples_.size();
}
// Constructs the font_id_map_ which maps real font_ids (sparse) to a compact
// index for the font_class_array_.
void TrainingSampleSet::SetupFontIdMap() {
  // Histogram of sample counts keyed by sparse font_id.
  GenericVector<int> font_counts;
  for (int s = 0; s < samples_.size(); ++s) {
    const int font_id = samples_[s]->font_id();
    // Grow the histogram on demand to cover this font_id.
    while (font_counts.size() <= font_id)
      font_counts.push_back(0);
    ++font_counts[font_id];
  }
  // A font is mapped iff at least one sample uses it.
  font_id_map_.Init(font_counts.size(), false);
  for (int f = 0; f < font_counts.size(); ++f) {
    font_id_map_.SetMap(f, font_counts[f] > 0);
  }
  font_id_map_.Setup();
}
// Finds the sample for each font, class pair that has least maximum
// distance to all the other samples of the same font, class.
// OrganizeByFontAndClass must have been already called.
// Also tracks the globally farthest-apart pair of samples and, in debug
// mode, writes their OR-ed renderings to "worstpair.png".
void TrainingSampleSet::ComputeCanonicalSamples(const IntFeatureMap& map,
                                                bool debug) {
  ASSERT_HOST(font_class_array_ != NULL);
  IntFeatureDist f_table;
  if (debug) tprintf("feature table size %d\n", map.sparse_size());
  f_table.Init(&map);
  int worst_s1 = 0;
  int worst_s2 = 0;
  double global_worst_dist = 0.0;
  // Compute distances independently for each font and char index.
  int font_size = font_id_map_.CompactSize();
  for (int font_index = 0; font_index < font_size; ++font_index) {
    int font_id = font_id_map_.CompactToSparse(font_index);
    for (int c = 0; c < unicharset_size_; ++c) {
      int samples_found = 0;
      FontClassInfo& fcinfo = (*font_class_array_)(font_index, c);
      if (fcinfo.samples.size() == 0 ||
          (kTestChar >= 0 && c != kTestChar)) {
        fcinfo.canonical_sample = -1;
        fcinfo.canonical_dist = 0.0f;
        if (debug) tprintf("Skipping class %d\n", c);
        continue;
      }
      // The canonical sample will be the one with the min_max_dist, which
      // is the sample with the lowest maximum distance to all other samples.
      double min_max_dist = 2.0;
      // We keep track of the farthest apart pair (max_s1, max_s2) which
      // are max_max_dist apart, so we can see how bad the variability is.
      double max_max_dist = 0.0;
      int max_s1 = 0;
      int max_s2 = 0;
      fcinfo.canonical_sample = fcinfo.samples[0];
      fcinfo.canonical_dist = 0.0f;
      for (int i = 0; i < fcinfo.samples.size(); ++i) {
        int s1 = fcinfo.samples[i];
        const GenericVector<int>& features1 = samples_[s1]->indexed_features();
        f_table.Set(features1, features1.size(), true);
        double max_dist = 0.0;
        // Run the full squared-order search for similar samples. It is still
        // reasonably fast because f_table.FeatureDistance is fast, but we
        // may have to reconsider if we start playing with too many samples
        // of a single char/font.
        for (int j = 0; j < fcinfo.samples.size(); ++j) {
          int s2 = fcinfo.samples[j];
          if (samples_[s2]->class_id() != c ||
              samples_[s2]->font_id() != font_id ||
              s2 == s1)
            continue;
          // Bind by const reference: the previous by-value copy of the
          // feature vector on every iteration of this O(n^2) inner loop
          // was a needless allocation+copy per pair.
          const GenericVector<int>& features2 =
              samples_[s2]->indexed_features();
          double dist = f_table.FeatureDistance(features2);
          if (dist > max_dist) {
            max_dist = dist;
            if (dist > max_max_dist) {
              max_s1 = s1;
              max_s2 = s2;
            }
          }
        }
        // Using Set(..., false) is far faster than re initializing, due to
        // the sparseness of the feature space.
        f_table.Set(features1, features1.size(), false);
        samples_[s1]->set_max_dist(max_dist);
        ++samples_found;
        if (max_dist < min_max_dist) {
          fcinfo.canonical_sample = s1;
          fcinfo.canonical_dist = max_dist;
        }
        UpdateRange(max_dist, &min_max_dist, &max_max_dist);
      }
      if (max_max_dist > global_worst_dist) {
        // Keep a record of the worst pair over all characters/fonts too.
        global_worst_dist = max_max_dist;
        worst_s1 = max_s1;
        worst_s2 = max_s2;
      }
      if (debug) {
        tprintf("Found %d samples of class %d=%s, font %d, "
                "dist range [%g, %g], worst pair= %s, %s\n",
                samples_found, c, unicharset_.debug_str(c).string(),
                font_index, min_max_dist, max_max_dist,
                SampleToString(*samples_[max_s1]).string(),
                SampleToString(*samples_[max_s2]).string());
      }
    }
  }
  if (debug) {
    tprintf("Global worst dist = %g, between sample %d and %d\n",
            global_worst_dist, worst_s1, worst_s2);
    Pix* pix1 = DebugSample(unicharset_, samples_[worst_s1]);
    Pix* pix2 = DebugSample(unicharset_, samples_[worst_s2]);
    pixOr(pix1, pix1, pix2);
    pixWrite("worstpair.png", pix1, IFF_PNG);
    pixDestroy(&pix1);
    pixDestroy(&pix2);
  }
}
// Replicates the samples to a minimum frequency defined by
// 2 * kSampleRandomSize, or for larger counts duplicates all samples.
// After replication, the replicated samples are perturbed slightly, but
// in a predictable and repeatable way.
// Use after OrganizeByFontAndClass().
void TrainingSampleSet::ReplicateAndRandomizeSamples() {
ASSERT_HOST(font_class_array_ != NULL);
int font_size = font_id_map_.CompactSize();
for (int font_index = 0; font_index < font_size; ++font_index) {
for (int c = 0; c < unicharset_size_; ++c) {
FontClassInfo& fcinfo = (*font_class_array_)(font_index, c);
int sample_count = fcinfo.samples.size();
int min_samples = 2 * MAX(kSampleRandomSize, sample_count);
if (sample_count > 0 && sample_count < min_samples) {
int base_count = sample_count;
for (int base_index = 0; sample_count < min_samples; ++sample_count) {
int src_index = fcinfo.samples[base_index++];
if (base_index >= base_count) base_index = 0;
TrainingSample* sample = samples_[src_index]->RandomizedCopy(
sample_count % kSampleRandomSize);
int sample_index = samples_.size();
sample->set_sample_index(sample_index);
samples_.push_back(sample);
fcinfo.samples.push_back(sample_index);
}
}
}
}
}
// Caches the indexed features of the canonical samples.
// ComputeCanonicalSamples must have been already called.
// TODO(rays) see note on ReliablySeparable and try restricting the
// canonical features to those that truly represent all samples.
void TrainingSampleSet::ComputeCanonicalFeatures() {
ASSERT_HOST(font_class_array_ != NULL);
int font_size = font_id_map_.CompactSize();
for (int font_index = 0; font_index < font_size; ++font_index) {
int font_id = font_id_map_.CompactToSparse(font_index);
for (int c = 0; c < unicharset_size_; ++c) {
int num_samples = NumClassSamples(font_id, c, false);
if (num_samples == 0)
continue;
const TrainingSample* sample = GetCanonicalSample(font_id, c);
FontClassInfo& fcinfo = (*font_class_array_)(font_index, c);
fcinfo.canonical_features = sample->indexed_features();
}
}
}
// Computes the combined set of features used by all the samples of each
// font/class combination. Use after ReplicateAndRandomizeSamples.
void TrainingSampleSet::ComputeCloudFeatures(int feature_space_size) {
ASSERT_HOST(font_class_array_ != NULL);
int font_size = font_id_map_.CompactSize();
for (int font_index = 0; font_index < font_size; ++font_index) {
int font_id = font_id_map_.CompactToSparse(font_index);
for (int c = 0; c < unicharset_size_; ++c) {
int num_samples = NumClassSamples(font_id, c, false);
if (num_samples == 0)
continue;
FontClassInfo& fcinfo = (*font_class_array_)(font_index, c);
fcinfo.cloud_features.Init(feature_space_size);
for (int s = 0; s < num_samples; ++s) {
const TrainingSample* sample = GetSample(font_id, c, s);
const GenericVector<int>& sample_features = sample->indexed_features();
for (int i = 0; i < sample_features.size(); ++i)
fcinfo.cloud_features.SetBit(sample_features[i]);
}
}
}
}
// Adds all fonts of the given class to the shape.
void TrainingSampleSet::AddAllFontsForClass(int class_id, Shape* shape) const {
  const int num_fonts = font_id_map_.CompactSize();
  for (int font_index = 0; font_index < num_fonts; ++font_index) {
    // Shapes use the original sparse font ids, not compact indices.
    shape->AddToShape(class_id, font_id_map_.CompactToSparse(font_index));
  }
}
// Display the samples with the given indexed feature that also match
// the given shape.
void TrainingSampleSet::DisplaySamplesWithFeature(int f_index,
const Shape& shape,
const IntFeatureSpace& space,
ScrollView::Color color,
ScrollView* window) const {
for (int s = 0; s < num_raw_samples(); ++s) {
const TrainingSample* sample = GetSample(s);
if (shape.ContainsUnichar(sample->class_id())) {
GenericVector<int> indexed_features;
space.IndexAndSortFeatures(sample->features(), sample->num_features(),
&indexed_features);
for (int f = 0; f < indexed_features.size(); ++f) {
if (indexed_features[f] == f_index) {
sample->DisplayFeatures(color, window);
}
}
}
}
}
} // namespace tesseract.
| C++ |
/******************************************************************************
** Filename: mf.c
** Purpose: Micro-feature interface to flexible feature extractor.
** Author: Dan Johnson
** History: Thu May 24 09:08:38 1990, DSJ, Created.
**
** (c) Copyright Hewlett-Packard Company, 1988.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
******************************************************************************/
/**----------------------------------------------------------------------------
Include Files and Type Defines
----------------------------------------------------------------------------**/
#include "mf.h"
#include "featdefs.h"
#include "mfdefs.h"
#include "mfx.h"
#include <math.h>
/**----------------------------------------------------------------------------
Global Data Definitions and Declarations
----------------------------------------------------------------------------**/
/**----------------------------------------------------------------------------
Private Code
----------------------------------------------------------------------------**/
/*---------------------------------------------------------------------------*/
FEATURE_SET ExtractMicros(TBLOB *Blob, const DENORM& bl_denorm,
                          const DENORM& cn_denorm,
                          const INT_FX_RESULT_STRUCT& fx_info) {
/*
 ** Parameters:
 ** Blob blob to extract micro-features from
 ** bl_denorm, cn_denorm, fx_info
 ** control parameters for the feature extractor.
 ** Globals: none
 ** Operation: Call the old micro-feature extractor and then copy
 ** the features into the new format. Then deallocate the
 ** old micro-features.
 ** Return: Micro-features for Blob, or NULL if the old extractor
 ** produced none.
 ** Exceptions: none
 ** History: Wed May 23 18:06:38 1990, DSJ, Created.
 */
  int NumFeatures;
  MICROFEATURES Features, OldFeatures;
  FEATURE_SET FeatureSet;
  FEATURE Feature;
  MICROFEATURE OldFeature;
  OldFeatures = (MICROFEATURES)BlobMicroFeatures(Blob, bl_denorm, cn_denorm,
                                                 fx_info);
  if (OldFeatures == NULL)
    return NULL;
  NumFeatures = count (OldFeatures);
  FeatureSet = NewFeatureSet (NumFeatures);
  Features = OldFeatures;
  // Walk the old-style list, converting each entry to a new FEATURE.
  iterate(Features) {
    OldFeature = (MICROFEATURE) first_node (Features);
    Feature = NewFeature (&MicroFeatureDesc);
    Feature->Params[MFDirection] = OldFeature[ORIENTATION];
    Feature->Params[MFXPosition] = OldFeature[XPOSITION];
    Feature->Params[MFYPosition] = OldFeature[YPOSITION];
    Feature->Params[MFLength] = OldFeature[MFLENGTH];
    // Bulge features are deprecated and should not be used. Set to 0.
    Feature->Params[MFBulge1] = 0.0f;
    Feature->Params[MFBulge2] = 0.0f;
#ifndef _WIN32
    // Assert that feature parameters are well defined.
    // (Skipped on Windows, presumably because isnan was unavailable there.)
    int i;
    for (i = 0; i < Feature->Type->NumParams; i++) {
      ASSERT_HOST(!isnan(Feature->Params[i]));
    }
#endif
    AddFeature(FeatureSet, Feature);
  }
  // The old-format list is no longer needed once copied.
  FreeMicroFeatures(OldFeatures);
  return FeatureSet;
} /* ExtractMicros */
| C++ |
// Copyright 2010 Google Inc. All Rights Reserved.
// Author: rays@google.com (Ray Smith)
///////////////////////////////////////////////////////////////////////
// File: shapetable.cpp
// Description: Class to map a classifier shape index to unicharset
// indices and font indices.
// Author: Ray Smith
// Created: Tue Nov 02 15:31:32 PDT 2010
//
// (C) Copyright 2010, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#include "shapetable.h"
#include "bitvector.h"
#include "fontinfo.h"
#include "intfeaturespace.h"
#include "strngs.h"
#include "unicharset.h"
#include "unicity_table.h"
namespace tesseract {
// Helper function to get the index of the first result with the required
// unichar_id. If the results are sorted by rating, this will also be the
// best result with the required unichar_id.
// Returns -1 if the unichar_id is not found
int ShapeRating::FirstResultWithUnichar(
const GenericVector<ShapeRating>& results,
const ShapeTable& shape_table,
UNICHAR_ID unichar_id) {
for (int r = 0; r < results.size(); ++r) {
int shape_id = results[r].shape_id;
const Shape& shape = shape_table.GetShape(shape_id);
if (shape.ContainsUnichar(unichar_id)) {
return r;
}
}
return -1;
}
// Helper function to get the index of the first result with the required
// unichar_id. If the results are sorted by rating, this will also be the
// best result with the required unichar_id.
// Returns -1 if the unichar_id is not found
int UnicharRating::FirstResultWithUnichar(
const GenericVector<UnicharRating>& results,
UNICHAR_ID unichar_id) {
for (int r = 0; r < results.size(); ++r) {
if (results[r].unichar_id == unichar_id)
return r;
}
return -1;
}
// Writes to the given file. Returns false in case of error.
bool UnicharAndFonts::Serialize(FILE* fp) const {
  // Both writes must succeed; && short-circuits on the first failure.
  return fwrite(&unichar_id, sizeof(unichar_id), 1, fp) == 1 &&
         font_ids.Serialize(fp);
}
// Reads from the given file. Returns false in case of error.
// If swap is true, assumes a big/little-endian swap is needed.
bool UnicharAndFonts::DeSerialize(bool swap, FILE* fp) {
  if (fread(&unichar_id, sizeof(unichar_id), 1, fp) != 1)
    return false;
  if (swap)
    ReverseN(&unichar_id, sizeof(unichar_id));
  // The font list handles its own byte-swapping.
  return font_ids.DeSerialize(swap, fp);
}
// Sort function to sort a pair of UnicharAndFonts by unichar_id.
int UnicharAndFonts::SortByUnicharId(const void* v1, const void* v2) {
const UnicharAndFonts* p1 = reinterpret_cast<const UnicharAndFonts*>(v1);
const UnicharAndFonts* p2 = reinterpret_cast<const UnicharAndFonts*>(v2);
return p1->unichar_id - p2->unichar_id;
}
// Writes to the given file. Returns false in case of error.
bool Shape::Serialize(FILE* fp) const {
  // Store the sorted flag as a single byte for a stable on-disk format.
  uinT8 sorted = unichars_sorted_ ? 1 : 0;
  return fwrite(&sorted, sizeof(sorted), 1, fp) == 1 &&
         unichars_.SerializeClasses(fp);
}
// Reads from the given file. Returns false in case of error.
// If swap is true, assumes a big/little-endian swap is needed.
bool Shape::DeSerialize(bool swap, FILE* fp) {
  uinT8 sorted;
  if (fread(&sorted, sizeof(sorted), 1, fp) != 1)
    return false;
  unichars_sorted_ = (sorted != 0);
  return unichars_.DeSerializeClasses(swap, fp);
}
// Adds a font_id for the given unichar_id. If the unichar_id is not
// in the shape, it is added.
void Shape::AddToShape(int unichar_id, int font_id) {
for (int c = 0; c < unichars_.size(); ++c) {
if (unichars_[c].unichar_id == unichar_id) {
// Found the unichar in the shape table.
GenericVector<int>& font_list = unichars_[c].font_ids;
for (int f = 0; f < font_list.size(); ++f) {
if (font_list[f] == font_id)
return; // Font is already there.
}
font_list.push_back(font_id);
return;
}
}
// Unichar_id is not in shape, so add it to shape.
unichars_.push_back(UnicharAndFonts(unichar_id, font_id));
unichars_sorted_ = unichars_.size() <= 1;
}
// Adds everything in other to this.
void Shape::AddShape(const Shape& other) {
for (int c = 0; c < other.unichars_.size(); ++c) {
for (int f = 0; f < other.unichars_[c].font_ids.size(); ++f) {
AddToShape(other.unichars_[c].unichar_id,
other.unichars_[c].font_ids[f]);
}
}
unichars_sorted_ = unichars_.size() <= 1;
}
// Returns true if the shape contains the given unichar_id, font_id pair.
bool Shape::ContainsUnicharAndFont(int unichar_id, int font_id) const {
for (int c = 0; c < unichars_.size(); ++c) {
if (unichars_[c].unichar_id == unichar_id) {
// Found the unichar, so look for the font.
GenericVector<int>& font_list = unichars_[c].font_ids;
for (int f = 0; f < font_list.size(); ++f) {
if (font_list[f] == font_id)
return true;
}
return false;
}
}
return false;
}
// Returns true if the shape contains the given unichar_id, ignoring font.
bool Shape::ContainsUnichar(int unichar_id) const {
  const int num_unichars = unichars_.size();
  for (int index = 0; index < num_unichars; ++index) {
    if (unichars_[index].unichar_id == unichar_id)
      return true;
  }
  return false;
}
// Returns true if the shape contains the given font, ignoring unichar_id.
bool Shape::ContainsFont(int font_id) const {
for (int c = 0; c < unichars_.size(); ++c) {
GenericVector<int>& font_list = unichars_[c].font_ids;
for (int f = 0; f < font_list.size(); ++f) {
if (font_list[f] == font_id)
return true;
}
}
return false;
}
// Returns true if the shape contains the given font properties, ignoring
// unichar_id.
bool Shape::ContainsFontProperties(const FontInfoTable& font_table,
uinT32 properties) const {
for (int c = 0; c < unichars_.size(); ++c) {
GenericVector<int>& font_list = unichars_[c].font_ids;
for (int f = 0; f < font_list.size(); ++f) {
if (font_table.get(font_list[f]).properties == properties)
return true;
}
}
return false;
}
// Returns true if the shape contains multiple different font properties,
// ignoring unichar_id.
bool Shape::ContainsMultipleFontProperties(
const FontInfoTable& font_table) const {
uinT32 properties = font_table.get(unichars_[0].font_ids[0]).properties;
for (int c = 0; c < unichars_.size(); ++c) {
GenericVector<int>& font_list = unichars_[c].font_ids;
for (int f = 0; f < font_list.size(); ++f) {
if (font_table.get(font_list[f]).properties != properties)
return true;
}
}
return false;
}
// Returns true if this shape is equal to other (ignoring order of unichars
// and fonts).
// Set equality: each shape must be a subset of the other.
bool Shape::operator==(const Shape& other) const {
  return IsSubsetOf(other) && other.IsSubsetOf(*this);
}
// Returns true if this is a subset (including equal) of other.
bool Shape::IsSubsetOf(const Shape& other) const {
for (int c = 0; c < unichars_.size(); ++c) {
int unichar_id = unichars_[c].unichar_id;
const GenericVector<int>& font_list = unichars_[c].font_ids;
for (int f = 0; f < font_list.size(); ++f) {
if (!other.ContainsUnicharAndFont(unichar_id, font_list[f]))
return false;
}
}
return true;
}
// Returns true if the lists of unichar ids are the same in this and other,
// ignoring fonts.
// NOT const, as it will sort the unichars on demand.
bool Shape::IsEqualUnichars(Shape* other) {
  const int num_unichars = unichars_.size();
  if (num_unichars != other->unichars_.size())
    return false;
  // Sort both sides lazily so the lists can be compared element-wise.
  if (!unichars_sorted_)
    SortUnichars();
  if (!other->unichars_sorted_)
    other->SortUnichars();
  for (int index = 0; index < num_unichars; ++index) {
    if (unichars_[index].unichar_id != other->unichars_[index].unichar_id)
      return false;
  }
  return true;
}
// Sorts the unichars_ vector by unichar.
// Sets unichars_sorted_ so IsEqualUnichars can skip redundant sorts.
void Shape::SortUnichars() {
  unichars_.sort(UnicharAndFonts::SortByUnicharId);
  unichars_sorted_ = true;
}
// Default constructor. The unicharset must be supplied later (e.g. via a
// setter) before unicharset-dependent methods such as DebugStr are used.
ShapeTable::ShapeTable() : unicharset_(NULL), num_fonts_(0) {
}
// Constructs a table over the given unicharset, which is held by pointer
// and must therefore outlive this ShapeTable.
ShapeTable::ShapeTable(const UNICHARSET& unicharset)
  : unicharset_(&unicharset), num_fonts_(0) {
}
// Writes to the given file. Returns false in case of error.
bool ShapeTable::Serialize(FILE* fp) const {
  // The shape vector serializes everything the table owns.
  return shape_table_.Serialize(fp);
}
// Reads from the given file. Returns false in case of error.
// If swap is true, assumes a big/little-endian swap is needed.
bool ShapeTable::DeSerialize(bool swap, FILE* fp) {
  if (!shape_table_.DeSerialize(swap, fp))
    return false;
  num_fonts_ = 0;  // Force NumFonts() to recompute lazily.
  return true;
}
// Returns the number of fonts used in this ShapeTable, computing it if
// necessary.
int ShapeTable::NumFonts() const {
if (num_fonts_ <= 0) {
for (int shape_id = 0; shape_id < shape_table_.size(); ++shape_id) {
const Shape& shape = *shape_table_[shape_id];
for (int c = 0; c < shape.size(); ++c) {
for (int f = 0; f < shape[c].font_ids.size(); ++f) {
if (shape[c].font_ids[f] >= num_fonts_)
num_fonts_ = shape[c].font_ids[f] + 1;
}
}
}
}
return num_fonts_;
}
// Re-indexes the class_ids in the shapetable according to the given map.
// Useful in conjunction with set_unicharset.
void ShapeTable::ReMapClassIds(const GenericVector<int>& unicharset_map) {
for (int shape_id = 0; shape_id < shape_table_.size(); ++shape_id) {
Shape* shape = shape_table_[shape_id];
for (int c = 0; c < shape->size(); ++c) {
shape->SetUnicharId(c, unicharset_map[(*shape)[c].unichar_id]);
}
}
}
// Returns a string listing the classes/fonts in a shape.
// Output is abbreviated for large shapes: shapes with > 100 unichars show
// only a count, and font lists are only shown for shapes with < 10
// unichars, with long font lists elided to first ... last.
STRING ShapeTable::DebugStr(int shape_id) const {
  if (shape_id < 0 || shape_id >= shape_table_.size())
    return STRING("INVALID_UNICHAR_ID");
  const Shape& shape = GetShape(shape_id);
  STRING result;
  result.add_str_int("Shape", shape_id);
  if (shape.size() > 100) {
    // Too big to list; just report the size.
    result.add_str_int(" Num unichars=", shape.size());
    return result;
  }
  for (int c = 0; c < shape.size(); ++c) {
    result.add_str_int(" c_id=", shape[c].unichar_id);
    result += "=";
    result += unicharset_->id_to_unichar(shape[c].unichar_id);
    if (shape.size() < 10) {
      // Small shape: also list the fonts for each unichar.
      result.add_str_int(", ", shape[c].font_ids.size());
      result += " fonts =";
      int num_fonts = shape[c].font_ids.size();
      if (num_fonts > 10) {
        // Elide long font lists to first ... last.
        result.add_str_int(" ", shape[c].font_ids[0]);
        result.add_str_int(" ... ", shape[c].font_ids[num_fonts - 1]);
      } else {
        for (int f = 0; f < num_fonts; ++f) {
          result.add_str_int(" ", shape[c].font_ids[f]);
        }
      }
    }
  }
  return result;
}
// Returns a debug string summarizing the table.
STRING ShapeTable::SummaryStr() const {
  int max_unichars = 0;
  int num_multi_shapes = 0;
  int num_master_shapes = 0;
  for (int s = 0; s < shape_table_.size(); ++s) {
    // Only count shapes that have not been merged into another.
    if (MasterDestinationIndex(s) != s)
      continue;
    ++num_master_shapes;
    const int shape_size = GetShape(s).size();
    if (shape_size > 1)
      ++num_multi_shapes;
    if (shape_size > max_unichars)
      max_unichars = shape_size;
  }
  STRING result;
  result.add_str_int("Number of shapes = ", num_master_shapes);
  result.add_str_int(" max unichars = ", max_unichars);
  result.add_str_int(" number with multiple unichars = ", num_multi_shapes);
  return result;
}
// Adds a new shape starting with the given unichar_id and font_id.
// Returns the assigned index.
int ShapeTable::AddShape(int unichar_id, int font_id) {
  Shape* shape = new Shape;
  shape->AddToShape(unichar_id, font_id);
  const int index = shape_table_.size();
  shape_table_.push_back(shape);
  // Keep the cached font count in step with the new font id.
  num_fonts_ = MAX(num_fonts_, font_id + 1);
  return index;
}
// Adds a copy of the given shape unless it is already present.
// Returns the assigned index or index of existing shape if already present.
int ShapeTable::AddShape(const Shape& other) {
  // Linear search for an existing equal shape.
  int index = 0;
  while (index < shape_table_.size() && !(other == *shape_table_[index]))
    ++index;
  if (index == shape_table_.size())
    shape_table_.push_back(new Shape(other));
  num_fonts_ = 0;  // Cached font count must be recomputed.
  return index;
}
// Removes the shape given by the shape index.
// Note: shape ids above shape_id shift down by one after removal.
void ShapeTable::DeleteShape(int shape_id) {
  delete shape_table_[shape_id];
  shape_table_[shape_id] = NULL;  // Defensive: no dangling pointer in the slot.
  shape_table_.remove(shape_id);
}
// Adds a font_id to the given existing shape index for the given
// unichar_id. If the unichar_id is not in the shape, it is added.
void ShapeTable::AddToShape(int shape_id, int unichar_id, int font_id) {
  shape_table_[shape_id]->AddToShape(unichar_id, font_id);
  // Keep the cached font count in step with the new font id.
  num_fonts_ = MAX(num_fonts_, font_id + 1);
}
// Merges the contents of other into the existing shape at shape_id.
void ShapeTable::AddShapeToShape(int shape_id, const Shape& other) {
  shape_table_[shape_id]->AddShape(other);
  // Font count is reset here (not recomputed inline) as in AddShape(other).
  num_fonts_ = 0;
}
// Returns the id of the shape that contains the given unichar and font.
// If not found, returns -1.
// If font_id < 0, the font_id is ignored and the first shape that matches
// the unichar_id is returned.
int ShapeTable::FindShape(int unichar_id, int font_id) const {
for (int s = 0; s < shape_table_.size(); ++s) {
const Shape& shape = GetShape(s);
for (int c = 0; c < shape.size(); ++c) {
if (shape[c].unichar_id == unichar_id) {
if (font_id < 0)
return s; // We don't care about the font.
for (int f = 0; f < shape[c].font_ids.size(); ++f) {
if (shape[c].font_ids[f] == font_id)
return s;
}
}
}
}
return -1;
}
// Returns the first unichar_id and font_id in the given shape.
void ShapeTable::GetFirstUnicharAndFont(int shape_id,
int* unichar_id, int* font_id) const {
const UnicharAndFonts& unichar_and_fonts = (*shape_table_[shape_id])[0];
*unichar_id = unichar_and_fonts.unichar_id;
*font_id = unichar_and_fonts.font_ids[0];
}
// Expands all the classes/fonts in the shape individually to build
// a ShapeTable. Returns the number of master shapes copied from
// master_shapes (newly created singleton shapes are not counted).
int ShapeTable::BuildFromShape(const Shape& shape,
                               const ShapeTable& master_shapes) {
  // Marks which master shapes are touched by a (unichar, font) pair in shape.
  BitVector shape_map(master_shapes.NumShapes());
  for (int u_ind = 0; u_ind < shape.size(); ++u_ind) {
    for (int f_ind = 0; f_ind < shape[u_ind].font_ids.size(); ++f_ind) {
      int c = shape[u_ind].unichar_id;
      int f = shape[u_ind].font_ids[f_ind];
      int master_id = master_shapes.FindShape(c, f);
      if (master_id >= 0) {
        // Pair is covered by a master shape: remember it for copying below.
        shape_map.SetBit(master_id);
      } else if (FindShape(c, f) < 0) {
        // Pair is unknown to both tables: add a new singleton shape here.
        AddShape(c, f);
      }
    }
  }
  // Copy each touched master shape exactly once, in master-table order.
  int num_masters = 0;
  for (int s = 0; s < master_shapes.NumShapes(); ++s) {
    if (shape_map[s]) {
      AddShape(master_shapes.GetShape(s));
      ++num_masters;
    }
  }
  return num_masters;
}
// Returns true if the two shapes have already been merged, i.e. they
// resolve to the same master shape.
bool ShapeTable::AlreadyMerged(int shape_id1, int shape_id2) const {
  int master1 = MasterDestinationIndex(shape_id1);
  int master2 = MasterDestinationIndex(shape_id2);
  return master1 == master2;
}
// Returns true if any master shape contains multiple unichars.
bool ShapeTable::AnyMultipleUnichars() const {
  int num_shapes = NumShapes();
  for (int s = 0; s < num_shapes; ++s) {
    // Only masters count; merged-away shapes are ignored.
    if (MasterDestinationIndex(s) == s && GetShape(s).size() > 1)
      return true;
  }
  return false;
}
// Returns the maximum number of unichars over all shapes.
int ShapeTable::MaxNumUnichars() const {
  int best = 0;
  int num_shapes = NumShapes();
  for (int s = 0; s < num_shapes; ++s) {
    int count = GetShape(s).size();
    if (count > best)
      best = count;
  }
  return best;
}
// Merges shapes with a common unichar over the [start, end) interval.
// Assumes single unichar per shape.
void ShapeTable::ForceFontMerges(int start, int end) {
  for (int s1 = start; s1 < end; ++s1) {
    // Only master shapes holding exactly one unichar are merge candidates.
    if (MasterDestinationIndex(s1) == s1 && GetShape(s1).size() == 1) {
      int unichar_id = GetShape(s1)[0].unichar_id;
      for (int s2 = s1 + 1; s2 < end; ++s2) {
        // Merge any later single-unichar master holding the same unichar.
        if (MasterDestinationIndex(s2) == s2 && GetShape(s2).size() == 1 &&
            unichar_id == GetShape(s2)[0].unichar_id) {
          MergeShapes(s1, s2);
        }
      }
    }
  }
  // Compact the table down to just the masters that survived the merging.
  ShapeTable compacted(*unicharset_);
  compacted.AppendMasterShapes(*this, NULL);
  *this = compacted;
}
// Returns the number of unichars in the master of the given shape.
int ShapeTable::MasterUnicharCount(int shape_id) const {
  return GetShape(MasterDestinationIndex(shape_id)).size();
}
// Returns the sum of the font counts in the master shape.
int ShapeTable::MasterFontCount(int shape_id) const {
int master_id = MasterDestinationIndex(shape_id);
const Shape& shape = GetShape(master_id);
int font_count = 0;
for (int c = 0; c < shape.size(); ++c) {
font_count += shape[c].font_ids.size();
}
return font_count;
}
// Returns the number of unichars that would result from merging the shapes.
int ShapeTable::MergedUnicharCount(int shape_id1, int shape_id2) const {
// Do it the easy way for now.
int master_id1 = MasterDestinationIndex(shape_id1);
int master_id2 = MasterDestinationIndex(shape_id2);
Shape combined_shape(*shape_table_[master_id1]);
combined_shape.AddShape(*shape_table_[master_id2]);
return combined_shape.size();
}
// Merges two shape_ids, leaving shape_id2's master marked as merged into
// shape_id1's master.
void ShapeTable::MergeShapes(int shape_id1, int shape_id2) {
  int dest = MasterDestinationIndex(shape_id1);
  int src = MasterDestinationIndex(shape_id2);
  // Redirect src (and anything already merged into it) at dest first,
  // then fold src's contents into dest.
  shape_table_[src]->set_destination_index(dest);
  shape_table_[dest]->AddShape(*shape_table_[src]);
}
// Swaps two shape_ids.
void ShapeTable::SwapShapes(int shape_id1, int shape_id2) {
Shape* tmp = shape_table_[shape_id1];
shape_table_[shape_id1] = shape_table_[shape_id2];
shape_table_[shape_id2] = tmp;
}
// Returns the destination of this shape, (if merged), taking into account
// the fact that the destination may itself have been merged.
// A destination of self or < 0 marks a master shape.
int ShapeTable::MasterDestinationIndex(int shape_id) const {
  int dest_id = shape_table_[shape_id]->destination_index();
  if (dest_id == shape_id || dest_id < 0)
    return shape_id;  // Is master already.
  int master_id = shape_table_[dest_id]->destination_index();
  if (master_id == dest_id || master_id < 0)
    return dest_id;  // Dest is the master and shape_id points to it.
  // Chains longer than two links are resolved recursively.
  master_id = MasterDestinationIndex(master_id);
  return master_id;
}
// Returns true if the unichars of one shape are a subset of the other's;
// false if neither shape's unichars are a subset of the other's.
bool ShapeTable::SubsetUnichar(int shape_id1, int shape_id2) const {
  const Shape& shape1 = GetShape(shape_id1);
  const Shape& shape2 = GetShape(shape_id2);
  // Test shape1 subset-of shape2 first.
  bool all_1_in_2 = true;
  for (int c = 0; c < shape1.size(); ++c) {
    if (!shape2.ContainsUnichar(shape1[c].unichar_id)) {
      all_1_in_2 = false;
      break;
    }
  }
  if (all_1_in_2)
    return true;
  // Otherwise require shape2 subset-of shape1.
  for (int c = 0; c < shape2.size(); ++c) {
    if (!shape1.ContainsUnichar(shape2[c].unichar_id))
      return false;
  }
  return true;
}
// Returns true if either shape's unichars are a subset of the combined
// merge pair's, or both merge shapes' unichars are subsets of shape's;
// false if neither subset relation holds.
bool ShapeTable::MergeSubsetUnichar(int merge_id1, int merge_id2,
                                    int shape_id) const {
  const Shape& merge1 = GetShape(merge_id1);
  const Shape& merge2 = GetShape(merge_id2);
  const Shape& shape = GetShape(shape_id);
  // Indices are declared outside the loops so the final test can check
  // whether each loop ran to completion (completion == subset holds).
  int cm1, cm2, cs;
  for (cs = 0; cs < shape.size(); ++cs) {
    int unichar_id = shape[cs].unichar_id;
    if (!merge1.ContainsUnichar(unichar_id) &&
        !merge2.ContainsUnichar(unichar_id))
      break;  // Shape is not a subset of the merge.
  }
  for (cm1 = 0; cm1 < merge1.size(); ++cm1) {
    int unichar_id1 = merge1[cm1].unichar_id;
    if (!shape.ContainsUnichar(unichar_id1))
      break;  // Merge is not a subset of shape
  }
  for (cm2 = 0; cm2 < merge2.size(); ++cm2) {
    int unichar_id2 = merge2[cm2].unichar_id;
    if (!shape.ContainsUnichar(unichar_id2))
      break;  // Merge is not a subset of shape
  }
  return cs == shape.size() || (cm1 == merge1.size() && cm2 == merge2.size());
}
// Returns true if the unichar sets are equal between the shapes.
bool ShapeTable::EqualUnichars(int shape_id1, int shape_id2) const {
const Shape& shape1 = GetShape(shape_id1);
const Shape& shape2 = GetShape(shape_id2);
for (int c1 = 0; c1 < shape1.size(); ++c1) {
int unichar_id1 = shape1[c1].unichar_id;
if (!shape2.ContainsUnichar(unichar_id1))
return false;
}
for (int c2 = 0; c2 < shape2.size(); ++c2) {
int unichar_id2 = shape2[c2].unichar_id;
if (!shape1.ContainsUnichar(unichar_id2))
return false;
}
return true;
}
// Returns true if the combined unichar set of the merge pair equals the
// unichar set of shape.
bool ShapeTable::MergeEqualUnichars(int merge_id1, int merge_id2,
                                    int shape_id) const {
  const Shape& merge1 = GetShape(merge_id1);
  const Shape& merge2 = GetShape(merge_id2);
  const Shape& shape = GetShape(shape_id);
  // Every unichar of shape must appear in merge1 or merge2...
  for (int cs = 0; cs < shape.size(); ++cs) {
    int unichar_id = shape[cs].unichar_id;
    if (!merge1.ContainsUnichar(unichar_id) &&
        !merge2.ContainsUnichar(unichar_id))
      return false;  // Shape has a unichar that appears in neither merge.
  }
  // ...and every unichar of both merges must appear in shape.
  for (int cm1 = 0; cm1 < merge1.size(); ++cm1) {
    int unichar_id1 = merge1[cm1].unichar_id;
    if (!shape.ContainsUnichar(unichar_id1))
      return false;  // Merge has a unichar that is not in shape.
  }
  for (int cm2 = 0; cm2 < merge2.size(); ++cm2) {
    int unichar_id2 = merge2[cm2].unichar_id;
    if (!shape.ContainsUnichar(unichar_id2))
      return false;  // Merge has a unichar that is not in shape.
  }
  return true;
}
// Returns true if there is a common unichar between the shapes.
bool ShapeTable::CommonUnichars(int shape_id1, int shape_id2) const {
const Shape& shape1 = GetShape(shape_id1);
const Shape& shape2 = GetShape(shape_id2);
for (int c1 = 0; c1 < shape1.size(); ++c1) {
int unichar_id1 = shape1[c1].unichar_id;
if (shape2.ContainsUnichar(unichar_id1))
return true;
}
return false;
}
// Returns true if there is a common font id between the shapes.
bool ShapeTable::CommonFont(int shape_id1, int shape_id2) const {
const Shape& shape1 = GetShape(shape_id1);
const Shape& shape2 = GetShape(shape_id2);
for (int c1 = 0; c1 < shape1.size(); ++c1) {
const GenericVector<int>& font_list1 = shape1[c1].font_ids;
for (int f = 0; f < font_list1.size(); ++f) {
if (shape2.ContainsFont(font_list1[f]))
return true;
}
}
return false;
}
// Appends the master shapes from other to this.
// If not NULL, shape_map is set to map other shape_ids to this's shape_ids.
void ShapeTable::AppendMasterShapes(const ShapeTable& other,
GenericVector<int>* shape_map) {
if (shape_map != NULL)
shape_map->init_to_size(other.NumShapes(), -1);
for (int s = 0; s < other.shape_table_.size(); ++s) {
if (other.shape_table_[s]->destination_index() < 0) {
int index = AddShape(*other.shape_table_[s]);
if (shape_map != NULL)
(*shape_map)[s] = index;
}
}
}
// Returns the number of master shapes remaining after merging.
int ShapeTable::NumMasterShapes() const {
  int count = 0;
  for (int s = 0; s < shape_table_.size(); ++s) {
    // A negative destination_index marks an unmerged (master) shape.
    if (shape_table_[s]->destination_index() < 0)
      ++count;
  }
  return count;
}
// Adds the unichars of the given shape_id to the vector of results. Any
// unichar_id that is already present just has the fonts added to the
// font set for that result without adding a new entry in the vector.
// NOTE: it is assumed that the results are given to this function in order
// of decreasing rating.
// The unichar_map vector indicates the index of the results entry containing
// each unichar, or -1 if the unichar is not yet included in results.
void ShapeTable::AddShapeToResults(const ShapeRating& shape_rating,
                                   GenericVector<int>* unichar_map,
                                   GenericVector<UnicharRating>* results)const {
  // Propagate the joined/broken pseudo-unichars before the real ones.
  if (shape_rating.joined) {
    AddUnicharToResults(UNICHAR_JOINED, shape_rating.rating, unichar_map,
                        results);
  }
  if (shape_rating.broken) {
    AddUnicharToResults(UNICHAR_BROKEN, shape_rating.rating, unichar_map,
                        results);
  }
  const Shape& shape = GetShape(shape_rating.shape_id);
  for (int u = 0; u < shape.size(); ++u) {
    // An existing entry keeps its earlier (higher, by the NOTE above)
    // rating; only the font set accumulates.
    int result_index = AddUnicharToResults(shape[u].unichar_id,
                                           shape_rating.rating,
                                           unichar_map, results);
    (*results)[result_index].fonts += shape[u].font_ids;
  }
}
// Adds the given unichar_id to the results if needed, updating unichar_map
// and returning the index of unichar in results.
int ShapeTable::AddUnicharToResults(
int unichar_id, float rating, GenericVector<int>* unichar_map,
GenericVector<UnicharRating>* results) const {
int result_index = unichar_map->get(unichar_id);
if (result_index < 0) {
UnicharRating result(unichar_id, rating);
result_index = results->push_back(result);
(*unichar_map)[unichar_id] = result_index;
}
return result_index;
}
} // namespace tesseract
| C++ |
/******************************************************************************
** Filename: intproto.c
** Purpose: Definition of data structures for integer protos.
** Author: Dan Johnson
** History: Thu Feb 7 14:38:16 1991, DSJ, Created.
**
** (c) Copyright Hewlett-Packard Company, 1988.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
******************************************************************************/
/*-----------------------------------------------------------------------------
Include Files and Type Defines
-----------------------------------------------------------------------------*/
#include <math.h>
#include <stdio.h>
#include <assert.h>
#ifdef __UNIX__
#include <unistd.h>
#endif
#include "classify.h"
#include "const.h"
#include "emalloc.h"
#include "fontinfo.h"
#include "genericvector.h"
#include "globals.h"
#include "helpers.h"
#include "intproto.h"
#include "mfoutline.h"
#include "ndminx.h"
#include "picofeat.h"
#include "points.h"
#include "shapetable.h"
#include "svmnode.h"
// Include automatically generated configuration file if running autoconf.
#ifdef HAVE_CONFIG_H
#include "config_auto.h"
#endif
using tesseract::FontInfo;
using tesseract::FontSet;
using tesseract::FontSpacingInfo;
/* match debug display constants*/
// The INT_* geometry below is expressed as fractions of
// INT_CHAR_NORM_RANGE — presumably the normalized character coordinate
// range used by the integer matcher (TODO confirm against intproto.h).
#define PROTO_PRUNER_SCALE (4.0)
#define INT_DESCENDER (0.0 * INT_CHAR_NORM_RANGE)
#define INT_BASELINE (0.25 * INT_CHAR_NORM_RANGE)
#define INT_XHEIGHT (0.75 * INT_CHAR_NORM_RANGE)
#define INT_CAPHEIGHT (1.0 * INT_CHAR_NORM_RANGE)
#define INT_XCENTER (0.5 * INT_CHAR_NORM_RANGE)
#define INT_YCENTER (0.5 * INT_CHAR_NORM_RANGE)
#define INT_XRADIUS (0.2 * INT_CHAR_NORM_RANGE)
#define INT_YRADIUS (0.2 * INT_CHAR_NORM_RANGE)
#define INT_MIN_X 0
#define INT_MIN_Y 0
#define INT_MAX_X INT_CHAR_NORM_RANGE
#define INT_MAX_Y INT_CHAR_NORM_RANGE
/** define pad used to snap near horiz/vertical protos to horiz/vertical */
#define HV_TOLERANCE (0.0025) /* approx 0.9 degrees */
// Types used by the class-pruner table-filling code (InitTableFiller,
// GetNextFill, DoFill below).
typedef enum
{ StartSwitch, EndSwitch, LastSwitch }
SWITCH_TYPE;
#define MAX_NUM_SWITCHES 3
// A single transition point in the fill of a pruner table.
// NOTE(review): field semantics inferred from names only — confirm
// against InitTableFiller/GetNextFill before relying on them.
typedef struct
{
  SWITCH_TYPE Type;
  inT8 X, Y;
  inT16 YInit;
  inT16 Delta;
}
FILL_SWITCH;
// State of an in-progress fill of a class-pruner table.
typedef struct
{
  uinT8 NextSwitch;
  uinT8 AngleStart, AngleEnd;
  inT8 X;
  inT16 YStart, YEnd;
  inT16 StartDelta, EndDelta;
  FILL_SWITCH Switch[MAX_NUM_SWITCHES];
}
TABLE_FILLER;
// One column of cells to fill, produced by GetNextFill.
typedef struct
{
  inT8 X;
  inT8 YStart, YEnd;
  uinT8 AngleStart, AngleEnd;
}
FILL_SPEC;
/* constants for conversion from old inttemp format */
#define OLD_MAX_NUM_CONFIGS 32
// Number of machine words needed to hold OLD_MAX_NUM_CONFIGS bits
// (rounded up).
#define OLD_WERDS_PER_CONFIG_VEC ((OLD_MAX_NUM_CONFIGS + BITS_PER_WERD - 1) /\
BITS_PER_WERD)
/*-----------------------------------------------------------------------------
 Macros
-----------------------------------------------------------------------------*/
/** macro for performing circular increments of bucket indices */
#define CircularIncrement(i,r) (((i) < (r) - 1)?((i)++):((i) = 0))
/** macro for mapping floats to ints without bounds checking */
#define MapParam(P,O,N) (floor (((P) + (O)) * (N)))
/*---------------------------------------------------------------------------
 Private Function Prototypes
----------------------------------------------------------------------------*/
// Prototypes for helpers declared here and defined later in this file.
FLOAT32 BucketStart(int Bucket, FLOAT32 Offset, int NumBuckets);
FLOAT32 BucketEnd(int Bucket, FLOAT32 Offset, int NumBuckets);
void DoFill(FILL_SPEC *FillSpec,
            CLASS_PRUNER_STRUCT* Pruner,
            register uinT32 ClassMask,
            register uinT32 ClassCount,
            register uinT32 WordIndex);
BOOL8 FillerDone(TABLE_FILLER *Filler);
void FillPPCircularBits(uinT32
                        ParamTable[NUM_PP_BUCKETS][WERDS_PER_PP_VECTOR],
                        int Bit, FLOAT32 Center, FLOAT32 Spread, bool debug);
void FillPPLinearBits(uinT32 ParamTable[NUM_PP_BUCKETS][WERDS_PER_PP_VECTOR],
                      int Bit, FLOAT32 Center, FLOAT32 Spread, bool debug);
void GetCPPadsForLevel(int Level,
                       FLOAT32 *EndPad,
                       FLOAT32 *SidePad,
                       FLOAT32 *AnglePad);
ScrollView::Color GetMatchColorFor(FLOAT32 Evidence);
void GetNextFill(TABLE_FILLER *Filler, FILL_SPEC *Fill);
void InitTableFiller(FLOAT32 EndPad,
                     FLOAT32 SidePad,
                     FLOAT32 AnglePad,
                     PROTO Proto,
                     TABLE_FILLER *Filler);
#ifndef GRAPHICS_DISABLED
void RenderIntFeature(ScrollView *window, const INT_FEATURE_STRUCT* Feature,
                      ScrollView::Color color);
void RenderIntProto(ScrollView *window,
                    INT_CLASS Class,
                    PROTO_ID ProtoId,
                    ScrollView::Color color);
#endif // GRAPHICS_DISABLED
int TruncateParam(FLOAT32 Param, int Min, int Max, char *Id);
/*-----------------------------------------------------------------------------
 Global Data Definitions and Declarations
-----------------------------------------------------------------------------*/
/* global display lists used to display proto and feature match information*/
// Lazily-created debug windows; NULL until first use.
ScrollView *IntMatchWindow = NULL;
ScrollView *FeatureDisplayWindow = NULL;
ScrollView *ProtoDisplayWindow = NULL;
/*-----------------------------------------------------------------------------
Variables
-----------------------------------------------------------------------------*/
/* control knobs */
// Tunable parameters controlling the class pruner (cp) and proto pruner
// (pp) pad sizes at each pruning level.
INT_VAR(classify_num_cp_levels, 3, "Number of Class Pruner Levels");
double_VAR(classify_cp_angle_pad_loose, 45.0,
           "Class Pruner Angle Pad Loose");
double_VAR(classify_cp_angle_pad_medium, 20.0,
           "Class Pruner Angle Pad Medium");
// Fixed typo ("CLass") in the help string below.
double_VAR(classify_cp_angle_pad_tight, 10.0,
           "Class Pruner Angle Pad Tight");
double_VAR(classify_cp_end_pad_loose, 0.5, "Class Pruner End Pad Loose");
double_VAR(classify_cp_end_pad_medium, 0.5, "Class Pruner End Pad Medium");
double_VAR(classify_cp_end_pad_tight, 0.5, "Class Pruner End Pad Tight");
double_VAR(classify_cp_side_pad_loose, 2.5, "Class Pruner Side Pad Loose");
double_VAR(classify_cp_side_pad_medium, 1.2, "Class Pruner Side Pad Medium");
double_VAR(classify_cp_side_pad_tight, 0.6, "Class Pruner Side Pad Tight");
double_VAR(classify_pp_angle_pad, 45.0, "Proto Pruner Angle Pad");
// Fixed help string ("Prune" -> "Pruner") for consistency with siblings.
double_VAR(classify_pp_end_pad, 0.5, "Proto Pruner End Pad");
double_VAR(classify_pp_side_pad, 2.5, "Proto Pruner Side Pad");
/*-----------------------------------------------------------------------------
Public Code
-----------------------------------------------------------------------------*/
// Builds a feature from an FCOORD for position with all the necessary
// clipping and rounding.
// The +0.5 rounds each coordinate to the nearest integer before clipping
// it into the byte range [0, 255]; theta is stored unmodified.
INT_FEATURE_STRUCT::INT_FEATURE_STRUCT(const FCOORD& pos, uinT8 theta)
  : X(ClipToRange<inT16>(static_cast<inT16>(pos.x() + 0.5), 0, 255)),
    Y(ClipToRange<inT16>(static_cast<inT16>(pos.y() + 0.5), 0, 255)),
    Theta(theta),
    CP_misses(0) {
}
// Builds a feature from ints with all the necessary clipping and casting.
// All three inputs are clipped into [0, MAX_UINT8] before narrowing.
INT_FEATURE_STRUCT::INT_FEATURE_STRUCT(int x, int y, int theta)
  : X(static_cast<uinT8>(ClipToRange(x, 0, MAX_UINT8))),
    Y(static_cast<uinT8>(ClipToRange(y, 0, MAX_UINT8))),
    Theta(static_cast<uinT8>(ClipToRange(theta, 0, MAX_UINT8))),
    CP_misses(0) {
}
/*---------------------------------------------------------------------------*/
/**
* This routine adds a new class structure to a set of
* templates. Classes have to be added to Templates in
* the order of increasing ClassIds.
*
* @param Templates templates to add new class to
* @param ClassId class id to associate new class with
* @param Class class data structure to add to templates
*
* Globals: none
*
* @note Exceptions: none
* @note History: Mon Feb 11 11:52:08 1991, DSJ, Created.
*/
void AddIntClass(INT_TEMPLATES Templates, CLASS_ID ClassId, INT_CLASS Class) {
  int Pruner;
  assert (LegalClassId (ClassId));
  // Classes must arrive in strictly increasing ClassId order; a gap or
  // out-of-order id is a fatal caller error.
  if (ClassId != Templates->NumClasses) {
    fprintf(stderr, "Please make sure that classes are added to templates");
    fprintf(stderr, " in increasing order of ClassIds\n");
    exit(1);
  }
  ClassForClassId (Templates, ClassId) = Class;
  Templates->NumClasses++;
  // Allocate (and zero) another class pruner when the current set is full.
  if (Templates->NumClasses > MaxNumClassesIn (Templates)) {
    Pruner = Templates->NumClassPruners++;
    Templates->ClassPruners[Pruner] = new CLASS_PRUNER_STRUCT;
    memset(Templates->ClassPruners[Pruner], 0, sizeof(CLASS_PRUNER_STRUCT));
  }
} /* AddIntClass */
/*---------------------------------------------------------------------------*/
/**
* This routine returns the index of the next free config
* in Class.
*
* @param Class class to add new configuration to
*
* Globals: none
*
* @return Index of next free config.
* @note Exceptions: none
* @note History: Mon Feb 11 14:44:40 1991, DSJ, Created.
*/
// Allocates the next free config slot in Class, zeroing its length,
// and returns its index. Asserts if Class is already full.
int AddIntConfig(INT_CLASS Class) {
  assert(Class->NumConfigs < MAX_NUM_CONFIGS);
  int new_config = Class->NumConfigs++;
  Class->ConfigLengths[new_config] = 0;
  return new_config;
} /* AddIntConfig */
/*---------------------------------------------------------------------------*/
/**
* This routine allocates the next free proto in Class and
* returns its index.
*
* @param Class class to add new proto to
*
* Globals: none
*
* @return Proto index of new proto.
* @note Exceptions: none
* @note History: Mon Feb 11 13:26:41 1991, DSJ, Created.
*/
int AddIntProto(INT_CLASS Class) {
  int Index;
  int ProtoSetId;
  PROTO_SET ProtoSet;
  INT_PROTO Proto;
  register uinT32 *Word;
  // Fail with NO_PROTO when the class can hold no more protos.
  if (Class->NumProtos >= MAX_NUM_PROTOS)
    return (NO_PROTO);
  Index = Class->NumProtos++;
  // Grow storage by one proto set when the existing sets are full.
  if (Class->NumProtos > MaxNumIntProtosIn(Class)) {
    ProtoSetId = Class->NumProtoSets++;
    ProtoSet = (PROTO_SET) Emalloc(sizeof(PROTO_SET_STRUCT));
    Class->ProtoSets[ProtoSetId] = ProtoSet;
    memset(ProtoSet, 0, sizeof(*ProtoSet));
    /* reallocate space for the proto lengths and install in class */
    Class->ProtoLengths =
      (uinT8 *)Erealloc(Class->ProtoLengths,
                        MaxNumIntProtosIn(Class) * sizeof(uinT8));
    // Zero only the newly added tail of the lengths array.
    memset(&Class->ProtoLengths[Index], 0,
           sizeof(*Class->ProtoLengths) * (MaxNumIntProtosIn(Class) - Index));
  }
  /* initialize proto so its length is zero and it isn't in any configs */
  Class->ProtoLengths[Index] = 0;
  Proto = ProtoForProtoId (Class, Index);
  // Clear the proto's config bit-vector word by word.
  for (Word = Proto->Configs;
       Word < Proto->Configs + WERDS_PER_CONFIG_VEC; *Word++ = 0);
  return (Index);
} /* AddIntProto */
/*---------------------------------------------------------------------------*/
void AddProtoToClassPruner (PROTO Proto, CLASS_ID ClassId,
                            INT_TEMPLATES Templates)
/*
** Parameters:
** Proto floating-pt proto to add to class pruner
** ClassId class id corresponding to Proto
** Templates set of templates containing class pruner
** Globals:
** classify_num_cp_levels number of levels used in the class pruner
** Operation: This routine adds Proto to the class pruning tables
** for the specified class in Templates.
** Return: none
** Exceptions: none
** History: Wed Feb 13 08:49:54 1991, DSJ, Created.
*/
#define MAX_LEVEL 2
{
  CLASS_PRUNER_STRUCT* Pruner;
  uinT32 ClassMask;
  uinT32 ClassCount;
  uinT32 WordIndex;
  int Level;
  FLOAT32 EndPad, SidePad, AnglePad;
  TABLE_FILLER TableFiller;
  FILL_SPEC FillSpec;
  Pruner = CPrunerFor (Templates, ClassId);
  WordIndex = CPrunerWordIndexFor (ClassId);
  // The mask covers this class's bits at the maximum level.
  ClassMask = CPrunerMaskFor (MAX_LEVEL, ClassId);
  // Fill from the tightest pads (highest level) down to the loosest, so
  // looser levels overwrite a wider area around the proto.
  for (Level = classify_num_cp_levels - 1; Level >= 0; Level--) {
    GetCPPadsForLevel(Level, &EndPad, &SidePad, &AnglePad);
    ClassCount = CPrunerMaskFor (Level, ClassId);
    InitTableFiller(EndPad, SidePad, AnglePad, Proto, &TableFiller);
    while (!FillerDone (&TableFiller)) {
      GetNextFill(&TableFiller, &FillSpec);
      DoFill(&FillSpec, Pruner, ClassMask, ClassCount, WordIndex);
    }
  }
} /* AddProtoToClassPruner */
/*---------------------------------------------------------------------------*/
void AddProtoToProtoPruner(PROTO Proto, int ProtoId,
                           INT_CLASS Class, bool debug) {
  /*
  ** Parameters:
  ** Proto floating-pt proto to be added to proto pruner
  ** ProtoId id of proto
  ** Class integer class that contains desired proto pruner
  ** Globals: none
  ** Operation: This routine updates the proto pruner lookup tables
  ** for Class to include a new proto identified by ProtoId
  ** and described by Proto.
  ** Return: none
  ** Exceptions: none
  ** History: Fri Feb 8 13:07:19 1991, DSJ, Created.
  */
  FLOAT32 Angle, X, Y, Length;
  FLOAT32 Pad;
  int Index;
  PROTO_SET ProtoSet;
  if (ProtoId >= Class->NumProtos)
    cprintf("AddProtoToProtoPruner:assert failed: %d < %d",
            ProtoId, Class->NumProtos);
  assert(ProtoId < Class->NumProtos);
  Index = IndexForProto (ProtoId);
  ProtoSet = Class->ProtoSets[SetForProto (ProtoId)];
  Angle = Proto->Angle;
#ifndef _WIN32
  assert(!isnan(Angle));
#endif
  // The angle table wraps circularly; the pad is a fraction of a turn.
  FillPPCircularBits (ProtoSet->ProtoPruner[PRUNER_ANGLE], Index,
                      Angle + ANGLE_SHIFT, classify_pp_angle_pad / 360.0,
                      debug);
  // Convert the angle to radians for the trig-based X/Y pads below.
  Angle *= 2.0 * PI;
  Length = Proto->Length;
  X = Proto->X + X_SHIFT;
  // X pad: half-length plus end pad along the proto, side pad across it,
  // each projected onto the x axis; take whichever dominates.
  Pad = MAX (fabs (cos (Angle)) * (Length / 2.0 +
                                   classify_pp_end_pad *
                                   GetPicoFeatureLength ()),
             fabs (sin (Angle)) * (classify_pp_side_pad *
                                   GetPicoFeatureLength ()));
  FillPPLinearBits(ProtoSet->ProtoPruner[PRUNER_X], Index, X, Pad, debug);
  Y = Proto->Y + Y_SHIFT;
  // Y pad: same construction with sin/cos swapped for the y projection.
  Pad = MAX (fabs (sin (Angle)) * (Length / 2.0 +
                                   classify_pp_end_pad *
                                   GetPicoFeatureLength ()),
             fabs (cos (Angle)) * (classify_pp_side_pad *
                                   GetPicoFeatureLength ()));
  FillPPLinearBits(ProtoSet->ProtoPruner[PRUNER_Y], Index, Y, Pad, debug);
} /* AddProtoToProtoPruner */
/*---------------------------------------------------------------------------*/
int BucketFor(FLOAT32 Param, FLOAT32 Offset, int NumBuckets) {
  // Maps (Param + Offset) into a bucket in [0, NumBuckets - 1].
  // Mapping truncates rather than rounds; values that land outside the
  // legal range are clipped back into it.
  int bucket = static_cast<int>(MapParam(Param, Offset, NumBuckets));
  return ClipToRange(bucket, 0, NumBuckets - 1);
} /* BucketFor */
/*---------------------------------------------------------------------------*/
int CircBucketFor(FLOAT32 Param, FLOAT32 Offset, int NumBuckets) {
  // Maps (Param + Offset) into a bucket in [0, NumBuckets - 1], wrapping
  // an out-of-range result circularly (by a single period) instead of
  // clipping it. Mapping truncates rather than rounds.
  int bucket = static_cast<int>(MapParam(Param, Offset, NumBuckets));
  if (bucket < 0)
    bucket += NumBuckets;
  else if (bucket >= NumBuckets)
    bucket -= NumBuckets;
  return bucket;
} /* CircBucketFor */
/*---------------------------------------------------------------------------*/
#ifndef GRAPHICS_DISABLED
void UpdateMatchDisplay() {
  /*
  ** Parameters: none
  ** Globals:
  ** IntMatchWindow the match debug window, if open
  ** Operation: This routine pushes pending drawing output to the match
  ** display window (ScrollView::Update), if one exists.
  ** Return: none
  ** Exceptions: none
  ** History: Thu Mar 21 15:40:19 1991, DSJ, Created.
  */
  if (IntMatchWindow != NULL)
    IntMatchWindow->Update();
} /* UpdateMatchDisplay */
#endif
/*---------------------------------------------------------------------------*/
void ConvertConfig(BIT_VECTOR Config, int ConfigId, INT_CLASS Class) {
  /*
  ** Parameters:
  ** Config config to be added to class
  ** ConfigId id to be used for new config
  ** Class class to add new config to
  ** Globals: none
  ** Operation: This operation updates the config vectors of all protos
  ** in Class to indicate that the protos with 1's in Config
  ** belong to a new configuration identified by ConfigId.
  ** It is assumed that the length of the Config bit vector is
  ** equal to the number of protos in Class.
  ** Return: none
  ** Exceptions: none
  ** History: Mon Feb 11 14:57:31 1991, DSJ, Created.
  */
  int ProtoId;
  INT_PROTO Proto;
  int TotalLength;
  // Tag every proto selected by Config with the new ConfigId, summing
  // their lengths into the config's total length as we go.
  for (ProtoId = 0, TotalLength = 0;
       ProtoId < Class->NumProtos; ProtoId++) {
    if (test_bit(Config, ProtoId)) {
      Proto = ProtoForProtoId(Class, ProtoId);
      SET_BIT(Proto->Configs, ConfigId);
      TotalLength += Class->ProtoLengths[ProtoId];
    }
  }
  Class->ConfigLengths[ConfigId] = TotalLength;
} /* ConvertConfig */
namespace tesseract {
/*---------------------------------------------------------------------------*/
void Classify::ConvertProto(PROTO Proto, int ProtoId, INT_CLASS Class) {
  /*
  ** Parameters:
  ** Proto floating-pt proto to be converted to integer format
  ** ProtoId id of proto
  ** Class integer class to add converted proto to
  ** Globals: none
  ** Operation: This routine converts Proto to integer format and
  ** installs it as ProtoId in Class.
  ** Return: none
  ** Exceptions: none
  ** History: Fri Feb 8 11:22:43 1991, DSJ, Created.
  */
  INT_PROTO P;
  FLOAT32 Param;
  assert(ProtoId < Class->NumProtos);
  P = ProtoForProtoId(Class, ProtoId);
  // A, B, C are scaled into fixed-point byte ranges (signed for A and C,
  // unsigned for the negated B) via TruncateParam.
  Param = Proto->A * 128;
  P->A = TruncateParam(Param, -128, 127, NULL);
  Param = -Proto->B * 256;
  P->B = TruncateParam(Param, 0, 255, NULL);
  Param = Proto->C * 128;
  P->C = TruncateParam(Param, -128, 127, NULL);
  // Angle is scaled to a byte; out-of-range values collapse to 0.
  Param = Proto->Angle * 256;
  if (Param < 0 || Param >= 256)
    P->Angle = 0;
  else
    P->Angle = (uinT8) Param;
  /* round proto length to nearest integer number of pico-features */
  Param = (Proto->Length / GetPicoFeatureLength()) + 0.5;
  Class->ProtoLengths[ProtoId] = TruncateParam(Param, 1, 255, NULL);
  if (classify_learning_debug_level >= 2)
    cprintf("Converted ffeat to (A=%d,B=%d,C=%d,L=%d)",
            P->A, P->B, P->C, Class->ProtoLengths[ProtoId]);
} /* ConvertProto */
/*---------------------------------------------------------------------------*/
INT_TEMPLATES Classify::CreateIntTemplates(CLASSES FloatProtos,
                                           const UNICHARSET&
                                           target_unicharset) {
  /*
  ** Parameters:
  ** FloatProtos prototypes in old floating pt format
  ** Globals: none
  ** Operation: This routine converts from the old floating point format
  ** to the new integer format.
  ** Return: New set of training templates in integer format.
  ** Exceptions: none
  ** History: Thu Feb 7 14:40:42 1991, DSJ, Created.
  */
  INT_TEMPLATES IntTemplates;
  CLASS_TYPE FClass;
  INT_CLASS IClass;
  int ClassId;
  int ProtoId;
  int ConfigId;
  IntTemplates = NewIntTemplates();
  for (ClassId = 0; ClassId < target_unicharset.size(); ClassId++) {
    FClass = &(FloatProtos[ClassId]);
    // An empty class is only expected for the space character; warn
    // about any other unichar with no training data.
    if (FClass->NumProtos == 0 && FClass->NumConfigs == 0 &&
        strcmp(target_unicharset.id_to_unichar(ClassId), " ") != 0) {
      cprintf("Warning: no protos/configs for %s in CreateIntTemplates()\n",
              target_unicharset.id_to_unichar(ClassId));
    }
    assert(UnusedClassIdIn(IntTemplates, ClassId));
    IClass = NewIntClass(FClass->NumProtos, FClass->NumConfigs);
    // Deduplicate font sets through fontset_table_; on a hit the freshly
    // built copy is discarded and the existing id reused.
    FontSet fs;
    fs.size = FClass->font_set.size();
    fs.configs = new int[fs.size];
    for (int i = 0; i < fs.size; ++i) {
      fs.configs[i] = FClass->font_set.get(i);
    }
    if (this->fontset_table_.contains(fs)) {
      IClass->font_set_id = this->fontset_table_.get_id(fs);
      delete[] fs.configs;
    } else {
      IClass->font_set_id = this->fontset_table_.push_back(fs);
    }
    AddIntClass(IntTemplates, ClassId, IClass);
    // Convert each proto and register it with both pruners.
    for (ProtoId = 0; ProtoId < FClass->NumProtos; ProtoId++) {
      AddIntProto(IClass);
      ConvertProto(ProtoIn(FClass, ProtoId), ProtoId, IClass);
      AddProtoToProtoPruner(ProtoIn(FClass, ProtoId), ProtoId, IClass,
                            classify_learning_debug_level >= 2);
      AddProtoToClassPruner(ProtoIn(FClass, ProtoId), ClassId, IntTemplates);
    }
    for (ConfigId = 0; ConfigId < FClass->NumConfigs; ConfigId++) {
      AddIntConfig(IClass);
      ConvertConfig(FClass->Configurations[ConfigId], ConfigId, IClass);
    }
  }
  return (IntTemplates);
} /* CreateIntTemplates */
} // namespace tesseract
/*---------------------------------------------------------------------------*/
#ifndef GRAPHICS_DISABLED
void DisplayIntFeature(const INT_FEATURE_STRUCT* Feature, FLOAT32 Evidence) {
/*
** Parameters:
** Feature pico-feature to be displayed
** Evidence best evidence for this feature (0-1)
** Globals:
** FeatureShapes global display list for features
** Operation: This routine renders the specified feature into a
** global display list.
** Return: none
** Exceptions: none
** History: Thu Mar 21 14:45:04 1991, DSJ, Created.
*/
ScrollView::Color color = GetMatchColorFor(Evidence);
RenderIntFeature(IntMatchWindow, Feature, color);
if (FeatureDisplayWindow) {
RenderIntFeature(FeatureDisplayWindow, Feature, color);
}
} /* DisplayIntFeature */
/*---------------------------------------------------------------------------*/
/**
 * Renders the specified proto of a class into the global match
 * display list(s), colored according to its total evidence.
 *
 * @param Class     class to take proto from
 * @param ProtoId   id of proto in Class to be displayed
 * @param Evidence  total evidence for proto (0-1)
 */
void DisplayIntProto(INT_CLASS Class, PROTO_ID ProtoId, FLOAT32 Evidence) {
  const ScrollView::Color match_color = GetMatchColorFor(Evidence);
  // Always draw into the main match window.
  RenderIntProto(IntMatchWindow, Class, ProtoId, match_color);
  // Mirror into the dedicated proto window when it exists.
  if (ProtoDisplayWindow != NULL) {
    RenderIntProto(ProtoDisplayWindow, Class, ProtoId, match_color);
  }
} /* DisplayIntProto */
#endif
/*---------------------------------------------------------------------------*/
/**
 * Creates a new integer class data structure with sufficient space
 * allocated to handle the specified number of protos and configs.
 *
 * @param MaxNumProtos   number of protos to allocate space for
 * @param MaxNumConfigs  number of configs to allocate space for
 * @return New class created.
 */
INT_CLASS NewIntClass(int MaxNumProtos, int MaxNumConfigs) {
  assert(MaxNumConfigs <= MAX_NUM_CONFIGS);
  INT_CLASS new_class = (INT_CLASS) Emalloc(sizeof(INT_CLASS_STRUCT));
  // Round up so every requested proto fits in some proto set.
  new_class->NumProtoSets =
      (MaxNumProtos + PROTOS_PER_PROTO_SET - 1) / PROTOS_PER_PROTO_SET;
  assert(new_class->NumProtoSets <= MAX_NUM_PROTO_SETS);
  new_class->NumProtos = 0;
  new_class->NumConfigs = 0;
  // Allocate and zero one proto set per slot, installing each in the class.
  for (int set = 0; set < new_class->NumProtoSets; set++) {
    PROTO_SET proto_set = (PROTO_SET) Emalloc(sizeof(PROTO_SET_STRUCT));
    memset(proto_set, 0, sizeof(*proto_set));
    new_class->ProtoSets[set] = proto_set;
  }
  // Allocate zeroed proto lengths only when there is room for any protos.
  if (MaxNumIntProtosIn(new_class) > 0) {
    new_class->ProtoLengths =
        (uinT8 *) Emalloc(MaxNumIntProtosIn(new_class) * sizeof(uinT8));
    memset(new_class->ProtoLengths, 0,
           MaxNumIntProtosIn(new_class) * sizeof(*new_class->ProtoLengths));
  } else {
    new_class->ProtoLengths = NULL;
  }
  memset(new_class->ConfigLengths, 0, sizeof(new_class->ConfigLengths));
  return new_class;
} /* NewIntClass */
/*-------------------------------------------------------------------------*/
// Releases all memory owned by the given integer class: every proto
// set, the proto-length array (if any), and the class struct itself.
void free_int_class(INT_CLASS int_class) {
  for (int set = 0; set < int_class->NumProtoSets; set++)
    Efree(int_class->ProtoSets[set]);
  if (int_class->ProtoLengths != NULL)
    Efree(int_class->ProtoLengths);
  Efree(int_class);
}
/*---------------------------------------------------------------------------*/
/**
 * Allocates a new set of integer templates initialized to hold
 * 0 classes, with every class-id slot cleared.
 *
 * @return The integer templates created.
 */
INT_TEMPLATES NewIntTemplates() {
  INT_TEMPLATES templates =
      (INT_TEMPLATES) Emalloc(sizeof(INT_TEMPLATES_STRUCT));
  templates->NumClasses = 0;
  templates->NumClassPruners = 0;
  // No classes yet: clear every slot so lookups return NULL.
  for (int id = 0; id < MAX_NUM_CLASSES; id++)
    ClassForClassId(templates, id) = NULL;
  return templates;
} /* NewIntTemplates */
/*---------------------------------------------------------------------------*/
// Frees a set of integer templates: each class (via free_int_class),
// each class pruner, then the templates struct itself.
void free_int_templates(INT_TEMPLATES templates) {
  for (int c = 0; c < templates->NumClasses; c++)
    free_int_class(templates->Class[c]);
  for (int p = 0; p < templates->NumClassPruners; p++)
    delete templates->ClassPruners[p];
  Efree(templates);
}
namespace tesseract {
INT_TEMPLATES Classify::ReadIntTemplates(FILE *File) {
  /*
  ** Parameters:
  ** File open file to read templates from
  ** Globals: none
  ** Operation: This routine reads a set of integer templates from
  ** File. File must already be open and must be in the
  ** correct binary format.
  ** Return: Pointer to integer templates read from File.
  ** Exceptions: none
  ** History: Wed Feb 27 11:48:46 1991, DSJ, Created.
  */
  int i, j, w, x, y, z;
  BOOL8 swap;
  int nread;
  int unicharset_size;
  int version_id = 0;
  INT_TEMPLATES Templates;
  CLASS_PRUNER_STRUCT* Pruner;
  INT_CLASS Class;
  uinT8 *Lengths;
  PROTO_SET ProtoSet;
  /* variables for conversion from older inttemp formats */
  int b, bit_number, last_cp_bit_number, new_b, new_i, new_w;
  CLASS_ID class_id, max_class_id;
  // Old (version < 2) files index classes by position; these tables map
  // between the stored indices and real class ids during conversion.
  inT16 *IndexFor = new inT16[MAX_NUM_CLASSES];
  CLASS_ID *ClassIdFor = new CLASS_ID[MAX_NUM_CLASSES];
  CLASS_PRUNER_STRUCT **TempClassPruner =
      new CLASS_PRUNER_STRUCT*[MAX_NUM_CLASS_PRUNERS];
  uinT32 SetBitsForMask =           // word with NUM_BITS_PER_CLASS
      (1 << NUM_BITS_PER_CLASS) - 1;  // set starting at bit 0
  uinT32 Mask, NewMask, ClassBits;
  int MaxNumConfigs = MAX_NUM_CONFIGS;
  int WerdsPerConfigVec = WERDS_PER_CONFIG_VEC;
  /* first read the high level template struct */
  Templates = NewIntTemplates();
  // Read Templates in parts for 64 bit compatibility.
  if (fread(&unicharset_size, sizeof(int), 1, File) != 1)
    cprintf("Bad read of inttemp!\n");
  if (fread(&Templates->NumClasses,
            sizeof(Templates->NumClasses), 1, File) != 1 ||
      fread(&Templates->NumClassPruners,
            sizeof(Templates->NumClassPruners), 1, File) != 1)
    cprintf("Bad read of inttemp!\n");
  // Swap status is determined automatically: a sane NumClassPruners is in
  // [0, MAX_NUM_CLASS_PRUNERS]; anything else means the file was written
  // with the opposite endianness.
  swap = Templates->NumClassPruners < 0 ||
         Templates->NumClassPruners > MAX_NUM_CLASS_PRUNERS;
  if (swap) {
    Reverse32(&Templates->NumClassPruners);
    Reverse32(&Templates->NumClasses);
    Reverse32(&unicharset_size);
  }
  if (Templates->NumClasses < 0) {
    // This file has a version id! (stored negated; the real class count
    // follows as the next word)
    version_id = -Templates->NumClasses;
    if (fread(&Templates->NumClasses, sizeof(Templates->NumClasses),
              1, File) != 1)
      cprintf("Bad read of inttemp!\n");
    if (swap)
      Reverse32(&Templates->NumClasses);
  }
  // Version 3 shrank the per-class config arrays; older files were written
  // with the OLD_ sizes and must be read with them.
  if (version_id < 3) {
    MaxNumConfigs = OLD_MAX_NUM_CONFIGS;
    WerdsPerConfigVec = OLD_WERDS_PER_CONFIG_VEC;
  }
  if (version_id < 2) {
    // Pre-version-2 files store the class index <-> class id mapping tables.
    for (i = 0; i < unicharset_size; ++i) {
      if (fread(&IndexFor[i], sizeof(inT16), 1, File) != 1)
        cprintf("Bad read of inttemp!\n");
    }
    for (i = 0; i < Templates->NumClasses; ++i) {
      if (fread(&ClassIdFor[i], sizeof(CLASS_ID), 1, File) != 1)
        cprintf("Bad read of inttemp!\n");
    }
    if (swap) {
      for (i = 0; i < Templates->NumClasses; i++)
        Reverse16(&IndexFor[i]);
      for (i = 0; i < Templates->NumClasses; i++)
        Reverse32(&ClassIdFor[i]);
    }
  }
  /* then read in the class pruners */
  for (i = 0; i < Templates->NumClassPruners; i++) {
    Pruner = new CLASS_PRUNER_STRUCT;
    if ((nread =
         fread(Pruner, 1, sizeof(CLASS_PRUNER_STRUCT),
               File)) != sizeof(CLASS_PRUNER_STRUCT))
      cprintf("Bad read of inttemp!\n");
    if (swap) {
      for (x = 0; x < NUM_CP_BUCKETS; x++) {
        for (y = 0; y < NUM_CP_BUCKETS; y++) {
          for (z = 0; z < NUM_CP_BUCKETS; z++) {
            for (w = 0; w < WERDS_PER_CP_VECTOR; w++) {
              Reverse32(&Pruner->p[x][y][z][w]);
            }
          }
        }
      }
    }
    // Old-format pruners are parked in TempClassPruner until the
    // index->id conversion below; new-format ones are installed directly.
    if (version_id < 2) {
      TempClassPruner[i] = Pruner;
    } else {
      Templates->ClassPruners[i] = Pruner;
    }
  }
  /* fix class pruners if they came from an old version of inttemp */
  if (version_id < 2) {
    // Allocate enough class pruners to cover all the class ids.
    max_class_id = 0;
    for (i = 0; i < Templates->NumClasses; i++)
      if (ClassIdFor[i] > max_class_id)
        max_class_id = ClassIdFor[i];
    for (i = 0; i <= CPrunerIdFor(max_class_id); i++) {
      Templates->ClassPruners[i] = new CLASS_PRUNER_STRUCT;
      memset(Templates->ClassPruners[i], 0, sizeof(CLASS_PRUNER_STRUCT));
    }
    // Convert class pruners from the old format (indexed by class index)
    // to the new format (indexed by class id).
    last_cp_bit_number = NUM_BITS_PER_CLASS * Templates->NumClasses - 1;
    for (i = 0; i < Templates->NumClassPruners; i++) {
      for (x = 0; x < NUM_CP_BUCKETS; x++)
        for (y = 0; y < NUM_CP_BUCKETS; y++)
          for (z = 0; z < NUM_CP_BUCKETS; z++)
            for (w = 0; w < WERDS_PER_CP_VECTOR; w++) {
              if (TempClassPruner[i]->p[x][y][z][w] == 0)
                continue;
              // Each word packs several classes, NUM_BITS_PER_CLASS bits
              // per class; walk the classes packed into this word.
              for (b = 0; b < BITS_PER_WERD; b += NUM_BITS_PER_CLASS) {
                bit_number = i * BITS_PER_CP_VECTOR + w * BITS_PER_WERD + b;
                if (bit_number > last_cp_bit_number)
                  break; // the rest of the bits in this word are not used
                class_id = ClassIdFor[bit_number / NUM_BITS_PER_CLASS];
                // Single out NUM_BITS_PER_CLASS bits relating to class_id.
                Mask = SetBitsForMask << b;
                ClassBits = TempClassPruner[i]->p[x][y][z][w] & Mask;
                // Move these bits to the new position in which they should
                // appear (indexed corresponding to the class_id).
                new_i = CPrunerIdFor(class_id);
                new_w = CPrunerWordIndexFor(class_id);
                new_b = CPrunerBitIndexFor(class_id) * NUM_BITS_PER_CLASS;
                if (new_b > b) {
                  ClassBits <<= (new_b - b);
                } else {
                  ClassBits >>= (b - new_b);
                }
                // Copy bits relating to class_id to the correct position
                // in Templates->ClassPruner.
                NewMask = SetBitsForMask << new_b;
                Templates->ClassPruners[new_i]->p[x][y][z][new_w] &= ~NewMask;
                Templates->ClassPruners[new_i]->p[x][y][z][new_w] |= ClassBits;
              }
            }
    }
    // The old-format pruners have been fully converted; discard them.
    for (i = 0; i < Templates->NumClassPruners; i++) {
      delete TempClassPruner[i];
    }
  }
  /* then read in each class */
  for (i = 0; i < Templates->NumClasses; i++) {
    /* first read in the high level struct for the class */
    Class = (INT_CLASS) Emalloc (sizeof (INT_CLASS_STRUCT));
    if (fread(&Class->NumProtos, sizeof(Class->NumProtos), 1, File) != 1 ||
        fread(&Class->NumProtoSets, sizeof(Class->NumProtoSets), 1, File) != 1 ||
        fread(&Class->NumConfigs, sizeof(Class->NumConfigs), 1, File) != 1)
      cprintf ("Bad read of inttemp!\n");
    if (version_id == 0) {
      // Only version 0 writes 5 pointless pointers to the file.
      for (j = 0; j < 5; ++j) {
        int junk;
        if (fread(&junk, sizeof(junk), 1, File) != 1)
          cprintf ("Bad read of inttemp!\n");
      }
    }
    // Versions < 4 store a full MaxNumConfigs lengths; >= 4 only stores
    // the NumConfigs actually used.
    if (version_id < 4) {
      for (j = 0; j < MaxNumConfigs; ++j) {
        if (fread(&Class->ConfigLengths[j], sizeof(uinT16), 1, File) != 1)
          cprintf ("Bad read of inttemp!\n");
      }
      if (swap) {
        // NOTE(review): only NumProtos is byte-swapped here - presumably
        // NumProtoSets and NumConfigs are single-byte fields; confirm
        // against the INT_CLASS_STRUCT declaration.
        Reverse16(&Class->NumProtos);
        for (j = 0; j < MaxNumConfigs; j++)
          Reverse16(&Class->ConfigLengths[j]);
      }
    } else {
      ASSERT_HOST(Class->NumConfigs < MaxNumConfigs);
      for (j = 0; j < Class->NumConfigs; ++j) {
        if (fread(&Class->ConfigLengths[j], sizeof(uinT16), 1, File) != 1)
          cprintf ("Bad read of inttemp!\n");
      }
      if (swap) {
        Reverse16(&Class->NumProtos);
        for (j = 0; j < MaxNumConfigs; j++)
          Reverse16(&Class->ConfigLengths[j]);
      }
    }
    if (version_id < 2) {
      ClassForClassId (Templates, ClassIdFor[i]) = Class;
    } else {
      ClassForClassId (Templates, i) = Class;
    }
    /* then read in the proto lengths */
    Lengths = NULL;
    if (MaxNumIntProtosIn (Class) > 0) {
      Lengths = (uinT8 *)Emalloc(sizeof(uinT8) * MaxNumIntProtosIn(Class));
      if ((nread =
           fread((char *)Lengths, sizeof(uinT8),
                 MaxNumIntProtosIn(Class), File)) != MaxNumIntProtosIn (Class))
        cprintf ("Bad read of inttemp!\n");
    }
    Class->ProtoLengths = Lengths;
    /* then read in the proto sets */
    for (j = 0; j < Class->NumProtoSets; j++) {
      ProtoSet = (PROTO_SET)Emalloc(sizeof(PROTO_SET_STRUCT));
      // Version < 3 proto sets have a different on-disk layout, so they
      // must be read field by field; >= 3 matches the in-memory struct.
      if (version_id < 3) {
        if ((nread =
             fread((char *) &ProtoSet->ProtoPruner, 1,
                   sizeof(PROTO_PRUNER), File)) != sizeof(PROTO_PRUNER))
          cprintf("Bad read of inttemp!\n");
        for (x = 0; x < PROTOS_PER_PROTO_SET; x++) {
          if ((nread = fread((char *) &ProtoSet->Protos[x].A, 1,
                             sizeof(inT8), File)) != sizeof(inT8) ||
              (nread = fread((char *) &ProtoSet->Protos[x].B, 1,
                             sizeof(uinT8), File)) != sizeof(uinT8) ||
              (nread = fread((char *) &ProtoSet->Protos[x].C, 1,
                             sizeof(inT8), File)) != sizeof(inT8) ||
              (nread = fread((char *) &ProtoSet->Protos[x].Angle, 1,
                             sizeof(uinT8), File)) != sizeof(uinT8))
            cprintf("Bad read of inttemp!\n");
          for (y = 0; y < WerdsPerConfigVec; y++)
            if ((nread = fread((char *) &ProtoSet->Protos[x].Configs[y], 1,
                               sizeof(uinT32), File)) != sizeof(uinT32))
              cprintf("Bad read of inttemp!\n");
        }
      } else {
        if ((nread =
             fread((char *) ProtoSet, 1, sizeof(PROTO_SET_STRUCT),
                   File)) != sizeof(PROTO_SET_STRUCT))
          cprintf("Bad read of inttemp!\n");
      }
      if (swap) {
        for (x = 0; x < NUM_PP_PARAMS; x++)
          for (y = 0; y < NUM_PP_BUCKETS; y++)
            for (z = 0; z < WERDS_PER_PP_VECTOR; z++)
              Reverse32(&ProtoSet->ProtoPruner[x][y][z]);
        for (x = 0; x < PROTOS_PER_PROTO_SET; x++)
          for (y = 0; y < WerdsPerConfigVec; y++)
            Reverse32(&ProtoSet->Protos[x].Configs[y]);
      }
      Class->ProtoSets[j] = ProtoSet;
    }
    if (version_id < 4)
      Class->font_set_id = -1;
    else {
      // NOTE(review): fread return value is unchecked here, unlike every
      // other read in this routine - a short read would leave font_set_id
      // uninitialized. Confirm whether this should also report
      // "Bad read of inttemp!".
      fread(&Class->font_set_id, sizeof(int), 1, File);
      if (swap)
        Reverse32(&Class->font_set_id);
    }
  }
  if (version_id < 2) {
    /* add an empty NULL class with class id 0 */
    assert(UnusedClassIdIn (Templates, 0));
    ClassForClassId (Templates, 0) = NewIntClass (1, 1);
    ClassForClassId (Templates, 0)->font_set_id = -1;
    Templates->NumClasses++;
    /* make sure the classes are contiguous */
    for (i = 0; i < MAX_NUM_CLASSES; i++) {
      if (i < Templates->NumClasses) {
        if (ClassForClassId (Templates, i) == NULL) {
          fprintf(stderr, "Non-contiguous class ids in inttemp\n");
          exit(1);
        }
      } else {
        if (ClassForClassId (Templates, i) != NULL) {
          fprintf(stderr, "Class id %d exceeds NumClassesIn (Templates) %d\n",
                  i, Templates->NumClasses);
          exit(1);
        }
      }
    }
  }
  // Version 4 added the font info/set tables; version 5 added spacing info.
  if (version_id >= 4) {
    this->fontinfo_table_.read(File, NewPermanentTessCallback(read_info), swap);
    if (version_id >= 5) {
      this->fontinfo_table_.read(File,
                                 NewPermanentTessCallback(read_spacing_info),
                                 swap);
    }
    this->fontset_table_.read(File, NewPermanentTessCallback(read_set), swap);
  }
  // Clean up.
  delete[] IndexFor;
  delete[] ClassIdFor;
  delete[] TempClassPruner;
  return (Templates);
} /* ReadIntTemplates */
/*---------------------------------------------------------------------------*/
#ifndef GRAPHICS_DISABLED
void Classify::ShowMatchDisplay() {
/*
** Parameters: none
** Globals:
** FeatureShapes display list containing feature matches
** ProtoShapes display list containing proto matches
** Operation: This routine sends the shapes in the global display
** lists to the match debugger window.
** Return: none
** Exceptions: none
** History: Thu Mar 21 15:47:33 1991, DSJ, Created.
*/
InitIntMatchWindowIfReqd();
if (ProtoDisplayWindow) {
ProtoDisplayWindow->Clear();
}
if (FeatureDisplayWindow) {
FeatureDisplayWindow->Clear();
}
ClearFeatureSpaceWindow(
static_cast<NORM_METHOD>(static_cast<int>(classify_norm_method)),
IntMatchWindow);
IntMatchWindow->ZoomToRectangle(INT_MIN_X, INT_MIN_Y,
INT_MAX_X, INT_MAX_Y);
if (ProtoDisplayWindow) {
ProtoDisplayWindow->ZoomToRectangle(INT_MIN_X, INT_MIN_Y,
INT_MAX_X, INT_MAX_Y);
}
if (FeatureDisplayWindow) {
FeatureDisplayWindow->ZoomToRectangle(INT_MIN_X, INT_MIN_Y,
INT_MAX_X, INT_MAX_Y);
}
} /* ShowMatchDisplay */
// Clears the given window and draws the featurespace guides for the
// appropriate normalization method.
// Clears the given window and draws the featurespace guides for the
// appropriate normalization method: horizontal reference lines for
// baseline normalization, a centered box otherwise.
void ClearFeatureSpaceWindow(NORM_METHOD norm_method, ScrollView* window) {
  window->Clear();
  window->Pen(ScrollView::GREY);
  // Draw the feature space limit rectangle.
  window->Rectangle(0, 0, INT_MAX_X, INT_MAX_Y);
  if (norm_method == baseline) {
    // One full-width guide line per vertical reference level.
    const int guide_ys[] = {
        INT_DESCENDER, INT_BASELINE, INT_XHEIGHT, INT_CAPHEIGHT };
    for (int i = 0; i < 4; ++i) {
      window->SetCursor(0, guide_ys[i]);
      window->DrawTo(INT_MAX_X, guide_ys[i]);
    }
  } else {
    window->Rectangle(INT_XCENTER - INT_XRADIUS, INT_YCENTER - INT_YRADIUS,
                      INT_XCENTER + INT_XRADIUS, INT_YCENTER + INT_YRADIUS);
  }
}
#endif
/*---------------------------------------------------------------------------*/
void Classify::WriteIntTemplates(FILE *File, INT_TEMPLATES Templates,
                                 const UNICHARSET& target_unicharset) {
  /*
  ** Parameters:
  ** File open file to write templates to
  ** Templates templates to save into File
  ** Globals: none
  ** Operation: This routine writes Templates to File. The format
  ** is an efficient binary format. File must already be open
  ** for writing.
  ** Return: none
  ** Exceptions: none
  ** History: Wed Feb 27 11:48:46 1991, DSJ, Created.
  */
  // NOTE(review): the field order below defines the on-disk format that
  // ReadIntTemplates() parses; do not reorder writes. fwrite return
  // values are not checked anywhere in this routine.
  int i, j;
  INT_CLASS Class;
  int unicharset_size = target_unicharset.size();
  int version_id = -5;  // When negated by the reader -1 becomes +1 etc.
  if (Templates->NumClasses != unicharset_size) {
    cprintf("Warning: executing WriteIntTemplates() with %d classes in"
            " Templates, while target_unicharset size is %d\n",
            Templates->NumClasses, unicharset_size);
  }
  /* first write the high level template struct */
  // The negative version_id occupies the slot the reader first interprets
  // as NumClasses; the real class count follows after NumClassPruners.
  fwrite(&unicharset_size, sizeof(unicharset_size), 1, File);
  fwrite(&version_id, sizeof(version_id), 1, File);
  fwrite(&Templates->NumClassPruners, sizeof(Templates->NumClassPruners),
         1, File);
  fwrite(&Templates->NumClasses, sizeof(Templates->NumClasses), 1, File);
  /* then write out the class pruners */
  for (i = 0; i < Templates->NumClassPruners; i++)
    fwrite(Templates->ClassPruners[i],
           sizeof(CLASS_PRUNER_STRUCT), 1, File);
  /* then write out each class */
  for (i = 0; i < Templates->NumClasses; i++) {
    Class = Templates->Class[i];
    /* first write out the high level struct for the class */
    fwrite(&Class->NumProtos, sizeof(Class->NumProtos), 1, File);
    fwrite(&Class->NumProtoSets, sizeof(Class->NumProtoSets), 1, File);
    // The class's config count must agree with its font set entry.
    ASSERT_HOST(Class->NumConfigs == this->fontset_table_.get(Class->font_set_id).size);
    fwrite(&Class->NumConfigs, sizeof(Class->NumConfigs), 1, File);
    // Version >= 4 format: only the used config lengths are written.
    for (j = 0; j < Class->NumConfigs; ++j) {
      fwrite(&Class->ConfigLengths[j], sizeof(uinT16), 1, File);
    }
    /* then write out the proto lengths */
    if (MaxNumIntProtosIn (Class) > 0) {
      fwrite ((char *) (Class->ProtoLengths), sizeof (uinT8),
              MaxNumIntProtosIn (Class), File);
    }
    /* then write out the proto sets */
    for (j = 0; j < Class->NumProtoSets; j++)
      fwrite ((char *) Class->ProtoSets[j],
              sizeof (PROTO_SET_STRUCT), 1, File);
    /* then write the fonts info */
    fwrite(&Class->font_set_id, sizeof(int), 1, File);
  }
  /* Write the fonts info tables */
  this->fontinfo_table_.write(File, NewPermanentTessCallback(write_info));
  this->fontinfo_table_.write(File,
                              NewPermanentTessCallback(write_spacing_info));
  this->fontset_table_.write(File, NewPermanentTessCallback(write_set));
} /* WriteIntTemplates */
} // namespace tesseract
/*-----------------------------------------------------------------------------
Private Code
-----------------------------------------------------------------------------*/
/*---------------------------------------------------------------------------*/
/**
 * Computes the parameter value corresponding to the beginning of the
 * specified bucket.  The bucket number should have been generated by
 * BucketFor() with the same Offset and NumBuckets.
 *
 * @param Bucket      bucket whose start is to be computed
 * @param Offset      offset used to map params to buckets
 * @param NumBuckets  total number of buckets
 * @return Param value corresponding to start position of Bucket.
 */
FLOAT32 BucketStart(int Bucket, FLOAT32 Offset, int NumBuckets) {
  return (FLOAT32) Bucket / NumBuckets - Offset;
} /* BucketStart */
/*---------------------------------------------------------------------------*/
/**
 * Computes the parameter value corresponding to the end of the
 * specified bucket.  The bucket number should have been generated by
 * BucketFor() with the same Offset and NumBuckets.
 *
 * @param Bucket      bucket whose end is to be computed
 * @param Offset      offset used to map params to buckets
 * @param NumBuckets  total number of buckets
 * @return Param value corresponding to end position of Bucket.
 */
FLOAT32 BucketEnd(int Bucket, FLOAT32 Offset, int NumBuckets) {
  return (FLOAT32) (Bucket + 1) / NumBuckets - Offset;
} /* BucketEnd */
/*---------------------------------------------------------------------------*/
/**
 * Fills in the section of a class pruner corresponding to a single
 * x value for a single proto of a class.  For every (Y, Angle) cell
 * in the fill spec's range, the masked bits of the pruner word are
 * raised to ClassCount if they are currently lower.
 *
 * @param FillSpec    specifies which bits to fill in pruner
 * @param Pruner      class pruner to be filled
 * @param ClassMask   indicates which bits to change in each word
 * @param ClassCount  indicates what to change bits to
 * @param WordIndex   indicates which word to change
 */
void DoFill(FILL_SPEC *FillSpec,
            CLASS_PRUNER_STRUCT* Pruner,
            register uinT32 ClassMask,
            register uinT32 ClassCount,
            register uinT32 WordIndex) {
  // Clamp the fill region to the pruner table bounds.
  int X = FillSpec->X;
  if (X < 0)
    X = 0;
  if (X >= NUM_CP_BUCKETS)
    X = NUM_CP_BUCKETS - 1;
  if (FillSpec->YStart < 0)
    FillSpec->YStart = 0;
  if (FillSpec->YEnd >= NUM_CP_BUCKETS)
    FillSpec->YEnd = NUM_CP_BUCKETS - 1;
  for (int Y = FillSpec->YStart; Y <= FillSpec->YEnd; Y++) {
    // Angle is a circular dimension: walk from AngleStart, wrapping,
    // until AngleEnd has been processed.
    int Angle = FillSpec->AngleStart;
    for (;;) {
      uinT32 OldWord = Pruner->p[X][Y][Angle][WordIndex];
      if (ClassCount > (OldWord & ClassMask)) {
        OldWord &= ~ClassMask;
        OldWord |= ClassCount;
        Pruner->p[X][Y][Angle][WordIndex] = OldWord;
      }
      if (Angle == FillSpec->AngleEnd)
        break;
      CircularIncrement (Angle, NUM_CP_BUCKETS);
    }
  }
} /* DoFill */
/*---------------------------------------------------------------------------*/
/**
 * Returns TRUE if the specified table filler is done, i.e. if it has
 * no more lines to fill.
 *
 * @param Filler  table filler to check if done
 * @return TRUE if no more lines to fill, FALSE otherwise.
 */
BOOL8 FillerDone(TABLE_FILLER *Filler) {
  FILL_SWITCH *next_switch = &(Filler->Switch[Filler->NextSwitch]);
  // Done once X has moved past the final (LastSwitch) entry.
  return (Filler->X > next_switch->X && next_switch->Type == LastSwitch)
             ? TRUE : FALSE;
} /* FillerDone */
/*---------------------------------------------------------------------------*/
/**
 * Sets Bit in each bit vector whose bucket lies within the range
 * Center +- Spread.  The fill is done for a circular dimension,
 * i.e. bucket 0 is adjacent to the last bucket.  Center and Spread
 * are assumed to be expressed in a circular coordinate system whose
 * range is 0 to 1.
 *
 * @param ParamTable  table of bit vectors, one per param bucket
 * @param Bit         bit position in vectors to be filled
 * @param Center      center of filled area
 * @param Spread      spread of filled area
 * @param debug       whether to trace the fill range
 */
void FillPPCircularBits(uinT32 ParamTable[NUM_PP_BUCKETS][WERDS_PER_PP_VECTOR],
                        int Bit, FLOAT32 Center, FLOAT32 Spread, bool debug) {
  // A spread beyond half the circle would cover everything anyway.
  if (Spread > 0.5)
    Spread = 0.5;
  int first_bucket = (int) floor ((Center - Spread) * NUM_PP_BUCKETS);
  if (first_bucket < 0)
    first_bucket += NUM_PP_BUCKETS;  // wrap below 0
  int last_bucket = (int) floor ((Center + Spread) * NUM_PP_BUCKETS);
  if (last_bucket >= NUM_PP_BUCKETS)
    last_bucket -= NUM_PP_BUCKETS;   // wrap past the end
  if (debug) tprintf("Circular fill from %d to %d", first_bucket, last_bucket);
  // Walk the buckets circularly, stopping after the last one is set.
  int bucket = first_bucket;
  for (;;) {
    SET_BIT (ParamTable[bucket], Bit);
    if (bucket == last_bucket)
      break;
    CircularIncrement (bucket, NUM_PP_BUCKETS);
  }
} /* FillPPCircularBits */
/*---------------------------------------------------------------------------*/
/**
 * Sets Bit in each bit vector whose bucket lies within the range
 * Center +- Spread.  The fill is done for a linear dimension, i.e.
 * there is no wrap-around.  Center and Spread are assumed to be in
 * a linear coordinate system whose range is approximately 0 to 1;
 * values outside this range are clipped.
 *
 * @param ParamTable  table of bit vectors, one per param bucket
 * @param Bit         bit number being filled
 * @param Center      center of filled area
 * @param Spread      spread of filled area
 * @param debug       whether to trace the fill range
 */
void FillPPLinearBits(uinT32 ParamTable[NUM_PP_BUCKETS][WERDS_PER_PP_VECTOR],
                      int Bit, FLOAT32 Center, FLOAT32 Spread, bool debug) {
  // Clip the range to the table bounds instead of wrapping.
  int first_bucket = (int) floor ((Center - Spread) * NUM_PP_BUCKETS);
  if (first_bucket < 0)
    first_bucket = 0;
  int last_bucket = (int) floor ((Center + Spread) * NUM_PP_BUCKETS);
  if (last_bucket >= NUM_PP_BUCKETS)
    last_bucket = NUM_PP_BUCKETS - 1;
  if (debug) tprintf("Linear fill from %d to %d", first_bucket, last_bucket);
  for (int bucket = first_bucket; bucket <= last_bucket; bucket++)
    SET_BIT (ParamTable[bucket], Bit);
} /* FillPPLinearBits */
/*---------------------------------------------------------------------------*/
#ifndef GRAPHICS_DISABLED
namespace tesseract {
/**
 * Prompts the user with Prompt and waits for the user to enter
 * something in the debug window, interpreting popup-menu events to
 * select a shape index or a unichar for adaptive/static matcher
 * debugging.
 *
 * Fix: the SVEvent returned by AwaitEvent() was leaked on the two
 * early-return paths (shape-index hit and adaptive/no-shape-table);
 * it is now deleted before every return.
 *
 * @param Prompt         prompt to print while waiting for input from window
 * @param adaptive_on    [out] true if the adaptive matcher should be debugged
 * @param pretrained_on  [out] true if the static matcher should be debugged
 * @param shape_id       [out] selected shape index, or -1
 * @return Unichar id selected in the debug window, or 0 on a plain click.
 */
CLASS_ID Classify::GetClassToDebug(const char *Prompt, bool* adaptive_on,
                                   bool* pretrained_on, int* shape_id) {
  tprintf("%s\n", Prompt);
  SVEvent* ev;
  SVEventType ev_type;
  int unichar_id = INVALID_UNICHAR_ID;
  // Wait until a click or popup event.
  do {
    ev = IntMatchWindow->AwaitEvent(SVET_ANY);
    ev_type = ev->type;
    if (ev_type == SVET_POPUP) {
      if (ev->command_id == IDA_SHAPE_INDEX) {
        if (shape_table_ != NULL) {
          *shape_id = atoi(ev->parameter);
          *adaptive_on = false;
          *pretrained_on = true;
          if (*shape_id >= 0 && *shape_id < shape_table_->NumShapes()) {
            int font_id;
            shape_table_->GetFirstUnicharAndFont(*shape_id, &unichar_id,
                                                 &font_id);
            tprintf("Shape %d, first unichar=%d, font=%d\n",
                    *shape_id, unichar_id, font_id);
            delete ev;  // Was leaked on this early return.
            return unichar_id;
          }
          tprintf("Shape index '%s' not found in shape table\n", ev->parameter);
        } else {
          tprintf("No shape table loaded!\n");
        }
      } else {
        if (unicharset.contains_unichar(ev->parameter)) {
          unichar_id = unicharset.unichar_to_id(ev->parameter);
          if (ev->command_id == IDA_ADAPTIVE) {
            *adaptive_on = true;
            *pretrained_on = false;
            *shape_id = -1;
          } else if (ev->command_id == IDA_STATIC) {
            *adaptive_on = false;
            *pretrained_on = true;
          } else {
            *adaptive_on = true;
            *pretrained_on = true;
          }
          if (ev->command_id == IDA_ADAPTIVE || shape_table_ == NULL) {
            *shape_id = -1;
            delete ev;  // Was leaked on this early return.
            return unichar_id;
          }
          // List every shape containing the selected unichar.
          for (int s = 0; s < shape_table_->NumShapes(); ++s) {
            if (shape_table_->GetShape(s).ContainsUnichar(unichar_id)) {
              tprintf("%s\n", shape_table_->DebugStr(s).string());
            }
          }
        } else {
          tprintf("Char class '%s' not found in unicharset",
                  ev->parameter);
        }
      }
    }
    delete ev;
  } while (ev_type != SVET_CLICK);
  return 0;
} /* GetClassToDebug */
} // namespace tesseract
#endif
/*---------------------------------------------------------------------------*/
/**
 * Copies the appropriate global pad variables into EndPad, SidePad,
 * and AnglePad for the given "tightness" level.  This is a kludge
 * used to get around the fact that global control variables cannot
 * be arrays.  If the specified level is illegal, the tightest
 * possible pads are returned.
 *
 * @param Level     "tightness" level to return pads for
 * @param EndPad    place to put end pad for Level
 * @param SidePad   place to put side pad for Level
 * @param AnglePad  place to put angle pad for Level
 */
void GetCPPadsForLevel(int Level,
                       FLOAT32 *EndPad,
                       FLOAT32 *SidePad,
                       FLOAT32 *AnglePad) {
  if (Level == 0) {
    *EndPad = classify_cp_end_pad_loose * GetPicoFeatureLength ();
    *SidePad = classify_cp_side_pad_loose * GetPicoFeatureLength ();
    *AnglePad = classify_cp_angle_pad_loose / 360.0;
  } else if (Level == 1) {
    *EndPad = classify_cp_end_pad_medium * GetPicoFeatureLength ();
    *SidePad = classify_cp_side_pad_medium * GetPicoFeatureLength ();
    *AnglePad = classify_cp_angle_pad_medium / 360.0;
  } else {
    // Level 2 and any unrecognized level both get the tightest pads.
    *EndPad = classify_cp_end_pad_tight * GetPicoFeatureLength ();
    *SidePad = classify_cp_side_pad_tight * GetPicoFeatureLength ();
    *AnglePad = classify_cp_angle_pad_tight / 360.0;
  }
  // An angle pad over half a circle would wrap onto itself.
  if (*AnglePad > 0.5)
    *AnglePad = 0.5;
} /* GetCPPadsForLevel */
/*---------------------------------------------------------------------------*/
/**
 * Maps an evidence value onto a display color: BLUE for weak
 * evidence up through WHITE for the strongest.
 *
 * @param Evidence  evidence value in [0, 1] to return color for
 * @return Color which corresponds to specified Evidence value.
 */
ScrollView::Color GetMatchColorFor(FLOAT32 Evidence) {
  assert (Evidence >= 0.0);
  assert (Evidence <= 1.0);
  // Thresholds checked from weakest to strongest evidence.
  if (Evidence < 0.50)
    return ScrollView::BLUE;
  if (Evidence < 0.75)
    return ScrollView::RED;
  if (Evidence < 0.90)
    return ScrollView::GREEN;
  return ScrollView::WHITE;
} /* GetMatchColorFor */
/*---------------------------------------------------------------------------*/
void GetNextFill(TABLE_FILLER *Filler, FILL_SPEC *Fill) {
  /*
  ** Parameters:
  ** Filler filler to get next fill spec from
  ** Fill place to put spec for next fill
  ** Globals: none
  ** Operation: This routine returns (in Fill) the specification of
  ** the next line to be filled from Filler. FillerDone() should
  ** always be called before GetNextFill() to ensure that we
  ** do not run past the end of the fill table.
  ** Return: none (results are returned in Fill)
  ** Exceptions: none
  ** History: Tue Feb 19 10:17:42 1991, DSJ, Created.
  */
  FILL_SWITCH *Next;
  /* compute the fill assuming no switches will be encountered */
  Fill->AngleStart = Filler->AngleStart;
  Fill->AngleEnd = Filler->AngleEnd;
  Fill->X = Filler->X;
  // Filler->YStart/YEnd are kept in 8-bit fixed point (deltas are scaled
  // by 256 when the filler is built); shift down to whole bucket indices.
  Fill->YStart = Filler->YStart >> 8;
  Fill->YEnd = Filler->YEnd >> 8;
  /* update the fill info and the filler for ALL switches at this X value */
  Next = &(Filler->Switch[Filler->NextSwitch]);
  while (Filler->X >= Next->X) {
    Fill->X = Filler->X = Next->X;
    if (Next->Type == StartSwitch) {
      // A start switch replaces the lower edge and its per-X slope.
      Fill->YStart = Next->Y;
      Filler->StartDelta = Next->Delta;
      Filler->YStart = Next->YInit;
    }
    else if (Next->Type == EndSwitch) {
      // An end switch replaces the upper edge and its per-X slope.
      Fill->YEnd = Next->Y;
      Filler->EndDelta = Next->Delta;
      Filler->YEnd = Next->YInit;
    }
    else { /* Type must be LastSwitch */
      break;
    }
    Filler->NextSwitch++;
    Next = &(Filler->Switch[Filler->NextSwitch]);
  }
  /* prepare the filler for the next call to this routine */
  Filler->X++;
  Filler->YStart += Filler->StartDelta;
  Filler->YEnd += Filler->EndDelta;
} /* GetNextFill */
/*---------------------------------------------------------------------------*/
/**
 * This routine computes a data structure (Filler)
 * which can be used to fill in a rectangle surrounding
 * the specified Proto. The filler is consumed incrementally by
 * GetNextFill(), one X column at a time.
 *
 * @param EndPad, SidePad, AnglePad padding to add to proto
 * @param Proto proto to create a filler for
 * @param Filler place to put table filler
 *
 * Globals: none
 *
 * @return none (results are returned in Filler)
 * @note Exceptions: none
 * @note History: Thu Feb 14 09:27:05 1991, DSJ, Created.
 */
void InitTableFiller (FLOAT32 EndPad, FLOAT32 SidePad,
                      FLOAT32 AnglePad, PROTO Proto, TABLE_FILLER * Filler)
#define XS X_SHIFT
#define YS Y_SHIFT
#define AS ANGLE_SHIFT
#define NB NUM_CP_BUCKETS
{
  FLOAT32 Angle;
  FLOAT32 X, Y, HalfLength;
  FLOAT32 Cos, Sin;
  FLOAT32 XAdjust, YAdjust;
  FPOINT Start, Switch1, Switch2, End;
  int S1 = 0;
  int S2 = 1;

  Angle = Proto->Angle;
  X = Proto->X;
  Y = Proto->Y;
  HalfLength = Proto->Length / 2.0;

  // Angle buckets wrap around, hence the circular bucket mapping.
  Filler->AngleStart = CircBucketFor(Angle - AnglePad, AS, NB);
  Filler->AngleEnd = CircBucketFor(Angle + AnglePad, AS, NB);
  Filler->NextSwitch = 0;

  if (fabs (Angle - 0.0) < HV_TOLERANCE || fabs (Angle - 0.5) < HV_TOLERANCE) {
    /* horizontal proto - handle as special case */
    // Y values are stored in 8.8 fixed point (NB * 256 buckets); GetNextFill
    // shifts them back down by 8 bits when producing a FILL.
    Filler->X = BucketFor(X - HalfLength - EndPad, XS, NB);
    Filler->YStart = BucketFor(Y - SidePad, YS, NB * 256);
    Filler->YEnd = BucketFor(Y + SidePad, YS, NB * 256);
    Filler->StartDelta = 0;
    Filler->EndDelta = 0;
    // A single LastSwitch terminates the fill at the far end of the proto.
    Filler->Switch[0].Type = LastSwitch;
    Filler->Switch[0].X = BucketFor(X + HalfLength + EndPad, XS, NB);
  } else if (fabs(Angle - 0.25) < HV_TOLERANCE ||
           fabs(Angle - 0.75) < HV_TOLERANCE) {
    /* vertical proto - handle as special case */
    Filler->X = BucketFor(X - SidePad, XS, NB);
    Filler->YStart = BucketFor(Y - HalfLength - EndPad, YS, NB * 256);
    Filler->YEnd = BucketFor(Y + HalfLength + EndPad, YS, NB * 256);
    Filler->StartDelta = 0;
    Filler->EndDelta = 0;
    Filler->Switch[0].Type = LastSwitch;
    Filler->Switch[0].X = BucketFor(X + SidePad, XS, NB);
  } else {
    /* diagonal proto */

    if ((Angle > 0.0 && Angle < 0.25) || (Angle > 0.5 && Angle < 0.75)) {
      /* rising diagonal proto */
      // Angle is in fractions of a turn; convert to radians for cos/sin.
      Angle *= 2.0 * PI;
      Cos = fabs(cos(Angle));
      Sin = fabs(sin(Angle));

      /* compute the positions of the corners of the acceptance region */
      // Start/End are opposite corners; Switch1/Switch2 are the other two,
      // where the lower/upper boundary slopes change as X sweeps rightward.
      Start.x = X - (HalfLength + EndPad) * Cos - SidePad * Sin;
      Start.y = Y - (HalfLength + EndPad) * Sin + SidePad * Cos;
      End.x = 2.0 * X - Start.x;
      End.y = 2.0 * Y - Start.y;
      Switch1.x = X - (HalfLength + EndPad) * Cos + SidePad * Sin;
      Switch1.y = Y - (HalfLength + EndPad) * Sin - SidePad * Cos;
      Switch2.x = 2.0 * X - Switch1.x;
      Switch2.y = 2.0 * Y - Switch1.y;

      // Switch entries must be ordered by increasing X for GetNextFill.
      if (Switch1.x > Switch2.x) {
        S1 = 1;
        S2 = 0;
      }

      /* translate into bucket positions and deltas */
      // Deltas are per-column Y increments in 8.8 fixed point.
      Filler->X = (inT8) MapParam(Start.x, XS, NB);
      Filler->StartDelta = -(inT16) ((Cos / Sin) * 256);
      Filler->EndDelta = (inT16) ((Sin / Cos) * 256);

      XAdjust = BucketEnd(Filler->X, XS, NB) - Start.x;
      YAdjust = XAdjust * Cos / Sin;
      Filler->YStart = (inT16) MapParam(Start.y - YAdjust, YS, NB * 256);
      YAdjust = XAdjust * Sin / Cos;
      Filler->YEnd = (inT16) MapParam(Start.y + YAdjust, YS, NB * 256);

      Filler->Switch[S1].Type = StartSwitch;
      Filler->Switch[S1].X = (inT8) MapParam(Switch1.x, XS, NB);
      Filler->Switch[S1].Y = (inT8) MapParam(Switch1.y, YS, NB);
      XAdjust = Switch1.x - BucketStart(Filler->Switch[S1].X, XS, NB);
      YAdjust = XAdjust * Sin / Cos;
      Filler->Switch[S1].YInit =
        (inT16) MapParam(Switch1.y - YAdjust, YS, NB * 256);
      Filler->Switch[S1].Delta = Filler->EndDelta;

      Filler->Switch[S2].Type = EndSwitch;
      Filler->Switch[S2].X = (inT8) MapParam(Switch2.x, XS, NB);
      Filler->Switch[S2].Y = (inT8) MapParam(Switch2.y, YS, NB);
      XAdjust = Switch2.x - BucketStart(Filler->Switch[S2].X, XS, NB);
      YAdjust = XAdjust * Cos / Sin;
      Filler->Switch[S2].YInit =
        (inT16) MapParam(Switch2.y + YAdjust, YS, NB * 256);
      Filler->Switch[S2].Delta = Filler->StartDelta;

      Filler->Switch[2].Type = LastSwitch;
      Filler->Switch[2].X = (inT8)MapParam(End.x, XS, NB);
    } else {
      /* falling diagonal proto */
      Angle *= 2.0 * PI;
      Cos = fabs(cos(Angle));
      Sin = fabs(sin(Angle));

      /* compute the positions of the corners of the acceptance region */
      // Mirror of the rising case: Y offsets from the proto center are
      // negated, and the Start/End slope roles are exchanged.
      Start.x = X - (HalfLength + EndPad) * Cos - SidePad * Sin;
      Start.y = Y + (HalfLength + EndPad) * Sin - SidePad * Cos;
      End.x = 2.0 * X - Start.x;
      End.y = 2.0 * Y - Start.y;
      Switch1.x = X - (HalfLength + EndPad) * Cos + SidePad * Sin;
      Switch1.y = Y + (HalfLength + EndPad) * Sin + SidePad * Cos;
      Switch2.x = 2.0 * X - Switch1.x;
      Switch2.y = 2.0 * Y - Switch1.y;

      if (Switch1.x > Switch2.x) {
        S1 = 1;
        S2 = 0;
      }

      /* translate into bucket positions and deltas */
      Filler->X = (inT8) MapParam(Start.x, XS, NB);
      Filler->StartDelta = -(inT16) ((Sin / Cos) * 256);
      Filler->EndDelta = (inT16) ((Cos / Sin) * 256);

      XAdjust = BucketEnd(Filler->X, XS, NB) - Start.x;
      YAdjust = XAdjust * Sin / Cos;
      Filler->YStart = (inT16) MapParam(Start.y - YAdjust, YS, NB * 256);
      YAdjust = XAdjust * Cos / Sin;
      Filler->YEnd = (inT16) MapParam(Start.y + YAdjust, YS, NB * 256);

      Filler->Switch[S1].Type = EndSwitch;
      Filler->Switch[S1].X = (inT8) MapParam(Switch1.x, XS, NB);
      Filler->Switch[S1].Y = (inT8) MapParam(Switch1.y, YS, NB);
      XAdjust = Switch1.x - BucketStart(Filler->Switch[S1].X, XS, NB);
      YAdjust = XAdjust * Sin / Cos;
      Filler->Switch[S1].YInit =
        (inT16) MapParam(Switch1.y + YAdjust, YS, NB * 256);
      Filler->Switch[S1].Delta = Filler->StartDelta;

      Filler->Switch[S2].Type = StartSwitch;
      Filler->Switch[S2].X = (inT8) MapParam(Switch2.x, XS, NB);
      Filler->Switch[S2].Y = (inT8) MapParam(Switch2.y, YS, NB);
      XAdjust = Switch2.x - BucketStart(Filler->Switch[S2].X, XS, NB);
      YAdjust = XAdjust * Cos / Sin;
      Filler->Switch[S2].YInit =
        (inT16) MapParam(Switch2.y - YAdjust, YS, NB * 256);
      Filler->Switch[S2].Delta = Filler->EndDelta;

      Filler->Switch[2].Type = LastSwitch;
      Filler->Switch[2].X = (inT8) MapParam(End.x, XS, NB);
    }
  }
} /* InitTableFiller */
/*---------------------------------------------------------------------------*/
#ifndef GRAPHICS_DISABLED
/*
 * This routine renders the given feature into the given window
 * as a short line segment in the given color.
 *
 * Parameters:
 *   window   window to draw into
 *   Feature  feature to be rendered
 *   color    color to use for feature rendering
 * Globals: none
 * Return: none
 * @note Exceptions: none
 * @note History: Thu Mar 21 14:57:41 1991, DSJ, Created.
 */
void RenderIntFeature(ScrollView *window, const INT_FEATURE_STRUCT* Feature,
                      ScrollView::Color color) {
  assert(Feature != NULL);
  assert(color != 0);
  window->Pen(color);

  const FLOAT32 x = Feature->X;
  const FLOAT32 y = Feature->Y;
  const FLOAT32 length = GetPicoFeatureLength() * 0.7 * INT_CHAR_NORM_RANGE;
  // Theta was computed with BinaryAnglePlusPi in intfx.cpp, hence the -PI
  // offset; it has no significant visual effect here.
  const FLOAT32 theta = (Feature->Theta / 256.0) * 2.0 * PI - PI;
  const FLOAT32 dx = (length / 2.0) * cos(theta);
  const FLOAT32 dy = (length / 2.0) * sin(theta);

  window->SetCursor(x, y);
  window->DrawTo(x + dx, y + dy);
} /* RenderIntFeature */
/*---------------------------------------------------------------------------*/
/*
 * This routine extracts the parameters of the specified
 * proto from the class description and draws the proto as a
 * line segment into the given window.
 *
 * @param window  window to draw into
 * @param Class   class that proto is contained in
 * @param ProtoId id of proto to be rendered
 * @param color   color to render proto in
 *
 * Globals: none
 *
 * @return none
 * @note Exceptions: none
 * @note History: Thu Mar 21 10:21:09 1991, DSJ, Created.
 */
void RenderIntProto(ScrollView *window,
                    INT_CLASS Class,
                    PROTO_ID ProtoId,
                    ScrollView::Color color) {
  assert(Class != NULL);
  assert(ProtoId >= 0);
  assert(ProtoId < Class->NumProtos);
  assert(color != 0);
  window->Pen(color);

  PROTO_SET proto_set = Class->ProtoSets[SetForProto(ProtoId)];
  INT_PROTO proto = &(proto_set->Protos[IndexForProto(ProtoId)]);
  FLOAT32 length = (Class->ProtoLengths[ProtoId] *
                    GetPicoFeatureLength() * INT_CHAR_NORM_RANGE);
  uinT32 proto_mask = PPrunerMaskFor(ProtoId);
  int proto_word_index = PPrunerWordIndexFor(ProtoId);

  // Find the x and y extent of the proto from the proto pruning table.
  int x_min = NUM_PP_BUCKETS;
  int x_max = 0;
  int y_min = NUM_PP_BUCKETS;
  int y_max = 0;
  for (int bucket = 0; bucket < NUM_PP_BUCKETS; bucket++) {
    if (proto_mask &
        proto_set->ProtoPruner[PRUNER_X][bucket][proto_word_index])
      UpdateRange(bucket, &x_min, &x_max);
    if (proto_mask &
        proto_set->ProtoPruner[PRUNER_Y][bucket][proto_word_index])
      UpdateRange(bucket, &y_min, &y_max);
  }
  // Center of the proto is the midpoint of the pruner extent.
  FLOAT32 x = (x_min + x_max + 1) / 2.0 * PROTO_PRUNER_SCALE;
  FLOAT32 y = (y_min + y_max + 1) / 2.0 * PROTO_PRUNER_SCALE;
  // Angle was computed with BinaryAnglePlusPi in intfx.cpp, hence the -PI
  // offset; it has no significant visual effect here.
  FLOAT32 dx = (length / 2.0) * cos((proto->Angle / 256.0) * 2.0 * PI - PI);
  FLOAT32 dy = (length / 2.0) * sin((proto->Angle / 256.0) * 2.0 * PI - PI);

  window->SetCursor(x - dx, y - dy);
  window->DrawTo(x + dx, y + dy);
} /* RenderIntProto */
#endif
/*---------------------------------------------------------------------------*/
/**
 * Clips Param to lie within the inclusive range [Min, Max].
 * If a truncation is performed and Id is not null, a warning
 * message naming the parameter is printed.
 *
 * @param Param parameter value to be truncated
 * @param Min, Max parameter limits (inclusive)
 * @param Id string id of parameter for error messages
 *
 * Globals: none
 *
 * @return Truncated parameter.
 * @note Exceptions: none
 * @note History: Fri Feb 8 11:54:28 1991, DSJ, Created.
 */
int TruncateParam(FLOAT32 Param, int Min, int Max, char *Id) {
  int bound = 0;
  bool clipped = false;
  if (Param < Min) {
    bound = Min;
    clipped = true;
  } else if (Param > Max) {
    bound = Max;
    clipped = true;
  }
  if (clipped) {
    // Report the original (pre-truncation) value in the warning.
    if (Id)
      cprintf("Warning: Param %s truncated from %f to %d!\n",
              Id, Param, bound);
    Param = bound;
  }
  return static_cast<int>(floor(Param));
} /* TruncateParam */
/*---------------------------------------------------------------------------*/
#ifndef GRAPHICS_DISABLED
/**
* Initializes the int matcher window if it is not already
* initialized.
*/
void InitIntMatchWindowIfReqd() {
if (IntMatchWindow == NULL) {
IntMatchWindow = CreateFeatureSpaceWindow("IntMatchWindow", 50, 200);
SVMenuNode* popup_menu = new SVMenuNode();
popup_menu->AddChild("Debug Adapted classes", IDA_ADAPTIVE,
"x", "Class to debug");
popup_menu->AddChild("Debug Static classes", IDA_STATIC,
"x", "Class to debug");
popup_menu->AddChild("Debug Both", IDA_BOTH,
"x", "Class to debug");
popup_menu->AddChild("Debug Shape Index", IDA_SHAPE_INDEX,
"0", "Index to debug");
popup_menu->BuildMenu(IntMatchWindow, false);
}
}
/**
 * Lazily creates the proto display window on first use.
 * Subsequent calls are no-ops.
 */
void InitProtoDisplayWindowIfReqd() {
  if (ProtoDisplayWindow != NULL)
    return;  // Already created.
  ProtoDisplayWindow = CreateFeatureSpaceWindow("ProtoDisplayWindow",
                                                550, 200);
}
/**
 * Lazily creates the feature display window on first use.
 * Subsequent calls are no-ops.
 */
void InitFeatureDisplayWindowIfReqd() {
  if (FeatureDisplayWindow != NULL)
    return;  // Already created.
  FeatureDisplayWindow = CreateFeatureSpaceWindow("FeatureDisplayWindow",
                                                  50, 700);
}
// Creates a window of the appropriate size for displaying elements
// in feature space, positioned at (xpos, ypos).
ScrollView* CreateFeatureSpaceWindow(const char* name, int xpos, int ypos) {
  const int kWindowSize = 520;  // Window edge in pixels.
  const int kMaxCoord = 260;    // Max coordinate of the feature space view.
  return new ScrollView(name, xpos, ypos, kWindowSize, kWindowSize,
                        kMaxCoord, kMaxCoord, true);
}
#endif // GRAPHICS_DISABLED
| C++ |
// Copyright 2010 Google Inc. All Rights Reserved.
// Author: rays@google.com (Ray Smith)
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#ifndef TESSERACT_TRAINING_TRAININGSAMPLESET_H__
#define TESSERACT_TRAINING_TRAININGSAMPLESET_H__
#include "bitvector.h"
#include "genericvector.h"
#include "indexmapbidi.h"
#include "matrix.h"
#include "shapetable.h"
#include "trainingsample.h"
class UNICHARSET;
namespace tesseract {
struct FontInfo;
class FontInfoTable;
class IntFeatureMap;
class IntFeatureSpace;
class TrainingSample;
struct UnicharAndFonts;
// Collection of TrainingSample used for training or testing a classifier.
// Provides several useful methods to operate on the collection as a whole,
// including outlier detection and deletion, providing access by font and
// class, finding the canonical sample, finding the "cloud" features (OR of
// all features in all samples), replication of samples, caching of distance
// metrics.
class TrainingSampleSet {
 public:
  explicit TrainingSampleSet(const FontInfoTable& fontinfo_table);
  ~TrainingSampleSet();

  // Writes to the given file. Returns false in case of error.
  bool Serialize(FILE* fp) const;
  // Reads from the given file. Returns false in case of error.
  // If swap is true, assumes a big/little-endian swap is needed.
  bool DeSerialize(bool swap, FILE* fp);

  // Accessors.
  // Total number of samples, including replicated ones.
  int num_samples() const {
    return samples_.size();
  }
  // Number of samples before replication/randomization.
  int num_raw_samples() const {
    return num_raw_samples_;
  }
  int NumFonts() const {
    return font_id_map_.SparseSize();
  }
  const UNICHARSET& unicharset() const {
    return unicharset_;
  }
  int charsetsize() const {
    return unicharset_size_;
  }
  const FontInfoTable& fontinfo_table() const {
    return fontinfo_table_;
  }

  // Loads an initial unicharset, or sets one up if the file cannot be read.
  void LoadUnicharset(const char* filename);

  // Adds a character sample to this sample set.
  // If the unichar is not already in the local unicharset, it is added.
  // Returns the unichar_id of the added sample, from the local unicharset.
  int AddSample(const char* unichar, TrainingSample* sample);
  // Adds a character sample to this sample set with the given unichar_id,
  // which must correspond to the local unicharset (in this).
  void AddSample(int unichar_id, TrainingSample* sample);

  // Returns the number of samples for the given font,class pair.
  // If randomize is true, returns the number of samples accessible
  // with randomizing on. (Increases the number of samples if small.)
  // OrganizeByFontAndClass must have been already called.
  int NumClassSamples(int font_id, int class_id, bool randomize) const;

  // Gets a sample by its index.
  const TrainingSample* GetSample(int index) const;
  // Gets a sample by its font, class, index.
  // OrganizeByFontAndClass must have been already called.
  const TrainingSample* GetSample(int font_id, int class_id, int index) const;
  // Get a sample by its font, class, index. Does not randomize.
  // OrganizeByFontAndClass must have been already called.
  TrainingSample* MutableSample(int font_id, int class_id, int index);

  // Returns a string debug representation of the given sample:
  // font, unichar_str, bounding box, page.
  STRING SampleToString(const TrainingSample& sample) const;

  // Gets the combined set of features used by all the samples of the given
  // font/class combination.
  const BitVector& GetCloudFeatures(int font_id, int class_id) const;
  // Gets the indexed features of the canonical sample of the given
  // font/class combination.
  const GenericVector<int>& GetCanonicalFeatures(int font_id,
                                                 int class_id) const;

  // Returns the distance between the given UnicharAndFonts pairs.
  // If matched_fonts is true, only samples from matching fonts are
  // considered, unless that yields the empty set.
  // OrganizeByFontAndClass must have been already called.
  float UnicharDistance(const UnicharAndFonts& uf1, const UnicharAndFonts& uf2,
                        bool matched_fonts, const IntFeatureMap& feature_map);

  // Returns the distance between the given pair of font/class pairs.
  // Finds in cache or computes and caches.
  // OrganizeByFontAndClass must have been already called.
  float ClusterDistance(int font_id1, int class_id1,
                        int font_id2, int class_id2,
                        const IntFeatureMap& feature_map);
  // Computes the distance between the given pair of font/class pairs.
  float ComputeClusterDistance(int font_id1, int class_id1,
                               int font_id2, int class_id2,
                               const IntFeatureMap& feature_map) const;

  // Returns the number of canonical features of font/class 2 for which
  // neither the feature nor any of its near neighbors occurs in the cloud
  // of font/class 1. Each such feature is a reliable separation between
  // the classes, ASSUMING that the canonical sample is sufficiently
  // representative that every sample has a feature near that particular
  // feature. To check that this is so on the fly would be prohibitively
  // expensive, but it might be possible to pre-qualify the canonical features
  // to include only those for which this assumption is true.
  // ComputeCanonicalFeatures and ComputeCloudFeatures must have been called
  // first, or the results will be nonsense.
  int ReliablySeparable(int font_id1, int class_id1,
                        int font_id2, int class_id2,
                        const IntFeatureMap& feature_map,
                        bool thorough) const;

  // Returns the total index of the requested sample.
  // OrganizeByFontAndClass must have been already called.
  int GlobalSampleIndex(int font_id, int class_id, int index) const;

  // Gets the canonical sample for the given font, class pair.
  // ComputeCanonicalSamples must have been called first.
  const TrainingSample* GetCanonicalSample(int font_id, int class_id) const;
  // Gets the max distance for the given canonical sample.
  // ComputeCanonicalSamples must have been called first.
  float GetCanonicalDist(int font_id, int class_id) const;

  // Returns a mutable pointer to the sample with the given index.
  TrainingSample* mutable_sample(int index) {
    return samples_[index];
  }
  // Gets ownership of the sample with the given index, removing it from this.
  // The slot in samples_ is left as NULL.
  TrainingSample* extract_sample(int index) {
    TrainingSample* sample = samples_[index];
    samples_[index] = NULL;
    return sample;
  }

  // Generates indexed features for all samples with the supplied feature_space.
  void IndexFeatures(const IntFeatureSpace& feature_space);

  // Delete outlier samples with few features that are shared with others.
  // IndexFeatures must have been called already.
  void DeleteOutliers(const IntFeatureSpace& feature_space, bool debug);

  // Marks the given sample for deletion.
  // Deletion is actually completed by DeleteDeadSamples.
  void KillSample(TrainingSample* sample);

  // Deletes all samples with a negative sample index marked by KillSample.
  // Must be called before OrganizeByFontAndClass, and OrganizeByFontAndClass
  // must be called after as the samples have been renumbered.
  void DeleteDeadSamples();

  // Callback function returns true if the given sample is to be deleted, due
  // to having a negative classid.
  bool DeleteableSample(const TrainingSample* sample);

  // Construct an array to access the samples by font,class pair.
  void OrganizeByFontAndClass();

  // Constructs the font_id_map_ which maps real font_ids (sparse) to a compact
  // index for the font_class_array_.
  void SetupFontIdMap();

  // Finds the sample for each font, class pair that has least maximum
  // distance to all the other samples of the same font, class.
  // OrganizeByFontAndClass must have been already called.
  void ComputeCanonicalSamples(const IntFeatureMap& map, bool debug);

  // Replicates the samples to a minimum frequency defined by
  // 2 * kSampleRandomSize, or for larger counts duplicates all samples.
  // After replication, the replicated samples are perturbed slightly, but
  // in a predictable and repeatable way.
  // Use after OrganizeByFontAndClass().
  void ReplicateAndRandomizeSamples();

  // Caches the indexed features of the canonical samples.
  // ComputeCanonicalSamples must have been already called.
  void ComputeCanonicalFeatures();
  // Computes the combined set of features used by all the samples of each
  // font/class combination. Use after ReplicateAndRandomizeSamples.
  void ComputeCloudFeatures(int feature_space_size);

  // Adds all fonts of the given class to the shape.
  void AddAllFontsForClass(int class_id, Shape* shape) const;

  // Display the samples with the given indexed feature that also match
  // the given shape.
  void DisplaySamplesWithFeature(int f_index, const Shape& shape,
                                 const IntFeatureSpace& feature_space,
                                 ScrollView::Color color,
                                 ScrollView* window) const;

 private:
  // Struct to store a triplet of unichar, font, distance in the distance cache.
  struct FontClassDistance {
    int unichar_id;
    int font_id;  // Real font id.
    float distance;
  };
  // Simple struct to store information related to each font/class combination.
  struct FontClassInfo {
    FontClassInfo();

    // Writes to the given file. Returns false in case of error.
    bool Serialize(FILE* fp) const;
    // Reads from the given file. Returns false in case of error.
    // If swap is true, assumes a big/little-endian swap is needed.
    bool DeSerialize(bool swap, FILE* fp);

    // Number of raw samples.
    inT32 num_raw_samples;
    // Index of the canonical sample.
    inT32 canonical_sample;
    // Max distance of the canonical sample from any other.
    float canonical_dist;
    // Sample indices for the samples, including replicated.
    GenericVector<inT32> samples;

    // Non-serialized cache data.
    // Indexed features of the canonical sample.
    GenericVector<int> canonical_features;
    // The mapped features of all the samples.
    BitVector cloud_features;

    // Caches for ClusterDistance.
    // Caches for other fonts but matching this unichar. -1 indicates not set.
    // Indexed by compact font index from font_id_map_.
    GenericVector<float> font_distance_cache;
    // Caches for other unichars but matching this font. -1 indicates not set.
    GenericVector<float> unichar_distance_cache;
    // Cache for the rest (non-matching font and unichar):
    // a cache of distances computed by ReliablySeparable.
    GenericVector<FontClassDistance> distance_cache;
  };

  PointerVector<TrainingSample> samples_;
  // Number of samples before replication/randomization.
  int num_raw_samples_;
  // Character set we are training for.
  UNICHARSET unicharset_;
  // Character set size to which the 2-d arrays below refer.
  int unicharset_size_;
  // Map to allow the font_class_array_ below to be compact.
  // The sparse space is the real font_id, used in samples_ .
  // The compact space is an index to font_class_array_
  IndexMapBiDi font_id_map_;
  // A 2-d array of FontClassInfo holding information related to each
  // (font_id, class_id) pair.
  GENERIC_2D_ARRAY<FontClassInfo>* font_class_array_;
  // Reference to the fontinfo_table_ in MasterTrainer. Provides names
  // for font_ids in the samples. Not serialized!
  const FontInfoTable& fontinfo_table_;
};
} // namespace tesseract.
#endif  // TESSERACT_TRAINING_TRAININGSAMPLESET_H__
| C++ |
/******************************************************************************
** Filename: fxdefs.c
** Purpose: Utility functions to be used by feature extractors.
** Author: Dan Johnson
** History: Sun Jan 21 15:29:02 1990, DSJ, Created.
**
** (c) Copyright Hewlett-Packard Company, 1988.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
******************************************************************************/
#include "fxdefs.h"
#include "featdefs.h"
#include "mf.h"
#include "outfeat.h"
#include "picofeat.h"
#include "normfeat.h"
/*-----------------------------------------------------------------------------
Global Data Definitions and Declarations
-----------------------------------------------------------------------------*/
// Definitions of extractors separated from feature definitions.
// Each entry binds a feature type to the function that extracts it.
const FEATURE_EXT_STRUCT MicroFeatureExt = { ExtractMicros };
const FEATURE_EXT_STRUCT CharNormExt = { ExtractCharNormFeatures };
const FEATURE_EXT_STRUCT IntFeatExt = { ExtractIntCNFeatures };
const FEATURE_EXT_STRUCT GeoFeatExt = { ExtractIntGeoFeatures };
// MUST be kept in-sync with DescDefs in featdefs.cpp.
// (Order here defines the feature-type index used by SetupExtractors.)
const FEATURE_EXT_STRUCT* ExtractorDefs[NUM_FEATURE_TYPES] = {
  &MicroFeatureExt,
  &CharNormExt,
  &IntFeatExt,
  &GeoFeatExt
};
// Installs the extractor for every feature type into FeatureDefs,
// copying from the file-level ExtractorDefs table above.
void SetupExtractors(FEATURE_DEFS_STRUCT *FeatureDefs) {
  for (int type = 0; type < NUM_FEATURE_TYPES; ++type) {
    FeatureDefs->FeatureExtractors[type] = ExtractorDefs[type];
  }
}
| C++ |
/******************************************************************************
** Filename: float2int.c
** Purpose: Routines for converting float features to int features
** Author: Dan Johnson
** History: Wed Mar 13 07:47:48 1991, DSJ, Created.
**
** (c) Copyright Hewlett-Packard Company, 1988.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
******************************************************************************/
/*-----------------------------------------------------------------------------
Include Files and Type Defines
-----------------------------------------------------------------------------*/
#include "float2int.h"
#include "normmatch.h"
#include "mfoutline.h"
#include "classify.h"
#include "helpers.h"
#include "picofeat.h"
#define MAX_INT_CHAR_NORM (INT_CHAR_NORM_RANGE - 1)
/*-----------------------------------------------------------------------------
Public Code
-----------------------------------------------------------------------------*/
/*---------------------------------------------------------------------------*/
namespace tesseract {
/**
 * Zeroes the normalization entry for every class in the unicharset.
 * char_norm_array is indexed by unichar_id and must hold at least
 * unicharset.size() entries.
 *
 * Globals:
 * - none
 *
 * @param char_norm_array array to be cleared
 *
 * @note Exceptions: none
 * @note History: Wed Feb 20 11:20:54 1991, DSJ, Created.
 */
void Classify::ClearCharNormArray(uinT8* char_norm_array) {
  // One entry per unichar; zero means best possible normalization match.
  memset(char_norm_array, 0, unicharset.size() * sizeof(char_norm_array[0]));
} /* ClearCharNormArray */
/*---------------------------------------------------------------------------*/
/**
* For each class in unicharset, computes the match between
* norm_feature and the normalization protos for that class.
* Converts this number to the range from 0 - 255 and stores it
* into char_norm_array. CharNormArray is indexed by unichar_id.
*
* Globals:
* - PreTrainedTemplates current set of built-in templates
*
* @param norm_feature character normalization feature
* @param[out] char_norm_array place to put results of size unicharset.size()
*
* @note Exceptions: none
* @note History: Wed Feb 20 11:20:54 1991, DSJ, Created.
*/
void Classify::ComputeIntCharNormArray(const FEATURE_STRUCT& norm_feature,
uinT8* char_norm_array) {
for (int i = 0; i < unicharset.size(); i++) {
if (i < PreTrainedTemplates->NumClasses) {
int norm_adjust = static_cast<int>(INT_CHAR_NORM_RANGE *
ComputeNormMatch(i, norm_feature, FALSE));
char_norm_array[i] = ClipToRange(norm_adjust, 0, MAX_INT_CHAR_NORM);
} else {
// Classes with no templates (eg. ambigs & ligatures) default
// to worst match.
char_norm_array[i] = MAX_INT_CHAR_NORM;
}
}
} /* ComputeIntCharNormArray */
/*---------------------------------------------------------------------------*/
/**
* This routine converts each floating point pico-feature
* in Features into integer format and saves it into
* IntFeatures.
*
* Globals:
* - none
*
* @param Features floating point pico-features to be converted
* @param[out] IntFeatures array to put converted features into
*
* @note Exceptions: none
* @note History: Wed Feb 20 10:58:45 1991, DSJ, Created.
*/
void Classify::ComputeIntFeatures(FEATURE_SET Features,
INT_FEATURE_ARRAY IntFeatures) {
int Fid;
FEATURE Feature;
FLOAT32 YShift;
if (classify_norm_method == baseline)
YShift = BASELINE_Y_SHIFT;
else
YShift = Y_SHIFT;
for (Fid = 0; Fid < Features->NumFeatures; Fid++) {
Feature = Features->Features[Fid];
IntFeatures[Fid].X = BucketFor (Feature->Params[PicoFeatX],
X_SHIFT, INT_FEAT_RANGE);
IntFeatures[Fid].Y = BucketFor (Feature->Params[PicoFeatY],
YShift, INT_FEAT_RANGE);
IntFeatures[Fid].Theta = CircBucketFor (Feature->Params[PicoFeatDir],
ANGLE_SHIFT, INT_FEAT_RANGE);
IntFeatures[Fid].CP_misses = 0;
}
} /* ComputeIntFeatures */
} // namespace tesseract
| C++ |
// Copyright 2011 Google Inc. All Rights Reserved.
// Author: rays@google.com (Ray Smith)
///////////////////////////////////////////////////////////////////////
// File: shapeclassifier.h
// Description: Base interface class for classifiers that return a
// shape index.
// Author: Ray Smith
// Created: Thu Dec 15 15:24:27 PST 2011
//
// (C) Copyright 2011, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#ifdef HAVE_CONFIG_H
#include "config_auto.h"
#endif
#include "shapeclassifier.h"
#include "genericvector.h"
#include "scrollview.h"
#include "shapetable.h"
#include "svmnode.h"
#include "trainingsample.h"
#include "tprintf.h"
namespace tesseract {
// Classifies the given [training] sample, writing to results.
// See shapeclassifier.h for a full description.
// Default implementation calls the ShapeRating version.
int ShapeClassifier::UnicharClassifySample(
const TrainingSample& sample, Pix* page_pix, int debug,
UNICHAR_ID keep_this, GenericVector<UnicharRating>* results) {
results->truncate(0);
GenericVector<ShapeRating> shape_results;
int num_shape_results = ClassifySample(sample, page_pix, debug, keep_this,
&shape_results);
const ShapeTable* shapes = GetShapeTable();
GenericVector<int> unichar_map;
unichar_map.init_to_size(shapes->unicharset().size(), -1);
for (int r = 0; r < num_shape_results; ++r) {
shapes->AddShapeToResults(shape_results[r], &unichar_map, results);
}
return results->size();
}
// Classifies the given [training] sample, writing to results.
// See shapeclassifier.h for a full description.
// Default implementation aborts: subclasses must override either this or
// UnicharClassifySample.
int ShapeClassifier::ClassifySample(const TrainingSample& sample, Pix* page_pix,
                                    int debug, int keep_this,
                                    GenericVector<ShapeRating>* results) {
  // The string literal is never NULL, so this condition is always false and
  // ASSERT_HOST aborts, printing the message as the failed condition.
  ASSERT_HOST("Must implement ClassifySample!" == NULL);
  return 0;
}
// Returns the shape that contains unichar_id that has the best result.
// If result is not NULL, it is set with the shape_id and rating.
// Does not need to be overridden if ClassifySample respects the keep_this
// rule.
int ShapeClassifier::BestShapeForUnichar(const TrainingSample& sample,
Pix* page_pix, UNICHAR_ID unichar_id,
ShapeRating* result) {
GenericVector<ShapeRating> results;
const ShapeTable* shapes = GetShapeTable();
int num_results = ClassifySample(sample, page_pix, 0, unichar_id, &results);
for (int r = 0; r < num_results; ++r) {
if (shapes->GetShape(results[r].shape_id).ContainsUnichar(unichar_id)) {
if (result != NULL)
*result = results[r];
return results[r].shape_id;
}
}
return -1;
}
// Provides access to the UNICHARSET that this classifier works with.
// Only needs to be overridden if GetShapeTable() can return NULL.
const UNICHARSET& ShapeClassifier::GetUnicharset() const {
  // Assumes GetShapeTable() is non-NULL here; subclasses whose shape table
  // may be NULL must override this accessor.
  return GetShapeTable()->unicharset();
}
// Visual debugger classifies the given sample, displays the results and
// solicits user input to display other classifications. Returns when
// the user has finished with debugging the sample.
// Probably doesn't need to be overridden if the subclass provides
// DisplayClassifyAs.
void ShapeClassifier::DebugDisplay(const TrainingSample& sample,
                                   Pix* page_pix,
                                   UNICHAR_ID unichar_id) {
#ifndef GRAPHICS_DISABLED
  // Process-wide auxiliary window, created once and intentionally leaked
  // (it outlives every call to this debugger).
  static ScrollView* terminator = NULL;
  if (terminator == NULL) {
    terminator = new ScrollView("XIT", 0, 0, 50, 50, 50, 50, true);
  }
  ScrollView* debug_win = CreateFeatureSpaceWindow("ClassifierDebug", 0, 0);
  // Provide a right-click menu to choose the class.
  SVMenuNode* popup_menu = new SVMenuNode();
  popup_menu->AddChild("Choose class to debug", 0, "x", "Class to debug");
  popup_menu->BuildMenu(debug_win, false);
  // Display the features in green.
  const INT_FEATURE_STRUCT* features = sample.features();
  int num_features = sample.num_features();
  for (int f = 0; f < num_features; ++f) {
    RenderIntFeature(debug_win, &features[f], ScrollView::GREEN);
  }
  debug_win->Update();
  GenericVector<UnicharRating> results;
  // Debug classification until the user quits.
  const UNICHARSET& unicharset = GetUnicharset();
  SVEvent* ev;
  SVEventType ev_type;
  do {
    // windows holds any views created by DisplayClassifyAs; they are
    // destroyed at the end of each outer iteration when it goes out of scope.
    PointerVector<ScrollView> windows;
    if (unichar_id >= 0) {
      tprintf("Debugging class %d = %s\n",
              unichar_id, unicharset.id_to_unichar(unichar_id));
      UnicharClassifySample(sample, page_pix, 1, unichar_id, &results);
      DisplayClassifyAs(sample, page_pix, unichar_id, 1, &windows);
    } else {
      tprintf("Invalid unichar_id: %d\n", unichar_id);
      UnicharClassifySample(sample, page_pix, 1, -1, &results);
    }
    if (unichar_id >= 0) {
      tprintf("Debugged class %d = %s\n",
              unichar_id, unicharset.id_to_unichar(unichar_id));
    }
    tprintf("Right-click in ClassifierDebug window to choose debug class,");
    tprintf(" Left-click or close window to quit...\n");
    UNICHAR_ID old_unichar_id;
    // Inner loop: wait for an event that either picks a new class
    // (re-run the outer loop) or quits (click / window destroyed).
    do {
      old_unichar_id = unichar_id;
      ev = debug_win->AwaitEvent(SVET_ANY);
      ev_type = ev->type;
      if (ev_type == SVET_POPUP) {
        if (unicharset.contains_unichar(ev->parameter)) {
          unichar_id = unicharset.unichar_to_id(ev->parameter);
        } else {
          tprintf("Char class '%s' not found in unicharset", ev->parameter);
        }
      }
      delete ev;
    } while (unichar_id == old_unichar_id &&
             ev_type != SVET_CLICK && ev_type != SVET_DESTROY);
  } while (ev_type != SVET_CLICK && ev_type != SVET_DESTROY);
  delete debug_win;
#endif  // GRAPHICS_DISABLED
}
// Displays classification as the given shape_id. Creates as many windows
// as it feels fit, using index as a guide for placement. Adds any created
// windows to the windows output and returns a new index that may be used
// by any subsequent classifiers. Caller waits for the user to view and
// then destroys the windows by clearing the vector.
int ShapeClassifier::DisplayClassifyAs(
    const TrainingSample& sample, Pix* page_pix,
    UNICHAR_ID unichar_id, int index,
    PointerVector<ScrollView>* windows) {
  // Does nothing in the default implementation: no windows are created and
  // all parameters except index are intentionally unused.
  return index;
}
// Prints debug information on the results.
void ShapeClassifier::UnicharPrintResults(
const char* context, const GenericVector<UnicharRating>& results) const {
tprintf("%s\n", context);
for (int i = 0; i < results.size(); ++i) {
tprintf("%g: c_id=%d=%s", results[i].rating, results[i].unichar_id,
GetUnicharset().id_to_unichar(results[i].unichar_id));
if (results[i].fonts.size() != 0) {
tprintf(" Font Vector:");
for (int f = 0; f < results[i].fonts.size(); ++f) {
tprintf(" %d", results[i].fonts[f]);
}
}
tprintf("\n");
}
}
void ShapeClassifier::PrintResults(
const char* context, const GenericVector<ShapeRating>& results) const {
tprintf("%s\n", context);
for (int i = 0; i < results.size(); ++i) {
tprintf("%g:", results[i].rating);
if (results[i].joined)
tprintf("[J]");
if (results[i].broken)
tprintf("[B]");
tprintf(" %s\n", GetShapeTable()->DebugStr(results[i].shape_id).string());
}
}
// Removes any result that has all its unichars covered by a better choice,
// regardless of font. Results are assumed ordered best-first, so earlier
// entries "cover" later ones — TODO confirm ordering with callers.
void ShapeClassifier::FilterDuplicateUnichars(
    GenericVector<ShapeRating>* results) const {
  GenericVector<ShapeRating> filtered_results;
  // Copy results to filtered results and knock out duplicate unichars.
  const ShapeTable* shapes = GetShapeTable();
  for (int r = 0; r < results->size(); ++r) {
    if (r > 0) {
      // c and s are declared outside their loops so that the post-loop
      // comparisons (s == r, c == shape_r.size()) can detect whether the
      // loop ran to completion or broke out early.
      const Shape& shape_r = shapes->GetShape((*results)[r].shape_id);
      int c;
      for (c = 0; c < shape_r.size(); ++c) {
        int unichar_id = shape_r[c].unichar_id;
        int s;
        for (s = 0; s < r; ++s) {
          const Shape& shape_s = shapes->GetShape((*results)[s].shape_id);
          if (shape_s.ContainsUnichar(unichar_id))
            break;  // We found unichar_id.
        }
        if (s == r)
          break;  // We didn't find unichar_id.
      }
      if (c == shape_r.size())
        continue;  // We found all the unichar ids in previous answers.
    }
    filtered_results.push_back((*results)[r]);
  }
  *results = filtered_results;
}
} // namespace tesseract.
| C++ |
/******************************************************************************
** Filename: featdefs.c
** Purpose: Definitions of currently defined feature types.
** Author: Dan Johnson
** History: Mon May 21 10:26:21 1990, DSJ, Created.
**
** (c) Copyright Hewlett-Packard Company, 1988.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
******************************************************************************/
/*-----------------------------------------------------------------------------
Include Files and Type Defines
-----------------------------------------------------------------------------*/
#ifdef _MSC_VER
#include <mathfix.h>
#endif
#include "featdefs.h"
#include "emalloc.h"
#include "danerror.h"
#include "scanutils.h"
#include <string.h>
#include <stdio.h>
/** define errors triggered by this module */
#define ILLEGAL_NUM_SETS 3001

#define PICO_FEATURE_LENGTH 0.05

/*-----------------------------------------------------------------------------
        Global Data Definitions and Declarations
-----------------------------------------------------------------------------*/
// Short names used to tag each feature set in serialized CHAR_DESC data.
const char* kMicroFeatureType = "mf";
const char* kCNFeatureType = "cn";
const char* kIntFeatureType = "if";
const char* kGeoFeatureType = "tb";

// Define all of the parameters for the MicroFeature type.
StartParamDesc(MicroFeatureParams)
DefineParam(0, 0, -0.5, 0.5)
DefineParam(0, 0, -0.25, 0.75)
DefineParam(0, 1, 0.0, 1.0)
DefineParam(1, 0, 0.0, 1.0)
DefineParam(0, 1, -0.5, 0.5)
DefineParam(0, 1, -0.5, 0.5)
EndParamDesc
// Now define the feature type itself (see features.h for parameters).
DefineFeature(MicroFeatureDesc, 5, 1, kMicroFeatureType, MicroFeatureParams)

// Define all of the parameters for the NormFeat type.
StartParamDesc(CharNormParams)
DefineParam(0, 0, -0.25, 0.75)
DefineParam(0, 1, 0.0, 1.0)
DefineParam(0, 0, 0.0, 1.0)
DefineParam(0, 0, 0.0, 1.0)
EndParamDesc
// Now define the feature type itself (see features.h for parameters).
DefineFeature(CharNormDesc, 4, 0, kCNFeatureType, CharNormParams)

// Define all of the parameters for the IntFeature type
StartParamDesc(IntFeatParams)
DefineParam(0, 0, 0.0, 255.0)
DefineParam(0, 0, 0.0, 255.0)
DefineParam(1, 0, 0.0, 255.0)
EndParamDesc
// Now define the feature type itself (see features.h for parameters).
DefineFeature(IntFeatDesc, 2, 1, kIntFeatureType, IntFeatParams)

// Define all of the parameters for the GeoFeature type
StartParamDesc(GeoFeatParams)
DefineParam(0, 0, 0.0, 255.0)
DefineParam(0, 0, 0.0, 255.0)
DefineParam(0, 0, 0.0, 255.0)
EndParamDesc
// Now define the feature type itself (see features.h for parameters).
DefineFeature(GeoFeatDesc, 3, 0, kGeoFeatureType, GeoFeatParams)

// Other features used for training the adaptive classifier, but not used
// during normal training, therefore not in the DescDefs array.

// Define all of the parameters for the PicoFeature type
// define knob that can be used to adjust pico-feature length.
FLOAT32 PicoFeatureLength = PICO_FEATURE_LENGTH;
StartParamDesc(PicoFeatParams)
DefineParam(0, 0, -0.25, 0.75)
DefineParam(1, 0, 0.0, 1.0)
DefineParam(0, 0, -0.5, 0.5)
EndParamDesc
// Now define the feature type itself (see features.h for parameters).
DefineFeature(PicoFeatDesc, 2, 1, "pf", PicoFeatParams)

// Define all of the parameters for the OutlineFeature type.
StartParamDesc(OutlineFeatParams)
DefineParam(0, 0, -0.5, 0.5)
DefineParam(0, 0, -0.25, 0.75)
DefineParam(0, 0, 0.0, 1.0)
DefineParam(1, 0, 0.0, 1.0)
EndParamDesc
// Now define the feature type itself (see features.h for parameters).
DefineFeature(OutlineFeatDesc, 3, 1, "of", OutlineFeatParams)

// MUST be kept in-sync with ExtractorDefs in fxdefs.cpp.
static const FEATURE_DESC_STRUCT *DescDefs[NUM_FEATURE_TYPES] = {
  &MicroFeatureDesc,
  &CharNormDesc,
  &IntFeatDesc,
  &GeoFeatDesc
};
/*-----------------------------------------------------------------------------
              Public Code
-----------------------------------------------------------------------------*/
// Fills featuredefs with the fixed set of feature descriptors (DescDefs)
// defined above.
void InitFeatureDefs(FEATURE_DEFS_STRUCT *featuredefs) {
  featuredefs->NumFeatureTypes = NUM_FEATURE_TYPES;
  for (int type = 0; type < NUM_FEATURE_TYPES; ++type)
    featuredefs->FeatureDesc[type] = DescDefs[type];
}
/*---------------------------------------------------------------------------*/
/**
 * Release the memory consumed by the specified character
 * description and all of the features in that description.
 * A NULL CharDesc is silently ignored.
 *
 * @param CharDesc character description to be deallocated
 */
void FreeCharDescription(CHAR_DESC CharDesc) {
  if (CharDesc == NULL)
    return;
  for (int i = 0; i < CharDesc->NumFeatureSets; i++)
    FreeFeatureSet(CharDesc->FeatureSets[i]);
  Efree(CharDesc);
} /* FreeCharDescription */
/*---------------------------------------------------------------------------*/
/**
 * Allocate a new character description with one (initially NULL) feature
 * set slot per feature type defined in FeatureDefs.
 *
 * @param FeatureDefs definitions of feature types/extractors
 * @return New character description structure.
 */
CHAR_DESC NewCharDescription(const FEATURE_DEFS_STRUCT &FeatureDefs) {
  CHAR_DESC desc = (CHAR_DESC) Emalloc (sizeof (CHAR_DESC_STRUCT));
  desc->NumFeatureSets = FeatureDefs.NumFeatureTypes;
  for (int i = 0; i < desc->NumFeatureSets; i++)
    desc->FeatureSets[i] = NULL;
  return desc;
} /* NewCharDescription */
/*---------------------------------------------------------------------------*/
/**
 * Write a textual representation of CharDesc to File.
 * The output is the number of feature sets that follow, then one entry per
 * non-NULL feature set: its short name followed by the set's contents.
 * Feature sets which are not present are not written.
 *
 * @param FeatureDefs definitions of feature types/extractors
 * @param File open text file to write CharDesc to
 * @param CharDesc character description to write to File
 */
void WriteCharDescription(const FEATURE_DEFS_STRUCT &FeatureDefs,
                          FILE *File, CHAR_DESC CharDesc) {
  // First pass: count the sets that will actually be written.
  int num_sets = 0;
  for (int type = 0; type < CharDesc->NumFeatureSets; type++) {
    if (CharDesc->FeatureSets[type])
      ++num_sets;
  }
  fprintf (File, " %d\n", num_sets);
  // Second pass: write each present set, tagged with its short name.
  for (int type = 0; type < CharDesc->NumFeatureSets; type++) {
    if (CharDesc->FeatureSets[type] == NULL)
      continue;
    fprintf (File, "%s ", (FeatureDefs.FeatureDesc[type])->ShortName);
    WriteFeatureSet (File, CharDesc->FeatureSets[type]);
  }
} /* WriteCharDescription */
// Return whether all of the fields of the given feature set
// are well defined (not inf or nan).
bool ValidCharDescription(const FEATURE_DEFS_STRUCT &FeatureDefs,
CHAR_DESC CharDesc) {
bool anything_written = false;
bool well_formed = true;
for (int Type = 0; Type < CharDesc->NumFeatureSets; Type++) {
if (CharDesc->FeatureSets[Type]) {
for (int i = 0; i < CharDesc->FeatureSets[Type]->NumFeatures; i++) {
FEATURE feat = CharDesc->FeatureSets[Type]->Features[i];
for (int p = 0; p < feat->Type->NumParams; p++) {
if (isnan(feat->Params[p]) || isinf(feat->Params[p]))
well_formed = false;
else
anything_written = true;
}
}
}
}
return anything_written && well_formed;
} /* ValidCharDescription */
/*---------------------------------------------------------------------------*/
/**
 * Read a character description from File, and return
 * a data structure containing this information. The data
 * is formatted as follows:
 * @verbatim
     NumberOfSets
             ShortNameForSet1 Set1
             ShortNameForSet2 Set2
             ...
   @endverbatim
 *
 * Globals:
 * - none
 *
 * @param FeatureDefs definitions of feature types/extractors
 * @param File open text file to read character description from
 * @return Character description read from File.
 * @note Exceptions:
 * - ILLEGAL_NUM_SETS
 * @note History: Wed May 23 17:32:48 1990, DSJ, Created.
 */
CHAR_DESC ReadCharDescription(const FEATURE_DEFS_STRUCT &FeatureDefs,
                              FILE *File) {
  int NumSetsToRead;
  char ShortName[FEAT_NAME_SIZE];
  CHAR_DESC CharDesc;
  int Type;

  // Validate the declared set count before allocating anything.
  if (tfscanf(File, "%d", &NumSetsToRead) != 1 ||
      NumSetsToRead < 0 || NumSetsToRead > FeatureDefs.NumFeatureTypes)
    DoError (ILLEGAL_NUM_SETS, "Illegal number of feature sets");
  CharDesc = NewCharDescription(FeatureDefs);
  // Each entry is a short name (e.g. "mf") followed by the set's contents.
  for (; NumSetsToRead > 0; NumSetsToRead--) {
    tfscanf(File, "%s", ShortName);
    Type = ShortNameToFeatureType(FeatureDefs, ShortName);
    CharDesc->FeatureSets[Type] =
      ReadFeatureSet (File, FeatureDefs.FeatureDesc[Type]);
  }
  return (CharDesc);
}  // ReadCharDescription
/*---------------------------------------------------------------------------*/
/**
 * Search thru all features currently defined and return
 * the feature type for the feature with the specified short
 * name. Traps an error (ILLEGAL_SHORT_NAME) if the name is not found,
 * and returns 0 should the error handler return.
 *
 * @param FeatureDefs definitions of feature types/extractors
 * @param ShortName short name of a feature type
 * @return Feature type which corresponds to ShortName.
 */
int ShortNameToFeatureType(const FEATURE_DEFS_STRUCT &FeatureDefs,
                           const char *ShortName) {
  for (int type = 0; type < FeatureDefs.NumFeatureTypes; type++) {
    if (strcmp(FeatureDefs.FeatureDesc[type]->ShortName, ShortName) == 0)
      return type;
  }
  DoError (ILLEGAL_SHORT_NAME, "Illegal short name for a feature");
  return 0;
}  // ShortNameToFeatureType
| C++ |
/******************************************************************************
** Filename: fpoint.c
** Purpose: Abstract data type for a 2D point (floating point coords)
** Author: Dan Johnson
** History: Thu Apr 12 10:44:15 1990, DSJ, Created.
**
** (c) Copyright Hewlett-Packard Company, 1988.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
******************************************************************************/
/**----------------------------------------------------------------------------
Include Files and Type Defines
----------------------------------------------------------------------------**/
#include "const.h"
#include "fpoint.h"
#include <stdio.h>
#include <math.h>
/**----------------------------------------------------------------------------
Public Code
----------------------------------------------------------------------------**/
/*---------------------------------------------------------------------------*/
// Returns the Euclidean distance between points A and B.
FLOAT32 DistanceBetween(FPOINT A, FPOINT B) {
  // XDelta/YDelta are already widened to double before squaring.
  const double dx = XDelta(A, B);
  const double dy = YDelta(A, B);
  return sqrt(dx * dx + dy * dy);
}
/**
 * Return the angle from Point1 to Point2 normalized to
 * lie in the range 0 to FullScale (where FullScale corresponds
 * to 2*pi or 360 degrees). Any value that rounds onto FullScale
 * wraps back to 0.
 *
 * @param Point1, Point2 points to compute angle between
 * @param FullScale value to associate with 2*pi
 * @return Normalized angle in [0, FullScale).
 */
FLOAT32 NormalizedAngleFrom(FPOINT *Point1,
                            FPOINT *Point2,
                            FLOAT32 FullScale) {
  const FLOAT32 full_circle = 2.0 * PI;
  FLOAT32 angle = AngleFrom (*Point1, *Point2);
  if (angle < 0.0)
    angle += full_circle;           // Map (-pi, 0) into (pi, 2*pi).
  angle *= FullScale / full_circle; // Rescale radians to caller's units.
  if (angle < 0.0 || angle >= FullScale)
    angle = 0.0;                    // Guard against rounding past the range.
  return angle;
} /* NormalizedAngleFrom */
| C++ |
/******************************************************************************
** Filename: features.c
** Purpose: Generic definition of a feature.
** Author: Dan Johnson
** History: Mon May 21 10:49:04 1990, DSJ, Created.
**
** (c) Copyright Hewlett-Packard Company, 1988.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
******************************************************************************/
/**----------------------------------------------------------------------------
Include Files and Type Defines
----------------------------------------------------------------------------**/
#include "ocrfeatures.h"
#include "emalloc.h"
#include "callcpp.h"
#include "danerror.h"
#include "freelist.h"
#include "scanutils.h"
#include <assert.h>
#include <math.h>
/**----------------------------------------------------------------------------
Public Code
----------------------------------------------------------------------------**/
/*---------------------------------------------------------------------------*/
/**
 * Add a feature to a feature set. If the feature set is already full the
 * feature is freed and FALSE is returned; otherwise the set takes
 * ownership of the feature and TRUE is returned.
 *
 * @param FeatureSet set of features to add Feature to
 * @param Feature feature to be added to FeatureSet
 * @return TRUE if feature added to set, FALSE if set is already full.
 */
BOOL8 AddFeature(FEATURE_SET FeatureSet, FEATURE Feature) {
  const bool has_room =
      FeatureSet->NumFeatures < FeatureSet->MaxNumFeatures;
  if (has_room) {
    FeatureSet->Features[FeatureSet->NumFeatures++] = Feature;
  } else {
    FreeFeature(Feature);
  }
  return has_room ? TRUE : FALSE;
} /* AddFeature */
/*---------------------------------------------------------------------------*/
/**
 * Release the memory consumed by the specified feature.
 * A NULL Feature is silently ignored. The size passed to free_struct
 * mirrors the variable-length allocation made in NewFeature.
 *
 * @param Feature feature to be deallocated.
 */
void FreeFeature(FEATURE Feature) {
  if (Feature == NULL)
    return;
  free_struct (Feature, sizeof (FEATURE_STRUCT)
               + sizeof (FLOAT32) * (Feature->Type->NumParams - 1),
               "sizeof(FEATURE_STRUCT)+sizeof(FLOAT32)*(NumParamsIn(Feature)-1)");
} /* FreeFeature */
/*---------------------------------------------------------------------------*/
/**
 * Release the memory consumed by the specified feature set,
 * including every feature contained in the set. A NULL set is ignored.
 *
 * @param FeatureSet set of features to be freed
 */
void FreeFeatureSet(FEATURE_SET FeatureSet) {
  if (FeatureSet == NULL)
    return;
  for (int i = 0; i < FeatureSet->NumFeatures; i++)
    FreeFeature(FeatureSet->Features[i]);
  memfree(FeatureSet);
} /* FreeFeatureSet */
/*---------------------------------------------------------------------------*/
/**
 * Allocate and return a new feature of the specified type.
 * The allocation is variable-length: FEATURE_STRUCT already contains one
 * param slot, so NumParams - 1 extra FLOAT32s are appended.
 *
 * @param FeatureDesc description of feature to be created.
 * @return New feature.
 */
FEATURE NewFeature(const FEATURE_DESC_STRUCT* FeatureDesc) {
  FEATURE feature = (FEATURE) alloc_struct (
      sizeof (FEATURE_STRUCT) +
          (FeatureDesc->NumParams - 1) * sizeof (FLOAT32),
      "sizeof(FEATURE_STRUCT)+sizeof(FLOAT32)*(NumParamsIn(Feature)-1)");
  feature->Type = FeatureDesc;
  return feature;
} /* NewFeature */
/*---------------------------------------------------------------------------*/
/**
 * Allocate and return a new feature set large enough to
 * hold the specified number of features. The set starts empty.
 *
 * @param NumFeatures maximum # of features to be put in feature set
 * @return New feature set.
 */
FEATURE_SET NewFeatureSet(int NumFeatures) {
  // FEATURE_SET_STRUCT contains one FEATURE slot; extend for the rest.
  FEATURE_SET set = (FEATURE_SET) Emalloc (
      sizeof (FEATURE_SET_STRUCT) + (NumFeatures - 1) * sizeof (FEATURE));
  set->MaxNumFeatures = NumFeatures;
  set->NumFeatures = 0;
  return set;
} /* NewFeatureSet */
/*---------------------------------------------------------------------------*/
/**
 * Create a new feature of the specified type and read in
 * the value of its parameters from File. The correct text
 * representation for a feature is a list of N floats where
 * N is the number of parameters in the feature.
 *
 * @param File open text file to read feature from
 * @param FeatureDesc specifies type of feature to read from File
 * @return New feature read from File.
 * @note Exceptions: ILLEGAL_FEATURE_PARAM if the text doesn't match
 * the expected format.
 */
FEATURE ReadFeature(FILE *File, const FEATURE_DESC_STRUCT* FeatureDesc) {
  FEATURE feature = NewFeature (FeatureDesc);
  for (int i = 0; i < feature->Type->NumParams; i++) {
    if (tfscanf(File, "%f", &(feature->Params[i])) != 1)
      DoError (ILLEGAL_FEATURE_PARAM, "Illegal feature parameter spec");
#ifndef _WIN32
    assert (!isnan(feature->Params[i]));
#endif
  }
  return feature;
} /* ReadFeature */
/*---------------------------------------------------------------------------*/
/**
 * Create a new feature set of the specified type and read in
 * the features from File. The text representation is an integer count (N)
 * followed by N feature descriptions.
 *
 * @param File open text file to read new feature set from
 * @param FeatureDesc specifies type of feature to read from File
 * @return New feature set read from File.
 */
FEATURE_SET ReadFeatureSet(FILE *File, const FEATURE_DESC_STRUCT* FeatureDesc) {
  int num_features;
  if (tfscanf(File, "%d", &num_features) != 1 || num_features < 0)
    DoError(ILLEGAL_NUM_FEATURES, "Illegal number of features in set");
  FEATURE_SET set = NewFeatureSet(num_features);
  for (int i = 0; i < num_features; i++)
    AddFeature(set, ReadFeature (File, FeatureDesc));
  return set;
} /* ReadFeatureSet */
/*---------------------------------------------------------------------------*/
/**
 * Write a textual representation of Feature to File.
 * This representation is simply a list of the N parameters
 * of the feature, terminated with a newline. It is assumed
 * that the ExtraPenalty field can be reconstructed from the
 * parameters of the feature. It is also assumed that the
 * feature type information is specified or assumed elsewhere.
 *
 * @param File open text file to write Feature to
 * @param Feature feature to write out to File
 */
void WriteFeature(FILE *File, FEATURE Feature) {
  int i;

  for (i = 0; i < Feature->Type->NumParams; i++) {
    // Use _WIN32 (the compiler-predefined macro) to match ReadFeature;
    // plain WIN32 is only defined by some build environments, so the old
    // "#ifndef WIN32" guard was inconsistent between the read/write paths.
#ifndef _WIN32
    assert(!isnan(Feature->Params[i]));
#endif
    fprintf(File, " %g", Feature->Params[i]);
  }
  fprintf(File, "\n");
} /* WriteFeature */
/*---------------------------------------------------------------------------*/
/**
 * Write a textual representation of FeatureSet to File:
 * an integer count of the features in the set, a newline, then the text
 * representation of each feature. A NULL set writes nothing.
 *
 * @param File open text file to write FeatureSet to
 * @param FeatureSet feature set to write to File
 */
void WriteFeatureSet(FILE *File, FEATURE_SET FeatureSet) {
  if (FeatureSet == NULL)
    return;
  fprintf (File, "%d\n", FeatureSet->NumFeatures);
  for (int i = 0; i < FeatureSet->NumFeatures; i++)
    WriteFeature (File, FeatureSet->Features[i]);
} /* WriteFeatureSet */
/*---------------------------------------------------------------------------*/
/**
 * Write a textual representation of FeatureDesc to File
 * in the old format (i.e. the format used by the clusterer):
 * the number of params on one line, then one line per param giving its
 * circular/linear flag, essential flag, and min/max range.
 *
 * @param File open text file to write FeatureDesc to
 * @param FeatureDesc feature descriptor to write to File
 */
void WriteOldParamDesc(FILE *File, const FEATURE_DESC_STRUCT* FeatureDesc) {
  fprintf (File, "%d\n", FeatureDesc->NumParams);
  for (int i = 0; i < FeatureDesc->NumParams; i++) {
    fputs(FeatureDesc->ParamDesc[i].Circular ? "circular " : "linear ",
          File);
    fputs(FeatureDesc->ParamDesc[i].NonEssential ? "non-essential "
                                                 : "essential ",
          File);
    fprintf (File, "%f %f\n",
             FeatureDesc->ParamDesc[i].Min, FeatureDesc->ParamDesc[i].Max);
  }
} /* WriteOldParamDesc */
| C++ |
/******************************************************************************
** Filename: normfeat.c
** Purpose: Definition of char normalization features.
** Author: Dan Johnson
** History: 12/14/90, DSJ, Created.
**
** (c) Copyright Hewlett-Packard Company, 1988.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
******************************************************************************/
/**----------------------------------------------------------------------------
Include Files and Type Defines
----------------------------------------------------------------------------**/
#include "normfeat.h"
#include "intfx.h"
#include "featdefs.h"
#include "mfoutline.h"
/**----------------------------------------------------------------------------
Public Code
----------------------------------------------------------------------------**/
// Return the length of the outline in baseline normalized form.
// Undoes the LENGTH_COMPRESSION scaling applied when the CharNormLength
// parameter was stored (see ExtractCharNormFeatures below).
FLOAT32 ActualOutlineLength(FEATURE Feature) {
  return (Feature->Params[CharNormLength] * LENGTH_COMPRESSION);
}
/*---------------------------------------------------------------------------*/
// Return the character normalization feature for a blob.
//
// The features returned are in a scale where the x-height has been
// normalized to live in the region y = [-0.25 .. 0.25]. Example ranges
// for English below are based on the Linux font collection on 2009-12-04:
//
// Params[CharNormY]
//   The y coordinate of the grapheme's centroid.
//   English: [-0.27, 0.71]
//
// Params[CharNormLength]
//   The length of the grapheme's outline (tiny segments discarded),
//   divided by 10.0=LENGTH_COMPRESSION.
//   English: [0.16, 0.85]
//
// Params[CharNormRx]
//   The radius of gyration about the x axis, as measured from CharNormY.
//   English: [0.011, 0.34]
//
// Params[CharNormRy]
//   The radius of gyration about the y axis, as measured from
//   the x center of the grapheme's bounding box.
//   English: [0.011, 0.31]
//
FEATURE_SET ExtractCharNormFeatures(TBLOB *blob, const DENORM& bl_denorm,
                                    const DENORM& cn_denorm,
                                    const INT_FX_RESULT_STRUCT& fx_info) {
  // Build the single feature from the precomputed moments in fx_info,
  // then wrap it in a one-element set.
  FEATURE feature = NewFeature(&CharNormDesc);
  feature->Params[CharNormY] =
      MF_SCALE_FACTOR * (fx_info.Ymean - kBlnBaselineOffset);
  feature->Params[CharNormLength] =
      MF_SCALE_FACTOR * fx_info.Length / LENGTH_COMPRESSION;
  feature->Params[CharNormRx] = MF_SCALE_FACTOR * fx_info.Rx;
  feature->Params[CharNormRy] = MF_SCALE_FACTOR * fx_info.Ry;
  FEATURE_SET feature_set = NewFeatureSet(1);
  AddFeature(feature_set, feature);
  return feature_set;
} /* ExtractCharNormFeatures */
| C++ |
// Copyright 2010 Google Inc. All Rights Reserved.
// Author: rays@google.com (Ray Smith)
///////////////////////////////////////////////////////////////////////
// File: intfeaturemap.cpp
// Description: Encapsulation of IntFeatureSpace with IndexMapBiDi
// to provide a subspace mapping and fast feature lookup.
// Created: Tue Oct 26 08:58:30 PDT 2010
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#include "intfeaturemap.h"
#include "intfeaturespace.h"
#include "intfx.h"
// These includes do not exist yet, but will be coming soon.
//#include "sampleiterator.h"
//#include "trainingsample.h"
//#include "trainingsampleset.h"
namespace tesseract {
// Tuning constants used by the offset-feature machinery in this file.
// NOTE(review): their precise semantics are defined by code outside this
// chunk — kMaxOffsetDist presumably bounds how far an offset feature may
// move, and kMinPCLengthIncrease a minimum principal-component length
// gain; confirm against the experimental code below.
const int kMaxOffsetDist = 32;
const double kMinPCLengthIncrease = 1.0 / 1024;
// Constructs an empty map with no offset look-up tables; Init() must be
// called before the map can be used.
IntFeatureMap::IntFeatureMap()
  : mapping_changed_(true), compact_size_(0) {
  for (int i = 0; i < kNumOffsetMaps; ++i) {
    offset_plus_[i] = NULL;
    offset_minus_[i] = NULL;
  }
}
// Destructor delegates cleanup (the offset look-up tables) to Clear().
IntFeatureMap::~IntFeatureMap() {
  Clear();
}
// Pseudo-accessors.
// Returns the index of f in the sparse (quantized) feature space.
int IntFeatureMap::IndexFeature(const INT_FEATURE_STRUCT& f) const {
  return feature_space_.Index(f);
}
// Returns the compact (mapped) index of f: quantizes via the feature
// space, then translates sparse->compact through the bidirectional map.
int IntFeatureMap::MapFeature(const INT_FEATURE_STRUCT& f) const {
  return feature_map_.SparseToCompact(feature_space_.Index(f));
}
// Translates an already-quantized sparse index to its compact index.
int IntFeatureMap::MapIndexFeature(int index_feature) const {
  return feature_map_.SparseToCompact(index_feature);
}
// Reconstructs the representative INT_FEATURE_STRUCT for a sparse index.
INT_FEATURE_STRUCT IntFeatureMap::InverseIndexFeature(int index_feature) const {
  return feature_space_.PositionFromIndex(index_feature);
}
// Reconstructs the representative INT_FEATURE_STRUCT for a compact index
// by first mapping it back to the sparse space.
INT_FEATURE_STRUCT IntFeatureMap::InverseMapFeature(int map_feature) const {
  int index = feature_map_.CompactToSparse(map_feature);
  return feature_space_.PositionFromIndex(index);
}
// Deletes the given compact feature by merging it into -1 in the map,
// and records that the mapping has changed (so dependents can refresh).
void IntFeatureMap::DeleteMapFeature(int map_feature) {
  feature_map_.Merge(-1, map_feature);
  mapping_changed_ = true;
}
// Returns true if the given compact feature has been deleted from the map.
bool IntFeatureMap::IsMapFeatureDeleted(int map_feature) const {
  return feature_map_.IsCompactDeleted(map_feature);
}
// Copies the given feature_space and uses it as the index feature map
// from INT_FEATURE_STRUCT.
void IntFeatureMap::Init(const IntFeatureSpace& feature_space) {
feature_space_ = feature_space;
mapping_changed_ = false;
int sparse_size = feature_space_.Size();
feature_map_.Init(sparse_size, true);
feature_map_.Setup();
compact_size_ = feature_map_.CompactSize();
// Initialize look-up tables if needed.
FCOORD dir = FeatureDirection(0);
if (dir.x() == 0.0f && dir.y() == 0.0f)
InitIntegerFX();
// Compute look-up tables to generate offset features.
for (int dir = 0; dir < kNumOffsetMaps; ++dir) {
delete [] offset_plus_[dir];
delete [] offset_minus_[dir];
offset_plus_[dir] = new int[sparse_size];
offset_minus_[dir] = new int[sparse_size];
}
for (int dir = 1; dir <= kNumOffsetMaps; ++dir) {
for (int i = 0; i < sparse_size; ++i) {
int offset_index = ComputeOffsetFeature(i, dir);
offset_plus_[dir - 1][i] = offset_index;
offset_index = ComputeOffsetFeature(i, -dir);
offset_minus_[dir - 1][i] = offset_index;
}
}
}
// Helper to return an offset index feature. In this context an offset
// feature with a dir of +/-1 is a feature of a similar direction,
// but shifted perpendicular to the direction of the feature. An offset
// feature with a dir of +/-2 is feature at the same position, but rotated
// by +/- one [compact] quantum. Returns the index of the generated offset
// feature, or -1 if it doesn't exist. Dir should be in
// [-kNumOffsetMaps, kNumOffsetMaps] to indicate the relative direction.
// A dir of 0 is an identity transformation.
// Both input and output are from the index(sparse) feature space, not
// the mapped/compact feature space, but the offset feature is the minimum
// distance moved from the input to guarantee that it maps to the next
// available quantum in the mapped/compact space.
// Returns the precomputed offset feature for index_feature in direction
// dir (see the comment block above for the meaning of dir), or -1 if
// dir is out of range [-kNumOffsetMaps, kNumOffsetMaps].
int IntFeatureMap::OffsetFeature(int index_feature, int dir) const {
  if (dir == 0)
    return index_feature;  // Identity transformation.
  if (dir > 0 && dir <= kNumOffsetMaps)
    return offset_plus_[dir - 1][index_feature];
  if (dir < 0 && -dir <= kNumOffsetMaps)
    return offset_minus_[-dir - 1][index_feature];
  return -1;  // Direction out of range.
}
//#define EXPERIMENT_ON
#ifdef EXPERIMENT_ON // This code is commented out as SampleIterator and
// TrainingSample are not reviewed/checked in yet, but these functions are a
// useful indicator of how an IntFeatureMap is setup.
// Computes the features used by the subset of samples defined by
// the iterator and sets up the feature mapping.
// Returns the size of the compacted feature space.
int IntFeatureMap::FindNZFeatureMapping(SampleIterator* it) {
  // Start with an empty mapping: only features actually seen get mapped.
  feature_map_.Init(feature_space_.Size(), false);
  int total_samples = 0;
  for (it->Begin(); !it->AtEnd(); it->Next()) {
    const TrainingSample& sample = it->GetSample();
    GenericVector<int> features;
    // Convert the sample's features to sorted sparse-space indices.
    feature_space_.IndexAndSortFeatures(sample.features(),
                                        sample.num_features(),
                                        &features);
    int num_features = features.size();
    // Mark every observed sparse feature as present in the map.
    for (int f = 0; f < num_features; ++f)
      feature_map_.SetMap(features[f], true);
    ++total_samples;
  }
  feature_map_.Setup();
  compact_size_ = feature_map_.CompactSize();
  mapping_changed_ = true;
  FinalizeMapping(it);
  tprintf("%d non-zero features found in %d samples\n",
          compact_size_, total_samples);
  return compact_size_;
}
#endif
// After deleting some features, finish setting up the mapping, and map
// all the samples. Returns the size of the compacted feature space.
// No-op (returns the cached size) if the mapping has not changed.
int IntFeatureMap::FinalizeMapping(SampleIterator* it) {
  if (mapping_changed_) {
    // Apply all pending Merge/delete operations to the compact mapping.
    feature_map_.CompleteMerges();
    compact_size_ = feature_map_.CompactSize();
#ifdef EXPERIMENT_ON
    // Re-map the samples only when the experimental code is compiled in.
    it->MapSampleFeatures(*this);
#endif
    mapping_changed_ = false;
  }
  return compact_size_;
}
// Prints the map features from the set in human-readable form.
void IntFeatureMap::DebugMapFeatures(
const GenericVector<int>& map_features) const {
for (int i = 0; i < map_features.size(); ++i) {
INT_FEATURE_STRUCT f = InverseMapFeature(map_features[i]);
f.print();
}
}
// Frees the offset look-up tables and nulls the pointers so that Clear()
// (and the destructor) are safe to call repeatedly.
void IntFeatureMap::Clear() {
  for (int i = 0; i < kNumOffsetMaps; ++i) {
    delete [] offset_plus_[i];
    offset_plus_[i] = NULL;
    delete [] offset_minus_[i];
    offset_minus_[i] = NULL;
  }
}
// Helper to compute an offset index feature. In this context an offset
// feature with a dir of +/-1 is a feature of a similar direction,
// but shifted perpendicular to the direction of the feature. An offset
// feature with a dir of +/-2 is feature at the same position, but rotated
// by +/- one [compact] quantum. Returns the index of the generated offset
// feature, or -1 if it doesn't exist. Dir should be in
// [-kNumOffsetMaps, kNumOffsetMaps] to indicate the relative direction.
// A dir of 0 is an identity transformation.
// Both input and output are from the index(sparse) feature space, not
// the mapped/compact feature space, but the offset feature is the minimum
// distance moved from the input to guarantee that it maps to the next
// available quantum in the mapped/compact space.
int IntFeatureMap::ComputeOffsetFeature(int index_feature, int dir) const {
  INT_FEATURE_STRUCT f = InverseIndexFeature(index_feature);
  // Sanity check: the feature must round-trip through the feature space.
  ASSERT_HOST(IndexFeature(f) == index_feature);
  if (dir == 0) {
    // Identity transformation.
    return index_feature;
  } else if (dir == 1 || dir == -1) {
    // Shift the position perpendicular to the feature's direction.
    FCOORD feature_dir = FeatureDirection(f.Theta);
    FCOORD rotation90(0.0f, 1.0f);
    feature_dir.rotate(rotation90);
    // Find the nearest existing feature: step outwards one unit at a time
    // until the rounded position lands in a different quantum.
    for (int m = 1; m < kMaxOffsetDist; ++m) {
      double x_pos = f.X + feature_dir.x() * (m * dir);
      double y_pos = f.Y + feature_dir.y() * (m * dir);
      int x = IntCastRounded(x_pos);
      int y = IntCastRounded(y_pos);
      if (x >= 0 && x <= MAX_UINT8 && y >= 0 && y <= MAX_UINT8) {
        // Same theta, shifted position.
        INT_FEATURE_STRUCT offset_f;
        offset_f.X = x;
        offset_f.Y = y;
        offset_f.Theta = f.Theta;
        int offset_index = IndexFeature(offset_f);
        if (offset_index != index_feature && offset_index >= 0)
          return offset_index;  // Found one.
      } else {
        return -1;  // Hit the edge of feature space.
      }
    }
  } else if (dir == 2 || dir == -2) {
    // Rotate theta in place; position is unchanged.
    // Find the nearest existing index_feature.
    for (int m = 1; m < kMaxOffsetDist; ++m) {
      // dir / 2 is +/-1, so theta steps by m in the chosen direction,
      // with wrap-around handled by Modulo below.
      int theta = f.Theta + m * dir / 2;
      INT_FEATURE_STRUCT offset_f;
      offset_f.X = f.X;
      offset_f.Y = f.Y;
      offset_f.Theta = Modulo(theta, 256);
      int offset_index = IndexFeature(offset_f);
      if (offset_index != index_feature && offset_index >= 0)
        return offset_index;  // Found one.
    }
  }
  return -1;  // Nothing within the max distance.
}
} // namespace tesseract.
| C++ |
// Copyright 2010 Google Inc. All Rights Reserved.
// Author: rays@google.com (Ray Smith)
///////////////////////////////////////////////////////////////////////
// File: intfeaturespace.h
// Description: Indexed feature space based on INT_FEATURE_STRUCT.
// Created: Wed Mar 24 10:55:30 PDT 2010
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#ifndef TESSERACT_CLASSIFY_INTFEATURESPACE_H__
#define TESSERACT_CLASSIFY_INTFEATURESPACE_H__
#include "genericvector.h"
#include "intproto.h"
// Extent of x,y,theta in the input feature space. [0,255].
const int kIntFeatureExtent = 256;
// Extent of x,y,theta dimensions in the quantized feature space.
const int kBoostXYBuckets = 16;
const int kBoostDirBuckets = 16;
namespace tesseract {
class IndexMap;
// Down-sampling quantization of the INT_FEATURE_STRUCT feature space and
// conversion to a single scalar index value, used as a binary feature space.
class IntFeatureSpace {
 public:
  // Constructs an empty space; Init must be called to set the dimensions.
  IntFeatureSpace();
  // Default copy constructors and assignment OK!
  // Setup the feature space with the given dimensions.
  void Init(uinT8 xbuckets, uinT8 ybuckets, uinT8 thetabuckets);
  // Serializes the feature space definition to the given file.
  // Returns false on error.
  bool Serialize(FILE* fp) const;
  // DeSerializes the feature space definition from the given file.
  // If swap is true, the data is big/little-endian swapped.
  // Returns false on error.
  bool DeSerialize(bool swap, FILE* fp);
  // Returns the total size of the feature space.
  int Size() const {
    return static_cast<int>(x_buckets_) * y_buckets_ * theta_buckets_;
  }
  // Returns an INT_FEATURE_STRUCT corresponding to the given index.
  // This is the inverse of the Index member.
  INT_FEATURE_STRUCT PositionFromIndex(int index) const;
  // Returns a 1-dimensional index corresponding to the given feature value.
  // Range is [0, Size()-1]. Inverse of PositionFromIndex member.
  int Index(const INT_FEATURE_STRUCT& f) const {
    return (XBucket(f.X) * y_buckets_ + YBucket(f.Y)) * theta_buckets_ +
        ThetaBucket(f.Theta);
  }
  // Bulk calls to Index. Maps the given array of features to a vector of
  // inT32 indices in the same order as the input.
  void IndexFeatures(const INT_FEATURE_STRUCT* features, int num_features,
                     GenericVector<int>* mapped_features) const;
  // Bulk calls to Index. Maps the given array of features to a vector of
  // sorted inT32 indices.
  void IndexAndSortFeatures(const INT_FEATURE_STRUCT* features,
                            int num_features,
                            GenericVector<int>* sorted_features) const;
  // Returns a feature space index for the given x,y position in a display
  // window, or -1 if the feature is a miss.
  int XYToFeatureIndex(int x, int y) const;
 protected:
  // Converters to generate indices for individual feature dimensions.
  // X and Y use simple linear down-scaling, clipped to a valid bucket.
  int XBucket(int x) const {
    int bucket = x * x_buckets_ / kIntFeatureExtent;
    return ClipToRange(bucket, 0, static_cast<int>(x_buckets_) - 1);
  }
  int YBucket(int y) const {
    int bucket = y * y_buckets_ / kIntFeatureExtent;
    return ClipToRange(bucket, 0, static_cast<int>(y_buckets_) - 1);
  }
  // Use DivRounded for theta so that exactly vertical and horizontal are in
  // the middle of a bucket. The Modulo takes care of the wrap-around.
  int ThetaBucket(int theta) const {
    int bucket = DivRounded(theta * theta_buckets_, kIntFeatureExtent);
    return Modulo(bucket, theta_buckets_);
  }
  // Returns an INT_FEATURE_STRUCT corresponding to the given buckets.
  INT_FEATURE_STRUCT PositionFromBuckets(int x, int y, int theta) const;
  // Feature space definition - serialized.
  // Number of quantization buckets in each of the three dimensions.
  uinT8 x_buckets_;
  uinT8 y_buckets_;
  uinT8 theta_buckets_;
};
} // namespace tesseract.
#endif // TESSERACT_CLASSIFY_INTFEATURESPACE_H__
| C++ |
// Copyright 2011 Google Inc. All Rights Reserved.
// Author: rays@google.com (Ray Smith)
///////////////////////////////////////////////////////////////////////
// File: tessclassifier.cpp
// Description: Tesseract implementation of a ShapeClassifier.
// Author: Ray Smith
// Created: Tue Nov 22 14:16:25 PST 2011
//
// (C) Copyright 2011, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#include "tessclassifier.h"
#include "classify.h"
#include "trainingsample.h"
namespace tesseract {
// Classifies the given [training] sample, writing to results.
// See ShapeClassifier for a full description.
// Returns the number of results produced.
int TessClassifier::UnicharClassifySample(
    const TrainingSample& sample, Pix* page_pix, int debug,
    UNICHAR_ID keep_this, GenericVector<UnicharRating>* results) {
  // Remember the current debug settings so they can be restored below.
  const int saved_matcher_level = classify_->matcher_debug_level;
  const int saved_matcher_flags = classify_->matcher_debug_flags;
  const int saved_classify_level = classify_->classify_debug_level;
  if (debug) {
    // Crank up the debug parameters to get verbose matcher/classifier
    // output for this one classification.
    classify_->matcher_debug_level.set_value(2);
    classify_->matcher_debug_flags.set_value(25);
    classify_->classify_debug_level.set_value(3);
  }
  classify_->CharNormTrainingSample(pruner_only_, keep_this, sample, results);
  if (debug) {
    // Put everything back the way it was.
    classify_->matcher_debug_level.set_value(saved_matcher_level);
    classify_->matcher_debug_flags.set_value(saved_matcher_flags);
    classify_->classify_debug_level.set_value(saved_classify_level);
  }
  return results->size();
}
// Provides access to the ShapeTable that this classifier works with.
// May return NULL (see GetUnicharset below).
const ShapeTable* TessClassifier::GetShapeTable() const {
  return classify_->shape_table();
}
// Provides access to the UNICHARSET that this classifier works with.
// Only needs to be overridden if GetShapeTable() can return NULL.
const UNICHARSET& TessClassifier::GetUnicharset() const {
  return classify_->unicharset;
}
// Displays classification as the given shape_id. Creates as many windows
// as it feels fit, using index as a guide for placement. Adds any created
// windows to the windows output and returns a new index that may be used
// by any subsequent classifiers. Caller waits for the user to view and
// then destroys the windows by clearing the vector.
int TessClassifier::DisplayClassifyAs(
    const TrainingSample& sample, Pix* page_pix, int unichar_id, int index,
    PointerVector<ScrollView>* windows) {
  // Without a shape table, the unichar id doubles as the shape id;
  // otherwise map it to the best-matching shape for this sample.
  int display_shape = unichar_id;
  if (GetShapeTable() != NULL)
    display_shape = BestShapeForUnichar(sample, page_pix, unichar_id, NULL);
  if (display_shape < 0)
    return index;  // No usable shape.
  if (UnusedClassIdIn(classify_->PreTrainedTemplates, display_shape)) {
    tprintf("No built-in templates for class/shape %d\n", display_shape);
    return index;
  }
  classify_->ShowBestMatchFor(display_shape, sample.features(),
                              sample.num_features());
  return index;
}
} // namespace tesseract
| C++ |
/******************************************************************************
** Filename: intmatcher.h
** Purpose: Interface to high level generic classifier routines.
** Author: Robert Moss
** History: Wed Feb 13 15:24:15 MST 1991, RWM, Created.
**
** (c) Copyright Hewlett-Packard Company, 1988.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
******************************************************************************/
#ifndef INTMATCHER_H
#define INTMATCHER_H
#include "params.h"
// Character fragments could be present in the trained templates
// but turned on/off on the language-by-language basis or depending
// on particular properties of the corpus (e.g. when we expect the
// images to have low exposure).
extern BOOL_VAR_H(disable_character_fragments, FALSE,
"Do not include character fragments in the"
" results of the classifier");
extern INT_VAR_H(classify_integer_matcher_multiplier, 10,
"Integer Matcher Multiplier 0-255: ");
/**----------------------------------------------------------------------------
Include Files and Type Defines
----------------------------------------------------------------------------**/
#include "intproto.h"
#include "cutoffs.h"
// Result of an integer match against one class template.
struct INT_RESULT_STRUCT {
  INT_RESULT_STRUCT() : Rating(0.0f), Config(0), Config2(0), FeatureMisses(0) {}
  FLOAT32 Rating;  // Overall match rating.
  // TODO(rays) It might be desirable for these to be able to represent a
  // null config.
  uinT8 Config;   // Best config.
  uinT8 Config2;  // Second-best config.
  // Count of features that missed — presumably features matching no proto;
  // confirm against IntegerMatcher::Match.
  uinT16 FeatureMisses;
};
typedef INT_RESULT_STRUCT *INT_RESULT;
// Per-class result from the class pruner, carrying the detailed
// integer-matcher result along with it.
struct CP_RESULT_STRUCT {
  CP_RESULT_STRUCT() : Rating(0.0f), Class(0) {}
  FLOAT32 Rating;              // Class pruner rating.
  INT_RESULT_STRUCT IMResult;  // Detailed integer matcher result.
  CLASS_ID Class;              // Class this result refers to.
};
/*----------------------------------------------------------------------------
Variables
-----------------------------------------------------------------------------*/
extern INT_VAR_H(classify_adapt_proto_thresh, 230,
"Threshold for good protos during adaptive 0-255: ");
extern INT_VAR_H(classify_adapt_feature_thresh, 230,
"Threshold for good features during adaptive 0-255: ");
/**----------------------------------------------------------------------------
Public Function Prototypes
----------------------------------------------------------------------------**/
#define SE_TABLE_BITS 9
#define SE_TABLE_SIZE 512
// Scratch tables used by IntegerMatcher to accumulate evidence during a
// match against a single class template.
struct ScratchEvidence {
  uinT8 feature_evidence_[MAX_NUM_CONFIGS];    // Per-config evidence.
  int sum_feature_evidence_[MAX_NUM_CONFIGS];  // Summed per-config evidence.
  uinT8 proto_evidence_[MAX_NUM_PROTOS][MAX_PROTO_INDEX];  // Per-proto evidence.
  // Resets all tables for a match against class_template.
  void Clear(const INT_CLASS class_template);
  // Resets only the per-config feature evidence.
  void ClearFeatureEvidence(const INT_CLASS class_template);
  // Normalizes sum_feature_evidence_ by the feature count.
  void NormalizeSums(INT_CLASS ClassTemplate, inT16 NumFeatures,
                     inT32 used_features);
  // Folds proto_evidence_ into sum_feature_evidence_ for the configs
  // selected by ConfigMask.
  void UpdateSumOfProtoEvidences(
    INT_CLASS ClassTemplate, BIT_VECTOR ConfigMask, inT16 NumFeatures);
};
// Integer-arithmetic feature/prototype matcher. Init builds the
// similarity-to-evidence look-up tables; Match scores a feature set
// against a single class template.
class IntegerMatcher {
 public:
  // Integer Matcher Theta Fudge (0-255).
  static const int kIntThetaFudge = 128;
  // Bits in Similarity to Evidence Lookup (8-9).
  static const int kEvidenceTableBits = 9;
  // Integer Evidence Truncation Bits (8-14).
  static const int kIntEvidenceTruncBits = 14;
  // Similarity to Evidence Table Exponential Multiplier.
  static const float kSEExponentialMultiplier;
  // Center of Similarity Curve.
  static const float kSimilarityCenter;
  IntegerMatcher() : classify_debug_level_(0) {}
  // Sets up the evidence tables, keeping a pointer to the debug-level
  // parameter for use during matching.
  void Init(tesseract::IntParam *classify_debug_level);
  // Scores Features against ClassTemplate (restricted by the proto and
  // config masks), writing the outcome to Result.
  void Match(INT_CLASS ClassTemplate,
             BIT_VECTOR ProtoMask,
             BIT_VECTOR ConfigMask,
             inT16 NumFeatures,
             const INT_FEATURE_STRUCT* Features,
             INT_RESULT Result,
             int AdaptFeatureThreshold,
             int Debug,
             bool SeparateDebugWindows);
  // Applies the CN normalization factor to the given rating and returns
  // the modified rating.
  float ApplyCNCorrection(float rating, int blob_length,
                          int normalization_factor, int matcher_multiplier);
  // Fills ProtoArray with protos scoring above AdaptProtoThreshold;
  // returns the number found.
  int FindGoodProtos(INT_CLASS ClassTemplate,
                     BIT_VECTOR ProtoMask,
                     BIT_VECTOR ConfigMask,
                     uinT16 BlobLength,
                     inT16 NumFeatures,
                     INT_FEATURE_ARRAY Features,
                     PROTO_ID *ProtoArray,
                     int AdaptProtoThreshold,
                     int Debug);
  // Fills FeatureArray with features scoring below AdaptFeatureThreshold;
  // returns the number found.
  int FindBadFeatures(INT_CLASS ClassTemplate,
                      BIT_VECTOR ProtoMask,
                      BIT_VECTOR ConfigMask,
                      uinT16 BlobLength,
                      inT16 NumFeatures,
                      INT_FEATURE_ARRAY Features,
                      FEATURE_ID *FeatureArray,
                      int AdaptFeatureThreshold,
                      int Debug);
 private:
  // Accumulates the evidence for a single feature into *evidence.
  int UpdateTablesForFeature(
      INT_CLASS ClassTemplate,
      BIT_VECTOR ProtoMask,
      BIT_VECTOR ConfigMask,
      int FeatureNum,
      const INT_FEATURE_STRUCT* Feature,
      ScratchEvidence *evidence,
      int Debug);
  // Picks the best config from the accumulated evidence into Result.
  int FindBestMatch(INT_CLASS ClassTemplate,
                    const ScratchEvidence &tables,
                    INT_RESULT Result);
#ifndef GRAPHICS_DISABLED
  // Debug visualization helpers; compiled out with GRAPHICS_DISABLED.
  void DebugFeatureProtoError(
      INT_CLASS ClassTemplate,
      BIT_VECTOR ProtoMask,
      BIT_VECTOR ConfigMask,
      const ScratchEvidence &tables,
      inT16 NumFeatures,
      int Debug);
  void DisplayProtoDebugInfo(
      INT_CLASS ClassTemplate,
      BIT_VECTOR ProtoMask,
      BIT_VECTOR ConfigMask,
      const ScratchEvidence &tables,
      bool SeparateDebugWindows);
  void DisplayFeatureDebugInfo(
      INT_CLASS ClassTemplate,
      BIT_VECTOR ProtoMask,
      BIT_VECTOR ConfigMask,
      inT16 NumFeatures,
      const INT_FEATURE_STRUCT* Features,
      int AdaptFeatureThreshold,
      int Debug,
      bool SeparateDebugWindows);
  void DebugBestMatch(int BestMatch, INT_RESULT Result);
#endif
 private:
  // Look-up tables and bit masks built by Init.
  uinT8 similarity_evidence_table_[SE_TABLE_SIZE];
  uinT32 evidence_table_mask_;
  uinT32 mult_trunc_shift_bits_;
  uinT32 table_trunc_shift_bits_;
  tesseract::IntParam *classify_debug_level_;
  uinT32 evidence_mult_mask_;
};
/**----------------------------------------------------------------------------
Private Function Prototypes
----------------------------------------------------------------------------**/
void IMDebugConfiguration(INT_FEATURE FeatureNum,
uinT16 ActualProtoNum,
uinT8 Evidence,
BIT_VECTOR ConfigMask,
uinT32 ConfigWord);
void IMDebugConfigurationSum(INT_FEATURE FeatureNum,
uinT8 *FeatureEvidence,
inT32 ConfigCount);
void HeapSort (int n, register int ra[], register int rb[]);
/**----------------------------------------------------------------------------
Global Data Definitions and Declarations
----------------------------------------------------------------------------**/
#endif
| C++ |
// Copyright 2010 Google Inc. All Rights Reserved.
// Author: rays@google.com (Ray Smith)
///////////////////////////////////////////////////////////////////////
// File: intfeaturespace.cpp
// Description: Indexed feature space based on INT_FEATURE_STRUCT.
// Created: Wed Mar 24 11:21:27 PDT 2010
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#include "intfeaturespace.h"
#include "intfx.h"
namespace tesseract {
// Constructs an empty (zero-dimension) space; Init sets the real sizes.
IntFeatureSpace::IntFeatureSpace()
  : x_buckets_(0), y_buckets_(0), theta_buckets_(0) {
}
// Sets the number of quantization buckets in each dimension.
void IntFeatureSpace::Init(uinT8 xbuckets, uinT8 ybuckets, uinT8 thetabuckets) {
  x_buckets_ = xbuckets;
  y_buckets_ = ybuckets;
  theta_buckets_ = thetabuckets;
}
// Serializes the feature space definition (the three bucket counts) to
// the given file. Returns false on error.
bool IntFeatureSpace::Serialize(FILE* fp) const {
  // Short-circuiting && stops at the first failed write, exactly like the
  // original early returns.
  return fwrite(&x_buckets_, sizeof(x_buckets_), 1, fp) == 1 &&
         fwrite(&y_buckets_, sizeof(y_buckets_), 1, fp) == 1 &&
         fwrite(&theta_buckets_, sizeof(theta_buckets_), 1, fp) == 1;
}
// DeSerializes the feature space definition from the given file.
// If swap is true, the data is big/little-endian swapped. Note: all three
// fields are single bytes, so no actual swapping is required here.
// Returns false on error.
bool IntFeatureSpace::DeSerialize(bool swap, FILE* fp) {
  return fread(&x_buckets_, sizeof(x_buckets_), 1, fp) == 1 &&
         fread(&y_buckets_, sizeof(y_buckets_), 1, fp) == 1 &&
         fread(&theta_buckets_, sizeof(theta_buckets_), 1, fp) == 1;
}
// Returns an INT_FEATURE_STRUCT corresponding to the given index.
// This is the inverse of the Index member: the index is decomposed as
// (x_bucket * y_buckets_ + y_bucket) * theta_buckets_ + theta_bucket.
INT_FEATURE_STRUCT IntFeatureSpace::PositionFromIndex(int index) const {
  const int theta_bucket = index % theta_buckets_;
  const int y_bucket = index / theta_buckets_ % y_buckets_;
  const int x_bucket = index / (y_buckets_ * theta_buckets_);
  return PositionFromBuckets(x_bucket, y_bucket, theta_bucket);
}
// Bulk calls to Index. Maps the given array of features to a vector of
// inT32 indices in the same order as the input.
void IntFeatureSpace::IndexFeatures(const INT_FEATURE_STRUCT* features,
                                    int num_features,
                                    GenericVector<int>* mapped_features) const {
  mapped_features->truncate(0);  // Discard any previous contents.
  for (int i = 0; i < num_features; ++i) {
    mapped_features->push_back(Index(features[i]));
  }
}
// Bulk calls to Index. Maps the given array of features to a vector of
// sorted inT32 indices.
void IntFeatureSpace::IndexAndSortFeatures(
    const INT_FEATURE_STRUCT* features, int num_features,
    GenericVector<int>* sorted_features) const {
  sorted_features->truncate(0);  // Discard any previous contents.
  for (int i = 0; i < num_features; ++i) {
    sorted_features->push_back(Index(features[i]));
  }
  sorted_features->sort();
}
// Returns a feature space index for the given x,y position in a display
// window, or -1 if the feature is a miss.
int IntFeatureSpace::XYToFeatureIndex(int x, int y) const {
  // Round the x,y position to a feature. Search for a valid theta.
  INT_FEATURE_STRUCT feature(x, y, 0);
  int index = -1;
  for (int theta = 0; theta <= MAX_UINT8 && index < 0; ++theta) {
    feature.Theta = theta;
    index = Index(feature);
  }
  if (index < 0) {
    tprintf("(%d,%d) does not exist in feature space!\n", x, y);
    return -1;
  }
  // Snap the feature to the center of its quantum for display.
  feature = PositionFromIndex(index);
  tprintf("Click at (%d, %d) ->(%d, %d), ->(%d, %d)\n",
          x, y, feature.X, feature.Y, x - feature.X, y - feature.Y);
  // Get the relative position of x,y from the rounded feature.
  x -= feature.X;
  y -= feature.Y;
  if (x != 0 || y != 0) {
    // Use the click offset from the quantum center as the direction,
    // re-deriving theta from the angle of that offset.
    double angle = atan2(static_cast<double>(y), static_cast<double>(x)) + PI;
    angle *= kIntFeatureExtent / (2.0 * PI);
    feature.Theta = static_cast<uinT8>(angle + 0.5);
    index = Index(feature);
    if (index < 0) {
      tprintf("Feature failed to map to a valid index:");
      feature.print();
      return -1;
    }
    feature = PositionFromIndex(index);
  }
  feature.print();
  return index;
}
// Returns an INT_FEATURE_STRUCT at the center of the quantum named by the
// given bucket coords: x and y use the half-extent offset, theta uses
// rounded division (matching ThetaBucket).
INT_FEATURE_STRUCT IntFeatureSpace::PositionFromBuckets(int x,
                                                        int y,
                                                        int theta) const {
  const int half_extent = kIntFeatureExtent / 2;
  INT_FEATURE_STRUCT pos((x * kIntFeatureExtent + half_extent) / x_buckets_,
                         (y * kIntFeatureExtent + half_extent) / y_buckets_,
                         DivRounded(theta * kIntFeatureExtent,
                                    theta_buckets_));
  return pos;
}
} // namespace tesseract.
| C++ |
/******************************************************************************
** Filename: adaptmatch.c
** Purpose: High level adaptive matcher.
** Author: Dan Johnson
** History: Mon Mar 11 10:00:10 1991, DSJ, Created.
**
** (c) Copyright Hewlett-Packard Company, 1988.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
******************************************************************************/
/*-----------------------------------------------------------------------------
Include Files and Type Defines
-----------------------------------------------------------------------------*/
#ifdef HAVE_CONFIG_H
#include "config_auto.h"
#endif
#include <ctype.h>
#include "ambigs.h"
#include "blobclass.h"
#include "blobs.h"
#include "helpers.h"
#include "normfeat.h"
#include "mfoutline.h"
#include "picofeat.h"
#include "float2int.h"
#include "outfeat.h"
#include "emalloc.h"
#include "intfx.h"
#include "efio.h"
#include "normmatch.h"
#include "ndminx.h"
#include "intproto.h"
#include "const.h"
#include "globals.h"
#include "werd.h"
#include "callcpp.h"
#include "pageres.h"
#include "params.h"
#include "classify.h"
#include "shapetable.h"
#include "tessclassifier.h"
#include "trainingsample.h"
#include "unicharset.h"
#include "dict.h"
#include "featdefs.h"
#include "genericvector.h"
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <math.h>
#ifdef __UNIX__
#include <assert.h>
#endif
#define ADAPT_TEMPLATE_SUFFIX ".a"
#define MAX_MATCHES 10
#define UNLIKELY_NUM_FEAT 200
#define NO_DEBUG 0
#define MAX_ADAPTABLE_WERD_SIZE 40
#define ADAPTABLE_WERD_ADJUSTMENT (0.05)
#define Y_DIM_OFFSET (Y_SHIFT - BASELINE_Y_SHIFT)
#define WORST_POSSIBLE_RATING (1.0)
// One candidate classification: a unichar with its rating and details of
// the match that produced it.
struct ScoredClass {
  CLASS_ID unichar_id;   // Unichar of this candidate.
  int shape_id;          // Shape that matched (-1 if none, see Initialize).
  FLOAT32 rating;        // Match rating; initialized to worst (1.0).
  bool adapted;          // True if it came from the adapted templates.
  inT16 config;          // Config within the matched class.
  inT16 fontinfo_id;     // Primary font info id.
  inT16 fontinfo_id2;    // Secondary font info id.
};
// Aggregated results of the adaptive matcher for one blob.
struct ADAPT_RESULTS {
  inT32 BlobLength;                         // Length measure of the blob.
  bool HasNonfragment;                      // True if any non-fragment match.
  GenericVector<ScoredClass> match;         // All candidate matches.
  ScoredClass best_match;                   // Best candidate so far.
  GenericVector<CP_RESULT_STRUCT> CPResults;  // Raw class pruner results.
  /// Initializes data members to the default values. Sets the initial
  /// rating of each class to be the worst possible rating (1.0).
  inline void Initialize() {
    BlobLength = MAX_INT32;
    HasNonfragment = false;
    best_match.unichar_id = NO_CLASS;
    best_match.shape_id = -1;
    best_match.rating = WORST_POSSIBLE_RATING;
    best_match.adapted = false;
    best_match.config = 0;
    best_match.fontinfo_id = kBlankFontinfoId;
    best_match.fontinfo_id2 = kBlankFontinfoId;
  }
};
// Identifies a particular config of a particular class within a set of
// adaptive templates.
struct PROTO_KEY {
  ADAPT_TEMPLATES Templates;  // Templates containing the config.
  CLASS_ID ClassId;           // Class the config belongs to.
  int ConfigId;               // Config index within the class.
};
/*-----------------------------------------------------------------------------
Private Macros
-----------------------------------------------------------------------------*/
#define MarginalMatch(Rating) \
((Rating) > matcher_great_threshold)
/*-----------------------------------------------------------------------------
Private Function Prototypes
-----------------------------------------------------------------------------*/
int CompareByRating(const void *arg1, const void *arg2);
ScoredClass *FindScoredUnichar(ADAPT_RESULTS *results, UNICHAR_ID id);
ScoredClass ScoredUnichar(ADAPT_RESULTS *results, UNICHAR_ID id);
void InitMatcherRatings(register FLOAT32 *Rating);
int MakeTempProtoPerm(void *item1, void *item2);
void SetAdaptiveThreshold(FLOAT32 Threshold);
/*-----------------------------------------------------------------------------
Public Code
-----------------------------------------------------------------------------*/
/*---------------------------------------------------------------------------*/
namespace tesseract {
/**
* This routine calls the adaptive matcher
* which returns (in an array) the class id of each
* class matched.
*
* It also returns the number of classes matched.
* For each class matched it places the best rating
* found for that class into the Ratings array.
*
* Bad matches are then removed so that they don't
* need to be sorted. The remaining good matches are
* then sorted and converted to choices.
*
* This routine also performs some simple speckle
* filtering.
*
* @note Exceptions: none
* @note History: Mon Mar 11 10:00:58 1991, DSJ, Created.
*
* @param Blob blob to be classified
* @param[out] Choices List of choices found by adaptive matcher.
* filled on return with the choices found by the
* class pruner and the ratings therefrom. Also
* contains the detailed results of the integer matcher.
*
*/
void Classify::AdaptiveClassifier(TBLOB *Blob, BLOB_CHOICE_LIST *Choices) {
  assert(Choices != NULL);
  ADAPT_RESULTS *Results = new ADAPT_RESULTS;
  Results->Initialize();
  ASSERT_HOST(AdaptedTemplates != NULL);
  // Gather candidate matches for the blob.
  DoAdaptiveMatch(Blob, Results);
  // Prune bad candidates before sorting so fewer need to be sorted.
  RemoveBadMatches(Results);
  Results->match.sort(CompareByRating);
  RemoveExtraPuncs(Results);
  // Turn the surviving scored classes into BLOB_CHOICEs for the caller.
  ConvertMatchesToChoices(Blob->denorm(), Blob->bounding_box(), Results,
                          Choices);
  if (matcher_debug_level >= 1) {
    cprintf ("AD Matches = ");
    PrintAdaptiveMatchResults(stdout, Results);
  }
  // Speckle filtering: offer a speckle choice if the blob looks like one
  // or nothing else matched at all.
  if (LargeSpeckle(*Blob) || Choices->length() == 0)
    AddLargeSpeckleTo(Results->BlobLength, Choices);
#ifndef GRAPHICS_DISABLED
  if (classify_enable_adaptive_debugger)
    DebugAdaptiveClassifier(Blob, Results);
#endif
  delete Results;
} /* AdaptiveClassifier */
// If *win is NULL, sets it to a new ScrollView() object with title msg.
// Clears the window and draws baselines.
// No-op when graphics support is compiled out.
void Classify::RefreshDebugWindow(ScrollView **win, const char *msg,
                                  int y_offset, const TBOX &wbox) {
#ifndef GRAPHICS_DISABLED
  const int kSampleSpaceWidth = 500;
  if (*win == NULL) {
    // Lazily create the window the first time it is needed.
    *win = new ScrollView(msg, 100, y_offset, kSampleSpaceWidth * 2, 200,
                          kSampleSpaceWidth * 2, 200, true);
  }
  (*win)->Clear();
  // Draw the baseline and x-height reference lines in dark gray.
  (*win)->Pen(64, 64, 64);
  (*win)->Line(-kSampleSpaceWidth, kBlnBaselineOffset,
               kSampleSpaceWidth, kBlnBaselineOffset);
  (*win)->Line(-kSampleSpaceWidth, kBlnXHeight + kBlnBaselineOffset,
               kSampleSpaceWidth, kBlnXHeight + kBlnBaselineOffset);
  (*win)->ZoomToRectangle(wbox.left(), wbox.top(),
                          wbox.right(), wbox.bottom());
#endif  // GRAPHICS_DISABLED
}
// Learns the given word using its chopped_word, seam_array, denorm,
// box_word, best_state, and correct_text to learn both correctly and
// incorrectly segmented blobs. If filename is not NULL, then LearnBlob
// is called and the data will be written to a file for static training.
// Otherwise AdaptToBlob is called for adaption within a document.
// If rejmap is not NULL, then only chars with a rejmap entry of '1' will
// be learned, otherwise all chars with good correct_text are learned.
void Classify::LearnWord(const char* filename, WERD_RES *word) {
  int word_len = word->correct_text.size();
  if (word_len == 0) return;

  // Per-char adaptation thresholds; only allocated in adaption mode.
  float* thresholds = NULL;
  if (filename == NULL) {
    // Adaption mode.
    if (!EnableLearning || word->best_choice == NULL)
      return;  // Can't or won't adapt.

    if (classify_learning_debug_level >= 1)
      tprintf("\n\nAdapting to word = %s\n",
              word->best_choice->debug_string().string());
    thresholds = new float[word_len];
    word->ComputeAdaptionThresholds(certainty_scale,
                                    matcher_perfect_threshold,
                                    matcher_good_threshold,
                                    matcher_rating_margin, thresholds);
  }
  // Index of the first chopped blob belonging to the current char.
  int start_blob = 0;

#ifndef GRAPHICS_DISABLED
  if (classify_debug_character_fragments) {
    // Pause on the window from the previous word before redrawing.
    if (learn_fragmented_word_debug_win_ != NULL) {
      window_wait(learn_fragmented_word_debug_win_);
    }
    RefreshDebugWindow(&learn_fragments_debug_win_, "LearnPieces", 400,
                       word->chopped_word->bounding_box());
    RefreshDebugWindow(&learn_fragmented_word_debug_win_, "LearnWord", 200,
                       word->chopped_word->bounding_box());
    word->chopped_word->plot(learn_fragmented_word_debug_win_);
    ScrollView::Update();
  }
#endif  // GRAPHICS_DISABLED

  // Walk the chars; best_state[ch] gives the number of chopped blobs that
  // make up char ch.
  for (int ch = 0; ch < word_len; ++ch) {
    if (classify_debug_character_fragments) {
      tprintf("\nLearning %s\n", word->correct_text[ch].string());
    }
    if (word->correct_text[ch].length() > 0) {
      float threshold = thresholds != NULL ? thresholds[ch] : 0.0f;

      // Learn the complete character (all of its blobs joined).
      LearnPieces(filename, start_blob, word->best_state[ch],
                  threshold, CST_WHOLE, word->correct_text[ch].string(), word);

      if (word->best_state[ch] > 1 && !disable_character_fragments) {
        // Check that the character breaks into meaningful fragments
        // that each match a whole character with at least
        // classify_character_fragments_garbage_certainty_threshold
        bool garbage = false;
        int frag;
        for (frag = 0; frag < word->best_state[ch]; ++frag) {
          TBLOB* frag_blob = word->chopped_word->blobs[start_blob + frag];
          if (classify_character_fragments_garbage_certainty_threshold < 0) {
            garbage |= LooksLikeGarbage(frag_blob);
          }
        }
        // Learn the fragments.
        if (!garbage) {
          bool pieces_all_natural = word->PiecesAllNatural(start_blob,
              word->best_state[ch]);
          if (pieces_all_natural || !prioritize_division) {
            for (frag = 0; frag < word->best_state[ch]; ++frag) {
              GenericVector<STRING> tokens;
              word->correct_text[ch].split(' ', &tokens);
              // Replace the unichar token with the fragment representation
              // built by CHAR_FRAGMENT::to_string; any remaining tokens in
              // the label are kept as-is.
              tokens[0] = CHAR_FRAGMENT::to_string(
                  tokens[0].string(), frag, word->best_state[ch],
                  pieces_all_natural);

              // Rebuild the space-separated label.
              STRING full_string;
              for (int i = 0; i < tokens.size(); i++) {
                full_string += tokens[i];
                if (i != tokens.size() - 1)
                  full_string += ' ';
              }
              LearnPieces(filename, start_blob + frag, 1,
                          threshold, CST_FRAGMENT, full_string.string(), word);
            }
          }
        }
      }

      // TODO(rays): re-enable this part of the code when we switch to the
      // new classifier that needs to see examples of garbage.
      /*
      if (word->best_state[ch] > 1) {
        // If the next blob is good, make junk with the rightmost fragment.
        if (ch + 1 < word_len && word->correct_text[ch + 1].length() > 0) {
          LearnPieces(filename, start_blob + word->best_state[ch] - 1,
                      word->best_state[ch + 1] + 1,
                      threshold, CST_IMPROPER, INVALID_UNICHAR, word);
        }
        // If the previous blob is good, make junk with the leftmost fragment.
        if (ch > 0 && word->correct_text[ch - 1].length() > 0) {
          LearnPieces(filename, start_blob - word->best_state[ch - 1],
                      word->best_state[ch - 1] + 1,
                      threshold, CST_IMPROPER, INVALID_UNICHAR, word);
        }
      }
      // If the next blob is good, make a join with it.
      if (ch + 1 < word_len && word->correct_text[ch + 1].length() > 0) {
        STRING joined_text = word->correct_text[ch];
        joined_text += word->correct_text[ch + 1];
        LearnPieces(filename, start_blob,
                    word->best_state[ch] + word->best_state[ch + 1],
                    threshold, CST_NGRAM, joined_text.string(), word);
      }
      */
    }
    start_blob += word->best_state[ch];
  }
  delete [] thresholds;
}  // LearnWord.
// Builds a blob of length fragments, from the word, starting at start,
// and then learns it, as having the given correct_text.
// If filename is not NULL, then LearnBlob
// is called and the data will be written to a file for static training.
// Otherwise AdaptToBlob is called for adaption within a document.
// threshold is a magic number required by AdaptToChar and generated by
// ComputeAdaptionThresholds.
// Although it can be partly inferred from the string, segmentation is
// provided to explicitly clarify the character segmentation.
void Classify::LearnPieces(const char* filename, int start, int length,
                           float threshold, CharSegmentationType segmentation,
                           const char* correct_text, WERD_RES *word) {
  // TODO(daria) Remove/modify this if/when we want
  // to train and/or adapt to n-grams.
  if (segmentation != CST_WHOLE &&
      (segmentation != CST_FRAGMENT || disable_character_fragments))
    return;

  // Temporarily join the chopped blobs into one; undone by break_pieces at
  // the bottom of this function.
  if (length > 1) {
    join_pieces(word->seam_array, start, start + length - 1,
                word->chopped_word);
  }
  TBLOB* blob = word->chopped_word->blobs[start];
  // Rotate the blob if needed for classification.
  TBLOB* rotated_blob = blob->ClassifyNormalizeIfNeeded();
  if (rotated_blob == NULL)
    rotated_blob = blob;

#ifndef GRAPHICS_DISABLED
  // Draw debug windows showing the blob that is being learned if needed.
  if (strcmp(classify_learn_debug_str.string(), correct_text) == 0) {
    RefreshDebugWindow(&learn_debug_win_, "LearnPieces", 600,
                       word->chopped_word->bounding_box());
    rotated_blob->plot(learn_debug_win_, ScrollView::GREEN, ScrollView::BROWN);
    learn_debug_win_->Update();
    window_wait(learn_debug_win_);
  }
  if (classify_debug_character_fragments && segmentation == CST_FRAGMENT) {
    ASSERT_HOST(learn_fragments_debug_win_ != NULL);  // set up in LearnWord
    blob->plot(learn_fragments_debug_win_,
               ScrollView::BLUE, ScrollView::BROWN);
    learn_fragments_debug_win_->Update();
  }
#endif  // GRAPHICS_DISABLED

  if (filename != NULL) {
    // Static training: write the sample out to the training file.
    classify_norm_method.set_value(character);  // force char norm spc 30/11/93
    tess_bn_matching.set_value(false);  // turn it off
    tess_cn_matching.set_value(false);
    DENORM bl_denorm, cn_denorm;
    INT_FX_RESULT_STRUCT fx_info;
    SetupBLCNDenorms(*rotated_blob, classify_nonlinear_norm,
                     &bl_denorm, &cn_denorm, &fx_info);
    LearnBlob(feature_defs_, filename, rotated_blob, bl_denorm, cn_denorm,
              fx_info, correct_text);
  } else if (unicharset.contains_unichar(correct_text)) {
    UNICHAR_ID class_id = unicharset.unichar_to_id(correct_text);
    int font_id = word->fontinfo != NULL
                ? fontinfo_table_.get_id(*word->fontinfo)
                : 0;
    if (classify_learning_debug_level >= 1)
      tprintf("Adapting to char = %s, thr= %g font_id= %d\n",
              unicharset.id_to_unichar(class_id), threshold, font_id);
    // Filename is NULL, so we are adapting during recognition
    // (as opposed to training), so we must have already set word fonts.
    AdaptToChar(rotated_blob, class_id, font_id, threshold);
  } else if (classify_debug_level >= 1) {
    tprintf("Can't adapt to %s not in unicharset\n", correct_text);
  }
  // ClassifyNormalizeIfNeeded allocated a new blob only when it rotated.
  if (rotated_blob != blob) {
    delete rotated_blob;
  }

  // Undo the join_pieces above, restoring the chopped word.
  break_pieces(word->seam_array, start, start + length - 1, word->chopped_word);
}  // LearnPieces.
/*---------------------------------------------------------------------------*/
/**
* This routine performs cleanup operations
* on the adaptive classifier. It should be called
* before the program is terminated. Its main function
* is to save the adapted templates to a file.
*
* Globals:
* - #AdaptedTemplates current set of adapted templates
* - #classify_save_adapted_templates TRUE if templates should be saved
* - #classify_enable_adaptive_matcher TRUE if adaptive matcher is enabled
*
* @note Exceptions: none
* @note History: Tue Mar 19 14:37:06 1991, DSJ, Created.
*/
void Classify::EndAdaptiveClassifier() {
  STRING Filename;
  FILE *File;

  // Optionally persist the adapted templates for a later run.
  if (AdaptedTemplates != NULL &&
      classify_enable_adaptive_matcher && classify_save_adapted_templates) {
    Filename = imagefile + ADAPT_TEMPLATE_SUFFIX;
    File = fopen(Filename.string(), "wb");
    if (File == NULL)
      cprintf ("Unable to save adapted templates to %s!\n", Filename.string());
    else {
      cprintf ("\nSaving adapted templates to %s ...", Filename.string());
      fflush(stdout);
      WriteAdaptedTemplates(File, AdaptedTemplates);
      cprintf ("\n");
      fclose(File);
    }
  }

  // Release adapted and pre-trained templates.
  if (AdaptedTemplates != NULL) {
    free_adapted_templates(AdaptedTemplates);
    AdaptedTemplates = NULL;
  }
  if (PreTrainedTemplates != NULL) {
    free_int_templates(PreTrainedTemplates);
    PreTrainedTemplates = NULL;
  }
  getDict().EndDangerousAmbigs();
  FreeNormProtos();
  // The four bit vectors are allocated together in InitAdaptiveClassifier,
  // so AllProtosOn != NULL implies all four are live.
  if (AllProtosOn != NULL) {
    FreeBitVector(AllProtosOn);
    FreeBitVector(AllConfigsOn);
    FreeBitVector(AllConfigsOff);
    FreeBitVector(TempProtoMask);
    AllProtosOn = NULL;
    AllConfigsOn = NULL;
    AllConfigsOff = NULL;
    TempProtoMask = NULL;
  }
  // delete of NULL is a no-op, so no guard is needed; this also makes the
  // static_classifier_ cleanup consistent with shape_table_.
  delete shape_table_;
  shape_table_ = NULL;
  delete static_classifier_;
  static_classifier_ = NULL;
} /* EndAdaptiveClassifier */
/*---------------------------------------------------------------------------*/
/**
* This routine reads in the training
* information needed by the adaptive classifier
* and saves it into global variables.
* Parameters:
* load_pre_trained_templates Indicates whether the pre-trained
* templates (inttemp, normproto and pffmtable components)
 *      should be loaded. Should only be set to true if the
 *      necessary classifier components are present in the
* [lang].traineddata file.
* Globals:
* BuiltInTemplatesFile file to get built-in temps from
* BuiltInCutoffsFile file to get avg. feat per class from
* classify_use_pre_adapted_templates
* enables use of pre-adapted templates
* @note History: Mon Mar 11 12:49:34 1991, DSJ, Created.
*/
void Classify::InitAdaptiveClassifier(bool load_pre_trained_templates) {
  if (!classify_enable_adaptive_matcher)
    return;
  if (AllProtosOn != NULL)
    EndAdaptiveClassifier();  // Don't leak with multiple inits.

  // If there is no language_data_path_prefix, the classifier will be
  // adaptive only.
  if (language_data_path_prefix.length() > 0 &&
      load_pre_trained_templates) {
    // Load the static (pre-trained) templates from the traineddata file.
    ASSERT_HOST(tessdata_manager.SeekToStart(TESSDATA_INTTEMP));
    PreTrainedTemplates =
      ReadIntTemplates(tessdata_manager.GetDataFilePtr());
    if (tessdata_manager.DebugLevel() > 0) tprintf("Loaded inttemp\n");

    // The shape table component is optional; shape_table_ stays NULL
    // when it is absent or fails to deserialize.
    if (tessdata_manager.SeekToStart(TESSDATA_SHAPE_TABLE)) {
      shape_table_ = new ShapeTable(unicharset);
      if (!shape_table_->DeSerialize(tessdata_manager.swap(),
                                     tessdata_manager.GetDataFilePtr())) {
        tprintf("Error loading shape table!\n");
        delete shape_table_;
        shape_table_ = NULL;
      } else if (tessdata_manager.DebugLevel() > 0) {
        tprintf("Successfully loaded shape table!\n");
      }
    }

    // Per-class expected feature counts.
    ASSERT_HOST(tessdata_manager.SeekToStart(TESSDATA_PFFMTABLE));
    ReadNewCutoffs(tessdata_manager.GetDataFilePtr(),
                   tessdata_manager.swap(),
                   tessdata_manager.GetEndOffset(TESSDATA_PFFMTABLE),
                   CharNormCutoffs);
    if (tessdata_manager.DebugLevel() > 0) tprintf("Loaded pffmtable\n");

    // Character normalization prototypes.
    ASSERT_HOST(tessdata_manager.SeekToStart(TESSDATA_NORMPROTO));
    NormProtos =
      ReadNormProtos(tessdata_manager.GetDataFilePtr(),
                     tessdata_manager.GetEndOffset(TESSDATA_NORMPROTO));
    if (tessdata_manager.DebugLevel() > 0) tprintf("Loaded normproto\n");
    static_classifier_ = new TessClassifier(false, this);
  }

  im_.Init(&classify_debug_level);
  InitIntegerFX();

  // Dummy proto/config masks used throughout matching.
  AllProtosOn = NewBitVector(MAX_NUM_PROTOS);
  AllConfigsOn = NewBitVector(MAX_NUM_CONFIGS);
  AllConfigsOff = NewBitVector(MAX_NUM_CONFIGS);
  TempProtoMask = NewBitVector(MAX_NUM_PROTOS);
  set_all_bits(AllProtosOn, WordsInVectorOfSize(MAX_NUM_PROTOS));
  set_all_bits(AllConfigsOn, WordsInVectorOfSize(MAX_NUM_CONFIGS));
  zero_all_bits(AllConfigsOff, WordsInVectorOfSize(MAX_NUM_CONFIGS));

  for (int i = 0; i < MAX_NUM_CLASSES; i++) {
    BaselineCutoffs[i] = 0;
  }

  if (classify_use_pre_adapted_templates) {
    // Try to resume adaptation from templates saved by a previous run.
    FILE *File;
    STRING Filename;

    Filename = imagefile;
    Filename += ADAPT_TEMPLATE_SUFFIX;
    File = fopen(Filename.string(), "rb");
    if (File == NULL) {
      AdaptedTemplates = NewAdaptedTemplates(true);
    } else {
      cprintf("\nReading pre-adapted templates from %s ...\n",
              Filename.string());
      fflush(stdout);
      AdaptedTemplates = ReadAdaptedTemplates(File);
      cprintf("\n");
      fclose(File);
      PrintAdaptedTemplates(stdout, AdaptedTemplates);

      for (int i = 0; i < AdaptedTemplates->Templates->NumClasses; i++) {
        BaselineCutoffs[i] = CharNormCutoffs[i];
      }
    }
  } else {
    // Start from a fresh, empty set of adapted templates.
    if (AdaptedTemplates != NULL)
      free_adapted_templates(AdaptedTemplates);
    AdaptedTemplates = NewAdaptedTemplates(true);
  }
} /* InitAdaptiveClassifier */
// Discards everything adapted so far and starts over with a fresh, empty
// set of adapted templates; also clears the failed-adaptation counter.
void Classify::ResetAdaptiveClassifierInternal() {
  if (classify_learning_debug_level > 0) {
    tprintf("Resetting adaptive classifier (NumAdaptationsFailed=%d)\n",
            NumAdaptationsFailed);
  }
  free_adapted_templates(AdaptedTemplates);
  AdaptedTemplates = NewAdaptedTemplates(true);
  NumAdaptationsFailed = 0;
}
/*---------------------------------------------------------------------------*/
/**
* This routine prepares the adaptive
* matcher for the start
* of the first pass. Learning is enabled (unless it
* is disabled for the whole program).
*
* @note this is somewhat redundant, it simply says that if learning is
* enabled then it will remain enabled on the first pass. If it is
* disabled, then it will remain disabled. This is only put here to
* make it very clear that learning is controlled directly by the global
* setting of EnableLearning.
*
* Globals:
* - #EnableLearning
* set to TRUE by this routine
*
* @note Exceptions: none
* @note History: Mon Apr 15 16:39:29 1991, DSJ, Created.
*/
void Classify::SettupPass1() {
  // On pass 1, learning follows the global classify_enable_learning flag.
  EnableLearning = classify_enable_learning;
  getDict().SettupStopperPass1();
} /* SettupPass1 */
/*---------------------------------------------------------------------------*/
/**
* This routine prepares the adaptive
* matcher for the start of the second pass. Further
* learning is disabled.
*
* Globals:
* - #EnableLearning set to FALSE by this routine
*
* @note Exceptions: none
* @note History: Mon Apr 15 16:39:29 1991, DSJ, Created.
*/
void Classify::SettupPass2() {
  // No further learning on pass 2.
  EnableLearning = FALSE;
  getDict().SettupStopperPass2();
} /* SettupPass2 */
/*---------------------------------------------------------------------------*/
/**
* This routine creates a new adapted
* class and uses Blob as the model for the first
* config in that class.
*
* @param Blob blob to model new class after
* @param ClassId id of the class to be initialized
* @param FontinfoId font information inferred from pre-trained templates
* @param Class adapted class to be initialized
* @param Templates adapted templates to add new class to
*
* Globals:
* - #AllProtosOn dummy mask with all 1's
* - BaselineCutoffs kludge needed to get cutoffs
* - #PreTrainedTemplates kludge needed to get cutoffs
*
* @note Exceptions: none
* @note History: Thu Mar 14 12:49:39 1991, DSJ, Created.
*/
void Classify::InitAdaptedClass(TBLOB *Blob,
                                CLASS_ID ClassId,
                                int FontinfoId,
                                ADAPT_CLASS Class,
                                ADAPT_TEMPLATES Templates) {
  FEATURE_SET Features;
  int Fid, Pid;
  FEATURE Feature;
  int NumFeatures;
  TEMP_PROTO TempProto;
  PROTO Proto;
  INT_CLASS IClass;
  TEMP_CONFIG Config;

  // Extract baseline-normalized outline features from the model blob.
  classify_norm_method.set_value(baseline);
  Features = ExtractOutlineFeatures(Blob);
  NumFeatures = Features->NumFeatures;
  if (NumFeatures > UNLIKELY_NUM_FEAT || NumFeatures <= 0) {
    FreeFeatureSet(Features);
    return;
  }

  // The new class starts with a single temporary config (index 0) built
  // from this blob's features.
  Config = NewTempConfig(NumFeatures - 1, FontinfoId);
  TempConfigFor(Class, 0) = Config;

  /* this is a kludge to construct cutoffs for adapted templates */
  if (Templates == AdaptedTemplates)
    BaselineCutoffs[ClassId] = CharNormCutoffs[ClassId];

  IClass = ClassForClassId (Templates->Templates, ClassId);

  // One temporary proto per outline feature.
  for (Fid = 0; Fid < Features->NumFeatures; Fid++) {
    Pid = AddIntProto (IClass);
    assert (Pid != NO_PROTO);

    Feature = Features->Features[Fid];
    TempProto = NewTempProto ();
    Proto = &(TempProto->Proto);

    /* compute proto params - NOTE that Y_DIM_OFFSET must be used because
       ConvertProto assumes that the Y dimension varies from -0.5 to 0.5
       instead of the -0.25 to 0.75 used in baseline normalization */
    Proto->Angle = Feature->Params[OutlineFeatDir];
    Proto->X = Feature->Params[OutlineFeatX];
    Proto->Y = Feature->Params[OutlineFeatY] - Y_DIM_OFFSET;
    Proto->Length = Feature->Params[OutlineFeatLength];
    FillABC(Proto);

    TempProto->ProtoId = Pid;
    SET_BIT (Config->Protos, Pid);

    ConvertProto(Proto, Pid, IClass);
    AddProtoToProtoPruner(Proto, Pid, IClass,
                          classify_learning_debug_level >= 2);

    Class->TempProtos = push (Class->TempProtos, TempProto);
  }
  FreeFeatureSet(Features);

  AddIntConfig(IClass);
  ConvertConfig (AllProtosOn, 0, IClass);

  if (classify_learning_debug_level >= 1) {
    cprintf ("Added new class '%s' with class id %d and %d protos.\n",
             unicharset.id_to_unichar(ClassId), ClassId, NumFeatures);
    if (classify_learning_debug_level > 1)
      DisplayAdaptedChar(Blob, IClass);
  }

  // NOTE(review): this tests emptiness AFTER temp protos were pushed above,
  // so the condition looks like it can never hold here — possibly the intent
  // was to test before populating, or to increment unconditionally. Confirm
  // against the definition of IsEmptyAdaptedClass before changing.
  if (IsEmptyAdaptedClass(Class))
    (Templates->NumNonEmptyClasses)++;
} /* InitAdaptedClass */
/*---------------------------------------------------------------------------*/
/**
* This routine sets up the feature
* extractor to extract baseline normalized
* pico-features.
*
* The extracted pico-features are converted
* to integer form and placed in IntFeatures. The
* original floating-pt. features are returned in
* FloatFeatures.
*
* Globals: none
* @param Blob blob to extract features from
* @param[out] IntFeatures array to fill with integer features
* @param[out] FloatFeatures place to return actual floating-pt features
*
* @return Number of pico-features returned (0 if
* an error occurred)
* @note Exceptions: none
* @note History: Tue Mar 12 17:55:18 1991, DSJ, Created.
*/
int Classify::GetAdaptiveFeatures(TBLOB *Blob,
                                  INT_FEATURE_ARRAY IntFeatures,
                                  FEATURE_SET *FloatFeatures) {
  // Extract baseline-normalized pico-features from the blob.
  classify_norm_method.set_value(baseline);
  FEATURE_SET feature_set = ExtractPicoFeatures(Blob);

  int num_features = feature_set->NumFeatures;
  if (num_features > UNLIKELY_NUM_FEAT) {
    // Implausibly many features: discard and report failure. Note that
    // *FloatFeatures is NOT set on this path.
    FreeFeatureSet(feature_set);
    return 0;
  }

  // Convert to integer form for the matcher; hand back the float features
  // to the caller, who owns them from here on.
  ComputeIntFeatures(feature_set, IntFeatures);
  *FloatFeatures = feature_set;

  return num_features;
} /* GetAdaptiveFeatures */
/*-----------------------------------------------------------------------------
Private Code
-----------------------------------------------------------------------------*/
/*---------------------------------------------------------------------------*/
/**
* Return TRUE if the specified word is
* acceptable for adaptation.
*
* Globals: none
*
 * @param word current word (its best_choice is consulted)
*
* @return TRUE or FALSE
* @note Exceptions: none
* @note History: Thu May 30 14:25:06 1991, DSJ, Created.
*/
bool Classify::AdaptableWord(WERD_RES* word) {
  if (word->best_choice == NULL) return false;
  const int choice_len = word->best_choice->length();
  // Threshold on the adjust factor below which the best choice is considered
  // a dictionary-quality match (case-ok dict penalty plus a small margin,
  // e.g. 1.1 + 0.05 = 1.15).
  const float adaptable_score =
      getDict().segment_penalty_dict_case_ok + ADAPTABLE_WERD_ADJUSTMENT;
  // Rules that apply in general - simplest to compute first.
  if (choice_len <= 0 || choice_len > MAX_ADAPTABLE_WERD_SIZE)
    return false;
  if (choice_len != word->rebuild_word->NumBlobs())
    return false;
  // This basically ensures that the word is at least a dictionary match
  // (freq word, user word, system dawg word, etc), since all the other
  // adjustments push the adjust factor above adaptable_score. As other
  // flags also ensure the word is a dict word, this check can at times be
  // redundant.
  if (word->best_choice->adjust_factor() > adaptable_score)
    return false;
  // Make sure that alternative choices are not dictionary words.
  return word->AlternativeChoiceAdjustmentsWorseThan(adaptable_score);
}
/*---------------------------------------------------------------------------*/
/**
* @param Blob blob to add to templates for ClassId
* @param ClassId class to add blob to
* @param FontinfoId font information from pre-trained templates
* @param Threshold minimum match rating to existing template
*
* Globals:
* - AdaptedTemplates current set of adapted templates
* - AllProtosOn dummy mask to match against all protos
* - AllConfigsOn dummy mask to match against all configs
*
* @return none
* @note Exceptions: none
* @note History: Thu Mar 14 09:36:03 1991, DSJ, Created.
*/
void Classify::AdaptToChar(TBLOB *Blob,
                           CLASS_ID ClassId,
                           int FontinfoId,
                           FLOAT32 Threshold) {
  int NumFeatures;
  INT_FEATURE_ARRAY IntFeatures;
  INT_RESULT_STRUCT IntResult;
  INT_CLASS IClass;
  ADAPT_CLASS Class;
  TEMP_CONFIG TempConfig;
  FEATURE_SET FloatFeatures;
  int NewTempConfigId;

  if (!LegalClassId (ClassId))
    return;
  Class = AdaptedTemplates->Class[ClassId];
  assert(Class != NULL);
  if (IsEmptyAdaptedClass(Class)) {
    // First example of this class: seed a new adapted class from the blob.
    InitAdaptedClass(Blob, ClassId, FontinfoId, Class, AdaptedTemplates);
  }
  else {
    IClass = ClassForClassId (AdaptedTemplates->Templates, ClassId);

    NumFeatures = GetAdaptiveFeatures(Blob, IntFeatures, &FloatFeatures);
    if (NumFeatures <= 0)
      // NOTE(review): if GetAdaptiveFeatures returned 0 because the blob had
      // zero features (rather than too many), it did set FloatFeatures,
      // which is not freed on this return — possible leak; confirm.
      return;

    // Only match configs with the matching font.
    BIT_VECTOR MatchingFontConfigs = NewBitVector(MAX_NUM_PROTOS);
    for (int cfg = 0; cfg < IClass->NumConfigs; ++cfg) {
      if (GetFontinfoId(Class, cfg) == FontinfoId) {
        SET_BIT(MatchingFontConfigs, cfg);
      } else {
        reset_bit(MatchingFontConfigs, cfg);
      }
    }
    im_.Match(IClass, AllProtosOn, MatchingFontConfigs,
              NumFeatures, IntFeatures,
              &IntResult, classify_adapt_feature_threshold,
              NO_DEBUG, matcher_debug_separate_windows);
    FreeBitVector(MatchingFontConfigs);

    SetAdaptiveThreshold(Threshold);

    if (IntResult.Rating <= Threshold) {
      // Good match to an existing config: reinforce it.
      if (ConfigIsPermanent (Class, IntResult.Config)) {
        if (classify_learning_debug_level >= 1)
          cprintf ("Found good match to perm config %d = %4.1f%%.\n",
                   IntResult.Config, (1.0 - IntResult.Rating) * 100.0);
        FreeFeatureSet(FloatFeatures);
        return;
      }

      TempConfig = TempConfigFor (Class, IntResult.Config);
      IncreaseConfidence(TempConfig);
      if (TempConfig->NumTimesSeen > Class->MaxNumTimesSeen) {
        Class->MaxNumTimesSeen = TempConfig->NumTimesSeen;
      }
      if (classify_learning_debug_level >= 1)
        cprintf ("Increasing reliability of temp config %d to %d.\n",
                 IntResult.Config, TempConfig->NumTimesSeen);

      // Promote the temp config once it has been seen often enough.
      if (TempConfigReliable(ClassId, TempConfig)) {
        MakePermanent(AdaptedTemplates, ClassId, IntResult.Config, Blob);
        UpdateAmbigsGroup(ClassId, Blob);
      }
    }
    else {
      // Poor match: create a brand-new temporary config from this blob.
      if (classify_learning_debug_level >= 1) {
        cprintf ("Found poor match to temp config %d = %4.1f%%.\n",
                 IntResult.Config, (1.0 - IntResult.Rating) * 100.0);
        if (classify_learning_debug_level > 2)
          DisplayAdaptedChar(Blob, IClass);
      }
      NewTempConfigId = MakeNewTemporaryConfig(AdaptedTemplates,
                                               ClassId,
                                               FontinfoId,
                                               NumFeatures,
                                               IntFeatures,
                                               FloatFeatures);
      if (NewTempConfigId >= 0 &&
          TempConfigReliable(ClassId, TempConfigFor(Class, NewTempConfigId))) {
        MakePermanent(AdaptedTemplates, ClassId, NewTempConfigId, Blob);
        UpdateAmbigsGroup(ClassId, Blob);
      }

#ifndef GRAPHICS_DISABLED
      if (classify_learning_debug_level > 1) {
        DisplayAdaptedChar(Blob, IClass);
      }
#endif
    }
    FreeFeatureSet(FloatFeatures);
  }
} /* AdaptToChar */
// Shows debug output for the match of blob against int_class: prints the
// best-matching temp config, and at higher debug levels re-matches against
// only that config with the match display enabled.
void Classify::DisplayAdaptedChar(TBLOB* blob, INT_CLASS_STRUCT* int_class) {
#ifndef GRAPHICS_DISABLED
  INT_FX_RESULT_STRUCT fx_info;
  GenericVector<INT_FEATURE_STRUCT> bl_features;
  // BlobToTrainingSample allocates the returned sample; we own it and must
  // delete it before leaving (the original code leaked it).
  TrainingSample* sample =
      BlobToTrainingSample(*blob, classify_nonlinear_norm, &fx_info,
                           &bl_features);
  if (sample == NULL) return;

  INT_RESULT_STRUCT IntResult;
  // Match against all protos/configs to find the best config.
  im_.Match(int_class, AllProtosOn, AllConfigsOn,
            bl_features.size(), &bl_features[0],
            &IntResult, classify_adapt_feature_threshold,
            NO_DEBUG, matcher_debug_separate_windows);
  cprintf ("Best match to temp config %d = %4.1f%%.\n",
           IntResult.Config, (1.0 - IntResult.Rating) * 100.0);
  if (classify_learning_debug_level >= 2) {
    uinT32 ConfigMask;
    ConfigMask = 1 << IntResult.Config;
    ShowMatchDisplay();
    // Re-match against only the best config, with debug drawing enabled.
    im_.Match(int_class, AllProtosOn, (BIT_VECTOR)&ConfigMask,
              bl_features.size(), &bl_features[0],
              &IntResult, classify_adapt_feature_threshold,
              6 | 0x19, matcher_debug_separate_windows);
    UpdateMatchDisplay();
  }
  delete sample;  // Fix: was leaked.
#endif
}
/*---------------------------------------------------------------------------*/
/**
* This routine adds the result of a classification into
* Results. If the new rating is much worse than the current
* best rating, it is not entered into results because it
* would end up being stripped later anyway. If the new rating
* is better than the old rating for the class, it replaces the
* old rating. If this is the first rating for the class, the
* class is added to the list of matched classes in Results.
* If the new rating is better than the best so far, it
* becomes the best so far.
*
* Globals:
* - #matcher_bad_match_pad defines limits of an acceptable match
*
* @param[out] results results to add new result to
* @param class_id class of new result
* @param shape_id shape index
* @param rating rating of new result
* @param adapted adapted match or not
* @param config config id of new result
* @param fontinfo_id font information of the new result
* @param fontinfo_id2 font information of the 2nd choice result
*
* @note Exceptions: none
* @note History: Tue Mar 12 18:19:29 1991, DSJ, Created.
*/
void Classify::AddNewResult(ADAPT_RESULTS *results,
                            CLASS_ID class_id,
                            int shape_id,
                            FLOAT32 rating,
                            bool adapted,
                            int config,
                            int fontinfo_id,
                            int fontinfo_id2) {
  ScoredClass *existing = FindScoredUnichar(results, class_id);

  // Drop ratings that are hopelessly worse than the current best — they
  // would be stripped later anyway.
  if (rating > results->best_match.rating + matcher_bad_match_pad)
    return;
  // Drop ratings that do not improve on an earlier one for the same class.
  if (existing != NULL && rating >= existing->rating)
    return;

  ScoredClass entry =
      { class_id,
        shape_id,
        rating,
        adapted,
        static_cast<inT16>(config),
        static_cast<inT16>(fontinfo_id),
        static_cast<inT16>(fontinfo_id2) };

  if (!unicharset.get_fragment(class_id))
    results->HasNonfragment = true;

  // Either improve the existing entry's rating or record a fresh entry.
  if (existing != NULL)
    existing->rating = rating;
  else
    results->match.push_back(entry);

  if (rating < results->best_match.rating &&
      // Ensure that fragments do not affect best rating, class and config.
      // This is needed so that at least one non-fragmented character is
      // always present in the results.
      // TODO(daria): verify that this helps accuracy and does not
      // hurt performance.
      !unicharset.get_fragment(class_id)) {
    results->best_match = entry;
  }
} /* AddNewResult */
/*---------------------------------------------------------------------------*/
/**
* This routine is identical to CharNormClassifier()
* except that it does no class pruning. It simply matches
* the unknown blob against the classes listed in
* Ambiguities.
*
* Globals:
* - #AllProtosOn mask that enables all protos
* - #AllConfigsOn mask that enables all configs
*
 * @param int_features integer features of the unknown blob
 * @param fx_info feature extraction summary for the unknown blob
 * @param blob blob to be classified (used for its bounding box)
 * @param templates built-in templates to classify against
 * @param classes adapted class templates
 * @param ambiguities array of class ids to match against, terminated by a
 *        negative id
 * @param[out] results place to put match results
*
* @note Exceptions: none
* @note History: Tue Mar 12 19:40:36 1991, DSJ, Created.
*/
void Classify::AmbigClassifier(
const GenericVector<INT_FEATURE_STRUCT>& int_features,
const INT_FX_RESULT_STRUCT& fx_info,
const TBLOB *blob,
INT_TEMPLATES templates,
ADAPT_CLASS *classes,
UNICHAR_ID *ambiguities,
ADAPT_RESULTS *results) {
if (int_features.empty()) return;
uinT8* CharNormArray = new uinT8[unicharset.size()];
INT_RESULT_STRUCT IntResult;
results->BlobLength = GetCharNormFeature(fx_info, templates, NULL,
CharNormArray);
bool debug = matcher_debug_level >= 2 || classify_debug_level > 1;
if (debug)
tprintf("AM Matches = ");
int top = blob->bounding_box().top();
int bottom = blob->bounding_box().bottom();
while (*ambiguities >= 0) {
CLASS_ID class_id = *ambiguities;
im_.Match(ClassForClassId(templates, class_id),
AllProtosOn, AllConfigsOn,
int_features.size(), &int_features[0],
&IntResult,
classify_adapt_feature_threshold, NO_DEBUG,
matcher_debug_separate_windows);
ExpandShapesAndApplyCorrections(NULL, debug, class_id, bottom, top, 0,
results->BlobLength,
classify_integer_matcher_multiplier,
CharNormArray, IntResult, results);
ambiguities++;
}
delete [] CharNormArray;
} /* AmbigClassifier */
/*---------------------------------------------------------------------------*/
/// Factored-out calls to IntegerMatcher based on class pruner results.
/// Returns integer matcher results inside CLASS_PRUNER_RESULTS structure.
void Classify::MasterMatcher(INT_TEMPLATES templates,
                             inT16 num_features,
                             const INT_FEATURE_STRUCT* features,
                             const uinT8* norm_factors,
                             ADAPT_CLASS* classes,
                             int debug,
                             int matcher_multiplier,
                             const TBOX& blob_box,
                             const GenericVector<CP_RESULT_STRUCT>& results,
                             ADAPT_RESULTS* final_results) {
  int top = blob_box.top();
  int bottom = blob_box.bottom();
  // Whether the shape expansion should print per-class debug. Hoisted out of
  // the loop: it is loop-invariant, and the previous in-loop declaration
  // shadowed the 'debug' parameter that im_.Match uses as its debug flags.
  const bool expand_debug =
      matcher_debug_level >= 2 || classify_debug_level > 1;
  for (int c = 0; c < results.size(); c++) {
    CLASS_ID class_id = results[c].Class;
    INT_RESULT_STRUCT& int_result = results[c].IMResult;
    // Adapted classes restrict matching to their permanent protos/configs;
    // pre-trained classes match against everything.
    BIT_VECTOR protos = classes != NULL ? classes[class_id]->PermProtos
                                        : AllProtosOn;
    BIT_VECTOR configs = classes != NULL ? classes[class_id]->PermConfigs
                                         : AllConfigsOn;

    im_.Match(ClassForClassId(templates, class_id),
              protos, configs,
              num_features, features,
              &int_result, classify_adapt_feature_threshold, debug,
              matcher_debug_separate_windows);
    ExpandShapesAndApplyCorrections(classes, expand_debug, class_id, bottom,
                                    top, results[c].Rating,
                                    final_results->BlobLength,
                                    matcher_multiplier, norm_factors,
                                    int_result, final_results);
  }
}
// Converts configs to fonts, and if the result is not adapted, and a
// shape_table_ is present, the shape is expanded to include all
// unichar_ids represented, before applying a set of corrections to the
// distance rating in int_result, (see ComputeCorrectedRating.)
// The results are added to the final_results output.
void Classify::ExpandShapesAndApplyCorrections(
    ADAPT_CLASS* classes, bool debug, int class_id, int bottom, int top,
    float cp_rating, int blob_length, int matcher_multiplier,
    const uinT8* cn_factors,
    INT_RESULT_STRUCT& int_result, ADAPT_RESULTS* final_results) {
  // Compute the fontinfo_ids.
  int fontinfo_id = kBlankFontinfoId;
  int fontinfo_id2 = kBlankFontinfoId;
  if (classes != NULL) {
    // Adapted result.
    fontinfo_id = GetFontinfoId(classes[class_id], int_result.Config);
    fontinfo_id2 = GetFontinfoId(classes[class_id], int_result.Config2);
  } else {
    // Pre-trained result.
    fontinfo_id = ClassAndConfigIDToFontOrShapeID(class_id, int_result.Config);
    fontinfo_id2 = ClassAndConfigIDToFontOrShapeID(class_id,
                                                   int_result.Config2);
    if (shape_table_ != NULL) {
      // Actually fontinfo_id is an index into the shape_table_ and it
      // contains a list of unchar_id/font_id pairs.
      int shape_id = fontinfo_id;
      const Shape& shape = shape_table_->GetShape(fontinfo_id);
      double min_rating = 0.0;
      // Emit one result per unichar in the shape, each with its own
      // corrected rating; the fonts come from the shape entry itself.
      for (int c = 0; c < shape.size(); ++c) {
        int unichar_id = shape[c].unichar_id;
        fontinfo_id = shape[c].font_ids[0];
        if (shape[c].font_ids.size() > 1)
          fontinfo_id2 = shape[c].font_ids[1];
        else if (fontinfo_id2 != kBlankFontinfoId)
          // Fall back to the top font of the 2nd-choice shape.
          fontinfo_id2 = shape_table_->GetShape(fontinfo_id2)[0].font_ids[0];
        double rating = ComputeCorrectedRating(debug, unichar_id, cp_rating,
                                               int_result.Rating,
                                               int_result.FeatureMisses,
                                               bottom, top, blob_length,
                                               matcher_multiplier, cn_factors);
        // Track the best (lowest) corrected rating across the shape.
        if (c == 0 || rating < min_rating)
          min_rating = rating;
        if (unicharset.get_enabled(unichar_id)) {
          AddNewResult(final_results, unichar_id, shape_id, rating,
                       classes != NULL, int_result.Config,
                       fontinfo_id, fontinfo_id2);
        }
      }
      int_result.Rating = min_rating;
      return;
    }
  }
  // Adapted result or no shape table: record the class directly (-1 means
  // no shape id).
  double rating = ComputeCorrectedRating(debug, class_id, cp_rating,
                                         int_result.Rating,
                                         int_result.FeatureMisses,
                                         bottom, top, blob_length,
                                         matcher_multiplier, cn_factors);
  if (unicharset.get_enabled(class_id)) {
    AddNewResult(final_results, class_id, -1, rating,
                 classes != NULL, int_result.Config,
                 fontinfo_id, fontinfo_id2);
  }
  int_result.Rating = rating;
}
// Applies a set of corrections to the distance im_rating,
// including the cn_correction, miss penalty and additional penalty
// for non-alnums being vertical misfits. Returns the corrected distance.
double Classify::ComputeCorrectedRating(bool debug, int unichar_id,
                                        double cp_rating, double im_rating,
                                        int feature_misses,
                                        int bottom, int top,
                                        int blob_length, int matcher_multiplier,
                                        const uinT8* cn_factors) {
  // Character-normalization correction of the integer-matcher distance.
  const double cn_corrected = im_.ApplyCNCorrection(
      im_rating, blob_length, cn_factors[unichar_id], matcher_multiplier);
  // Penalty for blob features that matched nothing in the template.
  const double miss_penalty = tessedit_class_miss_scale * feature_misses;
  double vertical_penalty = 0.0;
  // Penalize non-alnums whose top/bottom fall outside the vertical range
  // recorded for the class in the unicharset.
  const bool is_alnum = unicharset.get_isalpha(unichar_id) ||
                        unicharset.get_isdigit(unichar_id);
  if (!is_alnum && cn_factors[unichar_id] != 0 &&
      classify_misfit_junk_penalty > 0.0) {
    int min_bottom, max_bottom, min_top, max_top;
    unicharset.get_top_bottom(unichar_id, &min_bottom, &max_bottom,
                              &min_top, &max_top);
    if (debug) {
      tprintf("top=%d, vs [%d, %d], bottom=%d, vs [%d, %d]\n",
              top, min_top, max_top, bottom, min_bottom, max_bottom);
    }
    const bool misfit = top < min_top || top > max_top ||
                        bottom < min_bottom || bottom > max_bottom;
    if (misfit)
      vertical_penalty = classify_misfit_junk_penalty;
  }
  // Combine and clamp to the worst representable rating.
  double result = cn_corrected + miss_penalty + vertical_penalty;
  if (result > WORST_POSSIBLE_RATING)
    result = WORST_POSSIBLE_RATING;
  if (debug) {
    tprintf("%s: %2.1f(CP%2.1f, IM%2.1f + CN%.2f(%d) + MP%2.1f + VP%2.1f)\n",
            unicharset.id_to_unichar(unichar_id),
            result * 100.0,
            cp_rating * 100.0,
            im_rating * 100.0,
            (cn_corrected - im_rating) * 100.0,
            cn_factors[unichar_id],
            miss_penalty * 100.0,
            vertical_penalty * 100.0);
  }
  return result;
}
/*---------------------------------------------------------------------------*/
/**
 * This routine extracts baseline normalized features
 * from the unknown character and matches them against the
 * specified set of templates. The classes which match
 * are added to Results.
 *
 * Globals:
 * - BaselineCutoffs expected num features for each class
 *
 * @param Blob blob to be classified
 * @param int_features baseline-normalized int features extracted from Blob
 * @param fx_info feature-extraction summary (length etc.) for Blob
 * @param Templates current set of adapted templates
 * @param Results place to put match results
 *
 * @return Array of possible ambiguous chars that should be checked.
 * @note Exceptions: none
 * @note History: Tue Mar 12 19:38:03 1991, DSJ, Created.
 */
UNICHAR_ID *Classify::BaselineClassifier(
    TBLOB *Blob, const GenericVector<INT_FEATURE_STRUCT>& int_features,
    const INT_FX_RESULT_STRUCT& fx_info,
    ADAPT_TEMPLATES Templates, ADAPT_RESULTS *Results) {
  if (int_features.empty()) return NULL;
  // Baseline matching uses a cleared char-norm array (no CN correction).
  uinT8* CharNormArray = new uinT8[unicharset.size()];
  ClearCharNormArray(CharNormArray);
  Results->BlobLength = IntCastRounded(fx_info.Length / kStandardFeatureLength);
  PruneClasses(Templates->Templates, int_features.size(), &int_features[0],
               CharNormArray, BaselineCutoffs, &Results->CPResults);
  if (matcher_debug_level >= 2 || classify_debug_level > 1)
    cprintf ("BL Matches =  ");
  MasterMatcher(Templates->Templates, int_features.size(), &int_features[0],
                CharNormArray,
                Templates->Class, matcher_debug_flags, 0,
                Blob->bounding_box(), Results->CPResults, Results);
  delete [] CharNormArray;
  CLASS_ID ClassId = Results->best_match.unichar_id;
  if (ClassId == NO_CLASS)
    return (NULL);
  /* this is a bug - maybe should return "" */
  // Return the ambiguity list stored with the best match's config.
  // NOTE(review): this reads the Perm side of the ADAPTED_CONFIG union;
  // it looks like it assumes the best config is permanent -- verify that a
  // temporary config cannot reach this point.
  return Templates->Class[ClassId]->
      Config[Results->best_match.config].Perm->Ambigs;
} /* BaselineClassifier */
/*---------------------------------------------------------------------------*/
/**
 * This routine extracts character normalized features
 * from the unknown character and matches them against the
 * pre-trained templates via the static classifier. The classes
 * which match are added to Results.
 *
 * @param blob blob to be classified
 * @param sample training-sample representation of blob's features
 * @param adapt_results place to put match results
 *
 * @return Number of features in the sample.
 * @note Exceptions: none
 * @note History: Tue Mar 12 16:02:52 1991, DSJ, Created.
 */
int Classify::CharNormClassifier(TBLOB *blob,
                                 const TrainingSample& sample,
                                 ADAPT_RESULTS *adapt_results) {
  // This is the length that is used for scaling ratings vs certainty.
  adapt_results->BlobLength =
      IntCastRounded(sample.outline_length() / kStandardFeatureLength);
  GenericVector<UnicharRating> unichar_results;
  static_classifier_->UnicharClassifySample(sample, blob->denorm().pix(), 0,
                                            -1, &unichar_results);
  // Convert results to the format used internally by AdaptiveClassifier.
  for (int i = 0; i < unichar_results.size(); ++i) {
    const UnicharRating& result = unichar_results[i];
    // Fonts are listed in order of preference; fall back to the blank id
    // when fewer than two fonts are available.
    int best_font = kBlankFontinfoId;
    int second_font = kBlankFontinfoId;
    if (result.fonts.size() >= 1) {
      best_font = result.fonts[0];
      if (result.fonts.size() >= 2)
        second_font = result.fonts[1];
    }
    // Internal ratings are distances: invert the classifier's score.
    float distance = 1.0f - result.rating;
    AddNewResult(adapt_results, result.unichar_id, -1, distance, false, 0,
                 best_font, second_font);
  }
  return sample.num_features();
} /* CharNormClassifier */
// As CharNormClassifier, but operates on a TrainingSample and outputs to
// a GenericVector of ShapeRating without conversion to classes.
//
// If pruner_only is true, only the class pruner runs and its raw candidates
// are returned; otherwise the integer matcher is applied to the pruner's
// candidates. If keep_this >= 0, the candidate list is forced down to just
// that class. Returns the number of features in the sample.
int Classify::CharNormTrainingSample(bool pruner_only,
                                     int keep_this,
                                     const TrainingSample& sample,
                                     GenericVector<UnicharRating>* results) {
  results->clear();
  ADAPT_RESULTS* adapt_results = new ADAPT_RESULTS();
  adapt_results->Initialize();
  // Compute the bounding box of the features.
  int num_features = sample.num_features();
  // Only the top and bottom of the blob_box are used by MasterMatcher, so
  // fabricate right and left using top and bottom.
  TBOX blob_box(sample.geo_feature(GeoBottom), sample.geo_feature(GeoBottom),
                sample.geo_feature(GeoTop), sample.geo_feature(GeoTop));
  // Compute the char_norm_array from the saved cn_feature.
  FEATURE norm_feature = sample.GetCNFeature();
  uinT8* char_norm_array = new uinT8[unicharset.size()];
  // The pruner array must be big enough for either indexing scheme:
  // unichar ids (no shape table) or template class ids (with shape table).
  int num_pruner_classes = MAX(unicharset.size(),
                               PreTrainedTemplates->NumClasses);
  uinT8* pruner_norm_array = new uinT8[num_pruner_classes];
  adapt_results->BlobLength =
      static_cast<int>(ActualOutlineLength(norm_feature) * 20 + 0.5);
  // ComputeCharNormArrays takes ownership of norm_feature and frees it.
  ComputeCharNormArrays(norm_feature, PreTrainedTemplates, char_norm_array,
                        pruner_norm_array);
  PruneClasses(PreTrainedTemplates, num_features, sample.features(),
               pruner_norm_array,
               shape_table_ != NULL ? &shapetable_cutoffs_[0] : CharNormCutoffs,
               &adapt_results->CPResults);
  delete [] pruner_norm_array;
  if (keep_this >= 0) {
    // Restrict the candidates to the single requested class.
    adapt_results->CPResults[0].Class = keep_this;
    adapt_results->CPResults.truncate(1);
  }
  if (pruner_only) {
    // Convert pruner results to output format.
    for (int i = 0; i < adapt_results->CPResults.size(); ++i) {
      int class_id = adapt_results->CPResults[i].Class;
      results->push_back(
          UnicharRating(class_id, 1.0f - adapt_results->CPResults[i].Rating));
    }
  } else {
    MasterMatcher(PreTrainedTemplates, num_features, sample.features(),
                  char_norm_array,
                  NULL, matcher_debug_flags,
                  classify_integer_matcher_multiplier,
                  blob_box, adapt_results->CPResults, adapt_results);
    // Convert master matcher results to output format.
    // Internal ratings are distances, so 1 - rating converts back to a score.
    for (int i = 0; i < adapt_results->match.size(); i++) {
      ScoredClass next = adapt_results->match[i];
      UnicharRating rating(next.unichar_id, 1.0f - next.rating);
      if (next.fontinfo_id >= 0) {
        rating.fonts.push_back(next.fontinfo_id);
        if (next.fontinfo_id2 >= 0)
          rating.fonts.push_back(next.fontinfo_id2);
      }
      results->push_back(rating);
    }
    results->sort(&UnicharRating::SortDescendingRating);
  }
  delete [] char_norm_array;
  delete adapt_results;
  return num_features;
} /* CharNormTrainingSample */
/*---------------------------------------------------------------------------*/
/**
 * This routine computes a rating which reflects the
 * likelihood that the blob being classified is a noise
 * blob, and adds it to Results as a NO_CLASS entry.
 * NOTE: assumes that the blob length has already been
 * computed and placed into Results.
 *
 * @param Results results to add noise classification to
 *
 * Globals:
 * - matcher_avg_noise_size avg. length of a noise blob
 *
 * @note Exceptions: none
 * @note History: Tue Mar 12 18:36:52 1991, DSJ, Created.
 */
void Classify::ClassifyAsNoise(ADAPT_RESULTS *Results) {
  // NOTE: the deprecated 'register' storage specifier was removed; it is a
  // no-op on modern compilers and ill-formed in C++17.
  FLOAT32 Rating = Results->BlobLength / matcher_avg_noise_size;
  // Squash the size ratio into [0, 1): blobs much larger than the average
  // noise blob get a rating near 1 (a poor match to "noise"), tiny blobs
  // get a rating near 0.
  Rating *= Rating;
  Rating /= 1.0 + Rating;
  AddNewResult(Results, NO_CLASS, -1, Rating, false, -1,
               kBlankFontinfoId, kBlankFontinfoId);
} /* ClassifyAsNoise */
} // namespace tesseract
/*---------------------------------------------------------------------------*/
// Return a pointer to the scored unichar in results, or NULL if not present.
ScoredClass *FindScoredUnichar(ADAPT_RESULTS *results, UNICHAR_ID id) {
  const int num_matches = results->match.size();
  for (int index = 0; index < num_matches; ++index) {
    ScoredClass *candidate = &results->match[index];
    if (candidate->unichar_id == id)
      return candidate;
  }
  return NULL;  // id was never rated.
}
// Retrieve the current rating for a unichar id if we have rated it, defaulting
// to WORST_POSSIBLE_RATING.
ScoredClass ScoredUnichar(ADAPT_RESULTS *results, UNICHAR_ID id) {
  ScoredClass *entry = FindScoredUnichar(results, id);
  if (entry != NULL)
    return *entry;
  // Not rated yet: synthesize a worst-possible placeholder for this id.
  ScoredClass unrated =
      {id, -1, WORST_POSSIBLE_RATING, false, -1,
       kBlankFontinfoId, kBlankFontinfoId};
  return unrated;
}
// Compare character classes by rating as for qsort(3).
// For repeatability, use character class id as a tie-breaker.
int CompareByRating(const void *arg1, // ScoredClass *class1
const void *arg2) { // ScoredClass *class2
const ScoredClass *class1 = (const ScoredClass *)arg1;
const ScoredClass *class2 = (const ScoredClass *)arg2;
if (class1->rating < class2->rating)
return -1;
else if (class1->rating > class2->rating)
return 1;
if (class1->unichar_id < class2->unichar_id)
return -1;
else if (class1->unichar_id > class2->unichar_id)
return 1;
return 0;
}
/*---------------------------------------------------------------------------*/
namespace tesseract {
/// The function converts the given match ratings to the list of blob
/// choices with ratings and certainties (used by the context checkers).
/// If character fragments are present in the results, this function also makes
/// sure that there is at least one non-fragmented classification included.
/// For each classification result check the unicharset for "definite"
/// ambiguities and modify the resulting Choices accordingly.
void Classify::ConvertMatchesToChoices(const DENORM& denorm, const TBOX& box,
                                       ADAPT_RESULTS *Results,
                                       BLOB_CHOICE_LIST *Choices) {
  assert(Choices != NULL);
  FLOAT32 Rating;
  FLOAT32 Certainty;
  BLOB_CHOICE_IT temp_it;
  bool contains_nonfrag = false;
  temp_it.set_to_list(Choices);
  int choices_length = 0;
  // With no shape_table_ maintain the previous MAX_MATCHES as the maximum
  // number of returned results, but with a shape_table_ we want to have room
  // for at least the biggest shape (which might contain hundreds of Indic
  // grapheme fragments) and more, so use double the size of the biggest shape
  // if that is more than the default.
  int max_matches = MAX_MATCHES;
  if (shape_table_ != NULL) {
    max_matches = shape_table_->MaxNumUnichars() * 2;
    if (max_matches < MAX_MATCHES)
      max_matches = MAX_MATCHES;
  }
  float best_certainty = -MAX_FLOAT32;
  for (int i = 0; i < Results->match.size(); i++) {
    ScoredClass next = Results->match[i];
    int fontinfo_id = next.fontinfo_id;
    int fontinfo_id2 = next.fontinfo_id2;
    bool adapted = next.adapted;
    bool current_is_frag = (unicharset.get_fragment(next.unichar_id) != NULL);
    if (temp_it.length()+1 == max_matches &&
        !contains_nonfrag && current_is_frag) {
      continue;  // look for a non-fragmented character to fill the
                 // last spot in Choices if only fragments are present
    }
    // BlobLength can never be legally 0, this means recognition failed.
    // But we must return a classification result because some invoking
    // functions (chopper/permuter) do not anticipate a null blob choice.
    // So we need to assign a poor, but not infinitely bad score.
    if (Results->BlobLength == 0) {
      Certainty = -20;
      Rating = 100;  // should be -certainty * real_blob_length
    } else {
      // Internal rating doubles as both quantities: scale one into a
      // length-weighted rating and the other into a negative certainty.
      Rating = Certainty = next.rating;
      Rating *= rating_scale * Results->BlobLength;
      Certainty *= -(getDict().certainty_scale);
    }
    // Adapted results, by their very nature, should have good certainty.
    // Those that don't are at best misleading, and often lead to errors,
    // so don't accept adapted results that are too far behind the best result,
    // whether adapted or static.
    // TODO(rays) find some way of automatically tuning these constants.
    if (Certainty > best_certainty) {
      best_certainty = MIN(Certainty, classify_adapted_pruning_threshold);
    } else if (adapted &&
               Certainty / classify_adapted_pruning_factor < best_certainty) {
      continue;  // Don't accept bad adapted results.
    }
    // Compute the xheight range this unichar would be consistent with.
    float min_xheight, max_xheight, yshift;
    denorm.XHeightRange(next.unichar_id, unicharset, box,
                        &min_xheight, &max_xheight, &yshift);
    temp_it.add_to_end(new BLOB_CHOICE(next.unichar_id, Rating, Certainty,
                                       fontinfo_id, fontinfo_id2,
                                       unicharset.get_script(next.unichar_id),
                                       min_xheight, max_xheight, yshift,
                                       adapted ? BCC_ADAPTED_CLASSIFIER
                                               : BCC_STATIC_CLASSIFIER));
    contains_nonfrag |= !current_is_frag;  // update contains_nonfrag
    choices_length++;
    if (choices_length >= max_matches) break;
  }
  // Trim the match list to the choices actually emitted.
  Results->match.truncate(choices_length);
}  // ConvertMatchesToChoices
/*---------------------------------------------------------------------------*/
#ifndef GRAPHICS_DISABLED
/**
 * Displays debug information for the best match of the given blob.
 *
 * @param blob blob whose classification is being debugged
 * @param Results results of match being debugged
 *
 * Globals: none
 *
 * @note Exceptions: none
 * @note History: Wed Mar 13 16:44:41 1991, DSJ, Created.
 */
void Classify::DebugAdaptiveClassifier(TBLOB *blob,
                                       ADAPT_RESULTS *Results) {
  if (static_classifier_ == NULL) return;
  // Recompute best_match as the lowest-rating (best) entry in the match list.
  for (int i = 0; i < Results->match.size(); i++) {
    if (i == 0 || Results->match[i].rating < Results->best_match.rating)
      Results->best_match = Results->match[i];
  }
  INT_FX_RESULT_STRUCT fx_info;
  GenericVector<INT_FEATURE_STRUCT> bl_features;
  TrainingSample* sample =
      BlobToTrainingSample(*blob, false, &fx_info, &bl_features);
  if (sample == NULL) return;
  static_classifier_->DebugDisplay(*sample, blob->denorm().pix(),
                                   Results->best_match.unichar_id);
  // BlobToTrainingSample allocates the sample and other callers in this file
  // (DoAdaptiveMatch, GetAmbiguities) delete it after use; do the same here
  // to fix a memory leak.
  delete sample;
} /* DebugAdaptiveClassifier */
#endif
/*---------------------------------------------------------------------------*/
/**
 * This routine performs an adaptive classification.
 * If we have not yet adapted to enough classes, a simple
 * classification to the pre-trained templates is performed.
 * Otherwise, we match the blob against the adapted templates.
 * If the adapted templates do not match well, we try a
 * match against the pre-trained templates. If an adapted
 * template match is found, we do a match to any pre-trained
 * templates which could be ambiguous. The results from all
 * of these classifications are merged together into Results.
 *
 * @param Blob blob to be classified
 * @param Results place to put match results
 *
 * Globals:
 * - PreTrainedTemplates built-in training templates
 * - AdaptedTemplates templates adapted for this page
 * - matcher_great_threshold rating limit for a great match
 *
 * @note Exceptions: none
 * @note History: Tue Mar 12 08:50:11 1991, DSJ, Created.
 */
void Classify::DoAdaptiveMatch(TBLOB *Blob, ADAPT_RESULTS *Results) {
  UNICHAR_ID *Ambiguities;
  INT_FX_RESULT_STRUCT fx_info;
  GenericVector<INT_FEATURE_STRUCT> bl_features;
  TrainingSample* sample =
      BlobToTrainingSample(*Blob, classify_nonlinear_norm, &fx_info,
                           &bl_features);
  if (sample == NULL) return;
  // Until enough classes have become permanent (or when char-norm-only
  // matching is forced), rely solely on the pre-trained classifier.
  if (AdaptedTemplates->NumPermClasses < matcher_permanent_classes_min ||
      tess_cn_matching) {
    CharNormClassifier(Blob, *sample, Results);
  } else {
    Ambiguities = BaselineClassifier(Blob, bl_features, fx_info,
                                     AdaptedTemplates, Results);
    // Fall back to the static classifier if the adapted result is marginal
    // (unless baseline-only matching is forced) or empty.
    if ((!Results->match.empty() && MarginalMatch(Results->best_match.rating) &&
        !tess_bn_matching) ||
        Results->match.empty()) {
      CharNormClassifier(Blob, *sample, Results);
    } else if (Ambiguities && *Ambiguities >= 0 && !tess_bn_matching) {
      // Good adapted match: verify only its known ambiguities against the
      // pre-trained templates.
      AmbigClassifier(bl_features, fx_info, Blob,
                      PreTrainedTemplates,
                      AdaptedTemplates->Class,
                      Ambiguities,
                      Results);
    }
  }
  // Force the blob to be classified as noise
  // if the results contain only fragments.
  // TODO(daria): verify that this is better than
  // just adding a NULL classification.
  if (!Results->HasNonfragment || Results->match.empty())
    ClassifyAsNoise(Results);
  delete sample;
} /* DoAdaptiveMatch */
/*---------------------------------------------------------------------------*/
/**
 * This routine matches blob to the built-in templates
 * to find out if there are any classes other than the correct
 * class which are potential ambiguities.
 *
 * @param Blob blob to get classification ambiguities for
 * @param CorrectClass correct class for Blob
 *
 * Globals:
 * - CurrentRatings used by qsort compare routine
 * - PreTrainedTemplates built-in templates
 *
 * @return Newly allocated, -1 terminated array of ambiguous class ids;
 * the caller owns (and must delete[]) the returned array.
 * @note Exceptions: none
 * @note History: Fri Mar 15 08:08:22 1991, DSJ, Created.
 */
UNICHAR_ID *Classify::GetAmbiguities(TBLOB *Blob,
                                     CLASS_ID CorrectClass) {
  ADAPT_RESULTS *Results = new ADAPT_RESULTS();
  UNICHAR_ID *Ambiguities;
  int i;

  Results->Initialize();
  INT_FX_RESULT_STRUCT fx_info;
  GenericVector<INT_FEATURE_STRUCT> bl_features;
  TrainingSample* sample =
      BlobToTrainingSample(*Blob, classify_nonlinear_norm, &fx_info,
                           &bl_features);
  if (sample == NULL) {
    delete Results;
    return NULL;
  }

  // Classify against the pre-trained templates only.
  CharNormClassifier(Blob, *sample, Results);
  delete sample;
  RemoveBadMatches(Results);
  Results->match.sort(CompareByRating);

  /* copy the class id's into an string of ambiguities - don't copy if
     the correct class is the only class id matched */
  Ambiguities = new UNICHAR_ID[Results->match.size() + 1];
  if (Results->match.size() > 1 ||
      (Results->match.size() == 1 &&
          Results->match[0].unichar_id != CorrectClass)) {
    for (i = 0; i < Results->match.size(); i++)
      Ambiguities[i] = Results->match[i].unichar_id;
    Ambiguities[i] = -1;  // -1 terminates the list.
  } else {
    Ambiguities[0] = -1;  // Empty list: only the correct class matched.
  }
  delete Results;
  return Ambiguities;
} /* GetAmbiguities */
// Returns true if the given blob looks too dissimilar to any character
// present in the classifier templates.
bool Classify::LooksLikeGarbage(TBLOB *blob) {
  BLOB_CHOICE_LIST *ratings = new BLOB_CHOICE_LIST();
  AdaptiveClassifier(blob, ratings);
  const UNICHARSET &unicharset = getDict().getUnicharset();
  if (classify_debug_character_fragments) {
    print_ratings_list("======================\nLooksLikeGarbage() got ",
                       ratings, unicharset);
  }
  // Garbage unless the first whole (non-fragment) character in the ratings
  // is confident enough.
  bool is_garbage = true;
  BLOB_CHOICE_IT choice_it(ratings);
  for (choice_it.mark_cycle_pt(); !choice_it.cycled_list();
       choice_it.forward()) {
    // Skip fragment classifications; only whole characters count.
    if (unicharset.get_fragment(choice_it.data()->unichar_id()) != NULL)
      continue;
    // The first whole character decides the outcome.
    is_garbage = choice_it.data()->certainty() <
        classify_character_fragments_garbage_certainty_threshold;
    break;
  }
  delete ratings;
  return is_garbage;
}
/*---------------------------------------------------------------------------*/
/**
* This routine calls the integer (Hardware) feature
* extractor if it has not been called before for this blob.
*
* The results from the feature extractor are placed into
* globals so that they can be used in other routines without
* re-extracting the features.
*
* It then copies the char norm features into the IntFeatures
* array provided by the caller.
*
* @param Blob blob to extract features from
* @param Templates used to compute char norm adjustments
* @param IntFeatures array to fill with integer features
* @param PrunerNormArray Array of factors from blob normalization
* process
* @param CharNormArray array to fill with dummy char norm adjustments
* @param BlobLength length of blob in baseline-normalized units
*
* Globals:
*
* @return Number of features extracted or 0 if an error occured.
* @note Exceptions: none
* @note History: Tue May 28 10:40:52 1991, DSJ, Created.
*/
int Classify::GetCharNormFeature(const INT_FX_RESULT_STRUCT& fx_info,
INT_TEMPLATES templates,
uinT8* pruner_norm_array,
uinT8* char_norm_array) {
FEATURE norm_feature = NewFeature(&CharNormDesc);
float baseline = kBlnBaselineOffset;
float scale = MF_SCALE_FACTOR;
norm_feature->Params[CharNormY] = (fx_info.Ymean - baseline) * scale;
norm_feature->Params[CharNormLength] =
fx_info.Length * scale / LENGTH_COMPRESSION;
norm_feature->Params[CharNormRx] = fx_info.Rx * scale;
norm_feature->Params[CharNormRy] = fx_info.Ry * scale;
// Deletes norm_feature.
ComputeCharNormArrays(norm_feature, templates, char_norm_array,
pruner_norm_array);
return IntCastRounded(fx_info.Length / kStandardFeatureLength);
} /* GetCharNormFeature */
// Computes the char_norm_array for the unicharset and, if not NULL, the
// pruner_array as appropriate according to the existence of the shape_table.
// Takes ownership of norm_feature and frees it before returning.
void Classify::ComputeCharNormArrays(FEATURE_STRUCT* norm_feature,
                                     INT_TEMPLATES_STRUCT* templates,
                                     uinT8* char_norm_array,
                                     uinT8* pruner_array) {
  ComputeIntCharNormArray(*norm_feature, char_norm_array);
  if (pruner_array != NULL) {
    if (shape_table_ == NULL) {
      // Without a shape table, pruner classes are unichars, so the pruner
      // array is computed exactly like the char norm array.
      ComputeIntCharNormArray(*norm_feature, pruner_array);
    } else {
      // Initialize to the worst value so the MIN loop below can only improve.
      memset(pruner_array, MAX_UINT8,
             templates->NumClasses * sizeof(pruner_array[0]));
      // Each entry in the pruner norm array is the MIN of all the entries of
      // the corresponding unichars in the CharNormArray.
      for (int id = 0; id < templates->NumClasses; ++id) {
        int font_set_id = templates->Class[id]->font_set_id;
        const FontSet &fs = fontset_table_.get(font_set_id);
        for (int config = 0; config < fs.size; ++config) {
          const Shape& shape = shape_table_->GetShape(fs.configs[config]);
          for (int c = 0; c < shape.size(); ++c) {
            if (char_norm_array[shape[c].unichar_id] < pruner_array[id])
              pruner_array[id] = char_norm_array[shape[c].unichar_id];
          }
        }
      }
    }
  }
  FreeFeature(norm_feature);
}
/*---------------------------------------------------------------------------*/
/**
 * Creates a new temporary config in the adapted class for ClassId, built
 * from the protos that match the given features plus new temp protos
 * covering the unmatched (bad) features.
 *
 * @param Templates adapted templates to add new config to
 * @param ClassId class id to associate with new config
 * @param FontinfoId font information inferred from pre-trained templates
 * @param NumFeatures number of features in IntFeatures
 * @param Features features describing model for new config
 * @param FloatFeatures floating-pt representation of features
 *
 * @return The id of the new config created, a negative integer in
 * case of error.
 * @note Exceptions: none
 * @note History: Fri Mar 15 08:49:46 1991, DSJ, Created.
 */
int Classify::MakeNewTemporaryConfig(ADAPT_TEMPLATES Templates,
                                     CLASS_ID ClassId,
                                     int FontinfoId,
                                     int NumFeatures,
                                     INT_FEATURE_ARRAY Features,
                                     FEATURE_SET FloatFeatures) {
  INT_CLASS IClass;
  ADAPT_CLASS Class;
  PROTO_ID OldProtos[MAX_NUM_PROTOS];
  FEATURE_ID BadFeatures[MAX_NUM_INT_FEATURES];
  int NumOldProtos;
  int NumBadFeatures;
  int MaxProtoId, OldMaxProtoId;
  int BlobLength = 0;
  int MaskSize;
  int ConfigId;
  TEMP_CONFIG Config;
  int i;
  int debug_level = NO_DEBUG;

  if (classify_learning_debug_level >= 3)
    debug_level =
        PRINT_MATCH_SUMMARY | PRINT_FEATURE_MATCHES | PRINT_PROTO_MATCHES;

  IClass = ClassForClassId(Templates->Templates, ClassId);
  Class = Templates->Class[ClassId];

  // Give up if the class already holds the maximum number of configs.
  if (IClass->NumConfigs >= MAX_NUM_CONFIGS) {
    ++NumAdaptationsFailed;
    if (classify_learning_debug_level >= 1)
      cprintf("Cannot make new temporary config: maximum number exceeded.\n");
    return -1;
  }

  // Find existing protos that match the features well.
  OldMaxProtoId = IClass->NumProtos - 1;
  NumOldProtos = im_.FindGoodProtos(IClass, AllProtosOn, AllConfigsOff,
                                    BlobLength, NumFeatures, Features,
                                    OldProtos, classify_adapt_proto_threshold,
                                    debug_level);

  // Build a proto mask containing the good old protos.
  MaskSize = WordsInVectorOfSize(MAX_NUM_PROTOS);
  zero_all_bits(TempProtoMask, MaskSize);
  for (i = 0; i < NumOldProtos; i++)
    SET_BIT(TempProtoMask, OldProtos[i]);

  // Features that the good protos do not cover need new temp protos.
  NumBadFeatures = im_.FindBadFeatures(IClass, TempProtoMask, AllConfigsOn,
                                       BlobLength, NumFeatures, Features,
                                       BadFeatures,
                                       classify_adapt_feature_threshold,
                                       debug_level);

  MaxProtoId = MakeNewTempProtos(FloatFeatures, NumBadFeatures, BadFeatures,
                                 IClass, Class, TempProtoMask);
  if (MaxProtoId == NO_PROTO) {
    ++NumAdaptationsFailed;
    if (classify_learning_debug_level >= 1)
      cprintf("Cannot make new temp protos: maximum number exceeded.\n");
    return -1;
  }

  // Record the new config: its proto set is the mask built above.
  ConfigId = AddIntConfig(IClass);
  ConvertConfig(TempProtoMask, ConfigId, IClass);
  Config = NewTempConfig(MaxProtoId, FontinfoId);
  TempConfigFor(Class, ConfigId) = Config;
  copy_all_bits(TempProtoMask, Config->Protos, Config->ProtoVectorSize);

  if (classify_learning_debug_level >= 1)
    cprintf("Making new temp config %d fontinfo id %d"
            " using %d old and %d new protos.\n",
            ConfigId, Config->FontinfoId,
            NumOldProtos, MaxProtoId - OldMaxProtoId);

  return ConfigId;
} /* MakeNewTemporaryConfig */
/*---------------------------------------------------------------------------*/
/**
 * This routine finds sets of sequential bad features
 * that all have the same angle and converts each set into
 * a new temporary proto. The temp proto is added to the
 * proto pruner for IClass, pushed onto the list of temp
 * protos in Class, and added to TempProtoMask.
 *
 * @param Features floating-pt features describing new character
 * @param NumBadFeat number of bad features to turn into protos
 * @param BadFeat feature id's of bad features
 * @param IClass integer class templates to add new protos to
 * @param Class adapted class templates to add new protos to
 * @param TempProtoMask proto mask to add new protos to
 *
 * Globals: none
 *
 * @return Max proto id in class after all protos have been added.
 * Exceptions: none
 * History: Fri Mar 15 11:39:38 1991, DSJ, Created.
 */
PROTO_ID Classify::MakeNewTempProtos(FEATURE_SET Features,
                                     int NumBadFeat,
                                     FEATURE_ID BadFeat[],
                                     INT_CLASS IClass,
                                     ADAPT_CLASS Class,
                                     BIT_VECTOR TempProtoMask) {
  FEATURE_ID *ProtoStart;
  FEATURE_ID *ProtoEnd;
  FEATURE_ID *LastBad;
  TEMP_PROTO TempProto;
  PROTO Proto;
  FEATURE F1, F2;
  FLOAT32 X1, X2, Y1, Y2;
  FLOAT32 A1, A2, AngleDelta;
  FLOAT32 SegmentLength;
  PROTO_ID Pid;

  // Walk the bad-feature list, greedily extending each run of features that
  // stay collinear enough (similar angle, close position) into one proto.
  for (ProtoStart = BadFeat, LastBad = ProtoStart + NumBadFeat;
       ProtoStart < LastBad; ProtoStart = ProtoEnd) {
    F1 = Features->Features[*ProtoStart];
    X1 = F1->Params[PicoFeatX];
    Y1 = F1->Params[PicoFeatY];
    A1 = F1->Params[PicoFeatDir];

    for (ProtoEnd = ProtoStart + 1,
         SegmentLength = GetPicoFeatureLength();
         ProtoEnd < LastBad;
         ProtoEnd++, SegmentLength += GetPicoFeatureLength()) {
      F2 = Features->Features[*ProtoEnd];
      X2 = F2->Params[PicoFeatX];
      Y2 = F2->Params[PicoFeatY];
      A2 = F2->Params[PicoFeatDir];

      // Directions wrap around at 1.0, so measure the shorter arc.
      AngleDelta = fabs(A1 - A2);
      if (AngleDelta > 0.5)
        AngleDelta = 1.0 - AngleDelta;

      // Stop extending when the direction turns or the feature strays
      // further than the accumulated segment length.
      if (AngleDelta > matcher_clustering_max_angle_delta ||
          fabs(X1 - X2) > SegmentLength ||
          fabs(Y1 - Y2) > SegmentLength)
        break;
    }

    // Use the last feature actually included in the run as the endpoint.
    F2 = Features->Features[*(ProtoEnd - 1)];
    X2 = F2->Params[PicoFeatX];
    Y2 = F2->Params[PicoFeatY];
    A2 = F2->Params[PicoFeatDir];

    Pid = AddIntProto(IClass);
    if (Pid == NO_PROTO)
      return (NO_PROTO);

    TempProto = NewTempProto();
    Proto = &(TempProto->Proto);

    /* compute proto params - NOTE that Y_DIM_OFFSET must be used because
       ConvertProto assumes that the Y dimension varies from -0.5 to 0.5
       instead of the -0.25 to 0.75 used in baseline normalization */
    Proto->Length = SegmentLength;
    Proto->Angle = A1;
    Proto->X = (X1 + X2) / 2.0;
    Proto->Y = (Y1 + Y2) / 2.0 - Y_DIM_OFFSET;
    FillABC(Proto);

    TempProto->ProtoId = Pid;
    SET_BIT(TempProtoMask, Pid);

    ConvertProto(Proto, Pid, IClass);
    AddProtoToProtoPruner(Proto, Pid, IClass,
                          classify_learning_debug_level >= 2);

    Class->TempProtos = push(Class->TempProtos, TempProto);
  }
  return IClass->NumProtos - 1;
} /* MakeNewTempProtos */
/*---------------------------------------------------------------------------*/
/**
 * Promotes the given temporary config (and the temp protos it uses) to a
 * permanent config, recording ambiguities for the class.
 *
 * @param Templates current set of adaptive templates
 * @param ClassId class containing config to be made permanent
 * @param ConfigId config to be made permanent
 * @param Blob current blob being adapted to
 *
 * Globals: none
 *
 * @note Exceptions: none
 * @note History: Thu Mar 14 15:54:08 1991, DSJ, Created.
 */
void Classify::MakePermanent(ADAPT_TEMPLATES Templates,
                             CLASS_ID ClassId,
                             int ConfigId,
                             TBLOB *Blob) {
  UNICHAR_ID *Ambigs;
  TEMP_CONFIG Config;
  ADAPT_CLASS Class;
  PROTO_KEY ProtoKey;

  Class = Templates->Class[ClassId];
  Config = TempConfigFor(Class, ConfigId);

  MakeConfigPermanent(Class, ConfigId);
  // First permanent config in this class makes the whole class permanent.
  if (Class->NumPermConfigs == 0)
    Templates->NumPermClasses++;
  Class->NumPermConfigs++;

  // Initialize permanent config.
  Ambigs = GetAmbiguities(Blob, ClassId);
  PERM_CONFIG Perm = (PERM_CONFIG) alloc_struct(sizeof(PERM_CONFIG_STRUCT),
                                                "PERM_CONFIG_STRUCT");
  Perm->Ambigs = Ambigs;
  Perm->FontinfoId = Config->FontinfoId;

  // Free memory associated with temporary config (since ADAPTED_CONFIG
  // is a union we need to clean up before we record permanent config).
  ProtoKey.Templates = Templates;
  ProtoKey.ClassId = ClassId;
  ProtoKey.ConfigId = ConfigId;
  // Convert the temp protos used by this config to permanent; free the rest.
  Class->TempProtos = delete_d(Class->TempProtos, &ProtoKey, MakeTempProtoPerm);
  FreeTempConfig(Config);

  // Record permanent config.
  PermConfigFor(Class, ConfigId) = Perm;

  if (classify_learning_debug_level >= 1) {
    tprintf("Making config %d for %s (ClassId %d) permanent:"
            " fontinfo id %d, ambiguities '",
            ConfigId, getDict().getUnicharset().debug_str(ClassId).string(),
            ClassId, PermConfigFor(Class, ConfigId)->FontinfoId);
    for (UNICHAR_ID *AmbigsPointer = Ambigs;
         *AmbigsPointer >= 0; ++AmbigsPointer)
      tprintf("%s", unicharset.id_to_unichar(*AmbigsPointer));
    tprintf("'.\n");
  }
} /* MakePermanent */
} // namespace tesseract
/*---------------------------------------------------------------------------*/
/**
 * This routine converts TempProto to be permanent if
 * its proto id is used by the configuration specified in
 * ProtoKey.
 *
 * @param item1 (TEMP_PROTO) temporary proto to compare to key
 * @param item2 (PROTO_KEY) defines which protos to make permanent
 *
 * Globals: none
 *
 * @return TRUE if TempProto is converted, FALSE otherwise
 * @note Exceptions: none
 * @note History: Thu Mar 14 18:49:54 1991, DSJ, Created.
 */
int MakeTempProtoPerm(void *item1, void *item2) {
  TEMP_PROTO TempProto = (TEMP_PROTO) item1;
  PROTO_KEY *ProtoKey = (PROTO_KEY *) item2;
  ADAPT_CLASS Class = ProtoKey->Templates->Class[ProtoKey->ClassId];
  TEMP_CONFIG Config = TempConfigFor(Class, ProtoKey->ConfigId);

  // Leave protos alone that the target config does not reference.
  if (TempProto->ProtoId > Config->MaxProtoId)
    return FALSE;
  if (!test_bit(Config->Protos, TempProto->ProtoId))
    return FALSE;

  // Referenced by the config: promote it and register with the pruner.
  MakeProtoPermanent(Class, TempProto->ProtoId);
  AddProtoToClassPruner(&(TempProto->Proto), ProtoKey->ClassId,
                        ProtoKey->Templates->Templates);
  FreeTempProto(TempProto);

  return TRUE;
} /* MakeTempProtoPerm */
/*---------------------------------------------------------------------------*/
namespace tesseract {
/**
 * This routine writes the matches in Results to File.
 *
 * @param File open text file to write Results to (currently unused;
 *        output goes through tprintf)
 * @param Results match results to write to File
 *
 * Globals: none
 *
 * @note Exceptions: none
 * @note History: Mon Mar 18 09:24:53 1991, DSJ, Created.
 */
void Classify::PrintAdaptiveMatchResults(FILE *File, ADAPT_RESULTS *Results) {
  const int num_matches = Results->match.size();
  for (int index = 0; index < num_matches; ++index) {
    const ScoredClass& match = Results->match[index];
    tprintf("%s(%d), shape %d, %.2f ",
            unicharset.debug_str(match.unichar_id).string(),
            match.unichar_id, match.shape_id,
            match.rating * 100.0);
  }
  tprintf("\n");
} /* PrintAdaptiveMatchResults */
/*---------------------------------------------------------------------------*/
/**
 * This routine steps thru each matching class in Results
 * and removes it from the match list if its rating
 * is worse than the BestRating plus a pad. In other words,
 * all good matches get moved to the front of the classes
 * array.
 *
 * @param Results contains matches to be filtered
 *
 * Globals:
 * - matcher_bad_match_pad defines a "bad match"
 *
 * @note Exceptions: none
 * @note History: Tue Mar 12 13:51:03 1991, DSJ, Created.
 */
void Classify::RemoveBadMatches(ADAPT_RESULTS *Results) {
  int Next, NextGood;
  FLOAT32 BadMatchThreshold;
  static const char* romans = "i v x I V X";
  BadMatchThreshold = Results->best_match.rating + matcher_bad_match_pad;

  if (classify_bln_numeric_mode) {
    // In numeric mode only digits, roman-numeral letters, and digit
    // substitutions for l/O survive.
    UNICHAR_ID unichar_id_one = unicharset.contains_unichar("1") ?
        unicharset.unichar_to_id("1") : -1;
    UNICHAR_ID unichar_id_zero = unicharset.contains_unichar("0") ?
        unicharset.unichar_to_id("0") : -1;
    ScoredClass scored_one = ScoredUnichar(Results, unichar_id_one);
    ScoredClass scored_zero = ScoredUnichar(Results, unichar_id_zero);

    for (Next = NextGood = 0; Next < Results->match.size(); Next++) {
      if (Results->match[Next].rating <= BadMatchThreshold) {
        ScoredClass match = Results->match[Next];
        if (!unicharset.get_isalpha(match.unichar_id) ||
            strstr(romans,
                   unicharset.id_to_unichar(match.unichar_id)) != NULL) {
          // Non-alpha, or one of the roman-numeral letters: keep as-is.
          Results->match[NextGood++] = Results->match[Next];
        } else if (unicharset.eq(match.unichar_id, "l") &&
                   scored_one.rating >= BadMatchThreshold) {
          // Substitute a "1" for an "l", keeping the l's rating.
          Results->match[NextGood] = scored_one;
          Results->match[NextGood].rating = match.rating;
          NextGood++;
        } else if (unicharset.eq(match.unichar_id, "O") &&
                   scored_zero.rating >= BadMatchThreshold) {
          // Substitute a "0" for an "O", keeping the O's rating.
          Results->match[NextGood] = scored_zero;
          Results->match[NextGood].rating = match.rating;
          NextGood++;
        }
      }
    }
  } else {
    // Keep every match rated no worse than the threshold.
    for (Next = NextGood = 0; Next < Results->match.size(); Next++) {
      if (Results->match[Next].rating <= BadMatchThreshold)
        Results->match[NextGood++] = Results->match[Next];
    }
  }
  Results->match.truncate(NextGood);
} /* RemoveBadMatches */
/*----------------------------------------------------------------------------*/
/**
 * This routine discards extra digits or punctuation from the results.
 * We keep only the top 2 punctuation answers and the top 1 digit answer if
 * present.
 *
 * @param Results contains matches to be filtered
 *
 * @note History: Tue Mar 12 13:51:03 1991, DSJ, Created.
 */
void Classify::RemoveExtraPuncs(ADAPT_RESULTS *Results) {
  /*garbage characters */
  static char punc_chars[] = ". , ; : / ` ~ ' - = \\ | \" ! _ ^";
  static char digit_chars[] = "0 1 2 3 4 5 6 7 8 9";
  int punc_count = 0;   /*no of punctuation characters seen */
  int digit_count = 0;  /*no of digit characters seen */
  int num_kept = 0;

  for (int next = 0; next < Results->match.size(); next++) {
    ScoredClass match = Results->match[next];
    const char *unichar = unicharset.id_to_unichar(match.unichar_id);
    bool keep;
    if (strstr(punc_chars, unichar) != NULL) {
      keep = punc_count < 2;  // keep at most two punctuation answers
      punc_count++;
    } else if (strstr(digit_chars, unichar) != NULL) {
      keep = digit_count < 1;  // keep at most one digit answer
      digit_count++;
    } else {
      keep = true;  // everything else passes through
    }
    if (keep)
      Results->match[num_kept++] = match;
  }
  Results->match.truncate(num_kept);
} /* RemoveExtraPuncs */
/*---------------------------------------------------------------------------*/
/**
 * Resets the internal proto/feature thresholds inside the integer
 * matcher to correspond to the specified adaptation threshold.
 *
 * @param Threshold threshold for creating new templates
 *
 * Globals:
 * - matcher_good_threshold default good match rating
 */
void Classify::SetAdaptiveThreshold(FLOAT32 Threshold) {
  // The default good-match threshold maps to a fixed 0.9; any other value
  // is inverted so that better (lower) ratings yield a higher cutoff.
  Threshold = (Threshold == matcher_good_threshold) ? 0.9 : (1.0 - Threshold);
  // Both matcher thresholds receive the same byte-scaled, clipped value.
  const int scaled = ClipToRange<int>(255 * Threshold, 0, 255);
  classify_adapt_proto_threshold.set_value(scaled);
  classify_adapt_feature_threshold.set_value(scaled);
} /* SetAdaptiveThreshold */
/*---------------------------------------------------------------------------*/
/**
* This routine displays debug information for the best config
* of the given shape_id for the given set of features.
*
* @param shape_id classifier id to work with
* @param features features of the unknown character
* @param num_features Number of features in the features array.
*
* @note Exceptions: none
* @note History: Fri Mar 22 08:43:52 1991, DSJ, Created.
*/
void Classify::ShowBestMatchFor(int shape_id,
                                const INT_FEATURE_STRUCT* features,
                                int num_features) {
#ifndef GRAPHICS_DISABLED
  uinT32 config_mask;
  if (UnusedClassIdIn(PreTrainedTemplates, shape_id)) {
    tprintf("No built-in templates for class/shape %d\n", shape_id);
    return;
  }
  if (num_features <= 0) {
    tprintf("Illegal blob (char norm features)!\n");
    return;
  }
  INT_RESULT_STRUCT cn_result;
  classify_norm_method.set_value(character);
  // First pass: match with all protos and configs enabled, debug off,
  // just to discover which config wins.
  im_.Match(ClassForClassId(PreTrainedTemplates, shape_id),
            AllProtosOn, AllConfigsOn,
            num_features, features, &cn_result,
            classify_adapt_feature_threshold, NO_DEBUG,
            matcher_debug_separate_windows);
  tprintf("\n");
  config_mask = 1 << cn_result.Config;
  tprintf("Static Shape ID: %d\n", shape_id);
  ShowMatchDisplay();
  // Second pass: re-match with only the winning config enabled and the
  // debug flags on, so the match display shows just the best config.
  im_.Match(ClassForClassId(PreTrainedTemplates, shape_id),
            AllProtosOn, reinterpret_cast<BIT_VECTOR>(&config_mask),
            num_features, features, &cn_result,
            classify_adapt_feature_threshold,
            matcher_debug_flags,
            matcher_debug_separate_windows);
  UpdateMatchDisplay();
#endif  // GRAPHICS_DISABLED
} /* ShowBestMatchFor */
// Returns a debug string for the classifier class_id: the shape_table_
// debug string when the built-in templates are in use and a shape table
// is available, otherwise the unicharset debug string.
STRING Classify::ClassIDToDebugStr(const INT_TEMPLATES_STRUCT* templates,
                                   int class_id, int config_id) const {
  if (templates == PreTrainedTemplates && shape_table_ != NULL) {
    // Map the class/config pair through the font set to a shape index.
    return shape_table_->DebugStr(
        ClassAndConfigIDToFontOrShapeID(class_id, config_id));
  }
  return unicharset.debug_str(class_id);
}
// Converts a classifier class_id index to a shape_table_ index
int Classify::ClassAndConfigIDToFontOrShapeID(int class_id,
int int_result_config) const {
int font_set_id = PreTrainedTemplates->Class[class_id]->font_set_id;
// Older inttemps have no font_ids.
if (font_set_id < 0)
return kBlankFontinfoId;
const FontSet &fs = fontset_table_.get(font_set_id);
ASSERT_HOST(int_result_config >= 0 && int_result_config < fs.size);
return fs.configs[int_result_config];
}
// Converts a shape_table_ index to a classifier class_id index (not a
// unichar-id!). Uses a search, so not fast.
int Classify::ShapeIDToClassID(int shape_id) const {
for (int id = 0; id < PreTrainedTemplates->NumClasses; ++id) {
int font_set_id = PreTrainedTemplates->Class[id]->font_set_id;
ASSERT_HOST(font_set_id >= 0);
const FontSet &fs = fontset_table_.get(font_set_id);
for (int config = 0; config < fs.size; ++config) {
if (fs.configs[config] == shape_id)
return id;
}
}
tprintf("Shape %d not found\n", shape_id);
return -1;
}
// Returns true if the given TEMP_CONFIG is good enough to make it
// a permanent config.
bool Classify::TempConfigReliable(CLASS_ID class_id,
                                  const TEMP_CONFIG &config) {
  if (classify_learning_debug_level >= 1) {
    tprintf("NumTimesSeen for config of %s is %d\n",
            getDict().getUnicharset().debug_str(class_id).string(),
            config->NumTimesSeen);
  }
  if (config->NumTimesSeen >= matcher_sufficient_examples_for_prototyping) {
    // Seen often enough to be trusted regardless of ambiguities.
    return true;
  } else if (config->NumTimesSeen < matcher_min_examples_for_prototyping) {
    // Not yet seen often enough to even be considered.
    return false;
  } else if (use_ambigs_for_adaption) {
    // Go through the ambigs vector and see whether we have already seen
    // enough times all the characters represented by the ambigs vector.
    const UnicharIdVector *ambigs =
        getDict().getUnicharAmbigs().AmbigsForAdaption(class_id);
    int ambigs_size = (ambigs == NULL) ? 0 : ambigs->size();
    for (int ambig = 0; ambig < ambigs_size; ++ambig) {
      ADAPT_CLASS ambig_class = AdaptedTemplates->Class[(*ambigs)[ambig]];
      assert(ambig_class != NULL);
      // An ambiguous class with no permanent configs and too few sightings
      // means we might still be confusing the two; hold off.
      if (ambig_class->NumPermConfigs == 0 &&
          ambig_class->MaxNumTimesSeen <
          matcher_min_examples_for_prototyping) {
        if (classify_learning_debug_level >= 1) {
          tprintf("Ambig %s has not been seen enough times,"
                  " not making config for %s permanent\n",
                  getDict().getUnicharset().debug_str(
                      (*ambigs)[ambig]).string(),
                  getDict().getUnicharset().debug_str(class_id).string());
        }
        return false;
      }
    }
  }
  return true;
}
// Re-examines every class that is reverse-ambiguous with class_id and
// promotes to permanent any of its temporary configs that have now become
// reliable (TempConfigReliable), using Blob as the promoting example.
void Classify::UpdateAmbigsGroup(CLASS_ID class_id, TBLOB *Blob) {
  const UnicharIdVector *ambigs =
      getDict().getUnicharAmbigs().ReverseAmbigsForAdaption(class_id);
  int ambigs_size = (ambigs == NULL) ? 0 : ambigs->size();
  if (classify_learning_debug_level >= 1) {
    tprintf("Running UpdateAmbigsGroup for %s class_id=%d\n",
            getDict().getUnicharset().debug_str(class_id).string(), class_id);
  }
  for (int ambig = 0; ambig < ambigs_size; ++ambig) {
    CLASS_ID ambig_class_id = (*ambigs)[ambig];
    const ADAPT_CLASS ambigs_class = AdaptedTemplates->Class[ambig_class_id];
    for (int cfg = 0; cfg < MAX_NUM_CONFIGS; ++cfg) {
      // Already-permanent configs need no further attention.
      if (ConfigIsPermanent(ambigs_class, cfg)) continue;
      const TEMP_CONFIG config =
          TempConfigFor(AdaptedTemplates->Class[ambig_class_id], cfg);
      if (config != NULL && TempConfigReliable(ambig_class_id, config)) {
        if (classify_learning_debug_level >= 1) {
          tprintf("Making config %d of %s permanent\n", cfg,
                  getDict().getUnicharset().debug_str(
                      ambig_class_id).string());
        }
        MakePermanent(AdaptedTemplates, ambig_class_id, cfg, Blob);
      }
    }
  }
}
} // namespace tesseract
| C++ |
/******************************************************************************
** Filename: mfdefs.c
** Purpose: Basic routines for manipulating micro-features
** Author: Dan Johnson
** History: Mon Jan 22 08:48:58 1990, DSJ, Created.
**
** (c) Copyright Hewlett-Packard Company, 1988.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
******************************************************************************/
/**----------------------------------------------------------------------------
Include Files and Type Defines
----------------------------------------------------------------------------**/
#include "mfdefs.h"
#include "emalloc.h"
#include <math.h>
/**----------------------------------------------------------------------------
Public Code
----------------------------------------------------------------------------**/
/*---------------------------------------------------------------------------*/
/**
 * Allocates and returns a new micro-feature data structure.
 * The contents of the returned block are uninitialized.
 *
 * @return Newly allocated micro-feature.
 */
MICROFEATURE NewMicroFeature() {
  return static_cast<MICROFEATURE>(Emalloc(sizeof(MFBLOCK)));
} /* NewMicroFeature */
/*---------------------------------------------------------------------------*/
/**
 * Deallocates all of the memory consumed by a list of micro-features.
 *
 * @param MicroFeatures list of micro-features to be freed
 */
void FreeMicroFeatures(MICROFEATURES MicroFeatures) {
  // Each list node's payload was allocated with Emalloc, so Efree is the
  // matching per-node destructor.
  destroy_nodes(MicroFeatures, Efree);
} /* FreeMicroFeatures */
| C++ |
/******************************************************************************
** Filename: clustertool.c
** Purpose: Misc. tools for use with the clustering routines
** Author: Dan Johnson
** History: 6/6/89, DSJ, Created.
**
** (c) Copyright Hewlett-Packard Company, 1988.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
******************************************************************************/
//--------------------------Include Files----------------------------------
#include "clusttool.h"
#include "const.h"
#include "danerror.h"
#include "emalloc.h"
#include "scanutils.h"
#include <stdio.h>
#include <math.h>
//---------------Global Data Definitions and Declarations--------------------
#define TOKENSIZE 80 //max size of tokens read from an input file
#define MAXSAMPLESIZE 65535 //max num of dimensions in feature space
//#define MAXBLOCKSIZE 65535 //max num of samples in a character (block size)
/*---------------------------------------------------------------------------
Public Code
-----------------------------------------------------------------------------*/
/**
 * Reads a single integer from the specified open text file and checks
 * that it lies in [0, MAXSAMPLESIZE].
 *
 * @param File open text file to read sample size from
 * @return Sample size read from the file.
 * Exceptions: ILLEGALSAMPLESIZE illegal format or range
 */
uinT16 ReadSampleSize(FILE *File) {
  int size = 0;
  const bool scanned = tfscanf(File, "%d", &size) == 1;
  if (!scanned || size < 0 || size > MAXSAMPLESIZE)
    DoError(ILLEGALSAMPLESIZE, "Illegal sample size");
  return size;
}  // ReadSampleSize
/**
 * Reads N textual parameter descriptions, each characterizing one
 * dimension of feature space.
 *
 * @param File open text file to read N parameter descriptions from
 * @param N number of parameter descriptions to read
 * @return Pointer to a newly allocated array of N parameter descriptors.
 * Exceptions: ILLEGALCIRCULARSPEC, ILLEGALESSENTIALSPEC, ILLEGALMINMAXSPEC
 */
PARAM_DESC *ReadParamDesc(FILE *File, uinT16 N) {
  char Token[TOKENSIZE];
  PARAM_DESC *descs = (PARAM_DESC *) Emalloc (N * sizeof (PARAM_DESC));
  for (int i = 0; i < N; i++) {
    PARAM_DESC *desc = &descs[i];
    if (tfscanf(File, "%s", Token) != 1)
      DoError(ILLEGALCIRCULARSPEC, "Illegal circular/linear specification");
    // A token starting with 'c' means circular; anything else is linear.
    desc->Circular = (Token[0] == 'c') ? TRUE : FALSE;
    if (tfscanf(File, "%s", Token) != 1)
      DoError(ILLEGALESSENTIALSPEC, "Illegal essential/non-essential spec");
    // A token starting with 'e' means essential, i.e. NOT NonEssential.
    desc->NonEssential = (Token[0] == 'e') ? FALSE : TRUE;
    if (tfscanf(File, "%f%f", &(desc->Min), &(desc->Max)) != 2)
      DoError(ILLEGALMINMAXSPEC, "Illegal min or max specification");
    // Derived fields used by the clustering code.
    desc->Range = desc->Max - desc->Min;
    desc->HalfRange = desc->Range / 2;
    desc->MidRange = (desc->Max + desc->Min) / 2;
  }
  return descs;
}  // ReadParamDesc
/** ReadPrototype *************************************************************
Parameters: File open text file to read prototype from
N number of dimensions used in prototype
Globals: None
Operation: This routine reads a textual description of a prototype from
the specified file.
Return: List of prototypes
Exceptions: ILLEGALSIGNIFICANCESPEC
ILLEGALSAMPLECOUNT
ILLEGALMEANSPEC
ILLEGALVARIANCESPEC
ILLEGALDISTRIBUTION
History: 6/6/89, DSJ, Created.
******************************************************************************/
PROTOTYPE *ReadPrototype(FILE *File, uinT16 N) {
  char Token[TOKENSIZE];
  int Status;
  PROTOTYPE *Proto;
  int SampleCount;
  int i;
  if ((Status = tfscanf(File, "%s", Token)) == 1) {
    Proto = (PROTOTYPE *) Emalloc (sizeof (PROTOTYPE));
    Proto->Cluster = NULL;
    // 's...' marks a significant prototype; anything else is insignificant.
    if (Token[0] == 's')
      Proto->Significant = TRUE;
    else
      Proto->Significant = FALSE;
    Proto->Style = ReadProtoStyle (File);
    if ((tfscanf(File, "%d", &SampleCount) != 1) || (SampleCount < 0))
      DoError (ILLEGALSAMPLECOUNT, "Illegal sample count");
    Proto->NumSamples = SampleCount;
    Proto->Mean = ReadNFloats (File, N, NULL);
    if (Proto->Mean == NULL)
      DoError (ILLEGALMEANSPEC, "Illegal prototype mean");
    switch (Proto->Style) {
      case spherical:
        // One shared variance; the magnitude is the peak height of the
        // corresponding N-dimensional normal density.
        if (ReadNFloats (File, 1, &(Proto->Variance.Spherical)) == NULL)
          DoError (ILLEGALVARIANCESPEC, "Illegal prototype variance");
        Proto->Magnitude.Spherical =
          1.0 / sqrt ((double) (2.0 * PI * Proto->Variance.Spherical));
        Proto->TotalMagnitude =
          pow (Proto->Magnitude.Spherical, (float) N);
        Proto->LogMagnitude = log ((double) Proto->TotalMagnitude);
        Proto->Weight.Spherical = 1.0 / Proto->Variance.Spherical;
        Proto->Distrib = NULL;
        break;
      case elliptical:
        // Independent per-dimension variances of a normal distribution.
        Proto->Variance.Elliptical = ReadNFloats (File, N, NULL);
        if (Proto->Variance.Elliptical == NULL)
          DoError (ILLEGALVARIANCESPEC, "Illegal prototype variance");
        Proto->Magnitude.Elliptical =
          (FLOAT32 *) Emalloc (N * sizeof (FLOAT32));
        Proto->Weight.Elliptical =
          (FLOAT32 *) Emalloc (N * sizeof (FLOAT32));
        Proto->TotalMagnitude = 1.0;
        for (i = 0; i < N; i++) {
          Proto->Magnitude.Elliptical[i] =
            1.0 /
            sqrt ((double) (2.0 * PI * Proto->Variance.Elliptical[i]));
          Proto->Weight.Elliptical[i] =
            1.0 / Proto->Variance.Elliptical[i];
          Proto->TotalMagnitude *= Proto->Magnitude.Elliptical[i];
        }
        Proto->LogMagnitude = log ((double) Proto->TotalMagnitude);
        Proto->Distrib = NULL;
        break;
      case mixed:
        // One distribution keyword per dimension, then the variances.
        Proto->Distrib =
          (DISTRIBUTION *) Emalloc (N * sizeof (DISTRIBUTION));
        for (i = 0; i < N; i++) {
          if (tfscanf(File, "%s", Token) != 1)
            DoError (ILLEGALDISTRIBUTION,
                     "Illegal prototype distribution");
          switch (Token[0]) {
            case 'n':
              Proto->Distrib[i] = normal;
              break;
            case 'u':
              Proto->Distrib[i] = uniform;
              break;
            case 'r':
              Proto->Distrib[i] = D_random;
              break;
            default:
              DoError (ILLEGALDISTRIBUTION,
                       "Illegal prototype distribution");
          }
        }
        Proto->Variance.Elliptical = ReadNFloats (File, N, NULL);
        if (Proto->Variance.Elliptical == NULL)
          DoError (ILLEGALVARIANCESPEC, "Illegal prototype variance");
        Proto->Magnitude.Elliptical =
          (FLOAT32 *) Emalloc (N * sizeof (FLOAT32));
        Proto->Weight.Elliptical =
          (FLOAT32 *) Emalloc (N * sizeof (FLOAT32));
        Proto->TotalMagnitude = 1.0;
        for (i = 0; i < N; i++) {
          switch (Proto->Distrib[i]) {
            case normal:
              Proto->Magnitude.Elliptical[i] = 1.0 /
                sqrt ((double)
                      (2.0 * PI * Proto->Variance.Elliptical[i]));
              Proto->Weight.Elliptical[i] =
                1.0 / Proto->Variance.Elliptical[i];
              break;
            case uniform:
            case D_random:
              // Uniform/random densities are flat over twice the variance.
              Proto->Magnitude.Elliptical[i] = 1.0 /
                (2.0 * Proto->Variance.Elliptical[i]);
              break;
            case DISTRIBUTION_COUNT:
              ASSERT_HOST(!"Distribution count not allowed!");
          }
          Proto->TotalMagnitude *= Proto->Magnitude.Elliptical[i];
        }
        Proto->LogMagnitude = log ((double) Proto->TotalMagnitude);
        break;
        // NOTE(review): a prototype whose style reads as 'automatic'
        // matches no case above, leaving variance/magnitude/Distrib
        // uninitialized -- presumably such protos never appear in proto
        // files; verify against the writers.
    }
    return (Proto);
  }
  else if (Status == EOF)
    return (NULL);
  else {
    DoError (ILLEGALSIGNIFICANCESPEC, "Illegal significance specification");
    return (NULL);
  }
}  // ReadPrototype
/**
 * Reads a single token from the specified text file and interprets it
 * as a prototype style specification.
 *
 * @param File open text file to read prototype style from
 * @return Prototype style read from the file (elliptical on bad input).
 * Exceptions: ILLEGALSTYLESPEC illegal prototype style specification
 */
PROTOSTYLE ReadProtoStyle(FILE *File) {
  char Token[TOKENSIZE];
  if (tfscanf(File, "%s", Token) != 1)
    DoError(ILLEGALSTYLESPEC, "Illegal prototype style specification");
  // Only the first character of the token is significant.
  PROTOSTYLE Style = elliptical;
  switch (Token[0]) {
    case 's':
      Style = spherical;
      break;
    case 'e':
      Style = elliptical;
      break;
    case 'm':
      Style = mixed;
      break;
    case 'a':
      Style = automatic;
      break;
    default:
      // Unrecognized token: report the error; Style stays elliptical.
      DoError(ILLEGALSTYLESPEC, "Illegal prototype style specification");
  }
  return Style;
}  // ReadProtoStyle
/**
 * Reads N floats from the specified text file and places them into
 * Buffer. If Buffer is NULL, a buffer is allocated here and returned to
 * the caller (who then owns it). If EOF is encountered before any float
 * can be read, NULL is returned; in that case only a buffer allocated
 * by this routine is released -- a caller-supplied Buffer is never freed.
 *
 * @param File open text file to read floats from
 * @param N number of floats to read
 * @param Buffer pointer to buffer to place floats into, or NULL
 * @return Pointer to buffer holding floats or NULL if EOF
 * Exceptions: ILLEGALFLOAT
 */
FLOAT32* ReadNFloats(FILE * File, uinT16 N, FLOAT32 Buffer[]) {
  // Track ownership: the previous code called Efree() on Buffer even when
  // the caller supplied it (e.g. ReadPrototype passes a pointer into a
  // PROTOTYPE struct), which frees memory this routine does not own.
  bool allocated_here = false;
  if (Buffer == NULL) {
    Buffer = reinterpret_cast<FLOAT32*>(Emalloc(N * sizeof(FLOAT32)));
    allocated_here = true;
  }
  for (int i = 0; i < N; i++) {
    const int num_read = tfscanf(File, "%f", &(Buffer[i]));
    if (num_read != 1) {
      if (num_read == EOF && i == 0) {
        // Clean EOF before any data: release only what we allocated.
        if (allocated_here)
          Efree(Buffer);
        return NULL;
      } else {
        DoError(ILLEGALFLOAT, "Illegal float specification");
      }
    }
  }
  return Buffer;
}  // ReadNFloats
/**
 * Writes an array of N dimension descriptors to the specified text
 * file, one descriptor per line.
 *
 * @param File open text file to write param descriptors to
 * @param N number of param descriptors to write
 * @param ParamDesc array of param descriptors to write
 */
void
WriteParamDesc (FILE * File, uinT16 N, PARAM_DESC ParamDesc[]) {
  for (int i = 0; i < N; i++) {
    fprintf(File, "%s ", ParamDesc[i].Circular ? "circular" : "linear");
    fprintf(File, "%s ", ParamDesc[i].NonEssential ? "non-essential"
                                                   : "essential");
    fprintf(File, "%10.6f %10.6f\n", ParamDesc[i].Min, ParamDesc[i].Max);
  }
}  // WriteParamDesc
/** WritePrototype ************************************************************
Parameters: File open text file to write prototype to
N number of dimensions in feature space
Proto prototype to write out
Globals: None
Operation: This routine writes a textual description of a prototype
to the specified text file.
Return: None
Exceptions: None
History: 6/12/89, DSJ, Created.
*******************************************************************************/
void WritePrototype(FILE *File, uinT16 N, PROTOTYPE *Proto) {
  int i;
  if (Proto->Significant)
    fprintf (File, "significant ");
  else
    fprintf (File, "insignificant ");
  WriteProtoStyle (File, (PROTOSTYLE) Proto->Style);
  fprintf (File, "%6d\n\t", Proto->NumSamples);
  WriteNFloats (File, N, Proto->Mean);
  fprintf (File, "\t");
  switch (Proto->Style) {
    case spherical:
      // A single shared variance covers every dimension.
      WriteNFloats (File, 1, &(Proto->Variance.Spherical));
      break;
    case elliptical:
      // Independent per-dimension variances.
      WriteNFloats (File, N, Proto->Variance.Elliptical);
      break;
    case mixed:
      // One distribution keyword per dimension, then the variances.
      for (i = 0; i < N; i++)
        switch (Proto->Distrib[i]) {
          case normal:
            fprintf (File, " %9s", "normal");
            break;
          case uniform:
            fprintf (File, " %9s", "uniform");
            break;
          case D_random:
            fprintf (File, " %9s", "random");
            break;
          case DISTRIBUTION_COUNT:
            ASSERT_HOST(!"Distribution count not allowed!");
        }
      fprintf (File, "\n\t");
      WriteNFloats (File, N, Proto->Variance.Elliptical);
      // NOTE(review): no case handles the 'automatic' style, so such a
      // proto would be written without variance data -- presumably it
      // never reaches this writer; verify.
  }
}  // WritePrototype
/**
 * Writes a text representation of N floats from an array to a file.
 * All of the floats are placed on one line.
 *
 * @param File open text file to write N floats to
 * @param N number of floats to write
 * @param Array array of floats to write
 */
void WriteNFloats(FILE * File, uinT16 N, FLOAT32 Array[]) {
  int i = 0;
  while (i < N) {
    fprintf(File, " %9.6f", Array[i]);
    ++i;
  }
  fprintf(File, "\n");
}  // WriteNFloats
/**
 * Writes to the specified text file a word representing the given
 * prototype style. No trailing newline is appended.
 *
 * @param File open text file to write prototype style to
 * @param ProtoStyle prototype style to write
 */
void WriteProtoStyle(FILE *File, PROTOSTYLE ProtoStyle) {
  const char *name = NULL;
  switch (ProtoStyle) {
    case spherical:
      name = "spherical";
      break;
    case elliptical:
      name = "elliptical";
      break;
    case mixed:
      name = "mixed";
      break;
    case automatic:
      name = "automatic";
      break;
  }
  if (name != NULL)
    fprintf(File, "%s", name);
}  // WriteProtoStyle
/*---------------------------------------------------------------------------*/
/**
 * Writes a textual description of each selected prototype in ProtoList
 * to File, preceded by a header giving the number of dimensions in
 * feature space and the descriptions for each dimension.
 *
 * @param File open text file to write prototypes to
 * @param N number of dimensions in feature space
 * @param ParamDesc descriptions for each dimension
 * @param ProtoList list of prototypes to be written
 * @param WriteSigProtos TRUE to write out significant prototypes
 * @param WriteInsigProtos TRUE to write out insignificant prototypes
 */
void WriteProtoList(FILE *File,
                    uinT16 N,
                    PARAM_DESC ParamDesc[],
                    LIST ProtoList,
                    BOOL8 WriteSigProtos,
                    BOOL8 WriteInsigProtos) {
  /* write file header */
  fprintf(File, "%0d\n", N);
  WriteParamDesc(File, N, ParamDesc);
  /* write each prototype whose significance class was requested */
  iterate(ProtoList) {
    PROTOTYPE *proto = (PROTOTYPE *) first_node(ProtoList);
    if (proto->Significant ? WriteSigProtos : WriteInsigProtos)
      WritePrototype(File, N, proto);
  }
} /* WriteProtoList */
| C++ |
// Copyright 2011 Google Inc. All Rights Reserved.
// Author: rays@google.com (Ray Smith)
///////////////////////////////////////////////////////////////////////
// File: tessclassifier.h
// Description: Tesseract implementation of a ShapeClassifier.
// Author: Ray Smith
// Created: Tue Nov 22 14:10:45 PST 2011
//
// (C) Copyright 2011, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#ifndef THIRD_PARTY_TESSERACT_CLASSIFY_TESSCLASSIFIER_H_
#define THIRD_PARTY_TESSERACT_CLASSIFY_TESSCLASSIFIER_H_
#include "shapeclassifier.h"
namespace tesseract {
class Classify;
class TrainingSample;
// Tesseract implementation of a ShapeClassifier.
// Due to limitations in the content of TrainingSample, this currently
// only works for the static classifier and only works if the ShapeTable
// in classify is not NULL.
class TessClassifier : public ShapeClassifier {
 public:
  // pruner_only: run only the ClassPruner stage instead of the full
  // classifier. classify: borrowed pointer to the Tesseract classifier
  // this adapter delegates to (not owned, must outlive this object).
  TessClassifier(bool pruner_only, tesseract::Classify* classify)
    : pruner_only_(pruner_only), classify_(classify) {}
  virtual ~TessClassifier() {}
  // Classifies the given [training] sample, writing to results.
  // See ShapeClassifier for a full description.
  virtual int UnicharClassifySample(const TrainingSample& sample, Pix* page_pix,
                                    int debug, UNICHAR_ID keep_this,
                                    GenericVector<UnicharRating>* results);
  // Provides access to the ShapeTable that this classifier works with.
  virtual const ShapeTable* GetShapeTable() const;
  // Provides access to the UNICHARSET that this classifier works with.
  // Only needs to be overridden if GetShapeTable() can return NULL.
  virtual const UNICHARSET& GetUnicharset() const;
  // Displays classification as the given shape_id. Creates as many windows
  // as it feels fit, using index as a guide for placement. Adds any created
  // windows to the windows output and returns a new index that may be used
  // by any subsequent classifiers. Caller waits for the user to view and
  // then destroys the windows by clearing the vector.
  virtual int DisplayClassifyAs(const TrainingSample& sample, Pix* page_pix,
                                int unichar_id, int index,
                                PointerVector<ScrollView>* windows);
 private:
  // Indicates that this classifier is to use just the ClassPruner, or the
  // full classifier if false.
  bool pruner_only_;
  // Borrowed pointer to the actual Tesseract classifier.
  tesseract::Classify* classify_;
};
} // namespace tesseract
#endif /* THIRD_PARTY_TESSERACT_CLASSIFY_TESSCLASSIFIER_H_ */
| C++ |
///////////////////////////////////////////////////////////////////////
// File: classify.cpp
// Description: classify class.
// Author: Samuel Charron
//
// (C) Copyright 2006, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
// Include automatically generated configuration file if running autoconf.
#ifdef HAVE_CONFIG_H
#include "config_auto.h"
#endif
#include "classify.h"
#include "fontinfo.h"
#include "intproto.h"
#include "mfoutline.h"
#include "scrollview.h"
#include "shapeclassifier.h"
#include "shapetable.h"
#include "unicity_table.h"
#include <string.h>
namespace tesseract {
// Constructs the shared classifier context. The long initializer list
// declares every classifier tuning parameter via the *_MEMBER macros,
// which also register each parameter with this->params() so it can be
// set from config files. The body installs the font-table callbacks and
// zeroes the adaptive-classifier state that is created lazily later.
Classify::Classify()
  : BOOL_MEMBER(prioritize_division, FALSE,
                "Prioritize blob division over chopping", this->params()),
    INT_MEMBER(tessedit_single_match, FALSE,
               "Top choice only from CP", this->params()),
    BOOL_MEMBER(classify_enable_learning, true,
                "Enable adaptive classifier", this->params()),
    INT_MEMBER(classify_debug_level, 0, "Classify debug level",
               this->params()),
    INT_MEMBER(classify_norm_method, character, "Normalization Method   ...",
               this->params()),
    double_MEMBER(classify_char_norm_range, 0.2,
                  "Character Normalization Range ...", this->params()),
    double_MEMBER(classify_min_norm_scale_x, 0.0, "Min char x-norm scale ...",
                  this->params()),  /* PREV DEFAULT 0.1 */
    double_MEMBER(classify_max_norm_scale_x, 0.325, "Max char x-norm scale ...",
                  this->params()),  /* PREV DEFAULT 0.3 */
    double_MEMBER(classify_min_norm_scale_y, 0.0, "Min char y-norm scale ...",
                  this->params()),  /* PREV DEFAULT 0.1 */
    double_MEMBER(classify_max_norm_scale_y, 0.325, "Max char y-norm scale ...",
                  this->params()),  /* PREV DEFAULT 0.3 */
    double_MEMBER(classify_max_rating_ratio, 1.5,
                  "Veto ratio between classifier ratings", this->params()),
    double_MEMBER(classify_max_certainty_margin, 5.5,
                  "Veto difference between classifier certainties",
                  this->params()),
    BOOL_MEMBER(tess_cn_matching, 0, "Character Normalized Matching",
                this->params()),
    BOOL_MEMBER(tess_bn_matching, 0, "Baseline Normalized Matching",
                this->params()),
    BOOL_MEMBER(classify_enable_adaptive_matcher, 1,
                "Enable adaptive classifier",
                this->params()),
    BOOL_MEMBER(classify_use_pre_adapted_templates, 0,
                "Use pre-adapted classifier templates", this->params()),
    BOOL_MEMBER(classify_save_adapted_templates, 0,
                "Save adapted templates to a file", this->params()),
    BOOL_MEMBER(classify_enable_adaptive_debugger, 0, "Enable match debugger",
                this->params()),
    BOOL_MEMBER(classify_nonlinear_norm, 0,
                "Non-linear stroke-density normalization", this->params()),
    INT_MEMBER(matcher_debug_level, 0, "Matcher Debug Level", this->params()),
    INT_MEMBER(matcher_debug_flags, 0, "Matcher Debug Flags", this->params()),
    INT_MEMBER(classify_learning_debug_level, 0, "Learning Debug Level: ",
               this->params()),
    double_MEMBER(matcher_good_threshold, 0.125, "Good Match (0-1)",
                  this->params()),
    double_MEMBER(matcher_great_threshold, 0.0, "Great Match (0-1)",
                  this->params()),
    double_MEMBER(matcher_perfect_threshold, 0.02, "Perfect Match (0-1)",
                  this->params()),
    double_MEMBER(matcher_bad_match_pad, 0.15, "Bad Match Pad (0-1)",
                  this->params()),
    double_MEMBER(matcher_rating_margin, 0.1, "New template margin (0-1)",
                  this->params()),
    double_MEMBER(matcher_avg_noise_size, 12.0, "Avg. noise blob length",
                  this->params()),
    INT_MEMBER(matcher_permanent_classes_min, 1, "Min # of permanent classes",
               this->params()),
    INT_MEMBER(matcher_min_examples_for_prototyping, 3,
               "Reliable Config Threshold", this->params()),
    INT_MEMBER(matcher_sufficient_examples_for_prototyping, 5,
               "Enable adaption even if the ambiguities have not been seen",
               this->params()),
    double_MEMBER(matcher_clustering_max_angle_delta, 0.015,
                  "Maximum angle delta for prototype clustering",
                  this->params()),
    double_MEMBER(classify_misfit_junk_penalty, 0.0,
                  "Penalty to apply when a non-alnum is vertically out of "
                  "its expected textline position",
                  this->params()),
    double_MEMBER(rating_scale, 1.5, "Rating scaling factor", this->params()),
    double_MEMBER(certainty_scale, 20.0, "Certainty scaling factor",
                  this->params()),
    double_MEMBER(tessedit_class_miss_scale, 0.00390625,
                  "Scale factor for features not used", this->params()),
    double_MEMBER(classify_adapted_pruning_factor, 2.5,
                  "Prune poor adapted results this much worse than best result",
                  this->params()),
    double_MEMBER(classify_adapted_pruning_threshold, -1.0,
                  "Threshold at which classify_adapted_pruning_factor starts",
                  this->params()),
    INT_MEMBER(classify_adapt_proto_threshold, 230,
               "Threshold for good protos during adaptive 0-255",
               this->params()),
    INT_MEMBER(classify_adapt_feature_threshold, 230,
               "Threshold for good features during adaptive 0-255",
               this->params()),
    BOOL_MEMBER(disable_character_fragments, TRUE,
                "Do not include character fragments in the"
                " results of the classifier", this->params()),
    double_MEMBER(classify_character_fragments_garbage_certainty_threshold,
                  -3.0, "Exclude fragments that do not look like whole"
                  " characters from training and adaption", this->params()),
    BOOL_MEMBER(classify_debug_character_fragments, FALSE,
                "Bring up graphical debugging windows for fragments training",
                this->params()),
    BOOL_MEMBER(matcher_debug_separate_windows, FALSE,
                "Use two different windows for debugging the matching: "
                "One for the protos and one for the features.", this->params()),
    STRING_MEMBER(classify_learn_debug_str, "", "Class str to debug learning",
                  this->params()),
    INT_MEMBER(classify_class_pruner_threshold, 229,
               "Class Pruner Threshold 0-255", this->params()),
    INT_MEMBER(classify_class_pruner_multiplier, 15,
               "Class Pruner Multiplier 0-255: ", this->params()),
    INT_MEMBER(classify_cp_cutoff_strength, 7,
               "Class Pruner CutoffStrength: ", this->params()),
    INT_MEMBER(classify_integer_matcher_multiplier, 10,
               "Integer Matcher Multiplier  0-255:   ", this->params()),
    EnableLearning(true),
    INT_MEMBER(il1_adaption_test, 0, "Dont adapt to i/I at beginning of word",
               this->params()),
    BOOL_MEMBER(classify_bln_numeric_mode, 0,
                "Assume the input is numbers [0-9].", this->params()),
    double_MEMBER(speckle_large_max_size, 0.30, "Max large speckle size",
                  this->params()),
    double_MEMBER(speckle_rating_penalty, 10.0,
                  "Penalty to add to worst rating for noise", this->params()),
    shape_table_(NULL),
    dict_(this),
    static_classifier_(NULL) {
  // The font tables copy their entries; install compare/delete callbacks
  // so entries are deduplicated and their heap data is released.
  fontinfo_table_.set_compare_callback(
      NewPermanentTessCallback(CompareFontInfo));
  fontinfo_table_.set_clear_callback(
      NewPermanentTessCallback(FontInfoDeleteCallback));
  fontset_table_.set_compare_callback(
      NewPermanentTessCallback(CompareFontSet));
  fontset_table_.set_clear_callback(
      NewPermanentTessCallback(FontSetDeleteCallback));
  // Adaptive-classifier state is created lazily; start everything NULL.
  AdaptedTemplates = NULL;
  PreTrainedTemplates = NULL;
  AllProtosOn = NULL;
  AllConfigsOn = NULL;
  AllConfigsOff = NULL;
  TempProtoMask = NULL;
  NormProtos = NULL;
  NumAdaptationsFailed = 0;
  learn_debug_win_ = NULL;
  learn_fragmented_word_debug_win_ = NULL;
  learn_fragments_debug_win_ = NULL;
  // Per-class normalization cutoff tables, sized for the max class count.
  CharNormCutoffs = new uinT16[MAX_NUM_CLASSES];
  BaselineCutoffs = new uinT16[MAX_NUM_CLASSES];
}
Classify::~Classify() {
  // Shut down the adaptive classifier before releasing the locally owned
  // debug windows and the cutoff arrays allocated in the constructor.
  EndAdaptiveClassifier();
  delete learn_debug_win_;
  delete learn_fragmented_word_debug_win_;
  delete learn_fragments_debug_win_;
  delete[] CharNormCutoffs;
  delete[] BaselineCutoffs;
}
// Takes ownership of the given classifier, and uses it for future calls
// to CharNormClassifier. Any previously owned classifier is deleted.
void Classify::SetStaticClassifier(ShapeClassifier* static_classifier) {
  // Guard against self-assignment: without this check, passing the pointer
  // already held in static_classifier_ would delete the object and then
  // store a dangling pointer to it.
  if (static_classifier_ != static_classifier) {
    delete static_classifier_;
    static_classifier_ = static_classifier;
  }
}
// Moved from speckle.cpp
// Adds a noise classification result that is a bit worse than the worst
// current result, or the worst possible result if no current results.
void Classify::AddLargeSpeckleTo(int blob_length, BLOB_CHOICE_LIST *choices) {
BLOB_CHOICE_IT bc_it(choices);
// If there is no classifier result, we will use the worst possible certainty
// and corresponding rating.
float certainty = -getDict().certainty_scale;
float rating = rating_scale * blob_length;
if (!choices->empty() && blob_length > 0) {
bc_it.move_to_last();
BLOB_CHOICE* worst_choice = bc_it.data();
// Add speckle_rating_penalty to worst rating, matching old value.
rating = worst_choice->rating() + speckle_rating_penalty;
// Compute the rating to correspond to the certainty. (Used to be kept
// the same, but that messes up the language model search.)
certainty = -rating * getDict().certainty_scale /
(rating_scale * blob_length);
}
BLOB_CHOICE* blob_choice = new BLOB_CHOICE(UNICHAR_SPACE, rating, certainty,
-1, -1, 0, 0, MAX_FLOAT32, 0,
BCC_SPECKLE_CLASSIFIER);
bc_it.add_to_end(blob_choice);
}
// Returns true if the blob is small enough to be a large speckle.
bool Classify::LargeSpeckle(const TBLOB &blob) {
double speckle_size = kBlnXHeight * speckle_large_max_size;
TBOX bbox = blob.bounding_box();
return bbox.width() < speckle_size && bbox.height() < speckle_size;
}
} // namespace tesseract
| C++ |
// Copyright 2011 Google Inc. All Rights Reserved.
// Author: rays@google.com (Ray Smith)
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#ifndef TESSERACT_CLASSIFY_SAMPLEITERATOR_H_
#define TESSERACT_CLASSIFY_SAMPLEITERATOR_H_
namespace tesseract {
class IndexMapBiDi;
class IntFeatureMap;
class ShapeTable;
class TrainingSample;
class TrainingSampleSet;
struct UnicharAndFonts;
// Iterator class to encapsulate the complex iteration involved in getting
// all samples of all shapes needed for a classification problem.
//
// =====INPUTS TO Init FUNCTION=====
// The charset_map defines a subset of the sample_set classes (with a NULL
// shape_table, or the shape_table classes if not NULL.)
//
// The shape_table (if not NULL) defines the mapping from shapes to
// font_id/class_id pairs. Each shape is a list of unichar_id and font lists.
//
// The sample_set holds the samples and provides indexed access to samples
// of font_id/class_id pairs.
//
// If randomize is true, the samples are perturbed slightly, but the
// perturbation is guaranteed to be the same for multiple identical
// iterations.
//
// =====DIFFERENT COMBINATIONS OF INPUTS=====
// NULL shape_table:
// Without a shape_table, everything works in UNICHAR_IDs.
//
// NULL shape_table, NULL charset_map:
// Iterations simply run over the samples in the order the samples occur in the
// input files.
// GetCompactClassID and GetSparseClassID both return the sample UNICHAR_ID.
//
// NULL shape_table, non-NULL charset_map:
// When shape_table is NULL, the charset_map indexes unichar_ids directly,
// and an iteration returns all samples of all chars in the charset_map, which
// is a subset of the full unicharset.
// The iteration will be in groups of the same unichar_id, in the order
// defined by the charset_map.
// GetCompactClassID returns the charset_map index of a sample, and
// GetSparseClassID returns the sample UNICHAR_ID.
//
// Non-NULL shape_table:
// With a shape_table, samples are grouped according to the shape_table, so
// multiple UNICHAR_IDs and fonts may be grouped together, and everything
// works in shape_ids.
//
// Non-NULL shape_table, NULL charset_map.
// Iterations simply run over the samples in the order of shape_id.
// GetCompactClassID and GetSparseClassID both return the shape_id.
// (If you want the unichar_id or font_id, the sample still has them.)
//
// Non-NULL shape_table, non-NULL charset_map.
// When shape_table is not NULL, the charset_map indexes and subsets shapes in
// the shape_table, and iterations will be in shape_table order, not
// charset_map order.
// GetCompactClassID returns the charset_map index of a shape, and
// GetSparseClassID returns the shape_id.
//
// =====What is SampleIterator good for?=====
// Inside a classifier training module, the SampleIterator has abstracted away
// all the different modes above.
// Use the following iteration to train your classifier:
// for (it.Begin(); !it.AtEnd(); it.Next()) {
// const TrainingSample& sample = it.GetSample();
// int class_id = it.GetCompactClassID();
// Your classifier may or may not be dealing with a shape_table, and may be
// dealing with some subset of the character/shape set. It doesn't need to
// know and shouldn't care. It is just learning shapes with compact class ids
// in the range [0, it.CompactCharsetSize()).
class SampleIterator {
 public:
  SampleIterator();
  ~SampleIterator();
  // Resets the iterator to its default, uninitialized state.
  void Clear();
  // See class comment for arguments.
  void Init(const IndexMapBiDi* charset_map,
            const ShapeTable* shape_table,
            bool randomize,
            TrainingSampleSet* sample_set);
  // Iterator functions designed for use with a simple for loop:
  // for (it.Begin(); !it.AtEnd(); it.Next()) {
  //   const TrainingSample& sample = it.GetSample();
  //   int class_id = it.GetCompactClassID();
  //   ...
  // }
  // Restarts the iteration at the first indexable sample.
  void Begin();
  // Returns true once the iteration has passed the last sample.
  bool AtEnd() const;
  // Returns the sample at the current iteration position (read-only).
  const TrainingSample& GetSample() const;
  // Returns a mutable pointer to the sample at the current position.
  TrainingSample* MutableSample() const;
  // Returns the total index (from the original set of samples) of the current
  // sample.
  int GlobalSampleIndex() const;
  // Returns the index of the current sample in compact charset space, so
  // in a 2-class problem between x and y, the returned indices will all be
  // 0 or 1, and have nothing to do with the unichar_ids.
  // If the charset_map_ is NULL, then this is equal to GetSparseClassID().
  int GetCompactClassID() const;
  // Returns the index of the current sample in sparse charset space, so
  // in a 2-class problem between x and y, the returned indices will all be
  // x or y, where x and y may be unichar_ids (no shape_table_) or shape_ids
  // with a shape_table_.
  int GetSparseClassID() const;
  // Moves on to the next indexable sample. If the end is reached, leaves
  // the state such that AtEnd() is true.
  void Next();
  // Returns the size of the compact charset space.
  int CompactCharsetSize() const;
  // Returns the size of the sparse charset space.
  int SparseCharsetSize() const;
  // Returns the charset subset map. Only valid when a non-NULL charset_map
  // was supplied to Init, as this dereferences the stored pointer.
  const IndexMapBiDi& charset_map() const {
    return *charset_map_;
  }
  // Returns the shape table in use (may be NULL; see class comment).
  const ShapeTable* shape_table() const {
    return shape_table_;
  }
  // Sample set operations.
  // Returns the sample set being iterated over.
  const TrainingSampleSet* sample_set() const {
    return sample_set_;
  }
  // A set of functions that do something to all the samples accessed by the
  // iterator, as it is currently setup.
  // Apply the supplied feature_space/feature_map transform to all samples
  // accessed by this iterator.
  void MapSampleFeatures(const IntFeatureMap& feature_map);
  // Adjust the weights of all the samples to be uniform in the given charset.
  // Returns the number of samples in the iterator.
  int UniformSamples();
  // Normalize the weights of all the samples defined by the iterator so they
  // sum to 1. Returns the minimum assigned sample weight.
  double NormalizeSamples();

 private:
  // Helper returns the current UnicharAndFont shape_entry.
  const UnicharAndFonts* GetShapeEntry() const;

  // Map to subset the actual charset space.
  const IndexMapBiDi* charset_map_;
  // Shape table to recombine character classes into shapes.
  const ShapeTable* shape_table_;
  // The samples to iterate over.
  TrainingSampleSet* sample_set_;
  // Flag to control randomizing the sample features.
  bool randomize_;
  // Shape table owned by this, used to iterate character classes.
  ShapeTable* owned_shape_table_;
  // Top-level iteration. Shape index in sparse charset_map space.
  int shape_index_;
  int num_shapes_;
  // Index to the character class within a shape.
  int shape_char_index_;
  int num_shape_chars_;
  // Index to the font within a shape/class pair.
  int shape_font_index_;
  int num_shape_fonts_;
  // The lowest level iteration. sample_index_/num_samples_ counts samples
  // in the current shape/class/font combination.
  int sample_index_;
  int num_samples_;
};
} // namespace tesseract.
#endif // TESSERACT_CLASSIFY_SAMPLEITERATOR_H_
| C++ |
///////////////////////////////////////////////////////////////////////
// File: classify.h
// Description: classify class.
// Author: Samuel Charron
//
// (C) Copyright 2006, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#ifndef TESSERACT_CLASSIFY_CLASSIFY_H__
#define TESSERACT_CLASSIFY_CLASSIFY_H__
#include "adaptive.h"
#include "ccstruct.h"
#include "classify.h"
#include "dict.h"
#include "featdefs.h"
#include "fontinfo.h"
#include "intfx.h"
#include "intmatcher.h"
#include "normalis.h"
#include "ratngs.h"
#include "ocrfeatures.h"
#include "unicity_table.h"
class ScrollView;
class WERD_CHOICE;
class WERD_RES;
struct ADAPT_RESULTS;
struct NORM_PROTOS;
// Sentinel font-info ids for cases where no real fontinfo entry applies.
static const int kUnknownFontinfoId = -1;
static const int kBlankFontinfoId = -2;
namespace tesseract {
class ShapeClassifier;
struct ShapeRating;
class ShapeTable;
struct UnicharRating;
// How segmented is a blob. In this enum, character refers to a classifiable
// unit, but that is too long and character is usually easier to understand.
enum CharSegmentationType {
  CST_FRAGMENT,  // A partial character (a piece of a single character).
  CST_WHOLE,     // A correctly segmented character.
  CST_IMPROPER,  // Spans parts of more than one character, but covers fewer
                 // than 2 whole characters.
  CST_NGRAM      // Multiple whole characters.
};
// Top-level classification engine. Bundles the adaptive (trainable)
// classifier, the static (pre-trained) classifier, the class pruner,
// feature extraction/normalization and font detection, together with all
// the tunable parameters that control them. Owns the Dict used for
// dictionary-related decisions during classification/adaption.
class Classify : public CCStruct {
 public:
  Classify();
  virtual ~Classify();
  // Returns the dictionary owned by this classifier.
  Dict& getDict() {
    return dict_;
  }
  // Returns the shape table used to remap classifier output (may be NULL).
  const ShapeTable* shape_table() const {
    return shape_table_;
  }
  // Takes ownership of the given classifier, and uses it for future calls
  // to CharNormClassifier.
  void SetStaticClassifier(ShapeClassifier* static_classifier);
  // Adds a noise classification result that is a bit worse than the worst
  // current result, or the worst possible result if no current results.
  void AddLargeSpeckleTo(int blob_length, BLOB_CHOICE_LIST *choices);
  // Returns true if the blob is small enough to be a large speckle.
  bool LargeSpeckle(const TBLOB &blob);
  /* adaptive.cpp ************************************************************/
  ADAPT_TEMPLATES NewAdaptedTemplates(bool InitFromUnicharset);
  int GetFontinfoId(ADAPT_CLASS Class, uinT8 ConfigId);
  // Runs the class pruner from int_templates on the given features, returning
  // the number of classes output in results.
  // int_templates          Class pruner tables
  // num_features           Number of features in blob
  // features               Array of features
  // normalization_factors  (input) Array of int_templates->NumClasses fudge
  //                        factors from blob normalization process.
  //                        (Indexed by CLASS_INDEX)
  // expected_num_features  (input) Array of int_templates->NumClasses
  //                        expected number of features for each class.
  //                        (Indexed by CLASS_INDEX)
  // results                (output) Sorted Array of pruned classes.
  //                        Array must be sized to take the maximum possible
  //                        number of outputs : int_templates->NumClasses.
  int PruneClasses(const INT_TEMPLATES_STRUCT* int_templates,
                   int num_features,
                   const INT_FEATURE_STRUCT* features,
                   const uinT8* normalization_factors,
                   const uinT16* expected_num_features,
                   GenericVector<CP_RESULT_STRUCT>* results);
  void ReadNewCutoffs(FILE *CutoffFile, bool swap, inT64 end_offset,
                      CLASS_CUTOFF_ARRAY Cutoffs);
  void PrintAdaptedTemplates(FILE *File, ADAPT_TEMPLATES Templates);
  void WriteAdaptedTemplates(FILE *File, ADAPT_TEMPLATES Templates);
  ADAPT_TEMPLATES ReadAdaptedTemplates(FILE *File);
  /* normmatch.cpp ************************************************************/
  FLOAT32 ComputeNormMatch(CLASS_ID ClassId,
                           const FEATURE_STRUCT& feature, BOOL8 DebugMatch);
  void FreeNormProtos();
  NORM_PROTOS *ReadNormProtos(FILE *File, inT64 end_offset);
  /* protos.cpp ***************************************************************/
  void ConvertProto(PROTO Proto, int ProtoId, INT_CLASS Class);
  INT_TEMPLATES CreateIntTemplates(CLASSES FloatProtos,
                                   const UNICHARSET& target_unicharset);
  /* adaptmatch.cpp ***********************************************************/
  // Learn the given word using its chopped_word, seam_array, denorm,
  // box_word, best_state, and correct_text to learn both correctly and
  // incorrectly segmented blobs. If filename is not NULL, then LearnBlob
  // is called and the data will be written to a file for static training.
  // Otherwise AdaptToBlob is called for adaption within a document.
  void LearnWord(const char* filename, WERD_RES *word);
  // Builds a blob of length fragments, from the word, starting at start,
  // and then learn it, as having the given correct_text.
  // If filename is not NULL, then LearnBlob
  // is called and the data will be written to a file for static training.
  // Otherwise AdaptToBlob is called for adaption within a document.
  // threshold is a magic number required by AdaptToChar and generated by
  // GetAdaptThresholds.
  // Although it can be partly inferred from the string, segmentation is
  // provided to explicitly clarify the character segmentation.
  void LearnPieces(const char* filename, int start, int length,
                   float threshold, CharSegmentationType segmentation,
                   const char* correct_text, WERD_RES *word);
  void InitAdaptiveClassifier(bool load_pre_trained_templates);
  void InitAdaptedClass(TBLOB *Blob,
                        CLASS_ID ClassId,
                        int FontinfoId,
                        ADAPT_CLASS Class,
                        ADAPT_TEMPLATES Templates);
  void AmbigClassifier(const GenericVector<INT_FEATURE_STRUCT>& int_features,
                       const INT_FX_RESULT_STRUCT& fx_info,
                       const TBLOB *blob,
                       INT_TEMPLATES templates,
                       ADAPT_CLASS *classes,
                       UNICHAR_ID *ambiguities,
                       ADAPT_RESULTS *results);
  void MasterMatcher(INT_TEMPLATES templates,
                     inT16 num_features,
                     const INT_FEATURE_STRUCT* features,
                     const uinT8* norm_factors,
                     ADAPT_CLASS* classes,
                     int debug,
                     int matcher_multiplier,
                     const TBOX& blob_box,
                     const GenericVector<CP_RESULT_STRUCT>& results,
                     ADAPT_RESULTS* final_results);
  // Converts configs to fonts, and if the result is not adapted, and a
  // shape_table_ is present, the shape is expanded to include all
  // unichar_ids represented, before applying a set of corrections to the
  // distance rating in int_result, (see ComputeCorrectedRating.)
  // The results are added to the final_results output.
  void ExpandShapesAndApplyCorrections(ADAPT_CLASS* classes,
                                       bool debug,
                                       int class_id,
                                       int bottom, int top,
                                       float cp_rating,
                                       int blob_length,
                                       int matcher_multiplier,
                                       const uinT8* cn_factors,
                                       INT_RESULT_STRUCT& int_result,
                                       ADAPT_RESULTS* final_results);
  // Applies a set of corrections to the distance im_rating,
  // including the cn_correction, miss penalty and additional penalty
  // for non-alnums being vertical misfits. Returns the corrected distance.
  double ComputeCorrectedRating(bool debug, int unichar_id, double cp_rating,
                                double im_rating, int feature_misses,
                                int bottom, int top,
                                int blob_length, int matcher_multiplier,
                                const uinT8* cn_factors);
  void ConvertMatchesToChoices(const DENORM& denorm, const TBOX& box,
                               ADAPT_RESULTS *Results,
                               BLOB_CHOICE_LIST *Choices);
  void AddNewResult(ADAPT_RESULTS *results,
                    CLASS_ID class_id,
                    int shape_id,
                    FLOAT32 rating,
                    bool adapted,
                    int config,
                    int fontinfo_id,
                    int fontinfo_id2);
  int GetAdaptiveFeatures(TBLOB *Blob,
                          INT_FEATURE_ARRAY IntFeatures,
                          FEATURE_SET *FloatFeatures);
#ifndef GRAPHICS_DISABLED
  void DebugAdaptiveClassifier(TBLOB *Blob,
                               ADAPT_RESULTS *Results);
#endif
  PROTO_ID MakeNewTempProtos(FEATURE_SET Features,
                             int NumBadFeat,
                             FEATURE_ID BadFeat[],
                             INT_CLASS IClass,
                             ADAPT_CLASS Class,
                             BIT_VECTOR TempProtoMask);
  int MakeNewTemporaryConfig(ADAPT_TEMPLATES Templates,
                             CLASS_ID ClassId,
                             int FontinfoId,
                             int NumFeatures,
                             INT_FEATURE_ARRAY Features,
                             FEATURE_SET FloatFeatures);
  void MakePermanent(ADAPT_TEMPLATES Templates,
                     CLASS_ID ClassId,
                     int ConfigId,
                     TBLOB *Blob);
  void PrintAdaptiveMatchResults(FILE *File, ADAPT_RESULTS *Results);
  void RemoveExtraPuncs(ADAPT_RESULTS *Results);
  void RemoveBadMatches(ADAPT_RESULTS *Results);
  void SetAdaptiveThreshold(FLOAT32 Threshold);
  void ShowBestMatchFor(int shape_id,
                        const INT_FEATURE_STRUCT* features,
                        int num_features);
  // Returns a string for the classifier class_id: either the corresponding
  // unicharset debug_str or the shape_table_ debug str.
  STRING ClassIDToDebugStr(const INT_TEMPLATES_STRUCT* templates,
                           int class_id, int config_id) const;
  // Converts a classifier class_id index with a config ID to:
  // shape_table_ present: a shape_table_ index OR
  // No shape_table_: a font ID.
  // Without shape training, each class_id, config pair represents a single
  // unichar id/font combination, so this function looks up the corresponding
  // font id.
  // With shape training, each class_id, config pair represents a single
  // shape table index, so the fontset_table stores the shape table index,
  // and the shape_table_ must be consulted to obtain the actual unichar_id/
  // font combinations that the shape represents.
  int ClassAndConfigIDToFontOrShapeID(int class_id,
                                      int int_result_config) const;
  // Converts a shape_table_ index to a classifier class_id index (not a
  // unichar-id!). Uses a search, so not fast.
  int ShapeIDToClassID(int shape_id) const;
  UNICHAR_ID *BaselineClassifier(
      TBLOB *Blob, const GenericVector<INT_FEATURE_STRUCT>& int_features,
      const INT_FX_RESULT_STRUCT& fx_info,
      ADAPT_TEMPLATES Templates, ADAPT_RESULTS *Results);
  int CharNormClassifier(TBLOB *blob,
                         const TrainingSample& sample,
                         ADAPT_RESULTS *adapt_results);
  // As CharNormClassifier, but operates on a TrainingSample and outputs to
  // a GenericVector of ShapeRating without conversion to classes.
  int CharNormTrainingSample(bool pruner_only, int keep_this,
                             const TrainingSample& sample,
                             GenericVector<UnicharRating>* results);
  UNICHAR_ID *GetAmbiguities(TBLOB *Blob, CLASS_ID CorrectClass);
  void DoAdaptiveMatch(TBLOB *Blob, ADAPT_RESULTS *Results);
  void AdaptToChar(TBLOB *Blob,
                   CLASS_ID ClassId,
                   int FontinfoId,
                   FLOAT32 Threshold);
  void DisplayAdaptedChar(TBLOB* blob, INT_CLASS_STRUCT* int_class);
  bool AdaptableWord(WERD_RES* word);
  void EndAdaptiveClassifier();
  void SettupPass1();
  void SettupPass2();
  void AdaptiveClassifier(TBLOB *Blob, BLOB_CHOICE_LIST *Choices);
  void ClassifyAsNoise(ADAPT_RESULTS *Results);
  void ResetAdaptiveClassifierInternal();
  int GetCharNormFeature(const INT_FX_RESULT_STRUCT& fx_info,
                         INT_TEMPLATES templates,
                         uinT8* pruner_norm_array,
                         uinT8* char_norm_array);
  // Computes the char_norm_array for the unicharset and, if not NULL, the
  // pruner_array as appropriate according to the existence of the shape_table.
  // The norm_feature is deleted as it is almost certainly no longer needed.
  void ComputeCharNormArrays(FEATURE_STRUCT* norm_feature,
                             INT_TEMPLATES_STRUCT* templates,
                             uinT8* char_norm_array,
                             uinT8* pruner_array);
  bool TempConfigReliable(CLASS_ID class_id, const TEMP_CONFIG &config);
  void UpdateAmbigsGroup(CLASS_ID class_id, TBLOB *Blob);
  // Full when at least one adaption attempt has failed (see
  // NumAdaptationsFailed below).
  bool AdaptiveClassifierIsFull() { return NumAdaptationsFailed > 0; }
  bool LooksLikeGarbage(TBLOB *blob);
  void RefreshDebugWindow(ScrollView **win, const char *msg,
                          int y_offset, const TBOX &wbox);
  // intfx.cpp
  // Computes the DENORMS for bl(baseline) and cn(character) normalization
  // during feature extraction. The input denorm describes the current state
  // of the blob, which is usually a baseline-normalized word.
  // The Transforms setup are as follows:
  // Baseline Normalized (bl) Output:
  //   We center the grapheme by aligning the x-coordinate of its centroid with
  //   x=128 and leaving the already-baseline-normalized y as-is.
  //
  // Character Normalized (cn) Output:
  //   We align the grapheme's centroid at the origin and scale it
  //   asymmetrically in x and y so that the 2nd moments are a standard value
  //   (51.2) ie the result is vaguely square.
  // If classify_nonlinear_norm is true:
  //   A non-linear normalization is setup that attempts to evenly distribute
  //   edges across x and y.
  //
  // Some of the fields of fx_info are also setup:
  // Length: Total length of outline.
  // Rx:     Rounded y second moment. (Reversed by convention.)
  // Ry:     rounded x second moment.
  // Xmean:  Rounded x center of mass of the blob.
  // Ymean:  Rounded y center of mass of the blob.
  static void SetupBLCNDenorms(const TBLOB& blob, bool nonlinear_norm,
                               DENORM* bl_denorm, DENORM* cn_denorm,
                               INT_FX_RESULT_STRUCT* fx_info);
  // Extracts sets of 3-D features of length kStandardFeatureLength (=12.8), as
  // (x,y) position and angle as measured counterclockwise from the vector
  // <-1, 0>, from blob using two normalizations defined by bl_denorm and
  // cn_denorm. See SetpuBLCNDenorms for definitions.
  // If outline_cn_counts is not NULL, on return it contains the cumulative
  // number of cn features generated for each outline in the blob (in order).
  // Thus after the first outline, there were (*outline_cn_counts)[0] features,
  // after the second outline, there were (*outline_cn_counts)[1] features etc.
  static void ExtractFeatures(const TBLOB& blob,
                              bool nonlinear_norm,
                              GenericVector<INT_FEATURE_STRUCT>* bl_features,
                              GenericVector<INT_FEATURE_STRUCT>* cn_features,
                              INT_FX_RESULT_STRUCT* results,
                              GenericVector<int>* outline_cn_counts);
  /* float2int.cpp ************************************************************/
  void ClearCharNormArray(uinT8* char_norm_array);
  void ComputeIntCharNormArray(const FEATURE_STRUCT& norm_feature,
                               uinT8* char_norm_array);
  void ComputeIntFeatures(FEATURE_SET Features, INT_FEATURE_ARRAY IntFeatures);
  /* intproto.cpp *************************************************************/
  INT_TEMPLATES ReadIntTemplates(FILE *File);
  void WriteIntTemplates(FILE *File, INT_TEMPLATES Templates,
                         const UNICHARSET& target_unicharset);
  CLASS_ID GetClassToDebug(const char *Prompt, bool* adaptive_on,
                           bool* pretrained_on, int* shape_id);
  void ShowMatchDisplay();
  /* font detection ***********************************************************/
  UnicityTable<FontInfo>& get_fontinfo_table() {
    return fontinfo_table_;
  }
  const UnicityTable<FontInfo>& get_fontinfo_table() const {
    return fontinfo_table_;
  }
  UnicityTable<FontSet>& get_fontset_table() {
    return fontset_table_;
  }
  /* mfoutline.cpp ***********************************************************/
  void NormalizeOutlines(LIST Outlines, FLOAT32 *XScale, FLOAT32 *YScale);
  /* outfeat.cpp ***********************************************************/
  FEATURE_SET ExtractOutlineFeatures(TBLOB *Blob);
  /* picofeat.cpp ***********************************************************/
  FEATURE_SET ExtractPicoFeatures(TBLOB *Blob);

  // Member variables.
  // Parameters (declared via the *_VAR_H macros so they are registered with
  // the params system and controllable from config files).
  BOOL_VAR_H(prioritize_division, FALSE,
             "Prioritize blob division over chopping");
  INT_VAR_H(tessedit_single_match, FALSE, "Top choice only from CP");
  BOOL_VAR_H(classify_enable_learning, true, "Enable adaptive classifier");
  INT_VAR_H(classify_debug_level, 0, "Classify debug level");
  /* mfoutline.cpp ***********************************************************/
  /* control knobs used to control normalization of outlines */
  INT_VAR_H(classify_norm_method, character, "Normalization Method   ...");
  double_VAR_H(classify_char_norm_range, 0.2,
               "Character Normalization Range ...");
  double_VAR_H(classify_min_norm_scale_x, 0.0, "Min char x-norm scale ...");
  double_VAR_H(classify_max_norm_scale_x, 0.325, "Max char x-norm scale ...");
  double_VAR_H(classify_min_norm_scale_y, 0.0, "Min char y-norm scale ...");
  double_VAR_H(classify_max_norm_scale_y, 0.325, "Max char y-norm scale ...");
  double_VAR_H(classify_max_rating_ratio, 1.5,
               "Veto ratio between classifier ratings");
  double_VAR_H(classify_max_certainty_margin, 5.5,
               "Veto difference between classifier certainties");
  /* adaptmatch.cpp ***********************************************************/
  BOOL_VAR_H(tess_cn_matching, 0, "Character Normalized Matching");
  BOOL_VAR_H(tess_bn_matching, 0, "Baseline Normalized Matching");
  BOOL_VAR_H(classify_enable_adaptive_matcher, 1, "Enable adaptive classifier");
  BOOL_VAR_H(classify_use_pre_adapted_templates, 0,
             "Use pre-adapted classifier templates");
  BOOL_VAR_H(classify_save_adapted_templates, 0,
             "Save adapted templates to a file");
  BOOL_VAR_H(classify_enable_adaptive_debugger, 0, "Enable match debugger");
  BOOL_VAR_H(classify_nonlinear_norm, 0,
             "Non-linear stroke-density normalization");
  INT_VAR_H(matcher_debug_level, 0, "Matcher Debug Level");
  INT_VAR_H(matcher_debug_flags, 0, "Matcher Debug Flags");
  INT_VAR_H(classify_learning_debug_level, 0, "Learning Debug Level: ");
  double_VAR_H(matcher_good_threshold, 0.125, "Good Match (0-1)");
  double_VAR_H(matcher_great_threshold, 0.0, "Great Match (0-1)");
  double_VAR_H(matcher_perfect_threshold, 0.02, "Perfect Match (0-1)");
  double_VAR_H(matcher_bad_match_pad, 0.15, "Bad Match Pad (0-1)");
  double_VAR_H(matcher_rating_margin, 0.1, "New template margin (0-1)");
  double_VAR_H(matcher_avg_noise_size, 12.0, "Avg. noise blob length: ");
  INT_VAR_H(matcher_permanent_classes_min, 1, "Min # of permanent classes");
  INT_VAR_H(matcher_min_examples_for_prototyping, 3,
            "Reliable Config Threshold");
  INT_VAR_H(matcher_sufficient_examples_for_prototyping, 5,
            "Enable adaption even if the ambiguities have not been seen");
  double_VAR_H(matcher_clustering_max_angle_delta, 0.015,
               "Maximum angle delta for prototype clustering");
  double_VAR_H(classify_misfit_junk_penalty, 0.0,
               "Penalty to apply when a non-alnum is vertically out of "
               "its expected textline position");
  double_VAR_H(rating_scale, 1.5, "Rating scaling factor");
  double_VAR_H(certainty_scale, 20.0, "Certainty scaling factor");
  double_VAR_H(tessedit_class_miss_scale, 0.00390625,
               "Scale factor for features not used");
  double_VAR_H(classify_adapted_pruning_factor, 2.5,
               "Prune poor adapted results this much worse than best result");
  double_VAR_H(classify_adapted_pruning_threshold, -1.0,
               "Threshold at which classify_adapted_pruning_factor starts");
  INT_VAR_H(classify_adapt_proto_threshold, 230,
            "Threshold for good protos during adaptive 0-255");
  INT_VAR_H(classify_adapt_feature_threshold, 230,
            "Threshold for good features during adaptive 0-255");
  BOOL_VAR_H(disable_character_fragments, TRUE,
             "Do not include character fragments in the"
             " results of the classifier");
  double_VAR_H(classify_character_fragments_garbage_certainty_threshold, -3.0,
               "Exclude fragments that do not match any whole character"
               " with at least this certainty");
  BOOL_VAR_H(classify_debug_character_fragments, FALSE,
             "Bring up graphical debugging windows for fragments training");
  BOOL_VAR_H(matcher_debug_separate_windows, FALSE,
             "Use two different windows for debugging the matching: "
             "One for the protos and one for the features.");
  STRING_VAR_H(classify_learn_debug_str, "", "Class str to debug learning");
  /* intmatcher.cpp **********************************************************/
  INT_VAR_H(classify_class_pruner_threshold, 229,
            "Class Pruner Threshold 0-255");
  INT_VAR_H(classify_class_pruner_multiplier, 15,
            "Class Pruner Multiplier 0-255: ");
  INT_VAR_H(classify_cp_cutoff_strength, 7,
            "Class Pruner CutoffStrength: ");
  INT_VAR_H(classify_integer_matcher_multiplier, 10,
            "Integer Matcher Multiplier 0-255: ");

  // Use class variables to hold onto built-in templates and adapted templates.
  INT_TEMPLATES PreTrainedTemplates;
  ADAPT_TEMPLATES AdaptedTemplates;
  // Create dummy proto and config masks for use with the built-in templates.
  BIT_VECTOR AllProtosOn;
  BIT_VECTOR AllConfigsOn;
  BIT_VECTOR AllConfigsOff;
  BIT_VECTOR TempProtoMask;
  // Whether learning/adaption is currently enabled (initialized to true in
  // the constructor).
  bool EnableLearning;
  /* normmatch.cpp */
  NORM_PROTOS *NormProtos;
  /* font detection ***********************************************************/
  UnicityTable<FontInfo> fontinfo_table_;
  // Without shape training, each class_id, config pair represents a single
  // unichar id/font combination, so each fontset_table_ entry holds font ids
  // for each config in the class.
  // With shape training, each class_id, config pair represents a single
  // shape_table_ index, so the fontset_table_ stores the shape_table_ index,
  // and the shape_table_ must be consulted to obtain the actual unichar_id/
  // font combinations that the shape represents.
  UnicityTable<FontSet> fontset_table_;
  INT_VAR_H(il1_adaption_test, 0, "Dont adapt to i/I at beginning of word");
  BOOL_VAR_H(classify_bln_numeric_mode, 0,
             "Assume the input is numbers [0-9].");
  double_VAR_H(speckle_large_max_size, 0.30, "Max large speckle size");
  double_VAR_H(speckle_rating_penalty, 10.0,
               "Penalty to add to worst rating for noise");

 protected:
  // Integer feature matcher used by the classifiers.
  IntegerMatcher im_;
  // Definitions of the feature types/extractors in use.
  FEATURE_DEFS_STRUCT feature_defs_;
  // If a shape_table_ is present, it is used to remap classifier output in
  // ExpandShapesAndApplyCorrections. font_ids referenced by configs actually
  // mean an index to the shape_table_ and the choices returned are *all* the
  // shape_table_ entries at that index.
  ShapeTable* shape_table_;

 private:
  // Dictionary owned by this classifier (see getDict()).
  Dict dict_;
  // The currently active static classifier.
  ShapeClassifier* static_classifier_;

  /* variables used to hold performance statistics */
  int NumAdaptationsFailed;

  // Expected number of features in the class pruner, used to penalize
  // unknowns that have too few features (like a c being classified as e) so
  // it doesn't recognize everything as '@' or '#'.
  // CharNormCutoffs is for the static classifier (with no shapetable).
  // BaselineCutoffs gets a copy of CharNormCutoffs as an estimate of the real
  // value in the adaptive classifier. Both are indexed by unichar_id.
  // shapetable_cutoffs_ provides a similar value for each shape in the
  // shape_table_
  uinT16* CharNormCutoffs;
  uinT16* BaselineCutoffs;
  GenericVector<uinT16> shapetable_cutoffs_;
  // Debug windows (lazily created; may remain NULL).
  ScrollView* learn_debug_win_;
  ScrollView* learn_fragmented_word_debug_win_;
  ScrollView* learn_fragments_debug_win_;
};
} // namespace tesseract
#endif // TESSERACT_CLASSIFY_CLASSIFY_H__
| C++ |
/******************************************************************************
** Filename: features.h
** Purpose: Generic definition of a feature.
** Author: Dan Johnson
** History: Sun May 20 10:28:30 1990, DSJ, Created.
**
** (c) Copyright Hewlett-Packard Company, 1988.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
******************************************************************************/
#ifndef FEATURES_H
#define FEATURES_H
/**----------------------------------------------------------------------------
Include Files and Type Defines
----------------------------------------------------------------------------**/
#include "blobs.h"
#include <stdio.h>
class DENORM;
struct INT_FX_RESULT_STRUCT;
#undef Min
#undef Max
#define FEAT_NAME_SIZE 80
// define trap errors which can be caused by this module
#define ILLEGAL_FEATURE_PARAM 1000
#define ILLEGAL_NUM_FEATURES 1001
// A character is described by multiple sets of extracted features. Each
// set contains a number of features of a particular type, for example, a
// set of bays, or a set of closures, or a set of microfeatures. Each
// feature consists of a number of parameters. All features within a
// feature set contain the same number of parameters. All circular
// parameters are required to be the first parameters in the feature.

// Describes one parameter (dimension) of a feature type. Range, HalfRange
// and MidRange are derived from Min/Max (see the DefineParam macro below).
struct PARAM_DESC {
  inT8 Circular;      // TRUE if dimension wraps around
  inT8 NonEssential;  // TRUE if dimension not used in searches
  FLOAT32 Min;        // low end of range for circular dimensions
  FLOAT32 Max;        // high end of range for circular dimensions
  FLOAT32 Range;      // Max - Min
  FLOAT32 HalfRange;  // (Max - Min)/2
  FLOAT32 MidRange;   // (Max + Min)/2
};

// Static description of one feature type: parameter count, a short name,
// and one PARAM_DESC per parameter.
struct FEATURE_DESC_STRUCT {
  uinT16 NumParams;             // total # of params
  const char *ShortName;        // short name for feature
  const PARAM_DESC *ParamDesc;  // array - one per param
};
typedef FEATURE_DESC_STRUCT *FEATURE_DESC;

// A single extracted feature. Params is a variable-size trailing array
// (NumParams entries in the feature's type descriptor).
struct FEATURE_STRUCT {
  const FEATURE_DESC_STRUCT *Type;  // points to description of feature type
  FLOAT32 Params[1];                // variable size array - params for feature
};
typedef FEATURE_STRUCT *FEATURE;

// A set of features of one type for a character. Features is a
// variable-size trailing array (up to MaxNumFeatures entries).
struct FEATURE_SET_STRUCT {
  uinT16 NumFeatures;     // number of features in set
  uinT16 MaxNumFeatures;  // maximum size of feature set
  FEATURE Features[1];    // variable size array of features
};
typedef FEATURE_SET_STRUCT *FEATURE_SET;

// A generic character description as a char pointer. In reality, it will be
// a pointer to some data structure. Paired feature extractors/matchers need
// to agree on the data structure to be used, however, the high level
// classifier does not need to know the details of this data structure.
typedef char *CHAR_FEATURES;

// Signature of a feature-extractor function: produces a FEATURE_SET from a
// blob given two denormalizations and the integer-feature results.
typedef FEATURE_SET (*FX_FUNC)(TBLOB *, const DENORM&, const DENORM&,
                               const INT_FX_RESULT_STRUCT&);

// Binds an extractor function to a feature type.
struct FEATURE_EXT_STRUCT {
  FX_FUNC Extractor;  // func to extract features
};
/*----------------------------------------------------------------------
    Macros for defining the parameters of a new features
----------------------------------------------------------------------*/
// Usage:
//   StartParamDesc(MyParams)
//     DefineParam(0, 0, 0.0, 1.0)
//     ...
//   EndParamDesc
// DefineParam fills in the derived Range/HalfRange/MidRange fields of
// PARAM_DESC from Min and Max so they need not be computed at runtime.
#define StartParamDesc(Name) \
const PARAM_DESC Name[] = {
#define DefineParam(Circular, NonEssential, Min, Max) \
  {Circular, NonEssential, Min, Max, \
  (Max) - (Min), (((Max) - (Min))/2.0), (((Max) + (Min))/2.0)},
#define EndParamDesc };
/*----------------------------------------------------------------------
Macro for describing a new feature.  The parameters of the macro
are as follows:
DefineFeature (Name, NumLinear, NumCircular, ShortName, ParamName)
----------------------------------------------------------------------*/
// Expands to a FEATURE_DESC_STRUCT named Name with NL linear plus NC
// circular parameters, short name SN and parameter-descriptor array PN.
#define DefineFeature(Name, NL, NC, SN, PN) \
const FEATURE_DESC_STRUCT Name = { \
  ((NL) + (NC)), SN, PN};
/*----------------------------------------------------------------------
        Generic routines that work for all feature types
----------------------------------------------------------------------*/
// Construction/destruction of features and feature sets.
BOOL8 AddFeature(FEATURE_SET FeatureSet, FEATURE Feature);
void FreeFeature(FEATURE Feature);
void FreeFeatureSet(FEATURE_SET FeatureSet);
FEATURE NewFeature(const FEATURE_DESC_STRUCT *FeatureDesc);
FEATURE_SET NewFeatureSet(int NumFeatures);
// Text-file serialization of features and feature sets (see features.cpp
// for the exact file format).
FEATURE ReadFeature(FILE *File, const FEATURE_DESC_STRUCT *FeatureDesc);
FEATURE_SET ReadFeatureSet(FILE *File, const FEATURE_DESC_STRUCT *FeatureDesc);
void WriteFeature(FILE *File, FEATURE Feature);
void WriteFeatureSet(FILE *File, FEATURE_SET FeatureSet);
// Writes a parameter description in the old training-tools format.
void WriteOldParamDesc(FILE *File, const FEATURE_DESC_STRUCT *FeatureDesc);
#endif
| C++ |
/******************************************************************************
** Filename: adaptive.c
** Purpose: Adaptive matcher.
** Author: Dan Johnson
** History: Fri Mar 8 10:00:21 1991, DSJ, Created.
**
** (c) Copyright Hewlett-Packard Company, 1988.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
******************************************************************************/
/*----------------------------------------------------------------------------
Include Files and Type Defines
----------------------------------------------------------------------------*/
#include "adaptive.h"
#include "emalloc.h"
#include "freelist.h"
#include "globals.h"
#include "classify.h"
#ifdef __UNIX__
#include <assert.h>
#endif
#include <stdio.h>
/*----------------------------------------------------------------------------
Public Code
----------------------------------------------------------------------------*/
/*---------------------------------------------------------------------------*/
/**
* This routine adds a new adapted class to an existing
* set of adapted templates.
*
* @param Templates set of templates to add new class to
* @param Class new class to add to templates
* @param ClassId class id to associate with new class
*
* @note Globals: none
* @note Exceptions: none
* @note History: Thu Mar 14 13:06:09 1991, DSJ, Created.
*/
void AddAdaptedClass(ADAPT_TEMPLATES Templates,
                     ADAPT_CLASS Class,
                     CLASS_ID ClassId) {
  // Preconditions: a valid, currently unused class id and a freshly
  // created (no permanent configs) adapted class.
  assert(Templates != NULL);
  assert(Class != NULL);
  assert(LegalClassId(ClassId));
  assert(UnusedClassIdIn(Templates->Templates, ClassId));
  assert(Class->NumPermConfigs == 0);

  // Register a minimal integer class under the same id, then record the
  // adapted class in the parallel Class array.
  INT_CLASS int_class = NewIntClass(1, 1);
  AddIntClass(Templates->Templates, ClassId, int_class);

  assert(Templates->Class[ClassId] == NULL);
  Templates->Class[ClassId] = Class;
} /* AddAdaptedClass */
/*---------------------------------------------------------------------------*/
/**
* This routine frees all memory consumed by a temporary
* configuration.
*
* @param Config config to be freed
*
* @note Globals: none
* @note Exceptions: none
* @note History: Thu Mar 14 13:34:23 1991, DSJ, Created.
*/
void FreeTempConfig(TEMP_CONFIG Config) {
  assert (Config != NULL);
  // Release the contexts list, the proto bit vector, then the config
  // structure itself.
  destroy_nodes (Config->ContextsSeen, memfree);
  FreeBitVector (Config->Protos);
  free_struct (Config, sizeof (TEMP_CONFIG_STRUCT), "TEMP_CONFIG_STRUCT");
} /* FreeTempConfig */
/*---------------------------------------------------------------------------*/
// List-destructor callback: arg is really a temporary proto; release its
// TEMP_PROTO_STRUCT storage.
void FreeTempProto(void *arg) {
  free_struct ((PROTO) arg, sizeof (TEMP_PROTO_STRUCT), "TEMP_PROTO_STRUCT");
}
// Frees a permanent configuration, including its ambiguity list
// (allocated with new[] in ReadPermConfig, hence delete[]).
void FreePermConfig(PERM_CONFIG Config) {
  assert(Config != NULL);
  delete [] Config->Ambigs;
  free_struct(Config, sizeof(PERM_CONFIG_STRUCT), "PERM_CONFIG_STRUCT");
}
/*---------------------------------------------------------------------------*/
/**
* This operation allocates and initializes a new adapted
* class data structure and returns a ptr to it.
*
* @return Ptr to new class data structure.
*
* @note Globals: none
* @note Exceptions: none
* @note History: Thu Mar 14 12:58:13 1991, DSJ, Created.
*/
ADAPT_CLASS NewAdaptedClass() {
  // Allocate a fresh adapted class: nothing seen yet, empty temp-proto
  // list, and cleared permanent proto/config bit vectors.
  ADAPT_CLASS new_class = (ADAPT_CLASS) Emalloc (sizeof (ADAPT_CLASS_STRUCT));
  new_class->NumPermConfigs = 0;
  new_class->MaxNumTimesSeen = 0;
  new_class->TempProtos = NIL_LIST;

  new_class->PermProtos = NewBitVector (MAX_NUM_PROTOS);
  new_class->PermConfigs = NewBitVector (MAX_NUM_CONFIGS);
  zero_all_bits (new_class->PermProtos, WordsInVectorOfSize (MAX_NUM_PROTOS));
  zero_all_bits (new_class->PermConfigs,
                 WordsInVectorOfSize (MAX_NUM_CONFIGS));

  for (int cfg = 0; cfg < MAX_NUM_CONFIGS; cfg++)
    TempConfigFor (new_class, cfg) = NULL;

  return new_class;
} /* NewAdaptedClass */
/*-------------------------------------------------------------------------*/
// Releases everything owned by adapt_class: each permanent or temporary
// config, the permanent proto/config bit vectors, the temp-proto list,
// and finally the class structure itself.
void free_adapted_class(ADAPT_CLASS adapt_class) {
  for (int cfg = 0; cfg < MAX_NUM_CONFIGS; cfg++) {
    // The PermConfigs bit tells which union member is live for this slot.
    if (ConfigIsPermanent (adapt_class, cfg)) {
      if (PermConfigFor (adapt_class, cfg) != NULL)
        FreePermConfig (PermConfigFor (adapt_class, cfg));
    } else {
      if (TempConfigFor (adapt_class, cfg) != NULL)
        FreeTempConfig (TempConfigFor (adapt_class, cfg));
    }
  }
  FreeBitVector (adapt_class->PermProtos);
  FreeBitVector (adapt_class->PermConfigs);
  destroy_nodes (adapt_class->TempProtos, FreeTempProto);
  Efree(adapt_class);
}
/*---------------------------------------------------------------------------*/
namespace tesseract {
/**
 * Allocates memory for adapted templates.
 *
 * @param InitFromUnicharset if true, add an empty class for each char
 *        in unicharset to the newly created templates
 * @return Ptr to new adapted templates.
 *
 * @note Globals: none
 * @note Exceptions: none
 * @note History: Fri Mar 8 10:15:28 1991, DSJ, Created.
 */
ADAPT_TEMPLATES Classify::NewAdaptedTemplates(bool InitFromUnicharset) {
  ADAPT_TEMPLATES Templates;
  int i;

  Templates = (ADAPT_TEMPLATES) Emalloc (sizeof (ADAPT_TEMPLATES_STRUCT));

  Templates->Templates = NewIntTemplates ();
  Templates->NumPermClasses = 0;
  Templates->NumNonEmptyClasses = 0;

  /* Insert an empty class for each unichar id in unicharset */
  for (i = 0; i < MAX_NUM_CLASSES; i++) {
    // Slots beyond the unicharset size remain NULL.
    Templates->Class[i] = NULL;
    if (InitFromUnicharset && i < unicharset.size()) {
      AddAdaptedClass(Templates, NewAdaptedClass(), i);
    }
  }

  return (Templates);
} /* NewAdaptedTemplates */
// Returns FontinfoId of the given config of the given adapted class.
// Permanent and temporary configs keep the font id in different
// structures, so dispatch on the config's permanence bit.
int Classify::GetFontinfoId(ADAPT_CLASS Class, uinT8 ConfigId) {
  if (ConfigIsPermanent(Class, ConfigId))
    return PermConfigFor(Class, ConfigId)->FontinfoId;
  return TempConfigFor(Class, ConfigId)->FontinfoId;
}
} // namespace tesseract
/*----------------------------------------------------------------------------*/
// Frees an adapted-templates set: every per-class adapted structure, the
// underlying integer templates, then the container. NULL is a no-op.
void free_adapted_templates(ADAPT_TEMPLATES templates) {
  if (templates == NULL)
    return;
  for (int i = 0; i < (templates->Templates)->NumClasses; i++)
    free_adapted_class (templates->Class[i]);
  free_int_templates (templates->Templates);
  Efree(templates);
}
/*---------------------------------------------------------------------------*/
/**
* This routine allocates and returns a new temporary config.
*
* @param MaxProtoId max id of any proto in new config
* @param FontinfoId font information from pre-trained templates
* @return Ptr to new temp config.
*
* @note Globals: none
* @note Exceptions: none
* @note History: Thu Mar 14 13:28:21 1991, DSJ, Created.
*/
TEMP_CONFIG NewTempConfig(int MaxProtoId, int FontinfoId) {
  const int num_protos = MaxProtoId + 1;

  TEMP_CONFIG config =
    (TEMP_CONFIG) alloc_struct (sizeof (TEMP_CONFIG_STRUCT),
                                "TEMP_CONFIG_STRUCT");
  // A new config has been seen once, owns a cleared proto bit vector
  // sized for num_protos protos, and has no contexts yet.
  config->Protos = NewBitVector (num_protos);
  config->NumTimesSeen = 1;
  config->MaxProtoId = MaxProtoId;
  config->ProtoVectorSize = WordsInVectorOfSize (num_protos);
  config->ContextsSeen = NIL_LIST;
  zero_all_bits (config->Protos, config->ProtoVectorSize);
  config->FontinfoId = FontinfoId;
  return config;
} /* NewTempConfig */
/*---------------------------------------------------------------------------*/
/**
* This routine allocates and returns a new temporary proto.
*
* @return Ptr to new temporary proto.
*
* @note Globals: none
* @note Exceptions: none
* @note History: Thu Mar 14 13:31:31 1991, DSJ, Created.
*/
TEMP_PROTO NewTempProto() {
  // Allocate a bare temporary proto; the caller fills in its fields.
  return (TEMP_PROTO) alloc_struct (sizeof (TEMP_PROTO_STRUCT),
                                    "TEMP_PROTO_STRUCT");
} /* NewTempProto */
/*---------------------------------------------------------------------------*/
namespace tesseract {
/**
* This routine prints a summary of the adapted templates
* in Templates to File.
*
* @param File open text file to print Templates to
* @param Templates adapted templates to print to File
*
* @note Globals: none
* @note Exceptions: none
* @note History: Wed Mar 20 13:35:29 1991, DSJ, Created.
*/
void Classify::PrintAdaptedTemplates(FILE *File, ADAPT_TEMPLATES Templates) {
  int i;
  INT_CLASS IClass;
  ADAPT_CLASS AClass;

  fprintf (File, "\n\nSUMMARY OF ADAPTED TEMPLATES:\n\n");
  fprintf (File, "Num classes = %d; Num permanent classes = %d\n\n",
           Templates->NumNonEmptyClasses, Templates->NumPermClasses);
  fprintf (File, " Id NC NPC NP NPP\n");
  fprintf (File, "------------------------\n");

  for (i = 0; i < (Templates->Templates)->NumClasses; i++) {
    IClass = Templates->Templates->Class[i];
    AClass = Templates->Class[i];
    // Only classes that have actually adapted something are listed.
    if (!IsEmptyAdaptedClass (AClass)) {
      // Columns: id, unichar, num configs, num permanent configs,
      // num protos, num permanent protos (total minus temporaries).
      fprintf (File, "%5d %s %3d %3d %3d %3d\n",
               i, unicharset.id_to_unichar(i),
               IClass->NumConfigs, AClass->NumPermConfigs,
               IClass->NumProtos,
               IClass->NumProtos - count (AClass->TempProtos));
    }
  }
  fprintf (File, "\n");
} /* PrintAdaptedTemplates */
} // namespace tesseract
/*---------------------------------------------------------------------------*/
/**
* Read an adapted class description from File and return
* a ptr to the adapted class.
*
* @param File open file to read adapted class from
* @return Ptr to new adapted class.
*
* @note Globals: none
* @note Exceptions: none
* @note History: Tue Mar 19 14:11:01 1991, DSJ, Created.
*/
ADAPT_CLASS ReadAdaptedClass(FILE *File) {
  // Initialize the counts: the freads below are unchecked, so a short or
  // failed read must yield empty lists rather than loop over garbage.
  int NumTempProtos = 0;
  int NumConfigs = 0;
  int i;
  ADAPT_CLASS Class;
  TEMP_PROTO TempProto;

  /* first read high level adapted class structure */
  // The raw struct (including stale pointers) comes straight off disk;
  // all pointer fields are replaced below.
  Class = (ADAPT_CLASS) Emalloc (sizeof (ADAPT_CLASS_STRUCT));
  fread ((char *) Class, sizeof (ADAPT_CLASS_STRUCT), 1, File);

  /* then read in the definitions of the permanent protos and configs */
  Class->PermProtos = NewBitVector (MAX_NUM_PROTOS);
  Class->PermConfigs = NewBitVector (MAX_NUM_CONFIGS);
  fread ((char *) Class->PermProtos, sizeof (uinT32),
         WordsInVectorOfSize (MAX_NUM_PROTOS), File);
  fread ((char *) Class->PermConfigs, sizeof (uinT32),
         WordsInVectorOfSize (MAX_NUM_CONFIGS), File);

  /* then read in the list of temporary protos */
  fread ((char *) &NumTempProtos, sizeof (int), 1, File);
  Class->TempProtos = NIL_LIST;
  for (i = 0; i < NumTempProtos; i++) {
    TempProto =
      (TEMP_PROTO) alloc_struct (sizeof (TEMP_PROTO_STRUCT),
                                 "TEMP_PROTO_STRUCT");
    fread ((char *) TempProto, sizeof (TEMP_PROTO_STRUCT), 1, File);
    Class->TempProtos = push_last (Class->TempProtos, TempProto);
  }

  /* then read in the adapted configs */
  // The PermConfigs bit chooses which union member (and on-disk format)
  // each config slot uses; must mirror WriteAdaptedClass exactly.
  fread ((char *) &NumConfigs, sizeof (int), 1, File);
  for (i = 0; i < NumConfigs; i++)
    if (test_bit (Class->PermConfigs, i))
      Class->Config[i].Perm = ReadPermConfig (File);
    else
      Class->Config[i].Temp = ReadTempConfig (File);

  return (Class);
} /* ReadAdaptedClass */
/*---------------------------------------------------------------------------*/
namespace tesseract {
/**
* Read a set of adapted templates from File and return
* a ptr to the templates.
*
* @param File open text file to read adapted templates from
* @return Ptr to adapted templates read from File.
*
* @note Globals: none
* @note Exceptions: none
* @note History: Mon Mar 18 15:18:10 1991, DSJ, Created.
*/
ADAPT_TEMPLATES Classify::ReadAdaptedTemplates(FILE *File) {
  int i;
  ADAPT_TEMPLATES Templates;

  /* first read the high level adaptive template struct */
  // NOTE(review): the raw struct is read off disk including stale pointer
  // fields; Class[i] is re-filled only for i < NumClasses -- entries past
  // that keep whatever bytes were serialized. Confirm they are never read.
  Templates = (ADAPT_TEMPLATES) Emalloc (sizeof (ADAPT_TEMPLATES_STRUCT));
  fread ((char *) Templates, sizeof (ADAPT_TEMPLATES_STRUCT), 1, File);

  /* then read in the basic integer templates */
  Templates->Templates = ReadIntTemplates (File);

  /* then read in the adaptive info for each class */
  for (i = 0; i < (Templates->Templates)->NumClasses; i++) {
    Templates->Class[i] = ReadAdaptedClass (File);
  }
  return (Templates);
} /* ReadAdaptedTemplates */
} // namespace tesseract
/*---------------------------------------------------------------------------*/
/**
* Read a permanent configuration description from File
* and return a ptr to it.
*
* @param File open file to read permanent config from
* @return Ptr to new permanent configuration description.
*
* @note Globals: none
* @note Exceptions: none
* @note History: Tue Mar 19 14:25:26 1991, DSJ, Created.
*/
PERM_CONFIG ReadPermConfig(FILE *File) {
  PERM_CONFIG Config = (PERM_CONFIG) alloc_struct(sizeof(PERM_CONFIG_STRUCT),
                                                  "PERM_CONFIG_STRUCT");
  // Initialize so a failed (unchecked) fread cannot leave NumAmbigs with
  // a garbage value that would size the allocation and read below.
  uinT8 NumAmbigs = 0;
  fread ((char *) &NumAmbigs, sizeof(uinT8), 1, File);
  // Ambigs is stored as NumAmbigs ids followed by a -1 terminator
  // appended here (the terminator is not on disk).
  Config->Ambigs = new UNICHAR_ID[NumAmbigs + 1];
  fread(Config->Ambigs, sizeof(UNICHAR_ID), NumAmbigs, File);
  Config->Ambigs[NumAmbigs] = -1;
  fread(&(Config->FontinfoId), sizeof(int), 1, File);
  return (Config);
} /* ReadPermConfig */
/*---------------------------------------------------------------------------*/
/**
* Read a temporary configuration description from File
* and return a ptr to it.
*
* @param File open file to read temporary config from
* @return Ptr to new temporary configuration description.
*
* @note Globals: none
* @note Exceptions: none
* @note History: Tue Mar 19 14:29:59 1991, DSJ, Created.
*/
TEMP_CONFIG ReadTempConfig(FILE *File) {
  TEMP_CONFIG Config;

  // The whole struct (including the stale Protos pointer) is read raw;
  // Protos is then replaced with a freshly allocated bit vector sized
  // from the ProtoVectorSize just read.
  Config =
    (TEMP_CONFIG) alloc_struct (sizeof (TEMP_CONFIG_STRUCT),
                                "TEMP_CONFIG_STRUCT");
  fread ((char *) Config, sizeof (TEMP_CONFIG_STRUCT), 1, File);

  Config->Protos = NewBitVector (Config->ProtoVectorSize * BITSINLONG);
  fread ((char *) Config->Protos, sizeof (uinT32),
         Config->ProtoVectorSize, File);
  return (Config);
} /* ReadTempConfig */
/*---------------------------------------------------------------------------*/
/**
* This routine writes a binary representation of Class
* to File.
*
* @param File open file to write Class to
* @param Class adapted class to write to File
* @param NumConfigs number of configs in Class
*
* @note Globals: none
* @note Exceptions: none
* @note History: Tue Mar 19 13:33:51 1991, DSJ, Created.
*/
void WriteAdaptedClass(FILE *File, ADAPT_CLASS Class, int NumConfigs) {
  int NumTempProtos;
  LIST TempProtos;
  int i;

  /* first write high level adapted class structure */
  fwrite ((char *) Class, sizeof (ADAPT_CLASS_STRUCT), 1, File);

  /* then write out the definitions of the permanent protos and configs */
  fwrite ((char *) Class->PermProtos, sizeof (uinT32),
          WordsInVectorOfSize (MAX_NUM_PROTOS), File);
  fwrite ((char *) Class->PermConfigs, sizeof (uinT32),
          WordsInVectorOfSize (MAX_NUM_CONFIGS), File);

  /* then write out the list of temporary protos */
  NumTempProtos = count (Class->TempProtos);
  fwrite ((char *) &NumTempProtos, sizeof (int), 1, File);
  TempProtos = Class->TempProtos;
  iterate (TempProtos) {
    void* proto = first_node(TempProtos);
    fwrite ((char *) proto, sizeof (TEMP_PROTO_STRUCT), 1, File);
  }

  /* then write out the adapted configs */
  // The PermConfigs bit selects which union member (and which on-disk
  // record format) each slot uses; must mirror ReadAdaptedClass exactly.
  fwrite ((char *) &NumConfigs, sizeof (int), 1, File);
  for (i = 0; i < NumConfigs; i++)
    if (test_bit (Class->PermConfigs, i))
      WritePermConfig (File, Class->Config[i].Perm);
    else
      WriteTempConfig (File, Class->Config[i].Temp);
} /* WriteAdaptedClass */
/*---------------------------------------------------------------------------*/
namespace tesseract {
/**
* This routine saves Templates to File in a binary format.
*
* @param File open text file to write Templates to
* @param Templates set of adapted templates to write to File
*
* @note Globals: none
* @note Exceptions: none
* @note History: Mon Mar 18 15:07:32 1991, DSJ, Created.
*/
void Classify::WriteAdaptedTemplates(FILE *File, ADAPT_TEMPLATES Templates) {
  int i;

  /* first write the high level adaptive template struct */
  // Written raw, pointer fields included; ReadAdaptedTemplates discards
  // and rebuilds them on load.
  fwrite ((char *) Templates, sizeof (ADAPT_TEMPLATES_STRUCT), 1, File);

  /* then write out the basic integer templates */
  WriteIntTemplates (File, Templates->Templates, unicharset);

  /* then write out the adaptive info for each class */
  for (i = 0; i < (Templates->Templates)->NumClasses; i++) {
    WriteAdaptedClass (File, Templates->Class[i],
                       Templates->Templates->Class[i]->NumConfigs);
  }
} /* WriteAdaptedTemplates */
} // namespace tesseract
/*---------------------------------------------------------------------------*/
/**
* This routine writes a binary representation of a
* permanent configuration to File.
*
* @param File open file to write Config to
* @param Config permanent config to write to File
*
* @note Globals: none
* @note Exceptions: none
* @note History: Tue Mar 19 13:55:44 1991, DSJ, Created.
*/
void WritePermConfig(FILE *File, PERM_CONFIG Config) {
  uinT8 NumAmbigs = 0;

  assert (Config != NULL);
  // Count entries up to the list terminator. NOTE(review): ReadPermConfig
  // terminates Ambigs with -1, but this loop also stops at an id of 0 --
  // presumably unichar id 0 never appears as an ambiguity; confirm.
  while (Config->Ambigs[NumAmbigs] > 0) ++NumAmbigs;

  fwrite((char *) &NumAmbigs, sizeof(uinT8), 1, File);
  fwrite(Config->Ambigs, sizeof(UNICHAR_ID), NumAmbigs, File);
  fwrite(&(Config->FontinfoId), sizeof(int), 1, File);
} /* WritePermConfig */
/*---------------------------------------------------------------------------*/
/**
* This routine writes a binary representation of a
* temporary configuration to File.
*
* @param File open file to write Config to
* @param Config temporary config to write to File
*
* @note Globals: none
* @note Exceptions: none
* @note History: Tue Mar 19 14:00:28 1991, DSJ, Created.
*/
void WriteTempConfig(FILE *File, TEMP_CONFIG Config) {
  assert (Config != NULL);
  /* contexts not yet implemented */
  assert (Config->ContextsSeen == NULL);

  // Raw struct first (Protos pointer is rebuilt on read), then the proto
  // bit vector itself; mirrors ReadTempConfig.
  fwrite ((char *) Config, sizeof (TEMP_CONFIG_STRUCT), 1, File);
  fwrite ((char *) Config->Protos, sizeof (uinT32),
          Config->ProtoVectorSize, File);
} /* WriteTempConfig */
| C++ |
// Copyright 2010 Google Inc. All Rights Reserved.
// Author: rays@google.com (Ray Smith)
///////////////////////////////////////////////////////////////////////
// File: mastertrainer.cpp
// Description: Trainer to build the MasterClassifier.
// Author: Ray Smith
// Created: Wed Nov 03 18:10:01 PDT 2010
//
// (C) Copyright 2010, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
// Include automatically generated configuration file if running autoconf.
#ifdef HAVE_CONFIG_H
#include "config_auto.h"
#endif
#include "mastertrainer.h"
#include <math.h>
#include <time.h>
#include "allheaders.h"
#include "boxread.h"
#include "classify.h"
#include "efio.h"
#include "errorcounter.h"
#include "featdefs.h"
#include "sampleiterator.h"
#include "shapeclassifier.h"
#include "shapetable.h"
#include "svmnode.h"
#include "scanutils.h"
namespace tesseract {
// Constants controlling clustering. With a low kMinClusteredShapes and a high
// kMaxUnicharsPerCluster, then kFontMergeDistance is the only limiting factor.
// They are consumed by SetupMasterShapes/ClusterShapes below.
// Min number of shapes in the output.
const int kMinClusteredShapes = 1;
// Max number of unichars in any individual cluster.
const int kMaxUnicharsPerCluster = 2000;
// Mean font distance below which to merge fonts and unichars.
const float kFontMergeDistance = 0.025;
// Constructs a MasterTrainer. norm_mode selects the character
// normalization mode; shape_analysis enables fragment replacement in
// PostLoadCleanup; replicate_samples enables sample replication (see
// ReplicateAndRandomizeSamplesIfRequired). All three sample sets share
// fontinfo_table_.
MasterTrainer::MasterTrainer(NormalizationMode norm_mode,
                             bool shape_analysis,
                             bool replicate_samples,
                             int debug_level)
  : norm_mode_(norm_mode), samples_(fontinfo_table_),
    junk_samples_(fontinfo_table_), verify_samples_(fontinfo_table_),
    charsetsize_(0),
    enable_shape_anaylsis_(shape_analysis),
    enable_replication_(replicate_samples),
    fragments_(NULL), prev_unichar_id_(-1), debug_level_(debug_level) {
}
MasterTrainer::~MasterTrainer() {
  // Owned resources: fragments_ (new[] in LoadUnicharset) and the page
  // images loaded by LoadPageImages.
  delete [] fragments_;
  for (int p = 0; p < page_images_.size(); ++p)
    pixDestroy(&page_images_[p]);
}
// WARNING! Serialize/DeSerialize are only partial, providing
// enough data to get the samples back and display them.
// Writes to the given file. Returns false in case of error.
// Field order here must stay in sync with DeSerialize below.
bool MasterTrainer::Serialize(FILE* fp) const {
  if (fwrite(&norm_mode_, sizeof(norm_mode_), 1, fp) != 1) return false;
  if (!unicharset_.save_to_file(fp)) return false;
  if (!feature_space_.Serialize(fp)) return false;
  if (!samples_.Serialize(fp)) return false;
  if (!junk_samples_.Serialize(fp)) return false;
  if (!verify_samples_.Serialize(fp)) return false;
  if (!master_shapes_.Serialize(fp)) return false;
  if (!flat_shapes_.Serialize(fp)) return false;
  if (!fontinfo_table_.Serialize(fp)) return false;
  if (!xheights_.Serialize(fp)) return false;
  return true;
}
// Reads from the given file. Returns false in case of error.
// If swap is true, assumes a big/little-endian swap is needed.
// Field order must stay in sync with Serialize above.
bool MasterTrainer::DeSerialize(bool swap, FILE* fp) {
  if (fread(&norm_mode_, sizeof(norm_mode_), 1, fp) != 1) return false;
  if (swap) {
    // Only norm_mode_ needs explicit swapping here; the member
    // DeSerialize calls below handle their own byte order via swap.
    ReverseN(&norm_mode_, sizeof(norm_mode_));
  }
  if (!unicharset_.load_from_file(fp)) return false;
  charsetsize_ = unicharset_.size();
  if (!feature_space_.DeSerialize(swap, fp)) return false;
  // The feature map must be rebuilt to match the loaded feature space.
  feature_map_.Init(feature_space_);
  if (!samples_.DeSerialize(swap, fp)) return false;
  if (!junk_samples_.DeSerialize(swap, fp)) return false;
  if (!verify_samples_.DeSerialize(swap, fp)) return false;
  if (!master_shapes_.DeSerialize(swap, fp)) return false;
  if (!flat_shapes_.DeSerialize(swap, fp)) return false;
  if (!fontinfo_table_.DeSerialize(swap, fp)) return false;
  if (!xheights_.DeSerialize(swap, fp)) return false;
  return true;
}
// Load an initial unicharset, or set one up if the file cannot be read.
void MasterTrainer::LoadUnicharset(const char* filename) {
  if (!unicharset_.load_from_file(filename)) {
    tprintf("Failed to load unicharset from file %s\n"
            "Building unicharset for training from scratch...\n",
            filename);
    unicharset_.clear();
    UNICHARSET initialized;
    // Add special characters, as they were removed by the clear, but the
    // default constructor puts them in.
    unicharset_.AppendOtherUnicharset(initialized);
  }
  charsetsize_ = unicharset_.size();
  // fragments_[id] records the fragment junk-id mapping built up by
  // AddSample; reset it for the new charset size.
  delete [] fragments_;
  fragments_ = new int[charsetsize_];
  memset(fragments_, 0, sizeof(*fragments_) * charsetsize_);
  samples_.LoadUnicharset(filename);
  junk_samples_.LoadUnicharset(filename);
  verify_samples_.LoadUnicharset(filename);
}
// Reads the samples and their features from the given .tr format file,
// adding them to the trainer with the font_id from the content of the file.
// See mftraining.cpp for a description of the file format.
// If verification, then these are verification samples, not training.
void MasterTrainer::ReadTrainingSamples(const char* page_name,
                                        const FEATURE_DEFS_STRUCT& feature_defs,
                                        bool verification) {
  char buffer[2048];
  // Resolve the indices of the four feature sets once, up front.
  int int_feature_type = ShortNameToFeatureType(feature_defs, kIntFeatureType);
  int micro_feature_type = ShortNameToFeatureType(feature_defs,
                                                  kMicroFeatureType);
  int cn_feature_type = ShortNameToFeatureType(feature_defs, kCNFeatureType);
  int geo_feature_type = ShortNameToFeatureType(feature_defs, kGeoFeatureType);
  FILE* fp = Efopen(page_name, "rb");
  if (fp == NULL) {
    tprintf("Failed to open tr file: %s\n", page_name);
    return;
  }
  tr_filenames_.push_back(STRING(page_name));
  while (fgets(buffer, sizeof(buffer), fp) != NULL) {
    if (buffer[0] == '\n')
      continue;

    // Each record starts with a "<fontname> <box spec>" line.
    char* space = strchr(buffer, ' ');
    if (space == NULL) {
      tprintf("Bad format in tr file, reading fontname, unichar\n");
      continue;
    }
    *space++ = '\0';
    int font_id = GetFontInfoId(buffer);
    if (font_id < 0) font_id = 0;
    int page_number;
    STRING unichar;
    TBOX bounding_box;
    if (!ParseBoxFileStr(space, &page_number, &unichar, &bounding_box)) {
      tprintf("Bad format in tr file, reading box coords\n");
      // NOTE(review): skipping here without consuming the following char
      // description may leave fp desynchronized with the record stream --
      // confirm the .tr format tolerates this.
      continue;
    }
    CHAR_DESC char_desc = ReadCharDescription(feature_defs, fp);
    TrainingSample* sample = new TrainingSample;
    sample->set_font_id(font_id);
    // Offset page numbers by previously loaded images so multiple tr
    // files can be concatenated (see LoadPageImages).
    sample->set_page_num(page_number + page_images_.size());
    sample->set_bounding_box(bounding_box);
    sample->ExtractCharDesc(int_feature_type, micro_feature_type,
                            cn_feature_type, geo_feature_type, char_desc);
    AddSample(verification, unichar.string(), sample);
    FreeCharDescription(char_desc);
  }
  charsetsize_ = unicharset_.size();
  fclose(fp);
}
// Adds the given single sample to the trainer, setting the classid
// appropriately from the given unichar_str.
// Unichars not in the main unicharset go to junk_samples_. fragments_
// tracks, per real unichar, whether it was consistently followed by the
// same natural-fragment junk id (0 = unseen, -1 = ambiguous).
void MasterTrainer::AddSample(bool verification, const char* unichar,
                              TrainingSample* sample) {
  if (verification) {
    verify_samples_.AddSample(unichar, sample);
    prev_unichar_id_ = -1;
  } else if (unicharset_.contains_unichar(unichar)) {
    // Two real characters in a row: the previous one cannot map to a
    // unique fragment, so mark it ambiguous.
    if (prev_unichar_id_ >= 0)
      fragments_[prev_unichar_id_] = -1;
    prev_unichar_id_ = samples_.AddSample(unichar, sample);
    // Record the (unichar, font) pair in the flat shape table once.
    if (flat_shapes_.FindShape(prev_unichar_id_, sample->font_id()) < 0)
      flat_shapes_.AddShape(prev_unichar_id_, sample->font_id());
  } else {
    int junk_id = junk_samples_.AddSample(unichar, sample);
    if (prev_unichar_id_ >= 0) {
      CHAR_FRAGMENT* frag = CHAR_FRAGMENT::parse_from_string(unichar);
      if (frag != NULL && frag->is_natural()) {
        // First natural fragment after prev_unichar_id_ records its junk
        // id; any different one later makes the mapping ambiguous.
        if (fragments_[prev_unichar_id_] == 0)
          fragments_[prev_unichar_id_] = junk_id;
        else if (fragments_[prev_unichar_id_] != junk_id)
          fragments_[prev_unichar_id_] = -1;
      }
      delete frag;
    }
    prev_unichar_id_ = -1;
  }
}
// Loads all pages from the given tif filename and append to page_images_.
// Must be called after ReadTrainingSamples, as the current number of images
// is used as an offset for page numbers in the samples.
void MasterTrainer::LoadPageImages(const char* filename) {
int page;
Pix* pix;
for (page = 0; (pix = pixReadTiff(filename, page)) != NULL; ++page) {
page_images_.push_back(pix);
}
tprintf("Loaded %d page images from %s\n", page, filename);
}
// Cleans up the samples after initial load from the tr files, and prior to
// saving the MasterTrainer:
// Remaps fragmented chars if running shape anaylsis.
// Sets up the samples appropriately for class/fontwise access.
// Deletes outlier samples.
void MasterTrainer::PostLoadCleanup() {
  if (debug_level_ > 0)
    tprintf("PostLoadCleanup...\n");
  if (enable_shape_anaylsis_)
    ReplaceFragmentedSamples();
  // Normalize the verification samples independently of the training set.
  SampleIterator sample_it;
  sample_it.Init(NULL, NULL, true, &verify_samples_);
  sample_it.NormalizeSamples();
  verify_samples_.OrganizeByFontAndClass();

  samples_.IndexFeatures(feature_space_);
  // TODO(rays) DeleteOutliers is currently turned off to prove NOP-ness
  // against current training.
  // samples_.DeleteOutliers(feature_space_, debug_level_ > 0);
  samples_.OrganizeByFontAndClass();
  if (debug_level_ > 0)
    tprintf("ComputeCanonicalSamples...\n");
  samples_.ComputeCanonicalSamples(feature_map_, debug_level_ > 0);
}
// Gets the samples ready for training. Use after both
// ReadTrainingSamples+PostLoadCleanup or DeSerialize.
// Re-indexes the features and computes canonical and cloud features.
void MasterTrainer::PreTrainingSetup() {
  if (debug_level_ > 0)
    tprintf("PreTrainingSetup...\n");
  samples_.IndexFeatures(feature_space_);
  samples_.ComputeCanonicalFeatures();
  if (debug_level_ > 0)
    tprintf("ComputeCloudFeatures...\n");
  samples_.ComputeCloudFeatures(feature_space_.Size());
}
// Sets up the master_shapes_ table, which tells which fonts should stay
// together until they get to a leaf node classifier.
void MasterTrainer::SetupMasterShapes() {
  tprintf("Building master shape table\n");
  int num_fonts = samples_.NumFonts();

  // Regular characters, beginning fragments and ending fragments are
  // clustered separately before being merged into one table.
  ShapeTable char_shapes_begin_fragment(samples_.unicharset());
  ShapeTable char_shapes_end_fragment(samples_.unicharset());
  ShapeTable char_shapes(samples_.unicharset());
  for (int c = 0; c < samples_.charsetsize(); ++c) {
    // One shape per font that actually has samples for this char, then
    // cluster the fonts of this single char together.
    ShapeTable shapes(samples_.unicharset());
    for (int f = 0; f < num_fonts; ++f) {
      if (samples_.NumClassSamples(f, c, true) > 0)
        shapes.AddShape(c, f);
    }
    ClusterShapes(kMinClusteredShapes, 1, kFontMergeDistance, &shapes);

    const CHAR_FRAGMENT *fragment = samples_.unicharset().get_fragment(c);

    if (fragment == NULL)
      char_shapes.AppendMasterShapes(shapes, NULL);
    else if (fragment->is_beginning())
      char_shapes_begin_fragment.AppendMasterShapes(shapes, NULL);
    else if (fragment->is_ending())
      char_shapes_end_fragment.AppendMasterShapes(shapes, NULL);
    else
      char_shapes.AppendMasterShapes(shapes, NULL);
  }
  // Cluster each fragment group across unichars, fold them into
  // char_shapes, and cluster the combined table.
  ClusterShapes(kMinClusteredShapes, kMaxUnicharsPerCluster,
                kFontMergeDistance, &char_shapes_begin_fragment);
  char_shapes.AppendMasterShapes(char_shapes_begin_fragment, NULL);
  ClusterShapes(kMinClusteredShapes, kMaxUnicharsPerCluster,
                kFontMergeDistance, &char_shapes_end_fragment);
  char_shapes.AppendMasterShapes(char_shapes_end_fragment, NULL);
  ClusterShapes(kMinClusteredShapes, kMaxUnicharsPerCluster,
                kFontMergeDistance, &char_shapes);
  master_shapes_.AppendMasterShapes(char_shapes, NULL);
  tprintf("Master shape_table:%s\n", master_shapes_.SummaryStr().string());
}
// Adds the junk_samples_ to the main samples_ set. Junk samples are initially
// fragments and n-grams (all incorrectly segmented characters).
// Various training functions may result in incorrectly segmented characters
// being added to the unicharset of the main samples, perhaps because they
// form a "radical" decomposition of some (Indic) grapheme, or because they
// just look the same as a real character (like rn/m)
// This function moves all the junk samples, to the main samples_ set, but
// desirable junk, being any sample for which the unichar already exists in
// the samples_ unicharset gets the unichar-ids re-indexed to match, but
// anything else gets re-marked as unichar_id 0 (space character) to identify
// it as junk to the error counter.
void MasterTrainer::IncludeJunk() {
// Get ids of fragments in junk_samples_ that replace the dead chars.
const UNICHARSET& junk_set = junk_samples_.unicharset();
const UNICHARSET& sample_set = samples_.unicharset();
int num_junks = junk_samples_.num_samples();
tprintf("Moving %d junk samples to master sample set.\n", num_junks);
for (int s = 0; s < num_junks; ++s) {
TrainingSample* sample = junk_samples_.mutable_sample(s);
int junk_id = sample->class_id();
const char* junk_utf8 = junk_set.id_to_unichar(junk_id);
int sample_id = sample_set.unichar_to_id(junk_utf8);
if (sample_id == INVALID_UNICHAR_ID)
sample_id = 0;
sample->set_class_id(sample_id);
junk_samples_.extract_sample(s);
samples_.AddSample(sample_id, sample);
}
junk_samples_.DeleteDeadSamples();
samples_.OrganizeByFontAndClass();
}
// Replicates the samples and perturbs them if the enable_replication_ flag
// is set. MUST be used after the last call to OrganizeByFontAndClass on
// the training samples, ie after IncludeJunk if it is going to be used, as
// OrganizeByFontAndClass will eat the replicated samples into the regular
// samples.
void MasterTrainer::ReplicateAndRandomizeSamplesIfRequired() {
  if (enable_replication_) {
    if (debug_level_ > 0)
      tprintf("ReplicateAndRandomize...\n");
    // Both the verification and training sets are replicated; only the
    // training set's features need re-indexing afterwards, since the new
    // (perturbed) samples have not been through IndexFeatures yet.
    verify_samples_.ReplicateAndRandomizeSamples();
    samples_.ReplicateAndRandomizeSamples();
    samples_.IndexFeatures(feature_space_);
  }
}
// Loads the basic font properties file into fontinfo_table_.
// Each line is expected to hold: <name> <italic> <bold> <fixed> <serif>
// <fraktur>. Malformed lines are skipped. Returns false only on failure
// to open the file.
bool MasterTrainer::LoadFontInfo(const char* filename) {
  FILE* fp = fopen(filename, "rb");
  if (fp == NULL) {
    fprintf(stderr, "Failed to load font_properties from %s\n", filename);
    return false;
  }
  int italic, bold, fixed, serif, fraktur;
  while (!feof(fp)) {
    FontInfo fontinfo;
    // "%1024s" can store up to 1024 chars plus the terminating NUL, so the
    // buffer must be 1025 bytes (the original 1024 was an off-by-one).
    char* font_name = new char[1025];
    fontinfo.name = font_name;
    fontinfo.properties = 0;
    fontinfo.universal_id = 0;
    if (tfscanf(fp, "%1024s %i %i %i %i %i\n", font_name,
                &italic, &bold, &fixed, &serif, &fraktur) != 6) {
      delete[] font_name;  // Don't leak the name on a malformed line.
      continue;
    }
    // Pack the five boolean properties into a bit-field.
    fontinfo.properties =
        (italic << 0) +
        (bold << 1) +
        (fixed << 2) +
        (serif << 3) +
        (fraktur << 4);
    if (!fontinfo_table_.contains(fontinfo)) {
      // The table takes ownership of font_name via the shallow struct copy.
      fontinfo_table_.push_back(fontinfo);
    } else {
      delete[] font_name;  // Duplicate entry: the table keeps the first name.
    }
  }
  fclose(fp);
  return true;
}
// Loads the xheight font properties file into xheights_.
// Returns false on failure.
bool MasterTrainer::LoadXHeights(const char* filename) {
  tprintf("fontinfo table is of size %d\n", fontinfo_table_.size());
  // -1 marks fonts with no xheight entry; they get the mean at the end.
  xheights_.init_to_size(fontinfo_table_.size(), -1);
  if (filename == NULL) return true;
  FILE* fp = fopen(filename, "rb");
  if (fp == NULL) {
    fprintf(stderr, "Failed to load font xheights from %s\n", filename);
    return false;
  }
  tprintf("Reading x-heights from %s ...\n", filename);
  // Only the name field participates in fontinfo_table_ lookups.
  FontInfo fontinfo;
  fontinfo.properties = 0;
  fontinfo.universal_id = 0;
  char fontname[1024];
  int xheight;
  int total_xheight = 0;
  int xheight_count = 0;
  while (!feof(fp)) {
    if (tfscanf(fp, "%1023s %d\n", fontname, &xheight) != 2) continue;
    fontname[1023] = '\0';
    fontinfo.name = fontname;
    if (!fontinfo_table_.contains(fontinfo)) continue;
    xheights_[fontinfo_table_.get_index(fontinfo)] = xheight;
    total_xheight += xheight;
    ++xheight_count;
  }
  if (xheight_count == 0) {
    fprintf(stderr, "No valid xheights in %s!\n", filename);
    fclose(fp);
    return false;
  }
  // Any font without an explicit entry gets the mean of the known heights.
  int mean_xheight = DivRounded(total_xheight, xheight_count);
  for (int i = 0; i < fontinfo_table_.size(); ++i) {
    if (xheights_[i] < 0) xheights_[i] = mean_xheight;
  }
  fclose(fp);
  return true;
}  // LoadXHeights
// Reads spacing stats from filename and adds them to fontinfo_table.
// Returns true on success, and silently succeeds on a missing file.
// NOTE(review): the "%s" reads below are unbounded; this assumes
// well-formed training data with unichars shorter than UNICHAR_LEN.
bool MasterTrainer::AddSpacingInfo(const char *filename) {
  FILE* fontinfo_file = fopen(filename, "rb");
  if (fontinfo_file == NULL)
    return true;  // We silently ignore missing files!
  // Find the fontinfo_id.
  int fontinfo_id = GetBestMatchingFontInfoId(filename);
  if (fontinfo_id < 0) {
    tprintf("No font found matching fontinfo filename %s\n", filename);
    fclose(fontinfo_file);
    return false;
  }
  tprintf("Reading spacing from %s for font %d...\n", filename, fontinfo_id);
  // TODO(rays) scale should probably be a double, but keep as an int for now
  // to duplicate current behavior.
  int scale = kBlnXHeight / xheights_[fontinfo_id];
  int num_unichars;
  char uch[UNICHAR_LEN];
  char kerned_uch[UNICHAR_LEN];
  int x_gap, x_gap_before, x_gap_after, num_kerned;
  ASSERT_HOST(tfscanf(fontinfo_file, "%d\n", &num_unichars) == 1);
  FontInfo *fi = &fontinfo_table_.get(fontinfo_id);
  fi->init_spacing(unicharset_.size());
  for (int l = 0; l < num_unichars; ++l) {
    if (tfscanf(fontinfo_file, "%s %d %d %d",
                uch, &x_gap_before, &x_gap_after, &num_kerned) != 4) {
      tprintf("Bad format of font spacing file %s\n", filename);
      fclose(fontinfo_file);
      return false;
    }
    bool valid = unicharset_.contains_unichar(uch);
    // Reset per unichar: previously a stale pointer from an earlier (valid)
    // iteration could be deleted on a parse error below, even though that
    // FontSpacingInfo had already been handed to fi->add_spacing.
    FontSpacingInfo *spacing = NULL;
    if (valid) {
      spacing = new FontSpacingInfo();
      spacing->x_gap_before = static_cast<inT16>(x_gap_before * scale);
      spacing->x_gap_after = static_cast<inT16>(x_gap_after * scale);
    }
    // Read the kerning entries even for invalid unichars to keep the file
    // position in sync; they are simply discarded in that case.
    for (int k = 0; k < num_kerned; ++k) {
      if (tfscanf(fontinfo_file, "%s %d", kerned_uch, &x_gap) != 2) {
        tprintf("Bad format of font spacing file %s\n", filename);
        fclose(fontinfo_file);
        delete spacing;
        return false;
      }
      if (!valid || !unicharset_.contains_unichar(kerned_uch)) continue;
      spacing->kerned_unichar_ids.push_back(
          unicharset_.unichar_to_id(kerned_uch));
      spacing->kerned_x_gaps.push_back(static_cast<inT16>(x_gap * scale));
    }
    // Ownership of spacing passes to fi here.
    if (valid) fi->add_spacing(unicharset_.unichar_to_id(uch), spacing);
  }
  fclose(fontinfo_file);
  return true;
}
// Returns the font id corresponding to the given font name.
// Returns -1 if the font cannot be found.
int MasterTrainer::GetFontInfoId(const char* font_name) {
FontInfo fontinfo;
// We are only borrowing the string, so it is OK to const cast it.
fontinfo.name = const_cast<char*>(font_name);
fontinfo.properties = 0; // Not used to lookup in the table
fontinfo.universal_id = 0;
return fontinfo_table_.get_index(fontinfo);
}
// Returns the font_id of the closest matching font name to the given
// filename. It is assumed that a substring of the filename will match
// one of the fonts. If more than one is matched, the longest is returned.
int MasterTrainer::GetBestMatchingFontInfoId(const char* filename) {
  int best_id = -1;
  int best_match_len = 0;
  for (int f = 0; f < fontinfo_table_.size(); ++f) {
    const char* font_name = fontinfo_table_.get(f).name;
    if (strstr(filename, font_name) == NULL) continue;
    // Keep the longest matching name in case one font name is a substring
    // of another.
    int match_len = strlen(font_name);
    if (match_len > best_match_len) {
      best_match_len = match_len;
      best_id = f;
    }
  }
  return best_id;
}
// Sets up a flat shapetable with one shape per class/font combination.
void MasterTrainer::SetupFlatShapeTable(ShapeTable* shape_table) {
  // To exactly mimic the results of the previous implementation, the shapes
  // must be clustered in order the fonts arrived, and reverse order of the
  // characters within each font.
  // Pass 1: record the distinct fonts in order of first appearance.
  GenericVector<int> font_order;
  int total_shapes = flat_shapes_.NumShapes();
  for (int s = 0; s < total_shapes; ++s) {
    int font = flat_shapes_.GetShape(s)[0].font_ids[0];
    bool seen = false;
    for (int i = 0; i < font_order.size() && !seen; ++i) {
      seen = font_order[i] == font;
    }
    if (!seen) {
      font_order.push_back(font);
    }
  }
  // Pass 2: for each font in arrival order, add that font's shapes in
  // reverse shape order.
  for (int i = 0; i < font_order.size(); ++i) {
    int wanted_font = font_order[i];
    for (int s = total_shapes - 1; s >= 0; --s) {
      if (flat_shapes_.GetShape(s)[0].font_ids[0] == wanted_font) {
        shape_table->AddShape(flat_shapes_.GetShape(s));
      }
    }
  }
}
// Sets up a Clusterer for mftraining on a single shape_id.
// Call FreeClusterer on the return value after use.
// shape_table: table that shape_id indexes into.
// feature_defs: provides the micro-feature parameter descriptions.
// num_samples: output; set to the number of samples fed to the clusterer.
CLUSTERER* MasterTrainer::SetupForClustering(
    const ShapeTable& shape_table,
    const FEATURE_DEFS_STRUCT& feature_defs,
    int shape_id,
    int* num_samples) {
  // The clusterer operates on micro-features only; its dimensionality must
  // match the micro-feature parameter count.
  int desc_index = ShortNameToFeatureType(feature_defs, kMicroFeatureType);
  int num_params = feature_defs.FeatureDesc[desc_index]->NumParams;
  ASSERT_HOST(num_params == MFCount);
  CLUSTERER* clusterer = MakeClusterer(
      num_params, feature_defs.FeatureDesc[desc_index]->ParamDesc);
  // We want to iterate over the samples of just the one shape.
  IndexMapBiDi shape_map;
  shape_map.Init(shape_table.NumShapes(), false);
  shape_map.SetMap(shape_id, true);
  shape_map.Setup();
  // Reverse the order of the samples to match the previous behavior.
  // First gather pointers in iterator order, then walk them backwards.
  GenericVector<const TrainingSample*> sample_ptrs;
  SampleIterator it;
  it.Init(&shape_map, &shape_table, false, &samples_);
  for (it.Begin(); !it.AtEnd(); it.Next()) {
    sample_ptrs.push_back(&it.GetSample());
  }
  int sample_id = 0;
  for (int i = sample_ptrs.size() - 1; i >= 0; --i) {
    const TrainingSample* sample = sample_ptrs[i];
    // Every micro-feature of a sample shares the same sample_id.
    int num_features = sample->num_micro_features();
    for (int f = 0; f < num_features; ++f)
      MakeSample(clusterer, sample->micro_features()[f], sample_id);
    ++sample_id;
  }
  *num_samples = sample_id;
  return clusterer;
}
// Writes the given float_classes (produced by SetupForFloat2Int) as inttemp
// to the given inttemp_file, and the corresponding pffmtable.
// The unicharset is the original encoding of graphemes, and shape_set should
// match the size of the shape_table, and may possibly be totally fake.
// NOTE: consumes fontinfo_table_ (moved into the temporary Classify).
void MasterTrainer::WriteInttempAndPFFMTable(const UNICHARSET& unicharset,
                                             const UNICHARSET& shape_set,
                                             const ShapeTable& shape_table,
                                             CLASS_STRUCT* float_classes,
                                             const char* inttemp_file,
                                             const char* pffmtable_file) {
  tesseract::Classify *classify = new tesseract::Classify();
  // Move the fontinfo table to classify.
  fontinfo_table_.MoveTo(&classify->get_fontinfo_table());
  INT_TEMPLATES int_templates = classify->CreateIntTemplates(float_classes,
                                                             shape_set);
  FILE* fp = fopen(inttemp_file, "wb");
  if (fp == NULL) {
    // Bail out cleanly instead of crashing in WriteIntTemplates/fclose.
    fprintf(stderr, "Error, failed to open file \"%s\"\n", inttemp_file);
    free_int_templates(int_templates);
    delete classify;
    return;
  }
  classify->WriteIntTemplates(fp, int_templates, shape_set);
  fclose(fp);
  // Now write pffmtable. This is complicated by the fact that the adaptive
  // classifier still wants one indexed by unichar-id, but the static
  // classifier needs one indexed by its shape class id.
  // We put the shapetable_cutoffs in a GenericVector, and compute the
  // unicharset cutoffs along the way.
  GenericVector<uinT16> shapetable_cutoffs;
  GenericVector<uinT16> unichar_cutoffs;
  for (int c = 0; c < unicharset.size(); ++c)
    unichar_cutoffs.push_back(0);
  /* then write out each class */
  for (int i = 0; i < int_templates->NumClasses; ++i) {
    INT_CLASS Class = ClassForClassId(int_templates, i);
    // Todo: Test with min instead of max
    // int MaxLength = LengthForConfigId(Class, 0);
    uinT16 max_length = 0;
    for (int config_id = 0; config_id < Class->NumConfigs; config_id++) {
      // Todo: Test with min instead of max
      // if (LengthForConfigId (Class, config_id) < MaxLength)
      uinT16 length = Class->ConfigLengths[config_id];
      if (length > max_length)
        max_length = Class->ConfigLengths[config_id];
      // Propagate this config's cutoff to every unichar in its shape so the
      // unichar-indexed table holds the max over all configs/shapes.
      int shape_id = float_classes[i].font_set.get(config_id);
      const Shape& shape = shape_table.GetShape(shape_id);
      for (int c = 0; c < shape.size(); ++c) {
        int unichar_id = shape[c].unichar_id;
        if (length > unichar_cutoffs[unichar_id])
          unichar_cutoffs[unichar_id] = length;
      }
    }
    shapetable_cutoffs.push_back(max_length);
  }
  fp = fopen(pffmtable_file, "wb");
  if (fp == NULL) {
    fprintf(stderr, "Error, failed to open file \"%s\"\n", pffmtable_file);
    free_int_templates(int_templates);
    delete classify;
    return;
  }
  shapetable_cutoffs.Serialize(fp);
  // The text section maps unichar -> cutoff; space is written as "NULL" to
  // keep the file whitespace-delimited.
  for (int c = 0; c < unicharset.size(); ++c) {
    const char *unichar = unicharset.id_to_unichar(c);
    if (strcmp(unichar, " ") == 0) {
      unichar = "NULL";
    }
    fprintf(fp, "%s %d\n", unichar, unichar_cutoffs[c]);
  }
  fclose(fp);
  free_int_templates(int_templates);
  delete classify;
}
// Generate debug output relating to the canonical distance between the
// two given UTF8 grapheme strings.
// Prints a matrix of cluster distances between every pair of fonts that
// have samples of the two unichars. If unichar_str2 is not in the
// unicharset, unichar_str1 is compared against itself.
void MasterTrainer::DebugCanonical(const char* unichar_str1,
                                   const char* unichar_str2) {
  int class_id1 = unicharset_.unichar_to_id(unichar_str1);
  int class_id2 = unicharset_.unichar_to_id(unichar_str2);
  if (class_id2 == INVALID_UNICHAR_ID)
    class_id2 = class_id1;
  if (class_id1 == INVALID_UNICHAR_ID) {
    tprintf("No unicharset entry found for %s\n", unichar_str1);
    return;
  } else {
    tprintf("Font ambiguities for unichar %d = %s and %d = %s\n",
            class_id1, unichar_str1, class_id2, unichar_str2);
  }
  int num_fonts = samples_.NumFonts();
  const IntFeatureMap& feature_map = feature_map_;
  // Iterate the fonts to get the similarity with other fonts of the same
  // class.
  // Header row: the ids of fonts that have samples of class_id2.
  tprintf("      ");
  for (int f = 0; f < num_fonts; ++f) {
    if (samples_.NumClassSamples(f, class_id2, false) == 0)
      continue;
    tprintf("%6d", f);
  }
  tprintf("\n");
  // One row per font with samples of class_id1; each cell is the cluster
  // distance to a font with samples of class_id2.
  for (int f1 = 0; f1 < num_fonts; ++f1) {
    // Map the features of the canonical_sample.
    if (samples_.NumClassSamples(f1, class_id1, false) == 0)
      continue;
    tprintf("%4d  ", f1);
    for (int f2 = 0; f2 < num_fonts; ++f2) {
      if (samples_.NumClassSamples(f2, class_id2, false) == 0)
        continue;
      float dist = samples_.ClusterDistance(f1, class_id1, f2, class_id2,
                                            feature_map);
      tprintf(" %5.3f", dist);
    }
    tprintf("\n");
  }
  // Build a fake ShapeTable containing all the sample types.
  // NOTE(review): this table is built but never used below — it appears to
  // be dead code left over from an earlier version; confirm before removal.
  ShapeTable shapes(unicharset_);
  for (int f = 0; f < num_fonts; ++f) {
    if (samples_.NumClassSamples(f, class_id1, true) > 0)
      shapes.AddShape(class_id1, f);
    if (class_id1 != class_id2 &&
        samples_.NumClassSamples(f, class_id2, true) > 0)
      shapes.AddShape(class_id2, f);
  }
}
#ifndef GRAPHICS_DISABLED
// Debugging for cloud/canonical features.
// Displays a Features window containing:
// If unichar_str2 is in the unicharset, and canonical_font is non-negative,
// displays the canonical features of the char/font combination in red.
// If unichar_str1 is in the unicharset, and cloud_font is non-negative,
// displays the cloud feature of the char/font combination in green.
// The canonical features are drawn first to show which ones have no
// matches in the cloud features.
// Until the features window is destroyed, each click in the features window
// will display the samples that have that feature in a separate window.
void MasterTrainer::DisplaySamples(const char* unichar_str1, int cloud_font,
                                   const char* unichar_str2,
                                   int canonical_font) {
  const IntFeatureMap& feature_map = feature_map_;
  const IntFeatureSpace& feature_space = feature_map.feature_space();
  ScrollView* f_window = CreateFeatureSpaceWindow("Features", 100, 500);
  ClearFeatureSpaceWindow(norm_mode_ == NM_BASELINE ? baseline : character,
                          f_window);
  // Canonical features (red) are drawn first so unmatched ones stay visible
  // under the cloud features drawn afterwards.
  int class_id2 = samples_.unicharset().unichar_to_id(unichar_str2);
  if (class_id2 != INVALID_UNICHAR_ID && canonical_font >= 0) {
    const TrainingSample* sample = samples_.GetCanonicalSample(canonical_font,
                                                               class_id2);
    for (int f = 0; f < sample->num_features(); ++f) {
      RenderIntFeature(f_window, &sample->features()[f], ScrollView::RED);
    }
  }
  // Cloud features (green): the BitVector is indexed by mapped feature
  // index, so set bits are converted back to features for rendering.
  int class_id1 = samples_.unicharset().unichar_to_id(unichar_str1);
  if (class_id1 != INVALID_UNICHAR_ID && cloud_font >= 0) {
    const BitVector& cloud = samples_.GetCloudFeatures(cloud_font, class_id1);
    for (int f = 0; f < cloud.size(); ++f) {
      if (cloud[f]) {
        INT_FEATURE_STRUCT feature =
            feature_map.InverseIndexFeature(f);
        RenderIntFeature(f_window, &feature, ScrollView::GREEN);
      }
    }
  }
  f_window->Update();
  // Event loop: clicks in the features window display matching samples in
  // the samples window, until the features window is destroyed.
  ScrollView* s_window = CreateFeatureSpaceWindow("Samples", 100, 500);
  SVEventType ev_type;
  do {
    SVEvent* ev;
    // Wait until a click or popup event.
    ev = f_window->AwaitEvent(SVET_ANY);
    ev_type = ev->type;
    if (ev_type == SVET_CLICK) {
      int feature_index = feature_space.XYToFeatureIndex(ev->x, ev->y);
      if (feature_index >= 0) {
        // Iterate samples and display those with the feature.
        Shape shape;
        shape.AddToShape(class_id1, cloud_font);
        s_window->Clear();
        samples_.DisplaySamplesWithFeature(feature_index, shape,
                                           feature_space, ScrollView::GREEN,
                                           s_window);
        s_window->Update();
      }
    }
    delete ev;
  } while (ev_type != SVET_DESTROY);
}
#endif // GRAPHICS_DISABLED
// Debug-compares test_classifier against old_classifier on the internal
// samples_ (replicated if replicate_samples is true), reporting unichar
// top-n error differences via ErrorCounter::DebugNewErrors.
void MasterTrainer::TestClassifierVOld(bool replicate_samples,
                                       ShapeClassifier* test_classifier,
                                       ShapeClassifier* old_classifier) {
  SampleIterator sample_it;
  sample_it.Init(NULL, NULL, replicate_samples, &samples_);
  ErrorCounter::DebugNewErrors(test_classifier, old_classifier,
                               CT_UNICHAR_TOPN_ERR, fontinfo_table_,
                               page_images_, &sample_it);
}
// Tests the given test_classifier on the internal samples.
// See TestClassifier for details.
// Convenience wrapper that forwards to TestClassifier with samples_ and
// discards the returned error rate.
void MasterTrainer::TestClassifierOnSamples(CountTypes error_mode,
                                            int report_level,
                                            bool replicate_samples,
                                            ShapeClassifier* test_classifier,
                                            STRING* report_string) {
  TestClassifier(error_mode, report_level, replicate_samples, &samples_,
                 test_classifier, report_string);
}
// Tests the given test_classifier on the given samples.
// error_mode indicates what counts as an error.
// report_levels:
// 0 = no output.
// 1 = bottom-line error rate.
// 2 = bottom-line error rate + time.
// 3 = font-level error rate + time.
// 4 = list of all errors + short classifier debug output on 16 errors.
// 5 = list of all errors + short classifier debug output on 25 errors.
// If replicate_samples is true, then the test is run on an extended test
// sample including replicated and systematically perturbed samples.
// If report_string is non-NULL, a summary of the results for each font
// is appended to the report_string.
double MasterTrainer::TestClassifier(CountTypes error_mode,
                                     int report_level,
                                     bool replicate_samples,
                                     TrainingSampleSet* samples,
                                     ShapeClassifier* test_classifier,
                                     STRING* report_string) {
  SampleIterator it;
  it.Init(NULL, NULL, replicate_samples, samples);
  if (report_level > 0) {
    // Count the samples the iterator will visit, purely for reporting.
    int sample_count = 0;
    for (it.Begin(); !it.AtEnd(); it.Next()) {
      ++sample_count;
    }
    tprintf("Iterator has charset size of %d/%d, %d shapes, %d samples\n",
            it.SparseCharsetSize(), it.CompactCharsetSize(),
            test_classifier->GetShapeTable()->NumShapes(), sample_count);
    tprintf("Testing %sREPLICATED:\n", replicate_samples ? "" : "NON-");
  }
  // ComputeErrorRate resets the iterator itself, so the counting pass above
  // does not disturb the measurement.
  double unichar_error = 0.0;
  ErrorCounter::ComputeErrorRate(test_classifier, report_level,
                                 error_mode, fontinfo_table_,
                                 page_images_, &it, &unichar_error,
                                 NULL, report_string);
  return unichar_error;
}
// Returns the average (in some sense) distance between the two given
// shapes, which may contain multiple fonts and/or unichars.
float MasterTrainer::ShapeDistance(const ShapeTable& shapes, int s1, int s2) {
  const IntFeatureMap& feature_map = feature_map_;
  const Shape& shape1 = shapes.GetShape(s1);
  const Shape& shape2 = shapes.GetShape(s2);
  const int size1 = shape1.size();
  const int size2 = shape2.size();
  float total_dist = 0.0f;
  int pair_count = 0;
  if (size1 > 1 || size2 > 1) {
    // Multi-char case: average over every unichar pair, letting
    // UnicharDistance match characters of the same font where possible.
    for (int i = 0; i < size1; ++i) {
      for (int j = 0; j < size2; ++j) {
        total_dist += samples_.UnicharDistance(shape1[i], shape2[j],
                                               true, feature_map);
        ++pair_count;
      }
    }
  } else {
    // Single unichar on each side: there is little alternative but to
    // compute the squared-order distance between the pairs of fonts.
    total_dist = samples_.UnicharDistance(shape1[0], shape2[0],
                                          false, feature_map);
    pair_count = 1;
  }
  return total_dist / pair_count;
}
// Replaces samples that are always fragmented with the corresponding
// fragment samples.
void MasterTrainer::ReplaceFragmentedSamples() {
if (fragments_ == NULL) return;
// Remove samples that are replaced by fragments. Each class that was
// always naturally fragmented should be replaced by its fragments.
int num_samples = samples_.num_samples();
for (int s = 0; s < num_samples; ++s) {
TrainingSample* sample = samples_.mutable_sample(s);
if (fragments_[sample->class_id()] > 0)
samples_.KillSample(sample);
}
samples_.DeleteDeadSamples();
// Get ids of fragments in junk_samples_ that replace the dead chars.
const UNICHARSET& frag_set = junk_samples_.unicharset();
#if 0
// TODO(rays) The original idea was to replace only graphemes that were
// always naturally fragmented, but that left a lot of the Indic graphemes
// out. Determine whether we can go back to that idea now that spacing
// is fixed in the training images, or whether this code is obsolete.
bool* good_junk = new bool[frag_set.size()];
memset(good_junk, 0, sizeof(*good_junk) * frag_set.size());
for (int dead_ch = 1; dead_ch < unicharset_.size(); ++dead_ch) {
int frag_ch = fragments_[dead_ch];
if (frag_ch <= 0) continue;
const char* frag_utf8 = frag_set.id_to_unichar(frag_ch);
CHAR_FRAGMENT* frag = CHAR_FRAGMENT::parse_from_string(frag_utf8);
// Mark the chars for all parts of the fragment as good in good_junk.
for (int part = 0; part < frag->get_total(); ++part) {
frag->set_pos(part);
int good_ch = frag_set.unichar_to_id(frag->to_string().string());
if (good_ch != INVALID_UNICHAR_ID)
good_junk[good_ch] = true; // We want this one.
}
}
#endif
// For now just use all the junk that was from natural fragments.
// Get samples of fragments in junk_samples_ that replace the dead chars.
int num_junks = junk_samples_.num_samples();
for (int s = 0; s < num_junks; ++s) {
TrainingSample* sample = junk_samples_.mutable_sample(s);
int junk_id = sample->class_id();
const char* frag_utf8 = frag_set.id_to_unichar(junk_id);
CHAR_FRAGMENT* frag = CHAR_FRAGMENT::parse_from_string(frag_utf8);
if (frag != NULL && frag->is_natural()) {
junk_samples_.extract_sample(s);
samples_.AddSample(frag_set.id_to_unichar(junk_id), sample);
}
}
junk_samples_.DeleteDeadSamples();
junk_samples_.OrganizeByFontAndClass();
samples_.OrganizeByFontAndClass();
unicharset_.clear();
unicharset_.AppendOtherUnicharset(samples_.unicharset());
// delete [] good_junk;
// Fragments_ no longer needed?
delete [] fragments_;
fragments_ = NULL;
}
// Runs a hierarchical agglomerative clustering to merge shapes in the given
// shape_table, while satisfying the given constraints:
// * End with at least min_shapes left in shape_table,
// * No shape shall have more than max_shape_unichars in it,
// * Don't merge shapes where the distance between them exceeds max_dist.
const float kInfiniteDist = 999.0f;
void MasterTrainer::ClusterShapes(int min_shapes, int max_shape_unichars,
                                  float max_dist, ShapeTable* shapes) {
  int num_shapes = shapes->NumShapes();
  int max_merges = num_shapes - min_shapes;
  // Triangular distance matrix: shape_dists[s1][i] holds the distance
  // between shapes s1 and s1 + 1 + i. kInfiniteDist marks invalidated
  // (merged-away or over-limit) pairs.
  GenericVector<ShapeDist>* shape_dists =
      new GenericVector<ShapeDist>[num_shapes];
  float min_dist = kInfiniteDist;
  int min_s1 = 0;
  int min_s2 = 0;
  tprintf("Computing shape distances...");
  for (int s1 = 0; s1 < num_shapes; ++s1) {
    for (int s2 = s1 + 1; s2 < num_shapes; ++s2) {
      ShapeDist dist(s1, s2, ShapeDistance(*shapes, s1, s2));
      shape_dists[s1].push_back(dist);
      // Track the globally closest pair while filling the matrix.
      if (dist.distance < min_dist) {
        min_dist = dist.distance;
        min_s1 = s1;
        min_s2 = s2;
      }
    }
    tprintf(" %d", s1);
  }
  tprintf("\n");
  int num_merged = 0;
  // Greedily merge the closest pair until either enough merges have been
  // made or no pair is closer than max_dist.
  while (num_merged < max_merges && min_dist < max_dist) {
    tprintf("Distance = %f: ", min_dist);
    int num_unichars = shapes->MergedUnicharCount(min_s1, min_s2);
    // Invalidate this pair so it is never selected again, whether or not
    // the merge goes ahead.
    shape_dists[min_s1][min_s2 - min_s1 - 1].distance = kInfiniteDist;
    if (num_unichars > max_shape_unichars) {
      tprintf("Merge of %d and %d with %d would exceed max of %d unichars\n",
              min_s1, min_s2, num_unichars, max_shape_unichars);
    } else {
      // min_s2 is absorbed into min_s1; drop min_s2's row and recompute all
      // distances involving min_s1, while invalidating those of min_s2.
      shapes->MergeShapes(min_s1, min_s2);
      shape_dists[min_s2].clear();
      ++num_merged;
      for (int s = 0; s < min_s1; ++s) {
        if (!shape_dists[s].empty()) {
          shape_dists[s][min_s1 - s - 1].distance =
              ShapeDistance(*shapes, s, min_s1);
          shape_dists[s][min_s2 - s -1].distance = kInfiniteDist;
        }
      }
      for (int s2 = min_s1 + 1; s2 < num_shapes; ++s2) {
        if (shape_dists[min_s1][s2 - min_s1 - 1].distance < kInfiniteDist)
          shape_dists[min_s1][s2 - min_s1 - 1].distance =
              ShapeDistance(*shapes, min_s1, s2);
      }
      for (int s = min_s1 + 1; s < min_s2; ++s) {
        if (!shape_dists[s].empty()) {
          shape_dists[s][min_s2 - s - 1].distance = kInfiniteDist;
        }
      }
    }
    // Rescan the whole matrix for the next closest pair.
    min_dist = kInfiniteDist;
    for (int s1 = 0; s1 < num_shapes; ++s1) {
      for (int i = 0; i < shape_dists[s1].size(); ++i) {
        if (shape_dists[s1][i].distance < min_dist) {
          min_dist = shape_dists[s1][i].distance;
          min_s1 = s1;
          min_s2 = s1 + 1 + i;
        }
      }
    }
  }
  tprintf("Stopped with %d merged, min dist %f\n", num_merged, min_dist);
  delete [] shape_dists;
  if (debug_level_ > 1) {
    // Only surviving (self-mapped) shapes are master shapes worth printing.
    for (int s1 = 0; s1 < num_shapes; ++s1) {
      if (shapes->MasterDestinationIndex(s1) == s1) {
        tprintf("Master shape:%s\n", shapes->DebugStr(s1).string());
      }
    }
  }
}
} // namespace tesseract.
| C++ |
/******************************************************************************
** Filename: intmatcher.c
** Purpose: Generic high level classification routines.
** Author: Robert Moss
** History: Wed Feb 13 17:35:28 MST 1991, RWM, Created.
** Mon Mar 11 16:33:02 MST 1991, RWM, Modified to add
** support for adaptive matching.
** (c) Copyright Hewlett-Packard Company, 1988.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
******************************************************************************/
// Include automatically generated configuration file if running autoconf.
#ifdef HAVE_CONFIG_H
#include "config_auto.h"
#endif
/*----------------------------------------------------------------------------
Include Files and Type Defines
----------------------------------------------------------------------------*/
#include "intmatcher.h"
#include "intproto.h"
#include "callcpp.h"
#include "scrollview.h"
#include "float2int.h"
#include "globals.h"
#include "helpers.h"
#include "classify.h"
#include "shapetable.h"
#include <math.h>
/*----------------------------------------------------------------------------
Global Data Definitions and Declarations
----------------------------------------------------------------------------*/
// Parameters of the sigmoid used to convert similarity to evidence in the
// similarity_evidence_table_ that is used to convert distance metric to an
// 8 bit evidence value in the secondary matcher. (See IntMatcher::Init).
// NOTE(review): a multiplier of 0.0 suggests the exponential term is
// disabled by default — confirm against IntegerMatcher::Init.
const float IntegerMatcher::kSEExponentialMultiplier = 0.0;
const float IntegerMatcher::kSimilarityCenter = 0.0075;
// offset_table[n] is the bit index of the lowest set bit of n
// (e.g. offset_table[12] == 2, since 12 == 0b1100), with 255 serving as a
// sentinel for n == 0. Presumably used together with next_table (below) to
// walk the set bits of a byte one at a time.
static const uinT8 offset_table[256] = {
  255, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
  4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
  5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
  4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
  6, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
  4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
  5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
  4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
  7, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
  4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
  5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
  4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
  6, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
  4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
  5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
  4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0
};
// next_table[n] is n with its lowest set bit cleared
// (e.g. next_table[0x0b] == 0x0a). Looking up offset_table[n] and then
// replacing n with next_table[n] therefore steps through the set bits of a
// byte from lowest to highest, terminating when n reaches 0.
static const uinT8 next_table[256] = {
  0, 0, 0, 0x2, 0, 0x4, 0x4, 0x6, 0, 0x8, 0x8, 0x0a, 0x08, 0x0c, 0x0c, 0x0e,
  0, 0x10, 0x10, 0x12, 0x10, 0x14, 0x14, 0x16, 0x10, 0x18, 0x18, 0x1a, 0x18,
  0x1c, 0x1c, 0x1e,
  0, 0x20, 0x20, 0x22, 0x20, 0x24, 0x24, 0x26, 0x20, 0x28, 0x28, 0x2a, 0x28,
  0x2c, 0x2c, 0x2e,
  0x20, 0x30, 0x30, 0x32, 0x30, 0x34, 0x34, 0x36, 0x30, 0x38, 0x38, 0x3a,
  0x38, 0x3c, 0x3c, 0x3e,
  0, 0x40, 0x40, 0x42, 0x40, 0x44, 0x44, 0x46, 0x40, 0x48, 0x48, 0x4a, 0x48,
  0x4c, 0x4c, 0x4e,
  0x40, 0x50, 0x50, 0x52, 0x50, 0x54, 0x54, 0x56, 0x50, 0x58, 0x58, 0x5a,
  0x58, 0x5c, 0x5c, 0x5e,
  0x40, 0x60, 0x60, 0x62, 0x60, 0x64, 0x64, 0x66, 0x60, 0x68, 0x68, 0x6a,
  0x68, 0x6c, 0x6c, 0x6e,
  0x60, 0x70, 0x70, 0x72, 0x70, 0x74, 0x74, 0x76, 0x70, 0x78, 0x78, 0x7a,
  0x78, 0x7c, 0x7c, 0x7e,
  0, 0x80, 0x80, 0x82, 0x80, 0x84, 0x84, 0x86, 0x80, 0x88, 0x88, 0x8a, 0x88,
  0x8c, 0x8c, 0x8e,
  0x80, 0x90, 0x90, 0x92, 0x90, 0x94, 0x94, 0x96, 0x90, 0x98, 0x98, 0x9a,
  0x98, 0x9c, 0x9c, 0x9e,
  0x80, 0xa0, 0xa0, 0xa2, 0xa0, 0xa4, 0xa4, 0xa6, 0xa0, 0xa8, 0xa8, 0xaa,
  0xa8, 0xac, 0xac, 0xae,
  0xa0, 0xb0, 0xb0, 0xb2, 0xb0, 0xb4, 0xb4, 0xb6, 0xb0, 0xb8, 0xb8, 0xba,
  0xb8, 0xbc, 0xbc, 0xbe,
  0x80, 0xc0, 0xc0, 0xc2, 0xc0, 0xc4, 0xc4, 0xc6, 0xc0, 0xc8, 0xc8, 0xca,
  0xc8, 0xcc, 0xcc, 0xce,
  0xc0, 0xd0, 0xd0, 0xd2, 0xd0, 0xd4, 0xd4, 0xd6, 0xd0, 0xd8, 0xd8, 0xda,
  0xd8, 0xdc, 0xdc, 0xde,
  0xc0, 0xe0, 0xe0, 0xe2, 0xe0, 0xe4, 0xe4, 0xe6, 0xe0, 0xe8, 0xe8, 0xea,
  0xe8, 0xec, 0xec, 0xee,
  0xe0, 0xf0, 0xf0, 0xf2, 0xf0, 0xf4, 0xf4, 0xf6, 0xf0, 0xf8, 0xf8, 0xfa,
  0xf8, 0xfc, 0xfc, 0xfe
};
namespace tesseract {
// Encapsulation of the intermediate data and computations made by the class
// pruner. The class pruner implements a simple linear classifier on binary
// features by heavily quantizing the feature space, and applying
// NUM_BITS_PER_CLASS (2)-bit weights to the features. Lack of resolution in
// weights is compensated by a non-constant bias that is dependent on the
// number of features present.
class ClassPruner {
public:
  // Allocates the per-class scratch arrays, rounded up so the unrolled
  // summing loop in ComputeScores may safely write past max_classes.
  ClassPruner(int max_classes) {
    // The unrolled loop in ComputeScores means that the array sizes need to
    // be rounded up so that the array is big enough to accommodate the extra
    // entries accessed by the unrolling. Each pruner word is of size
    // BITS_PER_WERD and each entry is NUM_BITS_PER_CLASS, so there are
    // BITS_PER_WERD / NUM_BITS_PER_CLASS entries.
    // See ComputeScores.
    max_classes_ = max_classes;
    rounded_classes_ = RoundUp(
        max_classes, WERDS_PER_CP_VECTOR * BITS_PER_WERD / NUM_BITS_PER_CLASS);
    // The sort arrays get one extra slot — presumably a sentinel for the
    // sorting step; confirm in the code that uses them (outside this view).
    class_count_ = new int[rounded_classes_];
    norm_count_ = new int[rounded_classes_];
    sort_key_ = new int[rounded_classes_ + 1];
    sort_index_ = new int[rounded_classes_ + 1];
    // Only class_count_ is zeroed here, since ComputeScores accumulates into
    // it; the other arrays are presumably filled before they are read.
    for (int i = 0; i < rounded_classes_; i++) {
      class_count_[i] = 0;
    }
    pruning_threshold_ = 0;
    num_features_ = 0;
    num_classes_ = 0;
  }
  // Releases the scratch arrays allocated in the constructor.
  ~ClassPruner() {
    delete []class_count_;
    delete []norm_count_;
    delete []sort_key_;
    delete []sort_index_;
  }
  // Computes the scores for every class in the character set, by summing the
  // weights for each feature and stores the sums internally in class_count_.
  // Relies on class_count_ having been zeroed by the constructor.
  void ComputeScores(const INT_TEMPLATES_STRUCT* int_templates,
                     int num_features, const INT_FEATURE_STRUCT* features) {
    num_features_ = num_features;
    int num_pruners = int_templates->NumClassPruners;
    for (int f = 0; f < num_features; ++f) {
      const INT_FEATURE_STRUCT* feature = &features[f];
      // Quantize the feature to NUM_CP_BUCKETS*NUM_CP_BUCKETS*NUM_CP_BUCKETS.
      int x = feature->X * NUM_CP_BUCKETS >> 8;
      int y = feature->Y * NUM_CP_BUCKETS >> 8;
      int theta = feature->Theta * NUM_CP_BUCKETS >> 8;
      int class_id = 0;
      // Each CLASS_PRUNER_STRUCT only covers CLASSES_PER_CP(32) classes, so
      // we need a collection of them, indexed by pruner_set.
      for (int pruner_set = 0; pruner_set < num_pruners; ++pruner_set) {
        // Look up quantized feature in a 3-D array, an array of weights for
        // each class.
        const uinT32* pruner_word_ptr =
            int_templates->ClassPruners[pruner_set]->p[x][y][theta];
        for (int word = 0; word < WERDS_PER_CP_VECTOR; ++word) {
          uinT32 pruner_word = *pruner_word_ptr++;
          // This inner loop is unrolled to speed up the ClassPruner.
          // Currently gcc would not unroll it unless it is set to O3
          // level of optimization or -funroll-loops is specified.
          /*
          uinT32 class_mask = (1 << NUM_BITS_PER_CLASS) - 1;
          for (int bit = 0; bit < BITS_PER_WERD/NUM_BITS_PER_CLASS; bit++) {
            class_count_[class_id++] += pruner_word & class_mask;
            pruner_word >>= NUM_BITS_PER_CLASS;
          }
          */
          // Manually unrolled 16x: each word packs BITS_PER_WERD /
          // NUM_BITS_PER_CLASS per-class weights of NUM_BITS_PER_CLASS
          // bits each.
          class_count_[class_id++] += pruner_word & CLASS_PRUNER_CLASS_MASK;
          pruner_word >>= NUM_BITS_PER_CLASS;
          class_count_[class_id++] += pruner_word & CLASS_PRUNER_CLASS_MASK;
          pruner_word >>= NUM_BITS_PER_CLASS;
          class_count_[class_id++] += pruner_word & CLASS_PRUNER_CLASS_MASK;
          pruner_word >>= NUM_BITS_PER_CLASS;
          class_count_[class_id++] += pruner_word & CLASS_PRUNER_CLASS_MASK;
          pruner_word >>= NUM_BITS_PER_CLASS;
          class_count_[class_id++] += pruner_word & CLASS_PRUNER_CLASS_MASK;
          pruner_word >>= NUM_BITS_PER_CLASS;
          class_count_[class_id++] += pruner_word & CLASS_PRUNER_CLASS_MASK;
          pruner_word >>= NUM_BITS_PER_CLASS;
          class_count_[class_id++] += pruner_word & CLASS_PRUNER_CLASS_MASK;
          pruner_word >>= NUM_BITS_PER_CLASS;
          class_count_[class_id++] += pruner_word & CLASS_PRUNER_CLASS_MASK;
          pruner_word >>= NUM_BITS_PER_CLASS;
          class_count_[class_id++] += pruner_word & CLASS_PRUNER_CLASS_MASK;
          pruner_word >>= NUM_BITS_PER_CLASS;
          class_count_[class_id++] += pruner_word & CLASS_PRUNER_CLASS_MASK;
          pruner_word >>= NUM_BITS_PER_CLASS;
          class_count_[class_id++] += pruner_word & CLASS_PRUNER_CLASS_MASK;
          pruner_word >>= NUM_BITS_PER_CLASS;
          class_count_[class_id++] += pruner_word & CLASS_PRUNER_CLASS_MASK;
          pruner_word >>= NUM_BITS_PER_CLASS;
          class_count_[class_id++] += pruner_word & CLASS_PRUNER_CLASS_MASK;
          pruner_word >>= NUM_BITS_PER_CLASS;
          class_count_[class_id++] += pruner_word & CLASS_PRUNER_CLASS_MASK;
          pruner_word >>= NUM_BITS_PER_CLASS;
          class_count_[class_id++] += pruner_word & CLASS_PRUNER_CLASS_MASK;
          pruner_word >>= NUM_BITS_PER_CLASS;
          class_count_[class_id++] += pruner_word & CLASS_PRUNER_CLASS_MASK;
        }
      }
    }
  }
// Adjusts the scores according to the number of expected features. Used
// in lieu of a constant bias, this penalizes classes that expect more
// features than there are present. Thus an actual c will score higher for c
// than e, even though almost all the features match e as well as c, because
// e expects more features to be present.
void AdjustForExpectedNumFeatures(const uinT16* expected_num_features,
int cutoff_strength) {
for (int class_id = 0; class_id < max_classes_; ++class_id) {
if (num_features_ < expected_num_features[class_id]) {
int deficit = expected_num_features[class_id] - num_features_;
class_count_[class_id] -= class_count_[class_id] * deficit /
(num_features_ * cutoff_strength + deficit);
}
}
}
// Zeros the scores for classes disabled in the unicharset.
// Implements the black-list to recognize a subset of the character set.
void DisableDisabledClasses(const UNICHARSET& unicharset) {
for (int class_id = 0; class_id < max_classes_; ++class_id) {
if (!unicharset.get_enabled(class_id))
class_count_[class_id] = 0; // This char is disabled!
}
}
// Zeros the scores of fragments.
void DisableFragments(const UNICHARSET& unicharset) {
for (int class_id = 0; class_id < max_classes_; ++class_id) {
// Do not include character fragments in the class pruner
// results if disable_character_fragments is true.
if (unicharset.get_fragment(class_id)) {
class_count_[class_id] = 0;
}
}
}
// Normalizes the counts for xheight, putting the normalized result in
// norm_count_. Applies a simple subtractive penalty for incorrect vertical
// position provided by the normalization_factors array, indexed by
// character class, and scaled by the norm_multiplier.
void NormalizeForXheight(int norm_multiplier,
const uinT8* normalization_factors) {
for (int class_id = 0; class_id < max_classes_; class_id++) {
norm_count_[class_id] = class_count_[class_id] -
((norm_multiplier * normalization_factors[class_id]) >> 8);
}
}
// The nop normalization copies the class_count_ array to norm_count_.
void NoNormalization() {
for (int class_id = 0; class_id < max_classes_; class_id++) {
norm_count_[class_id] = class_count_[class_id];
}
}
// Prunes the classes using <the maximum count> * pruning_factor/256 as a
// threshold for keeping classes. If max_of_non_fragments, then ignore
// fragments in computing the maximum count.
void PruneAndSort(int pruning_factor, bool max_of_non_fragments,
const UNICHARSET& unicharset) {
int max_count = 0;
for (int c = 0; c < max_classes_; ++c) {
if (norm_count_[c] > max_count &&
// This additional check is added in order to ensure that
// the classifier will return at least one non-fragmented
// character match.
// TODO(daria): verify that this helps accuracy and does not
// hurt performance.
(!max_of_non_fragments || !unicharset.get_fragment(c))) {
max_count = norm_count_[c];
}
}
// Prune Classes.
pruning_threshold_ = (max_count * pruning_factor) >> 8;
// Select Classes.
if (pruning_threshold_ < 1)
pruning_threshold_ = 1;
num_classes_ = 0;
for (int class_id = 0; class_id < max_classes_; class_id++) {
if (norm_count_[class_id] >= pruning_threshold_) {
++num_classes_;
sort_index_[num_classes_] = class_id;
sort_key_[num_classes_] = norm_count_[class_id];
}
}
// Sort Classes using Heapsort Algorithm.
if (num_classes_ > 1)
HeapSort(num_classes_, sort_key_, sort_index_);
}
  // Prints debug info on the class pruner matches for the pruned classes only.
  // For every feature, prints the per-class pruner weight of each class that
  // survived pruning (norm_count_ >= pruning_threshold_).
  void DebugMatch(const Classify& classify,
                  const INT_TEMPLATES_STRUCT* int_templates,
                  const INT_FEATURE_STRUCT* features) const {
    int num_pruners = int_templates->NumClassPruners;
    int max_num_classes = int_templates->NumClasses;
    for (int f = 0; f < num_features_; ++f) {
      const INT_FEATURE_STRUCT* feature = &features[f];
      tprintf("F=%3d(%d,%d,%d),", f, feature->X, feature->Y, feature->Theta);
      // Quantize the feature to NUM_CP_BUCKETS*NUM_CP_BUCKETS*NUM_CP_BUCKETS.
      // Must mirror the quantization in ComputeScores exactly.
      int x = feature->X * NUM_CP_BUCKETS >> 8;
      int y = feature->Y * NUM_CP_BUCKETS >> 8;
      int theta = feature->Theta * NUM_CP_BUCKETS >> 8;
      int class_id = 0;
      for (int pruner_set = 0; pruner_set < num_pruners; ++pruner_set) {
        // Look up quantized feature in a 3-D array, an array of weights for
        // each class.
        const uinT32* pruner_word_ptr =
            int_templates->ClassPruners[pruner_set]->p[x][y][theta];
        for (int word = 0; word < WERDS_PER_CP_VECTOR; ++word) {
          uinT32 pruner_word = *pruner_word_ptr++;
          // Walk the 16 packed per-class entries in this word, printing only
          // classes that survived pruning.
          for (int word_class = 0; word_class < 16 &&
               class_id < max_num_classes; ++word_class, ++class_id) {
            if (norm_count_[class_id] >= pruning_threshold_) {
              tprintf(" %s=%d,",
                      classify.ClassIDToDebugStr(int_templates,
                                                 class_id, 0).string(),
                      pruner_word & CLASS_PRUNER_CLASS_MASK);
            }
            pruner_word >>= NUM_BITS_PER_CLASS;
          }
        }
        tprintf("\n");
      }
    }
  }
  // Prints a summary of the pruner result: for each surviving class (in
  // descending score order) the initial count, expected feature count,
  // xheight adjustment, normalized count, and percentage rating.
  void SummarizeResult(const Classify& classify,
                       const INT_TEMPLATES_STRUCT* int_templates,
                       const uinT16* expected_num_features,
                       int norm_multiplier,
                       const uinT8* normalization_factors) const {
    tprintf("CP:%d classes, %d features:\n", num_classes_, num_features_);
    for (int i = 0; i < num_classes_; ++i) {
      // sort_index_/sort_key_ are 1-based and ascending, so index from the
      // top to report best-first.
      int class_id = sort_index_[num_classes_ - i];
      STRING class_string = classify.ClassIDToDebugStr(int_templates,
                                                       class_id, 0);
      tprintf("%s:Initial=%d, E=%d, Xht-adj=%d, N=%d, Rat=%.2f\n",
              class_string.string(),
              class_count_[class_id],
              expected_num_features[class_id],
              (norm_multiplier * normalization_factors[class_id]) >> 8,
              sort_key_[num_classes_ - i],
              100.0 - 100.0 * sort_key_[num_classes_ - i] /
                (CLASS_PRUNER_CLASS_MASK * num_features_));
    }
  }
// Copies the pruned, sorted classes into the output results and returns
// the number of classes.
int SetupResults(GenericVector<CP_RESULT_STRUCT>* results) const {
CP_RESULT_STRUCT empty;
results->init_to_size(num_classes_, empty);
for (int c = 0; c < num_classes_; ++c) {
(*results)[c].Class = sort_index_[num_classes_ - c];
(*results)[c].Rating = 1.0 - sort_key_[num_classes_ - c] /
(static_cast<float>(CLASS_PRUNER_CLASS_MASK) * num_features_);
}
return num_classes_;
}
 private:
  // Array[rounded_classes_] of initial counts for each class.
  int *class_count_;
  // Array[rounded_classes_] of modified counts for each class after normalizing
  // for expected number of features, disabled classes, fragments, and xheights.
  int *norm_count_;
  // Array[rounded_classes_ +1] of pruned counts that gets sorted.
  // 1-based: slot 0 is unused (see PruneAndSort/HeapSort).
  int *sort_key_;
  // Array[rounded_classes_ +1] of classes corresponding to sort_key_.
  // 1-based, parallel to sort_key_.
  int *sort_index_;
  // Number of classes in this class pruner.
  int max_classes_;
  // Rounded up number of classes used for array sizes (accommodates the
  // over-run of the unrolled loop in ComputeScores).
  int rounded_classes_;
  // Threshold count applied to prune classes.
  int pruning_threshold_;
  // The number of features used to compute the scores.
  int num_features_;
  // Final number of pruned classes.
  int num_classes_;
};
/*----------------------------------------------------------------------------
Public Code
----------------------------------------------------------------------------*/
/*---------------------------------------------------------------------------*/
// Runs the class pruner from int_templates on the given features, returning
// the number of classes output in results.
// int_templates Class pruner tables
// num_features Number of features in blob
// features Array of features
// normalization_factors Array of fudge factors from blob
// normalization process (by CLASS_INDEX)
// expected_num_features Array of expected number of features
// for each class (by CLASS_INDEX)
// results Sorted Array of pruned classes. Must be an array
// of size at least int_templates->NumClasses.
int Classify::PruneClasses(const INT_TEMPLATES_STRUCT* int_templates,
                           int num_features,
                           const INT_FEATURE_STRUCT* features,
                           const uinT8* normalization_factors,
                           const uinT16* expected_num_features,
                           GenericVector<CP_RESULT_STRUCT>* results) {
  /*
   ** Operation:
   **   Prunes the classes using a modified fast match table: score, adjust,
   **   disable, normalize, prune and sort, then emit the short-list.
   **   Returns a sorted list of classes along with the number
   **   of pruned classes in that list.
   ** Return: Number of pruned classes.
   ** Exceptions: none
   ** History: Tue Feb 19 10:24:24 MST 1991, RWM, Created.
   */
  ClassPruner pruner(int_templates->NumClasses);
  // Compute initial match scores for all classes.
  pruner.ComputeScores(int_templates, num_features, features);
  // Adjust match scores for number of expected features.
  pruner.AdjustForExpectedNumFeatures(expected_num_features,
                                      classify_cp_cutoff_strength);
  // Apply disabled classes in unicharset - only works without a shape_table.
  if (shape_table_ == NULL)
    pruner.DisableDisabledClasses(unicharset);
  // If fragments are disabled, remove them, also only without a shape table.
  if (disable_character_fragments && shape_table_ == NULL)
    pruner.DisableFragments(unicharset);
  // If we have good x-heights, apply the given normalization factors.
  if (normalization_factors != NULL) {
    pruner.NormalizeForXheight(classify_class_pruner_multiplier,
                               normalization_factors);
  } else {
    pruner.NoNormalization();
  }
  // Do the actual pruning and sort the short-list. Use the max over
  // non-fragments only when there is no shape table.
  pruner.PruneAndSort(classify_class_pruner_threshold,
                      shape_table_ == NULL, unicharset);
  if (classify_debug_level > 2) {
    pruner.DebugMatch(*this, int_templates, features);
  }
  if (classify_debug_level > 1) {
    pruner.SummarizeResult(*this, int_templates, expected_num_features,
                           classify_class_pruner_multiplier,
                           normalization_factors);
  }
  // Convert to the expected output format.
  return pruner.SetupResults(results);
}
} // namespace tesseract
/*---------------------------------------------------------------------------*/
void IntegerMatcher::Match(INT_CLASS ClassTemplate,
                           BIT_VECTOR ProtoMask,
                           BIT_VECTOR ConfigMask,
                           inT16 NumFeatures,
                           const INT_FEATURE_STRUCT* Features,
                           INT_RESULT Result,
                           int AdaptFeatureThreshold,
                           int Debug,
                           bool SeparateDebugWindows) {
  /*
   ** Parameters:
   **   ClassTemplate             Prototypes & tables for a class
   **   ProtoMask                 AND mask selecting active protos
   **   ConfigMask                AND mask selecting active configs
   **   NumFeatures               Number of features in blob
   **   Features                  Array of features
   **   Result                    Class rating & configuration:
   **                             (0.0 -> 1.0), 0=good, 1=bad
   **   AdaptFeatureThreshold     Debug-display threshold for features
   **   Debug                     Debugger flag: 1=debugger on
   ** Globals:
   **   local_matcher_multiplier_ Normalization factor multiplier
   ** Operation:
   **   IntegerMatcher returns the best configuration and rating
   **   for a single class.  The class matched against is determined
   **   by the uniqueness of the ClassTemplate parameter.  The
   **   best rating and its associated configuration are returned.
   ** Return: none (via Result).
   ** Exceptions: none
   ** History: Tue Feb 19 16:36:23 MST 1991, RWM, Created.
   */
  // Scratch evidence tables are heap-allocated; freed at end of function.
  ScratchEvidence *tables = new ScratchEvidence();
  int Feature;
  int BestMatch;

  if (MatchDebuggingOn (Debug))
    cprintf ("Integer Matcher -------------------------------------------\n");

  tables->Clear(ClassTemplate);
  Result->FeatureMisses = 0;

  // Accumulate evidence one feature at a time.
  for (Feature = 0; Feature < NumFeatures; Feature++) {
    int csum = UpdateTablesForFeature(ClassTemplate, ProtoMask, ConfigMask,
                                      Feature, &Features[Feature],
                                      tables, Debug);
    // Count features that were missed over all configs.
    if (csum == 0)
      Result->FeatureMisses++;
  }

#ifndef GRAPHICS_DISABLED
  if (PrintProtoMatchesOn(Debug) || PrintMatchSummaryOn(Debug)) {
    DebugFeatureProtoError(ClassTemplate, ProtoMask, ConfigMask, *tables,
                           NumFeatures, Debug);
  }

  if (DisplayProtoMatchesOn(Debug)) {
    DisplayProtoDebugInfo(ClassTemplate, ProtoMask, ConfigMask,
                          *tables, SeparateDebugWindows);
  }

  if (DisplayFeatureMatchesOn(Debug)) {
    DisplayFeatureDebugInfo(ClassTemplate, ProtoMask, ConfigMask, NumFeatures,
                            Features, AdaptFeatureThreshold, Debug,
                            SeparateDebugWindows);
  }
#endif

  // Fold proto evidence into the per-config sums, normalize, and pick the
  // best (and second-best) configuration.
  tables->UpdateSumOfProtoEvidences(ClassTemplate, ConfigMask, NumFeatures);
  tables->NormalizeSums(ClassTemplate, NumFeatures, NumFeatures);

  BestMatch = FindBestMatch(ClassTemplate, *tables, Result);

#ifndef GRAPHICS_DISABLED
  if (PrintMatchSummaryOn(Debug))
    DebugBestMatch(BestMatch, Result);

  if (MatchDebuggingOn(Debug))
    cprintf("Match Complete --------------------------------------------\n");
#endif

  delete tables;
}
/*---------------------------------------------------------------------------*/
int IntegerMatcher::FindGoodProtos(
    INT_CLASS ClassTemplate,
    BIT_VECTOR ProtoMask,
    BIT_VECTOR ConfigMask,
    uinT16 BlobLength,
    inT16 NumFeatures,
    INT_FEATURE_ARRAY Features,
    PROTO_ID *ProtoArray,
    int AdaptProtoThreshold,
    int Debug) {
  /*
   ** Parameters:
   **   ClassTemplate             Prototypes & tables for a class
   **   ProtoMask                 AND Mask for proto word
   **   ConfigMask                AND Mask for config word
   **   BlobLength                Length of unormalized blob (unused here;
   **                             kept for interface compatibility)
   **   NumFeatures               Number of features in blob
   **   Features                  Array of features
   **   ProtoArray                Array of good protos
   **   AdaptProtoThreshold       Threshold for good protos
   **   Debug                     Debugger flag: 1=debugger on
   ** Globals:
   **   local_matcher_multiplier_ Normalization factor multiplier
   ** Operation:
   **   FindGoodProtos finds all protos whose normalized proto-evidence
   **   exceed classify_adapt_proto_thresh.  The list is ordered by increasing
   **   proto id number.
   ** Return:
   **   Number of good protos in ProtoArray.
   ** Exceptions: none
   ** History: Tue Mar 12 17:09:26 MST 1991, RWM, Created
   */
  ScratchEvidence *tables = new ScratchEvidence();
  int NumGoodProtos = 0;

  /* DEBUG opening heading */
  if (MatchDebuggingOn (Debug))
    cprintf
      ("Find Good Protos -------------------------------------------\n");

  tables->Clear(ClassTemplate);

  // Accumulate proto evidence over all features.
  for (int Feature = 0; Feature < NumFeatures; Feature++)
    UpdateTablesForFeature(
        ClassTemplate, ProtoMask, ConfigMask, Feature, &(Features[Feature]),
        tables, Debug);

#ifndef GRAPHICS_DISABLED
  if (PrintProtoMatchesOn (Debug) || PrintMatchSummaryOn (Debug))
    DebugFeatureProtoError(ClassTemplate, ProtoMask, ConfigMask, *tables,
                           NumFeatures, Debug);
#endif

  /* Average Proto Evidences & Find Good Protos */
  for (int proto = 0; proto < ClassTemplate->NumProtos; proto++) {
    /* Compute Average for Actual Proto */
    int Temp = 0;
    for (int i = 0; i < ClassTemplate->ProtoLengths[proto]; i++)
      Temp += tables->proto_evidence_[proto][i];

    // NOTE(review): assumes ProtoLengths[proto] > 0 for every proto in the
    // template -- verify against template construction.
    Temp /= ClassTemplate->ProtoLengths[proto];

    /* Find Good Protos */
    if (Temp >= AdaptProtoThreshold) {
      *ProtoArray = proto;
      ProtoArray++;
      NumGoodProtos++;
    }
  }

  if (MatchDebuggingOn (Debug))
    cprintf ("Match Complete --------------------------------------------\n");
  delete tables;

  return NumGoodProtos;
}
/*---------------------------------------------------------------------------*/
int IntegerMatcher::FindBadFeatures(
    INT_CLASS ClassTemplate,
    BIT_VECTOR ProtoMask,
    BIT_VECTOR ConfigMask,
    uinT16 BlobLength,
    inT16 NumFeatures,
    INT_FEATURE_ARRAY Features,
    FEATURE_ID *FeatureArray,
    int AdaptFeatureThreshold,
    int Debug) {
  /*
   ** Parameters:
   **   ClassTemplate         Prototypes & tables for a class
   **   ProtoMask             AND Mask for proto word
   **   ConfigMask            AND Mask for config word
   **   BlobLength            Length of unormalized blob (unused here;
   **                         kept for interface compatibility)
   **   NumFeatures           Number of features in blob
   **   Features              Array of features
   **   FeatureArray          Array of bad features
   **   AdaptFeatureThreshold Threshold for bad features
   **   Debug                 Debugger flag: 1=debugger on
   ** Operation:
   **   FindBadFeatures finds all features with maximum feature-evidence <
   **   AdaptFeatureThresh. The list is ordered by increasing feature number.
   ** Return:
   **   Number of bad features in FeatureArray.
   ** History: Tue Mar 12 17:09:26 MST 1991, RWM, Created
   */
  ScratchEvidence *tables = new ScratchEvidence();
  int NumBadFeatures = 0;

  /* DEBUG opening heading */
  if (MatchDebuggingOn(Debug))
    cprintf("Find Bad Features -------------------------------------------\n");

  tables->Clear(ClassTemplate);

  for (int Feature = 0; Feature < NumFeatures; Feature++) {
    // Note: feature_evidence_ is reset per feature inside
    // UpdateTablesForFeature, so 'best' below reflects this feature only.
    UpdateTablesForFeature(
        ClassTemplate, ProtoMask, ConfigMask, Feature, &Features[Feature],
        tables, Debug);

    /* Find Best Evidence for Current Feature */
    int best = 0;
    for (int i = 0; i < ClassTemplate->NumConfigs; i++)
      if (tables->feature_evidence_[i] > best)
        best = tables->feature_evidence_[i];

    /* Find Bad Features */
    if (best < AdaptFeatureThreshold) {
      *FeatureArray = Feature;
      FeatureArray++;
      NumBadFeatures++;
    }
  }

#ifndef GRAPHICS_DISABLED
  if (PrintProtoMatchesOn(Debug) || PrintMatchSummaryOn(Debug))
    DebugFeatureProtoError(ClassTemplate, ProtoMask, ConfigMask, *tables,
                           NumFeatures, Debug);
#endif

  if (MatchDebuggingOn(Debug))
    cprintf("Match Complete --------------------------------------------\n");

  delete tables;
  return NumBadFeatures;
}
/*---------------------------------------------------------------------------*/
// One-time setup: builds the similarity -> evidence lookup table and the
// masks/shifts used by UpdateTablesForFeature.
void IntegerMatcher::Init(tesseract::IntParam *classify_debug_level) {
  classify_debug_level_ = classify_debug_level;

  /* Initialize table for evidence to similarity lookup */
  for (int index = 0; index < SE_TABLE_SIZE; index++) {
    // Reconstruct the integer similarity this table slot represents.
    uinT32 int_sim = index << (27 - SE_TABLE_BITS);
    double similarity = ((double) int_sim) / 65536.0 / 65536.0;
    // Bell-shaped evidence curve centered on kSimilarityCenter, 0..255.
    double ev = similarity / kSimilarityCenter;
    ev = 255.0 / (ev * ev + 1.0);

    if (kSEExponentialMultiplier > 0.0) {
      // Exponential damping toward the tail of the table.
      double damping = 1.0 - exp(-kSEExponentialMultiplier) *
        exp(kSEExponentialMultiplier * ((double) index / SE_TABLE_SIZE));
      ev *= ClipToRange(damping, 0.0, 1.0);
    }

    similarity_evidence_table_[index] = (uinT8) (ev + 0.5);
  }

  /* Initialize evidence computation variables */
  evidence_table_mask_ =
    ((1 << kEvidenceTableBits) - 1) << (9 - kEvidenceTableBits);
  mult_trunc_shift_bits_ = (14 - kIntEvidenceTruncBits);
  table_trunc_shift_bits_ = (27 - SE_TABLE_BITS - (mult_trunc_shift_bits_ << 1));
  evidence_mult_mask_ = ((1 << kIntEvidenceTruncBits) - 1);
}
/**----------------------------------------------------------------------------
Private Code
----------------------------------------------------------------------------**/
// Zeroes the accumulators for a fresh match against class_template:
// the per-proto evidence rows and the per-config feature-evidence sums.
// (The two memsets are independent; order is irrelevant.)
void ScratchEvidence::Clear(const INT_CLASS class_template) {
  memset(proto_evidence_, 0,
         class_template->NumProtos * sizeof(proto_evidence_[0]));
  memset(sum_feature_evidence_, 0,
         class_template->NumConfigs * sizeof(sum_feature_evidence_[0]));
}
// Zeroes the per-config feature evidence, called once per feature by
// UpdateTablesForFeature before evidence for that feature is accumulated.
void ScratchEvidence::ClearFeatureEvidence(const INT_CLASS class_template) {
  memset(feature_evidence_, 0,
         class_template->NumConfigs * sizeof(feature_evidence_[0]));
}
/*---------------------------------------------------------------------------*/
// Prints one proto-pruner hit for debugging: feature number, proto number,
// evidence value, and the config membership word as a bit string
// (least-significant bit first, trailing zero bits omitted).
// ConfigMask is accepted for interface symmetry but not used here.
void IMDebugConfiguration(int FeatureNum,
                          uinT16 ActualProtoNum,
                          uinT8 Evidence,
                          BIT_VECTOR ConfigMask,
                          uinT32 ConfigWord) {
  cprintf ("F = %3d, P = %3d, E = %3d, Configs = ",
           FeatureNum, (int) ActualProtoNum, (int) Evidence);
  for (; ConfigWord != 0; ConfigWord >>= 1)
    cprintf ((ConfigWord & 1) ? "1" : "0");
  cprintf ("\n");
}
/*---------------------------------------------------------------------------*/
void IMDebugConfigurationSum(int FeatureNum,
uinT8 *FeatureEvidence,
inT32 ConfigCount) {
/*
** Parameters:
** Globals:
** Operation:
** Print debugging information for Configuations
** Return:
** Exceptions: none
** History: Wed Feb 27 14:12:28 MST 1991, RWM, Created.
*/
cprintf("F=%3d, C=", FeatureNum);
for (int ConfigNum = 0; ConfigNum < ConfigCount; ConfigNum++) {
cprintf("%4d", FeatureEvidence[ConfigNum]);
}
cprintf("\n");
}
/*---------------------------------------------------------------------------*/
int IntegerMatcher::UpdateTablesForFeature(
    INT_CLASS ClassTemplate,
    BIT_VECTOR ProtoMask,
    BIT_VECTOR ConfigMask,
    int FeatureNum,
    const INT_FEATURE_STRUCT* Feature,
    ScratchEvidence *tables,
    int Debug) {
  /*
   ** Parameters:
   **   ClassTemplate Prototypes & tables for a class
   **   ProtoMask     AND mask selecting active protos
   **   ConfigMask    AND mask selecting active configs
   **   FeatureNum    Current feature number (for DEBUG only)
   **   Feature       Pointer to a feature struct
   **   tables        Evidence tables
   **   Debug         Debugger flag: 1=debugger on
   ** Operation:
   **   For the given feature: prune protos, compute evidence,
   **   update Feature Evidence, Proto Evidence, and Sum of Feature
   **   Evidence tables.
   ** Return: Sum of the feature evidence over all configs (0 => the
   **   feature matched nothing).
   */
  register uinT32 ConfigWord;
  register uinT32 ProtoWord;
  register uinT32 ProtoNum;
  register uinT32 ActualProtoNum;
  uinT8 proto_byte;
  inT32 proto_word_offset;
  inT32 proto_offset;
  uinT8 config_byte;
  inT32 config_offset;
  PROTO_SET ProtoSet;
  uinT32 *ProtoPrunerPtr;
  INT_PROTO Proto;
  int ProtoSetIndex;
  uinT8 Evidence;
  uinT32 XFeatureAddress;
  uinT32 YFeatureAddress;
  uinT32 ThetaFeatureAddress;
  register uinT8 *UINT8Pointer;
  register int ProtoIndex;
  uinT8 Temp;
  register int *IntPointer;
  int ConfigNum;
  register inT32 M3;
  register inT32 A3;
  register uinT32 A4;

  tables->ClearFeatureEvidence(ClassTemplate);

  /* Precompute Feature Address offsets into the proto pruner for this
     feature's quantized X, Y and Theta. */
  XFeatureAddress = ((Feature->X >> 2) << 1);
  YFeatureAddress = (NUM_PP_BUCKETS << 1) + ((Feature->Y >> 2) << 1);
  ThetaFeatureAddress = (NUM_PP_BUCKETS << 2) + ((Feature->Theta >> 2) << 1);

  for (ProtoSetIndex = 0, ActualProtoNum = 0;
       ProtoSetIndex < ClassTemplate->NumProtoSets; ProtoSetIndex++) {
    ProtoSet = ClassTemplate->ProtoSets[ProtoSetIndex];
    ProtoPrunerPtr = (uinT32 *) ((*ProtoSet).ProtoPruner);
    for (ProtoNum = 0; ProtoNum < PROTOS_PER_PROTO_SET;
         ProtoNum += (PROTOS_PER_PROTO_SET >> 1), ActualProtoNum +=
         (PROTOS_PER_PROTO_SET >> 1), ProtoMask++, ProtoPrunerPtr++) {
      /* Prune Protos of current Proto Set: a proto survives only if its
         pruner bit is set for X, Y and Theta AND it is in ProtoMask. */
      ProtoWord = *(ProtoPrunerPtr + XFeatureAddress);
      ProtoWord &= *(ProtoPrunerPtr + YFeatureAddress);
      ProtoWord &= *(ProtoPrunerPtr + ThetaFeatureAddress);
      ProtoWord &= *ProtoMask;

      if (ProtoWord != 0) {
        // Walk the set bits of ProtoWord byte-at-a-time, using
        // offset_table/next_table to jump from one set bit to the next.
        proto_byte = ProtoWord & 0xff;
        ProtoWord >>= 8;
        proto_word_offset = 0;
        while (ProtoWord != 0 || proto_byte != 0) {
          while (proto_byte == 0) {
            proto_byte = ProtoWord & 0xff;
            ProtoWord >>= 8;
            proto_word_offset += 8;
          }
          proto_offset = offset_table[proto_byte] + proto_word_offset;
          proto_byte = next_table[proto_byte];
          Proto = &(ProtoSet->Protos[ProtoNum + proto_offset]);
          ConfigWord = Proto->Configs[0];
          // Signed distance of the feature from the proto line (A3) and
          // angular difference scaled by kIntThetaFudge (M3).
          A3 = (((Proto->A * (Feature->X - 128)) << 1)
                - (Proto->B * (Feature->Y - 128)) + (Proto->C << 9));
          M3 =
            (((inT8) (Feature->Theta - Proto->Angle)) * kIntThetaFudge) << 1;

          // Cheap absolute value: for negative x, ~x == -x - 1 which is
          // close enough at this precision.
          if (A3 < 0)
            A3 = ~A3;
          if (M3 < 0)
            M3 = ~M3;
          A3 >>= mult_trunc_shift_bits_;
          M3 >>= mult_trunc_shift_bits_;
          // Saturate both components before squaring.
          if (A3 > evidence_mult_mask_)
            A3 = evidence_mult_mask_;
          if (M3 > evidence_mult_mask_)
            M3 = evidence_mult_mask_;

          // Squared distance -> evidence via the precomputed lookup table;
          // too-distant protos contribute zero evidence.
          A4 = (A3 * A3) + (M3 * M3);
          A4 >>= table_trunc_shift_bits_;
          if (A4 > evidence_table_mask_)
            Evidence = 0;
          else
            Evidence = similarity_evidence_table_[A4];

          if (PrintFeatureMatchesOn (Debug))
            IMDebugConfiguration (FeatureNum,
                                  ActualProtoNum + proto_offset,
                                  Evidence, ConfigMask, ConfigWord);

          // For every active config this proto belongs to, keep the max
          // evidence seen for this feature (same byte-walking trick).
          ConfigWord &= *ConfigMask;

          UINT8Pointer = tables->feature_evidence_ - 8;
          config_byte = 0;
          while (ConfigWord != 0 || config_byte != 0) {
            while (config_byte == 0) {
              config_byte = ConfigWord & 0xff;
              ConfigWord >>= 8;
              UINT8Pointer += 8;
            }
            config_offset = offset_table[config_byte];
            config_byte = next_table[config_byte];
            if (Evidence > UINT8Pointer[config_offset])
              UINT8Pointer[config_offset] = Evidence;
          }

          // Insert Evidence into this proto's sorted (descending) evidence
          // list, shifting smaller entries down; bail early once Evidence
          // reaches zero.
          UINT8Pointer =
            &(tables->proto_evidence_[ActualProtoNum + proto_offset][0]);
          for (ProtoIndex =
            ClassTemplate->ProtoLengths[ActualProtoNum + proto_offset];
          ProtoIndex > 0; ProtoIndex--, UINT8Pointer++) {
            if (Evidence > *UINT8Pointer) {
              Temp = *UINT8Pointer;
              *UINT8Pointer = Evidence;
              Evidence = Temp;
            }
            else if (Evidence == 0)
              break;
          }
        }
      }
    }
  }

  if (PrintFeatureMatchesOn(Debug)) {
    IMDebugConfigurationSum(FeatureNum, tables->feature_evidence_,
                            ClassTemplate->NumConfigs);
  }

  // Fold this feature's per-config evidence into the running sums and
  // return the total over all configs (0 => complete miss).
  IntPointer = tables->sum_feature_evidence_;
  UINT8Pointer = tables->feature_evidence_;
  int SumOverConfigs = 0;
  for (ConfigNum = ClassTemplate->NumConfigs; ConfigNum > 0; ConfigNum--) {
    int evidence = *UINT8Pointer++;
    SumOverConfigs += evidence;
    *IntPointer++ += evidence;
  }
  return SumOverConfigs;
}
/*---------------------------------------------------------------------------*/
#ifndef GRAPHICS_DISABLED
void IntegerMatcher::DebugFeatureProtoError(
    INT_CLASS ClassTemplate,
    BIT_VECTOR ProtoMask,
    BIT_VECTOR ConfigMask,
    const ScratchEvidence& tables,
    inT16 NumFeatures,
    int Debug) {
  /*
   ** Operation:
   **   Print debugging information for configurations: the config and
   **   proto masks, per-config feature error, per-proto evidence, and
   **   per-config proto error/sums, gated on the Debug flags.
   ** Exceptions: none
   ** History: Wed Feb 27 14:12:28 MST 1991, RWM, Created.
   */
  FLOAT32 ProtoConfigs[MAX_NUM_CONFIGS];
  int ConfigNum;
  uinT32 ConfigWord;
  int ProtoSetIndex;
  uinT16 ProtoNum;
  uinT8 ProtoWordNum;
  PROTO_SET ProtoSet;
  uinT16 ActualProtoNum;

  // Section 1: config mask and per-config feature error (percent).
  if (PrintMatchSummaryOn(Debug)) {
    cprintf("Configuration Mask:\n");
    for (ConfigNum = 0; ConfigNum < ClassTemplate->NumConfigs; ConfigNum++)
      cprintf("%1d", (((*ConfigMask) >> ConfigNum) & 1));
    cprintf("\n");

    cprintf("Feature Error for Configurations:\n");
    for (ConfigNum = 0; ConfigNum < ClassTemplate->NumConfigs; ConfigNum++) {
      cprintf(
          " %5.1f",
          100.0 * (1.0 -
            (FLOAT32) tables.sum_feature_evidence_[ConfigNum]
            / NumFeatures / 256.0));
    }
    cprintf("\n\n\n");
  }

  // Section 2: proto mask, one bit per proto, two pruner words per set.
  // Note: this advances the ProtoMask pointer as it prints.
  if (PrintMatchSummaryOn (Debug)) {
    cprintf ("Proto Mask:\n");
    for (ProtoSetIndex = 0; ProtoSetIndex < ClassTemplate->NumProtoSets;
    ProtoSetIndex++) {
      ActualProtoNum = (ProtoSetIndex * PROTOS_PER_PROTO_SET);
      for (ProtoWordNum = 0; ProtoWordNum < 2;
      ProtoWordNum++, ProtoMask++) {
        ActualProtoNum = (ProtoSetIndex * PROTOS_PER_PROTO_SET);
        for (ProtoNum = 0;
          ((ProtoNum < (PROTOS_PER_PROTO_SET >> 1))
          && (ActualProtoNum < ClassTemplate->NumProtos));
          ProtoNum++, ActualProtoNum++)
        cprintf ("%1d", (((*ProtoMask) >> ProtoNum) & 1));
        cprintf ("\n");
      }
    }
    cprintf ("\n");
  }

  for (int i = 0; i < ClassTemplate->NumConfigs; i++)
    ProtoConfigs[i] = 0;

  // Section 3: per-proto evidence lists, each averaged into a percentage,
  // accumulated per config the proto belongs to (into ProtoConfigs).
  if (PrintProtoMatchesOn (Debug)) {
    cprintf ("Proto Evidence:\n");
    for (ProtoSetIndex = 0; ProtoSetIndex < ClassTemplate->NumProtoSets;
    ProtoSetIndex++) {
      ProtoSet = ClassTemplate->ProtoSets[ProtoSetIndex];
      ActualProtoNum = (ProtoSetIndex * PROTOS_PER_PROTO_SET);
      for (ProtoNum = 0;
           ((ProtoNum < PROTOS_PER_PROTO_SET) &&
            (ActualProtoNum < ClassTemplate->NumProtos));
           ProtoNum++, ActualProtoNum++) {
        cprintf ("P %3d =", ActualProtoNum);
        int temp = 0;
        for (int j = 0; j < ClassTemplate->ProtoLengths[ActualProtoNum]; j++) {
          uinT8 data = tables.proto_evidence_[ActualProtoNum][j];
          cprintf(" %d", data);
          temp += data;
        }

        cprintf(" = %6.4f%%\n",
                temp / 256.0 / ClassTemplate->ProtoLengths[ActualProtoNum]);

        ConfigWord = ProtoSet->Protos[ProtoNum].Configs[0];
        ConfigNum = 0;
        while (ConfigWord) {
          cprintf ("%5d", ConfigWord & 1 ? temp : 0);
          if (ConfigWord & 1)
            ProtoConfigs[ConfigNum] += temp;
          ConfigNum++;
          ConfigWord >>= 1;
        }
        cprintf("\n");
      }
    }
  }

  // Section 4: per-config proto error (percent of config length).
  if (PrintMatchSummaryOn (Debug)) {
    cprintf ("Proto Error for Configurations:\n");
    for (ConfigNum = 0; ConfigNum < ClassTemplate->NumConfigs; ConfigNum++)
      cprintf (" %5.1f",
        100.0 * (1.0 -
        ProtoConfigs[ConfigNum] /
        ClassTemplate->ConfigLengths[ConfigNum] / 256.0));
    cprintf ("\n\n");
  }

  // Section 5: raw proto sums and config lengths.
  if (PrintProtoMatchesOn (Debug)) {
    cprintf ("Proto Sum for Configurations:\n");
    for (ConfigNum = 0; ConfigNum < ClassTemplate->NumConfigs; ConfigNum++)
      cprintf (" %4.1f", ProtoConfigs[ConfigNum] / 256.0);
    cprintf ("\n\n");

    cprintf ("Proto Length for Configurations:\n");
    for (ConfigNum = 0; ConfigNum < ClassTemplate->NumConfigs; ConfigNum++)
      cprintf (" %4.1f",
        (float) ClassTemplate->ConfigLengths[ConfigNum]);
    cprintf ("\n\n");
  }
}
/*---------------------------------------------------------------------------*/
// Displays each proto of the class in the debug window, colored by its
// average accumulated evidence (0..1), for protos belonging to a config
// enabled in ConfigMask. ProtoMask is accepted for interface symmetry but
// not consulted here.
void IntegerMatcher::DisplayProtoDebugInfo(
    INT_CLASS ClassTemplate,
    BIT_VECTOR ProtoMask,
    BIT_VECTOR ConfigMask,
    const ScratchEvidence& tables,
    bool SeparateDebugWindows) {
  uinT16 ProtoNum;
  uinT16 ActualProtoNum;
  PROTO_SET ProtoSet;
  int ProtoSetIndex;

  InitIntMatchWindowIfReqd();
  if (SeparateDebugWindows) {
    InitFeatureDisplayWindowIfReqd();
    InitProtoDisplayWindowIfReqd();
  }

  for (ProtoSetIndex = 0; ProtoSetIndex < ClassTemplate->NumProtoSets;
       ProtoSetIndex++) {
    ProtoSet = ClassTemplate->ProtoSets[ProtoSetIndex];
    ActualProtoNum = ProtoSetIndex * PROTOS_PER_PROTO_SET;
    for (ProtoNum = 0;
         ((ProtoNum < PROTOS_PER_PROTO_SET) &&
          (ActualProtoNum < ClassTemplate->NumProtos));
         ProtoNum++, ActualProtoNum++) {
      /* Compute Average for Actual Proto */
      int temp = 0;
      for (int i = 0; i < ClassTemplate->ProtoLengths[ActualProtoNum]; i++)
        temp += tables.proto_evidence_[ActualProtoNum][i];

      temp /= ClassTemplate->ProtoLengths[ActualProtoNum];

      // Only display protos that are part of at least one enabled config.
      if ((ProtoSet->Protos[ProtoNum]).Configs[0] & (*ConfigMask)) {
        DisplayIntProto(ClassTemplate, ActualProtoNum, temp / 255.0);
      }
    }
  }
}
/*---------------------------------------------------------------------------*/
// Displays each feature in the debug window, colored by the best evidence
// it produced over all configs. With ClipMatchEvidenceOn(Debug) the color
// is binary: below/above AdaptFeatureThreshold.
void IntegerMatcher::DisplayFeatureDebugInfo(
    INT_CLASS ClassTemplate,
    BIT_VECTOR ProtoMask,
    BIT_VECTOR ConfigMask,
    inT16 NumFeatures,
    const INT_FEATURE_STRUCT* Features,
    int AdaptFeatureThreshold,
    int Debug,
    bool SeparateDebugWindows) {
  // Private scratch tables: the match is recomputed (with debug off) just
  // for display purposes.
  ScratchEvidence *tables = new ScratchEvidence();

  tables->Clear(ClassTemplate);

  InitIntMatchWindowIfReqd();
  if (SeparateDebugWindows) {
    InitFeatureDisplayWindowIfReqd();
    InitProtoDisplayWindowIfReqd();
  }

  for (int Feature = 0; Feature < NumFeatures; Feature++) {
    UpdateTablesForFeature(
        ClassTemplate, ProtoMask, ConfigMask, Feature, &Features[Feature],
        tables, 0);

    /* Find Best Evidence for Current Feature */
    int best = 0;
    for (int i = 0; i < ClassTemplate->NumConfigs; i++)
      if (tables->feature_evidence_[i] > best)
        best = tables->feature_evidence_[i];

    /* Update display for current feature */
    if (ClipMatchEvidenceOn(Debug)) {
      if (best < AdaptFeatureThreshold)
        DisplayIntFeature(&Features[Feature], 0.0);
      else
        DisplayIntFeature(&Features[Feature], 1.0);
    } else {
      DisplayIntFeature(&Features[Feature], best / 255.0);
    }
  }

  delete tables;
}
#endif
/*---------------------------------------------------------------------------*/
// Add sum of Proto Evidences into Sum Of Feature Evidence Array
// Add sum of Proto Evidences into Sum Of Feature Evidence Array:
// for every proto, its total accumulated evidence is added to the sum of
// each config it belongs to (restricted to configs enabled in ConfigMask).
// NumFeatures is accepted for interface symmetry but not used here.
void ScratchEvidence::UpdateSumOfProtoEvidences(
    INT_CLASS ClassTemplate, BIT_VECTOR ConfigMask, inT16 NumFeatures) {

  int *IntPointer;
  uinT32 ConfigWord;
  int ProtoSetIndex;
  uinT16 ProtoNum;
  PROTO_SET ProtoSet;
  int NumProtos;
  uinT16 ActualProtoNum;

  NumProtos = ClassTemplate->NumProtos;

  for (ProtoSetIndex = 0; ProtoSetIndex < ClassTemplate->NumProtoSets;
       ProtoSetIndex++) {
    ProtoSet = ClassTemplate->ProtoSets[ProtoSetIndex];
    ActualProtoNum = (ProtoSetIndex * PROTOS_PER_PROTO_SET);
    for (ProtoNum = 0;
         ((ProtoNum < PROTOS_PER_PROTO_SET) && (ActualProtoNum < NumProtos));
         ProtoNum++, ActualProtoNum++) {
      // Total evidence accumulated for this proto over all features.
      int temp = 0;
      for (int i = 0; i < ClassTemplate->ProtoLengths[ActualProtoNum]; i++)
        temp += proto_evidence_[ActualProtoNum] [i];

      // Walk the config-membership bits, adding temp to every enabled
      // config's sum.
      ConfigWord = ProtoSet->Protos[ProtoNum].Configs[0];
      ConfigWord &= *ConfigMask;
      IntPointer = sum_feature_evidence_;
      while (ConfigWord) {
        if (ConfigWord & 1)
          *IntPointer += temp;
        IntPointer++;
        ConfigWord >>= 1;
      }
    }
  }
}
/*---------------------------------------------------------------------------*/
// Normalize each config's summed evidence by scaling it up by 256 and
// dividing by that config's total evidence capacity: the number of
// features plus the config's proto length.
void ScratchEvidence::NormalizeSums(
    INT_CLASS ClassTemplate, inT16 NumFeatures, inT32 used_features) {
  for (int config = 0; config < ClassTemplate->NumConfigs; ++config) {
    const int capacity = NumFeatures + ClassTemplate->ConfigLengths[config];
    sum_feature_evidence_[config] =
        (sum_feature_evidence_[config] << 8) / capacity;
  }
}
/*---------------------------------------------------------------------------*/
// Find the best and second-best configurations for the current class from
// the normalized evidence sums in tables, storing them in Result->Config
// and Result->Config2 and converting the best evidence into a certainty
// rating (0 = perfect match, 1 = no evidence).
// Returns the best normalized sum of evidences.
int IntegerMatcher::FindBestMatch(
    INT_CLASS ClassTemplate,
    const ScratchEvidence &tables,
    INT_RESULT Result) {
  int top_rating = 0;      // evidence of the best config seen so far
  int second_rating = 0;   // evidence of the runner-up config
  Result->Config = 0;
  Result->Config2 = 0;
  for (int config = 0; config < ClassTemplate->NumConfigs; ++config) {
    int rating = tables.sum_feature_evidence_[config];
    if (*classify_debug_level_ > 2)
      cprintf("Config %d, rating=%d\n", config, rating);
    if (rating > top_rating) {
      // The previous best becomes the runner-up, unless there was no
      // previous best, in which case this config fills both slots.
      if (top_rating > 0) {
        Result->Config2 = Result->Config;
        second_rating = top_rating;
      } else {
        Result->Config2 = config;
      }
      Result->Config = config;
      top_rating = rating;
    } else if (rating > second_rating) {
      Result->Config2 = config;
      second_rating = rating;
    }
  }
  // Map evidence in [0, 65536] onto a certainty rating in [0, 1].
  Result->Rating = (65536.0 - top_rating) / 65536.0;
  return top_rating;
}
// Applies the CN normalization factor to the given rating and returns
// the modified rating.
// The result is a weighted average of the matcher rating and the
// normalization factor (scaled down from [0, 256)), weighted by
// blob_length and matcher_multiplier respectively.
float IntegerMatcher::ApplyCNCorrection(float rating, int blob_length,
                                        int normalization_factor,
                                        int matcher_multiplier) {
  int divisor = blob_length + matcher_multiplier;
  // Guard against a zero divisor (e.g. empty blob with a zero multiplier):
  // return the worst rating (1.0; ratings here run 0 = best, 1 = worst)
  // instead of producing inf/NaN.
  return divisor == 0 ? 1.0f
                      : (rating * blob_length +
                         matcher_multiplier * normalization_factor / 256.0) /
                          divisor;
}
/*---------------------------------------------------------------------------*/
#ifndef GRAPHICS_DISABLED
// Print the rating percentage, winning config, and normalized distance
// for the best match of the current class.
void IntegerMatcher::DebugBestMatch(
    int BestMatch, INT_RESULT Result) {
  double rating_percent = 100.0 * Result->Rating;
  double distance = 100.0 * (65536.0 - BestMatch) / 65536.0;
  tprintf("Rating = %5.1f%% Best Config = %3d, Distance = %5.1f\n",
          rating_percent, Result->Config, distance);
}
#endif
/*---------------------------------------------------------------------------*/
void
HeapSort (int n, register int ra[], register int rb[]) {
/*
** Parameters:
** n Number of elements to sort
** ra Key array [1..n]
** rb Index array [1..n]
** Globals:
** Operation:
** Sort Key array in ascending order using heap sort
** algorithm. Also sort Index array that is tied to
** the key array.
** Return:
** Exceptions: none
** History: Tue Feb 19 10:24:24 MST 1991, RWM, Created.
*/
register int i, rra, rrb;
int l, j, ir;
l = (n >> 1) + 1;
ir = n;
for (;;) {
if (l > 1) {
rra = ra[--l];
rrb = rb[l];
}
else {
rra = ra[ir];
rrb = rb[ir];
ra[ir] = ra[1];
rb[ir] = rb[1];
if (--ir == 1) {
ra[1] = rra;
rb[1] = rrb;
return;
}
}
i = l;
j = l << 1;
while (j <= ir) {
if (j < ir && ra[j] < ra[j + 1])
++j;
if (rra < ra[j]) {
ra[i] = ra[j];
rb[i] = rb[j];
j += (i = j);
}
else
j = ir + 1;
}
ra[i] = rra;
rb[i] = rrb;
}
}
| C++ |
/******************************************************************************
** Filename: mfoutline.c
** Purpose: Interface to outline struct used for extracting features
** Author: Dan Johnson
** History: Thu May 17 08:14:18 1990, DSJ, Created.
**
** (c) Copyright Hewlett-Packard Company, 1988.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
******************************************************************************/
/*----------------------------------------------------------------------------
Include Files and Type Defines
----------------------------------------------------------------------------*/
#include "clusttool.h" // If removed, you get caught in a loop somewhere
#include "emalloc.h"
#include "mfoutline.h"
#include "blobs.h"
#include "const.h"
#include "mfx.h"
#include "params.h"
#include "classify.h"
#include <math.h>
#include <stdio.h>
#define MIN_INERTIA (0.00001)
/*----------------------------------------------------------------------------
Public Code
----------------------------------------------------------------------------*/
/*---------------------------------------------------------------------------*/
// Convert a blob into a list of MFOUTLINEs (float-based microfeature
// format). A NULL blob yields an empty list.
LIST ConvertBlob(TBLOB *blob) {
  if (blob == NULL)
    return NIL_LIST;
  return ConvertOutlines(blob->outlines, NIL_LIST, outer);
}
/*---------------------------------------------------------------------------*/
// Convert a TESSLINE into the float-based MFOUTLINE micro-feature format.
// Consecutive duplicate points are filtered out, and the resulting list is
// made circular. Returns NIL_LIST if the outline is NULL or has no loop.
MFOUTLINE ConvertOutline(TESSLINE *outline) {
  MFEDGEPT *NewPoint;
  MFOUTLINE MFOutline = NIL_LIST;
  EDGEPT *EdgePoint;
  EDGEPT *StartPoint;
  EDGEPT *NextPoint;
  if (outline == NULL || outline->loop == NULL)
    return MFOutline;
  // Walk the circular EDGEPT loop exactly once, starting at loop.
  StartPoint = outline->loop;
  EdgePoint = StartPoint;
  do {
    NextPoint = EdgePoint->next;
    /* filter out duplicate points */
    if (EdgePoint->pos.x != NextPoint->pos.x ||
        EdgePoint->pos.y != NextPoint->pos.y) {
      NewPoint = NewEdgePoint();
      ClearMark(NewPoint);
      // Carry the hidden flag and the position over from the source point.
      NewPoint->Hidden = EdgePoint->IsHidden();
      NewPoint->Point.x = EdgePoint->pos.x;
      NewPoint->Point.y = EdgePoint->pos.y;
      MFOutline = push(MFOutline, NewPoint);
    }
    EdgePoint = NextPoint;
  } while (EdgePoint != StartPoint);
  // Close the list into a ring to match the source outline's topology.
  if (MFOutline != NULL)
    MakeOutlineCircular(MFOutline);
  return MFOutline;
}
/*---------------------------------------------------------------------------*/
// Convert a chain of TESSLINE outlines to MFOUTLINEs, prepending each
// non-empty conversion to mf_outlines.
//
// Parameters:
//   outline       first outline to be converted
//   mf_outlines   list to add converted outlines to
//   outline_type  are the outlines outer or holes?
LIST ConvertOutlines(TESSLINE *outline,
                     LIST mf_outlines,
                     OUTLINETYPE outline_type) {
  for (; outline != NULL; outline = outline->next) {
    MFOUTLINE converted = ConvertOutline(outline);
    if (converted != NULL)
      mf_outlines = push(mf_outlines, converted);
  }
  return mf_outlines;
}
/*---------------------------------------------------------------------------*/
void FindDirectionChanges(MFOUTLINE Outline,
                          FLOAT32 MinSlope,
                          FLOAT32 MaxSlope) {
  // Compute a slope for each vector in the outline and mark each vector
  // with one of the eight compass directions: N, S, E, W, NE, NW, SE, SW.
  // MinSlope controls "snapping" of segments to horizontal; MaxSlope
  // controls "snapping" to vertical. Degenerate outlines are left as-is.
  if (DegenerateOutline(Outline))
    return;
  MFEDGEPT *previous = PointAt(Outline);
  MFOUTLINE first = NextPointAfter(Outline);
  MFOUTLINE scan = first;
  do {
    MFEDGEPT *current = PointAt(scan);
    ComputeDirection(previous, current, MinSlope, MaxSlope);
    previous = current;
    scan = NextPointAfter(scan);
  } while (scan != first);
} /* FindDirectionChanges */
/*---------------------------------------------------------------------------*/
void FreeMFOutline(void *arg) {  //MFOUTLINE Outline)
  /*
   ** Parameters:
   **   arg   the MFOUTLINE to be freed; passed as void* so this function
   **         can be used directly as a list-element destructor callback
   ** Globals: none
   ** Operation:
   **   This routine deallocates all of the memory consumed by
   **   a micro-feature outline. The outline is circular, so the ring is
   **   broken first; the chain starting at the second node then ends at
   **   the original first node, so every node (including the first) is
   **   freed by the loop below.
   ** Return: none
   ** Exceptions: none
   ** History: 7/27/89, DSJ, Created.
   */
  MFOUTLINE Start;
  MFOUTLINE Outline = (MFOUTLINE) arg;
  /* break the circular outline so we can use std. techniques to deallocate */
  Start = list_rest (Outline);
  set_rest(Outline, NIL_LIST);
  while (Start != NULL) {
    // Free the MFEDGEPT payload, then pop (and free) the list node itself.
    free_struct (first_node (Start), sizeof (MFEDGEPT), "MFEDGEPT");
    Start = pop (Start);
  }
} /* FreeMFOutline */
/*---------------------------------------------------------------------------*/
// Release all memory consumed by the given list of mf-outlines: each
// element is destroyed with FreeMFOutline and the list nodes are freed.
void FreeOutlines(LIST Outlines) {
  destroy_nodes(Outlines, FreeMFOutline);
} /* FreeOutlines */
/*---------------------------------------------------------------------------*/
void MarkDirectionChanges(MFOUTLINE Outline) {
  // Mark as an "extremity" every point at which the outline changes
  // direction. Used as an alternative to FindExtremities(): it forces
  // micro-feature endpoints to lie exactly at the direction changes
  // instead of midway between them. Degenerate outlines are skipped.
  if (DegenerateOutline(Outline))
    return;
  MFOUTLINE first_change = NextDirectionChange(Outline);
  MFOUTLINE scan = first_change;
  do {
    scan = NextDirectionChange(scan);
    MarkPoint(PointAt(scan));
  } while (scan != first_change);
} /* MarkDirectionChanges */
/*---------------------------------------------------------------------------*/
// Allocate and return a fresh (uninitialized) edge point for a
// micro-feature outline, using the tracked struct allocator.
MFEDGEPT *NewEdgePoint() {
  return (MFEDGEPT *) alloc_struct(sizeof(MFEDGEPT), "MFEDGEPT");
}
/*---------------------------------------------------------------------------*/
// Return the next point after EdgePoint that is marked as an extremity.
// The outline being searched must not be degenerate (it needs 2 or more
// edge points), since the scan only terminates on an ExtremityMark.
MFOUTLINE NextExtremity(MFOUTLINE EdgePoint) {
  do {
    EdgePoint = NextPointAfter(EdgePoint);
  } while (!PointAt(EdgePoint)->ExtremityMark);
  return EdgePoint;
} /* NextExtremity */
/*---------------------------------------------------------------------------*/
void NormalizeOutline(MFOUTLINE Outline,
                      FLOAT32 XOrigin) {
  // Normalize the outline coordinates: shift y down by the baseline
  // offset, translate x so that x = 0 lies at XOrigin, and scale both
  // axes by MF_SCALE_FACTOR so that a character cell from descender to
  // ascender has height 1 (0.25 descender, 0.25 ascender, 0.5 x-height)
  // with the baseline at y = 0.
  if (Outline == NIL_LIST)
    return;
  MFOUTLINE scan = Outline;
  do {
    MFEDGEPT *point = PointAt(scan);
    point->Point.y = MF_SCALE_FACTOR *
        (point->Point.y - kBlnBaselineOffset);
    point->Point.x = MF_SCALE_FACTOR * (point->Point.x - XOrigin);
    scan = NextPointAfter(scan);
  } while (scan != Outline);
} /* NormalizeOutline */
/*---------------------------------------------------------------------------*/
namespace tesseract {
void Classify::NormalizeOutlines(LIST Outlines,
                                 FLOAT32 *XScale,
                                 FLOAT32 *YScale) {
  /*
   ** Parameters:
   **   Outlines  list of outlines to be normalized
   **   XScale    x-direction scale factor used by routine
   **   YScale    y-direction scale factor used by routine
   ** Globals:
   **   classify_norm_method   method being used for normalization
   **   classify_char_norm_range  map radius of gyration to this value
   ** Operation: This routine normalizes every outline in Outlines
   **   according to the currently selected normalization method.
   **   It also returns the scale factors that it used to do this
   **   scaling. The scale factors returned represent the x and
   **   y sizes in the normalized coordinate system that correspond
   **   to 1 pixel in the original coordinate system. Only baseline
   **   mode is valid here; calling this in character mode is a
   **   fatal error.
   ** Return: none (Outlines are changed and XScale and YScale are updated)
   ** Exceptions: none
   ** History: Fri Dec 14 08:14:55 1990, DSJ, Created.
   */
  MFOUTLINE Outline;
  switch (classify_norm_method) {
    case character:
      // Character normalization goes through CharNormalizeOutline();
      // reaching this case indicates a programming error.
      ASSERT_HOST(!"How did NormalizeOutlines get called in character mode?");
      break;
    case baseline:
      // Baseline-normalize each outline in place with a fixed scale.
      iterate(Outlines) {
        Outline = (MFOUTLINE) first_node(Outlines);
        NormalizeOutline(Outline, 0.0);
      }
      *XScale = *YScale = MF_SCALE_FACTOR;
      break;
  }
} /* NormalizeOutlines */
}  // namespace tesseract
/**----------------------------------------------------------------------------
Private Code
----------------------------------------------------------------------------**/
/*---------------------------------------------------------------------------*/
// Overwrite the direction of every vector in the half-open segment
// [Start, End) with Direction, and also set End's PreviousDirection so it
// stays consistent with the new direction of the point just before it.
void ChangeDirection(MFOUTLINE Start, MFOUTLINE End, DIRECTION Direction) {
  MFOUTLINE scan;
  for (scan = Start; scan != End; scan = NextPointAfter(scan))
    PointAt(scan)->Direction = Direction;
  PointAt(End)->PreviousDirection = Direction;
} /* ChangeDirection */
/*---------------------------------------------------------------------------*/
void CharNormalizeOutline(MFOUTLINE Outline, const DENORM& cn_denorm) {
  // Character-normalize every point of Outline in place: run each point
  // through cn_denorm's local normalization transform, then re-center
  // about MAX_UINT8 / 2 and scale by MF_SCALE_FACTOR.
  if (Outline == NIL_LIST)
    return;
  MFOUTLINE scan = Outline;
  do {
    MFEDGEPT *point = PointAt(scan);
    FCOORD pos(point->Point.x, point->Point.y);
    cn_denorm.LocalNormTransform(pos, &pos);
    point->Point.x = (pos.x() - MAX_UINT8 / 2) * MF_SCALE_FACTOR;
    point->Point.y = (pos.y() - MAX_UINT8 / 2) * MF_SCALE_FACTOR;
    scan = NextPointAfter(scan);
  } while (scan != Outline);
} /* CharNormalizeOutline */
/*---------------------------------------------------------------------------*/
void ComputeDirection(MFEDGEPT *Start,
                      MFEDGEPT *Finish,
                      FLOAT32 MinSlope,
                      FLOAT32 MaxSlope) {
  /*
   ** Parameters:
   **   Start    starting point to compute direction from
   **   Finish   finishing point to compute direction to
   **   MinSlope slope below which lines are horizontal
   **   MaxSlope slope above which lines are vertical
   ** Globals: none
   ** Operation:
   **   This routine computes the slope from Start to Finish and
   **   then computes the approximate direction of the line
   **   segment from Start to Finish. The direction is quantized
   **   into 8 buckets:
   **     N, S, E, W, NE, NW, SE, SW
   **   Both the slope and the direction are then stored into
   **   the appropriate fields of the Start edge point. The
   **   direction is also stored into the PreviousDirection field
   **   of the Finish edge point.
   ** Return: none
   ** Exceptions: none
   ** History: 7/25/89, DSJ, Created.
   */
  FVECTOR Delta;
  Delta.x = Finish->Point.x - Start->Point.x;
  Delta.y = Finish->Point.y - Start->Point.y;
  // Vertical segment: slope is +/-infinity, direction is pure north/south.
  if (Delta.x == 0)
    if (Delta.y < 0) {
      Start->Slope = -MAX_FLOAT32;
      Start->Direction = south;
    }
    else {
      Start->Slope = MAX_FLOAT32;
      Start->Direction = north;
    }
  else {
    Start->Slope = Delta.y / Delta.x;
    // Quantize by the signs of Delta.x/Delta.y and by comparing |slope|
    // against the MinSlope (snap-to-horizontal) and MaxSlope
    // (snap-to-vertical) thresholds.
    if (Delta.x > 0)
      if (Delta.y > 0)
        if (Start->Slope > MinSlope)
          if (Start->Slope < MaxSlope)
            Start->Direction = northeast;
          else
            Start->Direction = north;
        else
          Start->Direction = east;
      else if (Start->Slope < -MinSlope)
        if (Start->Slope > -MaxSlope)
          Start->Direction = southeast;
        else
          Start->Direction = south;
      else
        Start->Direction = east;
    else if (Delta.y > 0)
      if (Start->Slope < -MinSlope)
        if (Start->Slope > -MaxSlope)
          Start->Direction = northwest;
        else
          Start->Direction = north;
      else
        Start->Direction = west;
    else if (Start->Slope > MinSlope)
      if (Start->Slope < MaxSlope)
        Start->Direction = southwest;
      else
        Start->Direction = south;
    else
      Start->Direction = west;
  }
  Finish->PreviousDirection = Start->Direction;
} /* ComputeDirection */
/*---------------------------------------------------------------------------*/
void FinishOutlineStats(register OUTLINE_STATS *OutlineStats) {
/*
** Parameters:
** OutlineStats statistics about a set of outlines
** Globals: none
** Operation: Use the preliminary statistics accumulated in OutlineStats
** to compute the final statistics.
** (see Dan Johnson's Tesseract lab
** notebook #2, pgs. 74-78).
** Return: none
** Exceptions: none
** History: Fri Dec 14 10:13:36 1990, DSJ, Created.
*/
OutlineStats->x = 0.5 * OutlineStats->My / OutlineStats->L;
OutlineStats->y = 0.5 * OutlineStats->Mx / OutlineStats->L;
OutlineStats->Ix = (OutlineStats->Ix / 3.0 -
OutlineStats->y * OutlineStats->Mx +
OutlineStats->y * OutlineStats->y * OutlineStats->L);
OutlineStats->Iy = (OutlineStats->Iy / 3.0 -
OutlineStats->x * OutlineStats->My +
OutlineStats->x * OutlineStats->x * OutlineStats->L);
/* Ix and/or Iy could possibly be negative due to roundoff error */
if (OutlineStats->Ix < 0.0)
OutlineStats->Ix = MIN_INERTIA;
if (OutlineStats->Iy < 0.0)
OutlineStats->Iy = MIN_INERTIA;
OutlineStats->Rx = sqrt (OutlineStats->Ix / OutlineStats->L);
OutlineStats->Ry = sqrt (OutlineStats->Iy / OutlineStats->L);
OutlineStats->Mx *= 0.5;
OutlineStats->My *= 0.5;
} /* FinishOutlineStats */
/*---------------------------------------------------------------------------*/
// Reset every accumulator and derived field of the outline statistics
// structure to zero, so it is ready to start accumulating statistics
// for a new set of outlines.
void InitOutlineStats(OUTLINE_STATS *OutlineStats) {
  // Accumulators.
  OutlineStats->L = 0.0;
  OutlineStats->Mx = 0.0;
  OutlineStats->My = 0.0;
  OutlineStats->Ix = 0.0;
  OutlineStats->Iy = 0.0;
  // Derived values computed later by FinishOutlineStats.
  OutlineStats->x = 0.0;
  OutlineStats->y = 0.0;
  OutlineStats->Rx = 0.0;
  OutlineStats->Ry = 0.0;
} /* InitOutlineStats */
/*---------------------------------------------------------------------------*/
MFOUTLINE NextDirectionChange(MFOUTLINE EdgePoint) {
  /*
   ** Parameters:
   **   EdgePoint  start search from this point
   ** Globals: none
   ** Operation:
   **   This routine returns the next point in the micro-feature
   **   outline that has a direction different than EdgePoint. The
   **   scan also stops early at a point that is hidden, or whose
   **   successor is hidden, so that segments do not span hidden
   **   regions of the outline. The routine assumes that the outline
   **   being searched is not a degenerate outline (i.e. it must have
   **   2 or more edge points).
   ** Return: Point of next direction change in micro-feature outline.
   ** Exceptions: none
   ** History: 7/25/89, DSJ, Created.
   */
  DIRECTION InitialDirection;
  InitialDirection = PointAt (EdgePoint)->Direction;
  MFOUTLINE next_pt = NULL;
  do {
    EdgePoint = NextPointAfter(EdgePoint);
    next_pt = NextPointAfter(EdgePoint);
  } while (PointAt(EdgePoint)->Direction == InitialDirection &&
           !PointAt(EdgePoint)->Hidden &&
           next_pt != NULL && !PointAt(next_pt)->Hidden);
  return (EdgePoint);
} /* NextDirectionChange */
/*---------------------------------------------------------------------------*/
void UpdateOutlineStats(register OUTLINE_STATS *OutlineStats,
register FLOAT32 x1,
register FLOAT32 x2,
register FLOAT32 y1,
register FLOAT32 y2) {
/*
** Parameters:
** OutlineStats statistics to add this segment to
** x1, y1, x2, y2 segment to be added to statistics
** Globals: none
** Operation: This routine adds the statistics for the specified
** line segment to OutlineStats. The statistics that are
** kept are:
** sum of length of all segments
** sum of 2*Mx for all segments
** sum of 2*My for all segments
** sum of 2*Mx*(y1+y2) - L*y1*y2 for all segments
** sum of 2*My*(x1+x2) - L*x1*x2 for all segments
** These numbers, once collected can later be used to easily
** compute the center of mass, first and second moments,
** and radii of gyration. (see Dan Johnson's Tesseract lab
** notebook #2, pgs. 74-78).
** Return: none
** Exceptions: none
** History: Fri Dec 14 08:59:17 1990, DSJ, Created.
*/
register FLOAT64 L;
register FLOAT64 Mx2;
register FLOAT64 My2;
/* compute length of segment */
L = sqrt ((x2 - x1) * (x2 - x1) + (y2 - y1) * (y2 - y1));
OutlineStats->L += L;
/* compute 2Mx and 2My components */
Mx2 = L * (y1 + y2);
My2 = L * (x1 + x2);
OutlineStats->Mx += Mx2;
OutlineStats->My += My2;
/* compute second moment component */
OutlineStats->Ix += Mx2 * (y1 + y2) - L * y1 * y2;
OutlineStats->Iy += My2 * (x1 + x2) - L * x1 * x2;
} /* UpdateOutlineStats */
| C++ |
// Copyright 2010 Google Inc. All Rights Reserved.
// Author: rays@google.com (Ray Smith)
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#ifndef TESSERACT_TRAINING_TRAININGSAMPLE_H__
#define TESSERACT_TRAINING_TRAININGSAMPLE_H__
#include "elst.h"
#include "featdefs.h"
#include "intfx.h"
#include "intmatcher.h"
#include "matrix.h"
#include "mf.h"
#include "picofeat.h"
#include "shapetable.h"
#include "unicharset.h"
struct Pix;
namespace tesseract {
class IntFeatureMap;
class IntFeatureSpace;
class ShapeTable;
// Number of elements of cn_feature_.
static const int kNumCNParams = 4;
// Number of ways to shift the features when randomizing.
static const int kSampleYShiftSize = 5;
// Number of ways to scale the features when randomizing.
static const int kSampleScaleSize = 3;
// Total number of different ways to manipulate the features when randomizing.
// The first and last combinations are removed to avoid an excessive
// top movement (first) and an identity transformation (last).
// WARNING: To avoid patterned duplication of samples, be sure to keep
// kSampleRandomSize prime!
// Eg with current values (kSampleYShiftSize = 5 and kSampleScaleSize = 3)
// kSampleRandomSize is 13, which is prime.
static const int kSampleRandomSize = kSampleYShiftSize * kSampleScaleSize - 2;
// ASSERT_IS_PRIME(kSampleRandomSize) !!
// A TrainingSample holds everything extracted from one training blob:
// its unichar label, source font and page, bounding box, the int/micro/CN/
// geometric features, and cached (indexed or mapped) feature forms plus
// bookkeeping (weight, max_dist, error flag) used during training.
class TrainingSample : public ELIST_LINK {
 public:
  // Default-constructed sample is empty and unlabeled; it owns no
  // feature arrays until ExtractCharDesc/CopyFromFeatures fills them.
  TrainingSample()
    : class_id_(INVALID_UNICHAR_ID), font_id_(0), page_num_(0),
      num_features_(0), num_micro_features_(0), outline_length_(0),
      features_(NULL), micro_features_(NULL), weight_(1.0),
      max_dist_(0.0), sample_index_(0),
      features_are_indexed_(false), features_are_mapped_(false),
      is_error_(false) {
  }
  ~TrainingSample();

  // Saves the given features into a TrainingSample. The features are copied,
  // so may be deleted afterwards. Delete the return value after use.
  static TrainingSample* CopyFromFeatures(const INT_FX_RESULT_STRUCT& fx_info,
                                          const TBOX& bounding_box,
                                          const INT_FEATURE_STRUCT* features,
                                          int num_features);
  // Returns the cn_feature as a FEATURE_STRUCT* needed by cntraining.
  FEATURE_STRUCT* GetCNFeature() const;
  // Constructs and returns a copy "randomized" by the method given by
  // the randomizer index. If index is out of [0, kSampleRandomSize) then
  // an exact copy is returned.
  TrainingSample* RandomizedCopy(int index) const;
  // Constructs and returns an exact copy.
  TrainingSample* Copy() const;

  // WARNING! Serialize/DeSerialize do not save/restore the "cache" data
  // members, which is mostly the mapped features, and the weight.
  // It is assumed these can all be reconstructed from what is saved.
  // Writes to the given file. Returns false in case of error.
  bool Serialize(FILE* fp) const;
  // Creates from the given file. Returns NULL in case of error.
  // If swap is true, assumes a big/little-endian swap is needed.
  static TrainingSample* DeSerializeCreate(bool swap, FILE* fp);
  // Reads from the given file. Returns false in case of error.
  // If swap is true, assumes a big/little-endian swap is needed.
  bool DeSerialize(bool swap, FILE* fp);

  // Extracts the needed information from the CHAR_DESC_STRUCT.
  void ExtractCharDesc(int feature_type, int micro_type,
                       int cn_type, int geo_type,
                       CHAR_DESC_STRUCT* char_desc);

  // Sets the mapped_features_ from the features_ using the provided
  // feature_space to the indexed versions of the features.
  void IndexFeatures(const IntFeatureSpace& feature_space);
  // Sets the mapped_features_ from the features_ using the provided
  // feature_map.
  void MapFeatures(const IntFeatureMap& feature_map);

  // Returns a pix representing the sample. (Int features only.)
  Pix* RenderToPix(const UNICHARSET* unicharset) const;
  // Displays the features in the given window with the given color.
  void DisplayFeatures(ScrollView::Color color, ScrollView* window) const;

  // Returns a pix of the original sample image. The pix is padded all round
  // by padding wherever possible.
  // The returned Pix must be pixDestroyed after use.
  // If the input page_pix is NULL, NULL is returned.
  Pix* GetSamplePix(int padding, Pix* page_pix) const;

  // Accessors.
  UNICHAR_ID class_id() const {
    return class_id_;
  }
  void set_class_id(int id) {
    class_id_ = id;
  }
  int font_id() const {
    return font_id_;
  }
  void set_font_id(int id) {
    font_id_ = id;
  }
  int page_num() const {
    return page_num_;
  }
  void set_page_num(int page) {
    page_num_ = page;
  }
  const TBOX& bounding_box() const {
    return bounding_box_;
  }
  void set_bounding_box(const TBOX& box) {
    bounding_box_ = box;
  }
  int num_features() const {
    return num_features_;
  }
  const INT_FEATURE_STRUCT* features() const {
    return features_;
  }
  int num_micro_features() const {
    return num_micro_features_;
  }
  const MicroFeature* micro_features() const {
    return micro_features_;
  }
  int outline_length() const {
    return outline_length_;
  }
  float cn_feature(int index) const {
    return cn_feature_[index];
  }
  int geo_feature(int index) const {
    return geo_feature_[index];
  }
  double weight() const {
    return weight_;
  }
  void set_weight(double value) {
    weight_ = value;
  }
  double max_dist() const {
    return max_dist_;
  }
  void set_max_dist(double value) {
    max_dist_ = value;
  }
  int sample_index() const {
    return sample_index_;
  }
  void set_sample_index(int value) {
    sample_index_ = value;
  }
  bool features_are_mapped() const {
    return features_are_mapped_;
  }
  // The mapped/indexed feature accessors assert that the corresponding
  // MapFeatures/IndexFeatures call has already been made; both views share
  // the mapped_features_ storage.
  const GenericVector<int>& mapped_features() const {
    ASSERT_HOST(features_are_mapped_);
    return mapped_features_;
  }
  const GenericVector<int>& indexed_features() const {
    ASSERT_HOST(features_are_indexed_);
    return mapped_features_;
  }
  bool is_error() const {
    return is_error_;
  }
  void set_is_error(bool value) {
    is_error_ = value;
  }

 private:
  // Unichar id that this sample represents. There obviously must be a
  // reference UNICHARSET somewhere. Usually in TrainingSampleSet.
  UNICHAR_ID class_id_;
  // Font id in which this sample was printed. Refers to a fontinfo_table_ in
  // MasterTrainer.
  int font_id_;
  // Number of page that the sample came from.
  int page_num_;
  // Bounding box of sample in original image.
  TBOX bounding_box_;
  // Number of INT_FEATURE_STRUCT in features_ array.
  int num_features_;
  // Number of MicroFeature in micro_features_ array.
  int num_micro_features_;
  // Total length of outline in the baseline normalized coordinate space.
  // See comment in WERD_RES class definition for a discussion of coordinate
  // spaces.
  int outline_length_;
  // Array of features.
  INT_FEATURE_STRUCT* features_;
  // Array of features.
  MicroFeature* micro_features_;
  // The one and only CN feature. Indexed by NORM_PARAM_NAME enum.
  float cn_feature_[kNumCNParams];
  // The one and only geometric feature. (Aims at replacing cn_feature_).
  // Indexed by GeoParams enum in picofeat.h
  int geo_feature_[GeoCount];

  // Non-serialized cache data.
  // Weight used for boosting training.
  double weight_;
  // Maximum distance to other samples of same class/font used in computing
  // the canonical sample.
  double max_dist_;
  // Global index of this sample.
  int sample_index_;
  // Indexed/mapped features, as indicated by the bools below.
  GenericVector<int> mapped_features_;
  bool features_are_indexed_;
  bool features_are_mapped_;
  // True if the last classification was an error by the current definition.
  bool is_error_;

  // Randomizing factors.
  static const int kYShiftValues[kSampleYShiftSize];
  static const double kScaleValues[kSampleScaleSize];
};
ELISTIZEH(TrainingSample)
} // namespace tesseract
#endif // TESSERACT_TRAINING_TRAININGSAMPLE_H__
| C++ |
/******************************************************************************
** Filename: cluster.c
** Purpose: Routines for clustering points in N-D space
** Author: Dan Johnson
** History: 5/29/89, DSJ, Created.
**
** (c) Copyright Hewlett-Packard Company, 1988.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
******************************************************************************/
#include "const.h"
#include "cluster.h"
#include "emalloc.h"
#include "genericheap.h"
#include "helpers.h"
#include "kdpair.h"
#include "matrix.h"
#include "tprintf.h"
#include "danerror.h"
#include "freelist.h"
#include <math.h>
#define HOTELLING 1 // If true use Hotelling's test to decide where to split.
#define FTABLE_X 10 // Size of FTable.
#define FTABLE_Y 100 // Size of FTable.
// Table of values approximating the cumulative F-distribution for a confidence of 1%.
// Critical values of the F-distribution at the 1% significance level.
// Row index = denominator degrees of freedom - 1 (1..FTABLE_Y);
// column index = numerator degrees of freedom - 1 (1..FTABLE_X).
const double FTable[FTABLE_Y][FTABLE_X] = {
 {4052.19, 4999.52, 5403.34, 5624.62, 5763.65, 5858.97, 5928.33, 5981.10, 6022.50, 6055.85,},
 {98.502,  99.000,  99.166,  99.249,  99.300,  99.333,  99.356,  99.374,  99.388,  99.399,},
 {34.116,  30.816,  29.457,  28.710,  28.237,  27.911,  27.672,  27.489,  27.345,  27.229,},
 {21.198,  18.000,  16.694,  15.977,  15.522,  15.207,  14.976,  14.799,  14.659,  14.546,},
 {16.258,  13.274,  12.060,  11.392,  10.967,  10.672,  10.456,  10.289,  10.158,  10.051,},
 {13.745,  10.925,  9.780,   9.148,   8.746,   8.466,   8.260,   8.102,   7.976,   7.874,},
 {12.246,  9.547,   8.451,   7.847,   7.460,   7.191,   6.993,   6.840,   6.719,   6.620,},
 {11.259,  8.649,   7.591,   7.006,   6.632,   6.371,   6.178,   6.029,   5.911,   5.814,},
 {10.561,  8.022,   6.992,   6.422,   6.057,   5.802,   5.613,   5.467,   5.351,   5.257,},
 {10.044,  7.559,   6.552,   5.994,   5.636,   5.386,   5.200,   5.057,   4.942,   4.849,},
 { 9.646,  7.206,   6.217,   5.668,   5.316,   5.069,   4.886,   4.744,   4.632,   4.539,},
 { 9.330,  6.927,   5.953,   5.412,   5.064,   4.821,   4.640,   4.499,   4.388,   4.296,},
 { 9.074,  6.701,   5.739,   5.205,   4.862,   4.620,   4.441,   4.302,   4.191,   4.100,},
 { 8.862,  6.515,   5.564,   5.035,   4.695,   4.456,   4.278,   4.140,   4.030,   3.939,},
 { 8.683,  6.359,   5.417,   4.893,   4.556,   4.318,   4.142,   4.004,   3.895,   3.805,},
 { 8.531,  6.226,   5.292,   4.773,   4.437,   4.202,   4.026,   3.890,   3.780,   3.691,},
 { 8.400,  6.112,   5.185,   4.669,   4.336,   4.102,   3.927,   3.791,   3.682,   3.593,},
 { 8.285,  6.013,   5.092,   4.579,   4.248,   4.015,   3.841,   3.705,   3.597,   3.508,},
 { 8.185,  5.926,   5.010,   4.500,   4.171,   3.939,   3.765,   3.631,   3.523,   3.434,},
 { 8.096,  5.849,   4.938,   4.431,   4.103,   3.871,   3.699,   3.564,   3.457,   3.368,},
 { 8.017,  5.780,   4.874,   4.369,   4.042,   3.812,   3.640,   3.506,   3.398,   3.310,},
 { 7.945,  5.719,   4.817,   4.313,   3.988,   3.758,   3.587,   3.453,   3.346,   3.258,},
 { 7.881,  5.664,   4.765,   4.264,   3.939,   3.710,   3.539,   3.406,   3.299,   3.211,},
 { 7.823,  5.614,   4.718,   4.218,   3.895,   3.667,   3.496,   3.363,   3.256,   3.168,},
 { 7.770,  5.568,   4.675,   4.177,   3.855,   3.627,   3.457,   3.324,   3.217,   3.129,},
 { 7.721,  5.526,   4.637,   4.140,   3.818,   3.591,   3.421,   3.288,   3.182,   3.094,},
 { 7.677,  5.488,   4.601,   4.106,   3.785,   3.558,   3.388,   3.256,   3.149,   3.062,},
 { 7.636,  5.453,   4.568,   4.074,   3.754,   3.528,   3.358,   3.226,   3.120,   3.032,},
 { 7.598,  5.420,   4.538,   4.045,   3.725,   3.499,   3.330,   3.198,   3.092,   3.005,},
 { 7.562,  5.390,   4.510,   4.018,   3.699,   3.473,   3.305,   3.173,   3.067,   2.979,},
 { 7.530,  5.362,   4.484,   3.993,   3.675,   3.449,   3.281,   3.149,   3.043,   2.955,},
 { 7.499,  5.336,   4.459,   3.969,   3.652,   3.427,   3.258,   3.127,   3.021,   2.934,},
 { 7.471,  5.312,   4.437,   3.948,   3.630,   3.406,   3.238,   3.106,   3.000,   2.913,},
 { 7.444,  5.289,   4.416,   3.927,   3.611,   3.386,   3.218,   3.087,   2.981,   2.894,},
 { 7.419,  5.268,   4.396,   3.908,   3.592,   3.368,   3.200,   3.069,   2.963,   2.876,},
 { 7.396,  5.248,   4.377,   3.890,   3.574,   3.351,   3.183,   3.052,   2.946,   2.859,},
 { 7.373,  5.229,   4.360,   3.873,   3.558,   3.334,   3.167,   3.036,   2.930,   2.843,},
 { 7.353,  5.211,   4.343,   3.858,   3.542,   3.319,   3.152,   3.021,   2.915,   2.828,},
 { 7.333,  5.194,   4.327,   3.843,   3.528,   3.305,   3.137,   3.006,   2.901,   2.814,},
 { 7.314,  5.179,   4.313,   3.828,   3.514,   3.291,   3.124,   2.993,   2.888,   2.801,},
 { 7.296,  5.163,   4.299,   3.815,   3.501,   3.278,   3.111,   2.980,   2.875,   2.788,},
 { 7.280,  5.149,   4.285,   3.802,   3.488,   3.266,   3.099,   2.968,   2.863,   2.776,},
 { 7.264,  5.136,   4.273,   3.790,   3.476,   3.254,   3.087,   2.957,   2.851,   2.764,},
 { 7.248,  5.123,   4.261,   3.778,   3.465,   3.243,   3.076,   2.946,   2.840,   2.754,},
 { 7.234,  5.110,   4.249,   3.767,   3.454,   3.232,   3.066,   2.935,   2.830,   2.743,},
 { 7.220,  5.099,   4.238,   3.757,   3.444,   3.222,   3.056,   2.925,   2.820,   2.733,},
 { 7.207,  5.087,   4.228,   3.747,   3.434,   3.213,   3.046,   2.916,   2.811,   2.724,},
 { 7.194,  5.077,   4.218,   3.737,   3.425,   3.204,   3.037,   2.907,   2.802,   2.715,},
 { 7.182,  5.066,   4.208,   3.728,   3.416,   3.195,   3.028,   2.898,   2.793,   2.706,},
 { 7.171,  5.057,   4.199,   3.720,   3.408,   3.186,   3.020,   2.890,   2.785,   2.698,},
 { 7.159,  5.047,   4.191,   3.711,   3.400,   3.178,   3.012,   2.882,   2.777,   2.690,},
 { 7.149,  5.038,   4.182,   3.703,   3.392,   3.171,   3.005,   2.874,   2.769,   2.683,},
 { 7.139,  5.030,   4.174,   3.695,   3.384,   3.163,   2.997,   2.867,   2.762,   2.675,},
 { 7.129,  5.021,   4.167,   3.688,   3.377,   3.156,   2.990,   2.860,   2.755,   2.668,},
 { 7.119,  5.013,   4.159,   3.681,   3.370,   3.149,   2.983,   2.853,   2.748,   2.662,},
 { 7.110,  5.006,   4.152,   3.674,   3.363,   3.143,   2.977,   2.847,   2.742,   2.655,},
 { 7.102,  4.998,   4.145,   3.667,   3.357,   3.136,   2.971,   2.841,   2.736,   2.649,},
 { 7.093,  4.991,   4.138,   3.661,   3.351,   3.130,   2.965,   2.835,   2.730,   2.643,},
 { 7.085,  4.984,   4.132,   3.655,   3.345,   3.124,   2.959,   2.829,   2.724,   2.637,},
 { 7.077,  4.977,   4.126,   3.649,   3.339,   3.119,   2.953,   2.823,   2.718,   2.632,},
 { 7.070,  4.971,   4.120,   3.643,   3.333,   3.113,   2.948,   2.818,   2.713,   2.626,},
 { 7.062,  4.965,   4.114,   3.638,   3.328,   3.108,   2.942,   2.813,   2.708,   2.621,},
 { 7.055,  4.959,   4.109,   3.632,   3.323,   3.103,   2.937,   2.808,   2.703,   2.616,},
 { 7.048,  4.953,   4.103,   3.627,   3.318,   3.098,   2.932,   2.803,   2.698,   2.611,},
 { 7.042,  4.947,   4.098,   3.622,   3.313,   3.093,   2.928,   2.798,   2.693,   2.607,},
 { 7.035,  4.942,   4.093,   3.618,   3.308,   3.088,   2.923,   2.793,   2.689,   2.602,},
 { 7.029,  4.937,   4.088,   3.613,   3.304,   3.084,   2.919,   2.789,   2.684,   2.598,},
 { 7.023,  4.932,   4.083,   3.608,   3.299,   3.080,   2.914,   2.785,   2.680,   2.593,},
 { 7.017,  4.927,   4.079,   3.604,   3.295,   3.075,   2.910,   2.781,   2.676,   2.589,},
 { 7.011,  4.922,   4.074,   3.600,   3.291,   3.071,   2.906,   2.777,   2.672,   2.585,},
 { 7.006,  4.917,   4.070,   3.596,   3.287,   3.067,   2.902,   2.773,   2.668,   2.581,},
 { 7.001,  4.913,   4.066,   3.591,   3.283,   3.063,   2.898,   2.769,   2.664,   2.578,},
 { 6.995,  4.908,   4.062,   3.588,   3.279,   3.060,   2.895,   2.765,   2.660,   2.574,},
 { 6.990,  4.904,   4.058,   3.584,   3.275,   3.056,   2.891,   2.762,   2.657,   2.570,},
 { 6.985,  4.900,   4.054,   3.580,   3.272,   3.052,   2.887,   2.758,   2.653,   2.567,},
 { 6.981,  4.896,   4.050,   3.577,   3.268,   3.049,   2.884,   2.755,   2.650,   2.563,},
 { 6.976,  4.892,   4.047,   3.573,   3.265,   3.046,   2.881,   2.751,   2.647,   2.560,},
 { 6.971,  4.888,   4.043,   3.570,   3.261,   3.042,   2.877,   2.748,   2.644,   2.557,},
 { 6.967,  4.884,   4.040,   3.566,   3.258,   3.039,   2.874,   2.745,   2.640,   2.554,},
 { 6.963,  4.881,   4.036,   3.563,   3.255,   3.036,   2.871,   2.742,   2.637,   2.551,},
 { 6.958,  4.877,   4.033,   3.560,   3.252,   3.033,   2.868,   2.739,   2.634,   2.548,},
 { 6.954,  4.874,   4.030,   3.557,   3.249,   3.030,   2.865,   2.736,   2.632,   2.545,},
 { 6.950,  4.870,   4.027,   3.554,   3.246,   3.027,   2.863,   2.733,   2.629,   2.542,},
 { 6.947,  4.867,   4.024,   3.551,   3.243,   3.025,   2.860,   2.731,   2.626,   2.539,},
 { 6.943,  4.864,   4.021,   3.548,   3.240,   3.022,   2.857,   2.728,   2.623,   2.537,},
 { 6.939,  4.861,   4.018,   3.545,   3.238,   3.019,   2.854,   2.725,   2.621,   2.534,},
 { 6.935,  4.858,   4.015,   3.543,   3.235,   3.017,   2.852,   2.723,   2.618,   2.532,},
 { 6.932,  4.855,   4.012,   3.540,   3.233,   3.014,   2.849,   2.720,   2.616,   2.529,},
 { 6.928,  4.852,   4.010,   3.538,   3.230,   3.012,   2.847,   2.718,   2.613,   2.527,},
 { 6.925,  4.849,   4.007,   3.535,   3.228,   3.009,   2.845,   2.715,   2.611,   2.524,},
 { 6.922,  4.846,   4.004,   3.533,   3.225,   3.007,   2.842,   2.713,   2.609,   2.522,},
 { 6.919,  4.844,   4.002,   3.530,   3.223,   3.004,   2.840,   2.711,   2.606,   2.520,},
 { 6.915,  4.841,   3.999,   3.528,   3.221,   3.002,   2.838,   2.709,   2.604,   2.518,},
 { 6.912,  4.838,   3.997,   3.525,   3.218,   3.000,   2.835,   2.706,   2.602,   2.515,},
 { 6.909,  4.836,   3.995,   3.523,   3.216,   2.998,   2.833,   2.704,   2.600,   2.513,},
 { 6.906,  4.833,   3.992,   3.521,   3.214,   2.996,   2.831,   2.702,   2.598,   2.511,},
 { 6.904,  4.831,   3.990,   3.519,   3.212,   2.994,   2.829,   2.700,   2.596,   2.509,},
 { 6.901,  4.829,   3.988,   3.517,   3.210,   2.992,   2.827,   2.698,   2.594,   2.507,},
 { 6.898,  4.826,   3.986,   3.515,   3.208,   2.990,   2.825,   2.696,   2.592,   2.505,},
 { 6.895,  4.824,   3.984,   3.513,   3.206,   2.988,   2.823,   2.694,   2.590,   2.503}
};
/* define the variance which will be used as a minimum variance for any
dimension of any feature. Since most features are calculated from numbers
with a precision no better than 1 in 128, the variance should never be
less than the square of this number for parameters whose range is 1. */
#define MINVARIANCE 0.0004
/* define the absolute minimum number of samples which must be present in
order to accurately test hypotheses about underlying probability
distributions. Define separately the minimum samples that are needed
before a statistical analysis is attempted; this number should be
equal to MINSAMPLES but can be set to a lower number for early testing
when very few samples are available. */
#define MINSAMPLESPERBUCKET 5
#define MINSAMPLES (MINBUCKETS * MINSAMPLESPERBUCKET)
#define MINSAMPLESNEEDED 1
/* define the size of the table which maps normalized samples to
histogram buckets. Also define the number of standard deviations
in a normal distribution which are considered to be significant.
The mapping table will be defined in such a way that it covers
the specified number of standard deviations on either side of
the mean. BUCKETTABLESIZE should always be even. */
#define BUCKETTABLESIZE 1024
#define NORMALEXTENT 3.0
// A candidate merge: a cluster paired with its current nearest neighbor
// in the kd-tree. These pairs are kept on a heap ordered by distance so
// the closest pair is always merged first.
struct TEMPCLUSTER {
  CLUSTER *Cluster;   // cluster whose nearest neighbor was searched for
  CLUSTER *Neighbor;  // closest other cluster found for Cluster
};
typedef tesseract::KDPairInc<float, TEMPCLUSTER*> ClusterPair;
typedef tesseract::GenericHeap<ClusterPair> ClusterHeap;
// Per-cluster statistics computed from the samples of one cluster
// (filled in by ComputeStatistics, whose body is not shown here).
struct STATISTICS {
  FLOAT32 AvgVariance;  // summary variance across dimensions -- presumably an
                        // average; confirm against ComputeStatistics
  FLOAT32 *CoVariance;  // covariance matrix, one entry per dimension pair
  FLOAT32 *Min;         // largest negative distance from the mean
  FLOAT32 *Max;         // largest positive distance from the mean
};
// Histogram of sample values in one dimension plus the histogram expected
// under a hypothesized distribution; used for chi-squared goodness-of-fit
// testing (see FillBuckets / DistributionOK).
struct BUCKETS {
  DISTRIBUTION Distribution;      // distribution being tested for
  uinT32 SampleCount;             // # of samples in histogram
  FLOAT64 Confidence;             // confidence level of test
  FLOAT64 ChiSquared;             // test threshold
  uinT16 NumberOfBuckets;         // number of cells in histogram
  uinT16 Bucket[BUCKETTABLESIZE]; // mapping to histogram buckets
  uinT32 *Count;                  // frequency of occurrence histogram
  FLOAT32 *ExpectedCount;         // expected histogram
};
// Cached chi-squared value for a given (degrees of freedom, alpha) pair,
// so repeated significance tests do not re-solve the same integral.
struct CHISTRUCT {
  uinT16 DegreesOfFreedom;  // degrees of freedom of the chi-squared distribution
  FLOAT64 Alpha;            // significance level the value was solved for
  FLOAT64 ChiSquared;       // chi-squared value corresponding to Alpha
};
// Shared state threaded through KDWalk into MakePotentialClusters while
// building the initial heap of candidate merges.
struct ClusteringContext {
  ClusterHeap *heap;        // heap used to hold temp clusters, "best" on top
  TEMPCLUSTER *candidates;  // array of potential clusters
  KDTREE *tree;             // kd-tree to be searched for neighbors
  inT32 next;               // next candidate (slot in candidates) to be used
};
typedef FLOAT64 (*DENSITYFUNC) (inT32);
typedef FLOAT64 (*SOLVEFUNC) (CHISTRUCT *, double);
#define Odd(N) ((N)%2)
#define Mirror(N,R) ((R) - (N) - 1)
#define Abs(N) ( ( (N) < 0 ) ? ( -(N) ) : (N) )
//--------------Global Data Definitions and Declarations----------------------
/* the following variables describe a discrete normal distribution
which is used by NormalDensity() and NormalBucket(). The
constant NORMALEXTENT determines how many standard
deviations of the distribution are mapped onto the fixed
discrete range of x. x=0 is mapped to -NORMALEXTENT standard
deviations and x=BUCKETTABLESIZE is mapped to
+NORMALEXTENT standard deviations. */
#define SqrtOf2Pi 2.506628275
static const FLOAT64 kNormalStdDev = BUCKETTABLESIZE / (2.0 * NORMALEXTENT);
static const FLOAT64 kNormalVariance =
(BUCKETTABLESIZE * BUCKETTABLESIZE) / (4.0 * NORMALEXTENT * NORMALEXTENT);
static const FLOAT64 kNormalMagnitude =
(2.0 * NORMALEXTENT) / (SqrtOf2Pi * BUCKETTABLESIZE);
static const FLOAT64 kNormalMean = BUCKETTABLESIZE / 2;
/* define lookup tables used to compute the number of histogram buckets
that should be used for a given number of samples. */
#define LOOKUPTABLESIZE 8
#define MAXDEGREESOFFREEDOM MAXBUCKETS
static const uinT32 kCountTable[LOOKUPTABLESIZE] = {
MINSAMPLES, 200, 400, 600, 800, 1000, 1500, 2000
}; // number of samples
static const uinT16 kBucketsTable[LOOKUPTABLESIZE] = {
MINBUCKETS, 16, 20, 24, 27, 30, 35, MAXBUCKETS
}; // number of buckets
/*-------------------------------------------------------------------------
Private Function Prototypes
--------------------------------------------------------------------------*/
void CreateClusterTree(CLUSTERER *Clusterer);
void MakePotentialClusters(ClusteringContext *context, CLUSTER *Cluster,
inT32 Level);
CLUSTER *FindNearestNeighbor(KDTREE *Tree,
CLUSTER *Cluster,
FLOAT32 *Distance);
CLUSTER *MakeNewCluster(CLUSTERER *Clusterer, TEMPCLUSTER *TempCluster);
inT32 MergeClusters (inT16 N,
register PARAM_DESC ParamDesc[],
register inT32 n1,
register inT32 n2,
register FLOAT32 m[],
register FLOAT32 m1[], register FLOAT32 m2[]);
void ComputePrototypes(CLUSTERER *Clusterer, CLUSTERCONFIG *Config);
PROTOTYPE *MakePrototype(CLUSTERER *Clusterer,
CLUSTERCONFIG *Config,
CLUSTER *Cluster);
PROTOTYPE *MakeDegenerateProto(uinT16 N,
CLUSTER *Cluster,
STATISTICS *Statistics,
PROTOSTYLE Style,
inT32 MinSamples);
PROTOTYPE *TestEllipticalProto(CLUSTERER *Clusterer,
CLUSTERCONFIG *Config,
CLUSTER *Cluster,
STATISTICS *Statistics);
PROTOTYPE *MakeSphericalProto(CLUSTERER *Clusterer,
CLUSTER *Cluster,
STATISTICS *Statistics,
BUCKETS *Buckets);
PROTOTYPE *MakeEllipticalProto(CLUSTERER *Clusterer,
CLUSTER *Cluster,
STATISTICS *Statistics,
BUCKETS *Buckets);
PROTOTYPE *MakeMixedProto(CLUSTERER *Clusterer,
CLUSTER *Cluster,
STATISTICS *Statistics,
BUCKETS *NormalBuckets,
FLOAT64 Confidence);
void MakeDimRandom(uinT16 i, PROTOTYPE *Proto, PARAM_DESC *ParamDesc);
void MakeDimUniform(uinT16 i, PROTOTYPE *Proto, STATISTICS *Statistics);
STATISTICS *ComputeStatistics (inT16 N,
PARAM_DESC ParamDesc[], CLUSTER * Cluster);
PROTOTYPE *NewSphericalProto(uinT16 N,
CLUSTER *Cluster,
STATISTICS *Statistics);
PROTOTYPE *NewEllipticalProto(inT16 N,
CLUSTER *Cluster,
STATISTICS *Statistics);
PROTOTYPE *NewMixedProto(inT16 N, CLUSTER *Cluster, STATISTICS *Statistics);
PROTOTYPE *NewSimpleProto(inT16 N, CLUSTER *Cluster);
BOOL8 Independent (PARAM_DESC ParamDesc[],
inT16 N, FLOAT32 * CoVariance, FLOAT32 Independence);
BUCKETS *GetBuckets(CLUSTERER* clusterer,
DISTRIBUTION Distribution,
uinT32 SampleCount,
FLOAT64 Confidence);
BUCKETS *MakeBuckets(DISTRIBUTION Distribution,
uinT32 SampleCount,
FLOAT64 Confidence);
uinT16 OptimumNumberOfBuckets(uinT32 SampleCount);
FLOAT64 ComputeChiSquared(uinT16 DegreesOfFreedom, FLOAT64 Alpha);
FLOAT64 NormalDensity(inT32 x);
FLOAT64 UniformDensity(inT32 x);
FLOAT64 Integral(FLOAT64 f1, FLOAT64 f2, FLOAT64 Dx);
void FillBuckets(BUCKETS *Buckets,
CLUSTER *Cluster,
uinT16 Dim,
PARAM_DESC *ParamDesc,
FLOAT32 Mean,
FLOAT32 StdDev);
uinT16 NormalBucket(PARAM_DESC *ParamDesc,
FLOAT32 x,
FLOAT32 Mean,
FLOAT32 StdDev);
uinT16 UniformBucket(PARAM_DESC *ParamDesc,
FLOAT32 x,
FLOAT32 Mean,
FLOAT32 StdDev);
BOOL8 DistributionOK(BUCKETS *Buckets);
void FreeStatistics(STATISTICS *Statistics);
void FreeBuckets(BUCKETS *Buckets);
void FreeCluster(CLUSTER *Cluster);
uinT16 DegreesOfFreedom(DISTRIBUTION Distribution, uinT16 HistogramBuckets);
int NumBucketsMatch(void *arg1, // BUCKETS *Histogram,
void *arg2); // uinT16 *DesiredNumberOfBuckets);
int ListEntryMatch(void *arg1, void *arg2);
void AdjustBuckets(BUCKETS *Buckets, uinT32 NewSampleCount);
void InitBuckets(BUCKETS *Buckets);
int AlphaMatch(void *arg1, // CHISTRUCT *ChiStruct,
void *arg2); // CHISTRUCT *SearchKey);
CHISTRUCT *NewChiStruct(uinT16 DegreesOfFreedom, FLOAT64 Alpha);
FLOAT64 Solve(SOLVEFUNC Function,
void *FunctionParams,
FLOAT64 InitialGuess,
FLOAT64 Accuracy);
FLOAT64 ChiArea(CHISTRUCT *ChiParams, FLOAT64 x);
BOOL8 MultipleCharSamples(CLUSTERER *Clusterer,
CLUSTER *Cluster,
FLOAT32 MaxIllegal);
double InvertMatrix(const float* input, int size, float* inv);
//--------------------------Public Code--------------------------------------
/** MakeClusterer **********************************************************
Parameters: SampleSize number of dimensions in feature space
ParamDesc description of each dimension
Operation: This routine creates a new clusterer data structure,
initializes it, and returns a pointer to it.
Return: pointer to the new clusterer data structure
Exceptions: None
History: 5/29/89, DSJ, Created.
****************************************************************************/
/** MakeClusterer **********************************************************
Parameters:	SampleSize	number of dimensions in feature space
            ParamDesc	description of each dimension
Operation:	Allocates a new CLUSTERER, copies the dimension descriptors
            into it (precomputing Range/HalfRange/MidRange), creates an
            empty kd-tree for the samples and clears the histogram-bucket
            cache.
Return:		pointer to the new clusterer data structure
Exceptions:	None
****************************************************************************/
CLUSTERER *
MakeClusterer (inT16 SampleSize, const PARAM_DESC ParamDesc[]) {
  // Allocate the main structure and initialize the scalar fields.
  CLUSTERER *Clusterer = (CLUSTERER *) Emalloc(sizeof(CLUSTERER));
  Clusterer->SampleSize = SampleSize;
  Clusterer->NumberOfSamples = 0;
  Clusterer->NumChar = 0;
  // These are only filled in once clustering has been run.
  Clusterer->Root = NULL;
  Clusterer->ProtoList = NIL_LIST;
  // Keep a private copy of the parameter descriptors, with the derived
  // Range/HalfRange/MidRange fields precomputed for each dimension.
  Clusterer->ParamDesc =
      (PARAM_DESC *) Emalloc(SampleSize * sizeof(PARAM_DESC));
  for (int i = 0; i < SampleSize; i++) {
    PARAM_DESC *dest = &Clusterer->ParamDesc[i];
    const PARAM_DESC *src = &ParamDesc[i];
    dest->Circular = src->Circular;
    dest->NonEssential = src->NonEssential;
    dest->Min = src->Min;
    dest->Max = src->Max;
    dest->Range = src->Max - src->Min;
    dest->HalfRange = dest->Range / 2;
    dest->MidRange = (src->Max + src->Min) / 2;
  }
  // The samples live in a kd-tree until they are clustered.
  Clusterer->KDTree = MakeKDTree(SampleSize, ParamDesc);
  // Empty the cache of histogram buckets used to avoid recomputing them.
  for (int dist = 0; dist < DISTRIBUTION_COUNT; ++dist) {
    for (int cell = 0; cell < MAXBUCKETS + 1 - MINBUCKETS; ++cell)
      Clusterer->bucket_cache[dist][cell] = NULL;
  }
  return Clusterer;
}  // MakeClusterer
/** MakeSample ***********************************************************
Parameters: Clusterer clusterer data structure to add sample to
Feature feature to be added to clusterer
CharID unique ident. of char that sample came from
Operation: This routine creates a new sample data structure to hold
the specified feature. This sample is added to the clusterer
data structure (so that it knows which samples are to be
clustered later), and a pointer to the sample is returned to
the caller.
Return: Pointer to the new sample data structure
Exceptions: ALREADYCLUSTERED MakeSample can't be called after
ClusterSamples has been called
History: 5/29/89, DSJ, Created.
*****************************************************************************/
/** MakeSample ***********************************************************
Parameters:	Clusterer	clusterer data structure to add sample to
            Feature		feature to be added to clusterer
            CharID		unique ident. of char that sample came from
Operation:	Allocates a SAMPLE holding a copy of Feature, registers it in
            the clusterer's kd-tree, and updates the sample/char counts.
Return:		Pointer to the new sample data structure
Exceptions:	ALREADYCLUSTERED	MakeSample can't be called after
            ClusterSamples has been called
*****************************************************************************/
SAMPLE* MakeSample(CLUSTERER * Clusterer, const FLOAT32* Feature,
                   inT32 CharID) {
  // Adding samples after clustering would invalidate the cluster tree.
  if (Clusterer->Root != NULL)
    DoError (ALREADYCLUSTERED,
             "Can't add samples after they have been clustered");
  // Allocate the sample; SAMPLE ends in a one-element Mean[] array, so
  // SampleSize - 1 extra FLOAT32s extend it to the full feature vector.
  SAMPLE *Sample = (SAMPLE *) Emalloc(
      sizeof(SAMPLE) + (Clusterer->SampleSize - 1) * sizeof(FLOAT32));
  Sample->Clustered = FALSE;
  Sample->Prototype = FALSE;
  Sample->SampleCount = 1;
  Sample->Left = NULL;
  Sample->Right = NULL;
  Sample->CharID = CharID;
  for (int dim = 0; dim < Clusterer->SampleSize; dim++)
    Sample->Mean[dim] = Feature[dim];
  // Track the total number of samples and store the sample in the kd-tree.
  Clusterer->NumberOfSamples++;
  KDStore (Clusterer->KDTree, Sample->Mean, (char *) Sample);
  // NumChar is one more than the largest CharID seen so far.
  if (CharID >= Clusterer->NumChar)
    Clusterer->NumChar = CharID + 1;
  // execute hook for monitoring clustering operation
  // (*SampleCreationHook)( Sample );
  return Sample;
}  // MakeSample
/** ClusterSamples ***********************************************************
Parameters: Clusterer data struct containing samples to be clustered
Config parameters which control clustering process
Operation: This routine first checks to see if the samples in this
clusterer have already been clustered before; if so, it does
not bother to recreate the cluster tree. It simply recomputes
the prototypes based on the new Config info.
If the samples have not been clustered before, the
samples in the KD tree are formed into a cluster tree and then
the prototypes are computed from the cluster tree.
In either case this routine returns a pointer to a
list of prototypes that best represent the samples given
the constraints specified in Config.
Return: Pointer to a list of prototypes
Exceptions: None
History: 5/29/89, DSJ, Created.
*******************************************************************************/
/** ClusterSamples ***********************************************************
Parameters:	Clusterer	data struct containing samples to be clustered
            Config		parameters which control clustering process
Operation:	Builds the cluster tree on the first call (subsequent calls
            reuse the existing tree), then recomputes the prototype list
            from scratch using the supplied Config.
Return:		Pointer to a list of prototypes
Exceptions:	None
*******************************************************************************/
LIST ClusterSamples(CLUSTERER *Clusterer, CLUSTERCONFIG *Config) {
  // The cluster tree is built once; later calls only redo the prototypes.
  if (Clusterer->Root == NULL)
    CreateClusterTree(Clusterer);
  // Throw away any prototypes from a previous Config.
  FreeProtoList(&Clusterer->ProtoList);
  Clusterer->ProtoList = NIL_LIST;
  // Derive prototypes from the tree, starting at its root.
  ComputePrototypes(Clusterer, Config);
  return Clusterer->ProtoList;
}  // ClusterSamples
/** FreeClusterer *************************************************************
Parameters: Clusterer pointer to data structure to be freed
Operation: This routine frees all of the memory allocated to the
specified data structure. It will not, however, free
the memory used by the prototype list. The pointers to
the clusters for each prototype in the list will be set
to NULL to indicate that the cluster data structures no
longer exist. Any sample lists that have been obtained
via calls to GetSamples are no longer valid.
Return: None
Exceptions: None
History: 6/6/89, DSJ, Created.
*******************************************************************************/
/** FreeClusterer *************************************************************
Parameters:	Clusterer	pointer to data structure to be freed
Operation:	Frees the clusterer, its parameter descriptors, kd-tree,
            cluster tree and cached histogram buckets. The prototype list
            is NOT freed; clusters referenced by prototypes cease to exist,
            so their Cluster pointers become invalid (set to NULL by
            FreeCluster). Safe to call with NULL.
Return:		None
Exceptions:	None
*******************************************************************************/
void FreeClusterer(CLUSTERER *Clusterer) {
  if (Clusterer == NULL)
    return;
  memfree(Clusterer->ParamDesc);
  if (Clusterer->KDTree != NULL)
    FreeKDTree(Clusterer->KDTree);
  if (Clusterer->Root != NULL)
    FreeCluster(Clusterer->Root);
  // Release every buckets structure still held in the cache.
  for (int dist = 0; dist < DISTRIBUTION_COUNT; ++dist) {
    for (int cell = 0; cell < MAXBUCKETS + 1 - MINBUCKETS; ++cell) {
      BUCKETS *buckets = Clusterer->bucket_cache[dist][cell];
      if (buckets != NULL)
        FreeBuckets(buckets);
    }
  }
  memfree(Clusterer);
}  // FreeClusterer
/** FreeProtoList ************************************************************
Parameters: ProtoList pointer to list of prototypes to be freed
Operation: This routine frees all of the memory allocated to the
specified list of prototypes. The clusters which are
pointed to by the prototypes are not freed.
Return: None
Exceptions: None
History: 6/6/89, DSJ, Created.
*****************************************************************************/
void FreeProtoList(LIST *ProtoList) {
  // destroy_nodes frees each list node and applies FreePrototype to the
  // prototype it carries; the clusters the prototypes point to are kept.
  destroy_nodes(*ProtoList, FreePrototype);
}  // FreeProtoList
/** FreePrototype ************************************************************
Parameters: Prototype prototype data structure to be deallocated
Operation: This routine deallocates the memory consumed by the specified
prototype and modifies the corresponding cluster so that it
is no longer marked as a prototype. The cluster is NOT
deallocated by this routine.
Return: None
Exceptions: None
History: 5/30/89, DSJ, Created.
*******************************************************************************/
/** FreePrototype ************************************************************
Parameters:	arg	prototype data structure to be deallocated (PROTOTYPE *)
Operation:	Deallocates the memory consumed by the specified prototype and
            clears the Prototype flag on the corresponding cluster. The
            cluster itself is NOT deallocated.
Return:		None
Exceptions:	None
*******************************************************************************/
void FreePrototype(void *arg) {  // PROTOTYPE *Prototype
  PROTOTYPE *Prototype = (PROTOTYPE *) arg;
  // Unmark the corresponding cluster (if there is one).
  if (Prototype->Cluster != NULL)
    Prototype->Cluster->Prototype = FALSE;
  // Release the prototype's mean and per-dimension distribution info.
  if (Prototype->Mean != NULL)
    memfree(Prototype->Mean);
  if (Prototype->Distrib != NULL)
    memfree(Prototype->Distrib);
  // Spherical prototypes store their statistics inline; every other style
  // owns separately-allocated elliptical arrays.
  if (Prototype->Style != spherical) {
    if (Prototype->Variance.Elliptical != NULL)
      memfree(Prototype->Variance.Elliptical);
    if (Prototype->Magnitude.Elliptical != NULL)
      memfree(Prototype->Magnitude.Elliptical);
    if (Prototype->Weight.Elliptical != NULL)
      memfree(Prototype->Weight.Elliptical);
  }
  memfree(Prototype);
}  // FreePrototype
/** NextSample ************************************************************
Parameters: SearchState ptr to list containing clusters to be searched
Operation: This routine is used to find all of the samples which
belong to a cluster. It starts by removing the top
cluster on the cluster list (SearchState). If this cluster is
a leaf it is returned. Otherwise, the right subcluster
is pushed on the list and we continue the search in the
left subcluster. This continues until a leaf is found.
If all samples have been found, NULL is returned.
InitSampleSearch() must be called
before NextSample() to initialize the search.
Return: Pointer to the next leaf cluster (sample) or NULL.
Exceptions: None
History: 6/16/89, DSJ, Created.
****************************************************************************/
/** NextSample ************************************************************
Parameters:	SearchState	ptr to list containing clusters to be searched
Operation:	Pops the next cluster off the search list and walks down its
            left spine, pushing each right subcluster for later, until a
            leaf (an original sample) is reached. Returns NULL once every
            sample has been visited. InitSampleSearch() must be called
            before NextSample() to initialize the search.
Return:		Pointer to the next leaf cluster (sample) or NULL.
Exceptions:	None
****************************************************************************/
CLUSTER *NextSample(LIST *SearchState) {
  if (*SearchState == NIL_LIST)
    return NULL;
  CLUSTER *Cluster = (CLUSTER *) first_node(*SearchState);
  *SearchState = pop(*SearchState);
  // Descend leftwards; a node with no left child is a leaf/sample.
  for (;;) {
    if (Cluster->Left == NULL)
      return Cluster;
    *SearchState = push(*SearchState, Cluster->Right);
    Cluster = Cluster->Left;
  }
}  // NextSample
/** Mean ***********************************************************
Parameters: Proto prototype to return mean of
Dimension dimension whose mean is to be returned
Operation: This routine returns the mean of the specified
prototype in the indicated dimension.
Return: Mean of Prototype in Dimension
Exceptions: none
History: 7/6/89, DSJ, Created.
*********************************************************************/
// Returns the mean of the given prototype in the indicated dimension.
FLOAT32 Mean(PROTOTYPE *Proto, uinT16 Dimension) {
  return Proto->Mean[Dimension];
}  // Mean
/** StandardDeviation *************************************************
Parameters: Proto prototype to return standard deviation of
Dimension dimension whose stddev is to be returned
Operation: This routine returns the standard deviation of the
prototype in the indicated dimension.
Return: Standard deviation of Prototype in Dimension
Exceptions: none
History: 7/6/89, DSJ, Created.
**********************************************************************/
/** StandardDeviation *************************************************
Parameters:	Proto		prototype to return standard deviation of
            Dimension	dimension whose stddev is to be returned
Operation:	Returns the standard deviation of the prototype in the
            indicated dimension, interpreting the Variance field
            according to the prototype's style and, for mixed styles,
            the per-dimension distribution.
Return:		Standard deviation of Prototype in Dimension
Exceptions:	none
**********************************************************************/
FLOAT32 StandardDeviation(PROTOTYPE *Proto, uinT16 Dimension) {
  switch (Proto->Style) {
    case spherical:
      return (FLOAT32) sqrt((double) Proto->Variance.Spherical);
    case elliptical:
      return (FLOAT32) sqrt((double) Proto->Variance.Elliptical[Dimension]);
    case mixed:
      switch (Proto->Distrib[Dimension]) {
        case normal:
          return (FLOAT32)
              sqrt((double) Proto->Variance.Elliptical[Dimension]);
        case uniform:
        case D_random:
          // For these distributions the stored value is used directly.
          return Proto->Variance.Elliptical[Dimension];
        case DISTRIBUTION_COUNT:
          ASSERT_HOST(!"Distribution count not allowed!");
      }
  }
  return 0.0f;
}  // StandardDeviation
/*---------------------------------------------------------------------------
Private Code
----------------------------------------------------------------------------*/
/** CreateClusterTree *******************************************************
Parameters:	Clusterer	data structure holding samples to be clustered
Operation:	This routine performs a bottom-up clustering on the samples
held in the kd-tree of the Clusterer data structure. The
result is a cluster tree. Each node in the tree represents
a cluster which conceptually contains a subset of the samples.
More precisely, the cluster contains all of the samples which
are contained in its two sub-clusters. The leaves of the
tree are the individual samples themselves; they have no
sub-clusters. The root node of the tree conceptually contains
all of the samples.
Return: None (the Clusterer data structure is changed)
Exceptions: None
History: 5/29/89, DSJ, Created.
******************************************************************************/
// Performs bottom-up agglomerative clustering of the samples in the
// clusterer's kd-tree, leaving the root of the resulting cluster tree in
// Clusterer->Root. The kd-tree is consumed (freed) in the process.
void CreateClusterTree(CLUSTERER *Clusterer) {
  ClusteringContext context;
  ClusterPair HeapEntry;         // (distance, TEMPCLUSTER*) pair, min on top
  TEMPCLUSTER *PotentialCluster;
  // each sample and its nearest neighbor form a "potential" cluster
  // save these in a heap with the "best" potential clusters on top
  context.tree = Clusterer->KDTree;
  context.candidates = (TEMPCLUSTER *)
    Emalloc(Clusterer->NumberOfSamples * sizeof(TEMPCLUSTER));
  context.next = 0;
  context.heap = new ClusterHeap(Clusterer->NumberOfSamples);
  // Visits every sample, filling candidates[] and the heap.
  KDWalk(context.tree, (void_proc)MakePotentialClusters, &context);
  // form potential clusters into actual clusters - always do "best" first
  while (context.heap->Pop(&HeapEntry)) {
    PotentialCluster = HeapEntry.data;
    // if main cluster of potential cluster is already in another cluster
    // then we don't need to worry about it
    if (PotentialCluster->Cluster->Clustered) {
      continue;
    }
    // if main cluster is not yet clustered, but its nearest neighbor is
    // then we must find a new nearest neighbor
    else if (PotentialCluster->Neighbor->Clustered) {
      PotentialCluster->Neighbor =
        FindNearestNeighbor(context.tree, PotentialCluster->Cluster,
                            &HeapEntry.key);
      // Re-queue the pair with the updated neighbor and distance.
      if (PotentialCluster->Neighbor != NULL) {
        context.heap->Push(&HeapEntry);
      }
    }
    // if neither cluster is already clustered, form permanent cluster
    else {
      PotentialCluster->Cluster =
        MakeNewCluster(Clusterer, PotentialCluster);
      // The merged cluster becomes a fresh candidate for further merging.
      PotentialCluster->Neighbor =
        FindNearestNeighbor(context.tree, PotentialCluster->Cluster,
                            &HeapEntry.key);
      if (PotentialCluster->Neighbor != NULL) {
        context.heap->Push(&HeapEntry);
      }
    }
  }
  // the root node in the cluster tree is now the only node in the kd-tree
  Clusterer->Root = (CLUSTER *) RootOf(Clusterer->KDTree);
  // free up the memory used by the K-D tree, heap, and temp clusters
  FreeKDTree(context.tree);
  Clusterer->KDTree = NULL;
  delete context.heap;
  memfree(context.candidates);
}  // CreateClusterTree
/** MakePotentialClusters **************************************************
Parameters:
context ClusteringContext (see definition above)
Cluster current cluster being visited in kd-tree walk
Level level of this cluster in the kd-tree
Operation:
This routine is designed to be used in concert with the
KDWalk routine. It will create a potential cluster for
each sample in the kd-tree that is being walked. This
potential cluster will then be pushed on the heap.
******************************************************************************/
/** MakePotentialClusters **************************************************
Parameters:
    context  ClusteringContext (see definition above)
    Cluster  current cluster being visited in kd-tree walk
    Level    level of this cluster in the kd-tree (unused)
Operation:
    Used in concert with KDWalk: records Cluster and its nearest
    neighbor in the next free candidates[] slot and pushes the pair
    onto the heap, keyed by the neighbor distance. Clusters with no
    neighbor are skipped and their slot reused.
******************************************************************************/
void MakePotentialClusters(ClusteringContext *context,
                           CLUSTER *Cluster, inT32 Level) {
  ClusterPair HeapEntry;
  TEMPCLUSTER *candidate = &context->candidates[context->next];
  candidate->Cluster = Cluster;
  HeapEntry.data = candidate;
  candidate->Neighbor = FindNearestNeighbor(context->tree,
                                            candidate->Cluster,
                                            &HeapEntry.key);
  if (candidate->Neighbor != NULL) {
    context->heap->Push(&HeapEntry);
    context->next++;
  }
}  // MakePotentialClusters
/** FindNearestNeighbor *********************************************************
Parameters: Tree kd-tree to search in for nearest neighbor
Cluster cluster whose nearest neighbor is to be found
Distance ptr to variable to report distance found
Operation: This routine searches the specified kd-tree for the nearest
neighbor of the specified cluster. It actually uses the
kd routines to find the 2 nearest neighbors since one of them
will be the original cluster. A pointer to the nearest
neighbor is returned, if it can be found, otherwise NULL is
returned. The distance between the 2 nodes is placed
in the specified variable.
Return: Pointer to the nearest neighbor of Cluster, or NULL
Exceptions: none
History: 5/29/89, DSJ, Created.
7/13/89, DSJ, Removed visibility of kd-tree node data struct
********************************************************************************/
/** FindNearestNeighbor *********************************************************
Parameters:	Tree		kd-tree to search in for nearest neighbor
            Cluster		cluster whose nearest neighbor is to be found
            Distance	ptr to variable to report distance found
Operation:	Searches the kd-tree for the nearest neighbor of Cluster. It
            asks for the 2 nearest nodes because one of them will be
            Cluster itself. Returns the nearest node that is not Cluster
            (or NULL) and stores its distance in *Distance.
Return:		Pointer to the nearest neighbor of Cluster, or NULL
Exceptions:	none
********************************************************************************/
CLUSTER *
FindNearestNeighbor(KDTREE * Tree, CLUSTER * Cluster, FLOAT32 * Distance)
#define MAXNEIGHBORS 2
#define MAXDISTANCE MAX_FLOAT32
{
  CLUSTER *Neighbor[MAXNEIGHBORS];
  FLOAT32 Dist[MAXNEIGHBORS];
  int NumberOfNeighbors;
  // Fetch the two nodes closest to Cluster's mean.
  KDNearestNeighborSearch(Tree, Cluster->Mean, MAXNEIGHBORS, MAXDISTANCE,
                          &NumberOfNeighbors, (void **)Neighbor, Dist);
  // Of the nodes returned, keep the closest one that isn't Cluster itself.
  *Distance = MAXDISTANCE;
  CLUSTER *BestNeighbor = NULL;
  for (int i = 0; i < NumberOfNeighbors; i++) {
    if (Dist[i] < *Distance && Neighbor[i] != Cluster) {
      *Distance = Dist[i];
      BestNeighbor = Neighbor[i];
    }
  }
  return BestNeighbor;
}  // FindNearestNeighbor
/** MakeNewCluster *************************************************************
Parameters: Clusterer current clustering environment
TempCluster potential cluster to make permanent
Operation: This routine creates a new permanent cluster from the
clusters specified in TempCluster. The 2 clusters in
TempCluster are marked as "clustered" and deleted from
the kd-tree. The new cluster is then added to the kd-tree.
Return: Pointer to the new permanent cluster
Exceptions: none
History: 5/29/89, DSJ, Created.
7/13/89, DSJ, Removed visibility of kd-tree node data struct
********************************************************************************/
/** MakeNewCluster *************************************************************
Parameters:	Clusterer	current clustering environment
            TempCluster	potential cluster to make permanent
Operation:	Creates a new permanent cluster whose children are the two
            clusters in TempCluster. Both children are marked "clustered"
            and removed from the kd-tree; the merged cluster (with combined
            mean and sample count) is inserted in their place.
Return:		Pointer to the new permanent cluster
Exceptions:	none
********************************************************************************/
CLUSTER *MakeNewCluster(CLUSTERER *Clusterer, TEMPCLUSTER *TempCluster) {
  // CLUSTER ends in a one-element Mean[]; extend it to SampleSize entries.
  CLUSTER *NewCluster = (CLUSTER *) Emalloc(
      sizeof(CLUSTER) + (Clusterer->SampleSize - 1) * sizeof(FLOAT32));
  NewCluster->Clustered = FALSE;
  NewCluster->Prototype = FALSE;
  NewCluster->Left = TempCluster->Cluster;
  NewCluster->Right = TempCluster->Neighbor;
  NewCluster->CharID = -1;  // merged clusters belong to no single character
  // Mark the children as consumed and take them out of the kd-tree.
  NewCluster->Left->Clustered = TRUE;
  NewCluster->Right->Clustered = TRUE;
  KDDelete(Clusterer->KDTree, NewCluster->Left->Mean, NewCluster->Left);
  KDDelete(Clusterer->KDTree, NewCluster->Right->Mean, NewCluster->Right);
  // Combine the children's means (circular-dimension aware) and counts.
  NewCluster->SampleCount =
      MergeClusters(Clusterer->SampleSize, Clusterer->ParamDesc,
                    NewCluster->Left->SampleCount,
                    NewCluster->Right->SampleCount,
                    NewCluster->Mean,
                    NewCluster->Left->Mean, NewCluster->Right->Mean);
  // The merged cluster replaces its children in the kd-tree.
  KDStore(Clusterer->KDTree, NewCluster->Mean, NewCluster);
  return NewCluster;
}  // MakeNewCluster
/** MergeClusters ************************************************************
Parameters: N # of dimensions (size of arrays)
ParamDesc array of dimension descriptions
n1, n2 number of samples in each old cluster
m array to hold mean of new cluster
m1, m2 arrays containing means of old clusters
Operation: This routine merges two clusters into one larger cluster.
To do this it computes the number of samples in the new
cluster and the mean of the new cluster. The ParamDesc
information is used to ensure that circular dimensions
are handled correctly.
Return: The number of samples in the new cluster.
Exceptions: None
History: 5/31/89, DSJ, Created.
*********************************************************************************/
inT32 MergeClusters(inT16 N,
                    PARAM_DESC ParamDesc[],
                    inT32 n1,
                    inT32 n2,
                    FLOAT32 m[],
                    FLOAT32 m1[], FLOAT32 m2[]) {
  inT32 TotalSamples = n1 + n2;
  // Compute the sample-weighted mean of every dimension; circular
  // dimensions need wrap-around handling when the two means are more
  // than half the range apart.
  for (inT32 i = 0; i < N; i++) {
    if (ParamDesc[i].Circular && (m2[i] - m1[i]) > ParamDesc[i].HalfRange) {
      // m2 is "above" m1 by more than half a revolution: rotate m2
      // down one full range before averaging, then wrap the result
      // back into the legal range if it fell below the minimum.
      m[i] = (n1 * m1[i] + n2 * (m2[i] - ParamDesc[i].Range)) / TotalSamples;
      if (m[i] < ParamDesc[i].Min)
        m[i] += ParamDesc[i].Range;
    } else if (ParamDesc[i].Circular &&
               (m1[i] - m2[i]) > ParamDesc[i].HalfRange) {
      // Symmetric case: rotate m1 down one full range instead.
      m[i] = (n1 * (m1[i] - ParamDesc[i].Range) + n2 * m2[i]) / TotalSamples;
      if (m[i] < ParamDesc[i].Min)
        m[i] += ParamDesc[i].Range;
    } else {
      // Linear dimension (or close circular means): plain weighted mean.
      m[i] = (n1 * m1[i] + n2 * m2[i]) / TotalSamples;
    }
  }
  return TotalSamples;
}                                // MergeClusters
/** ComputePrototypes *******************************************************
Parameters: Clusterer data structure holding cluster tree
Config parameters used to control prototype generation
Operation: This routine decides which clusters in the cluster tree
should be represented by prototypes, forms a list of these
prototypes, and places the list in the Clusterer data
structure.
Return: None
Exceptions: None
History: 5/30/89, DSJ, Created.
*******************************************************************************/
void ComputePrototypes(CLUSTERER *Clusterer, CLUSTERCONFIG *Config) {
  // Work list of clusters still waiting to be examined; seeded with
  // the root of the cluster tree.
  LIST Pending = NIL_LIST;
  if (Clusterer->Root != NULL)
    Pending = push (NIL_LIST, Clusterer->Root);
  // Depth-first walk of the tree: each cluster either becomes a
  // prototype, or is split and its two children examined in turn.
  while (Pending != NIL_LIST) {
    CLUSTER *Candidate = (CLUSTER *) first_node (Pending);
    Pending = pop (Pending);
    PROTOTYPE *Proto = MakePrototype(Clusterer, Config, Candidate);
    if (Proto != NULL) {
      Clusterer->ProtoList = push (Clusterer->ProtoList, Proto);
    } else {
      // Push Right then Left so Left is processed first.
      Pending = push (Pending, Candidate->Right);
      Pending = push (Pending, Candidate->Left);
    }
  }
}                                // ComputePrototypes
/** MakePrototype ***********************************************************
Parameters:
Clusterer data structure holding cluster tree
Config parameters used to control prototype generation
Cluster cluster to be made into a prototype
Operation: This routine attempts to create a prototype from the
specified cluster that conforms to the distribution
specified in Config. If there are too few samples in the
cluster to perform a statistical analysis, then a prototype
is generated but labelled as insignificant. If the
dimensions of the cluster are not independent, no prototype
is generated and NULL is returned. If a prototype can be
found that matches the desired distribution then a pointer
to it is returned, otherwise NULL is returned.
Return: Pointer to new prototype or NULL
Exceptions: None
History: 6/19/89, DSJ, Created.
*******************************************************************************/
PROTOTYPE *MakePrototype(CLUSTERER *Clusterer,
                         CLUSTERCONFIG *Config,
                         CLUSTER *Cluster) {
  // Reject clusters that contain too many samples from one character.
  if (MultipleCharSamples (Clusterer, Cluster, Config->MaxIllegal))
    return NULL;
  // Gather the covariance matrix and per-dimension ranges.
  STATISTICS *Stats =
      ComputeStatistics(Clusterer->SampleSize, Clusterer->ParamDesc, Cluster);
  // Clusters too small for valid statistics become insignificant protos.
  // (The MinSamples test assumes multi-char clusters were removed above.)
  PROTOTYPE *Proto = MakeDegenerateProto(
      Clusterer->SampleSize, Cluster, Stats, Config->ProtoStyle,
      (inT32) (Config->MinSamples * Clusterer->NumChar));
  // A statistical analysis is only attempted when the dimensions are
  // pairwise independent; otherwise no prototype is produced.
  if (Proto == NULL &&
      Independent(Clusterer->ParamDesc, Clusterer->SampleSize,
                  Stats->CoVariance, Config->Independence)) {
    // Optional Hotelling T-squared shortcut for elliptical protos.
    if (HOTELLING && Config->ProtoStyle == elliptical)
      Proto = TestEllipticalProto(Clusterer, Config, Cluster, Stats);
    if (Proto == NULL) {
      // Histogram used to test each dimension against a distribution.
      BUCKETS *Buckets = GetBuckets(Clusterer, normal, Cluster->SampleCount,
                                    Config->Confidence);
      if (Config->ProtoStyle == spherical) {
        Proto = MakeSphericalProto(Clusterer, Cluster, Stats, Buckets);
      } else if (Config->ProtoStyle == elliptical) {
        Proto = MakeEllipticalProto(Clusterer, Cluster, Stats, Buckets);
      } else if (Config->ProtoStyle == mixed) {
        Proto = MakeMixedProto(Clusterer, Cluster, Stats, Buckets,
                               Config->Confidence);
      } else if (Config->ProtoStyle == automatic) {
        // automatic: prefer spherical, then elliptical, then mixed.
        Proto = MakeSphericalProto(Clusterer, Cluster, Stats, Buckets);
        if (Proto == NULL)
          Proto = MakeEllipticalProto(Clusterer, Cluster, Stats, Buckets);
        if (Proto == NULL)
          Proto = MakeMixedProto(Clusterer, Cluster, Stats, Buckets,
                                 Config->Confidence);
      }
    }
  }
  // Single exit point: the statistics are always released exactly once.
  FreeStatistics(Stats);
  return Proto;
}                                // MakePrototype
/** MakeDegenerateProto ******************************************************
Parameters: N number of dimensions
Cluster cluster being analyzed
Statistics statistical info about cluster
Style type of prototype to be generated
MinSamples minimum number of samples in a cluster
Operation: This routine checks for clusters which are degenerate and
therefore cannot be analyzed in a statistically valid way.
A cluster is defined as degenerate if it does not have at
least MINSAMPLESNEEDED samples in it. If the cluster is
found to be degenerate, a prototype of the specified style
is generated and marked as insignificant. A cluster is
also degenerate if it does not have at least MinSamples
samples in it.
If the cluster is not degenerate, NULL is returned.
Return: Pointer to degenerate prototype or NULL.
Exceptions: None
History: 6/20/89, DSJ, Created.
7/12/89, DSJ, Changed name and added check for 0 stddev.
8/8/89, DSJ, Removed check for 0 stddev (handled elsewhere).
********************************************************************************/
PROTOTYPE *MakeDegenerateProto( //this was MinSample
                               uinT16 N,
                               CLUSTER *Cluster,
                               STATISTICS *Statistics,
                               PROTOSTYLE Style,
                               inT32 MinSamples) {
  // Never accept a threshold below the hard statistical minimum.
  if (MinSamples < MINSAMPLESNEEDED)
    MinSamples = MINSAMPLESNEEDED;
  // Cluster is big enough to analyze properly: not degenerate.
  if (Cluster->SampleCount >= MinSamples)
    return NULL;
  // Too few samples: build a proto of the requested style anyway,
  // but flag it as statistically insignificant.
  PROTOTYPE *Proto = NULL;
  switch (Style) {
    case spherical:
      Proto = NewSphericalProto (N, Cluster, Statistics);
      break;
    case elliptical:
    case automatic:
      // Degenerate "automatic" clusters fall back to elliptical.
      Proto = NewEllipticalProto (N, Cluster, Statistics);
      break;
    case mixed:
      Proto = NewMixedProto (N, Cluster, Statistics);
      break;
  }
  Proto->Significant = FALSE;
  return Proto;
}                                // MakeDegenerateProto
/** TestEllipticalProto ****************************************************
Parameters: Clusterer data struct containing samples being clustered
Config provides the magic number of samples that make a good cluster
Cluster cluster to be made into an elliptical prototype
Statistics statistical info about cluster
Operation: This routine tests the specified cluster to see if **
* there is a statistically significant difference between
* the sub-clusters that would be made if the cluster were to
* be split. If not, then a new prototype is formed and
* returned to the caller. If there is, then NULL is returned
* to the caller.
Return: Pointer to new elliptical prototype or NULL.
****************************************************************************/
PROTOTYPE *TestEllipticalProto(CLUSTERER *Clusterer,
                               CLUSTERCONFIG *Config,
                               CLUSTER *Cluster,
                               STATISTICS *Statistics) {
  // Fraction of the number of samples used as a range around 1 within
  // which a cluster has the magic size that allows a boost to the
  // FTable by kFTableBoostMargin, thus allowing clusters near the
  // magic size (equal to the number of sample characters) to be more
  // likely to stay together.
  const double kMagicSampleMargin = 0.0625;
  const double kFTableBoostMargin = 2.0;
  int N = Clusterer->SampleSize;
  CLUSTER* Left = Cluster->Left;
  CLUSTER* Right = Cluster->Right;
  // A leaf cluster has no sub-clusters to compare.
  if (Left == NULL || Right == NULL)
    return NULL;
  // Total number of samples across the two sub-clusters.
  int TotalDims = Left->SampleCount + Right->SampleCount;
  // Too few samples to test N dimensions meaningfully.
  if (TotalDims < N + 1 || TotalDims < 2)
    return NULL;
  const int kMatrixSize = N * N * sizeof(FLOAT32);
  FLOAT32* Covariance = reinterpret_cast<FLOAT32 *>(Emalloc(kMatrixSize));
  FLOAT32* Inverse = reinterpret_cast<FLOAT32 *>(Emalloc(kMatrixSize));
  FLOAT32* Delta = reinterpret_cast<FLOAT32*>(Emalloc(N * sizeof(FLOAT32)));
  // Compute a new covariance matrix that only uses essential features.
  // Rows/columns of non-essential dimensions are replaced by identity
  // entries so the matrix stays invertible without affecting the test.
  for (int i = 0; i < N; ++i) {
    int row_offset = i * N;
    if (!Clusterer->ParamDesc[i].NonEssential) {
      for (int j = 0; j < N; ++j) {
        if (!Clusterer->ParamDesc[j].NonEssential)
          Covariance[j + row_offset] = Statistics->CoVariance[j + row_offset];
        else
          Covariance[j + row_offset] = 0.0f;
      }
    } else {
      for (int j = 0; j < N; ++j) {
        if (i == j)
          Covariance[j + row_offset] = 1.0f;
        else
          Covariance[j + row_offset] = 0.0f;
      }
    }
  }
  // Invert the (filtered) covariance matrix; a large error is only
  // reported, and the computation proceeds with the best inverse found.
  double err = InvertMatrix(Covariance, N, Inverse);
  if (err > 1) {
    tprintf("Clustering error: Matrix inverse failed with error %g\n", err);
  }
  // Difference of the two sub-cluster means over essential dimensions,
  // counting how many essential dimensions there are.
  int EssentialN = 0;
  for (int dim = 0; dim < N; ++dim) {
    if (!Clusterer->ParamDesc[dim].NonEssential) {
      Delta[dim] = Left->Mean[dim] - Right->Mean[dim];
      ++EssentialN;
    } else {
      Delta[dim] = 0.0f;
    }
  }
  // Compute Hotelling's T-squared: Tsq = Delta^T * Inverse * Delta.
  double Tsq = 0.0;
  for (int x = 0; x < N; ++x) {
    double temp = 0.0;
    for (int y = 0; y < N; ++y) {
      temp += Inverse[y + N*x] * Delta[y];
    }
    Tsq += Delta[x] * temp;
  }
  memfree(Covariance);
  memfree(Inverse);
  memfree(Delta);
  // Changed this function to match the formula in
  // Statistical Methods in Medical Research p 473
  // By Peter Armitage, Geoffrey Berry, J. N. S. Matthews.
  // Tsq *= Left->SampleCount * Right->SampleCount / TotalDims;
  // Convert T-squared into an F statistic with (EssentialN,
  // TotalDims - EssentialN - 1) degrees of freedom.
  double F = Tsq * (TotalDims - EssentialN - 1) / ((TotalDims - 2)*EssentialN);
  // Clamp the degrees of freedom to the bounds of the FTable, then
  // convert to 0-based indices.
  int Fx = EssentialN;
  if (Fx > FTABLE_X)
    Fx = FTABLE_X;
  --Fx;
  int Fy = TotalDims - EssentialN - 1;
  if (Fy > FTABLE_Y)
    Fy = FTABLE_Y;
  --Fy;
  double FTarget = FTable[Fy][Fx];
  if (Config->MagicSamples > 0 &&
      TotalDims >= Config->MagicSamples * (1.0 - kMagicSampleMargin) &&
      TotalDims <= Config->MagicSamples * (1.0 + kMagicSampleMargin)) {
    // Give magic-sized clusters a magic FTable boost.
    FTarget += kFTableBoostMargin;
  }
  // F below the critical value: the split is not statistically
  // significant, so keep the cluster together as one elliptical proto.
  if (F < FTarget) {
    return NewEllipticalProto (Clusterer->SampleSize, Cluster, Statistics);
  }
  return NULL;
}
/* MakeSphericalProto *******************************************************
Parameters: Clusterer data struct containing samples being clustered
Cluster cluster to be made into a spherical prototype
Statistics statistical info about cluster
Buckets histogram struct used to analyze distribution
Operation: This routine tests the specified cluster to see if it can
be approximated by a spherical normal distribution. If it
can be, then a new prototype is formed and returned to the
caller. If it can't be, then NULL is returned to the caller.
Return: Pointer to new spherical prototype or NULL.
Exceptions: None
History: 6/1/89, DSJ, Created.
******************************************************************************/
PROTOTYPE *MakeSphericalProto(CLUSTERER *Clusterer,
                              CLUSTER *Cluster,
                              STATISTICS *Statistics,
                              BUCKETS *Buckets) {
  int Dim;
  // Every essential dimension must pass a normality test using the
  // single pooled (average) variance of the cluster.
  for (Dim = 0; Dim < Clusterer->SampleSize; Dim++) {
    if (Clusterer->ParamDesc[Dim].NonEssential)
      continue;
    FillBuckets (Buckets, Cluster, Dim, &(Clusterer->ParamDesc[Dim]),
                 Cluster->Mean[Dim],
                 sqrt ((FLOAT64) (Statistics->AvgVariance)));
    if (!DistributionOK (Buckets))
      break;                     // this dimension is not normal: give up
  }
  // Make a proto only if no dimension failed the test.
  if (Dim < Clusterer->SampleSize)
    return NULL;
  return NewSphericalProto (Clusterer->SampleSize, Cluster, Statistics);
}                                // MakeSphericalProto
/** MakeEllipticalProto ****************************************************
Parameters: Clusterer data struct containing samples being clustered
Cluster cluster to be made into an elliptical prototype
Statistics statistical info about cluster
Buckets histogram struct used to analyze distribution
Operation: This routine tests the specified cluster to see if it can
be approximated by an elliptical normal distribution. If it
can be, then a new prototype is formed and returned to the
caller. If it can't be, then NULL is returned to the caller.
Return: Pointer to new elliptical prototype or NULL.
Exceptions: None
History: 6/12/89, DSJ, Created.
****************************************************************************/
PROTOTYPE *MakeEllipticalProto(CLUSTERER *Clusterer,
                               CLUSTER *Cluster,
                               STATISTICS *Statistics,
                               BUCKETS *Buckets) {
  int Dim;
  // Every essential dimension must look normal under its own variance,
  // read from the diagonal of the covariance matrix.
  for (Dim = 0; Dim < Clusterer->SampleSize; Dim++) {
    if (Clusterer->ParamDesc[Dim].NonEssential)
      continue;
    FillBuckets (Buckets, Cluster, Dim, &(Clusterer->ParamDesc[Dim]),
                 Cluster->Mean[Dim],
                 sqrt ((FLOAT64) Statistics->
                       CoVariance[Dim * (Clusterer->SampleSize + 1)]));
    if (!DistributionOK (Buckets))
      break;                     // this dimension is not normal: give up
  }
  // Make a proto only if no dimension failed the test.
  if (Dim < Clusterer->SampleSize)
    return NULL;
  return NewEllipticalProto (Clusterer->SampleSize, Cluster, Statistics);
}                                // MakeEllipticalProto
/** MakeMixedProto ***********************************************************
Parameters:
Clusterer data struct containing samples being clustered
Cluster cluster to be made into a prototype
Statistics statistical info about cluster
NormalBuckets histogram struct used to analyze distribution
Confidence confidence level for alternate distributions
Operation: This routine tests each dimension of the specified cluster to
see what distribution would best approximate that dimension.
Each dimension is compared to the following distributions
in order: normal, random, uniform. If each dimension can
be represented by one of these distributions,
then a new prototype is formed and returned to the
caller. If it can't be, then NULL is returned to the caller.
Return: Pointer to new mixed prototype or NULL.
Exceptions: None
History: 6/12/89, DSJ, Created.
********************************************************************************/
PROTOTYPE *MakeMixedProto(CLUSTERER *Clusterer,
                          CLUSTER *Cluster,
                          STATISTICS *Statistics,
                          BUCKETS *NormalBuckets,
                          FLOAT64 Confidence) {
  BUCKETS *RandomBuckets = NULL;   // created lazily on first need
  BUCKETS *UniformBuckets = NULL;  // created lazily on first need
  // Start from an elliptical (all-dimensions-normal) proto and re-type
  // each essential dimension that fails the normality test.
  PROTOTYPE *Proto = NewMixedProto (Clusterer->SampleSize, Cluster, Statistics);
  int Dim;
  for (Dim = 0; Dim < Clusterer->SampleSize; Dim++) {
    if (Clusterer->ParamDesc[Dim].NonEssential)
      continue;
    // 1st choice: normal distribution.
    FillBuckets (NormalBuckets, Cluster, Dim, &(Clusterer->ParamDesc[Dim]),
                 Proto->Mean[Dim],
                 sqrt ((FLOAT64) Proto->Variance.Elliptical[Dim]));
    if (DistributionOK (NormalBuckets))
      continue;
    // 2nd choice: random distribution.
    if (RandomBuckets == NULL)
      RandomBuckets =
        GetBuckets(Clusterer, D_random, Cluster->SampleCount, Confidence);
    MakeDimRandom (Dim, Proto, &(Clusterer->ParamDesc[Dim]));
    FillBuckets (RandomBuckets, Cluster, Dim, &(Clusterer->ParamDesc[Dim]),
                 Proto->Mean[Dim], Proto->Variance.Elliptical[Dim]);
    if (DistributionOK (RandomBuckets))
      continue;
    // 3rd choice: uniform distribution.
    if (UniformBuckets == NULL)
      UniformBuckets =
        GetBuckets(Clusterer, uniform, Cluster->SampleCount, Confidence);
    MakeDimUniform(Dim, Proto, Statistics);
    FillBuckets (UniformBuckets, Cluster, Dim, &(Clusterer->ParamDesc[Dim]),
                 Proto->Mean[Dim], Proto->Variance.Elliptical[Dim]);
    if (DistributionOK (UniformBuckets))
      continue;
    break;                       // no distribution fits this dimension
  }
  // Discard the proto if some dimension could not be characterized.
  if (Dim < Clusterer->SampleSize) {
    FreePrototype(Proto);
    Proto = NULL;
  }
  return Proto;
}                                // MakeMixedProto
/* MakeDimRandom *************************************************************
Parameters: i index of dimension to be changed
Proto prototype whose dimension is to be altered
ParamDesc description of specified dimension
Operation: This routine alters the ith dimension of the specified
mixed prototype to be D_random.
Return: None
Exceptions: None
History: 6/20/89, DSJ, Created.
******************************************************************************/
void MakeDimRandom(uinT16 i, PROTOTYPE *Proto, PARAM_DESC *ParamDesc) {
  // Re-type dimension i of the mixed proto as D_random: a flat
  // distribution centered on the midpoint of the full range.
  Proto->Distrib[i] = D_random;
  Proto->Mean[i] = ParamDesc->MidRange;
  Proto->Variance.Elliptical[i] = ParamDesc->HalfRange;
  // Swap this dimension's factor in the product of magnitudes:
  // divide out the old value, multiply in the new one.
  FLOAT32 OldMagnitude = Proto->Magnitude.Elliptical[i];
  Proto->Magnitude.Elliptical[i] = 1.0 / ParamDesc->Range;
  Proto->TotalMagnitude =
    Proto->TotalMagnitude / OldMagnitude * Proto->Magnitude.Elliptical[i];
  Proto->LogMagnitude = log ((double) Proto->TotalMagnitude);
  // Proto->Weight is irrelevant for D_random dimensions.
}                                // MakeDimRandom
/** MakeDimUniform ***********************************************************
Parameters: i index of dimension to be changed
Proto prototype whose dimension is to be altered
Statistics statistical info about prototype
Operation: This routine alters the ith dimension of the specified
mixed prototype to be uniform.
Return: None
Exceptions: None
History: 6/20/89, DSJ, Created.
******************************************************************************/
void MakeDimUniform(uinT16 i, PROTOTYPE *Proto, STATISTICS *Statistics) {
  // Re-type dimension i of the mixed proto as uniform over the
  // observed sample range (Min/Max are offsets from the cluster mean).
  Proto->Distrib[i] = uniform;
  Proto->Mean[i] = Proto->Cluster->Mean[i] +
    (Statistics->Min[i] + Statistics->Max[i]) / 2;
  Proto->Variance.Elliptical[i] =
    (Statistics->Max[i] - Statistics->Min[i]) / 2;
  if (Proto->Variance.Elliptical[i] < MINVARIANCE)
    Proto->Variance.Elliptical[i] = MINVARIANCE;
  // Swap this dimension's factor in the product of magnitudes:
  // divide out the old value, multiply in the new one.
  FLOAT32 OldMagnitude = Proto->Magnitude.Elliptical[i];
  Proto->Magnitude.Elliptical[i] =
    1.0 / (2.0 * Proto->Variance.Elliptical[i]);
  Proto->TotalMagnitude =
    Proto->TotalMagnitude / OldMagnitude * Proto->Magnitude.Elliptical[i];
  Proto->LogMagnitude = log ((double) Proto->TotalMagnitude);
  // Proto->Weight is irrelevant for uniform dimensions.
}                                // MakeDimUniform
/** ComputeStatistics *********************************************************
Parameters: N number of dimensions
ParamDesc array of dimension descriptions
Cluster cluster whose stats are to be computed
Operation: This routine searches the cluster tree for all leaf nodes
which are samples in the specified cluster. It computes
a full covariance matrix for these samples as well as
keeping track of the ranges (min and max) for each
dimension. A special data structure is allocated to
return this information to the caller. An incremental
algorithm for computing statistics is not used because
it will not work with circular dimensions.
Return: Pointer to new data structure containing statistics
Exceptions: None
History: 6/2/89, DSJ, Created.
*********************************************************************************/
STATISTICS *
ComputeStatistics (inT16 N, PARAM_DESC ParamDesc[], CLUSTER * Cluster) {
  STATISTICS *Statistics;
  int i, j;
  FLOAT32 *CoVariance;
  FLOAT32 *Distance;
  LIST SearchState;
  SAMPLE *Sample;
  uinT32 SampleCountAdjustedForBias;

  // allocate memory to hold the statistics results
  Statistics = (STATISTICS *) Emalloc (sizeof (STATISTICS));
  Statistics->CoVariance = (FLOAT32 *) Emalloc (N * N * sizeof (FLOAT32));
  Statistics->Min = (FLOAT32 *) Emalloc (N * sizeof (FLOAT32));
  Statistics->Max = (FLOAT32 *) Emalloc (N * sizeof (FLOAT32));
  // allocate temporary memory to hold the sample to mean distances
  Distance = (FLOAT32 *) Emalloc (N * sizeof (FLOAT32));
  // initialize the statistics: zero ranges and a zeroed N x N matrix
  Statistics->AvgVariance = 1.0;
  CoVariance = Statistics->CoVariance;
  for (i = 0; i < N; i++) {
    Statistics->Min[i] = 0.0;
    Statistics->Max[i] = 0.0;
    for (j = 0; j < N; j++, CoVariance++)
      *CoVariance = 0;
  }
  // find each sample in the cluster and merge it into the statistics
  InitSampleSearch(SearchState, Cluster);
  while ((Sample = NextSample (&SearchState)) != NULL) {
    // Distance[] holds the sample's offset from the cluster mean,
    // wrapped into [-HalfRange, HalfRange] for circular dimensions.
    for (i = 0; i < N; i++) {
      Distance[i] = Sample->Mean[i] - Cluster->Mean[i];
      if (ParamDesc[i].Circular) {
        if (Distance[i] > ParamDesc[i].HalfRange)
          Distance[i] -= ParamDesc[i].Range;
        if (Distance[i] < -ParamDesc[i].HalfRange)
          Distance[i] += ParamDesc[i].Range;
      }
      // track the min/max offsets seen in each dimension
      if (Distance[i] < Statistics->Min[i])
        Statistics->Min[i] = Distance[i];
      if (Distance[i] > Statistics->Max[i])
        Statistics->Max[i] = Distance[i];
    }
    // accumulate the outer product Distance * Distance^T into the
    // covariance matrix (CoVariance walks the matrix in row order)
    CoVariance = Statistics->CoVariance;
    for (i = 0; i < N; i++)
      for (j = 0; j < N; j++, CoVariance++)
        *CoVariance += Distance[i] * Distance[j];
  }
  // normalize the variances by the total number of samples
  // use SampleCount-1 instead of SampleCount to get an unbiased estimate
  // also compute the geometic mean of the diagonal variances
  // ensure that clusters with only 1 sample are handled correctly
  if (Cluster->SampleCount > 1)
    SampleCountAdjustedForBias = Cluster->SampleCount - 1;
  else
    SampleCountAdjustedForBias = 1;
  CoVariance = Statistics->CoVariance;
  for (i = 0; i < N; i++)
    for (j = 0; j < N; j++, CoVariance++) {
      *CoVariance /= SampleCountAdjustedForBias;
      if (j == i) {
        // clip each diagonal variance to MINVARIANCE and fold it into
        // the running product used for the geometric mean
        if (*CoVariance < MINVARIANCE)
          *CoVariance = MINVARIANCE;
        Statistics->AvgVariance *= *CoVariance;
      }
    }
  // AvgVariance = Nth root of the product of the diagonal variances
  Statistics->AvgVariance = (float)pow((double)Statistics->AvgVariance,
                                       1.0 / N);
  // release temporary memory and return
  memfree(Distance);
  return (Statistics);
}                                // ComputeStatistics
/** NewSpericalProto *********************************************************
Parameters: N number of dimensions
Cluster cluster to be made into a spherical prototype
Statistics statistical info about samples in cluster
Operation: This routine creates a spherical prototype data structure to
approximate the samples in the specified cluster.
Spherical prototypes have a single variance which is
common across all dimensions. All dimensions are normally
distributed and independent.
Return: Pointer to a new spherical prototype data structure
Exceptions: None
History: 6/19/89, DSJ, Created.
******************************************************************************/
PROTOTYPE *NewSphericalProto(uinT16 N,
                             CLUSTER *Cluster,
                             STATISTICS *Statistics) {
  // Base proto: mean copied from the cluster, spherical by default.
  PROTOTYPE *Proto = NewSimpleProto (N, Cluster);
  // One common variance for every dimension, clipped at MINVARIANCE
  // so the weight and magnitude below stay finite.
  Proto->Variance.Spherical = Statistics->AvgVariance;
  if (Proto->Variance.Spherical < MINVARIANCE)
    Proto->Variance.Spherical = MINVARIANCE;
  Proto->Weight.Spherical = 1.0 / Proto->Variance.Spherical;
  // Peak height of the 1-D normal density; the total magnitude is its
  // Nth power since the N dimensions are independent.
  Proto->Magnitude.Spherical =
    1.0 / sqrt ((double) (2.0 * PI * Proto->Variance.Spherical));
  Proto->TotalMagnitude = (float)pow((double)Proto->Magnitude.Spherical,
                                     (double) N);
  Proto->LogMagnitude = log ((double) Proto->TotalMagnitude);
  return Proto;
}                                // NewSphericalProto
/** NewEllipticalProto *******************************************************
Parameters: N number of dimensions
Cluster cluster to be made into an elliptical prototype
Statistics statistical info about samples in cluster
Operation: This routine creates an elliptical prototype data structure to
approximate the samples in the specified cluster.
Elliptical prototypes have a variance for each dimension.
All dimensions are normally distributed and independent.
Return: Pointer to a new elliptical prototype data structure
Exceptions: None
History: 6/19/89, DSJ, Created.
*******************************************************************************/
PROTOTYPE *NewEllipticalProto(inT16 N,
                              CLUSTER *Cluster,
                              STATISTICS *Statistics) {
  int Dim;
  // Base proto with the cluster's mean, plus per-dimension arrays.
  PROTOTYPE *Proto = NewSimpleProto (N, Cluster);
  Proto->Variance.Elliptical = (FLOAT32 *) Emalloc (N * sizeof (FLOAT32));
  Proto->Magnitude.Elliptical = (FLOAT32 *) Emalloc (N * sizeof (FLOAT32));
  Proto->Weight.Elliptical = (FLOAT32 *) Emalloc (N * sizeof (FLOAT32));
  Proto->TotalMagnitude = 1.0;
  for (Dim = 0; Dim < N; Dim++) {
    // Variance of this dimension = diagonal of the covariance matrix,
    // clipped at MINVARIANCE to keep weight/magnitude finite.
    Proto->Variance.Elliptical[Dim] = Statistics->CoVariance[Dim * (N + 1)];
    if (Proto->Variance.Elliptical[Dim] < MINVARIANCE)
      Proto->Variance.Elliptical[Dim] = MINVARIANCE;
    Proto->Magnitude.Elliptical[Dim] =
      1.0 / sqrt ((double) (2.0 * PI * Proto->Variance.Elliptical[Dim]));
    Proto->Weight.Elliptical[Dim] = 1.0 / Proto->Variance.Elliptical[Dim];
    // Total magnitude = product of the per-dimension peak densities.
    Proto->TotalMagnitude *= Proto->Magnitude.Elliptical[Dim];
  }
  Proto->LogMagnitude = log ((double) Proto->TotalMagnitude);
  Proto->Style = elliptical;
  return Proto;
}                                // NewEllipticalProto
/** MewMixedProto ************************************************************
Parameters: N number of dimensions
Cluster cluster to be made into a mixed prototype
Statistics statistical info about samples in cluster
Operation: This routine creates a mixed prototype data structure to
approximate the samples in the specified cluster.
Mixed prototypes can have different distributions for
each dimension. All dimensions are independent. The
structure is initially filled in as though it were an
elliptical prototype. The actual distributions of the
dimensions can be altered by other routines.
Return: Pointer to a new mixed prototype data structure
Exceptions: None
History: 6/19/89, DSJ, Created.
********************************************************************************/
PROTOTYPE *NewMixedProto(inT16 N, CLUSTER *Cluster, STATISTICS *Statistics) {
  // Start from an elliptical proto, then attach a per-dimension
  // distribution table initialized to all-normal; callers may later
  // re-type individual dimensions (random/uniform).
  PROTOTYPE *Proto = NewEllipticalProto (N, Cluster, Statistics);
  Proto->Distrib = (DISTRIBUTION *) Emalloc (N * sizeof (DISTRIBUTION));
  for (int Dim = 0; Dim < N; Dim++)
    Proto->Distrib[Dim] = normal;
  Proto->Style = mixed;
  return Proto;
}                                // NewMixedProto
/** NewSimpleProto ***********************************************************
Parameters: N number of dimensions
Cluster cluster to be made into a prototype
Operation: This routine allocates memory to hold a simple prototype
data structure, i.e. one without independent distributions
and variances for each dimension.
Return: Pointer to new simple prototype
Exceptions: None
History: 6/19/89, DSJ, Created.
*******************************************************************************/
PROTOTYPE *NewSimpleProto(inT16 N, CLUSTER *Cluster) {
  // Allocate the proto and copy the cluster's mean vector into it.
  PROTOTYPE *Proto = (PROTOTYPE *) Emalloc (sizeof (PROTOTYPE));
  Proto->Mean = (FLOAT32 *) Emalloc (N * sizeof (FLOAT32));
  for (int Dim = 0; Dim < N; Dim++)
    Proto->Mean[Dim] = Cluster->Mean[Dim];
  // Defaults: a significant, unmerged, spherical proto with no
  // per-dimension distribution table.
  Proto->Distrib = NULL;
  Proto->Significant = TRUE;
  Proto->Merged = FALSE;
  Proto->Style = spherical;
  Proto->NumSamples = Cluster->SampleCount;
  // Link proto and cluster in both directions.
  Proto->Cluster = Cluster;
  Proto->Cluster->Prototype = TRUE;
  return Proto;
}                                // NewSimpleProto
/** Independent ***************************************************************
Parameters: ParamDesc descriptions of each feature space dimension
N number of dimensions
CoVariance ptr to a covariance matrix
Independence max off-diagonal correlation coefficient
Operation: This routine returns TRUE if the specified covariance
matrix indicates that all N dimensions are independent of
one another. One dimension is judged to be independent of
another when the magnitude of the corresponding correlation
coefficient is
less than the specified Independence factor. The
correlation coefficient is calculated as: (see Duda and
Hart, pg. 247)
coeff[ij] = stddev[ij] / sqrt (stddev[ii] * stddev[jj])
The covariance matrix is assumed to be symmetric (which
should always be true).
Return: TRUE if dimensions are independent, FALSE otherwise
Exceptions: None
History: 6/4/89, DSJ, Created.
*******************************************************************************/
BOOL8
Independent (PARAM_DESC ParamDesc[],
             inT16 N, FLOAT32 * CoVariance, FLOAT32 Independence) {
  int i, j;
  // Examine each off-diagonal entry of the (symmetric) covariance
  // matrix; explicit indexing replaces the original pointer walking.
  for (i = 0; i < N; i++) {
    if (ParamDesc[i].NonEssential)
      continue;
    FLOAT32 VarII = CoVariance[i * (N + 1)];   // ith diagonal element
    for (j = i + 1; j < N; j++) {
      if (ParamDesc[j].NonEssential)
        continue;
      FLOAT32 VarJJ = CoVariance[j * (N + 1)]; // jth diagonal element
      FLOAT32 CovIJ = CoVariance[i * N + j];
      FLOAT32 CorrelationCoeff;
      if (VarII == 0.0 || VarJJ == 0.0)
        CorrelationCoeff = 0.0;
      else
        // NOTE(review): the nested sqrt yields sqrt(|corr coeff|), not
        // the plain correlation coefficient from the header comment;
        // the Independence threshold is presumably calibrated to this
        // scale -- confirm before changing.
        CorrelationCoeff =
          sqrt (sqrt (CovIJ * CovIJ / (VarII * VarJJ)));
      if (CorrelationCoeff > Independence)
        return (FALSE);
    }
  }
  return (TRUE);
}                                // Independent
/** GetBuckets **************************************************************
Parameters:
Clusterer which keeps a bucket_cache for us.
Distribution type of probability distribution to test for
SampleCount number of samples that are available
Confidence probability of a Type I error
Operation: This routine returns a histogram data structure which can
be used by other routines to place samples into histogram
buckets, and then apply a goodness of fit test to the
histogram data to determine if the samples belong to the
specified probability distribution. The routine keeps
a list of bucket data structures which have already been
created so that it minimizes the computation time needed
to create a new bucket.
Return: Bucket data structure
Exceptions: none
History: Thu Aug 3 12:58:10 1989, DSJ, Created.
*****************************************************************************/
BUCKETS *GetBuckets(CLUSTERER* clusterer,
                    DISTRIBUTION Distribution,
                    uinT32 SampleCount,
                    FLOAT64 Confidence) {
  // Bucket structures are cached per (distribution, bucket-count) pair.
  uinT16 NumberOfBuckets = OptimumNumberOfBuckets(SampleCount);
  BUCKETS **CacheSlot =
      &clusterer->bucket_cache[Distribution][NumberOfBuckets - MINBUCKETS];
  if (*CacheSlot == NULL) {
    // Cache miss: build a fresh structure and remember it.
    *CacheSlot = MakeBuckets(Distribution, SampleCount, Confidence);
  } else {
    BUCKETS *Buckets = *CacheSlot;
    // Cache hit: re-tune the cached structure for this request.
    if (SampleCount != Buckets->SampleCount)
      AdjustBuckets(Buckets, SampleCount);
    if (Confidence != Buckets->Confidence) {
      Buckets->Confidence = Confidence;
      Buckets->ChiSquared = ComputeChiSquared(
          DegreesOfFreedom(Distribution, Buckets->NumberOfBuckets),
          Confidence);
    }
    InitBuckets(Buckets);
  }
  return *CacheSlot;
}                                // GetBuckets
/** Makebuckets *************************************************************
Parameters:
Distribution type of probability distribution to test for
SampleCount number of samples that are available
Confidence probability of a Type I error
Operation:
This routine creates a histogram data structure which can
be used by other routines to place samples into histogram
buckets, and then apply a goodness of fit test to the
histogram data to determine if the samples belong to the
specified probability distribution. The buckets are
allocated in such a way that the expected frequency of
samples in each bucket is approximately the same. In
order to make this possible, a mapping table is
computed which maps "normalized" samples into the
appropriate bucket.
Return: Pointer to new histogram data structure
Exceptions: None
History: 6/4/89, DSJ, Created.
*****************************************************************************/
BUCKETS *MakeBuckets(DISTRIBUTION Distribution,
uinT32 SampleCount,
FLOAT64 Confidence) {
// Density function used to integrate each distribution, indexed by
// DISTRIBUTION; D_random shares the uniform density.
const DENSITYFUNC DensityFunction[] =
{ NormalDensity, UniformDensity, UniformDensity };
int i, j;
BUCKETS *Buckets;
FLOAT64 BucketProbability;
FLOAT64 NextBucketBoundary;
FLOAT64 Probability;
FLOAT64 ProbabilityDelta;
FLOAT64 LastProbDensity;
FLOAT64 ProbDensity;
uinT16 CurrentBucket;
BOOL8 Symmetrical;
// allocate memory needed for data structure
Buckets = reinterpret_cast<BUCKETS*>(Emalloc(sizeof(BUCKETS)));
Buckets->NumberOfBuckets = OptimumNumberOfBuckets(SampleCount);
Buckets->SampleCount = SampleCount;
Buckets->Confidence = Confidence;
Buckets->Count = reinterpret_cast<uinT32*>(
Emalloc(Buckets->NumberOfBuckets * sizeof(uinT32)));
Buckets->ExpectedCount = reinterpret_cast<FLOAT32*>(
Emalloc(Buckets->NumberOfBuckets * sizeof(FLOAT32)));
// initialize simple fields
Buckets->Distribution = Distribution;
for (i = 0; i < Buckets->NumberOfBuckets; i++) {
Buckets->Count[i] = 0;
Buckets->ExpectedCount[i] = 0.0;
}
// all currently defined distributions are symmetrical
Symmetrical = TRUE;
Buckets->ChiSquared = ComputeChiSquared(
DegreesOfFreedom(Distribution, Buckets->NumberOfBuckets), Confidence);
if (Symmetrical) {
// allocate buckets so that all have approx. equal probability
BucketProbability = 1.0 / (FLOAT64) (Buckets->NumberOfBuckets);
// distribution is symmetric so fill in upper half then copy
CurrentBucket = Buckets->NumberOfBuckets / 2;
// With an odd bucket count the middle bucket straddles the center, so only
// half its probability mass lies in the upper half computed below.
if (Odd (Buckets->NumberOfBuckets))
NextBucketBoundary = BucketProbability / 2;
else
NextBucketBoundary = BucketProbability;
Probability = 0.0;
LastProbDensity =
(*DensityFunction[(int) Distribution]) (BUCKETTABLESIZE / 2);
// Walk the upper half of the mapping table, integrating the density with
// the trapezoid rule (Integral); advance CurrentBucket each time the
// accumulated probability crosses the next bucket boundary, clamping to
// the last bucket.
for (i = BUCKETTABLESIZE / 2; i < BUCKETTABLESIZE; i++) {
ProbDensity = (*DensityFunction[(int) Distribution]) (i + 1);
ProbabilityDelta = Integral (LastProbDensity, ProbDensity, 1.0);
Probability += ProbabilityDelta;
if (Probability > NextBucketBoundary) {
if (CurrentBucket < Buckets->NumberOfBuckets - 1)
CurrentBucket++;
NextBucketBoundary += BucketProbability;
}
Buckets->Bucket[i] = CurrentBucket;
Buckets->ExpectedCount[CurrentBucket] +=
(FLOAT32) (ProbabilityDelta * SampleCount);
LastProbDensity = ProbDensity;
}
// place any leftover probability into the last bucket
// (0.5 is the total mass of the upper half of a symmetric distribution)
Buckets->ExpectedCount[CurrentBucket] +=
(FLOAT32) ((0.5 - Probability) * SampleCount);
// copy upper half of distribution to lower half
for (i = 0, j = BUCKETTABLESIZE - 1; i < j; i++, j--)
Buckets->Bucket[i] =
Mirror(Buckets->Bucket[j], Buckets->NumberOfBuckets);
// copy upper half of expected counts to lower half
// (note i <= j: the middle bucket of an odd count is doubled here,
// matching the halved probability it accumulated above)
for (i = 0, j = Buckets->NumberOfBuckets - 1; i <= j; i++, j--)
Buckets->ExpectedCount[i] += Buckets->ExpectedCount[j];
}
return Buckets;
} // MakeBuckets
//---------------------------------------------------------------------------
uinT16 OptimumNumberOfBuckets(uinT32 SampleCount) {
  // Computes the optimum number of histogram buckets for a chi-squared
  // goodness of fit test on SampleCount samples, by linear interpolation
  // between entries of kCountTable/kBucketsTable (based on Table 4.1,
  // pg. 147 of "Measurement and Analysis of Random Data", Bendat & Piersol,
  // which assumes a 0.05 significance level; this routine assumes the table
  // is equally valid for other alphas, which may not be true).
  if (SampleCount < kCountTable[0])
    return kBucketsTable[0];
  for (uinT8 hi = 1; hi < LOOKUPTABLESIZE; hi++) {
    if (SampleCount <= kCountTable[hi]) {
      // Interpolate between the bracketing table entries.
      const uinT8 lo = hi - 1;
      FLOAT32 slope = (FLOAT32) (kBucketsTable[hi] - kBucketsTable[lo]) /
                      (FLOAT32) (kCountTable[hi] - kCountTable[lo]);
      return (uinT16) (kBucketsTable[lo] +
                       slope * (SampleCount - kCountTable[lo]));
    }
  }
  // Sample count exceeds the largest table entry: use the last value.
  return kBucketsTable[LOOKUPTABLESIZE - 1];
}  // OptimumNumberOfBuckets
//---------------------------------------------------------------------------
FLOAT64
ComputeChiSquared (uinT16 DegreesOfFreedom, FLOAT64 Alpha)
/*
**	Parameters:
**		DegreesOfFreedom	determines shape of distribution
**		Alpha		probability of right tail
**	Operation:
**		This routine computes the chi-squared value which will
**		leave a cumulative probability of Alpha in the right tail
**		of a chi-squared distribution with the specified number of
**		degrees of freedom.  Alpha must be between 0 and 1.
**		DegreesOfFreedom must be even.  The routine maintains an
**		array of lists.  Each list corresponds to a different
**		number of degrees of freedom.  Each entry in the list
**		corresponds to a different alpha value and its corresponding
**		chi-squared value.  Therefore, once a particular chi-squared
**		value is computed, it is stored in the list and never
**		needs to be computed again.
**	Return: Desired chi-squared value
**	Exceptions: none
**	History: 6/5/89, DSJ, Created.
*/
#define CHIACCURACY 0.01
#define MINALPHA (1e-200)
{
// Cache of previously computed chi-squared values, one list per (even)
// number of degrees of freedom.
// NOTE(review): this static cache makes the function non-reentrant —
// confirm single-threaded use.
static LIST ChiWith[MAXDEGREESOFFREEDOM + 1];
CHISTRUCT *OldChiSquared;
CHISTRUCT SearchKey;
// limit the minimum alpha that can be used - if alpha is too small
// it may not be possible to compute chi-squared.
Alpha = ClipToRange(Alpha, MINALPHA, 1.0);
// Round odd degrees of freedom up to even, as required by ChiArea's
// series expansion.
if (Odd (DegreesOfFreedom))
DegreesOfFreedom++;
/* find the list of chi-squared values which have already been computed
for the specified number of degrees of freedom.  Search the list for
the desired chi-squared. */
SearchKey.Alpha = Alpha;
OldChiSquared = (CHISTRUCT *) first_node (search (ChiWith[DegreesOfFreedom],
&SearchKey, AlphaMatch));
if (OldChiSquared == NULL) {
// Not cached: solve ChiArea(x) = 0 numerically, starting the search at
// x = DegreesOfFreedom, then remember the result.
OldChiSquared = NewChiStruct (DegreesOfFreedom, Alpha);
OldChiSquared->ChiSquared = Solve (ChiArea, OldChiSquared,
(FLOAT64) DegreesOfFreedom,
(FLOAT64) CHIACCURACY);
ChiWith[DegreesOfFreedom] = push (ChiWith[DegreesOfFreedom],
OldChiSquared);
}
else {
// further optimization might move OldChiSquared to front of list
}
return (OldChiSquared->ChiSquared);
} // ComputeChiSquared
//---------------------------------------------------------------------------
FLOAT64 NormalDensity(inT32 x) {
  // Probability density at x of the discrete normal distribution defined by
  // the globals kNormalMean, kNormalVariance and kNormalMagnitude.  The
  // magnitude could be computed from the variance, but it is precomputed
  // for efficiency.
  const FLOAT64 offset = x - kNormalMean;
  return kNormalMagnitude * exp(-0.5 * offset * offset / kNormalVariance);
}  // NormalDensity
//---------------------------------------------------------------------------
FLOAT64 UniformDensity(inT32 x) {
  // Probability density at x of a uniform distribution whose range is
  // [0, BUCKETTABLESIZE]; zero outside that range.
  static FLOAT64 UniformDistributionDensity = (FLOAT64) 1.0 / BUCKETTABLESIZE;
  return (x >= 0.0 && x <= BUCKETTABLESIZE) ? UniformDistributionDensity
                                            : (FLOAT64) 0.0;
}  // UniformDensity
//---------------------------------------------------------------------------
FLOAT64 Integral(FLOAT64 f1, FLOAT64 f2, FLOAT64 Dx) {
  // Trapezoidal approximation to the integral of a function over a small
  // step: f1 and f2 are the function values at the two endpoints and Dx is
  // the (positive) width of the step.
  const FLOAT64 endpoint_sum = f1 + f2;
  return endpoint_sum * Dx / 2.0;
}  // Integral
//---------------------------------------------------------------------------
void FillBuckets(BUCKETS *Buckets,
CLUSTER *Cluster,
uinT16 Dim,
PARAM_DESC *ParamDesc,
FLOAT32 Mean,
FLOAT32 StdDev) {
/*
**	Parameters:
**		Buckets		histogram buckets to count samples
**		Cluster		cluster whose samples are being analyzed
**		Dim		dimension of samples which is being analyzed
**		ParamDesc	description of the dimension
**		Mean		"mean" of the distribution
**		StdDev		"standard deviation" of the distribution
**	Operation:
**		This routine counts the number of cluster samples which
**		fall within the various histogram buckets in Buckets.  Only
**		one dimension of each sample is examined.  The exact meaning
**		of the Mean and StdDev parameters depends on the
**		distribution which is being analyzed (this info is in the
**		Buckets data structure).  For normal distributions, Mean
**		and StdDev have the expected meanings.  For uniform and
**		random distributions the Mean is the center point of the
**		range and the StdDev is 1/2 the range.  A dimension with
**		zero standard deviation cannot be statistically analyzed.
**		In this case, a pseudo-analysis is used.
**	Return:
**		None (the Buckets data structure is filled in)
**	Exceptions:
**		None
**	History:
**		6/5/89, DSJ, Created.
*/
uinT16 BucketID;
int i;
LIST SearchState;
SAMPLE *Sample;
// initialize the histogram bucket counts to 0
for (i = 0; i < Buckets->NumberOfBuckets; i++)
Buckets->Count[i] = 0;
if (StdDev == 0.0) {
/* if the standard deviation is zero, then we can't statistically
analyze the cluster.  Use a pseudo-analysis: samples exactly on
the mean are distributed evenly across all buckets.  Samples greater
than the mean are placed in the last bucket; samples less than the
mean are placed in the first bucket. */
InitSampleSearch(SearchState, Cluster);
// i cycles round-robin through the buckets for samples exactly on the mean.
i = 0;
while ((Sample = NextSample (&SearchState)) != NULL) {
if (Sample->Mean[Dim] > Mean)
BucketID = Buckets->NumberOfBuckets - 1;
else if (Sample->Mean[Dim] < Mean)
BucketID = 0;
else
BucketID = i;
// Note: in this branch BucketID indexes Count directly, without going
// through the Bucket mapping table used in the statistical branch below.
Buckets->Count[BucketID] += 1;
i++;
if (i >= Buckets->NumberOfBuckets)
i = 0;
}
}
else {
// search for all samples in the cluster and add to histogram buckets
InitSampleSearch(SearchState, Cluster);
while ((Sample = NextSample (&SearchState)) != NULL) {
// Normalize the sample into a table index for the appropriate
// distribution, then map that index to a bucket via the Bucket table.
switch (Buckets->Distribution) {
case normal:
BucketID = NormalBucket (ParamDesc, Sample->Mean[Dim],
Mean, StdDev);
break;
case D_random:
case uniform:
BucketID = UniformBucket (ParamDesc, Sample->Mean[Dim],
Mean, StdDev);
break;
default:
BucketID = 0;
}
Buckets->Count[Buckets->Bucket[BucketID]] += 1;
}
}
} // FillBuckets
//---------------------------------------------------------------------------
uinT16 NormalBucket(PARAM_DESC *ParamDesc,
                    FLOAT32 x,
                    FLOAT32 Mean,
                    FLOAT32 StdDev) {
  // Determines which bucket x falls into in the discrete normal
  // distribution defined by kNormalMean and kNormalStdDev; values which
  // exceed the range of the discrete distribution are clipped.
  // Wrap circular dimensions into the half-range around the mean first.
  if (ParamDesc->Circular) {
    if (x - Mean > ParamDesc->HalfRange)
      x -= ParamDesc->Range;
    else if (x - Mean < -ParamDesc->HalfRange)
      x += ParamDesc->Range;
  }
  // Rescale x from (Mean, StdDev) space into the discrete distribution's
  // (kNormalMean, kNormalStdDev) space.
  const FLOAT32 normalized =
      ((x - Mean) / StdDev) * kNormalStdDev + kNormalMean;
  if (normalized < 0)
    return 0;
  if (normalized > BUCKETTABLESIZE - 1)
    return (uinT16) (BUCKETTABLESIZE - 1);
  return (uinT16) floor((FLOAT64) normalized);
}  // NormalBucket
//---------------------------------------------------------------------------
uinT16 UniformBucket(PARAM_DESC *ParamDesc,
                     FLOAT32 x,
                     FLOAT32 Mean,
                     FLOAT32 StdDev) {
  // Determines which bucket x falls into in the discrete uniform
  // distribution defined by BUCKETTABLESIZE.  Mean is the center of the
  // range; StdDev is half the range.  Out-of-range values are clipped.
  // Wrap circular dimensions into the half-range around the mean first.
  if (ParamDesc->Circular) {
    if (x - Mean > ParamDesc->HalfRange)
      x -= ParamDesc->Range;
    else if (x - Mean < -ParamDesc->HalfRange)
      x += ParamDesc->Range;
  }
  // Rescale [Mean - StdDev, Mean + StdDev] onto [0, BUCKETTABLESIZE].
  const FLOAT32 scaled =
      ((x - Mean) / (2 * StdDev) * BUCKETTABLESIZE + BUCKETTABLESIZE / 2.0);
  if (scaled < 0)
    return 0;
  if (scaled > BUCKETTABLESIZE - 1)
    return (uinT16) (BUCKETTABLESIZE - 1);
  return (uinT16) floor((FLOAT64) scaled);
}  // UniformBucket
//---------------------------------------------------------------------------
BOOL8 DistributionOK(BUCKETS *Buckets) {
  // Chi-square goodness of fit test: returns TRUE if the observed bucket
  // counts are consistent with the expected counts for the distribution the
  // Buckets structure was built for, FALSE otherwise.
  FLOAT32 total_difference = 0.0;
  for (int i = 0; i < Buckets->NumberOfBuckets; i++) {
    // Accumulate (observed - expected)^2 / expected for each bucket.
    FLOAT32 difference = Buckets->Count[i] - Buckets->ExpectedCount[i];
    total_difference += (difference * difference) / Buckets->ExpectedCount[i];
  }
  // The fit is acceptable when the statistic does not exceed the
  // precomputed chi-squared threshold.
  return (total_difference > Buckets->ChiSquared) ? FALSE : TRUE;
}  // DistributionOK
//---------------------------------------------------------------------------
void FreeStatistics(STATISTICS *Statistics) {
/*
**	Parameters:
**		Statistics	pointer to data structure to be freed
**	Operation:
**		This routine frees the memory used by the statistics
**		data structure: the three owned arrays first, then the
**		structure itself.
**	Return:
**		None
**	Exceptions:
**		None
**	History:
**		6/5/89, DSJ, Created.
*/
memfree (Statistics->CoVariance);
memfree (Statistics->Min);
memfree (Statistics->Max);
memfree(Statistics);
} // FreeStatistics
//---------------------------------------------------------------------------
void FreeBuckets(BUCKETS *buckets) {
/*
**	Parameters:
**		buckets	pointer to data structure to be freed
**	Operation:
**		This routine properly frees the memory used by a BUCKETS:
**		the two owned count arrays first, then the structure itself.
*/
Efree(buckets->Count);
Efree(buckets->ExpectedCount);
Efree(buckets);
} // FreeBuckets
//---------------------------------------------------------------------------
void FreeCluster(CLUSTER *Cluster) {
  // Recursively frees the specified cluster and all of its subclusters.
  // Safe to call with NULL.
  if (Cluster == NULL)
    return;
  FreeCluster(Cluster->Left);
  FreeCluster(Cluster->Right);
  memfree(Cluster);
}  // FreeCluster
//---------------------------------------------------------------------------
uinT16 DegreesOfFreedom(DISTRIBUTION Distribution, uinT16 HistogramBuckets) {
  // Degrees of freedom for a chi-squared test with the given number of
  // histogram buckets.  The per-distribution offset is subtracted and the
  // result rounded up to the next even number so chi-squared is easier to
  // compute; this makes the test slightly more lenient than optimum.
  static uinT8 DegreeOffsets[] = { 3, 3, 1 };
  uinT16 dof = HistogramBuckets - DegreeOffsets[(int) Distribution];
  if (Odd (dof))
    dof++;
  return dof;
}  // DegreesOfFreedom
//---------------------------------------------------------------------------
int NumBucketsMatch(void *arg1, // BUCKETS *Histogram,
void *arg2) { // uinT16 *DesiredNumberOfBuckets)
/*
** Parameters:
** Histogram current histogram being tested for a match
** DesiredNumberOfBuckets match key
** Operation:
** This routine is used to search a list of histogram data
** structures to find one with the specified number of
** buckets. It is called by the list search routines.
** Return: TRUE if Histogram matches DesiredNumberOfBuckets
** Exceptions: none
** History: Thu Aug 3 14:17:33 1989, DSJ, Created.
*/
BUCKETS *Histogram = (BUCKETS *) arg1;
uinT16 *DesiredNumberOfBuckets = (uinT16 *) arg2;
return (*DesiredNumberOfBuckets == Histogram->NumberOfBuckets);
} // NumBucketsMatch
//---------------------------------------------------------------------------
int ListEntryMatch(void *arg1,  // ListNode
                   void *arg2) {  // Key
  // List-search predicate used by delete_d: TRUE when the node's contents
  // pointer is exactly the search key (pointer identity, not value
  // equality).
  return arg1 == arg2;
}  // ListEntryMatch
//---------------------------------------------------------------------------
void AdjustBuckets(BUCKETS *Buckets, uinT32 NewSampleCount) {
  // Rescales every ExpectedCount entry by NewSampleCount/OldSampleCount so
  // the histogram is adjusted to the new sample count.
  const FLOAT64 scale =
      ((FLOAT64) NewSampleCount) / ((FLOAT64) Buckets->SampleCount);
  for (int i = 0; i < Buckets->NumberOfBuckets; i++)
    Buckets->ExpectedCount[i] *= scale;
  Buckets->SampleCount = NewSampleCount;
}  // AdjustBuckets
//---------------------------------------------------------------------------
void InitBuckets(BUCKETS *Buckets) {
  // Zeroes the observed-count histogram so the structure can be reused;
  // the expected counts are left untouched.
  for (int i = 0; i < Buckets->NumberOfBuckets; i++)
    Buckets->Count[i] = 0;
}  // InitBuckets
//---------------------------------------------------------------------------
int AlphaMatch(void *arg1, //CHISTRUCT *ChiStruct,
void *arg2) { //CHISTRUCT *SearchKey)
/*
** Parameters:
** ChiStruct chi-squared struct being tested for a match
** SearchKey chi-squared struct that is the search key
** Operation:
** This routine is used to search a list of structures which
** hold pre-computed chi-squared values for a chi-squared
** value whose corresponding alpha field matches the alpha
** field of SearchKey.
** It is called by the list search routines.
** Return: TRUE if ChiStruct's Alpha matches SearchKey's Alpha
** Exceptions: none
** History: Thu Aug 3 14:17:33 1989, DSJ, Created.
*/
CHISTRUCT *ChiStruct = (CHISTRUCT *) arg1;
CHISTRUCT *SearchKey = (CHISTRUCT *) arg2;
return (ChiStruct->Alpha == SearchKey->Alpha);
} // AlphaMatch
//---------------------------------------------------------------------------
CHISTRUCT *NewChiStruct(uinT16 DegreesOfFreedom, FLOAT64 Alpha) {
  // Allocates a CHISTRUCT and fills in its degrees of freedom and alpha.
  // The ChiSquared field is left for the caller to set.
  CHISTRUCT *chi = (CHISTRUCT *) Emalloc (sizeof (CHISTRUCT));
  chi->DegreesOfFreedom = DegreesOfFreedom;
  chi->Alpha = Alpha;
  return chi;
}  // NewChiStruct
//---------------------------------------------------------------------------
FLOAT64
Solve (SOLVEFUNC Function,
void *FunctionParams, FLOAT64 InitialGuess, FLOAT64 Accuracy)
/*
**	Parameters:
**		Function	function whose zero is to be found
**		FunctionParams	arbitrary data to pass to function
**		InitialGuess	point to start solution search at
**		Accuracy	maximum allowed error
**	Operation:
**		This routine attempts to find an x value at which Function
**		goes to zero (i.e. a root of the function ).  It will only
**		work correctly if a solution actually exists and there
**		are no extrema between the solution and the InitialGuess.
**		The algorithms used are extremely primitive: a Newton-style
**		iteration with the derivative approximated by a finite
**		difference of step Delta.
**	Return: Solution of function ( x for which f(x) = 0 ).
**	Exceptions: none
**	History: Fri Aug 4 11:08:59 1989, DSJ, Created.
*/
#define INITIALDELTA 0.1
#define DELTARATIO 0.1
{
FLOAT64 x;
FLOAT64 f;
FLOAT64 Slope;
FLOAT64 Delta;
FLOAT64 NewDelta;
FLOAT64 xDelta;
FLOAT64 LastPosX, LastNegX;
x = InitialGuess;
Delta = INITIALDELTA;
// Bracket of the root: most recent x with f >= 0 and with f < 0.
// Initialized wide so the loop runs until both sides have been seen.
// NOTE(review): if no root exists the bracket may never tighten and this
// loop may not terminate — confirm callers guarantee a root.
LastPosX = MAX_FLOAT32;
LastNegX = -MAX_FLOAT32;
f = (*Function) ((CHISTRUCT *) FunctionParams, x);
while (Abs (LastPosX - LastNegX) > Accuracy) {
// keep track of outer bounds of current estimate
if (f < 0)
LastNegX = x;
else
LastPosX = x;
// compute the approx. slope of f(x) at the current point
Slope =
((*Function) ((CHISTRUCT *) FunctionParams, x + Delta) - f) / Delta;
// compute the next solution guess */
xDelta = f / Slope;
x -= xDelta;
// reduce the delta used for computing slope to be a fraction of
//the amount moved to get to the new guess
NewDelta = Abs (xDelta) * DELTARATIO;
if (NewDelta < Delta)
Delta = NewDelta;
// compute the value of the function at the new guess
f = (*Function) ((CHISTRUCT *) FunctionParams, x);
}
return (x);
} // Solve
//---------------------------------------------------------------------------
FLOAT64 ChiArea(CHISTRUCT *ChiParams, FLOAT64 x) {
  // Computes the area under a chi density curve from 0 to x, minus the
  // desired area under the curve (Alpha, carried in ChiParams along with
  // the degrees of freedom).  Intended to be passed to Solve() to find the
  // chi-squared value that leaves Alpha under the right tail.  Only works
  // for even degrees of freedom; derived by integrating the chi density
  // curve by parts to get a finite series.
  const int terms = ChiParams->DegreesOfFreedom / 2 - 1;
  FLOAT64 series_total = 1;
  FLOAT64 denominator = 1;
  FLOAT64 power_of_x = 1;
  for (int i = 1; i <= terms; i++) {
    // Accumulate x^i / (2^i * i!) term by term.
    denominator *= 2 * i;
    power_of_x *= x;
    series_total += power_of_x / denominator;
  }
  // Error between the actual and desired area under the curve.
  return (series_total * exp (-0.5 * x)) - ChiParams->Alpha;
}  // ChiArea
//---------------------------------------------------------------------------
BOOL8
MultipleCharSamples (CLUSTERER * Clusterer,
CLUSTER * Cluster, FLOAT32 MaxIllegal)
/*
**	Parameters:
**		Clusterer	data structure holding cluster tree
**		Cluster		cluster containing samples to be tested
**		MaxIllegal	max percentage of samples allowed to have
**				more than 1 feature in the cluster
**	Operation:
**		This routine looks at all samples in the specified cluster.
**		It computes a running estimate of the percentage of the
**		characters which have more than 1 sample in the cluster.
**		When this percentage exceeds MaxIllegal, TRUE is returned.
**		Otherwise FALSE is returned.  The CharID
**		fields must contain integers which identify the training
**		characters which were used to generate the sample.  One
**		integer is used for each sample.  The NumChar field in
**		the Clusterer must contain the number of characters in the
**		training set.  All CharID fields must be between 0 and
**		NumChar-1.  The main function of this routine is to help
**		identify clusters which need to be split further, i.e. if
**		numerous training characters have 2 or more features which are
**		contained in the same cluster, then the cluster should be
**		split.
**	Return: TRUE if the cluster should be split, FALSE otherwise.
**	Exceptions: none
**	History: Wed Aug 30 11:13:05 1989, DSJ, Created.
**		2/22/90, DSJ, Added MaxIllegal control rather than always
**			splitting illegal clusters.
*/
#define ILLEGAL_CHAR    2
{
// Per-character seen/illegal flags, kept across calls and only regrown
// when a larger training set is encountered.
// NOTE(review): CharFlags is deliberately retained between calls (never
// freed at shutdown) and makes this routine non-reentrant — confirm
// single-threaded use.
static BOOL8 *CharFlags = NULL;
static inT32 NumFlags = 0;
int i;
LIST SearchState;
SAMPLE *Sample;
inT32 CharID;
inT32 NumCharInCluster;
inT32 NumIllegalInCluster;
FLOAT32 PercentIllegal;
// initial estimate assumes that no illegal chars exist in the cluster
NumCharInCluster = Cluster->SampleCount;
NumIllegalInCluster = 0;
if (Clusterer->NumChar > NumFlags) {
if (CharFlags != NULL)
memfree(CharFlags);
NumFlags = Clusterer->NumChar;
CharFlags = (BOOL8 *) Emalloc (NumFlags * sizeof (BOOL8));
}
for (i = 0; i < NumFlags; i++)
CharFlags[i] = FALSE;
// find each sample in the cluster and check if we have seen it before
InitSampleSearch(SearchState, Cluster);
while ((Sample = NextSample (&SearchState)) != NULL) {
CharID = Sample->CharID;
if (CharFlags[CharID] == FALSE) {
// First sample from this character.
CharFlags[CharID] = TRUE;
}
else {
// Repeat sample: count the character as illegal only once (the flag
// moves TRUE -> ILLEGAL_CHAR on the first repeat), but each extra
// sample shrinks the distinct-character count.
if (CharFlags[CharID] == TRUE) {
NumIllegalInCluster++;
CharFlags[CharID] = ILLEGAL_CHAR;
}
NumCharInCluster--;
PercentIllegal = (FLOAT32) NumIllegalInCluster / NumCharInCluster;
if (PercentIllegal > MaxIllegal) {
// Threshold exceeded: abandon the search early and request a split.
destroy(SearchState);
return (TRUE);
}
}
}
return (FALSE);
} // MultipleCharSamples
// Compute the inverse of a matrix using LU decomposition with partial pivoting.
// input is a size x size matrix in row-major order; the inverse is written to
// inv (also size x size, row-major).
// The return value is the sum of norms of the off-diagonal terms of the
// product of a and inv. (A measure of the error.)
// NOTE(review): there is no singularity check — a zero pivot in U[col][col]
// would cause a division by zero; confirm callers supply well-conditioned
// matrices.
double InvertMatrix(const float* input, int size, float* inv) {
// Allocate memory for the 2D arrays.
GENERIC_2D_ARRAY<double> U(size, size, 0.0);
GENERIC_2D_ARRAY<double> U_inv(size, size, 0.0);
GENERIC_2D_ARRAY<double> L(size, size, 0.0);
// Initialize the working matrices. U starts as input, L as I and U_inv as O.
int row;
int col;
for (row = 0; row < size; row++) {
for (col = 0; col < size; col++) {
U[row][col] = input[row*size + col];
L[row][col] = row == col ? 1.0 : 0.0;
U_inv[row][col] = 0.0;
}
}
// Compute forward matrix by inversion by LU decomposition of input.
// Row swaps and eliminations are applied to both U and L, so at the end
// L holds the accumulated row operations (L.input = U).
for (col = 0; col < size; ++col) {
// Find best pivot
int best_row = 0;
double best_pivot = -1.0;
for (row = col; row < size; ++row) {
if (Abs(U[row][col]) > best_pivot) {
best_pivot = Abs(U[row][col]);
best_row = row;
}
}
// Exchange pivot rows.
if (best_row != col) {
for (int k = 0; k < size; ++k) {
double tmp = U[best_row][k];
U[best_row][k] = U[col][k];
U[col][k] = tmp;
tmp = L[best_row][k];
L[best_row][k] = L[col][k];
L[col][k] = tmp;
}
}
// Now do the pivot itself.
for (row = col + 1; row < size; ++row) {
double ratio = -U[row][col] / U[col][col];
for (int j = col; j < size; ++j) {
U[row][j] += U[col][j] * ratio;
}
for (int k = 0; k < size; ++k) {
L[row][k] += L[col][k] * ratio;
}
}
}
// Next invert U.
// U is upper triangular, so its inverse is built by back substitution,
// one column at a time from the diagonal upwards.
for (col = 0; col < size; ++col) {
U_inv[col][col] = 1.0 / U[col][col];
for (row = col - 1; row >= 0; --row) {
double total = 0.0;
for (int k = col; k > row; --k) {
total += U[row][k] * U_inv[k][col];
}
U_inv[row][col] = -total / U[row][row];
}
}
// Now the answer is U_inv.L.
for (row = 0; row < size; row++) {
for (col = 0; col < size; col++) {
double sum = 0.0;
for (int k = row; k < size; ++k) {
sum += U_inv[row][k] * L[k][col];
}
inv[row*size + col] = sum;
}
}
// Check matrix product.
// Sum the absolute values of the off-diagonal entries of input.inv; a
// perfect inverse would give 0.
double error_sum = 0.0;
for (row = 0; row < size; row++) {
for (col = 0; col < size; col++) {
double sum = 0.0;
for (int k = 0; k < size; ++k) {
sum += input[row*size + k] * inv[k *size + col];
}
if (row != col) {
error_sum += Abs(sum);
}
}
}
return error_sum;
}
| C++ |
/******************************************************************************
** Filename: kdtree.cpp
** Purpose: Routines for managing K-D search trees
** Author: Dan Johnson
** History: 3/10/89, DSJ, Created.
** 5/23/89, DSJ, Added circular feature capability.
** 7/13/89, DSJ, Made tree nodes invisible to outside.
**
** (c) Copyright Hewlett-Packard Company, 1988.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
******************************************************************************/
/*-----------------------------------------------------------------------------
Include Files and Type Defines
-----------------------------------------------------------------------------*/
#include "kdtree.h"
#include "const.h"
#include "emalloc.h"
#include "freelist.h"
#include <stdio.h>
#include <math.h>
#define Magnitude(X) ((X) < 0 ? -(X) : (X))
#define NodeFound(N,K,D) (( (N)->Key == (K) ) && ( (N)->Data == (D) ))
/*-----------------------------------------------------------------------------
Global Data Definitions and Declarations
-----------------------------------------------------------------------------*/
#define MINSEARCH -MAX_FLOAT32
#define MAXSEARCH MAX_FLOAT32
// Helper function to find the next essential dimension in a cycle.
// Helper function to find the next essential dimension in a cycle.
static int NextLevel(KDTREE *tree, int level) {
  // Advance cyclically through [0, tree->KeySize), skipping dimensions
  // whose key description is marked NonEssential.
  do {
    if (++level >= tree->KeySize)
      level = 0;
  } while (tree->KeyDesc[level].NonEssential);
  return level;
}
//-----------------------------------------------------------------------------
// Store the k smallest-keyed key-value pairs.
// Store the k smallest-keyed key-value pairs seen so far.
template<typename Key, typename Value>
class MinK {
 public:
  // max_key is the largest key any element may carry; k is the number of
  // smallest elements to keep (clamped to at least 1).
  MinK(Key max_key, int k)
      : max_key_(max_key), elements_count_(0),
        k_(k < 1 ? 1 : k), max_index_(0) {
    elements_ = new Element[k_];
  }
  ~MinK() {
    delete [] elements_;
  }

  struct Element {
    Element() {}
    Element(const Key& k, const Value& v) : key(k), value(v) {}

    Key key;
    Value value;
  };

  // Attempts to add (k, v); returns true if it was kept, i.e. there was
  // still room or it displaced the current largest-keyed element.
  bool insert(Key k, Value v) {
    if (elements_count_ < k_) {
      // Room left: append, tracking the index of the largest key.
      elements_[elements_count_++] = Element(k, v);
      if (k > elements_[max_index_].key)
        max_index_ = elements_count_ - 1;
      return true;
    }
    if (k < elements_[max_index_].key) {
      // Full: evict the largest element, then rescan for the new largest.
      elements_[max_index_] = Element(k, v);
      for (int i = 0; i < elements_count_; i++) {
        if (elements_[i].key > elements_[max_index_].key)
          max_index_ = i;
      }
      return true;
    }
    return false;
  }

  // Largest key that insert() could still accept.
  const Key& max_insertable_key() {
    return elements_count_ < k_ ? max_key_ : elements_[max_index_].key;
  }

  int elements_count() { return elements_count_; }
  const Element* elements() { return elements_; }

 private:
  const Key max_key_;    // the maximum possible Key
  Element* elements_;    // unsorted array of elements
  int elements_count_;   // the number of results collected so far
  int k_;                // the number of results we want from the search
  int max_index_;        // the index of the result with the largest key
};
//-----------------------------------------------------------------------------
// Helper class for searching for the k closest points to query_point in tree.
// Usage: construct, then call Search() once; the object holds the search
// state (current best candidates and the shrinking search box).
class KDTreeSearch {
 public:
  KDTreeSearch(KDTREE* tree, FLOAT32 *query_point, int k_closest);
  ~KDTreeSearch();
  // Return the k nearest points' data.
  void Search(int *result_count, FLOAT32 *distances, void **results);
 private:
  // Recursive worker: accumulates candidates from SubTree into results_.
  void SearchRec(int Level, KDNODE *SubTree);
  // True if the current search radius still reaches the given box.
  bool BoxIntersectsSearch(FLOAT32 *lower, FLOAT32 *upper);
  KDTREE *tree_;           // tree being searched (not owned)
  FLOAT32 *query_point_;   // query key (not owned)
  MinK<FLOAT32, void *>* results_;  // k best candidates, keyed by squared distance
  FLOAT32 *sb_min_;  // search box minimum
  FLOAT32 *sb_max_;  // search box maximum
};
// Set up a search for the k_closest neighbors of query_point in tree.
// Allocates the candidate store and one search-box bound per dimension.
KDTreeSearch::KDTreeSearch(KDTREE* tree, FLOAT32 *query_point, int k_closest)
    : tree_(tree), query_point_(query_point) {
  results_ = new MinK<FLOAT32, void *>(MAXSEARCH, k_closest);
  sb_min_ = new FLOAT32[tree->KeySize];
  sb_max_ = new FLOAT32[tree->KeySize];
}
// Release the search state allocated by the constructor.
KDTreeSearch::~KDTreeSearch() {
  delete[] sb_min_;
  delete[] sb_max_;
  delete results_;
}
// Locate the k_closest points to query_point_, and return their distances and
// data into the given buffers.
void KDTreeSearch::Search(int *result_count,
                          FLOAT32 *distances,
                          void **results) {
  // An empty tree hangs off Root.Left; nothing to find.
  if (tree_->Root.Left == NULL) {
    *result_count = 0;
    return;
  }
  // Start with the search box covering the full key space.
  for (int i = 0; i < tree_->KeySize; i++) {
    sb_min_[i] = tree_->KeyDesc[i].Min;
    sb_max_[i] = tree_->KeyDesc[i].Max;
  }
  SearchRec(0, tree_->Root.Left);
  const int count = results_->elements_count();
  *result_count = count;
  // Candidate keys are squared distances; convert back on output.
  for (int j = 0; j < count; j++) {
    distances[j] = (FLOAT32) sqrt((FLOAT64)results_->elements()[j].key);
    results[j] = results_->elements()[j].value;
  }
}
/*-----------------------------------------------------------------------------
              Public Code
-----------------------------------------------------------------------------*/
/*---------------------------------------------------------------------------*/
/// Return a new KDTREE based on the specified parameters.
/// Parameters:
/// KeySize  # of dimensions in the K-D tree
/// KeyDesc  array of params to describe key dimensions
KDTREE *MakeKDTree(inT16 KeySize, const PARAM_DESC KeyDesc[]) {
  // KDTREE already embeds one PARAM_DESC, so allocate room for the
  // remaining KeySize - 1 descriptors directly after the struct.
  KDTREE *tree = (KDTREE *) Emalloc(
      sizeof(KDTREE) + (KeySize - 1) * sizeof(PARAM_DESC));
  tree->KeySize = KeySize;
  tree->Root.Left = NULL;
  tree->Root.Right = NULL;
  for (int i = 0; i < KeySize; i++) {
    PARAM_DESC *desc = &tree->KeyDesc[i];
    desc->NonEssential = KeyDesc[i].NonEssential;
    desc->Circular = KeyDesc[i].Circular;
    if (KeyDesc[i].Circular) {
      // Circular dimensions keep their true bounds plus precomputed range
      // values used for wraparound handling.
      desc->Min = KeyDesc[i].Min;
      desc->Max = KeyDesc[i].Max;
      desc->Range = KeyDesc[i].Max - KeyDesc[i].Min;
      desc->HalfRange = desc->Range / 2;
      desc->MidRange = (KeyDesc[i].Max + KeyDesc[i].Min) / 2;
    } else {
      // Linear dimensions are searched over the widest representable span.
      desc->Min = MINSEARCH;
      desc->Max = MAXSEARCH;
    }
  }
  return tree;
}
/*---------------------------------------------------------------------------*/
/**
 * Store Data in the K-D tree specified by Tree using Key as an access key.
 * Walks down from the root, maintaining the per-node branch bounds on the
 * way, and hangs a new leaf node where the walk falls off the tree.
 *
 * @param Tree K-D tree in which data is to be stored
 * @param Key  ptr to key by which data can be retrieved
 * @param Data ptr to data to be stored in the tree
 *
 * @note Exceptions: none
 * @note History: 3/10/89, DSJ, Created.
 *                7/13/89, DSJ, Changed return to void.
 */
void KDStore(KDTREE *Tree, FLOAT32 *Key, void *Data) {
  KDNODE **link = &(Tree->Root.Left);
  int level = NextLevel(Tree, -1);
  KDNODE *node = *link;
  while (node != NULL) {
    if (Key[level] < node->BranchPoint) {
      link = &(node->Left);
      // LeftBranch tracks the largest key value stored in the left subtree.
      if (Key[level] > node->LeftBranch)
        node->LeftBranch = Key[level];
    } else {
      link = &(node->Right);
      // RightBranch tracks the smallest key value stored in the right subtree.
      if (Key[level] < node->RightBranch)
        node->RightBranch = Key[level];
    }
    level = NextLevel(Tree, level);
    node = *link;
  }
  *link = MakeKDNode(Tree, Key, (void *) Data, level);
} /* KDStore */
/*---------------------------------------------------------------------------*/
/**
 * This routine deletes a node from Tree. The node to be
 * deleted is specified by the Key for the node and the Data
 * contents of the node. These two pointers must be identical
 * to the pointers that were used for the node when it was
 * originally stored in the tree. A node will be deleted from
 * the tree only if its key and data pointers are identical
 * to Key and Data respectively. The tree is re-formed by removing
 * the affected subtree and inserting all elements but the root.
 *
 * @param Tree K-D tree to delete node from
 * @param Key  key of node to be deleted
 * @param Data data contents of node to be deleted
 *
 * @note Exceptions: none
 *
 * @note History: 3/13/89, DSJ, Created.
 *                7/13/89, DSJ, Specify node indirectly by key and data.
 */
void
KDDelete (KDTREE * Tree, FLOAT32 Key[], void *Data) {
  int Level;
  KDNODE *Current;
  KDNODE *Father;
  /* initialize search at root of tree */
  Father = &(Tree->Root);
  Current = Father->Left;
  Level = NextLevel(Tree, -1);
  /* search tree for node to be deleted; Father trails one step behind so
     the link to the deleted node can be cleared */
  while ((Current != NULL) && (!NodeFound (Current, Key, Data))) {
    Father = Current;
    if (Key[Level] < Current->BranchPoint)
      Current = Current->Left;
    else
      Current = Current->Right;
    Level = NextLevel(Tree, Level);
  }
  if (Current != NULL) { /* if node to be deleted was found */
    /* Unhook the whole subtree rooted at Current and reset Father's branch
       bound on that side.
       NOTE(review): Level here has already advanced past Father's split
       level during the search loop, so KeyDesc[Level] indexes the child's
       dimension, not Father's. For non-circular dimensions Min/Max are the
       MINSEARCH/MAXSEARCH sentinels either way, but confirm this is the
       intended dimension for circular ones. */
    if (Current == Father->Left) {
      Father->Left = NULL;
      Father->LeftBranch = Tree->KeyDesc[Level].Min;
    } else {
      Father->Right = NULL;
      Father->RightBranch = Tree->KeyDesc[Level].Max;
    }
    /* Re-insert every node of the detached subtree except Current itself,
       then free the (now duplicated) subtree nodes. */
    InsertNodes(Tree, Current->Left);
    InsertNodes(Tree, Current->Right);
    FreeSubTree(Current);
  }
} /* KDDelete */
/*---------------------------------------------------------------------------*/
/**
 * Search the K-D tree specified by Tree and find the QuerySize nearest
 * neighbors of Query. The data contents of the nearest neighbors are placed
 * in NBuffer and their distances from Query are placed in DBuffer.
 *
 * @param Tree            ptr to K-D tree to be searched
 * @param Query           ptr to query key (point in D-space)
 * @param QuerySize       number of nearest neighbors to be found
 * @param MaxDistance     all neighbors must be within this distance
 * @param NumberOfResults [out] number of nearest neighbors actually found
 * @param NBuffer         ptr to QuerySize buffer to hold nearest neighbors
 * @param DBuffer         ptr to QuerySize buffer to hold distances
 *                        from nearest neighbor to query point
 *
 * @note Exceptions: none
 * @note History: 3/10/89, DSJ, Created.
 *                7/13/89, DSJ, Return contents of node instead of node itself.
 */
void KDNearestNeighborSearch(
    KDTREE *Tree, FLOAT32 Query[], int QuerySize, FLOAT32 MaxDistance,
    int *NumberOfResults, void **NBuffer, FLOAT32 DBuffer[]) {
  // NOTE(review): MaxDistance is not referenced below — the search is bounded
  // only by QuerySize. Confirm whether the distance cap is still required.
  KDTreeSearch search(Tree, Query, QuerySize);
  search.Search(NumberOfResults, DBuffer, NBuffer);
}
/*---------------------------------------------------------------------------*/
// Walk a given Tree with action, starting at the first real node (the
// dummy root's left child). An empty tree is a no-op.
void KDWalk(KDTREE *Tree, void_proc action, void *context) {
  if (Tree->Root.Left == NULL)
    return;
  Walk(Tree, action, context, Tree->Root.Left, NextLevel(Tree, -1));
}
/*---------------------------------------------------------------------------*/
/**
 * Free all memory allocated to the specified KD-tree: the tree structure
 * itself plus every node in it. The Key and Data items pointed to by the
 * nodes are NOT freed; that memory is left untouched.
 *
 * @param Tree tree data structure to be released
 *
 * @note Exceptions: none
 * @note History: 5/26/89, DSJ, Created.
 */
void FreeKDTree(KDTREE *Tree) {
  FreeSubTree(Tree->Root.Left);
  memfree(Tree);
} /* FreeKDTree */
/*-----------------------------------------------------------------------------
              Private Code
-----------------------------------------------------------------------------*/
/*---------------------------------------------------------------------------*/
/**
 * Allocate a new K-D tree node holding the specified Key and Data. The
 * node branches on dimension Index, its branch bounds start at that
 * dimension's full extent, and both subtrees start empty.
 *
 * @param tree  The tree to create the node for
 * @param Key   Access key for new node in KD tree
 * @param Data  ptr to data to be stored in new node
 * @param Index index of Key to branch on
 * @return pointer to new K-D tree node
 *
 * @note Exceptions: none
 * @note History: 3/11/89, DSJ, Created.
 */
KDNODE *MakeKDNode(KDTREE *tree, FLOAT32 Key[], void *Data, int Index) {
  KDNODE *node = (KDNODE *) Emalloc (sizeof (KDNODE));
  node->Key = Key;
  node->Data = Data;
  node->BranchPoint = Key[Index];
  // No children yet, so the branch bounds cover the whole dimension.
  node->LeftBranch = tree->KeyDesc[Index].Min;
  node->RightBranch = tree->KeyDesc[Index].Max;
  node->Left = NULL;
  node->Right = NULL;
  return node;
} /* MakeKDNode */
/*---------------------------------------------------------------------------*/
// Release the storage of a single node (not its Key/Data, which are
// owned by the caller).
void FreeKDNode(KDNODE *Node) {
  memfree((char *) Node);
}
/*---------------------------------------------------------------------------*/
// Recursively accumulate the k_closest points to query_point_ into results_.
// The search box (sb_min_/sb_max_) is narrowed before each recursive call
// and restored afterwards, so the narrow/recurse/restore triples below are
// order-critical.
// Parameters:
//   Level    level in tree of sub-tree to be searched
//   SubTree  sub-tree to be searched
void KDTreeSearch::SearchRec(int level, KDNODE *sub_tree) {
  // Wrap the dimension index at the top of the key space.
  if (level >= tree_->KeySize)
    level = 0;
  // Prune: if the current candidate radius cannot reach this subtree's
  // bounding box, nothing inside can improve the results.
  if (!BoxIntersectsSearch(sb_min_, sb_max_))
    return;
  // Offer this node's point; the candidate key is its squared distance.
  results_->insert(DistanceSquared(tree_->KeySize, tree_->KeyDesc,
                                   query_point_, sub_tree->Key),
                   sub_tree->Data);
  // Descend into the child on the query's side of the split first, so good
  // candidates are found early and tighten the pruning radius before the
  // far child is examined.
  if (query_point_[level] < sub_tree->BranchPoint) {
    if (sub_tree->Left != NULL) {
      // Left subtree keys are bounded above by LeftBranch in this dimension.
      FLOAT32 tmp = sb_max_[level];
      sb_max_[level] = sub_tree->LeftBranch;
      SearchRec(NextLevel(tree_, level), sub_tree->Left);
      sb_max_[level] = tmp;
    }
    if (sub_tree->Right != NULL) {
      // Right subtree keys are bounded below by RightBranch.
      FLOAT32 tmp = sb_min_[level];
      sb_min_[level] = sub_tree->RightBranch;
      SearchRec(NextLevel(tree_, level), sub_tree->Right);
      sb_min_[level] = tmp;
    }
  } else {
    if (sub_tree->Right != NULL) {
      FLOAT32 tmp = sb_min_[level];
      sb_min_[level] = sub_tree->RightBranch;
      SearchRec(NextLevel(tree_, level), sub_tree->Right);
      sb_min_[level] = tmp;
    }
    if (sub_tree->Left != NULL) {
      FLOAT32 tmp = sb_max_[level];
      sb_max_[level] = sub_tree->LeftBranch;
      SearchRec(NextLevel(tree_, level), sub_tree->Left);
      sb_max_[level] = tmp;
    }
  }
}
/*---------------------------------------------------------------------------*/
// Returns the Euclidean distance squared between p1 and p2 for all essential
// dimensions.
// Parameters:
//   k      keys are in k-space
//   dim    dimension descriptions (essential, circular, etc)
//   p1,p2  two different points in K-D space
FLOAT32 DistanceSquared(int k, PARAM_DESC *dim, FLOAT32 p1[], FLOAT32 p2[]) {
  FLOAT32 total = 0;
  for (int i = 0; i < k; ++i) {
    if (dim[i].NonEssential)
      continue;
    FLOAT32 d = p1[i] - p2[i];
    if (dim[i].Circular) {
      // Circular dimension: the shorter way around may be the wraparound.
      d = Magnitude(d);
      FLOAT32 wrap = dim[i].Max - dim[i].Min - d;
      d = MIN(d, wrap);
    }
    total += d * d;
  }
  return total;
}
// Euclidean distance between p1 and p2 over essential dimensions only.
FLOAT32 ComputeDistance(int k, PARAM_DESC *dim, FLOAT32 p1[], FLOAT32 p2[]) {
  FLOAT32 distance_squared = DistanceSquared(k, dim, p1, p2);
  return sqrt(distance_squared);
}
/*---------------------------------------------------------------------------*/
// Return whether the query region (the smallest known circle about
// query_point_ containing results->k_ points) intersects the box specified
// between lower and upper. For circular dimensions, we also check the point
// one wrap distance away from the query.
bool KDTreeSearch::BoxIntersectsSearch(FLOAT32 *lower, FLOAT32 *upper) {
  FLOAT32 *query = query_point_;
  // Accumulated squared gap between the query and the box.
  FLOAT64 total_distance = 0.0;
  // NOTE(review): the keys held by results_ are squared distances (Search()
  // takes sqrt of them on output), so multiplying max_insertable_key() by
  // itself yields distance^4, not distance^2, once results are present.
  // Confirm whether this is intentional before relying on exact pruning.
  FLOAT64 radius_squared =
      results_->max_insertable_key() * results_->max_insertable_key();
  PARAM_DESC *dim = tree_->KeyDesc;
  for (int i = tree_->KeySize; i > 0; i--, dim++, query++, lower++, upper++) {
    if (dim->NonEssential)
      continue;
    // Gap from the query to the box extent in this dimension (0 when the
    // query coordinate lies inside the box).
    FLOAT32 dimension_distance;
    if (*query < *lower)
      dimension_distance = *lower - *query;
    else if (*query > *upper)
      dimension_distance = *query - *upper;
    else
      dimension_distance = 0;
    /* if this dimension is circular - check wraparound distance */
    if (dim->Circular) {
      FLOAT32 wrap_distance = MAX_FLOAT32;
      if (*query < *lower)
        wrap_distance = *query + dim->Max - dim->Min - *upper;
      else if (*query > *upper)
        wrap_distance = *lower - (*query - (dim->Max - dim->Min));
      dimension_distance = MIN(dimension_distance, wrap_distance);
    }
    total_distance += dimension_distance * dimension_distance;
    // Early out as soon as the box is provably out of reach.
    if (total_distance >= radius_squared)
      return FALSE;
  }
  return TRUE;
}
/*---------------------------------------------------------------------------*/
/**
 * Walk the specified sub_tree in pre-order, invoking
 *   action(context, data, level)
 * at each node, where data is the data contents of the node being visited
 * and level is the level of the node in the tree (root being level 0).
 *
 * @param tree     root of the tree being walked
 * @param action   action to be performed at every node
 * @param context  action's context
 * @param sub_tree ptr to root of subtree to be walked
 * @param level    current level in the tree for this node
 */
void Walk(KDTREE *tree, void_proc action, void *context,
          KDNODE *sub_tree, inT32 level) {
  action(context, sub_tree->Data, level);
  if (sub_tree->Left != NULL)
    Walk(tree, action, context, sub_tree->Left, NextLevel(tree, level));
  if (sub_tree->Right != NULL)
    Walk(tree, action, context, sub_tree->Right, NextLevel(tree, level));
}
// Given a subtree nodes, insert all of its elements into tree.
// Pre-order: each node is re-inserted before its children, preserving the
// relative insertion order (and hence the resulting tree shape) of the
// subtree's elements.
void InsertNodes(KDTREE *tree, KDNODE *nodes) {
  if (nodes == NULL)
    return;
  KDStore(tree, nodes->Key, nodes->Data);
  InsertNodes(tree, nodes->Left);
  InsertNodes(tree, nodes->Right);
}
// Free all of the nodes of a sub tree (post-order: children first, so no
// freed node is ever dereferenced).
void FreeSubTree(KDNODE *sub_tree) {
  if (sub_tree == NULL)
    return;
  FreeSubTree(sub_tree->Left);
  FreeSubTree(sub_tree->Right);
  memfree(sub_tree);
} /* FreeSubTree */
| C++ |
/******************************************************************************
** Filename: picofeat.c
** Purpose: Definition of pico-features.
** Author: Dan Johnson
** History: 9/4/90, DSJ, Created.
**
** (c) Copyright Hewlett-Packard Company, 1988.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
******************************************************************************/
/**----------------------------------------------------------------------------
Include Files and Type Defines
----------------------------------------------------------------------------**/
#include "picofeat.h"
#include "classify.h"
#include "efio.h"
#include "featdefs.h"
#include "fpoint.h"
#include "mfoutline.h"
#include "ocrfeatures.h"
#include "params.h"
#include "trainingsample.h"
#include <math.h>
#include <stdio.h>
/*---------------------------------------------------------------------------
Variables
----------------------------------------------------------------------------*/
double_VAR(classify_pico_feature_length, 0.05, "Pico Feature Length");
/*---------------------------------------------------------------------------
Private Function Prototypes
----------------------------------------------------------------------------*/
void ConvertSegmentToPicoFeat(FPOINT *Start,
FPOINT *End,
FEATURE_SET FeatureSet);
void ConvertToPicoFeatures2(MFOUTLINE Outline, FEATURE_SET FeatureSet);
void NormalizePicoX(FEATURE_SET FeatureSet);
/**----------------------------------------------------------------------------
Public Code
----------------------------------------------------------------------------**/
/*---------------------------------------------------------------------------*/
namespace tesseract {
/**
 * Extract pico-features from Blob: convert the blob to normalized outlines
 * and turn every visible outline segment into evenly spaced pico-features.
 *
 * @param Blob blob to extract pico-features from
 *
 * Globals:
 * - classify_norm_method normalization method currently specified
 * @return Pico-features for Blob.
 * @note Exceptions: none
 * @note History: 9/4/90, DSJ, Created.
 */
FEATURE_SET Classify::ExtractPicoFeatures(TBLOB *Blob) {
  FEATURE_SET feature_set = NewFeatureSet(MAX_PICO_FEATURES);
  FLOAT32 x_scale, y_scale;
  LIST outlines = ConvertBlob(Blob);
  NormalizeOutlines(outlines, &x_scale, &y_scale);
  LIST remaining_outlines = outlines;
  iterate(remaining_outlines) {
    MFOUTLINE outline = (MFOUTLINE) first_node (remaining_outlines);
    ConvertToPicoFeatures2(outline, feature_set);
  }
  // Baseline normalization additionally centers the features on x = 0.
  if (classify_norm_method == baseline)
    NormalizePicoX(feature_set);
  FreeOutlines(outlines);
  return feature_set;
} /* ExtractPicoFeatures */
} // namespace tesseract
/**----------------------------------------------------------------------------
              Private Code
----------------------------------------------------------------------------**/
/*---------------------------------------------------------------------------*/
/**
 * Convert an entire segment of an outline into a set of pico-features which
 * are added to FeatureSet. The length of the segment is rounded to the
 * nearest whole number of pico-features and the pico-features are spaced
 * evenly over the entire segment.
 *
 * @param Start      starting point of pico-feature
 * @param End        ending point of pico-feature
 * @param FeatureSet set to add pico-feature to
 *
 * Globals:
 * - classify_pico_feature_length length of a single pico-feature
 * @return none (results are placed in FeatureSet)
 * @note Exceptions: none
 * @note History: Tue Apr 30 15:44:34 1991, DSJ, Created.
 */
void ConvertSegmentToPicoFeat(FPOINT *Start,
                              FPOINT *End,
                              FEATURE_SET FeatureSet) {
  FLOAT32 angle = NormalizedAngleFrom (Start, End, 1.0);
  FLOAT32 length = DistanceBetween (*Start, *End);
  // Round to a whole number of features, but always emit at least one.
  int num_features = (int) floor (length / classify_pico_feature_length + 0.5);
  if (num_features < 1)
    num_features = 1;
  // Step vector between consecutive pico-feature centers.
  FPOINT delta;
  delta.x = XDelta (*Start, *End) / num_features;
  delta.y = YDelta (*Start, *End) / num_features;
  // The first feature sits half a step in from the segment start.
  FPOINT center;
  center.x = Start->x + delta.x / 2.0;
  center.y = Start->y + delta.y / 2.0;
  for (int i = 0; i < num_features; i++) {
    FEATURE feature = NewFeature (&PicoFeatDesc);
    feature->Params[PicoFeatDir] = angle;
    feature->Params[PicoFeatX] = center.x;
    feature->Params[PicoFeatY] = center.y;
    AddFeature(FeatureSet, feature);
    center.x += delta.x;
    center.y += delta.y;
  }
} /* ConvertSegmentToPicoFeat */
/*---------------------------------------------------------------------------*/
/**
 * Step thru the specified outline and cut it up into pieces of equal length.
 * These pieces become the desired pico-features. Each segment in the outline
 * is converted into an integral number of pico-features.
 *
 * @param Outline    outline to extract micro-features from
 * @param FeatureSet set of features to add pico-features to
 *
 * Globals:
 * - classify_pico_feature_length length of features to be extracted
 * @return none (results are returned in FeatureSet)
 * @note Exceptions: none
 * @note History: 4/30/91, DSJ, Adapted from ConvertToPicoFeatures().
 */
void ConvertToPicoFeatures2(MFOUTLINE Outline, FEATURE_SET FeatureSet) {
  if (DegenerateOutline(Outline))
    return;
  MFOUTLINE first = Outline;
  MFOUTLINE current = first;
  MFOUTLINE next = NextPointAfter(current);
  do {
    /* note that an edge is hidden if the ending point of the edge is
       marked as hidden. This situation happens because the order of
       the outlines is reversed when they are converted from the old
       format. In the old format, a hidden edge is marked by the
       starting point for that edge. */
    if (!(PointAt(next)->Hidden))
      ConvertSegmentToPicoFeat (&(PointAt(current)->Point),
                                &(PointAt(next)->Point), FeatureSet);
    current = next;
    next = NextPointAfter(current);
  } while (current != first);
} /* ConvertToPicoFeatures2 */
/*---------------------------------------------------------------------------*/
/**
 * Compute the average x position over all of the pico-features in FeatureSet
 * and then renormalize the pico-features to force this average to be the
 * x origin (i.e. x=0).
 *
 * @param FeatureSet pico-features to be normalized (modified in place)
 *
 * Globals: none
 * @return none (FeatureSet is changed)
 * @note Exceptions: none
 * @note History: Tue Sep  4 16:50:08 1990, DSJ, Created.
 */
void NormalizePicoX(FEATURE_SET FeatureSet) {
  // Guard against an empty set: nothing to shift, and the average below
  // would otherwise divide by zero.
  if (FeatureSet->NumFeatures <= 0)
    return;
  FLOAT32 Origin = 0.0;
  for (int i = 0; i < FeatureSet->NumFeatures; i++)
    Origin += FeatureSet->Features[i]->Params[PicoFeatX];
  Origin /= FeatureSet->NumFeatures;
  // Shift every feature so the mean x becomes 0.
  for (int i = 0; i < FeatureSet->NumFeatures; i++)
    FeatureSet->Features[i]->Params[PicoFeatX] -= Origin;
} /* NormalizePicoX */
/*---------------------------------------------------------------------------*/
/**
 * Extract integer character-normalized features for blob by converting it
 * to a training sample and copying the sample's features out.
 *
 * @param blob blob to extract features from
 * @param bl_denorm,cn_denorm normalization/denormalization parameters
 * @param fx_info feature extraction results
 * @return Integer character-normalized features for blob, or NULL if the
 *         blob yields no usable sample.
 * @note Exceptions: none
 * @note History: 8/8/2011, rays, Created.
 */
FEATURE_SET ExtractIntCNFeatures(TBLOB *blob, const DENORM& bl_denorm,
                                 const DENORM& cn_denorm,
                                 const INT_FX_RESULT_STRUCT& fx_info) {
  INT_FX_RESULT_STRUCT local_fx_info(fx_info);
  GenericVector<INT_FEATURE_STRUCT> bl_features;
  tesseract::TrainingSample* sample =
      tesseract::BlobToTrainingSample(*blob, false, &local_fx_info,
                                      &bl_features);
  if (sample == NULL) return NULL;

  const INT_FEATURE_STRUCT* features = sample->features();
  const int num_features = sample->num_features();
  FEATURE_SET feature_set = NewFeatureSet(num_features);
  // Copy each (X, Y, Theta) triple into an IntFeatDesc feature.
  for (int f = 0; f < num_features; ++f) {
    FEATURE feature = NewFeature(&IntFeatDesc);
    feature->Params[IntX] = features[f].X;
    feature->Params[IntY] = features[f].Y;
    feature->Params[IntDir] = features[f].Theta;
    AddFeature(feature_set, feature);
  }
  delete sample;
  return feature_set;
} /* ExtractIntCNFeatures */
/*---------------------------------------------------------------------------*/
/**
 * Extract the geometric (top/bottom/width) features for blob from its
 * training-sample representation.
 *
 * @param blob blob to extract features from
 * @param bl_denorm,cn_denorm normalization/denormalization parameters
 * @param fx_info feature extraction results
 * @return Geometric (top/bottom/width) features for blob, or NULL if the
 *         blob yields no usable sample.
 * @note Exceptions: none
 * @note History: 8/8/2011, rays, Created.
 */
FEATURE_SET ExtractIntGeoFeatures(TBLOB *blob, const DENORM& bl_denorm,
                                  const DENORM& cn_denorm,
                                  const INT_FX_RESULT_STRUCT& fx_info) {
  INT_FX_RESULT_STRUCT local_fx_info(fx_info);
  GenericVector<INT_FEATURE_STRUCT> bl_features;
  tesseract::TrainingSample* sample =
      tesseract::BlobToTrainingSample(*blob, false, &local_fx_info,
                                      &bl_features);
  if (sample == NULL) return NULL;

  // A single feature carries all three geometric measurements.
  FEATURE_SET feature_set = NewFeatureSet(1);
  FEATURE feature = NewFeature(&IntFeatDesc);
  feature->Params[GeoBottom] = sample->geo_feature(GeoBottom);
  feature->Params[GeoTop] = sample->geo_feature(GeoTop);
  feature->Params[GeoWidth] = sample->geo_feature(GeoWidth);
  AddFeature(feature_set, feature);
  delete sample;
  return feature_set;
} /* ExtractIntGeoFeatures */
| C++ |
/******************************************************************************
** Filename: extract.c
** Purpose: Generic high level feature extractor routines.
** Author: Dan Johnson
** History: Sun Jan 21 09:44:08 1990, DSJ, Created.
**
** (c) Copyright Hewlett-Packard Company, 1988.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
******************************************************************************/
/*-----------------------------------------------------------------------------
Include Files and Type Defines
-----------------------------------------------------------------------------*/
#include "extract.h"
#include "flexfx.h"
#include "danerror.h"
typedef CHAR_FEATURES (*CF_FUNC) ();
/*-----------------------------------------------------------------------------
Private Function Prototypes
-----------------------------------------------------------------------------*/
void ExtractorStub();
/*-----------------------------------------------------------------------------
Public Code
-----------------------------------------------------------------------------*/
/*---------------------------------------------------------------------------*/
/**
 * Extract features from Blob by calling the feature extractor which is
 * currently being used (the flex feature extractor). This routine simply
 * provides a high level interface to feature extraction: the caller can
 * extract any type of features from a blob without understanding any lower
 * level details.
 *
 * @param FeatureDefs definitions of feature types/extractors
 * @param bl_denorm,cn_denorm normalization/denormalization parameters
 * @param fx_info feature extraction results
 * @param Blob blob to extract features from
 *
 * @return The character features extracted from Blob.
 * @note Exceptions: none
 * @note History: Sun Jan 21 10:07:28 1990, DSJ, Created.
 */
CHAR_DESC ExtractBlobFeatures(const FEATURE_DEFS_STRUCT &FeatureDefs,
                              const DENORM& bl_denorm, const DENORM& cn_denorm,
                              const INT_FX_RESULT_STRUCT& fx_info,
                              TBLOB *Blob) {
  CHAR_DESC features =
      ExtractFlexFeatures(FeatureDefs, Blob, bl_denorm, cn_denorm, fx_info);
  return features;
} /* ExtractBlobFeatures */
/*-----------------------------------------------------------------------------
              Private Code
-----------------------------------------------------------------------------*/
/*---------------------------------------------------------------------------*/
#define DUMMY_ERROR 1

/**
 * Stub out feature extractors that are no longer used: simply raises
 * DUMMY_ERROR via DoError.
 *
 * @note Exceptions: none
 * @note History: Wed Jan  2 14:16:49 1991, DSJ, Created.
 */
void ExtractorStub() {
  DoError (DUMMY_ERROR, "Selected feature extractor has been stubbed out!");
} /* ExtractorStub */
| C++ |
// Copyright 2011 Google Inc. All Rights Reserved.
// Author: rays@google.com (Ray Smith)
///////////////////////////////////////////////////////////////////////
// File: intfeaturedist.h
// Description: Fast set-difference-based feature distance calculator.
// Created: Thu Sep 01 12:14:30 PDT 2011
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#ifndef TESSERACT_CLASSIFY_INTFEATUREDIST_H_
#define TESSERACT_CLASSIFY_INTFEATUREDIST_H_
#include "genericvector.h"
namespace tesseract {
class IntFeatureMap;
// Feature distance calculator designed to provide a fast distance calculation
// based on set difference between a given feature set and many other feature
// sets in turn.
// Representation of a feature set as an array of bools that are sparsely
// true, and companion arrays that allow fast feature set distance
// calculations with allowance of offsets in position.
// Init is expensive, so for greatest efficiency, to re-initialize for a new
// feature set, use Set(..., false) on the SAME feature set as was used to
// setup with Set(..., true), to return to its initialized state before
// reuse with Set(..., true) on a new feature set.
class IntFeatureDist {
 public:
  // Constructs an empty calculator; Init must be called before use
  // (all arrays start NULL, size 0 — see the .cpp constructor).
  IntFeatureDist();
  ~IntFeatureDist();
  // Initialize the bool array to the given size of feature space.
  // The feature_map is just borrowed, and must exist for the entire
  // lifetime of the IntFeatureDist.
  void Init(const IntFeatureMap* feature_map);
  // Setup the map for the given indexed_features that have been indexed by
  // feature_map. After use, use Set(..., false) to reset to the initial state
  // as this is faster than calling Init for sparse spaces.
  void Set(const GenericVector<int>& indexed_features,
           int canonical_count, bool value);
  // Compute the distance between the given feature vector and the last
  // Set feature vector.
  double FeatureDistance(const GenericVector<int>& features) const;
  // Same computation with per-feature debug output via tprintf.
  double DebugFeatureDistance(const GenericVector<int>& features) const;
 private:
  // Clear all data.
  void Clear();
  // Size of the indexed feature space.
  int size_;
  // Total weight of features currently stored in the maps.
  double total_feature_weight_;
  // Pointer to IntFeatureMap given at Init to find offset features.
  const IntFeatureMap* feature_map_;
  // Array of bools indicating presence of a feature.
  bool* features_;
  // Array indicating the presence of a feature offset by one unit.
  bool* features_delta_one_;
  // Array indicating the presence of a feature offset by two units.
  bool* features_delta_two_;
};
} // namespace tesseract
#endif // TESSERACT_CLASSIFY_INTFEATUREDIST_H_
| C++ |
// Copyright 2011 Google Inc. All Rights Reserved.
// Author: rays@google.com (Ray Smith)
///////////////////////////////////////////////////////////////////////
// File: intfeaturedist.cpp
// Description: Fast set-difference-based feature distance calculator.
// Created: Thu Sep 01 13:07:30 PDT 2011
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#include "intfeaturedist.h"
#include "intfeaturemap.h"
namespace tesseract {
// Construct an empty calculator: no arrays allocated, no borrowed map.
// Init() must be called before the object is usable.
IntFeatureDist::IntFeatureDist()
    : size_(0),
      total_feature_weight_(0.0),
      feature_map_(NULL),
      features_(NULL),
      features_delta_one_(NULL),
      features_delta_two_(NULL) {}
// Releases the owned arrays via Clear(); the borrowed feature_map_ is
// not freed.
IntFeatureDist::~IntFeatureDist() {
  Clear();
}
// Initialize the table to the given size of feature space.
void IntFeatureDist::Init(const IntFeatureMap* feature_map) {
  size_ = feature_map->sparse_size();
  Clear();  // Release any arrays from a previous Init.
  feature_map_ = feature_map;
  // Value-initialization zeroes the arrays, i.e. all features absent.
  features_ = new bool[size_]();
  features_delta_one_ = new bool[size_]();
  features_delta_two_ = new bool[size_]();
  total_feature_weight_ = 0.0;
}
// Setup the map for the given indexed_features that have been indexed by
// feature_map. With value == true the features are marked present; calling
// again with value == false on the same features erases them, returning the
// maps to their initialized state.
void IntFeatureDist::Set(const GenericVector<int>& indexed_features,
                         int canonical_count, bool value) {
  total_feature_weight_ = canonical_count;
  for (int i = 0; i < indexed_features.size(); ++i) {
    const int f = indexed_features[i];
    // Exact position.
    features_[f] = value;
    // Mark every feature reachable by one offset step...
    for (int dir = -kNumOffsetMaps; dir <= kNumOffsetMaps; ++dir) {
      if (dir == 0) continue;
      const int f1 = feature_map_->OffsetFeature(f, dir);
      if (f1 < 0) continue;
      features_delta_one_[f1] = value;
      // ...and every feature reachable by two offset steps.
      for (int dir2 = -kNumOffsetMaps; dir2 <= kNumOffsetMaps; ++dir2) {
        if (dir2 == 0) continue;
        const int f2 = feature_map_->OffsetFeature(f1, dir2);
        if (f2 >= 0)
          features_delta_two_[f2] = value;
      }
    }
  }
}
// Compute the distance between the given feature vector and the last
// Set feature vector.
// Each test feature earns credit against the combined weight of both
// vectors: 2 for an exact presence match, 1.5 for presence at offset one,
// 1 for presence at offset two, and nothing otherwise.
double IntFeatureDist::FeatureDistance(
    const GenericVector<int>& features) const {
  const int num_test_features = features.size();
  const double denominator = total_feature_weight_ + num_test_features;
  double misses = denominator;
  for (int i = 0; i < num_test_features; ++i) {
    const int index = features[i];
    const double weight = 1.0;
    if (features_[index]) {
      misses -= 2.0 * weight;  // Exact hit.
    } else if (features_delta_one_[index]) {
      misses -= 1.5 * weight;  // Hit at offset one.
    } else if (features_delta_two_[index]) {
      misses -= 1.0 * weight;  // Near miss: hit at offset two.
    }
  }
  return misses / denominator;
}
// Compute the distance between the given feature vector and the last
// Set feature vector.
// Scoring is identical to FeatureDistance, but each test feature's hit
// class is printed, and all three presence tables are dumped afterwards
// for debugging.
double IntFeatureDist::DebugFeatureDistance(
    const GenericVector<int>& features) const {
  int num_test_features = features.size();
  double denominator = total_feature_weight_ + num_test_features;
  double misses = denominator;
  for (int i = 0; i < num_test_features; ++i) {
    int index = features[i];
    double weight = 1.0;
    INT_FEATURE_STRUCT f = feature_map_->InverseMapFeature(features[i]);
    tprintf("Testing feature weight %g:", weight);
    f.print();
    if (features_[index]) {
      // A perfect match.
      misses -= 2.0 * weight;
      tprintf("Perfect hit\n");
    } else if (features_delta_one_[index]) {
      misses -= 1.5 * weight;
      tprintf("-1 hit\n");
    } else if (features_delta_two_[index]) {
      // A near miss.
      misses -= 1.0 * weight;
      tprintf("-2 hit\n");
    } else {
      tprintf("Total miss\n");
    }
  }
  // Dump the full contents of the three tables for comparison.
  tprintf("Features present:");
  for (int i = 0; i < size_; ++i) {
    if (features_[i]) {
      INT_FEATURE_STRUCT f = feature_map_->InverseMapFeature(i);
      f.print();
    }
  }
  tprintf("\nMinus one features:");
  for (int i = 0; i < size_; ++i) {
    if (features_delta_one_[i]) {
      INT_FEATURE_STRUCT f = feature_map_->InverseMapFeature(i);
      f.print();
    }
  }
  tprintf("\nMinus two features:");
  for (int i = 0; i < size_; ++i) {
    if (features_delta_two_[i]) {
      INT_FEATURE_STRUCT f = feature_map_->InverseMapFeature(i);
      f.print();
    }
  }
  tprintf("\n");
  return misses / denominator;
}
// Clear all data: release the three presence tables, then null the
// pointers so the object may be re-Init()ed or destroyed safely.
void IntFeatureDist::Clear() {
  delete [] features_;
  delete [] features_delta_one_;
  delete [] features_delta_two_;
  features_ = NULL;
  features_delta_one_ = NULL;
  features_delta_two_ = NULL;
}
} // namespace tesseract
| C++ |
/******************************************************************************
** Filename: mfx.c
** Purpose: Micro feature extraction routines
** Author: Dan Johnson
** History: 7/21/89, DSJ, Created.
**
** (c) Copyright Hewlett-Packard Company, 1988.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
******************************************************************************/
/**----------------------------------------------------------------------------
Include Files and Type Defines
----------------------------------------------------------------------------**/
#include "mfdefs.h"
#include "mfoutline.h"
#include "clusttool.h" //NEEDED
#include "const.h"
#include "intfx.h"
#include "normalis.h"
#include "params.h"
#include <math.h>
/**----------------------------------------------------------------------------
Variables
----------------------------------------------------------------------------**/
/* old numbers corresponded to 10.0 degrees and 80.0 degrees */
/* current values are tan(22.5 deg) = sqrt(2)-1 and tan(67.5 deg) = sqrt(2)+1 */
double_VAR(classify_min_slope, 0.414213562,
           "Slope below which lines are called horizontal");
double_VAR(classify_max_slope, 2.414213562,
           "Slope above which lines are called vertical");
/**----------------------------------------------------------------------------
Macros
----------------------------------------------------------------------------**/
/* miscellaneous macros */
// Converts an angle in radians (negative angles are first wrapped by adding
// 2*PI) to a fraction of a full turn, nominally in [0, 1).
#define NormalizeAngle(A) ( (((A)<0)?((A)+2*PI):(A)) / (2*PI) )
/*----------------------------------------------------------------------------
Private Function Prototypes
-----------------------------------------------------------------------------*/
// See the definitions below for full operation comments.
FLOAT32 ComputeOrientation(MFEDGEPT *Start, MFEDGEPT *End);
MICROFEATURES ConvertToMicroFeatures(MFOUTLINE Outline,
                                     MICROFEATURES MicroFeatures);
MICROFEATURE ExtractMicroFeature(MFOUTLINE Start, MFOUTLINE End);
/**----------------------------------------------------------------------------
Public Code
----------------------------------------------------------------------------**/
/*---------------------------------------------------------------------------*/
CHAR_FEATURES BlobMicroFeatures(TBLOB *Blob, const DENORM& bl_denorm,
                                const DENORM& cn_denorm,
                                const INT_FX_RESULT_STRUCT& fx_info) {
  /*
   ** Parameters:
   ** Blob blob to extract micro-features from
   ** bl_denorm baseline normalization (unused in this routine)
   ** cn_denorm character normalization applied to each outline
   ** fx_info integer feature extraction summary (unused in this routine)
   ** Operation:
   ** This routine extracts micro-features from the specified
   ** blob and returns a list of the micro-features. All
   ** micro-features are normalized according to the specified
   ** line statistics.
   ** Return: List of micro-features extracted from the blob.
   ** Exceptions: none
   ** History: 7/21/89, DSJ, Created.
   */
  MICROFEATURES MicroFeatures = NIL_LIST;
  LIST Outlines;
  LIST RemainingOutlines;
  MFOUTLINE Outline;
  if (Blob != NULL) {
    Outlines = ConvertBlob(Blob);
    // First pass: character-normalize every outline.
    RemainingOutlines = Outlines;
    iterate(RemainingOutlines) {
      Outline = (MFOUTLINE) first_node (RemainingOutlines);
      CharNormalizeOutline(Outline, cn_denorm);
    }
    // Second pass: find and mark direction changes, then convert each
    // outline to micro-features, accumulating them all into one list.
    RemainingOutlines = Outlines;
    iterate(RemainingOutlines) {
      Outline = (MFOUTLINE) first_node(RemainingOutlines);
      FindDirectionChanges(Outline, classify_min_slope, classify_max_slope);
      MarkDirectionChanges(Outline);
      MicroFeatures = ConvertToMicroFeatures(Outline, MicroFeatures);
    }
    FreeOutlines(Outlines);
  }
  return ((CHAR_FEATURES) MicroFeatures);
} /* BlobMicroFeatures */
/*---------------------------------------------------------------------------
Private Code
---------------------------------------------------------------------------*/
/*---------------------------------------------------------------------------*/
// Computes the orientation parameter of the micro-feature running from
// Start to End: the angle of the vector Start->End expressed as a fraction
// of a full turn in [0, 1), where 0 is 0 degrees and 1 would be 360 degrees
// (1 itself is excluded since it is the same orientation as 0).
// Assumes Start and End are not the same point.
FLOAT32 ComputeOrientation(MFEDGEPT *Start, MFEDGEPT *End) {
  FLOAT32 turn_fraction = NormalizeAngle (AngleFrom (Start->Point, End->Point));
  /* ensure that round-off errors do not put circular param out of range */
  if (turn_fraction < 0 || turn_fraction >= 1)
    turn_fraction = 0;
  return turn_fraction;
} /* ComputeOrientation */
/*---------------------------------------------------------------------------*/
MICROFEATURES ConvertToMicroFeatures(MFOUTLINE Outline,
                                     MICROFEATURES MicroFeatures) {
  /*
   ** Parameters:
   ** Outline outline to extract micro-features from
   ** MicroFeatures list of micro-features to add to
   ** Globals: none
   ** Operation:
   ** This routine walks the extremities of Outline and, for each
   ** segment between consecutive extremities whose endpoint is not
   ** hidden, extracts one micro-feature and pushes it onto the list.
   ** Degenerate outlines contribute nothing.
   ** Return: List of micro-features with new features added to front.
   ** Exceptions: none
   ** History: 7/26/89, DSJ, Created.
   */
  MFOUTLINE Current;
  MFOUTLINE Last;
  MFOUTLINE First;
  MICROFEATURE NewFeature;
  if (DegenerateOutline (Outline))
    return (MicroFeatures);
  // Start from an extremity so the do-while can detect a full circuit.
  First = NextExtremity (Outline);
  Last = First;
  do {
    Current = NextExtremity (Last);
    if (!PointAt(Current)->Hidden) {
      NewFeature = ExtractMicroFeature (Last, Current);
      if (NewFeature != NULL)
        MicroFeatures = push (MicroFeatures, NewFeature);
    }
    Last = Current;
  }
  while (Last != First);
  return (MicroFeatures);
} /* ConvertToMicroFeatures */
/*---------------------------------------------------------------------------*/
MICROFEATURE ExtractMicroFeature(MFOUTLINE Start, MFOUTLINE End) {
  /*
   ** Parameters:
   ** Start starting point of micro-feature
   ** End ending point of micro-feature
   ** Globals: none
   ** Operation:
   ** This routine computes the feature parameters which describe
   ** the micro-feature that starts at Start and ends at End.
   ** A new micro-feature is allocated, filled with the feature
   ** parameters, and returned.
   ** NOTE(review): older versions returned NULL (with a warning and
   ** an outline dump) when Start and End were the same point; the
   ** current implementation always returns a feature, but the caller
   ** (ConvertToMicroFeatures) still guards against NULL.
   ** Return: New micro-feature.
   ** Exceptions: none
   ** History: 7/26/89, DSJ, Created.
   ** 11/17/89, DSJ, Added handling for Start and End same point.
   */
  MICROFEATURE NewFeature;
  MFEDGEPT *P1, *P2;
  P1 = PointAt(Start);
  P2 = PointAt(End);
  NewFeature = NewMicroFeature ();
  // Position is the midpoint of the segment; length and orientation come
  // from the vector P1->P2.
  NewFeature[XPOSITION] = AverageOf(P1->Point.x, P2->Point.x);
  NewFeature[YPOSITION] = AverageOf(P1->Point.y, P2->Point.y);
  NewFeature[MFLENGTH] = DistanceBetween(P1->Point, P2->Point);
  NewFeature[ORIENTATION] = NormalizedAngleFrom(&P1->Point, &P2->Point, 1.0);
  NewFeature[FIRSTBULGE] = 0.0f; // deprecated
  NewFeature[SECONDBULGE] = 0.0f; // deprecated
  return NewFeature;
} /* ExtractMicroFeature */
| C++ |
/******************************************************************************
** Filename: intproto.h
** Purpose: Definition of data structures for integer protos.
** Author: Dan Johnson
** History: Thu Feb 7 12:58:45 1991, DSJ, Created.
**
** (c) Copyright Hewlett-Packard Company, 1988.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
******************************************************************************/
#ifndef INTPROTO_H
#define INTPROTO_H
/**----------------------------------------------------------------------------
Include Files and Type Defines
----------------------------------------------------------------------------**/
#include "genericvector.h"
#include "matchdefs.h"
#include "mfoutline.h"
#include "protos.h"
#include "scrollview.h"
#include "unicharset.h"
class FCOORD;
/* define order of params in pruners */
#define PRUNER_X 0
#define PRUNER_Y 1
#define PRUNER_ANGLE 2
/* definition of coordinate system offsets for each table parameter */
#define ANGLE_SHIFT (0.0)
#define X_SHIFT (0.5)
#define Y_SHIFT (0.5)
#define MAX_PROTO_INDEX 24
// Number of bits in one pruner word (a uinT32).
#define BITS_PER_WERD static_cast<int>(8 * sizeof(uinT32))
/* Script detection: increase this number to 128 */
#define MAX_NUM_CONFIGS 64
#define MAX_NUM_PROTOS 512
#define PROTOS_PER_PROTO_SET 64
#define MAX_NUM_PROTO_SETS (MAX_NUM_PROTOS / PROTOS_PER_PROTO_SET)
#define NUM_PP_PARAMS 3
#define NUM_PP_BUCKETS 64
#define NUM_CP_BUCKETS 24
#define CLASSES_PER_CP 32
// Bits of evidence recorded per class in each class-pruner word.
#define NUM_BITS_PER_CLASS 2
#define CLASS_PRUNER_CLASS_MASK (~(~0 << NUM_BITS_PER_CLASS))
#define CLASSES_PER_CP_WERD (CLASSES_PER_CP / NUM_BITS_PER_CLASS)
#define PROTOS_PER_PP_WERD BITS_PER_WERD
#define BITS_PER_CP_VECTOR (CLASSES_PER_CP * NUM_BITS_PER_CLASS)
// Derived sizes below round up to whole words where necessary.
#define MAX_NUM_CLASS_PRUNERS ((MAX_NUM_CLASSES + CLASSES_PER_CP - 1) / \
                               CLASSES_PER_CP)
#define WERDS_PER_CP_VECTOR (BITS_PER_CP_VECTOR / BITS_PER_WERD)
#define WERDS_PER_PP_VECTOR ((PROTOS_PER_PROTO_SET+BITS_PER_WERD-1)/ \
                             BITS_PER_WERD)
#define WERDS_PER_PP (NUM_PP_PARAMS * NUM_PP_BUCKETS * \
                      WERDS_PER_PP_VECTOR)
#define WERDS_PER_CP (NUM_CP_BUCKETS * NUM_CP_BUCKETS * \
                      NUM_CP_BUCKETS * WERDS_PER_CP_VECTOR)
#define WERDS_PER_CONFIG_VEC ((MAX_NUM_CONFIGS + BITS_PER_WERD - 1) / \
                              BITS_PER_WERD)
/* The first 3 dimensions of the CLASS_PRUNER_STRUCT are the
 * 3 axes of the quantized feature space.
 * The position of the bits recorded for each class in the
 * 4th dimension is determined by using CPrunerWordIndexFor(c),
 * where c is the corresponding class id. */
struct CLASS_PRUNER_STRUCT {
  uinT32 p[NUM_CP_BUCKETS][NUM_CP_BUCKETS][NUM_CP_BUCKETS][WERDS_PER_CP_VECTOR];
};
// A single integer proto.
// NOTE(review): A, B, C appear to be quantized line-equation parameters and
// Angle the quantized direction -- confirm against the proto-building code.
typedef struct
{
  inT8 A;
  uinT8 B;
  inT8 C;
  uinT8 Angle;
  uinT32 Configs[WERDS_PER_CONFIG_VEC];  // One bit per config (see
                                         // WERDS_PER_CONFIG_VEC).
}
INT_PROTO_STRUCT, *INT_PROTO;
// Proto pruner: one bit vector per bucket of each of the 3 pruner params.
typedef uinT32 PROTO_PRUNER[NUM_PP_PARAMS][NUM_PP_BUCKETS][WERDS_PER_PP_VECTOR];
// A set of up to PROTOS_PER_PROTO_SET protos together with their pruner.
typedef struct
{
  PROTO_PRUNER ProtoPruner;
  INT_PROTO_STRUCT Protos[PROTOS_PER_PROTO_SET];
}
PROTO_SET_STRUCT, *PROTO_SET;
typedef uinT32 CONFIG_PRUNER[NUM_PP_PARAMS][NUM_PP_BUCKETS][4];
// One character class: its protos (grouped in sets), per-proto lengths and
// per-config lengths.
typedef struct
{
  uinT16 NumProtos;    // Total number of protos in use.
  uinT8 NumProtoSets;  // Number of allocated entries in ProtoSets.
  uinT8 NumConfigs;    // Number of configurations.
  PROTO_SET ProtoSets[MAX_NUM_PROTO_SETS];
  uinT8 *ProtoLengths;
  uinT16 ConfigLengths[MAX_NUM_CONFIGS];
  int font_set_id; // FontSet id, see above
}
INT_CLASS_STRUCT, *INT_CLASS;
// The full set of integer templates: all classes plus their class pruners.
typedef struct
{
  int NumClasses;
  int NumClassPruners;
  INT_CLASS Class[MAX_NUM_CLASSES];
  CLASS_PRUNER_STRUCT* ClassPruners[MAX_NUM_CLASS_PRUNERS];
}
INT_TEMPLATES_STRUCT, *INT_TEMPLATES;
/* definitions of integer features*/
#define MAX_NUM_INT_FEATURES 512
#define INT_CHAR_NORM_RANGE 256
// A single quantized feature: position (X, Y) and direction Theta, each
// reduced to 8 bits. CP_misses is a per-feature counter -- presumably
// class-pruner misses; confirm against the int matcher code.
struct INT_FEATURE_STRUCT {
  INT_FEATURE_STRUCT() : X(0), Y(0), Theta(0), CP_misses(0) { }
  // Builds a feature from an FCOORD for position with all the necessary
  // clipping and rounding.
  INT_FEATURE_STRUCT(const FCOORD& pos, uinT8 theta);
  // Builds a feature from ints with all the necessary clipping and casting.
  INT_FEATURE_STRUCT(int x, int y, int theta);
  uinT8 X;      // Quantized x-position.
  uinT8 Y;      // Quantized y-position.
  uinT8 Theta;  // Quantized direction.
  inT8 CP_misses;
  // Prints the feature as "(X,Y):Theta" to the tesseract log.
  void print() const {
    tprintf("(%d,%d):%d\n", X, Y, Theta);
  }
};
typedef INT_FEATURE_STRUCT *INT_FEATURE;
typedef INT_FEATURE_STRUCT INT_FEATURE_ARRAY[MAX_NUM_INT_FEATURES];
// Selects what the int-matcher debugging code displays: adaptive templates,
// static (pre-trained) templates, the shape index, or both template sets.
enum IntmatcherDebugAction {
  IDA_ADAPTIVE,
  IDA_STATIC,
  IDA_SHAPE_INDEX,
  IDA_BOTH
};
/**----------------------------------------------------------------------------
Macros
----------------------------------------------------------------------------**/
// Accessors over INT_CLASS (C) / INT_TEMPLATES (T) and proto/class ids.
#define MaxNumIntProtosIn(C) (C->NumProtoSets * PROTOS_PER_PROTO_SET)
// Which proto set a proto id lives in, and its index within that set.
#define SetForProto(P) (P / PROTOS_PER_PROTO_SET)
#define IndexForProto(P) (P % PROTOS_PER_PROTO_SET)
#define ProtoForProtoId(C,P) (&((C->ProtoSets[SetForProto (P)])-> \
                                Protos [IndexForProto (P)]))
// Word index, bit index and single-bit mask of a proto id within its
// proto-pruner vector.
#define PPrunerWordIndexFor(I) (((I) % PROTOS_PER_PROTO_SET) / \
                                PROTOS_PER_PP_WERD)
#define PPrunerBitIndexFor(I) ((I) % PROTOS_PER_PP_WERD)
#define PPrunerMaskFor(I) (1 << PPrunerBitIndexFor (I))
#define MaxNumClassesIn(T) (T->NumClassPruners * CLASSES_PER_CP)
#define LegalClassId(c) ((c) >= 0 && (c) <= MAX_CLASS_ID)
#define UnusedClassIdIn(T,c) ((T)->Class[c] == NULL)
#define ClassForClassId(T,c) ((T)->Class[c])
// Fixed: the INT_TEMPLATES_STRUCT member is ClassPruners (plural); the old
// expansion ((T)->ClassPruner) referenced a nonexistent member and could
// never have compiled if used.
#define ClassPrunersFor(T) ((T)->ClassPruners)
// Which class pruner holds class c, and where its evidence bits live.
#define CPrunerIdFor(c) ((c) / CLASSES_PER_CP)
#define CPrunerFor(T,c) ((T)->ClassPruners[CPrunerIdFor(c)])
#define CPrunerWordIndexFor(c) (((c) % CLASSES_PER_CP) / CLASSES_PER_CP_WERD)
#define CPrunerBitIndexFor(c) (((c) % CLASSES_PER_CP) % CLASSES_PER_CP_WERD)
// Mask carrying evidence level L (0-based) for class c, shifted into the
// class's NUM_BITS_PER_CLASS-wide slot.
#define CPrunerMaskFor(L,c) (((L)+1) << CPrunerBitIndexFor (c) * NUM_BITS_PER_CLASS)
/* DEBUG macros*/
#define PRINT_MATCH_SUMMARY 0x001
#define DISPLAY_FEATURE_MATCHES 0x002
#define DISPLAY_PROTO_MATCHES 0x004
#define PRINT_FEATURE_MATCHES 0x008
#define PRINT_PROTO_MATCHES 0x010
#define CLIP_MATCH_EVIDENCE 0x020
#define MatchDebuggingOn(D) (D)
#define PrintMatchSummaryOn(D) ((D) & PRINT_MATCH_SUMMARY)
#define DisplayFeatureMatchesOn(D) ((D) & DISPLAY_FEATURE_MATCHES)
#define DisplayProtoMatchesOn(D) ((D) & DISPLAY_PROTO_MATCHES)
#define PrintFeatureMatchesOn(D) ((D) & PRINT_FEATURE_MATCHES)
#define PrintProtoMatchesOn(D) ((D) & PRINT_PROTO_MATCHES)
#define ClipMatchEvidenceOn(D) ((D) & CLIP_MATCH_EVIDENCE)
/**----------------------------------------------------------------------------
Public Function Prototypes
----------------------------------------------------------------------------**/
// Template/class/proto construction and registration.
void AddIntClass(INT_TEMPLATES Templates, CLASS_ID ClassId, INT_CLASS Class);
int AddIntConfig(INT_CLASS Class);
int AddIntProto(INT_CLASS Class);
void AddProtoToClassPruner(PROTO Proto,
                           CLASS_ID ClassId,
                           INT_TEMPLATES Templates);
void AddProtoToProtoPruner(PROTO Proto, int ProtoId,
                           INT_CLASS Class, bool debug);
// Map a param value to its bucket index (CircBucketFor presumably wraps
// circular params such as angle -- see intproto.cpp).
int BucketFor(FLOAT32 Param, FLOAT32 Offset, int NumBuckets);
int CircBucketFor(FLOAT32 Param, FLOAT32 Offset, int NumBuckets);
void UpdateMatchDisplay();
void ConvertConfig(BIT_VECTOR Config, int ConfigId, INT_CLASS Class);
// Debug display of individual features and protos with their evidence.
void DisplayIntFeature(const INT_FEATURE_STRUCT* Feature, FLOAT32 Evidence);
void DisplayIntProto(INT_CLASS Class, PROTO_ID ProtoId, FLOAT32 Evidence);
// Allocation and release of classes/templates.
INT_CLASS NewIntClass(int MaxNumProtos, int MaxNumConfigs);
INT_TEMPLATES NewIntTemplates();
void free_int_templates(INT_TEMPLATES templates);
void ShowMatchDisplay();
namespace tesseract {
// Clears the given window and draws the featurespace guides for the
// appropriate normalization method.
void ClearFeatureSpaceWindow(NORM_METHOD norm_method, ScrollView* window);
} // namespace tesseract.
/*----------------------------------------------------------------------------*/
#ifndef GRAPHICS_DISABLED
// Debug-display helpers; compiled out when GRAPHICS_DISABLED is defined.
void RenderIntFeature(ScrollView *window, const INT_FEATURE_STRUCT* Feature,
                      ScrollView::Color color);
void InitIntMatchWindowIfReqd();
void InitProtoDisplayWindowIfReqd();
void InitFeatureDisplayWindowIfReqd();
// Creates a window of the appropriate size for displaying elements
// in feature space.
ScrollView* CreateFeatureSpaceWindow(const char* name, int xpos, int ypos);
#endif  // GRAPHICS_DISABLED
#endif
| C++ |
/******************************************************************************
** Filename: xform2d.c
** Purpose: Library routines for performing 2D point transformations
** Author: Dan Johnson
** History: Fri Sep 22 09:54:17 1989, DSJ, Created.
**
** (c) Copyright Hewlett-Packard Company, 1988.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
******************************************************************************/
/**----------------------------------------------------------------------------
Include Files and Type Defines
----------------------------------------------------------------------------**/
#include "xform2d.h"
#include <math.h>
/**----------------------------------------------------------------------------
Public Code
----------------------------------------------------------------------------**/
// Resets M to the identity transform: unit scale, no shear, no translation.
void InitMatrix(MATRIX_2D *M) {
  M->a = M->d = 1;
  M->b = M->c = 0;
  M->tx = M->ty = 0;
}
void CopyMatrix(MATRIX_2D *A, MATRIX_2D *B) {
B->a = A->a;
B->b = A->b;
B->c = A->c;
B->d = A->d;
B->tx = A->tx;
B->ty = A->ty;
}
// Pre-translates the coordinate system by (x, y): the offset is mapped
// through the linear part of M before being added to the translation.
void TranslateMatrix(MATRIX_2D *M, FLOAT32 x, FLOAT32 y) {
  M->tx += M->a * x + M->c * y;
  M->ty += M->b * x + M->d * y;
}
// Scales the x-axis of M by x and the y-axis by y.
void ScaleMatrix(MATRIX_2D *M, FLOAT32 x, FLOAT32 y) {
  M->a *= x;
  M->b *= x;
  M->c *= y;
  M->d *= y;
}
// Mirror the coordinate system about the x-axis, the y-axis, or both,
// expressed as negative scale factors.
void MirrorMatrixInX(MATRIX_2D *M) {
  ScaleMatrix(M, -1, 1);
}
void MirrorMatrixInY(MATRIX_2D *M) {
  ScaleMatrix(M, 1, -1);
}
void MirrorMatrixInXY(MATRIX_2D *M) {
  ScaleMatrix(M, -1, -1);
}
// Returns the transformed x coordinate of point (x, y) mapped through M.
FLOAT32 MapX(MATRIX_2D *M, FLOAT32 x, FLOAT32 y) {
  return M->a * x + M->c * y + M->tx;
}
// Returns the transformed y coordinate of point (x, y) mapped through M.
FLOAT32 MapY(MATRIX_2D *M, FLOAT32 x, FLOAT32 y) {
  return M->b * x + M->d * y + M->ty;
}
void MapPoint(MATRIX_2D *M, const FPOINT &A, FPOINT* B) {
B->x = MapX(M, A.x, A.y);
B->y = MapY(M, A.x, A.y);
}
// Map a direction vector (dx, dy) through the linear part of M only --
// translation does not apply to directions. x component:
FLOAT32 MapDx(MATRIX_2D *M, FLOAT32 dx, FLOAT32 dy) {
  return M->a * dx + M->c * dy;
}
// y component of the mapped direction vector.
FLOAT32 MapDy(MATRIX_2D *M, FLOAT32 dx, FLOAT32 dy) {
  return M->b * dx + M->d * dy;
}
/*---------------------------------------------------------------------------*/
// Rotates the coordinate system (as specified by Matrix) about its origin
// by Angle radians. In matrix notation: Matrix = R x Matrix, where
//
//   R = [  cos Angle   sin Angle   0 ]
//       [ -sin Angle   cos Angle   0 ]
//       [      0           0       1 ]
void RotateMatrix(MATRIX_2D_PTR Matrix, FLOAT32 Angle) {
  FLOAT32 cos_angle = cos ((double) Angle);
  FLOAT32 sin_angle = sin ((double) Angle);
  // Compute all four rotated components from the old values before storing,
  // since each new value depends on the pre-rotation a/b/c/d.
  FLOAT32 new_a = Matrix->a * cos_angle + Matrix->c * sin_angle;
  FLOAT32 new_b = Matrix->b * cos_angle + Matrix->d * sin_angle;
  FLOAT32 new_c = Matrix->a * -sin_angle + Matrix->c * cos_angle;
  FLOAT32 new_d = Matrix->b * -sin_angle + Matrix->d * cos_angle;
  Matrix->a = new_a;
  Matrix->b = new_b;
  Matrix->c = new_c;
  Matrix->d = new_d;
} /* RotateMatrix */
| C++ |
/******************************************************************************
** Filename: intfx.h
** Purpose: Interface to high level integer feature extractor.
** Author: Robert Moss
** History: Tue May 21 15:51:57 MDT 1991, RWM, Created.
**
** (c) Copyright Hewlett-Packard Company, 1988.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
******************************************************************************/
#ifndef INTFX_H
#define INTFX_H
/**----------------------------------------------------------------------------
Include Files and Type Defines
----------------------------------------------------------------------------**/
#include "blobs.h"
#include "intproto.h"
#include "normalis.h"
#include <math.h>
class DENORM;
namespace tesseract {
class TrainingSample;
}
// Summary statistics produced by integer feature extraction.
struct INT_FX_RESULT_STRUCT {
  inT32 Length; // total length of all outlines
  inT16 Xmean, Ymean; // center of mass of all outlines
  inT16 Rx, Ry; // radius of gyration
  inT16 NumBL, NumCN; // number of features extracted
  inT16 Width; // Width of blob in BLN coords.
  uinT8 YBottom; // Bottom of blob in BLN coords.
  uinT8 YTop; // Top of blob in BLN coords.
};
// The standard feature length (64.0 / 5 = 12.8 in normalized units).
const double kStandardFeatureLength = 64.0 / 5;
/**----------------------------------------------------------------------------
Public Function Prototypes
----------------------------------------------------------------------------**/
// Module initialization for the integer feature extractor.
void InitIntegerFX();
// Returns a vector representing the direction of a feature with the given
// theta direction in an INT_FEATURE_STRUCT.
FCOORD FeatureDirection(uinT8 theta);
namespace tesseract {
// Generates a TrainingSample from a TBLOB. Extracts features and sets
// the bounding box, so classifiers that operate on the image can work.
// TODO(rays) BlobToTrainingSample must remain a global function until
// the FlexFx and FeatureDescription code can be removed and LearnBlob
// made a member of Classify.
TrainingSample* BlobToTrainingSample(
    const TBLOB& blob, bool nonlinear_norm, INT_FX_RESULT_STRUCT* fx_info,
    GenericVector<INT_FEATURE_STRUCT>* bl_features);
}
// Deprecated! Prefer tesseract::Classify::ExtractFeatures instead.
bool ExtractIntFeat(const TBLOB& blob,
                    bool nonlinear_norm,
                    INT_FEATURE_ARRAY BLFeat,
                    INT_FEATURE_ARRAY CNFeat,
                    INT_FX_RESULT_STRUCT* Results);
#endif
#endif
| C++ |
// Copyright 2011 Google Inc. All Rights Reserved.
// Author: rays@google.com (Ray Smith)
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#ifndef THIRD_PARTY_TESSERACT_CLASSIFY_ERRORCOUNTER_H_
#define THIRD_PARTY_TESSERACT_CLASSIFY_ERRORCOUNTER_H_
#include "genericvector.h"
#include "matrix.h"
#include "statistc.h"
struct Pix;
template <typename T> class UnicityTable;
namespace tesseract {
struct FontInfo;
class FontInfoTable;
class SampleIterator;
class ShapeClassifier;
class TrainingSample;
struct UnicharRating;
// Enumeration of the different types of error count.
// Error counts work as follows:
//
// Ground truth is a valid unichar-id / font-id pair:
// Number of classifier answers?
// 0 >0
// CT_REJECT unichar-id matches top shape?
// __________ yes! no
// CT_UNICHAR_TOP_OK CT_UNICHAR_TOP1_ERR
// Top shape-id has multiple unichars? 2nd shape unichar id matches?
// yes! no yes! no
// CT_OK_MULTI_UNICHAR | _____ CT_UNICHAR_TOP2_ERR
// Font attributes match? Any unichar-id matches?
// yes! no yes! no
// CT_FONT_ATTR_OK CT_FONT_ATTR_ERR ______ CT_UNICHAR_TOPN_ERR
// | __________________ _________________
// Top shape-id has multiple font attrs?
// yes! no
// CT_OK_MULTI_FONT
// _____________________________
//
// Note that multiple counts may be activated for a single sample!
//
// Ground truth is for a fragment/n-gram that is NOT in the unicharset.
// This is called junk and is expected to be rejected:
// Number of classifier answers?
// 0 >0
// CT_REJECTED_JUNK CT_ACCEPTED_JUNK
//
// Also, CT_NUM_RESULTS stores the mean number of results, and CT_RANK stores
// the mean rank of the correct result, counting from 0, and with an error
// receiving the number of answers as the correct rank.
//
// Keep in sync with the ReportString function.
enum CountTypes {
  CT_UNICHAR_TOP_OK, // Top shape contains correct unichar id.
  // The rank of the results in TOP1, TOP2, TOPN is determined by a gap of
  // kRatingEpsilon from the first result in each group. The real top choice
  // is measured using TOPTOP.
  CT_UNICHAR_TOP1_ERR, // Top shape does not contain correct unichar id.
  CT_UNICHAR_TOP2_ERR, // Top 2 shapes don't contain correct unichar id.
  CT_UNICHAR_TOPN_ERR, // No output shape contains correct unichar id.
  CT_UNICHAR_TOPTOP_ERR, // Very top choice not correct.
  CT_OK_MULTI_UNICHAR, // Top shape id has correct unichar id, and others.
  CT_OK_JOINED, // Top shape id is correct but marked joined.
  CT_OK_BROKEN, // Top shape id is correct but marked broken.
  CT_REJECT, // Classifier hates this.
  // NOTE(review): the flow diagram above mentions CT_FONT_ATTR_OK, which has
  // no enumerator here; the OK case appears to be implicit. Confirm against
  // the ReportString function this enum must stay in sync with.
  CT_FONT_ATTR_ERR, // Top unichar OK, but font attributes incorrect.
  CT_OK_MULTI_FONT, // CT_FONT_ATTR_OK but there are multiple font attrs.
  CT_NUM_RESULTS, // Number of answers produced.
  CT_RANK, // Rank of correct answer.
  CT_REJECTED_JUNK, // Junk that was correctly rejected.
  CT_ACCEPTED_JUNK, // Junk that was incorrectly classified otherwise.
  CT_SIZE // Number of types for array sizing.
};
// Class to encapsulate all the functionality and sub-structures required
// to count errors for an isolated character classifier (ShapeClassifier).
class ErrorCounter {
public:
// Computes and returns the unweighted boosting_mode error rate of the given
// classifier. Can be used for testing, or inside an iterative training
// system, including one that uses boosting.
// report_levels:
// 0 = no output.
// 1 = bottom-line error rate.
// 2 = bottom-line error rate + time.
// 3 = font-level error rate + time.
// 4 = list of all errors + short classifier debug output on 16 errors.
// 5 = list of all errors + short classifier debug output on 25 errors.
// * The boosting_mode determines which error type is used for computing the
// scaled_error output, and setting the is_error flag in the samples.
// * The fontinfo_table is used to get string font names for the debug
// output, and also to count font attributes errors.
// * The page_images vector may contain a Pix* (which may be NULL) for each
// page index assigned to the samples.
// * The it provides encapsulated iteration over some sample set.
// * The outputs unichar_error, scaled_error and totals_report are all
// optional.
// * If not NULL, unichar error gets the top1 unichar error rate.
// * Scaled_error gets the error chosen by boosting_mode weighted by the
// weights on the samples.
// * Fonts_report gets a string summarizing the error rates for each font in
// both human-readable form and as a tab-separated list of error counts.
// The human-readable form is all before the first tab.
// * The return value is the un-weighted version of the scaled_error.
static double ComputeErrorRate(ShapeClassifier* classifier,
int report_level, CountTypes boosting_mode,
const FontInfoTable& fontinfo_table,
const GenericVector<Pix*>& page_images,
SampleIterator* it,
double* unichar_error,
double* scaled_error,
STRING* fonts_report);
// Tests a pair of classifiers, debugging errors of the new against the old.
// See errorcounter.h for description of arguments.
// Iterates over the samples, calling the classifiers in normal/silent mode.
// If the new_classifier makes a boosting_mode error that the old_classifier
// does not, and the appropriate, it will then call the new_classifier again
// with a debug flag and a keep_this argument to find out what is going on.
static void DebugNewErrors(ShapeClassifier* new_classifier,
ShapeClassifier* old_classifier,
CountTypes boosting_mode,
const FontInfoTable& fontinfo_table,
const GenericVector<Pix*>& page_images,
SampleIterator* it);
private:
// Simple struct to hold an array of counts.
struct Counts {
Counts();
// Adds other into this for computing totals.
void operator+=(const Counts& other);
int n[CT_SIZE];
};
// Constructor is private. Only anticipated use of ErrorCounter is via
// the static ComputeErrorRate.
ErrorCounter(const UNICHARSET& unicharset, int fontsize);
~ErrorCounter();
// Accumulates the errors from the classifier results on a single sample.
// Returns true if debug is true and a CT_UNICHAR_TOPN_ERR error occurred.
// boosting_mode selects the type of error to be used for boosting and the
// is_error_ member of sample is set according to whether the required type
// of error occurred. The font_table provides access to font properties
// for error counting and shape_table is used to understand the relationship
// between unichar_ids and shape_ids in the results
bool AccumulateErrors(bool debug, CountTypes boosting_mode,
const FontInfoTable& font_table,
const GenericVector<UnicharRating>& results,
TrainingSample* sample);
// Accumulates counts for junk. Counts only whether the junk was correctly
// rejected or not.
bool AccumulateJunk(bool debug, const GenericVector<UnicharRating>& results,
TrainingSample* sample);
// Creates a report of the error rate. The report_level controls the detail
// that is reported to stderr via tprintf:
// 0 -> no output.
// >=1 -> bottom-line error rate.
// >=3 -> font-level error rate.
// boosting_mode determines the return value. It selects which (un-weighted)
// error rate to return.
// The fontinfo_table from MasterTrainer provides the names of fonts.
// The it determines the current subset of the training samples.
// If not NULL, the top-choice unichar error rate is saved in unichar_error.
// If not NULL, the report string is saved in fonts_report.
// (Ignoring report_level).
double ReportErrors(int report_level, CountTypes boosting_mode,
const FontInfoTable& fontinfo_table,
const SampleIterator& it,
double* unichar_error,
STRING* fonts_report);
// Sets the report string to a combined human and machine-readable report
// string of the error rates.
// Returns false if there is no data, leaving report unchanged, unless
// even_if_empty is true.
static bool ReportString(bool even_if_empty, const Counts& counts,
STRING* report);
// Computes the error rates and returns in rates which is an array of size
// CT_SIZE. Returns false if there is no data, leaving rates unchanged.
static bool ComputeRates(const Counts& counts, double rates[CT_SIZE]);
// Total scaled error used by boosting algorithms.
double scaled_error_;
// Difference in result rating to be thought of as an "equal" choice.
double rating_epsilon_;
// Vector indexed by font_id from the samples of error accumulators.
GenericVector<Counts> font_counts_;
// Counts of the results that map each unichar_id (from samples) to an
// incorrect shape_id.
GENERIC_2D_ARRAY<int> unichar_counts_;
// Count of the number of times each shape_id occurs, is correct, and multi-
// unichar.
GenericVector<int> multi_unichar_counts_;
// Histogram of scores (as percent) for correct answers.
STATS ok_score_hist_;
// Histogram of scores (as percent) for incorrect answers.
STATS bad_score_hist_;
// Unicharset for printing character ids in results.
const UNICHARSET& unicharset_;
};
} // namespace tesseract.
#endif /* THIRD_PARTY_TESSERACT_CLASSIFY_ERRORCOUNTER_H_ */
| C++ |
// Copyright 2010 Google Inc. All Rights Reserved.
// Author: rays@google.com (Ray Smith)
///////////////////////////////////////////////////////////////////////
// File: intfeaturemap.h
// Description: Encapsulation of IntFeatureSpace with IndexMapBiDi
// to provide a subspace mapping and fast feature lookup.
// Created: Tue Oct 26 08:58:30 PDT 2010
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#ifndef TESSERACT_CLASSIFY_INTFEATUREMAP_H__
#define TESSERACT_CLASSIFY_INTFEATUREMAP_H__
#include "intfeaturespace.h"
#include "indexmapbidi.h"
#include "intproto.h"
namespace tesseract {
class SampleIterator;
// Number of positive and negative offset maps.
static const int kNumOffsetMaps = 2;
// Class to map a feature space defined by INT_FEATURE_STRUCT to a compact
// down-sampled subspace of actually used features.
// The IntFeatureMap copes with 2 stages of transformation:
// The first step is down-sampling (re-quantization) and converting to a
// single index value from the 3-D input:
// INT_FEATURE_STRUCT <-> index feature (via IntFeatureSpace) and
// the second is a feature-space compaction to map only the feature indices
// that are actually used. This saves space in classifiers that are built
// using the mapped feature space.
// index (sparse) feature <-> map (compact) feature via IndexMapBiDi.
// Although the transformations are reversible, the inverses are lossy and do
// not return the exact input INT_FEATURE_STRUCT, due to the many->one nature
// of both transformations.
class IntFeatureMap {
 public:
  IntFeatureMap();
  ~IntFeatureMap();

  // Accessors.
  // Size of the index (sparse) feature space before compaction.
  int sparse_size() const {
    return feature_space_.Size();
  }
  // Size of the compact feature space after unused features are removed.
  int compact_size() const {
    return compact_size_;
  }
  const IntFeatureSpace& feature_space() const {
    return feature_space_;
  }
  const IndexMapBiDi& feature_map() const {
    return feature_map_;
  }

  // Pseudo-accessors.
  // INT_FEATURE_STRUCT -> index (sparse) feature.
  int IndexFeature(const INT_FEATURE_STRUCT& f) const;
  // INT_FEATURE_STRUCT -> map (compact) feature.
  int MapFeature(const INT_FEATURE_STRUCT& f) const;
  // index (sparse) feature -> map (compact) feature.
  int MapIndexFeature(int index_feature) const;
  // Lossy inverses of the above: the returned INT_FEATURE_STRUCT is the
  // quantized representative, not the exact original input.
  INT_FEATURE_STRUCT InverseIndexFeature(int index_feature) const;
  INT_FEATURE_STRUCT InverseMapFeature(int map_feature) const;
  void DeleteMapFeature(int map_feature);
  bool IsMapFeatureDeleted(int map_feature) const;

  // Copies the given feature_space and uses it as the index feature map
  // from INT_FEATURE_STRUCT.
  void Init(const IntFeatureSpace& feature_space);

  // Helper to return an offset index feature. In this context an offset
  // feature with a dir of +/-1 is a feature of a similar direction,
  // but shifted perpendicular to the direction of the feature. An offset
  // feature with a dir of +/-2 is feature at the same position, but rotated
  // by +/- one [compact] quantum. Returns the index of the generated offset
  // feature, or -1 if it doesn't exist. Dir should be in
  // [-kNumOffsetMaps, kNumOffsetMaps] to indicate the relative direction.
  // A dir of 0 is an identity transformation.
  // Both input and output are from the index(sparse) feature space, not
  // the mapped/compact feature space, but the offset feature is the minimum
  // distance moved from the input to guarantee that it maps to the next
  // available quantum in the mapped/compact space.
  int OffsetFeature(int index_feature, int dir) const;

  // Computes the features used by the subset of samples defined by
  // the iterator and sets up the feature mapping.
  // Returns the size of the compacted feature space.
  int FindNZFeatureMapping(SampleIterator* it);

  // After deleting some features, finish setting up the mapping, and map
  // all the samples. Returns the size of the compacted feature space.
  int FinalizeMapping(SampleIterator* it);

  // Indexes the given array of features to a vector of sorted indices.
  void IndexAndSortFeatures(const INT_FEATURE_STRUCT* features,
                            int num_features,
                            GenericVector<int>* sorted_features) const {
    feature_space_.IndexAndSortFeatures(features, num_features,
                                        sorted_features);
  }
  // Maps the given array of index/sparse features to an array of map/compact
  // features.
  // Assumes the input is sorted. The output indices are sorted and uniqued.
  // Returns the number of "missed" features, being features that
  // don't map to the compact feature space.
  int MapIndexedFeatures(const GenericVector<int>& index_features,
                         GenericVector<int>* map_features) const {
    return feature_map_.MapFeatures(index_features, map_features);
  }

  // Prints the map features from the set in human-readable form.
  void DebugMapFeatures(const GenericVector<int>& map_features) const;

 private:
  // Releases the offset_plus_/offset_minus_ tables.
  void Clear();

  // Helper to compute an offset index feature. In this context an offset
  // feature with a dir of +/-1 is a feature of a similar direction,
  // but shifted perpendicular to the direction of the feature. An offset
  // feature with a dir of +/-2 is feature at the same position, but rotated
  // by +/- one [compact] quantum. Returns the index of the generated offset
  // feature, or -1 if it doesn't exist. Dir should be in
  // [-kNumOffsetMaps, kNumOffsetMaps] to indicate the relative direction.
  // A dir of 0 is an identity transformation.
  // Both input and output are from the index(sparse) feature space, not
  // the mapped/compact feature space, but the offset feature is the minimum
  // distance moved from the input to guarantee that it maps to the next
  // available quantum in the mapped/compact space.
  int ComputeOffsetFeature(int index_feature, int dir) const;

  // True if the mapping has changed since it was last finalized.
  bool mapping_changed_;
  // Size of the compacted feature space, after unused features are removed.
  int compact_size_;
  // Feature space quantization definition and indexing from
  // INT_FEATURE_STRUCT.
  IntFeatureSpace feature_space_;
  // Mapping from indexed feature space to the compacted space with unused
  // features mapping to -1.
  IndexMapBiDi feature_map_;
  // Index tables to map a feature index to the corresponding feature after a
  // shift perpendicular to the feature direction, or a rotation in place.
  // An entry of -1 indicates that there is no corresponding feature.
  // Array of arrays of size feature_space_.Size() owned by this class.
  int* offset_plus_[kNumOffsetMaps];
  int* offset_minus_[kNumOffsetMaps];

  // Don't use default copy and assign!
  IntFeatureMap(const IntFeatureMap&);
  void operator=(const IntFeatureMap&);
};
} // namespace tesseract.
#endif // TESSERACT_CLASSIFY_INTFEATUREMAP_H__
| C++ |
/******************************************************************************
** Filename: flexfx.c
** Purpose: Interface to flexible feature extractor.
** Author: Dan Johnson
** History: Wed May 23 13:45:10 1990, DSJ, Created.
**
** (c) Copyright Hewlett-Packard Company, 1988.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
******************************************************************************/
/**----------------------------------------------------------------------------
Include Files and Type Defines
----------------------------------------------------------------------------**/
#include "flexfx.h"
#include "featdefs.h"
#include "emalloc.h"
#include <string.h>
#include <stdio.h>
/**----------------------------------------------------------------------------
Public Code
----------------------------------------------------------------------------**/
/*---------------------------------------------------------------------------*/
// Deprecated! Will be deleted soon!
// In the meantime, as all TBLOBs, Blob is in baseline normalized coords.
// See SetupBLCNDenorms in intfx.cpp for other args.
// Allocates a new character descriptor and fills it in by running every
// enabled feature extractor over Blob (in baseline normalized coords).
// Returns NULL (after releasing the descriptor) if any extractor fails.
// Deprecated! Will be deleted soon!
// See SetupBLCNDenorms in intfx.cpp for the meaning of the other args.
CHAR_DESC ExtractFlexFeatures(const FEATURE_DEFS_STRUCT &FeatureDefs,
                              TBLOB *Blob, const DENORM& bl_denorm,
                              const DENORM& cn_denorm,
                              const INT_FX_RESULT_STRUCT& fx_info) {
  CHAR_DESC char_desc = NewCharDescription(FeatureDefs);
  for (int type = 0; type < char_desc->NumFeatureSets; ++type) {
    // Skip feature sets that have no extractor registered.
    if (FeatureDefs.FeatureExtractors[type] == NULL ||
        FeatureDefs.FeatureExtractors[type]->Extractor == NULL)
      continue;
    char_desc->FeatureSets[type] =
        (FeatureDefs.FeatureExtractors[type])->Extractor(Blob, bl_denorm,
                                                         cn_denorm, fx_info);
    if (char_desc->FeatureSets[type] == NULL) {
      // One extractor failed: release everything and report failure.
      tprintf("Feature extractor for type %d = %s returned NULL!\n",
              type, FeatureDefs.FeatureDesc[type]->ShortName);
      FreeCharDescription(char_desc);
      return NULL;
    }
  }
  return char_desc;
}  /* ExtractFlexFeatures */
| C++ |
/******************************************************************************
** Filename: intfx.c
** Purpose: Integer character normalization & feature extraction
** Author: Robert Moss, rays@google.com (Ray Smith)
** History: Tue May 21 15:51:57 MDT 1991, RWM, Created.
** Tue Feb 28 10:42:00 PST 2012, vastly rewritten to allow
greyscale fx and non-linear
normalization.
**
** (c) Copyright Hewlett-Packard Company, 1988.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
******************************************************************************/
/**----------------------------------------------------------------------------
Include Files and Type Defines
----------------------------------------------------------------------------**/
#include "intfx.h"
#include "allheaders.h"
#include "ccutil.h"
#include "classify.h"
#include "const.h"
#include "helpers.h"
#include "intmatcher.h"
#include "linlsq.h"
#include "ndminx.h"
#include "normalis.h"
#include "statistc.h"
#include "trainingsample.h"
using tesseract::TrainingSample;
/**----------------------------------------------------------------------------
Global Data Definitions and Declarations
----------------------------------------------------------------------------**/
// Look up table for cos and sin to turn the intfx feature angle to a vector.
// Protected by atan_table_mutex.
// The entries are in binary degrees where a full circle is 256 binary degrees.
static float cos_table[INT_CHAR_NORM_RANGE];
static float sin_table[INT_CHAR_NORM_RANGE];
// Guards write access to AtanTable so we don't create it more than once.
tesseract::CCUtilMutex atan_table_mutex;
/**----------------------------------------------------------------------------
Public Code
----------------------------------------------------------------------------**/
/*---------------------------------------------------------------------------*/
void InitIntegerFX() {
  // Build the cos/sin lookup tables exactly once. The whole check runs
  // under the mutex so concurrent first calls cannot race on the tables.
  static bool atan_table_init = false;
  atan_table_mutex.Lock();
  if (!atan_table_init) {
    for (int theta = 0; theta < INT_CHAR_NORM_RANGE; ++theta) {
      // theta is in binary degrees: a full circle is INT_CHAR_NORM_RANGE.
      double radians = theta * 2 * PI / INT_CHAR_NORM_RANGE + PI;
      cos_table[theta] = cos(radians);
      sin_table[theta] = sin(radians);
    }
    atan_table_init = true;
  }
  atan_table_mutex.Unlock();
}
// Returns a vector representing the direction of a feature with the given
// theta direction in an INT_FEATURE_STRUCT.
FCOORD FeatureDirection(uinT8 theta) {
  // theta indexes the tables built by InitIntegerFX; until InitIntegerFX has
  // run, the static tables are all zero, so the result would be (0, 0).
  return FCOORD(cos_table[theta], sin_table[theta]);
}
namespace tesseract {
// Generates a TrainingSample from a TBLOB. Extracts features and sets
// the bounding box, so classifiers that operate on the image can work.
// TODO(rays) BlobToTrainingSample must remain a global function until
// the FlexFx and FeatureDescription code can be removed and LearnBlob
// made a member of Classify.
TrainingSample* BlobToTrainingSample(
    const TBLOB& blob, bool nonlinear_norm, INT_FX_RESULT_STRUCT* fx_info,
    GenericVector<INT_FEATURE_STRUCT>* bl_features) {
  // Extract both feature sets; the cn (character-normalized) features are
  // what the TrainingSample is built from.
  GenericVector<INT_FEATURE_STRUCT> cn_features;
  Classify::ExtractFeatures(blob, nonlinear_norm, bl_features,
                            &cn_features, fx_info, NULL);
  // TODO(rays) Use blob->PreciseBoundingBox() instead.
  TBOX box = blob.bounding_box();
  int num_features = fx_info->NumCN;
  if (num_features <= 0) return NULL;  // No features -> no sample.
  TrainingSample* sample = TrainingSample::CopyFromFeatures(
      *fx_info, box, &cn_features[0], num_features);
  if (sample != NULL) {
    // Map the box corners back through the denorm chain so the sample
    // carries its bounding box in original image coordinates.
    TPOINT tl, br;
    tl.x = box.left();
    tl.y = box.top();
    br.x = box.right();
    br.y = box.bottom();
    TPOINT orig_tl, orig_br;
    blob.denorm().DenormTransform(NULL, tl, &orig_tl);
    blob.denorm().DenormTransform(NULL, br, &orig_br);
    sample->set_bounding_box(TBOX(orig_tl.x, orig_br.y,
                                  orig_br.x, orig_tl.y));
  }
  return sample;
}
// Computes the DENORMS for bl(baseline) and cn(character) normalization
// during feature extraction. The input denorm describes the current state
// of the blob, which is usually a baseline-normalized word.
// The Transforms setup are as follows:
// Baseline Normalized (bl) Output:
// We center the grapheme by aligning the x-coordinate of its centroid with
// x=128 and leaving the already-baseline-normalized y as-is.
//
// Character Normalized (cn) Output:
// We align the grapheme's centroid at the origin and scale it
// asymmetrically in x and y so that the 2nd moments are a standard value
// (51.2) ie the result is vaguely square.
// If classify_nonlinear_norm is true:
// A non-linear normalization is setup that attempts to evenly distribute
// edges across x and y.
//
// Some of the fields of fx_info are also setup:
// Length: Total length of outline.
// Rx: Rounded y second moment. (Reversed by convention.)
// Ry: rounded x second moment.
// Xmean: Rounded x center of mass of the blob.
// Ymean: Rounded y center of mass of the blob.
void Classify::SetupBLCNDenorms(const TBLOB& blob, bool nonlinear_norm,
DENORM* bl_denorm, DENORM* cn_denorm,
INT_FX_RESULT_STRUCT* fx_info) {
// Compute 1st and 2nd moments of the original outline.
FCOORD center, second_moments;
int length = blob.ComputeMoments(¢er, &second_moments);
if (fx_info != NULL) {
fx_info->Length = length;
fx_info->Rx = IntCastRounded(second_moments.y());
fx_info->Ry = IntCastRounded(second_moments.x());
fx_info->Xmean = IntCastRounded(center.x());
fx_info->Ymean = IntCastRounded(center.y());
}
// Setup the denorm for Baseline normalization.
bl_denorm->SetupNormalization(NULL, NULL, &blob.denorm(), center.x(), 128.0f,
1.0f, 1.0f, 128.0f, 128.0f);
// Setup the denorm for character normalization.
if (nonlinear_norm) {
GenericVector<GenericVector<int> > x_coords;
GenericVector<GenericVector<int> > y_coords;
TBOX box;
blob.GetPreciseBoundingBox(&box);
box.pad(1, 1);
blob.GetEdgeCoords(box, &x_coords, &y_coords);
cn_denorm->SetupNonLinear(&blob.denorm(), box, MAX_UINT8, MAX_UINT8,
0.0f, 0.0f, x_coords, y_coords);
} else {
cn_denorm->SetupNormalization(NULL, NULL, &blob.denorm(),
center.x(), center.y(),
51.2f / second_moments.x(),
51.2f / second_moments.y(),
128.0f, 128.0f);
}
}
// Helper normalizes the direction, assuming that it is at the given
// unnormed_pos, using the given denorm, starting at the root_denorm.
// Normalizes the direction dir, assumed to be at unnormed_pos, by pushing a
// unit step in that direction through the denorm (from root_denorm) and
// reading the direction of the normalized difference vector.
uinT8 NormalizeDirection(uinT8 dir, const FCOORD& unnormed_pos,
                         const DENORM& denorm, const DENORM* root_denorm) {
  // End point of a unit step from unnormed_pos in direction dir.
  FCOORD raw_end;
  raw_end.from_direction(dir);
  raw_end += unnormed_pos;
  // Normalize both endpoints and take the direction of their difference.
  FCOORD norm_start, norm_end;
  denorm.NormTransform(root_denorm, unnormed_pos, &norm_start);
  denorm.NormTransform(root_denorm, raw_end, &norm_end);
  norm_end -= norm_start;
  return norm_end.to_direction();
}
// Helper returns the mean direction vector from the given stats. Use the
// mean direction from dirs if there is information available, otherwise, use
// the fit_vector from point_diffs.
// Returns the mean direction vector for a feature segment: the mean of the
// direction samples in dirs when any exist, otherwise the least-squares fit
// vector from point_diffs, oriented to agree with start_pt -> end_pt.
static FCOORD MeanDirectionVector(const LLSQ& point_diffs, const LLSQ& dirs,
                                  const FCOORD& start_pt,
                                  const FCOORD& end_pt) {
  FCOORD direction;
  if (dirs.count() > 0) {
    // Direction samples exist. dirs holds two accumulators to dodge the
    // wrap-around problem: x is raw directions, y is directions offset by
    // 128. Trust whichever shows the smaller variance.
    FCOORD means = dirs.mean_point();
    double mean_dir =
        dirs.x_variance() <= dirs.y_variance() ? means.x() : means.y() + 128;
    direction.from_direction(Modulo(IntCastRounded(mean_dir), 256));
  } else {
    // No direction samples: fall back on the least-squares line through the
    // points, which is 180 degrees ambiguous.
    FCOORD feature_dir(end_pt - start_pt);
    direction = point_diffs.vector_fit();
    if (direction.x() == 0.0f && direction.y() == 0.0f) {
      // Only a single point: the fit is null, so use feature_dir directly.
      direction = feature_dir;
    } else {
      // With few, badly scaled points the fit can be wrong; also consider
      // the 90-degree rotated vector. Resolve the 180-degree ambiguity of
      // each candidate by forcing a non-negative dot product with
      // feature_dir, then keep the candidate with the larger dot product.
      FCOORD rotated = !direction;
      if (direction % feature_dir < 0.0)
        direction = -direction;
      if (rotated % feature_dir < 0.0)
        rotated = -rotated;
      if (rotated % feature_dir > direction % feature_dir)
        direction = rotated;
    }
  }
  return direction;
}
// Helper computes one or more features corresponding to the given points.
// Emitted features are on the line defined by:
// start_pt + lambda * (end_pt - start_pt) for scalar lambda.
// Features are spaced at feature_length intervals.
static int ComputeFeatures(const FCOORD& start_pt, const FCOORD& end_pt,
double feature_length,
GenericVector<INT_FEATURE_STRUCT>* features) {
FCOORD feature_vector(end_pt - start_pt);
if (feature_vector.x() == 0.0f && feature_vector.y() == 0.0f) return 0;
// Compute theta for the feature based on its direction.
uinT8 theta = feature_vector.to_direction();
// Compute the number of features and lambda_step.
double target_length = feature_vector.length();
int num_features = IntCastRounded(target_length / feature_length);
if (num_features == 0) return 0;
// Divide the length evenly into num_features pieces.
double lambda_step = 1.0 / num_features;
double lambda = lambda_step / 2.0;
for (int f = 0; f < num_features; ++f, lambda += lambda_step) {
FCOORD feature_pt(start_pt);
feature_pt += feature_vector * lambda;
INT_FEATURE_STRUCT feature(feature_pt, theta);
features->push_back(feature);
}
return num_features;
}
// Gathers outline points and their directions from start_index into dirs by
// stepping along the outline and normalizing the coordinates until the
// required feature_length has been collected or end_index is reached.
// On input pos must point to the position corresponding to start_index and on
// return pos is updated to the current raw position, and pos_normed is set to
// the normed version of pos.
// Since directions wrap-around, they need special treatment to get the mean.
// Provided the cluster of directions doesn't straddle the wrap-around point,
// the simple mean works. If they do, then, unless the directions are wildly
// varying, the cluster rotated by 180 degrees will not straddle the wrap-
// around point, so mean(dir + 180 degrees) - 180 degrees will work. Since
// LLSQ conveniently stores the mean of 2 variables, we use it to store
// dir and dir+128 (128 is 180 degrees) and then use the resulting mean
// with the least variance.
static int GatherPoints(const C_OUTLINE* outline, double feature_length,
                        const DENORM& denorm, const DENORM* root_denorm,
                        int start_index, int end_index,
                        ICOORD* pos, FCOORD* pos_normed,
                        LLSQ* points, LLSQ* dirs) {
  int step_length = outline->pathlength();
  ICOORD step = outline->step(start_index % step_length);
  // Prev_normed is the start point of this collection and will be set on the
  // first iteration, and on later iterations used to determine the length
  // that has been collected.
  FCOORD prev_normed;
  points->clear();
  dirs->clear();
  int num_points = 0;
  int index;
  // Indices may exceed step_length because of wrap-around, so every outline
  // access below takes index % step_length.
  for (index = start_index; index <= end_index; ++index, *pos += step) {
    step = outline->step(index % step_length);
    int edge_weight = outline->edge_strength_at_index(index % step_length);
    if (edge_weight == 0) {
      // This point has conflicting gradient and step direction, so ignore it.
      continue;
    }
    // Get the sub-pixel precise location and normalize.
    FCOORD f_pos = outline->sub_pixel_pos_at_index(*pos, index % step_length);
    denorm.NormTransform(root_denorm, f_pos, pos_normed);
    if (num_points == 0) {
      // The start of this segment.
      prev_normed = *pos_normed;
    } else {
      FCOORD offset = *pos_normed - prev_normed;
      float length = offset.length();
      if (length > feature_length) {
        // We have gone far enough from the start. We will use this point in
        // the next set so return what we have so far.
        return index;
      }
    }
    // Accumulate the normalized position, weighted by edge strength.
    points->add(pos_normed->x(), pos_normed->y(), edge_weight);
    int direction = outline->direction_at_index(index % step_length);
    if (direction >= 0) {
      direction = NormalizeDirection(direction, f_pos, denorm, root_denorm);
      // Use both the direction and direction +128 so we are not trying to
      // take the mean of something straddling the wrap-around point.
      dirs->add(direction, Modulo(direction + 128, 256));
    }
    ++num_points;
  }
  // Ran past end_index without covering feature_length: return the index
  // one past the last position considered.
  return index;
}
// Extracts Tesseract features and appends them to the features vector.
// Startpt to lastpt, inclusive, MUST have the same src_outline member,
// which may be NULL. The vector from lastpt to its next is included in
// the feature extraction. Hidden edges should be excluded by the caller.
// If force_poly is true, the features will be extracted from the polygonal
// approximation even if more accurate data is available.
static void ExtractFeaturesFromRun(
    const EDGEPT* startpt, const EDGEPT* lastpt,
    const DENORM& denorm, double feature_length, bool force_poly,
    GenericVector<INT_FEATURE_STRUCT>* features) {
  const EDGEPT* endpt = lastpt->next;
  const C_OUTLINE* outline = startpt->src_outline;
  if (outline != NULL && !force_poly) {
    // Detailed information is available. We have to normalize only from
    // the root_denorm to denorm.
    const DENORM* root_denorm = denorm.RootDenorm();
    int total_features = 0;
    // Get the features from the outline.
    int step_length = outline->pathlength();
    int start_index = startpt->start_step;
    // pos is the integer coordinates of the binary image steps.
    ICOORD pos = outline->position_at_index(start_index);
    // We use an end_index that allows us to use a positive increment, but
    // that may be beyond the bounds of the outline steps due to wrap-around,
    // so we use % step_length everywhere, except for start_index.
    int end_index = lastpt->start_step + lastpt->step_count;
    if (end_index <= start_index)
      end_index += step_length;
    LLSQ prev_points;
    LLSQ prev_dirs;
    FCOORD prev_normed_pos = outline->sub_pixel_pos_at_index(pos, start_index);
    denorm.NormTransform(root_denorm, prev_normed_pos, &prev_normed_pos);
    LLSQ points;
    LLSQ dirs;
    FCOORD normed_pos;
    // Prime the pipeline with the first batch of points/directions.
    int index = GatherPoints(outline, feature_length, denorm, root_denorm,
                             start_index, end_index, &pos, &normed_pos,
                             &points, &dirs);
    while (index <= end_index) {
      // At each iteration we nominally have 3 accumulated sets of points and
      // dirs: prev_points/dirs, points/dirs, next_points/dirs and sum them
      // into sum_points/dirs, but we don't necessarily get any features out,
      // so if that is the case, we keep accumulating instead of rotating the
      // accumulators.
      LLSQ next_points;
      LLSQ next_dirs;
      FCOORD next_normed_pos;
      index = GatherPoints(outline, feature_length, denorm, root_denorm,
                           index, end_index, &pos, &next_normed_pos,
                           &next_points, &next_dirs);
      LLSQ sum_points(prev_points);
      // TODO(rays) find out why it is better to use just dirs and next_dirs
      // in sum_dirs, instead of using prev_dirs as well.
      LLSQ sum_dirs(dirs);
      sum_points.add(points);
      sum_points.add(next_points);
      sum_dirs.add(next_dirs);
      bool made_features = false;
      // If we have some points, we can try making some features.
      if (sum_points.count() > 0) {
        // We have gone far enough from the start. Make a feature and restart.
        FCOORD fit_pt = sum_points.mean_point();
        FCOORD fit_vector = MeanDirectionVector(sum_points, sum_dirs,
                                                prev_normed_pos, normed_pos);
        // The segment to which we fit features is the line passing through
        // fit_pt in direction of fit_vector that starts nearest to
        // prev_normed_pos and ends nearest to normed_pos.
        FCOORD start_pos = prev_normed_pos.nearest_pt_on_line(fit_pt,
                                                              fit_vector);
        FCOORD end_pos = normed_pos.nearest_pt_on_line(fit_pt, fit_vector);
        // Possible correction to match the adjacent polygon segment: snap
        // the first segment's start to the polygon vertex.
        if (total_features == 0 && startpt != endpt) {
          FCOORD poly_pos(startpt->pos.x, startpt->pos.y);
          denorm.LocalNormTransform(poly_pos, &start_pos);
        }
        // Likewise snap the final segment's end to the polygon vertex.
        if (index > end_index && startpt != endpt) {
          FCOORD poly_pos(endpt->pos.x, endpt->pos.y);
          denorm.LocalNormTransform(poly_pos, &end_pos);
        }
        int num_features = ComputeFeatures(start_pos, end_pos, feature_length,
                                           features);
        if (num_features > 0) {
          // We made some features so shuffle the accumulators.
          prev_points = points;
          prev_dirs = dirs;
          prev_normed_pos = normed_pos;
          points = next_points;
          dirs = next_dirs;
          made_features = true;
          total_features += num_features;
        }
        // The end of the next set becomes the end next time around.
        normed_pos = next_normed_pos;
      }
      if (!made_features) {
        // We didn't make any features, so keep the prev accumulators and
        // add the next ones into the current.
        points.add(next_points);
        dirs.add(next_dirs);
      }
    }
  } else {
    // There is no outline, so we are forced to use the polygonal
    // approximation: one feature run per polygon edge.
    const EDGEPT* pt = startpt;
    do {
      FCOORD start_pos(pt->pos.x, pt->pos.y);
      FCOORD end_pos(pt->next->pos.x, pt->next->pos.y);
      denorm.LocalNormTransform(start_pos, &start_pos);
      denorm.LocalNormTransform(end_pos, &end_pos);
      ComputeFeatures(start_pos, end_pos, feature_length, features);
    } while ((pt = pt->next) != endpt);
  }
}
// Extracts sets of 3-D features of length kStandardFeatureLength (=12.8), as
// (x,y) position and angle as measured counterclockwise from the vector
// <-1, 0>, from blob using two normalizations defined by bl_denorm and
// cn_denorm. See SetupBLCNDenorms for definitions.
// If outline_cn_counts is not NULL, on return it contains the cumulative
// number of cn features generated for each outline in the blob (in order).
// Thus after the first outline, there were (*outline_cn_counts)[0] features,
// after the second outline, there were (*outline_cn_counts)[1] features etc.
void Classify::ExtractFeatures(const TBLOB& blob,
                               bool nonlinear_norm,
                               GenericVector<INT_FEATURE_STRUCT>* bl_features,
                               GenericVector<INT_FEATURE_STRUCT>* cn_features,
                               INT_FX_RESULT_STRUCT* results,
                               GenericVector<int>* outline_cn_counts) {
  DENORM bl_denorm, cn_denorm;
  tesseract::Classify::SetupBLCNDenorms(blob, nonlinear_norm,
                                        &bl_denorm, &cn_denorm, results);
  if (outline_cn_counts != NULL)
    outline_cn_counts->truncate(0);
  // Iterate the outlines.
  for (TESSLINE* ol = blob.outlines; ol != NULL; ol = ol->next) {
    // Iterate the polygon.
    EDGEPT* loop_pt = ol->FindBestStartPt();
    EDGEPT* pt = loop_pt;
    if (pt == NULL) continue;
    do {
      // Hidden points are excluded from feature extraction; continue here
      // advances pt via the loop condition below.
      if (pt->IsHidden()) continue;
      // Find a run of equal src_outline: [pt, last_pt] is the longest
      // stretch of consecutive non-hidden points sharing pt's src_outline.
      EDGEPT* last_pt = pt;
      do {
        last_pt = last_pt->next;
      } while (last_pt != loop_pt && !last_pt->IsHidden() &&
               last_pt->src_outline == pt->src_outline);
      last_pt = last_pt->prev;
      // Until the adaptive classifier can be weaned off polygon segments,
      // we have to force extraction from the polygon for the bl_features.
      ExtractFeaturesFromRun(pt, last_pt, bl_denorm, kStandardFeatureLength,
                             true, bl_features);
      ExtractFeaturesFromRun(pt, last_pt, cn_denorm, kStandardFeatureLength,
                             false, cn_features);
      // Resume scanning after the run just processed.
      pt = last_pt;
    } while ((pt = pt->next) != loop_pt);
    // Record the cumulative cn feature count after each outline.
    if (outline_cn_counts != NULL)
      outline_cn_counts->push_back(cn_features->size());
  }
  // Fill in the summary fields of the result struct.
  results->NumBL = bl_features->size();
  results->NumCN = cn_features->size();
  results->YBottom = blob.bounding_box().bottom();
  results->YTop = blob.bounding_box().top();
  results->Width = blob.bounding_box().width();
}
} // namespace tesseract
/*--------------------------------------------------------------------------*/
// Extract a set of standard-sized features from Blobs and write them out in
// two formats: baseline normalized and character normalized.
//
// We presume the Blobs are already scaled so that x-height=128 units
//
// Standard Features:
// We take all outline segments longer than 7 units and chop them into
// standard-sized segments of approximately 13 = (64 / 5) units.
// When writing these features out, we output their center and angle as
// measured counterclockwise from the vector <-1, 0>
//
// Baseline Normalized Output:
// We center the grapheme by aligning the x-coordinate of its centroid with
// x=0 and subtracting 128 from the y-coordinate.
//
// Character Normalized Output:
// We align the grapheme's centroid at the origin and scale it asymmetrically
// in x and y so that the result is vaguely square.
//
// Deprecated! Prefer tesseract::Classify::ExtractFeatures instead.
bool ExtractIntFeat(const TBLOB& blob,
bool nonlinear_norm,
INT_FEATURE_ARRAY baseline_features,
INT_FEATURE_ARRAY charnorm_features,
INT_FX_RESULT_STRUCT* results) {
GenericVector<INT_FEATURE_STRUCT> bl_features;
GenericVector<INT_FEATURE_STRUCT> cn_features;
tesseract::Classify::ExtractFeatures(blob, nonlinear_norm,
&bl_features, &cn_features, results,
NULL);
if (bl_features.size() == 0 || cn_features.size() == 0 ||
bl_features.size() > MAX_NUM_INT_FEATURES ||
cn_features.size() > MAX_NUM_INT_FEATURES) {
return false; // Feature extraction failed.
}
memcpy(baseline_features, &bl_features[0],
bl_features.size() * sizeof(bl_features[0]));
memcpy(charnorm_features, &cn_features[0],
cn_features.size() * sizeof(cn_features[0]));
return true;
}
| C++ |
// Copyright 2011 Google Inc. All Rights Reserved.
// Author: rays@google.com (Ray Smith)
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#include "sampleiterator.h"
#include "indexmapbidi.h"
#include "shapetable.h"
#include "trainingsample.h"
#include "trainingsampleset.h"
namespace tesseract {
// ================== SampleIterator Implementation =================
// Constructs an empty iterator over nothing; Init() must be called to
// attach a sample set before real iteration can happen.
SampleIterator::SampleIterator()
  : charset_map_(NULL),
    shape_table_(NULL),
    sample_set_(NULL),
    randomize_(false),
    owned_shape_table_(NULL) {
  num_shapes_ = 0;
  // With num_shapes_ == 0, Begin() leaves the iterator with AtEnd() true.
  Begin();
}
// Destructor releases the owned dummy shape table, if one was created.
SampleIterator::~SampleIterator() {
  Clear();
}
void SampleIterator::Clear() {
delete owned_shape_table_;
owned_shape_table_ = NULL;
}
// See class comment for arguments.
void SampleIterator::Init(const IndexMapBiDi* charset_map,
                          const ShapeTable* shape_table,
                          bool randomize,
                          TrainingSampleSet* sample_set) {
  // Drop any shape table owned from a previous Init().
  Clear();
  charset_map_ = charset_map;
  shape_table_ = shape_table;
  sample_set_ = sample_set;
  randomize_ = randomize;
  if (shape_table_ == NULL && charset_map_ != NULL) {
    // The caller wishes to iterate by class. The easiest way to do this
    // is to create a dummy shape_table_ that we will own.
    int num_fonts = sample_set_->NumFonts();
    owned_shape_table_ = new ShapeTable(sample_set_->unicharset());
    int charsetsize = sample_set_->unicharset().size();
    for (int c = 0; c < charsetsize; ++c) {
      // We always add a shape for each character to keep the index in sync
      // with the unichar_id.
      int shape_id = owned_shape_table_->AddShape(c, 0);
      // Font 0 was added above; add the remaining fonts only where samples
      // actually exist for this class.
      for (int f = 1; f < num_fonts; ++f) {
        if (sample_set_->NumClassSamples(f, c, true) > 0) {
          owned_shape_table_->AddToShape(shape_id, c, f);
        }
      }
    }
    shape_table_ = owned_shape_table_;
  }
  if (shape_table_ != NULL) {
    num_shapes_ = shape_table_->NumShapes();
  } else {
    // No shape table at all: iterate samples directly by flat index.
    // NOTE(review): num_samples vs num_raw_samples distinction comes from
    // TrainingSampleSet — presumably randomize implies replicated samples.
    num_shapes_ = randomize ? sample_set_->num_samples()
                            : sample_set_->num_raw_samples();
  }
  // Position at the first indexable sample.
  Begin();
}
// Iterator functions designed for use with a simple for loop:
// for (it.Begin(); !it.AtEnd(); it.Next()) {
// const TrainingSample& sample = it.GetSample();
// }
// Resets all iteration state, then advances to the first indexable sample.
void SampleIterator::Begin() {
  num_shape_fonts_ = 0;
  num_shape_chars_ = 0;
  num_samples_ = 0;
  shape_font_index_ = 0;
  shape_char_index_ = 0;
  sample_index_ = 0;
  // Start just before the first shape so the Next() below lands on it.
  shape_index_ = -1;
  // Find the first indexable sample.
  Next();
}
// True once the iterator has advanced past the last shape.
bool SampleIterator::AtEnd() const {
  return !(shape_index_ < num_shapes_);
}
// Returns the current sample (read-only). With a shape table the sample is
// addressed by (font, unichar, index); otherwise by flat sample index.
const TrainingSample& SampleIterator::GetSample() const {
  if (shape_table_ == NULL)
    return *sample_set_->GetSample(shape_index_);
  const UnicharAndFonts* entry = GetShapeEntry();
  return *sample_set_->GetSample(entry->font_ids[shape_font_index_],
                                 entry->unichar_id, sample_index_);
}
// Returns the current sample as mutable, addressed the same way as
// GetSample().
TrainingSample* SampleIterator::MutableSample() const {
  if (shape_table_ == NULL)
    return sample_set_->mutable_sample(shape_index_);
  const UnicharAndFonts* entry = GetShapeEntry();
  return sample_set_->MutableSample(entry->font_ids[shape_font_index_],
                                    entry->unichar_id, sample_index_);
}
// Returns the total index (from the original set of samples) of the current
// sample.
int SampleIterator::GlobalSampleIndex() const {
if (shape_table_ != NULL) {
const UnicharAndFonts* shape_entry = GetShapeEntry();
int char_id = shape_entry->unichar_id;
int font_id = shape_entry->font_ids[shape_font_index_];
return sample_set_->GlobalSampleIndex(font_id, char_id, sample_index_);
} else {
return shape_index_;
}
}
// Returns the index of the current sample in compact charset space, so
// in a 2-class problem between x and y, the returned indices will all be
// 0 or 1, and have nothing to do with the unichar_ids.
// If the charset_map_ is NULL, then this is equal to GetSparseClassID().
// Maps the current shape index into compact charset space via charset_map_,
// or falls back to the sparse id when there is no map.
int SampleIterator::GetCompactClassID() const {
  if (charset_map_ == NULL)
    return GetSparseClassID();
  return charset_map_->SparseToCompact(shape_index_);
}
// Returns the index of the current sample in sparse charset space, so
// in a 2-class problem between x and y, the returned indices will all be
// x or y, where x and y may be unichar_ids (no shape_table_) or shape_ids
// with a shape_table_.
// Sparse-space class id: the shape id when iterating shapes, otherwise the
// sample's own unichar class id.
int SampleIterator::GetSparseClassID() const {
  if (shape_table_ == NULL)
    return GetSample().class_id();
  return shape_index_;
}
// Moves on to the next indexable sample. If the end is reached, leaves
// the state such that AtEnd() is true.
void SampleIterator::Next() {
  if (shape_table_ != NULL) {
    // Four nested iteration levels: sample -> font -> unichar -> shape.
    // Next sample in this class/font combination.
    ++sample_index_;
    if (sample_index_ < num_samples_)
      return;
    // Next font in this class in this shape.
    sample_index_ = 0;
    do {
      ++shape_font_index_;
      if (shape_font_index_ >= num_shape_fonts_) {
        // Next unichar in this shape.
        shape_font_index_ = 0;
        ++shape_char_index_;
        if (shape_char_index_ >= num_shape_chars_) {
          // Find the next shape that is mapped in the charset_map_.
          shape_char_index_ = 0;
          do {
            ++shape_index_;
          } while (shape_index_ < num_shapes_ &&
                   charset_map_ != NULL &&
                   charset_map_->SparseToCompact(shape_index_) < 0);
          if (shape_index_ >= num_shapes_)
            return;  // The end: AtEnd() is now true.
          num_shape_chars_ = shape_table_->GetShape(shape_index_).size();
        }
      }
      // Refresh cached counts for the new (shape, char, font) position.
      const UnicharAndFonts* shape_entry = GetShapeEntry();
      num_shape_fonts_ = shape_entry->font_ids.size();
      int char_id = shape_entry->unichar_id;
      int font_id = shape_entry->font_ids[shape_font_index_];
      num_samples_ = sample_set_->NumClassSamples(font_id, char_id, randomize_);
      // Keep advancing until a (char, font) pair with samples is found.
    } while (num_samples_ == 0);
  } else {
    // We are just iterating over the samples.
    ++shape_index_;
  }
}
// Returns the size of the compact charset space.
// Size of the compact charset space (falls back to sparse when unmapped).
int SampleIterator::CompactCharsetSize() const {
  if (charset_map_ == NULL)
    return SparseCharsetSize();
  return charset_map_->CompactSize();
}
// Returns the size of the sparse charset space.
// Size of the sparse charset space: the map's sparse size if mapped, else
// the number of shapes if iterating shapes, else the raw charset size.
int SampleIterator::SparseCharsetSize() const {
  if (charset_map_ != NULL)
    return charset_map_->SparseSize();
  if (shape_table_ != NULL)
    return shape_table_->NumShapes();
  return sample_set_->charsetsize();
}
// Apply the supplied feature_space/feature_map transform to all samples
// accessed by this iterator.
// Applies the given feature map to every sample reachable from this
// iterator (resets the iterator as a side effect).
void SampleIterator::MapSampleFeatures(const IntFeatureMap& feature_map) {
  for (Begin(); !AtEnd(); Next())
    MutableSample()->MapFeatures(feature_map);
}
// Adjust the weights of all the samples to be uniform in the given charset.
// Returns the number of samples in the iterator.
// Gives every reachable sample an equal weight, then normalizes so the
// weights sum to 1. Returns the number of samples visited.
int SampleIterator::UniformSamples() {
  int sample_count = 0;
  // First give every sample the same initial weight...
  for (Begin(); !AtEnd(); Next()) {
    MutableSample()->set_weight(1.0);
    ++sample_count;
  }
  // ...then rescale them to sum to 1.
  NormalizeSamples();
  return sample_count;
}
// Normalize the weights of all the samples in the charset_map so they sum
// to 1. Returns the minimum assigned sample weight.
double SampleIterator::NormalizeSamples() {
double total_weight = 0.0;
int sample_count = 0;
for (Begin(); !AtEnd(); Next()) {
const TrainingSample& sample = GetSample();
total_weight += sample.weight();
++sample_count;
}
// Normalize samples.
double min_assigned_sample_weight = 1.0;
if (total_weight > 0.0) {
for (Begin(); !AtEnd(); Next()) {
TrainingSample* sample = MutableSample();
double weight = sample->weight() / total_weight;
if (weight < min_assigned_sample_weight)
min_assigned_sample_weight = weight;
sample->set_weight(weight);
}
}
return min_assigned_sample_weight;
}
// Helper returns the current UnicharAndFont shape_entry.
// Returns the (unichar, fonts) entry for the current shape/char position.
const UnicharAndFonts* SampleIterator::GetShapeEntry() const {
  return &shape_table_->GetShape(shape_index_)[shape_char_index_];
}
} // namespace tesseract.
| C++ |
// Copyright 2011 Google Inc. All Rights Reserved.
// Author: rays@google.com (Ray Smith)
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#include <ctime>
#include "errorcounter.h"
#include "fontinfo.h"
#include "ndminx.h"
#include "sampleiterator.h"
#include "shapeclassifier.h"
#include "shapetable.h"
#include "trainingsample.h"
#include "trainingsampleset.h"
#include "unicity_table.h"
namespace tesseract {
// Difference in result rating to be thought of as an "equal" choice.
const double kRatingEpsilon = 1.0 / 32;
// Tests a classifier, computing its error rate.
// See errorcounter.h for description of arguments.
// Iterates over the samples, calling the classifier in normal/silent mode.
// If the classifier makes a CT_UNICHAR_TOPN_ERR error, and the appropriate
// report_level is set (4 or greater), it will then call the classifier again
// with a debug flag and a keep_this argument to find out what is going on.
double ErrorCounter::ComputeErrorRate(ShapeClassifier* classifier,
    int report_level, CountTypes boosting_mode,
    const FontInfoTable& fontinfo_table,
    const GenericVector<Pix*>& page_images, SampleIterator* it,
    double* unichar_error, double* scaled_error, STRING* fonts_report) {
  int fontsize = it->sample_set()->NumFonts();
  ErrorCounter counter(classifier->GetUnicharset(), fontsize);
  GenericVector<UnicharRating> results;
  clock_t start = clock();
  int total_samples = 0;
  double unscaled_error = 0.0;
  // Set a number of samples on which to run the classify debug mode.
  int error_samples = report_level > 3 ? report_level * report_level : 0;
  // Iterate over all the samples, accumulating errors.
  for (it->Begin(); !it->AtEnd(); it->Next()) {
    TrainingSample* mutable_sample = it->MutableSample();
    // Look up the page image for this sample, if its page index is valid.
    int page_index = mutable_sample->page_num();
    Pix* page_pix = 0 <= page_index && page_index < page_images.size()
                  ? page_images[page_index] : NULL;
    // No debug, no keep this.
    classifier->UnicharClassifySample(*mutable_sample, page_pix, 0,
                                      INVALID_UNICHAR_ID, &results);
    bool debug_it = false;
    int correct_id = mutable_sample->class_id();
    if (counter.unicharset_.has_special_codes() &&
        (correct_id == UNICHAR_SPACE || correct_id == UNICHAR_JOINED ||
         correct_id == UNICHAR_BROKEN)) {
      // This is junk so use the special counter.
      debug_it = counter.AccumulateJunk(report_level > 3,
                                        results,
                                        mutable_sample);
    } else {
      debug_it = counter.AccumulateErrors(report_level > 3, boosting_mode,
                                          fontinfo_table,
                                          results, mutable_sample);
    }
    if (debug_it && error_samples > 0) {
      // Running debug, keep the correct answer, and debug the classifier.
      tprintf("Error on sample %d: %s Classifier debug output:\n",
              it->GlobalSampleIndex(),
              it->sample_set()->SampleToString(*mutable_sample).string());
      classifier->DebugDisplay(*mutable_sample, page_pix, correct_id);
      --error_samples;
    }
    ++total_samples;
  }
  double total_time = 1.0 * (clock() - start) / CLOCKS_PER_SEC;
  // Create the appropriate error report.
  unscaled_error = counter.ReportErrors(report_level, boosting_mode,
                                        fontinfo_table,
                                        *it, unichar_error, fonts_report);
  if (scaled_error != NULL) *scaled_error = counter.scaled_error_;
  if (report_level > 1) {
    // It is useful to know the time in microseconds/char.
    tprintf("Errors computed in %.2fs at %.1f μs/char\n",
            total_time, 1000000.0 * total_time / total_samples);
  }
  return unscaled_error;
}
// Tests a pair of classifiers, debugging errors of the new against the old.
// See errorcounter.h for description of arguments.
// Iterates over the samples, calling the classifiers in normal/silent mode.
// If the new_classifier makes a boosting_mode error that the old_classifier
// does not, it will then call the new_classifier again with a debug flag
// and a keep_this argument to find out what is going on.
void ErrorCounter::DebugNewErrors(
    ShapeClassifier* new_classifier, ShapeClassifier* old_classifier,
    CountTypes boosting_mode,
    const FontInfoTable& fontinfo_table,
    const GenericVector<Pix*>& page_images, SampleIterator* it) {
  int fontsize = it->sample_set()->NumFonts();
  ErrorCounter old_counter(old_classifier->GetUnicharset(), fontsize);
  ErrorCounter new_counter(new_classifier->GetUnicharset(), fontsize);
  GenericVector<UnicharRating> results;
  int total_samples = 0;
  // Cap on the number of samples that get the (expensive) visual debug.
  int error_samples = 25;
  int total_new_errors = 0;
  // Iterate over all the samples, accumulating errors.
  for (it->Begin(); !it->AtEnd(); it->Next()) {
    TrainingSample* mutable_sample = it->MutableSample();
    int page_index = mutable_sample->page_num();
    Pix* page_pix = 0 <= page_index && page_index < page_images.size()
                  ? page_images[page_index] : NULL;
    // No debug, no keep this.
    old_classifier->UnicharClassifySample(*mutable_sample, page_pix, 0,
                                          INVALID_UNICHAR_ID, &results);
    int correct_id = mutable_sample->class_id();
    if (correct_id != 0 &&
        !old_counter.AccumulateErrors(true, boosting_mode, fontinfo_table,
                                      results, mutable_sample)) {
      // old classifier was correct, check the new one.
      new_classifier->UnicharClassifySample(*mutable_sample, page_pix, 0,
                                            INVALID_UNICHAR_ID, &results);
      // NOTE(review): this second correct_id != 0 test is redundant — it was
      // already established true by the enclosing if.
      if (correct_id != 0 &&
          new_counter.AccumulateErrors(true, boosting_mode, fontinfo_table,
                                       results, mutable_sample)) {
        // New classifier makes an error the old one did not: re-classify
        // with debug on and the correct answer pinned, then display.
        tprintf("New Error on sample %d: Classifier debug output:\n",
                it->GlobalSampleIndex());
        ++total_new_errors;
        new_classifier->UnicharClassifySample(*mutable_sample, page_pix, 1,
                                              correct_id, &results);
        if (results.size() > 0 && error_samples > 0) {
          new_classifier->DebugDisplay(*mutable_sample, page_pix, correct_id);
          --error_samples;
        }
      }
    }
    ++total_samples;
  }
  tprintf("Total new errors = %d\n", total_new_errors);
}
// Constructor is private. Only anticipated use of ErrorCounter is via
// the static ComputeErrorRate.
ErrorCounter::ErrorCounter(const UNICHARSET& unicharset, int fontsize)
  : scaled_error_(0.0), rating_epsilon_(kRatingEpsilon),
    // Square substitution matrix: one row/column per unichar.
    unichar_counts_(unicharset.size(), unicharset.size(), 0),
    // Histograms bucket scores as integer percentages in [0, 100].
    ok_score_hist_(0, 101), bad_score_hist_(0, 101),
    unicharset_(unicharset) {
  Counts empty_counts;
  // One zeroed Counts per font, plus one multi-answer counter per unichar.
  font_counts_.init_to_size(fontsize, empty_counts);
  multi_unichar_counts_.init_to_size(unicharset.size(), 0);
}
// Nothing to release explicitly; all members manage their own storage.
ErrorCounter::~ErrorCounter() {
}
// Accumulates the errors from the classifier results on a single sample.
// Returns true if debug is true and a CT_UNICHAR_TOPN_ERR error occurred.
// boosting_mode selects the type of error to be used for boosting and the
// is_error_ member of sample is set according to whether the required type
// of error occurred. The font_table provides access to font properties
// for error counting and shape_table is used to understand the relationship
// between unichar_ids and shape_ids in the results
bool ErrorCounter::AccumulateErrors(bool debug, CountTypes boosting_mode,
                                    const FontInfoTable& font_table,
                                    const GenericVector<UnicharRating>& results,
                                    TrainingSample* sample) {
  int num_results = results.size();
  // Index in results of the correct answer, or -1 if not found.
  int answer_actual_rank = -1;
  int font_id = sample->font_id();
  int unichar_id = sample->class_id();
  sample->set_is_error(false);
  if (num_results == 0) {
    // Reject. We count rejects as a separate category, but still mark the
    // sample as an error in case any training module wants to use that to
    // improve the classifier.
    sample->set_is_error(true);
    ++font_counts_[font_id].n[CT_REJECT];
  } else {
    // Find rank of correct unichar answer, using rating_epsilon_ to allow
    // different answers to score as equal. (Ignoring the font.)
    int epsilon_rank = 0;
    int answer_epsilon_rank = -1;
    int num_top_answers = 0;
    double prev_rating = results[0].rating;
    bool joined = false;
    bool broken = false;
    int res_index = 0;
    while (res_index < num_results) {
      // A rating drop greater than rating_epsilon_ starts a new rank bucket.
      if (results[res_index].rating < prev_rating - rating_epsilon_) {
        ++epsilon_rank;
        prev_rating = results[res_index].rating;
      }
      if (results[res_index].unichar_id == unichar_id &&
          answer_epsilon_rank < 0) {
        answer_epsilon_rank = epsilon_rank;
        answer_actual_rank = res_index;
      }
      if (results[res_index].unichar_id == UNICHAR_JOINED &&
          unicharset_.has_special_codes())
        joined = true;
      else if (results[res_index].unichar_id == UNICHAR_BROKEN &&
               unicharset_.has_special_codes())
        broken = true;
      else if (epsilon_rank == 0)
        ++num_top_answers;
      ++res_index;
    }
    if (answer_actual_rank != 0) {
      // Correct result is not absolute top. (answer_actual_rank stays -1 if
      // the correct unichar never appeared in results at all.)
      ++font_counts_[font_id].n[CT_UNICHAR_TOPTOP_ERR];
      if (boosting_mode == CT_UNICHAR_TOPTOP_ERR) sample->set_is_error(true);
    }
    if (answer_epsilon_rank == 0) {
      ++font_counts_[font_id].n[CT_UNICHAR_TOP_OK];
      // Unichar OK, but count if multiple unichars.
      if (num_top_answers > 1) {
        ++font_counts_[font_id].n[CT_OK_MULTI_UNICHAR];
        ++multi_unichar_counts_[unichar_id];
      }
      // Check to see if any font in the top choice has attributes that match.
      // TODO(rays) It is easy to add counters for individual font attributes
      // here if we want them.
      if (font_table.SetContainsFontProperties(
          font_id, results[answer_actual_rank].fonts)) {
        // Font attributes were matched.
        // Check for multiple properties.
        if (font_table.SetContainsMultipleFontProperties(
            results[answer_actual_rank].fonts))
          ++font_counts_[font_id].n[CT_OK_MULTI_FONT];
      } else {
        // Font attributes weren't matched.
        ++font_counts_[font_id].n[CT_FONT_ATTR_ERR];
      }
    } else {
      // This is a top unichar error.
      ++font_counts_[font_id].n[CT_UNICHAR_TOP1_ERR];
      if (boosting_mode == CT_UNICHAR_TOP1_ERR) sample->set_is_error(true);
      // Count maps from unichar id to wrong unichar id.
      ++unichar_counts_(unichar_id, results[0].unichar_id);
      if (answer_epsilon_rank < 0 || answer_epsilon_rank >= 2) {
        // It is also a 2nd choice unichar error.
        ++font_counts_[font_id].n[CT_UNICHAR_TOP2_ERR];
        if (boosting_mode == CT_UNICHAR_TOP2_ERR) sample->set_is_error(true);
      }
      if (answer_epsilon_rank < 0) {
        // It is also a top-n choice unichar error.
        ++font_counts_[font_id].n[CT_UNICHAR_TOPN_ERR];
        if (boosting_mode == CT_UNICHAR_TOPN_ERR) sample->set_is_error(true);
        // Not found at all: charge the worst (last) epsilon rank.
        answer_epsilon_rank = epsilon_rank;
      }
    }
    // Compute mean number of return values and mean rank of correct answer.
    font_counts_[font_id].n[CT_NUM_RESULTS] += num_results;
    font_counts_[font_id].n[CT_RANK] += answer_epsilon_rank;
    if (joined)
      ++font_counts_[font_id].n[CT_OK_JOINED];
    if (broken)
      ++font_counts_[font_id].n[CT_OK_BROKEN];
  }
  // If it was an error for boosting then sum the weight.
  if (sample->is_error()) {
    scaled_error_ += sample->weight();
    if (debug) {
      // Dump the full result list for this sample; the caller will run the
      // classifier again in debug mode.
      tprintf("%d results for char %s font %d :",
              num_results, unicharset_.id_to_unichar(unichar_id),
              font_id);
      for (int i = 0; i < num_results; ++i) {
        tprintf(" %.3f : %s\n",
                results[i].rating,
                unicharset_.id_to_unichar(results[i].unichar_id));
      }
      return true;
    }
    int percent = 0;
    if (num_results > 0)
      percent = IntCastRounded(results[0].rating * 100);
    bad_score_hist_.add(percent, 1);
  } else {
    // Correct: record the score of the correct answer.
    int percent = 0;
    if (answer_actual_rank >= 0)
      percent = IntCastRounded(results[answer_actual_rank].rating * 100);
    ok_score_hist_.add(percent, 1);
  }
  return false;
}
// Accumulates counts for junk. Counts only whether the junk was correctly
// rejected or not.
// Accumulates counts for junk. Counts only whether the junk was correctly
// rejected or not. Returns debug if the junk was wrongly accepted.
bool ErrorCounter::AccumulateJunk(bool debug,
                                  const GenericVector<UnicharRating>& results,
                                  TrainingSample* sample) {
  // For junk we accept either no answer at all, or an explicit shape answer
  // matching the class id of the sample.
  int num_results = results.size();
  int font_id = sample->font_id();
  int unichar_id = sample->class_id();
  int percent = 0;
  if (num_results > 0)
    percent = IntCastRounded(results[0].rating * 100);
  bool accepted_junk = num_results > 0 && results[0].unichar_id != unichar_id;
  if (!accepted_junk) {
    // Correctly rejected.
    ++font_counts_[font_id].n[CT_REJECTED_JUNK];
    sample->set_is_error(false);
    ok_score_hist_.add(percent, 1);
    return false;
  }
  // This is a junk error: the classifier accepted junk as a character.
  ++font_counts_[font_id].n[CT_ACCEPTED_JUNK];
  sample->set_is_error(true);
  // It counts as an error for boosting too so sum the weight.
  scaled_error_ += sample->weight();
  bad_score_hist_.add(percent, 1);
  return debug;
}
// Creates a report of the error rate. The report_level controls the detail
// that is reported to stderr via tprintf:
// 0 -> no output.
// >=1 -> bottom-line error rate.
// >=3 -> font-level error rate.
// boosting_mode determines the return value. It selects which (un-weighted)
// error rate to return.
// The fontinfo_table from MasterTrainer provides the names of fonts.
// The it determines the current subset of the training samples.
// If not NULL, the top-choice unichar error rate is saved in unichar_error.
// If not NULL, the report string is saved in fonts_report.
// (Ignoring report_level).
double ErrorCounter::ReportErrors(int report_level, CountTypes boosting_mode,
const FontInfoTable& fontinfo_table,
const SampleIterator& it,
double* unichar_error,
STRING* fonts_report) {
// Compute totals over all the fonts and report individual font results
// when required.
Counts totals;
int fontsize = font_counts_.size();
for (int f = 0; f < fontsize; ++f) {
// Accumulate counts over fonts.
totals += font_counts_[f];
STRING font_report;
if (ReportString(false, font_counts_[f], &font_report)) {
if (fonts_report != NULL) {
*fonts_report += fontinfo_table.get(f).name;
*fonts_report += ": ";
*fonts_report += font_report;
*fonts_report += "\n";
}
if (report_level > 2) {
// Report individual font error rates.
tprintf("%s: %s\n", fontinfo_table.get(f).name, font_report.string());
}
}
}
// Report the totals.
STRING total_report;
bool any_results = ReportString(true, totals, &total_report);
if (fonts_report != NULL && fonts_report->length() == 0) {
// Make sure we return something even if there were no samples.
*fonts_report = "NoSamplesFound: ";
*fonts_report += total_report;
*fonts_report += "\n";
}
if (report_level > 0) {
// Report the totals.
STRING total_report;
if (any_results) {
tprintf("TOTAL Scaled Err=%.4g%%, %s\n",
scaled_error_ * 100.0, total_report.string());
}
// Report the worst substitution error only for now.
if (totals.n[CT_UNICHAR_TOP1_ERR] > 0) {
int charsetsize = unicharset_.size();
int worst_uni_id = 0;
int worst_result_id = 0;
int worst_err = 0;
for (int u = 0; u < charsetsize; ++u) {
for (int v = 0; v < charsetsize; ++v) {
if (unichar_counts_(u, v) > worst_err) {
worst_err = unichar_counts_(u, v);
worst_uni_id = u;
worst_result_id = v;
}
}
}
if (worst_err > 0) {
tprintf("Worst error = %d:%s -> %s with %d/%d=%.2f%% errors\n",
worst_uni_id, unicharset_.id_to_unichar(worst_uni_id),
unicharset_.id_to_unichar(worst_result_id),
worst_err, totals.n[CT_UNICHAR_TOP1_ERR],
100.0 * worst_err / totals.n[CT_UNICHAR_TOP1_ERR]);
}
}
tprintf("Multi-unichar shape use:\n");
for (int u = 0; u < multi_unichar_counts_.size(); ++u) {
if (multi_unichar_counts_[u] > 0) {
tprintf("%d multiple answers for unichar: %s\n",
multi_unichar_counts_[u],
unicharset_.id_to_unichar(u));
}
}
tprintf("OK Score histogram:\n");
ok_score_hist_.print();
tprintf("ERROR Score histogram:\n");
bad_score_hist_.print();
}
double rates[CT_SIZE];
if (!ComputeRates(totals, rates))
return 0.0;
// Set output values if asked for.
if (unichar_error != NULL)
*unichar_error = rates[CT_UNICHAR_TOP1_ERR];
return rates[boosting_mode];
}
// Sets the report string to a combined human and machine-readable report
// string of the error rates.
// Returns false if there is no data, leaving report unchanged, unless
// even_if_empty is true.
bool ErrorCounter::ReportString(bool even_if_empty, const Counts& counts,
                                STRING* report) {
  // Compute the error rates.
  double rates[CT_SIZE];
  if (!ComputeRates(counts, rates) && !even_if_empty)
    return false;
  // Using %.4g%%, the length of the output string should exactly match the
  // length of the format string, but in case of overflow, allow for +eddd
  // on each number.
  const int kMaxExtraLength = 5;  // Length of +eddd.
  // Keep this format string and the snprintf in sync with the CountTypes enum.
  const char* format_str = "Unichar=%.4g%%[1], %.4g%%[2], %.4g%%[n], %.4g%%[T] "
      "Mult=%.4g%%, Jn=%.4g%%, Brk=%.4g%%, Rej=%.4g%%, "
      "FontAttr=%.4g%%, Multi=%.4g%%, "
      "Answers=%.3g, Rank=%.3g, "
      "OKjunk=%.4g%%, Badjunk=%.4g%%";
  // One slack term per formatted number (CT_SIZE - 1 of them are printed).
  int max_str_len = strlen(format_str) + kMaxExtraLength * (CT_SIZE - 1) + 1;
  char* formatted_str = new char[max_str_len];
  snprintf(formatted_str, max_str_len, format_str,
           rates[CT_UNICHAR_TOP1_ERR] * 100.0,
           rates[CT_UNICHAR_TOP2_ERR] * 100.0,
           rates[CT_UNICHAR_TOPN_ERR] * 100.0,
           rates[CT_UNICHAR_TOPTOP_ERR] * 100.0,
           rates[CT_OK_MULTI_UNICHAR] * 100.0,
           rates[CT_OK_JOINED] * 100.0,
           rates[CT_OK_BROKEN] * 100.0,
           rates[CT_REJECT] * 100.0,
           rates[CT_FONT_ATTR_ERR] * 100.0,
           rates[CT_OK_MULTI_FONT] * 100.0,
           rates[CT_NUM_RESULTS],
           rates[CT_RANK],
           100.0 * rates[CT_REJECTED_JUNK],
           100.0 * rates[CT_ACCEPTED_JUNK]);
  *report = formatted_str;
  delete [] formatted_str;
  // Now append each field of counts with a tab in front so the result can
  // be loaded into a spreadsheet.
  for (int ct = 0; ct < CT_SIZE; ++ct)
    report->add_str_int("\t", counts.n[ct]);
  return true;
}
// Computes the error rates and returns in rates which is an array of size
// CT_SIZE. Returns false if there is no data, leaving rates unchanged.
// Converts raw counts into rates in the caller-supplied rates array of size
// CT_SIZE. Returns false (rates meaningless) when there were no samples.
bool ErrorCounter::ComputeRates(const Counts& counts, double rates[CT_SIZE]) {
  // Character samples: every non-junk sample falls into exactly one of
  // top-ok, top-1-error or reject.
  int ok_samples = counts.n[CT_UNICHAR_TOP_OK] + counts.n[CT_UNICHAR_TOP1_ERR] +
      counts.n[CT_REJECT];
  int junk_samples = counts.n[CT_REJECTED_JUNK] + counts.n[CT_ACCEPTED_JUNK];
  // Compute rates for normal chars (guard against division by zero).
  double char_denom = static_cast<double>(MAX(ok_samples, 1));
  for (int ct = 0; ct <= CT_RANK; ++ct)
    rates[ct] = counts.n[ct] / char_denom;
  // Compute rates for junk.
  double junk_denom = static_cast<double>(MAX(junk_samples, 1));
  for (int ct = CT_REJECTED_JUNK; ct <= CT_ACCEPTED_JUNK; ++ct)
    rates[ct] = counts.n[ct] / junk_denom;
  return ok_samples != 0 || junk_samples != 0;
}
// Zero-initializes every counter.
ErrorCounter::Counts::Counts() {
  for (int ct = 0; ct < CT_SIZE; ++ct)
    n[ct] = 0;
}
// Adds other into this for computing totals.
// Element-wise accumulation, used when totalling per-font counts.
void ErrorCounter::Counts::operator+=(const Counts& other) {
  for (int i = 0; i < CT_SIZE; ++i) {
    n[i] += other.n[i];
  }
}
} // namespace tesseract.
| C++ |
/******************************************************************************
** Filename: normmatch.c
** Purpose: Simple matcher based on character normalization features.
** Author: Dan Johnson
** History: Wed Dec 19 16:18:06 1990, DSJ, Created.
**
** (c) Copyright Hewlett-Packard Company, 1988.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
******************************************************************************/
/**----------------------------------------------------------------------------
Include Files and Type Defines
----------------------------------------------------------------------------**/
#include "normmatch.h"
#include <stdio.h>
#include <math.h>
#include "classify.h"
#include "clusttool.h"
#include "const.h"
#include "efio.h"
#include "emalloc.h"
#include "globals.h"
#include "helpers.h"
#include "normfeat.h"
#include "scanutils.h"
#include "unicharset.h"
#include "params.h"
// Container for the character normalization prototypes. Protos is indexed
// by class id, and NumProtos is the number of valid entries (see
// Classify::ComputeNormMatch which bounds-checks ClassId against it).
struct NORM_PROTOS
{
  int NumParams;          // Number of parameters per prototype.
  PARAM_DESC *ParamDesc;  // Parameter descriptions (presumably NumParams
                          // entries — confirm against reader).
  LIST* Protos;           // Per-class list of PROTOTYPEs, indexed by class id.
  int NumProtos;          // Number of classes/entries in Protos.
};
/**----------------------------------------------------------------------------
Private Function Prototypes
----------------------------------------------------------------------------**/
double NormEvidenceOf(register double NormAdj);
void PrintNormMatch(FILE *File,
int NumParams,
PROTOTYPE *Proto,
FEATURE Feature);
NORM_PROTOS *ReadNormProtos(FILE *File);
/**----------------------------------------------------------------------------
Variables
----------------------------------------------------------------------------**/
/* control knobs used to control the normalization adjustment process */
double_VAR(classify_norm_adj_midpoint, 32.0, "Norm adjust midpoint ...");
double_VAR(classify_norm_adj_curl, 2.0, "Norm adjust curl ...");
// Weight of width variance against height and vertical position.
const double kWidthErrorWeighting = 0.125;
/**----------------------------------------------------------------------------
Public Code
----------------------------------------------------------------------------**/
/*---------------------------------------------------------------------------*/
namespace tesseract {
FLOAT32 Classify::ComputeNormMatch(CLASS_ID ClassId,
                                   const FEATURE_STRUCT& feature,
                                   BOOL8 DebugMatch) {
/*
 **	Parameters:
 **		ClassId		id of class to match against
 **		Feature		character normalization feature
 **		DebugMatch	controls dump of debug info
 **	Globals:
 **		NormProtos	character normalization prototypes
 **	Operation: This routine compares Features against each character
 **		normalization proto for ClassId and returns the match
 **		rating of the best match (lower accumulated distance is a
 **		better match; the return is a penalty in [0, 1)).
 **	Return: Best match rating for Feature against protos of ClassId.
 **	Exceptions: none
 **	History: Wed Dec 19 16:56:12 1990, DSJ, Created.
 */
  LIST Protos;
  FLOAT32 BestMatch;
  FLOAT32 Match;
  FLOAT32 Delta;
  PROTOTYPE *Proto;
  int ProtoId;
  // Out-of-range class ids are treated the same as the noise class.
  if (ClassId >= NormProtos->NumProtos) {
    ClassId = NO_CLASS;
  }
  /* handle requests for classification as noise */
  if (ClassId == NO_CLASS) {
    /* kludge - clean up constants and make into control knobs later */
    Match = (feature.Params[CharNormLength] *
      feature.Params[CharNormLength] * 500.0 +
      feature.Params[CharNormRx] *
      feature.Params[CharNormRx] * 8000.0 +
      feature.Params[CharNormRy] *
      feature.Params[CharNormRy] * 8000.0);
    return (1.0 - NormEvidenceOf (Match));
  }
  BestMatch = MAX_FLOAT32;
  Protos = NormProtos->Protos[ClassId];
  if (DebugMatch) {
    tprintf("\nChar norm for class %s\n", unicharset.id_to_unichar(ClassId));
  }
  ProtoId = 0;
  // Accumulate a weighted squared distance over the y-position, height and
  // width dimensions for each proto; keep the smallest.
  iterate(Protos) {
    Proto = (PROTOTYPE *) first_node (Protos);
    Delta = feature.Params[CharNormY] - Proto->Mean[CharNormY];
    Match = Delta * Delta * Proto->Weight.Elliptical[CharNormY];
    if (DebugMatch) {
      tprintf("YMiddle: Proto=%g, Delta=%g, Var=%g, Dist=%g\n",
              Proto->Mean[CharNormY], Delta,
              Proto->Weight.Elliptical[CharNormY], Match);
    }
    Delta = feature.Params[CharNormRx] - Proto->Mean[CharNormRx];
    Match += Delta * Delta * Proto->Weight.Elliptical[CharNormRx];
    if (DebugMatch) {
      tprintf("Height: Proto=%g, Delta=%g, Var=%g, Dist=%g\n",
              Proto->Mean[CharNormRx], Delta,
              Proto->Weight.Elliptical[CharNormRx], Match);
    }
    // Ry is width! See intfx.cpp.
    Delta = feature.Params[CharNormRy] - Proto->Mean[CharNormRy];
    if (DebugMatch) {
      tprintf("Width: Proto=%g, Delta=%g, Var=%g\n",
              Proto->Mean[CharNormRy], Delta,
              Proto->Weight.Elliptical[CharNormRy]);
    }
    Delta = Delta * Delta * Proto->Weight.Elliptical[CharNormRy];
    // Width errors are deliberately down-weighted relative to height/y.
    Delta *= kWidthErrorWeighting;
    Match += Delta;
    if (DebugMatch) {
      tprintf("Total Dist=%g, scaled=%g, sigmoid=%g, penalty=%g\n",
              Match, Match / classify_norm_adj_midpoint,
              NormEvidenceOf(Match), 256 * (1 - NormEvidenceOf(Match)));
    }
    if (Match < BestMatch)
      BestMatch = Match;
    ProtoId++;
  }
  // Convert the best (smallest) distance to a penalty via the sigmoid.
  return 1.0 - NormEvidenceOf(BestMatch);
}                                /* ComputeNormMatch */
// Releases all memory owned by NormProtos and resets it to NULL.
// Safe to call when NormProtos is already NULL (no-op).
void Classify::FreeNormProtos() {
  if (NormProtos == NULL)
    return;
  // Free each per-class proto list before the arrays that hold them.
  for (int class_index = 0; class_index < NormProtos->NumProtos; ++class_index)
    FreeProtoList(&NormProtos->Protos[class_index]);
  Efree(NormProtos->Protos);
  Efree(NormProtos->ParamDesc);
  Efree(NormProtos);
  NormProtos = NULL;
}
} // namespace tesseract
/**----------------------------------------------------------------------------
Private Code
----------------------------------------------------------------------------**/
/**********************************************************************
* NormEvidenceOf
*
* Return the new type of evidence number corresponding to this
* normalization adjustment. The equation that represents the transform is:
* 1 / (1 + (NormAdj / midpoint) ^ curl)
**********************************************************************/
// Returns the evidence number corresponding to this normalization
// adjustment. The transform is: 1 / (1 + (NormAdj / midpoint) ^ curl),
// so NormAdj == classify_norm_adj_midpoint maps to 0.5.
// NOTE: the deprecated `register` storage class (removed in C++17) has
// been dropped from the parameter; the signature is otherwise unchanged.
double NormEvidenceOf(double NormAdj) {
  // Scale the adjustment relative to the configured midpoint.
  NormAdj /= classify_norm_adj_midpoint;
  // Special-case the common integer exponents to avoid the cost of pow().
  if (classify_norm_adj_curl == 3)
    NormAdj = NormAdj * NormAdj * NormAdj;
  else if (classify_norm_adj_curl == 2)
    NormAdj = NormAdj * NormAdj;
  else
    NormAdj = pow (NormAdj, classify_norm_adj_curl);
  return (1.0 / (1.0 + NormAdj));
}
/*---------------------------------------------------------------------------*/
void PrintNormMatch(FILE *File,
int NumParams,
PROTOTYPE *Proto,
FEATURE Feature) {
/*
** Parameters:
** File open text file to dump match debug info to
** NumParams # of parameters in proto and feature
** Proto[] array of prototype parameters
** Feature[] array of feature parameters
** Globals: none
** Operation: This routine dumps out detailed normalization match info.
** Return: none
** Exceptions: none
** History: Wed Jan 2 09:49:35 1991, DSJ, Created.
*/
int i;
FLOAT32 ParamMatch;
FLOAT32 TotalMatch;
for (i = 0, TotalMatch = 0.0; i < NumParams; i++) {
ParamMatch = (Feature->Params[i] - Mean(Proto, i)) /
StandardDeviation(Proto, i);
fprintf (File, " %6.1f", ParamMatch);
if (i == CharNormY || i == CharNormRx)
TotalMatch += ParamMatch * ParamMatch;
}
fprintf (File, " --> %6.1f (%4.2f)\n",
TotalMatch, NormEvidenceOf (TotalMatch));
} /* PrintNormMatch */
/*---------------------------------------------------------------------------*/
namespace tesseract {
NORM_PROTOS *Classify::ReadNormProtos(FILE *File, inT64 end_offset) {
/*
 ** Parameters:
 ** File open text file to read normalization protos from
 ** end_offset file offset at which to stop reading; negative means
 ** read until scanning the header of the next entry fails
 ** Globals: none
 ** Operation: This routine allocates a new data structure to hold
 ** a set of character normalization protos. It then fills in
 ** the data structure by reading from the specified File.
 ** Return: Character normalization protos.
 ** Exceptions: none
 ** History: Wed Dec 19 16:38:49 1990, DSJ, Created.
 */
  NORM_PROTOS *NormProtos;
  int i;
  char unichar[2 * UNICHAR_LEN + 1];
  UNICHAR_ID unichar_id;
  LIST Protos;
  int NumProtos;
  /* allocate and initialize the data structure: one proto list per unichar */
  NormProtos = (NORM_PROTOS *) Emalloc (sizeof (NORM_PROTOS));
  NormProtos->NumProtos = unicharset.size();
  NormProtos->Protos = (LIST *) Emalloc (NormProtos->NumProtos * sizeof(LIST));
  for (i = 0; i < NormProtos->NumProtos; i++)
    NormProtos->Protos[i] = NIL_LIST;
  /* read file header and save in data structure */
  NormProtos->NumParams = ReadSampleSize (File);
  NormProtos->ParamDesc = ReadParamDesc (File, NormProtos->NumParams);
  /* read protos for each class into a separate list */
  while ((end_offset < 0 || ftell(File) < end_offset) &&
         tfscanf(File, "%s %d", unichar, &NumProtos) == 2) {
    if (unicharset.contains_unichar(unichar)) {
      unichar_id = unicharset.unichar_to_id(unichar);
      Protos = NormProtos->Protos[unichar_id];
      for (i = 0; i < NumProtos; i++)
        Protos =
          push_last (Protos, ReadPrototype (File, NormProtos->NumParams));
      NormProtos->Protos[unichar_id] = Protos;
    } else {
      // Unknown unichar: still read (then free) its protos so the file
      // position stays in sync with the entry headers.
      cprintf("Error: unichar %s in normproto file is not in unichar set.\n",
              unichar);
      for (i = 0; i < NumProtos; i++)
        FreePrototype(ReadPrototype (File, NormProtos->NumParams));
    }
    SkipNewline(File);
  }
  return (NormProtos);
} /* ReadNormProtos */
} // namespace tesseract
| C++ |
/******************************************************************************
** Filename: extract.h
** Purpose: Interface to high level generic feature extraction.
** Author: Dan Johnson
** History: 1/21/90, DSJ, Created.
**
** (c) Copyright Hewlett-Packard Company, 1988.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
******************************************************************************/
#ifndef EXTRACT_H
#define EXTRACT_H
#include "featdefs.h"
#include <stdio.h>
class DENORM;
/*-----------------------------------------------------------------------------
Public Function Prototypes
-----------------------------------------------------------------------------*/
// Deprecated! Will be deleted soon!
// In the meantime, as all TBLOBs, Blob is in baseline normalized coords.
// See SetupBLCNDenorms in intfx.cpp for other args.
CHAR_DESC ExtractBlobFeatures(const FEATURE_DEFS_STRUCT &FeatureDefs,
const DENORM& bl_denorm, const DENORM& cn_denorm,
const INT_FX_RESULT_STRUCT& fx_info, TBLOB *Blob);
/*---------------------------------------------------------------------------
Private Function Prototypes
----------------------------------------------------------------------------*/
void ExtractorStub();
#endif
| C++ |
// Copyright 2010 Google Inc. All Rights Reserved.
// Author: rays@google.com (Ray Smith)
///////////////////////////////////////////////////////////////////////
// File: shapetable.h
// Description: Class to map a classifier shape index to unicharset
// indices and font indices.
// Author: Ray Smith
// Created: Thu Oct 28 17:46:32 PDT 2010
//
// (C) Copyright 2010, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#ifndef TESSERACT_CLASSIFY_SHAPETABLE_H_
#define TESSERACT_CLASSIFY_SHAPETABLE_H_
#include "bitvector.h"
#include "genericheap.h"
#include "genericvector.h"
#include "intmatcher.h"
class STRING;
class UNICHARSET;
namespace tesseract {
struct FontInfo;
class FontInfoTable;
class ShapeTable;
// Simple struct to hold a single classifier unichar selection, a corresponding
// rating, and a list of appropriate fonts.
struct UnicharRating {
  UnicharRating() : unichar_id(0), rating(0.0f) {}
  UnicharRating(int u, float r)
    : unichar_id(u), rating(r) {}

  // qsort-style comparator: orders by rating, highest first, breaking
  // ties by ascending unichar_id so the order is deterministic.
  static int SortDescendingRating(const void* t1, const void* t2) {
    const UnicharRating* lhs = reinterpret_cast<const UnicharRating*>(t1);
    const UnicharRating* rhs = reinterpret_cast<const UnicharRating*>(t2);
    if (lhs->rating > rhs->rating) return -1;
    if (lhs->rating < rhs->rating) return 1;
    return lhs->unichar_id - rhs->unichar_id;
  }
  // Returns the index of the first result with the given unichar_id, or -1
  // if it is not present. If the results are sorted by rating, this is also
  // the best result with that unichar_id.
  static int FirstResultWithUnichar(const GenericVector<UnicharRating>& results,
                                    UNICHAR_ID unichar_id);

  // Index into some UNICHARSET table indicates the class of the answer.
  UNICHAR_ID unichar_id;
  // Rating from classifier with 1.0 perfect and 0.0 impossible.
  // Call it a probability if you must.
  float rating;
  // Set of fonts for this shape in order of decreasing preference.
  // (There is no mechanism for storing scores for fonts as yet.)
  GenericVector<int> fonts;
};
// Classifier result from a low-level classification is an index into some
// ShapeTable and a rating.
struct ShapeRating {
  ShapeRating()
    : shape_id(0), rating(0.0f), raw(0.0f), font(0.0f),
      joined(false), broken(false) {}
  ShapeRating(int s, float r)
    : shape_id(s), rating(r), raw(1.0f), font(0.0f),
      joined(false), broken(false) {}

  // qsort-style comparator: orders by rating, highest first, breaking
  // ties by ascending shape_id so the order is deterministic.
  static int SortDescendingRating(const void* t1, const void* t2) {
    const ShapeRating* lhs = reinterpret_cast<const ShapeRating*>(t1);
    const ShapeRating* rhs = reinterpret_cast<const ShapeRating*>(t2);
    if (lhs->rating > rhs->rating) return -1;
    if (lhs->rating < rhs->rating) return 1;
    return lhs->shape_id - rhs->shape_id;
  }
  // Returns the index of the first result whose shape contains the given
  // unichar_id, or -1 if none does. If the results are sorted by rating,
  // this is also the best such result.
  static int FirstResultWithUnichar(const GenericVector<ShapeRating>& results,
                                    const ShapeTable& shape_table,
                                    UNICHAR_ID unichar_id);

  // Index into some shape table indicates the class of the answer.
  int shape_id;
  // Rating from classifier with 1.0 perfect and 0.0 impossible.
  // Call it a probability if you must.
  float rating;
  // Subsidiary rating that a classifier may use internally.
  float raw;
  // Subsidiary rating that a classifier may use internally.
  float font;
  // Flag indicating that the input may be joined.
  bool joined;
  // Flag indicating that the input may be broken (a fragment).
  bool broken;
};
// Simple struct to hold an entry for a heap-based priority queue of
// ShapeRating.
struct ShapeQueueEntry {
  ShapeQueueEntry() : result(ShapeRating(0, 0.0f)), level(0) {}
  ShapeQueueEntry(const ShapeRating& rating, int level0)
    : result(rating), level(level0) {}

  // Orders entries by decreasing rating; among equal ratings, entries
  // from deeper (higher) levels of the tree sort first.
  bool operator<(const ShapeQueueEntry& other) const {
    if (result.rating != other.result.rating)
      return result.rating > other.result.rating;
    return level > other.level;
  }

  // Output from classifier.
  ShapeRating result;
  // Which level in the tree did this come from?
  int level;
};
typedef GenericHeap<ShapeQueueEntry> ShapeQueue;
// Simple struct to hold a set of fonts associated with a single unichar-id.
// A vector of UnicharAndFonts makes a shape.
struct UnicharAndFonts {
  UnicharAndFonts() : unichar_id(0) {
  }
  // Starts the font list with the single given font_id.
  UnicharAndFonts(int uni_id, int font_id) : unichar_id(uni_id) {
    font_ids.push_back(font_id);
  }
  // Writes to the given file. Returns false in case of error.
  bool Serialize(FILE* fp) const;
  // Reads from the given file. Returns false in case of error.
  // If swap is true, assumes a big/little-endian swap is needed.
  bool DeSerialize(bool swap, FILE* fp);
  // qsort-style sort function to sort a pair of UnicharAndFonts by unichar_id.
  static int SortByUnicharId(const void* v1, const void* v2);
  // Ids of the fonts associated with this unichar.
  GenericVector<inT32> font_ids;
  inT32 unichar_id;
};
// A Shape is a collection of unichar-ids and a list of fonts associated with
// each, organized as a vector of UnicharAndFonts. Conceptually a Shape is
// a classifiable unit, and represents a group of characters or parts of
// characters that have a similar or identical shape. Shapes/ShapeTables may
// be organized hierarchically from identical shapes at the leaves to vaguely
// similar shapes near the root.
class Shape {
 public:
  // unichars_sorted_ must be explicitly initialized here: IsEqualUnichars
  // sorts on demand based on this flag, and it was previously left
  // uninitialized by the constructor (reading it was undefined behavior).
  Shape() : unichars_sorted_(false), destination_index_(-1) {}
  // Writes to the given file. Returns false in case of error.
  bool Serialize(FILE* fp) const;
  // Reads from the given file. Returns false in case of error.
  // If swap is true, assumes a big/little-endian swap is needed.
  bool DeSerialize(bool swap, FILE* fp);

  int destination_index() const {
    return destination_index_;
  }
  void set_destination_index(int index) {
    destination_index_ = index;
  }
  int size() const {
    return unichars_.size();
  }
  // Returns a UnicharAndFonts entry for the given index, which must be
  // in the range [0, size()).
  const UnicharAndFonts& operator[](int index) const {
    return unichars_[index];
  }
  // Sets the unichar_id of the given index to the new unichar_id.
  void SetUnicharId(int index, int unichar_id) {
    unichars_[index].unichar_id = unichar_id;
  }
  // Adds a font_id for the given unichar_id. If the unichar_id is not
  // in the shape, it is added.
  void AddToShape(int unichar_id, int font_id);
  // Adds everything in other to this.
  void AddShape(const Shape& other);
  // Returns true if the shape contains the given unichar_id, font_id pair.
  bool ContainsUnicharAndFont(int unichar_id, int font_id) const;
  // Returns true if the shape contains the given unichar_id, ignoring font.
  bool ContainsUnichar(int unichar_id) const;
  // Returns true if the shape contains the given font, ignoring unichar_id.
  bool ContainsFont(int font_id) const;
  // Returns true if the shape contains the given font properties, ignoring
  // unichar_id.
  bool ContainsFontProperties(const FontInfoTable& font_table,
                              uinT32 properties) const;
  // Returns true if the shape contains multiple different font properties,
  // ignoring unichar_id.
  bool ContainsMultipleFontProperties(const FontInfoTable& font_table) const;
  // Returns true if this shape is equal to other (ignoring order of unichars
  // and fonts).
  bool operator==(const Shape& other) const;
  // Returns true if this is a subset (including equal) of other.
  bool IsSubsetOf(const Shape& other) const;
  // Returns true if the lists of unichar ids are the same in this and other,
  // ignoring fonts.
  // NOT const, as it will sort the unichars on demand.
  bool IsEqualUnichars(Shape* other);

 private:
  // Sorts the unichars_ vector by unichar.
  void SortUnichars();

  // Flag indicates that the unichars are sorted, allowing faster set
  // operations with another shape.
  bool unichars_sorted_;
  // If this Shape is part of a ShapeTable the destination_index_ is the index
  // of some other shape in the ShapeTable with which this shape is merged.
  int destination_index_;
  // Array of unichars, each with a set of fonts. Each unichar has at most
  // one entry in the vector.
  GenericVector<UnicharAndFonts> unichars_;
};
// ShapeTable is a class to encapsulate the triple indirection that is
// used here.
// ShapeTable is a vector of shapes.
// Each shape is a vector of UnicharAndFonts representing the set of unichars
// that the shape represents.
// Each UnicharAndFonts also lists the fonts of the unichar_id that were
// mapped to the shape during training.
class ShapeTable {
 public:
  ShapeTable();
  // The UNICHARSET reference supplied here, or in set_unicharset below must
  // exist for the entire life of the ShapeTable. It is used only by DebugStr.
  explicit ShapeTable(const UNICHARSET& unicharset);
  // Writes to the given file. Returns false in case of error.
  bool Serialize(FILE* fp) const;
  // Reads from the given file. Returns false in case of error.
  // If swap is true, assumes a big/little-endian swap is needed.
  bool DeSerialize(bool swap, FILE* fp);
  // Accessors.
  // Returns the number of shapes currently in the table.
  int NumShapes() const {
    return shape_table_.size();
  }
  // Returns the unicharset set at construction or by set_unicharset.
  const UNICHARSET& unicharset() const {
    return *unicharset_;
  }
  // Returns the number of fonts used in this ShapeTable, computing it if
  // necessary (the result is cached in the mutable num_fonts_ member).
  int NumFonts() const;
  // Shapetable takes a pointer to the UNICHARSET, so it must persist for the
  // entire life of the ShapeTable.
  void set_unicharset(const UNICHARSET& unicharset) {
    unicharset_ = &unicharset;
  }
  // Re-indexes the class_ids in the shapetable according to the given map.
  // Useful in conjunction with set_unicharset.
  void ReMapClassIds(const GenericVector<int>& unicharset_map);
  // Returns a string listing the classes/fonts in a shape.
  STRING DebugStr(int shape_id) const;
  // Returns a debug string summarizing the table.
  STRING SummaryStr() const;
  // Adds a new shape starting with the given unichar_id and font_id.
  // Returns the assigned index.
  int AddShape(int unichar_id, int font_id);
  // Adds a copy of the given shape unless it is already present.
  // Returns the assigned index or index of existing shape if already present.
  int AddShape(const Shape& other);
  // Removes the shape given by the shape index. All indices above are changed!
  void DeleteShape(int shape_id);
  // Adds a font_id to the given existing shape index for the given
  // unichar_id. If the unichar_id is not in the shape, it is added.
  void AddToShape(int shape_id, int unichar_id, int font_id);
  // Adds the given shape to the existing shape with the given index.
  void AddShapeToShape(int shape_id, const Shape& other);
  // Returns the id of the shape that contains the given unichar and font.
  // If not found, returns -1.
  // If font_id < 0, the font_id is ignored and the first shape that matches
  // the unichar_id is returned.
  int FindShape(int unichar_id, int font_id) const;
  // Returns the first unichar_id and font_id in the given shape.
  void GetFirstUnicharAndFont(int shape_id,
                              int* unichar_id, int* font_id) const;
  // Accessors for the Shape with the given shape_id.
  const Shape& GetShape(int shape_id) const {
    return *shape_table_[shape_id];
  }
  Shape* MutableShape(int shape_id) {
    return shape_table_[shape_id];
  }
  // Expands all the classes/fonts in the shape individually to build
  // a ShapeTable.
  int BuildFromShape(const Shape& shape, const ShapeTable& master_shapes);
  // Returns true if the shapes are already merged.
  bool AlreadyMerged(int shape_id1, int shape_id2) const;
  // Returns true if any shape contains multiple unichars.
  bool AnyMultipleUnichars() const;
  // Returns the maximum number of unichars over all shapes.
  int MaxNumUnichars() const;
  // Merges shapes with a common unichar over the [start, end) interval.
  // Assumes single unichar per shape.
  void ForceFontMerges(int start, int end);
  // Returns the number of unichars in the master shape.
  int MasterUnicharCount(int shape_id) const;
  // Returns the sum of the font counts in the master shape.
  int MasterFontCount(int shape_id) const;
  // Returns the number of unichars that would result from merging the shapes.
  int MergedUnicharCount(int shape_id1, int shape_id2) const;
  // Merges two shape_ids, leaving shape_id2 marked as merged.
  void MergeShapes(int shape_id1, int shape_id2);
  // Swaps two shape_ids.
  void SwapShapes(int shape_id1, int shape_id2);
  // Appends the master shapes from other to this.
  // Used to create a clean ShapeTable from a merged one, or to create a
  // copy of a ShapeTable.
  // If not NULL, shape_map is set to map other shape_ids to this's shape_ids.
  void AppendMasterShapes(const ShapeTable& other,
                          GenericVector<int>* shape_map);
  // Returns the number of master shapes remaining after merging.
  int NumMasterShapes() const;
  // Returns the destination of this shape, (if merged), taking into account
  // the fact that the destination may itself have been merged.
  // For a non-merged shape, returns the input shape_id.
  int MasterDestinationIndex(int shape_id) const;
  // Returns false if the unichars in neither shape is a subset of the other.
  bool SubsetUnichar(int shape_id1, int shape_id2) const;
  // Returns false if the unichars in neither shape is a subset of the other.
  bool MergeSubsetUnichar(int merge_id1, int merge_id2, int shape_id) const;
  // Returns true if the unichar sets are equal between the shapes.
  bool EqualUnichars(int shape_id1, int shape_id2) const;
  // As EqualUnichars, but compares the would-be results of the two merges.
  bool MergeEqualUnichars(int merge_id1, int merge_id2, int shape_id) const;
  // Returns true if there is a common unichar between the shapes.
  bool CommonUnichars(int shape_id1, int shape_id2) const;
  // Returns true if there is a common font id between the shapes.
  bool CommonFont(int shape_id1, int shape_id2) const;
  // Adds the unichars of the given shape_id to the vector of results. Any
  // unichar_id that is already present just has the fonts added to the
  // font set for that result without adding a new entry in the vector.
  // NOTE: it is assumed that the results are given to this function in order
  // of decreasing rating.
  // The unichar_map vector indicates the index of the results entry containing
  // each unichar, or -1 if the unichar is not yet included in results.
  void AddShapeToResults(const ShapeRating& shape_rating,
                         GenericVector<int>* unichar_map,
                         GenericVector<UnicharRating>* results) const;

 private:
  // Adds the given unichar_id to the results if needed, updating unichar_map
  // and returning the index of unichar in results.
  int AddUnicharToResults(int unichar_id, float rating,
                          GenericVector<int>* unichar_map,
                          GenericVector<UnicharRating>* results) const;

  // Pointer to a provided unicharset used only by the Debugstr member.
  const UNICHARSET* unicharset_;
  // Vector of pointers to the Shapes in this ShapeTable.
  PointerVector<Shape> shape_table_;

  // Cached data calculated on demand.
  mutable int num_fonts_;
};
} // namespace tesseract.
#endif // TESSERACT_CLASSIFY_SHAPETABLE_H_
| C++ |
/******************************************************************************
** Filename: cutoffs.c
** Purpose: Routines to manipulate an array of class cutoffs.
** Author: Dan Johnson
** History: Wed Feb 20 09:28:51 1991, DSJ, Created.
**
** (c) Copyright Hewlett-Packard Company, 1988.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
******************************************************************************/
/**----------------------------------------------------------------------------
Include Files and Type Defines
----------------------------------------------------------------------------**/
#include "cutoffs.h"
#include <stdio.h>
#include "classify.h"
#include "efio.h"
#include "globals.h"
#include "helpers.h"
#include "scanutils.h"
#include "serialis.h"
#include "unichar.h"
#define REALLY_QUOTE_IT(x) QUOTE_IT(x)
#define MAX_CUTOFF 1000
/**----------------------------------------------------------------------------
Public Code
----------------------------------------------------------------------------**/
/*---------------------------------------------------------------------------*/
namespace tesseract {
void Classify::ReadNewCutoffs(FILE *CutoffFile, bool swap, inT64 end_offset,
                              CLASS_CUTOFF_ARRAY Cutoffs) {
/*
 ** Parameters:
 ** CutoffFile open file containing cutoff definitions
 ** swap TRUE if the serialized shapetable cutoffs need a
 ** big/little-endian byte swap
 ** end_offset file offset at which to stop reading; negative means
 ** read until scanning the next pair fails
 ** Cutoffs array to put cutoffs into
 ** Globals: none
 ** Operation: Read in all of the class-id/cutoff pairs from CutoffFile
 ** and insert them into the Cutoffs array. Cutoffs are
 ** indexed in the array by class id. Unused entries in the
 ** array are set to an arbitrarily high cutoff value.
 ** Return: none
 ** Exceptions: none
 ** History: Wed Feb 20 09:38:26 1991, DSJ, Created.
 */
  char Class[UNICHAR_LEN + 1];
  CLASS_ID ClassId;
  int Cutoff;
  int i;
  // When a shape table is in use, a serialized cutoff table for the shapes
  // precedes the textual per-class cutoffs in the file.
  if (shape_table_ != NULL) {
    if (!shapetable_cutoffs_.DeSerialize(swap, CutoffFile)) {
      tprintf("Error during read of shapetable pffmtable!\n");
    }
  }
  // Default every class to an arbitrarily high cutoff.
  for (i = 0; i < MAX_NUM_CLASSES; i++)
    Cutoffs[i] = MAX_CUTOFF;
  while ((end_offset < 0 || ftell(CutoffFile) < end_offset) &&
         tfscanf(CutoffFile, "%" REALLY_QUOTE_IT(UNICHAR_LEN) "s %d",
                 Class, &Cutoff) == 2) {
    // The literal string "NULL" in the file denotes the space character.
    if (strcmp(Class, "NULL") == 0) {
      ClassId = unicharset.unichar_to_id(" ");
    } else {
      ClassId = unicharset.unichar_to_id(Class);
    }
    Cutoffs[ClassId] = Cutoff;
    SkipNewline(CutoffFile);
  }
} /* ReadNewCutoffs */
} // namespace tesseract
| C++ |
/******************************************************************************
** Filename: outfeat.c
** Purpose: Definition of outline-features.
** Author: Dan Johnson
** History: 11/13/90, DSJ, Created.
**
** (c) Copyright Hewlett-Packard Company, 1988.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
******************************************************************************/
/**----------------------------------------------------------------------------
Include Files and Type Defines
----------------------------------------------------------------------------**/
#include "outfeat.h"
#include "classify.h"
#include "efio.h"
#include "featdefs.h"
#include "mfoutline.h"
#include "ocrfeatures.h"
#include <stdio.h>
/**----------------------------------------------------------------------------
Public Code
----------------------------------------------------------------------------**/
/*---------------------------------------------------------------------------*/
namespace tesseract {
FEATURE_SET Classify::ExtractOutlineFeatures(TBLOB *Blob) {
/*
 ** Parameters:
 ** Blob blob to extract outline-features from
 ** Globals: none
 ** Operation: Convert each segment in the outline to a feature
 ** and return the features.
 ** Return: Outline-features for Blob (an empty set if Blob is NULL).
 ** Exceptions: none
 ** History: 11/13/90, DSJ, Created.
 ** 05/24/91, DSJ, Updated for either char or baseline normalize.
 */
  LIST Outlines;
  LIST RemainingOutlines;
  MFOUTLINE Outline;
  FEATURE_SET FeatureSet;
  FLOAT32 XScale, YScale;
  FeatureSet = NewFeatureSet (MAX_OUTLINE_FEATURES);
  if (Blob == NULL)
    return (FeatureSet);
  Outlines = ConvertBlob (Blob);
  NormalizeOutlines(Outlines, &XScale, &YScale);
  RemainingOutlines = Outlines;
  // Convert every outline of the blob into outline-features.
  iterate(RemainingOutlines) {
    Outline = (MFOUTLINE) first_node (RemainingOutlines);
    ConvertToOutlineFeatures(Outline, FeatureSet);
  }
  // Under baseline normalization, re-center x on the length-weighted mean
  // x position of the extracted features.
  if (classify_norm_method == baseline)
    NormalizeOutlineX(FeatureSet);
  FreeOutlines(Outlines);
  return (FeatureSet);
} /* ExtractOutlineFeatures */
} // namespace tesseract
/**----------------------------------------------------------------------------
Private Code
----------------------------------------------------------------------------**/
/*---------------------------------------------------------------------------*/
// Computes a single outline-feature from the segment joining Start and
// End and appends it to FeatureSet. The feature's position is the segment
// midpoint, its direction the normalized angle from Start to End, and its
// length the distance between the two points.
void AddOutlineFeatureToSet(FPOINT *Start,
                            FPOINT *End,
                            FEATURE_SET FeatureSet) {
  FEATURE NewFeat = NewFeature(&OutlineFeatDesc);
  NewFeat->Params[OutlineFeatX] = AverageOf(Start->x, End->x);
  NewFeat->Params[OutlineFeatY] = AverageOf(Start->y, End->y);
  NewFeat->Params[OutlineFeatDir] = NormalizedAngleFrom(Start, End, 1.0);
  NewFeat->Params[OutlineFeatLength] = DistanceBetween(*Start, *End);
  AddFeature(FeatureSet, NewFeat);
} /* AddOutlineFeatureToSet */
/*---------------------------------------------------------------------------*/
void ConvertToOutlineFeatures(MFOUTLINE Outline, FEATURE_SET FeatureSet) {
/*
 ** Parameters:
 ** Outline outline to extract outline-features from
 ** FeatureSet set of features to add outline-features to
 ** Globals: none
 ** Operation:
 ** This routine converts each section in the specified
 ** outline to a feature described by its x,y position, length
 ** and angle.
 ** Return: none (results are returned in FeatureSet)
 ** Exceptions: none
 ** History: 11/13/90, DSJ, Created.
 ** 5/24/91, DSJ, Added hidden edge capability.
 */
  MFOUTLINE Next;
  MFOUTLINE First;
  FPOINT FeatureStart;
  FPOINT FeatureEnd;
  if (DegenerateOutline (Outline))
    return;
  First = Outline;
  Next = First;
  // Walk the circular outline once, emitting one feature per visible edge.
  do {
    FeatureStart = PointAt(Next)->Point;
    Next = NextPointAfter(Next);
    /* note that an edge is hidden if the ending point of the edge is
       marked as hidden. This situation happens because the order of
       the outlines is reversed when they are converted from the old
       format. In the old format, a hidden edge is marked by the
       starting point for that edge. */
    if (!PointAt(Next)->Hidden) {
      FeatureEnd = PointAt(Next)->Point;
      AddOutlineFeatureToSet(&FeatureStart, &FeatureEnd, FeatureSet);
    }
  }
  while (Next != First);
} /* ConvertToOutlineFeatures */
/*---------------------------------------------------------------------------*/
void NormalizeOutlineX(FEATURE_SET FeatureSet) {
/*
 ** Parameters:
 ** FeatureSet outline-features to be normalized
 ** Globals: none
 ** Operation: This routine computes the length-weighted average x position
 ** over all of the outline-features in FeatureSet and then
 ** renormalizes the outline-features to force this average
 ** to be the x origin (i.e. x=0).
 ** Return: none (FeatureSet is changed)
 ** Exceptions: none
 ** History: 11/13/90, DSJ, Created.
 */
  int i;
  FEATURE Feature;
  FLOAT32 Length;
  FLOAT32 TotalX = 0.0;
  FLOAT32 TotalWeight = 0.0;
  FLOAT32 Origin;
  if (FeatureSet->NumFeatures <= 0)
    return;
  for (i = 0; i < FeatureSet->NumFeatures; i++) {
    Feature = FeatureSet->Features[i];
    Length = Feature->Params[OutlineFeatLength];
    TotalX += Feature->Params[OutlineFeatX] * Length;
    TotalWeight += Length;
  }
  // Guard against division by zero: if every feature has zero length
  // (a fully degenerate outline), there is no meaningful weighted origin,
  // so leave the features unchanged rather than producing inf/NaN x values.
  if (TotalWeight <= 0.0)
    return;
  Origin = TotalX / TotalWeight;
  for (i = 0; i < FeatureSet->NumFeatures; i++) {
    Feature = FeatureSet->Features[i];
    Feature->Params[OutlineFeatX] -= Origin;
  }
} /* NormalizeOutlineX */
| C++ |
// Copyright 2010 Google Inc. All Rights Reserved.
// Author: rays@google.com (Ray Smith)
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
// Include automatically generated configuration file if running autoconf.
#ifdef HAVE_CONFIG_H
#include "config_auto.h"
#endif
#include "trainingsample.h"
#include <math.h>
#include "allheaders.h"
#include "helpers.h"
#include "intfeaturemap.h"
#include "normfeat.h"
#include "shapetable.h"
namespace tesseract {
ELISTIZE(TrainingSample)
// Center of randomizing operations: RandomizedCopy scales the 8-bit feature
// X/Y coordinates about this point.
const int kRandomizingCenter = 128;
// Randomizing factors used by RandomizedCopy to perturb samples; the last
// entry of each table (0 shift, 1.0 scale) is the identity value.
const int TrainingSample::kYShiftValues[kSampleYShiftSize] = {
  6, 3, -3, -6, 0
};
const double TrainingSample::kScaleValues[kSampleScaleSize] = {
  1.0625, 0.9375, 1.0
};
// Releases the feature arrays owned by this sample.
// delete [] on a NULL pointer is a safe no-op, so no guards are needed.
TrainingSample::~TrainingSample() {
  delete [] micro_features_;
  delete [] features_;
}
// WARNING! Serialize/DeSerialize do not save/restore the "cache" data
// members, which is mostly the mapped features, and the weight.
// It is assumed these can all be reconstructed from what is saved.
// Writes to the given file. Returns false in case of error.
bool TrainingSample::Serialize(FILE* fp) const {
  // Fixed-size scalars are written first, in a fixed order that DeSerialize
  // must consume exactly.
  if (fwrite(&class_id_, sizeof(class_id_), 1, fp) != 1) return false;
  if (fwrite(&font_id_, sizeof(font_id_), 1, fp) != 1) return false;
  if (fwrite(&page_num_, sizeof(page_num_), 1, fp) != 1) return false;
  if (!bounding_box_.Serialize(fp)) return false;
  // The two counts are written before their arrays so the reader knows how
  // much to allocate.
  if (fwrite(&num_features_, sizeof(num_features_), 1, fp) != 1) return false;
  if (fwrite(&num_micro_features_, sizeof(num_micro_features_), 1, fp) != 1)
    return false;
  if (fwrite(&outline_length_, sizeof(outline_length_), 1, fp) != 1)
    return false;
  // Variable-length feature arrays.
  if (static_cast<int>(fwrite(features_, sizeof(*features_), num_features_, fp))
      != num_features_)
    return false;
  if (static_cast<int>(fwrite(micro_features_, sizeof(*micro_features_),
                              num_micro_features_,
                              fp)) != num_micro_features_)
    return false;
  // Fixed-size CN and geometric feature arrays.
  if (fwrite(cn_feature_, sizeof(*cn_feature_), kNumCNParams, fp) !=
      kNumCNParams) return false;
  if (fwrite(geo_feature_, sizeof(*geo_feature_), GeoCount, fp) != GeoCount)
    return false;
  return true;
}
// Creates from the given file. Returns NULL in case of error.
// If swap is true, assumes a big/little-endian swap is needed.
// Factory that reads a TrainingSample from fp, returning NULL on failure.
// If swap is true, assumes a big/little-endian swap is needed.
// The caller owns the returned sample.
TrainingSample* TrainingSample::DeSerializeCreate(bool swap, FILE* fp) {
  TrainingSample* result = new TrainingSample;
  if (!result->DeSerialize(swap, fp)) {
    delete result;
    result = NULL;
  }
  return result;
}
// Reads from the given file. Returns false in case of error.
// If swap is true, assumes a big/little-endian swap is needed.
bool TrainingSample::DeSerialize(bool swap, FILE* fp) {
  // Scalars are read in the exact order Serialize wrote them.
  if (fread(&class_id_, sizeof(class_id_), 1, fp) != 1) return false;
  if (fread(&font_id_, sizeof(font_id_), 1, fp) != 1) return false;
  if (fread(&page_num_, sizeof(page_num_), 1, fp) != 1) return false;
  if (!bounding_box_.DeSerialize(swap, fp)) return false;
  if (fread(&num_features_, sizeof(num_features_), 1, fp) != 1) return false;
  if (fread(&num_micro_features_, sizeof(num_micro_features_), 1, fp) != 1)
    return false;
  if (fread(&outline_length_, sizeof(outline_length_), 1, fp) != 1)
    return false;
  // Byte-swap the counts BEFORE they are used as allocation sizes below.
  // NOTE(review): font_id_ and page_num_ are not swapped here -- confirm
  // whether cross-endian files are expected to preserve them.
  if (swap) {
    ReverseN(&class_id_, sizeof(class_id_));
    ReverseN(&num_features_, sizeof(num_features_));
    ReverseN(&num_micro_features_, sizeof(num_micro_features_));
    ReverseN(&outline_length_, sizeof(outline_length_));
  }
  // Replace any existing arrays with freshly sized ones.
  // NOTE(review): the counts come straight from the file and are not
  // validated before allocation -- a corrupt file could request a huge
  // allocation.
  delete [] features_;
  features_ = new INT_FEATURE_STRUCT[num_features_];
  if (static_cast<int>(fread(features_, sizeof(*features_), num_features_, fp))
      != num_features_)
    return false;
  delete [] micro_features_;
  micro_features_ = new MicroFeature[num_micro_features_];
  if (static_cast<int>(fread(micro_features_, sizeof(*micro_features_),
                             num_micro_features_,
                             fp)) != num_micro_features_)
    return false;
  // Fixed-size CN and geometric feature arrays.
  if (fread(cn_feature_, sizeof(*cn_feature_), kNumCNParams, fp) !=
      kNumCNParams) return false;
  if (fread(geo_feature_, sizeof(*geo_feature_), GeoCount, fp) != GeoCount)
    return false;
  return true;
}
// Saves the given features into a TrainingSample.
// Saves the given features into a newly allocated TrainingSample, filling in
// the geometric features from bounding_box and synthesizing the character
// normalization (CN) feature from fx_info. The caller owns the result.
TrainingSample* TrainingSample::CopyFromFeatures(
    const INT_FX_RESULT_STRUCT& fx_info,
    const TBOX& bounding_box,
    const INT_FEATURE_STRUCT* features,
    int num_features) {
  TrainingSample* result = new TrainingSample;
  // Deep-copy the int features.
  result->num_features_ = num_features;
  result->features_ = new INT_FEATURE_STRUCT[num_features];
  memcpy(result->features_, features, num_features * sizeof(features[0]));
  result->outline_length_ = fx_info.Length;
  // Geometric features come straight from the bounding box.
  result->geo_feature_[GeoBottom] = bounding_box.bottom();
  result->geo_feature_[GeoTop] = bounding_box.top();
  result->geo_feature_[GeoWidth] = bounding_box.width();
  // Generate the cn_feature_ from the fx_info.
  result->cn_feature_[CharNormY] =
      MF_SCALE_FACTOR * (fx_info.Ymean - kBlnBaselineOffset);
  result->cn_feature_[CharNormLength] =
      MF_SCALE_FACTOR * fx_info.Length / LENGTH_COMPRESSION;
  result->cn_feature_[CharNormRx] = MF_SCALE_FACTOR * fx_info.Rx;
  result->cn_feature_[CharNormRy] = MF_SCALE_FACTOR * fx_info.Ry;
  // The new sample has not been indexed or mapped yet.
  result->features_are_indexed_ = false;
  result->features_are_mapped_ = false;
  return result;
}
// Returns the cn_feature as a FEATURE_STRUCT* needed by cntraining.
// Builds a FEATURE_STRUCT holding this sample's character-normalization
// parameters, in the form needed by cntraining. The caller owns the result.
FEATURE_STRUCT* TrainingSample::GetCNFeature() const {
  FEATURE cn = NewFeature(&CharNormDesc);
  int i = 0;
  while (i < kNumCNParams) {
    cn->Params[i] = cn_feature_[i];
    ++i;
  }
  return cn;
}
// Constructs and returns a copy randomized by the method given by
// the randomizer index. If index is out of [0, kSampleRandomSize) then
// an exact copy is returned.
// Returns a copy perturbed by the scale/y-shift combination selected by
// index. If index is outside [0, kSampleRandomSize) an exact copy is
// returned. The caller owns the result.
TrainingSample* TrainingSample::RandomizedCopy(int index) const {
  TrainingSample* perturbed = Copy();
  if (index < 0 || index >= kSampleRandomSize)
    return perturbed;  // No randomization requested.
  ++index;  // Skip the first shift/scale combination.
  const int yshift = kYShiftValues[index / kSampleScaleSize];
  const double scaling = kScaleValues[index % kSampleScaleSize];
  for (int f = 0; f < num_features_; ++f) {
    // Scale X about the randomizing center, rounding to nearest.
    double x = (features_[f].X - kRandomizingCenter) * scaling +
               kRandomizingCenter;
    perturbed->features_[f].X =
        ClipToRange(static_cast<int>(x + 0.5), 0, MAX_UINT8);
    // Scale Y about the center, then apply the vertical shift.
    double y = (features_[f].Y - kRandomizingCenter) * scaling +
               kRandomizingCenter + yshift;
    perturbed->features_[f].Y =
        ClipToRange(static_cast<int>(y + 0.5), 0, MAX_UINT8);
  }
  return perturbed;
}
// Constructs and returns an exact copy.
// Constructs and returns an exact copy of this sample. The caller owns it.
// NOTE(review): bounding_box_, page_num_ and outline_length_ are not
// duplicated here -- confirm that callers of Copy() do not rely on them.
TrainingSample* TrainingSample::Copy() const {
  TrainingSample* clone = new TrainingSample;
  clone->class_id_ = class_id_;
  clone->font_id_ = font_id_;
  clone->weight_ = weight_;
  clone->sample_index_ = sample_index_;
  // Deep-copy the int features, if any.
  clone->num_features_ = num_features_;
  if (num_features_ > 0) {
    clone->features_ = new INT_FEATURE_STRUCT[num_features_];
    memcpy(clone->features_, features_, num_features_ * sizeof(features_[0]));
  }
  // Deep-copy the micro features, if any.
  clone->num_micro_features_ = num_micro_features_;
  if (num_micro_features_ > 0) {
    clone->micro_features_ = new MicroFeature[num_micro_features_];
    memcpy(clone->micro_features_, micro_features_,
           num_micro_features_ * sizeof(micro_features_[0]));
  }
  // CN and geometric features are fixed-size arrays: copy wholesale.
  memcpy(clone->cn_feature_, cn_feature_, sizeof(*cn_feature_) * kNumCNParams);
  memcpy(clone->geo_feature_, geo_feature_, sizeof(*geo_feature_) * GeoCount);
  return clone;
}
// Extracts the needed information from the CHAR_DESC_STRUCT.
void TrainingSample::ExtractCharDesc(int int_feature_type,
int micro_type,
int cn_type,
int geo_type,
CHAR_DESC_STRUCT* char_desc) {
// Extract the INT features.
if (features_ != NULL) delete [] features_;
FEATURE_SET_STRUCT* char_features = char_desc->FeatureSets[int_feature_type];
if (char_features == NULL) {
tprintf("Error: no features to train on of type %s\n",
kIntFeatureType);
num_features_ = 0;
features_ = NULL;
} else {
num_features_ = char_features->NumFeatures;
features_ = new INT_FEATURE_STRUCT[num_features_];
for (int f = 0; f < num_features_; ++f) {
features_[f].X =
static_cast<uinT8>(char_features->Features[f]->Params[IntX]);
features_[f].Y =
static_cast<uinT8>(char_features->Features[f]->Params[IntY]);
features_[f].Theta =
static_cast<uinT8>(char_features->Features[f]->Params[IntDir]);
features_[f].CP_misses = 0;
}
}
// Extract the Micro features.
if (micro_features_ != NULL) delete [] micro_features_;
char_features = char_desc->FeatureSets[micro_type];
if (char_features == NULL) {
tprintf("Error: no features to train on of type %s\n",
kMicroFeatureType);
num_micro_features_ = 0;
micro_features_ = NULL;
} else {
num_micro_features_ = char_features->NumFeatures;
micro_features_ = new MicroFeature[num_micro_features_];
for (int f = 0; f < num_micro_features_; ++f) {
for (int d = 0; d < MFCount; ++d) {
micro_features_[f][d] = char_features->Features[f]->Params[d];
}
}
}
// Extract the CN feature.
char_features = char_desc->FeatureSets[cn_type];
if (char_features == NULL) {
tprintf("Error: no CN feature to train on.\n");
} else {
ASSERT_HOST(char_features->NumFeatures == 1);
cn_feature_[CharNormY] = char_features->Features[0]->Params[CharNormY];
cn_feature_[CharNormLength] =
char_features->Features[0]->Params[CharNormLength];
cn_feature_[CharNormRx] = char_features->Features[0]->Params[CharNormRx];
cn_feature_[CharNormRy] = char_features->Features[0]->Params[CharNormRy];
}
// Extract the Geo feature.
char_features = char_desc->FeatureSets[geo_type];
if (char_features == NULL) {
tprintf("Error: no Geo feature to train on.\n");
} else {
ASSERT_HOST(char_features->NumFeatures == 1);
geo_feature_[GeoBottom] = char_features->Features[0]->Params[GeoBottom];
geo_feature_[GeoTop] = char_features->Features[0]->Params[GeoTop];
geo_feature_[GeoWidth] = char_features->Features[0]->Params[GeoWidth];
}
features_are_indexed_ = false;
features_are_mapped_ = false;
}
// Sets the mapped_features_ from the features_ using the provided
// feature_space to the indexed versions of the features.
// Sets mapped_features_ to the sorted, indexed versions of features_ using
// the provided feature_space, and flags the features as indexed but not
// mapped. (Removed an unused local GenericVector that was never referenced.)
void TrainingSample::IndexFeatures(const IntFeatureSpace& feature_space) {
  feature_space.IndexAndSortFeatures(features_, num_features_,
                                     &mapped_features_);
  features_are_indexed_ = true;
  features_are_mapped_ = false;
}
// Sets the mapped_features_ from the features using the provided
// feature_map.
void TrainingSample::MapFeatures(const IntFeatureMap& feature_map) {
GenericVector<int> indexed_features;
feature_map.feature_space().IndexAndSortFeatures(features_, num_features_,
&indexed_features);
feature_map.MapIndexedFeatures(indexed_features, &mapped_features_);
features_are_indexed_ = false;
features_are_mapped_ = true;
}
// Returns a pix representing the sample. (Int features only.)
Pix* TrainingSample::RenderToPix(const UNICHARSET* unicharset) const {
  // 1-bit-deep image covering the int-feature coordinate space.
  Pix* pix = pixCreate(kIntFeatureExtent, kIntFeatureExtent, 1);
  for (int f = 0; f < num_features_; ++f) {
    // Each int feature is a positioned direction: draw it as a short
    // 6-pixel stroke from (X, Y) along the feature's direction.
    int start_x = features_[f].X;
    int start_y = kIntFeatureExtent - features_[f].Y;  // Flip Y to image coords.
    // Theta is an 8-bit angle: map [0, 256) onto [-pi, pi).
    double dx = cos((features_[f].Theta / 256.0) * 2.0 * PI - PI);
    double dy = -sin((features_[f].Theta / 256.0) * 2.0 * PI - PI);
    for (int i = 0; i <= 5; ++i) {
      int x = static_cast<int>(start_x + dx * i);
      int y = static_cast<int>(start_y + dy * i);
      // NOTE(review): bounds use literal 256; presumably kIntFeatureExtent
      // == 256 -- confirm, or the clip limit disagrees with the pix size.
      if (x >= 0 && x < 256 && y >= 0 && y < 256)
        pixSetPixel(pix, x, y, 1);
    }
  }
  // Label the pix with the character text for debug viewers, if available.
  if (unicharset != NULL)
    pixSetText(pix, unicharset->id_to_unichar(class_id_));
  return pix;
}
// Displays the features in the given window with the given color.
// Draws every int feature of this sample into the given window using color.
// Compiles to a no-op when graphics support is disabled.
void TrainingSample::DisplayFeatures(ScrollView::Color color,
                                     ScrollView* window) const {
#ifndef GRAPHICS_DISABLED
  for (int i = 0; i < num_features_; ++i)
    RenderIntFeature(window, &features_[i], color);
#endif  // GRAPHICS_DISABLED
}
// Returns a pix of the original sample image. The pix is padded all round
// by padding wherever possible.
// The returned Pix must be pixDestroyed after use.
// If the input page_pix is NULL, NULL is returned.
// Returns a pix of the original sample image, padded all round by padding
// wherever possible (the padded box is clipped to the page limits).
// The returned Pix must be pixDestroyed after use.
// If the input page_pix is NULL, NULL is returned.
Pix* TrainingSample::GetSamplePix(int padding, Pix* page_pix) const {
  if (page_pix == NULL)
    return NULL;
  int page_width = pixGetWidth(page_pix);
  int page_height = pixGetHeight(page_pix);
  TBOX padded_box = bounding_box();
  padded_box.pad(padding, padding);
  // Clip the padded_box to the limits of the page.
  TBOX page_box(0, 0, page_width, page_height);
  padded_box &= page_box;
  // BUG FIX: clip using the padded sample box, not page_box -- the original
  // code passed page_box, which clipped out the entire page instead of the
  // padded region around the sample.
  // TBOX has a bottom-left origin but leptonica's Box is top-left, hence
  // the page_height - top() conversion.
  Box* box = boxCreate(padded_box.left(), page_height - padded_box.top(),
                       padded_box.width(), padded_box.height());
  Pix* sample_pix = pixClipRectangle(page_pix, box, NULL);
  boxDestroy(&box);
  return sample_pix;
}
} // namespace tesseract
| C++ |
// Copyright 2010 Google Inc. All Rights Reserved.
// Author: rays@google.com (Ray Smith)
///////////////////////////////////////////////////////////////////////
// File: mastertrainer.h
// Description: Trainer to build the MasterClassifier.
// Author: Ray Smith
// Created: Wed Nov 03 18:07:01 PDT 2010
//
// (C) Copyright 2010, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#ifndef TESSERACT_TRAINING_MASTERTRAINER_H__
#define TESSERACT_TRAINING_MASTERTRAINER_H__
/**----------------------------------------------------------------------------
Include Files and Type Defines
----------------------------------------------------------------------------**/
#include "classify.h"
#include "cluster.h"
#include "intfx.h"
#include "elst.h"
#include "errorcounter.h"
#include "featdefs.h"
#include "fontinfo.h"
#include "indexmapbidi.h"
#include "intfeaturespace.h"
#include "intfeaturemap.h"
#include "intmatcher.h"
#include "params.h"
#include "shapetable.h"
#include "trainingsample.h"
#include "trainingsampleset.h"
#include "unicharset.h"
namespace tesseract {
class ShapeClassifier;
// Simple struct to hold the distance between two shapes during clustering.
struct ShapeDist {
  // Default: both shape indices 0 with zero distance.
  ShapeDist() : shape1(0), shape2(0), distance(0.0f) {}
  // s1, s2 are shape indices (into a ShapeTable); dist is their distance.
  ShapeDist(int s1, int s2, float dist)
    : shape1(s1), shape2(s2), distance(dist) {}
  // Sort operator to sort in ascending order of distance.
  bool operator<(const ShapeDist& other) const {
    return distance < other.distance;
  }
  // Indices of the two shapes being compared.
  int shape1;
  int shape2;
  // Distance between shape1 and shape2.
  float distance;
};
// Class to encapsulate training processes that use the TrainingSampleSet.
// Initially supports shape clustering and mftrainining.
// Other important features of the MasterTrainer are conditioning the data
// by outlier elimination, replication with perturbation, and serialization.
class MasterTrainer {
 public:
  // Builds an empty trainer. shape_analysis enables fragment remapping,
  // replicate_samples enables sample replication/perturbation, and
  // debug_level controls the verbosity of debug output.
  MasterTrainer(NormalizationMode norm_mode, bool shape_analysis,
                bool replicate_samples, int debug_level);
  ~MasterTrainer();
  // Writes to the given file. Returns false in case of error.
  bool Serialize(FILE* fp) const;
  // Reads from the given file. Returns false in case of error.
  // If swap is true, assumes a big/little-endian swap is needed.
  bool DeSerialize(bool swap, FILE* fp);
  // Loads an initial unicharset, or sets one up if the file cannot be read.
  void LoadUnicharset(const char* filename);
  // Sets the feature space definition and re-initializes the feature map
  // to match it.
  void SetFeatureSpace(const IntFeatureSpace& fs) {
    feature_space_ = fs;
    feature_map_.Init(fs);
  }
  // Reads the samples and their features from the given file,
  // adding them to the trainer with the font_id from the content of the file.
  // If verification, then these are verification samples, not training.
  void ReadTrainingSamples(const char* page_name,
                           const FEATURE_DEFS_STRUCT& feature_defs,
                           bool verification);
  // Adds the given single sample to the trainer, setting the classid
  // appropriately from the given unichar_str.
  void AddSample(bool verification, const char* unichar_str,
                 TrainingSample* sample);
  // Loads all pages from the given tif filename and append to page_images_.
  // Must be called after ReadTrainingSamples, as the current number of images
  // is used as an offset for page numbers in the samples.
  void LoadPageImages(const char* filename);
  // Cleans up the samples after initial load from the tr files, and prior to
  // saving the MasterTrainer:
  // Remaps fragmented chars if running shape analysis.
  // Sets up the samples appropriately for class/fontwise access.
  // Deletes outlier samples.
  void PostLoadCleanup();
  // Gets the samples ready for training. Use after both
  // ReadTrainingSamples+PostLoadCleanup or DeSerialize.
  // Re-indexes the features and computes canonical and cloud features.
  void PreTrainingSetup();
  // Sets up the master_shapes_ table, which tells which fonts should stay
  // together until they get to a leaf node classifier.
  void SetupMasterShapes();
  // Adds the junk_samples_ to the main samples_ set. Junk samples are initially
  // fragments and n-grams (all incorrectly segmented characters).
  // Various training functions may result in incorrectly segmented characters
  // being added to the unicharset of the main samples, perhaps because they
  // form a "radical" decomposition of some (Indic) grapheme, or because they
  // just look the same as a real character (like rn/m)
  // This function moves all the junk samples, to the main samples_ set, but
  // desirable junk, being any sample for which the unichar already exists in
  // the samples_ unicharset gets the unichar-ids re-indexed to match, but
  // anything else gets re-marked as unichar_id 0 (space character) to identify
  // it as junk to the error counter.
  void IncludeJunk();
  // Replicates the samples and perturbs them if the enable_replication_ flag
  // is set. MUST be used after the last call to OrganizeByFontAndClass on
  // the training samples, ie after IncludeJunk if it is going to be used, as
  // OrganizeByFontAndClass will eat the replicated samples into the regular
  // samples.
  void ReplicateAndRandomizeSamplesIfRequired();
  // Loads the basic font properties file into fontinfo_table_.
  // Returns false on failure.
  bool LoadFontInfo(const char* filename);
  // Loads the xheight font properties file into xheights_.
  // Returns false on failure.
  bool LoadXHeights(const char* filename);
  // Reads spacing stats from filename and adds them to fontinfo_table.
  // Returns false on failure.
  bool AddSpacingInfo(const char *filename);
  // Returns the font id corresponding to the given font name.
  // Returns -1 if the font cannot be found.
  int GetFontInfoId(const char* font_name);
  // Returns the font_id of the closest matching font name to the given
  // filename. It is assumed that a substring of the filename will match
  // one of the fonts. If more than one is matched, the longest is returned.
  int GetBestMatchingFontInfoId(const char* filename);
  // Returns the filename of the tr file corresponding to the command-line
  // argument with the given index.
  const STRING& GetTRFileName(int index) const {
    return tr_filenames_[index];
  }
  // Sets up a flat shapetable with one shape per class/font combination.
  void SetupFlatShapeTable(ShapeTable* shape_table);
  // Sets up a Clusterer for mftraining on a single shape_id.
  // Call FreeClusterer on the return value after use.
  CLUSTERER* SetupForClustering(const ShapeTable& shape_table,
                                const FEATURE_DEFS_STRUCT& feature_defs,
                                int shape_id, int* num_samples);
  // Writes the given float_classes (produced by SetupForFloat2Int) as inttemp
  // to the given inttemp_file, and the corresponding pffmtable.
  // The unicharset is the original encoding of graphemes, and shape_set should
  // match the size of the shape_table, and may possibly be totally fake.
  void WriteInttempAndPFFMTable(const UNICHARSET& unicharset,
                                const UNICHARSET& shape_set,
                                const ShapeTable& shape_table,
                                CLASS_STRUCT* float_classes,
                                const char* inttemp_file,
                                const char* pffmtable_file);
  // Returns the unicharset of the main training sample set.
  const UNICHARSET& unicharset() const {
    return samples_.unicharset();
  }
  // Returns the main (non-junk, non-verification) sample set.
  TrainingSampleSet* GetSamples() {
    return &samples_;
  }
  // Returns the master shape table.
  const ShapeTable& master_shapes() const {
    return master_shapes_;
  }
  // Generates debug output relating to the canonical distance between the
  // two given UTF8 grapheme strings.
  void DebugCanonical(const char* unichar_str1, const char* unichar_str2);
#ifndef GRAPHICS_DISABLED
  // Debugging for cloud/canonical features.
  // Displays a Features window containing:
  // If unichar_str2 is in the unicharset, and canonical_font is non-negative,
  // displays the canonical features of the char/font combination in red.
  // If unichar_str1 is in the unicharset, and cloud_font is non-negative,
  // displays the cloud feature of the char/font combination in green.
  // The canonical features are drawn first to show which ones have no
  // matches in the cloud features.
  // Until the features window is destroyed, each click in the features window
  // will display the samples that have that feature in a separate window.
  void DisplaySamples(const char* unichar_str1, int cloud_font,
                      const char* unichar_str2, int canonical_font);
#endif  // GRAPHICS_DISABLED
  // NOTE(review): appears to compare test_classifier against old_classifier
  // on the internal samples -- confirm exact semantics in the implementation.
  void TestClassifierVOld(bool replicate_samples,
                          ShapeClassifier* test_classifier,
                          ShapeClassifier* old_classifier);
  // Tests the given test_classifier on the internal samples.
  // See TestClassifier for details.
  void TestClassifierOnSamples(CountTypes error_mode,
                               int report_level,
                               bool replicate_samples,
                               ShapeClassifier* test_classifier,
                               STRING* report_string);
  // Tests the given test_classifier on the given samples
  // error_mode indicates what counts as an error.
  // report_levels:
  // 0 = no output.
  // 1 = bottom-line error rate.
  // 2 = bottom-line error rate + time.
  // 3 = font-level error rate + time.
  // 4 = list of all errors + short classifier debug output on 16 errors.
  // 5 = list of all errors + short classifier debug output on 25 errors.
  // If replicate_samples is true, then the test is run on an extended test
  // sample including replicated and systematically perturbed samples.
  // If report_string is non-NULL, a summary of the results for each font
  // is appended to the report_string.
  double TestClassifier(CountTypes error_mode,
                        int report_level,
                        bool replicate_samples,
                        TrainingSampleSet* samples,
                        ShapeClassifier* test_classifier,
                        STRING* report_string);
  // Returns the average (in some sense) distance between the two given
  // shapes, which may contain multiple fonts and/or unichars.
  // This function is public to facilitate testing.
  float ShapeDistance(const ShapeTable& shapes, int s1, int s2);
 private:
  // Replaces samples that are always fragmented with the corresponding
  // fragment samples.
  void ReplaceFragmentedSamples();
  // Runs a hierarchical agglomerative clustering to merge shapes in the given
  // shape_table, while satisfying the given constraints:
  // * End with at least min_shapes left in shape_table,
  // * No shape shall have more than max_shape_unichars in it,
  // * Don't merge shapes where the distance between them exceeds max_dist.
  void ClusterShapes(int min_shapes, int max_shape_unichars,
                     float max_dist, ShapeTable* shape_table);
 private:
  // Normalization mode the trainer was constructed with.
  NormalizationMode norm_mode_;
  // Character set we are training for.
  UNICHARSET unicharset_;
  // Original feature space. Subspace mapping is contained in feature_map_.
  IntFeatureSpace feature_space_;
  // Main set of training samples.
  TrainingSampleSet samples_;
  // Junk (fragment/n-gram) samples, kept separate until IncludeJunk().
  TrainingSampleSet junk_samples_;
  // Samples reserved for verification rather than training.
  TrainingSampleSet verify_samples_;
  // Master shape table defines what fonts stay together until the leaves.
  ShapeTable master_shapes_;
  // Flat shape table has each unichar/font id pair in a separate shape.
  ShapeTable flat_shapes_;
  // Font metrics gathered from multiple files.
  FontInfoTable fontinfo_table_;
  // Array of xheights indexed by font ids in fontinfo_table_;
  GenericVector<inT32> xheights_;
  // Non-serialized data initialized by other means or used temporarily
  // during loading of training samples.
  // Number of different class labels in unicharset_.
  int charsetsize_;
  // Flag to indicate that we are running shape analysis and need fragments
  // fixing.
  bool enable_shape_anaylsis_;
  // Flag to indicate that sample replication is required.
  bool enable_replication_;
  // Array of classids of fragments that replace the correctly segmented chars.
  int* fragments_;
  // Classid of previous correctly segmented sample that was added.
  int prev_unichar_id_;
  // Debug output control.
  int debug_level_;
  // Feature map used to construct reduced feature spaces for compact
  // classifiers.
  IntFeatureMap feature_map_;
  // Vector of Pix pointers used for classifiers that need the image.
  // Indexed by page_num_ in the samples.
  // These images are owned by the trainer and need to be pixDestroyed.
  GenericVector<Pix*> page_images_;
  // Vector of filenames of loaded tr files.
  GenericVector<STRING> tr_filenames_;
};
} // namespace tesseract.
#endif
| C++ |
/******************************************************************************
** Filename: blobclass.c
** Purpose: High level blob classification and training routines.
** Author: Dan Johnson
** History: 7/21/89, DSJ, Created.
**
** (c) Copyright Hewlett-Packard Company, 1988.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
******************************************************************************/
/**----------------------------------------------------------------------------
Include Files and Type Defines
----------------------------------------------------------------------------**/
#include "blobclass.h"
#include "extract.h"
#include "efio.h"
#include "featdefs.h"
#include "callcpp.h"
#include <math.h>
#include <stdio.h>
#include <signal.h>
#define MAXFILENAME 80
#define MAXMATCHES 10
static const char kUnknownFontName[] = "UnknownFont";
STRING_VAR(classify_font_name, kUnknownFontName,
"Default font name to be used in training");
/**----------------------------------------------------------------------------
Global Data Definitions and Declarations
----------------------------------------------------------------------------**/
/* name of current image file being processed */
extern char imagefile[];
/**----------------------------------------------------------------------------
Public Code
----------------------------------------------------------------------------**/
/*---------------------------------------------------------------------------*/
// As all TBLOBs, Blob is in baseline normalized coords.
// See SetupBLCNDenorms in intfx.cpp for other args.
void LearnBlob(const FEATURE_DEFS_STRUCT &FeatureDefs, const STRING& filename,
               TBLOB * Blob, const DENORM& bl_denorm, const DENORM& cn_denorm,
               const INT_FX_RESULT_STRUCT& fx_info, const char* BlobText) {
  /*
  ** Parameters:
  **   Blob      blob whose features are to be learned
  **   BlobText  text (class label) that corresponds to Blob
  ** Globals:
  **   classify_font_name  name of font currently being trained on
  ** Operation:
  **   Extract features from the specified blob and append them, labeled
  **   with the font name and BlobText, to a feature file named after the
  **   page image (filename + TRAIN_SUFFIX).
  ** NOTE(review): FeatureFile is function-static, so it is opened once
  **   (named after the first filename seen) and never closed; later calls
  **   keep appending to that same file even if filename changes. Confirm
  **   this one-output-file-per-process behavior is still intended.
  */
#define TRAIN_SUFFIX ".tr"
  static FILE *FeatureFile = NULL;
  STRING Filename(filename);
  // If no fontname was set, try to extract it from the filename
  STRING CurrFontName = classify_font_name;
  if (CurrFontName == kUnknownFontName) {
    // filename is expected to be of the form [lang].[fontname].exp[num]
    // The [lang], [fontname] and [num] fields should not have '.' characters.
    const char *basename = strrchr(filename.string(), '/');
    const char *firstdot = strchr(basename ? basename : filename.string(), '.');
    const char *lastdot = strrchr(filename.string(), '.');
    if (firstdot != lastdot && firstdot != NULL && lastdot != NULL) {
      ++firstdot;
      CurrFontName = firstdot;
      // Truncate at the last dot, leaving just the [fontname] field.
      CurrFontName[lastdot - firstdot] = '\0';
    }
  }
  // if a feature file is not yet open, open it
  // the name of the file is the name of the image plus TRAIN_SUFFIX
  if (FeatureFile == NULL) {
    Filename += TRAIN_SUFFIX;
    FeatureFile = Efopen(Filename.string(), "wb");
    cprintf("TRAINING ... Font name = %s\n", CurrFontName.string());
  }
  // Delegate the actual feature extraction and writing to the overload
  // below that takes an already-open FILE*.
  LearnBlob(FeatureDefs, FeatureFile, Blob, bl_denorm, cn_denorm, fx_info,
            BlobText, CurrFontName.string());
} // LearnBlob
// Extracts features from Blob using FeatureDefs and appends them to the
// already-open FeatureFile, labeled with FontName and BlobText (the blob's
// class text). As with all TBLOBs, Blob is in baseline normalized coords.
// Logs an error and writes nothing if feature extraction fails or yields
// an invalid character description.
void LearnBlob(const FEATURE_DEFS_STRUCT &FeatureDefs, FILE* FeatureFile,
               TBLOB* Blob, const DENORM& bl_denorm, const DENORM& cn_denorm,
               const INT_FX_RESULT_STRUCT& fx_info,
               const char* BlobText, const char* FontName) {
  CHAR_DESC CharDesc;
  ASSERT_HOST(FeatureFile != NULL);
  CharDesc = ExtractBlobFeatures(FeatureDefs, bl_denorm, cn_denorm, fx_info,
                                 Blob);
  if (CharDesc == NULL) {
    // Error message previously misspelled the function name ("LearnBLob").
    cprintf("LearnBlob: CharDesc was NULL. Aborting.\n");
    return;
  }
  if (ValidCharDescription(FeatureDefs, CharDesc)) {
    // label the features with a class name and font name
    fprintf(FeatureFile, "\n%s %s\n", FontName, BlobText);
    // write micro-features to file and clean up
    WriteCharDescription(FeatureDefs, FeatureFile, CharDesc);
  } else {
    tprintf("Blob learned was invalid!\n");
  }
  FreeCharDescription(CharDesc);
} // LearnBlob
| C++ |
/**********************************************************************
* File: devanagari_processing.cpp
* Description: Methods to process images containing devanagari symbols,
* prior to classification.
* Author: Shobhit Saxena
* Created: Mon Nov 17 20:26:01 IST 2008
*
* (C) Copyright 2008, Google Inc.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
#ifdef HAVE_CONFIG_H
#include "config_auto.h"
#endif
#include "devanagari_processing.h"
#include "allheaders.h"
#include "tordmain.h"
#include "statistc.h"
// Flags controlling the debugging information for shiro-rekha splitting
// strategies.
INT_VAR(devanagari_split_debuglevel, 0,
"Debug level for split shiro-rekha process.");
BOOL_VAR(devanagari_split_debugimage, 0,
"Whether to create a debug image for split shiro-rekha process.");
namespace tesseract {
// Initializes the splitter with no images, no page segmentation info, and
// splitting disabled for both the pageseg and OCR passes.
ShiroRekhaSplitter::ShiroRekhaSplitter() {
  orig_pix_ = NULL;
  splitted_image_ = NULL;
  debug_image_ = NULL;
  segmentation_block_list_ = NULL;
  global_xheight_ = kUnspecifiedXheight;
  perform_close_ = false;
  pageseg_split_strategy_ = NO_SPLIT;
  ocr_split_strategy_ = NO_SPLIT;
}
// Releases all owned images and resets state via Clear().
ShiroRekhaSplitter::~ShiroRekhaSplitter() {
  Clear();
}
// Releases every owned pix and restores all settings to their defaults.
// The segmentation block list is not owned, so it is only unlinked.
void ShiroRekhaSplitter::Clear() {
  pixDestroy(&orig_pix_);
  pixDestroy(&splitted_image_);
  pixDestroy(&debug_image_);
  segmentation_block_list_ = NULL;
  pageseg_split_strategy_ = NO_SPLIT;
  ocr_split_strategy_ = NO_SPLIT;
  global_xheight_ = kUnspecifiedXheight;
  perform_close_ = false;
}
// This method dumps a debug image to the specified location.
void ShiroRekhaSplitter::DumpDebugImage(const char* filename) const {
pixWrite(filename, debug_image_, IFF_PNG);
}
// On setting the input image, a clone of it is owned by this class.
void ShiroRekhaSplitter::set_orig_pix(Pix* pix) {
if (orig_pix_) {
pixDestroy(&orig_pix_);
}
orig_pix_ = pixClone(pix);
}
// Top-level method to perform splitting based on current settings.
// Returns true if a split was actually performed.
// split_for_pageseg should be true if the splitting is being done prior to
// page segmentation. This mode uses the flag
// pageseg_devanagari_split_strategy to determine the splitting strategy.
bool ShiroRekhaSplitter::Split(bool split_for_pageseg) {
SplitStrategy split_strategy = split_for_pageseg ? pageseg_split_strategy_ :
ocr_split_strategy_;
if (split_strategy == NO_SPLIT) {
return false; // Nothing to do.
}
ASSERT_HOST(split_strategy == MINIMAL_SPLIT ||
split_strategy == MAXIMAL_SPLIT);
ASSERT_HOST(orig_pix_);
if (devanagari_split_debuglevel > 0) {
tprintf("Splitting shiro-rekha ...\n");
tprintf("Split strategy = %s\n",
split_strategy == MINIMAL_SPLIT ? "Minimal" : "Maximal");
tprintf("Initial pageseg available = %s\n",
segmentation_block_list_ ? "yes" : "no");
}
// Create a copy of original image to store the splitting output.
pixDestroy(&splitted_image_);
splitted_image_ = pixCopy(NULL, orig_pix_);
// Initialize debug image if required.
if (devanagari_split_debugimage) {
pixDestroy(&debug_image_);
debug_image_ = pixConvertTo32(orig_pix_);
}
// Determine all connected components in the input image. A close operation
// may be required prior to this, depending on the current settings.
Pix* pix_for_ccs = pixClone(orig_pix_);
if (perform_close_ && global_xheight_ != kUnspecifiedXheight &&
!segmentation_block_list_) {
if (devanagari_split_debuglevel > 0) {
tprintf("Performing a global close operation..\n");
}
// A global measure is available for xheight, but no local information
// exists.
pixDestroy(&pix_for_ccs);
pix_for_ccs = pixCopy(NULL, orig_pix_);
PerformClose(pix_for_ccs, global_xheight_);
}
Pixa* ccs;
Boxa* tmp_boxa = pixConnComp(pix_for_ccs, &ccs, 8);
boxaDestroy(&tmp_boxa);
pixDestroy(&pix_for_ccs);
// Iterate over all connected components. Get their bounding boxes and clip
// out the image regions corresponding to these boxes from the original image.
// Conditionally run splitting on each of them.
Boxa* regions_to_clear = boxaCreate(0);
for (int i = 0; i < pixaGetCount(ccs); ++i) {
Box* box = ccs->boxa->box[i];
Pix* word_pix = pixClipRectangle(orig_pix_, box, NULL);
ASSERT_HOST(word_pix);
int xheight = GetXheightForCC(box);
if (xheight == kUnspecifiedXheight && segmentation_block_list_ &&
devanagari_split_debugimage) {
pixRenderBoxArb(debug_image_, box, 1, 255, 0, 0);
}
// If some xheight measure is available, attempt to pre-eliminate small
// blobs from the shiro-rekha process. This is primarily to save the CCs
// corresponding to punctuation marks/small dots etc which are part of
// larger graphemes.
if (xheight == kUnspecifiedXheight ||
(box->w > xheight / 3 && box->h > xheight / 2)) {
SplitWordShiroRekha(split_strategy, word_pix, xheight,
box->x, box->y, regions_to_clear);
} else if (devanagari_split_debuglevel > 0) {
tprintf("CC dropped from splitting: %d,%d (%d, %d)\n",
box->x, box->y, box->w, box->h);
}
pixDestroy(&word_pix);
}
// Actually clear the boxes now.
for (int i = 0; i < boxaGetCount(regions_to_clear); ++i) {
Box* box = boxaGetBox(regions_to_clear, i, L_CLONE);
pixClearInRect(splitted_image_, box);
boxDestroy(&box);
}
boxaDestroy(®ions_to_clear);
pixaDestroy(&ccs);
if (devanagari_split_debugimage) {
DumpDebugImage(split_for_pageseg ? "pageseg_split_debug.png" :
"ocr_split_debug.png");
}
return true;
}
// Method to perform a close operation on the input image. The xheight
// estimate decides the size of sel used.
// The close is done in-place (src == dest) with a brick sel of size
// (xheight/8) x (xheight/3) — presumably to bridge small breaks along the
// shiro-rekha; confirm against callers.
void ShiroRekhaSplitter::PerformClose(Pix* pix, int xheight_estimate) {
  pixCloseBrick(pix, pix, xheight_estimate / 8, xheight_estimate / 3);
}
// This method resolves the cc bbox to a particular row and returns the row's
// xheight. Returns global_xheight_ when no page segmentation is available,
// and kUnspecifiedXheight when no row claims the box.
int ShiroRekhaSplitter::GetXheightForCC(Box* cc_bbox) {
  if (!segmentation_block_list_) {
    return global_xheight_;
  }
  // Compute the box coordinates in Tesseract's coordinate system.
  // (The y-coordinates are flipped about the image height, as the two
  // coordinate systems have opposite vertical origins.)
  TBOX bbox(cc_bbox->x,
            pixGetHeight(orig_pix_) - cc_bbox->y - cc_bbox->h - 1,
            cc_bbox->x + cc_bbox->w,
            pixGetHeight(orig_pix_) - cc_bbox->y - 1);
  // Iterate over all blocks.
  BLOCK_IT block_it(segmentation_block_list_);
  for (block_it.mark_cycle_pt(); !block_it.cycled_list(); block_it.forward()) {
    BLOCK* block = block_it.data();
    // Iterate over all rows in the block.
    ROW_IT row_it(block->row_list());
    for (row_it.mark_cycle_pt(); !row_it.cycled_list(); row_it.forward()) {
      ROW* row = row_it.data();
      if (!row->bounding_box().major_overlap(bbox)) {
        continue;
      }
      // Row could be skewed, warped, etc. Use the position of the box to
      // determine the baseline position of the row for that x-coordinate.
      // Create a square TBOX whose baseline's mid-point lies at this point
      // and side is row's xheight. Take the overlap of this box with the input
      // box and check if it is a 'major overlap'. If so, this box lies in this
      // row. In that case, return the xheight for this row.
      float box_middle = 0.5 * (bbox.left() + bbox.right());
      int baseline = static_cast<int>(row->base_line(box_middle) + 0.5);
      TBOX test_box(box_middle - row->x_height() / 2,
                    baseline,
                    box_middle + row->x_height() / 2,
                    static_cast<int>(baseline + row->x_height()));
      // Compute overlap. If it is a major overlap, this is the right row.
      if (bbox.major_overlap(test_box)) {
        return row->x_height();
      }
    }
  }
  // No row found for this bbox.
  return kUnspecifiedXheight;
}
// Returns a list of regions (boxes) which should be cleared in the original
// image so as to perform shiro-rekha splitting. Pix is assumed to carry one
// (or less) word only. Xheight measure could be the global estimate, the row
// estimate, or unspecified. If unspecified, over splitting may occur, since a
// conservative estimate of stroke width along with an associated multiplier
// is used in its place. It is advisable to have a specified xheight when
// splitting for classification/training.
// A vertical projection histogram of all the on-pixels in the input pix is
// computed. The maxima of this histogram is regarded as an approximate location
// of the shiro-rekha. By descending on the maxima's peak on both sides,
// stroke width of shiro-rekha is estimated.
// A horizontal projection histogram is computed for a sub-image of the input
// image, which extends from just below the shiro-rekha down to a certain
// leeway. The leeway depends on the input xheight, if provided, else a
// conservative multiplier on approximate stroke width is used (which may lead
// to over-splitting).
void ShiroRekhaSplitter::SplitWordShiroRekha(SplitStrategy split_strategy,
                                             Pix* pix,
                                             int xheight,
                                             int word_left,
                                             int word_top,
                                             Boxa* regions_to_clear) {
  if (split_strategy == NO_SPLIT) {
    return;
  }
  int width = pixGetWidth(pix);
  int height = pixGetHeight(pix);
  // Statistically determine the yextents of the shiro-rekha.
  int shirorekha_top, shirorekha_bottom, shirorekha_ylevel;
  GetShiroRekhaYExtents(pix, &shirorekha_top, &shirorekha_bottom,
                        &shirorekha_ylevel);
  // Since the shiro rekha is also a stroke, its width is equal to the stroke
  // width.
  int stroke_width = shirorekha_bottom - shirorekha_top + 1;
  // Some safeguards to protect CCs we do not want to be split.
  // These are particularly useful when the word wasn't eliminated earlier
  // because xheight information was unavailable.
  if (shirorekha_ylevel > height / 2) {
    // Shirorekha shouldn't be in the bottom half of the word.
    if (devanagari_split_debuglevel > 0) {
      tprintf("Skipping splitting CC at (%d, %d): shirorekha in lower half..\n",
              word_left, word_top);
    }
    return;
  }
  if (stroke_width > height / 3) {
    // Even the boldest of fonts shouldn't do this.
    if (devanagari_split_debuglevel > 0) {
      tprintf("Skipping splitting CC at (%d, %d): stroke width too huge..\n",
              word_left, word_top);
    }
    return;
  }
  // Clear the ascender and descender regions of the word.
  // Obtain a vertical projection histogram for the resulting image.
  // The cleared band straddles the shiro-rekha: a third of a stroke width
  // above its top, 5/3 stroke widths tall.
  Box* box_to_clear = boxCreate(0, shirorekha_top - stroke_width / 3,
                                width, 5 * stroke_width / 3);
  Pix* word_in_xheight = pixCopy(NULL, pix);
  pixClearInRect(word_in_xheight, box_to_clear);
  // Also clear any pixels which are below shirorekha_bottom + some leeway.
  // The leeway is set to xheight if the information is available, else it is a
  // multiplier applied to the stroke width.
  int leeway_to_keep = stroke_width * 3;
  if (xheight != kUnspecifiedXheight) {
    // This is because the xheight-region typically includes the shiro-rekha
    // inside it, i.e., the top of the xheight range corresponds to the top of
    // shiro-rekha.
    leeway_to_keep = xheight - stroke_width;
  }
  // Reuse the same box object to clear everything below the kept band.
  box_to_clear->y = shirorekha_bottom + leeway_to_keep;
  box_to_clear->h = height - box_to_clear->y;
  pixClearInRect(word_in_xheight, box_to_clear);
  boxDestroy(&box_to_clear);
  PixelHistogram vert_hist;
  vert_hist.ConstructVerticalCountHist(word_in_xheight);
  pixDestroy(&word_in_xheight);
  // If the number of black pixel in any column of the image is less than a
  // fraction of the stroke width, treat it as noise / a stray mark. Perform
  // these changes inside the vert_hist data itself, as that is used later on as
  // a bit vector for the final split decision at every column.
  for (int i = 0; i < width; ++i) {
    if (vert_hist.hist()[i] <= stroke_width / 4)
      vert_hist.hist()[i] = 0;
    else
      vert_hist.hist()[i] = 1;
  }
  // In order to split the line at any point, we make sure that the width of the
  // gap is atleast half the stroke width.
  // Scan the now-binarized column vector: i walks the columns,
  // cur_component_width tracks the width of the run of occupied columns seen
  // since the last split.
  int i = 0;
  int cur_component_width = 0;
  while (i < width) {
    if (!vert_hist.hist()[i]) {
      // Measure the width j of this run of empty columns.
      int j = 0;
      while (i + j < width && !vert_hist.hist()[i+j])
        ++j;
      if (j >= stroke_width / 2 && cur_component_width >= stroke_width / 2) {
        // Perform a shiro-rekha split. The intervening region lies from i to
        // i+j-1.
        // A minimal single-pixel split makes the estimation of intra- and
        // inter-word spacing easier during page layout analysis,
        // whereas a maximal split may be needed for OCR, depending on
        // how the engine was trained.
        bool minimal_split = (split_strategy == MINIMAL_SPLIT);
        int split_width = minimal_split ? 1 : j;
        int split_left = minimal_split ? i + (j / 2) - (split_width / 2) : i;
        // A minimal split at the very edge of the word is skipped.
        if (!minimal_split || (i != 0 && i + j != width)) {
          // The split box is expressed in original-image coordinates via the
          // word_left/word_top offsets.
          Box* box_to_clear =
              boxCreate(word_left + split_left,
                        word_top + shirorekha_top - stroke_width / 3,
                        split_width,
                        5 * stroke_width / 3);
          if (box_to_clear) {
            boxaAddBox(regions_to_clear, box_to_clear, L_CLONE);
            // Mark this in the debug image if needed.
            if (devanagari_split_debugimage) {
              pixRenderBoxArb(debug_image_, box_to_clear, 1, 128, 255, 128);
            }
            boxDestroy(&box_to_clear);
            cur_component_width = 0;
          }
        }
      }
      i += j;
    } else {
      ++i;
      ++cur_component_width;
    }
  }
}
// Refreshes the words in the segmentation block list by using blobs in the
// input block list.
// The segmentation block list must be set.
void ShiroRekhaSplitter::RefreshSegmentationWithNewBlobs(
    C_BLOB_LIST* new_blobs) {
  // The segmentation block list must have been specified.
  ASSERT_HOST(segmentation_block_list_);
  if (devanagari_split_debuglevel > 0) {
    tprintf("Before refreshing blobs:\n");
    PrintSegmentationStats(segmentation_block_list_);
    tprintf("New Blobs found: %d\n", new_blobs->length());
  }
  C_BLOB_LIST not_found_blobs;
  // Fixed: the source text referenced "¬_found_blobs" (HTML-entity mojibake
  // of "&not"); the intended argument is &not_found_blobs, collected only
  // when debug plotting is active.
  RefreshWordBlobsFromNewBlobs(segmentation_block_list_,
                               new_blobs,
                               ((devanagari_split_debugimage && debug_image_) ?
                                &not_found_blobs : NULL));
  if (devanagari_split_debuglevel > 0) {
    tprintf("After refreshing blobs:\n");
    PrintSegmentationStats(segmentation_block_list_);
  }
  if (devanagari_split_debugimage && debug_image_) {
    // Plot out the original blobs for which no match was found in the new
    // all_blobs list.
    C_BLOB_IT not_found_it(&not_found_blobs);
    for (not_found_it.mark_cycle_pt(); !not_found_it.cycled_list();
         not_found_it.forward()) {
      C_BLOB* not_found = not_found_it.data();
      TBOX not_found_box = not_found->bounding_box();
      Box* box_to_plot = GetBoxForTBOX(not_found_box);
      pixRenderBoxArb(debug_image_, box_to_plot, 1, 255, 0, 255);
      boxDestroy(&box_to_plot);
    }
    // Plot out the blobs unused from all blobs.
    C_BLOB_IT all_blobs_it(new_blobs);
    for (all_blobs_it.mark_cycle_pt(); !all_blobs_it.cycled_list();
         all_blobs_it.forward()) {
      C_BLOB* a_blob = all_blobs_it.data();
      Box* box_to_plot = GetBoxForTBOX(a_blob->bounding_box());
      pixRenderBoxArb(debug_image_, box_to_plot, 3, 0, 127, 0);
      boxDestroy(&box_to_plot);
    }
  }
}
// Returns a new box object for the corresponding TBOX, based on the original
// image's coordinate system. The caller owns the returned Box.
// (The y-coordinate is flipped about the image height to convert from
// Tesseract's bottom-left origin.)
Box* ShiroRekhaSplitter::GetBoxForTBOX(const TBOX& tbox) const {
  return boxCreate(tbox.left(), pixGetHeight(orig_pix_) - tbox.top() - 1,
                   tbox.width(), tbox.height());
}
// This method returns the computed mode-height of blobs in the pix.
// It also prunes very small blobs from calculation.
int ShiroRekhaSplitter::GetModeHeight(Pix* pix) {
  // 8-connected components; only their bounding boxes are needed here.
  Boxa* boxa = pixConnComp(pix, NULL, 8);
  STATS heights(0, pixGetHeight(pix));
  heights.clear();
  for (int i = 0; i < boxaGetCount(boxa); ++i) {
    Box* box = boxaGetBox(boxa, i, L_CLONE);
    // Keep components at least 3px tall OR 3px wide; anything smaller in
    // both dimensions is treated as noise.
    if (box->h >= 3 || box->w >= 3) {
      heights.add(box->h, 1);
    }
    boxDestroy(&box);
  }
  boxaDestroy(&boxa);
  return heights.mode();
}
// This method returns y-extents of the shiro-rekha computed from the input
// word image. Any of the output pointers may be NULL to skip that output.
void ShiroRekhaSplitter::GetShiroRekhaYExtents(Pix* word_pix,
                                               int* shirorekha_top,
                                               int* shirorekha_bottom,
                                               int* shirorekha_ylevel) {
  // Compute a histogram from projecting the word on a vertical line.
  PixelHistogram hist_horiz;
  hist_horiz.ConstructHorizontalCountHist(word_pix);
  // Get the ylevel where the top-line exists. This is basically the global
  // maxima in the horizontal histogram.
  int topline_onpixel_count = 0;
  int topline_ylevel = hist_horiz.GetHistogramMaximum(&topline_onpixel_count);
  // Get the upper and lower extents of the shiro rekha.
  // Rows are considered part of the shiro-rekha while their on-pixel count
  // stays at or above 70% of the peak count.
  int thresh = (topline_onpixel_count * 70) / 100;
  int ulimit = topline_ylevel;
  int llimit = topline_ylevel;
  while (ulimit > 0 && hist_horiz.hist()[ulimit] >= thresh)
    --ulimit;
  while (llimit < pixGetHeight(word_pix) && hist_horiz.hist()[llimit] >= thresh)
    ++llimit;
  if (shirorekha_top) *shirorekha_top = ulimit;
  if (shirorekha_bottom) *shirorekha_bottom = llimit;
  if (shirorekha_ylevel) *shirorekha_ylevel = topline_ylevel;
}
// This method returns the global-maxima for the histogram. The frequency of
// the global maxima is returned in count, if specified.
// Returns 0 (with *count = 0) for an empty histogram.
int PixelHistogram::GetHistogramMaximum(int* count) const {
  // Guard against an empty/uninitialized histogram: the original code read
  // hist_[0] unconditionally, which is out of bounds when length_ == 0.
  if (length_ <= 0) {
    if (count) {
      *count = 0;
    }
    return 0;
  }
  int best_value = 0;
  for (int i = 1; i < length_; ++i) {
    if (hist_[i] > hist_[best_value]) {
      best_value = i;
    }
  }
  if (count) {
    *count = hist_[best_value];
  }
  return best_value;
}
// Methods to construct histograms from images.
// Builds a per-column count of ON pixels for the given 1-bpp pix.
// Any previous histogram contents are released first.
void PixelHistogram::ConstructVerticalCountHist(Pix* pix) {
  Clear();
  const int width = pixGetWidth(pix);
  const int height = pixGetHeight(pix);
  // One zero-initialized bucket per image column.
  length_ = width;
  hist_ = new int[width]();
  const int wpl = pixGetWpl(pix);
  l_uint32* data = pixGetData(pix);
  // Scan the raster row by row, bumping the bucket of every ON column.
  for (int y = 0; y < height; ++y) {
    l_uint32* line = data + y * wpl;
    for (int x = 0; x < width; ++x) {
      if (GET_DATA_BIT(line, x))
        ++hist_[x];
    }
  }
}
// Builds a per-row count of ON pixels for the given pix.
// Any previous histogram contents are released first.
void PixelHistogram::ConstructHorizontalCountHist(Pix* pix) {
  Clear();
  // Let Leptonica count the ON pixels in each row, then copy the counts
  // into our own int array.
  Numa* counts = pixCountPixelsByRow(pix, NULL);
  length_ = numaGetCount(counts);
  hist_ = new int[length_];
  for (int row = 0; row < length_; ++row) {
    l_int32 count = 0;
    numaGetIValue(counts, row, &count);
    hist_[row] = count;
  }
  numaDestroy(&counts);
}
} // namespace tesseract.
| C++ |
/**********************************************************************
* File: tovars.cpp (Formerly to_vars.c)
* Description: Variables used by textord.
* Author: Ray Smith
* Created: Tue Aug 24 16:55:02 BST 1993
*
* (C) Copyright 1993, Hewlett-Packard Ltd.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
#include "tovars.h"
#include "params.h"
#define EXTERN
// Parameter definitions for textord. Only the help strings below were
// touched: typos "pixed pitch" -> "fixed pitch" and "inital" -> "initial".
EXTERN BOOL_VAR (textord_show_initial_words, FALSE, "Display separate words");
EXTERN BOOL_VAR (textord_show_new_words, FALSE, "Display separate words");
EXTERN BOOL_VAR (textord_show_fixed_words, FALSE,
"Display forced fixed pitch words");
EXTERN BOOL_VAR (textord_blocksall_fixed, FALSE, "Moan about prop blocks");
EXTERN BOOL_VAR (textord_blocksall_prop, FALSE,
"Moan about fixed pitch blocks");
EXTERN BOOL_VAR (textord_blocksall_testing, FALSE, "Dump stats when moaning");
EXTERN BOOL_VAR (textord_test_mode, FALSE, "Do current test");
EXTERN INT_VAR (textord_dotmatrix_gap, 3,
"Max pixel gap for broken fixed pitch");
EXTERN INT_VAR (textord_debug_block, 0, "Block to do debug on");
EXTERN INT_VAR (textord_pitch_range, 2, "Max range test on pitch");
EXTERN double_VAR (textord_wordstats_smooth_factor, 0.05,
"Smoothing gap stats");
EXTERN double_VAR (textord_width_smooth_factor, 0.10,
"Smoothing width stats");
EXTERN double_VAR (textord_words_width_ile, 0.4,
"Ile of blob widths for space est");
EXTERN double_VAR (textord_words_maxspace, 4.0, "Multiple of xheight");
EXTERN double_VAR (textord_words_default_maxspace, 3.5,
"Max believable third space");
EXTERN double_VAR (textord_words_default_minspace, 0.6,
"Fraction of xheight");
EXTERN double_VAR (textord_words_min_minspace, 0.3, "Fraction of xheight");
EXTERN double_VAR (textord_words_default_nonspace, 0.2,
"Fraction of xheight");
EXTERN double_VAR (textord_words_initial_lower, 0.25,
"Max initial cluster size");
EXTERN double_VAR (textord_words_initial_upper, 0.15,
"Min initial cluster spacing");
EXTERN double_VAR (textord_words_minlarge, 0.75,
"Fraction of valid gaps needed");
EXTERN double_VAR (textord_words_pitchsd_threshold, 0.040,
"Pitch sync threshold");
EXTERN double_VAR (textord_words_def_fixed, 0.016,
"Threshold for definite fixed");
EXTERN double_VAR (textord_words_def_prop, 0.090,
"Threshold for definite prop");
EXTERN INT_VAR (textord_words_veto_power, 5,
"Rows required to outvote a veto");
EXTERN double_VAR (textord_pitch_rowsimilarity, 0.08,
"Fraction of xheight for sameness");
EXTERN BOOL_VAR (textord_pitch_scalebigwords, FALSE,
"Scale scores on big words");
EXTERN double_VAR (words_initial_lower, 0.5, "Max initial cluster size");
EXTERN double_VAR (words_initial_upper, 0.15, "Min initial cluster spacing");
EXTERN double_VAR (words_default_prop_nonspace, 0.25, "Fraction of xheight");
EXTERN double_VAR (words_default_fixed_space, 0.75, "Fraction of xheight");
EXTERN double_VAR (words_default_fixed_limit, 0.6, "Allowed size variance");
EXTERN double_VAR (textord_words_definite_spread, 0.30,
"Non-fuzzy spacing region");
EXTERN double_VAR (textord_spacesize_ratiofp, 2.8,
"Min ratio space/nonspace");
EXTERN double_VAR (textord_spacesize_ratioprop, 2.0,
"Min ratio space/nonspace");
EXTERN double_VAR (textord_fpiqr_ratio, 1.5, "Pitch IQR/Gap IQR threshold");
EXTERN double_VAR (textord_max_pitch_iqr, 0.20, "Xh fraction noise in pitch");
EXTERN double_VAR (textord_fp_min_width, 0.5, "Min width of decent blobs");
| C++ |
/**********************************************************************
* File: sortflts.cpp (Formerly sfloats.c)
* Description: Code to maintain a sorted list of floats.
* Author: Ray Smith
* Created: Mon Oct 4 16:15:40 BST 1993
*
* (C) Copyright 1993, Hewlett-Packard Ltd.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
#include "sortflts.h"
ELISTIZE (SORTED_FLOAT)
/**
 * @name SORTED_FLOATS::add
 *
 * Add a new entry to the sorted list of floats.
 * The list is kept in ascending order of value: the new entry is inserted
 * before the first existing entry whose value is not less than it, or
 * appended at the end if its value exceeds all existing entries.
 */
void SORTED_FLOATS::add(  //add new entry
                        float value,
                        inT32 key) {
  SORTED_FLOAT *new_float = new SORTED_FLOAT (value, key);
  if (list.empty ())
    it.add_after_stay_put (new_float);
  else {
    it.move_to_first ();
    // Advance to the first entry >= value (or the last entry).
    while (!it.at_last () && it.data ()->entry < value)
      it.forward ();
    if (it.data ()->entry < value)
      it.add_after_stay_put (new_float);  // value is largest: append at end
    else
      it.add_before_stay_put (new_float);
  }
}
/**
 * @name SORTED_FLOATS::remove
 *
 * Remove an entry from the sorted list of floats.
 * Only the first entry whose key (address) matches is removed; if no
 * entry matches, the list is left unchanged.
 */
void SORTED_FLOATS::remove(  //remove the entry
                           inT32 key) {
  if (!list.empty ()) {
    for (it.mark_cycle_pt (); !it.cycled_list (); it.forward ()) {
      if (it.data ()->address == key) {
        delete it.extract ();
        return;
      }
    }
  }
}
/**
 * @name SORTED_FLOATS::operator[]
 *
 * Return the floating point value of the given index into the list.
 * The index is relative to the first (smallest) entry.
 */
float
SORTED_FLOATS::operator[] (  //get an entry
inT32 index                  //to list
) {
  it.move_to_first ();
  return it.data_relative (index)->entry;
}
| C++ |
///////////////////////////////////////////////////////////////////////
// File: strokewidth.h
// Description: Subclass of BBGrid to find uniformity of strokewidth.
// Author: Ray Smith
// Created: Mon Mar 31 16:17:01 PST 2008
//
// (C) Copyright 2008, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#ifndef TESSERACT_TEXTORD_STROKEWIDTH_H__
#define TESSERACT_TEXTORD_STROKEWIDTH_H__
#include "blobbox.h" // BlobNeighourDir.
#include "blobgrid.h" // Base class.
#include "colpartitiongrid.h"
#include "textlineprojection.h"
class DENORM;
class ScrollView;
class TO_BLOCK;
namespace tesseract {
class ColPartition_LIST;
class TabFind;
class TextlineProjection;
// Misc enums to clarify bool arguments for direction-controlling args.
enum LeftOrRight {
  LR_LEFT,   // Operate on / look at the left side.
  LR_RIGHT   // Operate on / look at the right side.
};
/**
 * The StrokeWidth class holds all the normal and large blobs.
 * It is used to find good large blobs and move them to the normal blobs
 * by virtue of having a reasonable strokewidth compatible neighbour.
 */
class StrokeWidth : public BlobGrid {
 public:
  StrokeWidth(int gridsize, const ICOORD& bleft, const ICOORD& tright);
  virtual ~StrokeWidth();
  // Sets the neighbours member of the medium-sized blobs in the block.
  // Searches on 4 sides of each blob for similar-sized, similar-strokewidth
  // blobs and sets pointers to the good neighbours.
  void SetNeighboursOnMediumBlobs(TO_BLOCK* block);
  // Sets the neighbour/textline writing direction members of the medium
  // and large blobs with optional repair of broken CJK characters first.
  // Repair of broken CJK is needed here because broken CJK characters
  // can fool the textline direction detection algorithm.
  void FindTextlineDirectionAndFixBrokenCJK(bool cjk_merge,
                                            TO_BLOCK* input_block);
  // To save computation, the process of generating partitions is broken
  // into the following 4 steps:
  // TestVerticalTextDirection
  // CorrectForRotation (used only if a rotation is to be applied)
  // FindLeaderPartitions
  // GradeBlobsIntoPartitions.
  // These functions are all required, in sequence, except for
  // CorrectForRotation, which is not needed if no rotation is applied.
  // Types all the blobs as vertical or horizontal text or unknown and
  // returns true if the majority are vertical.
  // If the blobs are rotated, it is necessary to call CorrectForRotation
  // after rotating everything, otherwise the work done here will be enough.
  // If osd_blobs is not null, a list of blobs from the dominant textline
  // direction are returned for use in orientation and script detection.
  // find_vertical_text_ratio should be textord_tabfind_vertical_text_ratio.
  bool TestVerticalTextDirection(double find_vertical_text_ratio,
                                 TO_BLOCK* block,
                                 BLOBNBOX_CLIST* osd_blobs);
  // Corrects the data structures for the given rotation.
  void CorrectForRotation(const FCOORD& rerotation,
                          ColPartitionGrid* part_grid);
  // Finds leader partitions and inserts them into the give grid.
  void FindLeaderPartitions(TO_BLOCK* block,
                            ColPartitionGrid* part_grid);
  // Finds and marks noise those blobs that look like bits of vertical lines
  // that would otherwise screw up layout analysis.
  void RemoveLineResidue(ColPartition_LIST* big_part_list);
  // Types all the blobs as vertical text or horizontal text or unknown and
  // puts them into initial ColPartitions in the supplied part_grid.
  // rerotation determines how to get back to the image coordinates from the
  // blob coordinates (since they may have been rotated for vertical text).
  // block is the single block for the whole page or rectangle to be OCRed.
  // nontext_pix (full-size), is a binary mask used to prevent merges across
  // photo/text boundaries. It is not kept beyond this function.
  // denorm provides a mapping back to the image from the current blob
  // coordinate space.
  // projection provides a measure of textline density over the image and
  // provides functions to assist with diacritic detection. It should be a
  // pointer to a new TextlineProjection, and will be setup here.
  // part_grid is the output grid of textline partitions.
  // Large blobs that cause overlap are put in separate partitions and added
  // to the big_parts list.
  void GradeBlobsIntoPartitions(const FCOORD& rerotation,
                                TO_BLOCK* block,
                                Pix* nontext_pix,
                                const DENORM* denorm,
                                bool cjk_script,
                                TextlineProjection* projection,
                                ColPartitionGrid* part_grid,
                                ColPartition_LIST* big_parts);
  // Handles a click event in a display window. (Debug UI hook.)
  virtual void HandleClick(int x, int y);

 private:
  // ---- Implementation helpers. ----
  // Computes the noise_density_ by summing the number of elements in a
  // neighbourhood of each grid cell.
  void ComputeNoiseDensity(TO_BLOCK* block, TabFind* line_grid);
  // Detects and marks leader dots/dashes.
  // Leaders are horizontal chains of small or noise blobs that look
  // monospace according to ColPartition::MarkAsLeaderIfMonospaced().
  // Detected leaders become the only occupants of the block->small_blobs list.
  // Non-leader small blobs get moved to the blobs list.
  // Non-leader noise blobs remain singletons in the noise list.
  // All small and noise blobs in high density regions are marked BTFT_NONTEXT.
  // block is the single block for the whole page or rectangle to be OCRed.
  // leader_parts is the output.
  void FindLeadersAndMarkNoise(TO_BLOCK* block,
                               ColPartition_LIST* leader_parts);
  /** Inserts the block blobs (normal and large) into this grid.
   * Blobs remain owned by the block. */
  void InsertBlobs(TO_BLOCK* block);
  // Fix broken CJK characters, using the fake joined blobs mechanism.
  // Blobs are really merged, ie the master takes all the outlines and the
  // others are deleted.
  // Returns true if sufficient blobs are merged that it may be worth running
  // again, due to a better estimate of character size.
  bool FixBrokenCJK(TO_BLOCK* block);
  // Collect blobs that overlap or are within max_dist of the input bbox.
  // Return them in the list of blobs and expand the bbox to be the union
  // of all the boxes. not_this is excluded from the search, as are blobs
  // that cause the merged box to exceed max_size in either dimension.
  void AccumulateOverlaps(const BLOBNBOX* not_this, bool debug,
                          int max_size, int max_dist,
                          TBOX* bbox, BLOBNBOX_CLIST* blobs);
  // For each blob in this grid, Finds the textline direction to be horizontal
  // or vertical according to distance to neighbours and 1st and 2nd order
  // neighbours. Non-text tends to end up without a definite direction.
  // Result is setting of the neighbours and vert_possible/horz_possible
  // flags in the BLOBNBOXes currently in this grid.
  // This function is called more than once if page orientation is uncertain,
  // so display_if_debugging is true on the final call to display the results.
  void FindTextlineFlowDirection(bool display_if_debugging);
  // Sets the neighbours and good_stroke_neighbours members of the blob by
  // searching close on all 4 sides.
  // When finding leader dots/dashes, there is a slightly different rule for
  // what makes a good neighbour.
  // If activate_line_trap, then line-like objects are found and isolated.
  void SetNeighbours(bool leaders, bool activate_line_trap, BLOBNBOX* blob);
  // Sets the good_stroke_neighbours member of the blob if it has a
  // GoodNeighbour on the given side.
  // Also sets the neighbour in the blob, whether or not a good one is found.
  // Return value is the number of neighbours in the line trap size range.
  // Leaders get extra special lenient treatment.
  int FindGoodNeighbour(BlobNeighbourDir dir, bool leaders, BLOBNBOX* blob);
  // Makes the blob to be only horizontal or vertical where evidence
  // is clear based on gaps of 2nd order neighbours.
  void SetNeighbourFlows(BLOBNBOX* blob);
  // Nullify the neighbours in the wrong directions where the direction
  // is clear-cut based on a distance margin. Good for isolating vertical
  // text from neighbouring horizontal text.
  void SimplifyObviousNeighbours(BLOBNBOX* blob);
  // Smoothes the vertical/horizontal type of the blob based on the
  // 2nd-order neighbours. If reset_all is true, then all blobs are
  // changed. Otherwise, only ambiguous blobs are processed.
  void SmoothNeighbourTypes(BLOBNBOX* blob, bool desperate);
  // Checks the left or right side of the given leader partition and sets the
  // (opposite) leader_on_right or leader_on_left flags for blobs
  // that are next to the given side of the given leader partition.
  void MarkLeaderNeighbours(const ColPartition* part, LeftOrRight side);
  // Partition creation. Accumulates vertical and horizontal text chains,
  // puts the remaining blobs in as unknowns, and then merges/splits to
  // minimize overlap and smoothes the types with neighbours and the color
  // image if provided. rerotation is used to rotate the coordinate space
  // back to the nontext_map_ image.
  void FindInitialPartitions(const FCOORD& rerotation,
                             TO_BLOCK* block,
                             ColPartitionGrid* part_grid,
                             ColPartition_LIST* big_parts);
  // Finds vertical chains of text-like blobs and puts them in ColPartitions.
  void FindVerticalTextChains(ColPartitionGrid* part_grid);
  // Finds horizontal chains of text-like blobs and puts them in ColPartitions.
  void FindHorizontalTextChains(ColPartitionGrid* part_grid);
  // Finds diacritics and saves their base character in the blob.
  void TestDiacritics(ColPartitionGrid* part_grid, TO_BLOCK* block);
  // Searches this grid for an appropriately close and sized neighbour of the
  // given [small] blob. If such a blob is found, the diacritic base is saved
  // in the blob and true is returned.
  // The small_grid is a secondary grid that contains the small/noise objects
  // that are not in this grid, but may be useful for determining a connection
  // between blob and its potential base character. (See DiacriticXGapFilled.)
  bool DiacriticBlob(BlobGrid* small_grid, BLOBNBOX* blob);
  // Returns true if there is no gap between the base char and the diacritic
  // bigger than a fraction of the height of the base char:
  // Eg: line end.....'
  // The quote is a long way from the end of the line, yet it needs to be a
  // diacritic. To determine that the quote is not part of an image, or
  // a different text block, we check for other marks in the gap between
  // the base char and the diacritic.
  //                  '<--Diacritic
  // |---------|
  // |         |<-toobig-gap->
  // | Base    |<ok gap>
  // |---------|        x<-----Dot occupying gap
  // The grid is const really.
  bool DiacriticXGapFilled(BlobGrid* grid, const TBOX& diacritic_box,
                           const TBOX& base_box);
  // Merges diacritics with the ColPartition of the base character blob.
  void MergeDiacritics(TO_BLOCK* block, ColPartitionGrid* part_grid);
  // Any blobs on the large_blobs list of block that are still unowned by a
  // ColPartition, are probably drop-cap or vertically touching so the blobs
  // are removed to the big_parts list and treated separately.
  void RemoveLargeUnusedBlobs(TO_BLOCK* block,
                              ColPartitionGrid* part_grid,
                              ColPartition_LIST* big_parts);
  // All remaining unused blobs are put in individual ColPartitions.
  void PartitionRemainingBlobs(ColPartitionGrid* part_grid);
  // If combine, put all blobs in the cell_list into a single partition,
  // otherwise put each one into its own partition.
  void MakePartitionsFromCellList(bool combine,
                                  ColPartitionGrid* part_grid,
                                  BLOBNBOX_CLIST* cell_list);
  // Helper function to finish setting up a ColPartition and insert into
  // part_grid.
  void CompletePartition(ColPartition* part, ColPartitionGrid* part_grid);
  // Merge partitions where the merge appears harmless.
  void EasyMerges(ColPartitionGrid* part_grid);
  // Compute a search box based on the orientation of the partition.
  // Returns true if a suitable box can be calculated.
  // Callback for EasyMerges.
  bool OrientationSearchBox(ColPartition* part, TBOX* box);
  // Merge confirmation callback for EasyMerges.
  bool ConfirmEasyMerge(const ColPartition* p1, const ColPartition* p2);
  // Returns true if there is no significant noise in between the boxes.
  bool NoNoiseInBetween(const TBOX& box1, const TBOX& box2) const;
  // Displays the blobs colored according to the number of good neighbours
  // and the vertical/horizontal flow.
  ScrollView* DisplayGoodBlobs(const char* window_name, int x, int y);
  // Displays blobs colored according to whether or not they are diacritics.
  ScrollView* DisplayDiacritics(const char* window_name,
                                int x, int y, TO_BLOCK* block);

 private:
  // ---- Data members. ----
  // Image map of photo/noise areas on the page. Borrowed pointer (not owned.)
  Pix* nontext_map_;
  // Textline projection map. Borrowed pointer.
  TextlineProjection* projection_;
  // DENORM used by projection_ to get back to image coords. Borrowed pointer.
  const DENORM* denorm_;
  // Bounding box of the grid.
  TBOX grid_box_;
  // Rerotation to get back to the original image.
  FCOORD rerotation_;
  // Windows for debug display.
  ScrollView* leaders_win_;
  ScrollView* initial_widths_win_;
  ScrollView* widths_win_;
  ScrollView* chains_win_;
  ScrollView* diacritics_win_;
  ScrollView* textlines_win_;
  ScrollView* smoothed_win_;
};
} // namespace tesseract.
#endif // TESSERACT_TEXTORD_STROKEWIDTH_H__
| C++ |
/**********************************************************************
* File: edgblob.h (Formerly edgeloop.h)
* Description: Functions to clean up an outline before approximation.
* Author: Ray Smith
* Created: Tue Mar 26 16:56:25 GMT 1991
*
* (C) Copyright 1991, Hewlett-Packard Ltd.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
#ifndef EDGBLOB_H
#define EDGBLOB_H
#include "scrollview.h"
#include "params.h"
#include "ocrblock.h"
#include "coutln.h"
#include "crakedge.h"
#define BUCKETSIZE 16
// Bucket array of outline lists, used to sort outlines by position so that
// parent/child (containment) relationships between outlines can be found
// without comparing every pair. Each bucket covers a BUCKETSIZE square of
// image pixels.
class OL_BUCKETS
{
  public:
    // Sets up the bucket array to cover the rectangle bleft..tright.
    OL_BUCKETS(                  //constructor
        ICOORD bleft,            //corners
        ICOORD tright);

    ~OL_BUCKETS () {             //cleanup
      delete[]buckets;
    }

    // Returns the bucket containing the given image coordinates.
    C_OUTLINE_LIST *operator () (//array access
        inT16 x,                 //image coords
        inT16 y);

    // Returns the first non-empty bucket (or the last bucket if all are
    // empty) and resets the internal scan position.
    C_OUTLINE_LIST *start_scan() {
      // Check the index bound *before* reading buckets[index], so the
      // element is only ever accessed through a validated index.
      for (index = 0; index < bxdim * bydim - 1 && buckets[index].empty ();
           index++);
      return &buckets[index];
    }

    // Returns the next non-empty bucket from the current scan position
    // (or the last bucket if none remain).
    C_OUTLINE_LIST *scan_next() {
      for (; index < bxdim * bydim - 1 && buckets[index].empty (); index++);
      return &buckets[index];
    }

    // Recursively counts the descendants of the given outline, giving up
    // once max_count is reached.
    inT32 count_children(        //recursive sum
        C_OUTLINE *outline,      //parent outline
        inT32 max_count);        // max output

    // Extended version of count_children that weighs child complexity,
    // bounded by the given recursion depth.
    inT32 outline_complexity(    // new version of count_children
        C_OUTLINE *outline,      // parent outline
        inT32 max_count,         // max output
        inT16 depth);            // level of recursion

    // Moves the direct children of outline from the buckets to *it.
    void extract_children(       //single level get
        C_OUTLINE *outline,      //parent outline
        C_OUTLINE_IT *it);       //destination iterator

  private:
    C_OUTLINE_LIST *buckets;     //array of buckets
    inT16 bxdim;                 //size of array
    inT16 bydim;
    ICOORD bl;                   //corners
    ICOORD tr;
    inT32 index;                 //for extraction scan
};
// Extracts outlines from the thresholded image and converts them into
// blobs stored in the block.
void extract_edges(Pix* pix,     // thresholded image
                   BLOCK* block);  // block to scan

// Builds blobs from the outlines by resolving parent/child containment.
void outlines_to_blobs(          //find blobs
    BLOCK *block,                //block to scan
    ICOORD bleft,                //bottom-left of block box
    ICOORD tright,               //top-right of block box
    C_OUTLINE_LIST *outlines);   //outlines in block

// Sorts the outlines into the buckets by bounding-box position.
void fill_buckets(               //find blobs
    C_OUTLINE_LIST *outlines,    //outlines in block
    OL_BUCKETS *buckets          //output buckets
);

// Drains the buckets, turning each remaining top-level outline (plus its
// children) into a blob on the block.
void empty_buckets(              //find blobs
    BLOCK *block,                //block to scan
    OL_BUCKETS *buckets          //output buckets
);

// Collects the children of an outline into blob_it; outlines judged to be
// dead grandchildren go to reject_it instead.
BOOL8 capture_children(          //find children
    OL_BUCKETS *buckets,         //bucket sort class
    C_BLOB_IT *reject_it,        //dead grandchildren
    C_OUTLINE_IT *blob_it        //output outlines
);
#endif
| C++ |
///////////////////////////////////////////////////////////////////////
// File: blobgrid.h
// Description: BBGrid of BLOBNBOX with useful BLOBNBOX-specific methods.
// Copyright 2011 Google Inc. All Rights Reserved.
// Author: rays@google.com (Ray Smith)
// Created: Sat Jun 11 10:30:01 PST 2011
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#include "blobgrid.h"
namespace tesseract {
// Constructs a grid of BLOBNBOX cells of the given gridsize (in pixels per
// cell) covering the area from bleft to tright, delegating all setup to the
// BBGrid base class.
BlobGrid::BlobGrid(int gridsize, const ICOORD& bleft, const ICOORD& tright)
  : BBGrid<BLOBNBOX, BLOBNBOX_CLIST, BLOBNBOX_C_IT>(gridsize, bleft, tright) {
}
// Destructor. Blobs inserted via InsertBlobList remain owned by their
// source list, so there is nothing to free beyond the BBGrid base cleanup.
BlobGrid::~BlobGrid() {
}
// Inserts every blob on the given list into the grid, spreading each entry
// over all the cells its bounding box touches in both x and y. Nothing is
// removed from the source list, so ownership of the blobs stays with it.
void BlobGrid::InsertBlobList(BLOBNBOX_LIST* blobs) {
  BLOBNBOX_IT it(blobs);
  for (it.mark_cycle_pt(); !it.cycled_list(); it.forward()) {
    BLOBNBOX* bbox = it.data();
    // Blobs joined to their predecessor are already covered by it, so skip.
    if (!bbox->joined_to_prev())
      InsertBBox(true, true, bbox);
  }
}
} // namespace tesseract.
| C++ |
///////////////////////////////////////////////////////////////////////
// File: bbgrid.cpp
// Description: Class to hold BLOBNBOXs in a grid for fast access
// to neighbours.
// Author: Ray Smith
// Created: Wed Jun 06 17:22:01 PDT 2007
//
// (C) Copyright 2007, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#include "bbgrid.h"
#include "helpers.h"
#include "ocrblock.h"
namespace tesseract {
///////////////////////////////////////////////////////////////////////
// BBGrid IMPLEMENTATION.
///////////////////////////////////////////////////////////////////////
// Default constructor: the grid dimensions are left unset until a later
// call to Init().
GridBase::GridBase() {
}
// Constructs and initializes the grid in one step. See Init() for the
// meaning of the parameters.
GridBase::GridBase(int gridsize, const ICOORD& bleft, const ICOORD& tright) {
  Init(gridsize, bleft, tright);
}
// Destructor. GridBase itself holds no dynamically allocated storage;
// derived classes (e.g. IntGrid) own their cell arrays.
GridBase::~GridBase() {
}
// (Re)Initialize the grid. The gridsize is the size in pixels of each cell,
// and bleft, tright are the bounding box of everything to go in it.
void GridBase::Init(int gridsize, const ICOORD& bleft, const ICOORD& tright) {
  // A zero cell size would divide by zero below, so force at least 1.
  gridsize_ = (gridsize == 0) ? 1 : gridsize;
  bleft_ = bleft;
  tright_ = tright;
  // Round the cell counts up so the grid fully covers the bounding box.
  const int width = tright.x() - bleft.x();
  const int height = tright.y() - bleft.y();
  gridwidth_ = (width + gridsize_ - 1) / gridsize_;
  gridheight_ = (height + gridsize_ - 1) / gridsize_;
  gridbuckets_ = gridwidth_ * gridheight_;
}
// Computes the grid cell coordinates corresponding to the given image
// coordinates, clipped to lie inside the grid.
void GridBase::GridCoords(int x, int y, int* grid_x, int* grid_y) const {
  int gx = (x - bleft_.x()) / gridsize_;
  int gy = (y - bleft_.y()) / gridsize_;
  ClipGridCoords(&gx, &gy);
  *grid_x = gx;
  *grid_y = gy;
}
// Clip the given grid coordinates to fit within the grid:
// *x is clamped to [0, gridwidth_ - 1] and *y to [0, gridheight_ - 1].
void GridBase::ClipGridCoords(int* x, int* y) const {
  *x = ClipToRange(*x, 0, gridwidth_ - 1);
  *y = ClipToRange(*y, 0, gridheight_ - 1);
}
// Default constructor: no cell storage is allocated until Init() is called.
IntGrid::IntGrid() {
  grid_ = NULL;
}
// Constructs and initializes the grid in one step, allocating the cell
// array and clearing it to zero. See Init().
IntGrid::IntGrid(int gridsize, const ICOORD& bleft, const ICOORD& tright)
  : grid_(NULL) {
  Init(gridsize, bleft, tright);
}
// Destructor frees the cell array.
IntGrid::~IntGrid() {
  // delete[] of a NULL pointer is a defined no-op, so no check is needed.
  delete [] grid_;
}
// (Re)Initialize the grid. The gridsize is the size in pixels of each cell,
// and bleft, tright are the bounding box of everything to go in it.
// Any previous cell array is freed, and all cells are reset to zero.
void IntGrid::Init(int gridsize, const ICOORD& bleft, const ICOORD& tright) {
  GridBase::Init(gridsize, bleft, tright);
  // delete[] of NULL is a harmless no-op, so no explicit check is needed.
  delete [] grid_;
  grid_ = new int[gridbuckets_];
  Clear();
}
// Clear all the ints in the grid to zero.
void IntGrid::Clear() {
  int* end = grid_ + gridbuckets_;
  for (int* cell = grid_; cell < end; ++cell)
    *cell = 0;
}
// Rotate the grid by rotation, keeping cell contents.
// rotation must be a multiple of 90 degrees.
// NOTE: due to partial cells, cell coverage in the rotated grid will be
// inexact. This is why there is no Rotate for the generic BBGrid.
// TODO(rays) investigate fixing this inaccuracy by moving the origin after
// rotation.
void IntGrid::Rotate(const FCOORD& rotation) {
  // Only axis-aligned rotations (multiples of 90 degrees) are supported.
  ASSERT_HOST(rotation.x() == 0.0f || rotation.y() == 0.0f);
  ICOORD old_bleft(bleft());
  // NOTE(review): old_tright appears unused below -- confirm before removal.
  ICOORD old_tright(tright());
  int old_width = gridwidth();
  int old_height = gridheight();
  // Rotate the bounding box to get the extent of the new grid.
  TBOX box(bleft(), tright());
  box.rotate(rotation);
  // Detach the old cell array so Init() does not free it.
  int* old_grid = grid_;
  grid_ = NULL;
  Init(gridsize(), box.botleft(), box.topright());
  // Iterate over the old grid, copying data to the rotated position in the new.
  int oldi = 0;
  // x_step is the rotated image-space displacement of one cell step in x.
  FCOORD x_step(rotation);
  x_step *= gridsize();
  for (int oldy = 0; oldy < old_height; ++oldy) {
    // Image-space position of the start of this old-grid row, rotated.
    FCOORD line_pos(old_bleft.x(), old_bleft.y() + gridsize() * oldy);
    line_pos.rotate(rotation);
    for (int oldx = 0; oldx < old_width; ++oldx, line_pos += x_step, ++oldi) {
      int grid_x, grid_y;
      // Round to the nearest pixel before mapping into new grid coords.
      GridCoords(static_cast<int>(line_pos.x() + 0.5),
                 static_cast<int>(line_pos.y() + 0.5),
                 &grid_x, &grid_y);
      grid_[grid_y * gridwidth() + grid_x] = old_grid[oldi];
    }
  }
  delete [] old_grid;
}
// Returns a new IntGrid containing values equal to the sum of all the
// neighbouring cells. The returned grid must be deleted after use.
// For ease of implementation, edge cells are double counted, to make them
// have the same range as the non-edge cells.
IntGrid* IntGrid::NeighbourhoodSum() const {
  IntGrid* sumgrid = new IntGrid(gridsize(), bleft(), tright());
  for (int y = 0; y < gridheight(); ++y) {
    for (int x = 0; x < gridwidth(); ++x) {
      int cell_count = 0;
      // Sum the 3x3 neighbourhood. Clipping the coords to the grid is what
      // makes edge cells count some neighbours (themselves) twice.
      for (int yoffset = -1; yoffset <= 1; ++yoffset) {
        for (int xoffset = -1; xoffset <= 1; ++xoffset) {
          int grid_x = x + xoffset;
          int grid_y = y + yoffset;
          ClipGridCoords(&grid_x, &grid_y);
          cell_count += GridCellValue(grid_x, grid_y);
        }
      }
      // NOTE(review): the sum is stored only for cells whose own value
      // exceeds 1; all other cells keep the zero written by Init. The
      // header comment does not mention this filter -- confirm intended.
      if (GridCellValue(x, y) > 1)
        sumgrid->SetGridCell(x, y, cell_count);
    }
  }
  return sumgrid;
}
// Returns true if more than half the area of the rect is covered by grid
// cells that are over the threshold.
bool IntGrid::RectMostlyOverThreshold(const TBOX& rect, int threshold) const {
  // Compute the grid-cell range spanned by the rect.
  int min_x, min_y, max_x, max_y;
  GridCoords(rect.left(), rect.bottom(), &min_x, &min_y);
  GridCoords(rect.right(), rect.top(), &max_x, &max_y);
  // Accumulate the image-space area of over-threshold cells that intersect
  // the rect.
  int total_area = 0;
  for (int y = min_y; y <= max_y; ++y) {
    for (int x = min_x; x <= max_x; ++x) {
      int value = GridCellValue(x, y);
      if (value > threshold) {
        // NOTE(review): cell_box is built from grid coords * gridsize_
        // without adding the bleft_ offset back, so the intersection is
        // only correct when the grid origin is (0, 0) -- confirm callers.
        TBOX cell_box(x * gridsize_, y * gridsize_,
                      (x + 1) * gridsize_, (y + 1) * gridsize_);
        cell_box &= rect;  // This is in-place box intersection.
        total_area += cell_box.area();
      }
    }
  }
  // More than half of the rect covered?
  return total_area * 2 > rect.area();
}
// Returns true if any grid cell overlapped by the given rectangle holds
// the value zero.
bool IntGrid::AnyZeroInRect(const TBOX& rect) const {
  int left, bottom, right, top;
  GridCoords(rect.left(), rect.bottom(), &left, &bottom);
  GridCoords(rect.right(), rect.top(), &right, &top);
  for (int gy = bottom; gy <= top; ++gy) {
    for (int gx = left; gx <= right; ++gx) {
      if (GridCellValue(gx, gy) == 0)
        return true;
    }
  }
  return false;
}
// Returns a full-resolution binary pix in which each cell over the given
// threshold is filled as a black square. pixDestroy after use.
// Edge cells, which have a zero 4-neighbour, are not marked.
Pix* IntGrid::ThresholdToPix(int threshold) const {
  // The pix covers the grid's bounding box at full image resolution.
  Pix* pix = pixCreate(tright().x() - bleft().x(),
                       tright().y() - bleft().y(), 1);
  int cellsize = gridsize();
  for (int y = 0; y < gridheight(); ++y) {
    for (int x = 0; x < gridwidth(); ++x) {
      // Mark a cell only if it is over threshold AND all 4-neighbours are
      // nonzero (suppresses edge cells, per the header comment).
      // NOTE(review): the +/-1 lookups go out of range on the border --
      // assumes GridCellValue tolerates/clips such coords; confirm.
      if (GridCellValue(x, y) > threshold &&
          GridCellValue(x - 1, y) > 0 && GridCellValue(x + 1, y) > 0 &&
          GridCellValue(x, y - 1) > 0 && GridCellValue(x, y + 1) > 0) {
        // Flip y: the grid is bottom-up, while the pix raster is top-down.
        pixRasterop(pix, x * cellsize, tright().y() - ((y + 1) * cellsize),
                    cellsize, cellsize, PIX_SET, NULL, 0, 0);
      }
    }
  }
  return pix;
}
// Make a 1-bpp Pix of the correct scaled size for the TraceOutline
// functions: one pixel per grid cell, spanning the grid cells covered by
// box plus a one-cell pad on every side. *left and *bottom receive the
// grid coordinates of the pix origin.
Pix* GridReducedPix(const TBOX& box, int gridsize,
                    ICOORD bleft, int* left, int* bottom) {
  // Grid bounds of the box, padded all round by 1 cell.
  const int min_gx = (box.left() - bleft.x()) / gridsize - 1;
  const int min_gy = (box.bottom() - bleft.y()) / gridsize - 1;
  const int max_gx = (box.right() - bleft.x()) / gridsize + 1;
  const int max_gy = (box.top() - bleft.y()) / gridsize + 1;
  *left = min_gx;
  *bottom = min_gy;
  const int width = max_gx - min_gx + 1;
  const int height = max_gy - min_gy + 1;
  return pixCreate(width, height, 1);
}
// Helper function to return a scaled Pix with one pixel per grid cell,
// set (black) where the given outline enters the corresponding grid cell,
// and clear where the outline does not touch the grid cell.
// Also returns the grid coords of the bottom-left of the Pix, in *left
// and *bottom, which corresponds to (0, 0) on the Pix.
// Note that the Pix is used upside-down, with (0, 0) being the bottom-left.
Pix* TraceOutlineOnReducedPix(C_OUTLINE* outline, int gridsize,
                              ICOORD bleft, int* left, int* bottom) {
  TBOX box = outline->bounding_box();
  Pix* pix = GridReducedPix(box, gridsize, bleft, left, bottom);
  int wpl = pixGetWpl(pix);
  l_uint32* data = pixGetData(pix);
  // Walk the outline one step at a time, marking the cell under each
  // position visited.
  int length = outline->pathlength();
  ICOORD pos = outline->start_pos();
  for (int i = 0; i < length; ++i) {
    // Map image coords to pix coords relative to the padded grid origin.
    int grid_x = (pos.x() - bleft.x()) / gridsize - *left;
    int grid_y = (pos.y() - bleft.y()) / gridsize - *bottom;
    SET_DATA_BIT(data + grid_y * wpl, grid_x);
    pos += outline->step(i);
  }
  return pix;
}
#if 0 // Example code shows how to use TraceOutlineOnReducedPix.
C_OUTLINE_IT ol_it(blob->cblob()->out_list());
int grid_left, grid_bottom;
Pix* pix = TraceOutlineOnReducedPix(ol_it.data(), gridsize_, bleft_,
&grid_left, &grid_bottom);
grid->InsertPixPtBBox(grid_left, grid_bottom, pix, blob);
pixDestroy(&pix);
#endif
// As TraceOutlineOnReducedPix above, but on a BLOCK instead of a C_OUTLINE:
// marks the grid cells entered by the block's polygon boundary.
Pix* TraceBlockOnReducedPix(BLOCK* block, int gridsize,
                            ICOORD bleft, int* left, int* bottom) {
  TBOX box = block->bounding_box();
  Pix* pix = GridReducedPix(box, gridsize, bleft, left, bottom);
  int wpl = pixGetWpl(pix);
  l_uint32* data = pixGetData(pix);
  // Walk the polygon boundary vertex by vertex.
  ICOORDELT_IT it(block->poly_block()->points());
  for (it.mark_cycle_pt(); !it.cycled_list();) {
    ICOORD pos = *it.data();
    it.forward();
    ICOORD next_pos = *it.data();
    // Render the edge pos->next_pos with the major/minor axis stepper
    // provided by setup_render (Bresenham-style error accumulation).
    ICOORD line_vector = next_pos - pos;
    int major, minor;
    ICOORD major_step, minor_step;
    line_vector.setup_render(&major_step, &minor_step, &major, &minor);
    int accumulator = major / 2;
    while (pos != next_pos) {
      int grid_x = (pos.x() - bleft.x()) / gridsize - *left;
      int grid_y = (pos.y() - bleft.y()) / gridsize - *bottom;
      SET_DATA_BIT(data + grid_y * wpl, grid_x);
      pos += major_step;
      // The error accumulator decides when a minor-axis step is due.
      accumulator += minor;
      if (accumulator >= major) {
        accumulator -= major;
        pos += minor_step;
      }
    }
  }
  return pix;
}
} // namespace tesseract.
| C++ |
///////////////////////////////////////////////////////////////////////
// File: tablefind.h
// Description: Helper classes to find tables from ColPartitions.
// Author: Faisal Shafait (faisal.shafait@dfki.de)
// Created: Tue Jan 06 11:13:01 PST 2009
//
// (C) Copyright 2009, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#ifndef TESSERACT_TEXTORD_TABLEFIND_H__
#define TESSERACT_TEXTORD_TABLEFIND_H__
#include "colpartitiongrid.h"
#include "elst.h"
#include "rect.h"
namespace tesseract {
// Possible types for a column segment, assigned by ColSegment::set_type()
// from the ratio of table to text partitions covered by the segment.
enum ColSegType {
  COL_UNKNOWN,  // Not yet classified.
  COL_TEXT,     // Dominated by text partitions.
  COL_TABLE,    // Dominated by table partitions.
  COL_MIXED,    // Contains significant amounts of both.
  COL_COUNT     // Sentinel: number of valid types.
};
class ColPartitionSet;
// ColSegment holds rectangular blocks that represent segmentation of a page
// into regions containing single column text/table.
class ColSegment;
ELISTIZEH(ColSegment)
CLISTIZEH(ColSegment)

class ColSegment : public ELIST_LINK {
 public:
  ColSegment();
  ~ColSegment();

  // Simple accessors and mutators for the segment's bounding box.
  const TBOX& bounding_box() const {
    return bounding_box_;
  }

  void set_top(int y) {
    bounding_box_.set_top(y);
  }

  void set_bottom(int y) {
    bounding_box_.set_bottom(y);
  }

  void set_left(int x) {
    bounding_box_.set_left(x);
  }

  void set_right(int x) {
    bounding_box_.set_right(x);
  }

  void set_bounding_box(const TBOX& other) {
    bounding_box_ = other;
  }

  int get_num_table_cells() const {
    return num_table_cells_;
  }

  // Sets the number of table colpartitions covered by the bounding_box_.
  void set_num_table_cells(int n) {
    num_table_cells_ = n;
  }

  int get_num_text_cells() const {
    return num_text_cells_;
  }

  // Sets the number of text colpartitions covered by the bounding_box_.
  void set_num_text_cells(int n) {
    num_text_cells_ = n;
  }

  ColSegType type() const {
    return type_;
  }

  // Sets the type of the block based on the ratio of table to text
  // colpartitions covered by it.
  void set_type();

  // Provides a color for BBGrid to draw the rectangle.
  ScrollView::Color BoxColor() const;

  // Inserts (unions) a rectangle into bounding_box_.
  void InsertBox(const TBOX& other);

 private:
  TBOX bounding_box_;    // bounding box
  int num_table_cells_;  // count of table partitions covered by the box
  int num_text_cells_;   // count of text partitions covered by the box
  ColSegType type_;      // classification; see set_type()
};
// Typedef BBGrid of ColSegments, for fast spatial lookup of segments.
typedef BBGrid<ColSegment,
               ColSegment_CLIST,
               ColSegment_C_IT> ColSegmentGrid;
// Matching GridSearch iterator type for ColSegmentGrid.
typedef GridSearch<ColSegment,
                   ColSegment_CLIST,
                   ColSegment_C_IT> ColSegmentGridSearch;
// TableFinder is a utility class to find a set of tables given a set of
// ColPartitions and Columns. The TableFinder will mark candidate ColPartitions
// based on research in "Table Detection in Heterogeneous Documents".
// Usage flow is as follows:
//   TableFinder finder;
//   finder.InsertCleanPartitions(/* grid info */)
//   finder.LocateTables(/* ColPartitions and Columns */);
//   finder.Update TODO(nbeato)
class TableFinder {
 public:
  // Constructor is simple initializations
  TableFinder();
  ~TableFinder();

  // Set the resolution of the connected components in ppi.
  void set_resolution(int resolution) {
    resolution_ = resolution;
  }
  // Change the reading order. Initially it is left to right.
  void set_left_to_right_language(bool order);

  // Initialize the grid to the given cell size and bounding box.
  void Init(int grid_size, const ICOORD& bottom_left, const ICOORD& top_right);

  // Copy cleaned partitions from ColumnFinder's part_grid_ to this
  // clean_part_grid_ and insert dot-like noise into period_grid_.
  // It resizes the grids in this object to the dimensions of grid.
  void InsertCleanPartitions(ColPartitionGrid* grid, TO_BLOCK* block);

  // High level function to perform table detection
  // Finds tables and updates the grid object with new partitions for the
  // tables. The columns and width callbacks are used to merge tables.
  // The reskew argument is only used to write the tables to the out.png
  // if that feature is enabled.
  void LocateTables(ColPartitionGrid* grid,
                    ColPartitionSet** columns,
                    WidthCallback* width_cb,
                    const FCOORD& reskew);

 protected:
  // Access for the grid dimensions.
  // The results will not be correct until InsertCleanPartitions
  // has been called. The values are taken from the grid passed as an argument
  // to that function.
  int gridsize() const;
  int gridwidth() const;
  int gridheight() const;
  const ICOORD& bleft() const;
  const ICOORD& tright() const;

  // Makes a window for debugging, see BBGrid
  ScrollView* MakeWindow(int x, int y, const char* window_name);

  //////// Functions to insert objects from the grid into the table finder.
  //////// In all cases, ownership is transferred to the table finder.
  // Inserts text into the table finder.
  void InsertTextPartition(ColPartition* part);
  void InsertFragmentedTextPartition(ColPartition* part);
  void InsertLeaderPartition(ColPartition* part);
  void InsertRulingPartition(ColPartition* part);
  void InsertImagePartition(ColPartition* part);
  void SplitAndInsertFragmentedTextPartition(ColPartition* part);
  bool AllowTextPartition(const ColPartition& part) const;
  bool AllowBlob(const BLOBNBOX& blob) const;

  //////// Functions that manipulate ColPartitions in the part_grid_ /////
  //////// to find tables.
  ////////
  // Utility function to move segments to col_seg_grid
  // Note: Move includes ownership,
  // so segments will be owned by col_seg_grid
  void MoveColSegmentsToGrid(ColSegment_LIST* segments,
                             ColSegmentGrid* col_seg_grid);

  //////// Set up code to run during table detection to correctly
  //////// initialize variables on column partitions that are used later.
  ////////
  // Initialize the grid and partitions
  void InitializePartitions(ColPartitionSet** all_columns);

  // Set left, right and top, bottom spacings of each colpartition.
  // Left/right spacings are w.r.t the column boundaries
  // Top/bottom spacings are w.r.t. previous and next colpartitions
  static void SetPartitionSpacings(ColPartitionGrid* grid,
                                   ColPartitionSet** all_columns);

  // Set spacing and closest neighbors above and below a given colpartition.
  void SetVerticalSpacing(ColPartition* part);

  // Set global spacing estimates. This function is dependent on the
  // partition spacings. So make sure SetPartitionSpacings is called
  // on the same grid before this.
  void SetGlobalSpacings(ColPartitionGrid* grid);
  // Access to the global median xheight. The xheight is the height
  // of a lowercase 'x' character on the page. This can be viewed as the
  // average height of a lowercase letter in a textline. As a result
  // it is used to make assumptions about spacing between words and
  // table cells.
  void set_global_median_xheight(int xheight);
  // Access to the global median blob width. The width is useful
  // when deciding if a partition is noise.
  void set_global_median_blob_width(int width);
  // Access to the global median ledding. The ledding is the distance between
  // two adjacent text lines. This value can be used to get a rough estimate
  // for the amount of space between two lines of text. As a result, it
  // is used to calculate appropriate spacing between adjacent rows of text.
  void set_global_median_ledding(int ledding);

  // Updates the nearest neighbors for each ColPartition in clean_part_grid_.
  // The neighbors are most likely needed for SingletonPartner calls after
  // they are assigned. This is here until it is decided to remove the
  // nearest_neighbor code in ColPartition
  void FindNeighbors();

  //////// Functions to mark candidate column partitions as tables.
  //////// Tables are marked as described in
  //////// Table Detection in Heterogeneous Documents (2010, Shafait & Smith)
  ////////
  // High level function to mark partitions as table rows/cells.
  // When this function is done, the column partitions in clean_part_grid_
  // should mostly be marked as tables.
  void MarkTablePartitions();
  // Marks partitions given a local view of a single partition
  void MarkPartitionsUsingLocalInformation();

  /////// Heuristics for local marking
  // Check if the partition has at least one large gap between words or no
  // significant gap at all
  // TODO(nbeato): Make const, prevented because blobnbox array access
  bool HasWideOrNoInterWordGap(ColPartition* part) const;
  // Checks if a partition is adjacent to leaders on the page
  bool HasLeaderAdjacent(const ColPartition& part);
  // Filter individual text partitions marked as table partitions
  // consisting of paragraph endings, small section headings, and
  // headers and footers.
  void FilterFalseAlarms();
  void FilterParagraphEndings();
  void FilterHeaderAndFooter();
  // Mark all ColPartitions as table cells that have a table cell above
  // and below them
  void SmoothTablePartitionRuns();

  //////// Functions to create bounding boxes (ColSegment) objects for
  //////// the columns on the page. The columns are not necessarily
  //////// vertical lines, meaning if tab stops strongly suggests that
  //////// a column changes horizontal position, as in the case below,
  //////// The ColSegment objects will respect that after processing.
  ////////
  ////////    _____________
  //////// Ex. |     |     |
  ////////     |_____|______|  5 boxes: 2 on this line
  ////////     |   |    |   |            3 on this line
  ////////     |___|____|___|
  ////////
  // Get Column segments from best_columns_
  void GetColumnBlocks(ColPartitionSet** columns,
                       ColSegment_LIST *col_segments);
  // Group Column segments into consecutive single column regions.
  void GroupColumnBlocks(ColSegment_LIST *current_segments,
                         ColSegment_LIST *col_segments);
  // Check if two boxes are consecutive within the same column
  bool ConsecutiveBoxes(const TBOX &b1, const TBOX &b2);
  // Set the ratio of candidate table partitions in each column
  void SetColumnsType(ColSegment_LIST* col_segments);
  // Merge Column Blocks that were split due to the presence of a table
  void GridMergeColumnBlocks();

  //////// Functions to turn marked ColPartitions into candidate tables
  //////// using a modified T-Recs++ algorithm described in
  //////// Applying The T-Recs Table Recognition System
  //////// To The Business Letter Domain (2001, Kieninger & Dengel)
  ////////
  // Merge partition cells into table columns
  // Differs from paper by just looking at marked table partitions
  // instead of similarity metric.
  // Modified section 4.1 of paper.
  void GetTableColumns(ColSegment_LIST *table_columns);
  // Finds regions within a column that potentially contain a table.
  // Ie, the table columns from GetTableColumns are turned into boxes
  // that span the entire page column (using ColumnBlocks found in
  // earlier functions) in the x direction and the min/max extent of
  // overlapping table columns in the y direction.
  // Section 4.2 of paper.
  void GetTableRegions(ColSegment_LIST *table_columns,
                       ColSegment_LIST *table_regions);

  //////// Functions to "patch up" found tables
  ////////
  // Merge table regions corresponding to tables spanning multiple columns
  void GridMergeTableRegions();
  bool BelongToOneTable(const TBOX &box1, const TBOX &box2);
  // Adjust table boundaries by building a tight bounding box around all
  // ColPartitions contained in it.
  void AdjustTableBoundaries();
  // Grows a table to include partitions that are partially covered
  // by the table. This includes lines and text. It does not include
  // noise or images.
  // On entry, result_box is the minimum size of the result. The results of the
  // function will union the actual result with result_box.
  void GrowTableBox(const TBOX& table_box, TBOX* result_box);
  // Grow a table by increasing the size of the box to include
  // partitions with significant overlap with the table.
  void GrowTableToIncludePartials(const TBOX& table_box,
                                  const TBOX& search_range,
                                  TBOX* result_box);
  // Grow a table by expanding to the extents of significantly
  // overlapping lines.
  void GrowTableToIncludeLines(const TBOX& table_box, const TBOX& search_range,
                               TBOX* result_box);
  // Checks whether the horizontal line belongs to the table by looking at the
  // side spacing of extra ColPartitions that will be included in the table
  // due to expansion
  bool HLineBelongsToTable(const ColPartition& part, const TBOX& table_box);
  // Look for isolated column headers above the given table box and
  // include them in the table
  void IncludeLeftOutColumnHeaders(TBOX* table_box);
  // Remove false alarms consisting of a single column
  void DeleteSingleColumnTables();
  // Return true if at least one gap larger than the global x-height
  // exists in the horizontal projection
  bool GapInXProjection(int* xprojection, int length);

  //////// Recognize the tables.
  ////////
  // This function will run the table recognizer and try to find better
  // bounding boxes. The structures of the tables never leave this function
  // right now. It just tries to prune and merge tables based on info it
  // has available.
  void RecognizeTables();

  //////// Debugging functions. Render different structures to GUI
  //////// for visual debugging / intuition.
  ////////
  // Displays Colpartitions marked as table row. Overlays them on top of
  // part_grid_.
  void DisplayColSegments(ScrollView* win, ColSegment_LIST *cols,
                          ScrollView::Color color);
  // Displays the colpartitions using a new coloring on an existing window.
  // Note: This method is only for debug purpose during development and
  // would not be part of checked in code
  void DisplayColPartitions(ScrollView* win, ColPartitionGrid* grid,
                            ScrollView::Color text_color,
                            ScrollView::Color table_color);
  void DisplayColPartitions(ScrollView* win, ColPartitionGrid* grid,
                            ScrollView::Color default_color);
  void DisplayColPartitionConnections(ScrollView* win,
                                      ColPartitionGrid* grid,
                                      ScrollView::Color default_color);
  void DisplayColSegmentGrid(ScrollView* win, ColSegmentGrid* grid,
                             ScrollView::Color color);

  // Write ColPartitions and Tables to a PIX image
  // Note: This method is only for debug purpose during development and
  // would not be part of checked in code
  void WriteToPix(const FCOORD& reskew);

  // Merge all colpartitions in table regions to make them a single
  // colpartition and revert types of isolated table cells not
  // assigned to any table to their original types.
  void MakeTableBlocks(ColPartitionGrid* grid,
                       ColPartitionSet** columns,
                       WidthCallback* width_cb);

  /////////////////////////////////////////////////
  // Useful objects used during table find process.
  /////////////////////////////////////////////////
  // Resolution of the connected components in ppi.
  int resolution_;
  // Estimate of median x-height over the page
  int global_median_xheight_;
  // Estimate of the median blob width on the page
  int global_median_blob_width_;
  // Estimate of median leading on the page
  int global_median_ledding_;
  // Grid to hold cleaned colpartitions after removing all
  // colpartitions that consist of only noise blobs, and removing
  // noise blobs from remaining colpartitions.
  ColPartitionGrid clean_part_grid_;
  // Grid contains the leaders and ruling lines.
  ColPartitionGrid leader_and_ruling_grid_;
  // Grid contains the broken down column partitions. It can be thought
  // of as a "word" grid. However, it usually doesn't break apart text lines.
  // It does break apart table data (most of the time).
  ColPartitionGrid fragmented_text_grid_;
  // Grid of page column blocks
  ColSegmentGrid col_seg_grid_;
  // Grid of detected tables
  ColSegmentGrid table_grid_;
  // The reading order of text. Defaults to true, for languages such as English.
  bool left_to_right_language_;
};
} // namespace tesseract.
#endif // TESSERACT_TEXTORD_TABLEFIND_H__
| C++ |
///////////////////////////////////////////////////////////////////////
// File: cjkpitch.cpp
// Description: Code to determine fixed pitchness and the pitch if fixed,
// for CJK text.
// Copyright 2011 Google Inc. All Rights Reserved.
// Author: takenaka@google.com (Hiroshi Takenaka)
// Created: Mon Jun 27 12:48:35 JST 2011
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#include "cjkpitch.h"
#include "genericvector.h"
#include "ndminx.h"
#include "topitch.h"
#include "tovars.h"
BOOL_VAR(textord_space_size_is_variable, FALSE,
"If true, word delimiter spaces are assumed to have "
"variable width, even though characters have fixed pitch.");
namespace {
// Allow +/-10% error for character pitch / body size.
static const float kFPTolerance = 0.1;
// Minimum ratio of "good" character pitch for a row to be considered
// to be fixed-pitch.
static const float kFixedPitchThreshold = 0.35;
// rank statistics for a small collection of float values.
// Rank statistics over a small collection of float samples.  Samples are
// accumulated with Add() and lazily sorted on first query; any mutation
// invalidates the sorted state until the next Finish().
class SimpleStats {
 public:
  SimpleStats(): finalized_(false), values_() { }
  ~SimpleStats() { }

  // Discards all accumulated samples.
  void Clear() {
    finalized_ = false;
    values_.clear();
  }

  // Records one sample; invalidates the sorted order.
  void Add(float value) {
    finalized_ = false;
    values_.push_back(value);
  }

  // Sorts the samples so quantile queries become valid.
  void Finish() {
    values_.sort(float_compare);
    finalized_ = true;
  }

  // Returns the frac-quantile (0.0 .. 1.0) by linear interpolation
  // between the two nearest sorted samples.  Returns 0 with no samples.
  float ile(double frac) {
    if (!finalized_) Finish();
    if (values_.empty()) return 0.0;
    if (frac >= 1.0) return values_.back();
    if (frac <= 0.0 || values_.size() == 1) return values_[0];
    double pos = (values_.size() - 1) * frac;
    int index = static_cast<int>(pos);
    float reminder = pos - index;
    return values_[index] * (1.0 - reminder) +
        values_[index + 1] * reminder;
  }

  // Middle sample (0.5-quantile).
  float median() {
    return ile(0.5);
  }

  // Largest sample, or 0 when empty.
  float maximum() {
    if (!finalized_) Finish();
    if (values_.empty()) return 0.0;
    return values_.back();
  }

  // Smallest sample, or 0 when empty.
  float minimum() {
    if (!finalized_) Finish();
    if (values_.empty()) return 0.0;
    return values_[0];
  }

  // Number of accumulated samples.
  int size() const {
    return values_.size();
  }

 private:
  // qsort-style ascending comparator for floats.
  static int float_compare(const void* a, const void* b) {
    const float lhs = *reinterpret_cast<const float*>(a);
    const float rhs = *reinterpret_cast<const float*>(b);
    if (lhs > rhs) return 1;
    if (lhs < rhs) return -1;
    return 0;
  }

  bool finalized_;               // True when values_ is sorted.
  GenericVector<float> values_;  // Raw samples (sorted after Finish()).
};
// statistics for a small collection of float pairs (x, y).
// EstimateYFor(x, r) returns the estimated y at x, based on
// existing samples between x*(1-r) ~ x*(1+r).
// Statistics for a small collection of float pairs (x, y).
// EstimateYFor(x, r) returns the estimated y at x, based on
// existing samples between x*(1-r) ~ x*(1+r).
class LocalCorrelation {
 public:
  struct float_pair {
    float x, y;
    int vote;  // Weight of this sample in the estimation.
  };

  LocalCorrelation(): finalized_(false) { }
  ~LocalCorrelation() { }

  // Sorts samples by x so EstimateYFor() can range-scan them.
  void Finish() {
    values_.sort(float_pair_compare);
    finalized_ = true;
  }

  // Discards all samples.
  // BUGFIX: previously only reset finalized_ and kept stale samples,
  // which would corrupt results if the object were reused after Clear().
  void Clear() {
    values_.clear();
    finalized_ = false;
  }

  // Records one (x, y) sample with weight v.
  void Add(float x, float y, int v) {
    struct float_pair value;
    value.x = x;
    value.y = y;
    value.vote = v;
    values_.push_back(value);
    finalized_ = false;
  }

  // Returns the vote-weighted estimate of y at x, scaling each sample's
  // y proportionally to x / sample.x.  Falls back to the global average
  // when no sample lies within radius r of x.  Requires Finish() first.
  float EstimateYFor(float x, float r) {
    ASSERT_HOST(finalized_);
    int start = 0, end = values_.size();
    // Because the number of samples (used_) is assumed to be small,
    // just use linear search to find values within the range.
    while (start < values_.size() && values_[start].x < x * (1.0 - r)) start++;
    while (end - 1 >= 0 && values_[end - 1].x > x * (1.0 + r)) end--;

    // Fall back to the global average if there are no data within r
    // of x.
    if (start >= end) {
      start = 0;
      end = values_.size();
    }

    // Compute weighted average of the values.
    float rc = 0;
    int vote = 0;
    for (int i = start; i < end; i++) {
      rc += values_[i].vote * x * values_[i].y / values_[i].x;
      vote += values_[i].vote;
    }
    // BUGFIX: guard against division by zero when there are no samples
    // at all (or every sample carries zero vote), which is reachable from
    // FPAnalyzer::EstimatePitch() when no row produced good pitches.
    if (vote == 0) return 0.0f;
    return rc / vote;
  }

 private:
  // qsort-style ascending comparator on the x coordinate.
  static int float_pair_compare(const void* a, const void* b) {
    const float_pair* f_a = reinterpret_cast<const float_pair*>(a);
    const float_pair* f_b = reinterpret_cast<const float_pair*>(b);
    return (f_a->x > f_b->x) ? 1 : ((f_a->x < f_b->x) ? -1 : 0);
  }

  bool finalized_;  // True when values_ is sorted by x.
  GenericVector<struct float_pair> values_;  // Accumulated samples.
};
// Class to represent a character on a fixed pitch row. A FPChar may
// consist of multiple blobs (BLOBNBOX's).
// Class to represent a character on a fixed pitch row. A FPChar may
// consist of multiple blobs (BLOBNBOX's).
class FPChar {
 public:
  // Result of the fixed-pitch alignment analysis for this character.
  enum Alignment {
    ALIGN_UNKNOWN, ALIGN_GOOD, ALIGN_BAD
  };

  FPChar(): box_(), real_body_(),
            from_(NULL), to_(NULL), num_blobs_(0), max_gap_(0),
            final_(false), alignment_(ALIGN_UNKNOWN),
            merge_to_prev_(false), delete_flag_(false) {
  }

  // Initialize from blob.
  void Init(BLOBNBOX *blob) {
    box_ = blob->bounding_box();
    real_body_ = box_;
    from_ = to_ = blob;
    num_blobs_ = 1;
  }

  // Merge this character with "next". The "next" character should
  // consist of succeeding blobs on the same row.
  void Merge(const FPChar &next) {
    int gap = real_body_.x_gap(next.real_body_);
    if (gap > max_gap_) max_gap_ = gap;
    box_ += next.box_;
    real_body_ += next.real_body_;
    to_ = next.to_;
    num_blobs_ += next.num_blobs_;
  }

  // Accessors.
  const TBOX &box() const { return box_; }
  void set_box(const TBOX &box) {
    box_ = box;
  }
  const TBOX &real_body() const { return real_body_; }

  bool is_final() const { return final_; }
  void set_final(bool flag) {
    final_ = flag;
  }

  const Alignment& alignment() const {
    return alignment_;
  }
  void set_alignment(Alignment alignment) {
    alignment_ = alignment;
  }

  bool merge_to_prev() const {
    return merge_to_prev_;
  }
  void set_merge_to_prev(bool flag) {
    merge_to_prev_ = flag;
  }

  bool delete_flag() const {
    return delete_flag_;
  }
  void set_delete_flag(bool flag) {
    delete_flag_ = flag;
  }

  int max_gap() const {
    return max_gap_;
  }

  int num_blobs() const {
    return num_blobs_;
  }

 private:
  TBOX box_;  // Rectangle region considered to be occupied by this
              // character. It could be bigger than the bounding box.
  TBOX real_body_;  // Real bounding box of this character.
  BLOBNBOX *from_;  // The first blob of this character.
  BLOBNBOX *to_;  // The last blob of this character.
  int num_blobs_;  // Number of blobs that belong to this character.
  int max_gap_;  // Maximum x gap between the blobs.

  bool final_;  // True if alignment/fragmentation decision for this
                // character is finalized.

  Alignment alignment_;  // Alignment status.

  bool merge_to_prev_;  // True if this is a fragmented blob that
                        // needs to be merged to the previous
                        // character.

  // FIX: was declared "int" although every accessor and the comment treat
  // it as a boolean; declared bool for consistency with the interface.
  bool delete_flag_;  // True if this character is merged to another
                      // one and needs to be deleted.
};
// Class to represent a fixed pitch row, as a linear collection of
// FPChar's.
// Class to represent a fixed pitch row, as a linear collection of
// FPChar's.
class FPRow {
 public:
  // FIX: height_ and gap_ were left uninitialized by the constructor;
  // height_pitch_ratio()/EstimatePitch() read height_ before it is first
  // assigned in Init().  Both are now zero-initialized.
  FPRow() : pitch_(0.0f), estimated_pitch_(0.0f),
            height_(0.0f), gap_(0.0f),
            all_pitches_(), all_gaps_(), good_pitches_(), good_gaps_(),
            heights_(), characters_(), real_row_(NULL) {
  }

  ~FPRow() { }

  // Initialize from TD_ROW.
  void Init(TO_ROW *row);

  // Estimate character pitch of this row, based on current alignment
  // status of underlying FPChar's. The argument pass1 can be set to
  // true if the function is called after Pass1Analyze(), to eliminate
  // some redundant computation.
  void EstimatePitch(bool pass1);

  // Check each character if it has good character pitches between its
  // predecessor and its successor and set its alignment status. If
  // we already calculated the estimated pitch for this row, the value
  // is used. If we didn't, a character is considered to be good, if
  // the pitches between its predecessor and its successor are almost
  // equal.
  void Pass1Analyze();

  // Find characters that fit nicely into one imaginary body next to a
  // character which is already finalized. Then mark them as character
  // fragments.
  bool Pass2Analyze();

  // Merge FPChars marked as character fragments into one.
  void MergeFragments();

  // Finalize characters that are already large enough and cannot be
  // merged with others any more.
  void FinalizeLargeChars();

  // Ouput pitch estimation results to attributes of TD_ROW.
  void OutputEstimations();

  void DebugOutputResult(int row_index);

  // Number of pitch samples consistent with the fixed pitch model.
  int good_pitches() {
    return good_pitches_.size();
  }

  // Number of gap samples consistent with the fixed pitch model.
  int good_gaps() {
    return good_gaps_.size();
  }

  float pitch() {
    return pitch_;
  }

  float estimated_pitch() {
    return estimated_pitch_;
  }

  void set_estimated_pitch(float v) {
    estimated_pitch_ = v;
  }

  float height() {
    return height_;
  }

  // Ratio of character height to median good pitch, or -1 when there
  // are not enough good pitch samples.
  float height_pitch_ratio() {
    if (good_pitches_.size() < 2) return -1.0;
    return height_ / good_pitches_.median();
  }

  float gap() {
    return gap_;
  }

  int num_chars() {
    return characters_.size();
  }
  FPChar *character(int i) {
    return &characters_[i];
  }

  const TBOX &box(int i) {
    return characters_[i].box();
  }

  const TBOX &real_body(int i) {
    return characters_[i].real_body();
  }

  // True if the (possibly widened) box differs from the real blob box.
  bool is_box_modified(int i) {
    return !(characters_[i].box() == characters_[i].real_body());
  }

  float center_x(int i) {
    return (characters_[i].box().left() + characters_[i].box().right()) / 2.0;
  }

  bool is_final(int i) {
    return characters_[i].is_final();
  }

  void finalize(int i) {
    characters_[i].set_final(true);
  }

  bool is_good(int i) {
    return characters_[i].alignment() == FPChar::ALIGN_GOOD;
  }

  bool is_bad(int i) {
    return characters_[i].alignment() == FPChar::ALIGN_BAD;
  }

  bool is_unknown(int i) {
    return characters_[i].alignment() == FPChar::ALIGN_UNKNOWN;
  }

  void mark_good(int i) {
    characters_[i].set_alignment(FPChar::ALIGN_GOOD);
  }

  void mark_bad(int i) {
    characters_[i].set_alignment(FPChar::ALIGN_BAD);
  }

  void clear_alignment(int i) {
    characters_[i].set_alignment(FPChar::ALIGN_UNKNOWN);
  }

 private:
  // Fraction of the narrower box's width covered by the x-overlap of the
  // two boxes (negative x_gap means overlap).
  static float x_overlap_fraction(const TBOX& box1, const TBOX& box2) {
    if (MIN(box1.width(), box2.width()) == 0) return 0.0;
    return -box1.x_gap(box2) / (float)MIN(box1.width(), box2.width());
  }

  static bool mostly_overlap(const TBOX& box1, const TBOX& box2) {
    return x_overlap_fraction(box1, box2) > 0.9;
  }

  static bool significant_overlap(const TBOX& box1, const TBOX& box2) {
    if (MIN(box1.width(), box2.width()) == 0) return false;
    int overlap = -box1.x_gap(box2);
    return overlap > 1 || x_overlap_fraction(box1, box2) > 0.1;
  }

  // Distance between the x-centers of the two boxes.
  static float box_pitch(const TBOX& ref, const TBOX& box) {
    return abs(ref.left() + ref.right() - box.left() - box.right()) / 2.0;
  }

  // Check if two neighboring characters satisfy the fixed pitch model.
  static bool is_good_pitch(float pitch, const TBOX& box1, const TBOX& box2) {
    // Character box shouldn't exceed pitch.
    if (box1.width() >= pitch * (1.0 + kFPTolerance) ||
        box2.width() >= pitch * (1.0 + kFPTolerance) ||
        box1.height() >= pitch * (1.0 + kFPTolerance) ||
        box2.height() >= pitch * (1.0 + kFPTolerance)) return false;

    const float real_pitch = box_pitch(box1, box2);
    if (fabs(real_pitch - pitch) < pitch * kFPTolerance) return true;

    if (textord_space_size_is_variable) {
      // Hangul characters usually have fixed pitch, but words are
      // delimited by space which can be narrower than characters.
      if (real_pitch > pitch && real_pitch < pitch * 2.0 &&
          real_pitch - box1.x_gap(box2) < pitch) {
        return true;
      }
    }
    return false;
  }

  // Blobs attached to a previous blob or flagged as leaders are skipped.
  static bool is_interesting_blob(const BLOBNBOX *blob) {
    return !blob->joined_to_prev() && blob->flow() != BTFT_LEADER;
  }

  // Cleanup chars that are already merged to others.
  void DeleteChars() {
    int index = 0;
    for (int i = 0; i < characters_.size(); ++i) {
      if (!characters_[i].delete_flag()) {
        if (index != i) characters_[index] = characters_[i];
        index++;
      }
    }
    characters_.truncate(index);
  }

  float pitch_;  // Character pitch.
  float estimated_pitch_;  // equal to pitch_ if pitch_ is considered
                           // to be good enough.
  float height_;  // Character height.
  float gap_;  // Minimum gap between characters.

  // Pitches between any two successive characters.
  SimpleStats all_pitches_;
  // Gaps between any two successive characters.
  SimpleStats all_gaps_;
  // Pitches between any two successive characters that are consistent
  // with the fixed pitch model.
  SimpleStats good_pitches_;
  // Gaps between any two successive characters that are consistent
  // with the fixed pitch model.
  SimpleStats good_gaps_;

  SimpleStats heights_;

  GenericVector<FPChar> characters_;
  TO_ROW *real_row_;  // Underlying TD_ROW for this row.
};
// Builds the character list for this row from the row's blobs, merging
// blobs that significantly overlap in x, and computes the initial
// character height estimate (87.5th percentile of tall-enough blobs).
void FPRow::Init(TO_ROW *row) {
  ASSERT_HOST(row != NULL);
  ASSERT_HOST(row->xheight > 0);
  real_row_ = row;
  real_row_->pitch_decision = PITCH_CORR_PROP;  // Default decision.

  BLOBNBOX_IT it = row->blob_list();
  for (it.mark_cycle_pt(); !it.cycled_list(); it.forward()) {
    BLOBNBOX *blob = it.data();
    if (!is_interesting_blob(blob)) continue;
    FPChar ch;
    ch.Init(blob);
    // Merge unconditionally if two blobs overlap.
    if (characters_.empty() ||
        !significant_overlap(ch.box(), characters_.back().box())) {
      characters_.push_back(ch);
    } else {
      characters_.back().Merge(ch);
    }
    // Skinny blobs (width > 3x height) would skew the height estimate.
    TBOX bbox = blob->bounding_box();
    if (bbox.height() * 3.0 > bbox.width()) {
      heights_.Add(bbox.height());
    }
  }
  heights_.Finish();
  height_ = heights_.ile(0.875);
}
// Writes the pitch-estimation results for this row back into the
// underlying TO_ROW: pitch decision, fixed_pitch, kerning/space sizes,
// spacing thresholds, and the character cell boundaries used by later
// word segmentation.  Rows without any good pitch samples are marked
// proportional and left otherwise untouched.
void FPRow::OutputEstimations() {
  if (good_pitches_.size() == 0) {
    pitch_ = 0.0f;
    real_row_->pitch_decision = PITCH_CORR_PROP;
    return;
  }

  pitch_ = good_pitches_.median();
  real_row_->fixed_pitch = pitch_;
  // good_gaps_.ile(0.125) can be large if most characters on the row
  // are skinny. Use pitch_ - height_ instead if it's smaller, but
  // positive.
  real_row_->kern_size = real_row_->pr_nonsp =
      MIN(good_gaps_.ile(0.125), MAX(pitch_ - height_, 0));
  real_row_->body_size = pitch_ - real_row_->kern_size;

  if (good_pitches_.size() < all_pitches_.size() * kFixedPitchThreshold) {
    // If more than half of the characters of a line don't fit to the
    // fixed pitch model, consider the line to be propotional. 50%
    // seems to be a good threshold in practice as well.
    // Anyway we store estimated values (fixed_pitch, kern_size, etc.) in
    // real_row_ as a partial estimation result and try to use them in the
    // normalization process.
    real_row_->pitch_decision = PITCH_CORR_PROP;
    return;
  } else if (good_pitches_.size() > all_pitches_.size() * 0.75) {
    real_row_->pitch_decision = PITCH_DEF_FIXED;
  } else {
    real_row_->pitch_decision = PITCH_CORR_FIXED;
  }

  real_row_->space_size = real_row_->pr_space = pitch_;
  // Set min_space to 50% of character pitch so that we can break CJK
  // text at a half-width space after punctuation.
  real_row_->min_space = (pitch_ + good_gaps_.minimum()) * 0.5;

  // Don't consider a quarter space as a real space, because it's used
  // for line justification in traditional Japanese books.
  real_row_->max_nonspace = MAX(pitch_ * 0.25 + good_gaps_.minimum(),
                                (double)good_gaps_.ile(0.875));

  // NOTE: this local threshold is deliberately computed from max_nonspace
  // BEFORE it is widened by the intra-character gaps below; it governs the
  // word-break decisions in the cell loop, while real_row_->space_threshold
  // (set afterwards) reflects the widened value.
  int space_threshold =
      MIN((real_row_->max_nonspace + real_row_->min_space) / 2,
          real_row_->xheight);

  // Make max_nonspace larger than any intra-character gap so that
  // make_prop_words() won't break a row at the middle of a character.
  for (int i = 0; i < num_chars(); ++i) {
    if (characters_[i].max_gap() > real_row_->max_nonspace) {
      real_row_->max_nonspace = characters_[i].max_gap();
    }
  }
  real_row_->space_threshold =
      MIN((real_row_->max_nonspace + real_row_->min_space) / 2,
          real_row_->xheight);
  real_row_->used_dm_model = false;

  // Setup char_cells: a list of x boundaries between character cells.
  ICOORDELT_IT cell_it = &real_row_->char_cells;
  ICOORDELT *cell = new ICOORDELT(real_body(0).left(), 0);
  cell_it.add_after_then_move(cell);

  int right = real_body(0).right();
  for (int i = 1; i < num_chars(); ++i) {
    // Put a word break if gap between two characters is bigger than
    // space_threshold. Don't break if none of two characters
    // couldn't be "finalized", because maybe they need to be merged
    // to one character.
    if ((is_final(i - 1) || is_final(i)) &&
        real_body(i - 1).x_gap(real_body(i)) > space_threshold) {
      cell = new ICOORDELT(right + 1, 0);
      cell_it.add_after_then_move(cell);
      // Fill the wide gap with empty cells one pitch apart.
      while (right + pitch_ < box(i).left()) {
        right += pitch_;
        cell = new ICOORDELT(right + 1, 0);
        cell_it.add_after_then_move(cell);
      }
      right = box(i).left();
    }
    // Mid-point between the previous right edge and this char's left edge.
    cell = new ICOORDELT((right + real_body(i).left()) / 2, 0);
    cell_it.add_after_then_move(cell);
    right = real_body(i).right();
  }

  cell = new ICOORDELT(right + 1, 0);
  cell_it.add_after_then_move(cell);

  // TODO(takenaka): add code to store alignment/fragmentation
  // information to blobs so that it can be reused later, e.g. in
  // recognition phase.
}
// Recomputes all pitch/gap/height statistics for this row from the
// current character list and alignment flags, then derives pitch_ and
// gap_.  In pass1 every pitch following a "good" character is trusted;
// in later passes only pitches between two good characters that also
// agree with estimated_pitch_ (within kFPTolerance) are used.
void FPRow::EstimatePitch(bool pass1) {
  good_pitches_.Clear();
  all_pitches_.Clear();
  good_gaps_.Clear();
  all_gaps_.Clear();
  heights_.Clear();
  if (num_chars() == 0) return;

  // Centers of two adjacent characters.  Note: center_x() returns float;
  // the value is truncated to an integer coordinate here.
  inT32 cx0, cx1;
  bool prev_was_good = is_good(0);
  cx0 = center_x(0);
  heights_.Add(box(0).height());
  for (int i = 1; i < num_chars(); i++) {
    cx1 = center_x(i);
    inT32 pitch = cx1 - cx0;
    inT32 gap = MAX(0, real_body(i - 1).x_gap(real_body(i)));
    heights_.Add(box(i).height());
    // Ignore pitches that are too small (under half the character height),
    // but keep wide ones - they may just be the result of large tracking.
    if (pitch > height_ * 0.5) {
      all_pitches_.Add(pitch);
      all_gaps_.Add(gap);
      if (is_good(i)) {
        // In pass1 (after Pass1Analyze()), all characters marked as
        // "good" have a good consistent pitch with their previous
        // characters. However, it's not true in pass2 and a good
        // character may have a good pitch only between its successor.
        // So we collect only pitch values between two good
        // characters. and within tolerance in pass2.
        if (pass1 || (prev_was_good &&
                      fabs(estimated_pitch_ - pitch) <
                      kFPTolerance * estimated_pitch_)) {
          good_pitches_.Add(pitch);
          // Only trust gaps where neither box was artificially widened.
          if (!is_box_modified(i - 1) && !is_box_modified(i)) {
            good_gaps_.Add(gap);
          }
        }
        prev_was_good = true;
      } else {
        prev_was_good = false;
      }
    }
    cx0 = cx1;
  }

  good_pitches_.Finish();
  all_pitches_.Finish();
  good_gaps_.Finish();
  all_gaps_.Finish();
  heights_.Finish();

  height_ = heights_.ile(0.875);
  if (all_pitches_.size() == 0) {
    pitch_ = 0.0f;
    gap_ = 0.0f;
  } else if (good_pitches_.size() < 2) {
    // We don't have enough data to estimate the pitch of this row yet.
    // Use median of all pitches as the initial guess.
    pitch_ = all_pitches_.median();
    ASSERT_HOST(pitch_ > 0.0f);
    gap_ = all_gaps_.ile(0.125);
  } else {
    pitch_ = good_pitches_.median();
    ASSERT_HOST(pitch_ > 0.0f);
    gap_ = good_gaps_.ile(0.125);
  }
}
// Dumps this row's final pitch decision and the per-character
// alignment/finalization state via tprintf, for debugging.
void FPRow::DebugOutputResult(int row_index) {
  if (num_chars() == 0) return;
  tprintf("Row %d: pitch_decision=%d, fixed_pitch=%f, max_nonspace=%d, "
          "space_size=%f, space_threshold=%d, xheight=%f\n",
          row_index, (int)(real_row_->pitch_decision),
          real_row_->fixed_pitch, real_row_->max_nonspace,
          real_row_->space_size, real_row_->space_threshold,
          real_row_->xheight);
  for (int idx = 0; idx < num_chars(); ++idx) {
    tprintf("Char %d: is_final=%d is_good=%d num_blobs=%d: ",
            idx, is_final(idx), is_good(idx), character(idx)->num_blobs());
    box(idx).print();
  }
}
// Marks a character "good" when the pitches to both of its neighbors are
// consistent with the fixed-pitch model.  With a known row pitch, both
// neighboring pitches are checked against it; without one, the pitch of
// the preceding pair is used as the reference.  The first and last
// characters inherit the status of their single neighbor.
void FPRow::Pass1Analyze() {
  if (num_chars() < 2) return;

  const bool have_pitch_estimate = estimated_pitch_ > 0.0f;
  for (int i = 2; i < num_chars(); i++) {
    bool good;
    if (have_pitch_estimate) {
      good = is_good_pitch(estimated_pitch_, box(i - 2), box(i - 1)) &&
             is_good_pitch(estimated_pitch_, box(i - 1), box(i));
    } else {
      good = is_good_pitch(box_pitch(box(i - 2), box(i - 1)),
                           box(i - 1), box(i));
    }
    if (good) mark_good(i - 1);
  }
  character(0)->set_alignment(character(1)->alignment());
  character(num_chars() - 1)->set_alignment(
      character(num_chars() - 2)->alignment());
}
// For each non-finalized character, projects an "imaginary body" (one
// pitch wide) from its nearest finalized neighbor, on either side.
// Characters fitting entirely inside the body are finalized or marked
// for merging; characters straddling the body boundary are marked bad.
// Returns true if any character's status changed, so the caller can
// iterate until a fixed point.
bool FPRow::Pass2Analyze() {
  bool changed = false;
  if (num_chars() <= 1 || estimated_pitch_ == 0.0f) {
    return false;
  }
  for (int i = 0; i < num_chars(); i++) {
    if (is_final(i)) continue;

    FPChar::Alignment alignment = character(i)->alignment();
    bool intersecting = false;
    bool not_intersecting = false;

    if (i < num_chars() - 1 && is_final(i + 1)) {
      // Next character is already finalized. Estimate the imaginary
      // body including this character based on the character. Skip
      // whitespace if necessary.
      bool skipped_whitespaces = false;
      float c1 = center_x(i + 1) - 1.5 * estimated_pitch_;
      while (c1 > box(i).right()) {
        skipped_whitespaces = true;
        c1 -= estimated_pitch_;
      }
      TBOX ibody(c1, box(i).bottom(), c1 + estimated_pitch_, box(i).top());

      // Collect all characters that mostly fit in the region.
      // Also, their union height shouldn't be too big.
      int j = i;
      TBOX merged;
      while (j >= 0 && !is_final(j) && mostly_overlap(ibody, box(j)) &&
             merged.bounding_union(box(j)).height() <
             estimated_pitch_ * (1 + kFPTolerance)) {
        merged += box(j);
        j--;
      }

      if (j >= 0 && significant_overlap(ibody, box(j))) {
        // character(j) lies on the character boundary and doesn't fit
        // well into the imaginary body.
        if (!is_final(j)) intersecting = true;
      } else {
        not_intersecting = true;
        if (i - j > 0) {
          // Merge character(j+1) ... character(i) because they fit
          // into the body nicely.
          if (i - j == 1) {
            // Only one char in the imaginary body.
            if (!skipped_whitespaces) mark_good(i);
            // set ibody as bounding box of this character to get
            // better pitch analysis result for halfwidth glyphs
            // followed by a halfwidth space.
            if (box(i).width() <= estimated_pitch_ * 0.5) {
              ibody += box(i);
              character(i)->set_box(ibody);
            }
            character(i)->set_merge_to_prev(false);
            finalize(i);
          } else {
            // Multiple fragments: flag all but the first for merging.
            for (int k = i; k > j + 1; k--) {
              character(k)->set_merge_to_prev(true);
            }
          }
        }
      }
    }
    if (i > 0 && is_final(i - 1)) {
      // Now we repeat everything from the opposite side. Previous
      // character is already finalized. Estimate the imaginary body
      // including this character based on the character.
      bool skipped_whitespaces = false;
      float c1 = center_x(i - 1) + 1.5 * estimated_pitch_;
      while (c1 < box(i).left()) {
        skipped_whitespaces = true;
        c1 += estimated_pitch_;
      }
      TBOX ibody(c1 - estimated_pitch_, box(i).bottom(), c1, box(i).top());

      int j = i;
      TBOX merged;
      while (j < num_chars() && !is_final(j) && mostly_overlap(ibody, box(j)) &&
             merged.bounding_union(box(j)).height() <
             estimated_pitch_ * (1 + kFPTolerance)) {
        merged += box(j);
        j++;
      }

      if (j < num_chars() && significant_overlap(ibody, box(j))) {
        if (!is_final(j)) intersecting = true;
      } else {
        not_intersecting = true;
        if (j - i > 0) {
          if (j - i == 1) {
            if (!skipped_whitespaces) mark_good(i);
            if (box(i).width() <= estimated_pitch_ * 0.5) {
              ibody += box(i);
              character(i)->set_box(ibody);
            }
            character(i)->set_merge_to_prev(false);
            finalize(i);
          } else {
            // Fragments merge leftwards into character(i).
            for (int k = i + 1; k < j; k++) {
              character(k)->set_merge_to_prev(true);
            }
          }
        }
      }
    }

    // This character doesn't fit well into the estimated imaginary
    // bodies. Mark it as bad.
    if (intersecting && !not_intersecting) mark_bad(i);
    if (character(i)->alignment() != alignment ||
        character(i)->merge_to_prev()) {
      changed = true;
    }
  }

  return changed;
}
// Merges runs of characters flagged merge_to_prev (by Pass2Analyze())
// into the last unflagged character before the run, then compacts the
// character array.
void FPRow::MergeFragments() {
  int last_char = 0;

  for (int j = 0; j < num_chars(); ++j) {
    if (character(j)->merge_to_prev()) {
      character(last_char)->Merge(*character(j));
      character(j)->set_delete_flag(true);
      // Merging invalidates the absorbed result's alignment status.
      clear_alignment(last_char);
      // NOTE(review): indexes j-1, which would underflow if character 0
      // ever carried merge_to_prev; Pass2Analyze() appears to set the flag
      // only on characters following a finalized neighbor -- confirm that
      // invariant holds.
      character(j-1)->set_merge_to_prev(false);
    } else {
      last_char = j;
    }
  }
  // Physically remove the characters marked with delete_flag above.
  DeleteChars();
}
// Finalizes characters that occupy roughly a full pitch on their own and
// therefore cannot be merged with a neighbor, then refreshes the
// good/bad alignment marks considering only finalized neighbors.
void FPRow::FinalizeLargeChars() {
  float row_pitch = estimated_pitch();
  for (int i = 0; i < num_chars(); i++) {
    if (is_final(i)) continue;

    // Finalize if both neighbors are finalized. We have no other choice.
    if (i > 0 && is_final(i - 1) && i < num_chars() - 1 && is_final(i + 1)) {
      finalize(i);
      continue;
    }

    float cx = center_x(i);
    // One-pitch-wide imaginary body centered on this character
    // (y extent is irrelevant for the x-overlap tests below).
    TBOX ibody(cx - 0.5 * row_pitch, 0, cx + 0.5 * row_pitch, 1);
    if (i > 0) {
      // The preceding character significantly intersects with the
      // imaginary body of this character. Let Pass2Analyze() handle
      // this case.
      if (x_overlap_fraction(ibody, box(i - 1)) > 0.1) continue;
      if (!is_final(i - 1)) {
        TBOX merged = box(i);
        merged += box(i - 1);
        if (merged.width() < row_pitch) continue;
        // This character cannot be finalized yet because it can be
        // merged with the previous one. Again, let Pass2Analyze()
        // handle this case.
      }
    }
    if (i < num_chars() - 1) {
      // Mirror of the checks above, against the following character.
      if (x_overlap_fraction(ibody, box(i + 1)) > 0.1) continue;
      if (!is_final(i + 1)) {
        TBOX merged = box(i);
        merged += box(i + 1);
        if (merged.width() < row_pitch) continue;
      }
    }
    finalize(i);
  }

  // Update alignment decision.  We only consider finalized characters
  // in pass2. E.g. if a finalized character C has another finalized
  // character L on its left and a not-finalized character R on its
  // right, we mark C as good if the pitch between C and L is good,
  // regardless of the pitch between C and R.
  for (int i = 0; i < num_chars(); i++) {
    if (!is_final(i)) continue;
    bool good_pitch = false;
    bool bad_pitch = false;
    if (i > 0 && is_final(i - 1)) {
      if (is_good_pitch(row_pitch, box(i - 1), box(i))) {
        good_pitch = true;
      } else {
        bad_pitch = true;
      }
    }
    if (i < num_chars() - 1 && is_final(i + 1)) {
      if (is_good_pitch(row_pitch, box(i), box(i + 1))) {
        good_pitch = true;
      } else {
        bad_pitch = true;
      }
    }
    // Conflicting evidence leaves the current mark unchanged.
    if (good_pitch && !bad_pitch) mark_good(i);
    else if (!good_pitch && bad_pitch) mark_bad(i);
  }
}
// Drives fixed-pitch analysis over all FPRows of a page: repeated
// pass1/pass2 analysis, fragment merging, pitch estimation and final
// output of the estimations into the underlying TO_ROWs.
class FPAnalyzer {
 public:
  // FIX: num_tall_rows_, num_bad_rows_, num_empty_rows_ and
  // max_chars_per_row_ were left uninitialized; they are now
  // zero-initialized so accessors are safe before Init()/EstimatePitch().
  FPAnalyzer(): page_tr_(), rows_(),
                num_tall_rows_(0), num_bad_rows_(0),
                num_empty_rows_(0), max_chars_per_row_(0) { }
  ~FPAnalyzer() { }

  // Builds rows_ from all rows of all blocks.
  void Init(ICOORD page_tr, TO_BLOCK_LIST *port_blocks);

  void Pass1Analyze() {
    for (int i = 0; i < rows_.size(); i++) rows_[i].Pass1Analyze();
  }

  // Estimate character pitch for each row.  The argument pass1 can be
  // set to true if the function is called after Pass1Analyze(), to
  // eliminate some redundant computation.
  void EstimatePitch(bool pass1);

  // Heuristic: the page may be fixed pitch unless (almost) all rows
  // are bad or unusually tall.
  bool maybe_fixed_pitch() {
    if (rows_.empty() ||
        rows_.size() <= num_bad_rows_ + num_tall_rows_ + 1) return false;
    return true;
  }

  void MergeFragments() {
    for (int i = 0; i < rows_.size(); i++) rows_[i].MergeFragments();
  }

  void FinalizeLargeChars() {
    for (int i = 0; i < rows_.size(); i++) rows_[i].FinalizeLargeChars();
  }

  // Returns true if any row changed, so the caller can iterate.
  bool Pass2Analyze() {
    bool changed = false;
    for (int i = 0; i < rows_.size(); i++) {
      if (rows_[i].Pass2Analyze()) {
        changed = true;
      }
    }
    return changed;
  }

  void OutputEstimations() {
    for (int i = 0; i < rows_.size(); i++) rows_[i].OutputEstimations();
    // Don't we need page-level estimation of gaps/spaces?
  }

  void DebugOutputResult() {
    tprintf("FPAnalyzer: final result\n");
    for (int i = 0; i < rows_.size(); i++) rows_[i].DebugOutputResult(i);
  }

  int num_rows() {
    return rows_.size();
  }

  // Returns the upper limit for pass2 loop iteration.
  int max_iteration() {
    // We're fixing at least one character per iteration. So basically
    // we shouldn't require more than max_chars_per_row_ iterations.
    return max_chars_per_row_ + 100;
  }

 private:
  ICOORD page_tr_;
  GenericVector<FPRow> rows_;
  int num_tall_rows_;      // Rows whose height/pitch ratio exceeds 1.1.
  int num_bad_rows_;       // Rows without any good pitch samples.
  int num_empty_rows_;     // Rows with at most one character.
  int max_chars_per_row_;  // Character count of the fullest row.
};
// Prepares all rows for pitch analysis: filters repeated characters
// (leaders) per block, then wraps every TO_ROW into an FPRow and
// records the empty-row count and the maximum characters per row.
void FPAnalyzer::Init(ICOORD page_tr, TO_BLOCK_LIST *port_blocks) {
  page_tr_ = page_tr;

  TO_BLOCK_IT bit;
  bit.set_to_list(port_blocks);
  // First sweep: mark repeated characters in each non-empty block.
  for (bit.mark_cycle_pt(); !bit.cycled_list(); bit.forward()) {
    TO_BLOCK *blk = bit.data();
    if (blk->get_rows()->empty()) continue;
    ASSERT_HOST(blk->xheight > 0);
    find_repeated_chars(blk, FALSE);
  }

  num_empty_rows_ = 0;
  max_chars_per_row_ = 0;
  // Second sweep: build an FPRow per TO_ROW and gather row statistics.
  for (bit.mark_cycle_pt(); !bit.cycled_list(); bit.forward()) {
    TO_ROW_IT rit = bit.data()->get_rows();
    for (rit.mark_cycle_pt(); !rit.cycled_list(); rit.forward()) {
      FPRow fp_row;
      fp_row.Init(rit.data());
      rows_.push_back(fp_row);
      const int nchars = rows_.back().num_chars();
      if (nchars <= 1) num_empty_rows_++;
      if (nchars > max_chars_per_row_) max_chars_per_row_ = nchars;
    }
  }
}
// Estimates the pitch of every row.  Rows with at least 5 good pitch
// samples keep their own estimate; other multi-character rows blend in
// a page-level pitch predicted from the (height+gap) -> pitch
// correlation accumulated over all rows with good pitches.
void FPAnalyzer::EstimatePitch(bool pass1) {
  LocalCorrelation pitch_height_stats;

  num_tall_rows_ = 0;
  num_bad_rows_ = 0;
  pitch_height_stats.Clear();
  for (int i = 0; i < rows_.size(); i++) {
    rows_[i].EstimatePitch(pass1);
    if (rows_[i].good_pitches()) {
      // Weight each row's sample by its number of good pitches.
      pitch_height_stats.Add(rows_[i].height() + rows_[i].gap(),
                             rows_[i].pitch(), rows_[i].good_pitches());
      if (rows_[i].height_pitch_ratio() > 1.1) num_tall_rows_++;
    } else {
      num_bad_rows_++;
    }
  }

  pitch_height_stats.Finish();
  for (int i = 0; i < rows_.size(); i++) {
    if (rows_[i].good_pitches() >= 5) {
      // We have enough evidences. Just use the pitch estimation
      // from this row.
      rows_[i].set_estimated_pitch(rows_[i].pitch());
    } else if (rows_[i].num_chars() > 1) {
      float estimated_pitch =
          pitch_height_stats.EstimateYFor(rows_[i].height() + rows_[i].gap(),
                                          0.1);
      // CJK characters are more likely to be fragmented than poorly
      // chopped. So trust the page-level estimation of character
      // pitch only if it's larger than row-level estimation or
      // row-level estimation is too large (2x bigger than row height).
      if (estimated_pitch > rows_[i].pitch() ||
          rows_[i].pitch() > rows_[i].height() * 2.0) {
        rows_[i].set_estimated_pitch(estimated_pitch);
      } else {
        rows_[i].set_estimated_pitch(rows_[i].pitch());
      }
    }
    // Rows with <= 1 character keep their previous estimated pitch.
  }
}
} // namespace
// Entry point: runs the CJK fixed-pitch analysis over all rows of the
// given blocks and stores the estimations in the underlying TO_ROWs.
void compute_fixed_pitch_cjk(ICOORD page_tr,
                             TO_BLOCK_LIST *port_blocks) {
  FPAnalyzer analyzer;
  analyzer.Init(page_tr, port_blocks);
  if (analyzer.num_rows() == 0) return;

  // Run pass-1 analysis twice: the second run refines the alignment
  // using the row pitches estimated by the first.
  for (int round = 0; round < 2; ++round) {
    analyzer.Pass1Analyze();
    analyzer.EstimatePitch(true);
  }

  // Early exit if the page doesn't seem to contain fixed pitch rows.
  if (!analyzer.maybe_fixed_pitch()) {
    if (textord_debug_pitch_test) {
      tprintf("Page doesn't seem to contain fixed pitch rows\n");
    }
    return;
  }

  // Iterate merge/finalize/estimate until pass-2 reaches a fixed point
  // or the iteration cap is hit.
  int iteration = 0;
  bool more_work = true;
  while (more_work && iteration < analyzer.max_iteration()) {
    analyzer.MergeFragments();
    analyzer.FinalizeLargeChars();
    analyzer.EstimatePitch(false);
    iteration++;
    more_work = analyzer.Pass2Analyze();
  }

  if (textord_debug_pitch_test) {
    tprintf("compute_fixed_pitch_cjk finished after %d iteration (limit=%d)\n",
            iteration, analyzer.max_iteration());
  }

  analyzer.OutputEstimations();
  if (textord_debug_pitch_test) analyzer.DebugOutputResult();
}
| C++ |
/**********************************************************************
* File: scanedg.h (Formerly scanedge.h)
* Description: Raster scanning crack based edge extractor.
* Author: Ray Smith
* Created: Fri Mar 22 16:11:50 GMT 1991
*
* (C) Copyright 1991, Hewlett-Packard Ltd.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
#ifndef SCANEDG_H
#define SCANEDG_H
#include "params.h"
#include "scrollview.h"
#include "pdblock.h"
#include "crakedge.h"
class C_OUTLINE_IT;

// Mutable scan state shared by the edge builders: the current crack
// position and the freelist cracks are allocated from.
struct CrackPos {
  CRACKEDGE** free_cracks;   // Freelist for fast allocation.
  int x;                     // Position of new edge.
  int y;
};

struct Pix;

// Scans a thresholded image block and emits the completed outlines
// through outline_it.
void block_edges(Pix *t_image,       // thresholded image
                 PDBLK *block,       // block in image
                 C_OUTLINE_IT* outline_it);

// Forces the pixels outside the block's left/right margins to the
// given white-out value on one scan line.
void make_margins(PDBLK *block,            // block in image
                  BLOCK_LINE_IT *line_it,  // for old style
                  uinT8 *pixels,           // pixels to strip
                  uinT8 margin,            // white-out pixel
                  inT16 left,              // block edges
                  inT16 right,
                  inT16 y);                // line coord );

// Processes one scan line against the previous one, extending, opening
// and closing crack edges as black/white transitions are found.
void line_edges(inT16 x,             // coord of line start
                inT16 y,             // coord of line
                inT16 xext,          // width of line
                uinT8 uppercolour,   // start of prev line
                uinT8 * bwpos,       // thresholded line
                CRACKEDGE ** prevline,  // edges in progress
                CRACKEDGE **free_cracks,
                C_OUTLINE_IT* outline_it);

// Creates a new horizontal CRACKEDGE at pos, linked to join (if any).
CRACKEDGE *h_edge(int sign,          // sign of edge
                  CRACKEDGE * join,  // edge to join to
                  CrackPos* pos);

// Creates a new vertical CRACKEDGE at pos, linked to join (if any).
CRACKEDGE *v_edge(int sign,          // sign of edge
                  CRACKEDGE * join,  // edge to join to
                  CrackPos* pos);

// Joins two edge fragments; emits a completed outline through
// outline_it when the join closes a loop.
void join_edges(CRACKEDGE *edge1,    // edges to join
                CRACKEDGE *edge2,    // no specific order
                CRACKEDGE **free_cracks,
                C_OUTLINE_IT* outline_it);

// Releases a chain of CRACKEDGEs starting at start.
void free_crackedges(CRACKEDGE *start);
#endif
| C++ |
///////////////////////////////////////////////////////////////////////
// File: strokewidth.cpp
// Description: Subclass of BBGrid to find uniformity of strokewidth.
// Author: Ray Smith
// Created: Mon Mar 31 16:17:01 PST 2008
//
// (C) Copyright 2008, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#ifdef _MSC_VER
#pragma warning(disable:4244) // Conversion warnings
#endif
#ifdef HAVE_CONFIG_H
#include "config_auto.h"
#endif
#include "strokewidth.h"
#include <math.h>
#include "blobbox.h"
#include "colpartition.h"
#include "colpartitiongrid.h"
#include "imagefind.h"
#include "linlsq.h"
#include "statistc.h"
#include "tabfind.h"
#include "textlineprojection.h"
#include "tordmain.h" // For SetBlobStrokeWidth.
namespace tesseract {

// Debug level for stroke-width display windows (0 = off).
INT_VAR(textord_tabfind_show_strokewidths, 0, "Show stroke widths");
// Debug mode: stop after stroke-width detection (see ~StrokeWidth).
BOOL_VAR(textord_tabfind_only_strokewidths, false, "Only run stroke widths");

/** Allowed proportional change in stroke width to be the same font. */
const double kStrokeWidthFractionTolerance = 0.125;
/**
 * Allowed constant change in stroke width to be the same font.
 * Really 1.5 pixels.
 */
const double kStrokeWidthTolerance = 1.5;
// Same but for CJK we are a bit more generous.
const double kStrokeWidthFractionCJK = 0.25;
const double kStrokeWidthCJK = 2.0;
// Radius in grid cells of search for broken CJK. Doesn't need to be very
// large as the grid size should be about the size of a character anyway.
const int kCJKRadius = 2;
// Max distance fraction of size to join close but broken CJK characters.
const double kCJKBrokenDistanceFraction = 0.25;
// Max number of components in a broken CJK character.
const int kCJKMaxComponents = 8;
// Max aspect ratio of CJK broken characters when put back together.
const double kCJKAspectRatio = 1.25;
// Max increase in aspect ratio of CJK broken characters when merged.
const double kCJKAspectRatioIncrease = 1.0625;
// Max multiple of the grid size that will be used in computing median CJKsize.
const int kMaxCJKSizeRatio = 5;
// Min fraction of blobs broken CJK to iterate and run it again.
const double kBrokenCJKIterationFraction = 0.125;
// Multiple of gridsize as x-padding for a search box for diacritic base
// characters.
const double kDiacriticXPadRatio = 7.0;
// Multiple of gridsize as y-padding for a search box for diacritic base
// characters.
const double kDiacriticYPadRatio = 1.75;
// Min multiple of diacritic height that a neighbour must be to be a
// convincing base character.
const double kMinDiacriticSizeRatio = 1.0625;
// Max multiple of a textline's median height as a threshold for the sum of
// a diacritic's farthest x and y distances (gap + size).
const double kMaxDiacriticDistanceRatio = 1.25;
// Max x-gap between a diacritic and its base char as a fraction of the height
// of the base char (allowing other blobs to fill the gap.)
const double kMaxDiacriticGapToBaseCharHeight = 1.0;
// Radius of a search for diacritics in grid units.
const int kSearchRadius = 2;
// Ratio between longest side of a line and longest side of a character.
// (neighbor_min > blob_min * kLineTrapShortest &&
//  neighbor_max < blob_max / kLineTrapLongest)
// => neighbor is a grapheme and blob is a line.
const int kLineTrapLongest = 4;
// Ratio between shortest side of a line and shortest side of a character.
const int kLineTrapShortest = 2;
// Max aspect ratio of the total box before CountNeighbourGaps
// decides immediately based on the aspect ratio.
const int kMostlyOneDirRatio = 3;
// Aspect ratio for a blob to be considered as line residue.
const double kLineResidueAspectRatio = 8.0;
// Padding ratio for line residue search box.
const int kLineResiduePadRatio = 3;
// Min multiple of neighbour size for a line residue to be genuine.
const double kLineResidueSizeRatio = 1.75;
// Aspect ratio filter for OSD.
const float kSizeRatioToReject = 2.0;
// Max number of normal blobs a large blob may overlap before it is rejected
// and determined to be image
const int kMaxLargeOverlaps = 3;
// Expansion factor for search box for good neighbours.
const double kNeighbourSearchFactor = 2.5;
// Constructs a StrokeWidth grid covering [bleft, tright] with the given cell
// size. The rerotation starts as the identity (1, 0) and all debug display
// windows start out inactive (NULL) until created on demand.
StrokeWidth::StrokeWidth(int gridsize,
                         const ICOORD& bleft, const ICOORD& tright)
  : BlobGrid(gridsize, bleft, tright), nontext_map_(NULL), projection_(NULL),
    denorm_(NULL), grid_box_(bleft, tright), rerotation_(1.0f, 0.0f) {
  // No visualization windows exist yet; they are created lazily while
  // debugging with textord_tabfind_show_strokewidths.
  widths_win_ = NULL;
  initial_widths_win_ = NULL;
  leaders_win_ = NULL;
  chains_win_ = NULL;
  diacritics_win_ = NULL;
  textlines_win_ = NULL;
  smoothed_win_ = NULL;
}
// Destructor. Frees all the debug display windows. If widths_win_ exists
// (we were debugging), waits for the user to close it first, and optionally
// exits the whole process if textord_tabfind_only_strokewidths is set.
StrokeWidth::~StrokeWidth() {
  if (widths_win_ != NULL) {
#ifndef GRAPHICS_DISABLED
    // Block until the user destroys the window; the returned event object
    // is owned by us and must be deleted.
    delete widths_win_->AwaitEvent(SVET_DESTROY);
#endif  // GRAPHICS_DISABLED
    // Debug mode that runs nothing beyond strokewidth detection: stop here.
    if (textord_tabfind_only_strokewidths)
      exit(0);
    delete widths_win_;
  }
  delete leaders_win_;
  delete initial_widths_win_;
  delete chains_win_;
  delete textlines_win_;
  delete smoothed_win_;
  delete diacritics_win_;
}
// Sets the neighbours member of the medium-sized blobs in the block.
// Searches on 4 sides of each blob for similar-sized, similar-strokewidth
// blobs and sets pointers to the good neighbours.
void StrokeWidth::SetNeighboursOnMediumBlobs(TO_BLOCK* block) {
  // Preliminary strokewidth-based neighbour detection on the medium blobs.
  InsertBlobList(&block->blobs);
  BLOBNBOX_IT it(&block->blobs);
  for (it.mark_cycle_pt(); !it.cycled_list(); it.forward()) {
    SetNeighbours(false, false, it.data());
  }
  // The grid is only needed while setting neighbours; empty it again.
  Clear();
}
// Sets the neighbour/textline writing direction members of the medium
// and large blobs with optional repair of broken CJK characters first.
// Repair of broken CJK is needed here because broken CJK characters
// can fool the textline direction detection algorithm.
void StrokeWidth::FindTextlineDirectionAndFixBrokenCJK(bool cjk_merge,
                                                       TO_BLOCK* input_block) {
  // Populate the grid with the remaining (non-noise) blobs.
  InsertBlobs(input_block);
  if (cjk_merge) {
    // Each successful pass improves the character-size estimate, so keep
    // going until FixBrokenCJK reports too few merges to be worth repeating.
    while (FixBrokenCJK(input_block)) {
    }
  }
  // Classify each blob's textline flow by inspecting its neighbours.
  FindTextlineFlowDirection(false);
  // Empty the grid ready for rotation or leader finding.
  Clear();
}
// Helper to collect and count horizontal and vertical blobs from a list.
// Counts every uniquely-oriented blob, but only saves blobs whose aspect
// ratio is modest enough (<= kSizeRatioToReject) for use in OSD.
static void CollectHorizVertBlobs(BLOBNBOX_LIST* input_blobs,
                                  int* num_vertical_blobs,
                                  int* num_horizontal_blobs,
                                  BLOBNBOX_CLIST* vertical_blobs,
                                  BLOBNBOX_CLIST* horizontal_blobs,
                                  BLOBNBOX_CLIST* nondescript_blobs) {
  BLOBNBOX_C_IT vert_it(vertical_blobs);
  BLOBNBOX_C_IT horz_it(horizontal_blobs);
  BLOBNBOX_C_IT nond_it(nondescript_blobs);
  BLOBNBOX_IT it(input_blobs);
  for (it.mark_cycle_pt(); !it.cycled_list(); it.forward()) {
    BLOBNBOX* blob = it.data();
    const TBOX& box = blob->bounding_box();
    float y_over_x = static_cast<float>(box.height()) / box.width();
    float x_over_y = 1.0f / y_over_x;
    // Aspect ratio normalized to be >= 1.0, whichever way the blob leans.
    float ratio = (x_over_y > y_over_x) ? x_over_y : y_over_x;
    // Only blobs with a small aspect ratio are useful for OSD.
    bool ok_blob = ratio <= kSizeRatioToReject;
    if (blob->UniquelyVertical()) {
      ++*num_vertical_blobs;
      if (ok_blob) vert_it.add_after_then_move(blob);
    } else if (blob->UniquelyHorizontal()) {
      ++*num_horizontal_blobs;
      if (ok_blob) horz_it.add_after_then_move(blob);
    } else if (ok_blob) {
      nond_it.add_after_then_move(blob);
    }
  }
}
// Types all the blobs as vertical or horizontal text or unknown and
// returns true if the majority are vertical.
// If the blobs are rotated, it is necessary to call CorrectForRotation
// after rotating everything, otherwise the work done here will be enough.
// If osd_blobs is not null, a list of blobs from the dominant textline
// direction are returned for use in orientation and script detection.
bool StrokeWidth::TestVerticalTextDirection(double find_vertical_text_ratio,
                                            TO_BLOCK* block,
                                            BLOBNBOX_CLIST* osd_blobs) {
  int vertical_boxes = 0;
  int horizontal_boxes = 0;
  // Count vertical normal and large blobs.
  BLOBNBOX_CLIST vertical_blobs;
  BLOBNBOX_CLIST horizontal_blobs;
  BLOBNBOX_CLIST nondescript_blobs;
  CollectHorizVertBlobs(&block->blobs, &vertical_boxes, &horizontal_boxes,
                        &vertical_blobs, &horizontal_blobs, &nondescript_blobs);
  CollectHorizVertBlobs(&block->large_blobs, &vertical_boxes, &horizontal_boxes,
                        &vertical_blobs, &horizontal_blobs, &nondescript_blobs);
  if (textord_debug_tabfind)
    tprintf("TextDir hbox=%d vs vbox=%d, %dH, %dV, %dN osd blobs\n",
            horizontal_boxes, vertical_boxes,
            horizontal_blobs.length(), vertical_blobs.length(),
            nondescript_blobs.length());
  if (osd_blobs != NULL && vertical_boxes == 0 && horizontal_boxes == 0) {
    // Only nondescript blobs available, so return those.
    BLOBNBOX_C_IT osd_it(osd_blobs);
    osd_it.add_list_after(&nondescript_blobs);
    return false;
  }
  // Vertical text wins if at least find_vertical_text_ratio of the counted
  // (oriented) blobs are vertical.
  int min_vert_boxes = static_cast<int>((vertical_boxes + horizontal_boxes) *
                                        find_vertical_text_ratio);
  if (vertical_boxes >= min_vert_boxes) {
    if (osd_blobs != NULL) {
      BLOBNBOX_C_IT osd_it(osd_blobs);
      osd_it.add_list_after(&vertical_blobs);
    }
    return true;
  } else {
    if (osd_blobs != NULL) {
      BLOBNBOX_C_IT osd_it(osd_blobs);
      osd_it.add_list_after(&horizontal_blobs);
    }
    return false;
  }
}
// Corrects the data structures for the given rotation.
void StrokeWidth::CorrectForRotation(const FCOORD& rotation,
                                     ColPartitionGrid* part_grid) {
  // Re-initialize this grid to cover the same area as the partition grid.
  Init(part_grid->gridsize(), part_grid->bleft(), part_grid->tright());
  grid_box_ = TBOX(bleft(), tright());
  // Store the inverse of the applied rotation so boxes can be mapped back
  // to original image coordinates later.
  rerotation_.set_x(rotation.x());
  rerotation_.set_y(-rotation.y());
}
// Finds leader partitions and inserts them into the given part_grid.
void StrokeWidth::FindLeaderPartitions(TO_BLOCK* block,
                                       ColPartitionGrid* part_grid) {
  Clear();
  // Find and isolate leaders in the noise list.
  ColPartition_LIST leader_parts;
  FindLeadersAndMarkNoise(block, &leader_parts);
  // Setup the strokewidth grid with the block's remaining (non-noise) blobs.
  InsertBlobList(&block->blobs);
  // Mark blobs that have leader neighbours.
  for (ColPartition_IT it(&leader_parts); !it.empty(); it.forward()) {
    // Extract transfers ownership of the partition from the list to us;
    // the part_grid takes it at the end of the iteration.
    ColPartition* part = it.extract();
    part->ClaimBoxes();
    MarkLeaderNeighbours(part, LR_LEFT);
    MarkLeaderNeighbours(part, LR_RIGHT);
    part_grid->InsertBBox(true, true, part);
  }
}
// Finds and marks noise those blobs that look like bits of vertical lines
// that would otherwise screw up layout analysis.
void StrokeWidth::RemoveLineResidue(ColPartition_LIST* big_part_list) {
  BlobGridSearch gsearch(this);
  BLOBNBOX* bbox;
  // For every vertical line-like bbox in the grid, search its neighbours
  // to find the tallest, and if the original box is taller by sufficient
  // margin, then call it line residue and delete it.
  gsearch.StartFullSearch();
  while ((bbox = gsearch.NextFullSearch()) != NULL) {
    TBOX box = bbox->bounding_box();
    // Only tall, thin boxes are line-residue candidates.
    if (box.height() < box.width() * kLineResidueAspectRatio)
      continue;
    // Set up a rectangle search around the blob to find the size of its
    // neighbours.
    int padding = box.height() * kLineResiduePadRatio;
    TBOX search_box = box;
    search_box.pad(padding, padding);
    bool debug = AlignedBlob::WithinTestRegion(2, box.left(),
                                               box.bottom());
    // Find the largest object in the search box not equal to bbox.
    BlobGridSearch rsearch(this);
    int max_size = 0;
    BLOBNBOX* n;
    rsearch.StartRectSearch(search_box);
    while ((n = rsearch.NextRectSearch()) != NULL) {
      if (n == bbox) continue;
      TBOX nbox = n->bounding_box();
      if (nbox.height() > max_size) {
        max_size = nbox.height();
      }
    }
    if (debug) {
      tprintf("Max neighbour size=%d for candidate line box at:", max_size);
      box.print();
    }
    // Residue must be much taller than the tallest neighbour to be genuine.
    if (max_size * kLineResidueSizeRatio < box.height()) {
#ifndef GRAPHICS_DISABLED
      if (leaders_win_ != NULL) {
        // We are debugging, so display deleted in pink blobs in the same
        // window that we use to display leader detection.
        leaders_win_->Pen(ScrollView::PINK);
        leaders_win_->Rectangle(box.left(), box.bottom(),
                                box.right(), box.top());
      }
#endif  // GRAPHICS_DISABLED
      ColPartition::MakeBigPartition(bbox, big_part_list);
    }
  }
}
// Types all the blobs as vertical text or horizontal text or unknown and
// puts them into initial ColPartitions in the supplied part_grid.
// rerotation determines how to get back to the image coordinates from the
// blob coordinates (since they may have been rotated for vertical text).
// block is the single block for the whole page or rectangle to be OCRed.
// nontext_pix (full-size), is a binary mask used to prevent merges across
// photo/text boundaries. It is not kept beyond this function.
// denorm provides a mapping back to the image from the current blob
// coordinate space.
// projection provides a measure of textline density over the image and
// provides functions to assist with diacritic detection. It should be a
// pointer to a new TextlineProjection, and will be setup here.
// part_grid is the output grid of textline partitions.
// Large blobs that cause overlap are put in separate partitions and added
// to the big_parts list.
void StrokeWidth::GradeBlobsIntoPartitions(const FCOORD& rerotation,
                                           TO_BLOCK* block,
                                           Pix* nontext_pix,
                                           const DENORM* denorm,
                                           bool cjk_script,
                                           TextlineProjection* projection,
                                           ColPartitionGrid* part_grid,
                                           ColPartition_LIST* big_parts) {
  // Cache the caller-owned helpers for the duration of this call only;
  // they are reset to NULL before returning.
  nontext_map_ = nontext_pix;
  projection_ = projection;
  denorm_ = denorm;
  // Clear and re Insert to take advantage of the tab stops in the blobs.
  Clear();
  // Setup the strokewidth grid with the remaining non-noise, non-leader blobs.
  InsertBlobs(block);
  // Run FixBrokenCJK() again if the page is CJK.
  if (cjk_script) {
    FixBrokenCJK(block);
  }
  FindTextlineFlowDirection(true);
  projection_->ConstructProjection(block, rerotation, nontext_map_);
  if (textord_tabfind_show_strokewidths) {
    ScrollView* line_blobs_win = MakeWindow(0, 0, "Initial textline Blobs");
    projection_->PlotGradedBlobs(&block->blobs, line_blobs_win);
    projection_->PlotGradedBlobs(&block->small_blobs, line_blobs_win);
  }
  // Blobs that do not sit on a textline (diacritics aside) become noise.
  projection_->MoveNonTextlineBlobs(&block->blobs, &block->noise_blobs);
  projection_->MoveNonTextlineBlobs(&block->small_blobs, &block->noise_blobs);
  // Clear and re Insert to take advantage of the removed diacritics.
  Clear();
  InsertBlobs(block);
  FindInitialPartitions(rerotation, block, part_grid, big_parts);
  nontext_map_ = NULL;
  projection_ = NULL;
  denorm_ = NULL;
}
// Prints the bounding box and the three stroke-width measures (horizontal,
// vertical, and perimeter-derived) of the given blob for debugging.
// Note: the blob must have a non-NULL cblob().
static void PrintBoxWidths(BLOBNBOX* neighbour) {
  TBOX nbox = neighbour->bounding_box();
  // Fixed format-string typo: "%1.f" (field width 1, precision 0) printed
  // the perimeter-based width with no decimals; "%.1f" matches the other
  // two width fields.
  tprintf("Box (%d,%d)->(%d,%d): h-width=%.1f, v-width=%.1f p-width=%.1f\n",
          nbox.left(), nbox.bottom(), nbox.right(), nbox.top(),
          neighbour->horz_stroke_width(), neighbour->vert_stroke_width(),
          2.0 * neighbour->cblob()->area()/neighbour->cblob()->perimeter());
}
/** Handles a click event in a display window.
 * Finds the blob under the click (if any) and dumps its stroke widths,
 * the widths of its 4 neighbours, its neighbour gaps, and its
 * flow-direction flags to the debug output. */
void StrokeWidth::HandleClick(int x, int y) {
  BBGrid<BLOBNBOX, BLOBNBOX_CLIST, BLOBNBOX_C_IT>::HandleClick(x, y);
  // Run a radial search for blobs that overlap.
  BlobGridSearch radsearch(this);
  radsearch.StartRadSearch(x, y, 1);
  BLOBNBOX* neighbour;
  FCOORD click(static_cast<float>(x), static_cast<float>(y));
  while ((neighbour = radsearch.NextRadSearch()) != NULL) {
    TBOX nbox = neighbour->bounding_box();
    if (nbox.contains(click) && neighbour->cblob() != NULL) {
      PrintBoxWidths(neighbour);
      // Also dump each existing neighbour in the 4 directions.
      if (neighbour->neighbour(BND_LEFT) != NULL)
        PrintBoxWidths(neighbour->neighbour(BND_LEFT));
      if (neighbour->neighbour(BND_RIGHT) != NULL)
        PrintBoxWidths(neighbour->neighbour(BND_RIGHT));
      if (neighbour->neighbour(BND_ABOVE) != NULL)
        PrintBoxWidths(neighbour->neighbour(BND_ABOVE));
      if (neighbour->neighbour(BND_BELOW) != NULL)
        PrintBoxWidths(neighbour->neighbour(BND_BELOW));
      int gaps[BND_COUNT];
      neighbour->NeighbourGaps(gaps);
      tprintf("Left gap=%d, right=%d, above=%d, below=%d, horz=%d, vert=%d\n"
              "Good=    %d    %d    %d    %d\n",
              gaps[BND_LEFT], gaps[BND_RIGHT],
              gaps[BND_ABOVE], gaps[BND_BELOW],
              neighbour->horz_possible(),
              neighbour->vert_possible(),
              neighbour->good_stroke_neighbour(BND_LEFT),
              neighbour->good_stroke_neighbour(BND_RIGHT),
              neighbour->good_stroke_neighbour(BND_ABOVE),
              neighbour->good_stroke_neighbour(BND_BELOW));
      // Only report the first blob containing the click.
      break;
    }
  }
}
// Detects and marks leader dots/dashes.
//    Leaders are horizontal chains of small or noise blobs that look
//    monospace according to ColPartition::MarkAsLeaderIfMonospaced().
// Detected leaders become the only occupants of the block->small_blobs list.
// Non-leader small blobs get moved to the blobs list.
// Non-leader noise blobs remain singletons in the noise list.
// All small and noise blobs in high density regions are marked BTFT_NONTEXT.
// block is the single block for the whole page or rectangle to be OCRed.
// leader_parts is the output.
void StrokeWidth::FindLeadersAndMarkNoise(TO_BLOCK* block,
                                          ColPartition_LIST* leader_parts) {
  InsertBlobList(&block->small_blobs);
  InsertBlobList(&block->noise_blobs);
  BlobGridSearch gsearch(this);
  BLOBNBOX* bbox;
  // For every bbox in the grid, set its neighbours (leader rules apply).
  gsearch.StartFullSearch();
  while ((bbox = gsearch.NextFullSearch()) != NULL) {
    SetNeighbours(true, false, bbox);
  }
  ColPartition_IT part_it(leader_parts);
  gsearch.StartFullSearch();
  while ((bbox = gsearch.NextFullSearch()) != NULL) {
    if (bbox->flow() == BTFT_NONE) {
      // A leader candidate must be linked to at least one side.
      if (bbox->neighbour(BND_RIGHT) == NULL &&
          bbox->neighbour(BND_LEFT) == NULL)
        continue;
      // Put all the linked blobs into a ColPartition.
      ColPartition* part = new ColPartition(BRT_UNKNOWN, ICOORD(0, 1));
      BLOBNBOX* blob;
      // Walk the chain rightwards from bbox (inclusive)...
      for (blob = bbox; blob != NULL && blob->flow() == BTFT_NONE;
           blob = blob->neighbour(BND_RIGHT))
        part->AddBox(blob);
      // ...and leftwards from bbox's left neighbour (exclusive of bbox).
      for (blob = bbox->neighbour(BND_LEFT); blob != NULL &&
           blob->flow() == BTFT_NONE;
           blob = blob->neighbour(BND_LEFT))
        part->AddBox(blob);
      if (part->MarkAsLeaderIfMonospaced())
        part_it.add_after_then_move(part);
      else
        delete part;
    }
  }
  if (textord_tabfind_show_strokewidths) {
    leaders_win_ = DisplayGoodBlobs("LeaderNeighbours", 0, 0);
  }
  // Move any non-leaders from the small to the blobs list, as they are
  // most likely dashes or broken characters.
  BLOBNBOX_IT blob_it(&block->blobs);
  BLOBNBOX_IT small_it(&block->small_blobs);
  for (small_it.mark_cycle_pt(); !small_it.cycled_list(); small_it.forward()) {
    BLOBNBOX* blob = small_it.data();
    if (blob->flow() != BTFT_LEADER) {
      if (blob->flow() == BTFT_NEIGHBOURS)
        blob->set_flow(BTFT_NONE);
      blob->ClearNeighbours();
      blob_it.add_to_end(small_it.extract());
    }
  }
  // Move leaders from the noise list to the small list, leaving the small
  // list exclusively leaders, so they don't get processed further,
  // and the remaining small blobs all in the noise list.
  BLOBNBOX_IT noise_it(&block->noise_blobs);
  for (noise_it.mark_cycle_pt(); !noise_it.cycled_list(); noise_it.forward()) {
    BLOBNBOX* blob = noise_it.data();
    if (blob->flow() == BTFT_LEADER || blob->joined_to_prev()) {
      small_it.add_to_end(noise_it.extract());
    } else if (blob->flow() == BTFT_NEIGHBOURS) {
      // Reset non-leader neighbour chains so they don't linger.
      blob->set_flow(BTFT_NONE);
      blob->ClearNeighbours();
    }
  }
  // Clear the grid as we don't want the small stuff hanging around in it.
  Clear();
}
/** Inserts the block blobs (normal and large) into this grid.
 * Blobs remain owned by the block. */
void StrokeWidth::InsertBlobs(TO_BLOCK* block) {
  // Medium blobs first, then the large ones; the grid never takes ownership.
  InsertBlobList(&block->blobs);
  InsertBlobList(&block->large_blobs);
}
// Checks the left or right side of the given leader partition and sets the
// (opposite) leader_on_right or leader_on_left flags for blobs
// that are next to the given side of the given leader partition.
void StrokeWidth::MarkLeaderNeighbours(const ColPartition* part,
                                       LeftOrRight side) {
  const TBOX& part_box = part->bounding_box();
  BlobGridSearch blobsearch(this);
  // Search to the side of the leader for the nearest neighbour.
  BLOBNBOX* best_blob = NULL;
  int best_gap = 0;
  blobsearch.StartSideSearch(side == LR_LEFT ? part_box.left()
                                             : part_box.right(),
                             part_box.bottom(), part_box.top());
  BLOBNBOX* blob;
  while ((blob = blobsearch.NextSideSearch(side == LR_LEFT)) != NULL) {
    const TBOX& blob_box = blob->bounding_box();
    // Candidates must overlap the leader vertically.
    if (!blob_box.y_overlap(part_box))
      continue;
    int x_gap = blob_box.x_gap(part_box);
    if (x_gap > 2 * gridsize()) {
      // Too far away horizontally; side-search returns increasing gaps,
      // so stop looking.
      break;
    } else if (best_blob == NULL || x_gap < best_gap) {
      best_blob = blob;
      best_gap = x_gap;
    }
  }
  if (best_blob != NULL) {
    // The flag is set on the side of the BLOB that faces the leader, which
    // is the opposite of the side of the leader we searched.
    if (side == LR_LEFT)
      best_blob->set_leader_on_right(true);
    else
      best_blob->set_leader_on_left(true);
#ifndef GRAPHICS_DISABLED
    if (leaders_win_ != NULL) {
      leaders_win_->Pen(side == LR_LEFT ? ScrollView::RED : ScrollView::GREEN);
      const TBOX& blob_box = best_blob->bounding_box();
      leaders_win_->Rectangle(blob_box.left(), blob_box.bottom(),
                              blob_box.right(), blob_box.top());
    }
#endif  // GRAPHICS_DISABLED
  }
}
// Helper to compute the UQ of the square-ish CJK charcters.
static int UpperQuartileCJKSize(int gridsize, BLOBNBOX_LIST* blobs) {
STATS sizes(0, gridsize * kMaxCJKSizeRatio);
BLOBNBOX_IT it(blobs);
for (it.mark_cycle_pt(); !it.cycled_list(); it.forward()) {
BLOBNBOX* blob = it.data();
int width = blob->bounding_box().width();
int height = blob->bounding_box().height();
if (width <= height * kCJKAspectRatio && height < width * kCJKAspectRatio)
sizes.add(height, 1);
}
return static_cast<int>(sizes.ile(0.75f) + 0.5);
}
// Fix broken CJK characters, using the fake joined blobs mechanism.
// Blobs are really merged, ie the master takes all the outlines and the
// others are deleted.
// Returns true if sufficient blobs are merged that it may be worth running
// again, due to a better estimate of character size.
bool StrokeWidth::FixBrokenCJK(TO_BLOCK* block) {
  BLOBNBOX_LIST* blobs = &block->blobs;
  // Estimate the character size from the upper quartile of square-ish blobs.
  int median_height = UpperQuartileCJKSize(gridsize(), blobs);
  int max_dist = static_cast<int>(median_height * kCJKBrokenDistanceFraction);
  int max_size = static_cast<int>(median_height * kCJKAspectRatio);
  int num_fixed = 0;
  BLOBNBOX_IT blob_it(blobs);
  for (blob_it.mark_cycle_pt(); !blob_it.cycled_list(); blob_it.forward()) {
    BLOBNBOX* blob = blob_it.data();
    // Skip blobs already emptied by a previous merge in this loop.
    if (blob->cblob() == NULL || blob->cblob()->out_list()->empty())
      continue;
    TBOX bbox = blob->bounding_box();
    bool debug = AlignedBlob::WithinTestRegion(3, bbox.left(),
                                               bbox.bottom());
    if (debug) {
      tprintf("Checking for Broken CJK (max size=%d):", max_size);
      bbox.print();
    }
    // Generate a list of blobs that overlap or are near enough to merge.
    BLOBNBOX_CLIST overlapped_blobs;
    AccumulateOverlaps(blob, debug, max_size, max_dist,
                       &bbox, &overlapped_blobs);
    if (!overlapped_blobs.empty()) {
      // There are overlapping blobs, so qualify them as being satisfactory
      // before removing them from the grid and replacing them with the union.
      // The final box must be roughly square.
      if (bbox.width() > bbox.height() * kCJKAspectRatio ||
          bbox.height() > bbox.width() * kCJKAspectRatio) {
        if (debug) {
          tprintf("Bad final aspectratio:");
          bbox.print();
        }
        continue;
      }
      // There can't be too many blobs to merge.
      if (overlapped_blobs.length() >= kCJKMaxComponents) {
        if (debug)
          tprintf("Too many neighbours: %d\n", overlapped_blobs.length());
        continue;
      }
      // The strokewidths must match amongst the join candidates.
      BLOBNBOX_C_IT n_it(&overlapped_blobs);
      for (n_it.mark_cycle_pt(); !n_it.cycled_list(); n_it.forward()) {
        BLOBNBOX* neighbour = NULL;
        neighbour = n_it.data();
        if (!blob->MatchingStrokeWidth(*neighbour, kStrokeWidthFractionCJK,
                                       kStrokeWidthCJK))
          break;
      }
      // An early break above leaves the iterator mid-list, so cycled_list()
      // false here means some candidate failed the strokewidth test.
      if (!n_it.cycled_list()) {
        if (debug) {
          tprintf("Bad stroke widths:");
          PrintBoxWidths(blob);
        }
        continue;  // Not good enough.
      }
      // Merge all the candidates into blob.
      // We must remove blob from the grid and reinsert it after merging
      // to maintain the integrity of the grid.
      RemoveBBox(blob);
      // Everything else will be calculated later.
      for (n_it.mark_cycle_pt(); !n_it.cycled_list(); n_it.forward()) {
        BLOBNBOX* neighbour = n_it.data();
        RemoveBBox(neighbour);
        // Mark empty blob for deletion.
        neighbour->set_region_type(BRT_NOISE);
        blob->really_merge(neighbour);
        if (rerotation_.x() != 1.0f || rerotation_.y() != 0.0f) {
          blob->rotate_box(rerotation_);
        }
      }
      InsertBBox(true, true, blob);
      ++num_fixed;
      if (debug) {
        tprintf("Done! Final box:");
        bbox.print();
      }
    }
  }
  // Count remaining blobs.
  int num_remaining = 0;
  for (blob_it.mark_cycle_pt(); !blob_it.cycled_list(); blob_it.forward()) {
    BLOBNBOX* blob = blob_it.data();
    if (blob->cblob() != NULL && !blob->cblob()->out_list()->empty()) {
      ++num_remaining;
    }
  }
  // Permanently delete all the marked blobs after first removing all
  // references in the neighbour members.
  block->DeleteUnownedNoise();
  // Worth another pass if a significant fraction of the blobs were merged.
  return num_fixed > num_remaining * kBrokenCJKIterationFraction;
}
// Helper function to determine whether it is reasonable to merge the
// bbox and the nbox for repairing broken CJK.
// The distance apart must not exceed max_dist, the combined size must
// not exceed max_size, and the aspect ratio must either improve or at
// least not get worse by much.
// On return *x_gap and *y_gap hold the gaps between the two boxes.
static bool AcceptableCJKMerge(const TBOX& bbox, const TBOX& nbox,
                               bool debug, int max_size, int max_dist,
                               int* x_gap, int* y_gap) {
  *x_gap = bbox.x_gap(nbox);
  *y_gap = bbox.y_gap(nbox);
  TBOX merged(nbox);
  merged += bbox;
  if (debug) {
    tprintf("gaps = %d, %d, merged_box:", *x_gap, *y_gap);
    merged.print();
  }
  // Reject outright if the boxes are too far apart or the union too big.
  if (*x_gap > max_dist || *y_gap > max_dist ||
      merged.width() > max_size || merged.height() > max_size)
    return false;
  // Close enough to call overlapping. Check aspect ratios, each normalized
  // to be >= 1.0.
  double old_ratio = static_cast<double>(bbox.width()) / bbox.height();
  if (old_ratio < 1.0) old_ratio = 1.0 / old_ratio;
  double new_ratio = static_cast<double>(merged.width()) / merged.height();
  if (new_ratio < 1.0) new_ratio = 1.0 / new_ratio;
  // Accept if the merged aspect ratio does not get much worse.
  return new_ratio <= old_ratio * kCJKAspectRatioIncrease;
}
// Collect blobs that overlap or are within max_dist of the input bbox.
// Return them in the list of blobs and expand the bbox to be the union
// of all the boxes. not_this is excluded from the search, as are blobs
// that cause the merged box to exceed max_size in either dimension.
void StrokeWidth::AccumulateOverlaps(const BLOBNBOX* not_this, bool debug,
                                     int max_size, int max_dist,
                                     TBOX* bbox, BLOBNBOX_CLIST* blobs) {
  // While searching, nearests holds the nearest failed blob in each
  // direction. When we have a nearest in each of the 4 directions, then
  // the search is over, and at this point the final bbox must not overlap
  // any of the nearests.
  BLOBNBOX* nearests[BND_COUNT];
  for (int i = 0; i < BND_COUNT; ++i) {
    nearests[i] = NULL;
  }
  // Centre of the input box: origin of the radial search.
  int x = (bbox->left() + bbox->right()) / 2;
  int y = (bbox->bottom() + bbox->top()) / 2;
  // Run a radial search for blobs that overlap or are sufficiently close.
  BlobGridSearch radsearch(this);
  radsearch.StartRadSearch(x, y, kCJKRadius);
  BLOBNBOX* neighbour;
  while ((neighbour = radsearch.NextRadSearch()) != NULL) {
    if (neighbour == not_this) continue;
    TBOX nbox = neighbour->bounding_box();
    int x_gap, y_gap;
    if (AcceptableCJKMerge(*bbox, nbox, debug, max_size, max_dist,
                           &x_gap, &y_gap)) {
      // Close enough to call overlapping. Merge boxes.
      *bbox += nbox;
      blobs->add_sorted(SortByBoxLeft<BLOBNBOX>, true, neighbour);
      if (debug) {
        tprintf("Added:");
        nbox.print();
      }
      // Since we merged, search the nearests, as some might now be mergeable.
      for (int dir = 0; dir < BND_COUNT; ++dir) {
        if (nearests[dir] == NULL) continue;
        nbox = nearests[dir]->bounding_box();
        if (AcceptableCJKMerge(*bbox, nbox, debug, max_size,
                               max_dist, &x_gap, &y_gap)) {
          // Close enough to call overlapping. Merge boxes.
          *bbox += nbox;
          blobs->add_sorted(SortByBoxLeft<BLOBNBOX>, true, nearests[dir]);
          if (debug) {
            tprintf("Added:");
            nbox.print();
          }
          nearests[dir] = NULL;
          dir = -1;  // Restart the search.
        }
      }
    } else if (x_gap < 0 && x_gap <= y_gap) {
      // A vertical neighbour. Record the nearest.
      BlobNeighbourDir dir = nbox.top() > bbox->top() ? BND_ABOVE : BND_BELOW;
      if (nearests[dir] == NULL ||
          y_gap < bbox->y_gap(nearests[dir]->bounding_box())) {
        nearests[dir] = neighbour;
      }
    } else if (y_gap < 0 && y_gap <= x_gap) {
      // A horizontal neighbour. Record the nearest.
      BlobNeighbourDir dir = nbox.left() > bbox->left() ? BND_RIGHT : BND_LEFT;
      if (nearests[dir] == NULL ||
          x_gap < bbox->x_gap(nearests[dir]->bounding_box())) {
        nearests[dir] = neighbour;
      }
    }
    // If all nearests are non-null, then we have finished.
    if (nearests[BND_LEFT] && nearests[BND_RIGHT] &&
        nearests[BND_ABOVE] && nearests[BND_BELOW])
      break;
  }
  // Final overlap with a nearest is not allowed.
  for (int dir = 0; dir < BND_COUNT; ++dir) {
    if (nearests[dir] == NULL) continue;
    const TBOX& nbox = nearests[dir]->bounding_box();
    if (debug) {
      tprintf("Testing for overlap with:");
      nbox.print();
    }
    if (bbox->overlap(nbox)) {
      // Abort the whole merge: empty the candidate list (without deleting
      // the blobs, which are owned elsewhere).
      blobs->shallow_clear();
      if (debug)
        tprintf("Final box overlaps nearest\n");
      return;
    }
  }
}
// For each blob in this grid, Finds the textline direction to be horizontal
// or vertical according to distance to neighbours and 1st and 2nd order
// neighbours. Non-text tends to end up without a definite direction.
// Result is setting of the neighbours and vert_possible/horz_possible
// flags in the BLOBNBOXes currently in this grid.
// This function is called more than once if page orientation is uncertain,
// so display_if_debugging is true on the final call to display the results.
void StrokeWidth::FindTextlineFlowDirection(bool display_if_debugging) {
  BlobGridSearch gsearch(this);
  BLOBNBOX* bbox;
  // Pass 1: for every bbox in the grid, set its neighbours.
  gsearch.StartFullSearch();
  while ((bbox = gsearch.NextFullSearch()) != NULL) {
    SetNeighbours(false, display_if_debugging, bbox);
  }
  // Pass 2: where vertical or horizontal wins by a big margin, clarify it.
  gsearch.StartFullSearch();
  while ((bbox = gsearch.NextFullSearch()) != NULL) {
    SimplifyObviousNeighbours(bbox);
  }
  // Pass 3: now try to make the blobs only vertical or horizontal using
  // neighbours.
  gsearch.StartFullSearch();
  while ((bbox = gsearch.NextFullSearch()) != NULL) {
    SetNeighbourFlows(bbox);
  }
  if ((textord_tabfind_show_strokewidths && display_if_debugging) ||
      textord_tabfind_show_strokewidths > 1) {
    initial_widths_win_ = DisplayGoodBlobs("InitialStrokewidths", 400, 0);
  }
  // Pass 4: improve flow direction with neighbours (firm values unchanged).
  gsearch.StartFullSearch();
  while ((bbox = gsearch.NextFullSearch()) != NULL) {
    SmoothNeighbourTypes(bbox, false);
  }
  // Pass 5: now allow reset of firm values to fix renegades.
  gsearch.StartFullSearch();
  while ((bbox = gsearch.NextFullSearch()) != NULL) {
    SmoothNeighbourTypes(bbox, true);
  }
  // Pass 6: repeat the smoothing to let changes propagate once more.
  gsearch.StartFullSearch();
  while ((bbox = gsearch.NextFullSearch()) != NULL) {
    SmoothNeighbourTypes(bbox, true);
  }
  if ((textord_tabfind_show_strokewidths && display_if_debugging) ||
      textord_tabfind_show_strokewidths > 1) {
    widths_win_ = DisplayGoodBlobs("ImprovedStrokewidths", 800, 0);
  }
}
// Sets the neighbours and good_stroke_neighbours members of the blob by
// searching close on all 4 sides.
// When finding leader dots/dashes, there is a slightly different rule for
// what makes a good neighbour.
void StrokeWidth::SetNeighbours(bool leaders, bool activate_line_trap,
BLOBNBOX* blob) {
int line_trap_count = 0;
for (int dir = 0; dir < BND_COUNT; ++dir) {
BlobNeighbourDir bnd = static_cast<BlobNeighbourDir>(dir);
line_trap_count += FindGoodNeighbour(bnd, leaders, blob);
}
if (line_trap_count > 0 && activate_line_trap) {
// It looks like a line so isolate it by clearing its neighbours.
blob->ClearNeighbours();
const TBOX& box = blob->bounding_box();
blob->set_region_type(box.width() > box.height() ? BRT_HLINE : BRT_VLINE);
}
}
// Sets the good_stroke_neighbours member of the blob if it has a
// GoodNeighbour on the given side.
// Also sets the neighbour in the blob, whether or not a good one is found.
// Returns the number of blobs in the nearby search area that would lead us to
// believe that this blob is a line separator.
// Leaders get extra special lenient treatment.
//
// dir:     which side of the blob to search (left/right/below/above).
// leaders: if true, the overlap requirements are relaxed to a single pixel so
//          widely-spaced leader dots can still chain together.
// blob:    the blob whose neighbour is sought; its neighbour link for dir is
//          always updated, even when no good neighbour is found (set to NULL).
int StrokeWidth::FindGoodNeighbour(BlobNeighbourDir dir, bool leaders,
                                   BLOBNBOX* blob) {
  // Search for neighbours that overlap vertically.
  TBOX blob_box = blob->bounding_box();
  bool debug = AlignedBlob::WithinTestRegion(2, blob_box.left(),
                                             blob_box.bottom());
  if (debug) {
    tprintf("FGN in dir %d for blob:", dir);
    blob_box.print();
  }
  int top = blob_box.top();
  int bottom = blob_box.bottom();
  int left = blob_box.left();
  int right = blob_box.right();
  int width = right - left;
  int height = top - bottom;

  // A trap to detect lines tests for the min dimension of neighbours
  // being larger than a multiple of the min dimension of the line
  // and the larger dimension being smaller than a fraction of the max
  // dimension of the line.
  int line_trap_max = MAX(width, height) / kLineTrapLongest;
  int line_trap_min = MIN(width, height) * kLineTrapShortest;
  int line_trap_count = 0;

  // Overlap is measured perpendicular to the search direction, so for a
  // horizontal search it is vertical overlap and vice versa.
  int min_good_overlap = (dir == BND_LEFT || dir == BND_RIGHT)
                       ? height / 2 : width / 2;
  int min_decent_overlap = (dir == BND_LEFT || dir == BND_RIGHT)
                         ? height / 3 : width / 3;
  if (leaders)
    min_good_overlap = min_decent_overlap = 1;

  // Search distance scales with the blob's geometric mean dimension, but is
  // never smaller than one grid cell.
  int search_pad = static_cast<int>(
      sqrt(static_cast<double>(width * height)) * kNeighbourSearchFactor);
  if (gridsize() > search_pad)
    search_pad = gridsize();
  TBOX search_box = blob_box;
  // Pad the search in the appropriate direction.
  switch (dir) {
  case BND_LEFT:
    search_box.set_left(search_box.left() - search_pad);
    break;
  case BND_RIGHT:
    search_box.set_right(search_box.right() + search_pad);
    break;
  case BND_BELOW:
    search_box.set_bottom(search_box.bottom() - search_pad);
    break;
  case BND_ABOVE:
    search_box.set_top(search_box.top() + search_pad);
    break;
  case BND_COUNT:
    return 0;
  }

  BlobGridSearch rectsearch(this);
  rectsearch.StartRectSearch(search_box);
  BLOBNBOX* best_neighbour = NULL;
  double best_goodness = 0.0;
  bool best_is_good = false;
  BLOBNBOX* neighbour;
  while ((neighbour = rectsearch.NextRectSearch()) != NULL) {
    TBOX nbox = neighbour->bounding_box();
    if (neighbour == blob)
      continue;
    int mid_x = (nbox.left() + nbox.right()) / 2;
    if (mid_x < blob->left_rule() || mid_x > blob->right_rule())
      continue;  // In a different column.
    if (debug) {
      tprintf("Neighbour at:");
      nbox.print();
    }
    // Last-minute line detector. There is a small upper limit to the line
    // width accepted by the morphological line detector.
    int n_width = nbox.width();
    int n_height = nbox.height();
    if (MIN(n_width, n_height) > line_trap_min &&
        MAX(n_width, n_height) < line_trap_max)
      ++line_trap_count;
    // Heavily joined text, such as Arabic may have very different sizes when
    // looking at the maxes, but the heights may be almost identical, so check
    // for a difference in height if looking sideways or width vertically.
    if (TabFind::VeryDifferentSizes(MAX(n_width, n_height),
                                    MAX(width, height)) &&
        (((dir == BND_LEFT || dir ==BND_RIGHT) &&
            TabFind::DifferentSizes(n_height, height)) ||
         ((dir == BND_BELOW || dir ==BND_ABOVE) &&
             TabFind::DifferentSizes(n_width, width)))) {
      if (debug) tprintf("Bad size\n");
      continue;  // Could be a different font size or non-text.
    }
    // Amount of vertical overlap between the blobs.
    int overlap;
    // If the overlap is along the short side of the neighbour, and it
    // is fully overlapped, then perp_overlap holds the length of the long
    // side of the neighbour. A measure to include hyphens and dashes as
    // legitimate neighbours.
    int perp_overlap;
    // Gap between facing edges of blob and neighbour, minus the neighbour's
    // extent in the search direction; may go negative when they overlap.
    int gap;
    if (dir == BND_LEFT || dir == BND_RIGHT) {
      overlap = MIN(nbox.top(), top) - MAX(nbox.bottom(), bottom);
      if (overlap == nbox.height() && nbox.width() > nbox.height())
        perp_overlap = nbox.width();
      else
        perp_overlap = overlap;
      gap = dir == BND_LEFT ? left - nbox.left() : nbox.right() - right;
      if (gap <= 0) {
        if (debug) tprintf("On wrong side\n");
        continue;  // On the wrong side.
      }
      gap -= n_width;
    } else {
      overlap = MIN(nbox.right(), right) - MAX(nbox.left(), left);
      if (overlap == nbox.width() && nbox.height() > nbox.width())
        perp_overlap = nbox.height();
      else
        perp_overlap = overlap;
      gap = dir == BND_BELOW ? bottom - nbox.bottom() : nbox.top() - top;
      if (gap <= 0) {
        if (debug) tprintf("On wrong side\n");
        continue;  // On the wrong side.
      }
      gap -= n_height;
    }
    if (-gap > overlap) {
      if (debug) tprintf("Overlaps wrong way\n");
      continue;  // Overlaps the wrong way.
    }
    if (perp_overlap < min_decent_overlap) {
      if (debug) tprintf("Doesn't overlap enough\n");
      continue;  // Doesn't overlap enough.
    }
    bool bad_sizes = TabFind::DifferentSizes(height, n_height) &&
                     TabFind::DifferentSizes(width, n_width);
    // A "good" neighbour overlaps generously, is of a comparable size, and
    // matches blob's stroke width within tolerance.
    bool is_good = overlap >= min_good_overlap && !bad_sizes &&
                   blob->MatchingStrokeWidth(*neighbour,
                                             kStrokeWidthFractionTolerance,
                                             kStrokeWidthTolerance);
    // Best is a fuzzy combination of gap, overlap and is good.
    // Basically if you make one thing twice as good without making
    // anything else twice as bad, then it is better.
    // A good stroke-width/size match doubles the score.
    if (gap < 1) gap = 1;
    double goodness = (1.0 + is_good) * overlap / gap;
    if (debug) {
      tprintf("goodness = %g vs best of %g, good=%d, overlap=%d, gap=%d\n",
              goodness, best_goodness, is_good, overlap, gap);
    }
    if (goodness > best_goodness) {
      best_neighbour = neighbour;
      best_goodness = goodness;
      best_is_good = is_good;
    }
  }
  blob->set_neighbour(dir, best_neighbour, best_is_good);
  return line_trap_count;
}
// Helper to get a list of 1st-order neighbours.
static void ListNeighbours(const BLOBNBOX* blob,
BLOBNBOX_CLIST* neighbours) {
for (int dir = 0; dir < BND_COUNT; ++dir) {
BlobNeighbourDir bnd = static_cast<BlobNeighbourDir>(dir);
BLOBNBOX* neighbour = blob->neighbour(bnd);
if (neighbour != NULL) {
neighbours->add_sorted(SortByBoxLeft<BLOBNBOX>, true, neighbour);
}
}
}
// Helper to get a list of 1st and 2nd order neighbours.
static void List2ndNeighbours(const BLOBNBOX* blob,
BLOBNBOX_CLIST* neighbours) {
ListNeighbours(blob, neighbours);
for (int dir = 0; dir < BND_COUNT; ++dir) {
BlobNeighbourDir bnd = static_cast<BlobNeighbourDir>(dir);
BLOBNBOX* neighbour = blob->neighbour(bnd);
if (neighbour != NULL) {
ListNeighbours(neighbour, neighbours);
}
}
}
// Helper to get a list of 1st, 2nd and 3rd order neighbours.
static void List3rdNeighbours(const BLOBNBOX* blob,
BLOBNBOX_CLIST* neighbours) {
List2ndNeighbours(blob, neighbours);
for (int dir = 0; dir < BND_COUNT; ++dir) {
BlobNeighbourDir bnd = static_cast<BlobNeighbourDir>(dir);
BLOBNBOX* neighbour = blob->neighbour(bnd);
if (neighbour != NULL) {
List2ndNeighbours(neighbour, neighbours);
}
}
}
// Helper to count the evidence for verticalness or horizontalness
// in a list of neighbours. Increments *pure_h_count for each neighbour
// whose gaps unambiguously favor a horizontal textline, and *pure_v_count
// for each that unambiguously favors vertical. Ambiguous blobs count for
// neither.
static void CountNeighbourGaps(bool debug, BLOBNBOX_CLIST* neighbours,
                               int* pure_h_count, int* pure_v_count) {
  // Too few neighbours to provide reliable evidence.
  // NOTE(review): kMostlyOneDirRatio doubles as the minimum-count threshold
  // here -- confirm this reuse is intended.
  if (neighbours->length() <= kMostlyOneDirRatio)
    return;
  BLOBNBOX_C_IT it(neighbours);
  for (it.mark_cycle_pt(); !it.cycled_list(); it.forward()) {
    BLOBNBOX* blob = it.data();
    int h_min, h_max, v_min, v_max;
    blob->MinMaxGapsClipped(&h_min, &h_max, &v_min, &v_max);
    if (debug)
      tprintf("Hgaps [%d,%d], vgaps [%d,%d]:", h_min, h_max, v_min, v_max);
    if (h_max < v_min ||
        blob->leader_on_left() || blob->leader_on_right()) {
      // Horizontal gaps are clear winners. Count a pure horizontal.
      // Leaders are always considered horizontal.
      ++*pure_h_count;
      if (debug) tprintf("Horz at:");
    } else if (v_max < h_min) {
      // Vertical gaps are clear winners. Count a pure vertical.
      ++*pure_v_count;
      if (debug) tprintf("Vert at:");
    } else {
      if (debug) tprintf("Neither at:");
    }
    if (debug)
      blob->bounding_box().print();
  }
}
// Makes the blob to be only horizontal or vertical where evidence
// is clear based on gaps of 2nd order neighbours, or definite individual
// blobs.
void StrokeWidth::SetNeighbourFlows(BLOBNBOX* blob) {
  // Blobs with a definite individual flow direction need no neighbour vote.
  if (blob->DefiniteIndividualFlow())
    return;
  bool debug = AlignedBlob::WithinTestRegion(2, blob->bounding_box().left(),
                                             blob->bounding_box().bottom());
  if (debug) {
    tprintf("SetNeighbourFlows (current flow=%d, type=%d) on:",
            blob->flow(), blob->region_type());
    blob->bounding_box().print();
  }
  // Gather the 3-deep neighbourhood and let it vote on direction.
  BLOBNBOX_CLIST neighbours;
  List3rdNeighbours(blob, &neighbours);
  // The number of pure horizontal and vertical neighbours.
  int pure_h_count = 0;
  int pure_v_count = 0;
  CountNeighbourGaps(debug, &neighbours, &pure_h_count, &pure_v_count);
  if (debug) {
    HandleClick(blob->bounding_box().left() + 1,
                blob->bounding_box().bottom() + 1);
    tprintf("SetFlows: h_count=%d, v_count=%d\n",
            pure_h_count, pure_v_count);
  }
  if (!neighbours.empty()) {
    // Default to ambiguous; only rule out a direction if the other
    // direction wins by better than 2:1.
    blob->set_vert_possible(true);
    blob->set_horz_possible(true);
    if (pure_h_count > 2 * pure_v_count) {
      // Horizontal gaps are clear winners. Clear vertical neighbours.
      blob->set_vert_possible(false);
    } else if (pure_v_count > 2 * pure_h_count) {
      // Vertical gaps are clear winners. Clear horizontal neighbours.
      blob->set_horz_possible(false);
    }
  } else {
    // Lonely blob. Can't tell its flow direction.
    blob->set_vert_possible(false);
    blob->set_horz_possible(false);
  }
}
// Helper to count the number of horizontal and vertical blobs in a list.
static void CountNeighbourTypes(BLOBNBOX_CLIST* neighbours,
                                int* pure_h_count, int* pure_v_count) {
  BLOBNBOX_C_IT nb_it(neighbours);
  for (nb_it.mark_cycle_pt(); !nb_it.cycled_list(); nb_it.forward()) {
    BLOBNBOX* nb = nb_it.data();
    // A blob may be uniquely horizontal, uniquely vertical, or neither.
    if (nb->UniquelyHorizontal())
      ++*pure_h_count;
    if (nb->UniquelyVertical())
      ++*pure_v_count;
  }
}
// Nullify the neighbours in the wrong directions where the direction
// is clear-cut based on a distance margin. Good for isolating vertical
// text from neighbouring horizontal text.
void StrokeWidth::SimplifyObviousNeighbours(BLOBNBOX* blob) {
  // Case 1: We have text that is likely several characters, blurry and joined
  // together.
  if ((blob->bounding_box().width() > 3 * blob->area_stroke_width() &&
       blob->bounding_box().height() > 3 * blob->area_stroke_width())) {
    // The blob is complex (not stick-like).
    if (blob->bounding_box().width() > 4 * blob->bounding_box().height()) {
      // Horizontal conjoined text.
      blob->set_neighbour(BND_ABOVE, NULL, false);
      blob->set_neighbour(BND_BELOW, NULL, false);
      return;
    }
    if (blob->bounding_box().height() > 4 * blob->bounding_box().width()) {
      // Vertical conjoined text.
      blob->set_neighbour(BND_LEFT, NULL, false);
      blob->set_neighbour(BND_RIGHT, NULL, false);
      return;
    }
  }

  // Case 2: This blob is likely a single character.
  // A direction wins only if its largest gap is smaller than the other
  // direction's smallest gap by a margin AND is itself tight.
  int margin = gridsize() / 2;
  int h_min, h_max, v_min, v_max;
  blob->MinMaxGapsClipped(&h_min, &h_max, &v_min, &v_max);
  if ((h_max + margin < v_min && h_max < margin / 2) ||
      blob->leader_on_left() || blob->leader_on_right()) {
    // Horizontal gaps are clear winners. Clear vertical neighbours.
    // Leaders always count as horizontal.
    blob->set_neighbour(BND_ABOVE, NULL, false);
    blob->set_neighbour(BND_BELOW, NULL, false);
  } else if (v_max + margin < h_min && v_max < margin / 2) {
    // Vertical gaps are clear winners. Clear horizontal neighbours.
    blob->set_neighbour(BND_LEFT, NULL, false);
    blob->set_neighbour(BND_RIGHT, NULL, false);
  }
}
// Smoothes the vertical/horizontal type of the blob based on the
// 2nd-order neighbours. If reset_all is true, then all blobs are
// changed. Otherwise, only ambiguous blobs are processed.
void StrokeWidth::SmoothNeighbourTypes(BLOBNBOX* blob, bool reset_all) {
  if ((blob->vert_possible() && blob->horz_possible()) || reset_all) {
    // There are both horizontal and vertical so try to fix it.
    BLOBNBOX_CLIST neighbours;
    List2ndNeighbours(blob, &neighbours);
    // The number of pure horizontal and vertical neighbours.
    int pure_h_count = 0;
    int pure_v_count = 0;
    CountNeighbourTypes(&neighbours, &pure_h_count, &pure_v_count);
    if (AlignedBlob::WithinTestRegion(2, blob->bounding_box().left(),
                                      blob->bounding_box().bottom())) {
      HandleClick(blob->bounding_box().left() + 1,
                  blob->bounding_box().bottom() + 1);
      tprintf("pure_h=%d, pure_v=%d\n",
              pure_h_count, pure_v_count);
    }
    // A simple majority resolves the ambiguity; a tie leaves it unchanged.
    if (pure_h_count > pure_v_count) {
      // Horizontal gaps are clear winners. Clear vertical neighbours.
      blob->set_vert_possible(false);
      blob->set_horz_possible(true);
    } else if (pure_v_count > pure_h_count) {
      // Vertical gaps are clear winners. Clear horizontal neighbours.
      blob->set_horz_possible(false);
      blob->set_vert_possible(true);
    }
  } else if (AlignedBlob::WithinTestRegion(2, blob->bounding_box().left(),
                                           blob->bounding_box().bottom())) {
    // Blob was already unambiguous; debug-only reporting.
    HandleClick(blob->bounding_box().left() + 1,
                blob->bounding_box().bottom() + 1);
    tprintf("Clean on pass 3!\n");
  }
}
// Partition creation. Accumulates vertical and horizontal text chains,
// puts the remaining blobs in as unknowns, and then merges/splits to
// minimize overlap and smoothes the types with neighbours and the color
// image if provided. rerotation is used to rotate the coordinate space
// back to the nontext_map_ image.
void StrokeWidth::FindInitialPartitions(const FCOORD& rerotation,
                                        TO_BLOCK* block,
                                        ColPartitionGrid* part_grid,
                                        ColPartition_LIST* big_parts) {
  FindVerticalTextChains(part_grid);
  FindHorizontalTextChains(part_grid);
  if (textord_tabfind_show_strokewidths) {
    chains_win_ = MakeWindow(0, 400, "Initial text chains");
    part_grid->DisplayBoxes(chains_win_);
    projection_->DisplayProjection();
  }
  part_grid->SplitOverlappingPartitions(big_parts);
  EasyMerges(part_grid);
  RemoveLargeUnusedBlobs(block, part_grid, big_parts);
  TBOX grid_box(bleft(), tright());
  // The empty while-loop bodies are intentional: GridSmoothNeighbours is
  // iterated to a fixed point (it returns true while still making changes).
  while (part_grid->GridSmoothNeighbours(BTFT_CHAIN, nontext_map_, grid_box,
                                         rerotation));
  while (part_grid->GridSmoothNeighbours(BTFT_NEIGHBOURS, nontext_map_,
                                         grid_box, rerotation));
  TestDiacritics(part_grid, block);
  MergeDiacritics(block, part_grid);
  if (textord_tabfind_show_strokewidths) {
    textlines_win_ = MakeWindow(400, 400, "GoodTextline blobs");
    part_grid->DisplayBoxes(textlines_win_);
    diacritics_win_ = DisplayDiacritics("Diacritics", 0, 0, block);
  }
  // Mop up whatever is left unowned, then repeat the split/merge/smooth
  // cycle now that diacritics are out of the way.
  PartitionRemainingBlobs(part_grid);
  part_grid->SplitOverlappingPartitions(big_parts);
  EasyMerges(part_grid);
  while (part_grid->GridSmoothNeighbours(BTFT_CHAIN, nontext_map_, grid_box,
                                         rerotation));
  while (part_grid->GridSmoothNeighbours(BTFT_NEIGHBOURS, nontext_map_,
                                         grid_box, rerotation));
  // Now eliminate strong stuff in a sea of the opposite.
  while (part_grid->GridSmoothNeighbours(BTFT_STRONG_CHAIN, nontext_map_,
                                         grid_box, rerotation));
  if (textord_tabfind_show_strokewidths) {
    smoothed_win_ = MakeWindow(800, 400, "Smoothed blobs");
    part_grid->DisplayBoxes(smoothed_win_);
  }
}
// Helper verifies that blob's neighbour in direction dir is good to add to a
// vertical text chain by returning the neighbour if it is not null, not owned,
// and not uniquely horizontal, as well as its neighbour in the opposite
// direction is blob.
static BLOBNBOX* MutualUnusedVNeighbour(const BLOBNBOX* blob,
                                        BlobNeighbourDir dir) {
  BLOBNBOX* candidate = blob->neighbour(dir);
  // Reject missing, already-claimed, or horizontal-only candidates.
  if (candidate == NULL || candidate->owner() != NULL ||
      candidate->UniquelyHorizontal())
    return NULL;
  // The link must be mutual: the candidate must point back at blob.
  return candidate->neighbour(DirOtherWay(dir)) == blob ? candidate : NULL;
}
// Finds vertical chains of text-like blobs and puts them in ColPartitions.
void StrokeWidth::FindVerticalTextChains(ColPartitionGrid* part_grid) {
  BlobGridSearch gsearch(this);
  BLOBNBOX* bbox;
  gsearch.StartFullSearch();
  while ((bbox = gsearch.NextFullSearch()) != NULL) {
    // Only process boxes that have no horizontal hope and have not yet
    // been included in a chain.
    BLOBNBOX* blob;
    if (bbox->owner() == NULL && bbox->UniquelyVertical() &&
        (blob = MutualUnusedVNeighbour(bbox, BND_ABOVE)) != NULL) {
      // Put all the linked blobs into a ColPartition.
      ColPartition* part = new ColPartition(BRT_VERT_TEXT, ICOORD(0, 1));
      part->AddBox(bbox);
      // Walk upwards from the seed, collecting mutually-linked blobs.
      while (blob != NULL) {
        part->AddBox(blob);
        blob = MutualUnusedVNeighbour(blob, BND_ABOVE);
      }
      // Then walk downwards from the seed.
      blob = MutualUnusedVNeighbour(bbox, BND_BELOW);
      while (blob != NULL) {
        part->AddBox(blob);
        blob = MutualUnusedVNeighbour(blob, BND_BELOW);
      }
      // Finalize the partition (claims ownership of the boxes) and insert
      // it into the grid.
      CompletePartition(part, part_grid);
    }
  }
}
// Helper verifies that blob's neighbour in direction dir is good to add to a
// horizontal text chain by returning the neighbour if it is not null, not
// owned, and not uniquely vertical, as well as its neighbour in the opposite
// direction is blob.
static BLOBNBOX* MutualUnusedHNeighbour(const BLOBNBOX* blob,
                                        BlobNeighbourDir dir) {
  BLOBNBOX* candidate = blob->neighbour(dir);
  // Reject missing, already-claimed, or vertical-only candidates.
  if (candidate == NULL || candidate->owner() != NULL ||
      candidate->UniquelyVertical())
    return NULL;
  // The link must be mutual: the candidate must point back at blob.
  return candidate->neighbour(DirOtherWay(dir)) == blob ? candidate : NULL;
}
// Finds horizontal chains of text-like blobs and puts them in ColPartitions.
void StrokeWidth::FindHorizontalTextChains(ColPartitionGrid* part_grid) {
  BlobGridSearch gsearch(this);
  BLOBNBOX* bbox;
  gsearch.StartFullSearch();
  while ((bbox = gsearch.NextFullSearch()) != NULL) {
    // Only process boxes that have no vertical hope and have not yet
    // been included in a chain.
    BLOBNBOX* blob;
    if (bbox->owner() == NULL && bbox->UniquelyHorizontal() &&
        (blob = MutualUnusedHNeighbour(bbox, BND_RIGHT)) != NULL) {
      // Put all the linked blobs into a ColPartition.
      ColPartition* part = new ColPartition(BRT_TEXT, ICOORD(0, 1));
      part->AddBox(bbox);
      // Walk rightwards from the seed, collecting mutually-linked blobs.
      while (blob != NULL) {
        part->AddBox(blob);
        blob = MutualUnusedHNeighbour(blob, BND_RIGHT);
      }
      // Then walk leftwards from the seed.
      blob = MutualUnusedHNeighbour(bbox, BND_LEFT);
      while (blob != NULL) {
        part->AddBox(blob);
        // Bug fix: this previously called MutualUnusedVNeighbour, whose
        // UniquelyHorizontal() rejection terminated leftward *horizontal*
        // chains after at most one step.
        blob = MutualUnusedHNeighbour(blob, BND_LEFT);
      }
      CompletePartition(part, part_grid);
    }
  }
}
// Finds diacritics and saves their base character in the blob.
// The objective is to move all diacritics to the noise_blobs list, so
// they don't mess up early textline finding/merging, or force splits
// on textlines that overlap a bit. Blobs that become diacritics must be
// either part of no ColPartition (NULL owner) or in a small partition in
// which ALL the blobs are diacritics, in which case the partition is
// exploded (deleted) back to its blobs.
void StrokeWidth::TestDiacritics(ColPartitionGrid* part_grid, TO_BLOCK* block) {
  // Secondary grid of small/noise blobs used by DiacriticBlob to detect
  // filled gaps between a candidate diacritic and its base character.
  BlobGrid small_grid(gridsize(), bleft(), tright());
  small_grid.InsertBlobList(&block->noise_blobs);
  small_grid.InsertBlobList(&block->blobs);
  int medium_diacritics = 0;
  int small_diacritics = 0;
  // Pass 1: test the existing noise blobs directly.
  BLOBNBOX_IT small_it(&block->noise_blobs);
  for (small_it.mark_cycle_pt(); !small_it.cycled_list(); small_it.forward()) {
    BLOBNBOX* blob = small_it.data();
    if (blob->owner() == NULL && !blob->IsDiacritic() &&
        DiacriticBlob(&small_grid, blob)) {
      ++small_diacritics;
    }
  }
  // Pass 2: test the medium blobs; any that qualify are moved to the
  // noise_blobs list (appended via small_it).
  BLOBNBOX_IT blob_it(&block->blobs);
  for (blob_it.mark_cycle_pt(); !blob_it.cycled_list(); blob_it.forward()) {
    BLOBNBOX* blob = blob_it.data();
    if (blob->IsDiacritic()) {
      small_it.add_to_end(blob_it.extract());
      continue;  // Already a diacritic.
    }
    ColPartition* part = blob->owner();
    if (part == NULL && DiacriticBlob(&small_grid, blob)) {
      ++medium_diacritics;
      RemoveBBox(blob);
      small_it.add_to_end(blob_it.extract());
    } else if (part != NULL && !part->block_owned() &&
               part->boxes_count() < 3) {
      // We allow blobs in small partitions to become diacritics if ALL the
      // blobs in the partition qualify as we can then cleanly delete the
      // partition, turn all the blobs in it to diacritics and they can be
      // merged into the base character partition more easily than merging
      // the partitions.
      BLOBNBOX_C_IT box_it(part->boxes());
      // Empty for-body: the loop just advances until either a blob fails
      // the diacritic test or the list is exhausted.
      for (box_it.mark_cycle_pt(); !box_it.cycled_list() &&
           DiacriticBlob(&small_grid, box_it.data());
           box_it.forward());
      if (box_it.cycled_list()) {
        // They are all good.
        while (!box_it.empty()) {
          // Liberate the blob from its partition so it can be treated
          // as a diacritic and merged explicitly with the base part.
          // The blob is really owned by the block. The partition "owner"
          // is NULLed to allow the blob to get merged with its base character
          // partition.
          BLOBNBOX* box = box_it.extract();
          box->set_owner(NULL);
          box_it.forward();
          ++medium_diacritics;
          // We remove the blob from the grid so it isn't found by subsequent
          // searches where we might not want to include diacritics.
          RemoveBBox(box);
        }
        // We only move the one blob to the small list here, but the others
        // all get moved by the test at the top of the loop.
        small_it.add_to_end(blob_it.extract());
        part_grid->RemoveBBox(part);
        delete part;
      }
    } else if (AlignedBlob::WithinTestRegion(2, blob->bounding_box().left(),
                                             blob->bounding_box().bottom())) {
      tprintf("Blob not available to be a diacritic at:");
      blob->bounding_box().print();
    }
  }
  if (textord_tabfind_show_strokewidths) {
    tprintf("Found %d small diacritics, %d medium\n",
            small_diacritics, medium_diacritics);
  }
}
// Searches this grid for an appropriately close and sized neighbour of the
// given [small] blob. If such a blob is found, the diacritic base is saved
// in the blob and true is returned.
// The small_grid is a secondary grid that contains the small/noise objects
// that are not in this grid, but may be useful for determining a connection
// between blob and its potential base character. (See DiacriticXGapFilled.)
bool StrokeWidth::DiacriticBlob(BlobGrid* small_grid, BLOBNBOX* blob) {
  // Unmergeable and vertical-text blobs can never be diacritics.
  if (BLOBNBOX::UnMergeableType(blob->region_type()) ||
      blob->region_type() == BRT_VERT_TEXT)
    return false;
  TBOX small_box(blob->bounding_box());
  bool debug = AlignedBlob::WithinTestRegion(2, small_box.left(),
                                             small_box.bottom());
  if (debug) {
    tprintf("Testing blob for diacriticness at:");
    small_box.print();
  }
  int x = (small_box.left() + small_box.right()) / 2;
  int y = (small_box.bottom() + small_box.top()) / 2;
  int grid_x, grid_y;
  GridCoords(x, y, &grid_x, &grid_y);
  int height = small_box.height();
  // Setup a rectangle search to find its nearest base-character neighbour.
  // We keep 2 different best candidates:
  // best_x_overlap is a category of base characters that have an overlap in x
  // (like a acute) in which we look for the least y-gap, computed using the
  // projection to favor base characters in the same textline.
  // best_y_overlap is a category of base characters that have no x overlap,
  // (nominally a y-overlap is preferred but not essential) in which we
  // look for the least weighted sum of x-gap and y-gap, with x-gap getting
  // a lower weight to catch quotes at the end of a textline.
  // NOTE that x-gap and y-gap are measured from the nearest side of the base
  // character to the FARTHEST side of the diacritic to allow small diacritics
  // to be a reasonable distance away, but not big diacritics.
  BLOBNBOX* best_x_overlap = NULL;
  BLOBNBOX* best_y_overlap = NULL;
  int best_total_dist = 0;
  int best_y_gap = 0;
  TBOX best_xbox;
  // TODO(rays) the search box could be setup using the projection as a guide.
  TBOX search_box(small_box);
  int x_pad = IntCastRounded(gridsize() * kDiacriticXPadRatio);
  int y_pad = IntCastRounded(gridsize() * kDiacriticYPadRatio);
  search_box.pad(x_pad, y_pad);
  BlobGridSearch rsearch(this);
  rsearch.SetUniqueMode(true);
  // The base character must be substantially taller than the diacritic.
  int min_height = height * kMinDiacriticSizeRatio;
  rsearch.StartRectSearch(search_box);
  BLOBNBOX* neighbour;
  while ((neighbour = rsearch.NextRectSearch()) != NULL) {
    if (BLOBNBOX::UnMergeableType(neighbour->region_type()) ||
        neighbour == blob || neighbour->owner() == blob->owner())
      continue;
    TBOX nbox = neighbour->bounding_box();
    if (neighbour->owner() == NULL || neighbour->owner()->IsVerticalType() ||
        (neighbour->flow() != BTFT_CHAIN &&
         neighbour->flow() != BTFT_STRONG_CHAIN)) {
      if (debug) {
        tprintf("Neighbour not strong enough:");
        nbox.print();
      }
      continue;  // Diacritics must be attached to strong text.
    }
    if (nbox.height() < min_height) {
      if (debug) {
        tprintf("Neighbour not big enough:");
        nbox.print();
      }
      continue;  // Too small to be the base character.
    }
    int x_gap = small_box.x_gap(nbox);
    int y_gap = small_box.y_gap(nbox);
    // Projection-weighted distance favors base characters that share the
    // diacritic's textline.
    int total_distance = projection_->DistanceOfBoxFromBox(small_box, nbox,
                                                           true, denorm_,
                                                           debug);
    if (debug) tprintf("xgap=%d, y=%d, total dist=%d\n",
                       x_gap, y_gap, total_distance);
    if (total_distance >
        neighbour->owner()->median_size() * kMaxDiacriticDistanceRatio) {
      if (debug) {
        tprintf("Neighbour with median size %d too far away:",
                neighbour->owner()->median_size());
        neighbour->bounding_box().print();
      }
      continue;  // Diacritics must not be too distant.
    }
    if (x_gap <= 0) {
      // x-overlapping candidate: compare by y-gap against the base reduced
      // to the diacritic's horizontal vicinity.
      if (debug) {
        tprintf("Computing reduced box for :");
        nbox.print();
      }
      int left = small_box.left() - small_box.width();
      int right = small_box.right() + small_box.width();
      nbox = neighbour->BoundsWithinLimits(left, right);
      y_gap = small_box.y_gap(nbox);
      if (best_x_overlap == NULL || y_gap < best_y_gap) {
        best_x_overlap = neighbour;
        best_xbox = nbox;
        best_y_gap = y_gap;
        if (debug) {
          tprintf("New best:");
          nbox.print();
        }
      } else if (debug) {
        tprintf("Shrunken box doesn't win:");
        nbox.print();
      }
    } else if (blob->ConfirmNoTabViolation(*neighbour)) {
      // Non-x-overlapping candidate: compare by weighted total distance.
      if (best_y_overlap == NULL || total_distance < best_total_dist) {
        if (debug) {
          tprintf("New best y overlap:");
          nbox.print();
        }
        best_y_overlap = neighbour;
        best_total_dist = total_distance;
      } else if (debug) {
        tprintf("New y overlap box doesn't win:");
        nbox.print();
      }
    } else if (debug) {
      tprintf("Neighbour wrong side of a tab:");
      nbox.print();
    }
  }
  // Prefer the x-overlap candidate unless the y-overlap candidate clearly
  // lies on the diacritic's textline.
  if (best_x_overlap != NULL &&
      (best_y_overlap == NULL ||
       best_xbox.major_y_overlap(best_y_overlap->bounding_box()))) {
    blob->set_diacritic_box(best_xbox);
    blob->set_base_char_blob(best_x_overlap);
    if (debug) {
      tprintf("DiacriticBlob OK! (x-overlap:");
      small_box.print();
      best_xbox.print();
    }
    return true;
  }
  // A y-overlap candidate is only accepted if the intervening gap is
  // bridged by other marks and contains no contradicting noise.
  if (best_y_overlap != NULL &&
      DiacriticXGapFilled(small_grid, small_box,
                          best_y_overlap->bounding_box()) &&
      NoNoiseInBetween(small_box, best_y_overlap->bounding_box())) {
    blob->set_diacritic_box(best_y_overlap->bounding_box());
    blob->set_base_char_blob(best_y_overlap);
    if (debug) {
      tprintf("DiacriticBlob OK! (y-overlap:");
      small_box.print();
      best_y_overlap->bounding_box().print();
    }
    return true;
  }
  if (debug) {
    tprintf("DiacriticBlob fails:");
    small_box.print();
    tprintf("Best x+y gap = %d, y = %d\n", best_total_dist, best_y_gap);
    if (best_y_overlap != NULL) {
      tprintf("XGapFilled=%d, NoiseBetween=%d\n",
              DiacriticXGapFilled(small_grid, small_box,
                                  best_y_overlap->bounding_box()),
              NoNoiseInBetween(small_box, best_y_overlap->bounding_box()));
    }
  }
  return false;
}
// Returns true if there is no gap between the base char and the diacritic
// bigger than a fraction of the height of the base char:
// Eg: line end.....'
// The quote is a long way from the end of the line, yet it needs to be a
// diacritic. To determine that the quote is not part of an image, or
// a different text block, we check for other marks in the gap between
// the base char and the diacritic.
//                        '<--Diacritic
// |---------|
// |         |<-toobig-gap->
// | Base    |<ok gap>
// |---------|        x<-----Dot occupying gap
// The grid is const really.
bool StrokeWidth::DiacriticXGapFilled(BlobGrid* grid,
                                      const TBOX& diacritic_box,
                                      const TBOX& base_box) {
  // Since most gaps are small, use an iterative algorithm to search the gap.
  int max_gap = IntCastRounded(base_box.height() *
                               kMaxDiacriticGapToBaseCharHeight);
  // occupied_box grows towards the diacritic as bridging marks are found.
  TBOX occupied_box(base_box);
  int diacritic_gap;
  while ((diacritic_gap = diacritic_box.x_gap(occupied_box)) > max_gap) {
    // Search the next max_gap-wide slice of the gap for a bridging mark.
    TBOX search_box(occupied_box);
    if (diacritic_box.left() > search_box.right()) {
      // We are looking right.
      search_box.set_left(search_box.right());
      search_box.set_right(search_box.left() + max_gap);
    } else {
      // We are looking left.
      search_box.set_right(search_box.left());
      search_box.set_left(search_box.left() - max_gap);
    }
    BlobGridSearch rsearch(grid);
    rsearch.StartRectSearch(search_box);
    BLOBNBOX* neighbour;
    while ((neighbour = rsearch.NextRectSearch()) != NULL) {
      const TBOX& nbox = neighbour->bounding_box();
      // Any blob that narrows the gap counts; extend occupied_box over it.
      if (nbox.x_gap(diacritic_box) < diacritic_gap) {
        if (nbox.left() < occupied_box.left())
          occupied_box.set_left(nbox.left());
        if (nbox.right() > occupied_box.right())
          occupied_box.set_right(nbox.right());
        break;
      }
    }
    if (neighbour == NULL)
      return false;  // Found a big gap.
  }
  return true;  // The gap was filled.
}
// Merges diacritics with the ColPartition of the base character blob.
void StrokeWidth::MergeDiacritics(TO_BLOCK* block,
                                  ColPartitionGrid* part_grid) {
  BLOBNBOX_IT small_it(&block->noise_blobs);
  for (small_it.mark_cycle_pt(); !small_it.cycled_list(); small_it.forward()) {
    BLOBNBOX* blob = small_it.data();
    if (blob->base_char_blob() != NULL) {
      ColPartition* part = blob->base_char_blob()->owner();
      // The base character must be owned by a partition and that partition
      // must not be on the big_parts list (not block owned).
      if (part != NULL && !part->block_owned() && blob->owner() == NULL &&
          blob->IsDiacritic()) {
        // The partition has to be removed from the grid and reinserted
        // because its bounding box may change.
        part_grid->RemoveBBox(part);
        part->AddBox(blob);
        // The diacritic adopts the type, flow and ownership of the base
        // character's partition.
        blob->set_region_type(part->blob_type());
        blob->set_flow(part->flow());
        blob->set_owner(part);
        part_grid->InsertBBox(true, true, part);
      }
      // Set all base chars to NULL before any blobs get deleted.
      blob->set_base_char_blob(NULL);
    }
  }
}
// Any blobs on the large_blobs list of block that are still unowned by a
// ColPartition, are probably drop-cap or vertically touching so the blobs
// are removed to the big_parts list and treated separately.
void StrokeWidth::RemoveLargeUnusedBlobs(TO_BLOCK* block,
                                         ColPartitionGrid* part_grid,
                                         ColPartition_LIST* big_parts) {
  BLOBNBOX_IT it(&block->large_blobs);
  for (it.mark_cycle_pt(); !it.cycled_list(); it.forward()) {
    BLOBNBOX* blob = it.data();
    // Genuine characters would have been claimed by a partition by now,
    // so anything still unowned (drop caps, vertically touching
    // characters) gets its own big partition.
    if (blob->owner() == NULL)
      ColPartition::MakeBigPartition(blob, big_parts);
  }
}
// All remaining unused blobs are put in individual ColPartitions.
void StrokeWidth::PartitionRemainingBlobs(ColPartitionGrid* part_grid) {
  BlobGridSearch gsearch(this);
  BLOBNBOX* bbox;
  // The full search visits blobs cell by cell, so unowned blobs are
  // accumulated per grid cell and flushed at each cell boundary.
  int prev_grid_x = -1;
  int prev_grid_y = -1;
  BLOBNBOX_CLIST cell_list;
  BLOBNBOX_C_IT cell_it(&cell_list);
  // True while every blob in the current cell is non-text (noise).
  bool cell_all_noise = true;
  gsearch.StartFullSearch();
  while ((bbox = gsearch.NextFullSearch()) != NULL) {
    int grid_x = gsearch.GridX();
    int grid_y = gsearch.GridY();
    if (grid_x != prev_grid_x || grid_y != prev_grid_y) {
      // New cell. Process old cell.
      MakePartitionsFromCellList(cell_all_noise, part_grid, &cell_list);
      cell_it.set_to_list(&cell_list);
      prev_grid_x = grid_x;
      prev_grid_y = grid_y;
      cell_all_noise = true;
    }
    if (bbox->owner() == NULL) {
      cell_it.add_to_end(bbox);
      if (bbox->flow() != BTFT_NONTEXT)
        cell_all_noise = false;
    } else {
      cell_all_noise = false;
    }
  }
  // Flush the final cell.
  MakePartitionsFromCellList(cell_all_noise, part_grid, &cell_list);
}
// If combine, put all blobs in the cell_list into a single partition, otherwise
// put each one into its own partition.
void StrokeWidth::MakePartitionsFromCellList(bool combine,
ColPartitionGrid* part_grid,
BLOBNBOX_CLIST* cell_list) {
if (cell_list->empty())
return;
BLOBNBOX_C_IT cell_it(cell_list);
if (combine) {
BLOBNBOX* bbox = cell_it.extract();
ColPartition* part = new ColPartition(bbox->region_type(), ICOORD(0, 1));
part->AddBox(bbox);
part->set_flow(bbox->flow());
for (cell_it.forward(); !cell_it.empty(); cell_it.forward()) {
part->AddBox(cell_it.extract());
}
CompletePartition(part, part_grid);
} else {
for (; !cell_it.empty(); cell_it.forward()) {
BLOBNBOX* bbox = cell_it.extract();
ColPartition* part = new ColPartition(bbox->region_type(), ICOORD(0, 1));
part->set_flow(bbox->flow());
part->AddBox(bbox);
CompletePartition(part, part_grid);
}
}
}
// Helper function to finish setting up a ColPartition and insert into
// part_grid. Computes the partition's limits, classifies its region/flow
// type from the textline projection, claims ownership of its boxes, and
// inserts it into the grid.
void StrokeWidth::CompletePartition(ColPartition* part,
                                    ColPartitionGrid* part_grid) {
  part->ComputeLimits();
  TBOX box = part->bounding_box();
  bool debug = AlignedBlob::WithinTestRegion(2, box.left(),
                                             box.bottom());
  // Score the partition against the textline projection and set its
  // region and flow types accordingly.
  int value = projection_->EvaluateColPartition(*part, denorm_, debug);
  part->SetRegionAndFlowTypesFromProjectionValue(value);
  part->ClaimBoxes();
  part_grid->InsertBBox(true, true, part);
}
// Merge partitions where the merge appears harmless.
// Candidate pairs are found via OrientationSearchBox and each merge is
// vetted by ConfirmEasyMerge before being applied.
void StrokeWidth::EasyMerges(ColPartitionGrid* part_grid) {
  part_grid->Merges(
      NewPermanentTessCallback(this, &StrokeWidth::OrientationSearchBox),
      NewPermanentTessCallback(this, &StrokeWidth::ConfirmEasyMerge));
}
// Compute a search box based on the orientation of the partition.
// Returns true if a suitable box can be calculated.
// Callback for EasyMerges.
bool StrokeWidth::OrientationSearchBox(ColPartition* part, TBOX* box) {
  if (part->IsVerticalType()) {
    // Vertical text: extend the box vertically by its width at each end.
    int pad = box->width();
    box->set_top(box->top() + pad);
    box->set_bottom(box->bottom() - pad);
  } else {
    // Horizontal text: extend the box horizontally by its height at each end.
    int pad = box->height();
    box->set_left(box->left() - pad);
    box->set_right(box->right() + pad);
  }
  return true;
}
// Merge confirmation callback for EasyMerges. Returns true if merging p1
// and p2 appears harmless to the layout.
bool StrokeWidth::ConfirmEasyMerge(const ColPartition* p1,
                                   const ColPartition* p2) {
  ASSERT_HOST(p1 != NULL && p2 != NULL);
  ASSERT_HOST(!p1->IsEmpty() && !p2->IsEmpty());
  if ((p1->flow() == BTFT_NONTEXT && p2->flow() >= BTFT_CHAIN) ||
      (p1->flow() >= BTFT_CHAIN && p2->flow() == BTFT_NONTEXT))
    return false;  // Don't merge confirmed image with text.
  // For vertical text the partitions must overlap horizontally in their
  // core, unless one is a singleton with a major bounding-box overlap.
  if ((p1->IsVerticalType() || p2->IsVerticalType()) &&
      p1->HCoreOverlap(*p2) <= 0 &&
      ((!p1->IsSingleton() &&
        !p2->IsSingleton()) ||
       !p1->bounding_box().major_overlap(p2->bounding_box())))
    return false;  // Overlap must be in the text line.
  // Likewise for horizontal text with vertical core overlap, with an extra
  // exemption for diacritic-style merges.
  if ((p1->IsHorizontalType() || p2->IsHorizontalType()) &&
      p1->VCoreOverlap(*p2) <= 0 &&
      ((!p1->IsSingleton() &&
        !p2->IsSingleton()) ||
       (!p1->bounding_box().major_overlap(p2->bounding_box()) &&
        !p1->OKDiacriticMerge(*p2, false) &&
        !p2->OKDiacriticMerge(*p1, false))))
    return false;  // Overlap must be in the text line.
  if (!p1->ConfirmNoTabViolation(*p2))
    return false;
  // Two non-text partitions may merge freely; otherwise the gap between
  // them must be free of noise.
  if (p1->flow() <= BTFT_NONTEXT && p2->flow() <= BTFT_NONTEXT)
    return true;
  return NoNoiseInBetween(p1->bounding_box(), p2->bounding_box());
}
// Returns true if there is no significant noise in between the boxes.
// Delegates to ImageFind, which checks the nontext_map_ (rotated back via
// rerotation_) for marks between box1 and box2.
bool StrokeWidth::NoNoiseInBetween(const TBOX& box1, const TBOX& box2) const {
  return ImageFind::BlankImageInBetween(box1, box2, grid_box_, rerotation_,
                                        nontext_map_);
}
/** Displays the blobs colored according to the number of good neighbours
 *  and the vertical/horizontal flow. Returns the new window, or NULL when
 *  graphics are disabled.
 */
ScrollView* StrokeWidth::DisplayGoodBlobs(const char* window_name,
                                          int x, int y) {
  ScrollView* window = NULL;
#ifndef GRAPHICS_DISABLED
  window = MakeWindow(x, y, window_name);
  // For every blob in the grid, display it.
  window->Brush(ScrollView::NONE);
  // For every bbox in the grid, display it.
  BlobGridSearch gsearch(this);
  gsearch.StartFullSearch();
  BLOBNBOX* bbox;
  while ((bbox = gsearch.NextFullSearch()) != NULL) {
    TBOX box = bbox->bounding_box();
    int left_x = box.left();
    int right_x = box.right();
    int top_y = box.top();
    int bottom_y = box.bottom();
    int goodness = bbox->GoodTextBlob();
    BlobRegionType blob_type = bbox->region_type();
    // Display-only overrides: show unambiguous direction as the
    // corresponding text type.
    if (bbox->UniquelyVertical())
      blob_type = BRT_VERT_TEXT;
    if (bbox->UniquelyHorizontal())
      blob_type = BRT_TEXT;
    BlobTextFlowType flow = bbox->flow();
    // Blobs with no assigned flow are colored by their goodness count.
    if (flow == BTFT_NONE) {
      if (goodness == 0)
        flow = BTFT_NEIGHBOURS;
      else if (goodness == 1)
        flow = BTFT_CHAIN;
      else
        flow = BTFT_STRONG_CHAIN;
    }
    window->Pen(BLOBNBOX::TextlineColor(blob_type, flow));
    window->Rectangle(left_x, bottom_y, right_x, top_y);
  }
  window->Update();
#endif
  return window;
}
static void DrawDiacriticJoiner(const BLOBNBOX* blob, ScrollView* window) {
#ifndef GRAPHICS_DISABLED
const TBOX& blob_box(blob->bounding_box());
int top = MAX(blob_box.top(), blob->base_char_top());
int bottom = MIN(blob_box.bottom(), blob->base_char_bottom());
int x = (blob_box.left() + blob_box.right()) / 2;
window->Line(x, top, x, bottom);
#endif // GRAPHICS_DISABLED
}
// Displays blobs colored according to whether or not they are diacritics.
ScrollView* StrokeWidth::DisplayDiacritics(const char* window_name,
                                           int x, int y, TO_BLOCK* block) {
  ScrollView* window = NULL;
#ifndef GRAPHICS_DISABLED
  window = MakeWindow(x, y, window_name);
  window->Brush(ScrollView::NONE);
  // Regular blobs: diacritics get a green pen plus a joiner line; the rest
  // keep their standard box color.
  BLOBNBOX_IT it(&block->blobs);
  for (it.mark_cycle_pt(); !it.cycled_list(); it.forward()) {
    BLOBNBOX* bblob = it.data();
    if (bblob->IsDiacritic()) {
      window->Pen(ScrollView::GREEN);
      DrawDiacriticJoiner(bblob, window);
    } else {
      window->Pen(bblob->BoxColor());
    }
    const TBOX& bbox = bblob->bounding_box();
    window->Rectangle(bbox.left(), bbox.bottom(), bbox.right(), bbox.top());
  }
  // Noise blobs: diacritics in green, everything else in white.
  it.set_to_list(&block->noise_blobs);
  for (it.mark_cycle_pt(); !it.cycled_list(); it.forward()) {
    BLOBNBOX* bblob = it.data();
    if (bblob->IsDiacritic()) {
      window->Pen(ScrollView::GREEN);
      DrawDiacriticJoiner(bblob, window);
    } else {
      window->Pen(ScrollView::WHITE);
    }
    const TBOX& bbox = bblob->bounding_box();
    window->Rectangle(bbox.left(), bbox.bottom(), bbox.right(), bbox.top());
  }
  window->Update();
#endif
  return window;
}
} // namespace tesseract.
| C++ |
///////////////////////////////////////////////////////////////////////
// File: colpartitionset.h
// Description: Class to hold a list of ColPartitions of the page that
// correspond roughly to columns.
// Author: Ray Smith
// Created: Thu Aug 14 10:50:01 PDT 2008
//
// (C) Copyright 2008, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#ifndef TESSERACT_TEXTORD_COLPARTITIONSET_H__
#define TESSERACT_TEXTORD_COLPARTITIONSET_H__
#include "colpartition.h" // For ColPartition_LIST.
#include "genericvector.h" // For GenericVector.
#include "rect.h" // For TBOX.
#include "tabvector.h" // For BLOBNBOX_CLIST.
namespace tesseract {
class WorkingPartSet_LIST;
class ColSegment_LIST;
class ColPartitionSet;
typedef GenericVector<ColPartitionSet*> PartSetVector;
// ColPartitionSet is a class that holds a list of ColPartitions.
// Its main use is in holding a candidate partitioning of the width of the
// image into columns, where each member ColPartition is a single column.
// ColPartitionSets are used in building the column layout of a page.
// ColPartitionSet is a class that holds a list of ColPartitions.
// Its main use is in holding a candidate partitioning of the width of the
// image into columns, where each member ColPartition is a single column.
// ColPartitionSets are used in building the column layout of a page.
class ColPartitionSet : public ELIST_LINK {
 public:
  ColPartitionSet() {
  }
  explicit ColPartitionSet(ColPartition_LIST* partitions);
  explicit ColPartitionSet(ColPartition* partition);

  ~ColPartitionSet();

  // Simple accessors.
  const TBOX& bounding_box() const {
    return bounding_box_;
  }
  bool Empty() const {
    return parts_.empty();
  }
  int ColumnCount() const {
    return parts_.length();
  }

  // Returns the number of columns of good width.
  int GoodColumnCount() const;

  // Return an element of the parts_ list from its index.
  ColPartition* GetColumnByIndex(int index);

  // Return the ColPartition that contains the given coords, if any, else NULL.
  ColPartition* ColumnContaining(int x, int y);

  // Return the bounding boxes of columns at the given y-range.
  void GetColumnBoxes(int y_bottom, int y_top, ColSegment_LIST *segments);

  // Extract all the parts from the list, relinquishing ownership.
  void RelinquishParts();

  // Attempt to improve this by adding partitions or expanding partitions.
  void ImproveColumnCandidate(WidthCallback* cb, PartSetVector* src_sets);

  // If this set is good enough to represent a new partitioning into columns,
  // add it to the vector of sets, otherwise delete it.
  void AddToColumnSetsIfUnique(PartSetVector* column_sets, WidthCallback* cb);

  // Return true if the partitions in other are all compatible with the columns
  // in this.
  bool CompatibleColumns(bool debug, ColPartitionSet* other, WidthCallback* cb);

  // Returns the total width of all blobs in the part_set that do not lie
  // within an approved column. Used as a cost measure for using this
  // column set over another that might be compatible.
  int UnmatchedWidth(ColPartitionSet* part_set);

  // Return true if this ColPartitionSet makes a legal column candidate by
  // having legal individual partitions and non-overlapping adjacent pairs.
  bool LegalColumnCandidate();

  // Return a copy of this. If good_only will only copy the Good ColPartitions.
  ColPartitionSet* Copy(bool good_only);

  // Display the edges of the columns at the given y coords.
  void DisplayColumnEdges(int y_bottom, int y_top, ScrollView* win);

  // Return the ColumnSpanningType that best explains the columns overlapped
  // by the given coords(left,right,y), with the given margins.
  // Also return the first and last column index touched by the coords and
  // the leftmost spanned column.
  // Column indices are 2n + 1 for real columns (0 based) and even values
  // represent the gaps in between columns, with 0 being left of the leftmost.
  // resolution refers to the ppi resolution of the image. It may be 0 if only
  // the first_col and last_col are required.
  ColumnSpanningType SpanningType(int resolution,
                                  int left, int right, int height, int y,
                                  int left_margin, int right_margin,
                                  int* first_col, int* last_col,
                                  int* first_spanned_col);

  // The column_set has changed. Close down all in-progress WorkingPartSets in
  // columns that do not match and start new ones for the new columns in this.
  // As ColPartitions are turned into BLOCKs, the used ones are put in
  // used_parts, as they still need to be referenced in the grid.
  void ChangeWorkColumns(const ICOORD& bleft, const ICOORD& tright,
                         int resolution, ColPartition_LIST* used_parts,
                         WorkingPartSet_LIST* working_set);

  // Accumulate the widths and gaps into the given variables.
  void AccumulateColumnWidthsAndGaps(int* total_width, int* width_samples,
                                     int* total_gap, int* gap_samples);

  // Provide debug output for this ColPartitionSet and all the ColPartitions.
  void Print();

 private:
  // Add the given partition to the list in the appropriate place.
  void AddPartition(ColPartition* new_part, ColPartition_IT* it);

  // Compute the coverage and good column count. Coverage is the amount of the
  // width of the page (in pixels) that is covered by ColPartitions, which are
  // used to provide candidate column layouts.
  // Coverage is split into good and bad. Good coverage is provided by
  // ColPartitions of a frequent width (according to the callback function
  // provided by TabFinder::WidthCB, which accesses stored statistics on the
  // widths of ColPartitions) and bad coverage is provided by all other
  // ColPartitions, even if they have tab vectors at both sides. Thus:
  // |-----------------------------------------------------------------|
  // |        Double     width    heading                              |
  // |-----------------------------------------------------------------|
  // |-------------------------------| |-------------------------------|
  // |   Common width ColPartition   | |  Common width ColPartition    |
  // |-------------------------------| |-------------------------------|
  // the layout with two common-width columns has better coverage than the
  // double width heading, because the coverage is "good," even though less in
  // total coverage than the heading, because the heading coverage is "bad."
  void ComputeCoverage();

  // Adds the coverage, column count and box for a single partition,
  // without adding it to the list. (Helper factored from ComputeCoverage.)
  void AddPartitionCoverageAndBox(const ColPartition& part);

  // The partitions in this column candidate.
  ColPartition_LIST parts_;
  // The number of partitions that have a frequent column width.
  int good_column_count_;
  // Total width of all the good ColPartitions.
  int good_coverage_;
  // Total width of all the bad ColPartitions.
  int bad_coverage_;
  // Bounding box of all partitions in the set.
  TBOX bounding_box_;
};
ELISTIZEH(ColPartitionSet)
} // namespace tesseract.
#endif  // TESSERACT_TEXTORD_COLPARTITIONSET_H__
| C++ |
///////////////////////////////////////////////////////////////////////
// File: linefind.cpp
// Description: Class to find vertical lines in an image and create
// a corresponding list of empty blobs.
// Author: Ray Smith
// Created: Thu Mar 20 09:49:01 PDT 2008
//
// (C) Copyright 2008, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#ifdef _MSC_VER
#pragma warning(disable:4244) // Conversion warnings
#endif
#ifdef HAVE_CONFIG_H
#include "config_auto.h"
#endif
#include "linefind.h"
#include "alignedblob.h"
#include "tabvector.h"
#include "blobbox.h"
#include "edgblob.h"
#include "openclwrapper.h"
#include "allheaders.h"
namespace tesseract {
/// Denominator of resolution makes max pixel width to allow thin lines.
const int kThinLineFraction = 20;
/// Denominator of resolution makes min pixels to demand line lengths to be.
const int kMinLineLengthFraction = 4;
/// Spacing of cracks across the page to break up tall vertical lines.
const int kCrackSpacing = 100;
/// Grid size used by line finder. Not very critical.
const int kLineFindGridSize = 50;
/// Min width of a line in pixels to be considered thick.
const int kMinThickLineWidth = 12;
/// Max size of line residue. (The pixels that fail the long thin opening, and
/// therefore don't make it to the candidate line mask, but are nevertheless
/// part of the line.)
const int kMaxLineResidue = 6;
/// Min length in inches of a line segment that exceeds kMinThickLineWidth in
/// thickness. (Such lines shouldn't break by simple image degradation.)
const double kThickLengthMultiple = 0.75;
/// Max fraction of line box area that can be occupied by non-line pixels.
const double kMaxNonLineDensity = 0.25;
/// Max height of a music stave in inches.
const double kMaxStaveHeight = 1.0;
/// Minimum fraction of pixels in a music rectangle connected to the staves.
const double kMinMusicPixelFraction = 0.75;
// Erases the unused blobs from the line_pix image, taking into account
// whether this was a horizontal or vertical line set.
static void RemoveUnusedLineSegments(bool horizontal_lines,
                                     BLOBNBOX_LIST* line_bblobs,
                                     Pix* line_pix) {
  int height = pixGetHeight(line_pix);
  BLOBNBOX_IT bbox_it(line_bblobs);
  for (bbox_it.mark_cycle_pt(); !bbox_it.cycled_list(); bbox_it.forward()) {
    BLOBNBOX* blob = bbox_it.data();
    // Blobs that became part of a line were tagged TT_VLINE; anything else
    // is unused and gets cleared out of the line image.
    if (blob->left_tab_type() != TT_VLINE) {
      const TBOX& box = blob->bounding_box();
      Box* pixbox = NULL;
      if (horizontal_lines) {
        // Horizontal lines are in tess format and also have x and y flipped
        // (to use FindVerticalAlignment) so we have to flip x and y and then
        // convert to Leptonica by height - flipped x (ie the right edge).
        // See GetLineBoxes for more explanation.
        pixbox = boxCreate(box.bottom(), height - box.right(),
                           box.height(), box.width());
      } else {
        // For vertical lines, just flip upside-down to convert to Leptonica.
        // The y position of the box in Leptonica terms is the distance from
        // the top of the image to the top of the box.
        pixbox = boxCreate(box.left(), height - box.top(),
                           box.width(), box.height());
      }
      pixClearInRect(line_pix, pixbox);
      boxDestroy(&pixbox);
    }
  }
}
// Helper subtracts the line_pix image from the src_pix, and removes residue
// as well by removing components that touch the line, but are not in the
// non_line_pix mask. It is assumed that the non_line_pix mask has already
// been prepared to required accuracy.
static void SubtractLinesAndResidue(Pix* line_pix, Pix* non_line_pix,
                                    int resolution, Pix* src_pix) {
  // Knock the detected lines out of the source image first.
  pixSubtract(src_pix, src_pix, line_pix);
  // Everything left that is not in the non-line mask is potential residue.
  Pix* residue = pixSubtract(NULL, src_pix, non_line_pix);
  // Thicken the lines slightly so they make contact with the residue, then
  // seed-fill from the fattened lines to pick up all connected residue.
  Pix* thick_lines = pixDilateBrick(NULL, line_pix, 3, 3);
  pixSeedfillBinary(thick_lines, thick_lines, residue, 8);
  // Remove the collected residue from the source image as well.
  pixSubtract(src_pix, src_pix, thick_lines);
  pixDestroy(&thick_lines);
  pixDestroy(&residue);
}
// Returns the maximum strokewidth in the given binary image by doubling
// the maximum of the distance function.
static int MaxStrokeWidth(Pix* pix) {
  // 8-bit distance transform: each pixel holds its distance to the boundary.
  Pix* dist_pix = pixDistanceFunction(pix, 4, 8, L_BOUNDARY_BG);
  const int width = pixGetWidth(dist_pix);
  const int height = pixGetHeight(dist_pix);
  const int wpl = pixGetWpl(dist_pix);
  l_uint32* row = pixGetData(dist_pix);
  // Scan every row for the largest distance value.
  int max_dist = 0;
  for (int y = 0; y < height; ++y, row += wpl) {
    for (int x = 0; x < width; ++x) {
      const int dist = GET_DATA_BYTE(row, x);
      if (dist > max_dist)
        max_dist = dist;
    }
  }
  pixDestroy(&dist_pix);
  // The distance reaches the middle of a stroke, so the width is double.
  return max_dist * 2;
}
// Returns the number of connected components in the intersection_pix that
// are touched by line_box. Returns 0 if intersection_pix is NULL or no
// components are found.
static int NumTouchingIntersections(Box* line_box, Pix* intersection_pix) {
  if (intersection_pix == NULL) return 0;
  // Clip out just the part of the intersection image under the line box and
  // count its 8-connected components.
  Pix* rect_pix = pixClipRectangle(intersection_pix, line_box, NULL);
  Boxa* boxa = pixConnComp(rect_pix, NULL, 8);
  pixDestroy(&rect_pix);
  // Fixed: was "return false;", which is type-inconsistent with the int
  // return (same value, clearer intent).
  if (boxa == NULL) return 0;
  int result = boxaGetCount(boxa);
  boxaDestroy(&boxa);
  return result;
}
// Returns the number of black pixels found in the box made by adding the line
// width to both sides of the line bounding box. (Increasing the smallest
// dimension of the bounding box.)
static int CountPixelsAdjacentToLine(int line_width, Box* line_box,
Pix* nonline_pix) {
l_int32 x, y, box_width, box_height;
boxGetGeometry(line_box, &x, &y, &box_width, &box_height);
if (box_width > box_height) {
// horizontal line.
int bottom = MIN(pixGetHeight(nonline_pix), y + box_height + line_width);
y = MAX(0, y - line_width);
box_height = bottom - y;
} else {
// Vertical line.
int right = MIN(pixGetWidth(nonline_pix), x + box_width + line_width);
x = MAX(0, x - line_width);
box_width = right - x;
}
Box* box = boxCreate(x, y, box_width, box_height);
Pix* rect_pix = pixClipRectangle(nonline_pix, box, NULL);
boxDestroy(&box);
l_int32 result;
pixCountPixels(rect_pix, &result, NULL);
pixDestroy(&rect_pix);
return result;
}
// Helper erases false-positive line segments from the input/output line_pix.
// 1. Since thick lines shouldn't really break up, we can eliminate some false
//    positives by marking segments that are at least kMinThickLineWidth
//    thickness, yet have a length less than min_thick_length.
// 2. Lines that don't have at least 2 intersections with other lines and have
//    a lot of neighbouring non-lines are probably not lines (perhaps arabic
//    or Hindi words, or underlines.)
// Bad line components are erased from line_pix.
// Returns the number of remaining connected components.
static int FilterFalsePositives(int resolution, Pix* nonline_pix,
                                Pix* intersection_pix, Pix* line_pix) {
  int min_thick_length = static_cast<int>(resolution * kThickLengthMultiple);
  Pixa* pixa = NULL;
  // Get both the bounding boxes and the component images of the candidates.
  Boxa* boxa = pixConnComp(line_pix, &pixa, 8);
  // Iterate over the boxes to remove false positives.
  int nboxes = boxaGetCount(boxa);
  int remaining_boxes = nboxes;
  for (int i = 0; i < nboxes; ++i) {
    Box* box = boxaGetBox(boxa, i, L_CLONE);
    l_int32 x, y, box_width, box_height;
    boxGetGeometry(box, &x, &y, &box_width, &box_height);
    // Measure the maximum stroke width of this component alone.
    Pix* comp_pix = pixaGetPix(pixa, i, L_CLONE);
    int max_width = MaxStrokeWidth(comp_pix);
    pixDestroy(&comp_pix);
    bool bad_line = false;
    // If the length is too short to stand-alone as a line, and the box width
    // is thick enough, and the stroke width is thick enough it is bad.
    if (box_width >= kMinThickLineWidth && box_height >= kMinThickLineWidth &&
        box_width < min_thick_length && box_height < min_thick_length &&
        max_width > kMinThickLineWidth) {
      // Too thick for the length.
      bad_line = true;
    }
    // A line with fewer than 2 intersections is suspect: check the density
    // of non-line pixels right next to it.
    if (!bad_line &&
        (intersection_pix == NULL ||
         NumTouchingIntersections(box, intersection_pix) < 2)) {
      // Test non-line density near the line.
      int nonline_count = CountPixelsAdjacentToLine(max_width, box,
                                                    nonline_pix);
      if (nonline_count > box_height * box_width * kMaxNonLineDensity)
        bad_line = true;
    }
    if (bad_line) {
      // Not a good line: erase it from the line mask.
      pixClearInRect(line_pix, box);
      --remaining_boxes;
    }
    boxDestroy(&box);
  }
  pixaDestroy(&pixa);
  boxaDestroy(&boxa);
  return remaining_boxes;
}
// Finds vertical and horizontal line objects in the given pix.
// Uses the given resolution to determine size thresholds instead of any
// that may be present in the pix.
// The output vertical_x and vertical_y contain a sum of the output vectors,
// thereby giving the mean vertical direction.
// If pix_music_mask != NULL, and music is detected, a mask of the staves
// and anything that is connected (bars, notes etc.) will be returned in
// pix_music_mask, the mask subtracted from pix, and the lines will not
// appear in v_lines or h_lines.
// The output vectors are owned by the list and Frozen (cannot refit) by
// having no boxes, as there is no need to refit or merge separator lines.
// The detected lines are removed from the pix.
void LineFinder::FindAndRemoveLines(int resolution, bool debug, Pix* pix,
                                    int* vertical_x, int* vertical_y,
                                    Pix** pix_music_mask,
                                    TabVector_LIST* v_lines,
                                    TabVector_LIST* h_lines) {
  PERF_COUNT_START("FindAndRemoveLines")
  if (pix == NULL || vertical_x == NULL || vertical_y == NULL) {
    tprintf("Error in parameters for LineFinder::FindAndRemoveLines\n");
    return;
  }
  // Candidate/non-candidate masks for both orientations, all produced (or
  // left NULL) by GetLineMasks below.
  Pix* pix_vline = NULL;
  Pix* pix_non_vline = NULL;
  Pix* pix_hline = NULL;
  Pix* pix_non_hline = NULL;
  Pix* pix_intersections = NULL;
  Pixa* pixa_display = debug ? pixaCreate(0) : NULL;
  GetLineMasks(resolution, pix, &pix_vline, &pix_non_vline, &pix_hline,
               &pix_non_hline, &pix_intersections, pix_music_mask,
               pixa_display);
  // Find lines, convert to TabVector_LIST and remove those that are used.
  FindAndRemoveVLines(resolution, pix_intersections, vertical_x, vertical_y,
                      &pix_vline, pix_non_vline, pix, v_lines);
  if (pix_hline != NULL) {
    // Recompute intersections and re-filter false positive h-lines.
    // (V-line removal above may have deleted intersections that previously
    // validated some h-lines.)
    if (pix_vline != NULL)
      pixAnd(pix_intersections, pix_vline, pix_hline);
    else
      pixDestroy(&pix_intersections);
    if (!FilterFalsePositives(resolution, pix_non_hline, pix_intersections,
                              pix_hline)) {
      // No h-lines survived filtering.
      pixDestroy(&pix_hline);
    }
  }
  FindAndRemoveHLines(resolution, pix_intersections, *vertical_x, *vertical_y,
                      &pix_hline, pix_non_hline, pix, h_lines);
  if (pixa_display != NULL && pix_vline != NULL)
    pixaAddPix(pixa_display, pix_vline, L_CLONE);
  if (pixa_display != NULL && pix_hline != NULL)
    pixaAddPix(pixa_display, pix_hline, L_CLONE);
  if (pix_vline != NULL && pix_hline != NULL) {
    // Remove joins (intersections) where lines cross, and the residue.
    // Recalculate the intersections, since some lines have been deleted.
    pixAnd(pix_intersections, pix_vline, pix_hline);
    // Fatten up the intersections and seed-fill to get the intersection
    // residue.
    Pix* pix_join_residue = pixDilateBrick(NULL, pix_intersections, 5, 5);
    pixSeedfillBinary(pix_join_residue, pix_join_residue, pix, 8);
    // Now remove the intersection residue.
    pixSubtract(pix, pix, pix_join_residue);
    pixDestroy(&pix_join_residue);
  }
  // Remove any detected music.
  if (pix_music_mask != NULL && *pix_music_mask != NULL) {
    if (pixa_display != NULL)
      pixaAddPix(pixa_display, *pix_music_mask, L_CLONE);
    pixSubtract(pix, pix, *pix_music_mask);
  }
  if (pixa_display != NULL)
    pixaAddPix(pixa_display, pix, L_CLONE);
  // Release all the intermediate masks.
  pixDestroy(&pix_vline);
  pixDestroy(&pix_non_vline);
  pixDestroy(&pix_hline);
  pixDestroy(&pix_non_hline);
  pixDestroy(&pix_intersections);
  if (pixa_display != NULL) {
#if LIBLEPT_MINOR_VERSION >= 69 || LIBLEPT_MAJOR_VERSION > 1
    // Dump the debug stages to a PDF (needs a new-enough Leptonica).
    pixaConvertToPdf(pixa_display, resolution, 1.0f, 0, 0, "LineFinding",
                     "vhlinefinding.pdf");
#endif
    pixaDestroy(&pixa_display);
  }
  PERF_COUNT_END
}
// Converts the Boxa array to a list of C_BLOB, getting rid of severely
// overlapping outlines and those that are children of a bigger one.
// The output is a list of C_BLOBs that are owned by the list.
// The C_OUTLINEs in the C_BLOBs contain no outline data - just empty
// bounding boxes. The Boxa is consumed and destroyed.
void LineFinder::ConvertBoxaToBlobs(int image_width, int image_height,
                                    Boxa** boxes, C_BLOB_LIST* blobs) {
  C_OUTLINE_LIST outlines;
  C_OUTLINE_IT ol_it = &outlines;
  // Iterate the boxes to convert to outlines.
  int nboxes = boxaGetCount(*boxes);
  for (int i = 0; i < nboxes; ++i) {
    l_int32 x, y, width, height;
    boxaGetBoxGeometry(*boxes, i, &x, &y, &width, &height);
    // Make a C_OUTLINE from the leptonica box. This is a bit of a hack,
    // as there is no outline, just a bounding box, but with some very
    // small changes to coutln.cpp, it works nicely.
    ICOORD top_left(x, y);
    ICOORD bot_right(x + width, y + height);
    CRACKEDGE startpt;
    startpt.pos = top_left;
    C_OUTLINE* outline = new C_OUTLINE(&startpt, top_left, bot_right, 0);
    ol_it.add_after_then_move(outline);
  }
  // Use outlines_to_blobs to convert the outlines to blobs and find
  // overlapping and contained objects. The output list of blobs in the block
  // has all the bad ones filtered out and deleted.
  BLOCK block;
  ICOORD page_tl(0, 0);
  ICOORD page_br(image_width, image_height);
  outlines_to_blobs(&block, page_tl, page_br, &outlines);
  // Transfer the created blobs to the output list.
  C_BLOB_IT blob_it(blobs);
  blob_it.add_list_after(block.blob_list());
  // The boxes aren't needed any more.
  boxaDestroy(boxes);
}
// Finds vertical line objects in pix_vline and removes them from src_pix.
// Uses the given resolution to determine size thresholds instead of any
// that may be present in the pix.
// The output vertical_x and vertical_y contain a sum of the output vectors,
// thereby giving the mean vertical direction.
// The output vectors are owned by the list and Frozen (cannot refit) by
// having no boxes, as there is no need to refit or merge separator lines.
// If no good lines are found, pix_vline is destroyed.
// None of the input pointers may be NULL, and if *pix_vline is NULL then
// the function does nothing.
void LineFinder::FindAndRemoveVLines(int resolution,
                                     Pix* pix_intersections,
                                     int* vertical_x, int* vertical_y,
                                     Pix** pix_vline, Pix* pix_non_vline,
                                     Pix* src_pix, TabVector_LIST* vectors) {
  if (pix_vline == NULL || *pix_vline == NULL) return;
  // Convert the line mask components to blobs for vector finding.
  C_BLOB_LIST line_cblobs;
  BLOBNBOX_LIST line_bblobs;
  GetLineBoxes(false, *pix_vline, pix_intersections,
               &line_cblobs, &line_bblobs);
  int width = pixGetWidth(src_pix);
  int height = pixGetHeight(src_pix);
  ICOORD bleft(0, 0);
  ICOORD tright(width, height);
  FindLineVectors(bleft, tright, &line_bblobs, vertical_x, vertical_y, vectors);
  if (!vectors->empty()) {
    // Erase segments that were not part of any found vector, then remove
    // the remaining line pixels (and their residue) from the source image.
    RemoveUnusedLineSegments(false, &line_bblobs, *pix_vline);
    SubtractLinesAndResidue(*pix_vline, pix_non_vline, resolution, src_pix);
    ICOORD vertical;
    vertical.set_with_shrink(*vertical_x, *vertical_y);
    TabVector::MergeSimilarTabVectors(vertical, vectors, NULL);
  } else {
    // No lines found: the caller's pix_vline becomes NULL.
    pixDestroy(pix_vline);
  }
}
// Finds horizontal line objects in pix_hline and removes them from src_pix.
// Uses the given resolution to determine size thresholds instead of any
// that may be present in the pix.
// The output vertical_x and vertical_y contain a sum of the output vectors,
// thereby giving the mean vertical direction.
// The output vectors are owned by the list and Frozen (cannot refit) by
// having no boxes, as there is no need to refit or merge separator lines.
// If no good lines are found, pix_hline is destroyed.
// None of the input pointers may be NULL, and if *pix_hline is NULL then
// the function does nothing.
void LineFinder::FindAndRemoveHLines(int resolution,
                                     Pix* pix_intersections,
                                     int vertical_x, int vertical_y,
                                     Pix** pix_hline, Pix* pix_non_hline,
                                     Pix* src_pix, TabVector_LIST* vectors) {
  if (pix_hline == NULL || *pix_hline == NULL) return;
  // Convert the line mask components to blobs (x/y flipped - see below).
  C_BLOB_LIST line_cblobs;
  BLOBNBOX_LIST line_bblobs;
  GetLineBoxes(true, *pix_hline, pix_intersections, &line_cblobs, &line_bblobs);
  int width = pixGetWidth(src_pix);
  int height = pixGetHeight(src_pix);
  ICOORD bleft(0, 0);
  // Note width/height swapped: the horizontal case works in flipped coords.
  ICOORD tright(height, width);
  FindLineVectors(bleft, tright, &line_bblobs, &vertical_x, &vertical_y,
                  vectors);
  if (!vectors->empty()) {
    // Erase unused segments, then subtract the lines and their residue.
    RemoveUnusedLineSegments(true, &line_bblobs, *pix_hline);
    SubtractLinesAndResidue(*pix_hline, pix_non_hline, resolution, src_pix);
    ICOORD vertical;
    vertical.set_with_shrink(vertical_x, vertical_y);
    TabVector::MergeSimilarTabVectors(vertical, vectors, NULL);
    // Iterate the vectors to flip them. x and y were flipped for horizontal
    // lines, so FindLineVectors can work just with the vertical case.
    // See GetLineBoxes for more on the flip.
    TabVector_IT h_it(vectors);
    for (h_it.mark_cycle_pt(); !h_it.cycled_list(); h_it.forward()) {
      h_it.data()->XYFlip();
    }
  } else {
    // No lines found: the caller's pix_hline becomes NULL.
    pixDestroy(pix_hline);
  }
}
// Finds vertical lines in the given list of BLOBNBOXes. bleft and tright
// are the bounds of the image on which the input line_bblobs were found.
// The input line_bblobs list is const really.
// The output vertical_x and vertical_y are the total of all the vectors.
// The output list of TabVector makes no reference to the input BLOBNBOXes.
void LineFinder::FindLineVectors(const ICOORD& bleft, const ICOORD& tright,
                                 BLOBNBOX_LIST* line_bblobs,
                                 int* vertical_x, int* vertical_y,
                                 TabVector_LIST* vectors) {
  BLOBNBOX_IT bbox_it(line_bblobs);
  int b_count = 0;
  // Put all the blobs into the grid to find the lines, and move the blobs
  // to the output lists.
  AlignedBlob blob_grid(kLineFindGridSize, bleft, tright);
  for (bbox_it.mark_cycle_pt(); !bbox_it.cycled_list(); bbox_it.forward()) {
    BLOBNBOX* bblob = bbox_it.data();
    // Mark every blob as a candidate and give it full-width rules so the
    // alignment search is unconstrained.
    bblob->set_left_tab_type(TT_MAYBE_ALIGNED);
    bblob->set_left_rule(bleft.x());
    bblob->set_right_rule(tright.x());
    bblob->set_left_crossing_rule(bleft.x());
    bblob->set_right_crossing_rule(tright.x());
    blob_grid.InsertBBox(false, true, bblob);
    ++b_count;
  }
  if (b_count == 0)
    return;

  // Search the entire grid, looking for vertical line vectors.
  BlobGridSearch lsearch(&blob_grid);
  BLOBNBOX* bbox;
  TabVector_IT vector_it(vectors);
  // Start with a pure vertical direction; FindVerticalAlignment accumulates
  // the actual directions into vertical_x/vertical_y.
  *vertical_x = 0;
  *vertical_y = 1;
  lsearch.StartFullSearch();
  while ((bbox = lsearch.NextFullSearch()) != NULL) {
    if (bbox->left_tab_type() == TT_MAYBE_ALIGNED) {
      const TBOX& box = bbox->bounding_box();
      if (AlignedBlob::WithinTestRegion(2, box.left(), box.bottom()))
        tprintf("Finding line vector starting at bbox (%d,%d)\n",
                box.left(), box.bottom());
      AlignedBlobParams align_params(*vertical_x, *vertical_y, box.width());
      TabVector* vector = blob_grid.FindVerticalAlignment(align_params, bbox,
                                                          vertical_x,
                                                          vertical_y);
      if (vector != NULL) {
        // Freeze: separator lines never need refitting or merging by box.
        vector->Freeze();
        vector_it.add_to_end(vector);
      }
    }
  }
}
// Returns a Pix music mask if music is detected.
// Any vertical line that has at least 5 intersections in sufficient density
// is taken to be a bar. Bars are used as a seed and the entire touching
// component is added to the output music mask and subtracted from the lines.
// Returns NULL and does minimal work if no music is found.
// On success, *v_empty/*h_empty are updated to reflect whether any line
// pixels remain after subtraction of the music mask.
static Pix* FilterMusic(int resolution, Pix* pix_closed,
                        Pix* pix_vline, Pix* pix_hline,
                        l_int32* v_empty, l_int32* h_empty) {
  int max_stave_height = static_cast<int>(resolution * kMaxStaveHeight);
  // Intersections of candidate v-lines and h-lines: stave crossings.
  Pix* intersection_pix = pixAnd(NULL, pix_vline, pix_hline);
  Boxa* boxa = pixConnComp(pix_vline, NULL, 8);
  // Iterate over the boxes to find music bars.
  int nboxes = boxaGetCount(boxa);
  Pix* music_mask = NULL;
  for (int i = 0; i < nboxes; ++i) {
    Box* box = boxaGetBox(boxa, i, L_CLONE);
    l_int32 x, y, box_width, box_height;
    boxGetGeometry(box, &x, &y, &box_width, &box_height);
    int joins = NumTouchingIntersections(box, intersection_pix);
    // Test for the join density being at least 5 per max_stave_height,
    // ie (joins-1)/box_height >= (5-1)/max_stave_height.
    if (joins >= 5 && (joins - 1) * max_stave_height >= 4 * box_height) {
      // This is a music bar. Add to the mask (created lazily on first bar).
      if (music_mask == NULL)
        music_mask = pixCreate(pixGetWidth(pix_vline), pixGetHeight(pix_vline),
                               1);
      pixSetInRect(music_mask, box);
    }
    boxDestroy(&box);
  }
  boxaDestroy(&boxa);
  pixDestroy(&intersection_pix);
  if (music_mask != NULL) {
    // The mask currently contains just the bars. Use the mask as a seed
    // and the pix_closed as the mask for a seedfill to get all the
    // intersecting staves.
    pixSeedfillBinary(music_mask, music_mask, pix_closed, 8);
    // Filter out false positives. CCs in the music_mask should be the vast
    // majority of the pixels in their bounding boxes, as we expect just a
    // tiny amount of text, a few phrase marks, and crescendo etc left.
    // NOTE: this inner boxa deliberately shadows the (already destroyed)
    // outer one.
    Boxa* boxa = pixConnComp(music_mask, NULL, 8);
    // Iterate over the boxes to find music components.
    int nboxes = boxaGetCount(boxa);
    for (int i = 0; i < nboxes; ++i) {
      Box* box = boxaGetBox(boxa, i, L_CLONE);
      // Count the mask pixels vs all closed-image pixels in the same box.
      Pix* rect_pix = pixClipRectangle(music_mask, box, NULL);
      l_int32 music_pixels;
      pixCountPixels(rect_pix, &music_pixels, NULL);
      pixDestroy(&rect_pix);
      rect_pix = pixClipRectangle(pix_closed, box, NULL);
      l_int32 all_pixels;
      pixCountPixels(rect_pix, &all_pixels, NULL);
      pixDestroy(&rect_pix);
      if (music_pixels < kMinMusicPixelFraction * all_pixels) {
        // False positive. Delete from the music mask.
        pixClearInRect(music_mask, box);
      }
      boxDestroy(&box);
    }
    l_int32 no_remaining_music;
    boxaDestroy(&boxa);
    pixZero(music_mask, &no_remaining_music);
    if (no_remaining_music) {
      // Everything was filtered out: report no music at all.
      pixDestroy(&music_mask);
    } else {
      pixSubtract(pix_vline, pix_vline, music_mask);
      pixSubtract(pix_hline, pix_hline, music_mask);
      // We may have deleted all the lines.
      pixZero(pix_vline, v_empty);
      pixZero(pix_hline, h_empty);
    }
  }
  return music_mask;
}
// Most of the heavy lifting of line finding. Given src_pix and its separate
// resolution, returns image masks:
// pix_vline candidate vertical lines.
// pix_non_vline pixels that didn't look like vertical lines.
// pix_hline candidate horizontal lines.
// pix_non_hline pixels that didn't look like horizontal lines.
// pix_intersections pixels where vertical and horizontal lines meet.
// pix_music_mask candidate music staves.
// This function promises to initialize all the output (2nd level) pointers,
// but any of the returns that are empty will be NULL on output.
// None of the input (1st level) pointers may be NULL except pix_music_mask,
// which will disable music detection, and pixa_display.
void LineFinder::GetLineMasks(int resolution, Pix* src_pix,
                              Pix** pix_vline, Pix** pix_non_vline,
                              Pix** pix_hline, Pix** pix_non_hline,
                              Pix** pix_intersections, Pix** pix_music_mask,
                              Pixa* pixa_display) {
  Pix* pix_closed = NULL;
  Pix* pix_hollow = NULL;
  // Morphological element sizes are scaled from the image resolution so the
  // same fractions apply regardless of scan dpi.
  int max_line_width = resolution / kThinLineFraction;
  int min_line_length = resolution / kMinLineLengthFraction;
  if (pixa_display != NULL) {
    tprintf("Image resolution = %d, max line width = %d, min length=%d\n",
            resolution, max_line_width, min_line_length);
  }
  int closing_brick = max_line_width / 3;
  PERF_COUNT_START("GetLineMasksMorph")
// only use opencl if compiled w/ OpenCL and selected device is opencl
#ifdef USE_OPENCL
  if (OpenclDevice::selectedDeviceIsOpenCL()) {
    // OpenCL pixGetLines Operation
    int clStatus = OpenclDevice::initMorphCLAllocations(pixGetWpl(src_pix),
                                                        pixGetHeight(src_pix),
                                                        src_pix);
    // pix_closed is only needed later when music detection is requested.
    bool getpixclosed = pix_music_mask != NULL ? true : false;
    OpenclDevice::pixGetLinesCL(NULL, src_pix, pix_vline, pix_hline,
                                &pix_closed, getpixclosed, closing_brick,
                                closing_brick, max_line_width, max_line_width,
                                min_line_length, min_line_length);
  } else {
#endif
  // Close up small holes, making it less likely that false alarms are found
  // in thickened text (as it will become more solid) and also smoothing over
  // some line breaks and nicks in the edges of the lines.
  pix_closed = pixCloseBrick(NULL, src_pix, closing_brick, closing_brick);
  if (pixa_display != NULL)
    pixaAddPix(pixa_display, pix_closed, L_CLONE);
  // Open up with a big box to detect solid areas, which can then be subtracted.
  // This is very generous and will leave in even quite wide lines.
  Pix* pix_solid = pixOpenBrick(NULL, pix_closed, max_line_width,
                                max_line_width);
  if (pixa_display != NULL)
    pixaAddPix(pixa_display, pix_solid, L_CLONE);
  // pix_hollow = closed image minus its solid areas: candidate line pixels.
  pix_hollow = pixSubtract(NULL, pix_closed, pix_solid);
  pixDestroy(&pix_solid);
  // Now open up in both directions independently to find lines of at least
  // 1 inch/kMinLineLengthFraction in length.
  if (pixa_display != NULL)
    pixaAddPix(pixa_display, pix_hollow, L_CLONE);
  *pix_vline = pixOpenBrick(NULL, pix_hollow, 1, min_line_length);
  *pix_hline = pixOpenBrick(NULL, pix_hollow, min_line_length, 1);
  pixDestroy(&pix_hollow);
#ifdef USE_OPENCL
  }
#endif
  PERF_COUNT_END
  // Lines are sufficiently rare, that it is worth checking for a zero image.
  l_int32 v_empty = 0;
  l_int32 h_empty = 0;
  pixZero(*pix_vline, &v_empty);
  pixZero(*pix_hline, &h_empty);
  if (pix_music_mask != NULL) {
    // Music detection needs both line orientations; FilterMusic may also
    // empty the line masks, updating v_empty/h_empty.
    if (!v_empty && !h_empty) {
      *pix_music_mask = FilterMusic(resolution, pix_closed,
                                    *pix_vline, *pix_hline,
                                    &v_empty, &h_empty);
    } else {
      *pix_music_mask = NULL;
    }
  }
  pixDestroy(&pix_closed);
  Pix* pix_nonlines = NULL;
  *pix_intersections = NULL;
  Pix* extra_non_hlines = NULL;
  if (!v_empty) {
    // Subtract both line candidates from the source to get definite non-lines.
    pix_nonlines = pixSubtract(NULL, src_pix, *pix_vline);
    if (!h_empty) {
      pixSubtract(pix_nonlines, pix_nonlines, *pix_hline);
      // Intersections are a useful indicator for likelihood of being a line.
      *pix_intersections = pixAnd(NULL, *pix_vline, *pix_hline);
      // Candidate vlines are not hlines (apart from the intersections)
      // and vice versa.
      extra_non_hlines = pixSubtract(NULL, *pix_vline, *pix_intersections);
    }
    // Erode then seed-fill to keep only non-line components of significant
    // residue, restored to their full original extent.
    *pix_non_vline = pixErodeBrick(NULL, pix_nonlines, kMaxLineResidue, 1);
    pixSeedfillBinary(*pix_non_vline, *pix_non_vline, pix_nonlines, 8);
    if (!h_empty) {
      // Candidate hlines are not vlines.
      pixOr(*pix_non_vline, *pix_non_vline, *pix_hline);
      pixSubtract(*pix_non_vline, *pix_non_vline, *pix_intersections);
    }
    if (!FilterFalsePositives(resolution, *pix_non_vline, *pix_intersections,
                              *pix_vline))
      // pixDestroy takes a Pix**, so this also NULLs *pix_vline for the
      // caller, honoring the "empty returns are NULL" contract above.
      pixDestroy(pix_vline);  // No candidates left.
  } else {
    // No vertical lines.
    pixDestroy(pix_vline);
    *pix_non_vline = NULL;
    if (!h_empty) {
      pix_nonlines = pixSubtract(NULL, src_pix, *pix_hline);
    }
  }
  if (h_empty) {
    pixDestroy(pix_hline);
    *pix_non_hline = NULL;
    if (v_empty) {
      // Nothing left to display or clean up: pix_nonlines was never made.
      return;
    }
  } else {
    *pix_non_hline = pixErodeBrick(NULL, pix_nonlines, 1, kMaxLineResidue);
    pixSeedfillBinary(*pix_non_hline, *pix_non_hline, pix_nonlines, 8);
    if (extra_non_hlines != NULL) {
      pixOr(*pix_non_hline, *pix_non_hline, extra_non_hlines);
      pixDestroy(&extra_non_hlines);
    }
    if (!FilterFalsePositives(resolution, *pix_non_hline, *pix_intersections,
                              *pix_hline))
      pixDestroy(pix_hline);  // No candidates left.
  }
  if (pixa_display != NULL) {
    if (*pix_vline != NULL) pixaAddPix(pixa_display, *pix_vline, L_CLONE);
    if (*pix_hline != NULL) pixaAddPix(pixa_display, *pix_hline, L_CLONE);
    if (pix_nonlines != NULL) pixaAddPix(pixa_display, pix_nonlines, L_CLONE);
    if (*pix_non_vline != NULL)
      pixaAddPix(pixa_display, *pix_non_vline, L_CLONE);
    if (*pix_non_hline != NULL)
      pixaAddPix(pixa_display, *pix_non_hline, L_CLONE);
    if (*pix_intersections != NULL)
      pixaAddPix(pixa_display, *pix_intersections, L_CLONE);
    if (pix_music_mask != NULL && *pix_music_mask != NULL)
      pixaAddPix(pixa_display, *pix_music_mask, L_CLONE);
  }
  pixDestroy(&pix_nonlines);
}
// Returns a list of boxes corresponding to the candidate line segments. Sets
// the line_crossings member of the boxes so we can later determin the number
// of intersections touched by a full line.
void LineFinder::GetLineBoxes(bool horizontal_lines,
Pix* pix_lines, Pix* pix_intersections,
C_BLOB_LIST* line_cblobs,
BLOBNBOX_LIST* line_bblobs) {
// Put a single pixel crack in every line at an arbitrary spacing,
// so they break up and the bounding boxes can be used to get the
// direction accurately enough without needing outlines.
int wpl = pixGetWpl(pix_lines);
int width = pixGetWidth(pix_lines);
int height = pixGetHeight(pix_lines);
l_uint32* data = pixGetData(pix_lines);
if (horizontal_lines) {
for (int y = 0; y < height; ++y, data += wpl) {
for (int x = kCrackSpacing; x < width; x += kCrackSpacing) {
CLEAR_DATA_BIT(data, x);
}
}
} else {
for (int y = kCrackSpacing; y < height; y += kCrackSpacing) {
memset(data + wpl * y, 0, wpl * sizeof(*data));
}
}
// Get the individual connected components
Boxa* boxa = pixConnComp(pix_lines, NULL, 8);
ConvertBoxaToBlobs(width, height, &boxa, line_cblobs);
// Make the BLOBNBOXes from the C_BLOBs.
C_BLOB_IT blob_it(line_cblobs);
BLOBNBOX_IT bbox_it(line_bblobs);
for (blob_it.mark_cycle_pt(); !blob_it.cycled_list(); blob_it.forward()) {
C_BLOB* cblob = blob_it.data();
BLOBNBOX* bblob = new BLOBNBOX(cblob);
bbox_it.add_to_end(bblob);
// Determine whether the line segment touches two intersections.
const TBOX& bbox = bblob->bounding_box();
Box* box = boxCreate(bbox.left(), bbox.bottom(),
bbox.width(), bbox.height());
bblob->set_line_crossings(NumTouchingIntersections(box, pix_intersections));
boxDestroy(&box);
// Transform the bounding box prior to finding lines. To save writing
// two line finders, flip x and y for horizontal lines and re-use the
// tab-stop detection code. For vertical lines we still have to flip the
// y-coordinates to switch from leptonica coords to tesseract coords.
if (horizontal_lines) {
// Note that we have Leptonica coords stored in a Tesseract box, so that
// bbox.bottom(), being the MIN y coord, is actually the top, so to get
// back to Leptonica coords in RemoveUnusedLineSegments, we have to
// use height - box.right() as the top, which looks very odd.
TBOX new_box(height - bbox.top(), bbox.left(),
height - bbox.bottom(), bbox.right());
bblob->set_bounding_box(new_box);
} else {
TBOX new_box(bbox.left(), height - bbox.top(),
bbox.right(), height - bbox.bottom());
bblob->set_bounding_box(new_box);
}
}
}
} // namespace tesseract.
| C++ |
// Copyright 2011 Google Inc. All Rights Reserved.
// Author: rays@google.com (Ray Smith)
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef TESSERACT_TEXTORD_TEXTLINEPROJECTION_H_
#define TESSERACT_TEXTORD_TEXTLINEPROJECTION_H_
#include "blobgrid.h" // For BlobGrid
class DENORM;
struct Pix;
struct TPOINT;
namespace tesseract {
class ColPartition;
// Simple class to encapsulate the computation of an image representing
// local textline density, and function(s) to make use of it.
// The underlying principle is that if you smear connected components
// horizontally (vertically for components on a vertically written textline)
// and count the number of smeared components in an image, then the resulting
// image shows the density of the textlines at each image position.
class TextlineProjection {
 public:
  // The down-scaling factor is computed to obtain a projection resolution
  // of about 100 dpi, whatever the input.
  explicit TextlineProjection(int resolution);
  // NOTE(review): presumably releases pix_ — confirm in the .cpp.
  ~TextlineProjection();

  // Build the projection profile given the input_block containing lists of
  // blobs, a rotation to convert to image coords,
  // and a full-resolution nontext_map, marking out areas to avoid.
  // During construction, we have the following assumptions:
  // The rotation is a multiple of 90 degrees, ie no deskew yet.
  // The blobs have had their left and right rules set to also limit
  // the range of projection.
  void ConstructProjection(TO_BLOCK* input_block,
                           const FCOORD& rotation, Pix* nontext_map);

  // Display the blobs in the window colored according to textline quality.
  void PlotGradedBlobs(BLOBNBOX_LIST* blobs, ScrollView* win);

  // Moves blobs that look like they don't sit well on a textline from the
  // input blobs list to the output small_blobs list.
  // This gets them away from initial textline finding to stop diacritics
  // from forming incorrect textlines. (Introduced mainly to fix Thai.)
  void MoveNonTextlineBlobs(BLOBNBOX_LIST* blobs,
                            BLOBNBOX_LIST* small_blobs) const;

  // Create a window and display the projection in it.
  void DisplayProjection() const;

  // Compute the distance of the box from the partition using curved projection
  // space. As DistanceOfBoxFromBox, except that the direction is taken from
  // the ColPartition and the median bounds of the ColPartition are used as
  // the to_box.
  int DistanceOfBoxFromPartition(const TBOX& box, const ColPartition& part,
                                 const DENORM* denorm, bool debug) const;

  // Compute the distance from the from_box to the to_box using curved
  // projection space. Separation that involves a decrease in projection
  // density (moving from the from_box to the to_box) is weighted more heavily
  // than constant density, and an increase is weighted less.
  // If horizontal_textline is true, then curved space is used vertically,
  // as for a diacritic on the edge of a textline.
  // The projection uses original image coords, so denorm is used to get
  // back to the image coords from box/part space.
  int DistanceOfBoxFromBox(const TBOX& from_box, const TBOX& to_box,
                           bool horizontal_textline,
                           const DENORM* denorm, bool debug) const;

  // Compute the distance between (x, y1) and (x, y2) using the rule that
  // a decrease in textline density is weighted more heavily than an increase.
  // The coordinates are in source image space, ie processed by any denorm
  // already, but not yet scaled by scale_factor_.
  // Going from the outside of a textline to the inside should measure much
  // less distance than going from the inside of a textline to the outside.
  int VerticalDistance(bool debug, int x, int y1, int y2) const;

  // Compute the distance between (x1, y) and (x2, y) using the rule that
  // a decrease in textline density is weighted more heavily than an increase.
  int HorizontalDistance(bool debug, int x1, int x2, int y) const;

  // Returns true if the blob appears to be outside of a horizontal textline.
  // Such blobs are potentially diacritics (even if large in Thai) and should
  // be kept away from initial textline finding.
  bool BoxOutOfHTextline(const TBOX& box, const DENORM* denorm,
                         bool debug) const;

  // Evaluates the textlineiness of a ColPartition. Uses EvaluateBox below,
  // but uses the median top/bottom for horizontal and median left/right for
  // vertical instead of the bounding box edges.
  // Evaluates for both horizontal and vertical and returns the best result,
  // with a positive value for horizontal and a negative value for vertical.
  int EvaluateColPartition(const ColPartition& part, const DENORM* denorm,
                           bool debug) const;

  // Computes the mean projection gradients over the horizontal and vertical
  // edges of the box:
  //            -h-h-h-h-h-h
  //  |------------|        mean=htop   -v|+v--------+v|-v
  //  |+h+h+h+h+h+h|                    -v|+v        +v|-v
  //  |            |                    -v|+v        +v|-v
  //  |    box     |                    -v|+v  box   +v|-v
  //  |            |                    -v|+v        +v|-v
  //  |+h+h+h+h+h+h|                    -v|+v        +v|-v
  //  |------------|        mean=hbot   -v|+v--------+v|-v
  //            -h-h-h-h-h-h
  //                                    mean=vleft    mean=vright
  //
  // Returns MAX(htop,hbot) - MAX(vleft,vright), which is a positive number
  // for a horizontal textline, a negative number for a vertical textline,
  // and near zero for undecided. Undecided is most likely non-text.
  int EvaluateBox(const TBOX& box, const DENORM* denorm, bool debug) const;

 private:
  // Internal version of EvaluateBox returns the unclipped gradients as well
  // as the result of EvaluateBox.
  // hgrad1 and hgrad2 are the gradients for the horizontal textline.
  int EvaluateBoxInternal(const TBOX& box, const DENORM* denorm, bool debug,
                          int* hgrad1, int* hgrad2,
                          int* vgrad1, int* vgrad2) const;

  // Helper returns the mean gradient value for the horizontal row at the given
  // y, (in the external coordinates) by subtracting the mean of the transformed
  // row 2 pixels above from the mean of the transformed row 2 pixels below.
  // This gives a positive value for a good top edge and negative for bottom.
  // Returns the best result out of +2/-2, +3/-1, +1/-3 pixels from the edge.
  int BestMeanGradientInRow(const DENORM* denorm, inT16 min_x, inT16 max_x,
                            inT16 y, bool best_is_max) const;

  // Helper returns the mean gradient value for the vertical column at the
  // given x, (in the external coordinates) by subtracting the mean of the
  // transformed column 2 pixels left from the mean of the transformed column
  // 2 pixels to the right.
  // This gives a positive value for a good left edge and negative for right.
  // Returns the best result out of +2/-2, +3/-1, +1/-3 pixels from the edge.
  int BestMeanGradientInColumn(const DENORM* denorm, inT16 x, inT16 min_y,
                               inT16 max_y, bool best_is_max) const;

  // Helper returns the mean pixel value over the line between the start_pt and
  // end_pt (inclusive), but shifted perpendicular to the line in the projection
  // image by offset pixels. For simplicity, it is assumed that the vector is
  // either nearly horizontal or nearly vertical. It works on skewed textlines!
  // The end points are in external coordinates, and will be denormalized with
  // the denorm if not NULL before further conversion to pix coordinates.
  // After all the conversions, the offset is added to the direction
  // perpendicular to the line direction. The offset is thus in projection image
  // coordinates, which allows the caller to get a guaranteed displacement
  // between pixels used to calculate gradients.
  int MeanPixelsInLineSegment(const DENORM* denorm, int offset,
                              TPOINT start_pt, TPOINT end_pt) const;

  // Helper function to add 1 to a rectangle in source image coords to the
  // internal projection pix_.
  void IncrementRectangle8Bit(const TBOX& box);

  // Inserts a list of blobs into the projection.
  // Rotation is a multiple of 90 degrees to get from blob coords to
  // nontext_map coords, image_box is the bounds of the nontext_map.
  // Blobs are spread horizontally or vertically according to their internal
  // flags, but the spreading is truncated by set pixels in the nontext_map
  // and also by the horizontal rule line limits on the blobs.
  void ProjectBlobs(BLOBNBOX_LIST* blobs, const FCOORD& rotation,
                    const TBOX& image_box, Pix* nontext_map);

  // Pads the bounding box of the given blob according to whether it is on
  // a horizontal or vertical text line, taking into account tab-stops near
  // the blob. Returns true if padding was in the horizontal direction.
  bool PadBlobBox(BLOBNBOX* blob, TBOX* bbox);

  // Helper denormalizes the TPOINT with the denorm if not NULL, then
  // converts to pix_ coordinates.
  void TransformToPixCoords(const DENORM* denorm, TPOINT* pt) const;

  // Helper truncates the TPOINT to be within the pix_.
  void TruncateToImageBounds(TPOINT* pt) const;

  // Transform tesseract coordinates to coordinates used in the pix.
  int ImageXToProjectionX(int x) const;
  int ImageYToProjectionY(int y) const;

  // The down-sampling scale factor used in building the image.
  int scale_factor_;
  // The blob coordinates of the top-left (origin of the pix_) in tesseract
  // coordinates. Used to transform the bottom-up tesseract coordinates to
  // the top-down coordinates of the pix.
  int x_origin_;
  int y_origin_;
  // The image of horizontally smeared blob boxes summed to provide a
  // textline density map. As with a horizontal projection, the map has
  // dips in the gaps between textlines.
  Pix* pix_;
};
} // namespace tesseract.
#endif // TESSERACT_TEXTORD_TEXTLINEPROJECTION_H_
| C++ |
///////////////////////////////////////////////////////////////////////
// File: baselinedetect.cpp
// Description: Initial Baseline Determination.
// Copyright 2012 Google Inc. All Rights Reserved.
// Author: rays@google.com (Ray Smith)
// Created: Mon Apr 30 10:15:31 PDT 2012
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#ifdef _MSC_VER
#define _USE_MATH_DEFINES
#endif // _MSC_VER
#ifdef HAVE_CONFIG_H
#include "config_auto.h"
#endif
#include "baselinedetect.h"
#include <math.h>
#include "allheaders.h"
#include "blobbox.h"
#include "detlinefit.h"
#include "drawtord.h"
#include "helpers.h"
#include "linlsq.h"
#include "makerow.h"
#include "textord.h"
#include "tprintf.h"
#include "underlin.h"
// Number of displacement modes kept in displacement_modes_.
const int kMaxDisplacementsModes = 3;
// Number of points to skip when retrying initial fit.
const int kNumSkipPoints = 3;
// Max angle deviation (in radians) allowed to keep the independent baseline.
const double kMaxSkewDeviation = 1.0 / 64;
// Fraction of line spacing estimate for quantization of blob displacements.
const double kOffsetQuantizationFactor = 3.0 / 64;
// Fraction of line spacing estimate for computing blob fit error.
const double kFitHalfrangeFactor = 6.0 / 64;
// Max fraction of line spacing allowed before a baseline counts as badly
// fitting.
const double kMaxBaselineError = 3.0 / 64;
// Multiple of linespacing that sets max_blob_size in TO_BLOCK.
// Copied from textord_excess_blobsize.
const double kMaxBlobSizeMultiple = 1.3;
// Min fraction of linespacing gaps that should be close to the model before
// we will force the linespacing model on all the lines.
const double kMinFittingLinespacings = 0.25;
// A y-coordinate within a textline that is to be debugged.
//#define kDebugYCoord 1525
namespace tesseract {
// Constructs a BaselineRow over the blobs of the given TO_ROW, using the
// block's line_spacing estimate to scale the quantization and error
// tolerances used in baseline fitting.
BaselineRow::BaselineRow(double line_spacing, TO_ROW* to_row)
  : blobs_(to_row->blob_list()),
    baseline_pt1_(0.0f, 0.0f), baseline_pt2_(0.0f, 0.0f),
    baseline_error_(0.0), good_baseline_(false) {
  ComputeBoundingBox();
  // Compute a scale factor for rounding to ints.
  disp_quant_factor_ = kOffsetQuantizationFactor * line_spacing;
  fit_halfrange_ = kFitHalfrangeFactor * line_spacing;
  max_baseline_error_ = kMaxBaselineError * line_spacing;
}
// Copies the fitted baseline into the legacy slope/intercept fields of row.
void BaselineRow::SetupOldLineParameters(TO_ROW* row) const {
  // TODO(rays) get rid of this when m and c are no longer used.
  // Slope of the baseline, derived from its angle.
  double slope = tan(BaselineAngle());
  // Actual intercept of the baseline on the y-axis.
  float intercept = StraightYAtX(0.0);
  row->set_line(slope, intercept, baseline_error_);
  row->set_parallel_line(slope, intercept, baseline_error_);
}
// Outputs diagnostic information.
void BaselineRow::Print() const {
  // Endpoints, angle and intercept of the fitted baseline.
  tprintf("Baseline (%g,%g)->(%g,%g), angle=%g, intercept=%g\n",
          baseline_pt1_.x(), baseline_pt1_.y(),
          baseline_pt2_.x(), baseline_pt2_.y(),
          BaselineAngle(), StraightYAtX(0.0));
  tprintf("Quant factor=%g, error=%g, good=%d, box:",
          disp_quant_factor_, baseline_error_, good_baseline_);
  // The row's bounding box completes the diagnostic output.
  bounding_box_.print();
}
// Returns the skew angle (in radians) of the current baseline, constrained
// to [-pi/2, pi/2] (the code below reduces modulo pi, since a baseline
// direction is only unique within a range of pi).
double BaselineRow::BaselineAngle() const {
  FCOORD baseline_dir(baseline_pt2_ - baseline_pt1_);
  double angle = baseline_dir.angle();
  // Baseline directions are only unique in a range of pi so constrain to
  // [-pi/2, pi/2].
  return fmod(angle + M_PI * 1.5, M_PI) - M_PI * 0.5;
}
// Computes and returns the linespacing at the middle of the overlap
// between this and other.
double BaselineRow::SpaceBetween(const BaselineRow& other) const {
// Find the x-centre of overlap of the lines.
float x = (MAX(bounding_box_.left(), other.bounding_box_.left()) +
MIN(bounding_box_.right(), other.bounding_box_.right())) / 2.0f;
// Find the vertical centre between them.
float y = (StraightYAtX(x) + other.StraightYAtX(x)) / 2.0f;
// Find the perpendicular distance of (x,y) from each line.
FCOORD pt(x, y);
return PerpDistanceFromBaseline(pt) + other.PerpDistanceFromBaseline(pt);
}
// Computes and returns the displacement of the center of the line
// perpendicular to the given direction.
double BaselineRow::PerpDisp(const FCOORD& direction) const {
float middle_x = (bounding_box_.left() + bounding_box_.right()) / 2.0f;
FCOORD middle_pos(middle_x, StraightYAtX(middle_x));
return direction * middle_pos / direction.length();
}
// Computes the y coordinate at the given x using the straight baseline
// defined by baseline_pt1_ and baseline_pt2_.
double BaselineRow::StraightYAtX(double x) const {
  double dx = baseline_pt2_.x() - baseline_pt1_.x();
  // A vertical baseline has no unique y at x, so return the mean y.
  if (dx == 0.0)
    return (baseline_pt1_.y() + baseline_pt2_.y()) / 2.0;
  // Linear interpolation/extrapolation along the baseline.
  return baseline_pt1_.y() +
      (x - baseline_pt1_.x()) * (baseline_pt2_.y() - baseline_pt1_.y()) / dx;
}
// Fits a straight baseline to the points. Returns true if it had enough
// points to be reasonably sure of the fitted baseline.
// If use_box_bottoms is false, baselines positions are formed by
// considering the outlines of the blobs.
bool BaselineRow::FitBaseline(bool use_box_bottoms) {
  // Deterministic fitting is used wherever possible.
  fitter_.Clear();
  // Linear least squares is a backup if the DetLineFit produces a bad line.
  LLSQ llsq;
  BLOBNBOX_IT blob_it(blobs_);
  // Feed each blob's (x-center, baseline y) into both fitters.
  for (blob_it.mark_cycle_pt(); !blob_it.cycled_list(); blob_it.forward()) {
    BLOBNBOX* blob = blob_it.data();
    if (!use_box_bottoms) blob->EstimateBaselinePosition();
    const TBOX& box = blob->bounding_box();
    int x_middle = (box.left() + box.right()) / 2;
#ifdef kDebugYCoord
    if (box.bottom() < kDebugYCoord && box.top() > kDebugYCoord) {
      tprintf("Box bottom = %d, baseline pos=%d for box at:",
              box.bottom(), blob->baseline_position());
      box.print();
    }
#endif
    // Half the box width is used as the point's weight/halfwidth.
    fitter_.Add(ICOORD(x_middle, blob->baseline_position()), box.width() / 2);
    llsq.add(x_middle, blob->baseline_position());
  }
  // Fit the line.
  ICOORD pt1, pt2;
  baseline_error_ = fitter_.Fit(&pt1, &pt2);
  baseline_pt1_ = pt1;
  baseline_pt2_ = pt2;
  if (baseline_error_ > max_baseline_error_ &&
      fitter_.SufficientPointsForIndependentFit()) {
    // The fit was bad but there were plenty of points, so try skipping
    // the first and last few, and use the new line if it dramatically improves
    // the error of fit.
    double error = fitter_.Fit(kNumSkipPoints, kNumSkipPoints, &pt1, &pt2);
    if (error < baseline_error_ / 2.0) {
      baseline_error_ = error;
      baseline_pt1_ = pt1;
      baseline_pt2_ = pt2;
    }
  }
  int debug = 0;
#ifdef kDebugYCoord
  Print();
  debug = bounding_box_.bottom() < kDebugYCoord &&
          bounding_box_.top() > kDebugYCoord
        ? 3 : 2;
#endif
  // Now we obtained a direction from that fit, see if we can improve the
  // fit using the same direction and some other start point.
  FCOORD direction(pt2 - pt1);
  double target_offset = direction * pt1;
  good_baseline_ = false;
  // May replace the baseline and set good_baseline_ if the constrained
  // fit is at least as good.
  FitConstrainedIfBetter(debug, direction, 0.0, target_offset);
  // Wild lines can be produced because DetLineFit allows vertical lines, but
  // vertical text has been rotated so angles over pi/4 should be disallowed.
  // Near vertical lines can still be produced by vertically aligned components
  // on very short lines.
  double angle = BaselineAngle();
  if (fabs(angle) > M_PI * 0.25) {
    // Use the llsq fit as a backup.
    baseline_pt1_ = llsq.mean_point();
    baseline_pt2_ = baseline_pt1_ + FCOORD(1.0f, llsq.m());
    // TODO(rays) get rid of this when m and c are no longer used.
    double m = llsq.m();
    double c = llsq.c(m);
    baseline_error_ = llsq.rms(m, c);
    good_baseline_ = false;
  }
  return good_baseline_;
}
// Modifies an existing result of FitBaseline to be parallel to the given
// direction vector if that produces a better result.
void BaselineRow::AdjustBaselineToParallel(int debug,
                                           const FCOORD& direction) {
  SetupBlobDisplacements(direction);
  // With no displacement modes there is nothing to fit against.
  if (displacement_modes_.empty())
    return;
#ifdef kDebugYCoord
  // Raise the debug level for rows that span the y-coordinate of interest.
  if (bounding_box_.bottom() < kDebugYCoord &&
      bounding_box_.top() > kDebugYCoord && debug < 3)
    debug = 3;
#endif
  // Refit constrained to direction, targeting the most common displacement.
  FitConstrainedIfBetter(debug, direction, 0.0, displacement_modes_[0]);
}
// Modifies the baseline to snap to the textline grid if the existing
// result is not good enough.
// Returns fmod of the (possibly adjusted) row displacement by line_spacing,
// or the input line_offset unchanged if the row has no blobs.
double BaselineRow::AdjustBaselineToGrid(int debug,
                                         const FCOORD& direction,
                                         double line_spacing,
                                         double line_offset) {
  if (blobs_->empty()) {
    if (debug > 1) {
      tprintf("Row empty at:");
      bounding_box_.print();
    }
    return line_offset;
  }
  // Find the displacement_modes_ entry nearest to the grid.
  double best_error = 0.0;
  int best_index = -1;
  for (int i = 0; i < displacement_modes_.size(); ++i) {
    double blob_y = displacement_modes_[i];
    double error = BaselineBlock::SpacingModelError(blob_y, line_spacing,
                                                    line_offset);
    if (debug > 1) {
      tprintf("Mode at %g has error %g from model \n", blob_y, error);
    }
    if (best_index < 0 || error < best_error) {
      best_error = error;
      best_index = i;
    }
  }
  // We will move the baseline only if the chosen mode is close enough to the
  // model.
  double model_margin = max_baseline_error_ - best_error;
  if (best_index >= 0 && model_margin > 0.0) {
    // But if the current baseline is already close to the mode there is no
    // point, and only the potential to damage accuracy by changing its angle.
    double perp_disp = PerpDisp(direction);
    double shift = displacement_modes_[best_index] - perp_disp;
    if (fabs(shift) > max_baseline_error_) {
      if (debug > 1) {
        tprintf("Attempting linespacing model fit with mode %g to row at:",
                displacement_modes_[best_index]);
        bounding_box_.print();
      }
      // The margin acts as the cheat_allowance: a slightly worse fit is
      // accepted when it agrees with the linespacing model.
      FitConstrainedIfBetter(debug, direction, model_margin,
                             displacement_modes_[best_index]);
    } else if (debug > 1) {
      tprintf("Linespacing model only moves current line by %g for row at:",
              shift);
      bounding_box_.print();
    }
  } else if (debug > 1) {
    tprintf("Linespacing model not close enough to any mode for row at:");
    bounding_box_.print();
  }
  return fmod(PerpDisp(direction), line_spacing);
}
// Sets up displacement_modes_ with the top few modes of the perpendicular
// distance of each blob from the given direction vector, after rounding.
void BaselineRow::SetupBlobDisplacements(const FCOORD& direction) {
  // Set of perpendicular displacements of the blob bottoms from the required
  // baseline direction.
  GenericVector<double> perp_blob_dists;
  displacement_modes_.truncate(0);
  // Gather the skew-corrected position of every blob.
  double min_dist = MAX_FLOAT32;
  double max_dist = -MAX_FLOAT32;
  BLOBNBOX_IT blob_it(blobs_);
  bool debug = false;
  for (blob_it.mark_cycle_pt(); !blob_it.cycled_list(); blob_it.forward()) {
    BLOBNBOX* blob = blob_it.data();
    const TBOX& box = blob->bounding_box();
#ifdef kDebugYCoord
    if (box.bottom() < kDebugYCoord && box.top() > kDebugYCoord) debug = true;
#endif
    // Displacement of the blob's baseline point along direction.
    FCOORD blob_pos((box.left() + box.right()) / 2.0f,
                    blob->baseline_position());
    double offset = direction * blob_pos;
    perp_blob_dists.push_back(offset);
    if (debug) {
      tprintf("Displacement %g for blob at:", offset);
      box.print();
    }
    UpdateRange(offset, &min_dist, &max_dist);
  }
  // Set up a histogram using disp_quant_factor_ as the bucket size.
  STATS dist_stats(IntCastRounded(min_dist / disp_quant_factor_),
                   IntCastRounded(max_dist / disp_quant_factor_) + 1);
  for (int i = 0; i < perp_blob_dists.size(); ++i) {
    dist_stats.add(IntCastRounded(perp_blob_dists[i] / disp_quant_factor_), 1);
  }
  // Keep the kMaxDisplacementsModes most common quantized displacements,
  // converted back to unquantized units.
  GenericVector<KDPairInc<float, int> > scaled_modes;
  dist_stats.top_n_modes(kMaxDisplacementsModes, &scaled_modes);
  if (debug) {
    for (int i = 0; i < scaled_modes.size(); ++i) {
      tprintf("Top mode = %g * %d\n",
              scaled_modes[i].key * disp_quant_factor_, scaled_modes[i].data);
    }
  }
  for (int i = 0; i < scaled_modes.size(); ++i)
    displacement_modes_.push_back(disp_quant_factor_ * scaled_modes[i].key);
}
// Fits a line in the given direction to blobs that are close to the given
// target_offset perpendicular displacement from the direction. The fit
// error is allowed to be cheat_allowance worse than the existing fit, and
// will still be used.
// If cheat_allowance > 0, the new fit will be good and replace the current
// fit if it has better fit (with cheat) OR its error is below
// max_baseline_error_ and the old fit is marked bad.
// Otherwise the new fit will only replace the old if it is really better,
// or the old fit is marked bad and the new fit has sufficient points, as
// well as being within the max_baseline_error_.
void BaselineRow::FitConstrainedIfBetter(int debug,
                                         const FCOORD& direction,
                                         double cheat_allowance,
                                         double target_offset) {
  // Only blobs within halfrange of target_offset take part in the fit.
  double halfrange = fit_halfrange_ * direction.length();
  double min_dist = target_offset - halfrange;
  double max_dist = target_offset + halfrange;
  ICOORD line_pt;
  double new_error = fitter_.ConstrainedFit(direction, min_dist, max_dist,
                                            debug > 2, &line_pt);
  // Allow cheat_allowance off the new error
  new_error -= cheat_allowance;
  double old_angle = BaselineAngle();
  double new_angle = direction.angle();
  if (debug > 1) {
    tprintf("Constrained error = %g, original = %g",
            new_error, baseline_error_);
    tprintf(" angles = %g, %g, delta=%g vs threshold %g\n",
            old_angle, new_angle,
            new_angle - old_angle, kMaxSkewDeviation);
  }
  bool new_good_baseline = new_error <= max_baseline_error_ &&
      (cheat_allowance > 0.0 || fitter_.SufficientPointsForIndependentFit());
  // The new will replace the old if any are true:
  // 1. the new error is better
  // 2. the old is NOT good, but the new is
  // 3. there is a wild angular difference between them (assuming that the new
  //    is a better guess at the angle.)
  if (new_error <= baseline_error_ ||
      (!good_baseline_ && new_good_baseline) ||
      fabs(new_angle - old_angle) > kMaxSkewDeviation) {
    baseline_error_ = new_error;
    baseline_pt1_ = line_pt;
    baseline_pt2_ = baseline_pt1_ + direction;
    good_baseline_ = new_good_baseline;
    if (debug > 1) {
      tprintf("Replacing with constrained baseline, good = %d\n",
              good_baseline_);
    }
  } else if (debug > 1) {
    tprintf("Keeping old baseline\n");
  }
}
// Returns the perpendicular distance of the point from the straight
// baseline.
double BaselineRow::PerpDistanceFromBaseline(const FCOORD& pt) const {
FCOORD baseline_vector(baseline_pt2_ - baseline_pt1_);
FCOORD offset_vector(pt - baseline_pt1_);
double distance = baseline_vector * offset_vector;
return sqrt(distance * distance / baseline_vector.sqlength());
}
// Computes the bounding box of the row.
void BaselineRow::ComputeBoundingBox() {
BLOBNBOX_IT it(blobs_);
TBOX box;
for (it.mark_cycle_pt(); !it.cycled_list(); it.forward()) {
box += it.data()->bounding_box();
}
bounding_box_ = box;
}
// Wraps each TO_ROW of the block in a BaselineRow, taking the initial line
// spacing estimate from the block.
BaselineBlock::BaselineBlock(int debug_level, bool non_text, TO_BLOCK* block)
    : block_(block), debug_level_(debug_level), non_text_block_(non_text),
      good_skew_angle_(false), skew_angle_(0.0),
      line_spacing_(block->line_spacing), line_offset_(0.0),
      model_error_(0.0) {
  TO_ROW_IT it(block_->get_rows());
  for (it.mark_cycle_pt(); !it.cycled_list(); it.forward()) {
    // Sort the blobs on the row left-to-right before wrapping it.
    it.data()->blob_list()->sort(blob_x_order);
    rows_.push_back(new BaselineRow(block->line_spacing, it.data()));
  }
}
// Computes and returns the absolute error of the given perp_disp from the
// given linespacing model.
double BaselineBlock::SpacingModelError(double perp_disp, double line_spacing,
                                        double line_offset) {
  // Snap perp_disp to the nearest model line (an integer multiple of
  // line_spacing plus line_offset) and return the absolute residual.
  const int line_index =
      IntCastRounded((perp_disp - line_offset) / line_spacing);
  const double model_pos = line_spacing * line_index + line_offset;
  return fabs(perp_disp - model_pos);
}
// Fits straight line baselines and computes the skew angle from the
// median angle. Returns true if a good angle is found.
// If use_box_bottoms is false, baseline positions are formed by
// considering the outlines of the blobs.
bool BaselineBlock::FitBaselinesAndFindSkew(bool use_box_bottoms) {
if (non_text_block_) return false;
GenericVector<double> angles;
for (int r = 0; r < rows_.size(); ++r) {
BaselineRow* row = rows_[r];
if (row->FitBaseline(use_box_bottoms)) {
double angle = row->BaselineAngle();
angles.push_back(angle);
}
if (debug_level_ > 1)
row->Print();
}
if (!angles.empty()) {
skew_angle_ = MedianOfCircularValues(M_PI, &angles);
good_skew_angle_ = true;
} else {
skew_angle_ = 0.0f;
good_skew_angle_ = false;
}
if (debug_level_ > 0) {
tprintf("Initial block skew angle = %g, good = %d\n",
skew_angle_, good_skew_angle_);
}
return good_skew_angle_;
}
// Refits the baseline to a constrained angle, using the stored block
// skew if good enough, otherwise the supplied default skew.
void BaselineBlock::ParallelizeBaselines(double default_block_skew) {
  if (non_text_block_) return;
  // Fall back to the page-level default when this block's own skew was not
  // established by FitBaselinesAndFindSkew.
  if (!good_skew_angle_) skew_angle_ = default_block_skew;
  if (debug_level_ > 0)
    tprintf("Adjusting block to skew angle %g\n", skew_angle_);
  // Unit vector along the chosen skew direction.
  FCOORD direction(cos(skew_angle_), sin(skew_angle_));
  for (int r = 0; r < rows_.size(); ++r) {
    BaselineRow* row = rows_[r];
    row->AdjustBaselineToParallel(debug_level_, direction);
    if (debug_level_ > 1)
      row->Print();
  }
  // With fewer than 3 rows there is too little evidence for a linespacing
  // model; ComputeLineSpacing may also reject the model as a poor fit.
  if (rows_.size() < 3 || !ComputeLineSpacing())
    return;
  // Enforce the line spacing model on all lines that don't yet have a good
  // baseline.
  // Start by finding the row that is best fitted to the model.
  int best_row = 0;
  double best_error = SpacingModelError(rows_[0]->PerpDisp(direction),
                                        line_spacing_, line_offset_);
  for (int r = 1; r < rows_.size(); ++r) {
    double error = SpacingModelError(rows_[r]->PerpDisp(direction),
                                     line_spacing_, line_offset_);
    if (error < best_error) {
      best_error = error;
      best_row = r;
    }
  }
  // Starting at the best fitting row, work outwards in both directions,
  // syncing the offset: each row is snapped relative to the offset returned
  // by its already-snapped neighbor.
  double offset = line_offset_;
  for (int r = best_row + 1; r < rows_.size(); ++r) {
    offset = rows_[r]->AdjustBaselineToGrid(debug_level_, direction,
                                            line_spacing_, offset);
  }
  offset = line_offset_;
  for (int r = best_row - 1; r >= 0; --r) {
    offset = rows_[r]->AdjustBaselineToGrid(debug_level_, direction,
                                            line_spacing_, offset);
  }
}
// Sets the parameters in TO_BLOCK that are needed by subsequent processes.
void BaselineBlock::SetupBlockParameters() const {
if (line_spacing_ > 0.0) {
// Where was block_line_spacing set before?
float min_spacing = MIN(block_->line_spacing, line_spacing_);
if (min_spacing < block_->line_size)
block_->line_size = min_spacing;
block_->line_spacing = line_spacing_;
block_->baseline_offset = line_offset_;
block_->max_blob_size = line_spacing_ * kMaxBlobSizeMultiple;
}
// Setup the parameters on all the rows.
TO_ROW_IT row_it(block_->get_rows());
for (int r = 0; r < rows_.size(); ++r, row_it.forward()) {
BaselineRow* row = rows_[r];
TO_ROW* to_row = row_it.data();
row->SetupOldLineParameters(to_row);
}
}
// Processing that is required before fitting baseline splines, but requires
// linear baselines in order to be successful:
// Removes noise if required
// Separates out underlines
// Pre-associates blob fragments.
// TODO(rays/joeliu) This entire section of code is inherited from the past
// and could be improved/eliminated.
// page_tr is used to size a debug window.
void BaselineBlock::PrepareForSplineFitting(ICOORD page_tr, bool remove_noise) {
  if (non_text_block_) return;
  if (remove_noise)
    vigorous_noise_removal(block_);
  // Identity rotation: the block is processed in its original orientation.
  FCOORD no_rotation(1.0f, 0.0f);
  double gradient = tan(skew_angle_);
  separate_underlines(block_, gradient, no_rotation, true);
  pre_associate_blobs(page_tr, block_, no_rotation, true);
}
// Fits splines to the textlines, or creates fake QSPLINES from the straight
// baselines that are already on the TO_ROWs.
// As a side-effect, computes the xheights of the rows and the block.
// Although x-height estimation is conceptually separate, it is part of
// detecting perspective distortion and therefore baseline fitting.
void BaselineBlock::FitBaselineSplines(bool enable_splines,
                                       bool show_final_rows,
                                       Textord* textord) {
  double gradient = tan(skew_angle_);
  FCOORD rotation(1.0f, 0.0f);  // Identity rotation.
  if (enable_splines) {
    textord->make_spline_rows(block_, gradient, show_final_rows);
  } else {
    // Make a fake spline from the existing line: a single-segment QSPLINE
    // spanning the block, using the row's straight-line slope (line_m) and
    // intercept (line_c).
    TBOX block_box= block_->block->bounding_box();
    TO_ROW_IT row_it = block_->get_rows();
    for (row_it.mark_cycle_pt(); !row_it.cycled_list(); row_it.forward()) {
      TO_ROW* row = row_it.data();
      inT32 xstarts[2] = { block_box.left(), block_box.right() };
      double coeffs[3] = { 0.0, row->line_m(), row->line_c() };
      row->baseline = QSPLINE(1, xstarts, coeffs);
      textord->compute_row_xheight(row, block_->block->classify_rotation(),
                                   row->line_m(), block_->line_size);
    }
  }
  // Derive the block-level xheight from the rows and copy it to the output
  // BLOCK.
  textord->compute_block_xheight(block_, gradient);
  block_->block->set_xheight(block_->xheight);
  if (textord_restore_underlines)  // Fix underlines: re-attach chopped blobs.
    restore_underlined_blobs(block_);
}
// Draws the (straight) baselines and final blobs colored according to
// what was discarded as noise and what is associated with each row.
void BaselineBlock::DrawFinalRows(const ICOORD& page_tr) {
#ifndef GRAPHICS_DISABLED
  if (non_text_block_) return;
  double gradient = tan(skew_angle_);
  FCOORD rotation(1.0f, 0.0f);  // Identity rotation for display.
  int left_edge = block_->block->bounding_box().left();
  ScrollView* win = create_to_win(page_tr);
  // Cycle the row colour through RED..MAGENTA so adjacent rows are
  // distinguishable.
  ScrollView::Color colour = ScrollView::RED;
  TO_ROW_IT row_it = block_->get_rows();
  for (row_it.mark_cycle_pt(); !row_it.cycled_list(); row_it.forward()) {
    plot_parallel_row(row_it.data(), gradient, left_edge, colour, rotation);
    colour = static_cast<ScrollView::Color>(colour + 1);
    if (colour > ScrollView::MAGENTA)
      colour = ScrollView::RED;
  }
  // Anything still on block_->blobs at this point was not associated with a
  // row, i.e. discarded as noise.
  plot_blob_list(win, &block_->blobs, ScrollView::MAGENTA, ScrollView::WHITE);
  // Show discarded blobs.
  plot_blob_list(win, &block_->underlines,
                 ScrollView::YELLOW, ScrollView::CORAL);
  if (block_->blobs.length() > 0)
    tprintf("%d blobs discarded as noise\n", block_->blobs.length());
  draw_meanlines(block_, gradient, left_edge, ScrollView::WHITE, rotation);
#endif
}
// Plots the baseline spline of every row directly into the given image.
void BaselineBlock::DrawPixSpline(Pix* pix_in) {
  if (non_text_block_) return;
  TO_ROW_IT it = block_->get_rows();
  for (it.mark_cycle_pt(); !it.cycled_list(); it.forward())
    it.data()->baseline.plot(pix_in);
}
// Top-level line-spacing calculation. Computes an estimate of the line-
// spacing, using the current baselines in the TO_ROWS of the block, and
// then refines it by fitting a regression line to the baseline positions
// as a function of their integer index.
// Returns true if it seems that the model is a reasonable fit to the
// observations.
bool BaselineBlock::ComputeLineSpacing() {
FCOORD direction(cos(skew_angle_), sin(skew_angle_));
GenericVector<double> row_positions;
ComputeBaselinePositions(direction, &row_positions);
if (row_positions.size() < 2) return false;
EstimateLineSpacing();
RefineLineSpacing(row_positions);
// Verify that the model is reasonable.
double max_baseline_error = kMaxBaselineError * line_spacing_;
int non_trivial_gaps = 0;
int fitting_gaps = 0;
for (int i = 1; i < row_positions.size(); ++i) {
double row_gap = fabs(row_positions[i - 1] - row_positions[i]);
if (row_gap > max_baseline_error) {
++non_trivial_gaps;
if (fabs(row_gap - line_spacing_) <= max_baseline_error)
++fitting_gaps;
}
}
if (debug_level_ > 0) {
tprintf("Spacing %g, in %d rows, %d gaps fitted out of %d non-trivial\n",
line_spacing_, row_positions.size(), fitting_gaps,
non_trivial_gaps);
}
return fitting_gaps > non_trivial_gaps * kMinFittingLinespacings;
}
// Computes the deskewed vertical position of each baseline in the block and
// stores them in the given vector.
// This is calculated as the perpendicular distance of the middle of each
// baseline (in case it has a different skew angle) from the line passing
// through the origin parallel to the block baseline angle.
// NOTE that "distance" above is a signed quantity so we can tell which side
// of the block baseline a line sits, hence the function and argument name
// positions not distances.
void BaselineBlock::ComputeBaselinePositions(const FCOORD& direction,
GenericVector<double>* positions) {
positions->clear();
for (int r = 0; r < rows_.size(); ++r) {
BaselineRow* row = rows_[r];
const TBOX& row_box = row->bounding_box();
float x_middle = (row_box.left() + row_box.right()) / 2.0f;
FCOORD row_pos(x_middle, static_cast<float>(row->StraightYAtX(x_middle)));
float offset = direction * row_pos;
positions->push_back(offset);
}
}
// Computes an estimate of the line spacing of the block from the median
// of the spacings between adjacent overlapping textlines.
void BaselineBlock::EstimateLineSpacing() {
GenericVector<float> spacings;
for (int r = 0; r < rows_.size(); ++r) {
BaselineRow* row = rows_[r];
// Exclude silly lines.
if (fabs(row->BaselineAngle()) > M_PI * 0.25) continue;
// Find the first row after row that overlaps it significantly.
const TBOX& row_box = row->bounding_box();
int r2;
for (r2 = r + 1; r2 < rows_.size() &&
!row_box.major_x_overlap(rows_[r2]->bounding_box());
++r2);
if (r2 < rows_.size()) {
BaselineRow* row2 = rows_[r2];
// Exclude silly lines.
if (fabs(row2->BaselineAngle()) > M_PI * 0.25) continue;
float spacing = row->SpaceBetween(*row2);
spacings.push_back(spacing);
}
}
// If we have at least one value, use it, otherwise leave the previous
// value unchanged.
if (!spacings.empty()) {
line_spacing_ = spacings[spacings.choose_nth_item(spacings.size() / 2)];
if (debug_level_ > 1)
tprintf("Estimate of linespacing = %g\n", line_spacing_);
}
}
// Refines the line spacing of the block by fitting a regression
// line to the deskewed y-position of each baseline as a function of its
// estimated line index, allowing for a small error in the initial linespacing
// and choosing the best available model.
void BaselineBlock::RefineLineSpacing(const GenericVector<double>& positions) {
  // Index 0 holds the current best model; 1 and 2 hold alternative
  // hypotheses with one more / one fewer line gap across the block.
  double spacings[3], offsets[3], errors[3];
  int index_range;
  errors[0] = FitLineSpacingModel(positions, line_spacing_,
                                  &spacings[0], &offsets[0], &index_range);
  if (index_range > 1) {
    // Spacing if the block actually spans index_range + 1 gaps.
    double spacing_plus = line_spacing_ / (1.0 + 1.0 / index_range);
    // Try the hypotheses that there might be index_range +/- 1 line spaces.
    errors[1] = FitLineSpacingModel(positions, spacing_plus,
                                    &spacings[1], &offsets[1], NULL);
    // Spacing if the block spans index_range - 1 gaps.
    double spacing_minus = line_spacing_ / (1.0 - 1.0 / index_range);
    errors[2] = FitLineSpacingModel(positions, spacing_minus,
                                    &spacings[2], &offsets[2], NULL);
    // Keep whichever hypothesis yields the lowest fitting error.
    for (int i = 1; i <= 2; ++i) {
      if (errors[i] < errors[0]) {
        spacings[0] = spacings[i];
        offsets[0] = offsets[i];
        errors[0] = errors[i];
      }
    }
  }
  // A non-positive spacing means the fit failed; keep the old model.
  if (spacings[0] > 0.0) {
    line_spacing_ = spacings[0];
    line_offset_ = offsets[0];
    model_error_ = errors[0];
    if (debug_level_ > 0) {
      tprintf("Final linespacing model = %g + offset %g, error %g\n",
              line_spacing_, line_offset_, model_error_);
    }
  }
}
// Given an initial estimate of line spacing (m_in) and the positions of each
// baseline, computes the line spacing of the block more accurately in m_out,
// and the corresponding intercept in c_out, and the number of spacings seen
// in index_delta. Returns the error of fit to the line spacing model.
// Uses a simple linear regression, but optimized the offset using the median.
double BaselineBlock::FitLineSpacingModel(
    const GenericVector<double>& positions, double m_in,
    double* m_out, double* c_out, int* index_delta) {
  // Degenerate input: no spacing estimate, or too few lines to fit.
  if (m_in == 0.0f || positions.size() < 2) {
    *m_out = m_in;
    *c_out = 0.0;
    if (index_delta != NULL) *index_delta = 0;
    return 0.0;
  }
  GenericVector<double> offsets;
  // Get the offset (remainder) linespacing for each line and choose the median.
  for (int i = 0; i < positions.size(); ++i)
    offsets.push_back(fmod(positions[i], m_in));
  // Get the median offset.
  double median_offset = MedianOfCircularValues(m_in, &offsets);
  // Now fit a line to quantized line number (x) and position (y): the
  // median offset assigns each baseline an integer line index.
  LLSQ llsq;
  int min_index = MAX_INT32;
  int max_index = -MAX_INT32;
  for (int i = 0; i < positions.size(); ++i) {
    double y_pos = positions[i];
    int row_index = IntCastRounded((y_pos - median_offset) / m_in);
    UpdateRange(row_index, &min_index, &max_index);
    llsq.add(row_index, y_pos);
  }
  // Get the refined line spacing: the slope of the regression line.
  *m_out = llsq.m();
  // Use the median offset rather than the mean: recompute the remainders
  // against the refined spacing.
  offsets.truncate(0);
  for (int i = 0; i < positions.size(); ++i)
    offsets.push_back(fmod(positions[i], *m_out));
  // Get the median offset.
  if (debug_level_ > 2) {
    for (int i = 0; i < offsets.size(); ++i)
      tprintf("%d: %g\n", i, offsets[i]);
  }
  *c_out = MedianOfCircularValues(*m_out, &offsets);
  if (debug_level_ > 1) {
    tprintf("Median offset = %g, compared to mean of %g.\n",
            *c_out, llsq.c(*m_out));
  }
  // Index_delta is the number of hypothesized line gaps present.
  if (index_delta != NULL)
    *index_delta = max_index - min_index;
  // Use the regression model's intercept to compute the error, as it may be
  // a full line-spacing in disagreement with the median.
  double rms_error = llsq.rms(*m_out, llsq.c(*m_out));
  if (debug_level_ > 1) {
    tprintf("Linespacing of y=%g x + %g improved to %g x + %g, rms=%g\n",
            m_in, median_offset, *m_out, *c_out, rms_error);
  }
  return rms_error;
}
// Wraps each TO_BLOCK on the list in a BaselineBlock, flagging non-text
// blocks for special treatment (see note below).
BaselineDetect::BaselineDetect(int debug_level, const FCOORD& page_skew,
                               TO_BLOCK_LIST* blocks)
    : page_skew_(page_skew), debug_level_(debug_level), pix_debug_(NULL),
      debug_file_prefix_("") {
  TO_BLOCK_IT it(blocks);
  for (it.mark_cycle_pt(); !it.cycled_list(); it.forward()) {
    TO_BLOCK* to_block = it.data();
    BLOCK* block = to_block->block;
    POLY_BLOCK* pb = block->poly_block();
    // A note about non-text blocks.
    // On output, non-text blocks are supposed to contain a single empty word
    // in each incoming text line. These mark out the polygonal bounds of the
    // block. Ideally no baselines should be required, but currently
    // make_words crashes if a baseline and xheight are not provided, so we
    // include non-text blocks here, but flag them for special treatment.
    bool non_text = pb != NULL && !pb->IsText();
    blocks_.push_back(new BaselineBlock(debug_level_, non_text, to_block));
  }
}
BaselineDetect::~BaselineDetect() {
  // Releases the cloned debug image, if any was set via SetDebugImage.
  pixDestroy(&pix_debug_);
}
// Finds the initial baselines for each TO_ROW in each TO_BLOCK, gathers
// block-wise and page-wise data to smooth small blocks/rows, and applies
// smoothing based on block/page-level skew and block-level linespacing.
void BaselineDetect::ComputeStraightBaselines(bool use_box_bottoms) {
GenericVector<double> block_skew_angles;
for (int i = 0; i < blocks_.size(); ++i) {
BaselineBlock* bl_block = blocks_[i];
if (debug_level_ > 0)
tprintf("Fitting initial baselines...\n");
if (bl_block->FitBaselinesAndFindSkew(use_box_bottoms)) {
block_skew_angles.push_back(bl_block->skew_angle());
}
}
// Compute a page-wide default skew for blocks with too little information.
double default_block_skew = page_skew_.angle();
if (!block_skew_angles.empty()) {
default_block_skew = MedianOfCircularValues(M_PI, &block_skew_angles);
}
if (debug_level_ > 0) {
tprintf("Page skew angle = %g\n", default_block_skew);
}
// Set bad lines in each block to the default block skew and then force fit
// a linespacing model where it makes sense to do so.
for (int i = 0; i < blocks_.size(); ++i) {
BaselineBlock* bl_block = blocks_[i];
bl_block->ParallelizeBaselines(default_block_skew);
bl_block->SetupBlockParameters(); // This replaced compute_row_stats.
}
}
// Computes the baseline splines for each TO_ROW in each TO_BLOCK and
// other associated side-effects, including pre-associating blobs, computing
// x-heights and displaying debug information.
// NOTE that ComputeStraightBaselines must have been called first as this
// sets up data in the TO_ROWs upon which this function depends.
void BaselineDetect::ComputeBaselineSplinesAndXheights(const ICOORD& page_tr,
                                                       bool enable_splines,
                                                       bool remove_noise,
                                                       bool show_final_rows,
                                                       Textord* textord) {
  // 32-bit copy of the debug image so splines can be drawn on it; NULL when
  // no debug image was set.
  Pix* pix_spline = pix_debug_ ? pixConvertTo32(pix_debug_) : NULL;
  for (int i = 0; i < blocks_.size(); ++i) {
    BaselineBlock* bl_block = blocks_[i];
    bl_block->PrepareForSplineFitting(page_tr, remove_noise);
    bl_block->FitBaselineSplines(enable_splines, show_final_rows, textord);
    if (pix_spline) {
      bl_block->DrawPixSpline(pix_spline);
    }
    if (show_final_rows) {
      bl_block->DrawFinalRows(page_tr);
    }
  }
  // Write the annotated image as <prefix>_spline.png and release it.
  if (pix_spline) {
    STRING outfile_name = debug_file_prefix_ + "_spline.png";
    pixWrite(outfile_name.string(), pix_spline, IFF_PNG);
    pixDestroy(&pix_spline);
  }
}
// Stores a (cloned) copy of the given image for debug output, and the path
// prefix under which debug images will be written.
void BaselineDetect::SetDebugImage(Pix* pixIn, const STRING& output_path) {
  pixDestroy(&pix_debug_);  // Drop any previously-set debug image.
  // pixClone shares the underlying image by refcount; the caller keeps its
  // own reference to pixIn.
  pix_debug_ = pixClone(pixIn);
  debug_file_prefix_ = output_path;
}
} // namespace tesseract.
| C++ |
/**********************************************************************
* File: underlin.cpp (Formerly undrline.c)
* Description: Code to chop blobs apart from underlines.
* Author: Ray Smith
* Created: Mon Aug 8 11:14:00 BST 1994
*
* (C) Copyright 1994, Hewlett-Packard Ltd.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
#ifdef __UNIX__
#include <assert.h>
#endif
#include "underlin.h"
#define PROJECTION_MARGIN 10     //arbitrary
#define EXTERN

// Fraction of the x-height added to the baseline when splitting underline
// projections into bands (passed as baseline_offset to
// vertical_cunderline_projection via find_underlined_blobs).
EXTERN double_VAR (textord_underline_offset, 0.1, "Fraction of x to ignore");
// When TRUE, underlines are chopped and the recovered character fragments
// are put back into their rows (see restore_underlined_blobs).
EXTERN BOOL_VAR (textord_restore_underlines, TRUE,
                 "Chop underlines & put back");
/**********************************************************************
* restore_underlined_blobs
*
* Find underlined blobs and put them back in the row.
**********************************************************************/
void restore_underlined_blobs(  //get chop points
    TO_BLOCK *block             //block to do
) {
  inT16 chop_coord;             //chop boundary
  TBOX blob_box;                //of underline
  BLOBNBOX *u_line;             //underline bit
  TO_ROW *row;                  //best row for blob
  ICOORDELT_LIST chop_cells;    //blobs to cut out
  //real underlines
  BLOBNBOX_LIST residual_underlines;
  C_OUTLINE_LIST left_coutlines;
  C_OUTLINE_LIST right_coutlines;
  ICOORDELT_IT cell_it = &chop_cells;
  //under lines
  BLOBNBOX_IT under_it = &block->underlines;
  BLOBNBOX_IT ru_it = &residual_underlines;

  if (block->get_rows()->empty())
    return;  // Don't crash if there are no rows.
  for (under_it.mark_cycle_pt(); !under_it.cycled_list();
       under_it.forward()) {
    u_line = under_it.extract();
    blob_box = u_line->bounding_box();
    row = most_overlapping_row(block->get_rows(), u_line);
    if (row == NULL)
      return;  // Don't crash if there is no row.
    // Find the x-ranges inside the underline blob that carry real character
    // material above the underline band.
    find_underlined_blobs(u_line, &row->baseline, row->xheight,
                          row->xheight * textord_underline_offset,
                          &chop_cells);
    cell_it.set_to_list(&chop_cells);
    for (cell_it.mark_cycle_pt(); !cell_it.cycled_list();
         cell_it.forward()) {
      // Each cell stores its x-range as (x, y) = (start, end).
      chop_coord = cell_it.data()->x();
      // Only cells wider than the chop error are worth chopping.
      if (cell_it.data()->y() - chop_coord > textord_fp_chop_error + 1) {
        // First cut: everything left of the cell start is residual
        // underline material.
        split_to_blob(u_line, chop_coord,
                      textord_fp_chop_error + 0.5,
                      &left_coutlines,
                      &right_coutlines);
        if (!left_coutlines.empty()) {
          ru_it.add_after_then_move(new BLOBNBOX(new C_BLOB(&left_coutlines)));
        }
        // Second cut at the cell end: the piece between the cuts is a
        // recovered character blob, re-inserted into its row.
        chop_coord = cell_it.data()->y();
        split_to_blob(NULL, chop_coord, textord_fp_chop_error + 0.5,
                      &left_coutlines, &right_coutlines);
        if (!left_coutlines.empty()) {
          row->insert_blob(new BLOBNBOX(new C_BLOB(&left_coutlines)));
        }
        u_line = NULL;  //no more blobs to add
      }
      delete cell_it.extract();
    }
    // Material right of the last cut is residual underline too.
    if (!right_coutlines.empty()) {
      split_to_blob(NULL, blob_box.right(), textord_fp_chop_error + 0.5,
                    &left_coutlines, &right_coutlines);
      if (!left_coutlines.empty())
        ru_it.add_after_then_move(new BLOBNBOX(new C_BLOB(&left_coutlines)));
    }
    // If no chop happened, the extracted u_line was never consumed by
    // split_to_blob: free it here.
    if (u_line != NULL) {
      if (u_line->cblob() != NULL)
        delete u_line->cblob();
      delete u_line;
    }
  }
  // Put the residual (genuine) underline fragments back on the block's
  // underline list.
  if (!ru_it.empty()) {
    ru_it.move_to_first();
    for (ru_it.mark_cycle_pt(); !ru_it.cycled_list(); ru_it.forward()) {
      under_it.add_after_then_move(ru_it.extract());
    }
  }
}
/**********************************************************************
* most_overlapping_row
*
* Return the row which most overlaps the blob.
**********************************************************************/
TO_ROW *most_overlapping_row(  //find best row
    TO_ROW_LIST *rows,         //list of rows
    BLOBNBOX *blob             //blob to place
) {
  inT16 x = (blob->bounding_box ().left ()
             + blob->bounding_box ().right ()) / 2;  // blob centre x
  TO_ROW_IT row_it = rows;       //row iterator
  TO_ROW *row;                   //current row
  TO_ROW *best_row;              //output row
  float overlap;                 //of blob & row
  float bestover;                //best overlap

  best_row = NULL;
  bestover = (float) -MAX_INT32;
  if (row_it.empty ())
    return NULL;
  row = row_it.data ();
  row_it.mark_cycle_pt ();
  // Skip rows whose lower (descender) limit is beyond the blob's top, i.e.
  // rows entirely clear of the blob. Remember the nearest such row and its
  // (negative) gap in case nothing truly overlaps.
  while (row->baseline.y (x) + row->descdrop > blob->bounding_box ().top ()
         && !row_it.cycled_list ()) {
    best_row = row;
    bestover =
        blob->bounding_box ().top () - row->baseline.y (x) + row->descdrop;
    row_it.forward ();
    row = row_it.data ();
  }
  // Walk rows whose upper (ascender) limit still reaches the blob's bottom,
  // computing the vertical overlap of blob and row extents, and keep the
  // row with the largest overlap.
  while (row->baseline.y (x) + row->xheight + row->ascrise
         >= blob->bounding_box ().bottom () && !row_it.cycled_list ()) {
    // overlap = min(blob top, row top) - max(blob bottom, row bottom).
    overlap = row->baseline.y (x) + row->xheight + row->ascrise;
    if (blob->bounding_box ().top () < overlap)
      overlap = blob->bounding_box ().top ();
    if (blob->bounding_box ().bottom () >
        row->baseline.y (x) + row->descdrop)
      overlap -= blob->bounding_box ().bottom ();
    else
      overlap -= row->baseline.y (x) + row->descdrop;
    if (overlap > bestover) {
      bestover = overlap;
      best_row = row;
    }
    row_it.forward ();
    row = row_it.data ();
  }
  // If nothing overlapped (bestover still negative), prefer the current row
  // when its gap to the blob is smaller than the best candidate so far.
  if (bestover < 0
      && row->baseline.y (x) + row->xheight + row->ascrise
      - blob->bounding_box ().bottom () > bestover)
    best_row = row;
  return best_row;
}
/**********************************************************************
* find_underlined_blobs
*
* Find the start and end coords of blobs in the underline.
**********************************************************************/
void find_underlined_blobs(     //get chop points
    BLOBNBOX *u_line,           //underlined unit
    QSPLINE *baseline,          //actual baseline
    float xheight,              //height of line
    float baseline_offset,      //amount to shrink it
    ICOORDELT_LIST *chop_cells  //places to chop
) {
  inT16 x, y;                   //start/end of a projection run
  ICOORD blob_chop;             //(start, end) x-range of a chop cell
  TBOX blob_box = u_line->bounding_box ();
  //cell iterator
  ICOORDELT_IT cell_it = chop_cells;
  // Per-column projections of the outline, split into three vertical bands
  // relative to the (offset) baseline: below, middle (the text band, up to
  // one x-height above), and above.
  STATS upper_proj (blob_box.left (), blob_box.right () + 1);
  STATS middle_proj (blob_box.left (), blob_box.right () + 1);
  STATS lower_proj (blob_box.left (), blob_box.right () + 1);
  C_OUTLINE_IT out_it;          //outlines of blob

  ASSERT_HOST (u_line->cblob () != NULL);
  out_it.set_to_list (u_line->cblob ()->out_list ());
  for (out_it.mark_cycle_pt (); !out_it.cycled_list (); out_it.forward ()) {
    vertical_cunderline_projection (out_it.data (),
                                    baseline, xheight, baseline_offset,
                                    &lower_proj, &middle_proj, &upper_proj);
  }
  // Each maximal run of columns with non-zero middle-band projection marks
  // real character material sitting on the underline: record its x-range
  // [x, y) as a chop cell.
  for (x = blob_box.left (); x < blob_box.right (); x++) {
    if (middle_proj.pile_count (x) > 0) {
      for (y = x + 1;
           y < blob_box.right () && middle_proj.pile_count (y) > 0; y++);
      blob_chop = ICOORD (x, y);
      cell_it.add_after_then_move (new ICOORDELT (blob_chop));
      x = y;
    }
  }
}
/**********************************************************************
* vertical_cunderline_projection
*
* Compute the vertical projection of a outline from its outlines
* and add to the given STATS.
**********************************************************************/
void vertical_cunderline_projection(  //project outlines
    C_OUTLINE *outline,   //outline to project
    QSPLINE *baseline,    //actual baseline
    float xheight,        //height of line
    float baseline_offset,  //amount to shrink it
    STATS *lower_proj,    //below baseline
    STATS *middle_proj,   //centre region
    STATS *upper_proj     //top region
) {
  ICOORD pos;                   //current point
  ICOORD step;                  //edge step
  inT16 lower_y, upper_y;       //region limits
  inT32 length;                 //of outline
  inT16 stepindex;              //current step
  C_OUTLINE_IT out_it = outline->child ();

  pos = outline->start_pos ();
  length = outline->pathlength ();
  // Walk the chain-coded outline. Only horizontal steps contribute:
  // rightward (x>0) steps add signed height to the bands and leftward (x<0)
  // steps subtract it, so summing around the closed outline leaves the
  // filled height per column in each band.
  for (stepindex = 0; stepindex < length; stepindex++) {
    step = outline->step (stepindex);
    if (step.x () > 0) {
      // Band boundaries at this column: lower_y is the baseline shifted up
      // by baseline_offset, upper_y is one x-height above that.
      lower_y =
          (inT16) floor (baseline->y (pos.x ()) + baseline_offset + 0.5);
      upper_y =
          (inT16) floor (baseline->y (pos.x ()) + baseline_offset +
                         xheight + 0.5);
      // Split the column's extent below pos.y() across the three bands.
      if (pos.y () >= lower_y) {
        lower_proj->add (pos.x (), -lower_y);
        if (pos.y () >= upper_y) {
          middle_proj->add (pos.x (), lower_y - upper_y);
          upper_proj->add (pos.x (), upper_y - pos.y ());
        }
        else
          middle_proj->add (pos.x (), lower_y - pos.y ());
      }
      else
        lower_proj->add (pos.x (), -pos.y ());
    }
    else if (step.x () < 0) {
      // Leftward step: same band split with opposite signs, applied to the
      // column to the left of the step.
      lower_y =
          (inT16) floor (baseline->y (pos.x () - 1) + baseline_offset +
                         0.5);
      upper_y =
          (inT16) floor (baseline->y (pos.x () - 1) + baseline_offset +
                         xheight + 0.5);
      if (pos.y () >= lower_y) {
        lower_proj->add (pos.x () - 1, lower_y);
        if (pos.y () >= upper_y) {
          middle_proj->add (pos.x () - 1, upper_y - lower_y);
          upper_proj->add (pos.x () - 1, pos.y () - upper_y);
        }
        else
          middle_proj->add (pos.x () - 1, pos.y () - lower_y);
      }
      else
        lower_proj->add (pos.x () - 1, pos.y ());
    }
    pos += step;
  }
  // Recurse into child outlines (typically holes) so their signed
  // contributions are accumulated consistently.
  for (out_it.mark_cycle_pt (); !out_it.cycled_list (); out_it.forward ()) {
    vertical_cunderline_projection (out_it.data (),
                                    baseline, xheight, baseline_offset,
                                    lower_proj, middle_proj, upper_proj);
  }
}
| C++ |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.