code stringlengths 1 2.01M | repo_name stringlengths 3 62 | path stringlengths 1 267 | language stringclasses 231 values | license stringclasses 13 values | size int64 1 2.01M |
|---|---|---|---|---|---|
/* -*-C-*-
********************************************************************************
*
* File: structures.h (Formerly structures.h)
* Description: Allocate all the different types of structures.
* Author: Mark Seaman, OCR Technology
* Created: Wed May 30 10:12:12 1990
* Modified: Tue May 21 11:07:47 1991 (Mark Seaman) marks@hpgrlt
* Language: C
* Package: N/A
* Status: Experimental (Do Not Distribute)
*
* (c) Copyright 1990, Hewlett-Packard Company.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
*********************************************************************************/
#ifndef STRUCTURES_H
#define STRUCTURES_H
/*----------------------------------------------------------------------
I n c l u d e s
----------------------------------------------------------------------*/
#include "oldlist.h"
#include "freelist.h"
#include "danerror.h"
/*----------------------------------------------------------------------
M a c r o s
----------------------------------------------------------------------*/
/**********************************************************************
 * makestructure
 *
 * Define an allocation function (newfunc) and a deallocation function
 * (old) for a particular data type.  Allocation and deallocation are
 * simple wrappers around new/delete.
 *
 * NOTE: the macro intentionally has no line-continuation backslash
 * after the final brace.  The old version did, which spliced whatever
 * source line followed the definition into the macro -- harmless only
 * by accident (the next line happened to be a comment).
 **********************************************************************/
#define makestructure(newfunc, old, type) \
  type *newfunc() {                       \
    return new type;                      \
  }                                       \
                                          \
  void old(type *deadelement) {           \
    delete deadelement;                   \
  }
/*----------------------------------------------------------------------
F u n c t i o n s
----------------------------------------------------------------------*/
extern LIST new_cell();
extern void free_cell(LIST);
#endif
| 1080228-arabicocr11 | cutil/structures.h | C | asf20 | 2,847 |
///////////////////////////////////////////////////////////////////////
// File: cutil.cpp
// Description: cutil class.
// Author: Samuel Charron
//
// (C) Copyright 2006, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#include "cutil_class.h"
namespace tesseract {
// CUtil's constructor and destructor are intentionally empty: the class
// carries no state of its own here.  Its role is declared in
// cutil_class.h (not visible in this file) -- presumably it exists as a
// base/attachment point for the cutil layer; confirm against the header.
CUtil::CUtil() {
}
CUtil::~CUtil() {
}
} // namespace tesseract
| 1080228-arabicocr11 | cutil/cutil_class.cpp | C++ | asf20 | 933 |
/**************************************************************************
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
**************************************************************************/
#ifndef CONST_H
#define CONST_H
/*This file contains constants which are global to the entire system*/
#define SPLINESIZE 23 // max spline parts to a line
#define PI 3.14159265359 // pi
#define EDGEPTFLAGS 4 // concavity,length etc.
#endif
| 1080228-arabicocr11 | cutil/const.h | C | asf20 | 987 |
/**********************************************************************
* File: callcpp.h
* Description: extern C interface calling C++ from C.
* Author: Ray Smith
* Created: Sun Feb 04 20:39:23 MST 1996
*
* (C) Copyright 1996, Hewlett-Packard Co.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
#ifndef CALLCPP_H
#define CALLCPP_H
#ifndef __UNIX__
#include <assert.h>
#endif
#include "host.h"
#include "params.h"
#include "unichar.h"
class ScrollView;
// Colour indices used by the C drawing wrappers declared below.
// The wrapper implementation maps these onto ScrollView colours by
// adding 1 (ScrollView reserves index 0 for "no colour").
// NOTE(review): enumerator spellings -- including the historical
// misspellings such as GoldENROD and Turqoise -- are part of the public
// API and must not be "fixed" without updating every caller.
typedef enum {
Black,
White,
Red,
Yellow,
Green,
Cyan,
Blue,
Magenta,
Aquamarine,
Dark_SLATE_BLUE,
Light_BLUE,
Medium_BLUE,
Midnight_BLUE,
Navy_BLUE,
Sky_BLUE,
Slate_BLUE,
Steel_BLUE,
Coral,
Brown,
Sandy_BROWN,
Gold,
GoldENROD,
Dark_GREEN,
Dark_OLIVE_GREEN,
Forest_GREEN,
Lime_GREEN,
Pale_GREEN,
Yellow_GREEN,
Light_GREY,
Dark_SLATE_GREY,
Dim_GREY,
Grey,
Khaki,
Maroon,
Orange,
Orchid,
Pink,
Plum,
Indian_RED,
Orange_RED,
Violet_RED,
Salmon,
Tan,
Turqoise,
Dark_TURQUOISE,
Violet,
Wheat,
Green_YELLOW
} C_COL; /*starbase colours */
void cprintf ( //Trace printf
const char *format, ... //special message
);
ScrollView *c_create_window( /*create a window */
const char *name, /*name/title of window */
inT16 xpos, /*coords of window */
inT16 ypos, /*coords of window */
inT16 xsize, /*size of window */
inT16 ysize, /*size of window */
double xmin, /*scrolling limits */
double xmax, /*to stop users */
double ymin, /*getting lost in */
double ymax /*empty space */
);
void c_line_color_index( /*set color */
void *win,
C_COL index);
void c_move( /*move pen */
void *win,
double x,
double y);
void c_draw( /*move pen */
void *win,
double x,
double y);
void c_make_current( /*move pen */
void *win);
void c_clear_window( /*move pen */
void *win);
char window_wait(ScrollView* win);
void reverse32(void *ptr);
void reverse16(void *ptr);
#endif
| 1080228-arabicocr11 | cutil/callcpp.h | C++ | asf20 | 2,998 |
/******************************************************************************
** Filename: efio.h
** Purpose: Definition of file I/O routines
** Author: Dan Johnson
** History: 5/21/89, DSJ, Created.
**
** (c) Copyright Hewlett-Packard Company, 1988.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
******************************************************************************/
#ifndef EFIO_H
#define EFIO_H
/**----------------------------------------------------------------------------
Include Files and Type Defines
----------------------------------------------------------------------------**/
#include <stdio.h>
#define FOPENERROR 3000
/**----------------------------------------------------------------------------
Public Function Prototype
----------------------------------------------------------------------------**/
FILE *Efopen(const char *Name, const char *Mode);
#endif
| 1080228-arabicocr11 | cutil/efio.h | C | asf20 | 1,435 |
/******************************************************************************
** Filename: danerror.h
** Purpose: Definition of error trapping routines.
** Author: Dan Johnson
** History: 4/3/89, DSJ, Created.
**
** (c) Copyright Hewlett-Packard Company, 1988.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
******************************************************************************/
#ifndef DANERROR_H
#define DANERROR_H
/**----------------------------------------------------------------------------
Include Files and Type Defines
----------------------------------------------------------------------------**/
#define NOERROR 0
#define DO_NOTHING 0
typedef int TRAPERROR;
typedef void (*VOID_PROC) ();
/**----------------------------------------------------------------------------
Public Function Prototypes
----------------------------------------------------------------------------**/
void DoError(int Error, const char *Message);
#endif
| 1080228-arabicocr11 | cutil/danerror.h | C | asf20 | 1,506 |
/**********************************************************************
* File: callcpp.cpp
* Description: extern C interface calling C++ from C.
* Author: Ray Smith
* Created: Sun Feb 04 20:39:23 MST 1996
*
* (C) Copyright 1996, Hewlett-Packard Co.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
// Include automatically generated configuration file if running autoconf.
#ifdef HAVE_CONFIG_H
#include "config_auto.h"
#endif
#include "errcode.h"
#ifdef __UNIX__
#include <assert.h>
#include <stdarg.h>
#endif
#include <time.h>
#include "memry.h"
#include "scrollview.h"
#include "params.h"
#include "callcpp.h"
#include "tprintf.h"
#include "host.h"
#include "unichar.h"
/**********************************************************************
 * cprintf
 *
 * Trace printf: format the message into a bounded local buffer and
 * forward the result to tprintf so all tracing goes through one
 * channel.  Messages longer than the buffer are truncated.
 **********************************************************************/
void
cprintf (                        //Trace printf
const char *format, ...          //special message
) {
  va_list args;                  //variable args
  char msg[1000];
  va_start(args, format);        //variable list
  // vsnprintf (not vsprintf): a caller-supplied format expanding past
  // 1000 bytes must truncate, not overflow the stack buffer.
  vsnprintf(msg, sizeof(msg), format, args);
  va_end(args);
  tprintf ("%s", msg);
}
#ifndef GRAPHICS_DISABLED
// Thin C wrapper that creates a ScrollView window.
// NOTE(review): only the extents (xmax - xmin, ymax - ymin) are passed
// to ScrollView; the xmin/ymin origins themselves are dropped, so the
// scrolling limits described by the parameters are collapsed to sizes
// here.  Presumably ScrollView anchors at 0 -- confirm against its API.
ScrollView *c_create_window( /*create a window */
const char *name, /*name/title of window */
inT16 xpos, /*coords of window */
inT16 ypos, /*coords of window */
inT16 xsize, /*size of window */
inT16 ysize, /*size of window */
double xmin, /*scrolling limits */
double xmax, /*to stop users */
double ymin, /*getting lost in */
double ymax /*empty space */
) {
return new ScrollView(name, xpos, ypos, xsize, ysize, xmax - xmin, ymax - ymin, true);
}
void c_line_color_index( /*set color */
void *win,
C_COL index) {
// The colors are the same as the SV ones except that SV has COLOR:NONE --> offset of 1
ScrollView* window = (ScrollView*) win;
window->Pen((ScrollView::Color) (index + 1));
}
void c_move( /*move pen */
void *win,
double x,
double y) {
ScrollView* window = (ScrollView*) win;
window->SetCursor((int) x, (int) y);
}
void c_draw( /*move pen */
void *win,
double x,
double y) {
ScrollView* window = (ScrollView*) win;
window->DrawTo((int) x, (int) y);
}
void c_make_current( /*move pen */
void *win) {
ScrollView* window = (ScrollView*) win;
window->Update();
}
void c_clear_window( /*move pen */
void *win) {
ScrollView* window = (ScrollView*) win;
window->Clear();
}
// Block until the window receives an input or click event; all other
// events are consumed and discarded.  Returns the first character of
// an input event, or '\0' for a click.
char window_wait(ScrollView* win) {
  char result = '\0';
  for (;;) {
    SVEvent *event = win->AwaitEvent(SVET_ANY);
    SVEventType type = event->type;
    if (type == SVET_INPUT)
      result = event->parameter[0];
    delete event;
    if (type == SVET_INPUT || type == SVET_CLICK)
      break;
  }
  return result;
}
#endif
// Reverse the four bytes at ptr in place (32-bit endianness swap).
void reverse32(void *ptr) {
  char *bytes = (char *) ptr;
  for (int i = 0; i < 2; ++i) {
    char saved = bytes[i];
    bytes[i] = bytes[3 - i];
    bytes[3 - i] = saved;
  }
}
// Swap the two bytes at ptr in place (16-bit endianness swap).
void reverse16(void *ptr) {
  char *bytes = (char *) ptr;
  char saved = bytes[0];
  bytes[0] = bytes[1];
  bytes[1] = saved;
}
| 1080228-arabicocr11 | cutil/callcpp.cpp | C++ | asf20 | 4,097 |
/* -*-C-*-
********************************************************************************
*
* File: list.h (Formerly list.h)
* Description: List processing procedures declarations.
* Author: Mark Seaman, SW Productivity
* Created: Fri Oct 16 14:37:00 1987
* Modified: Wed Dec 5 15:43:17 1990 (Mark Seaman) marks@hpgrlt
* Language: C
* Package: N/A
* Status: Reusable Software Component
*
* (c) Copyright 1987, Hewlett-Packard Company.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
********************************************************************************
*
* This file contains the interface for a set of general purpose list
* manipulation routines. For the implementation of these routines see
* the file "list.c".
*
********************************************************************************
*
* INDEX
* =======
*
* BASICS:
* -------
* first_node - Macro to return the first list node (not the cell).
* list_rest - Macro the return the second list cell
* pop - Destroy one list cell
* push - Create one list cell and set the node and next fields
*
* ITERATION:
* -----------------
* iterate - Macro to create a for loop to visit each cell.
* iterate_list - Macro to visit each cell using a local variable.
* for_each - Applies a function to each node.
*
* LIST CELL COUNTS:
* -----------------
* count - Returns the number of list cells in the list.
* second_node - Returns the second node.
* third - Returns the third node.
* fourth - Returns the fourth node.
* fifth - Returns the fifth node.
* last - Returns the last list cell.
* pair - Creates a list of two elements.
*
* COPYING:
* -----------------
* copy_first - Pushes the first element from list 1 onto list 2.
* copy - Create a copy of a list.
* concat - Creates a new list that is a copy of both input lists.
* delete_n - Creates a new list without the chosen elements.
* reverse - Creates a backwards copy of the input list.
* sort - Use quick sort to construct a new list.
* transform - Creates a new list by transforming each of the nodes.
*
* TRANSFORMS: (Note: These functions all modify the input list.)
* ----------
* join - Concatenates list 1 and list 2.
* delete_d - Removes the requested elements from the list.
* transform_d - Modifies the list by applying a function to each node.
* insert - Add a new element into this spot in a list. (not NIL_LIST)
* push_last - Add a new element onto the end of a list.
* reverse_d - Reverse a list and destroy the old one.
*
* ASSOCIATED LISTS:
* -----------------
* adelete - Remove a particular entry from an associated list.
* assoc - Find an entry in an associated list that matches a key.
* match - Return the data element of an a-list entry.
*
* DISPLAY:
* -----------------
* print_cell - Print a hex dump of a list cell.
* show - Displays a string and a list (using lprint).
*
* SETS:
* -----
* adjoin - Add a new element to list if it does not exist already.
* intersection - Create a new list that is the set intersection.
* set_union - Create a new list that is the set union.
* set_difference - Create a new list that is the set difference.
* s_adjoin - Add an element to a sort list if it is not there.
* s_intersection - Set intersection on a sorted list. Modifies old list.
* s_union - Set union on a sorted list. Modifies old list.
* search - Return the pointer to the list cell whose node matches.
*
* COMPARISONS:
* -----------------
* is_same - Compares each node to the key.
* is_not_same - Compares each node to the key.
* is_key - Compares first of each node to the key.
* is_not_key - Compares first of each node to the key.
*
* CELL OPERATIONS:
* -----------------
* new_cell - Obtain a new list cell from the free list. Allocate.
* free_cell - Return a list cell to the free list.
* destroy - Return all list cells in a list.
* destroy_nodes - Apply a function to each list cell and destroy the list.
* set_node - Assign the node field in a list cell.
* set_rest - Assign the next field in a list cell.
*
***********************************************************************/
#ifndef LIST_H
#define LIST_H
#include "cutil.h"
#include "tesscallback.h"
/*----------------------------------------------------------------------
T y p e s
----------------------------------------------------------------------*/
/* The empty list: all routines in this package treat (LIST) 0 as the
 * end-of-list / empty-list marker. */
#define NIL_LIST (LIST) 0
/* A cons cell.  "node" points at the datum (callers cast arbitrary
 * pointers into and out of it) and "next" points at the following cell
 * or NIL_LIST.  Both fields are declared list_rec* even though node is
 * really an opaque payload pointer. */
struct list_rec
{
  struct list_rec *node;  // payload ("car")
  struct list_rec *next;  // following cell ("cdr")
};
typedef list_rec *LIST;
/*----------------------------------------------------------------------
M a c r o s
----------------------------------------------------------------------*/
/* Predefinitions */
#define list_rest(l) ((l) ? (l)->next : NIL_LIST)
#define first_node(l) ((l) ? (l)->node : NIL_LIST)
/**********************************************************************
* c o p y f i r s t
*
* Do the appropriate kind of push operation to copy the first node from
* one list to another.
*
**********************************************************************/
#define copy_first(l1,l2) \
(l2=push(l2, first_node(l1)))
/**********************************************************************
* i t e r a t e
*
* Visit each node in the list. Replace the old list with the list
* minus the head. Continue until the list is NIL_LIST.
**********************************************************************/
#define iterate(l) \
for (; (l) != NIL_LIST; (l) = list_rest (l))
/**********************************************************************
* i t e r a t e l i s t
*
* Visit each node in the list (l). Use a local variable (x) to iterate
* through all of the list cells. This macro is identical to iterate
* except that it does not lose the original list.
**********************************************************************/
#define iterate_list(x,l) \
for ((x)=(l); (x)!=0; (x)=list_rest(x))
/**********************************************************************
* j o i n o n
*
* Add another list onto the tail of this one. The list given as an input
* parameter is modified.
**********************************************************************/
#define JOIN_ON(list1,list2) \
((list1) = join ((list1), (list2)))
/**********************************************************************
* p o p o f f
*
* Add a cell onto the front of a list. The list given as an input
* parameter is modified.
**********************************************************************/
#define pop_off(list) \
((list) = pop (list))
/**********************************************************************
* p u s h o n
*
* Add a cell onto the front of a list. The list given as an input
* parameter is modified.
**********************************************************************/
#define push_on(list,thing) \
((list) = push (list, (LIST) (thing)))
/**********************************************************************
* s e c o n d
*
* Return the contents of the second list element.
*
* #define second_node(l) first_node (list_rest (l))
**********************************************************************/
#define second_node(l) \
first_node (list_rest (l))
/**********************************************************************
* s e t r e s t
*
* Change the "next" field of a list element to point to a desired place.
*
* #define set_rest(l,node) l->next = node;
**********************************************************************/
#define set_rest(l,cell)\
((l)->next = (cell))
/**********************************************************************
* t h i r d
*
* Return the contents of the third list element.
*
* #define third(l) first_node (list_rest (list_rest (l)))
**********************************************************************/
#define third(l) \
first_node (list_rest (list_rest (l)))
/*----------------------------------------------------------------------
Public Function Prototypes
----------------------------------------------------------------------*/
int count(LIST var_list);
LIST delete_d(LIST list, void *key, int_compare is_equal);
LIST delete_d(LIST list, void *key,
TessResultCallback2<int, void*, void*>* is_equal);
LIST destroy(LIST list);
void destroy_nodes(LIST list, void_dest destructor);
void insert(LIST list, void *node);
int is_same_node(void *item1, void *item2);
int is_same(void *item1, void *item2);
LIST join(LIST list1, LIST list2);
LIST last(LIST var_list);
void *nth_cell(LIST var_list, int item_num);
LIST pop(LIST list);
LIST push(LIST list, void *element);
LIST push_last(LIST list, void *item);
LIST reverse(LIST list);
LIST reverse_d(LIST list);
LIST s_adjoin(LIST var_list, void *variable, int_compare compare);
LIST search(LIST list, void *key, int_compare is_equal);
LIST search(LIST list, void *key, TessResultCallback2<int, void*, void*>*);
/*
#if defined(__STDC__) || defined(__cplusplus)
# define _ARGS(s) s
#else
# define _ARGS(s) ()
#endif
typedef void (*destructor) _ARGS((LIST l));
typedef LIST (*list_proc) _ARGS((LIST a));
int count
_ARGS((LIST var_list));
LIST delete_d
_ARGS((LIST list,
LIST key,
int_compare is_equal));
LIST destroy
_ARGS((LIST list));
LIST destroy_nodes
_ARGS((LIST list,
void_dest destructor));
void insert
_ARGS((LIST list,
LIST node));
int is_same_node
_ARGS((LIST s1,
LIST s2));
int is_same
_ARGS((LIST s1,
LIST s2));
LIST join
_ARGS((LIST list1,
LIST list2));
LIST last
_ARGS((LIST var_list));
LIST nth_cell
_ARGS((LIST var_list,
int item_num));
LIST pop
_ARGS((LIST list));
LIST push
_ARGS((LIST list,
LIST element));
LIST push_last
_ARGS((LIST list,
LIST item));
LIST reverse
_ARGS((LIST list));
LIST reverse_d
_ARGS((LIST list));
LIST s_adjoin
_ARGS((LIST var_list,
LIST variable,
int_compare compare));
LIST search
_ARGS((LIST list,
LIST key,
int_compare is_equal));
#undef _ARGS
*/
#endif
| 1080228-arabicocr11 | cutil/oldlist.h | C | asf20 | 11,195 |
/* -*-C-*-
********************************************************************************
*
* File: cutil.c
* Description: General utility functions
* Author: Mark Seaman, SW Productivity
* Created: Fri Oct 16 14:37:00 1987
* Modified: Wed Jun 6 16:29:17 1990 (Mark Seaman) marks@hpgrlt
* Language: C
* Package: N/A
* Status: Reusable Software Component
*
* (c) Copyright 1987, Hewlett-Packard Company.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
********************************************************************************
Revision 1.1 2007/02/02 23:39:07 theraysmith
Fixed portability issues
Revision 1.1.1.1 2004/02/20 19:39:06 slumos
Import original HP distribution
* Revision 1.3 90/03/06 15:39:10 15:39:10 marks (Mark Seaman)
* Look for correct file of <malloc.h> or <stdlib.h>
*
* Revision 1.2 90/01/15 13:02:13 13:02:13 marks (Mark Seaman)
* Added memory allocator (*allocate) and (*deallocate)
*
* Revision 1.1 89/10/09 14:58:29 14:58:29 marks (Mark Seaman)
* Initial revision
**/
#include "cutil.h"
#include "tprintf.h"
#include "callcpp.h"
#include <stdlib.h>
#define RESET_COUNT 2000
/**********************************************************************
 * long_rand
 *
 * Return a non-negative long random number strictly less than limit,
 * built from the standard C library rand().  Returns 0 when
 * limit <= 0 (guards the modulo below).  When RAND_MAX is too small to
 * cover the requested range, two rand() results are glued together.
 * (The old unused static 'seed' variable had no effect on the output
 * and has been removed.)
 **********************************************************************/
long long_rand(long limit) {
  if (limit <= 0)
    return 0;  // avoid modulo-by-zero / negative-limit undefined behaviour
#if RAND_MAX < 0x1000000
  // rand() yields too few bits on this platform: combine two calls.
  long num;
  num = (long) rand () << 16;
  num |= rand () & 0xffff;
  long result = num % limit;
  // num may be negative after the shift; fold back into [0, limit).
  while (result < 0) {
    result += limit;
  }
  return result;
#else
  // rand() already covers the range: scale it into [0, limit).
  return (long)((double)limit * rand()/(RAND_MAX + 1.0));
#endif
}
/**********************************************************************
 * open_file
 *
 * Open a file for reading or writing.  If the file name parameter is
 * NULL use stdin (for a read mode) or stdout (otherwise), as the
 * original contract above this function always promised -- the old
 * code passed NULL straight to fopen, which is undefined behaviour.
 * If the file cannot be opened, report the error and exit.
 **********************************************************************/
FILE *open_file(const char *filename, const char *mode) {
  if (filename == NULL) {
    // Honour the documented contract: NULL name means a standard stream.
    return (mode != NULL && mode[0] == 'r') ? stdin : stdout;
  }
  FILE *thisfile = NULL;
  if ((thisfile = fopen (filename, mode)) == NULL) {
    tprintf ("Could not open file, %s\n", filename);
    exit (1);
  }
  return (thisfile);
}
/// Check whether the file exists by attempting to open it for binary
/// read; a successful open implies existence.
bool exists_file(const char *filename) {
  FILE *stream = fopen(filename, "rb");
  if (stream == NULL)
    return false;
  fclose(stream);
  return true;
}
| 1080228-arabicocr11 | cutil/cutil.cpp | C | asf20 | 3,144 |
/* -*-C-*-
###############################################################################
#
# File: list.c
# Description: List processing procedures.
# Author: Mark Seaman, Software Productivity
# Created: Thu Jul 23 13:24:09 1987
# Modified: Thu Dec 22 10:59:52 1988 (Mark Seaman) marks@hpgrlt
# Language: C
# Package: N/A
# Status: Reusable Software Component
#
# (c) Copyright 1987, Hewlett-Packard Company.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
#
################################################################################
* Revision 1.13 90/03/06 15:37:54 15:37:54 marks (Mark Seaman)
* Look for correct file of <malloc.h> or <stdlib.h>
*
* Revision 1.12 90/02/26 17:37:36 17:37:36 marks (Mark Seaman)
* Added pop_off and join_on
*
This file contains a set of general purpose list manipulation routines.
These routines can be used in a wide variety of ways to provide several
different popular data structures. A new list can be created by declaring
a variable of type 'LIST', and can be initialized with the value 'NIL_LIST'.
All of these routines check for the NIL_LIST condition before dereferencing
pointers. NOTE: There is a users' manual available in printed form from
Mark Seaman at (303) 350-4492 at Greeley Hard Copy.
To implement a STACK use:
push to add to the Stack l = push (l, (LIST) "jim");
pop to remove items from the Stack l = pop (l);
first_node to access the head name = (char *) first_node (l);
To implement a QUEUE use:
push_last to add to the Queue l = push_last (l, (LIST) "jim");
pop remove items from the Queue l = pop (l);
first_node to access the head name = (char *) first_node (l);
To implement LISP like functions use:
first_node CAR x = (int) first_node (l);
rest CDR l = list_rest (l);
push CONS l = push (l, (LIST) this);
last LAST x = last (l);
concat APPEND l = concat (r, s);
count LENGTH x = count (l);
search MEMBER if (search (l, x, NULL))
To implement SETS use:
adjoin l = adjoin (l, x);
set_union l = set_union (r, s);
intersection l = intersection (r, s);
set_difference l = set_difference (r, s);
delete l = delete (s, x, NULL);
search if (search (l, x, NULL))
To Implement Associated LISTS use:
lpush l = lpush (l, p);
assoc s = assoc (l, x);
adelete l = adelete (l, x);
The following rules of closure exist for the functions provided.
a = first_node (push (a, b))
b = list_rest (push (a, b))
a = push (pop (a), a)) For all a <> NIL_LIST
a = reverse (reverse (a))
******************************************************************************/
#include "oldlist.h"
#include "structures.h"
#include <stdio.h>
#if MAC_OR_DOS
#include <stdlib.h>
#else
#include "freelist.h"
#endif
/*----------------------------------------------------------------------
M a c r o s
----------------------------------------------------------------------*/
#define add_on(l,x) l = push (l,first_node (x))
#define next_one(l) l = list_rest (l)
/*----------------------------------------------------------------------
F u n c t i o n s
----------------------------------------------------------------------*/
/**********************************************************************
 * c o u n t
 *
 * Walk the list from head to tail and return the number of cells in
 * it.  NIL_LIST counts as zero.
 **********************************************************************/
int count(LIST var_list) {
  int n = 0;
  for (; var_list != NIL_LIST; var_list = list_rest (var_list))
    ++n;
  return n;
}
/**********************************************************************
 * d e l e t e   d
 *
 * Delete all the elements out of the current list that match the key.
 * This operation destroys the original list: surviving cells are
 * re-linked in place into the result and matching cells are freed via
 * pop().  The caller supplies a routine that compares each node to the
 * key and returns a non-zero value when they match.  If NULL is
 * supplied for is_equal, the is_same (string-compare) routine is used.
 * (The old comment said is_key; the code below uses is_same.)
 **********************************************************************/
LIST delete_d(LIST list, void *key, int_compare is_equal) {
  LIST result = NIL_LIST;    // head of the surviving list
  LIST last_one = NIL_LIST;  // tail cell of the surviving list
  if (is_equal == NULL)
    is_equal = is_same;
  while (list != NIL_LIST) {
    if (!(*is_equal) (first_node (list), key)) {
      // Keeper: detach this cell from the input and append it.
      if (last_one == NIL_LIST) {
        // First survivor becomes the head of the result.
        last_one = list;
        list = list_rest (list);
        result = last_one;
        set_rest(last_one, NIL_LIST);
      }
      else {
        // Link onto the current tail, then advance the tail.
        set_rest(last_one, list);
        last_one = list;
        list = list_rest (list);
        set_rest(last_one, NIL_LIST);
      }
    }
    else {
      // Match: free this cell and move on.
      list = pop (list);
    }
  }
  return (result);
}
/* Callback flavour of delete_d: identical in-place re-linking logic to
 * the function-pointer version above, but the match test is a
 * TessResultCallback2 and there is no NULL fallback -- the callback
 * must be supplied by the caller. */
LIST delete_d(LIST list, void *key,
              TessResultCallback2<int, void*, void*>* is_equal) {
  LIST result = NIL_LIST;    // head of the surviving list
  LIST last_one = NIL_LIST;  // tail cell of the surviving list
  while (list != NIL_LIST) {
    if (!(*is_equal).Run (first_node (list), key)) {
      // Keeper: detach this cell from the input and append it.
      if (last_one == NIL_LIST) {
        // First survivor becomes the head of the result.
        last_one = list;
        list = list_rest (list);
        result = last_one;
        set_rest(last_one, NIL_LIST);
      }
      else {
        // Link onto the current tail, then advance the tail.
        set_rest(last_one, list);
        last_one = list;
        list = list_rest (list);
        set_rest(last_one, NIL_LIST);
      }
    }
    else {
      // Match: free this cell and move on.
      list = pop (list);
    }
  }
  return (result);
}
/**********************************************************************
 * d e s t r o y
 *
 * Release every cell of the list back to the free list and return
 * NIL_LIST so the caller can reset its variable in one step.  Only
 * the cells are freed, not the node payloads (see destroy_nodes).
 **********************************************************************/
LIST destroy(LIST list) {
  while (list != NIL_LIST) {
    LIST tail = list_rest (list);
    free_cell(list);
    list = tail;
  }
  return NIL_LIST;
}
/**********************************************************************
 * d e s t r o y   n o d e s
 *
 * Apply a destructor to every node payload, releasing each list cell
 * as it goes.  A NULL destructor defaults to memfree.
 **********************************************************************/
void destroy_nodes(LIST list, void_dest destructor) {
  if (destructor == NULL)
    destructor = memfree;
  for (; list != NIL_LIST; list = pop (list))
    (*destructor) (first_node (list));
}
/**********************************************************************
 * i n s e r t
 *
 * Create a list element and rearrange the pointers so that the new
 * node becomes the first element of 'list', in place.  A fresh cell is
 * linked in *after* the head and then the two cells' node values are
 * swapped, so callers holding 'list' see the new element first without
 * the head pointer itself changing.  No-op when list is NIL_LIST.
 **********************************************************************/
void insert(LIST list, void *node) {
  LIST element;
  if (list != NIL_LIST) {
    element = push (NIL_LIST, node);       /* fresh cell holding node */
    set_rest (element, list_rest (list));  /* splice in after the head */
    set_rest(list, element);
    /* Swap the payloads of the head cell and the new second cell. */
    node = first_node (list);
    list->node = first_node (list_rest (list));
    list->next->node = (LIST) node;
  }
}
/**********************************************************************
 * i s s a m e n o d e
 *
 * Pointer-identity comparison: return TRUE (non-zero) when both items
 * are the very same address, FALSE otherwise.
 **********************************************************************/
int is_same_node(void *item1, void *item2) {
  return (item1 == item2) ? 1 : 0;
}
/**********************************************************************
 * i s s a m e
 *
 * String-equality comparison: return TRUE (non-zero) when the two
 * items, viewed as C strings, hold identical text; FALSE otherwise.
 **********************************************************************/
int is_same(void *item1, void *item2) {
  return strcmp ((char *) item1, (char *) item2) == 0;
}
/**********************************************************************
 * j o i n
 *
 * Destructively append list2 to list1 (unlike concat, which builds a
 * new list).  Returns the combined list.
 **********************************************************************/
LIST join(LIST list1, LIST list2) {
  if (list1 != NIL_LIST) {
    set_rest (last (list1), list2);
    list2 = list1;
  }
  return list2;
}
/**********************************************************************
 * l a s t
 *
 * Return the final cell of the list (a LIST, not the node payload).
 **********************************************************************/
LIST last(LIST var_list) {
  for (; list_rest (var_list) != NIL_LIST; var_list = list_rest (var_list))
    ;
  return var_list;
}
/**********************************************************************
 * n t h c e l l
 *
 * Return the item_num'th cell of the list (0-based).  If item_num is
 * past the end, the iterate loop exhausts the cursor and the final
 * value of var_list is returned (NIL_LIST, assuming the standard
 * iterate macro from oldlist.h - confirm there).
 **********************************************************************/
void *nth_cell(LIST var_list, int item_num) {
  int x = 0;                    /* 0-based position of the current cell */
  iterate(var_list) {
    if (x++ == item_num)
      return (var_list);
  }
  return (var_list);
}
/**********************************************************************
 * p o p
 *
 * Return the list with the first element removed.  Destroy the space
 * that it occupied in the list.
 *
 * Fix: the remainder is now read only after the NIL_LIST check, so the
 * routine no longer relies on list_rest() tolerating an empty list.
 * pop(NIL_LIST) still returns NIL_LIST.
 **********************************************************************/
LIST pop(LIST list) {
  LIST temp = NIL_LIST;

  if (list != NIL_LIST) {
    temp = list_rest (list);
    free_cell(list);
  }
  return (temp);
}
/**********************************************************************
 * p u s h
 *
 * Prepend 'element' to 'list' in a freshly allocated cell and return
 * the new head of the list.
 **********************************************************************/
LIST push(LIST list, void *element) {
  LIST cell = new_cell ();
  cell->node = (LIST) element;
  set_rest(cell, list);
  return cell;
}
/**********************************************************************
 * p u s h l a s t
 *
 * Append 'item' in a fresh cell at the tail of 'list'.  Returns the
 * head of the (possibly newly created) list.
 **********************************************************************/
LIST push_last(LIST list, void *item) {
  if (list == NIL_LIST)
    return push (NIL_LIST, item);
  last (list)->next = push (NIL_LIST, item);
  return list;
}
/**********************************************************************
 * r e v e r s e
 *
 * Create a new list with the elements reversed.  The old list is not
 * destroyed.  copy_first (see oldlist.h) presumably pushes the current
 * head of 'list' onto 'newlist', so a front-to-back walk of the input
 * builds the copy back-to-front - confirm against the macro.
 **********************************************************************/
LIST reverse(LIST list) {
  LIST newlist = NIL_LIST;
  iterate (list) copy_first (list, newlist);
  return (newlist);
}
/**********************************************************************
 * r e v e r s e d
 *
 * Build a reversed copy of 'list', then release the input list's
 * cells, so only the returned list remains valid.
 **********************************************************************/
LIST reverse_d(LIST list) {
  LIST reversed = reverse (list);
  destroy(list);
  return reversed;
}
/**********************************************************************
 * s a d j o i n
 *
 * Adjoin an element to a sorted list, keeping it in ascending
 * 'compare' order.  An element that compares equal to an existing one
 * is NOT added.  The original list is modified.  Returns the modified
 * list.
 **********************************************************************/
LIST s_adjoin(LIST var_list, void *variable, int_compare compare) {
  LIST l;
  int result;
  if (compare == NULL)
    /* NOTE(review): casting strcmp to int_compare (void* parameters)
     * is not strictly portable, but is this file's long-standing
     * default for string lists. */
    compare = (int_compare) strcmp;
  l = var_list;
  iterate(l) {
    result = (*compare) (variable, first_node (l));
    if (result == 0)            /* duplicate: leave the list unchanged */
      return (var_list);
    else if (result < 0) {      /* first larger item: insert before it */
      insert(l, variable);
      return (var_list);
    }
  }
  return (push_last (var_list, variable));  /* largest of all: append */
}
/**********************************************************************
 * s e a r c h
 *
 * Search list, return NIL_LIST if not found. Return the list starting
 * from the item if found.  The compare routine "is_equal" is passed in
 * as the third parameter to this routine.  If the value NULL is
 * supplied for is_equal, the is_same (string equality) routine is
 * used.
 **********************************************************************/
LIST search(LIST list, void *key, int_compare is_equal) {
  if (is_equal == NULL)
    is_equal = is_same;
  /* Return the sub-list whose head matches the key. */
  iterate (list) if ((*is_equal) (first_node (list), key))
    return (list);
  return (NIL_LIST);
}
/**********************************************************************
 * s e a r c h  (callback flavour)
 *
 * Same as search() above, but the match test is a TessResultCallback2
 * whose Run(node, key) returns non-zero on a match.  Returns the
 * sub-list starting at the first match, or NIL_LIST if none.
 **********************************************************************/
LIST search(LIST list, void *key, TessResultCallback2<int, void*, void*>* is_equal) {
  iterate (list) if ((*is_equal).Run(first_node (list), key))
    return (list);
  return (NIL_LIST);
}
| 1080228-arabicocr11 | cutil/oldlist.cpp | C | asf20 | 13,449 |
/* -*-C-*-
********************************************************************************
*
* File: globals.h (Formerly globals.h)
* Description: Global Variables for Wise Owl
* Author: Mark Seaman, OCR Technology
* Created: Thu Dec 21 11:38:36 1989
* Modified: Thu Jan 4 17:13:00 1990 (Mark Seaman) marks@hpgrlt
* Language: C
* Package: N/A
* Status: Experimental (Do Not Distribute)
*
* (c) Copyright 1989, Hewlett-Packard Company.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
*********************************************************************************/
#ifndef GLOBALS_H
#define GLOBALS_H
#include "const.h"
#include "unicharset.h"
#include "strngs.h"
#include <stdio.h>
#endif
| 1080228-arabicocr11 | cutil/globals.h | C | asf20 | 1,258 |
/* -*-C-*-
********************************************************************************
*
* File: freelist.h (Formerly freelist.h)
* Description: Memory allocator
* Author: Mark Seaman, OCR Technology
* Created: Wed May 30 13:50:28 1990
* Modified: Mon Dec 10 15:15:25 1990 (Mark Seaman) marks@hpgrlt
* Language: C
* Package: N/A
* Status: Experimental (Do Not Distribute)
*
* (c) Copyright 1990, Hewlett-Packard Company.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
*********************************************************************************/
#ifndef FREELIST_H
#define FREELIST_H
/*----------------------------------------------------------------------
I n c l u d e s
----------------------------------------------------------------------*/
#include <stdio.h>
/*----------------------------------------------------------------------
F u n c t i o n s
----------------------------------------------------------------------*/
int *memalloc(int size);
int *memrealloc(void *ptr, int size, int oldsize);
void memfree(void *element);
#endif
| 1080228-arabicocr11 | cutil/freelist.h | C | asf20 | 1,647 |
/******************************************************************************
** Filename: bitvec.c
** Purpose: Routines for manipulating bit vectors
** Author: Dan Johnson
** History: Thu Mar 15 10:37:27 1990, DSJ, Created.
**
** (c) Copyright Hewlett-Packard Company, 1988.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
******************************************************************************/
/*-----------------------------------------------------------------------------
Include Files and Type Defines
-----------------------------------------------------------------------------*/
#include "bitvec.h"
#include <stdio.h>
#include "emalloc.h"
#include "freelist.h"
#include "tprintf.h"
/*-----------------------------------------------------------------------------
Public Code
-----------------------------------------------------------------------------*/
/*---------------------------------------------------------------------------*/
/**
 * Grow the specified bit vector so it can hold at least NewNumBits
 * bits, preserving its current contents (delegates to Erealloc).
 *
 * @param Vector bit vector to be expanded
 * @param NewNumBits new size of bit vector
 *
 * @return New expanded bit vector.
 * @note History: Fri Nov 16 10:11:16 1990, DSJ, Created.
 */
BIT_VECTOR ExpandBitVector(BIT_VECTOR Vector, int NewNumBits) {
  size_t new_bytes = sizeof(Vector[0]) * WordsInVectorOfSize(NewNumBits);
  return (BIT_VECTOR) Erealloc(Vector, new_bytes);
} /* ExpandBitVector */
/*---------------------------------------------------------------------------*/
/**
 * Free a bit vector that was allocated with NewBitVector or
 * ExpandBitVector.  A NULL BitVector is silently ignored.
 * (Older revisions also maintained a global BitVectorCount and printed
 * it to stderr when passed NULL; that bookkeeping no longer exists.)
 *
 * @param BitVector bit vector to be freed
 *
 * @note Exceptions: none
 * @note History: Tue Oct 23 16:46:09 1990, DSJ, Created.
 */
void FreeBitVector(BIT_VECTOR BitVector) {
  if (BitVector) {
    Efree(BitVector);
  }
} /* FreeBitVector */
/*---------------------------------------------------------------------------*/
/**
 * Allocate and return a new bit vector large enough to
 * hold the specified number of bits.  The storage is uninitialized;
 * allocation failure is handled inside Emalloc.
 *
 * @param NumBits number of bits in new bit vector
 *
 * @return New bit vector.
 * @note Exceptions: none
 * @note History: Tue Oct 23 16:51:27 1990, DSJ, Created.
 */
BIT_VECTOR NewBitVector(int NumBits) {
  return ((BIT_VECTOR) Emalloc(sizeof(uinT32) *
                               WordsInVectorOfSize(NumBits)));
} /* NewBitVector */
| 1080228-arabicocr11 | cutil/bitvec.cpp | C++ | asf20 | 3,291 |
/******************************************************************************
** Filename: emalloc.h
** Purpose: Definition of memory allocation routines.
** Author: Dan Johnson
** History: 4/3/89, DSJ, Created.
**
** (c) Copyright Hewlett-Packard Company, 1988.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
******************************************************************************/
#ifndef EMALLOC_H
#define EMALLOC_H
/**----------------------------------------------------------------------------
Include Files and Type Defines
----------------------------------------------------------------------------**/
#include "host.h"
#include "callcpp.h"
#define NOTENOUGHMEMORY 2000
#define ILLEGALMALLOCREQUEST 2001
/**----------------------------------------------------------------------------
Public Function Prototypes
----------------------------------------------------------------------------**/
void *Emalloc(int Size);
void *Erealloc(void *ptr, int size);
void Efree(void *ptr);
/**----------------------------------------------------------------------------
Global Data Definitions and Declarations
----------------------------------------------------------------------------**/
#endif
| 1080228-arabicocr11 | cutil/emalloc.h | C | asf20 | 1,759 |
///////////////////////////////////////////////////////////////////////
// File: cutil.h
// Description: cutil class.
// Author: Samuel Charron
//
// (C) Copyright 2006, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#ifndef TESSERACT_CUTIL_CUTIL_CLASS_H__
#define TESSERACT_CUTIL_CUTIL_CLASS_H__
#include "ccutil.h"
#include "const.h"
#include "strngs.h"
namespace tesseract {
// CUtil layers the cutil module's parameter reading on top of CCUtil.
class CUtil : public CCUtil {
 public:
  CUtil();
  ~CUtil();
  // Reads run-time parameters from 'filename'.  When 'global_only' is
  // true, presumably only global parameters are applied - the
  // implementation is in cutil_class.cpp (not visible here); confirm
  // the exact semantics there.
  void read_variables(const char *filename, bool global_only);
};
} // namespace tesseract
#endif // TESSERACT_CUTIL_CUTIL_CLASS_H__
| 1080228-arabicocr11 | cutil/cutil_class.h | C++ | asf20 | 1,178 |
AM_CPPFLAGS += -I$(top_srcdir)/ccutil -I$(top_srcdir)/viewer
if VISIBILITY
AM_CPPFLAGS += -DTESS_EXPORTS \
-fvisibility=hidden -fvisibility-inlines-hidden
endif
noinst_HEADERS = \
bitvec.h callcpp.h const.h cutil.h cutil_class.h danerror.h efio.h \
emalloc.h freelist.h globals.h listio.h \
oldlist.h structures.h
if !USING_MULTIPLELIBS
noinst_LTLIBRARIES = libtesseract_cutil.la
else
lib_LTLIBRARIES = libtesseract_cutil.la
libtesseract_cutil_la_LDFLAGS = -version-info $(GENERIC_LIBRARY_VERSION)
libtesseract_cutil_la_LIBADD = \
../ccutil/libtesseract_ccutil.la \
../viewer/libtesseract_viewer.la
endif
libtesseract_cutil_la_SOURCES = \
bitvec.cpp callcpp.cpp cutil.cpp cutil_class.cpp danerror.cpp efio.cpp \
emalloc.cpp freelist.cpp listio.cpp \
oldlist.cpp structures.cpp
| 1080228-arabicocr11 | cutil/Makefile.am | Makefile | asf20 | 814 |
/******************************************************************************
** Filename:
emalloc.c
** Purpose:
Routines for trapping memory allocation errors.
** Author:
Dan Johnson
HP-UX 6.2
HP-UX 6.2
** History:
4/3/89, DSJ, Created.
**
** (c) Copyright Hewlett-Packard Company, 1988.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
******************************************************************************/
/**----------------------------------------------------------------------------
Include Files and Type Defines
----------------------------------------------------------------------------**/
#include "emalloc.h"
#include "danerror.h"
#include <stdlib.h>
/**----------------------------------------------------------------------------
Public Code
----------------------------------------------------------------------------**/
/*---------------------------------------------------------------------------*/
/**
 * Allocate Size bytes from the heap.
 *
 * A non-positive Size is trapped as ILLEGALMALLOCREQUEST and an
 * allocation failure as NOTENOUGHMEMORY, both via DoError.  DoError
 * terminates the program, so the NULL result after a failed malloc is
 * returned defensively only.
 *
 * @param Size number of bytes of memory to be allocated
 * @return Pointer to allocated memory.
 * @note History: 4/3/89, DSJ, Created.
 */
void *Emalloc(int Size) {
  if (Size <= 0)
    DoError (ILLEGALMALLOCREQUEST, "Illegal malloc request size");
  void *Buffer = malloc ((size_t) Size);
  if (Buffer == NULL)
    DoError (NOTENOUGHMEMORY, "Not enough memory");
  return (Buffer);
} /* Emalloc */
/*---------------------------------------------------------------------------*/
/*
** Resize the block at 'ptr' to 'size' bytes via realloc.
** A negative size, or a zero size together with a NULL ptr, is trapped
** as ILLEGALMALLOCREQUEST; a failed allocation (for non-zero size) is
** trapped as NOTENOUGHMEMORY.  DoError terminates the program, so the
** returns after a trap are defensive only.
*/
void *Erealloc(void *ptr, int size) {
  void *Buffer;
  if (size < 0 || (size == 0 && ptr == NULL))
    DoError (ILLEGALMALLOCREQUEST, "Illegal realloc request size");
  Buffer = (void *) realloc (ptr, size);
  if (Buffer == NULL && size != 0)
    DoError (NOTENOUGHMEMORY, "Not enough memory");
  return (Buffer);
} /* Erealloc */
/*---------------------------------------------------------------------------*/
/*
** Free a block obtained from Emalloc/Erealloc.  Unlike plain free(),
** a NULL pointer is treated as a caller bug and trapped via DoError.
*/
void Efree(void *ptr) {
  if (ptr == NULL)
    DoError (ILLEGALMALLOCREQUEST, "Attempted to free NULL ptr");
  free(ptr);
} /* Efree */
| 1080228-arabicocr11 | cutil/emalloc.cpp | C++ | asf20 | 3,264 |
/******************************************************************************
** Filename: efio.c
** Purpose: Utility I/O routines
** Author: Dan Johnson
** History: 5/21/89, DSJ, Created.
**
** (c) Copyright Hewlett-Packard Company, 1988.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
******************************************************************************/
/**----------------------------------------------------------------------------
Include Files and Type Defines
----------------------------------------------------------------------------**/
#include "efio.h"
#include "danerror.h"
#include <stdio.h>
#include <string.h>
#define MAXERRORMESSAGE 256
/**----------------------------------------------------------------------------
Public Code
----------------------------------------------------------------------------**/
/*---------------------------------------------------------------------------*/
FILE *Efopen(const char *Name, const char *Mode) {
  /*
  ** Parameters:
  **   Name    name of file to be opened
  **   Mode    fopen() mode string
  ** Operation:
  **   Attempt to open the specified file in the specified mode.  On
  **   failure an error message is built and FOPENERROR is trapped via
  **   DoError (which terminates, so the NULL return is defensive only).
  ** Return:
  **   Pointer to open file.
  ** History:
  **   5/21/89, DSJ, Created.
  */
  FILE *File;
  char ErrorMessage[MAXERRORMESSAGE];

  File = fopen (Name, Mode);
  if (File == NULL) {
    /* snprintf truncates instead of overflowing the fixed buffer when
     * Name is longer than MAXERRORMESSAGE, which sprintf could do. */
    snprintf (ErrorMessage, sizeof ErrorMessage, "Unable to open %s", Name);
    DoError(FOPENERROR, ErrorMessage);
    return (NULL);
  }
  else
    return (File);
} /* Efopen */
| 1080228-arabicocr11 | cutil/efio.cpp | C++ | asf20 | 2,299 |
/* -*-C-*-
********************************************************************************
*
* File: structures.c (Formerly structures.c)
* Description: Allocate all the different types of structures.
* Author: Mark Seaman, OCR Technology
* Created: Wed May 30 10:27:26 1990
* Modified: Mon Jul 15 10:39:18 1991 (Mark Seaman) marks@hpgrlt
* Language: C
* Package: N/A
* Status: Experimental (Do Not Distribute)
*
* (c) Copyright 1990, Hewlett-Packard Company.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
*********************************************************************************/
/*----------------------------------------------------------------------
I n c l u d e s
----------------------------------------------------------------------*/
#include "structures.h"
#include <stdio.h>
/*----------------------------------------------------------------------
F u n c t i o n s
----------------------------------------------------------------------*/
makestructure(new_cell, free_cell, list_rec);
| 1080228-arabicocr11 | cutil/structures.cpp | C | asf20 | 1,599 |
/**************************************************************************
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
**************************************************************************/
#include "freelist.h"
#include <stdlib.h>
// With improvements in OS memory allocators, internal memory management is
// no longer required, so these functions all map to their malloc-family
// equivalents.
// Allocate 'size' bytes; a thin wrapper over malloc kept for the
// legacy memalloc/memfree API.
int *memalloc(int size) {
  void *block = malloc(static_cast<size_t>(size));
  return static_cast<int*>(block);
}
// Resize 'ptr' to 'size' bytes.  'oldsize' is retained only for API
// compatibility: realloc preserves the old contents by itself.
int *memrealloc(void *ptr, int size, int oldsize) {
  void *grown = realloc(ptr, static_cast<size_t>(size));
  return static_cast<int*>(grown);
}
// Release a block obtained from memalloc/memrealloc.  Maps straight to
// free(), so a NULL element is a harmless no-op.
void memfree(void *element) {
  free(element);
}
| 1080228-arabicocr11 | cutil/freelist.cpp | C++ | asf20 | 1,183 |
/******************************************************************************
** Filename: bitvec.h
** Purpose: Routines for manipulating bit vectors
** Author: Dan Johnson
** History: Wed Mar 7 17:52:45 1990, DSJ, Created.
**
** (c) Copyright Hewlett-Packard Company, 1988.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
******************************************************************************/
#ifndef BITVEC_H
#define BITVEC_H
#include "host.h"
/*-----------------------------------------------------------------------------
Include Files and Type Defines
-----------------------------------------------------------------------------*/
// TODO(rays) Rename BITSINLONG to BITSINuinT32, and use sizeof.
#define BITSINLONG 32 /**< no of bits in a long */
typedef uinT32 *BIT_VECTOR;
/*-----------------------------------------------------------------------------
Public Function Prototypes
-----------------------------------------------------------------------------*/
/* NOTE: the 'register' storage class was dropped from the macros below:
 * it was never more than an optimizer hint and is ill-formed in C++17. */
#define zero_all_bits(array,length) \
{\
  int index; /*temporary index*/\
\
  for (index=0;index<length;index++)\
    array[index]=0; /*zero all bits*/\
}
#define set_all_bits(array,length) \
{\
  int index; /*temporary index*/\
\
  for (index=0;index<length;index++)\
    array[index]= ~0; /*set all bits*/\
}
#define copy_all_bits(source,dest,length) \
{\
  int index; /*temporary index*/\
\
  for (index=0;index<length;index++)\
    dest[index]=source[index]; /*copy all bits*/\
}
#define SET_BIT(array,bit) (array[bit/BITSINLONG]|=1<<(bit&(BITSINLONG-1)))
#define reset_bit(array,bit) (array[bit/BITSINLONG]&=~(1<<(bit&(BITSINLONG-1))))
#define test_bit(array,bit) (array[bit/BITSINLONG] & (1<<(bit&(BITSINLONG-1))))
#define WordsInVectorOfSize(NumBits) \
  (((NumBits) + BITSINLONG - 1) / BITSINLONG)
/*--------------------------------------------------------------------------
Public Function Prototypes
--------------------------------------------------------------------------*/
BIT_VECTOR ExpandBitVector(BIT_VECTOR Vector, int NewNumBits);
void FreeBitVector(BIT_VECTOR BitVector);
BIT_VECTOR NewBitVector(int NumBits);
#endif
| 1080228-arabicocr11 | cutil/bitvec.h | C | asf20 | 2,946 |
/******************************************************************************
** Filename: danerror.c
** Purpose: Routines for managing error trapping
** Author: Dan Johnson
** History: 3/17/89, DSJ, Created.
**
** (c) Copyright Hewlett-Packard Company, 1988.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
******************************************************************************/
/**----------------------------------------------------------------------------
Include Files and Type Defines
----------------------------------------------------------------------------**/
#include "host.h"
#include "danerror.h"
#include "tprintf.h"
#include "globaloc.h"
#ifdef __UNIX__
#include "assert.h"
#endif
#include <stdio.h>
/*---------------------------------------------------------------------------*/
void DoError(int Error, const char *Message) {
  /*
  ** Parameters:
  **   Error      error number being reported (currently unused; kept
  **              for the historic interface)
  **   Message    string printed as the error message; NULL prints
  **              nothing
  ** Operation:
  **   Print the message through tprintf, then terminate via
  **   err_exit().  (The setjmp-based error-trap stack described in
  **   older revisions - ErrorTrapStack / CurrentTrapDepth - no longer
  **   exists; this routine always exits.)
  ** Return:
  **   None - err_exit() is expected not to return.
  ** History:
  **   4/3/89, DSJ, Created.
  */
  if (Message != NULL) {
    tprintf("\nError: %s!\n", Message);
  }
  err_exit();
} /* DoError */
| 1080228-arabicocr11 | cutil/danerror.cpp | C++ | asf20 | 2,155 |
/* -*-C-*-
********************************************************************************
*
* File: cutil.h
* Description: General utility functions
* Author: Mark Seaman, SW Productivity
* Created: Fri Oct 16 14:37:00 1987
* Modified: Wed Dec 5 15:40:26 1990 (Mark Seaman) marks@hpgrlt
* Language: C
* Package: N/A
* Status: Reusable Software Component
*
* (c) Copyright 1987, Hewlett-Packard Company.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
********************************************************************************
Revision 1.1 2007/02/02 23:39:07 theraysmith
Fixed portability issues
Revision 1.1.1.1 2004/02/20 19:39:06 slumos
Import original HP distribution
*/
#ifndef CUTILH
#define CUTILH
/*----------------------------------------------------------------------
I n c l u d e s
----------------------------------------------------------------------*/
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include "host.h"
#include "tprintf.h"
/*----------------------------------------------------------------------
T y p e s
----------------------------------------------------------------------*/
/* Default boolean constants for pre-C99-style code. */
#ifndef TRUE
#define TRUE 1
#endif
#ifndef FALSE
#define FALSE 0
#endif
/* Maximum length (including the terminator) of a text line handled by
   the utility routines (e.g. read_list in listio.cpp). */
#define CHARS_PER_LINE 500
/* _ARGS(s) expands to a real prototype parameter list on ANSI C / C++
   compilers and to an empty list on old K&R compilers. */
#if defined(__STDC__) || defined(__cplusplus) || MAC_OR_DOS
# define _ARGS(s) s
#else
# define _ARGS(s) ()
#endif

//typedef int (*int_proc) (void);
/* Generic callback typedefs used throughout the cutil library. */
typedef void (*void_proc) (...);
typedef void *(*void_star_proc) _ARGS ((...));
typedef int (*int_void) (void);
typedef void (*void_void) (void);
typedef int (*int_compare) (void *, void *);
typedef void (*void_dest) (void *);
/*----------------------------------------------------------------------
              M a c r o s
----------------------------------------------------------------------*/
/**********************************************************************
 * new_line
 *
 * Print a new line character on stdout.
 **********************************************************************/
#define new_line() \
  tprintf("\n")
/**********************************************************************
 * print_string
 *
 * Print a string on stdout.
 **********************************************************************/
#define print_string(str) \
  printf ("%s\n", str)
/**********************************************************************
 * strfree
 *
 * Release the memory that was reserved for a string by strsave /
 * alloc_string.
 **********************************************************************/
#define strfree(s) (free_string(s))
/**********************************************************************
 * strsave
 *
 * Reserve a spot in memory for the string to be stored. Copy the string
 * to it and return the result. Returns NULL for a NULL input.
 **********************************************************************/
#define strsave(s) \
  ((s) != NULL ? \
  ((char*) strcpy (alloc_string(strlen(s)+1), s)) : \
  (NULL))
/*----------------------------------------------------------------------
              F u n c t i o n s
----------------------------------------------------------------------*/
/* Pseudo-random number helper (implemented in util.c). */
long long_rand(long limit);
/* Opens filename with the given stdio mode; returns NULL on failure
   (callers such as read_list test the result against NULL). */
FILE *open_file(const char *filename, const char *mode);
/* Returns true if the named file exists/is accessible. */
bool exists_file(const char *filename);
/* util.c
long long_rand
  _ARGS ((long limit));
FILE *open_file
  _ARGS((char *filename,
  char *mode));
#undef _ARGS
*/
#include "cutil_class.h"
#endif
| 1080228-arabicocr11 | cutil/cutil.h | C | asf20 | 4,058 |
/* -*-C-*-
################################################################################
#
# File:         listio.h
# Description:  List I/O processing procedures.
# Author:       Mark Seaman, Software Productivity
# Created:      Thu Jul 23 13:24:09 1987
# Modified:     Mon Oct 16 11:38:52 1989 (Mark Seaman) marks@hpgrlt
# Language:     C
# Package:      N/A
# Status:       Reusable Software Component
#
# (c) Copyright 1987, Hewlett-Packard Company.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
#
################################################################################
* Revision 1.5  89/06/27  11:56:00  11:56:00  marks (Mark Seaman)
* Fixed MAC_OR_DOS bug
*
This file contains the interface definitions to a set of general purpose
list I/O routines.
***********************************************************************/
#ifndef LISTIO_H
#define LISTIO_H

#include <stdio.h>
#include "oldlist.h"
/*----------------------------------------------------------------------------
          Public Function Prototypes
--------------------------------------------------------------------------*/
/* Reads the non-blank lines of the named file into a LIST of strings
   (in file order). Returns NIL_LIST if the file cannot be opened. */
LIST read_list(const char *filename);
#endif
| 1080228-arabicocr11 | cutil/listio.h | C | asf20 | 1,662 |
/* -*-C-*-
################################################################################
#
# File: listio.c
# Description: List I/O processing procedures.
# Author: Mark Seaman, Software Productivity
# Created: Thu Jul 23 13:24:09 1987
# Modified: Fri May 17 17:33:30 1991 (Mark Seaman) marks@hpgrlt
# Language: C
# Package: N/A
# Status: Reusable Software Component
#
# (c) Copyright 1987, Hewlett-Packard Company.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
#
################################################################################
This file contains the implementations of a set of general purpose
list I/O routines. For the interface definitions look in the file
"listio.h".
---------------------------------------------------------------------------*/
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include "listio.h"
/*---------------------------------------------------------------------------
Public Function Code
---------------------------------------------------------------------------*/
/*************************************************************************
* R E A D L I S T
*
* Read a list of strings from a file. Return the string list to the
* caller.
*************************************************************************/
/*************************************************************************
 *  R E A D   L I S T
 *
 * Read a list of strings from a file, one string per line. Trailing
 * newlines are stripped and blank lines are skipped. Returns the list
 * in file order, or NIL_LIST if the file could not be opened.
 *************************************************************************/
LIST read_list(const char *filename) {
  FILE *infile;
  char s[CHARS_PER_LINE];
  LIST list;

  if ((infile = open_file (filename, "r")) == NULL)
    return (NIL_LIST);

  list = NIL_LIST;
  while (fgets (s, CHARS_PER_LINE, infile) != NULL) {
    // fgets NUL-terminates within the buffer, but be defensive.
    s[CHARS_PER_LINE - 1] = '\0';
    // Compute the length once instead of re-scanning the string for
    // every test, as the original code did.
    size_t len = strlen (s);
    if (len > 0 && s[len - 1] == '\n')
      s[--len] = '\0';
    // Skip lines that are empty (or were just a bare newline).
    if (len > 0)
      list = push (list, (LIST) strsave (s));
  }
  fclose(infile);
  // Lines were pushed onto the front of the list; reverse (destructively)
  // to restore file order.
  return (reverse_d (list));
}
| 1080228-arabicocr11 | cutil/listio.cpp | C | asf20 | 2,358 |
## run autogen.sh to create Makefile.in from this file
ACLOCAL_AMFLAGS = -I m4

## Optional training-tools subtree: only wired in when configure was run
## with training support enabled.
if ENABLE_TRAINING
TRAINING_SUBDIR = training
training:
	@cd "$(top_builddir)/training" && $(MAKE)
training-install:
	@cd "$(top_builddir)/training" && $(MAKE) install
clean-local:
	@cd "$(top_builddir)/training" && $(MAKE) clean
else
training:
	@echo "Need to reconfigure project, so there are no errors"
endif

.PHONY: install-langs ScrollView.jar install-jars $(TRAINING_SUBDIR)

## Build order matters: low-level libraries first, then engines, apps,
## and finally data and documentation.
SUBDIRS = ccutil viewer cutil opencl ccstruct dict classify wordrec neural_networks/runtime textord cube ccmain api . tessdata doc

EXTRA_DIST = ReleaseNotes \
	aclocal.m4 config configure.ac autogen.sh contrib \
	tesseract.pc.in $(TRAINING_SUBDIR) java doc testing

DIST_SUBDIRS = $(SUBDIRS) $(TRAINING_SUBDIR)

uninstall-hook:
	rm -rf $(DESTDIR)$(includedir)

dist-hook:
# Need to remove .svn directories from directories
# added using EXTRA_DIST. $(distdir)/tessdata would in
# theory suffice.
	rm -rf `find $(distdir) -name .svn`
	rm -rf `find $(distdir) -name .git`
	rm -rf `find $(distdir) -name .deps`
	rm -rf `find $(distdir) -name .libs`
	rm -rf `find $(distdir) -name *.o`
	rm -rf `find $(distdir) -name *.lo`
	rm -rf `find $(distdir) -name *.la`
	rm -rf `find $(distdir)/training -executable -type f`
	rm -rf $(distdir)/doc/html/*

ScrollView.jar:
	@cd "$(top_builddir)/java" && $(MAKE) $@
install-jars:
	@cd "$(top_builddir)/java" && $(MAKE) $@

doc-dummy:

## Generate the API documentation with doxygen.
doc: doc-dummy
	-srcdir="$(top_srcdir)" builddir="$(top_builddir)" \
	version="@PACKAGE_VERSION@" name="@PACKAGE_NAME@" \
	doxygen $(top_srcdir)/doc/Doxyfile

## Pack the generated HTML documentation into a versioned tarball.
doc-pack: doc
	-chmod a+r $(top_srcdir)/doc/html/*
	@tar --create --directory=$(top_srcdir)/doc/html --verbose --file=- . | gzip -c -9 > $(top_srcdir)/@PACKAGE_NAME@-@PACKAGE_VERSION@-doc-html.tar.gz;

doc-clean:
	rm -rf $(top_srcdir)/doc/html/*

pkgconfigdir = $(libdir)/pkgconfig
pkgconfig_DATA = tesseract.pc
| 1080228-arabicocr11 | Makefile.am | Makefile | asf20 | 1,907 |
#!/bin/bash
# File:        runalltests.sh
# Description: Script to run a set of UNLV test sets.
# Author:      Ray Smith
# Created:     Thu Jun 14 08:21:01 PDT 2007
#
# (C) Copyright 2007, Google Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Arguments: the UNLV data directory and an id string that labels the
# report files produced by this run.
if [ $# -ne 2 ]
then
  echo "Usage:$0 unlv-data-dir version-id"
  exit 1
fi
if [ ! -d api ]
then
  echo "Run $0 from the tesseract-ocr root directory!"
  exit 1
fi
if [ ! -r api/tesseract -a ! -r tesseract.exe ]
then
  echo "Please build tesseract before running $0"
  exit 1
fi
if [ ! -r testing/unlv/accuracy -a ! -r testing/unlv/accuracy.exe ]
then
  echo "Please download the UNLV accuracy tools (and build) to testing/unlv"
  exit 1
fi

#deltapc new old calculates the %change from old to new
# (the arguments are spliced directly into the awk program text).
deltapc() {
  awk ' BEGIN {
    printf("%.2f", 100.0*('$1'-'$2')/'$2');
  }'
}

#timesum computes the total cpu time from a "page seconds" file.
timesum() {
  awk ' BEGIN {
    total = 0.0;
  }
  {
    total += $2;
  }
  END {
    printf("%.2f\n", total);
  }' $1
}

imdir="$1"
vid="$2"
# Directory containing this script (and its sibling helper scripts).
bindir=${0%/*}
if [ "$bindir" = "$0" ]
then
  bindir="./"
fi
rdir=testing/reports

testsets="bus.3B doe3.3B mag.3B news.3B"

# Running totals accumulated over all test sets.
totalerrs=0
totalwerrs=0
totalnswerrs=0
totalolderrs=0
totaloldwerrs=0
totaloldnswerrs=0
for set in $testsets
do
  if [ -r $imdir/$set/pages ]
  then
    # Run tesseract on all the pages.
    $bindir/runtestset.sh $imdir/$set/pages
    # Count the errors on all the pages.
    $bindir/counttestset.sh $imdir/$set/pages
    # Get the old character word and nonstop word errors.
    olderrs=`cat testing/reports/1995.$set.sum | cut -f3`
    oldwerrs=`cat testing/reports/1995.$set.sum | cut -f6`
    oldnswerrs=`cat testing/reports/1995.$set.sum | cut -f9`
    # Get the new character word and nonstop word errors and accuracy.
    # The values live at fixed line/column positions in the UNLV reports.
    cherrs=`head -4 testing/reports/$set.characc |tail -1 |cut -c1-9 |
      tr -d '[:blank:]'`
    chacc=`head -5 testing/reports/$set.characc |tail -1 |cut -c1-9 |
      tr -d '[:blank:]'`
    wderrs=`head -4 testing/reports/$set.wordacc |tail -1 |cut -c1-9 |
      tr -d '[:blank:]'`
    wdacc=`head -5 testing/reports/$set.wordacc |tail -1 |cut -c1-9 |
      tr -d '[:blank:]'`
    nswderrs=`grep Total testing/reports/$set.wordacc |head -2 |tail -1 |
      cut -c10-17 |tr -d '[:blank:]'`
    nswdacc=`grep Total testing/reports/$set.wordacc |head -2 |tail -1 |
      cut -c19-26 |tr -d '[:blank:]'`
    # Compute the percent change.
    chdelta=`deltapc $cherrs $olderrs`
    wdelta=`deltapc $wderrs $oldwerrs`
    nswdelta=`deltapc $nswderrs $oldnswerrs`
    sumfile=$rdir/$vid.$set.sum
    if [ -r testing/reports/$set.times ]
    then
      total_time=`timesum testing/reports/$set.times`
      if [ -r testing/reports/prev/$set.times ]
      then
        # Per-page time deltas against the previous run, slowest first.
        paste testing/reports/prev/$set.times testing/reports/$set.times |
          awk '{ printf("%s %.2f\n", $1, $4-$2); }' |sort -k2n >testing/reports/$set.timedelta
      fi
    else
      total_time='0.0'
    fi
    echo "$vid $set $cherrs $chacc $chdelta% $wderrs $wdacc\
 $wdelta% $nswderrs $nswdacc $nswdelta% ${total_time}s" >$sumfile
    # Sum totals over all the testsets.
    let totalerrs=totalerrs+cherrs
    let totalwerrs=totalwerrs+wderrs
    let totalnswerrs=totalnswerrs+nswderrs
    let totalolderrs=totalolderrs+olderrs
    let totaloldwerrs=totaloldwerrs+oldwerrs
    let totaloldnswerrs=totaloldnswerrs+oldnswerrs
  fi
done
# Compute grand total percent change.
chdelta=`deltapc $totalerrs $totalolderrs`
wdelta=`deltapc $totalwerrs $totaloldwerrs`
nswdelta=`deltapc $totalnswerrs $totaloldnswerrs `
tfile=$rdir/$vid.total.sum
echo "$vid Total $totalerrs - $chdelta% $totalwerrs\
 - $wdelta% $totalnswerrs - $nswdelta%" >$tfile
# Concatenate baseline and per-set summaries into the overall report.
cat $rdir/1995.*.sum $rdir/$vid.*.sum >$rdir/$vid.summary
| 1080228-arabicocr11 | testing/runalltests.sh | Shell | asf20 | 4,148 |
#!/bin/bash
# File:        counttestset.sh
# Description: Script to count the errors on a single UNLV set.
# Author:      Ray Smith
# Created:     Wed Jun 13 11:58:01 PDT 2007
#
# (C) Copyright 2007, Google Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Argument: the "pages" file listing the pages of one UNLV test set.
if [ $# -ne 1 ]
then
  echo "Usage:$0 pagesfile"
  exit 1
fi
if [ ! -d api ]
then
  echo "Run $0 from the tesseract-ocr root directory!"
  exit 1
fi
if [ ! -r testing/unlv/accuracy ]
then
  echo "Please download the UNLV accuracy tools (and build) to testing/unlv"
  exit 1
fi

pages=$1
# Derive the image directory and set name from the pages file path.
imdir=${pages%/pages}
setname=${imdir##*/}
resdir=testing/results/$setname
mkdir -p testing/reports
echo "Counting on set $setname in directory $imdir to $resdir"
accfiles=""
wafiles=""
# Each line of the pages file is "page [subdir]".
while read page dir
do
  if [ "$dir" ]
  then
    srcdir="$imdir/$dir"
  else
    srcdir="$imdir"
  fi
#  echo "$srcdir/$page.tif"
  # Count character errors.
  testing/unlv/accuracy $srcdir/$page.txt $resdir/$page.txt $resdir/$page.acc
  accfiles="$accfiles $resdir/$page.acc"
  # Count word errors.
  testing/unlv/wordacc $srcdir/$page.txt $resdir/$page.txt $resdir/$page.wa
  wafiles="$wafiles $resdir/$page.wa"
done <$pages

# Aggregate the per-page counts into per-set report files.
testing/unlv/accsum $accfiles >testing/reports/$setname.characc
testing/unlv/wordaccsum $wafiles >testing/reports/$setname.wordacc
| 1080228-arabicocr11 | testing/counttestset.sh | Shell | asf20 | 1,785 |
## Ship the UNLV test harness scripts and the baseline reports in the
## distribution tarball; nothing here is compiled or installed.
EXTRA_DIST = README counttestset.sh reorgdata.sh runalltests.sh runtestset.sh reports/1995.bus.3B.sum reports/1995.doe3.3B.sum reports/1995.mag.3B.sum reports/1995.news.3B.sum reports/2.03.summary reports/2.04.summary
| 1080228-arabicocr11 | testing/Makefile.am | Makefile | asf20 | 219 |
#!/bin/bash
# File:        runtestset.sh
# Description: Script to run tesseract on a single UNLV set.
# Author:      Ray Smith
# Created:     Wed Jun 13 10:13:01 PDT 2007
#
# (C) Copyright 2007, Google Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Arguments: the set's pages file and an optional -zoning flag.
if [ $# -ne 1 -a $# -ne 2 ]
then
  echo "Usage:$0 pagesfile [-zoning]"
  exit 1
fi
if [ ! -d api ]
then
  echo "Run $0 from the tesseract-ocr root directory!"
  exit 1
fi
# Pick the tesseract binary: either a Windows build in the root dir, or
# the api/ build wrapped in time(1) so per-page cpu times get recorded.
if [ ! -r api/tesseract ]
then
  if [ ! -r tesseract.exe ]
  then
    echo "Please build tesseract before running $0"
    exit 1
  else
    tess="./tesseract.exe"
  fi
else
  tess="time -f %U -o times.txt api/tesseract"
  export TESSDATA_PREFIX=$PWD/
fi

pages=$1
# Derive the image directory and set name from the pages file path.
imdir=${pages%/pages}
setname=${imdir##*/}
# -zoning selects the automatic page-layout config and a separate
# results directory so both modes can be compared.
if [ $# -eq 2 -a "$2" = "-zoning" ]
then
  config=unlv.auto
  resdir=testing/results/zoning.$setname
else
  config=unlv
  resdir=testing/results/$setname
fi
echo -e "Testing on set $setname in directory $imdir to $resdir\n"
mkdir -p $resdir
rm -f testing/reports/$setname.times
while read page dir
do
  # A pages file may be a list of files with subdirs or maybe just
  # a plain list of files so accomodate both.
  if [ "$dir" ]
  then
    srcdir="$imdir/$dir"
  else
    srcdir="$imdir"
  fi
#  echo "$srcdir/$page.tif"
  $tess $srcdir/$page.tif $resdir/$page -psm 6 $config 2>&1 |grep -v "OCR Engine"
  if [ -r times.txt ]
  then
    read t <times.txt
    echo "$page $t" >>testing/reports/$setname.times
    echo -e "\033M$page $t"
    # time(1) writes this marker when the run is interrupted by ctrl-C;
    # stop the whole set cleanly in that case.
    if [ "$t" = "Command terminated by signal 2" ]
    then
      exit 0
    fi
  fi
done <$pages
| 1080228-arabicocr11 | testing/runtestset.sh | Shell | asf20 | 2,058 |
#!/bin/bash
# Reorganize downloaded UNLV test data into the per-set directory layout
# expected by runalltests.sh: each set gets a pages file plus .tif image,
# .uzn zone and ground-truth .txt files.
if [ $# -ne 1 ]
then
  echo "Usage:$0 scantype"
  echo "UNLV data comes in several scan types:"
  echo "3B=300 dpi binary"
  echo "3A=adaptive thresholded 300 dpi"
  echo "3G=300 dpi grey"
  echo "4B=400dpi binary"
  echo "2B=200dpi binary"
  echo "For now we only use 3B"
  exit 1
fi
ext=$1
#There are several test sets without meaningful names, so rename
#them with something a bit more meaningful.
#Each s is oldname/newname
for s in 3/doe3 B/bus M/mag N/news L/legal R/rep S/spn Z/zset
do
  old=${s%/*}
  #if this set was downloaded then process it.
  if [ -r "$old/PAGES" ]
  then
    new=${s#*/}.$ext
    mkdir -p $new
    echo "Set $old -> $new"
    #The pages file had - instead of _ so fix it and add the extension.
    for page in `cat $old/PAGES`
    do
      echo "${page%-*}_${page#*-}.$ext"
    done >$new/pages
    for f in `cat $new/pages`
    do
      #Put a tif extension on the tif files.
      cp $old/${old}_B/$f $new/$f.tif
      #Put a uzn extension on the zone files.
      cp $old/${old}_B/${f}Z $new/$f.uzn
      #Cat all the truth files together and put into a single txt file.
      cat $old/${old}_GT/${f%.$ext}.Z* >$new/$f.txt
    done
  fi
done
| 1080228-arabicocr11 | testing/reorgdata.sh | Shell | asf20 | 1,193 |
/**********************************************************************
* File: cached_file.h
* Description: Declaration of a Cached File class
* Author: Ahmad Abdulkader
* Created: 2007
*
* (C) Copyright 2008, Google Inc.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
#ifndef CACHED_FILE_H
#define CACHED_FILE_H
// The CachedFile class provides a large-cache read access to a file
// It is mainly designed for loading large word dump files
#include <stdio.h>
#include <string>
#ifdef USE_STD_NAMESPACE
using std::string;
#endif
namespace tesseract {
// CachedFile provides read access to a file through a large in-memory
// cache. It is mainly designed for loading large word dump files.
class CachedFile {
 public:
  explicit CachedFile(string file_name);
  ~CachedFile();

  // Reads a specified number of bytes to the specified buffer and
  // returns the actual number of bytes read.
  int Read(void *read_buff, int bytes);
  // Returns the file size.
  long Size();
  // Returns the current position in the file.
  long Tell();
  // End of file flag.
  bool eof();

 private:
  // Size of the internal cache buffer: 0x8000000 bytes = 128MB.
  static const unsigned int kCacheSize = 0x8000000;
  // file name
  string file_name_;
  // internal file buffer
  unsigned char *buff_;
  // file position
  long file_pos_;
  // file size
  long file_size_;
  // position of file within buffer
  int buff_pos_;
  // buffer size
  int buff_size_;
  // file handle
  FILE *fp_;
  // Opens the file (presumably lazily, on first access — confirm in .cpp).
  bool Open();
};
}
#endif // CACHED_FILE_H
| 1080228-arabicocr11 | cube/cached_file.h | C++ | asf20 | 1,939 |
/**********************************************************************
* File: char_altlist.cpp
* Description: Implementation of a Character Alternate List Class
* Author: Ahmad Abdulkader
* Created: 2007
*
* (C) Copyright 2008, Google Inc.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
#include "char_altlist.h"
namespace tesseract {
// Builds an alternate list holding at most max_alt entries.
// The CharSet is not owned by this class and must remain valid for the
// lifetime of this object.
CharAltList::CharAltList(const CharSet *char_set, int max_alt)
    : AltList(max_alt) {
  // The per-alternate and per-class buffers are allocated lazily by
  // Insert(), so start them out as NULL.
  class_id_alt_ = NULL;
  class_id_cost_ = NULL;
  char_set_ = char_set;
  max_alt_ = max_alt;
}
// Frees the lazily-allocated alternate and cost buffers.
CharAltList::~CharAltList() {
  // delete[] on a NULL pointer is a no-op, so no guards are needed.
  delete []class_id_alt_;
  class_id_alt_ = NULL;
  delete []class_id_cost_;
  class_id_cost_ = NULL;
}
// Insert a new char alternate.
// Returns false if the class id is invalid, the list is already full,
// or an internal buffer could not be allocated.
bool CharAltList::Insert(int class_id, int cost, void *tag) {
  // validate the class ID
  if (class_id < 0 || class_id >= char_set_->ClassCount()) {
    return false;
  }
  // Refuse to insert past the fixed capacity of the alternate arrays.
  // The original code had no such check, so a caller inserting more than
  // max_alt_ alternates would overrun the buffers.
  if (alt_cnt_ >= max_alt_) {
    return false;
  }
  // lazily allocate the alternate buffers on first insertion
  if (class_id_alt_ == NULL || alt_cost_ == NULL) {
    class_id_alt_ = new int[max_alt_];
    alt_cost_ = new int[max_alt_];
    alt_tag_ = new void *[max_alt_];
    if (class_id_alt_ == NULL || alt_cost_ == NULL || alt_tag_ == NULL) {
      return false;
    }
    memset(alt_tag_, 0, max_alt_ * sizeof(*alt_tag_));
  }
  // lazily allocate the per-class cost table, initialized to WORST_COST
  if (class_id_cost_ == NULL) {
    int class_cnt = char_set_->ClassCount();
    class_id_cost_ = new int[class_cnt];
    if (class_id_cost_ == NULL) {
      return false;
    }
    for (int ich = 0; ich < class_cnt; ich++) {
      class_id_cost_[ich] = WORST_COST;
    }
  }
  // insert the alternate and record its cost in the per-class table
  class_id_alt_[alt_cnt_] = class_id;
  alt_cost_[alt_cnt_] = cost;
  alt_tag_[alt_cnt_] = tag;
  alt_cnt_++;
  class_id_cost_[class_id] = cost;
  return true;
}
// Sorts the alternates in ascending order of cost (i.e. best — most
// probable — alternate first) using a simple exchange sort; alternate
// lists are small, so an O(n^2) sort is fine here.
void CharAltList::Sort() {
  for (int outer = 0; outer < alt_cnt_; outer++) {
    for (int inner = outer + 1; inner < alt_cnt_; inner++) {
      if (alt_cost_[outer] > alt_cost_[inner]) {
        // Swap the parallel arrays (class id, cost, tag) together so the
        // entries stay aligned.
        int swap_id = class_id_alt_[outer];
        class_id_alt_[outer] = class_id_alt_[inner];
        class_id_alt_[inner] = swap_id;

        int swap_cost = alt_cost_[outer];
        alt_cost_[outer] = alt_cost_[inner];
        alt_cost_[inner] = swap_cost;

        void *swap_tag = alt_tag_[outer];
        alt_tag_[outer] = alt_tag_[inner];
        alt_tag_[inner] = swap_tag;
      }
    }
  }
}
| 1080228-arabicocr11 | cube/char_altlist.cpp | C++ | asf20 | 3,147 |
/**********************************************************************
* File: cube_object.h
* Description: Declaration of the Cube Object Class
* Author: Ahmad Abdulkader
* Created: 2007
*
* (C) Copyright 2008, Google Inc.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
// The CubeObject class is the main class used to perform recognition of
// a specific char_samp as a single word.
// To recognize a word, a CubeObject is constructed for this word.
// A Call to RecognizeWord is then issued specifying the language model that
// will be used during recognition. If none is specified, the default language
// model in the CubeRecoContext is used. The CubeRecoContext is passed at
// construction time
//
// The typical usage pattern for Cube is shown below:
//
// // Create and initialize Tesseract object and get its
// // CubeRecoContext object (note that Tesseract object owns it,
// // so it will be freed when the Tesseract object is freed).
// tesseract::Tesseract *tess_obj = new tesseract::Tesseract();
// tess_obj->init_tesseract(data_path, lang, tesseract::OEM_CUBE_ONLY);
// CubeRecoContext *cntxt = tess_obj->GetCubeRecoContext();
// CHECK(cntxt != NULL) << "Unable to create a Cube reco context";
// .
// .
// .
// // Do this to recognize a word in pix whose co-ordinates are
// // (left,top,width,height)
// tesseract::CubeObject *cube_obj;
// cube_obj = new tesseract::CubeObject(cntxt, pix,
// left, top, width, height);
//
// // Get back Cube's list of answers
// tesseract::WordAltList *alt_list = cube_obj->RecognizeWord();
// CHECK(alt_list != NULL && alt_list->AltCount() > 0);
//
// // Get the string and cost of every alternate
// for (int alt = 0; alt < alt_list->AltCount(); alt++) {
// // Return the result as a UTF-32 string
// string_32 res_str32 = alt_list->Alt(alt);
// // Convert to UTF8 if need-be
// string res_str;
// CubeUtils::UTF32ToUTF8(res_str32.c_str(), &res_str);
// // Get the string cost. This should get bigger as you go deeper
// // in the list
// int cost = alt_list->AltCost(alt);
// }
//
// // Call this once you are done recognizing this word
// delete cube_obj;
//
// // Call this once you are done recognizing all words with
// // for the current language
// delete tess_obj;
//
// Note that if the language supports "Italics" (see the CubeRecoContext), the
// RecognizeWord function attempts to de-slant the word.
#ifndef CUBE_OBJECT_H
#define CUBE_OBJECT_H
#include "char_samp.h"
#include "word_altlist.h"
#include "beam_search.h"
#include "cube_search_object.h"
#include "tess_lang_model.h"
#include "cube_reco_context.h"
namespace tesseract {
// minimum aspect ratio needed to normalize a char_samp before recognition
static const float kMinNormalizationAspectRatio = 3.5;
// minimum probability a top alt choice must meet before having
// deslanted processing applied to it
static const float kMinProbSkipDeslanted = 0.25;

class CubeObject {
 public:
  // Different flavors of constructor. They just differ in the way the
  // word image is specified
  CubeObject(CubeRecoContext *cntxt, CharSamp *char_samp);
  CubeObject(CubeRecoContext *cntxt, Pix *pix,
             int left, int top, int wid, int hgt);
  ~CubeObject();

  // Perform the word recognition using the specified language mode. If none
  // is specified, the default language model in the CubeRecoContext is used.
  // Returns the sorted list of alternate word answers
  WordAltList *RecognizeWord(LangModel *lang_mod = NULL);
  // Same as RecognizeWord but recognizes as a phrase
  WordAltList *RecognizePhrase(LangModel *lang_mod = NULL);
  // Computes the cost of a specific string. This is done by performing
  // recognition of a language model that allows only the specified word.
  // The alternate list(s) will be permanently modified.
  int WordCost(const char *str);
  // Recognizes a single character and returns the list of results.
  CharAltList *RecognizeChar();

  // Returns the BeamSearch object that resulted from the last call to
  // RecognizeWord. If deslanting was applied, the deslanted variant is
  // returned instead.
  inline BeamSearch *BeamObj() const {
    return (deslanted_ == true ? deslanted_beam_obj_ : beam_obj_);
  }
  // Returns the WordAltList object that resulted from the last call to
  // RecognizeWord
  inline WordAltList *AlternateList() const {
    return (deslanted_ == true ? deslanted_alt_list_ : alt_list_);
  }
  // Returns the CubeSearchObject object that resulted from the last call to
  // RecognizeWord
  inline CubeSearchObject *SrchObj() const {
    return (deslanted_ == true ? deslanted_srch_obj_ : srch_obj_);
  }
  // Returns the CharSamp object that resulted from the last call to
  // RecognizeWord. Note that this object is not necessarily identical to the
  // one passed at construction time as normalization might have occurred
  inline CharSamp *CharSample() const {
    return (deslanted_ == true ? deslanted_char_samp_ : char_samp_);
  }

  // Set the ownership of the CharSamp
  inline void SetCharSampOwnership(bool own_char_samp) {
    own_char_samp_ = own_char_samp;
  }

 protected:
  // Normalize the CharSamp if its aspect ratio exceeds the below constant.
  bool Normalize();

 private:
  // minimum segment count needed to normalize a char_samp before recognition
  static const int kMinNormalizationSegmentCnt = 4;

  // Data member initialization function
  void Init();
  // Free alternate lists.
  void Cleanup();
  // Perform the actual recognition using the specified language mode. If none
  // is specified, the default language model in the CubeRecoContext is used.
  // Returns the sorted list of alternate answers. Called by both
  // RecognizerWord (word_mode is true) or RecognizePhrase (word mode is false)
  WordAltList *Recognize(LangModel *lang_mod, bool word_mode);

  CubeRecoContext *cntxt_;
  // Search state for the original image and its deslanted counterpart;
  // deslanted_ selects which set the accessors above return.
  BeamSearch *beam_obj_;
  BeamSearch *deslanted_beam_obj_;
  bool own_char_samp_;
  bool deslanted_;
  CharSamp *char_samp_;
  CharSamp *deslanted_char_samp_;
  CubeSearchObject *srch_obj_;
  CubeSearchObject *deslanted_srch_obj_;
  WordAltList *alt_list_;
  WordAltList *deslanted_alt_list_;
};
}
#endif // CUBE_OBJECT_H
| 1080228-arabicocr11 | cube/cube_object.h | C++ | asf20 | 7,000 |
/**********************************************************************
* File: feature_base.h
* Description: Declaration of the Feature Base Class
* Author: Ping Ping (xiupingping), Ahmad Abdulkader
* Created: 2007
*
* (C) Copyright 2008, Google Inc.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
// The FeatureBase class is the base class for any Feature Extraction class
// It provided 3 pure virtual functions (to inherit):
// 1- FeatureCnt: A method to returns the count of features
// 2- ComputeFeatures: A method to compute the features for a given CharSamp
// 3- ComputeFeatureBitmap: A method to render a visualization of the features
// to a CharSamp. This is mainly used by visual-debuggers
#ifndef FEATURE_BASE_H
#define FEATURE_BASE_H
#include "char_samp.h"
#include "tuning_params.h"
namespace tesseract {
// Abstract base class for feature extractors; see the file header
// comment for the contract of the three pure virtual methods.
class FeatureBase {
 public:
  explicit FeatureBase(TuningParams *params)
      : params_(params) {
  }
  virtual ~FeatureBase() {}

  // Compute the features for a given CharSamp into the caller-provided
  // array (presumably sized to hold FeatureCnt() entries — confirm in
  // the concrete implementations).
  virtual bool ComputeFeatures(CharSamp *char_samp, float *features) = 0;
  // Render a visualization of the features to a CharSamp.
  // This is mainly used by visual-debuggers
  virtual CharSamp *ComputeFeatureBitmap(CharSamp *char_samp) = 0;
  // Returns the count of features
  virtual int FeatureCnt() = 0;

 protected:
  // Tuning parameters; stored but never deleted by this base class.
  TuningParams *params_;
};
}
#endif // FEATURE_BASE_H
| 1080228-arabicocr11 | cube/feature_base.h | C++ | asf20 | 1,989 |
/**********************************************************************
* File: search_object.h
* Description: Declaration of the Beam Search Object Class
* Author: Ahmad Abdulkader
* Created: 2008
*
* (C) Copyright 2008, Google Inc.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
// The SearchObject class represents a char_samp (a word bitmap) that is
// being searched for characters (or recognizeable entities).
// This is an abstract class that all SearchObjects should inherit from
// A SearchObject class provides methods to:
// 1- Returns the count of segments
// 2- Recognize a segment range
// 3- Creates a CharSamp for a segment range
#ifndef SEARCH_OBJECT_H
#define SEARCH_OBJECT_H
#include "char_altlist.h"
#include "char_samp.h"
#include "cube_reco_context.h"
namespace tesseract {
// Abstract interface over a segmented word bitmap being searched for
// recognizable entities; see the file header comment for details.
class SearchObject {
 public:
  explicit SearchObject(CubeRecoContext *cntxt) { cntxt_ = cntxt; }
  virtual ~SearchObject() {}

  // Returns the count of segmentation points.
  virtual int SegPtCnt() = 0;
  // Recognizes the specified segment range and returns the resulting
  // character alternate list.
  virtual CharAltList *RecognizeSegment(int start_pt, int end_pt) = 0;
  // Creates a CharSamp for the specified segment range.
  virtual CharSamp *CharSample(int start_pt, int end_pt) = 0;
  // Returns a bounding Box for the specified segment range.
  virtual Box* CharBox(int start_pt, int end_pt) = 0;
  // Costs of treating (or not treating) a segmentation point as a word
  // break — presumably; confirm against the concrete implementations.
  virtual int SpaceCost(int seg_pt) = 0;
  virtual int NoSpaceCost(int seg_pt) = 0;
  virtual int NoSpaceCost(int start_pt, int end_pt) = 0;

 protected:
  // Recognition context; not owned by this class (never deleted here).
  CubeRecoContext *cntxt_;
};
}
#endif // SEARCH_OBJECT_H
| 1080228-arabicocr11 | cube/search_object.h | C++ | asf20 | 1,961 |
/**********************************************************************
* File: cube_line_object.h
* Description: Declaration of the Cube Line Object Class
* Author: Ahmad Abdulkader
* Created: 2007
*
* (C) Copyright 2008, Google Inc.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
// The CubeLineObject implements an objects that holds a line of text
// Each line is broken into phrases. Phrases are blocks within the line that
// are unambiguously separate collections of words
#ifndef CUBE_LINE_OBJECT_H
#define CUBE_LINE_OBJECT_H
#include "cube_reco_context.h"
#include "cube_object.h"
#include "allheaders.h"
namespace tesseract {
// Holds one line of text, lazily broken into separately recognizable
// phrases (see the file header comment).
class CubeLineObject {
 public:
  CubeLineObject(CubeRecoContext *cntxt, Pix *pix);
  ~CubeLineObject();

  // accessors. Both lazily invoke Process() on first use and return
  // 0 / NULL respectively if processing fails.
  inline int PhraseCount() {
    if (!processed_ && !Process()) {
      return 0;
    }
    return phrase_cnt_;
  }
  inline CubeObject **Phrases() {
    if (!processed_ && !Process()) {
      return NULL;
    }
    return phrases_;
  }

 private:
  CubeRecoContext *cntxt_;
  // whether this object owns line_pix_ (presumably released in the
  // destructor — confirm in the .cpp)
  bool own_pix_;
  // set once Process() has run successfully
  bool processed_;
  Pix *line_pix_;
  // phrase array and its length, produced by Process()
  CubeObject **phrases_;
  int phrase_cnt_;
  // Splits the line into phrases; returns false on failure.
  bool Process();
  // Compute the least word breaking threshold that is required to produce a
  // valid set of phrases. Phrases are validated using the Aspect ratio
  // constraints specified in the language specific Params object
  int ComputeWordBreakThreshold(int con_comp_cnt, ConComp **con_comps,
                                bool rtl);
};
}
#endif // CUBE_LINE_OBJECT_H
| 1080228-arabicocr11 | cube/cube_line_object.h | C++ | asf20 | 2,118 |
/**********************************************************************
* File: beam_search.h
* Description: Declaration of Beam Word Search Algorithm Class
* Author: Ahmad Abdulkader
* Created: 2007
*
* (C) Copyright 2008, Google Inc.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
// The Beam Search class implements a Beam Search algorithm for the
// N-best paths through the lattice of a search object using a language model
// The search object is a segmented bitmap of a word image. The language model
// is a state machine that defines valid sequences of characters
// The cost of each path is the combined (product) probabilities of the
// characters along the path. The character probabilities are computed using
// the character classifier member of the RecoContext
// The BeamSearch class itself holds the state of the last search it performed
// using its "Search" method. Subsequent calls to the Search method erase the
// states of previously done searches
#ifndef BEAM_SEARCH_H
#define BEAM_SEARCH_H
#include "search_column.h"
#include "word_altlist.h"
#include "search_object.h"
#include "lang_model.h"
#include "cube_utils.h"
#include "cube_reco_context.h"
#include "allheaders.h"
namespace tesseract {
class BeamSearch {
 public:
  explicit BeamSearch(CubeRecoContext *cntxt, bool word_mode = true);
  ~BeamSearch();
  // Performs a beam search in the specified search object using the
  // specified language model; returns an alternate list of possible
  // words as a result.
  WordAltList *Search(SearchObject *srch_obj, LangModel *lang_mod = NULL);
  // Returns the best node in the last column of last performed search.
  SearchNode *BestNode() const;
  // Returns the string corresponding to the specified alt.
  char_32 *Alt(int alt) const;
  // Backtracks from the specified lattice node and returns the corresponding
  // character-mapped segments, character count, char_32 result string, and
  // character bounding boxes (if char_boxes is not NULL). If the segments
  // cannot be constructed, returns NULL, and all result arguments
  // will be NULL.
  CharSamp **BackTrack(SearchObject *srch_obj, int node_index,
                       int *char_cnt, char_32 **str32, Boxa **char_boxes) const;
  // Same as above, except it takes a pointer to a search node object
  // instead of node index.
  CharSamp **BackTrack(SearchObject *srch_obj, SearchNode *node,
                       int *char_cnt, char_32 **str32, Boxa **char_boxes) const;
  // Returns the size cost of a specified string of a lattice
  // path that ends at the specified lattice node.
  int SizeCost(SearchObject *srch_obj, SearchNode *node,
               char_32 **str32 = NULL) const;
  // Returns the word unigram cost of the given string, possibly
  // stripping out a single trailing punctuation character.
  int WordUnigramCost(char_32 *str32, WordUnigrams* word_unigrams) const;
  // Supplementary functions needed for visualization
  // Return column count of the lattice.
  inline int ColCnt() const { return col_cnt_; }
  // Returns the lattice column corresponding to the specified column index.
  SearchColumn *Column(int col_idx) const;
  // Return the index of the best node in the last column of the
  // best-cost path before the alternates list is sorted.
  // (Note: removed a stray trailing semicolon after this member function
  // body; it was an empty member declaration.)
  inline int BestPresortedNodeIndex() const {
    return best_presorted_node_idx_;
  }
 private:
  // Maximum reasonable segmentation point count
  static const int kMaxSegPointCnt = 128;
  // Recognition context object; the context holds the character classifier
  // and the tuning parameters object
  CubeRecoContext *cntxt_;
  // Count of segmentation pts
  int seg_pt_cnt_;
  // Lattice column count; currently redundant with respect to seg_pt_cnt_
  // but that might change in the future
  int col_cnt_;
  // Array of lattice columns
  SearchColumn **col_;
  // Run in word or phrase mode
  bool word_mode_;
  // Node index of best-cost node, before alternates are merged and sorted
  int best_presorted_node_idx_;
  // Cleans up beam search state
  void Cleanup();
  // Creates a Word alternate list from the results in the lattice.
  // This function computes a cost for each node in the final column
  // of the lattice, which is a weighted average of several costs:
  // size cost, character bigram cost, word unigram cost, and
  // recognition cost from the beam search. The weights are the
  // CubeTuningParams, which are learned together with the character
  // classifiers.
  WordAltList *CreateWordAltList(SearchObject *srch_obj);
  // Creates a set of children nodes emerging from a parent node based on
  // the character alternate list and the language model.
  void CreateChildren(SearchColumn *out_col, LangModel *lang_mod,
                      SearchNode *parent_node, LangModEdge *lm_parent_edge,
                      CharAltList *char_alt_list, int extra_cost);
  // Backtracks from the given lattice node and returns the corresponding
  // char mapped segments, character count, and character bounding boxes (if
  // char_boxes is not NULL). If the segments cannot be constructed,
  // returns NULL, and all result arguments will be NULL.
  CharSamp **SplitByNode(SearchObject *srch_obj, SearchNode *srch_node,
                         int* char_cnt, Boxa **char_boxes) const;
};
}
#endif // BEAM_SEARCH_H
| 1080228-arabicocr11 | cube/beam_search.h | C++ | asf20 | 5,904 |
/**********************************************************************
* File: search_node.cpp
* Description: Implementation of the Beam Search Node Class
* Author: Ahmad Abdulkader
* Created: 2008
*
* (C) Copyright 2008, Google Inc.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
#include "search_node.h"
namespace tesseract {
// The constructor updates the best paths and costs:
// mean_char_reco_cost_ (returned by BestRecoCost()) is the mean
// char_reco cost of the best_path, including this node.
// best_path_reco_cost is the total char_reco_cost of the best_path,
// but excludes the char_reco_cost of this node.
// best_cost is the mean mixed cost, i.e., mean_char_reco_cost_ +
// current language model cost, all weighted by the cube context's
// RecoWgt parameter
SearchNode::SearchNode(CubeRecoContext *cntxt, SearchNode *parent_node,
                       int char_reco_cost, LangModEdge *edge, int col_idx) {
  // copy data members
  cntxt_ = cntxt;
  lang_mod_edge_ = edge;
  col_idx_ = col_idx;
  parent_node_ = parent_node;
  char_reco_cost_ = char_reco_cost;
  // the string of this node is the same as that of the language model edge
  str_ = (edge == NULL ? NULL : edge->EdgeString());
  // compute best path total reco cost: the parent's accumulated cost plus
  // the parent's own char cost (this node's own cost is excluded here)
  best_path_reco_cost_ = (parent_node_ == NULL) ? 0 :
      parent_node_->CharRecoCost() + parent_node_->BestPathRecoCost();
  // update best path length
  best_path_len_ = (parent_node_ == NULL) ?
      1 : parent_node_->BestPathLength() + 1;
  // a root edge with a non-NULL parent starts a new word, which adds an
  // implicit space to the path (see PathString), so count one extra position
  if (edge != NULL && edge->IsRoot() && parent_node_ != NULL) {
    best_path_len_++;
  }
  // compute best reco cost mean cost
  mean_char_reco_cost_ = static_cast<int>(
      (best_path_reco_cost_ + char_reco_cost_) /
      static_cast<double>(best_path_len_));
  // get language model cost
  int lm_cost = LangModCost(lang_mod_edge_, parent_node_);
  // compute aggregate best cost: the mean reco cost weighted by the
  // context's RecoWgt tuning parameter, plus the language model cost
  best_cost_ = static_cast<int>(cntxt_->Params()->RecoWgt() *
                                (best_path_reco_cost_ + char_reco_cost_) /
                                static_cast<double>(best_path_len_)
                                ) + lm_cost;
}
// Destructor: a node owns (and must release) its language model edge.
SearchNode::~SearchNode() {
  delete lang_mod_edge_;  // deleting NULL is a no-op
}
// Re-parents this node if the proposed path provides a better (lower) cost;
// returns true if the node was updated.
bool SearchNode::UpdateParent(SearchNode *new_parent, int new_reco_cost,
                              LangModEdge *new_edge) {
  if (lang_mod_edge_ == NULL) {
    // an edge-less node can only be matched by another edge-less path
    if (new_edge != NULL) {
      return false;
    }
  } else {
    // to update the parent_node, we have to have the same target
    // state and char
    if (new_edge == NULL || !lang_mod_edge_->IsIdentical(new_edge) ||
        !SearchNode::IdenticalPath(parent_node_, new_parent)) {
      return false;
    }
  }
  // compute the path cost and combined cost of the new path
  int new_best_path_reco_cost;
  int new_cost;
  int new_best_path_len;
  new_best_path_reco_cost = (new_parent == NULL) ?
      0 : new_parent->BestPathRecoCost() + new_parent->CharRecoCost();
  new_best_path_len =
      (new_parent == NULL) ? 1 : new_parent->BestPathLength() + 1;
  // compute the new language model cost
  int new_lm_cost = LangModCost(new_edge, new_parent);
  // new cost = RecoWgt-weighted mean reco cost + language model cost
  // (same formula as in the constructor)
  new_cost = static_cast<int>(cntxt_->Params()->RecoWgt() *
                              (new_best_path_reco_cost + new_reco_cost) /
                              static_cast<double>(new_best_path_len)
                              ) + new_lm_cost;
  // update if it is better (less) than the current one
  if (best_cost_ > new_cost) {
    parent_node_ = new_parent;
    char_reco_cost_ = new_reco_cost;
    best_path_reco_cost_ = new_best_path_reco_cost;
    best_path_len_ = new_best_path_len;
    mean_char_reco_cost_ = static_cast<int>(
        (best_path_reco_cost_ + char_reco_cost_) /
        static_cast<double>(best_path_len_));
    best_cost_ = static_cast<int>(cntxt_->Params()->RecoWgt() *
                                  (best_path_reco_cost_ + char_reco_cost_) /
                                  static_cast<double>(best_path_len_)
                                  ) + new_lm_cost;
    return true;
  }
  return false;
}
// Returns a newly allocated string (owned by the caller) holding the
// characters along the path ending at this node. A root edge with a
// non-NULL parent marks a word start and contributes a space separator.
char_32 *SearchNode::PathString() {
  SearchNode *node = this;
  // compute string length
  int len = 0;
  while (node != NULL) {
    if (node->str_ != NULL) {
      len += CubeUtils::StrLen(node->str_);
    }
    // if the edge is a root and does not have a NULL parent, account for space
    LangModEdge *lm_edge = node->LangModelEdge();
    if (lm_edge != NULL && lm_edge->IsRoot() && node->ParentNode() != NULL) {
      len++;
    }
    node = node->parent_node_;
  }
  char_32 *char_ptr = new char_32[len + 1];
  if (char_ptr == NULL) {
    return NULL;
  }
  // fill the buffer backwards, walking from this node to the path start
  int ch_idx = len;
  node = this;
  char_ptr[ch_idx--] = 0;
  while (node != NULL) {
    int str_len = ((node->str_ == NULL) ? 0 : CubeUtils::StrLen(node->str_));
    while (str_len > 0) {
      char_ptr[ch_idx--] = node->str_[--str_len];
    }
    // if the edge is a root and does not have a NULL parent, insert a space
    LangModEdge *lm_edge = node->LangModelEdge();
    if (lm_edge != NULL && lm_edge->IsRoot() && node->ParentNode() != NULL) {
      char_ptr[ch_idx--] = (char_32)' ';
    }
    node = node->parent_node_;
  }
  return char_ptr;
}
// Compares the paths of two nodes and checks whether they are identical:
// same length and same edge strings back to the nearest root edge.
bool SearchNode::IdenticalPath(SearchNode *node1, SearchNode *node2) {
  // paths of different length can never match
  if (node1 != NULL && node2 != NULL &&
      node1->best_path_len_ != node2->best_path_len_) {
    return false;
  }
  // backtrack until either a root or a NULL edge is reached
  while (node1 != NULL && node2 != NULL) {
    // NOTE(review): this compares string POINTERS, not contents —
    // presumably edge strings are shared/interned by the language model;
    // confirm against LangModEdge::EdgeString
    if (node1->str_ != node2->str_) {
      return false;
    }
    // stop if either nodes is a root
    if (node1->LangModelEdge()->IsRoot() || node2->LangModelEdge()->IsRoot()) {
      break;
    }
    node1 = node1->parent_node_;
    node2 = node2->parent_node_;
  }
  // identical only if both walks terminated together: both exhausted, or
  // both stopped at a root edge
  return ((node1 == NULL && node2 == NULL) ||
          (node1 != NULL && node1->LangModelEdge()->IsRoot() &&
           node2 != NULL && node2->LangModelEdge()->IsRoot()));
}
// Computes the language model cost of a path by walking from the given
// edge/node pair back to the start of the path and averaging the path
// costs collected at root (word-start) positions.
int SearchNode::LangModCost(LangModEdge *current_lm_edge,
                            SearchNode *parent_node) {
  int total_cost = 0;
  int root_cnt = 0;
  for (;;) {
    // a position counts as a root if its edge is a root edge or if it is
    // the very start of the path (no parent)
    const bool at_root = (parent_node == NULL ||
        (current_lm_edge != NULL && current_lm_edge->IsRoot()));
    if (at_root) {
      root_cnt++;
      if (current_lm_edge != NULL) {
        total_cost += current_lm_edge->PathCost();
      }
    }
    // stop once the start of the path is reached
    if (parent_node == NULL) {
      break;
    }
    // step to the previous edge/node pair
    current_lm_edge = parent_node->LangModelEdge();
    parent_node = parent_node->ParentNode();
  }
  // return the mean cost over all root positions (root_cnt >= 1 because
  // the parent_node == NULL iteration always counts)
  return static_cast<int>(total_cost / static_cast<double>(root_cnt));
}
} // namespace tesseract
| 1080228-arabicocr11 | cube/search_node.cpp | C++ | asf20 | 7,550 |
/**********************************************************************
* File: word_unigrams.cpp
* Description: Implementation of the Word Unigrams Class
* Author: Ahmad Abdulkader
* Created: 2008
*
* (C) Copyright 2008, Google Inc.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
#include <math.h>
#include <string>
#include <vector>
#include <algorithm>
#include "const.h"
#include "cube_utils.h"
#include "ndminx.h"
#include "word_unigrams.h"
namespace tesseract {
// Constructs an empty unigram table; contents are populated by Create().
WordUnigrams::WordUnigrams() {
  word_cnt_ = 0;
  words_ = NULL;
  costs_ = NULL;
}
// Releases the word-pointer array, the single shared string buffer it
// points into (anchored at words_[0]), and the cost array.
WordUnigrams::~WordUnigrams() {
  if (words_ != NULL) {
    // all word strings live in one buffer whose start is words_[0];
    // delete[] of a NULL pointer is a harmless no-op
    delete []words_[0];
    delete []words_;
    words_ = NULL;
  }
  delete []costs_;  // delete[] NULL is a no-op
}
// Load the word-list and unigrams from file and create an object.
// The word list is assumed to be sorted in lexicographic order and the
// file to contain whitespace-separated (word, cost) token pairs.
// Returns NULL on any I/O, format, or allocation failure.
WordUnigrams *WordUnigrams::Create(const string &data_file_path,
                                   const string &lang) {
  string file_name;
  string str;
  file_name = data_file_path + lang;
  file_name += ".cube.word-freq";
  // load the string into memory
  if (CubeUtils::ReadFileToString(file_name, &str) == false) {
    return NULL;
  }
  // split into whitespace-separated tokens
  vector<string> str_vec;
  CubeUtils::SplitStringUsing(str, "\r\n \t", &str_vec);
  if (str_vec.size() < 2) {
    return NULL;
  }
  // allocate memory
  WordUnigrams *word_unigrams_obj = new WordUnigrams();
  if (word_unigrams_obj == NULL) {
    fprintf(stderr, "Cube ERROR (WordUnigrams::Create): could not create "
            "word unigrams object.\n");
    return NULL;
  }
  int full_len = str.length();
  int token_cnt = static_cast<int>(str_vec.size());
  int word_cnt = token_cnt / 2;
  word_unigrams_obj->words_ = new char*[word_cnt];
  word_unigrams_obj->costs_ = new int[word_cnt];
  if (word_unigrams_obj->words_ == NULL ||
      word_unigrams_obj->costs_ == NULL) {
    fprintf(stderr, "Cube ERROR (WordUnigrams::Create): error allocating "
            "word unigram fields.\n");
    delete word_unigrams_obj;
    return NULL;
  }
  // all word strings are stored in one shared buffer anchored at words_[0]
  word_unigrams_obj->words_[0] = new char[full_len];
  if (word_unigrams_obj->words_[0] == NULL) {
    fprintf(stderr, "Cube ERROR (WordUnigrams::Create): error allocating "
            "word unigram fields.\n");
    delete word_unigrams_obj;
    return NULL;
  }
  // construct sorted list of words and costs
  word_unigrams_obj->word_cnt_ = 0;
  char *char_buff = word_unigrams_obj->words_[0];
  word_cnt = 0;
  int max_cost = 0;
  // iterate in (word, cost) pairs; the "wrd + 1 < token_cnt" bound safely
  // ignores a trailing unpaired word (odd token count), which previously
  // caused a read one element past the end of str_vec
  for (int wrd = 0; wrd + 1 < token_cnt; wrd += 2) {
    word_unigrams_obj->words_[word_cnt] = char_buff;
    strcpy(char_buff, str_vec[wrd].c_str());
    char_buff += (str_vec[wrd].length() + 1);
    if (sscanf(str_vec[wrd + 1].c_str(), "%d",
               word_unigrams_obj->costs_ + word_cnt) != 1) {
      fprintf(stderr, "Cube ERROR (WordUnigrams::Create): error reading "
              "word unigram data.\n");
      delete word_unigrams_obj;
      return NULL;
    }
    // update max cost
    max_cost = MAX(max_cost, word_unigrams_obj->costs_[word_cnt]);
    word_cnt++;
  }
  word_unigrams_obj->word_cnt_ = word_cnt;
  // compute the not-in-list-cost by assuming that a word not in the list
  // [ahmadab]: This can be computed as follows:
  // - Given that the distribution of words follow Zipf's law:
  //   (F = K / (rank ^ S)), where s is slightly > 1.0
  // - Number of words in the list is N
  // - The mean frequency of a word that did not appear in the list is the
  //   area under the rest of the Zipf's curve divided by 2 (the mean)
  // - The area would be the bound integral from N to infinity =
  //   (K * S) / (N ^ (S + 1)) ~= K / (N ^ 2)
  // - Given that cost = -LOG(prob), the cost of an unlisted word would be
  //   = max_cost + 2*LOG(N)
  word_unigrams_obj->not_in_list_cost_ = max_cost +
      (2 * CubeUtils::Prob2Cost(1.0 / word_cnt));
  // success
  return word_unigrams_obj;
}
// Split input into space-separated tokens, strip trailing punctuation
// from each, determine case properties, call UTF-8 flavor of cost
// function on each word, and aggregate all into single mean word
// cost.
int WordUnigrams::Cost(const char_32 *key_str32,
                       LangModel *lang_mod,
                       CharSet *char_set) const {
  // a NULL string has no cost
  if (!key_str32)
    return 0;
  // convert string to UTF8 to split into space-separated words
  string key_str;
  CubeUtils::UTF32ToUTF8(key_str32, &key_str);
  vector<string> words;
  CubeUtils::SplitStringUsing(key_str, " \t", &words);
  // no words => no cost
  if (words.size() <= 0) {
    return 0;
  }
  // aggregate the costs of all the words
  int cost = 0;
  for (int word_idx = 0; word_idx < words.size(); word_idx++) {
    // convert each word back to UTF32 for analyzing case and punctuation
    string_32 str32;
    CubeUtils::UTF8ToUTF32(words[word_idx].c_str(), &str32);
    int len = CubeUtils::StrLen(str32.c_str());
    // strip all trailing punctuation
    string clean_str;
    int clean_len = len;
    bool trunc = false;
    while (clean_len > 0 &&
           lang_mod->IsTrailingPunc(str32.c_str()[clean_len - 1])) {
      --clean_len;
      trunc = true;
    }
    // If either the original string was not truncated (no trailing
    // punctuation) or the entire string was removed (all characters
    // are trailing punctuation), evaluate original word as is;
    // otherwise, copy all but the trailing punctuation characters
    char_32 *clean_str32 = NULL;
    if (clean_len == 0 || !trunc) {
      clean_str32 = CubeUtils::StrDup(str32.c_str());
    } else {
      clean_str32 = new char_32[clean_len + 1];
      for (int i = 0; i < clean_len; ++i) {
        clean_str32[i] = str32[i];
      }
      clean_str32[clean_len] = '\0';
    }
    ASSERT_HOST(clean_str32 != NULL);
    // cost of the word exactly as written
    string str8;
    CubeUtils::UTF32ToUTF8(clean_str32, &str8);
    int word_cost = CostInternal(str8.c_str());
    // if case invariant, get costs of all-upper-case and all-lower-case
    // versions and return the min cost
    if (clean_len >= kMinLengthNumOrCaseInvariant &&
        CubeUtils::IsCaseInvariant(clean_str32, char_set)) {
      char_32 *lower_32 = CubeUtils::ToLower(clean_str32, char_set);
      if (lower_32) {
        string lower_8;
        CubeUtils::UTF32ToUTF8(lower_32, &lower_8);
        word_cost = MIN(word_cost, CostInternal(lower_8.c_str()));
        delete [] lower_32;
      }
      char_32 *upper_32 = CubeUtils::ToUpper(clean_str32, char_set);
      if (upper_32) {
        string upper_8;
        CubeUtils::UTF32ToUTF8(upper_32, &upper_8);
        word_cost = MIN(word_cost, CostInternal(upper_8.c_str()));
        delete [] upper_32;
      }
    }
    if (clean_len >= kMinLengthNumOrCaseInvariant) {
      // if characters are all numeric, incur 0 word cost
      bool is_numeric = true;
      for (int i = 0; i < clean_len; ++i) {
        if (!lang_mod->IsDigit(clean_str32[i]))
          is_numeric = false;
      }
      if (is_numeric)
        word_cost = 0;
    }
    delete [] clean_str32;
    cost += word_cost;
  } // word_idx
  // return the mean cost
  return static_cast<int>(cost / static_cast<double>(words.size()));
}
// Search for UTF-8 string using binary search of sorted words_ array.
// Unknown (or empty) words get the precomputed out-of-vocabulary cost.
int WordUnigrams::CostInternal(const char *key_str) const {
  // empty keys are never in the list
  if (key_str[0] == '\0') {
    return not_in_list_cost_;
  }
  int lo = 0;
  int hi = word_cnt_ - 1;
  while (lo <= hi) {
    const int mid = (lo + hi) / 2;
    const int cmp = strcmp(key_str, words_[mid]);
    if (cmp == 0) {
      // exact match: return its unigram cost
      return costs_[mid];
    } else if (cmp < 0) {
      hi = mid - 1;  // key sorts before the middle entry
    } else {
      lo = mid + 1;  // key sorts after the middle entry
    }
  }
  return not_in_list_cost_;
}
} // namespace tesseract
| 1080228-arabicocr11 | cube/word_unigrams.cpp | C++ | asf20 | 8,362 |
/**********************************************************************
* File: search_column.h
* Description: Declaration of the Beam Search Column Class
* Author: Ahmad Abdulkader
* Created: 2008
*
* (C) Copyright 2008, Google Inc.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
// The SearchColumn class abstracts a column in the lattice that is created
// by the BeamSearch during the recognition process
// The class holds the lattice nodes. New nodes are added by calls to AddNode
// made from the BeamSearch
// The class maintains a hash table of the nodes to be able to lookup nodes
// quickly using their lang_mod_edge. This is needed to merge similar paths
// in the lattice
#ifndef SEARCH_COLUMN_H
#define SEARCH_COLUMN_H
#include "search_node.h"
#include "lang_mod_edge.h"
#include "cube_reco_context.h"
namespace tesseract {
// One column of the beam-search lattice. Holds the column's nodes plus a
// hash table used to look nodes up by their lang_mod_edge so that similar
// paths can be merged (see file header comment).
class SearchColumn {
 public:
  SearchColumn(int col_idx, int max_node_cnt);
  ~SearchColumn();
  // Accessor functions
  inline int ColIdx() const { return col_idx_; }
  inline int NodeCount() const { return node_cnt_; }
  inline SearchNode **Nodes() const { return node_array_; }
  // Prune the nodes if necessary. Pruning is done such that a max
  // number of nodes is kept, i.e., the beam width
  void Prune();
  // Adds a node for the specified edge/score/parent; returns the node
  // (NULL on failure).
  SearchNode *AddNode(LangModEdge *edge, int score,
                      SearchNode *parent, CubeRecoContext *cntxt);
  // Returns the node with the least cost
  SearchNode *BestNode();
  // Sort the lattice nodes. Needed for visualization
  void Sort();
  // Free up the Hash Table. Added to be called by the Beam Search after
  // a column is pruned to reduce memory foot print
  void FreeHashTable() {
    if (node_hash_table_ != NULL) {
      delete node_hash_table_;
      node_hash_table_ = NULL;
    }
  }
 private:
  // allocation chunk size for the node array
  static const int kNodeAllocChunk = 1024;
  // number of bins in the score histogram (see score_bins_)
  static const int kScoreBins = 1024;
  // whether lazy initialization has been performed (see Init)
  bool init_;
  // cost bounds observed for nodes in this column
  int min_cost_;
  int max_cost_;
  // maximum node count to keep, i.e., the beam width
  int max_node_cnt_;
  int node_cnt_;
  int col_idx_;
  // histogram of node scores — presumably used by Prune(); confirm in
  // search_column.cpp
  int score_bins_[kScoreBins];
  SearchNode **node_array_;
  // hash table for node lookup by lang_mod_edge
  SearchNodeHashTable *node_hash_table_;
  // Free node array and hash table
  void Cleanup();
  // Create hash table
  bool Init();
};
}
#endif // SEARCH_COLUMN_H
| 1080228-arabicocr11 | cube/search_column.h | C++ | asf20 | 2,795 |
/**********************************************************************
 * File:        char_samp_set.cpp
* Description: Implementation of a Character Sample Set Class
* Author: Ahmad Abdulkader
* Created: 2007
*
* (C) Copyright 2008, Google Inc.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
#include <stdlib.h>
#include <string>
#include "char_samp_set.h"
#include "cached_file.h"
namespace tesseract {
// Constructs an empty sample set that does not yet own any samples.
CharSampSet::CharSampSet() {
  samp_buff_ = NULL;
  cnt_ = 0;
  own_samples_ = false;
}
CharSampSet::~CharSampSet() {
  // releases the sample buffer (and the samples themselves if owned)
  Cleanup();
}
// Resets the set to its empty state. The individual samples are deleted
// only when this object owns them (own_samples_).
void CharSampSet::Cleanup() {
  if (samp_buff_ != NULL) {
    // only free samples if owned by class
    if (own_samples_) {
      for (int idx = 0; idx < cnt_; idx++) {
        delete samp_buff_[idx];  // delete of NULL is a no-op
      }
    }
    delete []samp_buff_;
  }
  samp_buff_ = NULL;
  cnt_ = 0;
}
// Appends a sample pointer, growing the buffer in SAMP_ALLOC_BLOCK-sized
// chunks. Returns false only on allocation failure.
bool CharSampSet::Add(CharSamp *char_samp) {
  // grow the buffer whenever the current block is exactly full
  if ((cnt_ % SAMP_ALLOC_BLOCK) == 0) {
    CharSamp **expanded_buff = new CharSamp *[cnt_ + SAMP_ALLOC_BLOCK];
    if (expanded_buff == NULL) {
      return false;
    }
    // move the existing pointers over and drop the old array
    if (cnt_ > 0) {
      memcpy(expanded_buff, samp_buff_, cnt_ * sizeof(*samp_buff_));
      delete []samp_buff_;
    }
    samp_buff_ = expanded_buff;
  }
  samp_buff_[cnt_++] = char_samp;
  return true;
}
// load char samples from file
bool CharSampSet::LoadCharSamples(FILE *fp) {
// free existing
Cleanup();
// samples are created here and owned by the class
own_samples_ = true;
// start loading char samples
while (feof(fp) == 0) {
CharSamp *new_samp = CharSamp::FromCharDumpFile(fp);
if (new_samp != NULL) {
if (Add(new_samp) == false) {
return false;
}
}
}
return true;
}
// creates a CharSampSet object from file; returns NULL on any I/O,
// format, or allocation failure. The returned set owns its samples.
CharSampSet * CharSampSet::FromCharDumpFile(string file_name) {
  FILE *fp;
  unsigned int val32;
  // open the file
  fp = fopen(file_name.c_str(), "rb");
  if (fp == NULL) {
    return NULL;
  }
  // read and verify the magic marker at the start of the dump file
  if (fread(&val32, 1, sizeof(val32), fp) != sizeof(val32)) {
    fclose(fp);
    return NULL;
  }
  if (val32 != 0xfefeabd0) {
    fclose(fp);
    return NULL;
  }
  // create an object
  CharSampSet *samp_set = new CharSampSet();
  if (samp_set == NULL) {
    fclose(fp);
    return NULL;
  }
  // load the samples; on failure discard the partially built set
  if (samp_set->LoadCharSamples(fp) == false) {
    delete samp_set;
    samp_set = NULL;
  }
  fclose(fp);
  return samp_set;
}
// Create a new Char Dump file. Returns an open handle positioned after
// the marker; the caller owns (and must fclose) the handle. NULL on error.
FILE *CharSampSet::CreateCharDumpFile(string file_name) {
  FILE *fp;
  unsigned int val32;
  // create the file
  fp = fopen(file_name.c_str(), "wb");
  if (!fp) {
    return NULL;
  }
  // write the magic marker at the start of the dump file
  // (the same 0xfefeabd0 value verified by FromCharDumpFile)
  val32 = 0xfefeabd0;
  if (fwrite(&val32, 1, sizeof(val32), fp) != sizeof(val32)) {
    fclose(fp);
    return NULL;
  }
  return fp;
}
// Enumerate the Samples in the set one-by-one calling the enumerator's
// EnumCharSamp method for each sample; the enumerator can stop the walk
// early by returning false. Returns false on I/O or format errors.
bool CharSampSet::EnumSamples(string file_name, CharSampEnum *enum_obj) {
  unsigned int val32;
  long i64_size;
  long i64_pos;
  // open the file through a cached reader
  CachedFile *fp_in = new CachedFile(file_name);
  if (fp_in == NULL) {
    return false;
  }
  i64_size = fp_in->Size();
  if (i64_size < 1) {
    delete fp_in;  // was leaked on this path
    return false;
  }
  // read and verify the magic marker at the start of the dump file
  if (fp_in->Read(&val32, sizeof(val32)) != sizeof(val32)) {
    delete fp_in;  // was leaked on this path
    return false;
  }
  if (val32 != 0xfefeabd0) {
    delete fp_in;  // was leaked on this path
    return false;
  }
  // load and enumerate char samples until EOF or until the enumerator
  // asks to stop
  while (fp_in->eof() == false) {
    CharSamp *new_samp = CharSamp::FromCharDumpFile(fp_in);
    i64_pos = fp_in->Tell();
    if (new_samp != NULL) {
      // report progress as a percentage of bytes consumed
      bool ret_flag = (enum_obj)->EnumCharSamp(new_samp,
                                               (100.0f * i64_pos / i64_size));
      delete new_samp;
      if (ret_flag == false) {
        break;
      }
    }
  }
  delete fp_in;
  return true;
}
} // namespace tesseract
| 1080228-arabicocr11 | cube/char_samp_set.cpp | C++ | asf20 | 4,633 |
/**********************************************************************
 * File:        cube_line_segmenter.cpp
 * Description: Implementation of the Cube Line Segmenter Class
* Author: Ahmad Abdulkader
* Created: 2007
*
* (C) Copyright 2008, Google Inc.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
#include "cube_line_segmenter.h"
#include "ndminx.h"
namespace tesseract {
// constants that worked for Arabic page segmenter
const int CubeLineSegmenter::kLineSepMorphMinHgt = 20;
const int CubeLineSegmenter::kHgtBins = 20;
// upper bound on ValidityRatio() (line height / est. alef height)
// for a line to be considered valid (see ValidLine)
const double CubeLineSegmenter::kMaxValidLineRatio = 3.2;
const int CubeLineSegmenter::kMaxConnCompHgt = 150;
const int CubeLineSegmenter::kMaxConnCompWid = 500;
const int CubeLineSegmenter::kMaxHorzAspectRatio = 50;
const int CubeLineSegmenter::kMaxVertAspectRatio = 20;
const int CubeLineSegmenter::kMinWid = 2;
const int CubeLineSegmenter::kMinHgt = 2;
const float CubeLineSegmenter::kMinValidLineHgtRatio = 2.5;
// Constructs a segmenter over the caller-provided image; all derived
// state starts out empty.
CubeLineSegmenter::CubeLineSegmenter(CubeRecoContext *cntxt, Pix *img) {
  cntxt_ = cntxt;
  orig_img_ = img;
  init_ = false;
  line_cnt_ = 0;
  img_ = NULL;
  columns_ = NULL;
  con_comps_ = NULL;
  lines_pixa_ = NULL;
  est_alef_hgt_ = 0.0;
  est_dot_hgt_ = 0.0;
}
// Destructor: releases every leptonica object created by this segmenter.
// Note that orig_img_ is intentionally not destroyed here.
CubeLineSegmenter::~CubeLineSegmenter() {
  if (columns_ != NULL) {
    pixaaDestroy(&columns_);
    columns_ = NULL;
  }
  if (con_comps_ != NULL) {
    pixaDestroy(&con_comps_);
    con_comps_ = NULL;
  }
  if (lines_pixa_ != NULL) {
    pixaDestroy(&lines_pixa_);
    lines_pixa_ = NULL;
  }
  if (img_ != NULL) {
    pixDestroy(&img_);
    img_ = NULL;
  }
}
// compute validity ratio for a line: its box height expressed relative
// to the estimated alef height
double CubeLineSegmenter::ValidityRatio(Pix *line_mask_pix, Box *line_box) {
  double height_ratio = line_box->h / est_alef_hgt_;
  return height_ratio;
}
// validate line: a line is accepted when its validity ratio stays below
// kMaxValidLineRatio
bool CubeLineSegmenter::ValidLine(Pix *line_mask_pix, Box *line_box) {
  return ValidityRatio(line_mask_pix, line_box) < kMaxValidLineRatio;
}
// perform a vertical Closing with the specified threshold
// returning the resulting conn comps as a pixa
Pixa *CubeLineSegmenter::VerticalClosing(Pix *pix,
                                         int threshold, Boxa **boxa) {
  // morphology sequence "c100.<threshold>": closing with a
  // 100 x threshold brick (see leptonica pixMorphCompSequence)
  char sequence_str[32];
  // use snprintf with a 32-byte buffer: the old sprintf into 16 bytes
  // could overflow for a wide threshold value ("c100." + up to 11
  // digit/sign characters + NUL = 17 bytes)
  snprintf(sequence_str, sizeof(sequence_str), "c100.%d", threshold);
  Pix *morphed_pix = pixMorphCompSequence(pix, sequence_str, 0);
  if (morphed_pix == NULL) {
    return NULL;
  }
  // get the resulting lines by computing concomps
  Pixa *pixac;
  (*boxa) = pixConnComp(morphed_pix, &pixac, 8);
  pixDestroy(&morphed_pix);
  if ((*boxa) == NULL) {
    return NULL;
  }
  return pixac;
}
// Helper cleans up after CrackLine: destroys every per-line pixa, the
// pointer array itself, and the connected-component containers.
static void CleanupCrackLine(int line_cnt, Pixa **lines_pixa,
                             Boxa **line_con_comps,
                             Pixa **line_con_comps_pix) {
  for (int line_idx = 0; line_idx < line_cnt; line_idx++) {
    if (lines_pixa[line_idx] != NULL) {
      pixaDestroy(&lines_pixa[line_idx]);
    }
  }
  delete []lines_pixa;
  boxaDestroy(line_con_comps);
  pixaDestroy(line_con_comps_pix);
}
// do a desperate attempt at cracking lines: split a too-tall line into
// exactly line_cnt bands by assigning each connected component to a band
// based on its vertical centroid. Returns NULL if any resulting band
// fails the line validity check.
Pixa *CubeLineSegmenter::CrackLine(Pix *cracked_line_pix,
                                   Box *cracked_line_box, int line_cnt) {
  // create lines pixa array
  Pixa **lines_pixa = new Pixa*[line_cnt];
  if (lines_pixa == NULL) {
    return NULL;
  }
  memset(lines_pixa, 0, line_cnt * sizeof(*lines_pixa));
  // compute line conn comps
  Pixa *line_con_comps_pix;
  Boxa *line_con_comps = ComputeLineConComps(cracked_line_pix,
      cracked_line_box, &line_con_comps_pix);
  if (line_con_comps == NULL) {
    delete []lines_pixa;
    return NULL;
  }
  // assign each conn comp to the a line based on its centroid
  for (int con = 0; con < line_con_comps->n; con++) {
    Box *con_box = line_con_comps->box[con];
    Pix *con_pix = line_con_comps_pix->pix[con];
    // mid_y is the component's vertical center relative to the line box;
    // line_idx maps it to one of the line_cnt equal-height bands
    int mid_y = (con_box->y - cracked_line_box->y) + (con_box->h / 2),
        line_idx = MIN(line_cnt - 1,
                       (mid_y * line_cnt / cracked_line_box->h));
    // create the line if it has not been created?
    if (lines_pixa[line_idx] == NULL) {
      lines_pixa[line_idx] = pixaCreate(line_con_comps->n);
      if (lines_pixa[line_idx] == NULL) {
        CleanupCrackLine(line_cnt, lines_pixa, &line_con_comps,
                         &line_con_comps_pix);
        return NULL;
      }
    }
    // add the concomp to the line
    if (pixaAddPix(lines_pixa[line_idx], con_pix, L_CLONE) != 0 ||
        pixaAddBox(lines_pixa[line_idx], con_box, L_CLONE)) {
      CleanupCrackLine(line_cnt, lines_pixa, &line_con_comps,
                       &line_con_comps_pix);
      return NULL;
    }
  }
  // create the lines pixa
  Pixa *lines = pixaCreate(line_cnt);
  bool success = true;
  // create and check the validity of the lines
  for (int line = 0; line < line_cnt; line++) {
    Pixa *line_pixa = lines_pixa[line];
    // skip invalid lines
    if (line_pixa == NULL) {
      continue;
    }
    // merge the pix, check the validity of the line
    // and add it to the lines pixa
    Box *line_box;
    Pix *line_pix = Pixa2Pix(line_pixa, &line_box);
    if (line_pix == NULL ||
        line_box == NULL ||
        ValidLine(line_pix, line_box) == false ||
        pixaAddPix(lines, line_pix, L_INSERT) != 0 ||
        pixaAddBox(lines, line_box, L_INSERT) != 0) {
      // on any failure release the pix/box not yet owned by 'lines'
      if (line_pix != NULL) {
        pixDestroy(&line_pix);
      }
      if (line_box != NULL) {
        boxDestroy(&line_box);
      }
      success = false;
      break;
    }
  }
  // cleanup
  CleanupCrackLine(line_cnt, lines_pixa, &line_con_comps,
                   &line_con_comps_pix);
  if (success == false) {
    pixaDestroy(&lines);
    lines = NULL;
  }
  return lines;
}
// do a desperate attempt at cracking lines
// Estimates the maximum number of lines the blob could contain from the
// estimated alef height, then tries successively larger line counts until
// one yields a valid segmentation. Returns NULL if none does.
Pixa *CubeLineSegmenter::CrackLine(Pix *cracked_line_pix,
                                   Box *cracked_line_box) {
  // estimate max line count (rounded to nearest integer)
  int max_line_cnt = static_cast<int>((cracked_line_box->h /
      est_alef_hgt_) + 0.5);
  if (max_line_cnt < 2) {
    return NULL;
  }
  Pixa *lines = NULL;
  for (int line_cnt = 2;
       lines == NULL && line_cnt < max_line_cnt;
       line_cnt++) {
    lines = CrackLine(cracked_line_pix, cracked_line_box, line_cnt);
  }
  return lines;
}
// split a line continuously until valid or fail
// Repeatedly applies vertical closing with decreasing thresholds until the
// line splits into all-valid lines. If that fails, tries CrackLine(), and
// finally falls back to the threshold that yielded the largest valid
// portion, keeping only the valid lines. Returns NULL if all attempts fail.
Pixa *CubeLineSegmenter::SplitLine(Pix *line_mask_pix, Box *line_box) {
  // clone the line mask
  Pix *line_pix = pixClone(line_mask_pix);
  if (line_pix == NULL) {
    return NULL;
  }
  // AND with the image to get the actual line
  pixRasterop(line_pix, 0, 0, line_pix->w, line_pix->h,
              PIX_SRC & PIX_DST, img_, line_box->x, line_box->y);
  // continue to do rasterop morphology on the line until
  // it splits to valid lines or we fail
  int morph_hgt = kLineSepMorphMinHgt - 1,
      best_threshold = kLineSepMorphMinHgt - 1,
      max_valid_portion = 0;
  Boxa *boxa;
  Pixa *pixac;
  do {
    pixac = VerticalClosing(line_pix, morph_hgt, &boxa);
    // bug fix: a failed closing previously caused a NULL dereference on
    // pixac->n below; bail out of the loop and fall through to cracking
    if (pixac == NULL) {
      break;
    }
    // add the box offset to all the lines
    // and check for the validity of each
    int line,
        valid_line_cnt = 0,
        valid_portion = 0;
    for (line = 0; line < pixac->n; line++) {
      boxa->box[line]->x += line_box->x;
      boxa->box[line]->y += line_box->y;
      if (ValidLine(pixac->pix[line], boxa->box[line]) == true) {
        // count valid lines
        valid_line_cnt++;
        // and the valid portions
        valid_portion += boxa->box[line]->h;
      }
    }
    // all the lines are valid
    if (valid_line_cnt == pixac->n) {
      boxaDestroy(&boxa);
      pixDestroy(&line_pix);
      return pixac;
    }
    // a larger valid portion: remember this threshold as the best so far
    if (valid_portion > max_valid_portion) {
      max_valid_portion = valid_portion;
      best_threshold = morph_hgt;
    }
    boxaDestroy(&boxa);
    pixaDestroy(&pixac);
    morph_hgt--;
  }
  while (morph_hgt > 0);
  // failed to break into valid lines
  // attempt to crack the line
  pixac = CrackLine(line_pix, line_box);
  if (pixac != NULL) {
    pixDestroy(&line_pix);
    return pixac;
  }
  // try to leverage any of the lines
  // did the best threshold yield a non zero valid portion
  if (max_valid_portion > 0) {
    // use this threshold to break lines
    pixac = VerticalClosing(line_pix, best_threshold, &boxa);
    // bug fix: guard against a failed closing here as well
    if (pixac == NULL) {
      pixDestroy(&line_pix);
      return NULL;
    }
    // add the box offset to all the lines
    // and check for the validity of each
    // NOTE(review): pixaRemovePix shrinks pixac but boxa is left intact,
    // so after the first removal pix and box indices drift apart;
    // preserved as-is pending confirmation of intended behavior
    for (int line = 0; line < pixac->n; line++) {
      boxa->box[line]->x += line_box->x;
      boxa->box[line]->y += line_box->y;
      // remove invalid lines from the pixa
      if (ValidLine(pixac->pix[line], boxa->box[line]) == false) {
        pixaRemovePix(pixac, line);
        line--;
      }
    }
    boxaDestroy(&boxa);
    pixDestroy(&line_pix);
    return pixac;
  }
  // nothing worked; give up on this line
  pixDestroy(&line_pix);
  return NULL;
}
// Checks if a line is too small to stand alone as a text line:
// its height must exceed a fraction of the estimated dot height.
bool CubeLineSegmenter::SmallLine(Box *line_box) {
  double min_valid_hgt = kMinValidLineHgtRatio * est_dot_hgt_;
  return (line_box->h <= min_valid_hgt);
}
// Compute the connected components in a line
// Returns the conn comp bounding boxes translated into page coordinates
// (offset by line_box). If con_comps_pixa is non-NULL, the component pix
// are returned through it as well. Returns NULL on failure.
Boxa * CubeLineSegmenter::ComputeLineConComps(Pix *line_mask_pix,
                                              Box *line_box,
                                              Pixa **con_comps_pixa) {
  // clone the line mask
  Pix *line_pix = pixClone(line_mask_pix);
  if (line_pix == NULL) {
    return NULL;
  }
  // AND with the image to get the actual line
  pixRasterop(line_pix, 0, 0, line_pix->w, line_pix->h,
              PIX_SRC & PIX_DST, img_, line_box->x, line_box->y);
  // compute the connected components of the line to be merged
  Boxa *line_con_comps = pixConnComp(line_pix, con_comps_pixa, 8);
  pixDestroy(&line_pix);
  // bug fix: guard against pixConnComp failure before dereferencing
  if (line_con_comps == NULL) {
    return NULL;
  }
  // offset boxes by the bbox of the line
  for (int con = 0; con < line_con_comps->n; con++) {
    line_con_comps->box[con]->x += line_box->x;
    line_con_comps->box[con]->y += line_box->y;
  }
  return line_con_comps;
}
// create a union of two arbitrary pix
// ORs src_pix and dest_pix into a newly allocated pix covering the union
// of their bounding boxes. On success *dest_box is overwritten with the
// union box. Returns NULL on failure (dest_box left untouched).
Pix *CubeLineSegmenter::PixUnion(Pix *dest_pix, Box *dest_box,
                                 Pix *src_pix, Box *src_box) {
  // compute dimensions of union rect
  BOX *union_box = boxBoundingRegion(src_box, dest_box);
  // bug fix: guard against allocation failure before dereferencing
  if (union_box == NULL) {
    return NULL;
  }
  // create the union pix
  Pix *union_pix = pixCreate(union_box->w, union_box->h, src_pix->d);
  if (union_pix == NULL) {
    // bug fix: do not leak the union box on failure
    boxDestroy(&union_box);
    return NULL;
  }
  // blt the src and dest pix, each offset relative to the union origin
  pixRasterop(union_pix,
              src_box->x - union_box->x, src_box->y - union_box->y,
              src_box->w, src_box->h, PIX_SRC | PIX_DST, src_pix, 0, 0);
  pixRasterop(union_pix,
              dest_box->x - union_box->x, dest_box->y - union_box->y,
              dest_box->w, dest_box->h, PIX_SRC | PIX_DST, dest_pix, 0, 0);
  // replace the dest_box
  *dest_box = *union_box;
  boxDestroy(&union_box);
  return union_pix;
}
// create a union of a number of arbitrary pix
// Unions pix [start_pix, start_pix + pix_cnt) of the pixa into a single
// new pix; *dest_box receives the newly allocated bounding box of the
// union. Returns NULL on failure.
Pix *CubeLineSegmenter::Pixa2Pix(Pixa *pixa, Box **dest_box,
                                 int start_pix, int pix_cnt) {
  // first pass: compute the bounding box of the requested range
  int left = INT_MAX;
  int right = INT_MIN;
  int top = INT_MAX;
  int bottom = INT_MIN;
  int end_pix = start_pix + pix_cnt;
  for (int idx = start_pix; idx < end_pix; idx++) {
    Box *bbox = pixa->boxa->box[idx];
    UpdateRange(bbox->x, bbox->x + bbox->w, &left, &right);
    UpdateRange(bbox->y, bbox->y + bbox->h, &top, &bottom);
  }
  (*dest_box) = boxCreate(left, top, right - left, bottom - top);
  if ((*dest_box) == NULL) {
    return NULL;
  }
  // create the union pix at the page image depth
  Pix *union_pix = pixCreate((*dest_box)->w, (*dest_box)->h, img_->d);
  if (union_pix == NULL) {
    boxDestroy(dest_box);
    return NULL;
  }
  // second pass: OR each component into place, offset by the union origin
  for (int idx = start_pix; idx < end_pix; idx++) {
    Box *bbox = pixa->boxa->box[idx];
    Pix *comp_pix = pixa->pix[idx];
    pixRasterop(union_pix,
                bbox->x - (*dest_box)->x, bbox->y - (*dest_box)->y,
                bbox->w, bbox->h, PIX_SRC | PIX_DST, comp_pix, 0, 0);
  }
  return union_pix;
}
// create a union of a number of arbitrary pix
// Convenience overload that unions every pix in the pixa; see the
// four-argument overload for ownership and failure semantics.
Pix *CubeLineSegmenter::Pixa2Pix(Pixa *pixa, Box **dest_box) {
  return Pixa2Pix(pixa, dest_box, 0, pixa->n);
}
// merges a number of lines into one line given a bounding box and a mask
// For each conn comp of the small line, finds the closest non-small line
// (smallest vertical gap among x-overlapping conn comps) and, if close
// enough, unions the comp into that line's pix. Returns false on failure.
bool CubeLineSegmenter::MergeLine(Pix *line_mask_pix, Box *line_box,
                                  Pixa *lines, Boxaa *lines_con_comps) {
  // compute the connected components of the lines to be merged
  Pixa *small_con_comps_pix;
  Boxa *small_line_con_comps = ComputeLineConComps(line_mask_pix,
      line_box, &small_con_comps_pix);
  if (small_line_con_comps == NULL) {
    return false;
  }
  // for each connected component
  for (int con = 0; con < small_line_con_comps->n; con++) {
    Box *small_con_comp_box = small_line_con_comps->box[con];
    int best_line = -1,
        best_dist = INT_MAX,
        small_box_right = small_con_comp_box->x + small_con_comp_box->w,
        small_box_bottom = small_con_comp_box->y + small_con_comp_box->h;
    // for each valid line
    for (int line = 0; line < lines->n; line++) {
      if (SmallLine(lines->boxa->box[line]) == true) {
        continue;
      }
      // for all the connected components in the line
      Boxa *line_con_comps = lines_con_comps->boxa[line];
      for (int lcon = 0; lcon < line_con_comps->n; lcon++) {
        Box *con_comp_box = line_con_comps->box[lcon];
        // xdist/ydist are the gaps between the two boxes in each
        // direction (negative when the boxes overlap)
        int xdist,
            ydist,
            box_right = con_comp_box->x + con_comp_box->w,
            box_bottom = con_comp_box->y + con_comp_box->h;
        xdist = MAX(small_con_comp_box->x, con_comp_box->x) -
            MIN(small_box_right, box_right);
        ydist = MAX(small_con_comp_box->y, con_comp_box->y) -
            MIN(small_box_bottom, box_bottom);
        // if there is an overlap in x-direction
        if (xdist <= 0) {
          if (best_line == -1 || ydist < best_dist) {
            best_dist = ydist;
            best_line = line;
          }
        }
      }
    }
    // if the distance is too big, do not merge
    if (best_line != -1 && best_dist < est_alef_hgt_) {
      // add the pix to the best line
      Pix *new_line = PixUnion(lines->pix[best_line],
          lines->boxa->box[best_line],
          small_con_comps_pix->pix[con], small_con_comp_box);
      if (new_line == NULL) {
        // bug fix: release the conn comp containers before bailing out
        pixaDestroy(&small_con_comps_pix);
        boxaDestroy(&small_line_con_comps);
        return false;
      }
      // swap the merged pix into the line's slot
      pixDestroy(&lines->pix[best_line]);
      lines->pix[best_line] = new_line;
    }
  }
  pixaDestroy(&small_con_comps_pix);
  boxaDestroy(&small_line_con_comps);
  return true;
}
// Creates new set of lines from the computed columns
// Splits invalid lines, computes per-line conn comp boxes, merges small
// lines into their closest valid neighbor, and finally inserts the result
// into columns_. Returns false on failure.
bool CubeLineSegmenter::AddLines(Pixa *lines) {
  // create an array that will hold the bounding boxes
  // of the concomps belonging to each line
  Boxaa *lines_con_comps = boxaaCreate(lines->n);
  if (lines_con_comps == NULL) {
    return false;
  }
  for (int line = 0; line < lines->n; line++) {
    // if the line is not valid
    if (ValidLine(lines->pix[line], lines->boxa->box[line]) == false) {
      // split it
      Pixa *split_lines = SplitLine(lines->pix[line],
          lines->boxa->box[line]);
      // remove the old line
      if (pixaRemovePix(lines, line) != 0) {
        // bug fix: release intermediates before bailing out
        if (split_lines != NULL) {
          pixaDestroy(&split_lines);
        }
        boxaaDestroy(&lines_con_comps);
        return false;
      }
      line--;
      if (split_lines == NULL) {
        continue;
      }
      // add the split lines instead and move the pointer
      for (int s_line = 0; s_line < split_lines->n; s_line++) {
        Pix *sp_line = pixaGetPix(split_lines, s_line, L_CLONE);
        Box *sp_box = boxaGetBox(split_lines->boxa, s_line, L_CLONE);
        if (sp_line == NULL || sp_box == NULL) {
          // bug fix: release whichever clone did succeed, plus the
          // containers, before bailing out
          if (sp_line != NULL) {
            pixDestroy(&sp_line);
          }
          if (sp_box != NULL) {
            boxDestroy(&sp_box);
          }
          pixaDestroy(&split_lines);
          boxaaDestroy(&lines_con_comps);
          return false;
        }
        // insert the new line (ownership of the clones moves to "lines"
        // on success)
        if (pixaInsertPix(lines, ++line, sp_line, sp_box) != 0) {
          // bug fix: release the containers before bailing out
          pixaDestroy(&split_lines);
          boxaaDestroy(&lines_con_comps);
          return false;
        }
      }
      // remove the split lines
      pixaDestroy(&split_lines);
    }
  }
  // compute the concomps bboxes of each line
  for (int line = 0; line < lines->n; line++) {
    Boxa *line_con_comps = ComputeLineConComps(lines->pix[line],
        lines->boxa->box[line], NULL);
    if (line_con_comps == NULL) {
      boxaaDestroy(&lines_con_comps);
      return false;
    }
    // insert it into the boxaa array (L_INSERT transfers ownership)
    if (boxaaAddBoxa(lines_con_comps, line_con_comps, L_INSERT) != 0) {
      boxaDestroy(&line_con_comps);
      boxaaDestroy(&lines_con_comps);
      return false;
    }
  }
  // post process the lines:
  // merge the contents of "small" lines into legitimate lines
  for (int line = 0; line < lines->n; line++) {
    // a small line detected
    if (SmallLine(lines->boxa->box[line]) == true) {
      // merge its components to one of the valid lines
      if (MergeLine(lines->pix[line], lines->boxa->box[line],
                    lines, lines_con_comps) == true) {
        // remove the small line
        if (pixaRemovePix(lines, line) != 0) {
          boxaaDestroy(&lines_con_comps);
          return false;
        }
        if (boxaaRemoveBoxa(lines_con_comps, line) != 0) {
          boxaaDestroy(&lines_con_comps);
          return false;
        }
        line--;
      }
    }
  }
  boxaaDestroy(&lines_con_comps);
  // add the pix masks
  if (pixaaAddPixa(columns_, lines, L_INSERT) != 0) {
    return false;
  }
  return true;
}
// Index the specific pixa using RTL reading order
// Returns a caller-owned permutation of [0, pixa->n) ordering the
// components so that the one with the right-most right edge comes first.
int *CubeLineSegmenter::IndexRTL(Pixa *pixa) {
  int pix_cnt = pixa->n;
  int *pix_index = new int[pix_cnt];
  if (pix_index == NULL) {
    return NULL;
  }
  // start with the identity permutation
  for (int idx = 0; idx < pix_cnt; idx++) {
    pix_index[idx] = idx;
  }
  // selection-style sort of the indices: descending by right edge
  for (int outer = 0; outer < pix_cnt; outer++) {
    for (int inner = outer + 1; inner < pix_cnt; inner++) {
      Box *outer_box = pixa->boxa->box[pix_index[outer]];
      Box *inner_box = pixa->boxa->box[pix_index[inner]];
      // swap if the inner component's right edge is further right
      if ((outer_box->x + outer_box->w) < (inner_box->x + inner_box->w)) {
        int temp = pix_index[outer];
        pix_index[outer] = pix_index[inner];
        pix_index[inner] = temp;
      }
    }
  }
  return pix_index;
}
// Performs line segmentation
// Finds columns via whole-image morphology, extracts a textline mask per
// column, splits each column into line conn comps, and hands them to
// AddLines(). Populates columns_ and line_cnt_. Returns false on failure.
// NOTE(review): the early "return false" paths below leak pix_temp1,
// boxa, pixam, pixad and/or col_order -- flagged for a follow-up fix.
bool CubeLineSegmenter::LineSegment() {
  // Use full image morphology to find columns
  // This only works for simple layouts where each column
  // of text extends the full height of the input image.
  Pix *pix_temp1 = pixMorphCompSequence(img_, "c5.500", 0);
  if (pix_temp1 == NULL) {
    return false;
  }
  // Mask with a single component over each column
  Pixa *pixam;
  Boxa *boxa = pixConnComp(pix_temp1, &pixam, 8);
  if (boxa == NULL) {
    return false;
  }
  int init_morph_min_hgt = kLineSepMorphMinHgt;
  char sequence_str[16];
  sprintf(sequence_str, "c100.%d", init_morph_min_hgt);
  // Use selective region-based morphology to get the textline mask.
  Pixa *pixad = pixaMorphSequenceByRegion(img_, pixam, sequence_str, 0, 0);
  if (pixad == NULL) {
    return false;
  }
  // for all columns
  int col_cnt = boxaGetCount(boxa);
  // create columns
  columns_ = pixaaCreate(col_cnt);
  if (columns_ == NULL) {
    return false;
  }
  // index columns based on readind order (RTL)
  int *col_order = IndexRTL(pixad);
  if (col_order == NULL) {
    return false;
  }
  line_cnt_ = 0;
  for (int col_idx = 0; col_idx < col_cnt; col_idx++) {
    int col = col_order[col_idx];
    // get the pix and box corresponding to the column
    Pix *pixt3 = pixaGetPix(pixad, col, L_CLONE);
    if (pixt3 == NULL) {
      delete []col_order;
      return false;
    }
    Box *col_box = pixad->boxa->box[col];
    Pixa *pixac;
    Boxa *boxa2 = pixConnComp(pixt3, &pixac, 8);
    if (boxa2 == NULL) {
      delete []col_order;
      return false;
    }
    // offset the boxes by the column box
    for (int line = 0; line < pixac->n; line++) {
      pixac->boxa->box[line]->x += col_box->x;
      pixac->boxa->box[line]->y += col_box->y;
    }
    // add the lines
    if (AddLines(pixac) == true) {
      if (pixaaAddBox(columns_, col_box, L_CLONE) != 0) {
        delete []col_order;
        return false;
      }
    }
    pixDestroy(&pixt3);
    boxaDestroy(&boxa2);
    // NOTE(review): when AddLines() fails, no pixa was appended to
    // columns_ for this column, so columns_->pixa[col_idx] here can
    // index past the populated entries -- confirm and fix separately
    line_cnt_ += columns_->pixa[col_idx]->n;
  }
  pixaDestroy(&pixam);
  pixaDestroy(&pixad);
  boxaDestroy(&boxa);
  delete []col_order;
  pixDestroy(&pix_temp1);
  return true;
}
// Estimate the parameters of the font(s) used in the page
// Builds a histogram of conn comp heights and takes the two most frequent
// height bins as estimates of the dot and alef heights (the two dominant
// glyph sizes in Arabic script), with a mean-height fallback sanity check.
// Returns false when no usable conn comps exist.
bool CubeLineSegmenter::EstimateFontParams() {
  int hgt_hist[kHgtBins];
  int max_hgt;
  double mean_hgt;
  // init hgt histogram of concomps
  // (the redundant second memset present in the original has been removed)
  memset(hgt_hist, 0, sizeof(hgt_hist));
  // compute max hgt
  max_hgt = 0;
  for (int con = 0; con < con_comps_->n; con++) {
    // skip conn comps that are too long or too wide
    if (con_comps_->boxa->box[con]->h > kMaxConnCompHgt ||
        con_comps_->boxa->box[con]->w > kMaxConnCompWid) {
      continue;
    }
    max_hgt = MAX(max_hgt, con_comps_->boxa->box[con]->h);
  }
  if (max_hgt <= 0) {
    return false;
  }
  // compute histogram
  mean_hgt = 0.0;
  for (int con = 0; con < con_comps_->n; con++) {
    // skip conn comps that are too long or too wide
    if (con_comps_->boxa->box[con]->h > kMaxConnCompHgt ||
        con_comps_->boxa->box[con]->w > kMaxConnCompWid) {
      continue;
    }
    int bin = static_cast<int>(kHgtBins * con_comps_->boxa->box[con]->h /
                               max_hgt);
    bin = MIN(bin, kHgtBins - 1);
    hgt_hist[bin]++;
    mean_hgt += con_comps_->boxa->box[con]->h;
  }
  // NOTE(review): the mean divides by the total conn comp count including
  // the skipped oversized comps; preserved as-is
  mean_hgt /= con_comps_->n;
  // find the top 2 bins (partial selection sort of bin indices by count)
  int idx[kHgtBins];
  for (int bin = 0; bin < kHgtBins; bin++) {
    idx[bin] = bin;
  }
  for (int ibin = 0; ibin < 2; ibin++) {
    for (int jbin = ibin + 1; jbin < kHgtBins; jbin++) {
      if (hgt_hist[idx[ibin]] < hgt_hist[idx[jbin]]) {
        int swap = idx[ibin];
        idx[ibin] = idx[jbin];
        idx[jbin] = swap;
      }
    }
  }
  // empirically, we found out that the 2 highest freq bins correspond
  // respectively to the dot and alef
  est_dot_hgt_ = (1.0 * (idx[0] + 1) * max_hgt / kHgtBins);
  est_alef_hgt_ = (1.0 * (idx[1] + 1) * max_hgt / kHgtBins);
  // as a sanity check the dot hgt must be significantly lower than alef
  if (est_alef_hgt_ < (est_dot_hgt_ * 2)) {
    // use the mean height to estimate instead
    est_alef_hgt_ = mean_hgt * 1.5;
    est_dot_hgt_ = est_alef_hgt_ / 5.0;
  }
  est_alef_hgt_ = MAX(est_alef_hgt_, est_dot_hgt_ * 4.0);
  return true;
}
// clean up the image
// Removes long horizontal and vertical ruling lines and suspicious conn
// comps (extreme aspect ratio or too small). Returns a new caller-owned
// pix, or NULL on failure.
Pix *CubeLineSegmenter::CleanUp(Pix *orig_img) {
  // get rid of long horizontal lines
  Pix *pix_temp0 = pixMorphCompSequence(orig_img, "o300.2", 0);
  // bug fix: guard against morphology failure before pixXor dereferences
  if (pix_temp0 == NULL) {
    return NULL;
  }
  pixXor(pix_temp0, pix_temp0, orig_img);
  // get rid of long vertical lines
  Pix *pix_temp1 = pixMorphCompSequence(pix_temp0, "o2.300", 0);
  if (pix_temp1 == NULL) {
    pixDestroy(&pix_temp0);
    return NULL;
  }
  pixXor(pix_temp1, pix_temp1, pix_temp0);
  pixDestroy(&pix_temp0);
  // detect connected components
  Pixa *con_comps;
  Boxa *boxa = pixConnComp(pix_temp1, &con_comps, 8);
  if (boxa == NULL) {
    // bug fix: do not leak the cleaned-up pix on failure
    pixDestroy(&pix_temp1);
    return NULL;
  }
  // detect and remove suspicious conn comps
  for (int con = 0; con < con_comps->n; con++) {
    Box *box = boxa->box[con];
    // remove if suspc. conn comp: too wide, too tall, or too small
    if ((box->w > (box->h * kMaxHorzAspectRatio)) ||
        (box->h > (box->w * kMaxVertAspectRatio)) ||
        (box->w < kMinWid && box->h < kMinHgt)) {
      // XOR the component out of the image
      pixRasterop(pix_temp1, box->x, box->y, box->w, box->h,
                  PIX_SRC ^ PIX_DST, con_comps->pix[con], 0, 0);
    }
  }
  pixaDestroy(&con_comps);
  boxaDestroy(&boxa);
  return pix_temp1;
}
// Init the page segmenter
// Idempotent: returns immediately once initialization has succeeded.
bool CubeLineSegmenter::Init() {
  // nothing to do if already initialized
  if (init_ == true) {
    return true;
  }
  // cannot proceed without an input image; otherwise run the
  // internal line segmentation
  return (orig_img_ == NULL) ? false : FindLines();
}
// return the pix mask and box of a specific line
// Lazily initializes the segmenter; returns NULL for an out-of-range
// index or on initialization failure. The returned pix/box remain owned
// by lines_pixa_.
Pix *CubeLineSegmenter::Line(int line, Box **line_box) {
  if (init_ == false && Init() == false) {
    return NULL;
  }
  if (line >= 0 && line < line_cnt_) {
    (*line_box) = lines_pixa_->boxa->box[line];
    return lines_pixa_->pix[line];
  }
  return NULL;
}
// Implements a basic rudimentary layout analysis based on Leptonica
// works OK for Arabic. For other languages, the function TesseractPageAnalysis
// should be called instead.
// Pipeline: grayscale -> binarize -> deskew -> CleanUp -> conn comps ->
// font param estimation -> line segmentation. Sets init_ on success.
bool CubeLineSegmenter::FindLines() {
  // convert the image to gray scale if necessary
  // NOTE(review): the depth test of 2 looks odd (binary is depth 1,
  // gray is 8); preserved as-is -- confirm intended depths
  Pix *gray_scale_img = NULL;
  if (orig_img_->d != 2 && orig_img_->d != 8) {
    gray_scale_img = pixConvertTo8(orig_img_, false);
    if (gray_scale_img == NULL) {
      return false;
    }
  } else {
    gray_scale_img = orig_img_;
  }
  // threshold image
  Pix *thresholded_img;
  thresholded_img = pixThresholdToBinary(gray_scale_img, 128);
  // free the gray scale image if necessary
  if (gray_scale_img != orig_img_) {
    pixDestroy(&gray_scale_img);
  }
  // bail-out if thresholding failed
  if (thresholded_img == NULL) {
    return false;
  }
  // deskew
  Pix *deskew_img = pixDeskew(thresholded_img, 2);
  if (deskew_img == NULL) {
    // bug fix: do not leak the thresholded image on deskew failure
    pixDestroy(&thresholded_img);
    return false;
  }
  pixDestroy(&thresholded_img);
  // remove ruling lines and noise comps
  img_ = CleanUp(deskew_img);
  pixDestroy(&deskew_img);
  // (a redundant second pixDestroy(&deskew_img) was removed; pixDestroy
  // NULLs its argument, so the duplicate was a no-op)
  if (img_ == NULL) {
    return false;
  }
  // compute connected components
  Boxa *boxa = pixConnComp(img_, &con_comps_, 8);
  if (boxa == NULL) {
    return false;
  }
  boxaDestroy(&boxa);
  // estimate dot and alef hgts
  if (EstimateFontParams() == false) {
    return false;
  }
  // perform line segmentation
  if (LineSegment() == false) {
    return false;
  }
  // success
  init_ = true;
  return true;
}
}
| 1080228-arabicocr11 | cube/cube_line_segmenter.cpp | C++ | asf20 | 25,732 |
/**********************************************************************
* File: cube_page_segmenter.h
* Description: Declaration of the Cube Page Segmenter Class
* Author: Ahmad Abdulkader
* Created: 2007
*
* (C) Copyright 2008, Google Inc.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
// TODO(ahmadab)
// This is really a makeshift line segmenter that works well for Arabic
// This should eventually be replaced by Ray Smith's Page segmenter
// There are lots of magic numbers below that were determined empirically
// but not thoroughly tested
#ifndef CUBE_LINE_SEGMENTER_H
#define CUBE_LINE_SEGMENTER_H
#include "cube_reco_context.h"
#include "allheaders.h"
namespace tesseract {
// Performs a rudimentary page layout analysis (columns and text lines)
// using Leptonica morphology. Lazily initialized: every accessor triggers
// the segmentation on first use and returns NULL/0 on failure.
class CubeLineSegmenter {
 public:
  CubeLineSegmenter(CubeRecoContext *cntxt, Pix *img);
  ~CubeLineSegmenter();

  // Accessor functions
  // The post-processed (cleaned up, deskewed, binarized) page image.
  Pix *PostProcessedImage() {
    if (init_ == false && Init() == false) {
      return NULL;
    }
    return img_;
  }
  int ColumnCnt() {
    if (init_ == false && Init() == false) {
      return 0;
    }
    return columns_->n;
  }
  Box *Column(int col) {
    if (init_ == false && Init() == false) {
      return NULL;
    }
    return columns_->boxa->box[col];
  }
  int LineCnt() {
    if (init_ == false && Init() == false) {
      return 0;
    }
    return line_cnt_;
  }
  Pixa *ConComps() {
    if (init_ == false && Init() == false) {
      return NULL;
    }
    return con_comps_;
  }
  Pixaa *Columns() {
    if (init_ == false && Init() == false) {
      return NULL;
    }
    return columns_;
  }
  inline double AlefHgtEst() { return est_alef_hgt_; }
  inline double DotHgtEst() { return est_dot_hgt_; }
  Pix *Line(int line, Box **line_box);

 private:
  // Tuning constants, determined empirically (values in the .cpp)
  static const float kMinValidLineHgtRatio;
  static const int kLineSepMorphMinHgt;
  static const int kHgtBins;
  static const int kMaxConnCompHgt;
  static const int kMaxConnCompWid;
  static const int kMaxHorzAspectRatio;
  static const int kMaxVertAspectRatio;
  static const int kMinWid;
  static const int kMinHgt;
  static const double kMaxValidLineRatio;
  // Cube Reco context
  CubeRecoContext *cntxt_;
  // Original image
  Pix *orig_img_;
  // Post processed image
  Pix *img_;
  // Init flag
  bool init_;
  // Output Line and column info
  int line_cnt_;
  Pixaa *columns_;
  Pixa *con_comps_;
  Pixa *lines_pixa_;
  // Estimates for sizes of ALEF and DOT needed for Arabic analysis
  double est_alef_hgt_;
  double est_dot_hgt_;
  // Init the page analysis
  bool Init();
  // Performs line segmentation
  bool LineSegment();
  // Cleanup function
  Pix *CleanUp(Pix *pix);
  // compute validity ratio for a line
  double ValidityRatio(Pix *line_mask_pix, Box *line_box);
  // validate line
  bool ValidLine(Pix *line_mask_pix, Box *line_box);
  // split a line continuously until valid or fail
  Pixa *SplitLine(Pix *line_mask_pix, Box *line_box);
  // do a desperate attempt at cracking lines
  Pixa *CrackLine(Pix *line_mask_pix, Box *line_box);
  Pixa *CrackLine(Pix *line_mask_pix, Box *line_box, int line_cnt);
  // Checks if a line is too small
  bool SmallLine(Box *line_box);
  // Compute the connected components in a line
  Boxa * ComputeLineConComps(Pix *line_mask_pix, Box *line_box,
                             Pixa **con_comps_pixa);
  // create a union of two arbitrary pix
  Pix *PixUnion(Pix *dest_pix, Box *dest_box, Pix *src_pix, Box *src_box);
  // create a union of a pixa subset
  Pix *Pixa2Pix(Pixa *pixa, Box **dest_box, int start_pix, int pix_cnt);
  // create a union of a pixa
  Pix *Pixa2Pix(Pixa *pixa, Box **dest_box);
  // merges a number of lines into one line given a bounding box and a mask
  bool MergeLine(Pix *line_mask_pix, Box *line_box,
                 Pixa *lines, Boxaa *lines_con_comps);
  // Creates new set of lines from the computed columns
  bool AddLines(Pixa *lines);
  // Estimate the parameters of the font(s) used in the page
  bool EstimateFontParams();
  // perform a vertical Closing with the specified threshold
  // returning the resulting conn comps as a pixa
  // (declaration typo "thresold" fixed; definitions are unaffected)
  Pixa *VerticalClosing(Pix *pix, int threshold, Boxa **boxa);
  // Index the specific pixa using RTL reading order
  int *IndexRTL(Pixa *pixa);
  // Implements a rudimentary page & line segmenter
  bool FindLines();
};
}
#endif // CUBE_LINE_SEGMENTER_H
| 1080228-arabicocr11 | cube/cube_line_segmenter.h | C++ | asf20 | 4,902 |
/**********************************************************************
* File: search_node.h
* Description: Declaration of the Beam Search Node Class
* Author: Ahmad Abdulkader
* Created: 2008
*
* (C) Copyright 2008, Google Inc.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
// The SearchNode class abstracts the search lattice node in the lattice
// generated by the BeamSearch class
// The SearchNode class holds the lang_mod_edge associated with the lattice
// node. It also holds a pointer to the parent SearchNode in the search path
// In addition it holds the recognition and the language model costs of the
// node and the path leading to this node
#ifndef SEARCH_NODE_H
#define SEARCH_NODE_H
#include "lang_mod_edge.h"
#include "cube_reco_context.h"
namespace tesseract {
class SearchNode {
 public:
  // parent_node is NULL for nodes in the first search column
  SearchNode(CubeRecoContext *cntxt, SearchNode *parent_node,
             int char_reco_cost, LangModEdge *edge, int col_idx);
  ~SearchNode();
  // Updates the parent of the current node if the specified path yields
  // a better path cost
  bool UpdateParent(SearchNode *new_parent, int new_reco_cost,
                    LangModEdge *new_edge);
  // returns the 32-bit string corresponding to the path leading to this node
  char_32 *PathString();
  // True if the two input nodes correspond to the same path
  static bool IdenticalPath(SearchNode *node1, SearchNode *node2);
  inline const char_32 *NodeString() { return str_; }
  inline void SetString(char_32 *str) { str_ = str; }
  // This node's character recognition cost.
  inline int CharRecoCost() { return char_reco_cost_; }
  // Total character recognition cost of the nodes in the best path,
  // excluding this node.
  inline int BestPathRecoCost() { return best_path_reco_cost_; }
  // Number of nodes in best path.
  inline int BestPathLength() { return best_path_len_; }
  // Mean mixed cost, i.e., mean character recognition cost +
  // current language model cost, all weighted by the RecoWgt parameter
  inline int BestCost() { return best_cost_; }
  // Mean character recognition cost of the nodes on the best path,
  // including this node.
  inline int BestRecoCost() { return mean_char_reco_cost_ ; }
  inline int ColIdx() { return col_idx_; }
  inline SearchNode *ParentNode() { return parent_node_; }
  inline LangModEdge *LangModelEdge() { return lang_mod_edge_;}
  inline int LangModCost() { return LangModCost(lang_mod_edge_, parent_node_); }
  // A comparer function that allows the SearchColumn class to sort the
  // nodes based on the path cost. qsort-compatible: orders SearchNode*
  // elements ascending by best_cost_.
  inline static int SearchNodeComparer(const void *node1, const void *node2) {
    return (*(reinterpret_cast<SearchNode * const *>(node1)))->best_cost_ -
        (*(reinterpret_cast<SearchNode * const *>(node2)))->best_cost_;
  }
 private:
  CubeRecoContext *cntxt_;
  // Character code (string form of this node's character)
  const char_32 *str_;
  // Recognition cost of most recent character
  int char_reco_cost_;
  // Mean mixed cost, i.e., mean character recognition cost +
  // current language model cost, all weighted by the RecoWgt parameter
  int best_cost_;
  // Mean character recognition cost of the nodes on the best path,
  // including this node.
  int mean_char_reco_cost_ ;
  // Total character recognition cost of the nodes in the best path,
  // excluding this node.
  int best_path_reco_cost_;
  // Number of nodes in best path.
  int best_path_len_;
  // Column index (position of this node in the beam search lattice)
  int col_idx_;
  // Parent Node (NULL for first-column nodes)
  SearchNode *parent_node_;
  // Language model edge associated with this lattice node
  LangModEdge *lang_mod_edge_;
  // Computes the language model cost contribution of taking lang_mod_edge
  // from parent_node
  static int LangModCost(LangModEdge *lang_mod_edge, SearchNode *parent_node);
};
// Implments a SearchNode hash table used to detect if a Search Node exists
// or not. This is needed to make sure that identical paths in the BeamSearch
// converge
class SearchNodeHashTable {
 public:
  SearchNodeHashTable() {
    // only the bin sizes need zeroing; bin_array_ entries are read only
    // up to bin_size_array_[bin]
    memset(bin_size_array_, 0, sizeof(bin_size_array_));
  }
  ~SearchNodeHashTable() {
    // intentionally empty: the table stores raw pointers and does not
    // own the SearchNode objects (nothing is deleted here)
  }
  // inserts an entry in the hash table
  // Returns false when the target bin is already full (kMaxSearchNodePerBin);
  // the node is simply not recorded in that case.
  inline bool Insert(LangModEdge *lang_mod_edge, SearchNode *srch_node) {
    // compute hash based on the edge and its parent node edge
    unsigned int edge_hash = lang_mod_edge->Hash();
    unsigned int parent_hash = (srch_node->ParentNode() == NULL ?
        0 : srch_node->ParentNode()->LangModelEdge()->Hash());
    unsigned int hash_bin = (edge_hash + parent_hash) % kSearchNodeHashBins;
    // already maxed out, just fail
    if (bin_size_array_[hash_bin] >= kMaxSearchNodePerBin) {
      return false;
    }
    bin_array_[hash_bin][bin_size_array_[hash_bin]++] = srch_node;
    return true;
  }
  // Looks up an entry in the hash table
  // Returns the stored node whose edge and full parent path match, or NULL.
  inline SearchNode *Lookup(LangModEdge *lang_mod_edge,
                            SearchNode *parent_node) {
    // compute hash based on the edge and its parent node edge
    unsigned int edge_hash = lang_mod_edge->Hash();
    unsigned int parent_hash = (parent_node == NULL ?
        0 : parent_node->LangModelEdge()->Hash());
    unsigned int hash_bin = (edge_hash + parent_hash) % kSearchNodeHashBins;
    // lookup the entries in the hash bin (linear scan within the bin)
    for (int node_idx = 0; node_idx < bin_size_array_[hash_bin]; node_idx++) {
      if (lang_mod_edge->IsIdentical(
          bin_array_[hash_bin][node_idx]->LangModelEdge()) == true &&
          SearchNode::IdenticalPath(
          bin_array_[hash_bin][node_idx]->ParentNode(), parent_node) == true) {
        return bin_array_[hash_bin][node_idx];
      }
    }
    return NULL;
  }
 private:
  // Hash bin size parameters. These were determined emperically. These affect
  // the speed of the beam search but have no impact on accuracy
  static const int kSearchNodeHashBins = 4096;
  static const int kMaxSearchNodePerBin = 512;
  int bin_size_array_[kSearchNodeHashBins];
  // NOTE: 4096 x 512 pointers = ~16 MB of pointer storage per instance on
  // 64-bit builds; instances should not be stack-allocated
  SearchNode *bin_array_[kSearchNodeHashBins][kMaxSearchNodePerBin];
};
}
#endif // SEARCH_NODE_H
| 1080228-arabicocr11 | cube/search_node.h | C++ | asf20 | 6,454 |
/**********************************************************************
* File: charclassifier.cpp
* Description: Implementation of Convolutional-NeuralNet Character Classifier
* Author: Ahmad Abdulkader
* Created: 2007
*
* (C) Copyright 2008, Google Inc.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
#include <algorithm>
#include <stdio.h>
#include <stdlib.h>
#include <string>
#include <vector>
#include <wctype.h>
#include "char_set.h"
#include "classifier_base.h"
#include "const.h"
#include "conv_net_classifier.h"
#include "cube_utils.h"
#include "feature_base.h"
#include "feature_bmp.h"
#include "tess_lang_model.h"
namespace tesseract {
// Constructor: forwards the char set, tuning params and feature extractor
// to the base CharClassifier. The net and its I/O buffers start NULL and
// are allocated lazily by RunNets() on first use.
ConvNetCharClassifier::ConvNetCharClassifier(CharSet *char_set,
                                             TuningParams *params,
                                             FeatureBase *feat_extract)
    : CharClassifier(char_set, params, feat_extract) {
  char_net_ = NULL;
  net_input_ = NULL;
  net_output_ = NULL;
}
// Destructor: releases the network and its I/O buffers.
// delete on a NULL pointer is a no-op, so no guards are needed; the
// pointers are reset afterwards for defensiveness against stale access.
ConvNetCharClassifier::~ConvNetCharClassifier() {
  delete char_net_;
  char_net_ = NULL;
  delete []net_input_;
  net_input_ = NULL;
  delete []net_output_;
  net_output_ = NULL;
}
// The main training function. Given a sample and a class ID the classifier
// updates its parameters according to its learning algorithm. This function
// is currently not implemented. TODO(ahmadab): implement end-2-end training
// Always returns false (training unsupported).
bool ConvNetCharClassifier::Train(CharSamp *char_samp, int ClassID) {
  return false;
}
// A secondary function needed for training. Allows the trainer to set the
// value of any train-time parameter. This function is currently not
// implemented. TODO(ahmadab): implement end-2-end training
// Always returns false (training unsupported).
bool ConvNetCharClassifier::SetLearnParam(char *var_name, float val) {
  // TODO(ahmadab): implementation of parameter initializing.
  return false;
}
// Folds the output of the NeuralNet using the loaded folding sets
// Two steps: (1) in case-insensitive mode, each class activation is
// replaced by the max of the class and its upper-case counterpart;
// (2) each loaded folding set shares activation among its members.
void ConvNetCharClassifier::Fold() {
  // in case insensitive mode
  if (case_sensitive_ == false) {
    int class_cnt = char_set_->ClassCount();
    // fold case
    for (int class_id = 0; class_id < class_cnt; class_id++) {
      // get class string
      const char_32 *str32 = char_set_->ClassString(class_id);
      // get the upper case form of the string
      string_32 upper_form32 = str32;
      for (int ch = 0; ch < upper_form32.length(); ch++) {
        if (iswalpha(static_cast<int>(upper_form32[ch])) != 0) {
          upper_form32[ch] = towupper(upper_form32[ch]);
        }
      }
      // find out the upperform class-id if any (-1 if no such class)
      int upper_class_id =
          char_set_->ClassID(reinterpret_cast<const char_32 *>(
              upper_form32.c_str()));
      if (upper_class_id != -1 && class_id != upper_class_id) {
        // both casings get the larger of the two activations
        float max_out = MAX(net_output_[class_id], net_output_[upper_class_id]);
        net_output_[class_id] = max_out;
        net_output_[upper_class_id] = max_out;
      }
    }
  }
  // The folding sets specify how groups of classes should be folded
  // Folding involved assigning a min-activation to all the members
  // of the folding set. The min-activation is a fraction of the max-activation
  // of the members of the folding set
  for (int fold_set = 0; fold_set < fold_set_cnt_; fold_set++) {
    if (fold_set_len_[fold_set] == 0)
      continue;
    // find the maximum activation among the set members
    float max_prob = net_output_[fold_sets_[fold_set][0]];
    for (int ch = 1; ch < fold_set_len_[fold_set]; ch++) {
      if (net_output_[fold_sets_[fold_set][ch]] > max_prob) {
        max_prob = net_output_[fold_sets_[fold_set][ch]];
      }
    }
    // raise every member to at least kFoldingRatio of the set maximum
    for (int ch = 0; ch < fold_set_len_[fold_set]; ch++) {
      net_output_[fold_sets_[fold_set][ch]] = MAX(max_prob * kFoldingRatio,
          net_output_[fold_sets_[fold_set][ch]]);
    }
  }
}
// Compute the features of specified charsamp and feedforward the
// specified nets.
// Returns false (with a message on stderr) if the net is missing, buffer
// allocation fails, feature extraction fails, or feed-forward fails. On
// success, net_output_ holds the per-class activations after Fold().
bool ConvNetCharClassifier::RunNets(CharSamp *char_samp) {
  if (char_net_ == NULL) {
    fprintf(stderr, "Cube ERROR (ConvNetCharClassifier::RunNets): "
            "NeuralNet is NULL\n");
    return false;
  }
  int feat_cnt = char_net_->in_cnt();
  int class_cnt = char_set_->ClassCount();
  // allocate i/p and o/p buffers if needed; they are reused across calls
  // and released in the destructor
  if (net_input_ == NULL) {
    net_input_ = new float[feat_cnt];
    // NOTE(review): under a standard (throwing) operator new these NULL
    // checks are dead code; kept as-is for byte-identical behavior.
    if (net_input_ == NULL) {
      fprintf(stderr, "Cube ERROR (ConvNetCharClassifier::RunNets): "
              "unable to allocate memory for input nodes\n");
      return false;
    }
    net_output_ = new float[class_cnt];
    if (net_output_ == NULL) {
      fprintf(stderr, "Cube ERROR (ConvNetCharClassifier::RunNets): "
              "unable to allocate memory for output nodes\n");
      return false;
    }
  }
  // compute input features
  if (feat_extract_->ComputeFeatures(char_samp, net_input_) == false) {
    fprintf(stderr, "Cube ERROR (ConvNetCharClassifier::RunNets): "
            "unable to compute features\n");
    return false;
  }
  // NOTE(review): char_net_ was already null-checked at the top of the
  // function, so this branch is redundant (kept unchanged).
  if (char_net_ != NULL) {
    if (char_net_->FeedForward(net_input_, net_output_) == false) {
      fprintf(stderr, "Cube ERROR (ConvNetCharClassifier::RunNets): "
              "unable to run feed-forward\n");
      return false;
    }
  } else {
    return false;
  }
  Fold();
  return true;
}
// Returns the cost (-ve log prob) of the sample being a character.
// net_output_[0] is the no-char activation, so the char probability is its
// complement. Returns 0 if the nets cannot be run.
int ConvNetCharClassifier::CharCost(CharSamp *char_samp) {
  const bool nets_ok = RunNets(char_samp);
  if (!nets_ok) {
    return 0;
  }
  float char_prob = 1.0f - net_output_[0];
  return CubeUtils::Prob2Cost(char_prob);
}
// classifies a charsamp and returns an alternate list
// of chars sorted by char costs
// Returns NULL on failure; on success the caller owns the returned list.
CharAltList *ConvNetCharClassifier::Classify(CharSamp *char_samp) {
  // run the needed nets
  if (RunNets(char_samp) == false) {
    return NULL;
  }
  int class_cnt = char_set_->ClassCount();
  // create an altlist
  CharAltList *alt_list = new CharAltList(char_set_, class_cnt);
  if (alt_list == NULL) {
    fprintf(stderr, "Cube WARNING (ConvNetCharClassifier::Classify): "
            "returning emtpy CharAltList\n");
    return NULL;
  }
  // output 0 is skipped: CharCost() uses net_output_[0] as the no-char
  // activation, so it is not a real character class
  for (int out = 1; out < class_cnt; out++) {
    int cost = CubeUtils::Prob2Cost(net_output_[out]);
    alt_list->Insert(out, cost);
  }
  return alt_list;
}
// Set an external net (for training purposes).
// Takes ownership of char_net (it is deleted in the destructor); any
// previously owned net is released first. delete on NULL is a no-op.
void ConvNetCharClassifier::SetNet(tesseract::NeuralNet *char_net) {
  delete char_net_;
  char_net_ = char_net;
}
// This function will return true if the file does not exist.
// But will fail if the it did not pass the sanity checks
// Loads the optional <lang>.cube.fold file: one folding set per line; each
// line is cleaned by the language model and converted to class IDs.
// Sets with fewer than 2 valid characters are invalidated (len 0, NULL).
// NOTE(review): if this is ever called twice, the previously allocated
// fold_sets_/fold_set_len_ arrays would leak — confirm it is init-once.
bool ConvNetCharClassifier::LoadFoldingSets(const string &data_file_path,
                                            const string &lang,
                                            LangModel *lang_mod) {
  fold_set_cnt_ = 0;
  string fold_file_name;
  fold_file_name = data_file_path + lang;
  fold_file_name += ".cube.fold";
  // folding sets are optional: probe for existence first
  FILE *fp = fopen(fold_file_name.c_str(), "rb");
  if (fp == NULL) {
    return true;
  }
  fclose(fp);
  string fold_sets_str;
  if (!CubeUtils::ReadFileToString(fold_file_name,
                                   &fold_sets_str)) {
    return false;
  }
  // split into lines; each line is one folding set
  vector<string> str_vec;
  CubeUtils::SplitStringUsing(fold_sets_str, "\r\n", &str_vec);
  fold_set_cnt_ = str_vec.size();
  fold_sets_ = new int *[fold_set_cnt_];
  if (fold_sets_ == NULL) {
    return false;
  }
  fold_set_len_ = new int[fold_set_cnt_];
  if (fold_set_len_ == NULL) {
    fold_set_cnt_ = 0;
    return false;
  }
  for (int fold_set = 0; fold_set < fold_set_cnt_; fold_set++) {
    // strip characters the language model does not know about
    reinterpret_cast<TessLangModel *>(lang_mod)->RemoveInvalidCharacters(
        &str_vec[fold_set]);
    // if all or all but one character are invalid, invalidate this set
    if (str_vec[fold_set].length() <= 1) {
      fprintf(stderr, "Cube WARNING (ConvNetCharClassifier::LoadFoldingSets): "
              "invalidating folding set %d\n", fold_set);
      fold_set_len_[fold_set] = 0;
      fold_sets_[fold_set] = NULL;
      continue;
    }
    string_32 str32;
    CubeUtils::UTF8ToUTF32(str_vec[fold_set].c_str(), &str32);
    fold_set_len_[fold_set] = str32.length();
    fold_sets_[fold_set] = new int[fold_set_len_[fold_set]];
    if (fold_sets_[fold_set] == NULL) {
      fprintf(stderr, "Cube ERROR (ConvNetCharClassifier::LoadFoldingSets): "
              "could not allocate folding set\n");
      // truncate the set count so Fold() only sees the valid prefix
      fold_set_cnt_ = fold_set;
      return false;
    }
    // map each character of the set to its class ID
    for (int ch = 0; ch < fold_set_len_[fold_set]; ch++) {
      fold_sets_[fold_set][ch] = char_set_->ClassID(str32[ch]);
    }
  }
  return true;
}
// Init the classifier provided a data-path and a language string.
// Loads the optional neural net and folding sets; each loader returns true
// when its data file is absent but false when a present file fails its
// sanity checks. Idempotent: a second call after success is a no-op.
bool ConvNetCharClassifier::Init(const string &data_file_path,
                                 const string &lang,
                                 LangModel *lang_mod) {
  if (init_) {
    return true;
  }
  // Short-circuit: folding sets are only loaded if the nets load cleanly.
  init_ = LoadNets(data_file_path, lang) &&
      LoadFoldingSets(data_file_path, lang, lang_mod);
  return init_;
}
// Load the classifier's Neural Nets
// This function will return true if the net file does not exist.
// But will fail if the net did not pass the sanity checks
// Sanity checks: the net's input count must match the feature extractor's
// feature count, and its output count must match the character-set size.
// Also pre-allocates the net I/O buffers on success.
bool ConvNetCharClassifier::LoadNets(const string &data_file_path,
                                     const string &lang) {
  string char_net_file;
  // add the lang identifier
  char_net_file = data_file_path + lang;
  char_net_file += ".cube.nn";
  // neural network is optional: probe for existence first
  FILE *fp = fopen(char_net_file.c_str(), "rb");
  if (fp == NULL) {
    return true;
  }
  fclose(fp);
  // load main net
  char_net_ = tesseract::NeuralNet::FromFile(char_net_file);
  if (char_net_ == NULL) {
    fprintf(stderr, "Cube ERROR (ConvNetCharClassifier::LoadNets): "
            "could not load %s\n", char_net_file.c_str());
    return false;
  }
  // validate net: input size must match the feature extractor
  if (char_net_->in_cnt() != feat_extract_->FeatureCnt()) {
    fprintf(stderr, "Cube ERROR (ConvNetCharClassifier::LoadNets): "
            "could not validate net %s\n", char_net_file.c_str());
    return false;
  }
  // alloc net i/o buffers
  int feat_cnt = char_net_->in_cnt();
  int class_cnt = char_set_->ClassCount();
  // output size must match the character set
  if (char_net_->out_cnt() != class_cnt) {
    fprintf(stderr, "Cube ERROR (ConvNetCharClassifier::LoadNets): "
            "output count (%d) and class count (%d) are not equal\n",
            char_net_->out_cnt(), class_cnt);
    return false;
  }
  // allocate i/p and o/p buffers if needed (freed in the destructor)
  if (net_input_ == NULL) {
    net_input_ = new float[feat_cnt];
    if (net_input_ == NULL) {
      return false;
    }
    net_output_ = new float[class_cnt];
    if (net_output_ == NULL) {
      return false;
    }
  }
  return true;
}
} // tesseract
| 1080228-arabicocr11 | cube/conv_net_classifier.cpp | C++ | asf20 | 11,671 |
/**********************************************************************
* File: word_size_model.h
* Description: Declaration of the Word Size Model Class
* Author: Ahmad Abdulkader
* Created: 2008
*
* (C) Copyright 2008, Google Inc.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
// The WordSizeModel class abstracts the geometrical relationships
// between characters/shapes in the same word (presumably of the same font)
// A non-parametric bigram model describes the three geometrical properties of a
// character pair:
// 1- Normalized Width
// 2- Normalized Top
// 3- Normalized Height
// These dimensions are computed for each character pair in a word. These are
// then compared to the same information for each of the fonts that the size
// model knows about. The WordSizeCost is the cost of the font that matches
// best.
#ifndef WORD_SIZE_MODEL_H
#define WORD_SIZE_MODEL_H
#include <string>
#include "char_samp.h"
#include "char_set.h"
namespace tesseract {
// Geometrical measurements for a pair of adjacent characters: the vertical
// offset between their tops plus the width and height of each member
// (suffix 0 = first character, 1 = second).
// NOTE(review): presumably stored in the fixed-point units implied by
// kShapeModelScale — confirm against the table loader.
struct PairSizeInfo {
  int delta_top;
  int wid_0;
  int hgt_0;
  int wid_1;
  int hgt_1;
};
// Size model for one font: its name plus a 2-D table of PairSizeInfo.
// NOTE(review): presumably indexed by the size codes produced by
// WordSizeModel::SizeCode — confirm against PairCost's callers.
struct FontPairSizeInfo {
  string font_name;
  PairSizeInfo **pair_size_info;
};
class WordSizeModel {
 public:
  WordSizeModel(CharSet *, bool contextual);
  virtual ~WordSizeModel();
  // Factory: loads the size model for the given language from the data
  // path; returns NULL-able pointer owned by the caller.
  static WordSizeModel *Create(const string &data_file_path,
                               const string &lang,
                               CharSet *char_set,
                               bool contextual);
  // Given a word and number of unichars, return the size cost,
  // minimized over all fonts in the size model.
  int Cost(CharSamp **samp_array, int samp_cnt) const;
  // Given dimensions of a pair of character samples and a font size
  // model for that character pair, return the pair's size cost for
  // the font.
  static double PairCost(int width_0, int height_0, int top_0,
                         int width_1, int height_1, int top_1,
                         const PairSizeInfo& pair_info);
  // Serializes the model to the given file; returns success.
  bool Save(string file_name);
  // Number of fonts in size model.
  inline int FontCount() const {
    return font_pair_size_models_.size();
  }
  // Borrowed pointer to the first per-font entry (array of FontCount()).
  inline const FontPairSizeInfo *FontInfo() const {
    return &font_pair_size_models_[0];
  }
  // Helper functions to convert between size codes, class id and position
  // codes. Packs the class id with the start/end position bits:
  // bit 0 = start, bit 1 = end, remaining bits = class id.
  static inline int SizeCode(int cls_id, int start, int end) {
    return (cls_id << 2) + (end << 1) + start;
  }
 private:
  // Scaling constant used to convert floating point ratios in size table
  // to fixed point
  static const int kShapeModelScale = 1000;
  // Expected number of whitespace-separated tokens per size-table line.
  static const int kExpectedTokenCount = 10;
  // Language properties
  bool contextual_;
  CharSet *char_set_;
  // Size ratios table, one entry per known font
  vector<FontPairSizeInfo> font_pair_size_models_;
  // Initialize the word size model object
  bool Init(const string &data_file_path, const string &lang);
};
}
#endif // WORD_SIZE_MODEL_H
| 1080228-arabicocr11 | cube/word_size_model.h | C++ | asf20 | 3,494 |
/**********************************************************************
* File: classifier_factory.cpp
* Description: Implementation of the Base Character Classifier
* Author: Ahmad Abdulkader
* Created: 2007
*
* (C) Copyright 2008, Google Inc.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <string>
#include "classifier_factory.h"
#include "conv_net_classifier.h"
#include "feature_base.h"
#include "feature_bmp.h"
#include "feature_chebyshev.h"
#include "feature_hybrid.h"
#include "hybrid_neural_net_classifier.h"
namespace tesseract {
// Creates a CharClassifier object of the appropriate type depending on the
// classifier type in the settings file.
// Returns NULL (with a message on stderr) on any failure. On success the
// returned classifier is owned by the caller; the feature extractor is
// handed to the classifier at construction time.
CharClassifier *CharClassifierFactory::Create(const string &data_file_path,
                                              const string &lang,
                                              LangModel *lang_mod,
                                              CharSet *char_set,
                                              TuningParams *params) {
  // create the feature extraction object
  FeatureBase *feat_extract;
  switch (params->TypeFeature()) {
    case TuningParams::BMP:
      feat_extract = new FeatureBmp(params);
      break;
    case TuningParams::CHEBYSHEV:
      feat_extract = new FeatureChebyshev(params);
      break;
    case TuningParams::HYBRID:
      feat_extract = new FeatureHybrid(params);
      break;
    default:
      fprintf(stderr, "Cube ERROR (CharClassifierFactory::Create): invalid "
              "feature type.\n");
      return NULL;
  }
  if (feat_extract == NULL) {
    fprintf(stderr, "Cube ERROR (CharClassifierFactory::Create): unable "
            "to instantiate feature extraction object.\n");
    return NULL;
  }
  // create the classifier object
  CharClassifier *classifier_obj;
  switch (params->TypeClassifier()) {
    case TuningParams::NN:
      classifier_obj = new ConvNetCharClassifier(char_set, params,
                                                 feat_extract);
      break;
    case TuningParams::HYBRID_NN:
      classifier_obj = new HybridNeuralNetCharClassifier(char_set, params,
                                                         feat_extract);
      break;
    default:
      fprintf(stderr, "Cube ERROR (CharClassifierFactory::Create): invalid "
              "classifier type.\n");
      // Bug fix: feat_extract was leaked on this path.
      delete feat_extract;
      return NULL;
  }
  if (classifier_obj == NULL) {
    fprintf(stderr, "Cube ERROR (CharClassifierFactory::Create): error "
            "allocating memory for character classifier object.\n");
    // Bug fix: feat_extract was leaked on this path too.
    delete feat_extract;
    return NULL;
  }
  // Init the classifier
  if (!classifier_obj->Init(data_file_path, lang, lang_mod)) {
    // NOTE(review): assumes the classifier destructor releases
    // feat_extract (original behavior kept) — confirm in classifier_base.
    delete classifier_obj;
    fprintf(stderr, "Cube ERROR (CharClassifierFactory::Create): unable "
            "to Init() character classifier object.\n");
    return NULL;
  }
  return classifier_obj;
}
}
| 1080228-arabicocr11 | cube/classifier_factory.cpp | C++ | asf20 | 3,485 |
/**********************************************************************
* File: feature_hybrid.h
* Description: Declaration of the Hybrid Feature Class
* Author: Ahmad Abdulkader
* Created: 2008
*
* (C) Copyright 2008, Google Inc.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
// The FeatureHybrid class implements a hybrid feature extractor class. It
// inherits from the FeatureBase class
// This class describes the a hybrid feature vector composed by combining
// the bitmap and the chebyshev feature vectors
#ifndef FEATURE_HYBRID_H
#define FEATURE_HYBRID_H
#include "char_samp.h"
#include "feature_bmp.h"
#include "feature_chebyshev.h"
namespace tesseract {
// Hybrid feature extractor: concatenates the bitmap features and the
// Chebyshev features produced by the two wrapped extractors.
class FeatureHybrid : public FeatureBase {
 public:
  explicit FeatureHybrid(TuningParams *params);
  virtual ~FeatureHybrid();
  // Render a visualization of the features to a CharSamp.
  // This is mainly used by visual-debuggers
  virtual CharSamp *ComputeFeatureBitmap(CharSamp *samp);
  // Compute the features for a given CharSamp
  virtual bool ComputeFeatures(CharSamp *samp, float *features);
  // Returns the count of features: the sum of the two sub-extractors'
  // counts, or 0 if either sub-extractor is missing.
  virtual int FeatureCnt() {
    if (feature_bmp_ == NULL || feature_chebyshev_ == NULL) {
      return 0;
    }
    return feature_bmp_->FeatureCnt() + feature_chebyshev_->FeatureCnt();
  }
 protected:
  // The two wrapped extractors whose outputs are concatenated.
  FeatureBmp *feature_bmp_;
  FeatureChebyshev *feature_chebyshev_;
};
}
#endif // FEATURE_HYBRID_H
| 1080228-arabicocr11 | cube/feature_hybrid.h | C++ | asf20 | 2,032 |
/**********************************************************************
* File: tuning_params.h
* Description: Declaration of the Tuning Parameters Base Class
* Author: Ahmad Abdulkader
* Created: 2008
*
* (C) Copyright 2008, Google Inc.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
// The TuningParams class abstracts all the parameters that can be learned or
// tuned during the training process. It is a base class that all TuningParams
// classes should inherit from.
#ifndef TUNING_PARAMS_H
#define TUNING_PARAMS_H
#include <string>
#ifdef USE_STD_NAMESPACE
using std::string;
#endif
namespace tesseract {
// Abstract container for all tunable/learned parameters. Concrete
// subclasses implement Save()/Load().
class TuningParams {
 public:
  // Supported classifier implementations.
  enum type_classifer {
    NN,
    HYBRID_NN
  };
  // Supported feature-extractor implementations.
  enum type_feature {
    BMP,
    CHEBYSHEV,
    HYBRID
  };
  TuningParams() {}
  virtual ~TuningParams() {}
  // Accessor functions
  inline double RecoWgt() const { return reco_wgt_; }
  inline double SizeWgt() const { return size_wgt_; }
  inline double CharBigramWgt() const { return char_bigrams_wgt_; }
  inline double WordUnigramWgt() const { return word_unigrams_wgt_; }
  inline int MaxSegPerChar() const { return max_seg_per_char_; }
  inline int BeamWidth() const { return beam_width_; }
  inline int TypeClassifier() const { return tp_classifier_; }
  inline int TypeFeature() const { return tp_feat_; }
  inline int ConvGridSize() const { return conv_grid_size_; }
  inline int HistWindWid() const { return hist_wind_wid_; }
  inline int MinConCompSize() const { return min_con_comp_size_; }
  inline double MaxWordAspectRatio() const { return max_word_aspect_ratio_; }
  inline double MinSpaceHeightRatio() const { return min_space_height_ratio_; }
  inline double MaxSpaceHeightRatio() const { return max_space_height_ratio_; }
  inline double CombinerRunThresh() const { return combiner_run_thresh_; }
  inline double CombinerClassifierThresh() const {
    return combiner_classifier_thresh_; }
  // Mutators (no validation is performed on the supplied values).
  inline void SetRecoWgt(double wgt) { reco_wgt_ = wgt; }
  inline void SetSizeWgt(double wgt) { size_wgt_ = wgt; }
  inline void SetCharBigramWgt(double wgt) { char_bigrams_wgt_ = wgt; }
  inline void SetWordUnigramWgt(double wgt) { word_unigrams_wgt_ = wgt; }
  inline void SetMaxSegPerChar(int max_seg_per_char) {
    max_seg_per_char_ = max_seg_per_char;
  }
  inline void SetBeamWidth(int beam_width) { beam_width_ = beam_width; }
  inline void SetTypeClassifier(type_classifer tp_classifier) {
    tp_classifier_ = tp_classifier;
  }
  inline void SetTypeFeature(type_feature tp_feat) {tp_feat_ = tp_feat;}
  inline void SetHistWindWid(int hist_wind_wid) {
    hist_wind_wid_ = hist_wind_wid;
  }
  // Serialization hooks implemented by subclasses; return success.
  virtual bool Save(string file_name) = 0;
  virtual bool Load(string file_name) = 0;
 protected:
  // weight of recognition cost. This includes the language model cost
  double reco_wgt_;
  // weight of size cost
  double size_wgt_;
  // weight of character bigrams cost
  double char_bigrams_wgt_;
  // weight of word unigrams cost
  double word_unigrams_wgt_;
  // Maximum number of segments per character
  int max_seg_per_char_;
  // Beam width equal to the maximum number of nodes kept in the beam search
  // trellis column after pruning
  int beam_width_;
  // Classifier type: See enum type_classifer for classifier types
  type_classifer tp_classifier_;
  // Feature types: See enum type_feature for feature types
  type_feature tp_feat_;
  // Grid size to scale a grapheme bitmap used by the BMP feature type
  int conv_grid_size_;
  // Histogram window size as a ratio of the word height used in computing
  // the vertical pixel density histogram in the segmentation algorithm
  int hist_wind_wid_;
  // Minimum possible size of a connected component
  int min_con_comp_size_;
  // Maximum aspect ratio of a word (width / height)
  double max_word_aspect_ratio_;
  // Minimum ratio relative to the line height of a gap to be considered as
  // a word break
  double min_space_height_ratio_;
  // Maximum ratio relative to the line height of a gap to be considered as
  // a definite word break
  double max_space_height_ratio_;
  // When Cube and Tesseract are run in combined mode, only run
  // combiner classifier when tesseract confidence is below this
  // threshold. When Cube is run without Tesseract, this is ignored.
  double combiner_run_thresh_;
  // When Cube and tesseract are run in combined mode, threshold on
  // output of combiner binary classifier (chosen from ROC during
  // combiner training). When Cube is run without Tesseract, this is ignored.
  double combiner_classifier_thresh_;
};
}
#endif // TUNING_PARAMS_H
| 1080228-arabicocr11 | cube/tuning_params.h | C++ | asf20 | 5,174 |
/**********************************************************************
* File: char_samp_enum.cpp
* Description: Implementation of a Character Sample Enumerator Class
* Author: Ahmad Abdulkader
* Created: 2007
*
* (C) Copyright 2008, Google Inc.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
#include "char_samp_enum.h"
namespace tesseract {
// Trivial constructor; CharSampEnum holds no state of its own.
CharSampEnum::CharSampEnum() {
}
// Trivial destructor; nothing to release.
CharSampEnum::~CharSampEnum() {
}
} // namespace tesseract
| 1080228-arabicocr11 | cube/char_samp_enum.cpp | C++ | asf20 | 1,040 |
/**********************************************************************
* File: tess_lang_mod_edge.cpp
* Description: Implementation of the Tesseract Language Model Edge Class
* Author: Ahmad Abdulkader
* Created: 2008
*
* (C) Copyright 2008, Google Inc.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
#include "tess_lang_mod_edge.h"
#include "const.h"
#include "unichar.h"
namespace tesseract {
// OOD constructor
// Builds an edge for the out-of-dictionary model: there is no underlying
// Dawg, and the start/end edge references are zero.
TessLangModEdge::TessLangModEdge(CubeRecoContext *cntxt, int class_id) {
  root_ = false;
  cntxt_ = cntxt;
  dawg_ = NULL;
  start_edge_ = 0;
  end_edge_ = 0;
  edge_mask_ = 0;
  class_id_ = class_id;
  str_ = cntxt_->CharacterSet()->ClassString(class_id);
  // cache the edge cost once all the fields it depends on are set
  path_cost_ = Cost();
}
// leading, trailing punc constructor and single byte UTF char
// A single Dawg edge covers the character, so the start and end edge
// references are identical.
TessLangModEdge::TessLangModEdge(CubeRecoContext *cntxt,
    const Dawg *dawg, EDGE_REF edge_idx, int class_id) {
  root_ = false;
  cntxt_ = cntxt;
  dawg_ = dawg;
  start_edge_ = edge_idx;
  end_edge_ = edge_idx;
  edge_mask_ = 0;
  class_id_ = class_id;
  str_ = cntxt_->CharacterSet()->ClassString(class_id);
  // cache the edge cost once all the fields it depends on are set
  path_cost_ = Cost();
}
// dict constructor: multi byte UTF char
// The character spans a range of Dawg edges [start_edge_idx, end_edge_idx].
TessLangModEdge::TessLangModEdge(CubeRecoContext *cntxt, const Dawg *dawg,
    EDGE_REF start_edge_idx, EDGE_REF end_edge_idx,
    int class_id) {
  root_ = false;
  cntxt_ = cntxt;
  dawg_ = dawg;
  start_edge_ = start_edge_idx;
  end_edge_ = end_edge_idx;
  edge_mask_ = 0;
  class_id_ = class_id;
  str_ = cntxt_->CharacterSet()->ClassString(class_id);
  // cache the edge cost once all the fields it depends on are set
  path_cost_ = Cost();
}
// Returns a newly allocated human-readable description of the edge (dawg
// type, edge index, punctuation/EOW flags and weighted cost).
// The caller owns the returned buffer and must release it with delete[].
char *TessLangModEdge::Description() const {
  char *char_ptr = new char[256];
  if (!char_ptr) {
    return NULL;
  }
  // Fixed-size scratch buffers; the strings formatted below ("Main",
  // "-LP", "-TP", a decimal edge index, ...) are short enough to fit.
  char dawg_str[256];
  char edge_str[32];
  // dawg_ may hold sentinel values (DAWG_OOD / DAWG_NUMBER) rather than a
  // real Dawg pointer, so those are checked before permuter() is called.
  if (dawg_ == (Dawg *)DAWG_OOD) {
    strcpy(dawg_str, "OOD");
  } else if (dawg_ == (Dawg *)DAWG_NUMBER) {
    strcpy(dawg_str, "NUM");
  } else if (dawg_->permuter() == SYSTEM_DAWG_PERM) {
    strcpy(dawg_str, "Main");
  } else if (dawg_->permuter() == USER_DAWG_PERM) {
    strcpy(dawg_str, "User");
  } else if (dawg_->permuter() == DOC_DAWG_PERM) {
    strcpy(dawg_str, "Doc");
  } else {
    strcpy(dawg_str, "N/A");
  }
  sprintf(edge_str, "%d", static_cast<int>(start_edge_));
  if (IsLeadingPuncEdge(edge_mask_)) {
    strcat(edge_str, "-LP");
  }
  if (IsTrailingPuncEdge(edge_mask_)) {
    strcat(edge_str, "-TP");
  }
  sprintf(char_ptr, "%s(%s)%s, Wtd Dawg Cost=%d",
          dawg_str, edge_str, IsEOW() ? "-EOW-" : "", path_cost_);
  return char_ptr;
}
// Creates one TessLangModEdge per valid child of parent_node in the dawg,
// storing them into the caller-provided edge_array, and returns how many
// edges were produced. Children with an invalid unichar id are skipped.
int TessLangModEdge::CreateChildren(CubeRecoContext *cntxt,
                                    const Dawg *dawg,
                                    NODE_REF parent_node,
                                    LangModEdge **edge_array) {
  NodeChildVector children;
  dawg->unichar_ids_of(parent_node, &children, false);  // find all children
  int produced = 0;
  for (int idx = 0; idx < children.size(); ++idx) {
    const NodeChild &nc = children[idx];
    if (nc.unichar_id == INVALID_UNICHAR_ID) continue;
    LangModEdge *edge =
        new TessLangModEdge(cntxt, dawg, nc.edge_ref, nc.unichar_id);
    if (edge != NULL) {
      edge_array[produced++] = edge;
    }
  }
  return produced;
}
}
| 1080228-arabicocr11 | cube/tess_lang_mod_edge.cpp | C++ | asf20 | 3,787 |
/**********************************************************************
* File: alt_list.h
* Description: Class to abstract a list of alternate results
* Author: Ahmad Abdulkader
* Created: 2007
*
* (C) Copyright 2008, Google Inc.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
// The AltList class is the base class for the list of alternate recognition
// results. Each alternate has a cost and an optional tag associated with it
#ifndef ALT_LIST_H
#define ALT_LIST_H
#include <math.h>
#include "cube_utils.h"
namespace tesseract {
// Abstract base class for a bounded list of alternate recognition results,
// each with a cost (-ve log prob) and an optional opaque tag.
class AltList {
 public:
  // max_alt bounds how many alternates the list can hold.
  explicit AltList(int max_alt);
  virtual ~AltList();
  // sort the list of alternates based
  virtual void Sort() = 0;
  // return the best possible cost and index of corresponding alternate
  int BestCost (int *best_alt) const;
  // return the count of alternates
  inline int AltCount() const { return alt_cnt_; }
  // returns the cost (-ve log prob) of an alternate
  inline int AltCost(int alt_idx) const { return alt_cost_[alt_idx]; }
  // returns the prob of an alternate (cost converted back to probability)
  inline double AltProb(int alt_idx) const {
    return CubeUtils::Cost2Prob(AltCost(alt_idx));
  }
  // returns the alternate tag (opaque, owned by the caller who set it)
  inline void *AltTag(int alt_idx) const { return alt_tag_[alt_idx]; }
 protected:
  // max number of alternates the list can hold
  int max_alt_;
  // actual alternate count
  int alt_cnt_;
  // array of alternate costs
  int *alt_cost_;
  // array of alternate tags
  void **alt_tag_;
};
}
#endif // ALT_LIST_H
| 1080228-arabicocr11 | cube/altlist.h | C++ | asf20 | 2,090 |
/**********************************************************************
* File: charclassifier.cpp
* Description: Implementation of the Hybrid Neural Net Character Classifier
* Author: Ahmad Abdulkader
* Created: 2007
*
* (C) Copyright 2008, Google Inc.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
#include <algorithm>
#include <stdio.h>
#include <stdlib.h>
#include <string>
#include <vector>
#include <wctype.h>
#include "classifier_base.h"
#include "char_set.h"
#include "const.h"
#include "conv_net_classifier.h"
#include "cube_utils.h"
#include "feature_base.h"
#include "feature_bmp.h"
#include "hybrid_neural_net_classifier.h"
#include "tess_lang_model.h"
namespace tesseract {
// Constructs a hybrid (multi-net ensemble) character classifier.
// The I/O buffers are created lazily in RunNets, so they start out NULL.
HybridNeuralNetCharClassifier::HybridNeuralNetCharClassifier(
    CharSet *char_set,
    TuningParams *params,
    FeatureBase *feat_extract)
    : CharClassifier(char_set, params, feat_extract) {
  net_input_ = NULL;
  net_output_ = NULL;
}
// Releases every net in the ensemble and the cached I/O buffers.
// delete / delete[] on NULL are well-defined no-ops, so no guards needed.
HybridNeuralNetCharClassifier::~HybridNeuralNetCharClassifier() {
  for (int idx = 0; idx < nets_.size(); idx++) {
    delete nets_[idx];
  }
  nets_.clear();
  delete [] net_input_;
  net_input_ = NULL;
  delete [] net_output_;
  net_output_ = NULL;
}
// The main training function. Given a sample and a class ID the classifier
// updates its parameters according to its learning algorithm. This function
// is currently not implemented. TODO(ahmadab): implement end-2-end training
// Both arguments are ignored; always returns false (training unsupported).
bool HybridNeuralNetCharClassifier::Train(CharSamp *char_samp, int ClassID) {
  return false;
}
// A secondary function needed for training. Allows the trainer to set the
// value of any train-time paramter. This function is currently not
// implemented. TODO(ahmadab): implement end-2-end training
// Both arguments are ignored; always returns false (training unsupported).
bool HybridNeuralNetCharClassifier::SetLearnParam(char *var_name, float val) {
  // TODO(ahmadab): implementation of parameter initializing.
  return false;
}
// Folds the output of the NeuralNet using the loaded folding sets.
// Two stages: (1) in case-insensitive mode, each class and its upper-case
// counterpart both receive the max of their two activations; (2) every
// member of each folding set is raised to at least kFoldingRatio times the
// set's maximum activation. Operates in place on net_output_.
void HybridNeuralNetCharClassifier::Fold() {
  // in case insensitive mode
  if (case_sensitive_ == false) {
    int class_cnt = char_set_->ClassCount();
    // fold case
    for (int class_id = 0; class_id < class_cnt; class_id++) {
      // get class string
      const char_32 *str32 = char_set_->ClassString(class_id);
      // get the upper case form of the string
      string_32 upper_form32 = str32;
      for (int ch = 0; ch < upper_form32.length(); ch++) {
        if (iswalpha(static_cast<int>(upper_form32[ch])) != 0) {
          upper_form32[ch] = towupper(upper_form32[ch]);
        }
      }
      // find out the upperform class-id if any
      int upper_class_id =
          char_set_->ClassID(reinterpret_cast<const char_32 *>(
              upper_form32.c_str()));
      if (upper_class_id != -1 && class_id != upper_class_id) {
        // propagate the max activation to both case variants
        float max_out = MAX(net_output_[class_id], net_output_[upper_class_id]);
        net_output_[class_id] = max_out;
        net_output_[upper_class_id] = max_out;
      }
    }
  }
  // The folding sets specify how groups of classes should be folded
  // Folding involved assigning a min-activation to all the members
  // of the folding set. The min-activation is a fraction of the max-activation
  // of the members of the folding set
  for (int fold_set = 0; fold_set < fold_set_cnt_; fold_set++) {
    // Bug fix: skip folding sets invalidated at load time (length 0 with a
    // NULL member array); without this guard the read of
    // fold_sets_[fold_set][0] below dereferences an empty set. This mirrors
    // the guard in ConvNetCharClassifier::Fold().
    if (fold_set_len_[fold_set] == 0)
      continue;
    float max_prob = net_output_[fold_sets_[fold_set][0]];
    for (int ch = 1; ch < fold_set_len_[fold_set]; ch++) {
      if (net_output_[fold_sets_[fold_set][ch]] > max_prob) {
        max_prob = net_output_[fold_sets_[fold_set][ch]];
      }
    }
    for (int ch = 0; ch < fold_set_len_[fold_set]; ch++) {
      net_output_[fold_sets_[fold_set][ch]] = MAX(max_prob * kFoldingRatio,
          net_output_[fold_sets_[fold_set][ch]]);
    }
  }
}
// compute the features of specified charsamp and
// feedforward the specified nets
// The feature vector is partitioned across the ensemble: each net consumes
// its own in_cnt() slice of net_input_, and its class outputs are summed
// into net_output_ weighted by the corresponding net_wgts_ entry.
bool HybridNeuralNetCharClassifier::RunNets(CharSamp *char_samp) {
  int feat_cnt = feat_extract_->FeatureCnt();
  int class_cnt = char_set_->ClassCount();
  // allocate i/p and o/p buffers if needed; reused across calls and
  // released in the destructor
  if (net_input_ == NULL) {
    net_input_ = new float[feat_cnt];
    if (net_input_ == NULL) {
      return false;
    }
    net_output_ = new float[class_cnt];
    if (net_output_ == NULL) {
      return false;
    }
  }
  // compute input features
  if (feat_extract_->ComputeFeatures(char_samp, net_input_) == false) {
    return false;
  }
  // go thru all the nets, accumulating their weighted outputs
  memset(net_output_, 0, class_cnt * sizeof(*net_output_));
  float *inputs = net_input_;
  for (int net_idx = 0; net_idx < nets_.size(); net_idx++) {
    // run each net
    vector<float> net_out(class_cnt, 0.0);
    if (!nets_[net_idx]->FeedForward(inputs, &net_out[0])) {
      return false;
    }
    // add the output values, weighted by this net's combination weight
    for (int class_idx = 0; class_idx < class_cnt; class_idx++) {
      net_output_[class_idx] += (net_out[class_idx] * net_wgts_[net_idx]);
    }
    // increment inputs pointer to the next net's slice of the features
    inputs += nets_[net_idx]->in_cnt();
  }
  Fold();
  return true;
}
// Returns the cost of the sample being a character.
// By design the cost is zero when no nets are present (the training case,
// where RunNets fails and we fall through to the early return).
int HybridNeuralNetCharClassifier::CharCost(CharSamp *char_samp) {
  if (!RunNets(char_samp)) {
    return 0;
  }
  // net_output_[0] is treated as the non-character probability here
  return CubeUtils::Prob2Cost(1.0f - net_output_[0]);
}
// Classifies a charsamp and returns an alternate list of classes
// sorted by char cost, or NULL on failure. The caller owns the list.
CharAltList *HybridNeuralNetCharClassifier::Classify(CharSamp *char_samp) {
  // run the nets to populate net_output_
  if (!RunNets(char_samp)) {
    return NULL;
  }
  const int class_cnt = char_set_->ClassCount();
  CharAltList *alt_list = new CharAltList(char_set_, class_cnt);
  if (alt_list == NULL) {
    return NULL;
  }
  // insert every class except class 0, converting probability to cost
  for (int class_id = 1; class_id < class_cnt; class_id++) {
    alt_list->Insert(class_id, CubeUtils::Prob2Cost(net_output_[class_id]));
  }
  return alt_list;
}
// set an external net (for training purposes)
// NOTE(review): intentionally left as a no-op here — this classifier keeps a
// vector of nets (nets_) rather than a single replaceable net; confirm that
// callers never rely on this setter taking effect.
void HybridNeuralNetCharClassifier::SetNet(tesseract::NeuralNet *char_net) {
}
// Load folding sets from <data_file_path><lang>.cube.fold.
// Returns true on success or if the (optional) file does not exist;
// returns false if an error is encountered while parsing it.
// Fix: the warning/error messages wrongly attributed themselves to
// ConvNetCharClassifier (copy-paste from the sibling classifier).
bool HybridNeuralNetCharClassifier::LoadFoldingSets(
    const string &data_file_path, const string &lang, LangModel *lang_mod) {
  fold_set_cnt_ = 0;
  string fold_file_name;
  fold_file_name = data_file_path + lang;
  fold_file_name += ".cube.fold";
  // folding sets are optional: a missing file is not an error
  FILE *fp = fopen(fold_file_name.c_str(), "rb");
  if (fp == NULL) {
    return true;
  }
  fclose(fp);
  string fold_sets_str;
  if (!CubeUtils::ReadFileToString(fold_file_name,
                                   &fold_sets_str)) {
    return false;
  }
  // split into lines; each line is one folding set
  vector<string> str_vec;
  CubeUtils::SplitStringUsing(fold_sets_str, "\r\n", &str_vec);
  fold_set_cnt_ = str_vec.size();
  fold_sets_ = new int *[fold_set_cnt_];
  if (fold_sets_ == NULL) {
    return false;
  }
  fold_set_len_ = new int[fold_set_cnt_];
  if (fold_set_len_ == NULL) {
    fold_set_cnt_ = 0;
    return false;
  }
  for (int fold_set = 0; fold_set < fold_set_cnt_; fold_set++) {
    // drop characters the language model does not support
    reinterpret_cast<TessLangModel *>(lang_mod)->RemoveInvalidCharacters(
        &str_vec[fold_set]);
    // if all or all but one character are invalid, invalidate this set
    if (str_vec[fold_set].length() <= 1) {
      fprintf(stderr,
              "Cube WARNING (HybridNeuralNetCharClassifier::LoadFoldingSets): "
              "invalidating folding set %d\n", fold_set);
      fold_set_len_[fold_set] = 0;
      fold_sets_[fold_set] = NULL;
      continue;
    }
    string_32 str32;
    CubeUtils::UTF8ToUTF32(str_vec[fold_set].c_str(), &str32);
    fold_set_len_[fold_set] = str32.length();
    fold_sets_[fold_set] = new int[fold_set_len_[fold_set]];
    if (fold_sets_[fold_set] == NULL) {
      fprintf(stderr,
              "Cube ERROR (HybridNeuralNetCharClassifier::LoadFoldingSets): "
              "could not allocate folding set\n");
      // clamp the count so only the fully-built sets are considered valid
      fold_set_cnt_ = fold_set;
      return false;
    }
    // map each character of the folding set to its class id
    for (int ch = 0; ch < fold_set_len_[fold_set]; ch++) {
      fold_sets_[fold_set][ch] = char_set_->ClassID(str32[ch]);
    }
  }
  return true;
}
// Initializes the classifier given a data path and a language string.
// Both the nets and the folding sets are optional files: loading succeeds
// when a file is absent but fails when a present file is corrupt.
bool HybridNeuralNetCharClassifier::Init(const string &data_file_path,
                                         const string &lang,
                                         LangModel *lang_mod) {
  if (init_) {
    return true;
  }
  // load the nets first, then the folding sets; either failing aborts
  if (!LoadNets(data_file_path, lang) ||
      !LoadFoldingSets(data_file_path, lang, lang_mod)) {
    return false;
  }
  init_ = true;
  return true;
}
// Load the classifier's Neural Nets
// This function will return true if the net file does not exist.
// But will fail if the net did not pass the sanity checks
bool HybridNeuralNetCharClassifier::LoadNets(const string &data_file_path,
const string &lang) {
string hybrid_net_file;
string junk_net_file;
// add the lang identifier
hybrid_net_file = data_file_path + lang;
hybrid_net_file += ".cube.hybrid";
// neural network is optional
FILE *fp = fopen(hybrid_net_file.c_str(), "rb");
if (fp == NULL) {
return true;
}
fclose(fp);
string str;
if (!CubeUtils::ReadFileToString(hybrid_net_file, &str)) {
return false;
}
// split into lines
vector<string> str_vec;
CubeUtils::SplitStringUsing(str, "\r\n", &str_vec);
if (str_vec.size() <= 0) {
return false;
}
// create and add the nets
nets_.resize(str_vec.size(), NULL);
net_wgts_.resize(str_vec.size(), 0);
int total_input_size = 0;
for (int net_idx = 0; net_idx < str_vec.size(); net_idx++) {
// parse the string
vector<string> tokens_vec;
CubeUtils::SplitStringUsing(str_vec[net_idx], " \t", &tokens_vec);
// has to be 2 tokens, net name and input size
if (tokens_vec.size() != 2) {
return false;
}
// load the net
string net_file_name = data_file_path + tokens_vec[0];
nets_[net_idx] = tesseract::NeuralNet::FromFile(net_file_name);
if (nets_[net_idx] == NULL) {
return false;
}
// parse the input size and validate it
net_wgts_[net_idx] = atof(tokens_vec[1].c_str());
if (net_wgts_[net_idx] < 0.0) {
return false;
}
total_input_size += nets_[net_idx]->in_cnt();
}
// validate total input count
if (total_input_size != feat_extract_->FeatureCnt()) {
return false;
}
// success
return true;
}
} // tesseract
| 1080228-arabicocr11 | cube/hybrid_neural_net_classifier.cpp | C++ | asf20 | 11,565 |
/**********************************************************************
* File: word_list_lang_model.cpp
* Description: Implementation of the Word List Language Model Class
* Author: Ahmad Abdulkader
* Created: 2008
*
* (C) Copyright 2008, Google Inc.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
#include <string>
#include <vector>
#include "word_list_lang_model.h"
#include "cube_utils.h"
#include "ratngs.h"
#include "trie.h"
namespace tesseract {
// Builds an uninitialized word-list language model for the given context;
// the underlying trie is created lazily by Init().
WordListLangModel::WordListLangModel(CubeRecoContext *cntxt)
    : cntxt_(cntxt), dawg_(NULL), init_(false) {
}
// Releases the owned trie via Cleanup().
WordListLangModel::~WordListLangModel() { Cleanup(); }
// Frees the trie (if any) and marks the model uninitialized.
void WordListLangModel::Cleanup() {
  delete dawg_;  // delete on NULL is a no-op
  dawg_ = NULL;
  init_ = false;
}
// Initializes the language model by creating an empty word trie.
// Returns true if already initialized or on success.
bool WordListLangModel::Init() {
  if (init_) {
    return true;
  }
  // The last parameter to the Trie constructor (the debug level) is set to
  // false for now, until Cube has a way to express its preferred debug level.
  dawg_ = new Trie(DAWG_TYPE_WORD, "", NO_PERM,
                   cntxt_->CharacterSet()->ClassCount(), false);
  init_ = (dawg_ != NULL);
  return init_;
}
// return a pointer to the root
// NOTE(review): always returns NULL; GetEdges() treats a NULL edge as the
// root state (it starts traversal at dawg node 0), so callers beginning
// from Root() effectively start at the trie root.
LangModEdge * WordListLangModel::Root() {
  return NULL;
}
// return the edges emerging from the current state
// Returns a newly allocated array of LangModEdge pointers (caller owns the
// array) and sets *edge_cnt, or returns NULL when initialization fails or
// the given edge has no successor node in the dawg.
LangModEdge **WordListLangModel::GetEdges(CharAltList *alt_list,
                                          LangModEdge *edge,
                                          int *edge_cnt) {
  // initialize if necessary
  if (init_ == false) {
    if (Init() == false) {
      return NULL;
    }
  }
  (*edge_cnt) = 0;
  EDGE_REF edge_ref;
  // a NULL edge denotes the root state: start at dawg node 0
  TessLangModEdge *tess_lm_edge = reinterpret_cast<TessLangModEdge *>(edge);
  if (tess_lm_edge == NULL) {
    edge_ref = 0;
  } else {
    edge_ref = tess_lm_edge->EndEdge();
    // advance node
    edge_ref = dawg_->next_node(edge_ref);
    if (edge_ref == 0) {
      // no successor node to expand
      return NULL;
    }
  }
  // allocate memory for edges (fixed upper bound of kMaxEdge entries)
  LangModEdge **edge_array = new LangModEdge *[kMaxEdge];
  if (edge_array == NULL) {
    return NULL;
  }
  // now get all the emerging edges
  (*edge_cnt) += TessLangModEdge::CreateChildren(cntxt_, dawg_, edge_ref,
                                                 edge_array + (*edge_cnt));
  return edge_array;
}
// returns true if the char_32 is supported by the language model
// TODO(ahmadab) currently not implemented
// NOTE(review): stub — always reports the sequence as invalid regardless
// of the arguments.
bool WordListLangModel::IsValidSequence(const char_32 *sequence,
                                        bool terminal, LangModEdge **edges) {
  return false;
}
// Recursive helper function for WordVariants().
void WordListLangModel::WordVariants(const CharSet &char_set,
string_32 prefix_str32,
WERD_CHOICE *word_so_far,
string_32 str32,
vector<WERD_CHOICE *> *word_variants) {
int str_len = str32.length();
if (str_len == 0) {
if (word_so_far->length() > 0) {
word_variants->push_back(new WERD_CHOICE(*word_so_far));
}
} else {
// Try out all the possible prefixes of the str32.
for (int len = 1; len <= str_len; len++) {
// Check if prefix is supported in character set.
string_32 str_pref32 = str32.substr(0, len);
int class_id = char_set.ClassID(reinterpret_cast<const char_32 *>(
str_pref32.c_str()));
if (class_id <= 0) {
continue;
} else {
string_32 new_prefix_str32 = prefix_str32 + str_pref32;
string_32 new_str32 = str32.substr(len);
word_so_far->append_unichar_id(class_id, 1, 0.0, 0.0);
WordVariants(char_set, new_prefix_str32, word_so_far, new_str32,
word_variants);
word_so_far->remove_last_unichar_id();
}
}
}
}
// Compute all the variants of a 32-bit string in terms of the class-ids.
// Needed for languages with ligatures, where one word can have several
// spellings in terms of class-ids. Any previous contents of word_variants
// are deleted and replaced.
void WordListLangModel::WordVariants(const CharSet &char_set,
                                     const UNICHARSET *uchset, string_32 str32,
                                     vector<WERD_CHOICE *> *word_variants) {
  // release any variants left over from a previous call
  for (int variant = 0; variant < word_variants->size(); variant++) {
    delete (*word_variants)[variant];
  }
  word_variants->clear();
  WERD_CHOICE word_so_far(uchset);
  WordVariants(char_set, string_32(), &word_so_far, str32, word_variants);
}
// Converts a UTF-8 string to UTF-32 and adds it to the language model.
// Returns false on init failure or when the string is empty.
bool WordListLangModel::AddString(const char *char_ptr) {
  if (!init_ && !Init()) {  // initialize if necessary
    return false;
  }
  string_32 str32;
  CubeUtils::UTF8ToUTF32(char_ptr, &str32);
  // reject empty strings, otherwise delegate to the UTF-32 overload
  return (str32.length() >= 1) ? AddString32(str32.c_str()) : false;
}
// Adds a new UTF-32 string to the language model. Only the shortest
// grapheme interpretation (fewest class-ids) of the word is inserted
// into the trie.
bool WordListLangModel::AddString32(const char_32 *char_32_ptr) {
  if (char_32_ptr == NULL) {
    return false;
  }
  // enumerate all grapheme splittings of the word
  vector<WERD_CHOICE *> word_variants;
  WordVariants(*(cntxt_->CharacterSet()), cntxt_->TessUnicharset(),
               char_32_ptr, &word_variants);
  if (!word_variants.empty()) {
    // pick the variant with the fewest unichars...
    int shortest = 0;
    for (int variant = 1; variant < word_variants.size(); variant++) {
      if (word_variants[variant]->length() <
          word_variants[shortest]->length()) {
        shortest = variant;
      }
    }
    // ...and add only that interpretation to the word list
    dawg_->add_word_to_dawg(*word_variants[shortest]);
  }
  // release all the variants
  for (int variant = 0; variant < word_variants.size(); variant++) {
    delete word_variants[variant];
  }
  return true;
}
}
| 1080228-arabicocr11 | cube/word_list_lang_model.cpp | C++ | asf20 | 6,369 |
/**********************************************************************
* File: cube_tuning_params.h
* Description: Declaration of the CubeTuningParameters Class
* Author: Ahmad Abdulkader
* Created: 2007
*
* (C) Copyright 2008, Google Inc.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
// The CubeTuningParams class abstracts all the parameters that are used
// in Cube and are tuned/learned during the training process. Inherits
// from the TuningParams class.
#ifndef CUBE_TUNING_PARAMS_H
#define CUBE_TUNING_PARAMS_H
#include <string>
#include "tuning_params.h"
namespace tesseract {
// Tuning parameters specific to Cube, extending the generic TuningParams
// with weights for the out-of-dictionary and numeric word models.
class CubeTuningParams : public TuningParams {
 public:
  CubeTuningParams();
  ~CubeTuningParams();
  // Accessor functions
  // NOTE(review): ood_wgt_/num_wgt_ semantics ("out-of-dictionary" and
  // "number" model weights) are inferred from the names — confirm against
  // the training code that writes these values.
  inline double OODWgt() { return ood_wgt_; }
  inline double NumWgt() { return num_wgt_; }
  inline void SetOODWgt(double wgt) { ood_wgt_ = wgt; }
  inline void SetNumWgt(double wgt) { num_wgt_ = wgt; }
  // Create an object given the data file path and the language by loading
  // the approporiate file
  static CubeTuningParams * Create(const string &data_file,
                                   const string &lang);
  // Save and load the tuning parameters to a specified file
  bool Save(string file_name);
  bool Load(string file_name);
 private:
  // weight of the out-of-dictionary language model
  double ood_wgt_;
  // weight of the number language model
  double num_wgt_;
};
}
#endif // CUBE_TUNING_PARAMS_H
| 1080228-arabicocr11 | cube/cube_tuning_params.h | C++ | asf20 | 1,954 |
/**********************************************************************
* File: char_samp.cpp
* Description: Implementation of a Character Bitmap Sample Class
* Author: Ahmad Abdulkader
* Created: 2007
*
* (C) Copyright 2008, Google Inc.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
#include <string.h>
#include <string>
#include "char_samp.h"
#include "cube_utils.h"
namespace tesseract {
#define MAX_LINE_LEN 1024
// Creates an empty (0x0) sample at origin (0, 0) with no label.
CharSamp::CharSamp()
    : Bmp8(0, 0), left_(0), top_(0) {
  label32_ = NULL;
  page_ = -1;
}
// Creates a wid x hgt sample at origin (0, 0) with no label.
CharSamp::CharSamp(int wid, int hgt)
    : Bmp8(wid, hgt), left_(0), top_(0) {
  label32_ = NULL;
  page_ = -1;
}
// Creates a wid x hgt sample positioned at (left, top) with no label.
CharSamp::CharSamp(int left, int top, int wid, int hgt)
    : Bmp8(wid, hgt) {
  left_ = left;
  top_ = top;
  label32_ = NULL;
  page_ = -1;
}
// Frees the owned label buffer.
CharSamp::~CharSamp() {
  delete []label32_;  // delete[] on NULL is a no-op
  label32_ = NULL;
}
// Returns a UTF-8 version of the (UTF-32) string label, or an empty
// string when the sample has no label.
string CharSamp::stringLabel() const {
  string utf8_label;
  if (label32_ != NULL) {
    CubeUtils::UTF32ToUTF8(string_32(label32_).c_str(), &utf8_label);
  }
  return utf8_label;
}
// Sets the string label from a UTF-8 encoded string.
void CharSamp::SetLabel(string str) {
  // drop any previous label
  delete []label32_;  // delete[] on NULL is a no-op
  label32_ = NULL;
  // convert to UTF-32 and delegate to the char_32 overload
  string_32 str32;
  CubeUtils::UTF8ToUTF32(str.c_str(), &str32);
  SetLabel(reinterpret_cast<const char_32 *>(str32.c_str()));
}
// Creates a CharSamp object from a cached dump file, or NULL on any
// read/format failure. File layout: 32-bit marker 0xabd0fefe, 32-bit
// label length, the label (NOT null terminated), coordinate/context
// fields, then the bitmap.
// Fix: label32 was leaked on every error path after its allocation;
// the FILE* overload frees it, this overload now does too.
CharSamp *CharSamp::FromCharDumpFile(CachedFile *fp) {
  unsigned short left;
  unsigned short top;
  unsigned short page;
  unsigned short first_char;
  unsigned short last_char;
  unsigned short norm_top;
  unsigned short norm_bottom;
  unsigned short norm_aspect_ratio;
  unsigned int val32;
  char_32 *label32;
  // read and check 32 bit marker
  if (fp->Read(&val32, sizeof(val32)) != sizeof(val32)) {
    return NULL;
  }
  if (val32 != 0xabd0fefe) {
    return NULL;
  }
  // read label length,
  if (fp->Read(&val32, sizeof(val32)) != sizeof(val32)) {
    return NULL;
  }
  // the label is not null terminated in the file
  if (val32 > 0 && val32 < MAX_UINT32) {
    label32 = new char_32[val32 + 1];
    if (label32 == NULL) {
      return NULL;
    }
    // read label
    if (fp->Read(label32, val32 * sizeof(*label32)) !=
        (val32 * sizeof(*label32))) {
      delete [] label32;
      return NULL;
    }
    // null terminate
    label32[val32] = 0;
  } else {
    label32 = NULL;
  }
  // read coordinate and word-context fields; free the label on failure
  if (fp->Read(&page, sizeof(page)) != sizeof(page) ||
      fp->Read(&left, sizeof(left)) != sizeof(left) ||
      fp->Read(&top, sizeof(top)) != sizeof(top) ||
      fp->Read(&first_char, sizeof(first_char)) != sizeof(first_char) ||
      fp->Read(&last_char, sizeof(last_char)) != sizeof(last_char) ||
      fp->Read(&norm_top, sizeof(norm_top)) != sizeof(norm_top) ||
      fp->Read(&norm_bottom, sizeof(norm_bottom)) != sizeof(norm_bottom) ||
      fp->Read(&norm_aspect_ratio, sizeof(norm_aspect_ratio)) !=
      sizeof(norm_aspect_ratio)) {
    delete [] label32;
    return NULL;
  }
  // create the object
  CharSamp *char_samp = new CharSamp();
  if (char_samp == NULL) {
    delete [] label32;
    return NULL;
  }
  // init (char_samp takes ownership of label32 from here on)
  char_samp->label32_ = label32;
  char_samp->page_ = page;
  char_samp->left_ = left;
  char_samp->top_ = top;
  char_samp->first_char_ = first_char;
  char_samp->last_char_ = last_char;
  char_samp->norm_top_ = norm_top;
  char_samp->norm_bottom_ = norm_bottom;
  char_samp->norm_aspect_ratio_ = norm_aspect_ratio;
  // load the Bmp8 part
  if (char_samp->LoadFromCharDumpFile(fp) == false) {
    delete char_samp;  // owns (and frees) label32
    return NULL;
  }
  return char_samp;
}
// Load a Char Samp from a dump file
// Returns NULL on any read/format failure. File layout: 32-bit marker
// 0xabd0fefe, 32-bit label length, the label (NOT null terminated),
// coordinate/context fields, then the bitmap.
CharSamp *CharSamp::FromCharDumpFile(FILE *fp) {
  unsigned short left;
  unsigned short top;
  unsigned short page;
  unsigned short first_char;
  unsigned short last_char;
  unsigned short norm_top;
  unsigned short norm_bottom;
  unsigned short norm_aspect_ratio;
  unsigned int val32;
  char_32 *label32;
  // read and check 32 bit marker
  if (fread(&val32, 1, sizeof(val32), fp) != sizeof(val32)) {
    return NULL;
  }
  if (val32 != 0xabd0fefe) {
    return NULL;
  }
  // read label length,
  if (fread(&val32, 1, sizeof(val32), fp) != sizeof(val32)) {
    return NULL;
  }
  // the label is not null terminated in the file
  if (val32 > 0 && val32 < MAX_UINT32) {
    label32 = new char_32[val32 + 1];
    if (label32 == NULL) {
      return NULL;
    }
    // read label
    if (fread(label32, 1, val32 * sizeof(*label32), fp) !=
        (val32 * sizeof(*label32))) {
      delete [] label32;
      return NULL;
    }
    // null terminate
    label32[val32] = 0;
  } else {
    label32 = NULL;
  }
  // read coordinates; on any short read, free the label and bail out
  if (fread(&page, 1, sizeof(page), fp) != sizeof(page) ||
      fread(&left, 1, sizeof(left), fp) != sizeof(left) ||
      fread(&top, 1, sizeof(top), fp) != sizeof(top) ||
      fread(&first_char, 1, sizeof(first_char), fp) != sizeof(first_char) ||
      fread(&last_char, 1, sizeof(last_char), fp) != sizeof(last_char) ||
      fread(&norm_top, 1, sizeof(norm_top), fp) != sizeof(norm_top) ||
      fread(&norm_bottom, 1, sizeof(norm_bottom), fp) != sizeof(norm_bottom) ||
      fread(&norm_aspect_ratio, 1, sizeof(norm_aspect_ratio), fp) !=
      sizeof(norm_aspect_ratio)) {
    delete [] label32;
    return NULL;
  }
  // create the object
  CharSamp *char_samp = new CharSamp();
  if (char_samp == NULL) {
    delete [] label32;
    return NULL;
  }
  // init (char_samp takes ownership of label32 from here on)
  char_samp->label32_ = label32;
  char_samp->page_ = page;
  char_samp->left_ = left;
  char_samp->top_ = top;
  char_samp->first_char_ = first_char;
  char_samp->last_char_ = last_char;
  char_samp->norm_top_ = norm_top;
  char_samp->norm_bottom_ = norm_bottom;
  char_samp->norm_aspect_ratio_ = norm_aspect_ratio;
  // load the Bmp8 part
  if (char_samp->LoadFromCharDumpFile(fp) == false) {
    delete char_samp;  // It owns label32.
    return NULL;
  }
  return char_samp;
}
// Returns a copy of the charsamp scaled to the specified width and height
// (optionally isotropically), carrying over all metadata, or NULL on failure.
CharSamp *CharSamp::Scale(int wid, int hgt, bool isotropic) {
  CharSamp *scaled = new CharSamp(wid, hgt);
  if (scaled == NULL) {
    return NULL;
  }
  if (!scaled->ScaleFrom(this, isotropic)) {
    delete scaled;
    return NULL;
  }
  // carry over position, page, label and word-context metadata
  scaled->left_ = left_;
  scaled->top_ = top_;
  scaled->page_ = page_;
  scaled->SetLabel(label32_);
  scaled->SetFirstChar(first_char_);
  scaled->SetLastChar(last_char_);
  scaled->SetNormTop(norm_top_);
  scaled->SetNormBottom(norm_bottom_);
  scaled->SetNormAspectRatio(norm_aspect_ratio_);
  return scaled;
}
// Builds a CharSamp at (left, top) of size wid x hgt from raw bitmap
// data, or NULL on failure.
CharSamp *CharSamp::FromRawData(int left, int top, int wid, int hgt,
                                unsigned char *data) {
  CharSamp *samp = new CharSamp(left, top, wid, hgt);
  if (samp == NULL) {
    return NULL;
  }
  if (!samp->LoadFromRawData(data)) {
    delete samp;
    return NULL;
  }
  return samp;
}
// Saves the charsamp to a dump file in the layout read back by
// FromCharDumpFile: marker, label length, label, coordinates, bitmap.
// Returns false on any short write.
bool CharSamp::Save2CharDumpFile(FILE *fp) const {
  unsigned int val32;
  // write and check 32 bit marker
  val32 = 0xabd0fefe;
  if (fwrite(&val32, 1, sizeof(val32), fp) != sizeof(val32)) {
    return false;
  }
  // write label length followed by the (unterminated) label itself
  val32 = (label32_ == NULL) ? 0 : LabelLen(label32_);
  if (fwrite(&val32, 1, sizeof(val32), fp) != sizeof(val32)) {
    return false;
  }
  if (label32_ != NULL &&
      fwrite(label32_, 1, val32 * sizeof(*label32_), fp) !=
      (val32 * sizeof(*label32_))) {
    return false;
  }
  // write coordinate and word-context fields
  if (fwrite(&page_, 1, sizeof(page_), fp) != sizeof(page_) ||
      fwrite(&left_, 1, sizeof(left_), fp) != sizeof(left_) ||
      fwrite(&top_, 1, sizeof(top_), fp) != sizeof(top_) ||
      fwrite(&first_char_, 1, sizeof(first_char_), fp) !=
      sizeof(first_char_) ||
      fwrite(&last_char_, 1, sizeof(last_char_), fp) != sizeof(last_char_) ||
      fwrite(&norm_top_, 1, sizeof(norm_top_), fp) != sizeof(norm_top_) ||
      fwrite(&norm_bottom_, 1, sizeof(norm_bottom_), fp) !=
      sizeof(norm_bottom_) ||
      fwrite(&norm_aspect_ratio_, 1, sizeof(norm_aspect_ratio_), fp) !=
      sizeof(norm_aspect_ratio_)) {
    return false;
  }
  // write the bitmap part
  return SaveBmp2CharDumpFile(fp);
}
// Crop the char samp such that there are no white spaces on any side.
// The norm_top_ and norm_bottom_ fields are the character top/bottom
// with respect to whatever context the character is being recognized
// in (e.g. word bounding box) normalized to a standard size of
// 255. Here they default to 0 and 255 (word box boundaries), but
// since they are context dependent, they may need to be reset by the
// calling function.
// Returns NULL if the image is entirely blank or on allocation failure.
CharSamp *CharSamp::Crop() {
  // get the dimensions of the cropped img
  int cropped_left = 0;
  int cropped_top = 0;
  int cropped_wid = wid_;
  int cropped_hgt = hgt_;
  Bmp8::Crop(&cropped_left, &cropped_top,
             &cropped_wid, &cropped_hgt);
  if (cropped_wid == 0 || cropped_hgt == 0) {
    return NULL;
  }
  // create the cropped char samp
  CharSamp *cropped_samp = new CharSamp(left_ + cropped_left,
                                        top_ + cropped_top,
                                        cropped_wid, cropped_hgt);
  // allocation check was missing; added for consistency with the rest
  // of this file's error handling
  if (cropped_samp == NULL) {
    return NULL;
  }
  cropped_samp->SetLabel(label32_);
  cropped_samp->SetFirstChar(first_char_);
  cropped_samp->SetLastChar(last_char_);
  // the following 3 fields may/should be reset by the calling function
  // using context information, i.e., location of character box
  // w.r.t. the word bounding box
  cropped_samp->SetNormAspectRatio(255 *
      cropped_wid / (cropped_wid + cropped_hgt));
  cropped_samp->SetNormTop(0);
  cropped_samp->SetNormBottom(255);
  // copy the bitmap to the cropped img
  Copy(cropped_left, cropped_top, cropped_wid, cropped_hgt, cropped_samp);
  return cropped_samp;
}
// segment the char samp to connected components
// based on contiguity and vertical pixel density histogram
// Returns a newly allocated array of *segment_cnt ConComp pointers sorted
// in reading order (caller owns the array and its elements), or NULL when
// the image has no usable components or on allocation failure.
ConComp **CharSamp::Segment(int *segment_cnt, bool right_2_left,
                            int max_hist_wnd, int min_con_comp_size) const {
  // init
  (*segment_cnt) = 0;
  int concomp_cnt = 0;
  int seg_cnt = 0;
  // find the concomps of the image
  ConComp **concomp_array = FindConComps(&concomp_cnt, min_con_comp_size);
  if (concomp_cnt <= 0 || !concomp_array) {
    if (concomp_array)
      delete []concomp_array;
    return NULL;
  }
  ConComp **seg_array = NULL;
  // segment each concomp further using vertical histogram
  for (int concomp = 0; concomp < concomp_cnt; concomp++) {
    int concomp_seg_cnt = 0;
    // segment the concomp
    ConComp **concomp_seg_array = NULL;
    ConComp **concomp_alloc_seg =
        concomp_array[concomp]->Segment(max_hist_wnd, &concomp_seg_cnt);
    // no segments, add the whole concomp
    if (concomp_alloc_seg == NULL) {
      concomp_seg_cnt = 1;
      concomp_seg_array = concomp_array + concomp;
    } else {
      // delete the original concomp, we no longer need it
      concomp_seg_array = concomp_alloc_seg;
      delete concomp_array[concomp];
    }
    // add the resulting segments
    for (int seg_idx = 0; seg_idx < concomp_seg_cnt; seg_idx++) {
      // too small of a segment: ignore
      if (concomp_seg_array[seg_idx]->Width() < 2 &&
          concomp_seg_array[seg_idx]->Height() < 2) {
        delete concomp_seg_array[seg_idx];
      } else {
        // add the new segment
        // extend the segment array
        // (grown in chunks of kConCompAllocChunk to amortize reallocation)
        if ((seg_cnt % kConCompAllocChunk) == 0) {
          ConComp **temp_segm_array =
              new ConComp *[seg_cnt + kConCompAllocChunk];
          if (temp_segm_array == NULL) {
            fprintf(stderr, "Cube ERROR (CharSamp::Segment): could not "
                    "allocate additional connected components\n");
            delete []concomp_seg_array;
            delete []concomp_array;
            delete []seg_array;
            return NULL;
          }
          if (seg_cnt > 0) {
            memcpy(temp_segm_array, seg_array, seg_cnt * sizeof(*seg_array));
            delete []seg_array;
          }
          seg_array = temp_segm_array;
        }
        seg_array[seg_cnt++] = concomp_seg_array[seg_idx];
      }
    }  // segment
    if (concomp_alloc_seg != NULL) {
      delete []concomp_alloc_seg;
    }
  }  // concomp
  delete []concomp_array;
  // sort the concomps from Left2Right or Right2Left, based on the reading order
  if (seg_cnt > 0 && seg_array != NULL) {
    qsort(seg_array, seg_cnt, sizeof(*seg_array), right_2_left ?
        ConComp::Right2LeftComparer : ConComp::Left2RightComparer);
  }
  (*segment_cnt) = seg_cnt;
  return seg_array;
}
// Builds a char samp from a set of connected components, considering the
// seg_flags_size components starting at concomp_array[strt_concomp].
// Only components whose seg_flags entry is non-zero (or all of them when
// seg_flags is NULL) are included. On return, *left_most/*right_most say
// whether every included component ID is left-most/right-most.
// Returns NULL when no component qualifies or on allocation failure.
// Fix: on a failed allocation of the three tracking arrays, whichever
// arrays had already been allocated were leaked; they are now freed.
CharSamp *CharSamp::FromConComps(ConComp **concomp_array, int strt_concomp,
                                 int seg_flags_size, int *seg_flags,
                                 bool *left_most, bool *right_most,
                                 int word_hgt) {
  int concomp;
  int end_concomp;
  int concomp_cnt = 0;
  end_concomp = strt_concomp + seg_flags_size;
  // determine ID range
  bool once = false;
  int min_id = -1;
  int max_id = -1;
  for (concomp = strt_concomp; concomp < end_concomp; concomp++) {
    if (!seg_flags || seg_flags[concomp - strt_concomp] != 0) {
      if (!once) {
        min_id = concomp_array[concomp]->ID();
        max_id = concomp_array[concomp]->ID();
        once = true;
      } else {
        UpdateRange(concomp_array[concomp]->ID(), &min_id, &max_id);
      }
      concomp_cnt++;
    }
  }
  if (concomp_cnt < 1 || !once || min_id == -1 || max_id == -1) {
    return NULL;
  }
  // alloc memo for computing leftmost and right most attributes
  int id_cnt = max_id - min_id + 1;
  bool *id_exist = new bool[id_cnt];
  bool *left_most_exist = new bool[id_cnt];
  bool *right_most_exist = new bool[id_cnt];
  if (!id_exist || !left_most_exist || !right_most_exist) {
    // free whichever arrays did get allocated (previously leaked here)
    delete []id_exist;
    delete []left_most_exist;
    delete []right_most_exist;
    return NULL;
  }
  memset(id_exist, 0, id_cnt * sizeof(*id_exist));
  memset(left_most_exist, 0, id_cnt * sizeof(*left_most_exist));
  memset(right_most_exist, 0, id_cnt * sizeof(*right_most_exist));
  // find the dimensions of the charsamp
  once = false;
  int left = -1;
  int right = -1;
  int top = -1;
  int bottom = -1;
  int unq_ids = 0;
  int unq_left_most = 0;
  int unq_right_most = 0;
  for (concomp = strt_concomp; concomp < end_concomp; concomp++) {
    if (!seg_flags || seg_flags[concomp - strt_concomp] != 0) {
      if (!once) {
        left = concomp_array[concomp]->Left();
        right = concomp_array[concomp]->Right();
        top = concomp_array[concomp]->Top();
        bottom = concomp_array[concomp]->Bottom();
        once = true;
      } else {
        UpdateRange(concomp_array[concomp]->Left(),
                    concomp_array[concomp]->Right(), &left, &right);
        UpdateRange(concomp_array[concomp]->Top(),
                    concomp_array[concomp]->Bottom(), &top, &bottom);
      }
      // count unq ids, unq left most and right mosts ids
      int concomp_id = concomp_array[concomp]->ID() - min_id;
      if (!id_exist[concomp_id]) {
        id_exist[concomp_id] = true;
        unq_ids++;
      }
      if (concomp_array[concomp]->LeftMost()) {
        if (left_most_exist[concomp_id] == false) {
          left_most_exist[concomp_id] = true;
          unq_left_most++;
        }
      }
      if (concomp_array[concomp]->RightMost()) {
        if (right_most_exist[concomp_id] == false) {
          right_most_exist[concomp_id] = true;
          unq_right_most++;
        }
      }
    }
  }
  delete []id_exist;
  delete []left_most_exist;
  delete []right_most_exist;
  if (!once || left == -1 || top == -1 || right == -1 || bottom == -1) {
    return NULL;
  }
  // the set is left/right-most only if every unique ID contributed one
  (*left_most) = (unq_left_most >= unq_ids);
  (*right_most) = (unq_right_most >= unq_ids);
  // create the char sample object
  CharSamp *samp = new CharSamp(left, top, right - left + 1, bottom - top + 1);
  if (!samp) {
    return NULL;
  }
  // set the foreground pixels
  for (concomp = strt_concomp; concomp < end_concomp; concomp++) {
    if (!seg_flags || seg_flags[concomp - strt_concomp] != 0) {
      ConCompPt *pt_ptr = concomp_array[concomp]->Head();
      while (pt_ptr) {
        samp->line_buff_[pt_ptr->y() - top][pt_ptr->x() - left] = 0;
        pt_ptr = pt_ptr->Next();
      }
    }
  }
  return samp;
}
// Returns a deep copy of this sample: bitmap, label and all
// word-context metadata.
CharSamp *CharSamp::Clone() const {
  CharSamp *copy = new CharSamp(left_, top_, wid_, hgt_);
  // duplicate the label and context fields
  copy->SetLabel(label32_);
  copy->SetFirstChar(first_char_);
  copy->SetLastChar(last_char_);
  copy->SetNormTop(norm_top_);
  copy->SetNormBottom(norm_bottom_);
  copy->SetNormAspectRatio(norm_aspect_ratio_);
  // duplicate the bitmap
  Copy(0, 0, wid_, hgt_, copy);
  return copy;
}
// Creates a CharSamp from an in-memory dump buffer, advancing
// *raw_data_ptr past the consumed bytes on success. Returns NULL on a
// bad marker or failure to load the bitmap part. Same layout as the
// file-based overloads: marker, label length, label, coordinates, bitmap.
// Fix: label32 was leaked when the CharSamp allocation check failed
// (the FILE* overload frees it on that path; this one now does too).
CharSamp *CharSamp::FromCharDumpFile(unsigned char **raw_data_ptr) {
  unsigned int val32;
  char_32 *label32;
  unsigned char *raw_data = *raw_data_ptr;
  // read and check 32 bit marker
  memcpy(&val32, raw_data, sizeof(val32));
  raw_data += sizeof(val32);
  if (val32 != 0xabd0fefe) {
    return NULL;
  }
  // read label length,
  memcpy(&val32, raw_data, sizeof(val32));
  raw_data += sizeof(val32);
  // the label is not null terminated in the file
  if (val32 > 0 && val32 < MAX_UINT32) {
    label32 = new char_32[val32 + 1];
    if (label32 == NULL) {
      return NULL;
    }
    // read label
    memcpy(label32, raw_data, val32 * sizeof(*label32));
    raw_data += (val32 * sizeof(*label32));
    // null terminate
    label32[val32] = 0;
  } else {
    label32 = NULL;
  }
  // create the object
  CharSamp *char_samp = new CharSamp();
  if (char_samp == NULL) {
    delete [] label32;
    return NULL;
  }
  // read coordinates (char_samp takes ownership of label32)
  char_samp->label32_ = label32;
  memcpy(&char_samp->page_, raw_data, sizeof(char_samp->page_));
  raw_data += sizeof(char_samp->page_);
  memcpy(&char_samp->left_, raw_data, sizeof(char_samp->left_));
  raw_data += sizeof(char_samp->left_);
  memcpy(&char_samp->top_, raw_data, sizeof(char_samp->top_));
  raw_data += sizeof(char_samp->top_);
  memcpy(&char_samp->first_char_, raw_data, sizeof(char_samp->first_char_));
  raw_data += sizeof(char_samp->first_char_);
  memcpy(&char_samp->last_char_, raw_data, sizeof(char_samp->last_char_));
  raw_data += sizeof(char_samp->last_char_);
  memcpy(&char_samp->norm_top_, raw_data, sizeof(char_samp->norm_top_));
  raw_data += sizeof(char_samp->norm_top_);
  memcpy(&char_samp->norm_bottom_, raw_data, sizeof(char_samp->norm_bottom_));
  raw_data += sizeof(char_samp->norm_bottom_);
  memcpy(&char_samp->norm_aspect_ratio_, raw_data,
         sizeof(char_samp->norm_aspect_ratio_));
  raw_data += sizeof(char_samp->norm_aspect_ratio_);
  // load the Bmp8 part
  if (char_samp->LoadFromCharDumpFile(&raw_data) == false) {
    delete char_samp;  // owns (and frees) label32
    return NULL;
  }
  // only advance the caller's pointer on success
  (*raw_data_ptr) = raw_data;
  return char_samp;
}
// computes the features corresponding to the char sample
bool CharSamp::ComputeFeatures(int conv_grid_size, float *features) {
// Create a scaled BMP
CharSamp *scaled_bmp = Scale(conv_grid_size, conv_grid_size);
if (!scaled_bmp) {
return false;
}
// prepare input
unsigned char *buff = scaled_bmp->RawData();
// bitmap features
int input;
int bmp_size = conv_grid_size * conv_grid_size;
for (input = 0; input < bmp_size; input++) {
features[input] = 255.0f - (1.0f * buff[input]);
}
// word context features
features[input++] = FirstChar();
features[input++] = LastChar();
features[input++] = NormTop();
features[input++] = NormBottom();
features[input++] = NormAspectRatio();
delete scaled_bmp;
return true;
}
} // namespace tesseract
| 1080228-arabicocr11 | cube/char_samp.cpp | C++ | asf20 | 20,757 |
/**********************************************************************
* File: classifier_factory.h
* Description: Declaration of the Base Character Classifier
* Author: Ahmad Abdulkader
* Created: 2007
*
* (C) Copyright 2008, Google Inc.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
// The CharClassifierFactory provides a single static method to create an
// instance of the desired classifier
#ifndef CHAR_CLASSIFIER_FACTORY_H
#define CHAR_CLASSIFIER_FACTORY_H
#include <string>
#include "classifier_base.h"
#include "lang_model.h"
namespace tesseract {
class CharClassifierFactory {
 public:
  // Creates a CharClassifier object of the appropriate type depending on the
  // classifier type in the settings file.
  // data_file_path: directory holding the language data files (presumably
  //   the tessdata path -- confirm against callers)
  // lang: language identifier; lang_mod: language model the classifier uses
  // char_set: character set of the language; params: classifier tuning knobs
  // Returns NULL on failure (caller owns the returned classifier).
  static CharClassifier *Create(const string &data_file_path,
                                const string &lang,
                                LangModel *lang_mod,
                                CharSet *char_set,
                                TuningParams *params);
};
} // tesseract
#endif // CHAR_CLASSIFIER_FACTORY_H
| 1080228-arabicocr11 | cube/classifier_factory.h | C++ | asf20 | 1,654 |
/**********************************************************************
* File: word_altlist.cpp
* Description: Implementation of the Word Alternate List Class
* Author: Ahmad Abdulkader
* Created: 2008
*
* (C) Copyright 2008, Google Inc.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
#include "word_altlist.h"
namespace tesseract {
// Constructs an empty alternate list holding at most max_alt entries.
// The word array is allocated lazily on the first Insert().
WordAltList::WordAltList(int max_alt)
  : AltList(max_alt) {
  word_alt_ = NULL;
}
// Frees each alternate string and then the array of alternates itself.
WordAltList::~WordAltList() {
  if (word_alt_ == NULL)
    return;
  for (int idx = 0; idx < alt_cnt_; idx++) {
    if (word_alt_[idx] != NULL)
      delete []word_alt_[idx];
  }
  delete []word_alt_;
  word_alt_ = NULL;
}
// insert an alternate word with the specified cost and tag
// Inserts an alternate word with the specified cost and tag. If the word
// already exists in the list, only its cost/tag are updated (when lower).
// Returns false on allocation failure or when the list is at capacity.
bool WordAltList::Insert(char_32 *word_str, int cost, void *tag) {
  if (word_alt_ == NULL || alt_cost_ == NULL) {
    // lazily allocate the alternate arrays on first insertion
    word_alt_ = new char_32*[max_alt_];
    alt_cost_ = new int[max_alt_];
    alt_tag_ = new void *[max_alt_];
    if (word_alt_ == NULL || alt_cost_ == NULL || alt_tag_ == NULL) {
      return false;
    }
    memset(alt_tag_, 0, max_alt_ * sizeof(*alt_tag_));
  } else {
    // check if alt already exists
    for (int alt_idx = 0; alt_idx < alt_cnt_; alt_idx++) {
      if (CubeUtils::StrCmp(word_str, word_alt_[alt_idx]) == 0) {
        // update the cost if we have a lower one
        if (cost < alt_cost_[alt_idx]) {
          alt_cost_[alt_idx] = cost;
          alt_tag_[alt_idx] = tag;
        }
        return true;
      }
    }
  }
  // bounds check: never write past the fixed capacity of the arrays
  if (alt_cnt_ >= max_alt_) {
    return false;
  }
  // determine length of alternate
  int len = CubeUtils::StrLen(word_str);
  word_alt_[alt_cnt_] = new char_32[len + 1];
  if (word_alt_[alt_cnt_] == NULL) {
    return false;
  }
  if (len > 0) {
    memcpy(word_alt_[alt_cnt_], word_str, len * sizeof(*word_str));
  }
  word_alt_[alt_cnt_][len] = 0;
  alt_cost_[alt_cnt_] = cost;
  alt_tag_[alt_cnt_] = tag;
  alt_cnt_++;
  return true;
}
// sort the alternates in ascending order (lowest cost first) based on the cost
void WordAltList::Sort() {
for (int alt_idx = 0; alt_idx < alt_cnt_; alt_idx++) {
for (int alt = alt_idx + 1; alt < alt_cnt_; alt++) {
if (alt_cost_[alt_idx] > alt_cost_[alt]) {
char_32 *pchTemp = word_alt_[alt_idx];
word_alt_[alt_idx] = word_alt_[alt];
word_alt_[alt] = pchTemp;
int temp = alt_cost_[alt_idx];
alt_cost_[alt_idx] = alt_cost_[alt];
alt_cost_[alt] = temp;
void *tag = alt_tag_[alt_idx];
alt_tag_[alt_idx] = alt_tag_[alt];
alt_tag_[alt] = tag;
}
}
}
}
void WordAltList::PrintDebug() {
for (int alt_idx = 0; alt_idx < alt_cnt_; alt_idx++) {
char_32 *word_32 = word_alt_[alt_idx];
string word_str;
CubeUtils::UTF32ToUTF8(word_32, &word_str);
int num_unichars = CubeUtils::StrLen(word_32);
fprintf(stderr, "Alt[%d]=%s (cost=%d, num_unichars=%d); unichars=", alt_idx,
word_str.c_str(), alt_cost_[alt_idx], num_unichars);
for (int i = 0; i < num_unichars; ++i)
fprintf(stderr, "%d ", word_32[i]);
fprintf(stderr, "\n");
}
}
} // namespace tesseract
| 1080228-arabicocr11 | cube/word_altlist.cpp | C++ | asf20 | 3,672 |
/**********************************************************************
* File: cube_line_object.cpp
* Description: Implementation of the Cube Line Object Class
* Author: Ahmad Abdulkader
* Created: 2007
*
* (C) Copyright 2008, Google Inc.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
#include <algorithm>
#include "cube_line_object.h"
namespace tesseract {
// Constructs a line object over the given line image. The pix is borrowed
// (own_pix_ is false) and nothing is recognized until Process() is called.
CubeLineObject::CubeLineObject(CubeRecoContext *cntxt, Pix *pix) {
  cntxt_ = cntxt;
  line_pix_ = pix;
  own_pix_ = false;
  processed_ = false;
  phrases_ = NULL;
  phrase_cnt_ = 0;
}
// Releases the line pix (only when owned) and all phrase objects.
CubeLineObject::~CubeLineObject() {
  if (own_pix_ == true && line_pix_ != NULL) {
    pixDestroy(&line_pix_);
    line_pix_ = NULL;
  }
  if (phrases_ != NULL) {
    for (int idx = 0; idx < phrase_cnt_; idx++) {
      delete phrases_[idx];  // deleting NULL is a no-op
    }
    delete []phrases_;
    phrases_ = NULL;
  }
}
// Recognize the specified pix as one line, splitting it into phrases
bool CubeLineObject::Process() {
  // do nothing if pix had already been processed
  if (processed_) {
    return true;
  }
  // validate data
  if (line_pix_ == NULL || cntxt_ == NULL) {
    return false;
  }
  // create a CharSamp wrapping the whole line image
  CharSamp *char_samp = CubeUtils::CharSampleFromPix(line_pix_, 0, 0,
                                                     line_pix_->w,
                                                     line_pix_->h);
  if (char_samp == NULL) {
    return false;
  }
  // compute connected components.
  int con_comp_cnt = 0;
  ConComp **con_comps = char_samp->FindConComps(&con_comp_cnt,
      cntxt_->Params()->MinConCompSize());
  // no longer need char_samp, delete it
  delete char_samp;
  // no connected components, bail out
  if (con_comp_cnt <= 0 || con_comps == NULL) {
    return false;
  }
  // sort connected components based on reading order
  bool rtl = (cntxt_->ReadingOrder() == tesseract::CubeRecoContext::R2L);
  qsort(con_comps, con_comp_cnt, sizeof(*con_comps), rtl ?
        ConComp::Right2LeftComparer : ConComp::Left2RightComparer);
  // compute word breaking threshold as a ratio of line height
  bool ret_val = false;
  int word_break_threshold = ComputeWordBreakThreshold(con_comp_cnt, con_comps,
                                                       rtl);
  if (word_break_threshold > 0) {
    // over-allocate phrases object buffer (at most one phrase per concomp)
    phrases_ = new CubeObject *[con_comp_cnt];
    if (phrases_ != NULL) {
      // create a phrase if the horizontal distance between two consecutive
      // concomps is higher than threshold
      int start_con_idx = 0;
      int current_phrase_limit = rtl ? con_comps[0]->Left() :
                                       con_comps[0]->Right();
      // note: the loop deliberately runs one index past the last concomp
      // so that the final phrase gets flushed
      for (int con_idx = 1; con_idx <= con_comp_cnt; con_idx++) {
        bool create_new_phrase = true;
        // if not at the end, compute the distance between two consecutive
        // concomps
        if (con_idx < con_comp_cnt) {
          int dist = 0;
          if (cntxt_->ReadingOrder() == tesseract::CubeRecoContext::R2L) {
            dist = current_phrase_limit - con_comps[con_idx]->Right();
          } else {
            dist = con_comps[con_idx]->Left() - current_phrase_limit;
          }
          create_new_phrase = (dist > word_break_threshold);
        }
        // create a new phrase
        if (create_new_phrase) {
          // create a phrase corresponding to a range on components
          bool left_most;
          bool right_most;
          CharSamp *phrase_char_samp =
              CharSamp::FromConComps(con_comps, start_con_idx,
                                     con_idx - start_con_idx, NULL,
                                     &left_most, &right_most,
                                     line_pix_->h);
          if (phrase_char_samp == NULL) {
            break;
          }
          phrases_[phrase_cnt_] = new CubeObject(cntxt_, phrase_char_samp);
          if (phrases_[phrase_cnt_] == NULL) {
            delete phrase_char_samp;
            break;
          }
          // set the ownership of the charsamp to the cube object
          phrases_[phrase_cnt_]->SetCharSampOwnership(true);
          phrase_cnt_++;
          // advance the starting index to the current index
          start_con_idx = con_idx;
          // set the limit of the newly starting phrase (if any)
          if (con_idx < con_comp_cnt) {
            current_phrase_limit = rtl ? con_comps[con_idx]->Left() :
                                         con_comps[con_idx]->Right();
          }
        } else {
          // update the limit of the current phrase
          if (cntxt_->ReadingOrder() == tesseract::CubeRecoContext::R2L) {
            current_phrase_limit = MIN(current_phrase_limit,
                                       con_comps[con_idx]->Left());
          } else {
            current_phrase_limit = MAX(current_phrase_limit,
                                       con_comps[con_idx]->Right());
          }
        }
      }
      ret_val = true;
    }
  }
  // clean-up connected comps
  for (int con_idx = 0; con_idx < con_comp_cnt; con_idx++) {
    delete con_comps[con_idx];
  }
  delete []con_comps;
  // mark as processed even on failure so we do not re-run on the same pix
  processed_ = true;
  return ret_val;
}
// Compute the least word breaking threshold that is required to produce a
// valid set of phrases. Phrases are validated using the Aspect ratio
// constraints specified in the language specific Params object
int CubeLineObject::ComputeWordBreakThreshold(int con_comp_cnt,
                                              ConComp **con_comps, bool rtl) {
  // initial estimate of word breaking threshold
  int word_break_threshold =
      static_cast<int>(line_pix_->h * cntxt_->Params()->MaxSpaceHeightRatio());
  bool valid = false;
  // compute the resulting words and validate each's aspect ratio;
  // decrement the threshold until every word passes or we hit zero
  do {
    // group connected components into words based on breaking threshold
    int start_con_idx = 0;
    int current_phrase_limit = (rtl ? con_comps[0]->Left() :
                                      con_comps[0]->Right());
    // bounding box of the word being accumulated
    int min_x = con_comps[0]->Left();
    int max_x = con_comps[0]->Right();
    int min_y = con_comps[0]->Top();
    int max_y = con_comps[0]->Bottom();
    valid = true;
    // loop runs one index past the end so the final word is validated too
    for (int con_idx = 1; con_idx <= con_comp_cnt; con_idx++) {
      bool create_new_phrase = true;
      // if not at the end, compute the distance between two consecutive
      // concomps
      if (con_idx < con_comp_cnt) {
        int dist = 0;
        if (rtl) {
          dist = current_phrase_limit - con_comps[con_idx]->Right();
        } else {
          dist = con_comps[con_idx]->Left() - current_phrase_limit;
        }
        create_new_phrase = (dist > word_break_threshold);
      }
      // create a new phrase
      if (create_new_phrase) {
        // check aspect ratio. Break if invalid
        if ((max_x - min_x + 1) >
            (cntxt_->Params()->MaxWordAspectRatio() * (max_y - min_y + 1))) {
          valid = false;
          break;
        }
        // advance the starting index to the current index
        start_con_idx = con_idx;
        // set the limit of the newly starting phrase (if any)
        if (con_idx < con_comp_cnt) {
          current_phrase_limit = rtl ? con_comps[con_idx]->Left() :
                                       con_comps[con_idx]->Right();
          // re-init bounding box
          min_x = con_comps[con_idx]->Left();
          max_x = con_comps[con_idx]->Right();
          min_y = con_comps[con_idx]->Top();
          max_y = con_comps[con_idx]->Bottom();
        }
      } else {
        // update the limit of the current phrase
        if (rtl) {
          current_phrase_limit = MIN(current_phrase_limit,
                                     con_comps[con_idx]->Left());
        } else {
          current_phrase_limit = MAX(current_phrase_limit,
                                     con_comps[con_idx]->Right());
        }
        // update bounding box
        UpdateRange(con_comps[con_idx]->Left(),
                    con_comps[con_idx]->Right(), &min_x, &max_x);
        UpdateRange(con_comps[con_idx]->Top(),
                    con_comps[con_idx]->Bottom(), &min_y, &max_y);
      }
    }
    // return the breaking threshold if all broken word dimensions are valid
    if (valid) {
      return word_break_threshold;
    }
    // decrease the threshold and try again
    word_break_threshold--;
  } while (!valid && word_break_threshold > 0);
  // failed to find a threshold that achieves the target aspect ratio.
  // Just use the default threshold
  return static_cast<int>(line_pix_->h *
                          cntxt_->Params()->MaxSpaceHeightRatio());
}
}
| 1080228-arabicocr11 | cube/cube_line_object.cpp | C++ | asf20 | 9,287 |
/**********************************************************************
* File: beam_search.cpp
* Description: Class to implement Beam Word Search Algorithm
* Author: Ahmad Abdulkader
* Created: 2007
*
* (C) Copyright 2008, Google Inc.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
#include <algorithm>
#include "beam_search.h"
#include "tesseractclass.h"
namespace tesseract {
// Constructs a beam search over the given recognition context. In word
// mode, space transitions are not considered; the lattice is built lazily
// by Search().
BeamSearch::BeamSearch(CubeRecoContext *cntxt, bool word_mode) {
  cntxt_ = cntxt;
  word_mode_ = word_mode;
  seg_pt_cnt_ = 0;
  col_cnt_ = 1;
  col_ = NULL;
}
// Cleanup the lattice corresponding to the last search
void BeamSearch::Cleanup() {
  // free every column of the previous lattice, then the column array
  if (col_ != NULL) {
    for (int col_idx = 0; col_idx < col_cnt_; col_idx++) {
      delete col_[col_idx];  // deleting NULL is a no-op
    }
    delete []col_;
  }
  col_ = NULL;
}
// Destructor: frees the lattice left over from the last search.
BeamSearch::~BeamSearch() {
  Cleanup();
}
// Creates a set of children nodes emerging from a parent node based on
// the character alternate list and the language model.
void BeamSearch::CreateChildren(SearchColumn *out_col, LangModel *lang_mod,
                                SearchNode *parent_node,
                                LangModEdge *lm_parent_edge,
                                CharAltList *char_alt_list, int extra_cost) {
  // get all the edges from this parent
  int edge_cnt;
  LangModEdge **lm_edges = lang_mod->GetEdges(char_alt_list,
                                              lm_parent_edge, &edge_cnt);
  if (lm_edges) {
    // add them to the ending column with the appropriate parent
    for (int edge = 0; edge < edge_cnt; edge++) {
      // add a node to the column if the current column is not the
      // last one, or if the lang model edge indicates it is valid EOW
      if (!cntxt_->NoisyInput() && out_col->ColIdx() >= seg_pt_cnt_ &&
          !lm_edges[edge]->IsEOW()) {
        // free edge since no object is going to own it
        delete lm_edges[edge];
        continue;
      }
      // compute the recognition cost of this node
      int recognition_cost = MIN_PROB_COST;
      if (char_alt_list && char_alt_list->AltCount() > 0) {
        recognition_cost = MAX(0, char_alt_list->ClassCost(
            lm_edges[edge]->ClassID()));
        // Add the no space cost. This should zero in word mode
        recognition_cost += extra_cost;
      }
      // Note that the edge will be freed inside the column if
      // AddNode is called
      if (recognition_cost >= 0) {
        out_col->AddNode(lm_edges[edge], recognition_cost, parent_node,
                         cntxt_);
      } else {
        // negative cost: discard the edge (nothing owns it)
        delete lm_edges[edge];
      }
    }  // edge
    // free edge array
    delete []lm_edges;
  }  // lm_edges
}
// Performs a beam search in the specified search object using the specified
// language model; returns an alternate list of possible words as a result.
WordAltList * BeamSearch::Search(SearchObject *srch_obj, LangModel *lang_mod) {
  // verifications: fall back to the context's language model if none given
  if (!lang_mod)
    lang_mod = cntxt_->LangMod();
  if (!lang_mod) {
    fprintf(stderr, "Cube ERROR (BeamSearch::Search): could not construct "
            "LangModel\n");
    return NULL;
  }
  // free existing state
  Cleanup();
  // get seg pt count
  seg_pt_cnt_ = srch_obj->SegPtCnt();
  if (seg_pt_cnt_ < 0) {
    return NULL;
  }
  // one lattice column per segmentation point plus a terminal column
  col_cnt_ = seg_pt_cnt_ + 1;
  // disregard suspicious cases
  if (seg_pt_cnt_ > 128) {
    fprintf(stderr, "Cube ERROR (BeamSearch::Search): segment point count is "
            "suspiciously high; bailing out\n");
    return NULL;
  }
  // alloc memory for columns
  col_ = new SearchColumn *[col_cnt_];
  if (!col_) {
    fprintf(stderr, "Cube ERROR (BeamSearch::Search): could not construct "
            "SearchColumn array\n");
    return NULL;
  }
  memset(col_, 0, col_cnt_ * sizeof(*col_));
  // for all possible segments
  for (int end_seg = 1; end_seg <= (seg_pt_cnt_ + 1); end_seg++) {
    // create a search column
    col_[end_seg - 1] = new SearchColumn(end_seg - 1,
                                         cntxt_->Params()->BeamWidth());
    if (!col_[end_seg - 1]) {
      fprintf(stderr, "Cube ERROR (BeamSearch::Search): could not construct "
              "SearchColumn for column %d\n", end_seg - 1);
      return NULL;
    }
    // for all possible start segments (bounded by the max number of
    // segments a single character may span)
    int init_seg = MAX(0, end_seg - cntxt_->Params()->MaxSegPerChar());
    for (int strt_seg = init_seg; strt_seg < end_seg; strt_seg++) {
      int parent_nodes_cnt;
      SearchNode **parent_nodes;
      // for the root segment, we do not have a parent
      if (strt_seg == 0) {
        parent_nodes_cnt = 1;
        parent_nodes = NULL;
      } else {
        // for all the existing nodes in the starting column
        parent_nodes_cnt = col_[strt_seg - 1]->NodeCount();
        parent_nodes = col_[strt_seg - 1]->Nodes();
      }
      // run the shape recognizer on the segment range
      CharAltList *char_alt_list = srch_obj->RecognizeSegment(strt_seg - 1,
                                                              end_seg - 1);
      // for all the possible parents
      for (int parent_idx = 0; parent_idx < parent_nodes_cnt; parent_idx++) {
        // point to the parent node
        SearchNode *parent_node = !parent_nodes ? NULL
            : parent_nodes[parent_idx];
        LangModEdge *lm_parent_edge = !parent_node ? lang_mod->Root()
            : parent_node->LangModelEdge();
        // compute the cost of not having spaces within the segment range
        int contig_cost = srch_obj->NoSpaceCost(strt_seg - 1, end_seg - 1);
        // In phrase mode, compute the cost of not having a space before
        // this character
        int no_space_cost = 0;
        if (!word_mode_ && strt_seg > 0) {
          no_space_cost = srch_obj->NoSpaceCost(strt_seg - 1);
        }
        // if the no space cost is low enough
        if ((contig_cost + no_space_cost) < MIN_PROB_COST) {
          // Add the children nodes
          CreateChildren(col_[end_seg - 1], lang_mod, parent_node,
                         lm_parent_edge, char_alt_list,
                         contig_cost + no_space_cost);
        }
        // In phrase mode and if not starting at the root
        if (!word_mode_ && strt_seg > 0) { // parent_node must be non-NULL
          // consider starting a new word for nodes that are valid EOW
          if (parent_node->LangModelEdge()->IsEOW()) {
            // get the space cost
            int space_cost = srch_obj->SpaceCost(strt_seg - 1);
            // if the space cost is low enough
            if ((contig_cost + space_cost) < MIN_PROB_COST) {
              // Restart the language model and add nodes as children to the
              // space node.
              CreateChildren(col_[end_seg - 1], lang_mod, parent_node, NULL,
                             char_alt_list, contig_cost + space_cost);
            }
          }
        }
      } // parent
    } // strt_seg
    // prune the column nodes down to the beam width
    col_[end_seg - 1]->Prune();
    // Free the column hash table. No longer needed
    col_[end_seg - 1]->FreeHashTable();
  } // end_seg
  WordAltList *alt_list = CreateWordAltList(srch_obj);
  return alt_list;
}
// Creates a Word alternate list from the results in the lattice.
WordAltList *BeamSearch::CreateWordAltList(SearchObject *srch_obj) {
  // create an alternate list of all the nodes in the last column
  int node_cnt = col_[col_cnt_ - 1]->NodeCount();
  SearchNode **srch_nodes = col_[col_cnt_ - 1]->Nodes();
  CharBigrams *bigrams = cntxt_->Bigrams();
  WordUnigrams *word_unigrams = cntxt_->WordUnigramsObj();
  // Save the index of the best-cost node before the alt list is
  // sorted, so that we can retrieve it from the node list when backtracking.
  best_presorted_node_idx_ = 0;
  int best_cost = -1;
  if (node_cnt <= 0)
    return NULL;
  // start creating the word alternate list
  WordAltList *alt_list = new WordAltList(node_cnt + 1);
  for (int node_idx = 0; node_idx < node_cnt; node_idx++) {
    // recognition cost accumulated along the node's best path
    int recognition_cost = srch_nodes[node_idx]->BestCost();
    // compute the size cost of the alternate (also yields its string)
    char_32 *ch_buff = NULL;
    int size_cost = SizeCost(srch_obj, srch_nodes[node_idx], &ch_buff);
    // accumulate other costs
    if (ch_buff) {
      int cost = 0;
      // char bigram cost
      int bigram_cost = !bigrams ? 0 :
          bigrams->Cost(ch_buff, cntxt_->CharacterSet());
      // word unigram cost
      int unigram_cost = !word_unigrams ? 0 :
          word_unigrams->Cost(ch_buff, cntxt_->LangMod(),
                              cntxt_->CharacterSet());
      // overall cost: weighted combination of the component costs
      cost = static_cast<int>(
          (size_cost * cntxt_->Params()->SizeWgt()) +
          (bigram_cost * cntxt_->Params()->CharBigramWgt()) +
          (unigram_cost * cntxt_->Params()->WordUnigramWgt()) +
          (recognition_cost * cntxt_->Params()->RecoWgt()));
      // insert into word alt list
      alt_list->Insert(ch_buff, cost,
                       static_cast<void *>(srch_nodes[node_idx]));
      // Note that strict < is necessary because WordAltList::Sort()
      // uses it in a bubble sort to swap entries.
      if (best_cost < 0 || cost < best_cost) {
        best_presorted_node_idx_ = node_idx;
        best_cost = cost;
      }
      delete []ch_buff;
    }
  }
  // sort the alternates based on cost
  alt_list->Sort();
  return alt_list;
}
// Returns the lattice column corresponding to the specified column index.
SearchColumn *BeamSearch::Column(int col) const {
  // only hand back a column when the lattice exists and the index is valid
  if (col_ != NULL && col >= 0 && col < col_cnt_)
    return col_[col];
  return NULL;
}
// Returns the best node in the last column of last performed search.
// Returns the best node (first entry) in the last column of the last
// performed search, or NULL when no search results are available.
SearchNode *BeamSearch::BestNode() const {
  if (col_cnt_ < 1 || col_ == NULL)
    return NULL;
  SearchColumn *last_col = col_[col_cnt_ - 1];
  if (last_col == NULL)
    return NULL;
  SearchNode **nodes = last_col->Nodes();
  if (last_col->NodeCount() < 1 || nodes == NULL || nodes[0] == NULL)
    return NULL;
  return nodes[0];
}
// Returns the string corresponding to the specified alt.
char_32 *BeamSearch::Alt(int alt) const {
  // get the last column of the lattice
  if (col_cnt_ <= 0)
    return NULL;
  SearchColumn *last_col = col_[col_cnt_ - 1];
  if (last_col == NULL)
    return NULL;
  // validate the requested alternate index
  if (alt >= last_col->NodeCount() || last_col->Nodes() == NULL) {
    return NULL;
  }
  SearchNode *node = last_col->Nodes()[alt];
  if (node == NULL)
    return NULL;
  // the path string may itself be NULL, which we pass through unchanged
  return node->PathString();
}
// Backtracks from the specified node index and returns the corresponding
// character mapped segments and character count. Optional return
// arguments are the char_32 result string and character bounding
// boxes, if non-NULL values are passed in.
CharSamp **BeamSearch::BackTrack(SearchObject *srch_obj, int node_index,
                                 int *char_cnt, char_32 **str32,
                                 Boxa **char_boxes) const {
  // resolve the node index in the last lattice column, then delegate to
  // the node-based overload
  if (col_cnt_ <= 0)
    return NULL;
  SearchColumn *last_col = col_[col_cnt_ - 1];
  if (last_col == NULL)
    return NULL;
  if (node_index >= last_col->NodeCount() || last_col->Nodes() == NULL)
    return NULL;
  SearchNode *node = last_col->Nodes()[node_index];
  if (node == NULL)
    return NULL;
  return BackTrack(srch_obj, node, char_cnt, str32, char_boxes);
}
// Backtracks from the specified node index and returns the corresponding
// character mapped segments and character count. Optional return
// arguments are the char_32 result string and character bounding
// boxes, if non-NULL values are passed in.
CharSamp **BeamSearch::BackTrack(SearchObject *srch_obj, SearchNode *srch_node,
                                 int *char_cnt, char_32 **str32,
                                 Boxa **char_boxes) const {
  if (!srch_node)
    return NULL;
  if (str32) {
    if (*str32)
      delete [](*str32);  // clear existing value
    *str32 = srch_node->PathString();
    if (!*str32)
      return NULL;
  }
  if (char_boxes && *char_boxes) {
    boxaDestroy(char_boxes);  // clear existing value
  }
  CharSamp **chars;
  chars = SplitByNode(srch_obj, srch_node, char_cnt, char_boxes);
  if (!chars && str32) {
    // free the path string and reset the caller's pointer so it does
    // not dangle on the failure path
    delete []*str32;
    *str32 = NULL;
  }
  return chars;
}
// Backtracks from the given lattice node and return the corresponding
// char mapped segments and character count. The character bounding
// boxes are optional return arguments, if non-NULL values are passed in.
CharSamp **BeamSearch::SplitByNode(SearchObject *srch_obj,
                                   SearchNode *srch_node,
                                   int *char_cnt,
                                   Boxa **char_boxes) const {
  // Count the characters (could be less than the path length when in
  // phrase mode)
  *char_cnt = 0;
  SearchNode *node = srch_node;
  while (node) {
    node = node->ParentNode();
    (*char_cnt)++;
  }
  if (*char_cnt == 0)
    return NULL;
  // Allocate box array
  if (char_boxes) {
    if (*char_boxes)
      boxaDestroy(char_boxes);  // clear existing value
    *char_boxes = boxaCreate(*char_cnt);
    if (*char_boxes == NULL)
      return NULL;
  }
  // Allocate memory for CharSamp array.
  // NOTE(review): callers free only this array, not its elements
  // (see SizeCost) -- the CharSamp objects appear to be owned by
  // srch_obj's CharSample() provider; confirm before changing.
  CharSamp **chars = new CharSamp *[*char_cnt];
  if (!chars) {
    if (char_boxes)
      boxaDestroy(char_boxes);
    return NULL;
  }
  // walk from the end node back to the root, filling chars[] from the
  // last index down to 0
  int ch_idx = *char_cnt - 1;
  int seg_pt_cnt = srch_obj->SegPtCnt();
  bool success=true;
  while (srch_node && ch_idx >= 0) {
    // Parent node (could be null)
    SearchNode *parent_node = srch_node->ParentNode();
    // Get the seg pts corresponding to the search node
    int st_col = !parent_node ? 0 : parent_node->ColIdx() + 1;
    int st_seg_pt = st_col <= 0 ? -1 : st_col - 1;
    int end_col = srch_node->ColIdx();
    int end_seg_pt = end_col >= seg_pt_cnt ? seg_pt_cnt : end_col;
    // Get a char sample corresponding to the segmentation points
    CharSamp *samp = srch_obj->CharSample(st_seg_pt, end_seg_pt);
    if (!samp) {
      success = false;
      break;
    }
    samp->SetLabel(srch_node->NodeString());
    chars[ch_idx] = samp;
    if (char_boxes) {
      // Create the corresponding character bounding box
      Box *char_box = boxCreate(samp->Left(), samp->Top(),
                                samp->Width(), samp->Height());
      if (!char_box) {
        success = false;
        break;
      }
      boxaAddBox(*char_boxes, char_box, L_INSERT);
    }
    srch_node = parent_node;
    ch_idx--;
  }
  if (!success) {
    delete []chars;
    if (char_boxes)
      boxaDestroy(char_boxes);
    return NULL;
  }
  // Reverse the order of boxes (they were appended root-last).
  if (char_boxes) {
    int char_boxa_size = boxaGetCount(*char_boxes);
    int limit = char_boxa_size / 2;
    for (int i = 0; i < limit; ++i) {
      int box1_idx = i;
      int box2_idx = char_boxa_size - 1 - i;
      Box *box1 = boxaGetBox(*char_boxes, box1_idx, L_CLONE);
      Box *box2 = boxaGetBox(*char_boxes, box2_idx, L_CLONE);
      boxaReplaceBox(*char_boxes, box2_idx, box1);
      boxaReplaceBox(*char_boxes, box1_idx, box2);
    }
  }
  return chars;
}
// Returns the size cost of a string for a lattice path that
// ends at the specified lattice node.
int BeamSearch::SizeCost(SearchObject *srch_obj, SearchNode *node,
                         char_32 **str32) const {
  CharSamp **chars = NULL;
  int char_cnt = 0;
  if (!node)
    return 0;
  // Backtrack to get string and character segmentation
  chars = BackTrack(srch_obj, node, &char_cnt, str32, NULL);
  if (!chars)
    return WORST_COST;
  int size_cost = (cntxt_->SizeModel() == NULL) ? 0 :
      cntxt_->SizeModel()->Cost(chars, char_cnt);
  // free only the array; the CharSamp elements are presumably owned by
  // srch_obj's CharSample() cache -- TODO confirm
  delete []chars;
  return size_cost;
}
} // namespace tesseract
| 1080228-arabicocr11 | cube/beam_search.cpp | C++ | asf20 | 16,356 |
/**********************************************************************
* File: search_column.cpp
* Description: Implementation of the Beam Search Column Class
* Author: Ahmad Abdulkader
* Created: 2008
*
* (C) Copyright 2008, Google Inc.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
#include "search_column.h"
#include <stdlib.h>
namespace tesseract {
// Constructs an empty lattice column at index col_idx that keeps at most
// max_node nodes (the beam width). Storage is created lazily by Init().
SearchColumn::SearchColumn(int col_idx, int max_node) {
  col_idx_ = col_idx;
  max_node_cnt_ = max_node;
  node_cnt_ = 0;
  node_array_ = NULL;
  node_hash_table_ = NULL;
  init_ = false;
  // cost bounds start inverted so the first node added establishes them
  min_cost_ = INT_MAX;
  max_cost_ = 0;
}
// Cleanup data
void SearchColumn::Cleanup() {
  // free all nodes and then the node array itself
  if (node_array_ != NULL) {
    for (int idx = 0; idx < node_cnt_; idx++) {
      delete node_array_[idx];  // deleting NULL is a no-op
    }
    delete []node_array_;
    node_array_ = NULL;
  }
  FreeHashTable();
  init_ = false;
}
// Destructor: frees the node array and hash table.
SearchColumn::~SearchColumn() {
  Cleanup();
}
// Initializations
bool SearchColumn::Init() {
  // idempotent: a second call is a no-op
  if (init_) {
    return true;
  }
  // lazily create the node hash table
  if (node_hash_table_ == NULL) {
    node_hash_table_ = new SearchNodeHashTable();
    if (node_hash_table_ == NULL) {
      return false;
    }
  }
  init_ = true;
  return true;
}
// Prune the nodes if necessary. Pruning is done such that a max
// number of nodes is kept, i.e., the beam width
void SearchColumn::Prune() {
  // no need to prune: already within the beam width
  if (node_cnt_ <= max_node_cnt_) {
    return;
  }
  // compute the cost histogram over kScoreBins bins spanning
  // [min_cost_, max_cost_]
  memset(score_bins_, 0, sizeof(score_bins_));
  int cost_range = max_cost_ - min_cost_ + 1;
  for (int node_idx = 0; node_idx < node_cnt_; node_idx++) {
    int cost_bin = static_cast<int>(
        ((node_array_[node_idx]->BestCost() - min_cost_) *
         kScoreBins) / static_cast<double>(cost_range));
    if (cost_bin >= kScoreBins) {
      cost_bin = kScoreBins - 1;
    }
    score_bins_[cost_bin]++;
  }
  // determine the pruning cost by scanning the cost histogram from
  // least to greatest cost bins and finding the cost at which the
  // max number of nodes is exceeded
  int pruning_cost = 0;
  int new_node_cnt = 0;
  for (int cost_bin = 0; cost_bin < kScoreBins; cost_bin++) {
    if (new_node_cnt > 0 &&
        (new_node_cnt + score_bins_[cost_bin]) > max_node_cnt_) {
      pruning_cost = min_cost_ + ((cost_bin * cost_range) / kScoreBins);
      break;
    }
    new_node_cnt += score_bins_[cost_bin];
  }
  // prune out all the nodes above this cost, compacting survivors
  // to the front of the array in place
  for (int node_idx = new_node_cnt = 0; node_idx < node_cnt_; node_idx++) {
    // prune this node out
    if (node_array_[node_idx]->BestCost() > pruning_cost ||
        new_node_cnt > max_node_cnt_) {
      delete node_array_[node_idx];
    } else {
      // keep it
      node_array_[new_node_cnt++] = node_array_[node_idx];
    }
  }
  node_cnt_ = new_node_cnt;
}
// sort all nodes
void SearchColumn::Sort() {
  // order all nodes in the column using the SearchNode comparer
  if (node_array_ == NULL || node_cnt_ <= 0) {
    return;
  }
  qsort(node_array_, node_cnt_, sizeof(*node_array_),
        SearchNode::SearchNodeComparer);
}
// add a new node
SearchNode *SearchColumn::AddNode(LangModEdge *edge, int reco_cost,
                                  SearchNode *parent_node,
                                  CubeRecoContext *cntxt) {
  // init if necessary
  if (init_ == false && Init() == false) {
    return NULL;
  }
  // find out if we have an node with the same edge
  // look in the hash table
  SearchNode *new_node = node_hash_table_->Lookup(edge, parent_node);
  // node does not exist
  if (new_node == NULL) {
    // the new node takes ownership of the edge from here on
    new_node = new SearchNode(cntxt, parent_node, reco_cost, edge, col_idx_);
    if (new_node == NULL) {
      return NULL;
    }
    // if the max node count has already been reached, check if the cost of
    // the new node exceeds the max cost. This indicates that it will be pruned
    // and so there is no point adding it
    if (node_cnt_ >= max_node_cnt_ && new_node->BestCost() > max_cost_) {
      delete new_node;
      return NULL;
    }
    // expand the node buffer if necc (grows in kNodeAllocChunk steps)
    if ((node_cnt_ % kNodeAllocChunk) == 0) {
      // alloc a new buff
      SearchNode **new_node_buff =
          new SearchNode *[node_cnt_ + kNodeAllocChunk];
      if (new_node_buff == NULL) {
        delete new_node;
        return NULL;
      }
      // free existing after copying contents
      if (node_array_ != NULL) {
        memcpy(new_node_buff, node_array_, node_cnt_ * sizeof(*new_node_buff));
        delete []node_array_;
      }
      node_array_ = new_node_buff;
    }
    // add the node to the hash table only if it is non-OOD edge
    // because the langmod state is not unique
    if (edge->IsOOD() == false) {
      if (!node_hash_table_->Insert(edge, new_node)) {
        tprintf("Hash table full!!!");
        delete new_node;
        return NULL;
      }
    }
    node_array_[node_cnt_++] = new_node;
  } else {
    // node exists before
    // if no update occurred, return NULL
    if (new_node->UpdateParent(parent_node, reco_cost, edge) == false) {
      new_node = NULL;
    }
    // free the edge: the pre-existing node keeps its own copy
    if (edge != NULL) {
      delete edge;
    }
  }
  // update Min and Max Costs
  if (new_node != NULL) {
    if (min_cost_ > new_node->BestCost()) {
      min_cost_ = new_node->BestCost();
    }
    if (max_cost_ < new_node->BestCost()) {
      max_cost_ = new_node->BestCost();
    }
  }
  return new_node;
}
// Returns the node with the lowest best-cost in the column, or NULL when
// the column is empty.
SearchNode *SearchColumn::BestNode() {
  SearchNode *best = NULL;
  for (int idx = 0; idx < node_cnt_; idx++) {
    SearchNode *candidate = node_array_[idx];
    if (best == NULL || candidate->BestCost() < best->BestCost()) {
      best = candidate;
    }
  }
  return best;
}
} // namespace tesseract
| 1080228-arabicocr11 | cube/search_column.cpp | C++ | asf20 | 6,334 |
/**********************************************************************
* File: feature_bmp.h
* Description: Declaration of the Bitmap Feature Class
* Author: PingPing xiu (xiupingping) & Ahmad Abdulkader
* Created: 2007
*
* (C) Copyright 2008, Google Inc.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
// The FeatureBmp class implements a Bitmap feature extractor class. It
// inherits from the FeatureBase class
// The Bitmap feature vectors is the the bitmap of the specified CharSamp
// scaled to a fixed grid size and then augmented by a 5 aux features that
// describe the size, aspect ration and placement within a word
#ifndef FEATURE_BMP_H
#define FEATURE_BMP_H
#include "char_samp.h"
#include "feature_base.h"
namespace tesseract {
class FeatureBmp : public FeatureBase {
 public:
  // Caches the conv grid size from the tuning params (see conv_grid_size_).
  explicit FeatureBmp(TuningParams *params);
  virtual ~FeatureBmp();
  // Render a visualization of the features to a CharSamp.
  // This is mainly used by visual-debuggers
  virtual CharSamp *ComputeFeatureBitmap(CharSamp *samp);
  // Compute the features for a given CharSamp
  virtual bool ComputeFeatures(CharSamp *samp, float *features);
  // Returns the count of features: one per cell of the scaled
  // conv_grid_size_ x conv_grid_size_ bitmap, plus the 5 auxiliary
  // features described in the file header comment above.
  virtual int FeatureCnt() {
    return 5 + (conv_grid_size_ * conv_grid_size_);
  }
 protected:
  // grid size, cached from the TuningParams object
  int conv_grid_size_;
};
}
#endif // FEATURE_BMP_H
| 1080228-arabicocr11 | cube/feature_bmp.h | C++ | asf20 | 1,982 |
/**********************************************************************
* File: classifier_base.h
* Description: Declaration of the Base Character Classifier
* Author: Ahmad Abdulkader
* Created: 2007
*
* (C) Copyright 2008, Google Inc.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
// The CharClassifier class is the abstract class for any character/grapheme
// classifier.
#ifndef CHAR_CLASSIFIER_BASE_H
#define CHAR_CLASSIFIER_BASE_H
#include <string>
#include "char_samp.h"
#include "char_altlist.h"
#include "char_set.h"
#include "feature_base.h"
#include "lang_model.h"
#include "tuning_params.h"
namespace tesseract {
class CharClassifier {
 public:
  // Stores (but does not own) char_set and params; takes ownership of
  // feat_extract, which is deleted by the destructor.
  CharClassifier(CharSet *char_set, TuningParams *params,
                 FeatureBase *feat_extract) {
    char_set_ = char_set;
    params_ = params;
    feat_extract_ = feat_extract;
    fold_sets_ = NULL;
    fold_set_cnt_ = 0;
    fold_set_len_ = NULL;
    init_ = false;
    case_sensitive_ = true;
  }
  // Frees the folding sets, their length array, and the owned feature
  // extractor. char_set_ and params_ are left for their owners to free.
  virtual ~CharClassifier() {
    if (fold_sets_ != NULL) {
      for (int fold_set = 0; fold_set < fold_set_cnt_; fold_set++) {
        if (fold_sets_[fold_set] != NULL) {
          delete []fold_sets_[fold_set];
        }
      }
      delete []fold_sets_;
      fold_sets_ = NULL;
    }
    if (fold_set_len_ != NULL) {
      delete []fold_set_len_;
      fold_set_len_ = NULL;
    }
    if (feat_extract_ != NULL) {
      delete feat_extract_;
      feat_extract_ = NULL;
    }
  }
  // pure virtual functions that need to be implemented by any inheriting class
  virtual CharAltList * Classify(CharSamp *char_samp) = 0;
  virtual int CharCost(CharSamp *char_samp) = 0;
  virtual bool Train(CharSamp *char_samp, int ClassID) = 0;
  virtual bool SetLearnParam(char *var_name, float val) = 0;
  virtual bool Init(const string &data_file_path, const string &lang,
                    LangModel *lang_mod) = 0;
  // accessors
  FeatureBase *FeatureExtractor() {return feat_extract_;}
  inline bool CaseSensitive() const { return case_sensitive_; }
  inline void SetCaseSensitive(bool case_sensitive) {
    case_sensitive_ = case_sensitive;
  }
 protected:
  virtual void Fold() = 0;
  virtual bool LoadFoldingSets(const string &data_file_path,
                               const string &lang,
                               LangModel *lang_mod) = 0;
  FeatureBase *feat_extract_;   // owned feature extractor
  CharSet *char_set_;           // not owned
  TuningParams *params_;        // not owned
  int **fold_sets_;             // owned array of fold_set_cnt_ owned arrays
  int *fold_set_len_;           // owned; lengths of the fold_sets_ entries
  int fold_set_cnt_;
  bool init_;
  bool case_sensitive_;
};
} // tesseract
#endif // CHAR_CLASSIFIER_BASE_H
| 1080228-arabicocr11 | cube/classifier_base.h | C++ | asf20 | 3,161 |
/**********************************************************************
* File: tess_lang_model.cpp
* Description: Implementation of the Tesseract Language Model Class
* Author: Ahmad Abdulkader
* Created: 2008
*
* (C) Copyright 2008, Google Inc.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
// The TessLangModel class abstracts the Tesseract language model. It inherits
// from the LangModel class. The Tesseract language model encompasses several
// Dawgs (words from training data, punctuation, numbers, document words).
// On top of this Cube adds an OOD state machine
// The class provides methods to traverse the language model in a generative
// fashion. Given any node in the DAWG, the language model can generate a list
// of children (or fan-out) edges
#include <string>
#include <vector>
#include "char_samp.h"
#include "cube_utils.h"
#include "dict.h"
#include "tesseractclass.h"
#include "tess_lang_model.h"
#include "tessdatamanager.h"
#include "unicharset.h"
namespace tesseract {
// max fan-out (used for preallocation). Initialized here, but modified by
// constructor
int TessLangModel::max_edge_ = 4096;
// Language model extra State machines. These are not real Dawg objects:
// the DAWG_OOD/DAWG_NUMBER constants are cast to pointer values used as
// sentinel tags that FanOut() compares against to dispatch to the OOD and
// number-state-machine edge generators.
const Dawg *TessLangModel::ood_dawg_ = reinterpret_cast<Dawg *>(DAWG_OOD);
const Dawg *TessLangModel::number_dawg_ = reinterpret_cast<Dawg *>(DAWG_NUMBER);
// number state machine: rows are the kStateCnt states, columns are the
// kNumLiteralCnt literal types (same order as literal_str_ filled in
// LoadLangModelElements); NUM_TRM marks a disallowed (terminal) transition
const int TessLangModel::num_state_machine_[kStateCnt][kNumLiteralCnt] = {
  {0, 1, 1, NUM_TRM, NUM_TRM},
  {NUM_TRM, 1, 1, 3, 2},
  {NUM_TRM, NUM_TRM, 1, NUM_TRM, 2},
  {NUM_TRM, NUM_TRM, 3, NUM_TRM, 2},
};
// max consecutive repeats allowed per state (enforced in NumberEdges)
const int TessLangModel::num_max_repeat_[kStateCnt] = {3, 32, 8, 3};
// thresholds and penalties
int TessLangModel::max_ood_shape_cost_ = CubeUtils::Prob2Cost(1e-4);
// Builds the language model: caches case support from the reco context,
// parses the <lang>.cube.lm parameter string, and (optionally) loads the
// system word dawg from the tessdata archive into the owned word_dawgs_.
// When no cube unicharset is present, word_dawgs_ stays NULL and the
// Tesseract dictionary's dawgs are used instead (see NumDawgs/GetDawg).
TessLangModel::TessLangModel(const string &lm_params,
                             const string &data_file_path,
                             bool load_system_dawg,
                             TessdataManager *tessdata_manager,
                             CubeRecoContext *cntxt) {
  cntxt_ = cntxt;
  has_case_ = cntxt_->HasCase();
  // Load the rest of the language model elements from file
  LoadLangModelElements(lm_params);
  // Load word_dawgs_ if needed.
  if (tessdata_manager->SeekToStart(TESSDATA_CUBE_UNICHARSET)) {
    word_dawgs_ = new DawgVector();
    if (load_system_dawg &&
        tessdata_manager->SeekToStart(TESSDATA_CUBE_SYSTEM_DAWG)) {
      // The last parameter to the Dawg constructor (the debug level) is set to
      // false, until Cube has a way to express its preferred debug level.
      *word_dawgs_ += new SquishedDawg(tessdata_manager->GetDataFilePtr(),
                                       DAWG_TYPE_WORD,
                                       cntxt_->Lang().c_str(),
                                       SYSTEM_DAWG_PERM, false);
    }
  } else {
    word_dawgs_ = NULL;
  }
}
// Deletes every edge in the array and then the array itself.
// Safe to call with a NULL array; NULL slots are tolerated.
void TessLangModel::FreeEdges(int edge_cnt, LangModEdge **edge_array) {
  if (edge_array == NULL) {
    return;
  }
  for (int idx = 0; idx < edge_cnt; idx++) {
    delete edge_array[idx];  // deleting NULL is a no-op
  }
  delete []edge_array;
}
// Determines if a sequence of 32-bit chars is valid in this language model
// starting from the specified edge. If the eow_flag is ON, also checks for
// a valid EndOfWord. If final_edge is not NULL, returns a pointer to the last
// edge; that edge is then owned by the caller.
bool TessLangModel::IsValidSequence(LangModEdge *edge,
                                    const char_32 *sequence,
                                    bool eow_flag,
                                    LangModEdge **final_edge) {
  // get the edges emerging from this edge
  int edge_cnt = 0;
  LangModEdge **edge_array = GetEdges(NULL, edge, &edge_cnt);
  // find the 1st char in the sequence in the children
  for (int edge_idx = 0; edge_idx < edge_cnt; edge_idx++) {
    // found a match
    if (sequence[0] == edge_array[edge_idx]->EdgeString()[0]) {
      // if this is the last char
      if (sequence[1] == 0) {
        // succeed if we are in prefix mode or this is a terminal edge
        if (eow_flag == false || edge_array[edge_idx]->IsEOW()) {
          if (final_edge != NULL) {
            (*final_edge) = edge_array[edge_idx];
            // NULL the slot so FreeEdges below does not delete the edge
            // being handed back to the caller
            edge_array[edge_idx] = NULL;
          }
          FreeEdges(edge_cnt, edge_array);
          return true;
        }
      } else {
        // not the last char continue checking recursively down this edge
        if (IsValidSequence(edge_array[edge_idx], sequence + 1, eow_flag,
                            final_edge) == true) {
          FreeEdges(edge_cnt, edge_array);
          return true;
        }
      }
    }
  }
  // no child matched (or the match failed deeper down)
  FreeEdges(edge_cnt, edge_array);
  return false;
}
// Determines if a sequence of 32-bit chars is valid in this language model
// starting from the root. If the eow_flag is ON, also checks for
// a valid EndOfWord. If final_edge is not NULL, returns a pointer to the last
// edge
bool TessLangModel::IsValidSequence(const char_32 *sequence, bool eow_flag,
                                    LangModEdge **final_edge) {
  // clear the output edge before delegating to the recursive edge-based
  // overload (a NULL edge means "start at the root")
  if (final_edge != NULL) {
    (*final_edge) = NULL;
  }
  return IsValidSequence(NULL, sequence, eow_flag, final_edge);
}
// Returns true if ch is one of the configured leading punctuation chars.
// NOTE(review): lead_punc_/trail_punc_/digits_ are UTF-8 strings, so the
// char_32 argument is narrowed to a single char by string::find -- this
// assumes the configured sets contain single-byte characters; confirm for
// non-ASCII punctuation/digits.
bool TessLangModel::IsLeadingPunc(const char_32 ch) {
  return lead_punc_.find(ch) != string::npos;
}
// Returns true if ch is one of the configured trailing punctuation chars.
bool TessLangModel::IsTrailingPunc(const char_32 ch) {
  return trail_punc_.find(ch) != string::npos;
}
// Returns true if ch is one of the configured digit chars.
bool TessLangModel::IsDigit(const char_32 ch) {
  return digits_.find(ch) != string::npos;
}
// The general fan-out generation function. Returns the list of edges
// fanning-out of the specified edge and their count. If an AltList is
// specified, only the class-ids with a minimum cost are considered.
// The returned array and its edges are owned by the caller (see FreeEdges).
LangModEdge ** TessLangModel::GetEdges(CharAltList *alt_list,
                                       LangModEdge *lang_mod_edge,
                                       int *edge_cnt) {
  TessLangModEdge *tess_lm_edge =
      reinterpret_cast<TessLangModEdge *>(lang_mod_edge);
  LangModEdge **edge_array = NULL;
  (*edge_cnt) = 0;
  // if we are starting from the root, we'll instantiate every DAWG
  // and get the all the edges that emerge from the root
  if (tess_lm_edge == NULL) {
    // get DAWG count from Tesseract
    int dawg_cnt = NumDawgs();
    // preallocate the edge buffer; *edge_cnt temporarily holds the buffer
    // capacity and is reset to 0 below to serve as the running edge count
    (*edge_cnt) = dawg_cnt * max_edge_;
    edge_array = new LangModEdge *[(*edge_cnt)];
    if (edge_array == NULL) {
      return NULL;
    }
    for (int dawg_idx = (*edge_cnt) = 0; dawg_idx < dawg_cnt; dawg_idx++) {
      const Dawg *curr_dawg = GetDawg(dawg_idx);
      // Only look through word Dawgs (since there is a special way of
      // handling numbers and punctuation).
      if (curr_dawg->type() == DAWG_TYPE_WORD) {
        (*edge_cnt) += FanOut(alt_list, curr_dawg, 0, 0, NULL, true,
                              edge_array + (*edge_cnt));
      }
    }  // dawg
    (*edge_cnt) += FanOut(alt_list, number_dawg_, 0, 0, NULL, true,
                          edge_array + (*edge_cnt));
    // OOD: it is intentionally not added to the list to make sure it comes
    // at the end
    (*edge_cnt) += FanOut(alt_list, ood_dawg_, 0, 0, NULL, true,
                          edge_array + (*edge_cnt));
    // set the root flag for all root edges
    for (int edge_idx = 0; edge_idx < (*edge_cnt); edge_idx++) {
      edge_array[edge_idx]->SetRoot(true);
    }
  } else {  // not starting at the root
    // preallocate the edge buffer
    (*edge_cnt) = max_edge_;
    // allocate memory for edges
    edge_array = new LangModEdge *[(*edge_cnt)];
    if (edge_array == NULL) {
      return NULL;
    }
    // get the FanOut edges from the root of each dawg
    (*edge_cnt) = FanOut(alt_list,
                         tess_lm_edge->GetDawg(),
                         tess_lm_edge->EndEdge(), tess_lm_edge->EdgeMask(),
                         tess_lm_edge->EdgeString(), false, edge_array);
  }
  return edge_array;
}
// generate edges from an NULL terminated string
// (used for punctuation, operators and digits)
int TessLangModel::Edges(const char *strng, const Dawg *dawg,
EDGE_REF edge_ref, EDGE_REF edge_mask,
LangModEdge **edge_array) {
int edge_idx,
edge_cnt = 0;
for (edge_idx = 0; strng[edge_idx] != 0; edge_idx++) {
int class_id = cntxt_->CharacterSet()->ClassID((char_32)strng[edge_idx]);
if (class_id != INVALID_UNICHAR_ID) {
// create an edge object
edge_array[edge_cnt] = new TessLangModEdge(cntxt_, dawg, edge_ref,
class_id);
if (edge_array[edge_cnt] == NULL) {
return 0;
}
reinterpret_cast<TessLangModEdge *>(edge_array[edge_cnt])->
SetEdgeMask(edge_mask);
edge_cnt++;
}
}
return edge_cnt;
}
// Generates one OOD (out-of-dictionary) edge per character class whose
// shape cost is low enough, or for every class when no alt_list is given.
// Returns the number of edges created (0 on allocation failure).
int TessLangModel::OODEdges(CharAltList *alt_list, EDGE_REF edge_ref,
                            EDGE_REF edge_ref_mask, LangModEdge **edge_array) {
  int edge_cnt = 0;
  int class_cnt = cntxt_->CharacterSet()->ClassCount();
  for (int class_id = 0; class_id < class_cnt; class_id++) {
    // skip classes whose shape cost is too high
    if (alt_list != NULL &&
        alt_list->ClassCost(class_id) > max_ood_shape_cost_) {
      continue;
    }
    LangModEdge *new_edge = new TessLangModEdge(cntxt_, class_id);
    if (new_edge == NULL) {
      return 0;
    }
    edge_array[edge_cnt++] = new_edge;
  }
  return edge_cnt;
}
// computes and returns the edges that fan out of an edge ref
// Dispatches on the dawg sentinel (OOD / number state machine), on
// trailing-punctuation masks, and on root vs. interior trie nodes. Writes
// the created edges into edge_array and returns how many were produced.
int TessLangModel::FanOut(CharAltList *alt_list, const Dawg *dawg,
                          EDGE_REF edge_ref, EDGE_REF edge_mask,
                          const char_32 *str, bool root_flag,
                          LangModEdge **edge_array) {
  int edge_cnt = 0;
  NODE_REF next_node = NO_EDGE;
  // OOD
  if (dawg == reinterpret_cast<Dawg *>(DAWG_OOD)) {
    if (ood_enabled_ == true) {
      return OODEdges(alt_list, edge_ref, edge_mask, edge_array);
    } else {
      return 0;
    }
  } else if (dawg == reinterpret_cast<Dawg *>(DAWG_NUMBER)) {
    // Number
    if (numeric_enabled_ == true) {
      return NumberEdges(edge_ref, edge_array);
    } else {
      return 0;
    }
  } else if (IsTrailingPuncEdge(edge_mask)) {
    // a TRAILING PUNC MASK, generate more trailing punctuation and return
    if (punc_enabled_ == true) {
      EDGE_REF trail_cnt = TrailingPuncCount(edge_mask);
      return Edges(trail_punc_.c_str(), dawg, edge_ref,
                   TrailingPuncEdgeMask(trail_cnt + 1), edge_array);
    } else {
      return 0;
    }
  } else if (root_flag == true || edge_ref == 0) {
    // Root, generate leading punctuation and continue
    if (root_flag) {
      if (punc_enabled_ == true) {
        edge_cnt += Edges(lead_punc_.c_str(), dawg, 0, LEAD_PUNC_EDGE_REF_MASK,
                          edge_array);
      }
    }
    next_node = 0;
  } else {
    // a node in the main trie
    bool eow_flag = (dawg->end_of_word(edge_ref) != 0);
    // for EOW
    if (eow_flag == true) {
      // generate trailing punctuation
      if (punc_enabled_ == true) {
        edge_cnt += Edges(trail_punc_.c_str(), dawg, edge_ref,
                          TrailingPuncEdgeMask((EDGE_REF)1), edge_array);
        // generate a hyphen and go back to the root (edge_ref 0)
        edge_cnt += Edges("-/", dawg, 0, 0, edge_array + edge_cnt);
      }
    }
    // advance node
    next_node = dawg->next_node(edge_ref);
    if (next_node == 0 || next_node == NO_EDGE) {
      return edge_cnt;
    }
  }
  // now get all the emerging edges if word list is enabled
  if (word_list_enabled_ == true && next_node != NO_EDGE) {
    // create child edges
    int child_edge_cnt =
      TessLangModEdge::CreateChildren(cntxt_, dawg, next_node,
                                      edge_array + edge_cnt);
    int strt_cnt = edge_cnt;
    // set the edge mask
    for (int child = 0; child < child_edge_cnt; child++) {
      reinterpret_cast<TessLangModEdge *>(edge_array[edge_cnt++])->
          SetEdgeMask(edge_mask);
    }
    // if we are at the root, create upper case forms of these edges if possible
    if (root_flag == true) {
      for (int child = 0; child < child_edge_cnt; child++) {
        TessLangModEdge *child_edge =
            reinterpret_cast<TessLangModEdge *>(edge_array[strt_cnt + child]);
        if (has_case_ == true) {
          const char_32 *edge_str = child_edge->EdgeString();
          // NOTE(review): islower()/toupper() receive a char_32; behavior
          // for values outside unsigned-char range is implementation
          // defined -- confirm for non-Latin scripts.
          if (edge_str != NULL && islower(edge_str[0]) != 0 &&
              edge_str[1] == 0) {
            int class_id =
                cntxt_->CharacterSet()->ClassID(toupper(edge_str[0]));
            if (class_id != INVALID_UNICHAR_ID) {
              // generate an upper case edge for lower case chars
              edge_array[edge_cnt] = new TessLangModEdge(cntxt_, dawg,
                  child_edge->StartEdge(), child_edge->EndEdge(), class_id);
              if (edge_array[edge_cnt] != NULL) {
                reinterpret_cast<TessLangModEdge *>(edge_array[edge_cnt])->
                    SetEdgeMask(edge_mask);
                edge_cnt++;
              }
            }
          }
        }
      }
    }
  }
  return edge_cnt;
}
// Generate the edges fanning-out from an edge in the number state machine
// The current state, the literal type and the consecutive-repeat count are
// packed into the EDGE_REF via the NUMBER_*_MASK / NUMBER_*_SHIFT
// constants; each valid transition produces edges for the characters of
// the corresponding literal string.
int TessLangModel::NumberEdges(EDGE_REF edge_ref, LangModEdge **edge_array) {
  EDGE_REF new_state,
    state;
  inT64 repeat_cnt,
    new_repeat_cnt;
  // unpack state and repeat count from the edge ref
  state = ((edge_ref & NUMBER_STATE_MASK) >> NUMBER_STATE_SHIFT);
  repeat_cnt = ((edge_ref & NUMBER_REPEAT_MASK) >> NUMBER_REPEAT_SHIFT);
  if (state < 0 || state >= kStateCnt) {
    return 0;
  }
  // go thru all valid transitions from the state
  int edge_cnt = 0;
  EDGE_REF new_edge_ref;
  for (int lit = 0; lit < kNumLiteralCnt; lit++) {
    // move to the new state
    new_state = num_state_machine_[state][lit];
    if (new_state == NUM_TRM) {
      continue;
    }
    // a self-transition extends the current run, otherwise start a new one
    if (new_state == state) {
      new_repeat_cnt = repeat_cnt + 1;
    } else {
      new_repeat_cnt = 1;
    }
    // not allowed to repeat beyond this
    if (new_repeat_cnt > num_max_repeat_[state]) {
      continue;
    }
    // re-pack the new state, literal type and repeat count
    new_edge_ref = (new_state << NUMBER_STATE_SHIFT) |
        (lit << NUMBER_LITERAL_SHIFT) |
        (new_repeat_cnt << NUMBER_REPEAT_SHIFT);
    edge_cnt += Edges(literal_str_[lit]->c_str(), number_dawg_,
                      new_edge_ref, 0, edge_array + edge_cnt);
  }
  return edge_cnt;
}
// Loads Language model elements from contents of the <lang>.cube.lm file.
// Returns false if any line is malformed or carries an unknown token type;
// well-formed lines are still applied in that case.
bool TessLangModel::LoadLangModelElements(const string &lm_params) {
  bool success = true;
  // split into lines, each corresponding to a token type below
  vector<string> str_vec;
  CubeUtils::SplitStringUsing(lm_params, "\r\n", &str_vec);
  for (int entry = 0; entry < static_cast<int>(str_vec.size()); entry++) {
    vector<string> tokens;
    // should be only two tokens: type and value
    CubeUtils::SplitStringUsing(str_vec[entry], "=", &tokens);
    if (tokens.size() != 2) {
      // Malformed entry: record the failure and skip it. The previous code
      // fell through and indexed tokens[0]/tokens[1] anyway, which is an
      // out-of-bounds vector access when fewer than two tokens are present.
      success = false;
      continue;
    }
    if (tokens[0] == "LeadPunc") {
      lead_punc_ = tokens[1];
    } else if (tokens[0] == "TrailPunc") {
      trail_punc_ = tokens[1];
    } else if (tokens[0] == "NumLeadPunc") {
      num_lead_punc_ = tokens[1];
    } else if (tokens[0] == "NumTrailPunc") {
      num_trail_punc_ = tokens[1];
    } else if (tokens[0] == "Operators") {
      operators_ = tokens[1];
    } else if (tokens[0] == "Digits") {
      digits_ = tokens[1];
    } else if (tokens[0] == "Alphas") {
      alphas_ = tokens[1];
    } else {
      // unknown token type
      success = false;
    }
  }
  // drop characters that are not part of the current character set
  RemoveInvalidCharacters(&num_lead_punc_);
  RemoveInvalidCharacters(&num_trail_punc_);
  RemoveInvalidCharacters(&digits_);
  RemoveInvalidCharacters(&operators_);
  RemoveInvalidCharacters(&alphas_);
  // form the array of literal strings needed for number state machine
  // It is essential that the literal strings go in the order below
  literal_str_[0] = &num_lead_punc_;
  literal_str_[1] = &num_trail_punc_;
  literal_str_[2] = &digits_;
  literal_str_[3] = &operators_;
  literal_str_[4] = &alphas_;
  return success;
}
// Removes from *lm_str every character that has no class-id in the current
// CharSet: converts to UTF-32, filters, and converts back to UTF-8 in place.
void TessLangModel::RemoveInvalidCharacters(string *lm_str) {
  CharSet *char_set = cntxt_->CharacterSet();
  tesseract::string_32 lm_str32;
  CubeUtils::UTF8ToUTF32(lm_str->c_str(), &lm_str32);
  int len = CubeUtils::StrLen(lm_str32.c_str());
  char_32 *clean_str32 = new char_32[len + 1];
  if (!clean_str32)
    return;
  int clean_len = 0;
  // keep only characters known to the character set
  for (int i = 0; i < len; ++i) {
    int class_id = char_set->ClassID((char_32)lm_str32[i]);
    if (class_id != INVALID_UNICHAR_ID) {
      clean_str32[clean_len] = lm_str32[i];
      ++clean_len;
    }
  }
  clean_str32[clean_len] = 0;
  // rewrite the string only if something was actually removed
  if (clean_len < len) {
    lm_str->clear();
    CubeUtils::UTF32ToUTF8(clean_str32, lm_str);
  }
  delete [] clean_str32;
}
// Returns the number of dawgs in effect: those in the private word_dawgs_
// when it exists, otherwise those of the Tesseract dictionary.
int TessLangModel::NumDawgs() const {
  if (word_dawgs_ != NULL) {
    return word_dawgs_->size();
  }
  return cntxt_->TesseractObject()->getDict().NumDawgs();
}
// Returns the dawg with the given index, taken from the private
// word_dawgs_ when it exists, otherwise from the Tesseract dictionary.
const Dawg *TessLangModel::GetDawg(int index) const {
  if (word_dawgs_ == NULL) {
    ASSERT_HOST(index < cntxt_->TesseractObject()->getDict().NumDawgs());
    return cntxt_->TesseractObject()->getDict().GetDawg(index);
  }
  ASSERT_HOST(index < word_dawgs_->size());
  return (*word_dawgs_)[index];
}
}
| 1080228-arabicocr11 | cube/tess_lang_model.cpp | C++ | asf20 | 18,024 |
/**********************************************************************
* File: word_unigrams.h
* Description: Declaration of the Word Unigrams Class
* Author: Ahmad Abdulkader
* Created: 2008
*
* (C) Copyright 2008, Google Inc.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
// The WordUnigram class holds the unigrams of the most frequent set of words
// in a language. It is an optional component of the Cube OCR engine. If
// present, the unigram cost of a word is aggregated with the other costs
// (Recognition, Language Model, Size) to compute a cost for a word.
// The word list is assumed to be sorted in lexicographic order.
#ifndef WORD_UNIGRAMS_H
#define WORD_UNIGRAMS_H
#include <string>
#include "char_set.h"
#include "lang_model.h"
namespace tesseract {
class WordUnigrams {
 public:
  WordUnigrams();
  ~WordUnigrams();
  // Load the word-list and unigrams from file and create an object
  // The word list is assumed to be sorted
  static WordUnigrams *Create(const string &data_file_path,
                              const string &lang);
  // Compute the unigram cost of a UTF-32 string. Splits into
  // space-separated tokens, strips trailing punctuation from each
  // token, evaluates case properties, and calls internal Cost()
  // function on UTF-8 version. To avoid unnecessarily penalizing
  // all-one-case words or capitalized words (first-letter
  // upper-case and remaining letters lower-case) when not all
  // versions of the word appear in the <lang>.cube.word-freq file, a
  // case-invariant cost is computed in those cases, assuming the word
  // meets a minimum length.
  int Cost(const char_32 *str32, LangModel *lang_mod,
           CharSet *char_set) const;
 protected:
  // Compute the word unigram cost of a UTF-8 string with binary
  // search of sorted words_ array.
  int CostInternal(const char *str) const;
 private:
  // Only words this length or greater qualify for all-numeric or
  // case-invariant word unigram cost.
  static const int kMinLengthNumOrCaseInvariant = 4;
  // number of entries in words_ (presumably also costs_ -- see Create)
  int word_cnt_;
  // lexicographically sorted array of UTF-8 words
  char **words_;
  // per-word unigram costs; assumed parallel to words_
  int *costs_;
  // cost assigned to words absent from the list
  int not_in_list_cost_;
};
}
#endif // WORD_UNIGRAMS_H
| 1080228-arabicocr11 | cube/word_unigrams.h | C++ | asf20 | 2,739 |
/**********************************************************************
* File: cube_utils.cpp
* Description: Implementation of the Cube Utilities Class
* Author: Ahmad Abdulkader
* Created: 2008
*
* (C) Copyright 2008, Google Inc.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
#include <math.h>
#include <string>
#include <vector>
#include "cube_utils.h"
#include "char_set.h"
#include "unichar.h"
namespace tesseract {
// Nothing to set up or tear down: no state is managed by this class's
// constructor or destructor.
CubeUtils::CubeUtils() {
}
CubeUtils::~CubeUtils() {
}
// convert a prob to a cost (-ve log prob); probabilities below MIN_PROB
// are clamped to the maximum cost MIN_PROB_COST
int CubeUtils::Prob2Cost(double prob_val) {
  return (prob_val < MIN_PROB)
      ? MIN_PROB_COST
      : static_cast<int>(-log(prob_val) * PROB2COST_SCALE);
}
// converts a cost back to a probability (inverse of Prob2Cost)
double CubeUtils::Cost2Prob(int cost) {
  double scaled_cost = -cost / PROB2COST_SCALE;
  return exp(scaled_cost);
}
// computes the length of a NULL terminated char_32 string
// (a NULL pointer counts as an empty string)
int CubeUtils::StrLen(const char_32 *char_32_ptr) {
  if (char_32_ptr == NULL) {
    return 0;
  }
  int len = 0;
  while (char_32_ptr[len] != 0) {
    ++len;
  }
  return len;
}
// compares two char_32 strings; returns a negative value, zero or a
// positive value like strcmp
int CubeUtils::StrCmp(const char_32 *str1, const char_32 *str2) {
  int idx = 0;
  // scan while both strings continue and characters agree
  while (str1[idx] != 0 && str2[idx] != 0) {
    if (str1[idx] != str2[idx]) {
      return str1[idx] - str2[idx];
    }
    ++idx;
  }
  // at least one string ended; the longer one compares greater
  if (str1[idx] != 0) {
    return 1;
  }
  return (str2[idx] != 0) ? -1 : 0;
}
// Duplicates a 32-bit char buffer; the caller owns the returned copy.
char_32 *CubeUtils::StrDup(const char_32 *str32) {
  int len = StrLen(str32);
  char_32 *copy = new char_32[len + 1];
  if (copy == NULL) {
    return NULL;
  }
  for (int i = 0; i < len; ++i) {
    copy[i] = str32[i];
  }
  copy[len] = 0;  // NULL-terminate
  return copy;
}
// creates a char samp from a specified portion of the image
// Returns NULL if the raw image data cannot be extracted (bad bounds or
// non-1bpp pix -- see GetImageData). The CharSamp is owned by the caller.
CharSamp *CubeUtils::CharSampleFromPix(Pix *pix, int left, int top,
                                       int wid, int hgt) {
  // get the raw img data from the image
  unsigned char *temp_buff = GetImageData(pix, left, top, wid, hgt);
  if (temp_buff == NULL) {
    return NULL;
  }
  // create a char samp from temp buffer
  CharSamp *char_samp = CharSamp::FromRawData(left, top, wid, hgt, temp_buff);
  // clean up temp buffer
  delete []temp_buff;
  return char_samp;
}
// create a B/W image from a char_sample
// A non-zero sample byte maps to pixel value 0 and a zero byte to 255 --
// the inverse of GetImageData()'s bit-to-byte mapping.
// NOTE(review): the pix is 1 bpp, so pixSetPixel presumably truncates the
// 255 to a single bit; confirm against the leptonica documentation.
Pix *CubeUtils::PixFromCharSample(CharSamp *char_samp) {
  // parameter check
  if (char_samp == NULL) {
    return NULL;
  }
  // get the raw data
  int stride = char_samp->Stride();
  int wid = char_samp->Width();
  int hgt = char_samp->Height();
  Pix *pix = pixCreate(wid, hgt, 1);
  if (pix == NULL) {
    return NULL;
  }
  // copy the contents
  unsigned char *line = char_samp->RawData();
  for (int y = 0; y < hgt ; y++, line += stride) {
    for (int x = 0; x < wid; x++) {
      if (line[x] != 0) {
        pixSetPixel(pix, x, y, 0);
      } else {
        pixSetPixel(pix, x, y, 255);
      }
    }
  }
  return pix;
}
// creates a raw buffer from the specified location of the pix
// Returns a caller-owned wid*hgt byte buffer where an ON bit becomes 0 and
// an OFF bit becomes 255; returns NULL for out-of-range bounds or for a
// pix whose depth is not 1 bpp.
unsigned char *CubeUtils::GetImageData(Pix *pix, int left, int top,
                                       int wid, int hgt) {
  // skip invalid dimensions
  if (left < 0 || top < 0 || wid < 0 || hgt < 0 ||
      (left + wid) > pix->w || (top + hgt) > pix->h ||
      pix->d != 1) {
    return NULL;
  }
  // copy the char img to a temp buffer
  unsigned char *temp_buff = new unsigned char[wid * hgt];
  if (temp_buff == NULL) {
    return NULL;
  }
  l_int32 w;
  l_int32 h;
  l_int32 d;
  l_int32 wpl;
  l_uint32 *line;
  l_uint32 *data;
  pixGetDimensions(pix, &w, &h, &d);
  wpl = pixGetWpl(pix);
  data = pixGetData(pix);
  // wpl is the words-per-line stride; start at the requested top row
  line = data + (top * wpl);
  for (int y = 0, off = 0; y < hgt ; y++) {
    for (int x = 0; x < wid; x++, off++) {
      temp_buff[off] = GET_DATA_BIT(line, x + left) ? 0 : 255;
    }
    line += wpl;
  }
  return temp_buff;
}
// read file contents to a string
// Returns true only if the whole file was read. *str is cleared first and
// stays empty on failure (including files whose reported size is < 1).
bool CubeUtils::ReadFileToString(const string &file_name, string *str) {
  str->clear();
  FILE *fp = fopen(file_name.c_str(), "rb");
  if (fp == NULL) {
    return false;
  }
  // get the size of the file by seeking to its end
  fseek(fp, 0, SEEK_END);
  int file_size = ftell(fp);
  if (file_size < 1) {
    fclose(fp);
    return false;
  }
  // adjust string size
  str->reserve(file_size);
  // read the contents
  rewind(fp);
  char *buff = new char[file_size];
  if (buff == NULL) {
    fclose(fp);
    return false;
  }
  int read_bytes = fread(buff, 1, static_cast<int>(file_size), fp);
  // append only on a complete read
  if (read_bytes == file_size) {
    str->append(buff, file_size);
  }
  delete []buff;
  fclose(fp);
  return (read_bytes == file_size);
}
// splits a string into vectors based on specified delimiters
void CubeUtils::SplitStringUsing(const string &str,
const string &delims,
vector<string> *str_vec) {
// Optimize the common case where delims is a single character.
if (delims[0] != '\0' && delims[1] == '\0') {
char c = delims[0];
const char* p = str.data();
const char* end = p + str.size();
while (p != end) {
if (*p == c) {
++p;
} else {
const char* start = p;
while (++p != end && *p != c);
str_vec->push_back(string(start, p - start));
}
}
return;
}
string::size_type begin_index, end_index;
begin_index = str.find_first_not_of(delims);
while (begin_index != string::npos) {
end_index = str.find_first_of(delims, begin_index);
if (end_index == string::npos) {
str_vec->push_back(str.substr(begin_index));
return;
}
str_vec->push_back(str.substr(begin_index, (end_index - begin_index)));
begin_index = str.find_first_not_of(delims, end_index);
}
}
// UTF-8 to UTF-32 conversion function
// Appends one char_32 per valid UTF-8 sequence. Invalid sequences are now
// skipped one byte at a time: utf8_step() returns 0 for an invalid step,
// which previously left the loop counter unchanged and hung the loop
// forever on malformed input.
void CubeUtils::UTF8ToUTF32(const char *utf8_str, string_32 *str32) {
  str32->clear();
  int len = strlen(utf8_str);
  int step = 0;
  for (int ch = 0; ch < len; ch += step) {
    step = UNICHAR::utf8_step(utf8_str + ch);
    if (step > 0) {
      UNICHAR uni_ch(utf8_str + ch, step);
      (*str32) += uni_ch.first_uni();
    } else {
      // advance past one byte of an invalid sequence instead of looping
      step = 1;
    }
  }
}
// UTF-32 to UTF-8 conversion function
// Appends the UTF-8 form of each char_32 to *str (cleared first).
void CubeUtils::UTF32ToUTF8(const char_32 *utf32_str, string *str) {
  str->clear();
  for (const char_32 *ch_32 = utf32_str; (*ch_32) != 0; ch_32++) {
    UNICHAR uni_ch((*ch_32));
    char *utf8 = uni_ch.utf8_str();
    if (utf8 != NULL) {
      (*str) += utf8;
      // utf8_str() returns a newly allocated buffer; free after appending
      delete []utf8;
    }
  }
}
// Returns true if str32 is "case invariant": either all one case, or
// capitalized (first letter upper-case, remaining letters lower-case).
// Uses the UNICHARSET when a char_set is supplied; otherwise falls back to
// the C-locale <cctype> classifiers on the raw char_32 values.
bool CubeUtils::IsCaseInvariant(const char_32 *str32, CharSet *char_set) {
  bool all_one_case = true;
  // Must start false: it is only promoted to true when the first character
  // is upper-case. (It was previously read uninitialized at the return
  // whenever the first character was not upper-case -- undefined behavior.)
  bool capitalized = false;
  bool prev_upper;
  bool prev_lower;
  bool first_upper;
  bool first_lower;
  bool cur_upper;
  bool cur_lower;
  if (!char_set) {
    // If cube char_set is missing, use C-locale-dependent functions
    // on UTF8 characters to determine case properties.
    first_upper = isupper(str32[0]);
    first_lower = islower(str32[0]);
    if (first_upper)
      capitalized = true;
    prev_upper = first_upper;
    prev_lower = first_lower;
    for (int c = 1; str32[c] != 0; ++c) {
      cur_upper = isupper(str32[c]);
      cur_lower = islower(str32[c]);
      // a case flip in either direction breaks "all one case"
      if ((prev_upper && cur_lower) || (prev_lower && cur_upper))
        all_one_case = false;
      // any upper-case char after the first breaks "capitalized"
      if (cur_upper)
        capitalized = false;
      prev_upper = cur_upper;
      prev_lower = cur_lower;
    }
  } else {
    UNICHARSET *unicharset = char_set->InternalUnicharset();
    // Use UNICHARSET functions to determine case properties
    first_upper = unicharset->get_isupper(char_set->ClassID(str32[0]));
    first_lower = unicharset->get_islower(char_set->ClassID(str32[0]));
    if (first_upper)
      capitalized = true;
    prev_upper = first_upper;
    prev_lower = first_lower;
    for (int c = 1; c < StrLen(str32); ++c) {
      cur_upper = unicharset->get_isupper(char_set->ClassID(str32[c]));
      cur_lower = unicharset->get_islower(char_set->ClassID(str32[c]));
      if ((prev_upper && cur_lower) || (prev_lower && cur_upper))
        all_one_case = false;
      if (cur_upper)
        capitalized = false;
      prev_upper = cur_upper;
      prev_lower = cur_lower;
    }
  }
  return all_one_case || capitalized;
}
// Returns a newly allocated lower-case copy of str32 (caller owns it), or
// NULL on failure: missing char_set, an INVALID_UNICHAR_ID character, or a
// character whose lower-case form is not exactly one character.
char_32 *CubeUtils::ToLower(const char_32 *str32, CharSet *char_set) {
  if (!char_set) {
    return NULL;
  }
  UNICHARSET *unicharset = char_set->InternalUnicharset();
  int len = StrLen(str32);
  char_32 *lower = new char_32[len + 1];
  if (!lower)
    return NULL;
  for (int i = 0; i < len; ++i) {
    char_32 ch = str32[i];
    if (ch == INVALID_UNICHAR_ID) {
      delete [] lower;
      return NULL;
    }
    // convert upper-case characters to lower-case
    if (unicharset->get_isupper(char_set->ClassID(ch))) {
      UNICHAR_ID uid_lower = unicharset->get_other_case(char_set->ClassID(ch));
      const char_32 *str32_lower = char_set->ClassString(uid_lower);
      // expect lower-case version of character to be a single character
      if (!str32_lower || StrLen(str32_lower) != 1) {
        delete [] lower;
        return NULL;
      }
      lower[i] = str32_lower[0];
    } else {
      // non-upper characters are copied through unchanged
      lower[i] = ch;
    }
  }
  lower[len] = 0;
  return lower;
}
// Returns a newly allocated upper-case copy of str32 (caller owns it), or
// NULL on failure: missing char_set, an INVALID_UNICHAR_ID character, or a
// character whose upper-case form is not exactly one character.
char_32 *CubeUtils::ToUpper(const char_32 *str32, CharSet *char_set) {
  if (!char_set) {
    return NULL;
  }
  UNICHARSET *unicharset = char_set->InternalUnicharset();
  int len = StrLen(str32);
  char_32 *upper = new char_32[len + 1];
  if (!upper)
    return NULL;
  for (int i = 0; i < len; ++i) {
    char_32 ch = str32[i];
    if (ch == INVALID_UNICHAR_ID) {
      delete [] upper;
      return NULL;
    }
    // convert lower-case characters to upper-case
    if (unicharset->get_islower(char_set->ClassID(ch))) {
      UNICHAR_ID uid_upper = unicharset->get_other_case(char_set->ClassID(ch));
      const char_32 *str32_upper = char_set->ClassString(uid_upper);
      // expect upper-case version of character to be a single character
      if (!str32_upper || StrLen(str32_upper) != 1) {
        delete [] upper;
        return NULL;
      }
      upper[i] = str32_upper[0];
    } else {
      // non-lower characters are copied through unchanged
      upper[i] = ch;
    }
  }
  upper[len] = 0;
  return upper;
}
} // namespace tesseract
| 1080228-arabicocr11 | cube/cube_utils.cpp | C++ | asf20 | 10,790 |
/**********************************************************************
* File: word_list_lang_model.h
* Description: Declaration of the Word List Language Model Class
* Author: Ahmad Abdulkader
* Created: 2008
*
* (C) Copyright 2008, Google Inc.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
// The WordListLangModel class abstracts a language model that is based on
// a list of words. It inherits from the LangModel abstract class
// Besides providing the methods inherited from the LangModel abstract class,
// the class provided methods to add new strings to the Language Model:
// AddString & AddString32
#ifndef WORD_LIST_LANG_MODEL_H
#define WORD_LIST_LANG_MODEL_H
#include <vector>
#include "cube_reco_context.h"
#include "lang_model.h"
#include "tess_lang_mod_edge.h"
namespace tesseract {
class Trie;
// A language model backed by an explicit list of words stored in a Trie.
// Words are added at runtime through AddString()/AddString32().
class WordListLangModel : public LangModel {
 public:
  explicit WordListLangModel(CubeRecoContext *cntxt);
  ~WordListLangModel();
  // Returns an edge pointer to the Root
  LangModEdge *Root();
  // Returns the edges that fan-out of the specified edge and their count
  LangModEdge **GetEdges(CharAltList *alt_list,
                         LangModEdge *edge,
                         int *edge_cnt);
  // Returns whether a sequence of 32-bit characters is valid within this
  // language model or net. An EndOfWord flag is specified. If true, the
  // sequence has to end on a valid word. The function also optionally
  // returns the list of language model edges traversed to parse the string
  bool IsValidSequence(const char_32 *sequence,
                       bool eow_flag,
                       LangModEdge **edges);
  // Punctuation/digit queries are not implemented for word-list models;
  // they conservatively report false.
  bool IsLeadingPunc(char_32 ch) { return false; }  // not yet implemented
  bool IsTrailingPunc(char_32 ch) { return false; }  // not yet implemented
  bool IsDigit(char_32 ch) { return false; }  // not yet implemented
  // Adds a new UTF-8 string to the language model
  bool AddString(const char *char_ptr);
  // Adds a new UTF-32 string to the language model
  bool AddString32(const char_32 *char_32_ptr);
  // Compute all the variants of a 32-bit string in terms of the class-ids.
  // This is needed for languages that have ligatures. A word can then have
  // more than one spelling in terms of the class-ids.
  static void WordVariants(const CharSet &char_set, const UNICHARSET *uchset,
                           string_32 str32,
                           vector<WERD_CHOICE *> *word_variants);
 private:
  // constants needed to configure the language model
  static const int kMaxEdge = 512;
  CubeRecoContext *cntxt_;  // recognition context, supplied at construction
  Trie *dawg_;              // word-list trie; presumably owned and freed in
                            // Cleanup() -- confirm in the .cpp
  bool init_;               // set once Init() has succeeded
  // Initialize the language model
  bool Init();
  // Cleanup
  void Cleanup();
  // Recursive helper function for WordVariants().
  static void WordVariants(
      const CharSet &char_set,
      string_32 prefix_str32, WERD_CHOICE *word_so_far,
      string_32 str32,
      vector<WERD_CHOICE *> *word_variants);
};
} // tesseract
#endif // WORD_LIST_LANG_MODEL_H
| 1080228-arabicocr11 | cube/word_list_lang_model.h | C++ | asf20 | 3,577 |
/**********************************************************************
* File: bmp_8.cpp
* Description: Implementation of an 8-bit Bitmap class
* Author: Ahmad Abdulkader
* Created: 2007
*
* (C) Copyright 2008, Google Inc.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
#include <stdlib.h>
#include <math.h>
#include <cstring>
#include <algorithm>
#include "bmp_8.h"
#include "con_comp.h"
#include "platform.h"
#ifdef USE_STD_NAMESPACE
using std::min;
using std::max;
#endif
namespace tesseract {
// Number of discrete slant angles probed during deslanting, derived from
// the [kMinDeslantAngle, kMaxDeslantAngle] range and kDeslantAngleDelta step.
const int Bmp8::kDeslantAngleCount = (1 + static_cast<int>(0.5f +
    (kMaxDeslantAngle - kMinDeslantAngle) / kDeslantAngleDelta));
// Lazily built lookup table of tan() values, one per candidate slant angle.
// Shared by all Bmp8 instances; built by ComputeTanTable().
float *Bmp8::tan_table_ = NULL;
// Constructs a wid x hgt bitmap and allocates its pixel buffer via
// CreateBmpBuffer() (rows padded to a 4-byte stride).
Bmp8::Bmp8(unsigned short wid, unsigned short hgt)
    : wid_(wid)
    , hgt_(hgt) {
  line_buff_ = CreateBmpBuffer();
}
// Releases the pixel buffer.
Bmp8::~Bmp8() {
  FreeBmpBuffer(line_buff_);
}
// Releases a buffer created by CreateBmpBuffer(): the contiguous pixel
// block hangs off the first row pointer, followed by the pointer array
// itself. Safe to call with NULL.
void Bmp8::FreeBmpBuffer(unsigned char **buff) {
  if (buff == NULL) {
    return;
  }
  // delete[] on NULL is a no-op, so no guard is needed on the pixel block
  delete []buff[0];
  delete []buff;
}
// Releases a 32-bit working buffer created by the two-argument
// CreateBmpBuffer() overload. Safe to call with NULL.
void Bmp8::FreeBmpBuffer(unsigned int **buff) {
  if (buff == NULL) {
    return;
  }
  // delete[] on NULL is a no-op, so no guard is needed on the data block
  delete []buff[0];
  delete []buff;
}
// Allocates the 8-bit image buffer as an array of hgt_ row pointers into
// one contiguous pixel block, each row padded to a 4-byte-aligned stride.
// Every pixel is initialized to init_val. Returns NULL on invalid
// dimensions or allocation failure.
unsigned char **Bmp8::CreateBmpBuffer(unsigned char init_val) {
  // Check valid sizes
  if (!hgt_ || !wid_)
    return NULL;
  // compute stride (align rows on 4 byte boundaries)
  stride_ = ((wid_ % 4) == 0) ? wid_ : (4 * (1 + (wid_ / 4)));
  // One pointer per row. (The original allocated hgt_ * sizeof(ptr)
  // entries, over-allocating the pointer array by a factor of 4/8.)
  unsigned char **buff = new unsigned char *[hgt_];
  if (buff == NULL) {
    return NULL;
  }
  // allocate and initialize one contiguous block for all rows
  buff[0] = new unsigned char[stride_ * hgt_];
  if (buff[0] == NULL) {
    // don't leak the row-pointer array on failure
    delete []buff;
    return NULL;
  }
  memset(buff[0], init_val, stride_ * hgt_ * sizeof(*buff[0]));
  // point each subsequent row into the contiguous block
  for (int y = 1; y < hgt_; y++) {
    buff[y] = buff[y - 1] + stride_;
  }
  return buff;
}
// Allocates a wid x hgt 32-bit working buffer (used e.g. by FindConComps
// and ScaleFrom) as an array of hgt row pointers into one contiguous
// block. Each byte of the block is set to init_val (memset semantics,
// matching the original initialization). Returns NULL on failure.
unsigned int **Bmp8::CreateBmpBuffer(int wid, int hgt,
                                     unsigned char init_val) {
  // One pointer per row. (The original allocated hgt * sizeof(ptr)
  // entries and wid * hgt * sizeof(int) ints, over-allocating both.)
  unsigned int **buff = new unsigned int *[hgt];
  if (buff == NULL) {
    return NULL;
  }
  buff[0] = new unsigned int[wid * hgt];
  if (buff[0] == NULL) {
    // don't leak the row-pointer array on failure
    delete []buff;
    return NULL;
  }
  memset(buff[0], init_val, wid * hgt * sizeof(*buff[0]));
  // point each subsequent row into the contiguous block
  for (int y = 1; y < hgt; y++) {
    buff[y] = buff[y - 1] + wid;
  }
  return buff;
}
// Resets every pixel to the background value (0xff).
// Returns false if the bitmap has no buffer.
bool Bmp8::Clear() {
  if (line_buff_ == NULL) {
    return false;
  }
  // rows are laid out contiguously at stride_ bytes each, so wiping them
  // row by row covers exactly the same memory as one big memset
  for (int y = 0; y < hgt_; y++) {
    memset(line_buff_[y], 0xff, stride_ * sizeof(*line_buff_[y]));
  }
  return true;
}
// Loads the bitmap from a "char dump" stream read through a CachedFile:
// a 32-bit magic number, 16-bit width and height, a 32-bit byte count,
// then 3 bytes (R, G, B) per pixel. Only gray-scale dumps (R == G == B)
// are accepted. Returns false on any read or format error.
bool Bmp8::LoadFromCharDumpFile(CachedFile *fp) {
  unsigned short wid;
  unsigned short hgt;
  unsigned short x;
  unsigned short y;
  int buf_size;
  int pix;
  int pix_cnt;
  unsigned int val32;
  unsigned char *buff;
  // read and check 32 bit marker
  if (fp->Read(&val32, sizeof(val32)) != sizeof(val32)) {
    return false;
  }
  if (val32 != kMagicNumber) {
    return false;
  }
  // read wid and hgt
  if (fp->Read(&wid, sizeof(wid)) != sizeof(wid)) {
    return false;
  }
  if (fp->Read(&hgt, sizeof(hgt)) != sizeof(hgt)) {
    return false;
  }
  // read buf size
  if (fp->Read(&buf_size, sizeof(buf_size)) != sizeof(buf_size)) {
    return false;
  }
  // validate buf size: for now, only 3 channel (RGB) is supported
  pix_cnt = wid * hgt;
  if (buf_size != (3 * pix_cnt)) {
    return false;
  }
  // alloc memory & read the 3 channel buffer
  buff = new unsigned char[buf_size];
  if (buff == NULL) {
    return false;
  }
  if (fp->Read(buff, buf_size) != buf_size) {
    delete []buff;
    return false;
  }
  // create internal buffers sized to the dump's dimensions
  wid_ = wid;
  hgt_ = hgt;
  line_buff_ = CreateBmpBuffer();
  if (line_buff_ == NULL) {
    delete []buff;
    return false;
  }
  // copy the data, taking the R byte of each RGB triplet
  for (y = 0, pix = 0; y < hgt_; y++) {
    for (x = 0; x < wid_; x++, pix += 3) {
      // for now we only support gray scale,
      // so we expect R = G = B; if this is not the case, bail out
      if (buff[pix] != buff[pix + 1] || buff[pix] != buff[pix + 2]) {
        delete []buff;
        return false;
      }
      line_buff_[y][x] = buff[pix];
    }
  }
  // delete temp buffer
  delete[]buff;
  return true;
}
// Factory: creates a Bmp8 and loads it from a character-dump CachedFile.
// Returns NULL (freeing any partial object) on failure; the caller owns
// the returned bitmap.
Bmp8 *Bmp8::FromCharDumpFile(CachedFile *fp) {
  Bmp8 *bmp_obj = new Bmp8(0, 0);
  if (bmp_obj != NULL && bmp_obj->LoadFromCharDumpFile(fp)) {
    return bmp_obj;
  }
  // delete on NULL is a no-op, so this covers both failure modes
  delete bmp_obj;
  return NULL;
}
// Loads the bitmap from a "char dump" stdio stream. Identical format to
// the CachedFile overload: 32-bit magic, 16-bit width/height, 32-bit byte
// count, then 3 bytes (R, G, B) per pixel; only gray-scale (R == G == B)
// is accepted. Returns false on any read or format error.
bool Bmp8::LoadFromCharDumpFile(FILE *fp) {
  unsigned short wid;
  unsigned short hgt;
  unsigned short x;
  unsigned short y;
  int buf_size;
  int pix;
  int pix_cnt;
  unsigned int val32;
  unsigned char *buff;
  // read and check 32 bit marker
  if (fread(&val32, 1, sizeof(val32), fp) != sizeof(val32)) {
    return false;
  }
  if (val32 != kMagicNumber) {
    return false;
  }
  // read wid and hgt
  if (fread(&wid, 1, sizeof(wid), fp) != sizeof(wid)) {
    return false;
  }
  if (fread(&hgt, 1, sizeof(hgt), fp) != sizeof(hgt)) {
    return false;
  }
  // read buf size
  if (fread(&buf_size, 1, sizeof(buf_size), fp) != sizeof(buf_size)) {
    return false;
  }
  // validate buf size: for now, only 3 channel (RGB) is supported
  pix_cnt = wid * hgt;
  if (buf_size != (3 * pix_cnt)) {
    return false;
  }
  // alloc memory & read the 3 channel buffer
  buff = new unsigned char[buf_size];
  if (buff == NULL) {
    return false;
  }
  if (fread(buff, 1, buf_size, fp) != buf_size) {
    delete []buff;
    return false;
  }
  // create internal buffers sized to the dump's dimensions
  wid_ = wid;
  hgt_ = hgt;
  line_buff_ = CreateBmpBuffer();
  if (line_buff_ == NULL) {
    delete []buff;
    return false;
  }
  // copy the data, taking the R byte of each RGB triplet
  for (y = 0, pix = 0; y < hgt_; y++) {
    for (x = 0; x < wid_; x++, pix += 3) {
      // for now we only support gray scale,
      // so we expect R = G = B; if this is not the case, bail out
      if (buff[pix] != buff[pix + 1] || buff[pix] != buff[pix + 2]) {
        delete []buff;
        return false;
      }
      line_buff_[y][x] = buff[pix];
    }
  }
  // delete temp buffer
  delete[]buff;
  return true;
}
// Factory: creates a Bmp8 and loads it from a character-dump stdio stream.
// Returns NULL (freeing any partial object) on failure; the caller owns
// the returned bitmap.
Bmp8 *Bmp8::FromCharDumpFile(FILE *fp) {
  Bmp8 *bmp_obj = new Bmp8(0, 0);
  if (bmp_obj != NULL && bmp_obj->LoadFromCharDumpFile(fp)) {
    return bmp_obj;
  }
  // delete on NULL is a no-op, so this covers both failure modes
  delete bmp_obj;
  return NULL;
}
// True iff every pixel in column x is background (0xff).
bool Bmp8::IsBlankColumn(int x) const {
  for (int row = 0; row < hgt_; row++) {
    if (line_buff_[row][x] == 0xff) {
      continue;
    }
    return false;  // found a foreground pixel
  }
  return true;
}
// True iff every pixel in row y is background (0xff).
bool Bmp8::IsBlankRow(int y) const {
  for (int col = 0; col < wid_; col++) {
    if (line_buff_[y][col] == 0xff) {
      continue;
    }
    return false;  // found a foreground pixel
  }
  return true;
}
// Computes the tight bounding box of the foreground (non-0xff) pixels.
// Outputs the top-left corner in (*xst, *yst) and its size in (*wid,
// *hgt). The pixel data itself is not modified. For an entirely blank
// bitmap the resulting width/height collapse to zero.
void Bmp8::Crop(int *xst, int *yst, int *wid, int *hgt) {
  (*xst) = 0;
  (*yst) = 0;
  int xend = wid_ - 1;
  int yend = hgt_ - 1;
  // advance the left edge past blank columns
  while ((*xst) < (wid_ - 1) && (*xst) <= xend) {
    // column is not empty
    if (!IsBlankColumn((*xst))) {
      break;
    }
    (*xst)++;
  }
  // retreat the right edge past blank columns
  while (xend > 0 && xend >= (*xst)) {
    // column is not empty
    if (!IsBlankColumn(xend)) {
      break;
    }
    xend--;
  }
  // advance the top edge past blank rows
  while ((*yst) < (hgt_ - 1) && (*yst) <= yend) {
    // row is not empty
    if (!IsBlankRow((*yst))) {
      break;
    }
    (*yst)++;
  }
  // retreat the bottom edge past blank rows
  while (yend > 0 && yend >= (*yst)) {
    // row is not empty
    if (!IsBlankRow(yend)) {
      break;
    }
    yend--;
  }
  (*wid) = xend - (*xst) + 1;
  (*hgt) = yend - (*yst) + 1;
}
// Renders a scaled copy of bmp into this (already-sized) bitmap. When
// isotropic is true the aspect ratio is preserved and the result is
// centered in this bitmap's box. Scaling up samples the source per
// destination pixel; scaling down accumulates source pixels per
// destination cell and averages them.
bool Bmp8::ScaleFrom(Bmp8 *bmp, bool isotropic) {
  int x_num;
  int x_denom;
  int y_num;
  int y_denom;
  int xoff;
  int yoff;
  int xsrc;
  int ysrc;
  int xdest;
  int ydest;
  int xst_src = 0;
  int yst_src = 0;
  int xend_src = bmp->wid_ - 1;
  int yend_src = bmp->hgt_ - 1;
  int wid_src;
  int hgt_src;
  // src dimensions
  wid_src = xend_src - xst_src + 1,
  hgt_src = yend_src - yst_src + 1;
  // scale to maintain aspect ratio if required: pick the tighter axis
  if (isotropic) {
    if ((wid_ * hgt_src) > (hgt_ * wid_src)) {
      x_num = y_num = hgt_;
      x_denom = y_denom = hgt_src;
    } else {
      x_num = y_num = wid_;
      x_denom = y_denom = wid_src;
    }
  } else {
    x_num = wid_;
    y_num = hgt_;
    x_denom = wid_src;
    y_denom = hgt_src;
  }
  // compute offsets needed to center the scaled image in this bmp
  xoff = (wid_ - ((x_num * wid_src) / x_denom)) / 2;
  yoff = (hgt_ - ((y_num * hgt_src) / y_denom)) / 2;
  // scale up: one source sample per destination pixel
  if (y_num > y_denom) {
    for (ydest = yoff; ydest < (hgt_ - yoff); ydest++) {
      // compute un-scaled y
      ysrc = static_cast<int>(0.5 + (1.0 * (ydest - yoff) *
          y_denom / y_num));
      if (ysrc < 0 || ysrc >= hgt_src) {
        continue;
      }
      for (xdest = xoff; xdest < (wid_ - xoff); xdest++) {
        // compute un-scaled x
        xsrc = static_cast<int>(0.5 + (1.0 * (xdest - xoff) *
            x_denom / x_num));
        if (xsrc < 0 || xsrc >= wid_src) {
          continue;
        }
        line_buff_[ydest][xdest] =
            bmp->line_buff_[ysrc + yst_src][xsrc + xst_src];
      }
    }
  } else {
    // or scale down
    // scaling down is a bit tricky: we'll accumulate pixels
    // and then compute the means
    unsigned int **dest_line_buff = CreateBmpBuffer(wid_, hgt_, 0),
        **dest_pix_cnt = CreateBmpBuffer(wid_, hgt_, 0);
    for (ysrc = 0; ysrc < hgt_src; ysrc++) {
      // compute scaled y
      ydest = yoff + static_cast<int>(0.5 + (1.0 * ysrc * y_num / y_denom));
      if (ydest < 0 || ydest >= hgt_) {
        continue;
      }
      for (xsrc = 0; xsrc < wid_src; xsrc++) {
        // compute scaled x
        xdest = xoff + static_cast<int>(0.5 + (1.0 * xsrc * x_num / x_denom));
        if (xdest < 0 || xdest >= wid_) {
          continue;
        }
        dest_line_buff[ydest][xdest] +=
            bmp->line_buff_[ysrc + yst_src][xsrc + xst_src];
        dest_pix_cnt[ydest][xdest]++;
      }
    }
    // average the accumulated values per destination pixel
    for (ydest = 0; ydest < hgt_; ydest++) {
      for (xdest = 0; xdest < wid_; xdest++) {
        if (dest_pix_cnt[ydest][xdest] > 0) {
          unsigned int pixval =
              dest_line_buff[ydest][xdest] / dest_pix_cnt[ydest][xdest];
          line_buff_[ydest][xdest] =
              (unsigned char) min((unsigned int)255, pixval);
        }
      }
    }
    // we no longer need these temp buffers
    FreeBmpBuffer(dest_line_buff);
    FreeBmpBuffer(dest_pix_cnt);
  }
  return true;
}
// Fills the bitmap from a packed wid_ x hgt_ gray-scale buffer: one byte
// per pixel, rows tightly packed with no padding. The buffer must hold at
// least wid_ * hgt_ bytes.
bool Bmp8::LoadFromRawData(unsigned char *data) {
  for (int y = 0; y < hgt_; y++) {
    // source rows are wid_ bytes apart; destination rows are stride-padded
    memcpy(line_buff_[y], data + (y * wid_), wid_ * sizeof(*data));
  }
  return true;
}
// Writes the bitmap to fp in "char dump" format (the inverse of
// LoadFromCharDumpFile): 32-bit magic, 16-bit width/height, 32-bit byte
// count, then each gray pixel replicated into an R, G, B triplet.
// Returns false on any write or allocation failure.
bool Bmp8::SaveBmp2CharDumpFile(FILE *fp) const {
  unsigned short wid;
  unsigned short hgt;
  unsigned short x;
  unsigned short y;
  int buf_size;
  int pix;
  int pix_cnt;
  unsigned int val32;
  unsigned char *buff;
  // write and check 32 bit marker
  val32 = kMagicNumber;
  if (fwrite(&val32, 1, sizeof(val32), fp) != sizeof(val32)) {
    return false;
  }
  // write wid and hgt
  wid = wid_;
  if (fwrite(&wid, 1, sizeof(wid), fp) != sizeof(wid)) {
    return false;
  }
  hgt = hgt_;
  if (fwrite(&hgt, 1, sizeof(hgt), fp) != sizeof(hgt)) {
    return false;
  }
  // write buf size (3 bytes per pixel)
  pix_cnt = wid * hgt;
  buf_size = 3 * pix_cnt;
  if (fwrite(&buf_size, 1, sizeof(buf_size), fp) != sizeof(buf_size)) {
    return false;
  }
  // alloc memory & write the 3 channel buffer
  buff = new unsigned char[buf_size];
  if (buff == NULL) {
    return false;
  }
  // replicate each gray value into R = G = B
  for (y = 0, pix = 0; y < hgt_; y++) {
    for (x = 0; x < wid_; x++, pix += 3) {
      buff[pix] =
          buff[pix + 1] =
          buff[pix + 2] = line_buff_[y][x];
    }
  }
  if (fwrite(buff, 1, buf_size, fp) != buf_size) {
    delete []buff;
    return false;
  }
  // delete temp buffer
  delete[]buff;
  return true;
}
// Copies the (x_st, y_st, wid, hgt) sub-rectangle of this bitmap into the
// top-left corner of bmp_dest, clipping the rectangle against this
// bitmap's bounds.
// NOTE(review): the destination is not bounds-checked here; callers must
// guarantee bmp_dest is at least wid x hgt -- confirm at call sites.
void Bmp8::Copy(int x_st, int y_st, int wid, int hgt, Bmp8 *bmp_dest) const {
  int x_end = min(x_st + wid, static_cast<int>(wid_)),
      y_end = min(y_st + hgt, static_cast<int>(hgt_));
  for (int y = y_st; y < y_end; y++) {
    for (int x = x_st; x < x_end; x++) {
      bmp_dest->line_buff_[y - y_st][x - x_st] =
          line_buff_[y][x];
    }
  }
}
// True iff the two bitmaps have the same dimensions and identical pixel
// contents (stride padding is not compared).
bool Bmp8::IsIdentical(Bmp8 *pBmp) const {
  if (wid_ != pBmp->wid_ || hgt_ != pBmp->hgt_) {
    return false;
  }
  int row = 0;
  while (row < hgt_ &&
         memcmp(line_buff_[row], pBmp->line_buff_[row], wid_) == 0) {
    row++;
  }
  // every row compared equal only if we reached the bottom
  return row == hgt_;
}
// Detects 8-connected components of foreground (non-0xff) pixels using a
// single raster scan with union-by-relabeling. Components with more than
// min_size points are returned in a newly allocated array (caller owns
// both the array and the ConComp objects) with their count in
// *concomp_cnt; smaller components are discarded. Returns NULL on error.
ConComp ** Bmp8::FindConComps(int *concomp_cnt, int min_size) const {
  (*concomp_cnt) = 0;
  // scratch label image: 0 = unlabeled, else 1-based component id
  unsigned int **out_bmp_array = CreateBmpBuffer(wid_, hgt_, 0);
  if (out_bmp_array == NULL) {
    fprintf(stderr, "Cube ERROR (Bmp8::FindConComps): could not allocate "
            "bitmap array\n");
    return NULL;
  }
  // list of connected components
  ConComp **concomp_array = NULL;
  int x;
  int y;
  int x_nbr;
  int y_nbr;
  int concomp_id;
  int alloc_concomp_cnt = 0;
  // neighbors to check: the four already-visited pixels in raster order
  const int nbr_cnt = 4;
  // relative coordinates of nbrs (NW, N, NE, W)
  int x_del[nbr_cnt] = {-1, 0, 1, -1},
      y_del[nbr_cnt] = {-1, -1, -1, 0};
  for (y = 0; y < hgt_; y++) {
    for (x = 0; x < wid_; x++) {
      // is this a foreground pix
      if (line_buff_[y][x] != 0xff) {
        int master_concomp_id = 0;
        ConComp *master_concomp = NULL;
        // checkout the nbrs
        for (int nbr = 0; nbr < nbr_cnt; nbr++) {
          x_nbr = x + x_del[nbr];
          y_nbr = y + y_del[nbr];
          if (x_nbr < 0 || y_nbr < 0 || x_nbr >= wid_ || y_nbr >= hgt_) {
            continue;
          }
          // is this nbr a foreground pix
          if (line_buff_[y_nbr][x_nbr] != 0xff) {
            // get its concomp ID
            concomp_id = out_bmp_array[y_nbr][x_nbr];
            // this should not happen: every visited foreground pixel
            // carries a valid 1-based label
            if (concomp_id < 1 || concomp_id > alloc_concomp_cnt) {
              fprintf(stderr, "Cube ERROR (Bmp8::FindConComps): illegal "
                      "connected component id: %d\n", concomp_id);
              FreeBmpBuffer(out_bmp_array);
              delete []concomp_array;
              return NULL;
            }
            // if we had previously found a component then merge the two
            // and delete the latest one
            if (master_concomp != NULL && concomp_id != master_concomp_id) {
              // relabel all the pts of the absorbed component
              ConCompPt *pt_ptr = concomp_array[concomp_id - 1]->Head();
              while (pt_ptr != NULL) {
                out_bmp_array[pt_ptr->y()][pt_ptr->x()] = master_concomp_id;
                pt_ptr = pt_ptr->Next();
              }
              // merge the two concomp
              if (!master_concomp->Merge(concomp_array[concomp_id - 1])) {
                fprintf(stderr, "Cube ERROR (Bmp8::FindConComps): could not "
                        "merge connected component: %d\n", concomp_id);
                FreeBmpBuffer(out_bmp_array);
                delete []concomp_array;
                return NULL;
              }
              // delete the merged concomp
              delete concomp_array[concomp_id - 1];
              concomp_array[concomp_id - 1] = NULL;
            } else {
              // adopt this neighbor's component as the master
              // NOTE(review): this branch also runs for later neighbors
              // that share master_concomp_id, calling Add(x, y) again for
              // the same pixel -- looks like it can add duplicate points;
              // confirm whether ConComp::Add de-duplicates.
              master_concomp_id = concomp_id;
              master_concomp = concomp_array[master_concomp_id - 1];
              out_bmp_array[y][x] = master_concomp_id;
              if (!master_concomp->Add(x, y)) {
                fprintf(stderr, "Cube ERROR (Bmp8::FindConComps): could not "
                        "add connected component (%d,%d)\n", x, y);
                FreeBmpBuffer(out_bmp_array);
                delete []concomp_array;
                return NULL;
              }
            }
          }  // foreground nbr
        }  // nbrs
        // if there was no foreground pix, then create a new concomp
        if (master_concomp == NULL) {
          master_concomp = new ConComp();
          if (master_concomp == NULL || master_concomp->Add(x, y) == false) {
            fprintf(stderr, "Cube ERROR (Bmp8::FindConComps): could not "
                    "allocate or add a connected component\n");
            FreeBmpBuffer(out_bmp_array);
            delete []concomp_array;
            return NULL;
          }
          // extend the list of concomps if needed (grown in chunks)
          if ((alloc_concomp_cnt % kConCompAllocChunk) == 0) {
            ConComp **temp_con_comp =
                new ConComp *[alloc_concomp_cnt + kConCompAllocChunk];
            if (temp_con_comp == NULL) {
              fprintf(stderr, "Cube ERROR (Bmp8::FindConComps): could not "
                      "extend array of connected components\n");
              FreeBmpBuffer(out_bmp_array);
              delete []concomp_array;
              return NULL;
            }
            if (alloc_concomp_cnt > 0) {
              memcpy(temp_con_comp, concomp_array,
                     alloc_concomp_cnt * sizeof(*concomp_array));
              delete []concomp_array;
            }
            concomp_array = temp_con_comp;
          }
          concomp_array[alloc_concomp_cnt++] = master_concomp;
          out_bmp_array[y][x] = alloc_concomp_cnt;
        }
      }  // foreground pix
    }  // x
  }  // y
  // free the concomp label bmp
  FreeBmpBuffer(out_bmp_array);
  if (alloc_concomp_cnt > 0 && concomp_array != NULL) {
    // compact the array: keep only surviving components above min_size,
    // renumbering their IDs sequentially
    (*concomp_cnt) = 0;
    ConComp *concomp = NULL;
    for (int concomp_idx = 0; concomp_idx < alloc_concomp_cnt; concomp_idx++) {
      concomp = concomp_array[concomp_idx];
      // found a concomp
      if (concomp != NULL) {
        // add the connected component if big enough
        if (concomp->PtCnt() > min_size) {
          concomp->SetLeftMost(true);
          concomp->SetRightMost(true);
          concomp->SetID((*concomp_cnt));
          concomp_array[(*concomp_cnt)++] = concomp;
        } else {
          delete concomp;
        }
      }
    }
  }
  return concomp_array;
}
// Builds (or rebuilds) the shared tan() lookup table, one entry per
// candidate deslant angle between kMinDeslantAngle and kMaxDeslantAngle.
// Returns false on allocation failure.
bool Bmp8::ComputeTanTable() {
  // release any previous table before rebuilding
  delete []tan_table_;
  tan_table_ = new float[kDeslantAngleCount];
  if (tan_table_ == NULL) {
    return false;
  }
  float angle_deg = kMinDeslantAngle;
  for (int angle_idx = 0; angle_idx < kDeslantAngleCount; angle_idx++) {
    // angles are in degrees; convert to radians for tan()
    tan_table_[angle_idx] = tan(angle_deg * M_PI / 180.0f);
    angle_deg += kDeslantAngleDelta;
  }
  return true;
}
// Removes italic-style slant in place: for each candidate angle, shears
// the foreground pixels horizontally and builds a column histogram; the
// angle whose histogram has the lowest entropy (sharpest columns) wins,
// and the bitmap is re-rendered sheared by that angle (wid_ grows to
// des_wid). Narrow bitmaps (wid_ < 2 * hgt_) are left unchanged.
bool Bmp8::Deslant() {
  int x;
  int y;
  int des_x;
  int des_y;
  int ang_idx;
  int best_ang;
  int min_des_x;
  int max_des_x;
  int des_wid;
  // only do deslanting if bitmap is wide enough
  // otherwise its slant estimate might not be reliable
  if (wid_ < (hgt_ * 2)) {
    return true;
  }
  // compute tan table if needed
  if (tan_table_ == NULL && !ComputeTanTable()) {
    return false;
  }
  // compute min and max values for x after deslant
  min_des_x = static_cast<int>(0.5f + (hgt_ - 1) * tan_table_[0]);
  max_des_x = (wid_ - 1) +
      static_cast<int>(0.5f + (hgt_ - 1) * tan_table_[kDeslantAngleCount - 1]);
  des_wid = max_des_x - min_des_x + 1;
  // alloc memory for histograms, one per candidate angle
  // NOTE(review): on a row-allocation failure only the pointer array is
  // freed; rows allocated in earlier iterations leak -- confirm and fix.
  int **angle_hist = new int*[kDeslantAngleCount];
  for (ang_idx = 0; ang_idx < kDeslantAngleCount; ang_idx++) {
    angle_hist[ang_idx] = new int[des_wid];
    if (angle_hist[ang_idx] == NULL) {
      delete[] angle_hist;
      return false;
    }
    memset(angle_hist[ang_idx], 0, des_wid * sizeof(*angle_hist[ang_idx]));
  }
  // compute histograms: each foreground pixel stamps its sheared x into
  // every angle's histogram
  for (y = 0; y < hgt_; y++) {
    for (x = 0; x < wid_; x++) {
      // find a non-bkgrnd pixel
      if (line_buff_[y][x] != 0xff) {
        des_y = hgt_ - y - 1;
        // stamp all histograms
        for (ang_idx = 0; ang_idx < kDeslantAngleCount; ang_idx++) {
          des_x = x + static_cast<int>(0.5f + (des_y * tan_table_[ang_idx]));
          if (des_x >= min_des_x && des_x <= max_des_x) {
            angle_hist[ang_idx][des_x - min_des_x]++;
          }
        }
      }
    }
  }
  // find the histogram with the lowest entropy
  float entropy;
  double best_entropy = 0.0f;
  double norm_val;
  best_ang = -1;
  for (ang_idx = 0; ang_idx < kDeslantAngleCount; ang_idx++) {
    entropy = 0.0f;
    for (x = min_des_x; x <= max_des_x; x++) {
      if (angle_hist[ang_idx][x - min_des_x] > 0) {
        norm_val = (1.0f * angle_hist[ang_idx][x - min_des_x] / hgt_);
        entropy += (-1.0f * norm_val * log(norm_val));
      }
    }
    if (best_ang == -1 || entropy < best_entropy) {
      best_ang = ang_idx;
      best_entropy = entropy;
    }
    // free the histogram
    delete[] angle_hist[ang_idx];
  }
  delete[] angle_hist;
  // deslant: re-render the foreground sheared by the winning angle
  if (best_ang != -1) {
    unsigned char **dest_lines;
    int old_wid = wid_;
    // create a new, wider buffer
    wid_ = des_wid;
    dest_lines = CreateBmpBuffer();
    if (dest_lines == NULL) {
      return false;
    }
    for (y = 0; y < hgt_; y++) {
      for (x = 0; x < old_wid; x++) {
        // find a non-bkgrnd pixel
        if (line_buff_[y][x] != 0xff) {
          des_y = hgt_ - y - 1;
          // compute new pos
          des_x = x + static_cast<int>(0.5f + (des_y * tan_table_[best_ang]));
          dest_lines[y][des_x - min_des_x] = 0;
        }
      }
    }
    // free old buffer
    FreeBmpBuffer(line_buff_);
    line_buff_ = dest_lines;
  }
  return true;
}
// Loads dimensions & contents of the bitmap from an in-memory char dump
// (same layout as the file-based overloads) and advances *raw_data_ptr
// past the consumed bytes on success. Only gray-scale (R == G == B) data
// is accepted.
// NOTE(review): no length is supplied, so malformed input can read past
// the buffer -- callers must guarantee a complete dump; confirm.
bool Bmp8::LoadFromCharDumpFile(unsigned char **raw_data_ptr) {
  unsigned short wid;
  unsigned short hgt;
  unsigned short x;
  unsigned short y;
  unsigned char *raw_data = (*raw_data_ptr);
  int buf_size;
  int pix;
  unsigned int val32;
  // read and check 32 bit marker
  memcpy(&val32, raw_data, sizeof(val32));
  raw_data += sizeof(val32);
  if (val32 != kMagicNumber) {
    return false;
  }
  // read wid and hgt
  memcpy(&wid, raw_data, sizeof(wid));
  raw_data += sizeof(wid);
  memcpy(&hgt, raw_data, sizeof(hgt));
  raw_data += sizeof(hgt);
  // read buf size
  memcpy(&buf_size, raw_data, sizeof(buf_size));
  raw_data += sizeof(buf_size);
  // validate buf size: for now, only 3 channel (RGB) is supported
  if (buf_size != (3 * wid * hgt)) {
    return false;
  }
  wid_ = wid;
  hgt_ = hgt;
  line_buff_ = CreateBmpBuffer();
  if (line_buff_ == NULL) {
    return false;
  }
  // copy the data, taking the R byte of each RGB triplet
  for (y = 0, pix = 0; y < hgt_; y++) {
    for (x = 0; x < wid_; x++, pix += 3) {
      // for now we only support gray scale,
      // so we expect R = G = B; if this is not the case, bail out
      if (raw_data[pix] != raw_data[pix + 1] ||
          raw_data[pix] != raw_data[pix + 2]) {
        return false;
      }
      line_buff_[y][x] = raw_data[pix];
    }
  }
  // advance the caller's cursor past the pixel data
  (*raw_data_ptr) = raw_data + buf_size;
  return true;
}
// Returns the fraction of foreground (non-0xff) pixels in the bitmap,
// in [0, 1]. Returns 1.0 for a bitmap with zero width or height.
float Bmp8::ForegroundRatio() const {
  if (wid_ == 0 || hgt_ == 0) {
    return 1.0;
  }
  int fore_cnt = 0;
  for (int y = 0; y < hgt_; y++) {
    for (int x = 0; x < wid_; x++) {
      fore_cnt += (line_buff_[y][x] == 0xff ? 0 : 1);
    }
  }
  // Divide in floating point. The original computed (fore_cnt / hgt_)
  // with integer division first, truncating the ratio for any count that
  // is not a multiple of hgt_.
  return (1.0 * fore_cnt) / (1.0 * hgt_ * wid_);
}
// Removes baseline skew in place: for each candidate angle, shears the
// foreground pixels vertically and builds a row histogram; the angle
// whose histogram has the lowest entropy wins. The bitmap is re-rendered
// sheared by that angle (hgt_ may change) and the chosen angle in
// degrees is returned through *deslant_angle.
bool Bmp8::HorizontalDeslant(double *deslant_angle) {
  int x;
  int y;
  int des_y;
  int ang_idx;
  int best_ang;
  int min_des_y;
  int max_des_y;
  int des_hgt;
  // compute tan table if necess.
  if (tan_table_ == NULL && !ComputeTanTable()) {
    return false;
  }
  // compute min and max values for y after deslant
  min_des_y = min(0, static_cast<int>((wid_ - 1) * tan_table_[0]));
  max_des_y = (hgt_ - 1) +
      max(0, static_cast<int>((wid_ - 1) * tan_table_[kDeslantAngleCount - 1]));
  des_hgt = max_des_y - min_des_y + 1;
  // alloc memory for histograms, one per candidate angle
  // NOTE(review): on a row-allocation failure only the pointer array is
  // freed; rows allocated in earlier iterations leak -- confirm and fix.
  int **angle_hist = new int*[kDeslantAngleCount];
  for (ang_idx = 0; ang_idx < kDeslantAngleCount; ang_idx++) {
    angle_hist[ang_idx] = new int[des_hgt];
    if (angle_hist[ang_idx] == NULL) {
      delete[] angle_hist;
      return false;
    }
    memset(angle_hist[ang_idx], 0, des_hgt * sizeof(*angle_hist[ang_idx]));
  }
  // compute histograms: each foreground pixel stamps its sheared y into
  // every angle's histogram
  for (y = 0; y < hgt_; y++) {
    for (x = 0; x < wid_; x++) {
      // find a non-bkgrnd pixel
      if (line_buff_[y][x] != 0xff) {
        // stamp all histograms
        for (ang_idx = 0; ang_idx < kDeslantAngleCount; ang_idx++) {
          des_y = y - static_cast<int>(x * tan_table_[ang_idx]);
          if (des_y >= min_des_y && des_y <= max_des_y) {
            angle_hist[ang_idx][des_y - min_des_y]++;
          }
        }
      }
    }
  }
  // find the histogram with the lowest entropy
  float entropy;
  float best_entropy = 0.0f;
  float norm_val;
  best_ang = -1;
  for (ang_idx = 0; ang_idx < kDeslantAngleCount; ang_idx++) {
    entropy = 0.0f;
    for (y = min_des_y; y <= max_des_y; y++) {
      if (angle_hist[ang_idx][y - min_des_y] > 0) {
        norm_val = (1.0f * angle_hist[ang_idx][y - min_des_y] / wid_);
        entropy += (-1.0f * norm_val * log(norm_val));
      }
    }
    if (best_ang == -1 || entropy < best_entropy) {
      best_ang = ang_idx;
      best_entropy = entropy;
    }
    // free the histogram
    delete[] angle_hist[ang_idx];
  }
  delete[] angle_hist;
  (*deslant_angle) = 0.0;
  // deslant: re-render the foreground sheared by the winning angle
  if (best_ang != -1) {
    unsigned char **dest_lines;
    int old_hgt = hgt_;
    // recompute the exact y-range for the winning angle and resize
    min_des_y = min(0, static_cast<int>((wid_ - 1) * -tan_table_[best_ang]));
    max_des_y = (hgt_ - 1) +
        max(0, static_cast<int>((wid_ - 1) * -tan_table_[best_ang]));
    hgt_ = max_des_y - min_des_y + 1;
    dest_lines = CreateBmpBuffer();
    if (dest_lines == NULL) {
      return false;
    }
    for (y = 0; y < old_hgt; y++) {
      for (x = 0; x < wid_; x++) {
        // find a non-bkgrnd pixel
        if (line_buff_[y][x] != 0xff) {
          // compute new pos
          des_y = y - static_cast<int>((x * tan_table_[best_ang]));
          dest_lines[des_y - min_des_y][x] = 0;
        }
      }
    }
    // free old buffer
    FreeBmpBuffer(line_buff_);
    line_buff_ = dest_lines;
    (*deslant_angle) = kMinDeslantAngle + (best_ang * kDeslantAngleDelta);
  }
  return true;
}
// Computes the average over rows of -p * log(p), where p is the fraction
// of foreground pixels in a row. Rows with no foreground contribute zero.
float Bmp8::MeanHorizontalHistogramEntropy() const {
  float total_entropy = 0.0f;
  for (int row = 0; row < hgt_; row++) {
    // count the foreground pixels in this row
    int fore_pix = 0;
    for (int col = 0; col < wid_; col++) {
      if (line_buff_[row][col] != 0xff) {
        fore_pix++;
      }
    }
    if (fore_pix > 0) {
      float norm_val = (1.0f * fore_pix / wid_);
      total_entropy += (-1.0f * norm_val * log(norm_val));
    }
  }
  return total_entropy / hgt_;
}
// Returns a newly allocated array of hgt_ foreground-pixel counts, one
// per row; the caller owns the array. Returns NULL on allocation failure.
int *Bmp8::HorizontalHistogram() const {
  int *row_counts = new int[hgt_];
  if (row_counts == NULL) {
    return NULL;
  }
  for (int row = 0; row < hgt_; row++) {
    int cnt = 0;
    for (int col = 0; col < wid_; col++) {
      // count non-background pixels
      if (line_buff_[row][col] != 0xff) {
        cnt++;
      }
    }
    row_counts[row] = cnt;
  }
  return row_counts;
}
} // namespace tesseract
| 1080228-arabicocr11 | cube/bmp_8.cpp | C++ | asf20 | 28,231 |
/**********************************************************************
* File: cached_file.pp
* Description: Implementation of an Cached File Class
* Author: Ahmad Abdulkader
* Created: 2007
*
* (C) Copyright 2008, Google Inc.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
#include <string>
#include <stdlib.h>
#include <cstring>
#include "cached_file.h"
namespace tesseract {
// Records the file name; the file itself is opened lazily by Open().
CachedFile::CachedFile(string file_name) {
  file_name_ = file_name;
  fp_ = NULL;
  buff_ = NULL;
  // cache bookkeeping starts empty
  buff_size_ = 0;
  buff_pos_ = 0;
  file_size_ = 0;
  file_pos_ = 0;
}
// Closes the file (if open) and releases the cache buffer.
CachedFile::~CachedFile() {
  if (fp_ != NULL) {
    fclose(fp_);
    fp_ = NULL;
  }
  // delete[] on NULL is a no-op
  delete []buff_;
  buff_ = NULL;
}
// Opens the underlying file for reading, determines its size, and
// allocates the cache buffer. Idempotent: returns true immediately if
// already open. Returns false on failure.
bool CachedFile::Open() {
  if (fp_ != NULL) {
    return true;
  }
  fp_ = fopen(file_name_.c_str(), "rb");
  if (fp_ == NULL) {
    return false;
  }
  // seek to the end to determine the file size
  fseek(fp_, 0, SEEK_END);
  file_size_ = ftell(fp_);
  if (file_size_ < 1) {
    // Close and reset fp_ so a failed Open() is not mistaken for an open
    // file. (Previously fp_ stayed set, so a later Open() returned true
    // with no cache buffer, breaking Read/Tell/eof and leaking the
    // handle.)
    fclose(fp_);
    fp_ = NULL;
    return false;
  }
  // rewind again
  rewind(fp_);
  // alloc memory for the cache buffer
  buff_ = new unsigned char[kCacheSize];
  if (buff_ == NULL) {
    fclose(fp_);
    fp_ = NULL;
    return false;
  }
  // init counters: cache empty, nothing consumed yet
  buff_size_ = 0;
  buff_pos_ = 0;
  file_pos_ = 0;
  return true;
}
// Reads "bytes" bytes into read_buff through the cache, refilling the
// cache from disk when it runs dry. Returns the number of bytes actually
// copied, which can be less than requested at end of file.
// NOTE(review): a request larger than one refilled cache (kCacheSize, or
// the remaining file tail) returns a partial read rather than looping;
// callers compare the return value against the request, so they treat
// that as failure -- confirm this is the intended contract.
int CachedFile::Read(void *read_buff, int bytes) {
  int read_bytes = 0;
  unsigned char *buff = (unsigned char *)read_buff;
  // do we need to read beyond the buffer
  if ((buff_pos_ + bytes) > buff_size_) {
    // copy as many bytes from the current buffer as are available
    int copy_bytes = buff_size_ - buff_pos_;
    if (copy_bytes > 0) {
      memcpy(buff, buff_ + buff_pos_, copy_bytes);
      buff += copy_bytes;
      bytes -= copy_bytes;
      read_bytes += copy_bytes;
    }
    // determine how much to read: a full cache or the remaining tail
    buff_size_ = kCacheSize;
    if ((file_pos_ + buff_size_) > file_size_) {
      buff_size_ = static_cast<int>(file_size_ - file_pos_);
    }
    // EOF, or the remainder still doesn't fit in one cache fill?
    if (buff_size_ <= 0 || bytes > buff_size_) {
      return read_bytes;
    }
    // read the next chunk into the cache
    if (fread(buff_, 1, buff_size_, fp_) != buff_size_) {
      return read_bytes;
    }
    buff_pos_ = 0;
    file_pos_ += buff_size_;
  }
  // satisfy the (rest of the) request from the cache
  memcpy(buff, buff_ + buff_pos_, bytes);
  read_bytes += bytes;
  buff_pos_ += bytes;
  return read_bytes;
}
// Returns the total file size in bytes, opening the file on demand;
// returns 0 if the file cannot be opened.
long CachedFile::Size() {
  if (fp_ == NULL && !Open()) {
    return 0;
  }
  return file_size_;
}
// Returns the logical read position, opening the file on demand;
// returns 0 if the file cannot be opened.
long CachedFile::Tell() {
  if (fp_ == NULL && !Open()) {
    return 0;
  }
  // logical position = bytes pulled from disk minus what is still
  // unconsumed in the cache
  return file_pos_ - (buff_size_ - buff_pos_);
}
// True when every byte has been consumed (or the file cannot be opened).
bool CachedFile::eof() {
  if (fp_ == NULL && !Open()) {
    return true;
  }
  long logical_pos = file_pos_ - buff_size_ + buff_pos_;
  return logical_pos >= file_size_;
}
} // namespace tesseract
| 1080228-arabicocr11 | cube/cached_file.cpp | C++ | asf20 | 3,351 |
/**********************************************************************
* File: lang_mod_edge.h
* Description: Declaration of the Language Model Edge Base Class
* Author: Ahmad Abdulkader
* Created: 2007
*
* (C) Copyright 2008, Google Inc.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
// The LangModEdge abstracts an Edge in the language model trie
// This is an abstract class that any Language Model Edge should inherit from
// It provides methods for:
// 1- Returns the class ID corresponding to the edge
// 2- If the edge is a valid EndOfWord (EOW)
// 3- If the edge is coming from a OutOfDictionary (OOF) state machine
// 4- If the edge is a Terminal (has no children)
// 5- A Hash of the edge that will be used to retrieve the edge
// quickly from the BeamSearch lattice
// 6- If two edges are identcial
// 7- Returns a verbal description of the edge (use by debuggers)
// 8- the language model cost of the edge (if any)
// 9- The string corresponding to this edge
// 10- Getting and setting the "Root" status of the edge
#ifndef LANG_MOD_EDGE_H
#define LANG_MOD_EDGE_H
#include "cube_tuning_params.h"
#include "char_set.h"
namespace tesseract {
// Abstract interface for an edge in a language model trie; concrete
// language models (e.g. the word-list and Tesseract dawg models)
// implement it. See the file header for the full list of
// responsibilities.
class LangModEdge {
 public:
  LangModEdge() {}
  virtual ~LangModEdge() {}
  // The string corresponding to this edge
  virtual const char_32 * EdgeString() const = 0;
  // Returns the class ID corresponding to the edge
  virtual int ClassID() const = 0;
  // If the edge is the root edge
  virtual bool IsRoot() const = 0;
  // Set the Root flag
  virtual void SetRoot(bool flag) = 0;
  // If the edge is a valid EndOfWord (EOW)
  virtual bool IsEOW() const = 0;
  // Is the edge coming from an OutOfDictionary (OOD) state machine
  virtual bool IsOOD() const = 0;
  // Is the edge a Terminal (has no children)
  virtual bool IsTerminal() const = 0;
  // Returns a hash of the edge that will be used to retrieve the edge
  // quickly from the BeamSearch lattice
  virtual unsigned int Hash() const = 0;
  // Are the two edges identical?
  virtual bool IsIdentical(LangModEdge *edge) const = 0;
  // A verbal description of the edge (used by debuggers)
  virtual char *Description() const = 0;
  // The language model cost of the edge (if any)
  virtual int PathCost() const = 0;
};
}
#endif // LANG_MOD_EDGE_H
| 1080228-arabicocr11 | cube/lang_mod_edge.h | C++ | asf20 | 2,839 |
/**********************************************************************
* File: char_bigrams.cpp
* Description: Implementation of a Character Bigrams Class
* Author: Ahmad Abdulkader
* Created: 2007
*
* (C) Copyright 2008, Google Inc.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
#include <algorithm>
#include <math.h>
#include <string>
#include <vector>
#include "char_bigrams.h"
#include "cube_utils.h"
#include "ndminx.h"
#include "cube_const.h"
namespace tesseract {
// Constructs an empty bigram model: zero-fills the whole table so that
// counts are 0 and the char_bigram row pointer is NULL.
CharBigrams::CharBigrams() {
  memset(&bigram_table_, 0, sizeof(bigram_table_));
}
// Releases each per-character bigram row, then the row array itself.
CharBigrams::~CharBigrams() {
  CharBigram *rows = bigram_table_.char_bigram;
  if (rows == NULL) {
    return;
  }
  for (int ch1 = 0; ch1 <= bigram_table_.max_char; ch1++) {
    // delete [] on a NULL row is a harmless no-op
    delete []rows[ch1].bigram;
  }
  delete []rows;
}
// Loads the character-bigram model from <data_file_path><lang>.cube.bigrams
// and returns a newly allocated CharBigrams object, or NULL on failure.
// Each line of the file has the form "<count> <hex-char1> <hex-char2>".
// Costs are computed as scaled -log probabilities once all counts are read.
CharBigrams *CharBigrams::Create(const string &data_file_path,
                                 const string &lang) {
  string file_name;
  string str;

  file_name = data_file_path + lang;
  file_name += ".cube.bigrams";

  // load the string into memory
  if (!CubeUtils::ReadFileToString(file_name, &str)) {
    return NULL;
  }

  // construct a new object
  CharBigrams *char_bigrams_obj = new CharBigrams();
  if (char_bigrams_obj == NULL) {
    fprintf(stderr, "Cube ERROR (CharBigrams::Create): could not create "
            "character bigrams object.\n");
    return NULL;
  }
  CharBigramTable *table = &char_bigrams_obj->bigram_table_;

  table->total_cnt = 0;
  table->max_char = -1;
  table->char_bigram = NULL;

  // split into lines
  vector<string> str_vec;
  CubeUtils::SplitStringUsing(str, "\r\n", &str_vec);

  for (int big = 0; big < str_vec.size(); big++) {
    char_32 ch1;
    char_32 ch2;
    int cnt;
    if (sscanf(str_vec[big].c_str(), "%d %x %x", &cnt, &ch1, &ch2) != 3) {
      fprintf(stderr, "Cube ERROR (CharBigrams::Create): invalid format "
              "reading line: %s\n", str_vec[big].c_str());
      delete char_bigrams_obj;
      return NULL;
    }

    // expand the bigram table to cover ch1 if needed
    if (ch1 > table->max_char) {
      CharBigram *char_bigram = new CharBigram[ch1 + 1];
      if (char_bigram == NULL) {
        fprintf(stderr, "Cube ERROR (CharBigrams::Create): error allocating "
                "additional memory for character bigram table.\n");
        // BUGFIX: this path previously returned without freeing the object,
        // unlike the other error paths in this function.
        delete char_bigrams_obj;
        return NULL;
      }

      if (table->char_bigram != NULL && table->max_char >= 0) {
        memcpy(char_bigram, table->char_bigram,
               (table->max_char + 1) * sizeof(*char_bigram));
        delete []table->char_bigram;
      }
      table->char_bigram = char_bigram;

      // init the newly added rows
      for (int new_big = table->max_char + 1; new_big <= ch1; new_big++) {
        table->char_bigram[new_big].total_cnt = 0;
        table->char_bigram[new_big].max_char = -1;
        table->char_bigram[new_big].bigram = NULL;
      }
      table->max_char = ch1;
    }

    // expand the row for ch1 to cover ch2 if needed
    if (ch2 > table->char_bigram[ch1].max_char) {
      Bigram *bigram = new Bigram[ch2 + 1];
      if (bigram == NULL) {
        fprintf(stderr, "Cube ERROR (CharBigrams::Create): error allocating "
                "memory for bigram.\n");
        delete char_bigrams_obj;
        return NULL;
      }

      if (table->char_bigram[ch1].bigram != NULL &&
          table->char_bigram[ch1].max_char >= 0) {
        memcpy(bigram, table->char_bigram[ch1].bigram,
               (table->char_bigram[ch1].max_char + 1) * sizeof(*bigram));
        delete []table->char_bigram[ch1].bigram;
      }
      table->char_bigram[ch1].bigram = bigram;

      // init the newly added cells
      for (int new_big = table->char_bigram[ch1].max_char + 1;
           new_big <= ch2; new_big++) {
        table->char_bigram[ch1].bigram[new_big].cnt = 0;
      }
      table->char_bigram[ch1].max_char = ch2;
    }

    // accumulate the counts
    table->char_bigram[ch1].bigram[ch2].cnt = cnt;
    table->char_bigram[ch1].total_cnt += cnt;
    table->total_cnt += cnt;
  }

  // compute costs (-log probs); unseen pairs get worst_cost
  table->worst_cost = static_cast<int>(
      -PROB2COST_SCALE * log(0.5 / table->total_cnt));

  for (char_32 ch1 = 0; ch1 <= table->max_char; ch1++) {
    for (char_32 ch2 = 0; ch2 <= table->char_bigram[ch1].max_char; ch2++) {
      int cnt = table->char_bigram[ch1].bigram[ch2].cnt;
      table->char_bigram[ch1].bigram[ch2].cost =
          static_cast<int>(-PROB2COST_SCALE *
                           log(MAX(0.5, static_cast<double>(cnt)) /
                               table->total_cnt));
    }
  }
  return char_bigrams_obj;
}
// Returns the bigram cost of the pair (ch1, ch2), or the table's worst
// cost when either character lies outside the table.
int CharBigrams::PairCost(char_32 ch1, char_32 ch2) const {
  if (ch1 <= bigram_table_.max_char &&
      ch2 <= bigram_table_.char_bigram[ch1].max_char) {
    return bigram_table_.char_bigram[ch1].bigram[ch2].cost;
  }
  return bigram_table_.worst_cost;
}
// Returns the mean bigram cost of the string.  For case-invariant strings
// of at least kMinLengthCaseInvariant characters, the all-lowercase and
// all-uppercase variants are also scored and the cheapest cost is returned.
int CharBigrams::Cost(const char_32 *char_32_ptr, CharSet *char_set) const {
  if (!char_32_ptr || char_32_ptr[0] == 0) {
    // NULL or empty string: worst possible cost
    return bigram_table_.worst_cost;
  }
  // cost of the string as-is
  int cost = MeanCostWithSpaces(char_32_ptr);
  if (CubeUtils::StrLen(char_32_ptr) >= kMinLengthCaseInvariant &&
      CubeUtils::IsCaseInvariant(char_32_ptr, char_set)) {
    // try the lowercase variant (ToLower returns a new string we must free)
    char_32 *lower_32 = CubeUtils::ToLower(char_32_ptr, char_set);
    if (lower_32 && lower_32[0] != 0) {
      int cost_lower = MeanCostWithSpaces(lower_32);
      cost = MIN(cost, cost_lower);
      delete [] lower_32;
    }
    // try the uppercase variant
    char_32 *upper_32 = CubeUtils::ToUpper(char_32_ptr, char_set);
    if (upper_32 && upper_32[0] != 0) {
      int cost_upper = MeanCostWithSpaces(upper_32);
      cost = MIN(cost, cost_upper);
      delete [] upper_32;
    }
  }
  return cost;
}
// Returns the average bigram cost per transition of the string, treating
// it as if padded with one space on each side (len + 1 transitions).
// Callers guarantee a non-empty string.
int CharBigrams::MeanCostWithSpaces(const char_32 *char_32_ptr) const {
  if (!char_32_ptr)
    return bigram_table_.worst_cost;
  int len = CubeUtils::StrLen(char_32_ptr);
  // leading space -> first character
  int total_cost = PairCost(' ', char_32_ptr[0]);
  // interior character-to-character transitions
  for (int pos = 1; pos < len; pos++) {
    total_cost += PairCost(char_32_ptr[pos - 1], char_32_ptr[pos]);
  }
  // last character -> trailing space
  total_cost += PairCost(char_32_ptr[len - 1], ' ');
  // mean over all len + 1 transitions
  return static_cast<int>(total_cost / static_cast<double>(len + 1));
}
} // namespace tesseract
| 1080228-arabicocr11 | cube/char_bigrams.cpp | C++ | asf20 | 6,723 |
/**********************************************************************
* File: alt_list.cpp
 * Description: Class to abstract a list of alternate results
* Author: Ahmad Abdulkader
* Created: 2007
*
* (C) Copyright 2008, Google Inc.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
#include "altlist.h"
#include <stdlib.h>
namespace tesseract {
// Constructs an empty alternate list able to hold up to max_alt entries.
AltList::AltList(int max_alt)
    : max_alt_(max_alt),
      alt_cnt_(0),
      alt_cost_(NULL),
      alt_tag_(NULL) {
}
// Frees the parallel cost and tag arrays (delete [] on NULL is a no-op).
AltList::~AltList() {
  delete []alt_cost_;
  alt_cost_ = NULL;
  delete []alt_tag_;
  alt_tag_ = NULL;
}
// return the best possible cost and index of corresponding alternate
// Returns the lowest cost in the list and stores the index of the
// corresponding alternate in *best_alt.  An empty list yields -1 / -1.
int AltList::BestCost(int *best_alt) const {
  if (alt_cnt_ <= 0) {
    (*best_alt) = -1;
    return -1;
  }
  int best_idx = 0;
  int best_cost = alt_cost_[0];
  for (int idx = 1; idx < alt_cnt_; idx++) {
    if (alt_cost_[idx] < best_cost) {
      best_idx = idx;
      best_cost = alt_cost_[idx];
    }
  }
  (*best_alt) = best_idx;
  return best_cost;
}
}
| 1080228-arabicocr11 | cube/altlist.cpp | C++ | asf20 | 1,661 |
# Preprocessor flags: the cube module includes headers from most of the
# other Tesseract component libraries.
AM_CPPFLAGS += \
    -DUSE_STD_NAMESPACE \
    -I$(top_srcdir)/cutil -I$(top_srcdir)/ccutil \
    -I$(top_srcdir)/ccstruct -I$(top_srcdir)/dict \
    -I$(top_srcdir)/ccmain -I$(top_srcdir)/classify \
    -I$(top_srcdir)/textord -I$(top_srcdir)/wordrec \
    -I$(top_srcdir)/neural_networks/runtime \
    -I$(top_srcdir)/viewer

# With visibility enabled, hide all symbols not explicitly exported.
if VISIBILITY
AM_CPPFLAGS += -DTESS_EXPORTS \
    -fvisibility=hidden -fvisibility-inlines-hidden
endif

# Headers are used only to build the library; they are not installed.
noinst_HEADERS = \
    altlist.h beam_search.h bmp_8.h cached_file.h \
    char_altlist.h char_bigrams.h char_samp.h char_samp_enum.h \
    char_samp_set.h char_set.h classifier_base.h classifier_factory.h \
    con_comp.h cube_const.h conv_net_classifier.h cube_line_object.h \
    cube_line_segmenter.h cube_object.h cube_search_object.h \
    cube_tuning_params.h cube_utils.h feature_base.h feature_bmp.h \
    feature_chebyshev.h feature_hybrid.h hybrid_neural_net_classifier.h \
    lang_mod_edge.h lang_model.h search_column.h search_node.h \
    search_object.h string_32.h tess_lang_mod_edge.h tess_lang_model.h \
    tuning_params.h word_altlist.h word_list_lang_model.h word_size_model.h \
    word_unigrams.h

# Build either a convenience library (single-lib build) or an installed
# versioned library that links against the sibling component libraries.
if !USING_MULTIPLELIBS
noinst_LTLIBRARIES = libtesseract_cube.la
else
lib_LTLIBRARIES = libtesseract_cube.la
libtesseract_cube_la_LDFLAGS = -version-info $(GENERIC_LIBRARY_VERSION)
libtesseract_cube_la_LIBADD = \
    ../ccstruct/libtesseract_ccstruct.la \
    ../ccutil/libtesseract_ccutil.la \
    ../neural_networks/runtime/libtesseract_neural.la \
    ../viewer/libtesseract_viewer.la \
    ../wordrec/libtesseract_wordrec.la \
    ../cutil/libtesseract_cutil.la \
    ../classify/libtesseract_classify.la \
    ../dict/libtesseract_dict.la
endif

libtesseract_cube_la_SOURCES = \
    altlist.cpp beam_search.cpp bmp_8.cpp cached_file.cpp \
    char_altlist.cpp char_bigrams.cpp char_samp.cpp char_samp_enum.cpp \
    char_samp_set.cpp char_set.cpp classifier_factory.cpp \
    con_comp.cpp conv_net_classifier.cpp cube_line_object.cpp \
    cube_line_segmenter.cpp cube_object.cpp cube_search_object.cpp \
    cube_tuning_params.cpp cube_utils.cpp feature_bmp.cpp \
    feature_chebyshev.cpp feature_hybrid.cpp hybrid_neural_net_classifier.cpp \
    search_column.cpp search_node.cpp \
    tess_lang_mod_edge.cpp tess_lang_model.cpp \
    word_altlist.cpp word_list_lang_model.cpp word_size_model.cpp \
    word_unigrams.cpp
| 1080228-arabicocr11 | cube/Makefile.am | Makefile | asf20 | 2,393 |
/**********************************************************************
* File: cube_object.cpp
* Description: Implementation of the Cube Object Class
* Author: Ahmad Abdulkader
* Created: 2007
*
* (C) Copyright 2008, Google Inc.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
#include <math.h>
#include "cube_object.h"
#include "cube_utils.h"
#include "word_list_lang_model.h"
namespace tesseract {
// Constructs a CubeObject that recognizes an existing CharSamp.  The
// sample remains owned by the caller (own_char_samp_ stays false).
CubeObject::CubeObject(CubeRecoContext *cntxt, CharSamp *char_samp) {
  Init();
  char_samp_ = char_samp;
  cntxt_ = cntxt;
}

// Constructs a CubeObject from a sub-rectangle of a Pix image.  The
// CharSamp created here is owned and freed by this object.
CubeObject::CubeObject(CubeRecoContext *cntxt, Pix *pix,
                       int left, int top, int wid, int hgt) {
  Init();
  char_samp_ = CubeUtils::CharSampleFromPix(pix, left, top, wid, hgt);
  own_char_samp_ = true;
  cntxt_ = cntxt;
}
// Data member initialization function
// Resets every data member to its default, unallocated state.
void CubeObject::Init() {
  // context and input sample
  cntxt_ = NULL;
  char_samp_ = NULL;
  own_char_samp_ = false;
  // regular recognition objects
  beam_obj_ = NULL;
  srch_obj_ = NULL;
  alt_list_ = NULL;
  // deslanted recognition objects
  deslanted_ = false;
  deslanted_char_samp_ = NULL;
  deslanted_beam_obj_ = NULL;
  deslanted_srch_obj_ = NULL;
  deslanted_alt_list_ = NULL;
}
// Cleanup function
// Frees the alternate lists left over from a previous recognition pass
// (delete on NULL is a no-op).
void CubeObject::Cleanup() {
  delete alt_list_;
  alt_list_ = NULL;
  delete deslanted_alt_list_;
  deslanted_alt_list_ = NULL;
}
// Frees all owned recognition objects.  The input char sample is freed
// only when this object created it (own_char_samp_ set).
CubeObject::~CubeObject() {
  if (char_samp_ != NULL && own_char_samp_ == true) {
    delete char_samp_;
    char_samp_ = NULL;
  }
  // delete on NULL is a no-op, so no per-member checks are needed
  delete srch_obj_;
  srch_obj_ = NULL;
  delete deslanted_srch_obj_;
  deslanted_srch_obj_ = NULL;
  delete beam_obj_;
  beam_obj_ = NULL;
  delete deslanted_beam_obj_;
  deslanted_beam_obj_ = NULL;
  delete deslanted_char_samp_;
  deslanted_char_samp_ = NULL;
  // frees the alt lists
  Cleanup();
}
// Actually do the recognition using the specified language mode. If none
// is specified, the default language model in the CubeRecoContext is used.
// Returns the sorted list of alternate answers
// The Word mode determines whether recognition is done as a word or a phrase
// Actually do the recognition using the specified language model.  If none
// is specified, the default language model in the CubeRecoContext is used.
// Returns the sorted list of alternate answers (owned by this object and
// freed on the next Recognize call or destruction).
// word_mode determines whether recognition is done as a word or a phrase.
// If the language supports italics and the best result is poor, the sample
// is deslanted and re-recognized, and the cheaper of the two results wins.
WordAltList *CubeObject::Recognize(LangModel *lang_mod, bool word_mode) {
  if (char_samp_ == NULL) {
    return NULL;
  }

  // clear alt lists from any previous pass
  Cleanup();

  // no specified language model, use the one in the reco context
  if (lang_mod == NULL) {
    lang_mod = cntxt_->LangMod();
  }

  // normalize if necessary
  if (cntxt_->SizeNormalization()) {
    Normalize();
  }

  // assume not de-slanted by default
  deslanted_ = false;

  // create a beam search object (reused across calls)
  if (beam_obj_ == NULL) {
    beam_obj_ = new BeamSearch(cntxt_, word_mode);
    if (beam_obj_ == NULL) {
      fprintf(stderr, "Cube ERROR (CubeObject::Recognize): could not construct "
              "BeamSearch\n");
      return NULL;
    }
  }

  // create a cube search object (reused across calls)
  if (srch_obj_ == NULL) {
    srch_obj_ = new CubeSearchObject(cntxt_, char_samp_);
    if (srch_obj_ == NULL) {
      fprintf(stderr, "Cube ERROR (CubeObject::Recognize): could not construct "
              "CubeSearchObject\n");
      return NULL;
    }
  }

  // run a beam search against the tesslang model
  alt_list_ = beam_obj_->Search(srch_obj_, lang_mod);

  // deslant (if supported by language) and re-reco if probability is low enough
  if (cntxt_->HasItalics() == true &&
      (alt_list_ == NULL || alt_list_->AltCount() < 1 ||
       alt_list_->AltCost(0) > CubeUtils::Prob2Cost(kMinProbSkipDeslanted))) {
    if (deslanted_beam_obj_ == NULL) {
      // NOTE(review): unlike beam_obj_, this BeamSearch is constructed
      // without word_mode — confirm the default is intended here.
      deslanted_beam_obj_ = new BeamSearch(cntxt_);
      if (deslanted_beam_obj_ == NULL) {
        fprintf(stderr, "Cube ERROR (CubeObject::Recognize): could not "
                "construct deslanted BeamSearch\n");
        return NULL;
      }
    }

    if (deslanted_srch_obj_ == NULL) {
      // clone the sample and deslant the copy; the original stays intact
      deslanted_char_samp_ = char_samp_->Clone();
      if (deslanted_char_samp_ == NULL) {
        fprintf(stderr, "Cube ERROR (CubeObject::Recognize): could not "
                "construct deslanted CharSamp\n");
        return NULL;
      }

      if (deslanted_char_samp_->Deslant() == false) {
        return NULL;
      }

      deslanted_srch_obj_ = new CubeSearchObject(cntxt_, deslanted_char_samp_);
      if (deslanted_srch_obj_ == NULL) {
        fprintf(stderr, "Cube ERROR (CubeObject::Recognize): could not "
                "construct deslanted CubeSearchObject\n");
        return NULL;
      }
    }

    // run a beam search against the tesslang model
    deslanted_alt_list_ = deslanted_beam_obj_->Search(deslanted_srch_obj_,
                                                      lang_mod);
    // should we use de-slanted altlist?
    if (deslanted_alt_list_ != NULL && deslanted_alt_list_->AltCount() > 0) {
      if (alt_list_ == NULL || alt_list_->AltCount() < 1 ||
          deslanted_alt_list_->AltCost(0) < alt_list_->AltCost(0)) {
        deslanted_ = true;
        return deslanted_alt_list_;
      }
    }
  }

  return alt_list_;
}
// Recognize the member char sample as a single word (word mode on).
WordAltList *CubeObject::RecognizeWord(LangModel *lang_mod) {
  return Recognize(lang_mod, true);
}
// Recognize the member char sample as a phrase (word mode off).
WordAltList *CubeObject::RecognizePhrase(LangModel *lang_mod) {
  return Recognize(lang_mod, false);
}
// Computes the cost of a specific string. This is done by performing
// recognition of a language model that allows only the specified word
// Computes the recognition cost of a specific string by running word
// recognition against a word-list language model that allows only that
// string.  Returns WORST_COST on any failure.
int CubeObject::WordCost(const char *str) {
  WordListLangModel *lang_mod = new WordListLangModel(cntxt_);
  if (lang_mod == NULL) {
    return WORST_COST;
  }
  if (lang_mod->AddString(str) == false) {
    delete lang_mod;
    return WORST_COST;
  }
  // run a beam search against the single string wordlist model
  WordAltList *alt_list = RecognizeWord(lang_mod);
  delete lang_mod;
  if (alt_list != NULL && alt_list->AltCount() > 0) {
    return alt_list->AltCost(0);
  }
  return WORST_COST;
}
// Recognizes a single character and returns the list of results.
// Classifies the member char sample as a single character and returns
// the resulting alternate list, or NULL if there is no sample.
CharAltList *CubeObject::RecognizeChar() {
  if (char_samp_ == NULL) {
    return NULL;
  }
  CharClassifier *char_classifier = cntxt_->Classifier();
  ASSERT_HOST(char_classifier != NULL);
  return char_classifier->Classify(char_samp_);
}
// Normalize the input word bitmap to have a minimum aspect ratio
// Normalize the input word bitmap to have a minimum aspect ratio: if the
// mean height/width ratio of the over-segmented pieces is too high, the
// bitmap is scaled down vertically.  Returns false only on allocation
// failure; skipping normalization still returns true.
bool CubeObject::Normalize() {
  // create a cube search object (local, used only for segmentation)
  CubeSearchObject *srch_obj = new CubeSearchObject(cntxt_, char_samp_);
  if (srch_obj == NULL) {
    return false;
  }
  // Perform over-segmentation
  int seg_cnt = srch_obj->SegPtCnt();
  // Only perform normalization if segment count is large enough
  if (seg_cnt < kMinNormalizationSegmentCnt) {
    delete srch_obj;
    return true;
  }
  // compute the mean aspect ratio (height/width) of the segments
  double ar_mean = 0.0;
  for (int seg_idx = 0; seg_idx <= seg_cnt; seg_idx++) {
    // NOTE(review): the first iteration calls CharSample(-1, 0) —
    // presumably -1 denotes "before the first segment point"; confirm
    // against CubeSearchObject::CharSample.
    CharSamp *seg_samp = srch_obj->CharSample(seg_idx - 1, seg_idx);
    if (seg_samp != NULL && seg_samp->Width() > 0) {
      ar_mean += (1.0 * seg_samp->Height() / seg_samp->Width());
    }
  }
  ar_mean /= (seg_cnt + 1);
  // perform normalization if segment AR is too high
  if (ar_mean > kMinNormalizationAspectRatio) {
    // scale down the image in the y-direction to attain AR
    CharSamp *new_samp = char_samp_->Scale(char_samp_->Width(),
                                           2.0 * char_samp_->Height() / ar_mean,
                                           false);
    if (new_samp != NULL) {
      // free existing char samp if owned
      if (own_char_samp_) {
        delete char_samp_;
      }
      // update with new scaled charsamp and set ownership flag
      char_samp_ = new_samp;
      own_char_samp_ = true;
    }
  }
  delete srch_obj;
  return true;
}
}
| 1080228-arabicocr11 | cube/cube_object.cpp | C++ | asf20 | 8,526 |
/**********************************************************************
* File: word_size_model.cpp
* Description: Implementation of the Word Size Model Class
* Author: Ahmad Abdulkader
* Created: 2008
*
* (C) Copyright 2008, Google Inc.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
#include <math.h>
#include <string>
#include <vector>
#include "word_size_model.h"
#include "cube_utils.h"
namespace tesseract {
// Stores the (not owned) char set; contextual selects position-dependent
// size classes (see Init).
WordSizeModel::WordSizeModel(CharSet * char_set, bool contextual)
    : char_set_(char_set),
      contextual_(contextual) {
}
// Frees the pair-size matrices of every loaded font.  Row 0 owns the
// whole contiguous cell block (see Init), so only it is deleted.
WordSizeModel::~WordSizeModel() {
  for (int fnt = 0; fnt < font_pair_size_models_.size(); fnt++) {
    const FontPairSizeInfo &fnt_info = font_pair_size_models_[fnt];
    delete []fnt_info.pair_size_info[0];
    delete []fnt_info.pair_size_info;
  }
}
// Factory: allocates a WordSizeModel and loads its size statistics from
// <data_file_path><lang>.cube.size.  Returns NULL on failure.
WordSizeModel *WordSizeModel::Create(const string &data_file_path,
                                     const string &lang,
                                     CharSet *char_set,
                                     bool contextual) {
  WordSizeModel *obj = new WordSizeModel(char_set, contextual);
  if (!obj) {
    fprintf(stderr, "Cube ERROR (WordSizeModel::Create): unable to allocate "
            "new word size model object\n");
    return NULL;
  }
  if (obj->Init(data_file_path, lang)) {
    return obj;
  }
  delete obj;
  return NULL;
}
// Loads the size-statistics file <data_file_path><lang>.cube.size into
// font_pair_size_models_.  The file is a flat token stream (tab/newline
// separated); each record starts with a font name followed by the size
// bigram fields.  Returns false on read or parse failure.
bool WordSizeModel::Init(const string &data_file_path, const string &lang) {
  string stats_file_name;
  stats_file_name = data_file_path + lang;
  stats_file_name += ".cube.size";

  // read file to memory
  string str_data;

  if (!CubeUtils::ReadFileToString(stats_file_name, &str_data)) {
    return false;
  }

  // split to words
  vector<string> tokens;
  CubeUtils::SplitStringUsing(str_data, "\t\r\n", &tokens);
  if (tokens.size() < 1) {
    fprintf(stderr, "Cube ERROR (WordSizeModel::Init): invalid "
            "file contents: %s\n", stats_file_name.c_str());
    return false;
  }

  font_pair_size_models_.clear();

  // token count per line depends on whether the language is contextual or not
  int token_cnt = contextual_ ?
      (kExpectedTokenCount + 4) : kExpectedTokenCount;
  // the count of size classes depends on whether the language is contextual
  // or not. For non contextual languages (Ex: Eng), it is equal to the class
  // count. For contextual languages (Ex: Ara), it is equal to the class count
  // multiplied by the position count (4: start, middle, final, isolated)
  int size_class_cnt = contextual_ ?
      (char_set_->ClassCount() * 4) : char_set_->ClassCount();

  string fnt_name = "";

  // consume the token stream one record (token_cnt tokens) at a time
  for (int tok = 0; tok < tokens.size(); tok += token_cnt) {
    // a new font, write the old font data and re-init
    if (tok == 0 || fnt_name != tokens[tok]) {
      FontPairSizeInfo fnt_info;

      // row-pointer array; row 0 owns one contiguous block for all cells
      // (the destructor relies on this layout)
      fnt_info.pair_size_info = new PairSizeInfo *[size_class_cnt];
      if (!fnt_info.pair_size_info) {
        fprintf(stderr, "Cube ERROR (WordSizeModel::Init): error allcoating "
                "memory for font pair size info\n");
        return false;
      }

      fnt_info.pair_size_info[0] =
          new PairSizeInfo[size_class_cnt * size_class_cnt];
      if (!fnt_info.pair_size_info[0]) {
        fprintf(stderr, "Cube ERROR (WordSizeModel::Init): error allocating "
                "memory for font pair size info\n");
        return false;
      }

      memset(fnt_info.pair_size_info[0], 0, size_class_cnt * size_class_cnt *
             sizeof(PairSizeInfo));

      // point each row into the contiguous cell block
      for (int cls = 1; cls < size_class_cnt; cls++) {
        fnt_info.pair_size_info[cls] =
            fnt_info.pair_size_info[cls - 1] + size_class_cnt;
      }

      // strip out path and extension
      string stripped_font_name = tokens[tok].substr(0, tokens[tok].find('.'));
      string::size_type strt_pos = stripped_font_name.find_last_of("/\\");
      if (strt_pos != string::npos) {
        fnt_info.font_name = stripped_font_name.substr(strt_pos);
      } else {
        fnt_info.font_name = stripped_font_name;
      }
      font_pair_size_models_.push_back(fnt_info);
    }

    // parse the data
    int cls_0;
    int cls_1;
    double delta_top;
    double wid_0;
    double hgt_0;
    double wid_1;
    double hgt_1;
    int size_code_0;
    int size_code_1;

    // read and parse the tokens
    if (contextual_) {
      int start_0;
      int end_0;
      int start_1;
      int end_1;
      // The expected format for a character size bigram is as follows:
      // ClassId0<delim>Start-flag0<delim>End-flag0<delim>String0(ignored)
      // Width0<delim>Height0<delim>
      // ClassId1<delim>Start-flag1<delim>End-flag1<delim>String1(ignored)
      // HeightDelta<delim>Width1<delim>Height0<delim>
      // In case of non-contextual languages, the Start and End flags are
      // omitted
      if (sscanf(tokens[tok + 1].c_str(), "%d", &cls_0) != 1 ||
          sscanf(tokens[tok + 2].c_str(), "%d", &start_0) != 1 ||
          sscanf(tokens[tok + 3].c_str(), "%d", &end_0) != 1 ||
          sscanf(tokens[tok + 5].c_str(), "%lf", &wid_0) != 1 ||
          sscanf(tokens[tok + 6].c_str(), "%lf", &hgt_0) != 1 ||
          sscanf(tokens[tok + 7].c_str(), "%d", &cls_1) != 1 ||
          sscanf(tokens[tok + 8].c_str(), "%d", &start_1) != 1 ||
          sscanf(tokens[tok + 9].c_str(), "%d", &end_1) != 1 ||
          sscanf(tokens[tok + 11].c_str(), "%lf", &delta_top) != 1 ||
          sscanf(tokens[tok + 12].c_str(), "%lf", &wid_1) != 1 ||
          sscanf(tokens[tok + 13].c_str(), "%lf", &hgt_1) != 1 ||
          (start_0 != 0 && start_0 != 1) || (end_0 != 0 && end_0 != 1) ||
          (start_1 != 0 && start_1 != 1) || (end_1 != 0 && end_1 != 1)) {
        fprintf(stderr, "Cube ERROR (WordSizeModel::Init): bad format at "
                "line %d\n", 1 + (tok / token_cnt));
        return false;
      }
      size_code_0 = SizeCode(cls_0, start_0, end_0);
      size_code_1 = SizeCode(cls_1, start_1, end_1);
    } else {
      if (sscanf(tokens[tok + 1].c_str(), "%d", &cls_0) != 1 ||
          sscanf(tokens[tok + 3].c_str(), "%lf", &wid_0) != 1 ||
          sscanf(tokens[tok + 4].c_str(), "%lf", &hgt_0) != 1 ||
          sscanf(tokens[tok + 5].c_str(), "%d", &cls_1) != 1 ||
          sscanf(tokens[tok + 7].c_str(), "%lf", &delta_top) != 1 ||
          sscanf(tokens[tok + 8].c_str(), "%lf", &wid_1) != 1 ||
          sscanf(tokens[tok + 9].c_str(), "%lf", &hgt_1) != 1) {
        fprintf(stderr, "Cube ERROR (WordSizeModel::Init): bad format at "
                "line %d\n", 1 + (tok / token_cnt));
        return false;
      }
      size_code_0 = cls_0;
      size_code_1 = cls_1;
    }

    // copy the data to the size tables.  Copying the struct is safe here
    // because pair_size_info is a pointer: writes through the copy land in
    // the same matrix owned by font_pair_size_models_.back().
    FontPairSizeInfo fnt_info = font_pair_size_models_.back();
    fnt_info.pair_size_info[size_code_0][size_code_1].delta_top =
        static_cast<int>(delta_top * kShapeModelScale);
    fnt_info.pair_size_info[size_code_0][size_code_1].wid_0 =
        static_cast<int>(wid_0 * kShapeModelScale);
    fnt_info.pair_size_info[size_code_0][size_code_1].hgt_0 =
        static_cast<int>(hgt_0 * kShapeModelScale);
    fnt_info.pair_size_info[size_code_0][size_code_1].wid_1 =
        static_cast<int>(wid_1 * kShapeModelScale);
    fnt_info.pair_size_info[size_code_0][size_code_1].hgt_1 =
        static_cast<int>(hgt_1 * kShapeModelScale);

    fnt_name = tokens[tok];
  }

  return true;
}
// Returns the size-model cost of a sequence of character samples: for each
// known font, the mean pairwise geometric distance over all sample pairs is
// computed, and the cost of the best-matching font is returned.  Sequences
// shorter than two samples cost 0; WORST_COST if no font yields any pair.
int WordSizeModel::Cost(CharSamp **samp_array, int samp_cnt) const {
  if (samp_cnt < 2) {
    return 0;
  }
  double best_dist = static_cast<double>(WORST_COST);
  int best_fnt = -1;
  for (int fnt = 0; fnt < font_pair_size_models_.size(); fnt++) {
    const FontPairSizeInfo *fnt_info = &font_pair_size_models_[fnt];
    double mean_dist = 0;
    int pair_cnt = 0;
    for (int smp_0 = 0; smp_0 < samp_cnt; smp_0++) {
      int cls_0 = char_set_->ClassID(samp_array[smp_0]->StrLabel());
      if (cls_0 < 1) {
        // unknown class: skip this sample
        continue;
      }
      // compute size code for samp 0 based on class id and position
      int size_code_0;
      if (contextual_) {
        size_code_0 = SizeCode(cls_0,
                               samp_array[smp_0]->FirstChar() == 0 ? 0 : 1,
                               samp_array[smp_0]->LastChar() == 0 ? 0 : 1);
      } else {
        size_code_0 = cls_0;
      }

      int char0_height = samp_array[smp_0]->Height();
      int char0_width = samp_array[smp_0]->Width();
      int char0_top = samp_array[smp_0]->Top();

      for (int smp_1 = smp_0 + 1; smp_1 < samp_cnt; smp_1++) {
        int cls_1 = char_set_->ClassID(samp_array[smp_1]->StrLabel());
        if (cls_1 < 1) {
          continue;
        }
        // compute size code for samp 0 based on class id and position
        int size_code_1;
        if (contextual_) {
          size_code_1 = SizeCode(cls_1,
                                 samp_array[smp_1]->FirstChar() == 0 ? 0 : 1,
                                 samp_array[smp_1]->LastChar() == 0 ? 0 : 1);
        } else {
          size_code_1 = cls_1;
        }
        double dist = PairCost(
            char0_width, char0_height, char0_top, samp_array[smp_1]->Width(),
            samp_array[smp_1]->Height(), samp_array[smp_1]->Top(),
            fnt_info->pair_size_info[size_code_0][size_code_1]);
        if (dist > 0) {
          // only pairs with a positive distance contribute to the mean
          mean_dist += dist;
          pair_cnt++;
        }
      }  // smp_1
    }  // smp_0
    if (pair_cnt == 0) {
      continue;
    }
    mean_dist /= pair_cnt;
    if (best_fnt == -1 || mean_dist < best_dist) {
      best_dist = mean_dist;
      best_fnt = fnt;
    }
  }
  if (best_fnt == -1) {
    return static_cast<int>(WORST_COST);
  } else {
    return static_cast<int>(best_dist);
  }
}
// Returns the accumulated absolute difference between the observed pair
// geometry and the model's, after scaling the observation so that the
// first character's height matches the model height.
double WordSizeModel::PairCost(int width_0, int height_0, int top_0,
                               int width_1, int height_1, int top_1,
                               const PairSizeInfo& pair_info) {
  double scale_factor = static_cast<double>(pair_info.hgt_0) /
      static_cast<double>(height_0);
  // negated comparison preserves the original's result (0.0) for
  // non-positive and NaN scale factors alike
  if (!(scale_factor > 0)) {
    return 0.0;
  }
  double norm_width_0 = width_0 * scale_factor;
  double norm_width_1 = width_1 * scale_factor;
  double norm_height_1 = height_1 * scale_factor;
  double norm_delta_top = (top_1 - top_0) * scale_factor;

  // accumulate the distance between the model character and the
  // predicted one on all dimensions of the pair
  double dist = fabs(pair_info.wid_0 - norm_width_0);
  dist += fabs(pair_info.wid_1 - norm_width_1);
  dist += fabs(pair_info.hgt_1 - norm_height_1);
  dist += fabs(pair_info.delta_top - norm_delta_top);
  return dist;
}
} // namespace tesseract
| 1080228-arabicocr11 | cube/word_size_model.cpp | C++ | asf20 | 11,145 |
/**********************************************************************
* File: char_samp_set.h
* Description: Declaration of a Character Sample Set Class
* Author: Ahmad Abdulkader
* Created: 2007
*
* (C) Copyright 2008, Google Inc.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
// The CharSampSet set encapsulates a set of CharSet objects typically
// but not necessarily loaded from a file
// It provides methods to load samples from File, Create a new file and
// Add new char samples to the set
#ifndef CHAR_SAMP_SET_H
#define CHAR_SAMP_SET_H
#include <stdlib.h>
#include <stdio.h>
#include <string>
#include "char_samp.h"
#include "char_samp_enum.h"
#include "char_set.h"
namespace tesseract {
// chunks of samp pointers to allocate
#define SAMP_ALLOC_BLOCK 10000
// A set of CharSamp objects, typically (but not necessarily) loaded from
// a character-dump file.  Provides loading, enumeration, dump-file
// creation, and sample addition.
class CharSampSet {
 public:
  CharSampSet();
  ~CharSampSet();
  // return sample count
  int SampleCount() const { return cnt_; }
  // returns samples buffer
  CharSamp ** Samples() const { return samp_buff_; }
  // Create a CharSampSet set object from a file
  static CharSampSet *FromCharDumpFile(string file_name);
  // Enumerate the Samples in the set one-by-one calling the enumerator's
  // EnumCharSamp method for each sample
  static bool EnumSamples(string file_name, CharSampEnum *enumerator);
  // Create a new Char Dump file
  static FILE *CreateCharDumpFile(string file_name);
  // Add a new sample to the set
  bool Add(CharSamp *char_samp);

 private:
  // sample count
  int cnt_;
  // the char samp array (grown in SAMP_ALLOC_BLOCK chunks)
  CharSamp **samp_buff_;
  // Are the samples owned by the set or not.
  // Determines whether we should cleanup in the end
  bool own_samples_;
  // Cleanup
  void Cleanup();
  // Load character samples from a file
  bool LoadCharSamples(FILE *fp);
};
}
#endif // CHAR_SAMP_SET_H
| 1080228-arabicocr11 | cube/char_samp_set.h | C++ | asf20 | 2,385 |
/**********************************************************************
 * File: char_set.cpp
* Description: Implementation of a Character Set Class
* Author: Ahmad Abdulkader
* Created: 2007
*
* (C) Copyright 2008, Google Inc.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
#include <string>
#include "char_set.h"
#include "cube_utils.h"
#include "tessdatamanager.h"
namespace tesseract {
// Constructs an empty, uninitialized char set.
CharSet::CharSet() {
  init_ = false;
  class_cnt_ = 0;
  class_strings_ = NULL;
  unicharset_map_ = NULL;
  // init hash table
  memset(hash_bin_size_, 0, sizeof(hash_bin_size_));
}
// Frees the per-class strings, the string array, and the unicharset map.
CharSet::~CharSet() {
  if (class_strings_ != NULL) {
    for (int cls = 0; cls < class_cnt_; cls++) {
      // delete on a NULL entry is a no-op
      delete class_strings_[cls];
    }
    delete []class_strings_;
    class_strings_ = NULL;
  }
  delete []unicharset_map_;
}
// Creates CharSet object by reading the unicharset from the
// TessDatamanager, and mapping Cube's unicharset to Tesseract's if
// they differ.
// Creates CharSet object by reading the unicharset from the
// TessDatamanager, and mapping Cube's unicharset to Tesseract's if
// they differ.  Returns NULL on failure; the returned object is owned
// by the caller.
CharSet *CharSet::Create(TessdataManager *tessdata_manager,
                         UNICHARSET *tess_unicharset) {
  CharSet *char_set = new CharSet();
  if (char_set == NULL) {
    return NULL;
  }

  // First look for Cube's unicharset; if not there, use tesseract's
  bool cube_unicharset_exists;
  if (!(cube_unicharset_exists =
        tessdata_manager->SeekToStart(TESSDATA_CUBE_UNICHARSET)) &&
      !tessdata_manager->SeekToStart(TESSDATA_UNICHARSET)) {
    fprintf(stderr, "Cube ERROR (CharSet::Create): could not find "
            "either cube or tesseract unicharset\n");
    // BUGFIX: char_set was previously leaked on this error path
    delete char_set;
    return NULL;
  }
  FILE *charset_fp = tessdata_manager->GetDataFilePtr();
  if (!charset_fp) {
    fprintf(stderr, "Cube ERROR (CharSet::Create): could not load "
            "a unicharset\n");
    // BUGFIX: char_set was previously leaked on this error path
    delete char_set;
    return NULL;
  }

  // If we found a cube unicharset separate from tesseract's, load it and
  // map its unichars to tesseract's; if only one unicharset exists,
  // just load it.
  bool loaded;
  if (cube_unicharset_exists) {
    char_set->cube_unicharset_.load_from_file(charset_fp);
    loaded = tessdata_manager->SeekToStart(TESSDATA_CUBE_UNICHARSET);
    loaded = loaded && char_set->LoadSupportedCharList(
        tessdata_manager->GetDataFilePtr(), tess_unicharset);
    char_set->unicharset_ = &char_set->cube_unicharset_;
  } else {
    loaded = char_set->LoadSupportedCharList(charset_fp, NULL);
    char_set->unicharset_ = tess_unicharset;
  }
  if (!loaded) {
    delete char_set;
    return NULL;
  }

  char_set->init_ = true;
  return char_set;
}
// Load the list of supported chars from the given data file pointer.
// The file starts with a line holding the class count, followed by one
// line per class string. If tess_unicharset is non-NULL, also builds
// the map from each Cube class id to a tesseract unichar id, inserting
// any unichars tesseract does not already know. Returns false on any
// read, parse or allocation failure.
bool CharSet::LoadSupportedCharList(FILE *fp, UNICHARSET *tess_unicharset) {
  if (init_)
    return true;
  char str_line[256];
  // init hash table
  memset(hash_bin_size_, 0, sizeof(hash_bin_size_));
  // read the char count
  if (fgets(str_line, sizeof(str_line), fp) == NULL) {
    fprintf(stderr, "Cube ERROR (CharSet::InitMemory): could not "
            "read char count.\n");
    return false;
  }
  class_cnt_ = atoi(str_line);
  if (class_cnt_ < 2) {
    fprintf(stderr, "Cube ERROR (CharSet::InitMemory): invalid "
            "class count: %d\n", class_cnt_);
    return false;
  }
  // memory for class strings
  class_strings_ = new string_32*[class_cnt_];
  if (class_strings_ == NULL) {
    fprintf(stderr, "Cube ERROR (CharSet::InitMemory): could not "
            "allocate memory for class strings.\n");
    return false;
  }
  // Fix: zero the array so that ~CharSet() never deletes uninitialized
  // pointers if we bail out partway through the read loop below.
  memset(class_strings_, 0, class_cnt_ * sizeof(*class_strings_));
  // memory for unicharset map
  if (tess_unicharset) {
    unicharset_map_ = new int[class_cnt_];
    if (unicharset_map_ == NULL) {
      fprintf(stderr, "Cube ERROR (CharSet::InitMemory): could not "
              "allocate memory for unicharset map.\n");
      return false;
    }
  }
  // Read in character strings and add to hash table
  for (int class_id = 0; class_id < class_cnt_; class_id++) {
    // Read the class string
    if (fgets(str_line, sizeof(str_line), fp) == NULL) {
      fprintf(stderr, "Cube ERROR (CharSet::ReadAndHashStrings): "
              "could not read class string with class_id=%d.\n", class_id);
      return false;
    }
    // Terminate at space if any. NOTE(review): this assumes each line
    // has a space after the char (unicharset line format), which also
    // cuts off the trailing newline from fgets — confirm for all inputs.
    char *p = strchr(str_line, ' ');
    if (p != NULL)
      *p = '\0';
    // Convert to UTF32 and store
    string_32 str32;
    // Convert NULL to a space
    if (strcmp(str_line, "NULL") == 0) {
      strcpy(str_line, " ");
    }
    CubeUtils::UTF8ToUTF32(str_line, &str32);
    class_strings_[class_id] = new string_32(str32);
    if (class_strings_[class_id] == NULL) {
      fprintf(stderr, "Cube ERROR (CharSet::ReadAndHashStrings): could not "
              "allocate memory for class string with class_id=%d.\n", class_id);
      return false;
    }
    // Add to hash-table
    int hash_val = Hash(reinterpret_cast<const char_32 *>(str32.c_str()));
    if (hash_bin_size_[hash_val] >= kMaxHashSize) {
      fprintf(stderr, "Cube ERROR (CharSet::LoadSupportedCharList): hash "
              "table is full.\n");
      return false;
    }
    hash_bins_[hash_val][hash_bin_size_[hash_val]++] = class_id;
    if (tess_unicharset != NULL) {
      // Add class id to unicharset map, inserting the unichar into
      // tesseract's unicharset if it is not already present.
      UNICHAR_ID tess_id = tess_unicharset->unichar_to_id(str_line);
      if (tess_id == INVALID_UNICHAR_ID) {
        tess_unicharset->unichar_insert(str_line);
        tess_id = tess_unicharset->unichar_to_id(str_line);
      }
      ASSERT_HOST(tess_id != INVALID_UNICHAR_ID);
      unicharset_map_[class_id] = tess_id;
    }
  }
  return true;
}
} // tesseract
| 1080228-arabicocr11 | cube/char_set.cpp | C++ | asf20 | 6,136 |
/**********************************************************************
* File: lang_model.h
* Description: Declaration of the Language Model Edge Base Class
* Author: Ahmad Abdulkader
* Created: 2007
*
* (C) Copyright 2008, Google Inc.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
// The LanguageModel class abstracts a State machine that is modeled as a Trie
// structure. The state machine models the language being recognized by the OCR
// Engine
// This is an abstract class that is to be inherited by any language model
#ifndef LANG_MODEL_H
#define LANG_MODEL_H
#include "lang_mod_edge.h"
#include "char_altlist.h"
#include "char_set.h"
#include "tuning_params.h"
namespace tesseract {
// Abstract base class for language models. A language model is walked
// edge-by-edge during beam search; concrete subclasses supply the root,
// the fan-out edges and the character-class queries.
class LangModel {
 public:
  LangModel() {
    // All edge categories are enabled by default; callers can switch
    // individual categories off via the setters below.
    ood_enabled_ = true;
    numeric_enabled_ = true;
    word_list_enabled_ = true;
    punc_enabled_ = true;
  }
  virtual ~LangModel() {}
  // Returns an edge pointer to the Root
  virtual LangModEdge *Root() = 0;
  // Returns the edges that fan-out of the specified edge and their count
  virtual LangModEdge **GetEdges(CharAltList *alt_list,
                                 LangModEdge *parent_edge,
                                 int *edge_cnt) = 0;
  // Returns whether a sequence of 32-bit characters is valid within this
  // language model or net. An EndOfWord flag is specified. If true, the
  // sequence has to end on a valid word. The function also optionally
  // returns the list of language model edges traversed to parse the string.
  virtual bool IsValidSequence(const char_32 *str, bool eow_flag,
                               LangModEdge **edge_array = NULL) = 0;
  // Character-class queries, implemented by subclasses.
  virtual bool IsLeadingPunc(char_32 ch) = 0;
  virtual bool IsTrailingPunc(char_32 ch) = 0;
  virtual bool IsDigit(char_32 ch) = 0;
  // accessor functions for the category enable flags
  inline bool OOD() { return ood_enabled_; }
  inline bool Numeric() { return numeric_enabled_; }
  inline bool WordList() { return word_list_enabled_; }
  inline bool Punc() { return punc_enabled_; }
  inline void SetOOD(bool ood) { ood_enabled_ = ood; }
  inline void SetNumeric(bool numeric) { numeric_enabled_ = numeric; }
  inline void SetWordList(bool word_list) { word_list_enabled_ = word_list; }
  inline void SetPunc(bool punc_enabled) { punc_enabled_ = punc_enabled; }
 protected:
  // Enable flags for the different edge categories: OOD, numeric,
  // dictionary-word and punctuation paths respectively.
  bool ood_enabled_;
  bool numeric_enabled_;
  bool word_list_enabled_;
  bool punc_enabled_;
};
}
#endif // LANG_MODEL_H
| 1080228-arabicocr11 | cube/lang_model.h | C++ | asf20 | 3,004 |
/**********************************************************************
* File: cube_search_object.h
* Description: Declaration of the Cube Search Object Class
* Author: Ahmad Abdulkader
* Created: 2007
*
* (C) Copyright 2008, Google Inc.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
// The CubeSearchObject class represents a char_samp (a word bitmap) that is
// being searched for characters (or recognizable entities).
// The class detects the connected components and performs an oversegmentation
// on each ConComp. The result of which is a list of segments that are ordered
// in reading order.
// The class provides methods that inquire about the number of segments, the
// CharSamp corresponding to any segment range and the recognition results
// of any segment range
// An object of Class CubeSearchObject is used by the BeamSearch algorithm
// to recognize a CharSamp into a list of word alternates
#ifndef CUBE_SEARCH_OBJECT_H
#define CUBE_SEARCH_OBJECT_H
#include "search_object.h"
#include "char_samp.h"
#include "conv_net_classifier.h"
#include "cube_reco_context.h"
#include "allheaders.h"
namespace tesseract {
// A SearchObject over one CharSamp (word bitmap): owns the ordered list
// of over-segmentation segments and caches recognition results, crops
// and space/no-space costs for segment ranges queried by the beam search.
class CubeSearchObject : public SearchObject {
 public:
  CubeSearchObject(CubeRecoContext *cntxt, CharSamp *samp);
  ~CubeSearchObject();
  // returns the Segmentation Point count of the CharSamp owned by the class
  int SegPtCnt();
  // Recognize the set of segments given by the specified range and return
  // a list of possible alternate answers
  CharAltList * RecognizeSegment(int start_pt, int end_pt);
  // Returns the CharSamp corresponding to the specified segment range
  CharSamp *CharSample(int start_pt, int end_pt);
  // Returns a leptonica box corresponding to the specified segment range
  Box *CharBox(int start_pt, int end_pt);
  // Returns the cost of having a space before the specified segmentation pt
  int SpaceCost(int seg_pt);
  // Returns the cost of not having a space before the specified
  // segmentation pt
  int NoSpaceCost(int seg_pt);
  // Returns the cost of not having any spaces within the specified range
  // of segmentation points
  int NoSpaceCost(int seg_pt, int end_pt);

 private:
  // Maximum reasonable segment count
  static const int kMaxSegmentCnt = 128;
  // Use cropped samples
  static const bool kUseCroppedChars;
  // reading order flag (true for right-to-left scripts)
  bool rtl_;
  // cached dimensions of char samp
  int left_;
  int itop_;
  int wid_;
  int hgt_;
  // minimum and maximum and possible inter-segment gaps for spaces
  int min_spc_gap_;
  int max_spc_gap_;
  // initialization flag (set once Init() has succeeded)
  bool init_;
  // maximum segments per character: Cached from tuning parameters object
  int max_seg_per_char_;
  // char sample to be processed
  CharSamp *samp_;
  // segment count
  int segment_cnt_;
  // segments of the processed char samp
  ConComp **segments_;
  // Cache data members:
  // There are two caches kept; a CharSamp cache and a CharAltList cache
  // Each is a 2-D array of CharSamp and CharAltList pointers respectively
  // hence the triple pointer.
  CharAltList ***reco_cache_;
  CharSamp ***samp_cache_;
  // Cached costs of space and no-space after every segment. Computed only
  // in phrase mode
  int *space_cost_;
  int *no_space_cost_;
  // init and allocate variables, perform segmentation
  bool Init();
  // Cleanup
  void Cleanup();
  // Perform segmentation of the bitmap by detecting connected components,
  // segmenting each connected component using windowed vertical pixel density
  // histogram and sorting the resulting segments in reading order
  // Returns true on success
  bool Segment();
  // validate the segment ranges: the range must be non-empty, lie within
  // the segment list, and not span more than max_seg_per_char_ segments.
  // start_pt may be -1 (presumably denoting the position before the first
  // segment -- confirm against callers).
  inline bool IsValidSegmentRange(int start_pt, int end_pt) {
    return (end_pt > start_pt && start_pt >= -1 && start_pt < segment_cnt_ &&
            end_pt >= 0 && end_pt <= segment_cnt_ &&
            end_pt <= (start_pt + max_seg_per_char_));
  }
  // computes the space and no space costs at gaps between segments
  // return true on success
  bool ComputeSpaceCosts();
};
}
#endif // CUBE_SEARCH_OBJECT_H
| 1080228-arabicocr11 | cube/cube_search_object.h | C++ | asf20 | 4,646 |
/**********************************************************************
* File: char_samp.h
* Description: Declaration of a Character Bitmap Sample Class
* Author: Ahmad Abdulkader
* Created: 2007
*
* (C) Copyright 2008, Google Inc.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
// The CharSamp inherits the Bmp8 class that represents images of
// words, characters and segments throughout Cube
// CharSamp adds more data members to hold the physical location of the image
// in a page, page number in a book if available.
// It also holds the label (GT) of the image that might correspond to a single
// character or a word
// It also provides methods for segmenting, scaling and cropping of the sample
#ifndef CHAR_SAMP_H
#define CHAR_SAMP_H
#include <stdlib.h>
#include <stdio.h>
#include <string>
#include "bmp_8.h"
#include "string_32.h"
namespace tesseract {
// A bitmap sample (word, character or segment) with page position,
// normalized metrics and an optional 32-bit label (ground truth).
class CharSamp : public Bmp8 {
 public:
  CharSamp();
  CharSamp(int wid, int hgt);
  CharSamp(int left, int top, int wid, int hgt);
  ~CharSamp();
  // accessor methods
  unsigned short Left() const { return left_; }
  unsigned short Right() const { return left_ + wid_; }
  unsigned short Top() const { return top_; }
  unsigned short Bottom() const { return top_ + hgt_; }
  unsigned short Page() const { return page_; }
  unsigned short NormTop() const { return norm_top_; }
  unsigned short NormBottom() const { return norm_bottom_; }
  unsigned short NormAspectRatio() const { return norm_aspect_ratio_; }
  unsigned short FirstChar() const { return first_char_; }
  unsigned short LastChar() const { return last_char_; }
  // Returns the label as a single char_32, or 0 if there is no label
  // or the label is not exactly one character long.
  char_32 Label() const {
    if (label32_ == NULL || LabelLen() != 1) {
      return 0;
    }
    return label32_[0];
  }
  char_32 * StrLabel() const { return label32_; }
  string stringLabel() const;
  void SetLeft(unsigned short left) { left_ = left; }
  void SetTop(unsigned short top) { top_ = top; }
  void SetPage(unsigned short page) { page_ = page; }
  // Sets the label to the specified single character.
  void SetLabel(char_32 label) {
    if (label32_ != NULL) {
      delete []label32_;
    }
    label32_ = new char_32[2];
    if (label32_ != NULL) {
      label32_[0] = label;
      label32_[1] = 0;
    }
  }
  // Sets the label to a copy of the specified NUL-terminated 32-bit
  // string, stripping a leading byte order mark if present.
  void SetLabel(const char_32 *label32) {
    if (label32_ != NULL) {
      delete []label32_;
      label32_ = NULL;
    }
    if (label32 != NULL) {
      // remove any byte order marks if any
      if (label32[0] == 0xfeff) {
        label32++;
      }
      int len = LabelLen(label32);
      label32_ = new char_32[len + 1];
      if (label32_ != NULL) {
        memcpy(label32_, label32, len * sizeof(*label32));
        label32_[len] = 0;
      }
    }
  }
  void SetLabel(string str);
  void SetNormTop(unsigned short norm_top) { norm_top_ = norm_top; }
  void SetNormBottom(unsigned short norm_bottom) {
    norm_bottom_ = norm_bottom;
  }
  void SetNormAspectRatio(unsigned short norm_aspect_ratio) {
    norm_aspect_ratio_ = norm_aspect_ratio;
  }
  void SetFirstChar(unsigned short first_char) {
    first_char_ = first_char;
  }
  void SetLastChar(unsigned short last_char) {
    last_char_ = last_char;
  }
  // Saves the charsamp to a dump file
  bool Save2CharDumpFile(FILE *fp) const;
  // Crops the underlying image and returns a new CharSamp with the
  // same character information but new dimensions. Warning: does not
  // necessarily set the normalized top and bottom correctly since
  // those depend on its location within the word (or CubeSearchObject).
  CharSamp *Crop();
  // Computes the connected components of the char sample
  ConComp **Segment(int *seg_cnt, bool right_2_left, int max_hist_wnd,
                    int min_con_comp_size) const;
  // returns a copy of the charsamp that is scaled to the
  // specified width and height
  CharSamp *Scale(int wid, int hgt, bool isotropic = true);
  // returns a Clone of the charsample
  CharSamp *Clone() const;
  // computes the features corresponding to the char sample
  bool ComputeFeatures(int conv_grid_size, float *features);
  // Load a Char Samp from a dump file
  static CharSamp *FromCharDumpFile(CachedFile *fp);
  static CharSamp *FromCharDumpFile(FILE *fp);
  static CharSamp *FromCharDumpFile(unsigned char **raw_data);
  static CharSamp *FromRawData(int left, int top, int wid, int hgt,
                               unsigned char *data);
  static CharSamp *FromConComps(ConComp **concomp_array,
                                int strt_concomp, int seg_flags_size,
                                int *seg_flags, bool *left_most,
                                bool *right_most, int word_hgt);
  // Number of auxiliary (non-bitmap) features per sample.
  static int AuxFeatureCnt() { return (5); }
  // Return the length of the label string
  int LabelLen() const { return LabelLen(label32_); }
  // Returns the length of a NUL-terminated 32-bit string; NULL yields 0.
  // Fix: the previous form, while (label32[++len] != 0);, never examined
  // index 0, so an empty string returned >= 1 and the loop read past the
  // terminator. Scanning from index 0 is identical for non-empty strings.
  static int LabelLen(const char_32 *label32) {
    if (label32 == NULL) {
      return 0;
    }
    int len = 0;
    while (label32[len] != 0) {
      len++;
    }
    return len;
  }

 private:
  // label (GT) of the sample; NUL-terminated, owned by this object
  char_32 * label32_;
  unsigned short page_;
  unsigned short left_;
  unsigned short top_;
  // top of sample normalized to a word height of 255
  unsigned short norm_top_;
  // bottom of sample normalized to a word height of 255
  unsigned short norm_bottom_;
  // 255 * ratio of character width to (width + height)
  unsigned short norm_aspect_ratio_;
  unsigned short first_char_;
  unsigned short last_char_;
};
}
#endif // CHAR_SAMP_H
| 1080228-arabicocr11 | cube/char_samp.h | C++ | asf20 | 5,908 |
/**********************************************************************
* File: feature_bmp.cpp
* Description: Implementation of the Bitmap Feature Class
* Author: Ahmad Abdulkader
* Created: 2007
*
* (C) Copyright 2008, Google Inc.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <string>
#include "feature_base.h"
#include "feature_bmp.h"
#include "cube_utils.h"
#include "const.h"
#include "char_samp.h"
namespace tesseract {
// Constructor: caches the convolution grid size from the tuning
// parameters; it is used below to scale samples when computing the
// bitmap feature vector.
FeatureBmp::FeatureBmp(TuningParams *params)
  :FeatureBase(params) {
  conv_grid_size_ = params->ConvGridSize();
}
// Destructor: nothing to release; this class owns no resources.
FeatureBmp::~FeatureBmp() {
}
// Render a visualization of the features to a CharSamp.
// This is mainly used by visual-debuggers.
// For bitmap features the visualization is simply the sample scaled
// down to the convolution grid dimensions.
CharSamp *FeatureBmp::ComputeFeatureBitmap(CharSamp *char_samp) {
  return char_samp->Scale(conv_grid_size_, conv_grid_size_);
}
// Compute the features for a given CharSamp.
// Delegates to CharSamp::ComputeFeatures with the configured grid size;
// the caller supplies a features buffer of the appropriate length.
bool FeatureBmp::ComputeFeatures(CharSamp *char_samp, float *features) {
  return char_samp->ComputeFeatures(conv_grid_size_, features);
}
}
| 1080228-arabicocr11 | cube/feature_bmp.cpp | C++ | asf20 | 1,647 |
/**********************************************************************
* File: feature_chebyshev.cpp
* Description: Implementation of the Chebyshev coefficients Feature Class
* Author: Ahmad Abdulkader
* Created: 2008
*
* (C) Copyright 2008, Google Inc.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string>
#include <vector>
#include <algorithm>
#include "feature_base.h"
#include "feature_chebyshev.h"
#include "cube_utils.h"
#include "const.h"
#include "char_samp.h"
namespace tesseract {
// Constructor: no feature-specific state is cached beyond what
// FeatureBase stores.
FeatureChebyshev::FeatureChebyshev(TuningParams *params)
    : FeatureBase(params) {
}
// Destructor: nothing to release; this class owns no resources.
FeatureChebyshev::~FeatureChebyshev() {
}
// Render a visualization of the features to a CharSamp.
// This is mainly used by visual-debuggers.
// Chebyshev features have no dedicated rendering, so the sample itself
// is returned unmodified.
CharSamp *FeatureChebyshev::ComputeFeatureBitmap(CharSamp *char_samp) {
  return char_samp;
}
// Compute Chebyshev coefficients for the specified vector
void FeatureChebyshev::ChebyshevCoefficients(const vector<float> &input,
int coeff_cnt, float *coeff) {
// re-sample function
int input_range = (input.size() - 1);
vector<float> resamp(coeff_cnt);
for (int samp_idx = 0; samp_idx < coeff_cnt; samp_idx++) {
// compute sampling position
float samp_pos = input_range *
(1 + cos(M_PI * (samp_idx + 0.5) / coeff_cnt)) / 2;
// interpolate
int samp_start = static_cast<int>(samp_pos);
int samp_end = static_cast<int>(samp_pos + 0.5);
float func_delta = input[samp_end] - input[samp_start];
resamp[samp_idx] = input[samp_start] +
((samp_pos - samp_start) * func_delta);
}
// compute the coefficients
float normalizer = 2.0 / coeff_cnt;
for (int coeff_idx = 0; coeff_idx < coeff_cnt; coeff_idx++, coeff++) {
double sum = 0.0;
for (int samp_idx = 0; samp_idx < coeff_cnt; samp_idx++) {
sum += resamp[samp_idx] * cos(M_PI * coeff_idx * (samp_idx + 0.5) /
coeff_cnt);
}
(*coeff) = (normalizer * sum);
}
}
// Compute the features of a given CharSamp.
// The feature vector is the concatenated Chebyshev coefficients of the
// sample's four side profiles (see ComputeChebyshevCoefficients).
bool FeatureChebyshev::ComputeFeatures(CharSamp *char_samp, float *features) {
  return ComputeChebyshevCoefficients(char_samp, features);
}
// Compute the Chebyshev coefficients of a given CharSamp.
// Builds four normalized profiles of the sample (left, right, top and
// bottom extents of zero-valued (ink) pixels, in word-height
// coordinates) and writes the Chebyshev coefficients of each profile,
// kChebychevCoefficientCnt per profile, into consecutive sections of
// the features buffer. Returns false if the sample's normalized bottom
// is not positive (word height cannot be recovered).
bool FeatureChebyshev::ComputeChebyshevCoefficients(CharSamp *char_samp,
                                                    float *features) {
  if (char_samp->NormBottom() <= 0) {
    return false;
  }
  unsigned char *raw_data = char_samp->RawData();
  int stride = char_samp->Stride();
  // compute the height of the word (NormBottom() is the sample bottom
  // scaled to a word height of 255, so invert that normalization)
  int word_hgt = (255 * (char_samp->Top() + char_samp->Height()) /
                  char_samp->NormBottom());
  // compute left & right profiles
  vector<float> left_profile(word_hgt, 0.0);
  vector<float> right_profile(word_hgt, 0.0);
  unsigned char *line_data = raw_data;
  for (int y = 0; y < char_samp->Height(); y++, line_data += stride) {
    // find the leftmost and rightmost zero-valued pixels in this row
    int min_x = char_samp->Width();
    int max_x = -1;
    for (int x = 0; x < char_samp->Width(); x++) {
      if (line_data[x] == 0) {
        UpdateRange(x, &min_x, &max_x);
      }
    }
    // rows with no ink pixels get a profile value of 0
    left_profile[char_samp->Top() + y] =
        1.0 * (min_x == char_samp->Width() ? 0 : (min_x + 1)) /
        char_samp->Width();
    right_profile[char_samp->Top() + y] =
        1.0 * (max_x == -1 ? 0 : char_samp->Width() - max_x) /
        char_samp->Width();
  }
  // compute top and bottom profiles (one column at a time)
  vector<float> top_profile(char_samp->Width(), 0);
  vector<float> bottom_profile(char_samp->Width(), 0);
  for (int x = 0; x < char_samp->Width(); x++) {
    int min_y = word_hgt;
    int max_y = -1;
    line_data = raw_data;
    for (int y = 0; y < char_samp->Height(); y++, line_data += stride) {
      if (line_data[x] == 0) {
        UpdateRange(y + char_samp->Top(), &min_y, &max_y);
      }
    }
    top_profile[x] = 1.0 * (min_y == word_hgt ? 0 : (min_y + 1)) / word_hgt;
    bottom_profile[x] = 1.0 * (max_y == -1 ? 0 : (word_hgt - max_y)) / word_hgt;
  }
  // compute the chebyshev coefficients of each profile; each profile's
  // coefficients occupy a consecutive block of the output buffer
  ChebyshevCoefficients(left_profile, kChebychevCoefficientCnt, features);
  ChebyshevCoefficients(top_profile, kChebychevCoefficientCnt,
                        features + kChebychevCoefficientCnt);
  ChebyshevCoefficients(right_profile, kChebychevCoefficientCnt,
                        features + (2 * kChebychevCoefficientCnt));
  ChebyshevCoefficients(bottom_profile, kChebychevCoefficientCnt,
                        features + (3 * kChebychevCoefficientCnt));
  return true;
}
} // namespace tesseract
| 1080228-arabicocr11 | cube/feature_chebyshev.cpp | C++ | asf20 | 5,242 |
/**********************************************************************
* File: const.h
 * Description: Definitions of constants used by Cube
* Author: Ahmad Abdulkader
* Created: 2007
*
* (C) Copyright 2008, Google Inc.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
#ifndef CUBE_CONST_H
#define CUBE_CONST_H
// Scale used to normalize a log-prob to a cost
#define PROB2COST_SCALE 4096.0
// Maximum possible cost (-log prob of MIN_PROB)
#define MIN_PROB_COST 65536
// Probability corresponding to the max cost MIN_PROB_COST
#define MIN_PROB 0.000000113
// Worst possible cost (returned on failure)
#define WORST_COST 0x40000
// Oversegmentation hysteresis thresholds
#define HIST_WND_RATIO 0.1f
#define SEG_PT_WND_RATIO 0.1f
#ifdef _WIN32
#ifdef __GNUC__
#include <climits>
#endif
#endif
#endif // CUBE_CONST_H
| 1080228-arabicocr11 | cube/cube_const.h | C | asf20 | 1,442 |
/**********************************************************************
* File: char_samp_enum.h
* Description: Declaration of a Character Set Class
* Author: Ahmad Abdulkader
* Created: 2007
*
* (C) Copyright 2008, Google Inc.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
// The CharSet class encapsulates the list of 32-bit strings/characters that
// Cube supports for a specific language. The char set is loaded from the
// .unicharset file corresponding to a specific language
// Each string has a corresponding int class-id that gets used throughout Cube
// The class provides pass back and forth conversion between the class-id
// and its corresponding 32-bit string. This is done using a hash table that
// maps the string to the class id.
#ifndef CHAR_SET_H
#define CHAR_SET_H
#include <string.h>
#include <string>
#include <algorithm>
#include "string_32.h"
#include "tessdatamanager.h"
#include "unicharset.h"
#include "cube_const.h"
namespace tesseract {
// The set of 32-bit strings/characters Cube supports for a language,
// with hash-based lookup from string to class id and an optional map
// from class id to tesseract unichar id.
class CharSet {
 public:
  CharSet();
  ~CharSet();
  // Returns true if Cube is sharing Tesseract's unicharset.
  inline bool SharedUnicharset() { return (unicharset_map_ == NULL); }
  // Returns the class id corresponding to a 32-bit string. Returns -1
  // if the string is not supported. This is done by hashing the
  // string and then looking up the string in the hash-bin if there
  // are collisions.
  inline int ClassID(const char_32 *str) const {
    int hash_val = Hash(str);
    if (hash_bin_size_[hash_val] == 0)
      return -1;
    // scan the bin for an exact string match
    for (int bin = 0; bin < hash_bin_size_[hash_val]; bin++) {
      if (class_strings_[hash_bins_[hash_val][bin]]->compare(str) == 0)
        return hash_bins_[hash_val][bin];
    }
    return -1;
  }
  // Same as above but using a 32-bit char instead of a string
  inline int ClassID(char_32 ch) const {
    int hash_val = Hash(ch);
    if (hash_bin_size_[hash_val] == 0)
      return -1;
    // only single-character class strings can match
    for (int bin = 0; bin < hash_bin_size_[hash_val]; bin++) {
      if ((*class_strings_[hash_bins_[hash_val][bin]])[0] == ch &&
          class_strings_[hash_bins_[hash_val][bin]]->length() == 1) {
        return hash_bins_[hash_val][bin];
      }
    }
    return -1;
  }
  // Retrieve the unicharid in Tesseract's unicharset corresponding
  // to a 32-bit string. When Tesseract and Cube share the same
  // unicharset, this will just be the class id.
  inline int UnicharID(const char_32 *str) const {
    int class_id = ClassID(str);
    if (class_id == INVALID_UNICHAR_ID)
      return INVALID_UNICHAR_ID;
    int unichar_id;
    if (unicharset_map_)
      unichar_id = unicharset_map_[class_id];
    else
      unichar_id = class_id;
    return unichar_id;
  }
  // Same as above but using a 32-bit char instead of a string
  inline int UnicharID(char_32 ch) const {
    int class_id = ClassID(ch);
    if (class_id == INVALID_UNICHAR_ID)
      return INVALID_UNICHAR_ID;
    int unichar_id;
    if (unicharset_map_)
      unichar_id = unicharset_map_[class_id];
    else
      unichar_id = class_id;
    return unichar_id;
  }
  // Returns the 32-bit string corresponding to a class id, or NULL if
  // the class id is out of range.
  inline const char_32 * ClassString(int class_id) const {
    if (class_id < 0 || class_id >= class_cnt_) {
      return NULL;
    }
    return reinterpret_cast<const char_32 *>(class_strings_[class_id]->c_str());
  }
  // Returns the count of supported strings
  inline int ClassCount() const { return class_cnt_; }
  // Creates CharSet object by reading the unicharset from the
  // TessDatamanager, and mapping Cube's unicharset to Tesseract's if
  // they differ.
  static CharSet *Create(TessdataManager *tessdata_manager,
                         UNICHARSET *tess_unicharset);
  // Return the UNICHARSET cube is using for recognition internally --
  // ClassId() returns unichar_id's in this unicharset.
  UNICHARSET *InternalUnicharset() { return unicharset_; }

 private:
  // Hash table configuration params. Determined empirically on
  // the supported languages so far (Eng, Ara, Hin). Might need to be
  // tuned for speed when more languages are supported
  static const int kHashBins = 3001;
  static const int kMaxHashSize = 16;
  // Using djb2 hashing function to hash a 32-bit string
  // introduced in http://www.cse.yorku.ca/~oz/hash.html
  static inline int Hash(const char_32 *str) {
    unsigned long hash = 5381;
    int c;
    while ((c = *str++))
      hash = ((hash << 5) + hash) + c;
    return (hash%kHashBins);
  }
  // Same as above but for a single char
  static inline int Hash(char_32 ch) {
    char_32 b[2];
    b[0] = ch;
    b[1] = 0;
    return Hash(b);
  }
  // Load the list of supported chars from the given data file
  // pointer. If tess_unicharset is non-NULL, mapping each Cube class
  // id to a tesseract unicharid.
  bool LoadSupportedCharList(FILE *fp, UNICHARSET *tess_unicharset);
  // class count
  int class_cnt_;
  // hash-bin sizes array (number of class ids stored in each bin)
  int hash_bin_size_[kHashBins];
  // hash bins (class ids, up to kMaxHashSize per bin)
  int hash_bins_[kHashBins][kMaxHashSize];
  // supported strings array
  string_32 **class_strings_;
  // map from class id to secondary (tesseract's) unicharset's ids
  int *unicharset_map_;
  // A unicharset which is filled in with a Tesseract-style UNICHARSET for
  // cube's data if our unicharset is different from tesseract's.
  UNICHARSET cube_unicharset_;
  // This points to either the tess_unicharset we're passed or cube_unicharset_,
  // depending upon whether we just have one unicharset or one for each
  // tesseract and cube, respectively.
  UNICHARSET *unicharset_;
  // has the char set been initialized flag
  bool init_;
};
}
#endif // CHAR_SET_H
| 1080228-arabicocr11 | cube/char_set.h | C++ | asf20 | 6,224 |
/**********************************************************************
* File: char_samp_enum.h
* Description: Declaration of a Character Sample Enumerator Class
* Author: Ahmad Abdulkader
* Created: 2007
*
* (C) Copyright 2008, Google Inc.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
// The CharSampEnum class provides the base class for CharSamp class
// Enumerators. This is typically used to implement dump file readers
#ifndef CHARSAMP_ENUM_H
#define CHARSAMP_ENUM_H
#include "char_samp.h"
namespace tesseract {
// Abstract callback interface for enumerating CharSamp objects,
// typically implemented by dump-file readers.
class CharSampEnum {
 public:
  CharSampEnum();
  virtual ~CharSampEnum();
  // Invoked once per enumerated sample with a progress fraction;
  // the return value presumably signals whether to continue the
  // enumeration -- confirm against callers.
  virtual bool EnumCharSamp(CharSamp *char_samp, float progress) = 0;
};
}
#endif // CHARSAMP_ENUM_H
| 1080228-arabicocr11 | cube/char_samp_enum.h | C++ | asf20 | 1,305 |
/**********************************************************************
* File: char_altlist.h
* Description: Declaration of a Character Alternate List Class
* Author: Ahmad Abdulkader
* Created: 2007
*
* (C) Copyright 2008, Google Inc.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
#ifndef CHAR_ALT_LIST_H
#define CHAR_ALT_LIST_H
// The CharAltList class holds the list of class alternates returned from
// a character classifier. Each alternate represents a class ID.
// It inherits from the AltList class.
// The CharAltList owns a CharSet object that maps a class-id to a string.
#include "altlist.h"
#include "char_set.h"
namespace tesseract {
// List of alternate class-ids (with costs) returned by a character
// classifier. Inherits the generic alternate-list machinery from AltList.
class CharAltList : public AltList {
 public:
  CharAltList(const CharSet *char_set, int max_alt = kMaxCharAlt);
  ~CharAltList();
  // Sort the alternate list based on cost
  void Sort();
  // insert a new alternate with the specified class-id, cost and tag
  bool Insert(int class_id, int cost, void *tag = NULL);
  // returns the cost of a specific class ID
  // Falls back to WORST_COST for out-of-range ids or when the cost
  // lookup has not been allocated.
  inline int ClassCost(int class_id) const {
    if (class_id_cost_ == NULL ||
        class_id < 0 ||
        class_id >= char_set_->ClassCount()) {
      return WORST_COST;
    }
    return class_id_cost_[class_id];
  }
  // returns the alternate class-id corresponding to an alternate index
  // NOTE(review): no bounds check on alt_idx; callers must pass a valid
  // index (< AltCount).
  inline int Alt(int alt_idx) const { return class_id_alt_[alt_idx]; }
  // set the cost of a certain alternate
  // Updates both the per-alternate cost array (alt_cost_, inherited from
  // AltList) and the per-class-id cost lookup so that ClassCost stays
  // consistent.
  void SetAltCost(int alt_idx, int cost) {
    alt_cost_[alt_idx] = cost;
    class_id_cost_[class_id_alt_[alt_idx]] = cost;
  }

 private:
  // character set object. Passed at construction time
  const CharSet *char_set_;
  // array of alternate class-ids, indexed by alternate index
  int *class_id_alt_;
  // cost lookup indexed by class-id (not by alternate index)
  int *class_id_cost_;
  // default max count of alternates
  static const int kMaxCharAlt = 256;
};
}
#endif // CHAR_ALT_LIST_H
| 1080228-arabicocr11 | cube/char_altlist.h | C++ | asf20 | 2,463 |
/**********************************************************************
* File: cube_tuning_params.cpp
* Description: Implementation of the CubeTuningParameters Class
* Author: Ahmad Abdulkader
* Created: 2007
*
* (C) Copyright 2008, Google Inc.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
#include <string>
#include <vector>
#include "cube_tuning_params.h"
#include "tuning_params.h"
#include "cube_utils.h"
namespace tesseract {
// Constructor: sets every tuning parameter to its built-in default value.
// These defaults are normally overwritten by Load() when the object is
// built through Create().
CubeTuningParams::CubeTuningParams() {
  reco_wgt_ = 1.0;
  size_wgt_ = 1.0;
  char_bigrams_wgt_ = 1.0;
  word_unigrams_wgt_ = 0.0;
  max_seg_per_char_ = 8;
  beam_width_ = 32;
  tp_classifier_ = NN;
  tp_feat_ = BMP;
  conv_grid_size_ = 32;
  hist_wind_wid_ = 0;
  max_word_aspect_ratio_ = 10.0;
  min_space_height_ratio_ = 0.2;
  max_space_height_ratio_ = 0.3;
  min_con_comp_size_ = 0;
  combiner_run_thresh_ = 1.0;
  combiner_classifier_thresh_ = 0.5;
  ood_wgt_ = 1.0;
  num_wgt_ = 1.0;
}

// Destructor: nothing to release; all members are plain values.
CubeTuningParams::~CubeTuningParams() {
}
// Factory: builds a CubeTuningParams object and loads its values from
// "<data_file_path><lang>.cube.params". Returns NULL (after logging to
// stderr) if allocation or loading fails; the caller owns the returned
// object.
CubeTuningParams *CubeTuningParams::Create(const string &data_file_path,
                                           const string &lang) {
  CubeTuningParams *params = new CubeTuningParams();
  if (params == NULL) {
    fprintf(stderr, "Cube ERROR (CubeTuningParams::Create): unable to "
            "allocate new tuning params object\n");
    return NULL;
  }
  // compose the language-specific parameter file name
  string params_path = data_file_path;
  params_path += lang;
  params_path += ".cube.params";
  if (params->Load(params_path) == false) {
    fprintf(stderr, "Cube ERROR (CubeTuningParams::Create): unable to "
            "load tuning parameters from %s\n", params_path.c_str());
    delete params;
    params = NULL;
  }
  return params;
}
// Loads the params file
// Loads the params file
// The file holds one "Name=Value" pair per line (lines split on "\r\n").
// Numeric values are parsed with sscanf("%lf"); symbolic values
// (Classifier, FeatureType) are matched by name. Requires at least 8
// entries. Returns false, with a message on stderr, on any read, format,
// or unknown-parameter error.
bool CubeTuningParams::Load(string tuning_params_file) {
  // load the string into memory
  string param_str;
  if (CubeUtils::ReadFileToString(tuning_params_file, &param_str) == false) {
    fprintf(stderr, "Cube ERROR (CubeTuningParams::Load): unable to read "
            "file %s\n", tuning_params_file.c_str());
    return false;
  }
  // split into lines
  vector<string> str_vec;
  CubeUtils::SplitStringUsing(param_str, "\r\n", &str_vec);
  if (str_vec.size() < 8) {
    fprintf(stderr, "Cube ERROR (CubeTuningParams::Load): number of rows "
            "in parameter file is too low\n");
    return false;
  }
  // for all entries
  // (cast avoids a signed/unsigned comparison with vector::size())
  for (int entry = 0; entry < static_cast<int>(str_vec.size()); entry++) {
    // tokenize
    vector<string> str_tok;
    // should be only two tokens
    CubeUtils::SplitStringUsing(str_vec[entry], "=", &str_tok);
    if (str_tok.size() != 2) {
      fprintf(stderr, "Cube ERROR (CubeTuningParams::Load): invalid format in "
              "line: %s.\n", str_vec[entry].c_str());
      return false;
    }
    // Parse a numeric value only when the token looks like a number;
    // symbolic values (classifier/feature names) are handled below.
    double val = 0;
    char peekchar = (str_tok[1].c_str())[0];
    if ((peekchar >= '0' && peekchar <= '9') ||
        peekchar == '-' || peekchar == '+' ||
        peekchar == '.') {
      // read the value
      if (sscanf(str_tok[1].c_str(), "%lf", &val) != 1) {
        fprintf(stderr, "Cube ERROR (CubeTuningParams::Load): invalid format "
                "in line: %s.\n", str_vec[entry].c_str());
        return false;
      }
    }
    // token type
    if (str_tok[0] == "RecoWgt") {
      reco_wgt_ = val;
    } else if (str_tok[0] == "SizeWgt") {
      size_wgt_ = val;
    } else if (str_tok[0] == "CharBigramsWgt") {
      char_bigrams_wgt_ = val;
    } else if (str_tok[0] == "WordUnigramsWgt") {
      word_unigrams_wgt_ = val;
    } else if (str_tok[0] == "MaxSegPerChar") {
      max_seg_per_char_ = static_cast<int>(val);
    } else if (str_tok[0] == "BeamWidth") {
      beam_width_ = static_cast<int>(val);
    } else if (str_tok[0] == "Classifier") {
      if (str_tok[1] == "NN") {
        tp_classifier_ = TuningParams::NN;
      } else if (str_tok[1] == "HYBRID_NN") {
        tp_classifier_ = TuningParams::HYBRID_NN;
      } else {
        fprintf(stderr, "Cube ERROR (CubeTuningParams::Load): invalid "
                "classifier type in line: %s.\n", str_vec[entry].c_str());
        return false;
      }
    } else if (str_tok[0] == "FeatureType") {
      if (str_tok[1] == "BMP") {
        tp_feat_ = TuningParams::BMP;
      } else if (str_tok[1] == "CHEBYSHEV") {
        tp_feat_ = TuningParams::CHEBYSHEV;
      } else if (str_tok[1] == "HYBRID") {
        tp_feat_ = TuningParams::HYBRID;
      } else {
        fprintf(stderr, "Cube ERROR (CubeTuningParams::Load): invalid feature "
                "type in line: %s.\n", str_vec[entry].c_str());
        return false;
      }
    } else if (str_tok[0] == "ConvGridSize") {
      conv_grid_size_ = static_cast<int>(val);
    } else if (str_tok[0] == "HistWindWid") {
      // explicit cast: hist_wind_wid_ is an int (see Save's "%d"), matching
      // the other integer parameters above
      hist_wind_wid_ = static_cast<int>(val);
    } else if (str_tok[0] == "MinConCompSize") {
      // explicit cast for the same reason as HistWindWid
      min_con_comp_size_ = static_cast<int>(val);
    } else if (str_tok[0] == "MaxWordAspectRatio") {
      max_word_aspect_ratio_ = val;
    } else if (str_tok[0] == "MinSpaceHeightRatio") {
      min_space_height_ratio_ = val;
    } else if (str_tok[0] == "MaxSpaceHeightRatio") {
      max_space_height_ratio_ = val;
    } else if (str_tok[0] == "CombinerRunThresh") {
      combiner_run_thresh_ = val;
    } else if (str_tok[0] == "CombinerClassifierThresh") {
      combiner_classifier_thresh_ = val;
    } else if (str_tok[0] == "OODWgt") {
      ood_wgt_ = val;
    } else if (str_tok[0] == "NumWgt") {
      num_wgt_ = val;
    } else {
      fprintf(stderr, "Cube ERROR (CubeTuningParams::Load): unknown parameter "
              "in line: %s.\n", str_vec[entry].c_str());
      return false;
    }
  }
  return true;
}
// Save the parameters to a file
// Writes one "Name=Value" line per parameter in the format parsed by
// Load(). Note: only the numeric parameters are written; Classifier and
// FeatureType are not saved. Returns false if the file cannot be opened
// for writing.
bool CubeTuningParams::Save(string file_name) {
  FILE *params_file = fopen(file_name.c_str(), "wb");
  if (params_file == NULL) {
    fprintf(stderr, "Cube ERROR (CubeTuningParams::Save): error opening file "
            "%s for write.\n", file_name.c_str());
    return false;
  }
  fprintf(params_file, "RecoWgt=%.4f\n", reco_wgt_);
  fprintf(params_file, "SizeWgt=%.4f\n", size_wgt_);
  fprintf(params_file, "CharBigramsWgt=%.4f\n", char_bigrams_wgt_);
  fprintf(params_file, "WordUnigramsWgt=%.4f\n", word_unigrams_wgt_);
  fprintf(params_file, "MaxSegPerChar=%d\n", max_seg_per_char_);
  fprintf(params_file, "BeamWidth=%d\n", beam_width_);
  fprintf(params_file, "ConvGridSize=%d\n", conv_grid_size_);
  fprintf(params_file, "HistWindWid=%d\n", hist_wind_wid_);
  fprintf(params_file, "MinConCompSize=%d\n", min_con_comp_size_);
  fprintf(params_file, "MaxWordAspectRatio=%.4f\n", max_word_aspect_ratio_);
  fprintf(params_file, "MinSpaceHeightRatio=%.4f\n", min_space_height_ratio_);
  fprintf(params_file, "MaxSpaceHeightRatio=%.4f\n", max_space_height_ratio_);
  fprintf(params_file, "CombinerRunThresh=%.4f\n", combiner_run_thresh_);
  fprintf(params_file, "CombinerClassifierThresh=%.4f\n",
          combiner_classifier_thresh_);
  fprintf(params_file, "OODWgt=%.4f\n", ood_wgt_);
  fprintf(params_file, "NumWgt=%.4f\n", num_wgt_);
  fclose(params_file);
  return true;
}
}
| 1080228-arabicocr11 | cube/cube_tuning_params.cpp | C++ | asf20 | 7,713 |
/**********************************************************************
* File: string_32.h
* Description: Declaration of a 32 Bit string class
* Author: Ahmad Abdulkader
* Created: 2007
*
* (C) Copyright 2008, Google Inc.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
// the string_32 class provides the functionality needed
// for a 32-bit string class
#ifndef STRING_32_H
#define STRING_32_H
#include <string.h>
#include <string>
#include <algorithm>
#include <vector>
#ifdef USE_STD_NAMESPACE
using std::basic_string;
using std::string;
using std::vector;
#endif
namespace tesseract {
// basic definitions
// char_32 holds a single 32-bit character code (signed int representation)
typedef signed int char_32;
// string_32 is a basic_string whose element type is char_32
typedef basic_string<char_32> string_32;
}
#endif // STRING_32_H
| 1080228-arabicocr11 | cube/string_32.h | C++ | asf20 | 1,313 |
/**********************************************************************
* File: char_bigrams.h
* Description: Declaration of a Character Bigrams Class
* Author: Ahmad Abdulkader
* Created: 2007
*
* (C) Copyright 2008, Google Inc.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
// The CharBigram class represents the interface to the character bigram
// table used by Cube
// A CharBigram object can be constructed from the Char Bigrams file
// Given a sequence of characters, the "Cost" method returns the Char Bigram
// cost of the string according to the table
#ifndef CHAR_BIGRAMS_H
#define CHAR_BIGRAMS_H
#include <string>
#include "char_set.h"
namespace tesseract {
// structure representing a single bigram value
// A single bigram entry.
struct Bigram {
  int cnt;   // occurrence count of this character pair
  int cost;  // precomputed cost of the pair (scale set by the table builder
             // -- confirm in char_bigrams.cpp)
};
// structure representing the char bigram array of characters
// following a specific character
// Bigram data for all characters following one specific character.
struct CharBigram {
  int total_cnt;     // total count over all following characters
  char_32 max_char;  // largest following character code present
  Bigram *bigram;    // per-following-character entries (presumably indexed
                     // by character code -- confirm in char_bigrams.cpp)
};
// structure representing the whole bigram table
// The whole bigram table.
struct CharBigramTable {
  int total_cnt;            // total bigram count over the whole table
  int worst_cost;           // cost used for pairs absent from the table
                            // -- confirm in char_bigrams.cpp
  char_32 max_char;         // largest leading character code present
  CharBigram *char_bigram;  // per-leading-character bigram arrays
};
// Interface to the character-bigram table used by Cube to score
// character sequences.
class CharBigrams {
 public:
  CharBigrams();
  ~CharBigrams();
  // Construct the CharBigrams class from a file
  // (file located via the data path and language; caller owns the result)
  static CharBigrams *Create(const string &data_file_path,
                             const string &lang);
  // Top-level function to return the mean character bigram cost of a
  // sequence of characters. If char_set is not NULL, use
  // tesseract functions to return a case-invariant cost.
  // This avoids unnecessarily penalizing all-one-case words or
  // capitalized words (first-letter upper-case and remaining letters
  // lower-case).
  int Cost(const char_32 *str, CharSet *char_set) const;

 protected:
  // Returns the character bigram cost of two characters.
  int PairCost(char_32 ch1, char_32 ch2) const;
  // Returns the mean character bigram cost of a sequence of
  // characters. Adds a space at the beginning and end to account for
  // cost of starting and ending characters.
  int MeanCostWithSpaces(const char_32 *char_32_ptr) const;

 private:
  // Only words this length or greater qualify for case-invariant character
  // bigram cost.
  static const int kMinLengthCaseInvariant = 4;
  // the in-memory bigram table (populated by Create -- confirm in .cpp)
  CharBigramTable bigram_table_;
};
}
#endif // CHAR_BIGRAMS_H
| 1080228-arabicocr11 | cube/char_bigrams.h | C++ | asf20 | 2,896 |
/**********************************************************************
* File: con_comp.h
* Description: Declaration of a Connected Component class
* Author: Ahmad Abdulkader
* Created: 2007
*
* (C) Copyright 2008, Google Inc.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
#ifndef CONCOMP_H
#define CONCOMP_H
// The ConComp class implements the functionality needed for a
// Connected Component object and Connected Component (ConComp) points.
// The points constituting a connected component are kept in a linked-list.
// The ConComp class provides methods to:
// 1- Compare components in L2R and R2L reading orders.
// 2- Merge ConComps
// 3- Compute the windowed vertical pixel density histogram for a specific
// windows size
// 4- Segment a ConComp based on the local windowed vertical pixel
// density histogram local minima
namespace tesseract {
// Implements a ConComp point in a linked list of points
// A single point of a connected component, stored as a node in a
// singly-linked list.
class ConCompPt {
 public:
  ConCompPt(int x, int y) {
    x_ = x;
    y_ = y;
    next_pt_ = NULL;
  }
  // point coordinates
  inline int x() { return x_; }
  inline int y() { return y_; }
  // translate the point by (dx, dy)
  inline void Shift(int dx, int dy) {
    x_ += dx;
    y_ += dy;
  }
  // linked-list chaining
  inline ConCompPt * Next() { return next_pt_; }
  inline void SetNext(ConCompPt *pt) { next_pt_ = pt; }

 private:
  int x_;
  int y_;
  ConCompPt *next_pt_;  // next point in the list; NULL terminates
};
// A connected component: a linked list of foreground points plus the
// component's bounding box and reading-order attributes.
class ConComp {
 public:
  ConComp();
  virtual ~ConComp();
  // accessors
  inline ConCompPt *Head() { return head_; }
  inline int Left() const { return left_; }
  inline int Top() const { return top_; }
  inline int Right() const { return right_; }
  inline int Bottom() const { return bottom_; }
  inline int Width() const { return right_ - left_ + 1; }
  inline int Height() const { return bottom_ - top_ + 1; }
  // Comparer used for sorting L2R reading order
  // Orders by (left_ + right_), i.e., twice the horizontal center of the
  // bounding box. qsort-style contract: args are ConComp** and the sign
  // of the return value determines the order.
  inline static int Left2RightComparer(const void *comp1,
                                       const void *comp2) {
    return (*(reinterpret_cast<ConComp * const *>(comp1)))->left_ +
        (*(reinterpret_cast<ConComp * const *>(comp1)))->right_ -
        (*(reinterpret_cast<ConComp * const *>(comp2)))->left_ -
        (*(reinterpret_cast<ConComp * const *>(comp2)))->right_;
  }
  // Comparer used for sorting R2L reading order
  // Orders by right_ only, descending.
  inline static int Right2LeftComparer(const void *comp1,
                                       const void *comp2) {
    return (*(reinterpret_cast<ConComp * const *>(comp2)))->right_ -
        (*(reinterpret_cast<ConComp * const *>(comp1)))->right_;
  }
  // accessors for attributes of a ConComp
  inline bool LeftMost() const { return left_most_; }
  inline bool RightMost() const { return right_most_; }
  inline void SetLeftMost(bool left_most) { left_most_ = left_most; }
  inline void SetRightMost(bool right_most) { right_most_ = right_most;
  }
  inline int ID () const { return id_; }
  inline void SetID(int id) { id_ = id; }
  inline int PtCnt () const { return pt_cnt_; }
  // Add a new pt
  bool Add(int x, int y);
  // Merge two connected components in-place
  bool Merge(ConComp *con_comp);
  // Shifts the co-ordinates of all points by the specified x & y deltas
  void Shift(int dx, int dy);
  // segments a concomp based on pixel density histogram local minima
  ConComp **Segment(int max_hist_wnd, int *concomp_cnt);
  // creates the vertical pixel density histogram of the concomp
  int *CreateHistogram(int max_hist_wnd);
  // find out the seg pts by looking for local minima in the histogram
  int *SegmentHistogram(int *hist_array, int *seg_pt_cnt);

 private:
  int id_;           // component id, set by the owner via SetID
  bool left_most_;   // left-most/right-most flags, set by the owner via
  bool right_most_;  // SetLeftMost/SetRightMost
  // bounding box of the component
  int left_;
  int top_;
  int right_;
  int bottom_;
  ConCompPt *head_;  // head of the point linked-list
  ConCompPt *tail_;  // tail of the point linked-list
  int pt_cnt_;       // number of points in the list
};
}
#endif // CONCOMP_H
| 1080228-arabicocr11 | cube/con_comp.h | C++ | asf20 | 4,432 |
/**********************************************************************
* File: tess_lang_mod_edge.h
* Description: Declaration of the Tesseract Language Model Edge Class
* Author: Ahmad Abdulkader
* Created: 2008
*
* (C) Copyright 2008, Google Inc.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
// The TessLangModEdge models an edge in the Tesseract language models
// It inherits from the LangModEdge class
#ifndef TESS_LANG_MOD_EDGE_H
#define TESS_LANG_MOD_EDGE_H
#include "dawg.h"
#include "char_set.h"
#include "lang_mod_edge.h"
#include "cube_reco_context.h"
#include "cube_utils.h"
// Macros needed to identify punctuation in the langmodel state
#ifdef _HMSW32_H
#define LEAD_PUNC_EDGE_REF_MASK (inT64) 0x0000000100000000i64
#define TRAIL_PUNC_EDGE_REF_MASK (inT64) 0x0000000200000000i64
#define TRAIL_PUNC_REPEAT_MASK (inT64) 0xffff000000000000i64
#else
#define LEAD_PUNC_EDGE_REF_MASK (inT64) 0x0000000100000000ll
#define TRAIL_PUNC_EDGE_REF_MASK (inT64) 0x0000000200000000ll
#define TRAIL_PUNC_REPEAT_MASK (inT64) 0xffff000000000000ll
#endif
// Number state machine macros
#define NUMBER_STATE_SHIFT 0
#define NUMBER_STATE_MASK 0x0000000fl
#define NUMBER_LITERAL_SHIFT 4
#define NUMBER_LITERAL_MASK 0x000000f0l
#define NUMBER_REPEAT_SHIFT 8
#define NUMBER_REPEAT_MASK 0x00000f00l
#define NUM_TRM -99
#define TRAIL_PUNC_REPEAT_SHIFT 48
#define IsLeadingPuncEdge(edge_mask) \
((edge_mask & LEAD_PUNC_EDGE_REF_MASK) != 0)
#define IsTrailingPuncEdge(edge_mask) \
((edge_mask & TRAIL_PUNC_EDGE_REF_MASK) != 0)
#define TrailingPuncCount(edge_mask) \
((edge_mask & TRAIL_PUNC_REPEAT_MASK) >> TRAIL_PUNC_REPEAT_SHIFT)
#define TrailingPuncEdgeMask(Cnt) \
(TRAIL_PUNC_EDGE_REF_MASK | ((Cnt) << TRAIL_PUNC_REPEAT_SHIFT))
// State machine IDs
#define DAWG_OOD 0
#define DAWG_NUMBER 1
namespace tesseract {
// An edge in the Tesseract language model. Wraps a Dawg edge range (or a
// sentinel pseudo-dawg for the OOD/number state machines) and exposes the
// LangModEdge interface used by the beam search.
class TessLangModEdge : public LangModEdge {
 public:
  // Different ways of constructing a TessLangModEdge
  TessLangModEdge(CubeRecoContext *cntxt, const Dawg *edge_array,
                  EDGE_REF edge, int class_id);
  TessLangModEdge(CubeRecoContext *cntxt, const Dawg *edge_array,
                  EDGE_REF start_edge_idx, EDGE_REF end_edge_idx,
                  int class_id);
  TessLangModEdge(CubeRecoContext *cntxt, int class_id);
  ~TessLangModEdge() {}
  // Accessors
  inline bool IsRoot() const {
    return root_;
  }
  inline void SetRoot(bool flag) { root_ = flag; }
  // DAWG_OOD / DAWG_NUMBER are small integer sentinels stored in the
  // dawg_ pointer to mark the OOD and number state machines.
  inline bool IsOOD() const {
    return (dawg_ == (Dawg *)DAWG_OOD);
  }
  inline bool IsNumber() const {
    return (dawg_ == (Dawg *)DAWG_NUMBER);
  }
  inline bool IsEOW() const {
    return (IsTerminal() || (dawg_->end_of_word(end_edge_) != 0));
  }
  inline const Dawg *GetDawg() const { return dawg_; }
  inline EDGE_REF StartEdge() const { return start_edge_; }
  inline EDGE_REF EndEdge() const { return end_edge_; }
  inline EDGE_REF EdgeMask() const { return edge_mask_; }
  inline const char_32 * EdgeString() const { return str_; }
  inline int ClassID () const { return class_id_; }
  inline int PathCost() const { return path_cost_; }
  inline void SetEdgeMask(EDGE_REF edge_mask) { edge_mask_ = edge_mask; }
  inline void SetDawg(Dawg *dawg) { dawg_ = dawg; }
  inline void SetStartEdge(EDGE_REF edge_idx) { start_edge_ = edge_idx; }
  inline void SetEndEdge(EDGE_REF edge_idx) { end_edge_ = edge_idx; }
  // is this a terminal node:
  // we can terminate at any OOD char, trailing punc or
  // when the dawg terminates
  inline bool IsTerminal() const {
    return (IsOOD() || IsNumber() || IsTrailingPuncEdge(start_edge_) ||
            dawg_->next_node(end_edge_) == 0);
  }
  // How many signals does the LM provide for tuning. These are flags like:
  // OOD or not, Number or not, that are used by the training to compute
  // extra costs for each word.
  inline int SignalCnt() const {
    return 2;
  }
  // returns the weight assigned to a specified signal
  // (signal 0 = OOD weight, signal 1 = number weight; 0.0 otherwise)
  inline double SignalWgt(int signal) const {
    CubeTuningParams *params =
        reinterpret_cast<CubeTuningParams *>(cntxt_->Params());
    if (params != NULL) {
      switch (signal) {
        case 0:
          return params->OODWgt();
          break;
        case 1:
          return params->NumWgt();
          break;
      }
    }
    return 0.0;
  }
  // sets the weight assigned to a specified signal: Used in training
  void SetSignalWgt(int signal, double wgt) {
    CubeTuningParams *params =
        reinterpret_cast<CubeTuningParams *>(cntxt_->Params());
    if (params != NULL) {
      switch (signal) {
        case 0:
          params->SetOODWgt(wgt);
          break;
        case 1:
          params->SetNumWgt(wgt);
          break;
      }
    }
  }
  // returns the actual value of a specified signal
  // (MIN_PROB_COST when the corresponding flag is set, else 0)
  int Signal(int signal) {
    switch (signal) {
      case 0:
        return IsOOD() ? MIN_PROB_COST : 0;
        break;
      case 1:
        return IsNumber() ? MIN_PROB_COST : 0;
        break;
      default:
        return 0;
    }
  }
  // returns the Hash value of the edge. Used by the SearchNode hash table
  // to quickly lookup existing edges to converge during search
  // NOTE(review): casts dawg_ through unsigned long int -- assumes
  // pointer truncation is acceptable for hashing here.
  inline unsigned int Hash() const {
    return static_cast<unsigned int>(((start_edge_ | end_edge_) ^
        ((reinterpret_cast<unsigned long int>(dawg_)))) ^
        ((unsigned int)edge_mask_) ^
        class_id_);
  }
  // A verbal description of the edge: Used by visualizers
  char *Description() const;
  // Is this edge identical to the specified edge
  // (compares class-id, string pointer, dawg, edge range and mask)
  inline bool IsIdentical(LangModEdge *lang_mod_edge) const {
    return (class_id_ ==
        reinterpret_cast<TessLangModEdge *>(lang_mod_edge)->class_id_ &&
        str_ == reinterpret_cast<TessLangModEdge *>(lang_mod_edge)->str_ &&
        dawg_ == reinterpret_cast<TessLangModEdge *>(lang_mod_edge)->dawg_ &&
        start_edge_ ==
        reinterpret_cast<TessLangModEdge *>(lang_mod_edge)->start_edge_ &&
        end_edge_ ==
        reinterpret_cast<TessLangModEdge *>(lang_mod_edge)->end_edge_ &&
        edge_mask_ ==
        reinterpret_cast<TessLangModEdge *>(lang_mod_edge)->edge_mask_);
  }
  // Creates a set of fan-out edges for the specified edge
  static int CreateChildren(CubeRecoContext *cntxt,
                            const Dawg *edges,
                            NODE_REF edge_reg,
                            LangModEdge **lm_edges);

 private:
  bool root_;
  CubeRecoContext *cntxt_;
  // underlying dawg, or the DAWG_OOD/DAWG_NUMBER sentinel
  const Dawg *dawg_;
  EDGE_REF start_edge_;
  EDGE_REF end_edge_;
  EDGE_REF edge_mask_;
  int path_cost_;
  int class_id_;
  const char_32 * str_;
  // returns the cost of the lang_mod_edge
  // (weighted MIN_PROB_COST for OOD/number edges, 0 otherwise)
  inline int Cost() const {
    if (cntxt_ != NULL) {
      CubeTuningParams *params =
          reinterpret_cast<CubeTuningParams *>(cntxt_->Params());
      if (dawg_ == (Dawg *)DAWG_OOD) {
        return static_cast<int>(params->OODWgt() * MIN_PROB_COST);
      } else if (dawg_ == (Dawg *)DAWG_NUMBER) {
        return static_cast<int>(params->NumWgt() * MIN_PROB_COST);
      }
    }
    return 0;
  }
};
} // namespace tesseract
#endif // TESS_LANG_MOD_EDGE_H
| 1080228-arabicocr11 | cube/tess_lang_mod_edge.h | C++ | asf20 | 7,721 |
/**********************************************************************
* File: bmp_8.h
* Description: Declaration of an 8-bit Bitmap class
* Author: Ahmad Abdulkader
* Created: 2007
*
* (C) Copyright 2008, Google Inc.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
#ifndef BMP8_H
#define BMP8_H
// The Bmp8 class is an 8-bit bitmap that represents images of
// words, characters and segments throughout Cube
// It is meant to provide fast access to the bitmap bits and provide
// fast scaling, cropping, deslanting, connected components detection,
// loading and saving functionality
#include <stdlib.h>
#include <stdio.h>
#include "con_comp.h"
#include "cached_file.h"
namespace tesseract {
// Non-integral deslanting parameters.
static const float kMinDeslantAngle = -30.0f;
static const float kMaxDeslantAngle = 30.0f;
static const float kDeslantAngleDelta = 0.5f;
// 8-bit bitmap used throughout Cube for word/character/segment images.
// Provides scaling, cropping, deslanting, connected-component detection
// and load/save support.
class Bmp8 {
 public:
  Bmp8(unsigned short wid, unsigned short hgt);
  ~Bmp8();
  // Clears the bitmap
  bool Clear();
  // accessors to bitmap dimensions
  inline unsigned short Width() const { return wid_; }
  inline unsigned short Stride() const { return stride_; }
  inline unsigned short Height() const { return hgt_; }
  // raw pixel buffer (first scan line); NULL when no buffer is allocated
  inline unsigned char *RawData() const {
    return (line_buff_ == NULL ? NULL : line_buff_[0]);
  }
  // creates a scaled version of the specified bitmap
  // Optionally, scaling can be isotropic (preserving aspect ratio) or not
  bool ScaleFrom(Bmp8 *bmp, bool isotropic = true);
  // Deslant the bitmap vertically
  bool Deslant();
  // Deslant the bitmap horizontally
  bool HorizontalDeslant(double *deslant_angle);
  // Create a bitmap object from a file
  static Bmp8 *FromCharDumpFile(CachedFile *fp);
  static Bmp8 *FromCharDumpFile(FILE *fp);
  // are two bitmaps identical
  bool IsIdentical(Bmp8 *pBmp) const;
  // Detect connected components
  ConComp ** FindConComps(int *concomp_cnt, int min_size) const;
  // compute the foreground ratio
  float ForegroundRatio() const;
  // returns the mean horizontal histogram entropy of the bitmap
  float MeanHorizontalHistogramEntropy() const;
  // returns the horizontal histogram of the bitmap
  int *HorizontalHistogram() const;

 private:
  // Compute a look up tan table that will be used for fast slant computation
  static bool ComputeTanTable();
  // create a bitmap buffer (two flavors char & int) and init contents
  unsigned char ** CreateBmpBuffer(unsigned char init_val = 0xff);
  static unsigned int ** CreateBmpBuffer(int wid, int hgt,
                                         unsigned char init_val = 0xff);
  // Free a bitmap buffer
  static void FreeBmpBuffer(unsigned char **buff);
  static void FreeBmpBuffer(unsigned int **buff);
  // a static array that holds the tan lookup table
  static float *tan_table_;
  // bitmap 32-bit-aligned stride
  unsigned short stride_;
  // Bmp8 magic number used to validate saved bitmaps
  static const unsigned int kMagicNumber = 0xdeadbeef;

 protected:
  // bitmap dimensions
  unsigned short wid_;
  unsigned short hgt_;
  // bitmap contents (array of scan-line pointers)
  unsigned char **line_buff_;
  // deslanting parameters
  static const int kConCompAllocChunk = 16;
  static const int kDeslantAngleCount;
  // Load dimensions & contents of bitmap from file
  bool LoadFromCharDumpFile(CachedFile *fp);
  bool LoadFromCharDumpFile(FILE *fp);
  // Load dimensions & contents of bitmap from raw data
  bool LoadFromCharDumpFile(unsigned char **raw_data);
  // Load contents of bitmap from raw data
  bool LoadFromRawData(unsigned char *data);
  // save bitmap to a file
  bool SaveBmp2CharDumpFile(FILE *fp) const;
  // checks if a row or a column are entirely blank
  bool IsBlankColumn(int x) const;
  bool IsBlankRow(int y) const;
  // crop the bitmap returning new dimensions
  void Crop(int *xst_src, int *yst_src, int *wid, int *hgt);
  // copy part of the specified bitmap
  void Copy(int x, int y, int wid, int hgt, Bmp8 *bmp_dest) const;
};
}
#endif // BMP8_H
| 1080228-arabicocr11 | cube/bmp_8.h | C++ | asf20 | 4,538 |
/**********************************************************************
* File: word_altlist.h
* Description: Declaration of the Word Alternate List Class
* Author: Ahmad Abdulkader
* Created: 2008
*
* (C) Copyright 2008, Google Inc.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
// The WordAltList abstracts an alternate list of words and their corresponding
// costs that result from the word recognition process. The class inherits
// from the AltList class
// It provides methods to add a new word alternate, its corresponding score and
// a tag.
#ifndef WORD_ALT_LIST_H
#define WORD_ALT_LIST_H
#include "altlist.h"
namespace tesseract {
// List of alternate word strings (with costs) produced by word
// recognition. Inherits the generic alternate-list machinery from AltList.
class WordAltList : public AltList {
 public:
  explicit WordAltList(int max_alt);
  ~WordAltList();
  // Sort the list of alternates based on cost
  void Sort();
  // insert an alternate word with the specified cost and tag
  bool Insert(char_32 *char_ptr, int cost, void *tag = NULL);
  // returns the alternate string at the specified position
  // NOTE(review): no bounds check on alt_idx; callers must pass a valid
  // index.
  inline char_32 * Alt(int alt_idx) { return word_alt_[alt_idx]; }
  // print each entry of the altlist, both UTF8 and unichar ids, and
  // their costs, to stderr
  void PrintDebug();

 private:
  // array of alternate word strings
  char_32 **word_alt_;
};
} // namespace tesseract
#endif // WORD_ALT_LIST_H
| 1080228-arabicocr11 | cube/word_altlist.h | C++ | asf20 | 1,869 |
/**********************************************************************
* File: feature_chebyshev.h
* Description: Declaration of the Chebyshev coefficients Feature Class
* Author: Ahmad Abdulkader
* Created: 2008
*
* (C) Copyright 2008, Google Inc.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
// The FeatureChebyshev class implements a Bitmap feature extractor class. It
// inherits from the FeatureBase class
// The feature vector is the composed of the chebyshev coefficients of 4 time
// sequences. The time sequences are the left, top, right & bottom
// bitmap profiles of the input samples
#ifndef FEATURE_CHEBYSHEV_H
#define FEATURE_CHEBYSHEV_H
#include "char_samp.h"
#include "feature_base.h"
namespace tesseract {
class FeatureChebyshev : public FeatureBase {
public:
explicit FeatureChebyshev(TuningParams *params);
virtual ~FeatureChebyshev();
// Render a visualization of the features to a CharSamp.
// This is mainly used by visual-debuggers
virtual CharSamp *ComputeFeatureBitmap(CharSamp *samp);
// Compute the features for a given CharSamp.
// features must point to a buffer of at least FeatureCnt() floats.
virtual bool ComputeFeatures(CharSamp *samp, float *features);
// Returns the count of features: kChebychevCoefficientCnt coefficients
// for each of the 4 bitmap profiles (left, top, right, bottom).
virtual int FeatureCnt() {
return (4 * kChebychevCoefficientCnt);
}
protected:
// number of Chebyshev coefficients computed per profile
static const int kChebychevCoefficientCnt = 40;
// Compute Chebychev coefficients for the specified vector,
// writing coeff_cnt coefficients into coeff.
void ChebyshevCoefficients(const vector<float> &input,
int coeff_cnt, float *coeff);
// Compute the features for a given CharSamp
bool ComputeChebyshevCoefficients(CharSamp *samp, float *features);
};
}
#endif // FEATURE_CHEBYSHEV_H
| 1080228-arabicocr11 | cube/feature_chebyshev.h | C++ | asf20 | 2,247 |
/**********************************************************************
* File: cube_search_object.cpp
* Description: Implementation of the Cube Search Object Class
* Author: Ahmad Abdulkader
* Created: 2007
*
* (C) Copyright 2008, Google Inc.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
#include "cube_search_object.h"
#include "cube_utils.h"
#include "ndminx.h"
namespace tesseract {
const bool CubeSearchObject::kUseCroppedChars = true;
// Constructs a search object over the specified word sample. All heavy
// state (segmentation, caches, space costs) is built lazily by Init().
CubeSearchObject::CubeSearchObject(CubeRecoContext *cntxt, CharSamp *samp)
    : SearchObject(cntxt) {
  // lazily-built state: nothing is allocated until Init() runs
  init_ = false;
  segments_ = NULL;
  segment_cnt_ = 0;
  reco_cache_ = NULL;
  samp_cache_ = NULL;
  space_cost_ = NULL;
  no_space_cost_ = NULL;
  // the word sample being searched and its dimensions
  samp_ = samp;
  left_ = 0;
  itop_ = 0;
  wid_ = samp_->Width();
  hgt_ = samp_->Height();
  // cache frequently used tuning parameters
  max_seg_per_char_ = cntxt_->Params()->MaxSegPerChar();
  rtl_ = (cntxt_->ReadingOrder() == CubeRecoContext::R2L);
  // space-gap thresholds are expressed as fractions of the sample height
  min_spc_gap_ =
      static_cast<int>(hgt_ * cntxt_->Params()->MinSpaceHeightRatio());
  max_spc_gap_ =
      static_cast<int>(hgt_ * cntxt_->Params()->MaxSpaceHeightRatio());
}
// Destructor: releases all caches and segmentation state built by Init().
CubeSearchObject::~CubeSearchObject() {
Cleanup();
}
// Frees every piece of lazily-built state (recognition cache, sample
// cache, segment list and the space/no-space cost arrays) and returns
// the object to its pre-Init() state so Init() may be run again.
void CubeSearchObject::Cleanup() {
  // recognition cache: one CharAltList per (start, end) segment pair
  if (reco_cache_ != NULL) {
    for (int start = 0; start < segment_cnt_; start++) {
      if (reco_cache_[start] != NULL) {
        for (int end = 0; end < segment_cnt_; end++) {
          // deleting a NULL entry is a harmless no-op
          delete reco_cache_[start][end];
        }
        delete []reco_cache_[start];
      }
    }
    delete []reco_cache_;
    reco_cache_ = NULL;
  }
  // character-sample cache: same (start, end) layout as reco_cache_
  if (samp_cache_ != NULL) {
    for (int start = 0; start < segment_cnt_; start++) {
      if (samp_cache_[start] != NULL) {
        for (int end = 0; end < segment_cnt_; end++) {
          delete samp_cache_[start][end];
        }
        delete []samp_cache_[start];
      }
    }
    delete []samp_cache_;
    samp_cache_ = NULL;
  }
  // connected-component segments produced by Segment()
  if (segments_ != NULL) {
    for (int seg = 0; seg < segment_cnt_; seg++) {
      delete segments_[seg];
    }
    delete []segments_;
    segments_ = NULL;
  }
  // space / no-space cost arrays built by ComputeSpaceCosts();
  // delete[] of NULL is a no-op, so no guards are needed
  delete []space_cost_;
  space_cost_ = NULL;
  delete []no_space_cost_;
  no_space_cost_ = NULL;
  segment_cnt_ = 0;
  init_ = false;
}
// Number of candidate segmentation points: one less than the number of
// segments. Returns -1 if lazy initialization fails.
int CubeSearchObject::SegPtCnt() {
  if (!init_ && !Init()) {
    return -1;
  }
  return segment_cnt_ - 1;
}
// Lazily initializes the object: runs segmentation and allocates the
// per-segment recognition and char-sample caches. Returns true on
// success; a no-op once initialization has already succeeded.
// Fix: the top-level cache arrays are now zeroed immediately after
// allocation. Previously, if a row allocation failed (or threw) midway
// through the loop below, the destructor's Cleanup() walked the
// remaining *uninitialized* row pointers -- undefined behavior.
bool CubeSearchObject::Init() {
  if (init_)
    return true;
  if (!Segment()) {
    return false;
  }
  // alloc the top-level recognition cache
  reco_cache_ = new CharAltList **[segment_cnt_];
  if (reco_cache_ == NULL) {
    fprintf(stderr, "Cube ERROR (CubeSearchObject::Init): could not "
            "allocate CharAltList array\n");
    return false;
  }
  // zero the row pointers so a partial failure below is safe to clean up
  memset(reco_cache_, 0, segment_cnt_ * sizeof(*reco_cache_));
  // alloc the top-level char-sample cache
  samp_cache_ = new CharSamp **[segment_cnt_];
  if (samp_cache_ == NULL) {
    fprintf(stderr, "Cube ERROR (CubeSearchObject::Init): could not "
            "allocate CharSamp array\n");
    return false;
  }
  memset(samp_cache_, 0, segment_cnt_ * sizeof(*samp_cache_));
  // alloc and zero one row per segment in each cache
  for (int seg = 0; seg < segment_cnt_; seg++) {
    reco_cache_[seg] = new CharAltList *[segment_cnt_];
    if (reco_cache_[seg] == NULL) {
      fprintf(stderr, "Cube ERROR (CubeSearchObject::Init): could not "
              "allocate a single segment's CharAltList array\n");
      return false;
    }
    memset(reco_cache_[seg], 0, segment_cnt_ * sizeof(*reco_cache_[seg]));
    samp_cache_[seg] = new CharSamp *[segment_cnt_];
    if (samp_cache_[seg] == NULL) {
      fprintf(stderr, "Cube ERROR (CubeSearchObject::Init): could not "
              "allocate a single segment's CharSamp array\n");
      return false;
    }
    memset(samp_cache_[seg], 0, segment_cnt_ * sizeof(*samp_cache_[seg]));
  }
  init_ = true;
  return true;
}
// Returns (and caches) a CharSamp for the bitmap spanned by the segments
// strictly after seg pt start_pt, up to and including seg pt end_pt.
// start_pt == -1 denotes the beginning of the word. Returns NULL on
// failure. The returned sample is owned by the cache: callers must not
// delete it.
CharSamp *CubeSearchObject::CharSample(int start_pt, int end_pt) {
// init if necessary
if (!init_ && !Init())
return NULL;
// validate segment range
if (!IsValidSegmentRange(start_pt, end_pt))
return NULL;
// look for the samp in the cache. The cache is indexed by
// [start_pt + 1][end_pt] so that start_pt == -1 maps to row 0.
if (samp_cache_ && samp_cache_[start_pt + 1] &&
samp_cache_[start_pt + 1][end_pt]) {
return samp_cache_[start_pt + 1][end_pt];
}
// create a char samp object from the specified range of segments
bool left_most;
bool right_most;
CharSamp *samp = CharSamp::FromConComps(segments_, start_pt + 1,
end_pt - start_pt, NULL,
&left_most, &right_most, hgt_);
if (!samp)
return NULL;
if (kUseCroppedChars) {
CharSamp *cropped_samp = samp->Crop();
// we no longer need the orig sample
delete samp;
if (!cropped_samp)
return NULL;
samp = cropped_samp;
}
// get the dimensions of the new cropped sample
int char_top = samp->Top();
int char_wid = samp->Width();
int char_hgt = samp->Height();
// for cursive languages, these features correspond to whether
// the charsamp is at the beginning or end of conncomp
if (cntxt_->Cursive() == true) {
// first and last char flags depend on reading order
bool first_char = rtl_ ? right_most : left_most;
bool last_char = rtl_ ? left_most : right_most;
samp->SetFirstChar(first_char ? 255 : 0);
samp->SetLastChar(last_char ? 255 : 0);
} else {
// for non cursive languages, these features correspond
// to whether the charsamp is at the beginning or end of the word
samp->SetFirstChar((start_pt == -1) ? 255 : 0);
samp->SetLastChar((end_pt == (segment_cnt_ - 1)) ? 255 : 0);
}
// normalized top/bottom/aspect-ratio features, scaled into 0..255
samp->SetNormTop(255 * char_top / hgt_);
samp->SetNormBottom(255 * (char_top + char_hgt) / hgt_);
samp->SetNormAspectRatio(255 * char_wid / (char_wid + char_hgt));
// add to cache & return
samp_cache_[start_pt + 1][end_pt] = samp;
return samp;
}
// Returns a leptonica Box giving the bounds of the bitmap spanned by the
// segments between the two segmentation points, or NULL on failure.
// The caller owns the returned Box.
Box *CubeSearchObject::CharBox(int start_pt, int end_pt) {
  if (!init_ && !Init())
    return NULL;
  if (!IsValidSegmentRange(start_pt, end_pt)) {
    fprintf(stderr, "Cube ERROR (CubeSearchObject::CharBox): invalid "
            "segment range (%d, %d)\n", start_pt, end_pt);
    return NULL;
  }
  // build a temporary char sample covering the segment range, read off
  // its dimensions into a leptonica box, then free it again
  bool is_leftmost;
  bool is_rightmost;
  CharSamp *char_samp = CharSamp::FromConComps(segments_, start_pt + 1,
                                               end_pt - start_pt, NULL,
                                               &is_leftmost, &is_rightmost,
                                               hgt_);
  if (char_samp == NULL)
    return NULL;
  if (kUseCroppedChars) {
    // use the cropped bounds so they match what CharSample() produces
    CharSamp *cropped = char_samp->Crop();
    delete char_samp;
    if (cropped == NULL) {
      return NULL;
    }
    char_samp = cropped;
  }
  Box *char_box = boxCreate(char_samp->Left(), char_samp->Top(),
                            char_samp->Width(), char_samp->Height());
  delete char_samp;
  return char_box;
}
// Called from Beam Search to return the alt list corresponding to
// recognizing the bitmap between two segmentation pts. Results are
// cached (indexed by [start_pt + 1][end_pt]) and owned by the cache.
// Returns NULL on failure.
CharAltList * CubeSearchObject::RecognizeSegment(int start_pt, int end_pt) {
// init if necessary
if (!init_ && !Init()) {
fprintf(stderr, "Cube ERROR (CubeSearchObject::RecognizeSegment): could "
"not initialize CubeSearchObject\n");
return NULL;
}
// validate segment range
if (!IsValidSegmentRange(start_pt, end_pt)) {
fprintf(stderr, "Cube ERROR (CubeSearchObject::RecognizeSegment): invalid "
"segment range (%d, %d)\n", start_pt, end_pt);
return NULL;
}
// look for the recognition results in the cache
if (reco_cache_ && reco_cache_[start_pt + 1] &&
reco_cache_[start_pt + 1][end_pt]) {
return reco_cache_[start_pt + 1][end_pt];
}
// create the char sample corresponding to the blob
CharSamp *samp = CharSample(start_pt, end_pt);
if (!samp) {
fprintf(stderr, "Cube ERROR (CubeSearchObject::RecognizeSegment): could "
"not construct CharSamp\n");
return NULL;
}
// recognize the char sample
CharClassifier *char_classifier = cntxt_->Classifier();
if (char_classifier) {
reco_cache_[start_pt + 1][end_pt] = char_classifier->Classify(samp);
} else {
// no classifier: all characters are equally probable; add a penalty
// that favors 2-segment characters and aspect ratios (w/h) > 1
fprintf(stderr, "Cube WARNING (CubeSearchObject::RecognizeSegment): cube "
"context has no character classifier!! Inventing a probability "
"distribution.\n");
int class_cnt = cntxt_->CharacterSet()->ClassCount();
CharAltList *alt_list = new CharAltList(cntxt_->CharacterSet(), class_cnt);
int seg_cnt = end_pt - start_pt;
// uniform probability, scaled down the further seg_cnt is from 2 and
// the taller the sample is relative to its width
double prob_val = (1.0 / class_cnt) *
exp(-fabs(seg_cnt - 2.0)) *
exp(-samp->Width() / static_cast<double>(samp->Height()));
if (alt_list) {
for (int class_idx = 0; class_idx < class_cnt; class_idx++) {
alt_list->Insert(class_idx, CubeUtils::Prob2Cost(prob_val));
}
reco_cache_[start_pt + 1][end_pt] = alt_list;
}
}
return reco_cache_[start_pt + 1][end_pt];
}
// Perform segmentation of the bitmap by detecting connected components,
// segmenting each connected component using windowed vertical pixel density
// histogram and sorting the resulting segments in reading order
bool CubeSearchObject::Segment() {
if (!samp_)
return false;
segment_cnt_ = 0;
segments_ = samp_->Segment(&segment_cnt_, rtl_,
cntxt_->Params()->HistWindWid(),
cntxt_->Params()->MinConCompSize());
if (!segments_ || segment_cnt_ <= 0) {
return false;
}
if (segment_cnt_ >= kMaxSegmentCnt) {
return false;
}
return true;
}
// Computes the space and no-space costs at the gaps between consecutive
// segments (in reading order). For gap i (between segment i and segment
// i + 1) the horizontal white gap is estimated from the extreme x-extents
// of ALL segments on either side, then mapped to a space probability by
// a linear ramp between min_spc_gap_ and max_spc_gap_.
// Returns false if there are no gaps or an allocation fails.
bool CubeSearchObject::ComputeSpaceCosts() {
// init if necessary
if (!init_ && !Init())
return false;
// Already computed
if (space_cost_)
return true;
// No segmentation points
if (segment_cnt_ < 2)
return false;
// Compute the maximum x to the left of and minimum x to the right of each
// segmentation point. "left"/"right" here are physical (pixel) sides:
// for L2R, max_left_x[i] is the max Right() over segments 0..i and
// min_right_x[i] is the min Left() over segments i+1..end; for RTL the
// roles are mirrored because segment 0 is the physically rightmost.
int *max_left_x = new int[segment_cnt_ - 1];
int *min_right_x = new int[segment_cnt_ - 1];
if (!max_left_x || !min_right_x) {
delete []min_right_x;
delete []max_left_x;
return false;
}
if (rtl_) {
// running prefix/suffix extents, filled from both ends simultaneously
min_right_x[0] = segments_[0]->Left();
max_left_x[segment_cnt_ - 2] = segments_[segment_cnt_ - 1]->Right();
for (int pt_idx = 1; pt_idx < (segment_cnt_ - 1); pt_idx++) {
min_right_x[pt_idx] =
MIN(min_right_x[pt_idx - 1], segments_[pt_idx]->Left());
max_left_x[segment_cnt_ - pt_idx - 2] =
MAX(max_left_x[segment_cnt_ - pt_idx - 1],
segments_[segment_cnt_ - pt_idx - 1]->Right());
}
} else {
min_right_x[segment_cnt_ - 2] = segments_[segment_cnt_ - 1]->Left();
max_left_x[0] = segments_[0]->Right();
for (int pt_idx = 1; pt_idx < (segment_cnt_ - 1); pt_idx++) {
min_right_x[segment_cnt_ - pt_idx - 2] =
MIN(min_right_x[segment_cnt_ - pt_idx - 1],
segments_[segment_cnt_ - pt_idx - 1]->Left());
max_left_x[pt_idx] =
MAX(max_left_x[pt_idx - 1], segments_[pt_idx]->Right());
}
}
// Allocate memory for space and no space costs
// trivial cases
space_cost_ = new int[segment_cnt_ - 1];
no_space_cost_ = new int[segment_cnt_ - 1];
if (!space_cost_ || !no_space_cost_) {
// any half-allocated arrays are released later by Cleanup()
delete []min_right_x;
delete []max_left_x;
return false;
}
// go through all segmentation points determining the horizontal gap between
// the images on both sides of each break point. Use the gap to estimate
// the probability of a space. The probability is modeled as a linear
// function of the gap width
for (int pt_idx = 0; pt_idx < (segment_cnt_ - 1); pt_idx++) {
// determine the gap at the segmentation point
int gap = min_right_x[pt_idx] - max_left_x[pt_idx];
float prob = 0.0;
// gap is too small => no space
if (gap < min_spc_gap_) {
prob = 0.0;
} else if (gap > max_spc_gap_) {
// gap is too big => definite space
prob = 1.0;
} else {
// gap is somewhere in between, compute probability
prob = (gap - min_spc_gap_) /
static_cast<double>(max_spc_gap_ - min_spc_gap_);
}
// compute cost of space and non-space; the extra Prob2Cost(0.1) term
// biases the search against inserting spaces
space_cost_[pt_idx] = CubeUtils::Prob2Cost(prob) +
CubeUtils::Prob2Cost(0.1);
no_space_cost_[pt_idx] = CubeUtils::Prob2Cost(1.0 - prob);
}
delete []min_right_x;
delete []max_left_x;
return true;
}
// Returns the cost of having a space before the specified segmentation point
int CubeSearchObject::SpaceCost(int pt_idx) {
if (!space_cost_ && !ComputeSpaceCosts()) {
// Failed to compute costs return a zero prob
return CubeUtils::Prob2Cost(0.0);
}
return space_cost_[pt_idx];
}
// Returns the cost of not having a space before the specified
// segmentation point. If the costs cannot be computed, returns the cost
// of a 1.0 probability (no-space is the certain default), consistent
// with the range overload below and complementary to SpaceCost()'s
// failure value of Prob2Cost(0.0).
// Fix: the code previously returned Prob2Cost(0.0) here, contradicting
// its own comment ("return a 1.0 prob") and infinitely penalizing the
// no-space hypothesis whenever cost computation failed.
int CubeSearchObject::NoSpaceCost(int pt_idx) {
  // If failed to compute costs, return a 1.0 prob
  if (!space_cost_ && !ComputeSpaceCosts())
    return CubeUtils::Prob2Cost(1.0);
  return no_space_cost_[pt_idx];
}
// Returns the cost of not having any spaces within the specified range
// of segmentation points
int CubeSearchObject::NoSpaceCost(int st_pt, int end_pt) {
// If fail to compute costs, return a 1.0 prob
if (!space_cost_ && !ComputeSpaceCosts())
return CubeUtils::Prob2Cost(1.0);
int no_spc_cost = 0;
for (int pt_idx = st_pt + 1; pt_idx < end_pt; pt_idx++)
no_spc_cost += NoSpaceCost(pt_idx);
return no_spc_cost;
}
}
| 1080228-arabicocr11 | cube/cube_search_object.cpp | C++ | asf20 | 14,502 |
/**********************************************************************
* File: con_comp.cpp
* Description: Implementation of a Connected Component class
* Author: Ahmad Abdulkader
* Created: 2007
*
* (C) Copyright 2008, Google Inc.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
#include <stdlib.h>
#include <string.h>
#include "con_comp.h"
#include "cube_const.h"
namespace tesseract {
// Constructs an empty connected component: no points, a zeroed bounding
// box, and an unassigned (-1) ID.
ConComp::ConComp() {
  id_ = -1;
  pt_cnt_ = 0;
  head_ = NULL;
  tail_ = NULL;
  left_ = 0;
  top_ = 0;
  right_ = 0;
  bottom_ = 0;
  left_most_ = false;
  right_most_ = false;
}
// Frees the linked list of points owned by this component.
ConComp::~ConComp() {
  ConCompPt *pt = head_;
  while (pt != NULL) {
    ConCompPt *next_pt = pt->Next();
    delete pt;
    pt = next_pt;
  }
  head_ = NULL;
}
// adds a pt to the conn comp and updates its boundaries
bool ConComp::Add(int x, int y) {
ConCompPt *pt_ptr = new ConCompPt(x, y);
if (pt_ptr == NULL) {
return false;
}
if (head_ == NULL) {
left_ = x;
right_ = x;
top_ = y;
bottom_ = y;
head_ = pt_ptr;
} else {
left_ = left_ <= x ? left_ : x;
top_ = top_ <= y ? top_ : y;
right_ = right_ >= x ? right_ : x;
bottom_ = bottom_ >= y ? bottom_ : y;
}
if (tail_ != NULL) {
tail_->SetNext(pt_ptr);
}
tail_ = pt_ptr;
pt_cnt_++;
return true;
}
// Splices the points of concomp onto the end of this component and
// unions the two bounding boxes. The donor is emptied (its points are
// now owned by this component). Fails if either point list is empty.
bool ConComp::Merge(ConComp *concomp) {
  if (head_ == NULL || tail_ == NULL ||
      concomp->head_ == NULL || concomp->tail_ == NULL) {
    return false;
  }
  // splice the donor's list onto ours
  tail_->SetNext(concomp->head_);
  tail_ = concomp->tail_;
  // union of the two bounding boxes
  if (concomp->left_ < left_) left_ = concomp->left_;
  if (concomp->top_ < top_) top_ = concomp->top_;
  if (concomp->right_ > right_) right_ = concomp->right_;
  if (concomp->bottom_ > bottom_) bottom_ = concomp->bottom_;
  pt_cnt_ += concomp->pt_cnt_;
  // detach the donor so its destructor does not free our points
  concomp->head_ = NULL;
  concomp->tail_ = NULL;
  return true;
}
// Creates the x-coord density histogram after spreading
// each x-coord position by the HIST_WND_RATIO fraction of the
// height of the ConComp, but limited to max_hist_wnd
int *ConComp::CreateHistogram(int max_hist_wnd) {
int wid = right_ - left_ + 1,
hgt = bottom_ - top_ + 1,
hist_wnd = static_cast<int>(hgt * HIST_WND_RATIO);
if (hist_wnd > max_hist_wnd) {
hist_wnd = max_hist_wnd;
}
// alloc memo for histogram
int *hist_array = new int[wid];
if (hist_array == NULL) {
return NULL;
}
memset(hist_array, 0, wid * sizeof(*hist_array));
// compute windowed histogram
ConCompPt *pt_ptr = head_;
while (pt_ptr != NULL) {
int x = pt_ptr->x() - left_,
xw = x - hist_wnd;
for (int xdel = -hist_wnd; xdel <= hist_wnd; xdel++, xw++) {
if (xw >= 0 && xw < wid) {
hist_array[xw]++;
}
}
pt_ptr = pt_ptr->Next();
}
return hist_array;
}
// Finds segmentation points by locating local minima in the histogram.
// A column qualifies if its density is a minimum over the surrounding
// 5-column window; the two symmetric conditions (strict on one side,
// non-strict on the other) let flat-bottomed valleys qualify exactly
// once. Returns a caller-owned array of seg-pt x offsets (relative to
// left_) and sets *seg_pt_cnt, or NULL if none were found or allocation
// failed.
int *ConComp::SegmentHistogram(int *hist_array, int *seg_pt_cnt) {
// init
(*seg_pt_cnt) = 0;
int wid = right_ - left_ + 1,
hgt = bottom_ - top_ + 1;
int *x_seg_pt = new int[wid];
if (x_seg_pt == NULL) {
return NULL;
}
// skip-ahead distance after accepting a seg pt.
// NOTE(review): this clamps seg_pt_wnd to at MOST 1; from the
// SEG_PT_WND_RATIO scaling it looks like a lower bound (>= 1) may have
// been intended -- confirm against the original algorithm.
int seg_pt_wnd = static_cast<int>(hgt * SEG_PT_WND_RATIO);
if (seg_pt_wnd > 1) {
seg_pt_wnd = 1;
}
for (int x = 2; x < (wid - 2); x++) {
// strictly below the two left neighbors, not above the two right ones
if (hist_array[x] < hist_array[x - 1] &&
hist_array[x] < hist_array[x - 2] &&
hist_array[x] <= hist_array[x + 1] &&
hist_array[x] <= hist_array[x + 2]) {
x_seg_pt[(*seg_pt_cnt)++] = x;
x += seg_pt_wnd;
// mirror case: not above the two left, strictly below the two right
} else if (hist_array[x] <= hist_array[x - 1] &&
hist_array[x] <= hist_array[x - 2] &&
hist_array[x] < hist_array[x + 1] &&
hist_array[x] < hist_array[x + 2]) {
x_seg_pt[(*seg_pt_cnt)++] = x;
x += seg_pt_wnd;
}
}
// no segments, nothing to do
if ((*seg_pt_cnt) == 0) {
delete []x_seg_pt;
return NULL;
}
return x_seg_pt;
}
// Segments the component at local minima of its windowed x-density
// histogram. Returns a newly-allocated array of (*concomp_cnt) new
// components that partition this component's points, or NULL if no
// segmentation points were found or an error occurred. The caller owns
// the array and the components in it; this component is left untouched.
// Fix: both failure paths now delete the ConComp objects already created
// for the result -- the original code deleted only the pointer array and
// leaked the components.
ConComp **ConComp::Segment(int max_hist_wnd, int *concomp_cnt) {
  // init
  (*concomp_cnt) = 0;
  // No pts
  if (head_ == NULL) {
    return NULL;
  }
  int seg_pt_cnt = 0;
  // create the histogram
  int *hist_array = CreateHistogram(max_hist_wnd);
  if (hist_array == NULL) {
    return NULL;
  }
  int *x_seg_pt = SegmentHistogram(hist_array, &seg_pt_cnt);
  // free histogram
  delete []hist_array;
  // no segments, nothing to do
  if (seg_pt_cnt == 0) {
    delete []x_seg_pt;
    return NULL;
  }
  // create one new concomp per resulting segment
  ConComp **concomp_array = new ConComp *[seg_pt_cnt + 1];
  if (concomp_array == NULL) {
    delete []x_seg_pt;
    return NULL;
  }
  for (int concomp = 0; concomp <= seg_pt_cnt; concomp++) {
    concomp_array[concomp] = new ConComp();
    if (concomp_array[concomp] == NULL) {
      // free the components created so far, not just the array
      for (int free_idx = 0; free_idx < concomp; free_idx++) {
        delete concomp_array[free_idx];
      }
      delete []x_seg_pt;
      delete []concomp_array;
      return NULL;
    }
    // split concomps inherit the ID of this concomp
    concomp_array[concomp]->SetID(id_);
  }
  // set the left and right most attributes of the
  // appropriate concomps
  concomp_array[0]->left_most_ = true;
  concomp_array[seg_pt_cnt]->right_most_ = true;
  // assign pts to concomps
  ConCompPt *pt_ptr = head_;
  while (pt_ptr != NULL) {
    int seg_pt;
    // find the first seg-pt that exceeds the x value
    // of the pt
    for (seg_pt = 0; seg_pt < seg_pt_cnt; seg_pt++) {
      if ((x_seg_pt[seg_pt] + left_) > pt_ptr->x()) {
        break;
      }
    }
    // add the pt to the proper concomp
    if (concomp_array[seg_pt]->Add(pt_ptr->x(), pt_ptr->y()) == false) {
      // free everything allocated by this call before bailing out
      for (int free_idx = 0; free_idx <= seg_pt_cnt; free_idx++) {
        delete concomp_array[free_idx];
      }
      delete []x_seg_pt;
      delete []concomp_array;
      return NULL;
    }
    pt_ptr = pt_ptr->Next();
  }
  delete []x_seg_pt;
  (*concomp_cnt) = (seg_pt_cnt + 1);
  return concomp_array;
}
// Shifts the co-ordinates of all points by the specified x & y deltas
void ConComp::Shift(int dx, int dy) {
ConCompPt *pt_ptr = head_;
while (pt_ptr != NULL) {
pt_ptr->Shift(dx, dy);
pt_ptr = pt_ptr->Next();
}
left_ += dx;
right_ += dx;
top_ += dy;
bottom_ += dy;
}
} // namespace tesseract
| 1080228-arabicocr11 | cube/con_comp.cpp | C++ | asf20 | 6,871 |
/**********************************************************************
* File: feature_hybrid.cpp
* Description: Implementation of the Hybrid (Bmp + Chebyshev) Feature Class
* Author: Ahmad Abdulkader
* Created: 2008
*
* (C) Copyright 2008, Google Inc.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string>
#include <vector>
#include <algorithm>
#include "feature_base.h"
#include "feature_hybrid.h"
#include "cube_utils.h"
#include "const.h"
#include "char_samp.h"
namespace tesseract {
// Constructs the hybrid extractor from its two owned sub-extractors:
// raw bitmap features followed by Chebyshev-coefficient features.
FeatureHybrid::FeatureHybrid(TuningParams *params)
    : FeatureBase(params) {
  feature_bmp_ = new FeatureBmp(params);
  feature_chebyshev_ = new FeatureChebyshev(params);
}
// Releases the two owned sub-extractors.
FeatureHybrid::~FeatureHybrid() {
  delete feature_chebyshev_;
  delete feature_bmp_;
}
// Render a visualization of the features to a CharSamp.
// This is mainly used by visual-debuggers.
// The hybrid extractor has no rendering of its own; it returns the
// input sample unchanged.
CharSamp *FeatureHybrid::ComputeFeatureBitmap(CharSamp *char_samp) {
return char_samp;
}
// Computes the hybrid feature vector for a sample: the bitmap features
// are written first, immediately followed by the Chebyshev features.
// The features buffer must therefore hold at least
// feature_bmp_->FeatureCnt() + feature_chebyshev_->FeatureCnt() floats.
bool FeatureHybrid::ComputeFeatures(CharSamp *char_samp, float *features) {
  if (feature_bmp_ == NULL || feature_chebyshev_ == NULL) {
    return false;
  }
  // bitmap features occupy the head of the output buffer
  if (!feature_bmp_->ComputeFeatures(char_samp, features)) {
    return false;
  }
  // Chebyshev features follow directly after them
  float *cheb_features = features + feature_bmp_->FeatureCnt();
  return feature_chebyshev_->ComputeFeatures(char_samp, cheb_features);
}
} // namespace tesseract
| 1080228-arabicocr11 | cube/feature_hybrid.cpp | C++ | asf20 | 2,033 |
/**********************************************************************
* File: conv_net_classifier.h
* Description: Declaration of Convolutional-NeuralNet Character Classifier
* Author: Ahmad Abdulkader
* Created: 2007
*
* (C) Copyright 2008, Google Inc.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
// The ConvNetCharClassifier inherits from the base classifier class:
// "CharClassifierBase". It implements a Convolutional Neural Net classifier
// instance of the base classifier. It uses the Tesseract Neural Net library
// The Neural Net takes a scaled version of a bitmap and feeds it to a
// Convolutional Neural Net as input and performs a FeedForward. Each output
// of the net corresponds to class_id in the CharSet passed at construction
// time.
// Afterwards, the outputs of the Net are "folded" using the folding set
// (if any)
#ifndef CONV_NET_CLASSIFIER_H
#define CONV_NET_CLASSIFIER_H
#include <string>
#include "char_samp.h"
#include "char_altlist.h"
#include "char_set.h"
#include "feature_base.h"
#include "classifier_base.h"
#include "neural_net.h"
#include "lang_model.h"
#include "tuning_params.h"
namespace tesseract {
// Folding Ratio is the ratio of the max-activation of members of a folding
// set that is used to compute the min-activation of the rest of the set
static const float kFoldingRatio = 0.75;
class ConvNetCharClassifier : public CharClassifier {
public:
ConvNetCharClassifier(CharSet *char_set, TuningParams *params,
FeatureBase *feat_extract);
virtual ~ConvNetCharClassifier();
// The main training function. Given a sample and a class ID the classifier
// updates its parameters according to its learning algorithm. This function
// is currently not implemented. TODO(ahmadab): implement end-2-end training
virtual bool Train(CharSamp *char_samp, int ClassID);
// A secondary function needed for training. Allows the trainer to set the
// value of any train-time paramter. This function is currently not
// implemented. TODO(ahmadab): implement end-2-end training
virtual bool SetLearnParam(char *var_name, float val);
// Externally sets the Neural Net used by the classifier. Used for training
void SetNet(tesseract::NeuralNet *net);
// Classifies an input charsamp and return a CharAltList object containing
// the possible candidates and corresponding scores
virtual CharAltList * Classify(CharSamp *char_samp);
// Computes the cost of a specific charsamp being a character (versus a
// non-character: part-of-a-character OR more-than-one-character)
virtual int CharCost(CharSamp *char_samp);
private:
// Neural Net object used for classification
tesseract::NeuralNet *char_net_;
// data buffers used to hold Neural Net inputs and outputs
// (sized by the net; allocation details live in the .cpp)
float *net_input_;
float *net_output_;
// Init the classifier provided a data-path and a language string
virtual bool Init(const string &data_file_path, const string &lang,
LangModel *lang_mod);
// Loads the NeuralNets needed for the classifier
bool LoadNets(const string &data_file_path, const string &lang);
// Loads the folding sets provided a data-path and a language string
virtual bool LoadFoldingSets(const string &data_file_path,
const string &lang,
LangModel *lang_mod);
// Folds the output of the NeuralNet using the loaded folding sets
virtual void Fold();
// Scales the input char_samp and feeds it to the NeuralNet as input
bool RunNets(CharSamp *char_samp);
};
}
#endif // CONV_NET_CLASSIFIER_H
| 1080228-arabicocr11 | cube/conv_net_classifier.h | C++ | asf20 | 4,164 |
/**********************************************************************
* File: hybrid_neural_net_classifier.h
* Description: Declaration of Convolutional-NeuralNet Character Classifier
* Author: Ahmad Abdulkader
* Created: 2007
*
* (C) Copyright 2008, Google Inc.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
#ifndef HYBRID_NEURAL_NET_CLASSIFIER_H
#define HYBRID_NEURAL_NET_CLASSIFIER_H
#include <string>
#include <vector>
#include "char_samp.h"
#include "char_altlist.h"
#include "char_set.h"
#include "classifier_base.h"
#include "feature_base.h"
#include "lang_model.h"
#include "neural_net.h"
#include "tuning_params.h"
namespace tesseract {
// Folding Ratio is the ratio of the max-activation of members of a folding
// set that is used to compute the min-activation of the rest of the set
// static const float kFoldingRatio = 0.75; // see conv_net_classifier.h
// A character classifier that combines the outputs of several Neural
// Nets (see nets_/net_wgts_ below); its interface mirrors
// ConvNetCharClassifier.
class HybridNeuralNetCharClassifier : public CharClassifier {
public:
HybridNeuralNetCharClassifier(CharSet *char_set, TuningParams *params,
FeatureBase *feat_extract);
virtual ~HybridNeuralNetCharClassifier();
// The main training function. Given a sample and a class ID the classifier
// updates its parameters according to its learning algorithm. This function
// is currently not implemented. TODO(ahmadab): implement end-2-end training
virtual bool Train(CharSamp *char_samp, int ClassID);
// A secondary function needed for training. Allows the trainer to set the
// value of any train-time paramter. This function is currently not
// implemented. TODO(ahmadab): implement end-2-end training
virtual bool SetLearnParam(char *var_name, float val);
// Externally sets the Neural Net used by the classifier. Used for training
void SetNet(tesseract::NeuralNet *net);
// Classifies an input charsamp and return a CharAltList object containing
// the possible candidates and corresponding scores
virtual CharAltList *Classify(CharSamp *char_samp);
// Computes the cost of a specific charsamp being a character (versus a
// non-character: part-of-a-character OR more-than-one-character)
virtual int CharCost(CharSamp *char_samp);
private:
// Neural Net objects used for classification, with a per-net weight
// (presumably used to combine their outputs -- see the .cpp)
vector<tesseract::NeuralNet *> nets_;
vector<float> net_wgts_;
// data buffers used to hold Neural Net inputs and outputs
float *net_input_;
float *net_output_;
// Init the classifier provided a data-path and a language string
virtual bool Init(const string &data_file_path, const string &lang,
LangModel *lang_mod);
// Loads the NeuralNets needed for the classifier
bool LoadNets(const string &data_file_path, const string &lang);
// Load folding sets
// This function returns true on success or if the file can't be read,
// returns false if an error is encountered.
virtual bool LoadFoldingSets(const string &data_file_path,
const string &lang,
LangModel *lang_mod);
// Folds the output of the NeuralNet using the loaded folding sets
virtual void Fold();
// Scales the input char_samp and feeds it to the NeuralNet as input
bool RunNets(CharSamp *char_samp);
};
}
#endif // HYBRID_NEURAL_NET_CLASSIFIER_H
| 1080228-arabicocr11 | cube/hybrid_neural_net_classifier.h | C++ | asf20 | 3,828 |