code stringlengths 1 2.01M | repo_name stringlengths 3 62 | path stringlengths 1 267 | language stringclasses 231 values | license stringclasses 13 values | size int64 1 2.01M |
|---|---|---|---|---|---|
// Copyright 2010 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.testing.testify.risk.frontend.server.api.impl;
import com.google.common.collect.Lists;
import com.google.inject.Inject;
import com.google.inject.Singleton;
import com.google.testing.testify.risk.frontend.model.DataRequest;
import com.google.testing.testify.risk.frontend.model.Project;
import com.google.testing.testify.risk.frontend.server.service.DataService;
import com.google.testing.testify.risk.frontend.server.service.ProjectService;
import com.google.testing.testify.risk.frontend.server.service.UserService;
import com.google.testing.testify.risk.frontend.server.util.DataRequestDocumentGenerator;
import java.io.IOException;
import java.util.List;
import java.util.logging.Logger;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
/**
* Servlet for exposing project data requests to third party tools, so that those requests may be
* fulfilled.
*
* @author chrsmith@google.com (Chris Smith)
*/
@Singleton
public class DataApiImpl extends HttpServlet {
  private static final Logger log = Logger.getLogger(DataApiImpl.class.getName());

  /** Supplies the data requests attached to each project. */
  private final DataService dataService;
  /** Used to enumerate the projects the caller may edit. */
  private final ProjectService projectService;
  /** Used to check that the caller is authenticated. */
  private final UserService userService;

  @Inject
  public DataApiImpl(DataService dataService, ProjectService projectService,
      UserService userService) {
    this.dataService = dataService;
    this.projectService = projectService;
    this.userService = userService;
  }

  /**
   * Serves an XML document describing every outstanding data request across all projects the
   * current user (typically a role account) has EDIT access to.
   *
   * @param req the incoming request (unused beyond authentication context).
   * @param resp receives either the XML document or a 401 error.
   * @throws IOException if writing the response fails.
   */
  @Override
  protected void doGet(HttpServletRequest req, HttpServletResponse resp) throws IOException {
    log.info("DataRequestServlet::GET");
    if (!userService.isUserLoggedIn()) {
      // BUG FIX: previously this wrote to the response body and then called sendError(5000).
      // 5000 is not a valid HTTP status code, and sendError() resets the response buffer,
      // discarding any text already written. Send a proper 401 with the message instead.
      resp.sendError(HttpServletResponse.SC_UNAUTHORIZED,
          "You must be logged in to access this service.");
      return;
    }
    // Query all projects the current user has EDIT access to.
    List<Project> projectsUserCanEdit = projectService.queryProjectsUserHasEditAccessTo();
    // Collect the data requests for every such project.
    List<DataRequest> relevantDataRequests = Lists.newArrayList();
    for (Project project : projectsUserCanEdit) {
      relevantDataRequests.addAll(dataService.getProjectRequests(project.getProjectId()));
    }
    // Generate the XML document and serve it.
    String xmlDocumentText = DataRequestDocumentGenerator.generateDocument(relevantDataRequests);
    resp.getWriter().write(xmlDocumentText);
  }

  /**
   * POST is not supported by this servlet; responds with a hint to use GET.
   */
  @Override
  protected void doPost(HttpServletRequest req, HttpServletResponse resp) throws IOException {
    log.info("DataRequestServlet::POST");
    resp.getWriter().write("Please call this URL using GET.");
  }
}
| 11shubhanjali-test | testanalytics_frontend/src/main/java/com/google/testing/testify/risk/frontend/server/api/impl/DataApiImpl.java | Java | asf20 | 3,314 |
// Copyright 2011 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.testing.testify.risk.frontend.server.api.impl;
import com.google.appengine.api.taskqueue.TaskOptions;
import com.google.appengine.api.taskqueue.TaskOptions.Method;
import com.google.inject.Inject;
import com.google.inject.Singleton;
import com.google.testing.testify.risk.frontend.server.service.UserService;
import com.google.testing.testify.risk.frontend.server.task.UploadDataTask;
import com.google.testing.testify.risk.frontend.server.util.ServletUtils;
import org.json.JSONArray;
import org.json.JSONException;
import org.json.JSONObject;
import java.io.BufferedReader;
import java.io.IOException;
import java.util.logging.Logger;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
/**
* This servlet accepts user data (JSON encoded) and passes it off to a task which does the actual
* processing. {@link UploadDataTask}
*
* @author jimr@google.com (Jim Reardon)
*/
@Singleton
public class UploadApiImpl extends HttpServlet {
  private static final Logger LOG = Logger.getLogger(UploadApiImpl.class.getName());

  /** Used to stamp each queued task with the uploading user's email. */
  private final UserService userService;

  @Inject
  public UploadApiImpl(UserService userService) {
    this.userService = userService;
  }

  /**
   * GET is unsupported; uploads must arrive via POST.
   */
  @Override
  public void doGet(HttpServletRequest req, HttpServletResponse resp) throws IOException {
    LOG.info("UploadDataServlet::GET called, returning unsupported exception.");
    error(resp, "<h1>GET is unsupported</h1>\nTo upload data, use POST.");
  }

  /**
   * Accepts a JSON array of data items in the request body and enqueues one
   * {@link UploadDataTask} per item for asynchronous processing.
   *
   * @param req request whose body is a JSON array of objects.
   * @param resp receives per-item parse errors or a 500 on malformed JSON.
   * @throws IOException if reading the request or writing the response fails.
   */
  @Override
  public void doPost(HttpServletRequest req, HttpServletResponse resp) throws IOException {
    // BUG FIX: the previous loop used reader.ready(), which only reports whether buffered
    // input is available and can terminate before the full body is consumed; it also called
    // req.getReader() again on every iteration. Read the single reader until EOF instead.
    BufferedReader reader = req.getReader();
    StringBuilder input = new StringBuilder();
    String line;
    while ((line = reader.readLine()) != null) {
      input.append(line);
    }
    LOG.info("Input received: " + input.toString());
    JSONArray json;
    try {
      json = new JSONArray(input.toString());
    } catch (JSONException e) {
      LOG.warning("Couldn't parse JSON: " + e.toString());
      error(resp, "Malformed JSON could not be parsed: " + e.toString());
      return;
    }
    LOG.info("JSON received: " + json.toString());
    JSONObject o;
    TaskOptions task;
    String email = userService.getEmail();
    for (int i = 0; i < json.length(); i++) {
      try {
        o = json.getJSONObject(i);
        // Each array element becomes one POST task carrying the item payload and uploader.
        task = TaskOptions.Builder.withUrl(UploadDataTask.URL).method(Method.POST)
            .param("json", o.toString())
            .param("user", email);
        ServletUtils.queueWithRetries(UploadDataTask.QUEUE, task, "Processing data item upload");
      } catch (JSONException e) {
        // A single bad item shouldn't abort the remaining items; report it and continue.
        LOG.warning("Couldn't parse item " + i + " in JSON array: " + e.toString());
        resp.getOutputStream().print("<p>Couldn't parse item " + i + "</p>\n");
      }
    }
  }

  /**
   * Sends a 500 error carrying {@code errorText}.
   *
   * BUG FIX: the previous version wrote the text to the output stream and then called
   * sendError(500), which resets the response buffer and discards the text. Passing the
   * message to sendError delivers it in the error page instead.
   */
  private void error(HttpServletResponse resp, String errorText) throws IOException {
    resp.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, errorText);
  }
}
| 11shubhanjali-test | testanalytics_frontend/src/main/java/com/google/testing/testify/risk/frontend/server/api/impl/UploadApiImpl.java | Java | asf20 | 3,615 |
// Copyright 2010 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.testing.testify.risk.frontend.server.util;
import com.google.appengine.api.taskqueue.Queue;
import com.google.appengine.api.taskqueue.QueueFactory;
import com.google.appengine.api.taskqueue.TaskOptions;
import com.google.appengine.api.taskqueue.TransientFailureException;
import com.google.appengine.api.utils.SystemProperty;
import com.google.testing.testify.risk.frontend.server.InsufficientPrivlegesException;
import java.io.UnsupportedEncodingException;
import java.util.ArrayList;
import java.util.List;
import java.util.Properties;
import java.util.logging.Logger;
import javax.jdo.PersistenceManager;
import javax.mail.Message;
import javax.mail.MessagingException;
import javax.mail.Session;
import javax.mail.Transport;
import javax.mail.internet.AddressException;
import javax.mail.internet.InternetAddress;
import javax.mail.internet.MimeMessage;
/**
* Utility methods for servlets.
*
* @author jimr@google.com (Jim Reardon)
*/
public class ServletUtils {
  /** Base URL of the frontend, used to build links in notification emails. */
  private static final String URL =
      System.getProperty("com.google.testing.testify.risk.frontend.url");
  private static final Logger LOG = Logger.getLogger(ServletUtils.class.getName());
  /** Number of attempts made before giving up on a transiently-failing task queue. */
  private static final int QUEUE_RETRIES = 10;

  private ServletUtils() {} // COV_NF_LINE

  /**
   * Throws an access exception if user doesn't have access to do the operation. EG:
   * ServletUtils.requireAccess(userService.hasAccess(...)).
   *
   * @param hasAccess whether the caller is permitted to proceed.
   */
  public static void requireAccess(boolean hasAccess) {
    // TODO(jimr): It'd be nice if this was less clunky. Figure out a way to do that.
    if (!hasAccess) {
      throw new InsufficientPrivlegesException("You don't have access to perform that action.");
    }
  }

  /**
   * Lists returned from GAE's PersistenceManager are a private type which cannot be marshalled to
   * client-side GWT code. This method simply copies elements from whatever input List<T> into a
   * GWT-friendly detached ArrayList<T>.
   *
   * @param inputList the list to safenize.
   * @param pm the persistence manager. This is needed in order to create a detached copy.
   * @return a copied, serialization-safe list.
   */
  public static <T> List<T> makeGwtSafe(List<T> inputList, PersistenceManager pm) {
    List<T> copiedList = new ArrayList<T>(inputList.size());
    for (T item : inputList) {
      copiedList.add(pm.detachCopy(item));
    }
    return copiedList;
  }

  /**
   * Detaches a single persistent object so it can be marshalled to client-side GWT code.
   *
   * @param input the item to safenize.
   * @param pm the persistence manager. This is needed in order to create a detached copy.
   * @return a copied, serialization-safe item.
   */
  public static <T> T makeGwtSafe(T input, PersistenceManager pm) {
    return pm.detachCopy(input);
  }

  /**
   * Adds {@code task} to the named queue, retrying up to {@link #QUEUE_RETRIES} times on
   * {@link TransientFailureException}.
   *
   * @param queueName name of the App Engine task queue.
   * @param task the task to enqueue.
   * @param description human-readable description used in log messages.
   * @return true if the task was enqueued, false if all retries were exhausted.
   */
  public static boolean queueWithRetries(String queueName, TaskOptions task, String description) {
    Queue queue = QueueFactory.getQueue(queueName);
    for (int i = 0; i < QUEUE_RETRIES; i++) {
      try {
        queue.add(task);
        return true;
      } catch (TransientFailureException e) {
        LOG.warning("Retrying queue add for task due to queue failure: " + description);
      }
    }
    // BUG FIX: the message previously read "...after 10retries:" -- missing space.
    LOG.severe("Could not enqueue task after " + QUEUE_RETRIES + " retries: " + description);
    return false;
  }

  /** Emails {@code emails} that their {@code accessType} access to {@code project} was removed. */
  public static void notifyRemovedAccess(String from, List<String> emails, String accessType,
      String project, String projectId) {
    notifyAccessChanged(from, emails, accessType, project, projectId, false);
  }

  /** Emails {@code emails} that they were granted {@code accessType} access to {@code project}. */
  public static void notifyAddedAccess(String from, List<String> emails, String accessType,
      String project, String projectId) {
    notifyAccessChanged(from, emails, accessType, project, projectId, true);
  }

  /**
   * Sends a notification email to each address describing an access change. Emails are only
   * actually transmitted in the production environment; elsewhere the send is logged and skipped.
   *
   * @param from email address of the user who made the change.
   * @param emails addresses to notify; a warning is logged if empty.
   * @param accessType human-readable access level, e.g. "owner" or "editor".
   * @param project display name of the project.
   * @param projectId id used to build the project-settings link.
   * @param isAdded true if access was granted, false if revoked.
   */
  private static void notifyAccessChanged(String from, List<String> emails, String accessType,
      String project, String projectId, boolean isAdded) {
    LOG.info("Trying to message users about change in " + accessType + " access.");
    if (emails.size() < 1) {
      LOG.warning("No emails specified to notify.");
      return;
    }
    try {
      String url = URL + "/#/" + projectId + "/project-settings";
      String verb = isAdded ? "added" : "removed";
      String change = "You have been " + verb + " as an " + accessType
          + " of Test Analytics Project: " + project;
      Session session = Session.getDefaultInstance(new Properties(), null);
      for (String email : emails) {
        Message msg = new MimeMessage(session);
        msg.setFrom(new InternetAddress(from, "Test Analytics on behalf of " + from));
        // BUG FIX: log message previously ran the address into the parenthesis ("a@b(You...").
        LOG.info("Sending email to " + email + " (" + change + ")");
        msg.addRecipient(Message.RecipientType.TO, new InternetAddress(email));
        msg.setSubject(change);
        msg.setText(change + "\n\nThis Test Analytics project's URL:\n\n" + url
            + "\n\nIf you believe this was a mistake, contact " + from + " who made the change.");
        if (SystemProperty.environment.value().equals(
            SystemProperty.Environment.Value.Production)) {
          Transport.send(msg);
        } else {
          LOG.info("Not actually sending email; not in production environment.");
        }
      }
    } catch (UnsupportedEncodingException e) { // COV_NF_START
      LOG.severe("Couldn't send email.");
    } catch (AddressException e) {
      LOG.severe("Couldn't send email.");
    } catch (MessagingException e) {
      LOG.severe("Couldn't send email.");
    } // COV_NF_END
  }
}
| 11shubhanjali-test | testanalytics_frontend/src/main/java/com/google/testing/testify/risk/frontend/server/util/ServletUtils.java | Java | asf20 | 6,278 |
// Copyright 2010 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.testing.testify.risk.frontend.server.util;
import com.google.common.collect.HashMultimap;
import com.google.common.collect.Multimap;
import com.google.testing.testify.risk.frontend.model.DataRequest;
import com.google.testing.testify.risk.frontend.model.DataRequestOption;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
import java.io.StringWriter;
import java.util.Collection;
import java.util.List;
import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.DocumentBuilderFactory;
import javax.xml.parsers.ParserConfigurationException;
import javax.xml.transform.Transformer;
import javax.xml.transform.TransformerConfigurationException;
import javax.xml.transform.TransformerException;
import javax.xml.transform.TransformerFactory;
import javax.xml.transform.dom.DOMSource;
import javax.xml.transform.stream.StreamResult;
/**
* Generates an XML document for external data collectors.
*
* @author chrsmith@google.com (Chris Smith)
*/
public class DataRequestDocumentGenerator {
  private DataRequestDocumentGenerator() {} // COV_NF_LINE

  /**
   * Returns an XML document describing the data requests.
   *
   * <p>Document shape: a {@code <TestAnalytics>} root containing one {@code <DataRequests>}
   * element per project (attributed with ProjectID), which in turn holds one
   * {@code <DataRequest>} element per data source (attributed with Type), each carrying the
   * configuration {@code <Parameter>} elements for that source.
   *
   * <p>NOTE(review): element ordering follows HashMultimap key iteration, so it is not
   * guaranteed stable between calls. On internal XML errors a plain error string (not XML)
   * is returned instead of throwing.
   *
   * @param allDataRequests requests from any number of projects; grouped internally.
   */
  public static String generateDocument(List<DataRequest> allDataRequests) {
    try {
      DocumentBuilderFactory docBuilderFactory = DocumentBuilderFactory.newInstance();
      DocumentBuilder docBuilder = docBuilderFactory.newDocumentBuilder();
      Document document = docBuilder.newDocument();
      Element documentRoot = document.createElement("TestAnalytics");
      document.appendChild(documentRoot);
      // Group all requests by their parent project.
      Multimap<Long, DataRequest> requestsByProject = getRequestsByProject(allDataRequests);
      for (Long projectId : requestsByProject.keySet()) {
        Element projectElement = document.createElement("DataRequests");
        projectElement.setAttribute("ProjectID", Long.toString(projectId));
        documentRoot.appendChild(projectElement);
        // Group project requests by data source.
        Collection<DataRequest> projectRequests = requestsByProject.get(projectId);
        Multimap<String, DataRequest> requestsBySource = getRequestsByDataSource(projectRequests);
        for (String sourceName : requestsBySource.keySet()) {
          Element dataSourceElement = document.createElement("DataRequest");
          dataSourceElement.setAttribute("Type", sourceName);
          projectElement.appendChild(dataSourceElement);
          // Write out the configuration parameter strings for the data source.
          for (DataRequest request : requestsBySource.get(sourceName)) {
            for (DataRequestOption option : request.getDataRequestOptions()) {
              Element dataSourceParameter = document.createElement("Parameter");
              dataSourceParameter.setAttribute("Name", option.getName());
              dataSourceParameter.appendChild(document.createTextNode(option.getValue()));
              dataSourceElement.appendChild(dataSourceParameter);
            }
          }
        }
      }
      // Now dump the document in memory to a string.
      Transformer transformer = TransformerFactory.newInstance().newTransformer();
      DOMSource source = new DOMSource(document);
      StreamResult result = new javax.xml.transform.stream.StreamResult(new StringWriter());
      transformer.transform(source, result);
      return result.getWriter().toString();
      // COV_NF_START
    } catch (TransformerConfigurationException tce) {
      return "Error in transformer configuration.";
    } catch (TransformerException te) {
      return "Error transforming document.";
    } catch (ParserConfigurationException pce) {
      return "Error in parser configuration.";
    }
    // COV_NF_END
  }

  /** Indexes requests by {@code getParentProjectId()}. */
  private static Multimap<Long, DataRequest> getRequestsByProject(
      Collection<DataRequest> requests) {
    Multimap<Long, DataRequest> requestsByProject = HashMultimap.create();
    for (DataRequest request : requests) {
      requestsByProject.put(request.getParentProjectId(), request);
    }
    return requestsByProject;
  }

  /** Indexes requests by {@code getDataSourceName()}. */
  private static Multimap<String, DataRequest> getRequestsByDataSource(
      Collection<DataRequest> requests) {
    Multimap<String, DataRequest> requestsBySource = HashMultimap.create();
    for (DataRequest request : requests) {
      requestsBySource.put(request.getDataSourceName(), request);
    }
    return requestsBySource;
  }
}
| 11shubhanjali-test | testanalytics_frontend/src/main/java/com/google/testing/testify/risk/frontend/server/util/DataRequestDocumentGenerator.java | Java | asf20 | 5,055 |
// Copyright 2011 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.testing.testify.risk.frontend.model;
import java.io.Serializable;
import javax.jdo.annotations.Extension;
import javax.jdo.annotations.IdGeneratorStrategy;
import javax.jdo.annotations.PersistenceCapable;
import javax.jdo.annotations.Persistent;
import javax.jdo.annotations.PrimaryKey;
/**
* JDO object for a Label. Labels can be assigned to any part of an ACC model.
* A label is stored as a two-state label (essentially, a name and value pair). This can
* represent things like:
* "Priority-P4" where P4 is the value to the name Priority.
*
* For labels where there is not a name/value structure, the label is simply stored as the name.
* "Security", for example, would have an empty value and the entire label text in name.
*
* The entire label is not stored as one text field, but can be generated by this class using
* the function getLabelText(). This function should be used instead of doing this manually.
*
* @author jimr@google.com (Jim Reardon)
*/
@PersistenceCapable(detachable = "true")
public class AccLabel implements Serializable {
  // Encoded datastore primary key.
  @PrimaryKey
  @Persistent(valueStrategy = IdGeneratorStrategy.IDENTITY)
  @Extension(vendorName = "datanucleus", key = "gae.encoded-pk", value = "true")
  private String id;

  // Project this label belongs to.
  @Persistent
  private Long projectId;

  // Which kind of ACC element (attribute/component/capability) the label is attached to.
  @Persistent
  private AccElementType elementType;

  // Id of the element of type elementType this label is attached to.
  @Persistent
  private Long elementId;

  // Label name; for a two-state label like "Priority-P4" this is "Priority". For a
  // simple label like "Security" the whole text lives here and value is null.
  @Persistent
  private String name;

  // Label value; "P4" in "Priority-P4", or null for a simple label.
  @Persistent
  private String value;

  public String getId() {
    return id;
  }

  public void setId(String id) {
    this.id = id;
  }

  public Long getProjectId() {
    return projectId;
  }

  public void setProjectId(Long projectId) {
    this.projectId = projectId;
  }

  public AccElementType getElementType() {
    return elementType;
  }

  public void setElementType(AccElementType elementType) {
    this.elementType = elementType;
  }

  public String getName() {
    return name;
  }

  public void setName(String name) {
    this.name = name;
  }

  public String getValue() {
    return value;
  }

  public void setValue(String value) {
    this.value = value;
  }

  /**
   * Will attempt to split a label into its name / value pair, or set it all as name if it is not
   * a two-state label.
   */
  public void setLabelText(String labelText) {
    if (labelText == null) {
      name = null;
      value = null;
    } else {
      // split(..., 2) always yields at least one element for non-null input.
      String[] split = labelText.split("-", 2);
      name = split[0];
      // BUG FIX: value must be reset when the new text has no "-"; previously a stale value
      // survived (setLabelText("Priority-P4") then setLabelText("Security") left value="P4",
      // making getLabelText() return "Security-P4").
      value = (split.length > 1) ? split[1] : null;
    }
  }

  /**
   * Returns a combined label with the name and value in one string. Eg:
   * name=Priority value=P4 would become "Priority-P4".
   * name=Security value=null would become "Security".
   * @return the combined label.
   */
  public String getLabelText() {
    String labelText = name;
    if (value != null) {
      labelText += "-" + value;
    }
    return labelText;
  }

  public void setElementId(Long elementId) {
    this.elementId = elementId;
  }

  public Long getElementId() {
    return elementId;
  }
}
| 11shubhanjali-test | testanalytics_frontend/src/main/java/com/google/testing/testify/risk/frontend/model/AccLabel.java | Java | asf20 | 3,722 |
// Copyright 2010 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.testing.testify.risk.frontend.model;
import com.google.testing.testify.risk.frontend.shared.util.StringUtil;
import java.io.Serializable;
import java.util.HashSet;
import java.util.Set;
import javax.jdo.annotations.IdGeneratorStrategy;
import javax.jdo.annotations.PersistenceCapable;
import javax.jdo.annotations.Persistent;
import javax.jdo.annotations.PrimaryKey;
/**
* Generalized representation of a bug or defect in a piece of software.
*
* @author chrsmith@google.com (Chris Smith)
*/
@PersistenceCapable(detachable = "true")
public class Bug implements Serializable, UploadedDatum {
  /** Unique identifier to store the bug in App Engine. */
  @PrimaryKey
  @Persistent(valueStrategy = IdGeneratorStrategy.IDENTITY)
  private Long internalId;

  /** Project ID of the project the bug belongs to. */
  @Persistent
  private Long parentProjectId;

  /**
   * Bug ID provided by the data provider. Must be unique across all bugs associated with this
   * project.
   */
  @Persistent
  private Long externalId;

  /** Bug severity. Sev 1 - severe, Sev 4 - ignorable.*/
  @Persistent
  private Long severity;

  /** Bug priority. Pri 1 - fix now, Pri 4 - puntable. */
  @Persistent
  private Long priority;

  /** Arbitrary title of the bug. */
  @Persistent
  private String title;

  /** Component path, or similar, of the bug. */
  @Persistent
  private String path;

  /** State, eg: Closed, Open, Active, etc **/
  @Persistent
  private String state;

  /** Date this bug was originally reported **/
  @Persistent
  private Long stateDate;

  /** Attribute this bug should be associated with (if any). */
  @Persistent
  private Long targetAttributeId;

  /** Component this bug should be associated with (if any). */
  @Persistent
  private Long targetComponentId;

  /** Capability this bug should be associated with (if any). */
  @Persistent
  private Long targetCapabilityId;

  /**
   * Bug group is a meta string to identify groups of bugs, typically for linking them to specific
   * components. For example, if the bug database is organized by component then the bug
   * group should be its path in the database.
   */
  @Persistent
  private Set<String> groups = new HashSet<String>();

  /** URL to identify view more information about the bug. */
  @Persistent
  private String bugUrl;

  @Override
  public void setParentProjectId(Long parentProjectId) {
    this.parentProjectId = parentProjectId;
  }

  @Override
  public Long getParentProjectId() {
    return parentProjectId;
  }

  @Override
  public void setInternalId(Long internalId) {
    this.internalId = internalId;
  }

  /**
   * @return the bug's internal ID, which is unique across all projects.
   */
  @Override
  public Long getInternalId() {
    return internalId;
  }

  @Override
  public void setExternalId(Long externalId) {
    this.externalId = externalId;
  }

  /**
   * @return an arbitrary ID associated with the bug. (The bug ID in an external bug database.)
   */
  @Override
  public Long getExternalId() {
    return externalId;
  }

  public void setSeverity(Long severity) {
    this.severity = severity;
  }

  public Long getSeverity() {
    return severity;
  }

  public void setPriority(Long priority) {
    this.priority = priority;
  }

  public Long getPriority() {
    return priority;
  }

  public void setTitle(String title) {
    this.title = title;
  }

  public String getTitle() {
    return title;
  }

  public String getPath() {
    return path;
  }

  public void setPath(String path) {
    this.path = path;
  }

  /** Adds a single group name to this bug's group set (no-op if already present). */
  public void addBugGroup(String groupName) {
    groups.add(groupName);
  }

  /** Removes a single group name from this bug's group set (no-op if absent). */
  public void removeBugGroup(String groupName) {
    groups.remove(groupName);
  }

  // NOTE(review): stores the caller's set directly (no defensive copy); later mutation of the
  // argument will be visible through this bug.
  public void setBugGroups(Set<String> bugGroups) {
    this.groups = bugGroups;
  }

  public Set<String> getBugGroups() {
    return groups;
  }

  /**
   * @return the group names associated with this bug as a comma separated list.
   */
  public String getGroupsAsCommaSeparatedList() {
    return StringUtil.listToCsv(groups);
  }

  public void setBugUrl(String bugUrl) {
    this.bugUrl = bugUrl;
  }

  public String getBugUrl() {
    return bugUrl;
  }

  @Override
  public void setTargetCapabilityId(Long targetCapabilityId) {
    this.targetCapabilityId = targetCapabilityId;
  }

  @Override
  public Long getTargetCapabilityId() {
    return targetCapabilityId;
  }

  /** Display text for a hyperlink to this bug: the bug's title. */
  @Override
  public String getLinkText() {
    return title;
  }

  /** Destination for a hyperlink to this bug: the external bug URL. */
  @Override
  public String getLinkUrl() {
    return bugUrl;
  }

  /** Tooltip listing the groups this bug is attached to. */
  @Override
  public String getToolTip() {
    StringBuilder text = new StringBuilder();
    text.append("This bug is attached to the following groups: ");
    text.append(this.getGroupsAsCommaSeparatedList());
    return text.toString();
  }

  @Override
  public boolean isAttachedToAttribute() {
    return targetAttributeId != null;
  }

  @Override
  public boolean isAttachedToComponent() {
    return targetComponentId != null;
  }

  @Override
  public boolean isAttachedToCapability() {
    return targetCapabilityId != null;
  }

  @Override
  public void setTargetAttributeId(Long targetAttributeId) {
    this.targetAttributeId = targetAttributeId;
  }

  @Override
  public Long getTargetAttributeId() {
    return targetAttributeId;
  }

  @Override
  public void setTargetComponentId(Long targetComponentId) {
    this.targetComponentId = targetComponentId;
  }

  @Override
  public Long getTargetComponentId() {
    return targetComponentId;
  }

  @Override
  public Long getStateDate() {
    return stateDate;
  }

  public void setStateDate(Long stateDate) {
    this.stateDate = stateDate;
  }

  @Override
  public String getState() {
    return state;
  }

  public void setState(String state) {
    this.state = state;
  }

  @Override
  public DatumType getDatumType() {
    return DatumType.BUGS;
  }

  /**
   * Generic field accessor used by upload/classification code. Supports the field names
   * "Title", "Path", and "Labels"; returns null for any other name.
   */
  @Override
  public String getField(String field) {
    if ("Title".equals(field)) {
      return title;
    } else if ("Path".equals(field)) {
      return path;
    } else if ("Labels".equals(field)) {
      return getGroupsAsCommaSeparatedList();
    }
    return null;
  }
}
| 11shubhanjali-test | testanalytics_frontend/src/main/java/com/google/testing/testify/risk/frontend/model/Bug.java | Java | asf20 | 6,767 |
// Copyright 2010 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.testing.testify.risk.frontend.model;
import java.util.Collection;
/**
* Represents an Attribute x Component intersection for calculating risk.
*
* @author chrsmith@google.com (Chris Smith)
* @author jimr@google.com (Jim Reardon)
*/
public class CapabilityIntersectionData {
  // The Attribute forming one axis of this grid cell.
  private final Attribute parentAttribute;
  // The Component forming the other axis of this grid cell.
  private final Component parentComponent;
  // Capabilities that live at this Attribute x Component intersection.
  private final Collection<Capability> cellCapabilities;

  /**
   * Immutable value object describing one cell of the risk grid: the Attribute and Component
   * that intersect there, plus the Capabilities defined for that intersection.
   */
  public CapabilityIntersectionData(
      Attribute parentAttribute, Component parentComponent,
      Collection<Capability> cellCapabilities) {
    this.parentAttribute = parentAttribute;
    this.parentComponent = parentComponent;
    this.cellCapabilities = cellCapabilities;
  }

  /** @return the Attribute axis of this cell. */
  public Attribute getParentAttribute() {
    return parentAttribute;
  }

  /** @return the Component axis of this cell. */
  public Component getParentComponent() {
    return parentComponent;
  }

  /** Project Capabilities associated with the Attribute x Component. */
  public Collection<Capability> getCapabilities() {
    return cellCapabilities;
  }
}
| 11shubhanjali-test | testanalytics_frontend/src/main/java/com/google/testing/testify/risk/frontend/model/CapabilityIntersectionData.java | Java | asf20 | 1,652 |
// Copyright 2010 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.testing.testify.risk.frontend.model;
import com.google.testing.testify.risk.frontend.shared.rpc.UserRpc.ProjectAccess;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.List;
import javax.jdo.annotations.IdGeneratorStrategy;
import javax.jdo.annotations.NotPersistent;
import javax.jdo.annotations.PersistenceCapable;
import javax.jdo.annotations.Persistent;
import javax.jdo.annotations.PrimaryKey;
/**
* JDO object for storing Project data.
*
* @author chrsmith@google.com (Chris Smith)
* @author jimr@google.com (Jim Reardon)
*/
@PersistenceCapable(detachable = "true")
public class Project implements Serializable {
  @PrimaryKey
  @Persistent(valueStrategy = IdGeneratorStrategy.IDENTITY)
  private Long projectId;

  @Persistent
  private String name;

  /**
   * Full paragraph or page-length description of the project.
   */
  @Persistent
  private String description;

  /**
   * This field controls whether or not the project is listed in public queries.
   */
  @Persistent
  private Boolean isPubliclyVisible = false;

  /**
   * Email addresses of users with owner access. They can do anything editors can do as well as
   * delete the project.
   */
  @Persistent
  private List<String> projectOwners = new ArrayList<String>();

  /**
   * Email addresses of users with editor access. They can add, edit, or update project data.
   */
  @Persistent
  private List<String> projectEditors = new ArrayList<String>();

  /**
   * Email addresses of users with view access. They can view but not edit project data.
   */
  @Persistent
  private List<String> projectViewers = new ArrayList<String>();

  /**
   * Allows the server-side to set the permissions so UI code has easy access without executing
   * an RPC. Server-side code should never rely on this value.
   */
  @NotPersistent
  private ProjectAccess cachedAccessLevel;

  /**
   * Returns the Project's ID. Note that this will return null in the case the project hasn't been
   * persisted in the backing store (and doesn't have an ID yet).
   */
  public Long getProjectId() {
    return projectId;
  }

  public void setProjectId(long projectId) {
    this.projectId = projectId;
  }

  public String getName() {
    return name;
  }

  public void setName(String name) {
    this.name = name;
  }

  public String getDescription() {
    return description;
  }

  public void setDescription(String description) {
    this.description = description;
  }

  public void setIsPubliclyVisible(Boolean isPubliclyVisible) {
    this.isPubliclyVisible = isPubliclyVisible;
  }

  /**
   * @return whether the project is listed in public queries; false when unset.
   */
  public boolean getIsPubliclyVisible() {
    // BUG FIX: the field is a nullable Boolean (may be null on entities persisted before the
    // field existed, or after setIsPubliclyVisible(null)); direct unboxing would NPE. Treat
    // null as not-public.
    return Boolean.TRUE.equals(isPubliclyVisible);
  }

  /** Grants owner access to the given email address (no-op if already an owner). */
  public void addProjectOwner(String ownerEmailAddress) {
    if (!projectOwners.contains(ownerEmailAddress)) {
      projectOwners.add(ownerEmailAddress);
    }
  }

  public void removeProjectOwner(String ownerEmailAddress) {
    projectOwners.remove(ownerEmailAddress);
  }

  public void setProjectOwners(List<String> projectOwners) {
    this.projectOwners = projectOwners;
  }

  public List<String> getProjectOwners() {
    return projectOwners;
  }

  /** Grants editor access to the given email address (no-op if already an editor). */
  public void addProjectEditor(String editorEmailAddress) {
    if (!projectEditors.contains(editorEmailAddress)) {
      projectEditors.add(editorEmailAddress);
    }
  }

  public void removeProjectEditor(String editorEmailAddress) {
    projectEditors.remove(editorEmailAddress);
  }

  public void setProjectEditors(List<String> projectEditors) {
    this.projectEditors = projectEditors;
  }

  public List<String> getProjectEditors() {
    return projectEditors;
  }

  /**
   * Grants view access to the given email address (no-op if already a viewer).
   * NOTE(review): named "addProjectView" (not "addProjectViewer") -- kept for compatibility
   * with existing callers.
   */
  public void addProjectView(String viewEmailAddress) {
    if (!projectViewers.contains(viewEmailAddress)) {
      projectViewers.add(viewEmailAddress);
    }
  }

  public void removeProjectViewer(String viewerEmailAddress) {
    projectViewers.remove(viewerEmailAddress);
  }

  public void setProjectViewers(List<String> projectViewers) {
    this.projectViewers = projectViewers;
  }

  public List<String> getProjectViewers() {
    return projectViewers;
  }

  public void setCachedAccessLevel(ProjectAccess cachedAccessLevel) {
    this.cachedAccessLevel = cachedAccessLevel;
  }

  public ProjectAccess getCachedAccessLevel() {
    return cachedAccessLevel;
  }
}
| 11shubhanjali-test | testanalytics_frontend/src/main/java/com/google/testing/testify/risk/frontend/model/Project.java | Java | asf20 | 4,897 |
// Copyright 2010 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.testing.testify.risk.frontend.model;
import com.google.testing.testify.risk.frontend.shared.util.StringUtil;
import java.io.Serializable;
import java.util.HashSet;
import java.util.Set;
import javax.jdo.annotations.IdGeneratorStrategy;
import javax.jdo.annotations.PersistenceCapable;
import javax.jdo.annotations.Persistent;
import javax.jdo.annotations.PrimaryKey;
/**
* Generalized representation of a code checkin.
*
* @author chrsmith@google.com (Chris Smith)
*/
@PersistenceCapable(detachable = "true")
public class Checkin implements Serializable, UploadedDatum {

  /** Unique identifier to store the checkin in App Engine. */
  @PrimaryKey
  @Persistent(valueStrategy = IdGeneratorStrategy.IDENTITY)
  private Long internalId;

  /** Project ID of the project the checkin belongs to. */
  @Persistent
  private Long parentProjectId;

  /**
   * Checkin ID provided by the data provider. Must be unique across all checkins associated with
   * this project.
   */
  @Persistent
  private Long externalId;

  /** One-line change description supplied by the data provider. May be null. */
  @Persistent
  private String summary;

  /**
   * Directories where files were touched by this checkin. E.g., if files:
   *   ADD alpha\beta\gamma\file1.txt
   *   DELETE alpha\beta\gamma\file2.txt
   *   EDIT alpha\beta\delta\file3.txt
   *
   * The set of directories would be { "alpha\beta\gamma", "alpha\beta\delta" }.
   */
  @Persistent
  private Set<String> directoriesTouched = new HashSet<String>();

  /** URL to identify view more information about the checkin. */
  @Persistent
  private String changeUrl;

  /** Submitted, pending, etc. */
  @Persistent
  private String state;

  /** Date it entered current state. */
  @Persistent
  private Long stateDate;

  /** ID of the Attribute this checkin applies to, if any. */
  @Persistent
  private Long targetAttributeId;

  /** ID of the Component this checkin applies to, if any. */
  @Persistent
  private Long targetComponentId;

  /** ID of the Capability this checkin applies to, if any. */
  @Persistent
  private Long targetCapabilityId;

  @Override
  public void setInternalId(Long internalId) {
    this.internalId = internalId;
  }

  @Override
  public Long getInternalId() {
    return internalId;
  }

  @Override
  public void setParentProjectId(Long parentProjectId) {
    this.parentProjectId = parentProjectId;
  }

  @Override
  public Long getParentProjectId() {
    return parentProjectId;
  }

  @Override
  public void setExternalId(Long externalId) {
    this.externalId = externalId;
  }

  @Override
  public Long getExternalId() {
    return externalId;
  }

  public void setSummary(String summary) {
    this.summary = summary;
  }

  public String getSummary() {
    return summary;
  }

  public void setDirectoriesTouched(Set<String> directoriesTouched) {
    this.directoriesTouched = directoriesTouched;
  }

  public Set<String> getDirectoriesTouched() {
    return directoriesTouched;
  }

  /**
   * @return the directories the checkin has touched as a comma separated list.
   */
  public String getDirectoriesTouchedAsCommaSeparatedList() {
    return StringUtil.listToCsv(directoriesTouched);
  }

  public void addDirectoryTouched(String directory) {
    directoriesTouched.add(directory);
  }

  public void removeDirectoryTouched(String directory) {
    directoriesTouched.remove(directory);
  }

  public void setChangeUrl(String changeUrl) {
    this.changeUrl = changeUrl;
  }

  public String getChangeUrl() {
    return changeUrl;
  }

  @Override
  public void setTargetAttributeId(Long targetAttributeId) {
    this.targetAttributeId = targetAttributeId;
  }

  @Override
  public Long getTargetAttributeId() {
    return targetAttributeId;
  }

  @Override
  public void setTargetComponentId(Long targetComponentId) {
    this.targetComponentId = targetComponentId;
  }

  @Override
  public Long getTargetComponentId() {
    return targetComponentId;
  }

  @Override
  public void setTargetCapabilityId(Long targetCapabilityId) {
    this.targetCapabilityId = targetCapabilityId;
  }

  @Override
  public Long getTargetCapabilityId() {
    return targetCapabilityId;
  }

  /** Returns display text for a link to this checkin, using the external ID when available. */
  @Override
  public String getLinkText() {
    if (externalId == null) {
      return "Checkin (no id)";
    }
    return ("Checkin #" + Long.toString(externalId));
  }

  @Override
  public String getLinkUrl() {
    return changeUrl;
  }

  /**
   * Builds the hover text for this checkin.
   *
   * Fix: the original concatenated the summary and the directory sentence with no separator
   * ("...summaryThe following directories...") and rendered a literal "null" when the summary
   * was unset; a null summary is now skipped and a space separates the two sentences.
   */
  @Override
  public String getToolTip() {
    StringBuilder text = new StringBuilder();
    if (summary != null) {
      text.append(summary);
      text.append(' ');
    }
    text.append("The following directories were touched: ");
    text.append(getDirectoriesTouchedAsCommaSeparatedList());
    return text.toString();
  }

  @Override
  public boolean isAttachedToAttribute() {
    return getTargetAttributeId() != null;
  }

  @Override
  public boolean isAttachedToCapability() {
    return getTargetCapabilityId() != null;
  }

  @Override
  public boolean isAttachedToComponent() {
    return getTargetComponentId() != null;
  }

  @Override
  public String getState() {
    return state;
  }

  public void setState(String state) {
    this.state = state;
  }

  @Override
  public Long getStateDate() {
    return stateDate;
  }

  public void setStateDate(Long stateDate) {
    this.stateDate = stateDate;
  }

  @Override
  public DatumType getDatumType() {
    return DatumType.CHECKINS;
  }

  /**
   * Returns the value of a filterable field; must cover every name listed for CHECKINS in
   * {@link DatumType#getFilterTypes} ("Summary", "Directories"). Returns null for unknown fields.
   */
  @Override
  public String getField(String field) {
    if ("Summary".equals(field)) {
      return summary;
    } else if ("Directories".equals(field)) {
      return getDirectoriesTouchedAsCommaSeparatedList();
    }
    return null;
  }
}
| 11shubhanjali-test | testanalytics_frontend/src/main/java/com/google/testing/testify/risk/frontend/model/Checkin.java | Java | asf20 | 6,175 |
// Copyright 2011 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.testing.testify.risk.frontend.model;
import com.google.common.collect.Lists;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
/**
* Supported data types.
*
* For each data type we keep track of how to refer to it in both a singular and plural sense.
* We also track what parts of the datum support filtering.
*
* @author jimr@google.com (Jim Reardon)
*/
public enum DatumType {
  /**
   * Filtering is currently executed inside the {@link Filter} class. That, in turn, relies
   * upon each class that implements UploadedDatum to return any filterable field through the
   * <em>getField</em> method.
   */
  BUGS("Bugs", "Bug", Arrays.asList("Title", "Path", "Labels")),
  TESTS("Tests", "Test", Arrays.asList("Title", "Labels")),
  CHECKINS("Checkins", "Checkin", Arrays.asList("Summary", "Directories"));

  /** Plural display name, e.g. "Bugs". */
  private final String plural;
  /** Singular display name, e.g. "Bug". */
  private final String singular;
  /** Names of the fields of this datum type that support filtering. Immutable. */
  private final List<String> filterTypes;

  DatumType(String plural, String singular, List<String> filterTypes) {
    this.plural = plural;
    this.singular = singular;
    // Defensive, unmodifiable copy: enum constants are global singletons, so handing out the
    // backing list directly would let any caller mutate shared state for the whole process.
    this.filterTypes = Collections.unmodifiableList(new ArrayList<String>(filterTypes));
  }

  public String getPlural() {
    return plural;
  }

  public String getSingular() {
    return singular;
  }

  /** Returns the filterable field names; the returned list cannot be modified. */
  public List<String> getFilterTypes() {
    return filterTypes;
  }
}
| 11shubhanjali-test | testanalytics_frontend/src/main/java/com/google/testing/testify/risk/frontend/model/DatumType.java | Java | asf20 | 1,897 |
// Copyright 2011 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.testing.testify.risk.frontend.model;
import java.io.Serializable;
import javax.jdo.annotations.Extension;
import javax.jdo.annotations.IdGeneratorStrategy;
import javax.jdo.annotations.PersistenceCapable;
import javax.jdo.annotations.Persistent;
import javax.jdo.annotations.PrimaryKey;
/**
* An individual configuration option for a data request. For example, this is an example of
* a data request option for a Bug
*
* name = "ComponentPath"
* value = "\Testing\Tools\Test Analytics"
*
* This would import any bug under that component path.
*
* or
*
* name = "Hotlist"
* value = "123123"
*
* This would import the bugs in hotlist 123123.
*
* @author jimr@google.com (Jim Reardon)
*/
@PersistenceCapable(detachable = "true")
public class DataRequestOption implements Serializable {

  /** App Engine encoded primary key (see the datanucleus gae.encoded-pk extension). */
  @PrimaryKey
  @Persistent(valueStrategy = IdGeneratorStrategy.IDENTITY)
  @Extension(vendorName = "datanucleus", key = "gae.encoded-pk", value = "true")
  private String id;

  /** Option name, e.g. "ComponentPath" or "Hotlist" (see class javadoc). */
  @Persistent
  private String name;

  /** Option value, e.g. a component path or a hotlist ID (see class javadoc). */
  @Persistent
  private String value;

  /** Owning request; inverse side of DataRequest.dataRequestOptions (mappedBy = "dataRequest"). */
  @Persistent
  private DataRequest dataRequest;

  /** No-arg constructor for the persistence and serialization layers. */
  public DataRequestOption() {
  }

  /** Convenience constructor setting the name/value pair; the owning request is set separately. */
  public DataRequestOption(String name, String value) {
    this.name = name;
    this.value = value;
  }

  public String getName() {
    return name;
  }

  public void setName(String name) {
    this.name = name;
  }

  public String getValue() {
    return value;
  }

  public void setValue(String value) {
    this.value = value;
  }

  public String getId() {
    return id;
  }

  public void setId(String id) {
    this.id = id;
  }

  public DataRequest getDataRequest() {
    return dataRequest;
  }

  public void setDataRequest(DataRequest dataRequest) {
    this.dataRequest = dataRequest;
  }
}
| 11shubhanjali-test | testanalytics_frontend/src/main/java/com/google/testing/testify/risk/frontend/model/DataRequestOption.java | Java | asf20 | 2,393 |
package com.google.testing.testify.risk.frontend.model;
import java.util.List;
/**
* Interface that defines some commonality between items that have labels. For use in making
* some functions generic.
*
* @author jimr@google.com (Jim Reardon)
*/
public interface HasLabels {
  /** Returns the element's datastore ID; may be null before the element has been persisted. */
  public Long getId();

  /** Returns the ID of the project that owns this element. */
  public long getParentProjectId();

  /** Replaces the element's entire label list. */
  public void setAccLabels(List<AccLabel> labels);

  /** Adds a single label to the element. */
  public void addLabel(AccLabel label);

  /** Returns the element's labels. */
  public List<AccLabel> getAccLabels();

  /** Returns the ACC element type of this object (e.g. AccElementType.CAPABILITY). */
  public AccElementType getElementType();
}
| 11shubhanjali-test | testanalytics_frontend/src/main/java/com/google/testing/testify/risk/frontend/model/HasLabels.java | Java | asf20 | 520 |
// Copyright 2010 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.testing.testify.risk.frontend.model;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import javax.jdo.annotations.IdGeneratorStrategy;
import javax.jdo.annotations.NotPersistent;
import javax.jdo.annotations.PersistenceCapable;
import javax.jdo.annotations.Persistent;
import javax.jdo.annotations.PrimaryKey;
/**
* JDO object for Capability.
*
* @author jimr@google.com (Jim Reardon)
*/
@PersistenceCapable(detachable = "true")
public class Capability implements Serializable, HasLabels {

  private static final AccElementType ELEMENT_TYPE = AccElementType.CAPABILITY;

  @PrimaryKey
  @Persistent(valueStrategy = IdGeneratorStrategy.IDENTITY)
  private Long capabilityId;

  @Persistent
  private long parentProjectId;

  @Persistent
  private String name;

  @Persistent
  private String description;

  /** Labels are stored separately (see AccLabel) and hydrated by the data layer. */
  @NotPersistent
  private List<AccLabel> accLabels = new ArrayList<AccLabel>();

  /** Parent Component. */
  @Persistent
  private long componentId;

  /** Parent Attribute. */
  @Persistent
  private long attributeId;

  @Persistent
  private FailureRate failureRate = FailureRate.NA;

  @Persistent
  private UserImpact userImpact = UserImpact.NA;

  /**
   * Sort key. Defaults to the negated creation time so newly created capabilities sort ahead of
   * older ones until an explicit order is assigned.
   */
  @Persistent
  private Long displayOrder = 0 - System.currentTimeMillis();

  public Capability() {
  }

  /**
   * Constructs a new Capability.
   *
   * @param parentProjectId ID of the owning Project.
   * @param parentAttributeId Attribute ID of the parent Attribute.
   * @param parentComponentId Component ID of the parent Component.
   */
  public Capability(long parentProjectId, long parentAttributeId, long parentComponentId) {
    this.parentProjectId = parentProjectId;
    this.componentId = parentComponentId;
    this.attributeId = parentAttributeId;
  }

  public Long getCapabilityId() {
    return capabilityId;
  }

  @Override
  public Long getId() {
    return getCapabilityId();
  }

  @Override
  public AccElementType getElementType() {
    return ELEMENT_TYPE;
  }

  public void setCapabilityId(long capabilityId) {
    this.capabilityId = capabilityId;
  }

  public void setParentProjectId(long parentProjectId) {
    this.parentProjectId = parentProjectId;
  }

  @Override
  public long getParentProjectId() {
    return parentProjectId;
  }

  public String getName() {
    return name;
  }

  public void setName(String name) {
    this.name = name;
  }

  public long getComponentId() {
    return componentId;
  }

  public void setComponentId(long componentId) {
    this.componentId = componentId;
  }

  public long getAttributeId() {
    return attributeId;
  }

  public void setAttributeId(long attributeId) {
    this.attributeId = attributeId;
  }

  public UserImpact getUserImpact() {
    return userImpact;
  }

  public void setUserImpact(UserImpact userImpact) {
    this.userImpact = userImpact;
  }

  public FailureRate getFailureRate() {
    return failureRate;
  }

  public void setFailureRate(FailureRate failureRate) {
    this.failureRate = failureRate;
  }

  /**
   * @return the stable intersection key for this capability's parent Component/Attribute pair.
   */
  public int getCapabilityIntersectionKey() {
    return Capability.getCapabilityIntersectionKey(componentId, attributeId);
  }

  /**
   * Given a parent Component and Attribute return a random, but stable integer index. This
   * allows efficient lookup for Capabilities based on parent Components and Attribute pairs.
   *
   * @param component the parent component.
   * @param attribute the parent attribute.
   * @return a stable integer corresponding to the unique component / attribute pairing suitable
   *     for placing Capability lists into a map.
   */
  public static int getCapabilityIntersectionKey(Component component, Attribute attribute) {
    return Capability.getCapabilityIntersectionKey(
        component.getComponentId(), attribute.getAttributeId());
  }

  /**
   * Computes a capability intersection key given the raw component and attribute IDs. The XOR of
   * the shifted component ID and the attribute ID is truncated to an int; collisions are possible
   * but tolerable since callers use this only as a map bucket key.
   */
  private static int getCapabilityIntersectionKey(long componentId, long attributeId) {
    // The original wrapped this in Integer.valueOf(...) only to auto-unbox it again; the cast
    // alone is equivalent and avoids the pointless boxing round trip.
    return (int) ((componentId << 16) ^ attributeId);
  }

  /**
   * Returns the sort key. Null-safe: entities persisted before this field existed may hold null,
   * which would otherwise NPE on auto-unboxing; such rows sort as 0.
   */
  public long getDisplayOrder() {
    return displayOrder == null ? 0 : displayOrder;
  }

  public void setDisplayOrder(long displayOrder) {
    this.displayOrder = displayOrder;
  }

  public void setDescription(String description) {
    this.description = description;
  }

  /** Returns the description, never null (empty string when unset). */
  public String getDescription() {
    return description == null ? "" : description;
  }

  @Override
  public List<AccLabel> getAccLabels() {
    return accLabels;
  }

  /** Returns the label with the given ID, or null if this capability has no such label. */
  public AccLabel getAccLabel(String accLabelId) {
    for (AccLabel l : accLabels) {
      if (accLabelId.equals(l.getId())) {
        return l;
      }
    }
    return null;
  }

  @Override
  public void setAccLabels(List<AccLabel> labels) {
    this.accLabels = labels;
  }

  /** Creates a label pre-wired to this capability and its project; shared by both addLabel overloads. */
  private AccLabel createLabel() {
    AccLabel label = new AccLabel();
    label.setProjectId(parentProjectId);
    label.setElementId(capabilityId);
    label.setElementType(ELEMENT_TYPE);
    return label;
  }

  /** Adds and returns a free-text label attached to this capability. */
  public AccLabel addLabel(String labelText) {
    AccLabel label = createLabel();
    label.setLabelText(labelText);
    accLabels.add(label);
    return label;
  }

  /** Adds and returns a name/value label attached to this capability. */
  public AccLabel addLabel(String name, String value) {
    AccLabel label = createLabel();
    label.setName(name);
    label.setValue(value);
    accLabels.add(label);
    return label;
  }

  @Override
  public void addLabel(AccLabel label) {
    accLabels.add(label);
  }

  /**
   * Removes all labels matching the given one: by ID when the label has been persisted,
   * otherwise by label text.
   */
  public void removeLabel(AccLabel label) {
    Iterator<AccLabel> i = accLabels.iterator();
    AccLabel l;
    while (i.hasNext()) {
      l = i.next();
      if (label.getId() != null) {
        if (label.getId().equals(l.getId())) {
          i.remove();
        }
      } else {
        if (label.getLabelText().equals(l.getLabelText())) {
          i.remove();
        }
      }
    }
  }

  /** Removes all labels whose text equals {@code labelText}. */
  public void removeLabel(String labelText) {
    Iterator<AccLabel> i = accLabels.iterator();
    AccLabel l;
    while (i.hasNext()) {
      l = i.next();
      if (labelText.equals(l.getLabelText())) {
        i.remove();
      }
    }
  }

  /** Removes all labels matching the given name/value pair. */
  public void removeLabel(String name, String value) {
    Iterator<AccLabel> i = accLabels.iterator();
    AccLabel l;
    while (i.hasNext()) {
      l = i.next();
      if (name.equals(l.getName()) && value.equals(l.getValue())) {
        i.remove();
      }
    }
  }

  /** Rewrites the text of every label currently matching {@code oldLabelText}. */
  public void updateLabel(String oldLabelText, String newLabelText) {
    for (AccLabel l : accLabels) {
      if (oldLabelText.equals(l.getLabelText())) {
        l.setLabelText(newLabelText);
      }
    }
  }
}
| 11shubhanjali-test | testanalytics_frontend/src/main/java/com/google/testing/testify/risk/frontend/model/Capability.java | Java | asf20 | 7,320 |
// Copyright 2011 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.testing.testify.risk.frontend.model;
import java.io.Serializable;
import java.util.List;
import javax.jdo.annotations.IdGeneratorStrategy;
import javax.jdo.annotations.PersistenceCapable;
import javax.jdo.annotations.Persistent;
import javax.jdo.annotations.PrimaryKey;
/**
* Represents an option for importing data and its options. For example, "Issue Tracker" would
* be a DataSource and its parameters are options for search, such as "Owner".
* @author jimr@google.com (Jim Reardon)
*/
@PersistenceCapable(detachable = "true")
public class DataSource implements Serializable {

  /** Datastore-assigned key; never read by application code, hence the unused suppression. */
  @SuppressWarnings("unused")
  @PrimaryKey
  @Persistent(valueStrategy = IdGeneratorStrategy.IDENTITY)
  private Long id;

  /** Display name of the source, e.g. "Issue Tracker". */
  @Persistent
  private String name;

  /** Names of the search parameters this source accepts, e.g. "Owner". */
  @Persistent
  private List<String> parameters;

  /** Marks the source as internal-only. Boxed: may be null if never set. */
  @Persistent
  private Boolean internalOnly;

  public void setName(String name) {
    this.name = name;
  }

  public String getName() {
    return name;
  }

  public void setInternalOnly(Boolean internalOnly) {
    this.internalOnly = internalOnly;
  }

  /** Returns the internal-only flag; may be null when it was never set. */
  public Boolean isInternalOnly() {
    return internalOnly;
  }

  public void setParameters(List<String> parameters) {
    this.parameters = parameters;
  }

  public List<String> getParameters() {
    return parameters;
  }
}
| 11shubhanjali-test | testanalytics_frontend/src/main/java/com/google/testing/testify/risk/frontend/model/DataSource.java | Java | asf20 | 1,917 |
// Copyright 2010 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.testing.testify.risk.frontend.model;
import com.google.common.collect.Lists;
import java.io.Serializable;
import java.util.List;
import javax.jdo.annotations.IdGeneratorStrategy;
import javax.jdo.annotations.PersistenceCapable;
import javax.jdo.annotations.Persistent;
import javax.jdo.annotations.PrimaryKey;
/**
* Configuration object for gathering data from an external source.
*
* @author chrsmith@google.com (Chris Smith)
*/
@PersistenceCapable(detachable = "true")
public class DataRequest implements Serializable {

  @PrimaryKey
  @Persistent(valueStrategy = IdGeneratorStrategy.IDENTITY)
  private Long requestId;

  /** ID of the project this request belongs to. */
  @Persistent
  private long parentProjectId;

  /** Name of the external data source. For example, "Google Code Bug Database". */
  @Persistent
  private String dataSourceName;

  /** A custom name if this is, for example, the "Other..." box. */
  @Persistent
  private String customName;

  /**
   * List of options for this data request. For example, an option might be:
   * Component Path -> { Project/Component/Path }
   *
   * Owned (child) objects: mappedBy points at DataRequestOption.dataRequest, and
   * defaultFetchGroup makes JDO load the options eagerly with the request.
   */
  @Persistent(mappedBy = "dataRequest", defaultFetchGroup = "true")
  private List<DataRequestOption> dataRequestOptions = Lists.newArrayList();

  public void setRequestId(Long requestId) {
    this.requestId = requestId;
  }

  /** Returns the datastore ID; may be null before the request has been persisted. */
  public Long getRequestId() {
    return requestId;
  }

  public void setParentProjectId(long parentProjectId) {
    this.parentProjectId = parentProjectId;
  }

  public long getParentProjectId() {
    return parentProjectId;
  }

  public void setDataSourceName(String dataSourceName) {
    this.dataSourceName = dataSourceName;
  }

  public String getDataSourceName() {
    return dataSourceName;
  }

  public void setCustomName(String customName) {
    this.customName = customName;
  }

  public String getCustomName() {
    return customName;
  }

  public List<DataRequestOption> getDataRequestOptions() {
    return dataRequestOptions;
  }

  public void setDataRequestOptions(List<DataRequestOption> dataRequestOptions) {
    this.dataRequestOptions = dataRequestOptions;
  }
}
| 11shubhanjali-test | testanalytics_frontend/src/main/java/com/google/testing/testify/risk/frontend/model/DataRequest.java | Java | asf20 | 2,713 |
// Copyright 2011 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.testing.testify.risk.frontend.model;
import java.io.Serializable;
import javax.jdo.annotations.Extension;
import javax.jdo.annotations.IdGeneratorStrategy;
import javax.jdo.annotations.PersistenceCapable;
import javax.jdo.annotations.Persistent;
import javax.jdo.annotations.PrimaryKey;
/**
* An individual option for a filter. For example, this is an example of a FilterOption that
* could appear on a bug:
*
* type = "Title"
* value = "[security]"
*
* This would match any bug that has [security] inside its title.
*
* @author jimr@google.com (Jim Reardon)
*/
@PersistenceCapable(detachable = "true")
public class FilterOption implements Serializable {

  /** App Engine encoded primary key (see the datanucleus gae.encoded-pk extension). */
  @PrimaryKey
  @Persistent(valueStrategy = IdGeneratorStrategy.IDENTITY)
  @Extension(vendorName = "datanucleus", key = "gae.encoded-pk", value = "true")
  private String id;

  /** Field the option matches against, e.g. "Title" (see class javadoc). */
  @Persistent
  private String type;

  /** Value to match, e.g. "[security]" (see class javadoc). */
  @Persistent
  private String value;

  /** Owning filter. */
  @Persistent
  private Filter filter;

  /** No-arg constructor for the persistence and serialization layers. */
  public FilterOption() {
  }

  /** Convenience constructor setting the type/value pair; the owning filter is set separately. */
  public FilterOption(String type, String value) {
    this.type = type;
    this.value = value;
  }

  public String getType() {
    return type;
  }

  public void setType(String type) {
    this.type = type;
  }

  public String getValue() {
    return value;
  }

  public void setValue(String value) {
    this.value = value;
  }

  public Filter getFilter() {
    return filter;
  }

  public void setFilter(Filter filter) {
    this.filter = filter;
  }

  public String getId() {
    return id;
  }

  public void setId(String id) {
    this.id = id;
  }
}
| 11shubhanjali-test | testanalytics_frontend/src/main/java/com/google/testing/testify/risk/frontend/model/FilterOption.java | Java | asf20 | 2,197 |
// Copyright 2010 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.testing.testify.risk.frontend.model;
import java.io.Serializable;
/**
* Contains the status of the current user (logged in, logged out, email, et cetera).
*
* @author jimr@google.com (Jim Reardon)
*/
public class LoginStatus implements Serializable {
private boolean isLoggedIn;
private String url;
private String email;
public LoginStatus() { }
/** Disable construction, see {@See LoggedInStatus} and {@See LoggedOutStatus}. */
public LoginStatus(boolean isLoggedIn, String url, String email) {
this.isLoggedIn = isLoggedIn;
this.url = url;
this.email = email;
}
public void setUrl(String url) {
this.url = url;
}
public String getUrl() {
return url;
}
public void setEmail(String email) {
this.email = email;
}
public String getEmail() {
return email;
}
public void setIsLoggedIn(boolean isLoggedIn) {
this.isLoggedIn = isLoggedIn;
}
public boolean getIsLoggedIn() {
return isLoggedIn;
}
}
| 11shubhanjali-test | testanalytics_frontend/src/main/java/com/google/testing/testify/risk/frontend/model/LoginStatus.java | Java | asf20 | 1,610 |
// Copyright 2010 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.testing.testify.risk.frontend.model;
import com.google.testing.testify.risk.frontend.shared.util.StringUtil;
import java.io.Serializable;
import java.util.HashSet;
import java.util.Set;
import javax.jdo.annotations.IdGeneratorStrategy;
import javax.jdo.annotations.PersistenceCapable;
import javax.jdo.annotations.Persistent;
import javax.jdo.annotations.PrimaryKey;
/**
* Generalized representation of a testcase to mitigate risk in a piece of software.
*
* @author chrsmith@google.com (Chris Smith)
*/
@PersistenceCapable(detachable = "true")
public class TestCase implements Serializable, UploadedDatum {
/** Unique identifier for this test case. */
@PrimaryKey
@Persistent(valueStrategy = IdGeneratorStrategy.IDENTITY)
private Long internalId;
/** Project ID of the project the test case belongs to. */
@Persistent
private Long parentProjectId;
/**
* Test case ID provided by the data provider. Must be unique across all bugs associated with this
* project.
*/
@Persistent
private Long externalId;
/** Arbitrary title of the test case. */
@Persistent
private String title;
/**
* Test cases have tags, which will be used to map it to Attributes, Components, Capabilities,
* and so on.
*/
@Persistent
private Set<String> tags = new HashSet<String>();
/** URL to identify view more information about the test case. */
@Persistent
private String testCaseUrl;
/** ID of the Attribute this testcase applies to, if any. */
@Persistent
private Long targetAttributeId;
/** ID of the Component this testcase applies to, if any. */
@Persistent
private Long targetComponentId;
/** ID of the Capability this testcase applies to, if any. */
@Persistent
private Long targetCapabilityId;
/** The status -- passed, failed, etc. */
@Persistent
private String state;
/** Result date. */
@Persistent
private Long stateDate;
@Override
public void setInternalId(Long internalId) {
this.internalId = internalId;
}
@Override
public Long getInternalId() {
return internalId;
}
@Override
public void setParentProjectId(Long parentProjectId) {
this.parentProjectId = parentProjectId;
}
@Override
public Long getParentProjectId() {
return parentProjectId;
}
@Override
public void setExternalId(Long externalId) {
this.externalId = externalId;
}
@Override
public Long getExternalId() {
return externalId;
}
public void setTitle(String title) {
this.title = title;
}
public String getTitle() {
return title;
}
public Set<String> getTags() {
return tags;
}
public void setTags(Set<String> tags) {
this.tags = tags;
}
public void removeTag(String tag) {
tags.remove(tag);
}
public void addTag(String tag) {
tags.add(tag);
}
/**
* @return the tags associated with this test case as a comma separated list.
*/
public String getTagsAsCommaSeparatedList() {
return StringUtil.listToCsv(tags);
}
public void setTestCaseUrl(String testCaseUrl) {
this.testCaseUrl = testCaseUrl;
}
public String getTestCaseUrl() {
return testCaseUrl;
}
  /** Assigns this test case to an Attribute (null to unassign). */
  @Override
  public void setTargetAttributeId(Long targetAttributeId) {
    this.targetAttributeId = targetAttributeId;
  }
  /** @return the ID of the Attribute this test case is assigned to, or null. */
  @Override
  public Long getTargetAttributeId() {
    return targetAttributeId;
  }
  /** Assigns this test case to a Component (null to unassign). */
  @Override
  public void setTargetComponentId(Long targetComponentId) {
    this.targetComponentId = targetComponentId;
  }
  /** @return the ID of the Component this test case is assigned to, or null. */
  @Override
  public Long getTargetComponentId() {
    return targetComponentId;
  }
  /** Assigns this test case to a Capability (null to unassign). */
  @Override
  public void setTargetCapabilityId(Long targetCapabilityId) {
    this.targetCapabilityId = targetCapabilityId;
  }
  /** @return the ID of the Capability this test case is assigned to, or null. */
  @Override
  public Long getTargetCapabilityId() {
    return targetCapabilityId;
  }
  /** Link text shown for this datum: the test case title. */
  @Override
  public String getLinkText() {
    return title;
  }
  /** Link destination for this datum: the external test case URL. */
  @Override
  public String getLinkUrl() {
    return testCaseUrl;
  }
@Override
public String getToolTip() {
StringBuilder text = new StringBuilder();
text.append("Last Result: ");
text.append(state == null ? "n/a" : state);
text.append(" This testcase is labeled with the following tags: ");
text.append(getTagsAsCommaSeparatedList());
return text.toString();
}
  /** @return true if this test case has been assigned to an Attribute. */
  @Override
  public boolean isAttachedToAttribute() {
    return getTargetAttributeId() != null;
  }
  /** @return true if this test case has been assigned to a Capability. */
  @Override
  public boolean isAttachedToCapability() {
    return getTargetCapabilityId() != null;
  }
  /** @return true if this test case has been assigned to a Component. */
  @Override
  public boolean isAttachedToComponent() {
    return getTargetComponentId() != null;
  }
  /** @return the last result state (passed, failed, etc.); may be null. */
  @Override
  public String getState() {
    return state;
  }
  /** Sets the last result state (passed, failed, etc.). */
  public void setState(String state) {
    this.state = state;
  }
  /** @return the timestamp of the last result -- presumably epoch millis; confirm with callers. */
  @Override
  public Long getStateDate() {
    return stateDate;
  }
  /** Sets the timestamp of the last result. */
  public void setStateDate(Long stateDate) {
    this.stateDate = stateDate;
  }
  /** Test cases always report the TESTS datum type. */
  @Override
  public DatumType getDatumType() {
    return DatumType.TESTS;
  }
@Override
public String getField(String field) {
if ("Title".equals(field)) {
return title;
} else if ("Labels".equals(field)) {
return getTagsAsCommaSeparatedList();
}
return null;
}
}
| 11shubhanjali-test | testanalytics_frontend/src/main/java/com/google/testing/testify/risk/frontend/model/TestCase.java | Java | asf20 | 5,793 |
// Copyright 2010 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.testing.testify.risk.frontend.model;
/**
* Enumeration for storing levels of user impact.
*
* @author chrsmith@google.com (Chris Smith)
*/
public enum UserImpact {
  NA (-2, "n/a"),
  MINIMAL (0, "Minimal"),
  SOME (1, "Some"),
  CONSIDERABLE (2, "Considerable"),
  MAXIMAL (3, "Maximal");

  /**
   * Explicit numeric weight. Note this is distinct from {@link Enum#ordinal()}:
   * NA has weight -2 but declaration index 0. Made private final for consistency
   * with the FailureRate enum.
   */
  private final int ordinal;
  /** User-friendly display name, e.g. "Considerable". */
  private final String description;

  private UserImpact(int ordinal, String description) {
    this.ordinal = ordinal;
    this.description = description;
  }

  /** @return the explicit numeric weight (not the enum declaration index). */
  public int getOrdinal() {
    return ordinal;
  }

  /** @return the user-friendly display name. */
  public String getDescription() {
    return description;
  }

  /**
   * Convert UserImpact.getDescription() into the original UserImpact instance. Enum.getValue
   * cannot be used because that requires the "ALL_CAPS" form of the value, rather than
   * the "User Friendly" getName version.
   *
   * @return the matching instance, or null if no description matches.
   */
  public static UserImpact fromDescription(String name) {
    for (UserImpact impact : UserImpact.values()) {
      if (impact.getDescription().equals(name)) {
        return impact;
      }
    }
    return null;
  }
}
| 11shubhanjali-test | testanalytics_frontend/src/main/java/com/google/testing/testify/risk/frontend/model/UserImpact.java | Java | asf20 | 1,675 |
// Copyright 2011 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.testing.testify.risk.frontend.model;
import java.io.Serializable;
import javax.jdo.annotations.IdGeneratorStrategy;
import javax.jdo.annotations.PersistenceCapable;
import javax.jdo.annotations.Persistent;
import javax.jdo.annotations.PrimaryKey;
/**
 * Stores a boolean that indicates whether an ACC member has been signed off. This could be a
* boolean on the ACC directly, but likely we will start to store much more data here -- an
* audit trail, etc, so this will start out as a very small class in expectation to grow.
*
* @author jimr@google.com (Jim Reardon)
*/
@PersistenceCapable(detachable = "true")
public class Signoff implements Serializable {
  // Serializable classes should declare an explicit version for stable serialization.
  private static final long serialVersionUID = 1L;

  /** Datastore-assigned primary key; null until first persisted. */
  @PrimaryKey
  @Persistent(valueStrategy = IdGeneratorStrategy.IDENTITY)
  private Long id;

  /** ID of the project that owns this signoff. */
  @Persistent
  private long parentProjectId;

  /** Which kind of ACC element (attribute, component, capability) is signed off. */
  @Persistent
  private AccElementType elementType;

  /** ID of the ACC element being signed off. */
  @Persistent
  private Long elementId;

  /** Whether the element is signed off; may be null if never explicitly set. */
  @Persistent
  private Boolean signedOff;

  /** @return the datastore-assigned ID, or null if not yet persisted. */
  public Long getId() {
    return id;
  }

  public void setId(Long id) {
    this.id = id;
  }

  /** @return the ID of the owning project. */
  public long getParentProjectId() {
    return parentProjectId;
  }

  public void setParentProjectId(long parentProjectId) {
    this.parentProjectId = parentProjectId;
  }

  /** @return the kind of ACC element this signoff refers to. */
  public AccElementType getElementType() {
    return elementType;
  }

  public void setElementType(AccElementType elementType) {
    this.elementType = elementType;
  }

  /** @return the ID of the ACC element this signoff refers to. */
  public Long getElementId() {
    return elementId;
  }

  public void setElementId(Long elementId) {
    this.elementId = elementId;
  }

  /** @return the signoff flag; may be null when never set. */
  public Boolean getSignedOff() {
    return signedOff;
  }

  public void setSignedOff(Boolean signedOff) {
    this.signedOff = signedOff;
  }
}
| 11shubhanjali-test | testanalytics_frontend/src/main/java/com/google/testing/testify/risk/frontend/model/Signoff.java | Java | asf20 | 2,314 |
// Copyright 2011 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.testing.testify.risk.frontend.model;
/**
* Enum that lists all the possible element types in an ACC model.
*
* @author jimr@google.com (Jim Reardon)
*/
public enum AccElementType {
  ATTRIBUTE("Attribute"),
  COMPONENT("Component"),
  CAPABILITY("Capability");

  /** Human-readable display name; final, assigned only in the constructor. */
  private final String friendlyName;

  private AccElementType(String friendlyName) {
    this.friendlyName = friendlyName;
  }

  /** @return the human-readable display name, e.g. "Attribute". */
  public String getFriendlyName() {
    return friendlyName;
  }
}
| 11shubhanjali-test | testanalytics_frontend/src/main/java/com/google/testing/testify/risk/frontend/model/AccElementType.java | Java | asf20 | 1,091 |
// Copyright 2010 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.testing.testify.risk.frontend.model;
import com.google.common.collect.Lists;
import java.io.Serializable;
import java.util.List;
import javax.jdo.annotations.IdGeneratorStrategy;
import javax.jdo.annotations.PersistenceCapable;
import javax.jdo.annotations.Persistent;
import javax.jdo.annotations.PrimaryKey;
/**
* Object for tracking user information. For example, starred/favorite projects.
*
* @author chrsmith@google.com (Chris Smith)
*/
@PersistenceCapable(detachable = "true")
public class UserInfo implements Serializable {
  // Serializable classes should declare an explicit version for stable serialization.
  private static final long serialVersionUID = 1L;

  @SuppressWarnings("unused")
  @PrimaryKey
  @Persistent(valueStrategy = IdGeneratorStrategy.IDENTITY)
  private Long id;

  /** The User ID is the same ID returned by the App Engine UserService. */
  @Persistent
  private String userId;

  /**
   * User's current email. This should not be keyed off of as the user's
   * email might change but still remain the same User ID.
   *
   * This can also be used to populate user id -- if user ID is null or missing, but email
   * is present, upon login the user ID will be populated.
   */
  @Persistent
  private String currentEmail;

  /**
   * If a user is whitelisted, they will always have access to the application. Some users
   * will not require to be whitelisted, if they are permitted by default (ie, @google.com).
   */
  @Persistent
  private Boolean isWhitelisted;

  /** List of IDs of starred projects. */
  @Persistent
  private List<Long> starredProjects = Lists.newArrayList();

  public void setUserId(String userId) {
    this.userId = userId;
  }

  public String getUserId() {
    return userId;
  }

  /** Stars a project. Bug fix: starring an already-starred project no longer adds a duplicate. */
  public void starProject(long projectId) {
    if (!starredProjects.contains(projectId)) {
      starredProjects.add(projectId);
    }
  }

  /** Unstars a project; no-op if the project was not starred. */
  public void unstarProject(long projectId) {
    // projectId autoboxes to Long, so this is List.remove(Object) -- remove by
    // value, not by index.
    starredProjects.remove(projectId);
  }

  public void setStarredProjects(List<Long> starredProjects) {
    this.starredProjects = starredProjects;
  }

  public List<Long> getStarredProjects() {
    return starredProjects;
  }

  public void setIsWhitelisted(boolean isWhitelisted) {
    this.isWhitelisted = isWhitelisted;
  }

  /** @return the whitelist flag; an unset (null) value is treated as false. */
  public Boolean getIsWhitelisted() {
    return isWhitelisted == null ? false : isWhitelisted;
  }

  public void setCurrentEmail(String currentEmail) {
    this.currentEmail = currentEmail;
  }

  public String getCurrentEmail() {
    return currentEmail;
  }
}
| 11shubhanjali-test | testanalytics_frontend/src/main/java/com/google/testing/testify/risk/frontend/model/UserInfo.java | Java | asf20 | 2,975 |
// Copyright 2011 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.testing.testify.risk.frontend.model;
import com.google.common.collect.Lists;
import com.google.testing.testify.risk.frontend.model.DatumType;
import java.io.Serializable;
import java.util.List;
import javax.jdo.annotations.IdGeneratorStrategy;
import javax.jdo.annotations.PersistenceCapable;
import javax.jdo.annotations.Persistent;
import javax.jdo.annotations.PrimaryKey;
/**
* A representation of a filter to automatically assign data to ACC parts.
*
* A Filter will automatically assign data uploaded to specific ACC pieces. For example,
 * a Filter may say "assign any test labeled with 'Security' to the Security Attribute."
*
* @author jimr@google.com (Jim Reardon)
*/
@PersistenceCapable(detachable = "true")
public class Filter implements Serializable {
  // Serializable classes should declare an explicit version for stable serialization.
  private static final long serialVersionUID = 1L;

  /** Datastore-assigned primary key; null until first persisted. */
  @PrimaryKey
  @Persistent(valueStrategy = IdGeneratorStrategy.IDENTITY)
  private Long id;

  /** ID of the project that owns this filter. */
  @Persistent
  private long parentProjectId;

  /** The kind of datum (tests, bugs, etc.) this filter applies to. */
  @Persistent
  private DatumType filterType;

  /** "any" to fire when any option matches; otherwise every option must match. */
  @Persistent
  private String filterConjunction;

  /** Field/value conditions, matched against {@code UploadedDatum.getField}. */
  @Persistent(mappedBy = "filter", defaultFetchGroup = "true")
  private List<FilterOption> filterOptions = Lists.newArrayList();

  /** Target ACC element IDs assigned on a match; each may be null when unused. */
  @Persistent
  private Long targetAttributeId;
  @Persistent
  private Long targetComponentId;
  @Persistent
  private Long targetCapabilityId;

  /**
   * Creates a friendly title for this filter.
   * @return a title.
   */
  public String getTitle() {
    return filterType.getSingular() + " Filter";
  }

  public Long getId() {
    return id;
  }

  public void setId(long id) {
    this.id = id;
  }

  public long getParentProjectId() {
    return parentProjectId;
  }

  public void setParentProjectId(long parentProjectId) {
    this.parentProjectId = parentProjectId;
  }

  public Long getTargetAttributeId() {
    return targetAttributeId;
  }

  public void setTargetAttributeId(Long targetAttributeId) {
    this.targetAttributeId = targetAttributeId;
  }

  public Long getTargetComponentId() {
    return targetComponentId;
  }

  public void setTargetComponentId(Long targetComponentId) {
    this.targetComponentId = targetComponentId;
  }

  public Long getTargetCapabilityId() {
    return targetCapabilityId;
  }

  public void setTargetCapabilityId(Long targetCapabilityId) {
    this.targetCapabilityId = targetCapabilityId;
  }

  public DatumType getFilterType() {
    return filterType;
  }

  public void setFilterType(DatumType filterType) {
    this.filterType = filterType;
  }

  public List<FilterOption> getFilterOptions() {
    return filterOptions;
  }

  public void setFilterOptions(List<FilterOption> filterOptions) {
    this.filterOptions = filterOptions;
  }

  /** Convenience: appends a new field/value condition. */
  public void addFilterOption(String field, String value) {
    filterOptions.add(new FilterOption(field, value));
  }

  public String getFilterConjunction() {
    return filterConjunction;
  }

  public void setFilterConjunction(String filterConjunction) {
    this.filterConjunction = filterConjunction;
  }

  /**
   * Applies this filter to the given datum: when the datum matches the options
   * (all of them, or any of them if the conjunction is "any"), the configured
   * target attribute/component/capability IDs are assigned to the datum.
   *
   * @param item the datum to examine; must be of this filter's datum type.
   * @throws IllegalArgumentException if the datum's type does not match the filter's type.
   */
  public void apply(UploadedDatum item) {
    if (item.getDatumType() != filterType) {
      throw new IllegalArgumentException("Data types do not match; I filter "
          + filterType.getPlural() + " but received a " + item.getDatumType().getSingular());
    }
    // Nothing to match on, or nothing to assign: leave the datum untouched.
    if (filterOptions.size() < 1 ||
        (targetAttributeId == null && targetCapabilityId == null && targetComponentId == null)) {
      return;
    }
    boolean matchesAny = false;
    boolean matchesAll = true;
    for (FilterOption option : filterOptions) {
      String value = item.getField(option.getType());
      // Bug fix: a missing field (null value) previously counted toward "all
      // options match", so a filter whose fields were all absent still fired.
      // A missing field is now treated as a failed match.
      if (value != null && value.contains(option.getValue())) {
        matchesAny = true;
      } else {
        matchesAll = false;
      }
    }
    if (matchesAll || ("any".equals(filterConjunction) && matchesAny)) {
      if (targetAttributeId != null) {
        item.setTargetAttributeId(targetAttributeId);
      }
      if (targetComponentId != null) {
        item.setTargetComponentId(targetComponentId);
      }
      if (targetCapabilityId != null) {
        item.setTargetCapabilityId(targetCapabilityId);
      }
    }
  }
}
| 11shubhanjali-test | testanalytics_frontend/src/main/java/com/google/testing/testify/risk/frontend/model/Filter.java | Java | asf20 | 4,742 |
// Copyright 2010 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.testing.testify.risk.frontend.model;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import javax.jdo.annotations.IdGeneratorStrategy;
import javax.jdo.annotations.NotPersistent;
import javax.jdo.annotations.PersistenceCapable;
import javax.jdo.annotations.Persistent;
import javax.jdo.annotations.PrimaryKey;
/**
* JDO object for Attribute.
*
* @author jimr@google.com (Jim Reardon)
*/
@PersistenceCapable(detachable = "true")
public class Attribute implements Serializable, HasLabels {
  // Every Attribute reports the same ACC element type.
  private static final AccElementType ELEMENT_TYPE = AccElementType.ATTRIBUTE;
  /** Datastore-assigned primary key; null until the Attribute is first persisted. */
  @PrimaryKey
  @Persistent(valueStrategy = IdGeneratorStrategy.IDENTITY)
  private Long attributeId;
  /** ID of the project this Attribute belongs to. */
  @Persistent
  private long parentProjectId;
  /** Display name of the Attribute. */
  @Persistent
  private String name;
  /** Optional description; may be null in the datastore (see getDescription). */
  @Persistent
  private String description;
  /**
   * Ordering hint for views displaying Attributes. Lower values should be put higher in the list.
   * Note that value is NOT guaranteed to be unique or greater than zero.
   */
  @Persistent
  private Long displayOrder = 0 - System.currentTimeMillis();
  /** Labels are not persisted with this object -- presumably managed separately; confirm. */
  @NotPersistent
  private List<AccLabel> accLabels = new ArrayList<AccLabel>();
  /** No-arg constructor, required for JDO instantiation. */
  public Attribute() {}
  /** Creates an Attribute owned by the given project. */
  public Attribute(long parentProjectId) {
    this.parentProjectId = parentProjectId;
  }
  /**
   * @return the Attribute instance's attribute ID. It will be null if it has not been saved yet.
   */
  public Long getAttributeId() {
    return attributeId;
  }
  /** Alias for {@link #getAttributeId()}. */
  @Override
  public Long getId() {
    return getAttributeId();
  }
  @Override
  public AccElementType getElementType() {
    return ELEMENT_TYPE;
  }
  /**
   * Sets the Attribute's ID. Note this should only be called as a direct result of persisting
   * the Attribute in the data store.
   */
  public void setAttributeId(long attributeId) {
    this.attributeId = attributeId;
  }
  public void setParentProjectId(long parentProjectId) {
    this.parentProjectId = parentProjectId;
  }
  @Override
  public long getParentProjectId() {
    return parentProjectId;
  }
  public String getName() {
    return name;
  }
  public void setName(String name) {
    this.name = name;
  }
  public void setDisplayOrder(long displayOrder) {
    this.displayOrder = displayOrder;
  }
  public long getDisplayOrder() {
    return displayOrder;
  }
  @Override
  public List<AccLabel> getAccLabels() {
    return accLabels;
  }
  /** @return the label with the given ID, or null if none matches. */
  public AccLabel getAccLabel(String accLabelId) {
    for (AccLabel l : accLabels) {
      if (accLabelId.equals(l.getId())) {
        return l;
      }
    }
    return null;
  }
  @Override
  public void setAccLabels(List<AccLabel> labels) {
    this.accLabels = labels;
  }
  /** Adds a plain-text label tied to this Attribute and its project. */
  public void addLabel(String labelText) {
    AccLabel label = new AccLabel();
    label.setProjectId(parentProjectId);
    label.setElementId(attributeId);
    label.setElementType(ELEMENT_TYPE);
    label.setLabelText(labelText);
    accLabels.add(label);
  }
  /** Adds a name/value label tied to this Attribute and its project. */
  public void addLabel(String name, String value) {
    AccLabel label = new AccLabel();
    label.setProjectId(parentProjectId);
    label.setElementId(attributeId);
    label.setElementType(ELEMENT_TYPE);
    label.setName(name);
    label.setValue(value);
    accLabels.add(label);
  }
  @Override
  public void addLabel(AccLabel label) {
    accLabels.add(label);
  }
  /**
   * Removes labels matching the given one: by ID when it has one, otherwise by
   * label text. NOTE(review): assumes getLabelText() is non-null when ID is absent.
   */
  public void removeLabel(AccLabel label) {
    Iterator<AccLabel> i = accLabels.iterator();
    AccLabel l;
    while (i.hasNext()) {
      l = i.next();
      if (label.getId() != null) {
        if (label.getId().equals(l.getId())) {
          i.remove();
        }
      } else {
        if (label.getLabelText().equals(l.getLabelText())) {
          i.remove();
        }
      }
    }
  }
  /** Removes every label whose text equals labelText. */
  public void removeLabel(String labelText) {
    Iterator<AccLabel> i = accLabels.iterator();
    AccLabel l;
    while (i.hasNext()) {
      l = i.next();
      if (labelText.equals(l.getLabelText())) {
        i.remove();
      }
    }
  }
  /** Removes every label matching both name and value. */
  public void removeLabel(String name, String value) {
    Iterator<AccLabel> i = accLabels.iterator();
    AccLabel l;
    while (i.hasNext()) {
      l = i.next();
      if (name.equals(l.getName()) && value.equals(l.getValue())) {
        i.remove();
      }
    }
  }
  /** Rewrites the text of every label currently equal to oldLabelText. */
  public void updateLabel(String oldLabelText, String newLabelText) {
    for (AccLabel l : accLabels) {
      if (oldLabelText.equals(l.getLabelText())) {
        l.setLabelText(newLabelText);
      }
    }
  }
  public void setDescription(String description) {
    this.description = description;
  }
  /** @return the description; never null (empty string when unset). */
  public String getDescription() {
    return description == null ? "" : description;
  }
}
| 11shubhanjali-test | testanalytics_frontend/src/main/java/com/google/testing/testify/risk/frontend/model/Attribute.java | Java | asf20 | 5,289 |
// Copyright 2010 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.testing.testify.risk.frontend.model;
/**
 * Interface for surfacing information from uploaded data to enable display.
*
* @author chrsmith@google.com (Chris Smith)
*/
public interface UploadedDatum {
  /** Returns which kind of datum (tests, bugs, etc.) this object represents. */
  public DatumType getDatumType();
  /** Returns the text for any links to this datum. */
  public String getLinkText();
  /** Returns an external URL to refer to the uploaded datum. */
  public String getLinkUrl();
  /** Returns a Tool Tip to display when hovering over the datum. */
  public String getToolTip();
  /** State, such as Open, Passed, Failed, etc. */
  public String getState();
  /** Date the item entered the state, eg: day it passed, date it was submitted. */
  public Long getStateDate();
  /** Methods for checking if the datum is attached to an Attribute, Component, or Capability. */
  public boolean isAttachedToAttribute();
  public boolean isAttachedToComponent();
  public boolean isAttachedToCapability();
  /** Capability assignment; a null ID means unassigned. */
  public void setTargetCapabilityId(Long targetCapabilityId);
  public Long getTargetCapabilityId();
  /** Attribute assignment; a null ID means unassigned. */
  public void setTargetAttributeId(Long targetAttributeId);
  public Long getTargetAttributeId();
  /** Component assignment; a null ID means unassigned. */
  public void setTargetComponentId(Long targetComponentId);
  public Long getTargetComponentId();
  /** ID of the project this datum belongs to. */
  public Long getParentProjectId();
  public void setParentProjectId(Long parentProjectId);
  /** External ID -- presumably the datum's ID in the tool it was uploaded from; confirm. */
  public Long getExternalId();
  public void setExternalId(Long externalId);
  /** Internal (application-side) ID of this datum. */
  public Long getInternalId();
  public void setInternalId(Long internalId);
  /** Allows generic access for filtering. */
  public String getField(String field);
}
| 11shubhanjali-test | testanalytics_frontend/src/main/java/com/google/testing/testify/risk/frontend/model/UploadedDatum.java | Java | asf20 | 2,204 |
// Copyright 2011 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.testing.testify.risk.frontend.model;
import java.io.Serializable;
/**
* Simple pair class.
*
* @author jimr@google.com (Jim Reardon)
*/
public class Pair<A, B> implements Serializable {
  // Serializable classes should declare an explicit version for stable serialization.
  private static final long serialVersionUID = 1L;

  /** First element; may be null. Public final, matching the original API. */
  public final A first;
  /** Second element; may be null. Public final, matching the original API. */
  public final B second;

  public Pair(A first, B second) {
    this.first = first;
    this.second = second;
  }

  public A getFirst() {
    return first;
  }

  public B getSecond() {
    return second;
  }

  /** Value equality: two pairs are equal when both elements are equal (null-safe). */
  @Override
  public boolean equals(Object other) {
    if (this == other) {
      return true;
    }
    if (!(other instanceof Pair)) {
      return false;
    }
    Pair<?, ?> that = (Pair<?, ?>) other;
    return (first == null ? that.first == null : first.equals(that.first))
        && (second == null ? that.second == null : second.equals(that.second));
  }

  /** Consistent with equals: combines both elements' hash codes. */
  @Override
  public int hashCode() {
    int result = (first == null) ? 0 : first.hashCode();
    return 31 * result + ((second == null) ? 0 : second.hashCode());
  }

  @Override
  public String toString() {
    return "(" + first + ", " + second + ")";
  }
}
| 11shubhanjali-test | testanalytics_frontend/src/main/java/com/google/testing/testify/risk/frontend/model/Pair.java | Java | asf20 | 1,060 |
// Copyright 2010 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.testing.testify.risk.frontend.model;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import javax.jdo.annotations.IdGeneratorStrategy;
import javax.jdo.annotations.NotPersistent;
import javax.jdo.annotations.PersistenceCapable;
import javax.jdo.annotations.Persistent;
import javax.jdo.annotations.PrimaryKey;
/**
* JDO object for Component.
*
* @author jimr@google.com (Jim Reardon)
*/
@PersistenceCapable(detachable = "true")
public class Component implements Serializable, HasLabels {
  // Every Component reports the same ACC element type.
  private static final AccElementType ELEMENT_TYPE = AccElementType.COMPONENT;
  /** Datastore-assigned primary key; null until the Component is first persisted. */
  @PrimaryKey
  @Persistent(valueStrategy = IdGeneratorStrategy.IDENTITY)
  private Long componentId;
  /** ID of the project this Component belongs to. */
  @Persistent
  private long parentProjectId;
  /** Display name of the Component. */
  @Persistent
  private String name;
  /** Optional description; may be null in the datastore (see getDescription). */
  @Persistent
  private String description;
  /**
   * Ordering hint for views displaying Attributes. Lower values should be put higher in the list.
   * Note that value is NOT guaranteed to be unique or greater than zero.
   */
  @Persistent
  private Long displayOrder = 0 - System.currentTimeMillis();
  /** Labels are not persisted with this object -- presumably managed separately; confirm. */
  @NotPersistent
  private List<AccLabel> accLabels = new ArrayList<AccLabel>();
  /** No-arg constructor, required for JDO instantiation. */
  public Component() {
  }
  /** Creates a Component owned by the given project. */
  public Component(long parentProjectId) {
    this.parentProjectId = parentProjectId;
  }
  /** @return the Component's ID; null if it has not been saved yet. */
  public Long getComponentId() {
    return componentId;
  }
  /** Alias for {@link #getComponentId()}. */
  @Override
  public Long getId() {
    return getComponentId();
  }
  @Override
  public AccElementType getElementType() {
    return ELEMENT_TYPE;
  }
  /** Sets the Component's ID; intended for use when persisting to the data store. */
  public void setComponentId(long componentId) {
    this.componentId = componentId;
  }
  public void setParentProjectId(long parentProjectId) {
    this.parentProjectId = parentProjectId;
  }
  @Override
  public long getParentProjectId() {
    return parentProjectId;
  }
  public String getName() {
    return name;
  }
  public void setName(String name) {
    this.name = name;
  }
  public void setDisplayOrder(long displayOrder) {
    this.displayOrder = displayOrder;
  }
  public long getDisplayOrder() {
    return displayOrder;
  }
  @Override
  public List<AccLabel> getAccLabels() {
    return accLabels;
  }
  /** @return the label with the given ID, or null if none matches. */
  public AccLabel getAccLabel(String accLabelId) {
    for (AccLabel l : accLabels) {
      if (accLabelId.equals(l.getId())) {
        return l;
      }
    }
    return null;
  }
  @Override
  public void setAccLabels(List<AccLabel> labels) {
    this.accLabels = labels;
  }
  /** Adds a plain-text label tied to this Component and its project. */
  public void addLabel(String labelText) {
    AccLabel label = new AccLabel();
    label.setProjectId(parentProjectId);
    label.setElementId(componentId);
    label.setElementType(ELEMENT_TYPE);
    label.setLabelText(labelText);
    accLabels.add(label);
  }
  /** Adds a name/value label tied to this Component and its project. */
  public void addLabel(String name, String value) {
    AccLabel label = new AccLabel();
    label.setProjectId(parentProjectId);
    label.setElementId(componentId);
    label.setElementType(ELEMENT_TYPE);
    label.setName(name);
    label.setValue(value);
    accLabels.add(label);
  }
  @Override
  public void addLabel(AccLabel label) {
    accLabels.add(label);
  }
  /**
   * Removes labels matching the given one: by ID when it has one, otherwise by
   * label text. NOTE(review): assumes getLabelText() is non-null when ID is absent.
   */
  public void removeLabel(AccLabel label) {
    Iterator<AccLabel> i = accLabels.iterator();
    AccLabel l;
    while (i.hasNext()) {
      l = i.next();
      if (label.getId() != null) {
        if (label.getId().equals(l.getId())) {
          i.remove();
        }
      } else {
        if (label.getLabelText().equals(l.getLabelText())) {
          i.remove();
        }
      }
    }
  }
  /** Removes every label whose text equals labelText. */
  public void removeLabel(String labelText) {
    Iterator<AccLabel> i = accLabels.iterator();
    AccLabel l;
    while (i.hasNext()) {
      l = i.next();
      if (labelText.equals(l.getLabelText())) {
        i.remove();
      }
    }
  }
  /** Removes every label matching both name and value. */
  public void removeLabel(String name, String value) {
    Iterator<AccLabel> i = accLabels.iterator();
    AccLabel l;
    while (i.hasNext()) {
      l = i.next();
      if (name.equals(l.getName()) && value.equals(l.getValue())) {
        i.remove();
      }
    }
  }
  /** Rewrites the text of every label currently equal to oldLabelText. */
  public void updateLabel(String oldLabelText, String newLabelText) {
    for (AccLabel l : accLabels) {
      if (oldLabelText.equals(l.getLabelText())) {
        l.setLabelText(newLabelText);
      }
    }
  }
  public void setDescription(String description) {
    this.description = description;
  }
  /** @return the description; never null (empty string when unset). */
  public String getDescription() {
    return description == null ? "" : description;
  }
}
| 11shubhanjali-test | testanalytics_frontend/src/main/java/com/google/testing/testify/risk/frontend/model/Component.java | Java | asf20 | 5,037 |
// Copyright 2010 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.testing.testify.risk.frontend.model;
/**
* Enumeration for storing potential failure rates.
*
* @author chrsmith@google.com (Chris Smith)
*/
public enum FailureRate {
  NA (-2, "n/a"),
  VERY_RARELY (0, "Rarely"),
  SELDOM (1, "Seldom"),
  OCCASIONALLY (2, "Occasionally"),
  OFTEN (3, "Often");

  /** Explicit numeric weight; distinct from the enum's declaration index. */
  private final int ordinal;
  /** User-friendly display name, e.g. "Occasionally". */
  private final String description;

  private FailureRate(int ordinal, String description) {
    this.ordinal = ordinal;
    this.description = description;
  }

  /** @return the explicit numeric weight. */
  public int getOrdinal() {
    return ordinal;
  }

  /** @return the user-friendly display name. */
  public String getDescription() {
    return description;
  }

  /**
   * Looks up the instance whose display name equals the given string. This is the
   * inverse of getDescription(); valueOf cannot be used because it expects the
   * ALL_CAPS constant name, not the friendly name.
   *
   * @return the matching instance, or null when nothing matches.
   */
  public static FailureRate fromDescription(String name) {
    FailureRate match = null;
    for (FailureRate candidate : values()) {
      if (candidate.getDescription().equals(name)) {
        match = candidate;
        break;
      }
    }
    return match;
  }
}
| 11shubhanjali-test | testanalytics_frontend/src/main/java/com/google/testing/testify/risk/frontend/model/FailureRate.java | Java | asf20 | 1,695 |
// Copyright 2010 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.testing.testify.risk.frontend.testing;
import com.google.gwt.user.client.rpc.AsyncCallback;
import org.easymock.EasyMock;
import org.easymock.IAnswer;
/**
* Utility class for simplifying Easy Mock unit tests.
*
* @author chrsmith@google.com (Chris Smith)
*/
public abstract class EasyMockUtils {
  /** Non-instantiable utility class. */
  private EasyMockUtils() {}
  /** Sets the return value for the last call to EasyMock. */
  public static <T> void setLastReturnValue(final T result) {
    EasyMock.expectLastCall().andReturn(result);
  }
  /**
   * Gets the {@link AsyncCallback} (last parameter of the previous function call) and ensures that
   * the onSuccess method gets called with the given result. This is needed because GWT's async RPC
   * calls return their value as part of an {@link AsyncCallback}, which is difficult to mock.
   */
  public static <T> void setLastAsyncCallbackSuccessWithResult(final T result) {
    EasyMock.expectLastCall().andAnswer(new IAnswer<T>() {
      @SuppressWarnings("unchecked")
      @Override
      public T answer() throws Throwable {
        // By GWT RPC convention the AsyncCallback is the last argument of the mocked call.
        final Object[] arguments = EasyMock.getCurrentArguments();
        AsyncCallback<T> callback = (AsyncCallback<T>) arguments[arguments.length - 1];
        callback.onSuccess(result);
        // The mocked method's own return value is unused; null satisfies IAnswer.
        return null;
      }
    });
  }
  /**
   * Gets the {@link AsyncCallback} (last parameter of the previous function call) and ensures that
   * the onFailure method gets called with the given {@code Throwable}.
   */
  public static <T> void setLastAsyncCallbackFailure(final Throwable exception) {
    EasyMock.expectLastCall().andAnswer(new IAnswer<T>() {
      @SuppressWarnings("unchecked")
      @Override
      public T answer() throws Throwable {
        // By GWT RPC convention the AsyncCallback is the last argument of the mocked call.
        final Object[] arguments = EasyMock.getCurrentArguments();
        AsyncCallback<T> callback = (AsyncCallback<T>) arguments[arguments.length - 1];
        callback.onFailure(exception);
        // The mocked method's own return value is unused; null satisfies IAnswer.
        return null;
      }
    });
  }
}
| 11shubhanjali-test | testanalytics_frontend/src/test/java/com/google/testing/testify/risk/frontend/testing/EasyMockUtils.java | Java | asf20 | 2,574 |
program kr;

{$APPTYPE CONSOLE}

uses
  SysUtils;

type
  pelement = ^element;
  element = record
    value: integer;
    next: pelement;
  end;

var
  head: pelement;
  i: integer;
  mass: array [0..10] of integer;

{ Unlinks the sixth node (five links past the head), frees it and returns
  its value. Requires the list to contain at least seven nodes, just like
  the original chained-pointer version. }
function dfb(var head: pelement): Integer;
var
  prev, doomed: pelement;
  k: integer;
begin
  prev := head;
  for k := 1 to 4 do
    prev := prev^.next;
  doomed := prev^.next;
  prev^.next := doomed^.next;
  dfb := doomed^.value;
  dispose(doomed);
end;

{ Prints every value in the list, one per line. }
procedure PL(head: pelement);
var
  walker: pelement;
begin
  walker := head;
  while walker <> nil do
  begin
    writeln(walker^.value);
    walker := walker^.next;
  end;
end;

{ Pushes num onto the front of the list. }
procedure Atb(var head: pelement; num: integer);
var
  node: pelement;
begin
  new(node);
  node^.value := num;
  node^.next := head;
  head := node;
end;

begin
  head := nil;
  { Read eight values, then build the list by prepending (reverse order). }
  for i := 0 to 7 do
    readln(mass[i]);
  for i := 0 to 7 do
    Atb(head, mass[i]);
  dfb(head);
  PL(head);
  Readln;
end.
| 1010q | trunk/ 1010q/kr.dpr | Pascal | lgpl | 989 |
// Reads a count followed by that many integers, pushes them onto the
// front of a singly-linked list, removes the 6th node, and prints the
// remaining values. The original version did not compile: 'a' was used
// uninitialized, the dynamic array was never sized, Atb was called with
// one argument, dfb was passed an expression for its var parameter, and
// PL was called with no argument. dfb also reassigned 'head', leaking
// the first five nodes instead of unlinking the removed one.
program Project1;
{$APPTYPE CONSOLE}
uses
SysUtils;
type
pelement=^element;
// Singly-linked list node holding one integer.
element=record
value:integer;
next:pelement;
end;
var
head:pelement;
i,a:integer;
mass:array of integer;
// Unlinks and disposes the 6th node of the list, returning its value.
// Requires at least 7 nodes.
function dfb(var head:pelement) : Integer;
var tmp:pelement ;
begin
// tmp -> node 6; relink its predecessor past it (the original code
// overwrote 'head' here, which leaked the first five nodes).
tmp:=head^.next^.next^.next^.next^.next;
head^.next^.next^.next^.next^.next:=tmp^.next;
dfb:= tmp^.value;
dispose(tmp);
end;
// Prints every value in the list, one per line.
procedure PL(head:pelement);
var current:pelement;
begin
current:=head;
while(current<>nil) do
begin
writeln(current^.value);
current:=current^.next;
end;
end;
// Pushes num onto the front of the list (head is updated in place).
procedure Atb(var head:pelement; num:integer);
var tmp:pelement;
begin
tmp:=head;
new(head);
head^.value:=num;
head^.next:=tmp;
end;
begin
head:=nil;
// Read the element count, then size the dynamic array before indexing it.
readln(a);
SetLength(mass,a);
for i:=0 to a-1 do
readln(mass[i]);
for i:=0 to a-1 do
Atb(head,mass[i]);  // was Atb(mass[i]): the list-head argument was missing
dfb(head);          // was dfb(head^.next^...): expressions cannot bind to a var parameter
PL(head);           // was 'PL;' with no argument
Readln;
end.
| 1010q | trunk/ 1010q/Project2.dpr | Pascal | lgpl | 959 |
// Reads a count followed by that many integers, pushes them onto the
// front of a singly-linked list (reversing their order), removes one
// node from the middle, and prints the remaining values.
program Project1;
{$APPTYPE CONSOLE}
uses
SysUtils;
type
pelement=^element;
// Singly-linked list node holding one integer.
element=record
value:integer;
next:pelement;
end;
var
head,current, tmp:pelement;
num,i,a:integer;
mass:array of integer;
begin
head:=nil;
// Read the element count, then size the dynamic array before indexing
// it (the original never called SetLength, so mass[i] wrote through a
// nil array reference).
readln(a);
SetLength(mass,a);
for i:=0 to a-1 do
readln(mass[i]);
For i:=0 to a-1 do
begin
tmp:=head;
new(head);
head^.value:=mass[i];
Head^.next:=tmp;
end;
// Unlink and dispose the 5th node. The original unlinked node 4 but
// disposed node 5, leaving a dangling pointer in the list; now the same
// node is both unlinked and freed. Requires at least 6 nodes.
tmp:=head^.next^.next^.next^.next;
head^.next^.next^.next^.next:=tmp^.next;
dispose(tmp);
current:=head;
while(current<>nil) do
begin
writeln(current^.value);
current:=current^.next;
end;
Readln;
end.
| 1010q | trunk/ 1010q/Project1.dpr | Pascal | lgpl | 680 |
<html>
<head>
<!-- Charset fixed from windows-1252 to utf-8: the body contains
     Vietnamese text (e.g. "Nguyễn Trúc Phương") that windows-1252
     cannot represent, so the old declaration rendered it as mojibake. -->
<meta http-equiv=Content-Type content="text/html; charset=utf-8">
<meta name=Generator content="Microsoft Word 14 (filtered)">
<style>
<!--
/* Font Definitions */
@font-face
{font-family:Calibri;
panose-1:2 15 5 2 2 2 4 3 2 4;}
/* Style Definitions */
p.MsoNormal, li.MsoNormal, div.MsoNormal
{margin-top:0cm;
margin-right:0cm;
margin-bottom:10.0pt;
margin-left:0cm;
line-height:115%;
font-size:11.0pt;
font-family:"Calibri","sans-serif";}
.MsoChpDefault
{font-family:"Calibri","sans-serif";}
.MsoPapDefault
{margin-bottom:10.0pt;
line-height:115%;}
@page WordSection1
{size:612.0pt 792.0pt;
margin:72.0pt 72.0pt 72.0pt 72.0pt;}
div.WordSection1
{page:WordSection1;}
-->
</style>
</head>
<body lang=EN-US>
<div class=WordSection1>
<p class=MsoNormal>Nguyễn Trúc Phương</p>
<p class=MsoNormal>Lê Quốc Huy</p>
</div>
</body>
</html>
| 11th131 | trunk/demo.html | HTML | asf20 | 886 |
/* -*-C-*-
********************************************************************************
*
* File: chopper.c (Formerly chopper.c)
* Description:
* Author: Mark Seaman, OCR Technology
* Created: Fri Oct 16 14:37:00 1987
* Modified: Tue Jul 30 16:18:52 1991 (Mark Seaman) marks@hpgrlt
* Language: C
* Package: N/A
* Status: Reusable Software Component
*
* (c) Copyright 1987, Hewlett-Packard Company.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**************************************************************************/
/*----------------------------------------------------------------------
I n c l u d e s
----------------------------------------------------------------------*/
#include <math.h>
#include "chopper.h"
#include "assert.h"
#include "associate.h"
#include "blobs.h"
#include "callcpp.h"
#include "const.h"
#include "findseam.h"
#include "freelist.h"
#include "globals.h"
#include "makechop.h"
#include "render.h"
#include "pageres.h"
#include "seam.h"
#include "stopper.h"
#include "structures.h"
#include "unicharset.h"
#include "wordrec.h"
// Include automatically generated configuration file if running autoconf.
#ifdef HAVE_CONFIG_H
#include "config_auto.h"
#endif
// Even though the limit on the number of chunks may now be removed, keep
// the same limit for repeatable behavior, and it may be a speed advantage.
static const int kMaxNumChunks = 64;
/*----------------------------------------------------------------------
F u n c t i o n s
----------------------------------------------------------------------*/
/**
* @name preserve_outline_tree
*
* Copy the list of outlines.
*/
// Marks every point on the closed edge loop starting at `start` as
// preserved (flags[1] = 1), then tags the start point itself with 2 so
// restore_outline() can later find the original loop origin.
void preserve_outline(EDGEPT *start) {
  if (start == NULL)
    return;
  EDGEPT *pt = start;
  do {
    pt->flags[1] = 1;
    pt = pt->next;
  } while (pt != start);
  // The loop ends with pt back at the start; give it the special marker.
  pt->flags[1] = 2;
}
/**************************************************************************/
// Applies preserve_outline() to the loop of every outline in the list.
void preserve_outline_tree(TESSLINE *srcline) {
  for (TESSLINE *line = srcline; line != NULL; line = line->next)
    preserve_outline(line->loop);
}
/**
* @name restore_outline_tree
*
* Copy the list of outlines.
*/
// Restores an edge loop to the state preserve_outline() recorded:
// relocates the original start point (marked with flags[1] == 2) and
// deletes every point that was added after preservation (flags[1] == 0).
// Returns the original start point, or NULL for an empty loop.
EDGEPT *restore_outline(EDGEPT *start) {
  EDGEPT *srcpt;
  EDGEPT *real_start;
  if (start == NULL)
    return NULL;
  srcpt = start;
  // Walk the loop looking for the point preserve_outline() tagged with 2
  // (the pre-chop starting point). If none is found we wrap back to
  // `start` and use it as-is.
  do {
    if (srcpt->flags[1] == 2)
      break;
    srcpt = srcpt->next;
  }
  while (srcpt != start);
  real_start = srcpt;
  // One full pass: any point still flagged 0 was inserted by chopping
  // after preservation (preserved points carry 1 or 2), so remove it.
  // The predecessor (srcpt->prev) is removed so that srcpt itself stays
  // a valid cursor after remove_edgept() splices the loop.
  do {
    srcpt = srcpt->next;
    if (srcpt->prev->flags[1] == 0) {
      remove_edgept(srcpt->prev);
    }
  }
  while (srcpt != real_start);
  return real_start;
}
/******************************************************************************/
// Restores every outline in the list via restore_outline() and re-anchors
// each outline's start position at the restored loop's origin.
void restore_outline_tree(TESSLINE *srcline) {
  TESSLINE *line = srcline;
  while (line != NULL) {
    line->loop = restore_outline(line->loop);
    line->start = line->loop->pos;
    line = line->next;
  }
}
// Helper runs all the checks on a seam to make sure it is valid.
// Returns the seam if OK, otherwise deletes the seam and returns NULL.
static SEAM* CheckSeam(int debug_level, inT32 blob_number, TWERD* word,
                       TBLOB* blob, TBLOB* other_blob,
                       const GenericVector<SEAM*>& seams, SEAM* seam) {
  // Reject the chop if any of the following hold:
  //  - no seam was found at all;
  //  - either half of the chop lost all of its outlines;
  //  - one half's bounding box entirely contains the other's (not a
  //    genuine left/right split);
  //  - other_blob ended up with a broken (non-closed) outline;
  //  - the seam's splits do not map onto outlines of both halves;
  //  - the seam shares a split point with a previously applied seam;
  //  - the seam cannot be inserted consistently into the word's seam list.
  if (seam == NULL ||
      blob->outlines == NULL ||
      other_blob->outlines == NULL ||
      total_containment(blob, other_blob) ||
      check_blob(other_blob) ||
      !(check_seam_order(blob, seam) &&
        check_seam_order(other_blob, seam)) ||
      any_shared_split_points(seams, seam) ||
      !test_insert_seam(seams, word, blob_number)) {
    // Undo everything attempt_blob_chop set up: take the inserted copy
    // back out of the word, and either revert the applied seam or (if no
    // seam was ever applied) just free the unused copy.
    word->blobs.remove(blob_number + 1);
    if (seam) {
      undo_seam(blob, other_blob, seam);
      delete seam;
      seam = NULL;
#ifndef GRAPHICS_DISABLED
      if (debug_level) {
        if (debug_level >2)
          display_blob(blob, Red);
        tprintf("\n** seam being removed ** \n");
      }
#endif
    } else {
      delete other_blob;
    }
    return NULL;
  }
  return seam;
}
/**
* @name attempt_blob_chop
*
* Try to split the this blob after this one. Check to make sure that
* it was successful.
*/
namespace tesseract {
// Tries to chop `blob` (at index blob_number in `word`) into two blobs.
// On success the new right-hand blob sits at blob_number + 1 and the
// applied SEAM is returned (not yet inserted into the word's seam array);
// on failure the word is restored and NULL is returned.
SEAM *Wordrec::attempt_blob_chop(TWERD *word, TBLOB *blob, inT32 blob_number,
                                 bool italic_blob,
                                 const GenericVector<SEAM*>& seams) {
  // Remember the outline points so a failed chop can be undone exactly.
  if (repair_unchopped_blobs)
    preserve_outline_tree (blob->outlines);
  TBLOB *other_blob = TBLOB::ShallowCopy(*blob); /* Make new blob */
  // Insert it into the word.
  word->blobs.insert(other_blob, blob_number + 1);
  SEAM *seam = NULL;
  if (prioritize_division) {
    // Prefer a trivial "division" seam when the blob's outlines can
    // simply be partitioned at `location` without cutting anything.
    TPOINT location;
    if (divisible_blob(blob, italic_blob, &location)) {
      seam = new SEAM(0.0f, location, NULL, NULL, NULL);
    }
  }
  if (seam == NULL)
    seam = pick_good_seam(blob);
  if (chop_debug) {
    if (seam != NULL)
      print_seam("Good seam picked=", seam);
    else
      tprintf("\n** no seam picked *** \n");
  }
  if (seam) {
    apply_seam(blob, other_blob, italic_blob, seam);
  }
  // CheckSeam validates the chop; on rejection it removes other_blob
  // from the word and returns NULL.
  seam = CheckSeam(chop_debug, blob_number, word, blob, other_blob,
                   seams, seam);
  if (seam == NULL) {
    if (repair_unchopped_blobs)
      restore_outline_tree(blob->outlines);
    if (word->latin_script) {
      // If the blob can simply be divided into outlines, then do that.
      TPOINT location;
      if (divisible_blob(blob, italic_blob, &location)) {
        other_blob = TBLOB::ShallowCopy(*blob); /* Make new blob */
        word->blobs.insert(other_blob, blob_number + 1);
        seam = new SEAM(0.0f, location, NULL, NULL, NULL);
        apply_seam(blob, other_blob, italic_blob, seam);
        seam = CheckSeam(chop_debug, blob_number, word, blob, other_blob,
                         seams, seam);
      }
    }
  }
  return seam;
}
// Convenience wrapper: chops the blob at the given index in the word.
SEAM *Wordrec::chop_numbered_blob(TWERD *word, inT32 blob_number,
                                  bool italic_blob,
                                  const GenericVector<SEAM*>& seams) {
  TBLOB* target = word->blobs[blob_number];
  return attempt_blob_chop(word, target, blob_number, italic_blob, seams);
}
// Scans the word for a blob that either is trivially divisible or whose
// bounding box (mapped back to original image coordinates) overlaps more
// than one of the given truth boxes, and tries to chop it. Returns the
// applied SEAM and sets *blob_number to the chopped index, or returns
// NULL with *blob_number == -1 when no blob qualifies.
SEAM *Wordrec::chop_overlapping_blob(const GenericVector<TBOX>& boxes,
                                     bool italic_blob, WERD_RES *word_res,
                                     int *blob_number) {
  TWERD *word = word_res->chopped_word;
  for (*blob_number = 0; *blob_number < word->NumBlobs(); ++*blob_number) {
    TBLOB *blob = word->blobs[*blob_number];
    TPOINT topleft, botright;
    topleft.x = blob->bounding_box().left();
    topleft.y = blob->bounding_box().top();
    botright.x = blob->bounding_box().right();
    botright.y = blob->bounding_box().bottom();
    // Map the blob's box back through the normalization so it can be
    // compared against the truth boxes, which are in image coordinates.
    TPOINT original_topleft, original_botright;
    word_res->denorm.DenormTransform(NULL, topleft, &original_topleft);
    word_res->denorm.DenormTransform(NULL, botright, &original_botright);
    TBOX original_box = TBOX(original_topleft.x, original_botright.y,
                             original_botright.x, original_topleft.y);
    bool almost_equal_box = false;
    int num_overlap = 0;
    for (int i = 0; i < boxes.size(); i++) {
      // Count boxes this blob substantially overlaps (> 12.5% of area).
      if (original_box.overlap_fraction(boxes[i]) > 0.125)
        num_overlap++;
      // A blob that already matches one truth box (within 3 units) needs
      // no chopping.
      if (original_box.almost_equal(boxes[i], 3))
        almost_equal_box = true;
    }
    TPOINT location;
    if (divisible_blob(blob, italic_blob, &location) ||
        (!almost_equal_box && num_overlap > 1)) {
      SEAM *seam = attempt_blob_chop(word, blob, *blob_number,
                                     italic_blob, word_res->seam_array);
      if (seam != NULL)
        return seam;
    }
  }
  *blob_number = -1;
  return NULL;
}
} // namespace tesseract
/**
* @name any_shared_split_points
*
* Return true if any of the splits share a point with this one.
*/
// Returns TRUE if `seam` shares a split point with any seam already in
// `seams`, FALSE otherwise.
int any_shared_split_points(const GenericVector<SEAM*>& seams, SEAM *seam) {
  const int num_seams = seams.size();
  for (int i = 0; i < num_seams; ++i) {
    if (shared_split_points(seams[i], seam))
      return TRUE;
  }
  return FALSE;
}
/**
* @name check_blob
*
* @return true if blob has a non whole outline.
*/
// Returns 1 if any outline of the blob is broken (its point chain hits a
// NULL link before closing back on itself), 0 if all outlines are whole.
int check_blob(TBLOB *blob) {
  for (TESSLINE *outline = blob->outlines; outline != NULL;
       outline = outline->next) {
    EDGEPT *pt = outline->loop;
    do {
      if (pt == NULL)
        return 1;  // Chain is broken: never closed back to the loop start.
      pt = pt->next;
    } while (pt != outline->loop);
  }
  return 0;
}
namespace tesseract {
/**
* @name improve_one_blob
*
* Finds the best place to chop, based on the worst blob, fixpt, or next to
* a fragment, according to the input. Returns the SEAM corresponding to the
* chop point, if any is found, and the index in the ratings_matrix of the
* chopped blob. Note that blob_choices is just a copy of the pointers in the
* leading diagonal of the ratings MATRIX.
* Although the blob is chopped, the returned SEAM is yet to be inserted into
* word->seam_array and the resulting blobs are unclassified, so this function
* can be used by ApplyBox as well as during recognition.
*/
SEAM* Wordrec::improve_one_blob(const GenericVector<BLOB_CHOICE*>& blob_choices,
                                DANGERR *fixpt,
                                bool split_next_to_fragment,
                                bool italic_blob,
                                WERD_RES* word,
                                int* blob_number) {
  float rating_ceiling = MAX_FLOAT32;
  SEAM *seam = NULL;
  do {
    // First preference: a blob the dictionary flagged as dangerously
    // ambiguous (maps to multiple characters).
    *blob_number = select_blob_to_split_from_fixpt(fixpt);
    if (chop_debug) tprintf("blob_number from fixpt = %d\n", *blob_number);
    bool split_point_from_dict = (*blob_number != -1);
    if (split_point_from_dict) {
      fixpt->clear();
    } else {
      // Otherwise pick the worst-rated blob below the current ceiling.
      *blob_number = select_blob_to_split(blob_choices, rating_ceiling,
                                          split_next_to_fragment);
    }
    if (chop_debug) tprintf("blob_number = %d\n", *blob_number);
    if (*blob_number == -1)
      return NULL;
    // TODO(rays) it may eventually help to allow italic_blob to be true,
    seam = chop_numbered_blob(word->chopped_word, *blob_number, italic_blob,
                              word->seam_array);
    if (seam != NULL)
      return seam;  // Success!
    if (blob_choices[*blob_number] == NULL)
      return NULL;
    if (!split_point_from_dict) {
      // We chopped the worst rated blob, try something else next time.
      rating_ceiling = blob_choices[*blob_number]->rating();
    }
  } while (true);
  // NOTE(review): unreachable — the loop above only exits via the
  // returns inside it; kept for compiler appeasement.
  return seam;
}
/**
* @name chop_one_blob
*
* Start with the current one-blob word and its classification. Find
* the worst blobs and try to divide it up to improve the ratings.
* Used for testing chopper.
*/
SEAM* Wordrec::chop_one_blob(const GenericVector<TBOX>& boxes,
                             const GenericVector<BLOB_CHOICE*>& blob_choices,
                             WERD_RES* word_res,
                             int* blob_number) {
  // Division-first mode chops a blob overlapping multiple truth boxes;
  // otherwise fall back to chopping the worst-rated blob.
  if (!prioritize_division) {
    return improve_one_blob(blob_choices, NULL, false, true, word_res,
                            blob_number);
  }
  return chop_overlapping_blob(boxes, true, word_res, blob_number);
}
} // namespace tesseract
/**
* @name check_seam_order
*
* Make sure that each of the splits in this seam match to outlines
* in this blob. If any of the splits could not correspond to this
* blob then there is a problem (and FALSE should be returned to the
* caller).
*/
inT16 check_seam_order(TBLOB *blob, SEAM *seam) {
  TESSLINE *outline;
  // found_em[i] records whether split_i (or its absence) has been
  // accounted for by some outline of the blob.
  inT8 found_em[3];
  // A seam with no splits (pure division) trivially matches any blob.
  if (seam->split1 == NULL || blob == NULL)
    return (TRUE);
  found_em[0] = found_em[1] = found_em[2] = FALSE;
  for (outline = blob->outlines; outline; outline = outline->next) {
    // A NULL split counts as satisfied; otherwise the split's points must
    // lie on this outline.
    if (!found_em[0] &&
        ((seam->split1 == NULL) ||
         is_split_outline (outline, seam->split1))) {
      found_em[0] = TRUE;
    }
    if (!found_em[1] &&
        ((seam->split2 == NULL) ||
         is_split_outline (outline, seam->split2))) {
      found_em[1] = TRUE;
    }
    if (!found_em[2] &&
        ((seam->split3 == NULL) ||
         is_split_outline (outline, seam->split3))) {
      found_em[2] = TRUE;
    }
  }
  // All three splits must map onto this blob's outlines for the seam to
  // be considered applicable.
  if (!found_em[0] || !found_em[1] || !found_em[2])
    return (FALSE);
  else
    return (TRUE);
}
namespace tesseract {
/**
* @name chop_word_main
*
* Classify the blobs in this word and permute the results. Find the
* worst blob in the word and chop it up. Continue this process until
* a good answer has been found or all the blobs have been chopped up
* enough. The results are returned in the WERD_RES.
*/
void Wordrec::chop_word_main(WERD_RES *word) {
  int num_blobs = word->chopped_word->NumBlobs();
  if (word->ratings == NULL) {
    // Band-diagonal matrix: cell (col, row) holds choices for the chunk
    // run [col, row]; bandwidth limits how many chunks may be joined.
    word->ratings = new MATRIX(num_blobs, wordrec_max_join_chunks);
  }
  if (word->ratings->get(0, 0) == NULL) {
    // Run initial classification.
    for (int b = 0; b < num_blobs; ++b) {
      BLOB_CHOICE_LIST* choices = classify_piece(word->seam_array, b, b,
                                                 "Initial:", word->chopped_word,
                                                 word->blamer_bundle);
      word->ratings->put(b, b, choices);
    }
  } else {
    // Blobs have been pre-classified. Set matrix cell for all blob choices
    for (int col = 0; col < word->ratings->dimension(); ++col) {
      for (int row = col; row < word->ratings->dimension() &&
               row < col + word->ratings->bandwidth(); ++row) {
        BLOB_CHOICE_LIST* choices = word->ratings->get(col, row);
        if (choices != NULL) {
          BLOB_CHOICE_IT bc_it(choices);
          for (bc_it.mark_cycle_pt(); !bc_it.cycled_list(); bc_it.forward()) {
            bc_it.data()->set_matrix_cell(col, row);
          }
        }
      }
    }
  }
  // Run Segmentation Search.
  BestChoiceBundle best_choice_bundle(word->ratings->dimension());
  SegSearch(word, &best_choice_bundle, word->blamer_bundle);
  if (word->best_choice == NULL) {
    // SegSearch found no valid paths, so just use the leading diagonal.
    word->FakeWordFromRatings();
  }
  word->RebuildBestState();
  // If we finished without a hyphen at the end of the word, let the next word
  // be found in the dictionary.
  if (word->word->flag(W_EOL) &&
      !getDict().has_hyphen_end(*word->best_choice)) {
    getDict().reset_hyphen_vars(true);
  }
  // Feed the final lattice to the blamer's training hook, if installed.
  if (word->blamer_bundle != NULL && this->fill_lattice_ != NULL) {
    CallFillLattice(*word->ratings, word->best_choices,
                    *word->uch_set, word->blamer_bundle);
  }
  if (wordrec_debug_level > 0) {
    tprintf("Final Ratings Matrix:\n");
    word->ratings->print(getDict().getUnicharset());
  }
  word->FilterWordChoices(getDict().stopper_debug_level);
}
/**
* @name improve_by_chopping
*
* Repeatedly chops the worst blob, classifying the new blobs fixing up all
* the data, and incrementally runs the segmentation search until a good word
* is found, or no more chops can be found.
*/
void Wordrec::improve_by_chopping(float rating_cert_scale,
                                  WERD_RES* word,
                                  BestChoiceBundle* best_choice_bundle,
                                  BlamerBundle* blamer_bundle,
                                  LMPainPoints* pain_points,
                                  GenericVector<SegSearchPending>* pending) {
  int blob_number;
  do { // improvement loop.
    // Make a simple vector of BLOB_CHOICEs to make it easy to pick which
    // one to chop.
    GenericVector<BLOB_CHOICE*> blob_choices;
    int num_blobs = word->ratings->dimension();
    for (int i = 0; i < num_blobs; ++i) {
      BLOB_CHOICE_LIST* choices = word->ratings->get(i, i);
      if (choices == NULL || choices->empty()) {
        blob_choices.push_back(NULL);
      } else {
        BLOB_CHOICE_IT bc_it(choices);
        blob_choices.push_back(bc_it.data());
      }
    }
    SEAM* seam = improve_one_blob(blob_choices, &best_choice_bundle->fixpt,
                                  false, false, word, &blob_number);
    if (seam == NULL) break;
    // A chop has been made. We have to correct all the data structures to
    // take into account the extra bottom-level blob.
    // Put the seam into the seam_array and correct everything else on the
    // word: ratings matrix (including matrix location in the BLOB_CHOICES),
    // states in WERD_CHOICEs, and blob widths.
    word->InsertSeam(blob_number, seam);
    // Insert a new entry in the beam array.
    best_choice_bundle->beam.insert(new LanguageModelState, blob_number);
    // Fixpts are outdated, but will get recalculated.
    best_choice_bundle->fixpt.clear();
    // Remap existing pain points.
    pain_points->RemapForSplit(blob_number);
    // Insert a new pending at the chop point.
    pending->insert(SegSearchPending(), blob_number);
    // Classify the two newly created blobs using ProcessSegSearchPainPoint,
    // as that updates the pending correctly and adds new pain points.
    MATRIX_COORD pain_point(blob_number, blob_number);
    ProcessSegSearchPainPoint(0.0f, pain_point, "Chop1", pending, word,
                              pain_points, blamer_bundle);
    pain_point.col = blob_number + 1;
    pain_point.row = blob_number + 1;
    ProcessSegSearchPainPoint(0.0f, pain_point, "Chop2", pending, word,
                              pain_points, blamer_bundle);
    if (language_model_->language_model_ngram_on) {
      // N-gram evaluation depends on the number of blobs in a chunk, so we
      // have to re-evaluate everything in the word.
      ResetNGramSearch(word, best_choice_bundle, pending);
      blob_number = 0;
    }
    // Run language model incrementally. (Except with the n-gram model on.)
    UpdateSegSearchNodes(rating_cert_scale, blob_number, pending,
                         word, pain_points, best_choice_bundle, blamer_bundle);
  } while (!language_model_->AcceptableChoiceFound() &&
           word->ratings->dimension() < kMaxNumChunks);
  // If after running only the chopper best_choice is incorrect and no blame
  // has been yet set, blame the classifier if best_choice is classifier's
  // top choice and is a dictionary word (i.e. language model could not have
  // helped). Otherwise blame the tradeoff between the classifier and
  // the old language model (permuters).
  if (word->blamer_bundle != NULL &&
      word->blamer_bundle->incorrect_result_reason() == IRR_CORRECT &&
      !word->blamer_bundle->ChoiceIsCorrect(word->best_choice)) {
    bool valid_permuter = word->best_choice != NULL &&
        Dict::valid_word_permuter(word->best_choice->permuter(), false);
    word->blamer_bundle->BlameClassifierOrLangModel(word,
                                                    getDict().getUnicharset(),
                                                    valid_permuter,
                                                    wordrec_debug_blamer);
  }
}
/**********************************************************************
* select_blob_to_split
*
* These are the results of the last classification. Find a likely
* place to apply splits. If none, return -1.
**********************************************************************/
int Wordrec::select_blob_to_split(
    const GenericVector<BLOB_CHOICE*>& blob_choices,
    float rating_ceiling, bool split_next_to_fragment) {
  BLOB_CHOICE *blob_choice;
  int x;
  float worst = -MAX_FLOAT32;
  int worst_index = -1;
  // Separately track the worst blob that sits next to a character
  // fragment; those are preferred split targets when
  // split_next_to_fragment is set.
  float worst_near_fragment = -MAX_FLOAT32;
  int worst_index_near_fragment = -1;
  const CHAR_FRAGMENT **fragments = NULL;
  if (chop_debug) {
    if (rating_ceiling < MAX_FLOAT32)
      tprintf("rating_ceiling = %8.4f\n", rating_ceiling);
    else
      tprintf("rating_ceiling = No Limit\n");
  }
  // fragments[i] caches the fragment info (or NULL) for blob i; entry 0
  // is seeded here and entry x+1 is filled lazily inside the loop below.
  if (split_next_to_fragment && blob_choices.size() > 0) {
    fragments = new const CHAR_FRAGMENT *[blob_choices.length()];
    if (blob_choices[0] != NULL) {
      fragments[0] = getDict().getUnicharset().get_fragment(
          blob_choices[0]->unichar_id());
    } else {
      fragments[0] = NULL;
    }
  }
  for (x = 0; x < blob_choices.size(); ++x) {
    if (blob_choices[x] == NULL) {
      // An unclassified blob is the most urgent split candidate: return
      // its index immediately.
      if (fragments != NULL) {
        delete[] fragments;
      }
      return x;
    } else {
      blob_choice = blob_choices[x];
      // Populate fragments for the following position.
      if (split_next_to_fragment && x+1 < blob_choices.size()) {
        if (blob_choices[x + 1] != NULL) {
          fragments[x + 1] = getDict().getUnicharset().get_fragment(
              blob_choices[x + 1]->unichar_id());
        } else {
          fragments[x + 1] = NULL;
        }
      }
      // Only blobs below the ceiling (to avoid re-picking a blob that
      // already failed to chop) and with poor certainty are candidates.
      if (blob_choice->rating() < rating_ceiling &&
          blob_choice->certainty() < tessedit_certainty_threshold) {
        // Update worst and worst_index.
        if (blob_choice->rating() > worst) {
          worst_index = x;
          worst = blob_choice->rating();
        }
        if (split_next_to_fragment) {
          // Update worst_near_fragment and worst_index_near_fragment.
          bool expand_following_fragment =
            (x + 1 < blob_choices.size() &&
             fragments[x+1] != NULL && !fragments[x+1]->is_beginning());
          bool expand_preceding_fragment =
            (x > 0 && fragments[x-1] != NULL && !fragments[x-1]->is_ending());
          if ((expand_following_fragment || expand_preceding_fragment) &&
              blob_choice->rating() > worst_near_fragment) {
            worst_index_near_fragment = x;
            worst_near_fragment = blob_choice->rating();
            if (chop_debug) {
              tprintf("worst_index_near_fragment=%d"
                      " expand_following_fragment=%d"
                      " expand_preceding_fragment=%d\n",
                      worst_index_near_fragment,
                      expand_following_fragment,
                      expand_preceding_fragment);
            }
          }
        }
      }
    }
  }
  if (fragments != NULL) {
    delete[] fragments;
  }
  // TODO(daria): maybe a threshold of badness for
  // worst_near_fragment would be useful.
  return worst_index_near_fragment != -1 ?
      worst_index_near_fragment : worst_index;
}
/**********************************************************************
* select_blob_to_split_from_fixpt
*
* Given the fix point from a dictionary search, if there is a single
* dangerous blob that maps to multiple characters, return that blob
* index as a place we need to split. If none, return -1.
**********************************************************************/
// Scans the dictionary's danger points for a single-chunk entry that is
// both dangerous and whose correct reading is an n-gram; such a blob
// maps to multiple characters and should be split. Returns its index,
// or -1 when none (or no fixpt) exists.
int Wordrec::select_blob_to_split_from_fixpt(DANGERR *fixpt) {
  if (fixpt == NULL)
    return -1;
  for (int i = 0; i < fixpt->size(); ++i) {
    if ((*fixpt)[i].dangerous &&
        (*fixpt)[i].correct_is_ngram &&
        (*fixpt)[i].begin + 1 == (*fixpt)[i].end) {
      return (*fixpt)[i].begin;
    }
  }
  return -1;
}
} // namespace tesseract
/**********************************************************************
* total_containment
*
* Check to see if one of these outlines is totally contained within
* the bounding box of the other.
**********************************************************************/
// Returns nonzero when one blob's bounding box entirely contains the
// other's, i.e. the "chop" did not actually separate them horizontally.
inT16 total_containment(TBLOB *blob1, TBLOB *blob2) {
  const TBOX b1 = blob1->bounding_box();
  const TBOX b2 = blob2->bounding_box();
  if (b1.contains(b2))
    return 1;
  return b2.contains(b1) ? 1 : 0;
}
| 1080228-arabicocr11 | wordrec/chopper.cpp | C | asf20 | 24,009 |
///////////////////////////////////////////////////////////////////////
// File: wordrec.cpp
// Description: wordrec class.
// Author: Samuel Charron
//
// (C) Copyright 2006, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#include "wordrec.h"
#include "language_model.h"
#include "params.h"
namespace tesseract {
// Constructor: registers every tunable parameter with the params()
// registry (so config files can set them) and allocates the language
// model. Grouped roughly as: association, chopper tuning, segmentation
// search, and blamer controls.
Wordrec::Wordrec() :
  // control parameters
  BOOL_MEMBER(merge_fragments_in_matrix, TRUE,
              "Merge the fragments in the ratings matrix and delete them"
              " after merging", params()),
  BOOL_MEMBER(wordrec_no_block, FALSE, "Don't output block information",
              params()),
  BOOL_MEMBER(wordrec_enable_assoc, TRUE, "Associator Enable",
              params()),
  // NOTE(review): force_word_assoc registers via CCUtil::params() unlike
  // its siblings — confirm this is intentional.
  BOOL_MEMBER(force_word_assoc, FALSE,
              "force associator to run regardless of what enable_assoc is."
              "This is used for CJK where component grouping is necessary.",
              CCUtil::params()),
  double_MEMBER(wordrec_worst_state, 1.0, "Worst segmentation state",
                params()),
  BOOL_MEMBER(fragments_guide_chopper, FALSE,
              "Use information from fragments to guide chopping process",
              params()),
  // Chopper tuning knobs.
  INT_MEMBER(repair_unchopped_blobs, 1, "Fix blobs that aren't chopped",
             params()),
  double_MEMBER(tessedit_certainty_threshold, -2.25, "Good blob limit",
                params()),
  INT_MEMBER(chop_debug, 0, "Chop debug",
             params()),
  BOOL_MEMBER(chop_enable, 1, "Chop enable",
              params()),
  BOOL_MEMBER(chop_vertical_creep, 0, "Vertical creep",
              params()),
  INT_MEMBER(chop_split_length, 10000, "Split Length",
             params()),
  INT_MEMBER(chop_same_distance, 2, "Same distance",
             params()),
  INT_MEMBER(chop_min_outline_points, 6, "Min Number of Points on Outline",
             params()),
  INT_MEMBER(chop_seam_pile_size, 150, "Max number of seams in seam_pile",
             params()),
  BOOL_MEMBER(chop_new_seam_pile, 1, "Use new seam_pile", params()),
  INT_MEMBER(chop_inside_angle, -50, "Min Inside Angle Bend",
             params()),
  INT_MEMBER(chop_min_outline_area, 2000, "Min Outline Area",
             params()),
  double_MEMBER(chop_split_dist_knob, 0.5, "Split length adjustment",
                params()),
  double_MEMBER(chop_overlap_knob, 0.9, "Split overlap adjustment",
                params()),
  double_MEMBER(chop_center_knob, 0.15, "Split center adjustment",
                params()),
  INT_MEMBER(chop_centered_maxwidth, 90, "Width of (smaller) chopped blobs "
             "above which we don't care that a chop is not near the center.",
             params()),
  double_MEMBER(chop_sharpness_knob, 0.06, "Split sharpness adjustment",
                params()),
  double_MEMBER(chop_width_change_knob, 5.0, "Width change adjustment",
                params()),
  double_MEMBER(chop_ok_split, 100.0, "OK split limit",
                params()),
  double_MEMBER(chop_good_split, 50.0, "Good split limit",
                params()),
  INT_MEMBER(chop_x_y_weight, 3, "X / Y length weight",
             params()),
  INT_MEMBER(segment_adjust_debug, 0, "Segmentation adjustment debug",
             params()),
  BOOL_MEMBER(assume_fixed_pitch_char_segment, FALSE,
              "include fixed-pitch heuristics in char segmentation",
              params()),
  INT_MEMBER(wordrec_debug_level, 0,
             "Debug level for wordrec", params()),
  INT_MEMBER(wordrec_max_join_chunks, 4,
             "Max number of broken pieces to associate", params()),
  // Blamer (error attribution) controls.
  BOOL_MEMBER(wordrec_skip_no_truth_words, false,
              "Only run OCR for words that had truth recorded in BlamerBundle",
              params()),
  BOOL_MEMBER(wordrec_debug_blamer, false,
              "Print blamer debug messages", params()),
  BOOL_MEMBER(wordrec_run_blamer, false,
              "Try to set the blame for errors", params()),
  // Segmentation-search controls.
  INT_MEMBER(segsearch_debug_level, 0,
             "SegSearch debug level", params()),
  INT_MEMBER(segsearch_max_pain_points, 2000,
             "Maximum number of pain points stored in the queue",
             params()),
  INT_MEMBER(segsearch_max_futile_classifications, 20,
             "Maximum number of pain point classifications per chunk that"
             "did not result in finding a better word choice.",
             params()),
  double_MEMBER(segsearch_max_char_wh_ratio, 2.0,
                "Maximum character width-to-height ratio", params()),
  BOOL_MEMBER(save_alt_choices, true,
              "Save alternative paths found during chopping"
              " and segmentation search",
              params()) {
  prev_word_best_choice_ = NULL;
  // Owned by this instance; released in the destructor.
  language_model_ = new LanguageModel(&get_fontinfo_table(),
                                      &(getDict()));
  fill_lattice_ = NULL;
}
Wordrec::~Wordrec() {
  // language_model_ is owned by this instance (allocated in the ctor).
  delete language_model_;
}
} // namespace tesseract
| 1080228-arabicocr11 | wordrec/wordrec.cpp | C++ | asf20 | 5,473 |
///////////////////////////////////////////////////////////////////////
// File: language_model.cpp
// Description: Functions that utilize the knowledge about the properties,
// structure and statistics of the language to help recognition.
// Author: Daria Antonova
// Created: Mon Nov 11 11:26:43 PST 2009
//
// (C) Copyright 2009, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#include <math.h>
#include "language_model.h"
#include "dawg.h"
#include "freelist.h"
#include "intproto.h"
#include "helpers.h"
#include "lm_state.h"
#include "lm_pain_points.h"
#include "matrix.h"
#include "params.h"
#include "params_training_featdef.h"
#if defined(_MSC_VER) || defined(ANDROID)
// Fallback log2 for toolchains whose math library lacks it (guarded by
// the surrounding #if defined(_MSC_VER) || defined(ANDROID)); computed
// via the change-of-base identity log2(n) = ln(n) / ln(2).
double log2(double n) {
  return log(n) / log(2.0);
}
#endif // _MSC_VER
namespace tesseract {
const float LanguageModel::kMaxAvgNgramCost = 25.0f;
// Constructor: registers all language-model tunables with the params
// registry of the owning Dict's CCUtil, then allocates the dawg search
// state (dawg_args_ and the active-dawg vectors, freed in the dtor).
LanguageModel::LanguageModel(const UnicityTable<FontInfo> *fontinfo_table,
                             Dict *dict)
  : INT_MEMBER(language_model_debug_level, 0, "Language model debug level",
               dict->getCCUtil()->params()),
    // Character n-gram model controls.
    BOOL_INIT_MEMBER(language_model_ngram_on, false,
                     "Turn on/off the use of character ngram model",
                     dict->getCCUtil()->params()),
    INT_MEMBER(language_model_ngram_order, 8,
               "Maximum order of the character ngram model",
               dict->getCCUtil()->params()),
    INT_MEMBER(language_model_viterbi_list_max_num_prunable, 10,
               "Maximum number of prunable (those for which"
               " PrunablePath() is true) entries in each viterbi list"
               " recorded in BLOB_CHOICEs",
               dict->getCCUtil()->params()),
    INT_MEMBER(language_model_viterbi_list_max_size, 500,
               "Maximum size of viterbi lists recorded in BLOB_CHOICEs",
               dict->getCCUtil()->params()),
    double_MEMBER(language_model_ngram_small_prob, 0.000001,
                  "To avoid overly small denominators use this as the "
                  "floor of the probability returned by the ngram model.",
                  dict->getCCUtil()->params()),
    double_MEMBER(language_model_ngram_nonmatch_score, -40.0,
                  "Average classifier score of a non-matching unichar.",
                  dict->getCCUtil()->params()),
    BOOL_MEMBER(language_model_ngram_use_only_first_uft8_step, false,
                "Use only the first UTF8 step of the given string"
                " when computing log probabilities.",
                dict->getCCUtil()->params()),
    double_MEMBER(language_model_ngram_scale_factor, 0.03,
                  "Strength of the character ngram model relative to the"
                  " character classifier ",
                  dict->getCCUtil()->params()),
    double_MEMBER(language_model_ngram_rating_factor, 16.0,
                  "Factor to bring log-probs into the same range as ratings"
                  " when multiplied by outline length ",
                  dict->getCCUtil()->params()),
    BOOL_MEMBER(language_model_ngram_space_delimited_language, true,
                "Words are delimited by space",
                dict->getCCUtil()->params()),
    INT_MEMBER(language_model_min_compound_length, 3,
               "Minimum length of compound words",
               dict->getCCUtil()->params()),
    // Consistency penalties applied to word choices.
    double_MEMBER(language_model_penalty_non_freq_dict_word, 0.1,
                  "Penalty for words not in the frequent word dictionary",
                  dict->getCCUtil()->params()),
    double_MEMBER(language_model_penalty_non_dict_word, 0.15,
                  "Penalty for non-dictionary words",
                  dict->getCCUtil()->params()),
    double_MEMBER(language_model_penalty_punc, 0.2,
                  "Penalty for inconsistent punctuation",
                  dict->getCCUtil()->params()),
    double_MEMBER(language_model_penalty_case, 0.1,
                  "Penalty for inconsistent case",
                  dict->getCCUtil()->params()),
    double_MEMBER(language_model_penalty_script, 0.5,
                  "Penalty for inconsistent script",
                  dict->getCCUtil()->params()),
    double_MEMBER(language_model_penalty_chartype, 0.3,
                  "Penalty for inconsistent character type",
                  dict->getCCUtil()->params()),
    // TODO(daria, rays): enable font consistency checking
    // after improving font analysis.
    double_MEMBER(language_model_penalty_font, 0.00,
                  "Penalty for inconsistent font",
                  dict->getCCUtil()->params()),
    double_MEMBER(language_model_penalty_spacing, 0.05,
                  "Penalty for inconsistent spacing",
                  dict->getCCUtil()->params()),
    double_MEMBER(language_model_penalty_increment, 0.01,
                  "Penalty increment",
                  dict->getCCUtil()->params()),
    INT_MEMBER(wordrec_display_segmentations, 0, "Display Segmentations",
               dict->getCCUtil()->params()),
    BOOL_INIT_MEMBER(language_model_use_sigmoidal_certainty, false,
                     "Use sigmoidal score for certainty",
                     dict->getCCUtil()->params()),
    fontinfo_table_(fontinfo_table), dict_(dict),
    fixed_pitch_(false), max_char_wh_ratio_(0.0),
    acceptable_choice_found_(false) {
  ASSERT_HOST(dict_ != NULL);
  // Heap allocations below are owned by this instance (freed in ~LanguageModel).
  dawg_args_ = new DawgArgs(NULL, new DawgPositionVector(), NO_PERM);
  very_beginning_active_dawgs_ = new DawgPositionVector();
  beginning_active_dawgs_ = new DawgPositionVector();
}
// Destructor: releases the heap objects allocated in the constructor.
LanguageModel::~LanguageModel() {
  delete very_beginning_active_dawgs_;
  delete beginning_active_dawgs_;
  // dawg_args_ holds a DawgPositionVector allocated in the constructor for
  // updated_dawgs; free it before freeing the args struct itself.
  delete dawg_args_->updated_dawgs;
  delete dawg_args_;
}
// Resets per-word state before searching a new word.
//
// prev_word          - previous best choice; used only as n-gram context.
// fixed_pitch        - true for fixed-pitch (e.g. OCR-B / CJK) layouts.
// max_char_wh_ratio  - maximum allowed character width/height ratio.
// rating_cert_scale  - scale relating classifier ratings and certainties.
void LanguageModel::InitForWord(const WERD_CHOICE *prev_word,
                                bool fixed_pitch, float max_char_wh_ratio,
                                float rating_cert_scale) {
  fixed_pitch_ = fixed_pitch;
  max_char_wh_ratio_ = max_char_wh_ratio;
  rating_cert_scale_ = rating_cert_scale;
  acceptable_choice_found_ = false;
  correct_segmentation_explored_ = false;
  // Initialize vectors with beginning DawgInfos.
  very_beginning_active_dawgs_->clear();
  dict_->init_active_dawgs(very_beginning_active_dawgs_, false);
  beginning_active_dawgs_->clear();
  dict_->default_dawgs(beginning_active_dawgs_, false);
  // Fill prev_word_str_ with the last language_model_ngram_order
  // unichars from prev_word.
  if (language_model_ngram_on) {
    if (prev_word != NULL && prev_word->unichar_string() != NULL) {
      prev_word_str_ = prev_word->unichar_string();
      // Space-delimited scripts get an explicit word separator appended.
      if (language_model_ngram_space_delimited_language) prev_word_str_ += ' ';
    } else {
      prev_word_str_ = " ";
    }
    // Count the UTF-8 characters (not bytes) in prev_word_str_ by stepping
    // through it one unichar at a time.
    const char *str_ptr = prev_word_str_.string();
    const char *str_end = str_ptr + prev_word_str_.length();
    int step;
    prev_word_unichar_step_len_ = 0;
    while (str_ptr != str_end && (step = UNICHAR::utf8_step(str_ptr))) {
      str_ptr += step;
      ++prev_word_unichar_step_len_;
    }
    // The whole string must be valid UTF-8, otherwise utf8_step returned 0
    // mid-string and str_ptr stopped short.
    ASSERT_HOST(str_ptr == str_end);
  }
}
// Helper scans the collection of predecessors for competing siblings that
// have the same letter with the opposite case, setting competing_vse.
static void ScanParentsForCaseMix(const UNICHARSET& unicharset,
LanguageModelState* parent_node) {
if (parent_node == NULL) return;
ViterbiStateEntry_IT vit(&parent_node->viterbi_state_entries);
for (vit.mark_cycle_pt(); !vit.cycled_list(); vit.forward()) {
ViterbiStateEntry* vse = vit.data();
vse->competing_vse = NULL;
UNICHAR_ID unichar_id = vse->curr_b->unichar_id();
if (unicharset.get_isupper(unichar_id) ||
unicharset.get_islower(unichar_id)) {
UNICHAR_ID other_case = unicharset.get_other_case(unichar_id);
if (other_case == unichar_id) continue; // Not in unicharset.
// Find other case in same list. There could be multiple entries with
// the same unichar_id, but in theory, they should all point to the
// same BLOB_CHOICE, and that is what we will be using to decide
// which to keep.
ViterbiStateEntry_IT vit2(&parent_node->viterbi_state_entries);
for (vit2.mark_cycle_pt(); !vit2.cycled_list() &&
vit2.data()->curr_b->unichar_id() != other_case;
vit2.forward()) {}
if (!vit2.cycled_list()) {
vse->competing_vse = vit2.data();
}
}
}
}
// Helper returns true if the given choice has a better case variant before
// it in the choice_list that is not distinguishable by size.
static bool HasBetterCaseVariant(const UNICHARSET& unicharset,
const BLOB_CHOICE* choice,
BLOB_CHOICE_LIST* choices) {
UNICHAR_ID choice_id = choice->unichar_id();
UNICHAR_ID other_case = unicharset.get_other_case(choice_id);
if (other_case == choice_id || other_case == INVALID_UNICHAR_ID)
return false; // Not upper or lower or not in unicharset.
if (unicharset.SizesDistinct(choice_id, other_case))
return false; // Can be separated by size.
BLOB_CHOICE_IT bc_it(choices);
for (bc_it.mark_cycle_pt(); !bc_it.cycled_list(); bc_it.forward()) {
BLOB_CHOICE* better_choice = bc_it.data();
if (better_choice->unichar_id() == other_case)
return true; // Found an earlier instance of other_case.
else if (better_choice == choice)
return false; // Reached the original choice.
}
return false; // Should never happen, but just in case.
}
// UpdateState has the job of combining the ViterbiStateEntry lists on each
// of the choices on parent_list with each of the blob choices in curr_list,
// making a new ViterbiStateEntry for each sensible path.
// This could be a huge set of combinations, creating a lot of work only to
// be truncated by some beam limit, but only certain kinds of paths will
// continue at the next step:
// paths that are liked by the language model: either a DAWG or the n-gram
// model, where active.
// paths that represent some kind of top choice. The old permuter permuted
// the top raw classifier score, the top upper case word and the top lower-
// case word. UpdateState now concentrates its top-choice paths on top
// lower-case, top upper-case (or caseless alpha), and top digit sequence,
// with allowance for continuation of these paths through blobs where such
// a character does not appear in the choices list.
// GetNextParentVSE enforces some of these models to minimize the number of
// calls to AddViterbiStateEntry, even prior to looking at the language model.
// Thus an n-blob sequence of [l1I] will produce 3n calls to
// AddViterbiStateEntry instead of 3^n.
// Of course it isn't quite that simple as Title Case is handled by allowing
// lower case to continue an upper case initial, but it has to be detected
// in the combiner so it knows which upper case letters are initial alphas.
//
// Returns true if any new ViterbiStateEntry was added to the beam state for
// curr_row (i.e. the search state changed).
bool LanguageModel::UpdateState(
    bool just_classified,
    int curr_col, int curr_row,
    BLOB_CHOICE_LIST *curr_list,
    LanguageModelState *parent_node,
    LMPainPoints *pain_points,
    WERD_RES *word_res,
    BestChoiceBundle *best_choice_bundle,
    BlamerBundle *blamer_bundle) {
  if (language_model_debug_level > 0) {
    tprintf("\nUpdateState: col=%d row=%d %s",
            curr_col, curr_row, just_classified ? "just_classified" : "");
    if (language_model_debug_level > 5)
      tprintf("(parent=%p)\n", parent_node);
    else
      tprintf("\n");
  }
  // Initialize helper variables.
  bool word_end = (curr_row+1 >= word_res->ratings->dimension());
  bool new_changed = false;
  float denom = (language_model_ngram_on) ? ComputeDenom(curr_list) : 1.0f;
  const UNICHARSET& unicharset = dict_->getUnicharset();
  BLOB_CHOICE *first_lower = NULL;
  BLOB_CHOICE *first_upper = NULL;
  BLOB_CHOICE *first_digit = NULL;
  bool has_alnum_mix = false;
  if (parent_node != NULL) {
    int result = SetTopParentLowerUpperDigit(parent_node);
    if (result < 0) {
      if (language_model_debug_level > 0)
        tprintf("No parents found to process\n");
      return false;
    }
    if (result > 0)
      has_alnum_mix = true;
  }
  // If the current list has no alpha/digit mix, the parents' mix is moot.
  // (Fixed: removed a stray empty statement after this assignment.)
  if (!GetTopLowerUpperDigit(curr_list, &first_lower, &first_upper,
                             &first_digit))
    has_alnum_mix = false;
  ScanParentsForCaseMix(unicharset, parent_node);
  if (language_model_debug_level > 3 && parent_node != NULL) {
    parent_node->Print("Parent viterbi list");
  }
  LanguageModelState *curr_state = best_choice_bundle->beam[curr_row];
  // Call AddViterbiStateEntry() for each parent+child ViterbiStateEntry.
  ViterbiStateEntry_IT vit;
  BLOB_CHOICE_IT c_it(curr_list);
  for (c_it.mark_cycle_pt(); !c_it.cycled_list(); c_it.forward()) {
    BLOB_CHOICE* choice = c_it.data();
    // TODO(antonova): make sure commenting this out if ok for ngram
    // model scoring (I think this was introduced to fix ngram model quirks).
    // Skip NULL unichars unless it is the only choice.
    //if (!curr_list->singleton() && c_it.data()->unichar_id() == 0) continue;
    UNICHAR_ID unichar_id = choice->unichar_id();
    if (unicharset.get_fragment(unichar_id)) {
      continue;  // Skip fragments.
    }
    // Set top choice flags.
    LanguageModelFlagsType blob_choice_flags = kXhtConsistentFlag;
    if (c_it.at_first() || !new_changed)
      blob_choice_flags |= kSmallestRatingFlag;
    if (first_lower == choice) blob_choice_flags |= kLowerCaseFlag;
    if (first_upper == choice) blob_choice_flags |= kUpperCaseFlag;
    if (first_digit == choice) blob_choice_flags |= kDigitFlag;
    if (parent_node == NULL) {
      // Process the beginning of a word.
      // If there is a better case variant that is not distinguished by size,
      // skip this blob choice, as we have no choice but to accept the result
      // of the character classifier to distinguish between them, even if
      // followed by an upper case.
      // With words like iPoc, and other CamelBackWords, the lower-upper
      // transition can only be achieved if the classifier has the correct case
      // as the top choice, and leaving an initial I lower down the list
      // increases the chances of choosing IPoc simply because it doesn't
      // include such a transition. iPoc will beat iPOC and ipoc because
      // the other words are baseline/x-height inconsistent.
      if (HasBetterCaseVariant(unicharset, choice, curr_list))
        continue;
      // Upper counts as lower at the beginning of a word.
      if (blob_choice_flags & kUpperCaseFlag)
        blob_choice_flags |= kLowerCaseFlag;
      new_changed |= AddViterbiStateEntry(
          blob_choice_flags, denom, word_end, curr_col, curr_row,
          choice, curr_state, NULL, pain_points,
          word_res, best_choice_bundle, blamer_bundle);
    } else {
      // Get viterbi entries from each parent ViterbiStateEntry.
      vit.set_to_list(&parent_node->viterbi_state_entries);
      int vit_counter = 0;
      vit.mark_cycle_pt();
      ViterbiStateEntry* parent_vse = NULL;
      LanguageModelFlagsType top_choice_flags;
      while ((parent_vse = GetNextParentVSE(just_classified, has_alnum_mix,
                                            c_it.data(), blob_choice_flags,
                                            unicharset, word_res, &vit,
                                            &top_choice_flags)) != NULL) {
        // Skip pruned entries and do not look at prunable entries if already
        // examined language_model_viterbi_list_max_num_prunable of those.
        if (PrunablePath(*parent_vse) &&
            (++vit_counter > language_model_viterbi_list_max_num_prunable ||
             (language_model_ngram_on && parent_vse->ngram_info->pruned))) {
          continue;
        }
        // If the parent has no alnum choice, (ie choice is the first in a
        // string of alnum), and there is a better case variant that is not
        // distinguished by size, skip this blob choice/parent, as with the
        // initial blob treatment above.
        if (!parent_vse->HasAlnumChoice(unicharset) &&
            HasBetterCaseVariant(unicharset, choice, curr_list))
          continue;
        // Create a new ViterbiStateEntry if BLOB_CHOICE in c_it.data()
        // looks good according to the Dawgs or character ngram model.
        new_changed |= AddViterbiStateEntry(
            top_choice_flags, denom, word_end, curr_col, curr_row,
            c_it.data(), curr_state, parent_vse, pain_points,
            word_res, best_choice_bundle, blamer_bundle);
      }
    }
  }
  return new_changed;
}
// Finds the first lower and upper case letter and first digit in curr_list.
// For non-upper/lower languages, alpha counts as upper.
// Uses the first character in the list in place of empty results.
// Returns true if both alpha and digits are found.
bool LanguageModel::GetTopLowerUpperDigit(BLOB_CHOICE_LIST *curr_list,
                                          BLOB_CHOICE **first_lower,
                                          BLOB_CHOICE **first_upper,
                                          BLOB_CHOICE **first_digit) const {
  const UNICHARSET &unicharset = dict_->getUnicharset();
  BLOB_CHOICE *fallback = NULL;  // First non-fragment choice in the list.
  BLOB_CHOICE_IT it(curr_list);
  for (it.mark_cycle_pt(); !it.cycled_list(); it.forward()) {
    BLOB_CHOICE *candidate = it.data();
    UNICHAR_ID id = candidate->unichar_id();
    if (unicharset.get_fragment(id)) continue;  // skip fragments
    if (fallback == NULL) fallback = candidate;
    // Record the first choice seen in each of the three categories.
    if (*first_lower == NULL && unicharset.get_islower(id)) {
      *first_lower = candidate;
    }
    if (*first_upper == NULL && unicharset.get_isalpha(id) &&
        !unicharset.get_islower(id)) {
      *first_upper = candidate;
    }
    if (*first_digit == NULL && unicharset.get_isdigit(id)) {
      *first_digit = candidate;
    }
  }
  ASSERT_HOST(fallback != NULL);
  // Mixed means at least one alpha AND at least one digit were found.
  bool mixed = (*first_lower != NULL || *first_upper != NULL) &&
      *first_digit != NULL;
  // Fill any empty category with the first usable choice.
  if (*first_lower == NULL) *first_lower = fallback;
  if (*first_upper == NULL) *first_upper = fallback;
  if (*first_digit == NULL) *first_digit = fallback;
  return mixed;
}
// Forces there to be at least one entry in the overall set of the
// viterbi_state_entries of each element of parent_node that has the
// top_choice_flag set for lower, upper and digit using the same rules as
// GetTopLowerUpperDigit, setting the flag on the first found suitable
// candidate, whether or not the flag is set on some other parent.
// Returns 1 if both alpha and digits are found among the parents, -1 if no
// parents are found at all (a legitimate case), and 0 otherwise.
int LanguageModel::SetTopParentLowerUpperDigit(
    LanguageModelState *parent_node) const {
  if (parent_node == NULL) return -1;
  UNICHAR_ID top_id = INVALID_UNICHAR_ID;
  // Best (lowest-rating) entry found so far in each category.
  ViterbiStateEntry* top_lower = NULL;
  ViterbiStateEntry* top_upper = NULL;
  ViterbiStateEntry* top_digit = NULL;
  ViterbiStateEntry* top_choice = NULL;
  float lower_rating = 0.0f;
  float upper_rating = 0.0f;
  float digit_rating = 0.0f;
  float top_rating = 0.0f;
  const UNICHARSET &unicharset = dict_->getUnicharset();
  ViterbiStateEntry_IT vit(&parent_node->viterbi_state_entries);
  for (vit.mark_cycle_pt(); !vit.cycled_list(); vit.forward()) {
    ViterbiStateEntry* vse = vit.data();
    // INVALID_UNICHAR_ID should be treated like a zero-width joiner, so scan
    // back to the real character if needed.
    ViterbiStateEntry* unichar_vse = vse;
    UNICHAR_ID unichar_id = unichar_vse->curr_b->unichar_id();
    float rating = unichar_vse->curr_b->rating();
    while (unichar_id == INVALID_UNICHAR_ID &&
           unichar_vse->parent_vse != NULL) {
      unichar_vse = unichar_vse->parent_vse;
      unichar_id = unichar_vse->curr_b->unichar_id();
      rating = unichar_vse->curr_b->rating();
    }
    if (unichar_id != INVALID_UNICHAR_ID) {
      // Categorize: lower case, other alpha (counts as upper), or digit.
      if (unicharset.get_islower(unichar_id)) {
        if (top_lower == NULL || lower_rating > rating) {
          top_lower = vse;
          lower_rating = rating;
        }
      } else if (unicharset.get_isalpha(unichar_id)) {
        if (top_upper == NULL || upper_rating > rating) {
          top_upper = vse;
          upper_rating = rating;
        }
      } else if (unicharset.get_isdigit(unichar_id)) {
        if (top_digit == NULL || digit_rating > rating) {
          top_digit = vse;
          digit_rating = rating;
        }
      }
    }
    // Track the overall lowest-rating entry regardless of category.
    if (top_choice == NULL || top_rating > rating) {
      top_choice = vse;
      top_rating = rating;
      top_id = unichar_id;
    }
  }
  if (top_choice == NULL) return -1;
  bool mixed = (top_lower != NULL || top_upper != NULL) &&
      top_digit != NULL;
  // Any category with no candidate falls back to the overall top choice.
  if (top_lower == NULL) top_lower = top_choice;
  top_lower->top_choice_flags |= kLowerCaseFlag;
  if (top_upper == NULL) top_upper = top_choice;
  top_upper->top_choice_flags |= kUpperCaseFlag;
  if (top_digit == NULL) top_digit = top_choice;
  top_digit->top_choice_flags |= kDigitFlag;
  top_choice->top_choice_flags |= kSmallestRatingFlag;
  if (top_id != INVALID_UNICHAR_ID && dict_->compound_marker(top_id) &&
      (top_choice->top_choice_flags &
       (kLowerCaseFlag | kUpperCaseFlag | kDigitFlag))) {
    // If the compound marker top choice carries any of the top alnum flags,
    // then give it all of them, allowing words like I-295 to be chosen.
    top_choice->top_choice_flags |=
        kLowerCaseFlag | kUpperCaseFlag | kDigitFlag;
  }
  return mixed ? 1 : 0;
}
// Finds the next ViterbiStateEntry with which the given unichar_id can
// combine sensibly, taking into account any mixed alnum/mixed case
// situation, and whether this combination has been inspected before.
// NOTE: vse_it is an in/out cursor — the caller resumes iteration from the
// position this function leaves it at, so each entry is returned at most
// once across successive calls.
ViterbiStateEntry* LanguageModel::GetNextParentVSE(
    bool just_classified, bool mixed_alnum, const BLOB_CHOICE* bc,
    LanguageModelFlagsType blob_choice_flags, const UNICHARSET& unicharset,
    WERD_RES* word_res, ViterbiStateEntry_IT* vse_it,
    LanguageModelFlagsType* top_choice_flags) const {
  for (; !vse_it->cycled_list(); vse_it->forward()) {
    ViterbiStateEntry* parent_vse = vse_it->data();
    // Only consider the parent if it has been updated or
    // if the current ratings cell has just been classified.
    if (!just_classified && !parent_vse->updated) continue;
    if (language_model_debug_level > 2)
      parent_vse->Print("Considering");
    // If the parent is non-alnum, then upper counts as lower.
    *top_choice_flags = blob_choice_flags;
    if ((blob_choice_flags & kUpperCaseFlag) &&
        !parent_vse->HasAlnumChoice(unicharset)) {
      *top_choice_flags |= kLowerCaseFlag;
    }
    // Only flags present on both the blob choice and the parent survive.
    *top_choice_flags &= parent_vse->top_choice_flags;
    UNICHAR_ID unichar_id = bc->unichar_id();
    const BLOB_CHOICE* parent_b = parent_vse->curr_b;
    UNICHAR_ID parent_id = parent_b->unichar_id();
    // Digits do not bind to alphas if there is a mix in both parent and current
    // or if the alpha is not the top choice.
    if (unicharset.get_isdigit(unichar_id) &&
        unicharset.get_isalpha(parent_id) &&
        (mixed_alnum || *top_choice_flags == 0))
      continue;  // Digits don't bind to alphas.
    // Likewise alphas do not bind to digits if there is a mix in both or if
    // the digit is not the top choice.
    if (unicharset.get_isalpha(unichar_id) &&
        unicharset.get_isdigit(parent_id) &&
        (mixed_alnum || *top_choice_flags == 0))
      continue;  // Alphas don't bind to digits.
    // If there is a case mix of the same alpha in the parent list, then
    // competing_vse is non-null and will be used to determine whether
    // or not to bind the current blob choice.
    if (parent_vse->competing_vse != NULL) {
      const BLOB_CHOICE* competing_b = parent_vse->competing_vse->curr_b;
      UNICHAR_ID other_id = competing_b->unichar_id();
      if (language_model_debug_level >= 5) {
        tprintf("Parent %s has competition %s\n",
                unicharset.id_to_unichar(parent_id),
                unicharset.id_to_unichar(other_id));
      }
      if (unicharset.SizesDistinct(parent_id, other_id)) {
        // If other_id matches bc wrt position and size, and parent_id, doesn't,
        // don't bind to the current parent.
        if (bc->PosAndSizeAgree(*competing_b, word_res->x_height,
                                language_model_debug_level >= 5) &&
            !bc->PosAndSizeAgree(*parent_b, word_res->x_height,
                                 language_model_debug_level >= 5))
          continue;  // Competing blobchoice has a better vertical match.
      }
    }
    // Advance past the accepted entry so the next call starts after it.
    vse_it->forward();
    return parent_vse;  // This one is good!
  }
  return NULL;  // Ran out of possibilities.
}
// Creates a new ViterbiStateEntry for blob choice b extending parent_vse
// (NULL at word start), scores it with the Dawg and n-gram language model
// components, and adds it to curr_state's entry list if it survives pruning.
// Returns true if an entry was added (i.e. the beam state changed).
bool LanguageModel::AddViterbiStateEntry(
    LanguageModelFlagsType top_choice_flags,
    float denom,
    bool word_end,
    int curr_col, int curr_row,
    BLOB_CHOICE *b,
    LanguageModelState *curr_state,
    ViterbiStateEntry *parent_vse,
    LMPainPoints *pain_points,
    WERD_RES *word_res,
    BestChoiceBundle *best_choice_bundle,
    BlamerBundle *blamer_bundle) {
  ViterbiStateEntry_IT vit;
  if (language_model_debug_level > 1) {
    tprintf("AddViterbiStateEntry for unichar %s rating=%.4f"
            " certainty=%.4f top_choice_flags=0x%x",
            dict_->getUnicharset().id_to_unichar(b->unichar_id()),
            b->rating(), b->certainty(), top_choice_flags);
    if (language_model_debug_level > 5)
      tprintf(" parent_vse=%p\n", parent_vse);
    else
      tprintf("\n");
  }
  // Check whether the list is full.
  // NOTE(review): later code dereferences curr_state unconditionally, so a
  // NULL curr_state with a non-full list would crash — presumably callers
  // never pass NULL; confirm against call sites.
  if (curr_state != NULL &&
      curr_state->viterbi_state_entries_length >=
          language_model_viterbi_list_max_size) {
    if (language_model_debug_level > 1) {
      tprintf("AddViterbiStateEntry: viterbi list is full!\n");
    }
    return false;
  }
  // Invoke Dawg language model component.
  LanguageModelDawgInfo *dawg_info =
      GenerateDawgInfo(word_end, curr_col, curr_row, *b, parent_vse);
  float outline_length =
      AssociateUtils::ComputeOutlineLength(rating_cert_scale_, *b);
  // Invoke Ngram language model component.
  LanguageModelNgramInfo *ngram_info = NULL;
  if (language_model_ngram_on) {
    ngram_info = GenerateNgramInfo(
        dict_->getUnicharset().id_to_unichar(b->unichar_id()), b->certainty(),
        denom, curr_col, curr_row, outline_length, parent_vse);
    ASSERT_HOST(ngram_info != NULL);
  }
  bool liked_by_language_model = dawg_info != NULL ||
      (ngram_info != NULL && !ngram_info->pruned);
  // Quick escape if not liked by the language model, can't be consistent
  // xheight, and not top choice.
  if (!liked_by_language_model && top_choice_flags == 0) {
    if (language_model_debug_level > 1) {
      tprintf("Language model components very early pruned this entry\n");
    }
    delete ngram_info;
    delete dawg_info;
    return false;
  }
  // Check consistency of the path and set the relevant consistency_info.
  LMConsistencyInfo consistency_info(
    parent_vse != NULL ? &parent_vse->consistency_info : NULL);
  // Start with just the x-height consistency, as it provides significant
  // pruning opportunity.
  consistency_info.ComputeXheightConsistency(
    b, dict_->getUnicharset().get_ispunctuation(b->unichar_id()));
  // Turn off xheight consistent flag if not consistent.
  if (consistency_info.InconsistentXHeight()) {
    top_choice_flags &= ~kXhtConsistentFlag;
  }
  // Quick escape if not liked by the language model, not consistent xheight,
  // and not top choice.
  if (!liked_by_language_model && top_choice_flags == 0) {
    if (language_model_debug_level > 1) {
      tprintf("Language model components early pruned this entry\n");
    }
    delete ngram_info;
    delete dawg_info;
    return false;
  }
  // Compute the rest of the consistency info.
  FillConsistencyInfo(curr_col, word_end, b, parent_vse,
                      word_res, &consistency_info);
  if (dawg_info != NULL && consistency_info.invalid_punc) {
    consistency_info.invalid_punc = false;  // do not penalize dict words
  }
  // Compute cost of associating the blobs that represent the current unichar.
  AssociateStats associate_stats;
  ComputeAssociateStats(curr_col, curr_row, max_char_wh_ratio_,
                        parent_vse, word_res, &associate_stats);
  if (parent_vse != NULL) {
    associate_stats.shape_cost += parent_vse->associate_stats.shape_cost;
    associate_stats.bad_shape |= parent_vse->associate_stats.bad_shape;
  }
  // Create the new ViterbiStateEntry compute the adjusted cost of the path.
  // Ownership of dawg_info and ngram_info passes to new_vse here.
  ViterbiStateEntry *new_vse = new ViterbiStateEntry(
      parent_vse, b, 0.0, outline_length,
      consistency_info, associate_stats, top_choice_flags, dawg_info,
      ngram_info, (language_model_debug_level > 0) ?
          dict_->getUnicharset().id_to_unichar(b->unichar_id()) : NULL);
  new_vse->cost = ComputeAdjustedPathCost(new_vse);
  if (language_model_debug_level >= 3)
    tprintf("Adjusted cost = %g\n", new_vse->cost);
  // Invoke Top Choice language model component to make the final adjustments
  // to new_vse->top_choice_flags.
  if (!curr_state->viterbi_state_entries.empty() && new_vse->top_choice_flags) {
    GenerateTopChoiceInfo(new_vse, parent_vse, curr_state);
  }
  // If language model components did not like this unichar - return.
  bool keep = new_vse->top_choice_flags || liked_by_language_model;
  if (!(top_choice_flags & kSmallestRatingFlag) &&  // no non-top choice paths
      consistency_info.inconsistent_script) {       // with inconsistent script
    keep = false;
  }
  if (!keep) {
    if (language_model_debug_level > 1) {
      tprintf("Language model components did not like this entry\n");
    }
    delete new_vse;
    return false;
  }
  // Discard this entry if it represents a prunable path and
  // language_model_viterbi_list_max_num_prunable such entries with a lower
  // cost have already been recorded.
  if (PrunablePath(*new_vse) &&
      (curr_state->viterbi_state_entries_prunable_length >=
       language_model_viterbi_list_max_num_prunable) &&
      new_vse->cost >= curr_state->viterbi_state_entries_prunable_max_cost) {
    if (language_model_debug_level > 1) {
      tprintf("Discarded ViterbiEntry with high cost %g max cost %g\n",
              new_vse->cost,
              curr_state->viterbi_state_entries_prunable_max_cost);
    }
    delete new_vse;
    return false;
  }
  // Update best choice if needed.
  if (word_end) {
    UpdateBestChoice(new_vse, pain_points, word_res,
                     best_choice_bundle, blamer_bundle);
    // Discard the entry if UpdateBestChoice() found flaws in it.
    if (new_vse->cost >= WERD_CHOICE::kBadRating &&
        new_vse != best_choice_bundle->best_vse) {
      if (language_model_debug_level > 1) {
        tprintf("Discarded ViterbiEntry with high cost %g\n", new_vse->cost);
      }
      delete new_vse;
      return false;
    }
  }
  // Add the new ViterbiStateEntry to curr_state->viterbi_state_entries.
  curr_state->viterbi_state_entries.add_sorted(ViterbiStateEntry::Compare,
                                               false, new_vse);
  curr_state->viterbi_state_entries_length++;
  if (PrunablePath(*new_vse)) {
    curr_state->viterbi_state_entries_prunable_length++;
  }
  // Update lms->viterbi_state_entries_prunable_max_cost and clear
  // top_choice_flags of entries with cost higher than new_vse->cost.
  if ((curr_state->viterbi_state_entries_prunable_length >=
       language_model_viterbi_list_max_num_prunable) ||
      new_vse->top_choice_flags) {
    ASSERT_HOST(!curr_state->viterbi_state_entries.empty());
    int prunable_counter = language_model_viterbi_list_max_num_prunable;
    vit.set_to_list(&(curr_state->viterbi_state_entries));
    for (vit.mark_cycle_pt(); !vit.cycled_list(); vit.forward()) {
      ViterbiStateEntry *curr_vse = vit.data();
      // Clear the appropriate top choice flags of the entries in the
      // list that have cost higher than new_entry->cost
      // (since they will not be top choices any more).
      if (curr_vse->top_choice_flags && curr_vse != new_vse &&
          curr_vse->cost > new_vse->cost) {
        curr_vse->top_choice_flags &= ~(new_vse->top_choice_flags);
      }
      if (prunable_counter > 0 && PrunablePath(*curr_vse)) --prunable_counter;
      // Update curr_state->viterbi_state_entries_prunable_max_cost.
      if (prunable_counter == 0) {
        curr_state->viterbi_state_entries_prunable_max_cost = vit.data()->cost;
        if (language_model_debug_level > 1) {
          tprintf("Set viterbi_state_entries_prunable_max_cost to %g\n",
                  curr_state->viterbi_state_entries_prunable_max_cost);
        }
        prunable_counter = -1;  // stop counting
      }
    }
  }
  // Print the newly created ViterbiStateEntry.
  if (language_model_debug_level > 2) {
    new_vse->Print("New");
    if (language_model_debug_level > 5)
      curr_state->Print("Updated viterbi list");
  }
  return true;
}
// Strips from new_vse->top_choice_flags any flags already claimed by an
// entry in lms with a cost no greater than new_vse's.
void LanguageModel::GenerateTopChoiceInfo(ViterbiStateEntry *new_vse,
                                          const ViterbiStateEntry *parent_vse,
                                          LanguageModelState *lms) {
  ViterbiStateEntry_IT it(&(lms->viterbi_state_entries));
  it.mark_cycle_pt();
  // Stop early once nothing is left to clear or entries cost more than
  // new_vse (the list is maintained in sorted order by add_sorted).
  while (!it.cycled_list() && new_vse->top_choice_flags &&
         new_vse->cost >= it.data()->cost) {
    // A no-more-expensive entry already holds these flags, so new_vse
    // cannot be the top choice for them.
    new_vse->top_choice_flags &= ~(it.data()->top_choice_flags);
    it.forward();
  }
  if (language_model_debug_level > 2) {
    tprintf("GenerateTopChoiceInfo: top_choice_flags=0x%x\n",
            new_vse->top_choice_flags);
  }
}
// Runs the Dawg (dictionary) component of the language model on blob choice
// b, extending the active dawg set inherited from parent_vse (or the
// word-start set when parent_vse is NULL). Handles hyphenated and compound
// words specially. Returns a newly allocated LanguageModelDawgInfo if the
// resulting path is still a valid dictionary prefix, or NULL otherwise.
// The caller takes ownership of the returned object.
LanguageModelDawgInfo *LanguageModel::GenerateDawgInfo(
    bool word_end,
    int curr_col, int curr_row,
    const BLOB_CHOICE &b,
    const ViterbiStateEntry *parent_vse) {
  // Initialize active_dawgs from parent_vse if it is not NULL.
  // Otherwise use very_beginning_active_dawgs_.
  if (parent_vse == NULL) {
    dawg_args_->active_dawgs = very_beginning_active_dawgs_;
    dawg_args_->permuter = NO_PERM;
  } else {
    if (parent_vse->dawg_info == NULL) return NULL;  // not a dict word path
    dawg_args_->active_dawgs = parent_vse->dawg_info->active_dawgs;
    dawg_args_->permuter = parent_vse->dawg_info->permuter;
  }
  // Deal with hyphenated words.
  if (word_end && dict_->has_hyphen_end(b.unichar_id(), curr_col == 0)) {
    if (language_model_debug_level > 0) tprintf("Hyphenated word found\n");
    return new LanguageModelDawgInfo(dawg_args_->active_dawgs,
                                     COMPOUND_PERM);
  }
  // Deal with compound words.
  if (dict_->compound_marker(b.unichar_id()) &&
      (parent_vse == NULL || parent_vse->dawg_info->permuter != NUMBER_PERM)) {
    if (language_model_debug_level > 0) tprintf("Found compound marker\n");
    // Do not allow compound operators at the beginning and end of the word.
    // Do not allow more than one compound operator per word.
    // Do not allow compounding of words with lengths shorter than
    // language_model_min_compound_length
    if (parent_vse == NULL || word_end ||
        dawg_args_->permuter == COMPOUND_PERM ||
        parent_vse->length < language_model_min_compound_length) return NULL;
    int i;
    // Check that the path terminating before the current character is a word.
    bool has_word_ending = false;
    for (i = 0; i < parent_vse->dawg_info->active_dawgs->size(); ++i) {
      const DawgPosition &pos = (*parent_vse->dawg_info->active_dawgs)[i];
      const Dawg *pdawg = pos.dawg_index < 0
          ? NULL : dict_->GetDawg(pos.dawg_index);
      // (Fixed: removed a stray empty statement after this continue.)
      if (pdawg == NULL || pos.back_to_punc) continue;
      if (pdawg->type() == DAWG_TYPE_WORD && pos.dawg_ref != NO_EDGE &&
          pdawg->end_of_word(pos.dawg_ref)) {
        has_word_ending = true;
        break;
      }
    }
    if (!has_word_ending) return NULL;
    if (language_model_debug_level > 0) tprintf("Compound word found\n");
    // Restart dictionary matching for the second half of the compound.
    return new LanguageModelDawgInfo(beginning_active_dawgs_, COMPOUND_PERM);
  }  // done dealing with compound words
  LanguageModelDawgInfo *dawg_info = NULL;
  // Call LetterIsOkay().
  // Use the normalized IDs so that all shapes of ' can be allowed in words
  // like don't.
  const GenericVector<UNICHAR_ID>& normed_ids =
      dict_->getUnicharset().normed_ids(b.unichar_id());
  DawgPositionVector tmp_active_dawgs;
  for (int i = 0; i < normed_ids.size(); ++i) {
    if (language_model_debug_level > 2)
      tprintf("Test Letter OK for unichar %d, normed %d\n",
              b.unichar_id(), normed_ids[i]);
    dict_->LetterIsOkay(dawg_args_, normed_ids[i],
                        word_end && i == normed_ids.size() - 1);
    if (dawg_args_->permuter == NO_PERM) {
      break;  // Dead end: no dawg accepts this letter.
    } else if (i < normed_ids.size() - 1) {
      // Feed the updated positions back in as the active set for the next
      // normalized component of this unichar.
      tmp_active_dawgs = *dawg_args_->updated_dawgs;
      dawg_args_->active_dawgs = &tmp_active_dawgs;
    }
    if (language_model_debug_level > 2)
      tprintf("Letter was OK for unichar %d, normed %d\n",
              b.unichar_id(), normed_ids[i]);
  }
  // Do not leave active_dawgs pointing at the stack-local tmp_active_dawgs.
  dawg_args_->active_dawgs = NULL;
  if (dawg_args_->permuter != NO_PERM) {
    dawg_info = new LanguageModelDawgInfo(dawg_args_->updated_dawgs,
                                          dawg_args_->permuter);
  } else if (language_model_debug_level > 3) {
    tprintf("Letter %s not OK!\n",
            dict_->getUnicharset().id_to_unichar(b.unichar_id()));
  }
  return dawg_info;
}
// Runs the character n-gram component of the language model on the given
// unichar, extending the context inherited from parent_vse (or from the
// previous word when parent_vse is NULL). Returns a newly allocated
// LanguageModelNgramInfo holding the accumulated costs and the context
// string trimmed to language_model_ngram_order unichars; the caller takes
// ownership.
LanguageModelNgramInfo *LanguageModel::GenerateNgramInfo(
    const char *unichar, float certainty, float denom,
    int curr_col, int curr_row, float outline_length,
    const ViterbiStateEntry *parent_vse) {
  // Initialize parent context.
  const char *pcontext_ptr = "";
  int pcontext_unichar_step_len = 0;
  if (parent_vse == NULL) {
    // At word start the context is the tail of the previous word.
    pcontext_ptr = prev_word_str_.string();
    pcontext_unichar_step_len = prev_word_unichar_step_len_;
  } else {
    pcontext_ptr = parent_vse->ngram_info->context.string();
    pcontext_unichar_step_len =
        parent_vse->ngram_info->context_unichar_step_len;
  }
  // Compute p(unichar | parent context).
  int unichar_step_len = 0;
  bool pruned = false;
  float ngram_cost;
  float ngram_and_classifier_cost =
      ComputeNgramCost(unichar, certainty, denom,
                       pcontext_ptr, &unichar_step_len,
                       &pruned, &ngram_cost);
  // Normalize just the ngram_and_classifier_cost by outline_length.
  // The ngram_cost is used by the params_model, so it needs to be left as-is,
  // and the params model cost will be normalized by outline_length.
  ngram_and_classifier_cost *=
      outline_length / language_model_ngram_rating_factor;
  // Add the ngram_cost of the parent.
  if (parent_vse != NULL) {
    ngram_and_classifier_cost +=
        parent_vse->ngram_info->ngram_and_classifier_cost;
    ngram_cost += parent_vse->ngram_info->ngram_cost;
  }
  // Shorten parent context string by unichar_step_len unichars, so that
  // after appending unichar the context stays within the n-gram order.
  int num_remove = (unichar_step_len + pcontext_unichar_step_len -
                    language_model_ngram_order);
  if (num_remove > 0) pcontext_unichar_step_len -= num_remove;
  while (num_remove > 0 && *pcontext_ptr != '\0') {
    pcontext_ptr += UNICHAR::utf8_step(pcontext_ptr);
    --num_remove;
  }
  // Decide whether to prune this ngram path and update changed accordingly.
  // Pruning is sticky: once a parent is pruned, all descendants are too.
  if (parent_vse != NULL && parent_vse->ngram_info->pruned) pruned = true;
  // Construct and return the new LanguageModelNgramInfo.
  LanguageModelNgramInfo *ngram_info = new LanguageModelNgramInfo(
      pcontext_ptr, pcontext_unichar_step_len, pruned, ngram_cost,
      ngram_and_classifier_cost);
  ngram_info->context += unichar;
  ngram_info->context_unichar_step_len += unichar_step_len;
  assert(ngram_info->context_unichar_step_len <= language_model_ngram_order);
  return ngram_info;
}
// Computes the n-gram cost of appending (possibly multi-UTF8-character)
// unichar to context, averaging the per-character probabilities.
//
// unichar          - UTF-8 string for the blob choice being scored.
// certainty        - classifier certainty of the blob choice.
// denom            - normalizing denominator from ComputeDenom().
// context          - preceding unichars (parent context).
// unichar_step_len - out: number of UTF-8 characters consumed from unichar.
// found_small_prob - out: set true if the probability fell below
//                    language_model_ngram_small_prob (path may be pruned).
// ngram_cost       - out: -log2 of the (floored) averaged probability.
// Returns the combined ngram + classifier cost.
float LanguageModel::ComputeNgramCost(const char *unichar,
                                      float certainty,
                                      float denom,
                                      const char *context,
                                      int *unichar_step_len,
                                      bool *found_small_prob,
                                      float *ngram_cost) {
  const char *context_ptr = context;
  // Heap copy of context, grown with previously-consumed characters of
  // unichar; allocated lazily only for multi-character unichars.
  char *modified_context = NULL;
  char *modified_context_end = NULL;
  const char *unichar_ptr = unichar;
  const char *unichar_end = unichar_ptr + strlen(unichar_ptr);
  float prob = 0.0f;
  int step = 0;
  while (unichar_ptr < unichar_end &&
         (step = UNICHAR::utf8_step(unichar_ptr)) > 0) {
    if (language_model_debug_level > 1) {
      tprintf("prob(%s | %s)=%g\n", unichar_ptr, context_ptr,
              dict_->ProbabilityInContext(context_ptr, -1, unichar_ptr, step));
    }
    prob += dict_->ProbabilityInContext(context_ptr, -1, unichar_ptr, step);
    ++(*unichar_step_len);
    if (language_model_ngram_use_only_first_uft8_step) break;
    unichar_ptr += step;
    // If there are multiple UTF8 characters present in unichar, context is
    // updated to include the previously examined characters from str,
    // unless use_only_first_uft8_step is true.
    if (unichar_ptr < unichar_end) {
      if (modified_context == NULL) {
        int context_len = strlen(context);
        modified_context =
            new char[context_len + strlen(unichar_ptr) + step + 1];
        strncpy(modified_context, context, context_len);
        modified_context_end = modified_context + context_len;
        context_ptr = modified_context;
      }
      // Append the just-consumed character and keep the copy NUL-terminated.
      strncpy(modified_context_end, unichar_ptr - step, step);
      modified_context_end += step;
      *modified_context_end = '\0';
    }
  }
  // NOTE(review): if unichar is empty or not valid UTF-8 at its first byte,
  // *unichar_step_len stays 0 and this divides by zero — presumably callers
  // always pass a non-empty valid unichar; confirm at call sites.
  prob /= static_cast<float>(*unichar_step_len);  // normalize
  if (prob < language_model_ngram_small_prob) {
    if (language_model_debug_level > 0) tprintf("Found small prob %g\n", prob);
    *found_small_prob = true;
    prob = language_model_ngram_small_prob;  // floor to avoid -inf cost
  }
  *ngram_cost = -1.0*log2(prob);
  float ngram_and_classifier_cost =
      -1.0*log2(CertaintyScore(certainty)/denom) +
      *ngram_cost * language_model_ngram_scale_factor;
  if (language_model_debug_level > 1) {
    tprintf("-log [ p(%s) * p(%s | %s) ] = -log2(%g*%g) = %g\n", unichar,
            unichar, context_ptr, CertaintyScore(certainty)/denom, prob,
            ngram_and_classifier_cost);
  }
  if (modified_context != NULL) delete[] modified_context;
  return ngram_and_classifier_cost;
}
// Computes the normalizing denominator used when mixing classifier
// certainties with n-gram probabilities: the sum of CertaintyScore() over
// every choice in curr_list, plus a crude estimate of what the scores of all
// the unicharset entries NOT returned by the classifier would contribute.
float LanguageModel::ComputeDenom(BLOB_CHOICE_LIST *curr_list) {
  if (curr_list->empty()) return 1.0f;
  int num_choices = 0;
  float total = 0.0f;
  BLOB_CHOICE_IT it(curr_list);
  for (it.mark_cycle_pt(); !it.cycled_list(); it.forward()) {
    ASSERT_HOST(it.data() != NULL);
    total += CertaintyScore(it.data()->certainty());
    ++num_choices;
  }
  assert(num_choices != 0);
  // Ideally we would have a classifier score for every unichar in the
  // unicharset at this position. That is too slow, so approximate the
  // contribution of each "missing" classification with a fixed non-match
  // score.
  total += CertaintyScore(language_model_ngram_nonmatch_score) *
      (dict_->getUnicharset().size() - num_choices);
  return total;
}
void LanguageModel::FillConsistencyInfo(
int curr_col,
bool word_end,
BLOB_CHOICE *b,
ViterbiStateEntry *parent_vse,
WERD_RES *word_res,
LMConsistencyInfo *consistency_info) {
const UNICHARSET &unicharset = dict_->getUnicharset();
UNICHAR_ID unichar_id = b->unichar_id();
BLOB_CHOICE* parent_b = parent_vse != NULL ? parent_vse->curr_b : NULL;
// Check punctuation validity.
if (unicharset.get_ispunctuation(unichar_id)) consistency_info->num_punc++;
if (dict_->GetPuncDawg() != NULL && !consistency_info->invalid_punc) {
if (dict_->compound_marker(unichar_id) && parent_b != NULL &&
(unicharset.get_isalpha(parent_b->unichar_id()) ||
unicharset.get_isdigit(parent_b->unichar_id()))) {
// reset punc_ref for compound words
consistency_info->punc_ref = NO_EDGE;
} else {
bool is_apos = dict_->is_apostrophe(unichar_id);
bool prev_is_numalpha = (parent_b != NULL &&
(unicharset.get_isalpha(parent_b->unichar_id()) ||
unicharset.get_isdigit(parent_b->unichar_id())));
UNICHAR_ID pattern_unichar_id =
(unicharset.get_isalpha(unichar_id) ||
unicharset.get_isdigit(unichar_id) ||
(is_apos && prev_is_numalpha)) ?
Dawg::kPatternUnicharID : unichar_id;
if (consistency_info->punc_ref == NO_EDGE ||
pattern_unichar_id != Dawg::kPatternUnicharID ||
dict_->GetPuncDawg()->edge_letter(consistency_info->punc_ref) !=
Dawg::kPatternUnicharID) {
NODE_REF node = Dict::GetStartingNode(dict_->GetPuncDawg(),
consistency_info->punc_ref);
consistency_info->punc_ref =
(node != NO_EDGE) ? dict_->GetPuncDawg()->edge_char_of(
node, pattern_unichar_id, word_end) : NO_EDGE;
if (consistency_info->punc_ref == NO_EDGE) {
consistency_info->invalid_punc = true;
}
}
}
}
// Update case related counters.
if (parent_vse != NULL && !word_end && dict_->compound_marker(unichar_id)) {
// Reset counters if we are dealing with a compound word.
consistency_info->num_lower = 0;
consistency_info->num_non_first_upper = 0;
}
else if (unicharset.get_islower(unichar_id)) {
consistency_info->num_lower++;
} else if ((parent_b != NULL) && unicharset.get_isupper(unichar_id)) {
if (unicharset.get_isupper(parent_b->unichar_id()) ||
consistency_info->num_lower > 0 ||
consistency_info->num_non_first_upper > 0) {
consistency_info->num_non_first_upper++;
}
}
// Initialize consistency_info->script_id (use script of unichar_id
// if it is not Common, use script id recorded by the parent otherwise).
// Set inconsistent_script to true if the script of the current unichar
// is not consistent with that of the parent.
consistency_info->script_id = unicharset.get_script(unichar_id);
// Hiragana and Katakana can mix with Han.
if (dict_->getUnicharset().han_sid() != dict_->getUnicharset().null_sid()) {
if ((unicharset.hiragana_sid() != unicharset.null_sid() &&
consistency_info->script_id == unicharset.hiragana_sid()) ||
(unicharset.katakana_sid() != unicharset.null_sid() &&
consistency_info->script_id == unicharset.katakana_sid())) {
consistency_info->script_id = dict_->getUnicharset().han_sid();
}
}
if (parent_vse != NULL &&
(parent_vse->consistency_info.script_id !=
dict_->getUnicharset().common_sid())) {
int parent_script_id = parent_vse->consistency_info.script_id;
// If script_id is Common, use script id of the parent instead.
if (consistency_info->script_id == dict_->getUnicharset().common_sid()) {
consistency_info->script_id = parent_script_id;
}
if (consistency_info->script_id != parent_script_id) {
consistency_info->inconsistent_script = true;
}
}
// Update chartype related counters.
if (unicharset.get_isalpha(unichar_id)) {
consistency_info->num_alphas++;
} else if (unicharset.get_isdigit(unichar_id)) {
consistency_info->num_digits++;
} else if (!unicharset.get_ispunctuation(unichar_id)) {
consistency_info->num_other++;
}
// Check font and spacing consistency.
if (fontinfo_table_->size() > 0 && parent_b != NULL) {
int fontinfo_id = -1;
if (parent_b->fontinfo_id() == b->fontinfo_id() ||
parent_b->fontinfo_id2() == b->fontinfo_id()) {
fontinfo_id = b->fontinfo_id();
} else if (parent_b->fontinfo_id() == b->fontinfo_id2() ||
parent_b->fontinfo_id2() == b->fontinfo_id2()) {
fontinfo_id = b->fontinfo_id2();
}
if(language_model_debug_level > 1) {
tprintf("pfont %s pfont %s font %s font2 %s common %s(%d)\n",
(parent_b->fontinfo_id() >= 0) ?
fontinfo_table_->get(parent_b->fontinfo_id()).name : "" ,
(parent_b->fontinfo_id2() >= 0) ?
fontinfo_table_->get(parent_b->fontinfo_id2()).name : "",
(b->fontinfo_id() >= 0) ?
fontinfo_table_->get(b->fontinfo_id()).name : "",
(fontinfo_id >= 0) ? fontinfo_table_->get(fontinfo_id).name : "",
(fontinfo_id >= 0) ? fontinfo_table_->get(fontinfo_id).name : "",
fontinfo_id);
}
if (!word_res->blob_widths.empty()) { // if we have widths/gaps info
bool expected_gap_found = false;
float expected_gap;
int temp_gap;
if (fontinfo_id >= 0) { // found a common font
ASSERT_HOST(fontinfo_id < fontinfo_table_->size());
if (fontinfo_table_->get(fontinfo_id).get_spacing(
parent_b->unichar_id(), unichar_id, &temp_gap)) {
expected_gap = temp_gap;
expected_gap_found = true;
}
} else {
consistency_info->inconsistent_font = true;
// Get an average of the expected gaps in each font
int num_addends = 0;
expected_gap = 0;
int temp_fid;
for (int i = 0; i < 4; ++i) {
if (i == 0) {
temp_fid = parent_b->fontinfo_id();
} else if (i == 1) {
temp_fid = parent_b->fontinfo_id2();
} else if (i == 2) {
temp_fid = b->fontinfo_id();
} else {
temp_fid = b->fontinfo_id2();
}
ASSERT_HOST(temp_fid < 0 || fontinfo_table_->size());
if (temp_fid >= 0 && fontinfo_table_->get(temp_fid).get_spacing(
parent_b->unichar_id(), unichar_id, &temp_gap)) {
expected_gap += temp_gap;
num_addends++;
}
}
expected_gap_found = (num_addends > 0);
if (num_addends > 0) {
expected_gap /= static_cast<float>(num_addends);
}
}
if (expected_gap_found) {
float actual_gap =
static_cast<float>(word_res->GetBlobsGap(curr_col-1));
float gap_ratio = expected_gap / actual_gap;
// TODO(rays) The gaps seem to be way off most of the time, saved by
// the error here that the ratio was compared to 1/2, when it should
// have been 0.5f. Find the source of the gaps discrepancy and put
// the 0.5f here in place of 0.0f.
// Test on 2476595.sj, pages 0 to 6. (In French.)
if (gap_ratio < 0.0f || gap_ratio > 2.0f) {
consistency_info->num_inconsistent_spaces++;
}
if (language_model_debug_level > 1) {
tprintf("spacing for %s(%d) %s(%d) col %d: expected %g actual %g\n",
unicharset.id_to_unichar(parent_b->unichar_id()),
parent_b->unichar_id(), unicharset.id_to_unichar(unichar_id),
unichar_id, curr_col, expected_gap, actual_gap);
}
}
}
}
}
// Returns the adjusted cost of the path represented by vse. When a trained
// params model is available, its cost (scaled by outline length) is used;
// otherwise a hand-tuned multiplicative penalty is applied to either the
// ngram cost or the raw ratings sum.
float LanguageModel::ComputeAdjustedPathCost(ViterbiStateEntry *vse) {
  ASSERT_HOST(vse != NULL);
  if (!params_model_.Initialized()) {
    // Heuristic path: accumulate penalties on top of a neutral factor of 1.
    float penalty = 1.0f;
    if (vse->dawg_info == NULL || vse->dawg_info->permuter != FREQ_DAWG_PERM) {
      penalty += language_model_penalty_non_freq_dict_word;
    }
    if (vse->dawg_info == NULL) {
      // Non-dictionary words pay a base penalty plus a per-unichar increment
      // beyond the minimum compound length.
      penalty += language_model_penalty_non_dict_word;
      if (vse->length > language_model_min_compound_length) {
        penalty += ((vse->length - language_model_min_compound_length) *
                    language_model_penalty_increment);
      }
    }
    if (vse->associate_stats.shape_cost > 0) {
      penalty += vse->associate_stats.shape_cost /
          static_cast<float>(vse->length);
    }
    if (language_model_ngram_on) {
      ASSERT_HOST(vse->ngram_info != NULL);
      return vse->ngram_info->ngram_and_classifier_cost * penalty;
    }
    penalty += ComputeConsistencyAdjustment(vse->dawg_info,
                                            vse->consistency_info);
    return vse->ratings_sum * penalty;
  }
  // Params-model path: extract features and let the trained model score them.
  float features[PTRAIN_NUM_FEATURE_TYPES];
  ExtractFeaturesFromPath(*vse, features);
  float cost = params_model_.ComputeCost(features);
  if (language_model_debug_level > 3) {
    tprintf("ComputeAdjustedPathCost %g ParamsModel features:\n", cost);
    if (language_model_debug_level >= 5) {
      for (int f = 0; f < PTRAIN_NUM_FEATURE_TYPES; ++f) {
        tprintf("%s=%g\n", kParamsTrainingFeatureTypeName[f], features[f]);
      }
    }
  }
  return cost * vse->outline_length;
}
// Constructs a WERD_CHOICE for the path ending at vse, records blamer /
// params-training information, and updates word_res's raw and best choices
// (and best_choice_bundle) if the new word improves on them. Ownership of
// the constructed word is handed over to word_res via LogNewCookedChoice,
// or the word is deleted along the early-exit paths.
void LanguageModel::UpdateBestChoice(
    ViterbiStateEntry *vse,
    LMPainPoints *pain_points,
    WERD_RES *word_res,
    BestChoiceBundle *best_choice_bundle,
    BlamerBundle *blamer_bundle) {
  bool truth_path;
  WERD_CHOICE *word = ConstructWord(vse, word_res, &best_choice_bundle->fixpt,
                                    blamer_bundle, &truth_path);
  ASSERT_HOST(word != NULL);
  if (dict_->stopper_debug_level >= 1) {
    STRING word_str;
    word->string_and_lengths(&word_str, NULL);
    vse->Print(word_str.string());
  }
  if (language_model_debug_level > 0) {
    word->print("UpdateBestChoice() constructed word");
  }
  // Record features from the current path if necessary.
  ParamsTrainingHypothesis curr_hyp;
  if (blamer_bundle != NULL) {
    // Keep the dawg info's permuter in sync with the constructed word before
    // extracting features from the path.
    if (vse->dawg_info != NULL) vse->dawg_info->permuter =
      static_cast<PermuterType>(word->permuter());
    ExtractFeaturesFromPath(*vse, curr_hyp.features);
    word->string_and_lengths(&(curr_hyp.str), NULL);
    curr_hyp.cost = vse->cost;  // record cost for error rate computations
    if (language_model_debug_level > 0) {
      tprintf("Raw features extracted from %s (cost=%g) [ ",
              curr_hyp.str.string(), curr_hyp.cost);
      for (int deb_i = 0; deb_i < PTRAIN_NUM_FEATURE_TYPES; ++deb_i) {
        tprintf("%g ", curr_hyp.features[deb_i]);
      }
      tprintf("]\n");
    }
    // Record the current hypothesis in params_training_bundle.
    blamer_bundle->AddHypothesis(curr_hyp);
    if (truth_path)
      blamer_bundle->UpdateBestRating(word->rating());
  }
  if (blamer_bundle != NULL && blamer_bundle->GuidedSegsearchStillGoing()) {
    // The word was constructed solely for blamer_bundle->AddHypothesis, so
    // we no longer need it.
    delete word;
    return;
  }
  if (word_res->chopped_word != NULL && !word_res->chopped_word->blobs.empty())
    word->SetScriptPositions(false, word_res->chopped_word);
  // Update and log new raw_choice if needed.
  if (word_res->raw_choice == NULL ||
      word->rating() < word_res->raw_choice->rating()) {
    if (word_res->LogNewRawChoice(word) && language_model_debug_level > 0)
      tprintf("Updated raw choice\n");
  }
  // Set the modified rating for best choice to vse->cost and log best choice.
  word->set_rating(vse->cost);
  // Call LogNewChoice() for best choice from Dict::adjust_word() since it
  // computes adjust_factor that is used by the adaption code (e.g. by
  // ClassifyAdaptableWord() to compute adaption acceptance thresholds).
  // Note: the rating of the word is not adjusted.
  dict_->adjust_word(word, vse->dawg_info == NULL,
                     vse->consistency_info.xht_decision, 0.0,
                     false, language_model_debug_level > 0);
  // Hand ownership of the word over to the word_res.
  if (!word_res->LogNewCookedChoice(dict_->tessedit_truncate_wordchoice_log,
                                    dict_->stopper_debug_level >= 1, word)) {
    // The word was so bad that it was deleted.
    return;
  }
  if (word_res->best_choice == word) {
    // Word was the new best.
    if (dict_->AcceptableChoice(*word, vse->consistency_info.xht_decision) &&
        AcceptablePath(*vse)) {
      acceptable_choice_found_ = true;
    }
    // Update best_choice_bundle.
    best_choice_bundle->updated = true;
    best_choice_bundle->best_vse = vse;
    if (language_model_debug_level > 0) {
      tprintf("Updated best choice\n");
      word->print_state("New state ");
    }
    // Update hyphen state if we are dealing with a dictionary word.
    if (vse->dawg_info != NULL) {
      if (dict_->has_hyphen_end(*word)) {
        dict_->set_hyphen_word(*word, *(dawg_args_->active_dawgs));
      } else {
        dict_->reset_hyphen_vars(true);
      }
    }
    if (blamer_bundle != NULL) {
      blamer_bundle->set_best_choice_is_dict_and_top_choice(
          vse->dawg_info != NULL && vse->top_choice_flags);
    }
  }
  if (wordrec_display_segmentations && word_res->chopped_word != NULL) {
    word->DisplaySegmentation(word_res->chopped_word);
  }
}
void LanguageModel::ExtractFeaturesFromPath(
const ViterbiStateEntry &vse, float features[]) {
memset(features, 0, sizeof(float) * PTRAIN_NUM_FEATURE_TYPES);
// Record dictionary match info.
int len = vse.length <= kMaxSmallWordUnichars ? 0 :
vse.length <= kMaxMediumWordUnichars ? 1 : 2;
if (vse.dawg_info != NULL) {
int permuter = vse.dawg_info->permuter;
if (permuter == NUMBER_PERM || permuter == USER_PATTERN_PERM) {
if (vse.consistency_info.num_digits == vse.length) {
features[PTRAIN_DIGITS_SHORT+len] = 1.0;
} else {
features[PTRAIN_NUM_SHORT+len] = 1.0;
}
} else if (permuter == DOC_DAWG_PERM) {
features[PTRAIN_DOC_SHORT+len] = 1.0;
} else if (permuter == SYSTEM_DAWG_PERM || permuter == USER_DAWG_PERM ||
permuter == COMPOUND_PERM) {
features[PTRAIN_DICT_SHORT+len] = 1.0;
} else if (permuter == FREQ_DAWG_PERM) {
features[PTRAIN_FREQ_SHORT+len] = 1.0;
}
}
// Record shape cost feature (normalized by path length).
features[PTRAIN_SHAPE_COST_PER_CHAR] =
vse.associate_stats.shape_cost / static_cast<float>(vse.length);
// Record ngram cost. (normalized by the path length).
features[PTRAIN_NGRAM_COST_PER_CHAR] = 0.0;
if (vse.ngram_info != NULL) {
features[PTRAIN_NGRAM_COST_PER_CHAR] =
vse.ngram_info->ngram_cost / static_cast<float>(vse.length);
}
// Record consistency-related features.
// Disabled this feature for due to its poor performance.
// features[PTRAIN_NUM_BAD_PUNC] = vse.consistency_info.NumInconsistentPunc();
features[PTRAIN_NUM_BAD_CASE] = vse.consistency_info.NumInconsistentCase();
features[PTRAIN_XHEIGHT_CONSISTENCY] = vse.consistency_info.xht_decision;
features[PTRAIN_NUM_BAD_CHAR_TYPE] = vse.dawg_info == NULL ?
vse.consistency_info.NumInconsistentChartype() : 0.0;
features[PTRAIN_NUM_BAD_SPACING] =
vse.consistency_info.NumInconsistentSpaces();
// Disabled this feature for now due to its poor performance.
// features[PTRAIN_NUM_BAD_FONT] = vse.consistency_info.inconsistent_font;
// Classifier-related features.
features[PTRAIN_RATING_PER_CHAR] =
vse.ratings_sum / static_cast<float>(vse.outline_length);
}
// Constructs a WERD_CHOICE for the path ending at vse by tracing the
// parent_vse pointers back to the word start, filling in blob choices from
// last to first. When blamer_bundle is present, *truth_path is set to
// whether this segmentation matches the blamer's correct segmentation.
// Also re-computes the width-to-height ratio variance over the whole word
// and folds it back into vse->associate_stats.shape_cost. The caller owns
// the returned word.
WERD_CHOICE *LanguageModel::ConstructWord(
    ViterbiStateEntry *vse,
    WERD_RES *word_res,
    DANGERR *fixpt,
    BlamerBundle *blamer_bundle,
    bool *truth_path) {
  if (truth_path != NULL) {
    *truth_path =
      (blamer_bundle != NULL &&
       vse->length == blamer_bundle->correct_segmentation_length());
  }
  BLOB_CHOICE *curr_b = vse->curr_b;
  ViterbiStateEntry *curr_vse = vse;
  int i;
  bool compound = dict_->hyphenated();  // treat hyphenated words as compound
  // Re-compute the variance of the width-to-height ratios (since we now
  // can compute the mean over the whole word).
  float full_wh_ratio_mean = 0.0f;
  if (vse->associate_stats.full_wh_ratio_var != 0.0f) {
    // Remove the old (partial) variance from the shape cost; it is re-added
    // after the loop below re-accumulates it against the full-word mean.
    vse->associate_stats.shape_cost -= vse->associate_stats.full_wh_ratio_var;
    full_wh_ratio_mean = (vse->associate_stats.full_wh_ratio_total /
                          static_cast<float>(vse->length));
    vse->associate_stats.full_wh_ratio_var = 0.0f;
  }
  // Construct a WERD_CHOICE by tracing parent pointers.
  WERD_CHOICE *word = new WERD_CHOICE(word_res->uch_set, vse->length);
  word->set_length(vse->length);
  int total_blobs = 0;
  // Walk from the last unichar of the word back to the first.
  for (i = (vse->length-1); i >= 0; --i) {
    if (blamer_bundle != NULL && truth_path != NULL && *truth_path &&
        !blamer_bundle->MatrixPositionCorrect(i, curr_b->matrix_cell())) {
      *truth_path = false;
    }
    // The number of blobs used for this choice is row - col + 1.
    int num_blobs = curr_b->matrix_cell().row - curr_b->matrix_cell().col + 1;
    total_blobs += num_blobs;
    word->set_blob_choice(i, num_blobs, curr_b);
    // Update the width-to-height ratio variance. Useful non-space delimited
    // languages to ensure that the blobs are of uniform width.
    // Skip leading and trailing punctuation when computing the variance.
    if ((full_wh_ratio_mean != 0.0f &&
         ((curr_vse != vse && curr_vse->parent_vse != NULL) ||
          !dict_->getUnicharset().get_ispunctuation(curr_b->unichar_id())))) {
      vse->associate_stats.full_wh_ratio_var +=
        pow(full_wh_ratio_mean - curr_vse->associate_stats.full_wh_ratio, 2);
      if (language_model_debug_level > 2) {
        tprintf("full_wh_ratio_var += (%g-%g)^2\n",
                full_wh_ratio_mean, curr_vse->associate_stats.full_wh_ratio);
      }
    }
    // Mark the word as compound if compound permuter was set for any of
    // the unichars on the path (usually this will happen for unichars
    // that are compounding operators, like "-" and "/").
    if (!compound && curr_vse->dawg_info &&
        curr_vse->dawg_info->permuter == COMPOUND_PERM) compound = true;
    // Update curr_* pointers.
    curr_vse = curr_vse->parent_vse;
    // The first entry of the path has no parent; break with i == 0 (checked
    // by the assert below).
    if (curr_vse == NULL) break;
    curr_b = curr_vse->curr_b;
  }
  ASSERT_HOST(i == 0);  // check that we recorded all the unichar ids
  ASSERT_HOST(total_blobs == word_res->ratings->dimension());
  // Re-adjust shape cost to include the updated width-to-height variance.
  if (full_wh_ratio_mean != 0.0f) {
    vse->associate_stats.shape_cost += vse->associate_stats.full_wh_ratio_var;
  }
  word->set_rating(vse->ratings_sum);
  word->set_certainty(vse->min_certainty);
  word->set_x_heights(vse->consistency_info.BodyMinXHeight(),
                      vse->consistency_info.BodyMaxXHeight());
  if (vse->dawg_info != NULL) {
    word->set_permuter(compound ? COMPOUND_PERM : vse->dawg_info->permuter);
  } else if (language_model_ngram_on && !vse->ngram_info->pruned) {
    word->set_permuter(NGRAM_PERM);
  } else if (vse->top_choice_flags) {
    word->set_permuter(TOP_CHOICE_PERM);
  } else {
    word->set_permuter(NO_PERM);
  }
  word->set_dangerous_ambig_found_(!dict_->NoDangerousAmbig(word, fixpt, true,
                                                            word_res->ratings));
  return word;
}
} // namespace tesseract
| 1080228-arabicocr11 | wordrec/language_model.cpp | C++ | asf20 | 63,473 |
/* -*-C-*-
********************************************************************************
*
* File: gradechop.h (Formerly gradechop.h)
* Description:
* Author: Mark Seaman, SW Productivity
* Created: Fri Oct 16 14:37:00 1987
* Modified: Tue Jul 9 16:40:39 1991 (Mark Seaman) marks@hpgrlt
* Language: C
* Package: N/A
* Status: Reusable Software Component
*
* (c) Copyright 1987, Hewlett-Packard Company.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
*********************************************************************************/
#ifndef GRADECHOP_H
#define GRADECHOP_H
/*----------------------------------------------------------------------
              I n c l u d e s
----------------------------------------------------------------------*/
#include "seam.h"
#include "ndminx.h"
/*----------------------------------------------------------------------
              T y p e s
----------------------------------------------------------------------*/
typedef inT16 BOUNDS_RECT[4];
/*----------------------------------------------------------------------
              M a c r o s
----------------------------------------------------------------------*/
/**********************************************************************
 * partial_split_priority
 *
 * Assign a priority to this split based on the features that it has.
 * Grade it according to the different rating schemes and return the
 * value of its goodness.
 *
 * NOTE: a stray trailing line-continuation backslash after the macro
 * body was removed; it spliced the following (comment) line into this
 * definition.
 **********************************************************************/
#define partial_split_priority(split)  \
  (grade_split_length (split) +        \
   grade_sharpness (split))
/**********************************************************************
 * split_bounds_overlap
 *
 * Check to see if this split might overlap with this outline. Return
 * TRUE if there is a positive overlap in the bounding boxes of the two.
 **********************************************************************/
#define split_bounds_overlap(split,outline)                                   \
  (outline->topleft.x <= MAX (split->point1->pos.x,split->point2->pos.x) &&   \
   outline->botright.x >= MIN (split->point1->pos.x,split->point2->pos.x) &&  \
   outline->botright.y <= MAX (split->point1->pos.y,split->point2->pos.y) &&  \
   outline->topleft.y >= MIN (split->point1->pos.y,split->point2->pos.y))
#endif
| 1080228-arabicocr11 | wordrec/gradechop.h | C | asf20 | 2,865 |
///////////////////////////////////////////////////////////////////////
// File: pain_points.cpp
// Description: Functions that utilize the knowledge about the properties
// of the paths explored by the segmentation search in order
// to generate "pain points" - the locations in the ratings matrix
// which should be classified next.
// Author: Rika Antonova
// Created: Mon Jun 20 11:26:43 PST 2012
//
// (C) Copyright 2012, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#include "lm_pain_points.h"
#include "associate.h"
#include "dict.h"
#include "genericheap.h"
#include "lm_state.h"
#include "matrix.h"
#include "pageres.h"
namespace tesseract {
const float LMPainPoints::kDefaultPainPointPriorityAdjustment = 2.0f;
const float LMPainPoints::kLooseMaxCharWhRatio = 2.5f;
// Pops the top pain point from the first non-empty heap, scanning the heaps
// in fixed LMPainPointsType order. Writes the point's coordinate and
// priority to *pp and *priority, and returns the type whose heap supplied
// it, or LM_PPTYPE_NUM if every heap is empty.
LMPainPointsType LMPainPoints::Deque(MATRIX_COORD *pp, float *priority) {
  int h = 0;
  while (h < LM_PPTYPE_NUM && pain_points_heaps_[h].empty()) ++h;
  if (h == LM_PPTYPE_NUM) return LM_PPTYPE_NUM;  // nothing left to process
  *priority = pain_points_heaps_[h].PeekTop().key;
  *pp = pain_points_heaps_[h].PeekTop().data;
  pain_points_heaps_[h].Pop(NULL);
  return static_cast<LMPainPointsType>(h);
}
// Generates initial pain points for the ratings matrix of word_res: for
// every in-band cell that is not yet classified, if its left or bottom
// neighbor already has classifications, request classification of this cell.
// (Fix: removed an unused local AssociateStats variable.)
void LMPainPoints::GenerateInitial(WERD_RES *word_res) {
  MATRIX *ratings = word_res->ratings;
  for (int col = 0; col < ratings->dimension(); ++col) {
    // Only consider rows within the matrix band.
    int row_end = MIN(ratings->dimension(), col + ratings->bandwidth() + 1);
    for (int row = col + 1; row < row_end; ++row) {
      MATRIX_COORD coord(col, row);
      // Skip cells that already hold classifications.
      // NOTE(review): an invalid coord falls through here; presumably
      // GeneratePainPoint() rejects or remaps it downstream - TODO confirm.
      if (coord.Valid(*ratings) &&
          ratings->get(col, row) != NOT_CLASSIFIED) continue;
      // Add an initial pain point if needed.
      if (ratings->Classified(col, row - 1, dict_->WildcardID()) ||
          (col + 1 < ratings->dimension() &&
           ratings->Classified(col + 1, row, dict_->WildcardID()))) {
        GeneratePainPoint(col, row, LM_PPTYPE_SHAPE, 0.0,
                          true, max_char_wh_ratio_, word_res);
      }
    }
  }
}
// Generates a pain point for each pair of adjacent blobs on the given
// Viterbi path, requesting classification of the merged cell
// (parent.col, current.row) when it is not already classified. The
// priority of each point is the average per-outline rating of the rest of
// the path (see the detailed rationale below).
void LMPainPoints::GenerateFromPath(float rating_cert_scale,
                                    ViterbiStateEntry *vse,
                                    WERD_RES *word_res) {
  ViterbiStateEntry *curr_vse = vse;
  BLOB_CHOICE *curr_b = vse->curr_b;
  // The following pain point generation and priority calculation approaches
  // prioritize exploring paths with low average rating of the known part of
  // the path, while not relying on the ratings of the pieces to be combined.
  //
  // A pain point to combine the neighbors is generated for each pair of
  // neighboring blobs on the path (the path is represented by vse argument
  // given to GenerateFromPath()). The priority of each pain point is set to
  // the average rating (per outline length) of the path, not including the
  // ratings of the blobs to be combined.
  // The ratings of the blobs to be combined are not used to calculate the
  // priority, since it is not possible to determine from their magnitude
  // whether it will be beneficial to combine the blobs. The reason is that
  // chopped junk blobs (/ | - ') can have very good (low) ratings, however
  // combining them will be beneficial. Blobs with high ratings might be
  // over-joined pieces of characters, but also could be blobs from an unseen
  // font or chopped pieces of complex characters.
  while (curr_vse->parent_vse != NULL) {
    ViterbiStateEntry* parent_vse = curr_vse->parent_vse;
    const MATRIX_COORD& curr_cell = curr_b->matrix_cell();
    const MATRIX_COORD& parent_cell = parent_vse->curr_b->matrix_cell();
    // The merged cell spans from the parent's start column to the current
    // blob's end row.
    MATRIX_COORD pain_coord(parent_cell.col, curr_cell.row);
    if (!pain_coord.Valid(*word_res->ratings) ||
        !word_res->ratings->Classified(parent_cell.col, curr_cell.row,
                                       dict_->WildcardID())) {
      // rat_subtr contains ratings sum of the two adjacent blobs to be merged.
      // rat_subtr will be subtracted from the ratings sum of the path, since
      // the blobs will be joined into a new blob, whose rating is yet unknown.
      float rat_subtr = curr_b->rating() + parent_vse->curr_b->rating();
      // ol_subtr contains the outline length of the blobs that will be joined.
      float ol_subtr =
        AssociateUtils::ComputeOutlineLength(rating_cert_scale, *curr_b) +
        AssociateUtils::ComputeOutlineLength(rating_cert_scale,
                                             *(parent_vse->curr_b));
      // ol_dif is the outline of the path without the two blobs to be joined.
      float ol_dif = vse->outline_length - ol_subtr;
      // priority is set to the average rating of the path per unit of outline,
      // not counting the ratings of the pieces to be joined.
      float priority = ol_dif > 0 ? (vse->ratings_sum-rat_subtr)/ol_dif : 0.0;
      GeneratePainPoint(pain_coord.col, pain_coord.row, LM_PPTYPE_PATH,
                        priority, true, max_char_wh_ratio_, word_res);
    } else if (debug_level_ > 3) {
      tprintf("NO pain point (Classified) for col=%d row=%d type=%s\n",
              pain_coord.col, pain_coord.row,
              LMPainPointsTypeName[LM_PPTYPE_PATH]);
      BLOB_CHOICE_IT b_it(word_res->ratings->get(pain_coord.col,
                                                 pain_coord.row));
      for (b_it.mark_cycle_pt(); !b_it.cycled_list(); b_it.forward()) {
        BLOB_CHOICE* choice = b_it.data();
        choice->print_full();
      }
    }
    // Step one entry up the path.
    curr_vse = parent_vse;
    curr_b = curr_vse->curr_b;
  }
}
void LMPainPoints::GenerateFromAmbigs(const DANGERR &fixpt,
ViterbiStateEntry *vse,
WERD_RES *word_res) {
// Begins and ends in DANGERR vector now record the blob indices as used
// by the ratings matrix.
for (int d = 0; d < fixpt.size(); ++d) {
const DANGERR_INFO &danger = fixpt[d];
// Only use dangerous ambiguities.
if (danger.dangerous) {
GeneratePainPoint(danger.begin, danger.end - 1,
LM_PPTYPE_AMBIG, vse->cost, true,
kLooseMaxCharWhRatio, word_res);
}
}
}
// Generates a pain point at (col, row) of the given type and pushes it onto
// the corresponding heap. Returns false if the cell is already classified,
// the (possibly extended) blob has a bad shape, or the heap is full;
// returns true once the point is queued. For fixed-pitch text with
// ok_to_extend, the row may be advanced to absorb right-overlapping blobs.
bool LMPainPoints::GeneratePainPoint(
    int col, int row, LMPainPointsType pp_type, float special_priority,
    bool ok_to_extend, float max_char_wh_ratio,
    WERD_RES *word_res) {
  MATRIX_COORD coord(col, row);
  // Nothing to do if the cell already holds classifications.
  if (coord.Valid(*word_res->ratings) &&
      word_res->ratings->Classified(col, row, dict_->WildcardID())) {
    return false;
  }
  if (debug_level_ > 3) {
    tprintf("Generating pain point for col=%d row=%d type=%s\n",
            col, row, LMPainPointsTypeName[pp_type]);
  }
  // Compute associate stats.
  AssociateStats associate_stats;
  AssociateUtils::ComputeStats(col, row, NULL, 0, fixed_pitch_,
                               max_char_wh_ratio, word_res, debug_level_,
                               &associate_stats);
  // For fixed-pitch fonts/languages: if the current combined blob overlaps
  // the next blob on the right and it is ok to extend the blob, try extending
  // the blob until there is no overlap with the next blob on the right or
  // until the width-to-height ratio becomes too large.
  if (ok_to_extend) {
    while (associate_stats.bad_fixed_pitch_right_gap &&
           row + 1 < word_res->ratings->dimension() &&
           !associate_stats.bad_fixed_pitch_wh_ratio) {
      AssociateUtils::ComputeStats(col, ++row, NULL, 0, fixed_pitch_,
                                   max_char_wh_ratio, word_res, debug_level_,
                                   &associate_stats);
    }
  }
  if (associate_stats.bad_shape) {
    if (debug_level_ > 3) {
      tprintf("Discarded pain point with a bad shape\n");
    }
    return false;
  }
  // Insert the new pain point into pain_points_heap_.
  if (pain_points_heaps_[pp_type].size() < max_heap_size_) {
    // Compute pain point priority: path-type points carry their own
    // priority; others use the gap sum from the associate stats.
    float priority;
    if (pp_type == LM_PPTYPE_PATH) {
      priority = special_priority;
    } else {
      priority = associate_stats.gap_sum;
    }
    MatrixCoordPair pain_point(priority, MATRIX_COORD(col, row));
    pain_points_heaps_[pp_type].Push(&pain_point);
    if (debug_level_) {
      tprintf("Added pain point with priority %g\n", priority);
    }
    return true;
  } else {
    if (debug_level_) tprintf("Pain points heap is full\n");
    return false;
  }
}
// Remaps the coordinates of every queued pain point after the blob with the
// given index is split, since the split expands the ratings matrix.
void LMPainPoints::RemapForSplit(int index) {
  for (int type = 0; type < LM_PPTYPE_NUM; ++type) {
    GenericVector<MatrixCoordPair> *heap = pain_points_heaps_[type].heap();
    for (int i = 0; i < heap->size(); ++i) {
      (*heap)[i].data.MapForSplit(index);
    }
  }
}
} // namespace tesseract
| 1080228-arabicocr11 | wordrec/lm_pain_points.cpp | C++ | asf20 | 9,372 |
/* -*-C-*-
********************************************************************************
*
* File: plotedges.h (Formerly plotedges.h)
* Description: Convert the various data type into line lists
* Author: Mark Seaman, OCR Technology
* Created: Fri Jul 28 13:14:48 1989
* Modified: Mon May 13 09:34:51 1991 (Mark Seaman) marks@hpgrlt
* Language: C
* Package: N/A
* Status: Experimental (Do Not Distribute)
*
* (c) Copyright 1989, Hewlett-Packard Company.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
*********************************************************************************/
#ifndef PLOTEDGES_H
#define PLOTEDGES_H
#include "callcpp.h"
#include "oldlist.h"
#include "blobs.h"
#include "split.h"
/*----------------------------------------------------------------------
V a r i a b l e s
----------------------------------------------------------------------*/
extern ScrollView *edge_window; /* Window for edges */
/*----------------------------------------------------------------------
Macros
----------------------------------------------------------------------*/
/**********************************************************************
 * update_edge_window
 *
 * Refresh the display of the edge window.
 *
 * Note: the macro body must NOT end with a line-continuation backslash;
 * a trailing backslash splices the following source line (here a comment
 * block) into the directive, which is fragile and confuses tooling.
 **********************************************************************/
#define update_edge_window()           \
  if (wordrec_display_splits) {        \
    c_make_current (edge_window);      \
  }
/**********************************************************************
 * edge_window_wait
 *
 * Wait for someone to click in the edges window.
 * Blocks in window_wait() only when split display is enabled;
 * otherwise it is a no-op.
 **********************************************************************/
#define edge_window_wait()  \
if (wordrec_display_splits) window_wait (edge_window)
/*----------------------------------------------------------------------
          F u n c t i o n s
----------------------------------------------------------------------*/
/* Renders each outline in the given list (presumably into edge_window;
 * confirm against the definitions in plotedges.cpp). */
void display_edgepts(LIST outlines);
/* Draws the edges of the given blob. */
void draw_blob_edges(TBLOB *blob);
/* Highlights the given edge point in the display. */
void mark_outline(EDGEPT *edgept);
/* Highlights the given split in the display. */
void mark_split(SPLIT *split);
#endif
| 1080228-arabicocr11 | wordrec/plotedges.h | C | asf20 | 2,692 |
///////////////////////////////////////////////////////////////////////
// File: language_model.h
// Description: Functions that utilize the knowledge about the properties,
// structure and statistics of the language to help segmentation
// search.
// Author: Daria Antonova
// Created: Mon Nov 11 11:26:43 PST 2009
//
// (C) Copyright 2009, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#ifndef TESSERACT_WORDREC_LANGUAGE_MODEL_H_
#define TESSERACT_WORDREC_LANGUAGE_MODEL_H_
#include "associate.h"
#include "dawg.h"
#include "dict.h"
#include "fontinfo.h"
#include "intproto.h"
#include "lm_consistency.h"
#include "lm_pain_points.h"
#include "lm_state.h"
#include "matrix.h"
#include "params.h"
#include "pageres.h"
#include "params_model.h"
namespace tesseract {
// This class contains the data structures and functions necessary
// to represent and use the knowledge about the language.
class LanguageModel {
 public:
  // Masks for keeping track of top choices that should not be pruned out.
  static const LanguageModelFlagsType kSmallestRatingFlag = 0x1;
  static const LanguageModelFlagsType kLowerCaseFlag = 0x2;
  static const LanguageModelFlagsType kUpperCaseFlag = 0x4;
  static const LanguageModelFlagsType kDigitFlag = 0x8;
  static const LanguageModelFlagsType kXhtConsistentFlag = 0x10;
  // Denominator for normalizing per-letter ngram cost when deriving
  // penalty adjustments.
  static const float kMaxAvgNgramCost;
  LanguageModel(const UnicityTable<FontInfo> *fontinfo_table, Dict *dict);
  ~LanguageModel();
  // Fills the given floats array with features extracted from path represented
  // by the given ViterbiStateEntry. See ccstruct/params_training_featdef.h
  // for feature information.
  // Note: the function assumes that features points to an array of size
  // PTRAIN_NUM_FEATURE_TYPES.
  static void ExtractFeaturesFromPath(const ViterbiStateEntry &vse,
                                      float features[]);
  // Updates data structures that are used for the duration of the segmentation
  // search on the current word;
  void InitForWord(const WERD_CHOICE *prev_word,
                   bool fixed_pitch, float max_char_wh_ratio,
                   float rating_cert_scale);
  // Updates language model state of the given BLOB_CHOICE_LIST (from
  // the ratings matrix) and its parent. Updates pain_points if new
  // problematic points are found in the segmentation graph.
  //
  // At most language_model_viterbi_list_size are kept in each
  // LanguageModelState.viterbi_state_entries list.
  // At most language_model_viterbi_list_max_num_prunable of those are prunable
  // (non-dictionary) paths.
  // The entries that represent dictionary word paths are kept at the front
  // of the list.
  // The list is ordered by cost that is computed collectively by several
  // language model components (currently dawg and ngram components).
  bool UpdateState(
      bool just_classified,
      int curr_col, int curr_row,
      BLOB_CHOICE_LIST *curr_list,
      LanguageModelState *parent_node,
      LMPainPoints *pain_points,
      WERD_RES *word_res,
      BestChoiceBundle *best_choice_bundle,
      BlamerBundle *blamer_bundle);
  // Returns true if an acceptable best choice was discovered.
  inline bool AcceptableChoiceFound() { return acceptable_choice_found_; }
  inline void SetAcceptableChoiceFound(bool val) {
    acceptable_choice_found_ = val;
  }
  // Returns the reference to ParamsModel.
  inline ParamsModel &getParamsModel() { return params_model_; }
 protected:
  // Converts a (negative) classifier certainty into a positive score.
  // Sigmoid mapping when language_model_use_sigmoidal_certainty is set,
  // reciprocal form otherwise.
  inline float CertaintyScore(float cert) {
    if (language_model_use_sigmoidal_certainty) {
      // cert is assumed to be between 0 and -dict_->certainty_scale.
      // If you enable language_model_use_sigmoidal_certainty, you
      // need to adjust language_model_ngram_nonmatch_score as well.
      cert = -cert / dict_->certainty_scale;
      return 1.0f / (1.0f + exp(10.0f * cert));
    } else {
      return (-1.0f / cert);
    }
  }
  // Returns a penalty that grows linearly (by
  // language_model_penalty_increment) with each problem beyond the first.
  inline float ComputeAdjustment(int num_problems, float penalty) {
    if (num_problems == 0) return 0.0f;
    if (num_problems == 1) return penalty;
    return (penalty + (language_model_penalty_increment *
                       static_cast<float>(num_problems-1)));
  }
  // Computes the adjustment to the ratings sum based on the given
  // consistency_info. The paths with invalid punctuation, inconsistent
  // case and character type are penalized proportionally to the number
  // of inconsistencies on the path.
  // Dictionary paths (dawg_info != NULL) are only penalized for case and
  // script inconsistencies; the remaining penalties apply to all others.
  inline float ComputeConsistencyAdjustment(
      const LanguageModelDawgInfo *dawg_info,
      const LMConsistencyInfo &consistency_info) {
    if (dawg_info != NULL) {
      return ComputeAdjustment(consistency_info.NumInconsistentCase(),
                               language_model_penalty_case) +
          (consistency_info.inconsistent_script ?
           language_model_penalty_script : 0.0f);
    }
    return (ComputeAdjustment(consistency_info.NumInconsistentPunc(),
                              language_model_penalty_punc) +
            ComputeAdjustment(consistency_info.NumInconsistentCase(),
                              language_model_penalty_case) +
            ComputeAdjustment(consistency_info.NumInconsistentChartype(),
                              language_model_penalty_chartype) +
            ComputeAdjustment(consistency_info.NumInconsistentSpaces(),
                              language_model_penalty_spacing) +
            (consistency_info.inconsistent_script ?
             language_model_penalty_script : 0.0f) +
            (consistency_info.inconsistent_font ?
             language_model_penalty_font : 0.0f));
  }
  // Returns an adjusted ratings sum that includes inconsistency penalties,
  // penalties for non-dictionary paths and paths with dips in ngram
  // probability.
  float ComputeAdjustedPathCost(ViterbiStateEntry *vse);
  // Finds the first lower and upper case letter and first digit in curr_list.
  // Uses the first character in the list in place of empty results.
  // Returns true if both alpha and digits are found.
  bool GetTopLowerUpperDigit(BLOB_CHOICE_LIST *curr_list,
                             BLOB_CHOICE **first_lower,
                             BLOB_CHOICE **first_upper,
                             BLOB_CHOICE **first_digit) const;
  // Forces there to be at least one entry in the overall set of the
  // viterbi_state_entries of each element of parent_node that has the
  // top_choice_flag set for lower, upper and digit using the same rules as
  // GetTopLowerUpperDigit, setting the flag on the first found suitable
  // candidate, whether or not the flag is set on some other parent.
  // Returns 1 if both alpha and digits are found among the parents, -1 if no
  // parents are found at all (a legitimate case), and 0 otherwise.
  int SetTopParentLowerUpperDigit(LanguageModelState *parent_node) const;
  // Finds the next ViterbiStateEntry with which the given unichar_id can
  // combine sensibly, taking into account any mixed alnum/mixed case
  // situation, and whether this combination has been inspected before.
  ViterbiStateEntry* GetNextParentVSE(
      bool just_classified, bool mixed_alnum,
      const BLOB_CHOICE* bc, LanguageModelFlagsType blob_choice_flags,
      const UNICHARSET& unicharset, WERD_RES* word_res,
      ViterbiStateEntry_IT* vse_it,
      LanguageModelFlagsType* top_choice_flags) const;
  // Helper function that computes the cost of the path composed of the
  // path in the given parent ViterbiStateEntry and the given BLOB_CHOICE.
  // If the new path looks good enough, adds a new ViterbiStateEntry to the
  // list of viterbi entries in the given BLOB_CHOICE and returns true.
  bool AddViterbiStateEntry(
      LanguageModelFlagsType top_choice_flags, float denom, bool word_end,
      int curr_col, int curr_row, BLOB_CHOICE *b,
      LanguageModelState *curr_state, ViterbiStateEntry *parent_vse,
      LMPainPoints *pain_points, WERD_RES *word_res,
      BestChoiceBundle *best_choice_bundle, BlamerBundle *blamer_bundle);
  // Determines whether a potential entry is a true top choice and
  // updates changed accordingly.
  //
  // Note: The function assumes that b, top_choice_flags and changed
  // are not NULL.
  void GenerateTopChoiceInfo(ViterbiStateEntry *new_vse,
                             const ViterbiStateEntry *parent_vse,
                             LanguageModelState *lms);
  // Calls dict_->LetterIsOk() with DawgArgs initialized from parent_vse and
  // unichar from b.unichar_id(). Constructs and returns LanguageModelDawgInfo
  // with updated active dawgs, constraints and permuter.
  //
  // Note: the caller is responsible for deleting the returned pointer.
  LanguageModelDawgInfo *GenerateDawgInfo(bool word_end,
                                          int curr_col, int curr_row,
                                          const BLOB_CHOICE &b,
                                          const ViterbiStateEntry *parent_vse);
  // Computes p(unichar | parent context) and records it in ngram_cost.
  // If b.unichar_id() is an unlikely continuation of the parent context
  // sets found_small_prob to true and returns NULL.
  // Otherwise creates a new LanguageModelNgramInfo entry containing the
  // updated context (that includes b.unichar_id() at the end) and returns it.
  //
  // Note: the caller is responsible for deleting the returned pointer.
  LanguageModelNgramInfo *GenerateNgramInfo(
      const char *unichar, float certainty, float denom,
      int curr_col, int curr_row, float outline_length,
      const ViterbiStateEntry *parent_vse);
  // Computes -(log(prob(classifier)) + log(prob(ngram model)))
  // for the given unichar in the given context. If there are multiple
  // unichars at one position - takes the average of their probabilities.
  // UNICHAR::utf8_step() is used to separate out individual UTF8 characters,
  // since probability_in_context() can only handle one at a time (while
  // unicharset might contain ngrams and glyphs composed from multiple UTF8
  // characters).
  float ComputeNgramCost(const char *unichar, float certainty, float denom,
                         const char *context, int *unichar_step_len,
                         bool *found_small_prob, float *ngram_prob);
  // Computes the normalization factors for the classifier confidences
  // (used by ComputeNgramCost()).
  float ComputeDenom(BLOB_CHOICE_LIST *curr_list);
  // Fills the given consistency_info based on parent_vse.consistency_info
  // and on the consistency of the given unichar_id with parent_vse.
  void FillConsistencyInfo(
      int curr_col, bool word_end, BLOB_CHOICE *b,
      ViterbiStateEntry *parent_vse,
      WERD_RES *word_res,
      LMConsistencyInfo *consistency_info);
  // Constructs WERD_CHOICE by recording unichar_ids of the BLOB_CHOICEs
  // on the path represented by the given BLOB_CHOICE and language model
  // state entries (lmse, dse). The path is re-constructed by following
  // the parent pointers in the lang model state entries). If the
  // constructed WERD_CHOICE is better than the best/raw choice recorded
  // in the best_choice_bundle, this function updates the corresponding
  // fields and sets best_choice_bundle->updated to true.
  void UpdateBestChoice(ViterbiStateEntry *vse,
                        LMPainPoints *pain_points,
                        WERD_RES *word_res,
                        BestChoiceBundle *best_choice_bundle,
                        BlamerBundle *blamer_bundle);
  // Constructs a WERD_CHOICE by tracing parent pointers starting with
  // the given LanguageModelStateEntry. Returns the constructed word.
  // Updates best_char_choices, certainties and state if they are not
  // NULL (best_char_choices and certainties are assumed to have the
  // length equal to lmse->length).
  // The caller is responsible for freeing memory associated with the
  // returned WERD_CHOICE.
  WERD_CHOICE *ConstructWord(ViterbiStateEntry *vse,
                             WERD_RES *word_res,
                             DANGERR *fixpt,
                             BlamerBundle *blamer_bundle,
                             bool *truth_path);
  // Wrapper around AssociateUtils::ComputeStats().
  inline void ComputeAssociateStats(int col, int row,
                                    float max_char_wh_ratio,
                                    ViterbiStateEntry *parent_vse,
                                    WERD_RES *word_res,
                                    AssociateStats *associate_stats) {
    AssociateUtils::ComputeStats(
        col, row,
        (parent_vse != NULL) ? &(parent_vse->associate_stats) : NULL,
        (parent_vse != NULL) ? parent_vse->length : 0,
        fixed_pitch_, max_char_wh_ratio,
        word_res, language_model_debug_level > 2, associate_stats);
  }
  // Returns true if the path with such top_choice_flags and dawg_info
  // could be pruned out (i.e. is neither a system/user/frequent dictionary
  // nor a top choice path).
  // In non-space delimited languages all paths can be "somewhat" dictionary
  // words. In such languages we can not do dictionary-driven path pruning,
  // so paths with non-empty dawg_info are considered prunable.
  inline bool PrunablePath(const ViterbiStateEntry &vse) {
    if (vse.top_choice_flags) return false;
    if (vse.dawg_info != NULL &&
        (vse.dawg_info->permuter == SYSTEM_DAWG_PERM ||
         vse.dawg_info->permuter == USER_DAWG_PERM ||
         vse.dawg_info->permuter == FREQ_DAWG_PERM)) return false;
    return true;
  }
  // Returns true if the given ViterbiStateEntry represents an acceptable path.
  inline bool AcceptablePath(const ViterbiStateEntry &vse) {
    return (vse.dawg_info != NULL || vse.Consistent() ||
            (vse.ngram_info != NULL && !vse.ngram_info->pruned));
  }
 public:
  // Parameters.
  INT_VAR_H(language_model_debug_level, 0, "Language model debug level");
  BOOL_VAR_H(language_model_ngram_on, false,
             "Turn on/off the use of character ngram model");
  INT_VAR_H(language_model_ngram_order, 8,
            "Maximum order of the character ngram model");
  INT_VAR_H(language_model_viterbi_list_max_num_prunable, 10,
            "Maximum number of prunable (those for which PrunablePath() is"
            " true) entries in each viterbi list recorded in BLOB_CHOICEs");
  INT_VAR_H(language_model_viterbi_list_max_size, 500,
            "Maximum size of viterbi lists recorded in BLOB_CHOICEs");
  double_VAR_H(language_model_ngram_small_prob, 0.000001,
               "To avoid overly small denominators use this as the floor"
               " of the probability returned by the ngram model");
  double_VAR_H(language_model_ngram_nonmatch_score, -40.0,
               "Average classifier score of a non-matching unichar");
  BOOL_VAR_H(language_model_ngram_use_only_first_uft8_step, false,
             "Use only the first UTF8 step of the given string"
             " when computing log probabilities");
  double_VAR_H(language_model_ngram_scale_factor, 0.03,
               "Strength of the character ngram model relative to the"
               " character classifier ");
  double_VAR_H(language_model_ngram_rating_factor, 16.0,
               "Factor to bring log-probs into the same range as ratings"
               " when multiplied by outline length ");
  BOOL_VAR_H(language_model_ngram_space_delimited_language, true,
             "Words are delimited by space");
  INT_VAR_H(language_model_min_compound_length, 3,
            "Minimum length of compound words");
  // Penalties used for adjusting path costs and final word rating.
  double_VAR_H(language_model_penalty_non_freq_dict_word, 0.1,
               "Penalty for words not in the frequent word dictionary");
  double_VAR_H(language_model_penalty_non_dict_word, 0.15,
               "Penalty for non-dictionary words");
  double_VAR_H(language_model_penalty_punc, 0.2,
               "Penalty for inconsistent punctuation");
  double_VAR_H(language_model_penalty_case, 0.1,
               "Penalty for inconsistent case");
  double_VAR_H(language_model_penalty_script, 0.5,
               "Penalty for inconsistent script");
  double_VAR_H(language_model_penalty_chartype, 0.3,
               "Penalty for inconsistent character type");
  double_VAR_H(language_model_penalty_font, 0.00,
               "Penalty for inconsistent font");
  double_VAR_H(language_model_penalty_spacing, 0.05,
               "Penalty for inconsistent spacing");
  double_VAR_H(language_model_penalty_increment, 0.01, "Penalty increment");
  INT_VAR_H(wordrec_display_segmentations, 0, "Display Segmentations");
  BOOL_VAR_H(language_model_use_sigmoidal_certainty, false,
             "Use sigmoidal score for certainty");
 protected:
  // Member Variables.
  // Temporary DawgArgs struct that is re-used across different words to
  // avoid dynamic memory re-allocation (should be cleared before each use).
  DawgArgs *dawg_args_;
  // Scaling for recovering blob outline length from rating and certainty.
  float rating_cert_scale_;
  // The following variables are set at construction time.
  // Pointer to fontinfo table (not owned by LanguageModel).
  const UnicityTable<FontInfo> *fontinfo_table_;
  // Pointer to Dict class, that is used for querying the dictionaries
  // (the pointer is not owned by LanguageModel).
  Dict *dict_;
  // TODO(daria): the following variables should become LanguageModel params
  // when the old code in bestfirst.cpp and heuristic.cpp is deprecated.
  //
  // Set to true if we are dealing with fixed pitch text
  // (set to assume_fixed_pitch_char_segment).
  bool fixed_pitch_;
  // Max char width-to-height ratio allowed
  // (set to segsearch_max_char_wh_ratio).
  float max_char_wh_ratio_;
  // The following variables are initialized with InitForWord().
  // String representation of the classification of the previous word
  // (since this is only used by the character ngram model component,
  // only the last language_model_ngram_order of the word are stored).
  STRING prev_word_str_;
  int prev_word_unichar_step_len_;
  // Active dawg vector.
  DawgPositionVector *very_beginning_active_dawgs_;  // includes continuation
  DawgPositionVector *beginning_active_dawgs_;
  // Set to true if acceptable choice was discovered.
  // Note: it would be nice to use this to terminate the search once an
  // acceptable choices is found. However we do not do that and once an
  // acceptable choice is found we finish looking for alternative choices
  // in the current segmentation graph and then exit the search (no more
  // classifications are done after an acceptable choice is found).
  // This is needed in order to let the search find the words very close to
  // the best choice in rating (e.g. what/What, Cat/cat, etc) and log these
  // choices. This way the stopper will know that the best choice is not
  // ambiguous (i.e. there are best choices in the best choice list that have
  // ratings close to the very best one) and will be less likely to mis-adapt.
  bool acceptable_choice_found_;
  // Set to true if a choice representing correct segmentation was explored.
  bool correct_segmentation_explored_;
  // Params models containing weights for computing ViterbiStateEntry costs.
  ParamsModel params_model_;
};
} // namespace tesseract
#endif // TESSERACT_WORDREC_LANGUAGE_MODEL_H_
| 1080228-arabicocr11 | wordrec/language_model.h | C++ | asf20 | 20,050 |
/* -*-C-*-
********************************************************************************
*
* File: chop.c (Formerly chop.c)
* Description:
* Author: Mark Seaman, OCR Technology
* Created: Fri Oct 16 14:37:00 1987
* Modified: Tue Jul 30 16:41:11 1991 (Mark Seaman) marks@hpgrlt
* Language: C
* Package: N/A
* Status: Reusable Software Component
*
* (c) Copyright 1987, Hewlett-Packard Company.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
*********************************************************************************/
/*----------------------------------------------------------------------
I n c l u d e s
----------------------------------------------------------------------*/
#include "chop.h"
#include "outlines.h"
#include "olutil.h"
#include "callcpp.h"
#include "plotedges.h"
#include "const.h"
#include "wordrec.h"
#include <math.h>
// Include automatically generated configuration file if running autoconf.
#ifdef HAVE_CONFIG_H
#include "config_auto.h"
#endif
namespace tesseract {
/*----------------------------------------------------------------------
F u n c t i o n s
----------------------------------------------------------------------*/
/**
 * @name point_priority
 *
 * Assign a priority to an edge point that might be used as part of a
 * split. The priority is the angle change (in degrees) at the point.
 */
PRIORITY Wordrec::point_priority(EDGEPT *point) {
  return static_cast<PRIORITY>(angle_change(point->prev, point, point->next));
}
/**
 * @name add_point_to_list
 *
 * Add an edge point to a POINT_GROUP containing a list of other points.
 * The point is silently dropped when the heap is (nearly) full.
 */
void Wordrec::add_point_to_list(PointHeap* point_heap, EDGEPT *point) {
  const bool has_room = point_heap->size() < MAX_NUM_POINTS - 2;
  if (has_room) {
    PointPair entry(point_priority(point), point);
    point_heap->Push(&entry);
  }
#ifndef GRAPHICS_DISABLED
  // Debug display of the candidate point.
  if (chop_debug > 2)
    mark_outline(point);
#endif
}
/**
 * @name angle_change
 *
 * Return the change in angle (degrees) of the line segments between
 * points one and two, and two and three.
 */
int Wordrec::angle_change(EDGEPT *point1, EDGEPT *point2, EDGEPT *point3) {
  VECTOR seg1;
  VECTOR seg2;
  seg1.x = point2->pos.x - point1->pos.x;
  seg1.y = point2->pos.y - point1->pos.y;
  seg2.x = point3->pos.x - point2->pos.x;
  seg2.y = point3->pos.y - point2->pos.y;
  // Geometric mean of the two segment lengths; also guards the
  // division below against degenerate (zero-length) segments.
  float norm = (float)sqrt((float)LENGTH(seg1) * LENGTH(seg2));
  if ((int) norm == 0)
    return (0);
  // The cross product yields the sine of the turn angle.
  int degrees = static_cast<int>(floor(asin(CROSS(seg1, seg2) /
                                            norm) / PI * 180.0 + 0.5));
  // The dot product distinguishes acute from obtuse turns.
  if (SCALAR(seg1, seg2) < 0)
    degrees = 180 - degrees;
  // Normalize the result into (-180, 180].
  if (degrees > 180)
    degrees -= 360;
  if (degrees <= -180)
    degrees += 360;
  return degrees;
}
/**
 * @name is_little_chunk
 *
 * Return TRUE if one of the pieces resulting from this split would
 * be less than some number of edge points.
 * Walks the outline in both directions between the two points; each walk
 * is bounded by chop_min_outline_points, so a piece longer than that
 * bound is never classified as little.
 */
int Wordrec::is_little_chunk(EDGEPT *point1, EDGEPT *point2) {
  EDGEPT *p = point1;            /* Iterator */
  int counter = 0;
  do {
                                 /* Go from P1 to P2 */
    if (is_same_edgept (point2, p)) {
      // Short in edge points; confirm the enclosed area is small too.
      if (is_small_area (point1, point2))
        return (TRUE);
      else
        break;
    }
    p = p->next;
  }
  while ((p != point1) && (counter++ < chop_min_outline_points));
                                 /* Go from P2 to P1 */
  p = point2;
  counter = 0;
  do {
    if (is_same_edgept (point1, p)) {
      return (is_small_area (point2, point1));
    }
    p = p->next;
  }
  while ((p != point2) && (counter++ < chop_min_outline_points));
  return (FALSE);
}
/**
 * @name is_small_area
 *
 * Test the area defined by a split across this outline: accumulate a
 * cross-product area sum along the piece from point1 to point2 and
 * compare it against chop_min_outline_area.
 */
int Wordrec::is_small_area(EDGEPT *point1, EDGEPT *point2) {
  int enclosed_area = 0;
  TPOINT offset;
  EDGEPT *edge = point1->next;   /* Walks from P1 toward P2 */
  do {
    offset.x = edge->pos.x - point1->pos.x;
    offset.y = edge->pos.y - point1->pos.y;
    enclosed_area += CROSS(offset, edge->vec);
    edge = edge->next;
  } while (!is_same_edgept(point2, edge));
  return enclosed_area < chop_min_outline_area;
}
/**
 * @name pick_close_point
 *
 * Choose the edge point that is closest to the critical point. This
 * point may not be exactly vertical from the critical point.
 * best_dist is both input (current best distance to beat) and output
 * (distance of the returned point). Returns NULL if no acceptable
 * point beats *best_dist.
 * When chop_vertical_creep is off the loop body runs exactly once and
 * only vertical_point itself is examined; with creep on, the walk
 * continues forward as long as strictly acceptable closer points are
 * being found.
 */
EDGEPT *Wordrec::pick_close_point(EDGEPT *critical_point,
                                  EDGEPT *vertical_point,
                                  int *best_dist) {
  EDGEPT *best_point = NULL;
  int this_distance;
  int found_better;
  do {
    found_better = FALSE;
    this_distance = edgept_dist (critical_point, vertical_point);
    if (this_distance <= *best_dist) {
      // Reject candidates that coincide with the critical point, its
      // successor, or the current best, or that lie outside the outline.
      if (!(same_point (critical_point->pos, vertical_point->pos) ||
        same_point (critical_point->pos, vertical_point->next->pos) ||
        (best_point && same_point (best_point->pos, vertical_point->pos)) ||
        is_exterior_point (critical_point, vertical_point))) {
        *best_dist = this_distance;
        best_point = vertical_point;
        if (chop_vertical_creep)
          found_better = TRUE;
      }
    }
    vertical_point = vertical_point->next;
  }
  while (found_better == TRUE);
  return (best_point);
}
/**
 * @name prioritize_points
 *
 * Find a list of edge points from the outer outline of this blob. For
 * each of these points assign a priority. Sort these points using a
 * heap structure so that they can be visited in order.
 * local_min/local_max track candidate extremum points while scanning;
 * the sign of vec.y at each point decides whether the outline is
 * descending, ascending or flat at that point. The scan appears to
 * maintain the invariant that at least one of local_min/local_max is
 * non-NULL (both start at the loop head) -- NOTE(review): confirm, the
 * flat-area else branch dereferences local_min without a NULL check.
 */
void Wordrec::prioritize_points(TESSLINE *outline, PointHeap* points) {
  EDGEPT *this_point;
  EDGEPT *local_min = NULL;
  EDGEPT *local_max = NULL;
  this_point = outline->loop;
  local_min = this_point;
  local_max = this_point;
  do {
    if (this_point->vec.y < 0) {
                                 /* Look for minima */
      if (local_max != NULL)
        new_max_point(local_max, points);
      else if (is_inside_angle (this_point))
        add_point_to_list(points, this_point);
      local_max = NULL;
      local_min = this_point->next;
    }
    else if (this_point->vec.y > 0) {
                                 /* Look for maxima */
      if (local_min != NULL)
        new_min_point(local_min, points);
      else if (is_inside_angle (this_point))
        add_point_to_list(points, this_point);
      local_min = NULL;
      local_max = this_point->next;
    }
    else {
                                 /* Flat area */
      if (local_max != NULL) {
        // Only emit the max if the flat run follows a real descent/ascent.
        if (local_max->prev->vec.y != 0) {
          new_max_point(local_max, points);
        }
        local_max = this_point->next;
        local_min = NULL;
      }
      else {
        if (local_min->prev->vec.y != 0) {
          new_min_point(local_min, points);
        }
        local_min = this_point->next;
        local_max = NULL;
      }
    }
                                 /* Next point */
    this_point = this_point->next;
  }
  while (this_point != outline->loop);
}
/**
 * @name new_min_point
 *
 * Found a new minimum point try to decide whether to save it or not.
 * The point is recorded when the outline turns downward there, or when
 * it is flat but has a negative priority.
 */
void Wordrec::new_min_point(EDGEPT *local_min, PointHeap* points) {
  inT16 dir = direction(local_min);
  // Short-circuit keeps point_priority() from running unless dir == 0,
  // matching the original two-step test.
  if (dir < 0 || (dir == 0 && point_priority(local_min) < 0)) {
    add_point_to_list(points, local_min);
  }
}
/**
 * @name new_max_point
 *
 * Found a new maximum point try to decide whether to save it or not.
 * The point is recorded when the outline turns upward there, or when
 * it is flat but has a negative priority.
 */
void Wordrec::new_max_point(EDGEPT *local_max, PointHeap* points) {
  inT16 dir = direction(local_max);
  // Short-circuit keeps point_priority() from running unless dir == 0,
  // matching the original two-step test.
  if (dir > 0 || (dir == 0 && point_priority(local_max) < 0)) {
    add_point_to_list(points, local_max);
  }
}
/**
 * @name vertical_projection_point
 *
 * For one point on the outline, find the corresponding point on the
 * other side of the outline that is a likely projection for a split
 * point. This is done by iterating through the edge points until the
 * X value of the point being looked at is greater than the X value of
 * the split point. Ensure that the point being returned is not right
 * next to the split point. Return the edge point in *best_point as
 * a result, and any points that were newly created are also saved on
 * the new_points list.
 */
void Wordrec::vertical_projection_point(EDGEPT *split_point, EDGEPT *target_point,
                                        EDGEPT** best_point,
                                        EDGEPT_CLIST *new_points) {
  EDGEPT *p;                     /* Iterator */
  EDGEPT *this_edgept;           /* Iterator */
  EDGEPT_C_IT new_point_it(new_points);
  int x = split_point->pos.x;    /* X value of vertical */
  int best_dist = LARGE_DISTANCE;/* Best point found */
  if (*best_point != NULL)
    best_dist = edgept_dist(split_point, *best_point);
  p = target_point;
  /* Look at each edge point */
  do {
    // Candidate segments are those whose x-range spans the vertical
    // through the split point, excluding the split point itself, any
    // previous chop point, and the current best.
    if (((p->pos.x <= x && x <= p->next->pos.x) ||
         (p->next->pos.x <= x && x <= p->pos.x)) &&
        !same_point(split_point->pos, p->pos) &&
        !same_point(split_point->pos, p->next->pos) &&
        !p->IsChopPt() &&
        (*best_point == NULL || !same_point((*best_point)->pos, p->pos))) {
      // NOTE(review): this_edgept is consumed below even when near_point()
      // returns false; this assumes near_point() always writes its output
      // parameter -- confirm against near_point()'s definition.
      if (near_point(split_point, p, p->next, &this_edgept)) {
        // A point was newly created; remember it for cleanup by the caller.
        new_point_it.add_before_then_move(this_edgept);
      }
      if (*best_point == NULL)
        best_dist = edgept_dist (split_point, this_edgept);
      this_edgept =
        pick_close_point(split_point, this_edgept, &best_dist);
      if (this_edgept)
        *best_point = this_edgept;
    }
    p = p->next;
  }
  while (p != target_point);
}
} // namespace tesseract
| 1080228-arabicocr11 | wordrec/chop.cpp | C | asf20 | 10,453 |
/* -*-C-*-
********************************************************************************
*
* File: render.c (Formerly render.c)
* Description: Convert the various data type into line lists
* Author: Mark Seaman, OCR Technology
* Created: Fri Jul 28 13:14:48 1989
* Modified: Mon Jul 15 10:23:37 1991 (Mark Seaman) marks@hpgrlt
* Language: C
* Package: N/A
* Status: Experimental (Do Not Distribute)
*
* (c) Copyright 1989, Hewlett-Packard Company.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
*********************************************************************************/
#include "render.h"
#include "blobs.h"
#ifdef __UNIX__
#include <assert.h>
#endif
#include <math.h>
#include "vecfuncs.h"
// Include automatically generated configuration file if running autoconf.
#ifdef HAVE_CONFIG_H
#include "config_auto.h"
#endif
/*----------------------------------------------------------------------
              V a r i a b l e s
----------------------------------------------------------------------*/
// Shared debug window; created lazily by display_blob().
ScrollView *blob_window = NULL;
// Palette of drawing colors -- presumably cycled through by callers when
// rendering multiple items; confirm at the use sites.
C_COL color_list[] = {
  Red, Cyan, Yellow, Blue, Green, White
};
// Debug/display switches for the word recognizer.
BOOL_VAR(wordrec_display_all_blobs, 0, "Display Blobs");
BOOL_VAR(wordrec_display_all_words, 0, "Display Words");
BOOL_VAR(wordrec_blob_pause, 0, "Blob pause");
/*----------------------------------------------------------------------
F u n c t i o n s
----------------------------------------------------------------------*/
#ifndef GRAPHICS_DISABLED
/**********************************************************************
 * display_blob
 *
 * Show the blob in the shared blob window, creating the window on
 * first use and clearing it on every later call.
 **********************************************************************/
void display_blob(TBLOB *blob, C_COL color) {
  if (blob_window == NULL) {
    // First call: create the drawable.
    blob_window = c_create_window("Blobs", 520, 10, 500, 256,
                                  -1000.0, 1000.0, 0.0, 256.0);
  } else {
    c_clear_window(blob_window);
  }
  render_blob(blob_window, blob, color);
}
/**********************************************************************
 * render_blob
 *
 * Draw every outline of the given blob into the window; a NULL blob
 * draws nothing.
 **********************************************************************/
void render_blob(void *window, TBLOB *blob, C_COL color) {
  if (blob != NULL)
    render_outline(window, blob->outlines, color);
}
/**********************************************************************
* render_edgepts
*
* Create a list of line segments that represent the expanded outline
* that was supplied as input.
**********************************************************************/
void render_edgepts(void *window, EDGEPT *edgept, C_COL color) {
if (!edgept)
return;
float x = edgept->pos.x;
float y = edgept->pos.y;
EDGEPT *this_edge = edgept;
c_line_color_index(window, color);
c_move(window, x, y);
do {
this_edge = this_edge->next;
x = this_edge->pos.x;
y = this_edge->pos.y;
c_draw(window, x, y);
}
while (edgept != this_edge);
}
/**********************************************************************
 * render_outline
 *
 * Draw each outline in the linked list in turn (iterative form of the
 * original tail recursion; same drawing order).
 **********************************************************************/
void render_outline(void *window,
                    TESSLINE *outline,
                    C_COL color) {
  for (; outline != NULL; outline = outline->next) {
    if (outline->loop)
      render_edgepts(window, outline->loop, color);
  }
}
#endif // GRAPHICS_DISABLED
| 1080228-arabicocr11 | wordrec/render.cpp | C | asf20 | 4,257 |
/* -*-C-*-
********************************************************************************
*
* File: findseam.c (Formerly findseam.c)
* Description:
* Author: Mark Seaman, OCR Technology
* Created: Fri Oct 16 14:37:00 1987
* Modified: Tue Jul 30 15:44:59 1991 (Mark Seaman) marks@hpgrlt
* Language: C
* Package: N/A
* Status: Reusable Software Component
*
* (c) Copyright 1987, Hewlett-Packard Company.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
*********************************************************************************/
/*----------------------------------------------------------------------
I n c l u d e s
----------------------------------------------------------------------*/
#include "findseam.h"
#include "gradechop.h"
#include "olutil.h"
#include "plotedges.h"
#include "outlines.h"
#include "freelist.h"
#include "seam.h"
#include "wordrec.h"
// Include automatically generated configuration file if running autoconf.
#ifdef HAVE_CONFIG_H
#include "config_auto.h"
#endif
/*----------------------------------------------------------------------
T y p e s
----------------------------------------------------------------------*/
#define SPLIT_CLOSENESS 20/* Difference in x value */
/* How many to keep */
#define MAX_NUM_SEAMS 150
/* How many to keep */
#define MAX_OLD_SEAMS 150
#define NO_FULL_PRIORITY -1/* Special marker for pri. */
/* Evalute right away */
#define BAD_PRIORITY 9999.0
/*----------------------------------------------------------------------
F u n c t i o n s
----------------------------------------------------------------------*/
namespace tesseract {
/**********************************************************************
 * add_seam_to_queue
 *
 * Adds the given new_seam to the seams priority queue, unless it is full
 * and the new seam is worse than the worst.  Takes ownership of
 * new_seam on every path: it is either pushed into the queue or
 * deleted here.  Lower priority values are better.
 **********************************************************************/
void Wordrec::add_seam_to_queue(float new_priority, SEAM *new_seam,
                                SeamQueue* seams) {
  if (new_seam == NULL) return;
  if (chop_debug) {
    tprintf("Pushing new seam with priority %g :", new_priority);
    print_seam("seam: ", new_seam);
  }
  // Enforce the queue size cap: pop the current worst and keep whichever
  // of the two seams has the better (lower) priority.
  if (seams->size() >= MAX_NUM_SEAMS) {
    SeamPair old_pair(0, NULL);
    if (seams->PopWorst(&old_pair) && old_pair.key() <= new_priority) {
      // Existing worst is at least as good: discard the newcomer.
      if (chop_debug) {
        tprintf("Old seam staying with priority %g\n", old_pair.key());
      }
      delete new_seam;
      seams->Push(&old_pair);
      return;
    } else if (chop_debug) {
      tprintf("New seam with priority %g beats old worst seam with %g\n",
              new_priority, old_pair.key());
    }
  }
  SeamPair new_pair(new_priority, new_seam);
  seams->Push(&new_pair);
}
/**********************************************************************
 * choose_best_seam
 *
 * Choose the best seam that can be created by assembling this a
 * collection of splits.  A queue of all the possible seams is
 * maintained.  Each new split received is placed in that queue with
 * its partial priority value.  These values in the seam queue are
 * evaluated and combined until a good enough seam is found.  If no
 * further good seams are being found then this function returns to the
 * caller, who will send more splits.  If this function is called with
 * a split of NULL, then no further splits can be supplied by the
 * caller.
 *
 * *seam_result holds (and owns) the best acceptable seam found so far;
 * it may be replaced by a better one during this call.
 **********************************************************************/
void Wordrec::choose_best_seam(SeamQueue* seam_queue,
                               SPLIT *split,
                               PRIORITY priority,
                               SEAM **seam_result,
                               TBLOB *blob,
                               SeamPile* seam_pile) {
  SEAM *seam;
  char str[80];
  float my_priority;
  /* Add seam of split */
  my_priority = priority;
  if (split != NULL) {
    // Seed the queue with a one-split seam located at the midpoint of
    // the split's two edge points.
    TPOINT split_point = split->point1->pos;
    split_point += split->point2->pos;
    split_point /= 2;
    seam = new SEAM(my_priority, split_point, split, NULL, NULL);
    if (chop_debug > 1)
      print_seam ("Partial priority ", seam);
    add_seam_to_queue(my_priority, seam, seam_queue);
    if (my_priority > chop_good_split)
      return;
  }
  TBOX bbox = blob->bounding_box();
  /* Queue loop */
  while (!seam_queue->empty()) {
    SeamPair seam_pair;
    seam_queue->Pop(&seam_pair);
    seam = seam_pair.extract_data();  // we now own this seam
    /* Set full priority */
    my_priority = seam_priority(seam, bbox.left(), bbox.right());
    if (chop_debug) {
      sprintf (str, "Full my_priority %0.0f, ", my_priority);
      print_seam(str, seam);
    }
    // Accept the seam if it beats the best-so-far and is acceptable in
    // absolute terms (below chop_ok_split), and its first split does not
    // cross an outline.
    if ((*seam_result == NULL || (*seam_result)->priority > my_priority) &&
        my_priority < chop_ok_split) {
      /* No crossing */
      if (constrained_split(seam->split1, blob)) {
        delete *seam_result;
        *seam_result = new SEAM(*seam);
        (*seam_result)->priority = my_priority;
      } else {
        // Constraint violated: discard and mark bad so it is not
        // combined with others below.
        delete seam;
        seam = NULL;
        my_priority = BAD_PRIORITY;
      }
    }
    if (my_priority < chop_good_split) {
      if (seam)
        delete seam;
      return; /* Made good answer */
    }
    if (seam) {
      /* Combine with others */
      if (seam_pile->size() < chop_seam_pile_size) {
        combine_seam(*seam_pile, seam, seam_queue);
        SeamDecPair pair(seam_pair.key(), seam);
        seam_pile->Push(&pair);  // pile takes ownership of seam
      } else if (chop_new_seam_pile &&
                 seam_pile->size() == chop_seam_pile_size &&
                 seam_pile->PeekTop().key() > seam_pair.key()) {
        combine_seam(*seam_pile, seam, seam_queue);
        SeamDecPair pair;
        seam_pile->Pop(&pair); // pop the worst.
        // Replace the seam in pair (deleting the old one) with
        // the new seam and score, then push back into the heap.
        pair.set_key(seam_pair.key());
        pair.set_data(seam);
        seam_pile->Push(&pair);
      } else {
        delete seam;
      }
    }
    // Stop when the best remaining candidate is no longer promising.
    my_priority = seam_queue->empty() ? NO_FULL_PRIORITY
                                      : seam_queue->PeekTop().key();
    if ((my_priority > chop_ok_split) ||
        (my_priority > chop_good_split && split))
      return;
  }
}
/**********************************************************************
* combine_seam
*
* Find other seams to combine with this one. The new seams that result
* from this union should be added to the seam queue. The return value
* tells whether or not any additional seams were added to the queue.
**********************************************************************/
void Wordrec::combine_seam(const SeamPile& seam_pile,
const SEAM* seam, SeamQueue* seam_queue) {
register inT16 dist;
inT16 bottom1, top1;
inT16 bottom2, top2;
SEAM *new_one;
const SEAM *this_one;
bottom1 = seam->split1->point1->pos.y;
if (seam->split1->point2->pos.y >= bottom1)
top1 = seam->split1->point2->pos.y;
else {
top1 = bottom1;
bottom1 = seam->split1->point2->pos.y;
}
if (seam->split2 != NULL) {
bottom2 = seam->split2->point1->pos.y;
if (seam->split2->point2->pos.y >= bottom2)
top2 = seam->split2->point2->pos.y;
else {
top2 = bottom2;
bottom2 = seam->split2->point2->pos.y;
}
}
else {
bottom2 = bottom1;
top2 = top1;
}
for (int x = 0; x < seam_pile.size(); ++x) {
this_one = seam_pile.get(x).data();
dist = seam->location.x - this_one->location.x;
if (-SPLIT_CLOSENESS < dist &&
dist < SPLIT_CLOSENESS &&
seam->priority + this_one->priority < chop_ok_split) {
inT16 split1_point1_y = this_one->split1->point1->pos.y;
inT16 split1_point2_y = this_one->split1->point2->pos.y;
inT16 split2_point1_y = 0;
inT16 split2_point2_y = 0;
if (this_one->split2) {
split2_point1_y = this_one->split2->point1->pos.y;
split2_point2_y = this_one->split2->point2->pos.y;
}
if (
/*!tessedit_fix_sideways_chops || */
(
/* this_one->split1 always exists */
(
((split1_point1_y >= top1 && split1_point2_y >= top1) ||
(split1_point1_y <= bottom1 && split1_point2_y <= bottom1))
&&
((split1_point1_y >= top2 && split1_point2_y >= top2) ||
(split1_point1_y <= bottom2 && split1_point2_y <= bottom2))
)
)
&&
(
this_one->split2 == NULL ||
(
((split2_point1_y >= top1 && split2_point2_y >= top1) ||
(split2_point1_y <= bottom1 && split2_point2_y <= bottom1))
&&
((split2_point1_y >= top2 && split2_point2_y >= top2) ||
(split2_point1_y <= bottom2 && split2_point2_y <= bottom2))
)
)
) {
new_one = join_two_seams (seam, this_one);
if (new_one != NULL) {
if (chop_debug > 1)
print_seam ("Combo priority ", new_one);
add_seam_to_queue(new_one->priority, new_one, seam_queue);
}
}
}
}
}
/**********************************************************************
 * constrained_split
 *
 * Check that a split obeys the chopping rules: it must not slice off a
 * tiny chunk of the outline, and it must not cross any outline of the
 * blob.  Returns TRUE iff the split is acceptable.
 **********************************************************************/
inT16 Wordrec::constrained_split(SPLIT *split, TBLOB *blob) {
  if (is_little_chunk(split->point1, split->point2))
    return FALSE;
  for (TESSLINE *line = blob->outlines; line != NULL; line = line->next) {
    if (split_bounds_overlap(split, line) &&
        crosses_outline(split->point1, split->point2, line->loop)) {
      return FALSE;
    }
  }
  return TRUE;
}
/**********************************************************************
 * pick_good_seam
 *
 * Find and return a good seam that will split this blob into two pieces.
 * Work from the outlines provided.  Returns NULL when no acceptable
 * seam (priority <= chop_ok_split) exists; the caller owns the
 * returned seam.
 **********************************************************************/
SEAM *Wordrec::pick_good_seam(TBLOB *blob) {
  SeamPile seam_pile(chop_seam_pile_size);
  EDGEPT *points[MAX_NUM_POINTS];
  EDGEPT_CLIST new_points;
  SEAM *seam = NULL;
  TESSLINE *outline;
  inT16 num_points = 0;
#ifndef GRAPHICS_DISABLED
  if (chop_debug > 2)
    wordrec_display_splits.set_value(true);
  draw_blob_edges(blob);
#endif
  // Collect up to MAX_NUM_POINTS critical points, best first.
  PointHeap point_heap(MAX_NUM_POINTS);
  for (outline = blob->outlines; outline; outline = outline->next)
    prioritize_points(outline, &point_heap);
  while (!point_heap.empty() && num_points < MAX_NUM_POINTS) {
    points[num_points++] = point_heap.PeekTop().data;
    point_heap.Pop(NULL);
  }
  /* Initialize queue */
  SeamQueue seam_queue(MAX_NUM_SEAMS);
  // Try point-pair splits first, then vertical-projection splits, which
  // may insert extra edge points recorded in new_points.
  try_point_pairs(points, num_points, &seam_queue, &seam_pile, &seam, blob);
  try_vertical_splits(points, num_points, &new_points,
                      &seam_queue, &seam_pile, &seam, blob);
  // If no good-enough seam was found, drain the remaining queue.
  if (seam == NULL) {
    choose_best_seam(&seam_queue, NULL, BAD_PRIORITY, &seam, blob, &seam_pile);
  }
  else if (seam->priority > chop_good_split) {
    choose_best_seam(&seam_queue, NULL, seam->priority,
                     &seam, blob, &seam_pile);
  }
  // Remove inserted vertical-split points that the chosen seam does not
  // use, repairing outline loop pointers that reference them.
  EDGEPT_C_IT it(&new_points);
  for (it.mark_cycle_pt(); !it.cycled_list(); it.forward()) {
    EDGEPT *inserted_point = it.data();
    if (!point_used_by_seam(seam, inserted_point)) {
      for (outline = blob->outlines; outline; outline = outline->next) {
        if (outline->loop == inserted_point) {
          outline->loop = outline->loop->next;
        }
      }
      remove_edgept(inserted_point);
    }
  }
  if (seam) {
    // Reject seams that are not acceptable in absolute terms.
    if (seam->priority > chop_ok_split) {
      delete seam;
      seam = NULL;
    }
#ifndef GRAPHICS_DISABLED
    else if (wordrec_display_splits) {
      if (seam->split1)
        mark_split (seam->split1);
      if (seam->split2)
        mark_split (seam->split2);
      if (seam->split3)
        mark_split (seam->split3);
      if (chop_debug > 2) {
        update_edge_window();
        edge_window_wait();
      }
    }
#endif
  }
  if (chop_debug)
    wordrec_display_splits.set_value(false);
  return (seam);
}
/**********************************************************************
 * seam_priority
 *
 * Assign a full priority value to the seam: the seam's partial priority
 * plus the full priority of its first split, evaluated with any second
 * and third splits temporarily applied to the outline.  Splits are
 * un-applied in reverse order before returning, restoring the outline.
 **********************************************************************/
PRIORITY Wordrec::seam_priority(SEAM *seam, inT16 xmin, inT16 xmax) {
  PRIORITY priority;
  if (seam->split1 == NULL)
    priority = 0;
  else if (seam->split2 == NULL) {
    priority = (seam->priority +
                full_split_priority (seam->split1, xmin, xmax));
  }
  else if (seam->split3 == NULL) {
    // Apply split2 so split1 is evaluated in the split-2 context.
    split_outline (seam->split2->point1, seam->split2->point2);
    priority = (seam->priority +
                full_split_priority (seam->split1, xmin, xmax));
    unsplit_outlines (seam->split2->point1, seam->split2->point2);
  }
  else {
    // Apply split2 and split3; undo in reverse order.
    split_outline (seam->split2->point1, seam->split2->point2);
    split_outline (seam->split3->point1, seam->split3->point2);
    priority = (seam->priority +
                full_split_priority (seam->split1, xmin, xmax));
    unsplit_outlines (seam->split3->point1, seam->split3->point2);
    unsplit_outlines (seam->split2->point1, seam->split2->point2);
  }
  return (priority);
}
/**********************************************************************
 * try_point_pairs
 *
 * Try all the splits that are produced by pairing critical points
 * together.  See if any of them are suitable for use.  Use a seam
 * queue and seam pile that have already been initialized and used.
 * A pair qualifies when it is short enough (weighted distance below
 * chop_split_length), the points are not adjacent on the outline, and
 * neither point is exterior to the other.
 **********************************************************************/
void Wordrec::try_point_pairs(EDGEPT * points[MAX_NUM_POINTS],
                              inT16 num_points,
                              SeamQueue* seam_queue,
                              SeamPile* seam_pile,
                              SEAM ** seam,
                              TBLOB * blob) {
  inT16 x;
  inT16 y;
  SPLIT *split;
  PRIORITY priority;
  // Examine each unordered pair (x, y) with y > x.
  for (x = 0; x < num_points; x++) {
    for (y = x + 1; y < num_points; y++) {
      if (points[y] &&
          weighted_edgept_dist(points[x], points[y],
                               chop_x_y_weight) < chop_split_length &&
          points[x] != points[y]->next &&
          points[y] != points[x]->next &&
          !is_exterior_point(points[x], points[y]) &&
          !is_exterior_point(points[y], points[x])) {
        split = new_split (points[x], points[y]);
        priority = partial_split_priority (split);
        choose_best_seam(seam_queue, split, priority, seam, blob, seam_pile);
      }
    }
  }
}
/**********************************************************************
 * try_vertical_splits
 *
 * Try all the splits that are produced by vertical projection to see
 * if any of them are suitable for use.  Use a seam queue and seam pile
 * that have already been initialized and used.
 * Return in new_points a collection of points that were inserted into
 * the blob while examining vertical splits and which may safely be
 * removed once a seam is chosen if they are not part of the seam.
 **********************************************************************/
void Wordrec::try_vertical_splits(EDGEPT * points[MAX_NUM_POINTS],
                                  inT16 num_points,
                                  EDGEPT_CLIST *new_points,
                                  SeamQueue* seam_queue,
                                  SeamPile* seam_pile,
                                  SEAM ** seam,
                                  TBLOB * blob) {
  EDGEPT *vertical_point = NULL;
  SPLIT *split;
  inT16 x;
  PRIORITY priority;
  TESSLINE *outline;
  for (x = 0; x < num_points; x++) {
    vertical_point = NULL;
    // Find the vertical-projection partner of points[x] across all
    // outlines; may insert new edge points, recorded in new_points.
    for (outline = blob->outlines; outline; outline = outline->next) {
      vertical_projection_point(points[x], outline->loop,
                                &vertical_point, new_points);
    }
    // Same qualification rules as point pairs: not adjacent on the
    // outline and short enough.
    if (vertical_point &&
        points[x] != vertical_point->next &&
        vertical_point != points[x]->next &&
        weighted_edgept_dist(points[x], vertical_point,
                             chop_x_y_weight) < chop_split_length) {
      split = new_split (points[x], vertical_point);
      priority = partial_split_priority (split);
      choose_best_seam(seam_queue, split, priority, seam, blob, seam_pile);
    }
  }
}
}
| 1080228-arabicocr11 | wordrec/findseam.cpp | C | asf20 | 17,147 |
/* -*-C-*-
********************************************************************************
*
* File: makechop.c (Formerly makechop.c)
* Description:
* Author: Mark Seaman, OCR Technology
* Created: Fri Oct 16 14:37:00 1987
* Modified: Mon Jul 29 15:50:42 1991 (Mark Seaman) marks@hpgrlt
* Language: C
* Package: N/A
* Status: Reusable Software Component
*
* (c) Copyright 1987, Hewlett-Packard Company.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
*********************************************************************************/
/*----------------------------------------------------------------------
I n c l u d e s
----------------------------------------------------------------------*/
#include "makechop.h"
#include "blobs.h"
#include "render.h"
#include "structures.h"
#ifdef __UNIX__
#include <assert.h>
#include <unistd.h>
#endif
// Include automatically generated configuration file if running autoconf.
#ifdef HAVE_CONFIG_H
#include "config_auto.h"
#endif
/*----------------------------------------------------------------------
Public Function Code
----------------------------------------------------------------------*/
/**********************************************************************
 * apply_seam
 *
 * Split blob into blob and other_blob by applying however many splits
 * (zero to three) the seam description contains.
 **********************************************************************/
void apply_seam(TBLOB *blob, TBLOB *other_blob, bool italic_blob, SEAM *seam) {
  if (seam->split1 == NULL) {
    // No splits needed: the blob already separates at the seam location.
    divide_blobs(blob, other_blob, italic_blob, seam->location);
  } else if (seam->split2 == NULL) {
    make_split_blobs(blob, other_blob, italic_blob, seam);
  } else if (seam->split3 == NULL) {
    make_double_split(blob, other_blob, italic_blob, seam);
  } else {
    make_triple_split(blob, other_blob, italic_blob, seam);
  }
}
/**********************************************************************
 * form_two_blobs
 *
 * Group the outlines from the first blob into both of them.  Do so
 * according to the information about the split.  After dividing,
 * duplicate outlines are removed from both blobs and their order is
 * corrected.
 **********************************************************************/
void form_two_blobs(TBLOB *blob, TBLOB *other_blob, bool italic_blob,
                    const TPOINT& location) {
  setup_blob_outlines(blob);
  divide_blobs(blob, other_blob, italic_blob, location);
  eliminate_duplicate_outlines(blob);
  eliminate_duplicate_outlines(other_blob);
  correct_blob_order(blob, other_blob);
}
/**********************************************************************
 * make_double_split
 *
 * Create two blobs out of one by splitting the original one in half,
 * applying both of the seam's splits to the outlines first.
 * Return the resultant blobs for classification.
 **********************************************************************/
void make_double_split(TBLOB *blob, TBLOB *other_blob, bool italic_blob,
                       SEAM *seam) {
  make_single_split(blob->outlines, seam->split1);
  make_single_split(blob->outlines, seam->split2);
  form_two_blobs(blob, other_blob, italic_blob, seam->location);
}
/**********************************************************************
 * make_single_split
 *
 * Create two outlines out of one by splitting the original one in half.
 * The split's two edge points each become the loop head of a new
 * TESSLINE appended to the end of the outline list.
 **********************************************************************/
void make_single_split(TESSLINE *outlines, SPLIT *split) {
  assert (outlines != NULL);
  // Break the edge-point loop at the split.
  split_outline (split->point1, split->point2);
  // Advance to the tail of the outline list.
  while (outlines->next != NULL)
    outlines = outlines->next;
  // Append a new outline for each half of the now-broken loop.
  outlines->next = new TESSLINE;
  outlines->next->loop = split->point1;
  outlines->next->ComputeBoundingBox();
  outlines = outlines->next;
  outlines->next = new TESSLINE;
  outlines->next->loop = split->point2;
  outlines->next->ComputeBoundingBox();
  outlines->next->next = NULL;
}
/**********************************************************************
 * make_split_blobs
 *
 * Create two blobs out of one by splitting the original one in half
 * using the seam's single split.
 * Return the resultant blobs for classification.
 **********************************************************************/
void make_split_blobs(TBLOB *blob, TBLOB *other_blob, bool italic_blob,
                      SEAM *seam) {
  make_single_split(blob->outlines, seam->split1);
  form_two_blobs (blob, other_blob, italic_blob, seam->location);
}
/**********************************************************************
 * make_triple_split
 *
 * Create two blobs out of one by splitting the original one in half.
 * This splitting is accomplished by applying three separate splits on
 * the outlines.  Three of the starting outlines will produce two ending
 * outlines.  Return the resultant blobs for classification.
 **********************************************************************/
void make_triple_split(TBLOB *blob, TBLOB *other_blob, bool italic_blob,
                       SEAM *seam) {
  make_single_split(blob->outlines, seam->split1);
  make_single_split(blob->outlines, seam->split2);
  make_single_split(blob->outlines, seam->split3);
  form_two_blobs(blob, other_blob, italic_blob, seam->location);
}
/**********************************************************************
 * undo_seam
 *
 * Remove the seam between these two blobs.  Produce one blob as a
 * result.  The seam may consist of one, two, or three splits.  Each
 * of these splits must be removed from the outlines.  other_blob is
 * deleted; its outlines are appended to blob's outline list.
 *
 * Fix: guard against blob having no outlines at all (previously
 * dereferenced a NULL outline pointer when both blobs were empty).
 **********************************************************************/
void undo_seam(TBLOB *blob, TBLOB *other_blob, SEAM *seam) {
  TESSLINE *outline;
  if (!seam)
    return;
  /* Append other blob outlines */
  if (blob->outlines == NULL) {
    blob->outlines = other_blob->outlines;
    other_blob->outlines = NULL;
  }
  outline = blob->outlines;
  if (outline != NULL) {
    // Walk to the tail of blob's outline list and splice on the
    // remaining outlines of other_blob (if any).
    while (outline->next)
      outline = outline->next;
    outline->next = other_blob->outlines;
  }
  other_blob->outlines = NULL;
  delete other_blob;
  // Undo the splits.  Note the asymmetric order: for the triple-split
  // case they are undone in reverse order of application.
  if (seam->split1 == NULL) {
  }
  else if (seam->split2 == NULL) {
    undo_single_split (blob, seam->split1);
  }
  else if (seam->split3 == NULL) {
    undo_single_split (blob, seam->split1);
    undo_single_split (blob, seam->split2);
  }
  else {
    undo_single_split (blob, seam->split3);
    undo_single_split (blob, seam->split2);
    undo_single_split (blob, seam->split1);
  }
  setup_blob_outlines(blob);
  eliminate_duplicate_outlines(blob);
}
/**********************************************************************
 * undo_single_split
 *
 * Undo a seam that is made by a single split: re-join the edge points
 * and push a fresh outline for each of the split's two endpoints onto
 * the front of the blob's outline list.
 **********************************************************************/
void undo_single_split(TBLOB *blob, SPLIT *split) {
  /* Modify edge points */
  unsplit_outlines (split->point1, split->point2);
  TESSLINE *first = new TESSLINE;
  first->loop = split->point1;
  first->next = blob->outlines;
  blob->outlines = first;
  TESSLINE *second = new TESSLINE;
  second->loop = split->point2;
  second->next = blob->outlines;
  blob->outlines = second;
}
| 1080228-arabicocr11 | wordrec/makechop.cpp | C | asf20 | 7,651 |
/* -*-C-*-
********************************************************************************
*
* File: chop.h (Formerly chop.h)
* Description:
* Author: Mark Seaman, SW Productivity
* Created: Fri Oct 16 14:37:00 1987
* Modified: Wed Jul 10 14:47:37 1991 (Mark Seaman) marks@hpgrlt
* Language: C
* Package: N/A
* Status: Reusable Software Component
*
* (c) Copyright 1987, Hewlett-Packard Company.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
*******************************************************************************/
#ifndef CHOP_H
#define CHOP_H
/*----------------------------------------------------------------------
I n c l u d e s
----------------------------------------------------------------------*/
#include "genericheap.h"
#include "kdpair.h"
#include "seam.h"
/*----------------------------------------------------------------------
T y p e s
---------------------------------------------------------------------*/
#define MAX_NUM_POINTS 50
// The PointPair elements do NOT own the EDGEPTs.
typedef tesseract::KDPairInc<float, EDGEPT*> PointPair;
typedef tesseract::GenericHeap<PointPair> PointHeap;
#endif
| 1080228-arabicocr11 | wordrec/chop.h | C | asf20 | 1,720 |
///////////////////////////////////////////////////////////////////////
// File: params_model.h
// Description: Trained feature serialization for language parameter training.
// Author: David Eger
// Created: Mon Jun 11 11:26:42 PDT 2012
//
// (C) Copyright 2011, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#ifndef TESSERACT_WORDREC_PARAMS_MODEL_H_
#define TESSERACT_WORDREC_PARAMS_MODEL_H_
#include "params_training_featdef.h"
#include "ratngs.h"
#include "strngs.h"
namespace tesseract {
// Represents the learned weights for a given language.
// Holds one weight vector per OCR pass; pass_ selects which vector the
// accessor methods and ComputeCost() use.
class ParamsModel {
 public:
  // Enum for expressing OCR pass.
  enum PassEnum {
    PTRAIN_PASS1,
    PTRAIN_PASS2,

    PTRAIN_NUM_PASSES
  };

  ParamsModel() : pass_(PTRAIN_PASS1) {}
  // Constructs a model for the given language with pass-1 weights.
  ParamsModel(const char *lang, const GenericVector<float> &weights) :
    lang_(lang), pass_(PTRAIN_PASS1) { weights_vec_[pass_] = weights; }

  // True when the current pass has a full set of feature weights.
  inline bool Initialized() {
    return weights_vec_[pass_].size() == PTRAIN_NUM_FEATURE_TYPES;
  }
  // Prints out feature weights.
  void Print();
  // Clears weights for all passes.
  void Clear() {
    for (int p = 0; p < PTRAIN_NUM_PASSES; ++p) weights_vec_[p].clear();
  }
  // Copies the weights of the given params model.
  void Copy(const ParamsModel &other_model);
  // Applies params model weights to the given features.
  // Assumes that features is an array of size PTRAIN_NUM_FEATURE_TYPES.
  float ComputeCost(const float features[]) const;
  bool Equivalent(const ParamsModel &that) const;
  // Returns true on success.
  bool SaveToFile(const char *full_path) const;

  // Returns true on success.
  bool LoadFromFile(const char *lang, const char *full_path);
  bool LoadFromFp(const char *lang, FILE *fp, inT64 end_offset);

  // Weights for the currently selected pass.
  const GenericVector<float>& weights() const {
    return weights_vec_[pass_];
  }
  const GenericVector<float>& weights_for_pass(PassEnum pass) const {
    return weights_vec_[pass];
  }
  void SetPass(PassEnum pass) { pass_ = pass; }

 private:
  // Parses a "key value" line; returns false on malformed input.
  bool ParseLine(char *line, char **key, float *val);

  STRING lang_;
  // Set to the current pass type and used to determine which set of weights
  // should be used for ComputeCost() and other functions.
  PassEnum pass_;
  // Several sets of weights for various OCR passes (e.g. pass1 with adaption,
  // pass2 without adaption, etc).
  GenericVector<float> weights_vec_[PTRAIN_NUM_PASSES];
};
} // namespace tesseract
#endif // TESSERACT_WORDREC_PARAMS_MODEL_H_
| 1080228-arabicocr11 | wordrec/params_model.h | C++ | asf20 | 3,051 |
/**********************************************************************
* File: drawfx.cpp (Formerly drawfx.c)
* Description: Draw things to do with feature extraction.
* Author: Ray Smith
* Created: Mon Jan 27 11:02:16 GMT 1992
*
* (C) Copyright 1992, Hewlett-Packard Ltd.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
#ifdef HAVE_CONFIG_H
#include "config_auto.h"
#endif
#include "drawfx.h"
#include "normalis.h"
#include "werd.h"
#ifndef GRAPHICS_DISABLED
#define FXDEMOWIN "FXDemo"
#define FXDEMOXPOS 250
#define FXDEMOYPOS 0
#define FXDEMOXSIZE 600
#define FXDEMOYSIZE 256
#define BLN_MAX 512 //max coord for bln
#define WERDWIDTH (BLN_MAX*20)
#define DECENT_WERD_WIDTH (5*kBlnXHeight)
//title of window
#define DEBUG_WIN_NAME "FXDebug"
#define DEBUG_XPOS 0
#define DEBUG_YPOS 120
#define DEBUG_XSIZE 80
#define DEBUG_YSIZE 32
#define YMAX 3508
#define XMAX 2550
#define MAXEDGELENGTH 1024 //max steps inoutline
#define EXTERN
EXTERN STRING_VAR (fx_debugfile, DEBUG_WIN_NAME, "Name of debugfile");
EXTERN ScrollView* fx_win = NULL;
EXTERN FILE *fx_debug = NULL;
/**********************************************************************
 * create_fx_win
 *
 * Create the fx window used to show the fit.  Stores the new window in
 * the global fx_win.
 **********************************************************************/
void create_fx_win() {  //make features win
  fx_win = new ScrollView (FXDEMOWIN,
                           FXDEMOXPOS, FXDEMOYPOS, FXDEMOXSIZE, FXDEMOYSIZE,
                           WERDWIDTH*2, BLN_MAX*2, true);
}
/**********************************************************************
 * clear_fx_win
 *
 * Clear the fx window and draw the baseline and x-height reference
 * lines in grey across its full width.
 **********************************************************************/
void clear_fx_win() {  //reset features win
  const int baseline_y = kBlnBaselineOffset;
  const int xheight_y = kBlnXHeight + kBlnBaselineOffset;
  fx_win->Clear();
  fx_win->Pen(64, 64, 64);
  fx_win->Line(-WERDWIDTH, baseline_y, WERDWIDTH, baseline_y);
  fx_win->Line(-WERDWIDTH, xheight_y, WERDWIDTH, xheight_y);
}
#endif // GRAPHICS_DISABLED
/**********************************************************************
 * create_fxdebug_win
 *
 * Create the fx debug window.  Intentionally a no-op stub (kept so the
 * declaration in drawfx.h always resolves, even with graphics off).
 **********************************************************************/
void create_fxdebug_win() {  //make gradients win
}
| 1080228-arabicocr11 | wordrec/drawfx.cpp | C++ | asf20 | 3,014 |
/**********************************************************************
* File: drawfx.h (Formerly drawfx.h)
* Description: Draw things to do with feature extraction.
* Author: Ray Smith
* Created: Mon Jan 27 11:02:16 GMT 1992
*
* (C) Copyright 1992, Hewlett-Packard Ltd.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
#ifndef DRAWFX_H
#define DRAWFX_H
#include "params.h"
#include "scrollview.h"
extern STRING_VAR_H (fx_debugfile, DEBUG_WIN_NAME, "Name of debugfile");
extern ScrollView* fx_win;
extern FILE *fx_debug;
void create_fx_win(); //make features win
void clear_fx_win(); //make features win
void create_fxdebug_win(); //make gradients win
#endif
| 1080228-arabicocr11 | wordrec/drawfx.h | C | asf20 | 1,294 |
///////////////////////////////////////////////////////////////////////
// File: wordrec.h
// Description: wordrec class.
// Author: Samuel Charron
//
// (C) Copyright 2006, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#ifndef TESSERACT_WORDREC_WORDREC_H__
#define TESSERACT_WORDREC_WORDREC_H__
#include "associate.h"
#include "classify.h"
#include "dict.h"
#include "language_model.h"
#include "ratngs.h"
#include "matrix.h"
#include "gradechop.h"
#include "seam.h"
#include "findseam.h"
#include "callcpp.h"
class WERD_RES;
namespace tesseract {
// A class for storing which nodes are to be processed by the segmentation
// search. There is a single SegSearchPending for each column in the ratings
// matrix, and it indicates whether the segsearch should combine all
// BLOB_CHOICES in the column, or just the given row with the parents
// corresponding to *this SegSearchPending, and whether only updated parent
// ViterbiStateEntries should be combined, or all, with the BLOB_CHOICEs.
// Tracks the outstanding update work for one column of the ratings matrix
// during segmentation search. A column can be flagged as wholly classified,
// wholly in need of revisiting, or as having a single freshly classified row;
// the flags combine additively rather than overwriting one another.
class SegSearchPending {
 public:
  SegSearchPending()
      : classified_row_(-1),
        revisit_whole_column_(false),
        column_classified_(false) {}

  // Flags every row of the column as freshly classified, as happens when a
  // ratings matrix has just been initialized.
  void SetColumnClassified() { column_classified_ = true; }
  // Records that the matrix entry at the given row has just been classified.
  // Additive with (does not override) an earlier RevisitWholeColumn call.
  void SetBlobClassified(int row) { classified_row_ = row; }
  // Requests that the whole column be reprocessed (used when the parent vse
  // list changes) without marking anything as newly classified.
  // Additive with (does not override) an earlier SetBlobClassified call.
  void RevisitWholeColumn() { revisit_whole_column_ = true; }
  // Resets *this so that no work remains.
  void Clear() {
    classified_row_ = -1;
    revisit_whole_column_ = false;
    column_classified_ = false;
  }
  // Returns true if any update remains to be done in the column that *this
  // represents.
  bool WorkToDo() const {
    if (revisit_whole_column_ || column_classified_) return true;
    return classified_row_ >= 0;
  }
  // Returns true if the given row counts as just classified.
  bool IsRowJustClassified(int row) const {
    return column_classified_ || row == classified_row_;
  }
  // Returns the lone row to process if there is exactly one, otherwise -1.
  int SingleRow() const {
    if (revisit_whole_column_ || column_classified_) return -1;
    return classified_row_;
  }

 private:
  // Row that was just classified (or -1). That row gets combined with all the
  // parents in this column. Operates independently of revisit_whole_column_.
  int classified_row_;
  // When true, all BLOB_CHOICEs in this column are processed; classified_row_
  // may still mark one row as newly classified. Overridden when
  // column_classified_ is true.
  bool revisit_whole_column_;
  // When true, parent vses are processed with all rows regardless of whether
  // they were just updated, overriding the other two members.
  bool column_classified_;
};
/* ccmain/tstruct.cpp *********************************************************/
// A run of an outline between two edge points, storing both the coordinates
// of the endpoints and pointers to the EDGEPTs themselves. Element of an
// ELIST (see ELISTIZEH below).
class FRAGMENT:public ELIST_LINK
{
  public:
    FRAGMENT() {  //constructor (members left uninitialized)
    }
    FRAGMENT(EDGEPT *head_pt,   //start
             EDGEPT *tail_pt);  //end

    ICOORD head;                 //coords of start
    ICOORD tail;                 //coords of end
    EDGEPT *headpt;              //start point
    EDGEPT *tailpt;              //end point
};
ELISTIZEH(FRAGMENT)
// Top-level word recognizer class: extends Classify with chopping,
// association and segmentation-search logic. The comments below each group
// of declarations name the wordrec/*.cpp file that implements them.
class Wordrec : public Classify {
 public:
  // config parameters *******************************************************

  BOOL_VAR_H(merge_fragments_in_matrix, TRUE,
             "Merge the fragments in the ratings matrix and delete them "
             "after merging");
  BOOL_VAR_H(wordrec_no_block, FALSE, "Don't output block information");
  BOOL_VAR_H(wordrec_enable_assoc, TRUE, "Associator Enable");
  BOOL_VAR_H(force_word_assoc, FALSE,
             "force associator to run regardless of what enable_assoc is."
             "This is used for CJK where component grouping is necessary.");
  double_VAR_H(wordrec_worst_state, 1, "Worst segmentation state");
  BOOL_VAR_H(fragments_guide_chopper, FALSE,
             "Use information from fragments to guide chopping process");
  INT_VAR_H(repair_unchopped_blobs, 1, "Fix blobs that aren't chopped");
  double_VAR_H(tessedit_certainty_threshold, -2.25, "Good blob limit");
  INT_VAR_H(chop_debug, 0, "Chop debug");
  BOOL_VAR_H(chop_enable, 1, "Chop enable");
  BOOL_VAR_H(chop_vertical_creep, 0, "Vertical creep");
  INT_VAR_H(chop_split_length, 10000, "Split Length");
  INT_VAR_H(chop_same_distance, 2, "Same distance");
  INT_VAR_H(chop_min_outline_points, 6, "Min Number of Points on Outline");
  INT_VAR_H(chop_seam_pile_size, 150, "Max number of seams in seam_pile");
  BOOL_VAR_H(chop_new_seam_pile, 1, "Use new seam_pile");
  INT_VAR_H(chop_inside_angle, -50, "Min Inside Angle Bend");
  INT_VAR_H(chop_min_outline_area, 2000, "Min Outline Area");
  double_VAR_H(chop_split_dist_knob, 0.5, "Split length adjustment");
  double_VAR_H(chop_overlap_knob, 0.9, "Split overlap adjustment");
  double_VAR_H(chop_center_knob, 0.15, "Split center adjustment");
  INT_VAR_H(chop_centered_maxwidth, 90, "Width of (smaller) chopped blobs "
            "above which we don't care that a chop is not near the center.");
  double_VAR_H(chop_sharpness_knob, 0.06, "Split sharpness adjustment");
  double_VAR_H(chop_width_change_knob, 5.0, "Width change adjustment");
  double_VAR_H(chop_ok_split, 100.0, "OK split limit");
  double_VAR_H(chop_good_split, 50.0, "Good split limit");
  INT_VAR_H(chop_x_y_weight, 3, "X / Y length weight");
  INT_VAR_H(segment_adjust_debug, 0, "Segmentation adjustment debug");
  BOOL_VAR_H(assume_fixed_pitch_char_segment, FALSE,
             "include fixed-pitch heuristics in char segmentation");
  INT_VAR_H(wordrec_debug_level, 0, "Debug level for wordrec");
  INT_VAR_H(wordrec_max_join_chunks, 4,
            "Max number of broken pieces to associate");
  BOOL_VAR_H(wordrec_skip_no_truth_words, false,
             "Only run OCR for words that had truth recorded in BlamerBundle");
  BOOL_VAR_H(wordrec_debug_blamer, false, "Print blamer debug messages");
  BOOL_VAR_H(wordrec_run_blamer, false, "Try to set the blame for errors");
  INT_VAR_H(segsearch_debug_level, 0, "SegSearch debug level");
  INT_VAR_H(segsearch_max_pain_points, 2000,
            "Maximum number of pain points stored in the queue");
  INT_VAR_H(segsearch_max_futile_classifications, 10,
            "Maximum number of pain point classifications per word.");
  double_VAR_H(segsearch_max_char_wh_ratio, 2.0,
               "Maximum character width-to-height ratio");
  BOOL_VAR_H(save_alt_choices, true,
             "Save alternative paths found during chopping "
             "and segmentation search");

  // methods from wordrec/*.cpp ***********************************************
  Wordrec();
  virtual ~Wordrec();

  // Fills word->alt_choices with alternative paths found during
  // chopping/segmentation search that are kept in best_choices.
  void SaveAltChoices(const LIST &best_choices, WERD_RES *word);

  // Fills character choice lattice in the given BlamerBundle
  // using the given ratings matrix and best choice list.
  void FillLattice(const MATRIX &ratings, const WERD_CHOICE_LIST &best_choices,
                   const UNICHARSET &unicharset, BlamerBundle *blamer_bundle);

  // Calls fill_lattice_ member function
  // (assumes that fill_lattice_ is not NULL).
  void CallFillLattice(const MATRIX &ratings,
                       const WERD_CHOICE_LIST &best_choices,
                       const UNICHARSET &unicharset,
                       BlamerBundle *blamer_bundle) {
    (this->*fill_lattice_)(ratings, best_choices, unicharset, blamer_bundle);
  }

  // tface.cpp
  void program_editup(const char *textbase,
                      bool init_classifier,
                      bool init_permute);
  void cc_recog(WERD_RES *word);
  void program_editdown(inT32 elasped_time);
  void set_pass1();
  void set_pass2();
  int end_recog();
  BLOB_CHOICE_LIST *call_matcher(TBLOB* blob);
  int dict_word(const WERD_CHOICE &word);

  // wordclass.cpp
  BLOB_CHOICE_LIST *classify_blob(TBLOB *blob,
                                  const char *string,
                                  C_COL color,
                                  BlamerBundle *blamer_bundle);

  // segsearch.cpp
  // SegSearch works on the lower diagonal matrix of BLOB_CHOICE_LISTs.
  // Each entry in the matrix represents the classification choice
  // for a chunk, i.e. an entry in row 2, column 1 represents the list
  // of ratings for the chunks 1 and 2 classified as a single blob.
  // The entries on the diagonal of the matrix are classifier choice lists
  // for a single chunk from the maximal segmentation.
  //
  // The ratings matrix given to SegSearch represents the segmentation
  // graph / trellis for the current word. The nodes in the graph are the
  // individual BLOB_CHOICEs in each of the BLOB_CHOICE_LISTs in the ratings
  // matrix. The children of each node (nodes connected by outgoing links)
  // are the entries in the column that is equal to node's row+1. The parents
  // (nodes connected by the incoming links) are the entries in the row that
  // is equal to the node's column-1. Here is an example ratings matrix:
  //
  //             0          1          2         3         4
  //         -------------------------
  //        0| c,(         |
  //        1| d    l,1    |
  //        2|             o           |
  //        3|                   c,(   |
  //        4|             g,y   l,1   |
  //         -------------------------
  //
  // In the example above node "o" has children (outgoing connection to nodes)
  // "c","(","g","y" and parents (incoming connections from nodes) "l","1","d".
  //
  // The objective of the search is to find the least cost path, where the cost
  // is determined by the language model components and the properties of the
  // cut between the blobs on the path. SegSearch starts by populating the
  // matrix with the all the entries that were classified by the chopper and
  // finding the initial best path. Based on the classifier ratings, language
  // model scores and the properties of each cut, a list of "pain points" is
  // constructed - those are the points on the path where the choices do not
  // look consistent with the neighboring choices, the cuts look particularly
  // problematic, or the certainties of the blobs are low. The most troublesome
  // "pain point" is picked from the list and the new entry in the ratings
  // matrix corresponding to this "pain point" is filled in. Then the language
  // model state is updated to reflect the new classification and the new
  // "pain points" are added to the list and the next most troublesome
  // "pain point" is determined. This continues until either the word choice
  // composed from the best paths in the segmentation graph is "good enough"
  // (e.g. above a certain certainty threshold, is an unambiguous dictionary
  // word, etc) or there are no more "pain points" to explore.
  //
  // If associate_blobs is set to false no new classifications will be done
  // to combine blobs. Segmentation search will run only one "iteration"
  // on the classifications already recorded in chunks_record.ratings.
  //
  // Note: this function assumes that word_res, best_choice_bundle arguments
  // are not NULL.
  void SegSearch(WERD_RES* word_res,
                 BestChoiceBundle* best_choice_bundle,
                 BlamerBundle* blamer_bundle);

  // Setup and run just the initial segsearch on an established matrix,
  // without doing any additional chopping or joining.
  void WordSearch(WERD_RES* word_res);

  // Setup and run just the initial segsearch on an established matrix,
  // without doing any additional chopping or joining.
  // (Internal factored version that can be used as part of the main SegSearch.)
  void InitialSegSearch(WERD_RES* word_res, LMPainPoints* pain_points,
                        GenericVector<SegSearchPending>* pending,
                        BestChoiceBundle* best_choice_bundle,
                        BlamerBundle* blamer_bundle);

  // Runs SegSearch() function (above) without needing a best_choice_bundle
  // or blamer_bundle. Used for testing.
  void DoSegSearch(WERD_RES* word_res);

  // chop.cpp
  PRIORITY point_priority(EDGEPT *point);
  void add_point_to_list(PointHeap* point_heap, EDGEPT *point);
  int angle_change(EDGEPT *point1, EDGEPT *point2, EDGEPT *point3);
  int is_little_chunk(EDGEPT *point1, EDGEPT *point2);
  int is_small_area(EDGEPT *point1, EDGEPT *point2);
  EDGEPT *pick_close_point(EDGEPT *critical_point,
                           EDGEPT *vertical_point,
                           int *best_dist);
  void prioritize_points(TESSLINE *outline, PointHeap* points);
  void new_min_point(EDGEPT *local_min, PointHeap* points);
  void new_max_point(EDGEPT *local_max, PointHeap* points);
  void vertical_projection_point(EDGEPT *split_point, EDGEPT *target_point,
                                 EDGEPT** best_point,
                                 EDGEPT_CLIST *new_points);

  // chopper.cpp
  SEAM *attempt_blob_chop(TWERD *word, TBLOB *blob, inT32 blob_number,
                          bool italic_blob, const GenericVector<SEAM*>& seams);
  SEAM *chop_numbered_blob(TWERD *word, inT32 blob_number,
                           bool italic_blob, const GenericVector<SEAM*>& seams);
  SEAM *chop_overlapping_blob(const GenericVector<TBOX>& boxes,
                              bool italic_blob,
                              WERD_RES *word_res, int *blob_number);
  SEAM *improve_one_blob(const GenericVector<BLOB_CHOICE*> &blob_choices,
                         DANGERR *fixpt,
                         bool split_next_to_fragment,
                         bool italic_blob,
                         WERD_RES *word,
                         int *blob_number);
  SEAM *chop_one_blob(const GenericVector<TBOX> &boxes,
                      const GenericVector<BLOB_CHOICE*> &blob_choices,
                      WERD_RES *word_res,
                      int *blob_number);
  void chop_word_main(WERD_RES *word);
  void improve_by_chopping(float rating_cert_scale,
                           WERD_RES *word,
                           BestChoiceBundle *best_choice_bundle,
                           BlamerBundle *blamer_bundle,
                           LMPainPoints *pain_points,
                           GenericVector<SegSearchPending>* pending);
  int select_blob_to_split(const GenericVector<BLOB_CHOICE*> &blob_choices,
                           float rating_ceiling,
                           bool split_next_to_fragment);
  int select_blob_to_split_from_fixpt(DANGERR *fixpt);

  // findseam.cpp
  void add_seam_to_queue(float new_priority, SEAM *new_seam, SeamQueue* seams);
  void choose_best_seam(SeamQueue* seam_queue,
                        SPLIT *split,
                        PRIORITY priority,
                        SEAM **seam_result,
                        TBLOB *blob,
                        SeamPile* seam_pile);
  void combine_seam(const SeamPile& seam_pile,
                    const SEAM* seam, SeamQueue* seam_queue);
  inT16 constrained_split(SPLIT *split, TBLOB *blob);
  SEAM *pick_good_seam(TBLOB *blob);
  PRIORITY seam_priority(SEAM *seam, inT16 xmin, inT16 xmax);
  void try_point_pairs (EDGEPT * points[MAX_NUM_POINTS],
                        inT16 num_points,
                        SeamQueue* seam_queue,
                        SeamPile* seam_pile,
                        SEAM ** seam, TBLOB * blob);
  void try_vertical_splits(EDGEPT * points[MAX_NUM_POINTS],
                           inT16 num_points,
                           EDGEPT_CLIST *new_points,
                           SeamQueue* seam_queue,
                           SeamPile* seam_pile,
                           SEAM ** seam, TBLOB * blob);

  // gradechop.cpp
  PRIORITY full_split_priority(SPLIT *split, inT16 xmin, inT16 xmax);
  PRIORITY grade_center_of_blob(register BOUNDS_RECT rect);
  PRIORITY grade_overlap(register BOUNDS_RECT rect);
  PRIORITY grade_split_length(register SPLIT *split);
  PRIORITY grade_sharpness(register SPLIT *split);
  PRIORITY grade_width_change(register BOUNDS_RECT rect);
  void set_outline_bounds(register EDGEPT *point1,
                          register EDGEPT *point2,
                          BOUNDS_RECT rect);

  // outlines.cpp
  int crosses_outline(EDGEPT *p0, EDGEPT *p1, EDGEPT *outline);
  int is_crossed(TPOINT a0, TPOINT a1, TPOINT b0, TPOINT b1);
  int is_same_edgept(EDGEPT *p1, EDGEPT *p2);
  bool near_point(EDGEPT *point, EDGEPT *line_pt_0, EDGEPT *line_pt_1,
                  EDGEPT **near_pt);
  void reverse_outline(EDGEPT *outline);

  // pieces.cpp
  virtual BLOB_CHOICE_LIST *classify_piece(const GenericVector<SEAM*>& seams,
                                           inT16 start,
                                           inT16 end,
                                           const char* description,
                                           TWERD *word,
                                           BlamerBundle *blamer_bundle);
  // Try to merge fragments in the ratings matrix and put the result in
  // the corresponding row and column
  void merge_fragments(MATRIX *ratings,
                       inT16 num_blobs);
  // Recursively go through the ratings matrix to find lists of fragments
  // to be merged in the function merge_and_put_fragment_lists.
  // current_frag is the position of the piece we are looking for.
  // current_row is the row in the rating matrix we are currently at.
  // start is the row we started initially, so that we can know where
  // to append the results to the matrix. num_frag_parts is the total
  // number of pieces we are looking for and num_blobs is the size of the
  // ratings matrix.
  void get_fragment_lists(inT16 current_frag,
                          inT16 current_row,
                          inT16 start,
                          inT16 num_frag_parts,
                          inT16 num_blobs,
                          MATRIX *ratings,
                          BLOB_CHOICE_LIST *choice_lists);
  // Merge the fragment lists in choice_lists and append it to the
  // ratings matrix
  void merge_and_put_fragment_lists(inT16 row,
                                    inT16 column,
                                    inT16 num_frag_parts,
                                    BLOB_CHOICE_LIST *choice_lists,
                                    MATRIX *ratings);
  // Filter the fragment list so that the filtered_choices only contain
  // fragments that are in the correct position. choices is the list
  // that we are going to filter. fragment_pos is the position in the
  // fragment that we are looking for and num_frag_parts is the
  // total number of pieces. The result will be appended to
  // filtered_choices.
  void fill_filtered_fragment_list(BLOB_CHOICE_LIST *choices,
                                   int fragment_pos,
                                   int num_frag_parts,
                                   BLOB_CHOICE_LIST *filtered_choices);

  // Member variables.

  LanguageModel *language_model_;
  PRIORITY pass2_ok_split;
  // Stores the best choice for the previous word in the paragraph.
  // This variable is modified by PAGE_RES_IT when iterating over
  // words to OCR on the page.
  WERD_CHOICE *prev_word_best_choice_;
  // Sums of blame reasons computed by the blamer.
  GenericVector<int> blame_reasons_;
  // Function used to fill char choice lattices.
  void (Wordrec::*fill_lattice_)(const MATRIX &ratings,
                                 const WERD_CHOICE_LIST &best_choices,
                                 const UNICHARSET &unicharset,
                                 BlamerBundle *blamer_bundle);

 protected:
  // Returns true when the segmentation search should stop: either the
  // language model has accepted a choice or the futile-classification
  // budget is exhausted.
  inline bool SegSearchDone(int num_futile_classifications) {
    return (language_model_->AcceptableChoiceFound() ||
            num_futile_classifications >=
            segsearch_max_futile_classifications);
  }

  // Updates the language model state recorded for the child entries specified
  // in pending[starting_col]. Enqueues the children of the updated entries
  // into pending and proceeds to update (and remove from pending) all the
  // remaining entries in pending[col] (col >= starting_col). Upon termination
  // of this function all the pending[col] lists will be empty.
  //
  // The arguments:
  //
  // starting_col: index of the column in chunks_record->ratings from
  // which the update should be started
  //
  // pending: list of entries listing chunks_record->ratings entries
  // that should be updated
  //
  // pain_points: priority heap listing the pain points generated by
  // the language model
  //
  // temp_pain_points: temporary storage for tentative pain points generated
  // by the language model after a single call to LanguageModel::UpdateState()
  // (the argument is passed in rather than created before each
  // LanguageModel::UpdateState() call to avoid dynamic memory re-allocation)
  //
  // best_choice_bundle: a collection of variables that should be updated
  // if a new best choice is found
  //
  void UpdateSegSearchNodes(
      float rating_cert_scale,
      int starting_col,
      GenericVector<SegSearchPending>* pending,
      WERD_RES *word_res,
      LMPainPoints *pain_points,
      BestChoiceBundle *best_choice_bundle,
      BlamerBundle *blamer_bundle);

  // Process the given pain point: classify the corresponding blob, enqueue
  // new pain points to join the newly classified blob with its neighbors.
  void ProcessSegSearchPainPoint(float pain_point_priority,
                                 const MATRIX_COORD &pain_point,
                                 const char* pain_point_type,
                                 GenericVector<SegSearchPending>* pending,
                                 WERD_RES *word_res,
                                 LMPainPoints *pain_points,
                                 BlamerBundle *blamer_bundle);
  // Resets enough of the results so that the Viterbi search is re-run.
  // Needed when the n-gram model is enabled, as the multi-length comparison
  // implementation will re-value existing paths to worse values.
  void ResetNGramSearch(WERD_RES* word_res,
                        BestChoiceBundle* best_choice_bundle,
                        GenericVector<SegSearchPending>* pending);

  // Add pain points for classifying blobs on the correct segmentation path
  // (so that we can evaluate correct segmentation path and discover the reason
  // for incorrect result).
  void InitBlamerForSegSearch(WERD_RES *word_res,
                              LMPainPoints *pain_points,
                              BlamerBundle *blamer_bundle,
                              STRING *blamer_debug);
};
} // namespace tesseract
#endif // TESSERACT_WORDREC_WORDREC_H__
| 1080228-arabicocr11 | wordrec/wordrec.h | C++ | asf20 | 23,833 |
///////////////////////////////////////////////////////////////////////
// File: params_model.cpp
// Description: Trained language model parameters.
// Author: David Eger
// Created: Mon Jun 11 11:26:42 PDT 2012
//
// (C) Copyright 2012, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#include "params_model.h"
#include <ctype.h>
#include <math.h>
#include <stdio.h>
#include "bitvector.h"
#include "tprintf.h"
namespace tesseract {
// Scale factor to apply to params model scores.
static const float kScoreScaleFactor = 100.0f;
// Minimum cost result to return.
static const float kMinFinalCost = 0.001f;
// Maximum cost result to return.
static const float kMaxFinalCost = 100.0f;
void ParamsModel::Print() {
for (int p = 0; p < PTRAIN_NUM_PASSES; ++p) {
tprintf("ParamsModel for pass %d lang %s\n", p, lang_.string());
for (int i = 0; i < weights_vec_[p].size(); ++i) {
tprintf("%s = %g\n", kParamsTrainingFeatureTypeName[i],
weights_vec_[p][i]);
}
}
}
void ParamsModel::Copy(const ParamsModel &other_model) {
for (int p = 0; p < PTRAIN_NUM_PASSES; ++p) {
weights_vec_[p] = other_model.weights_for_pass(
static_cast<PassEnum>(p));
}
}
// Given a (modifiable) line, parse out a key / value pair.
// Return true on success.
// Given a (modifiable) line, parse out a key / value pair: the key is the
// leading run of non-whitespace characters (NUL-terminated in place, with
// *key pointed at it) and the value is the first float after it.
// Lines starting with '#' are treated as comments and rejected.
// Return true on success.
bool ParamsModel::ParseLine(char *line, char** key, float *val) {
  if (line[0] == '#')
    return false;
  int end_of_key = 0;
  // Bug fix: isspace() has undefined behavior when handed a plain char whose
  // value is negative (possible for non-ASCII bytes on signed-char
  // platforms); cast through unsigned char as the C standard requires.
  while (line[end_of_key] &&
         !isspace(static_cast<unsigned char>(line[end_of_key])))
    end_of_key++;
  if (!line[end_of_key]) {
    // No whitespace found, so there is no room for a value.
    tprintf("ParamsModel::Incomplete line %s\n", line);
    return false;
  }
  line[end_of_key++] = 0;
  *key = line;
  if (sscanf(line + end_of_key, " %f", val) != 1)
    return false;
  return true;
}
// Applies params model weights to the given features.
// Assumes that features is an array of size PTRAIN_NUM_FEATURE_TYPES.
// The cost is set to a number that can be multiplied by the outline length,
// as with the old ratings scheme. This enables words of different length
// and combinations of words to be compared meaningfully.
float ParamsModel::ComputeCost(const float features[]) const {
float unnorm_score = 0.0;
for (int f = 0; f < PTRAIN_NUM_FEATURE_TYPES; ++f) {
unnorm_score += weights_vec_[pass_][f] * features[f];
}
return ClipToRange(-unnorm_score / kScoreScaleFactor,
kMinFinalCost, kMaxFinalCost);
}
bool ParamsModel::Equivalent(const ParamsModel &that) const {
float epsilon = 0.0001;
for (int p = 0; p < PTRAIN_NUM_PASSES; ++p) {
if (weights_vec_[p].size() != that.weights_vec_[p].size()) return false;
for (int i = 0; i < weights_vec_[p].size(); i++) {
if (weights_vec_[p][i] != that.weights_vec_[p][i] &&
fabs(weights_vec_[p][i] - that.weights_vec_[p][i]) > epsilon)
return false;
}
}
return true;
}
// Opens full_path and loads the model weights for the given language from it
// via LoadFromFp. Logs and returns false if the file cannot be opened or the
// weight set is incomplete.
bool ParamsModel::LoadFromFile(
    const char *lang,
    const char *full_path) {
  FILE *fp = fopen(full_path, "rb");
  if (fp == NULL) {
    tprintf("Error opening file %s\n", full_path);
    return false;
  }
  const bool result = LoadFromFp(lang, fp, -1);
  fclose(fp);
  return result;
}
// Reads "name value" weight lines from fp until end_offset (or EOF when
// end_offset < 0), storing them into the current pass's weight vector.
// Unknown parameter names are logged and skipped. Returns true only if every
// feature in PTRAIN_NUM_FEATURE_TYPES was seen; on an incomplete load the
// missing fields are logged and lang_/weights are reset to empty.
bool ParamsModel::LoadFromFp(const char *lang, FILE *fp, inT64 end_offset) {
  const int kMaxLineSize = 100;
  char line[kMaxLineSize];
  // Tracks which feature indices have been assigned, to detect missing ones.
  BitVector present;
  present.Init(PTRAIN_NUM_FEATURE_TYPES);
  lang_ = lang;
  // Load weights for passes with adaption on.
  GenericVector<float> &weights = weights_vec_[pass_];
  weights.init_to_size(PTRAIN_NUM_FEATURE_TYPES, 0.0);

  while ((end_offset < 0 || ftell(fp) < end_offset) &&
         fgets(line, kMaxLineSize, fp)) {
    char *key = NULL;
    float value;
    if (!ParseLine(line, &key, &value))
      continue;  // comment or malformed line — skip
    int idx = ParamsTrainingFeatureByName(key);
    if (idx < 0) {
      tprintf("ParamsModel::Unknown parameter %s\n", key);
      continue;
    }
    if (!present[idx]) {
      present.SetValue(idx, true);
    }
    weights[idx] = value;  // later duplicates overwrite earlier values
  }
  bool complete = (present.NumSetBits() == PTRAIN_NUM_FEATURE_TYPES);
  if (!complete) {
    // Report every missing field, then leave the model in an empty state so
    // it cannot be used half-initialized.
    for (int i = 0; i < PTRAIN_NUM_FEATURE_TYPES; i++) {
      if (!present[i]) {
        tprintf("Missing field %s.\n", kParamsTrainingFeatureTypeName[i]);
      }
    }
    lang_ = "";
    weights.truncate(0);
  }
  return complete;
}
// Writes the current pass's weights to full_path, one "name value" pair per
// line. Refuses to save an uninitialized model. Returns true only when the
// file was opened and every line was written without error.
bool ParamsModel::SaveToFile(const char *full_path) const {
  const GenericVector<float> &weights = weights_vec_[pass_];
  if (weights.size() != PTRAIN_NUM_FEATURE_TYPES) {
    tprintf("Refusing to save ParamsModel that has not been initialized.\n");
    return false;
  }
  FILE *fp = fopen(full_path, "wb");
  if (fp == NULL) {
    tprintf("Could not open %s for writing.\n", full_path);
    return false;
  }
  bool all_good = true;
  for (int i = 0; i < weights.size(); i++) {
    int written = fprintf(fp, "%s %f\n", kParamsTrainingFeatureTypeName[i],
                          weights[i]);
    if (written < 0) all_good = false;
  }
  fclose(fp);
  return all_good;
}
} // namespace tesseract
| 1080228-arabicocr11 | wordrec/params_model.cpp | C++ | asf20 | 5,498 |
/* -*-C-*-
********************************************************************************
*
* File: render.h (Formerly render.h)
* Description: Convert the various data type into line lists
* Author: Mark Seaman, OCR Technology
* Created: Fri Jul 28 13:14:48 1989
* Modified: Fri Apr 26 09:59:45 1991 (Mark Seaman) marks@hpgrlt
* Language: C
* Package: N/A
* Status: Experimental (Do Not Distribute)
*
* (c) Copyright 1989, Hewlett-Packard Company.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
*********************************************************************************/
#ifndef RENDER_H
#define RENDER_H
#include "host.h"
#include "callcpp.h"
#include "blobs.h"
/*----------------------------------------------------------------------
V a r i a b l e s
----------------------------------------------------------------------*/
extern ScrollView *blob_window; /* Window for blobs */
extern C_COL color_list[]; /* Colors for outlines */
extern BOOL_VAR_H(wordrec_display_all_blobs, 0, "Display Blobs");
extern BOOL_VAR_H(wordrec_display_all_words, 0, "Display Words");
extern BOOL_VAR_H(wordrec_blob_pause, 0, "Blob pause");
#define NUM_COLORS 6
/*----------------------------------------------------------------------
F u n c t i o n s
----------------------------------------------------------------------*/
void display_blob(TBLOB *blob, C_COL color);
void render_blob(void *window, TBLOB *blob, C_COL color);
void render_edgepts(void *window, EDGEPT *edgept, C_COL color);
void render_outline(void *window,
TESSLINE *outline,
C_COL color);
#endif
| 1080228-arabicocr11 | wordrec/render.h | C | asf20 | 2,213 |
///////////////////////////////////////////////////////////////////////
// File: associate.h
// Description: Structs, classes, typedefs useful for the segmentation
// search. Functions for scoring segmentation paths according
// to their character widths, gap widths and seam cuts.
// Author: Daria Antonova
// Created: Mon Mar 8 11:26:43 PDT 2010
//
// (C) Copyright 2010, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#ifndef ASSOCIATE_H
#define ASSOCIATE_H
#include "blobs.h"
#include "elst.h"
#include "ratngs.h"
#include "seam.h"
#include "split.h"
class WERD_RES;
namespace tesseract {
// Statistics about character widths, gaps and seams.
// Statistics about character widths, gaps and seams accumulated along a
// segmentation path.
struct AssociateStats {
  AssociateStats() { Clear(); }

  // Resets all statistics to their defaults.
  void Clear() {
    shape_cost = 0.0f;
    bad_shape = false;
    full_wh_ratio = 0.0f;
    full_wh_ratio_total = 0.0f;
    full_wh_ratio_var = 0.0f;
    bad_fixed_pitch_right_gap = false;
    bad_fixed_pitch_wh_ratio = false;
    gap_sum = 0;
  }

  // Dumps the shape stats to the debug output.
  void Print() {
    // Bug fix: the original format string "w(%g %d) s(%g %d)" declared four
    // conversions but only two arguments were passed, which is undefined
    // behavior; the format now matches the argument list.
    tprintf("AssociateStats: w(%g %d)\n", shape_cost, bad_shape);
  }

  float shape_cost;           // cost of blob shape
  bool bad_shape;             // true if the shape of the blob is unacceptable
  float full_wh_ratio;        // width-to-height ratio + gap on the right
  float full_wh_ratio_total;  // sum of width-to-height ratios
                              // on the path terminating at this blob
  float full_wh_ratio_var;    // variance of full_wh_ratios on the path
  bool bad_fixed_pitch_right_gap;  // true if there is no gap before
                                   // the blob on the right
  bool bad_fixed_pitch_wh_ratio;   // true if the blob has width-to-height
                                   // ratio > kMaxFixedPitchCharAspectRatio
  int gap_sum;  // sum of gaps within the blob
};
// Utility functions for scoring segmentation paths according to their
// character widths, gap widths, seam characteristics.
// Static utility functions for scoring segmentation paths according to their
// character widths, gap widths and seam characteristics.
class AssociateUtils {
 public:
  static const float kMaxFixedPitchCharAspectRatio;
  static const float kMinGap;

  // Returns the outline length of the given blob, computed as:
  //   rating_cert_scale * rating / certainty
  // Since from Wordrec::SegSearch() in segsearch.cpp
  //   rating_cert_scale = -1.0 * getDict().certainty_scale / rating_scale
  // And from Classify::ConvertMatchesToChoices() in adaptmatch.cpp
  //   Rating = Certainty = next.rating
  //   Rating *= rating_scale * Results->BlobLength
  //   Certainty *= -(getDict().certainty_scale)
  static inline float ComputeOutlineLength(float rating_cert_scale,
                                           const BLOB_CHOICE &b) {
    return rating_cert_scale * b.rating() / b.certainty();
  }
  // Inverse of ComputeOutlineLength: recovers a rating from a certainty and
  // a width (outline length).
  static inline float ComputeRating(float rating_cert_scale,
                                    float cert, int width) {
    return static_cast<float>(width) * cert / rating_cert_scale;
  }

  // Computes character widths, gaps and seams stats given the
  // AssociateStats of the path so far, col, row of the blob that
  // is being added to the path, and WERD_RES containing information
  // about character widths, gaps and seams.
  // Fills associate_cost with the combined shape, gap and seam cost
  // of adding a unichar from (col, row) to the path (note that since
  // this function could be used to compute the prioritization for
  // pain points, (col, row) entry might not be classified yet; thus
  // information in the (col, row) entry of the ratings matrix is not used).
  //
  // Note: the function assumes that word_res, stats and
  // associate_cost pointers are not NULL.
  static void ComputeStats(int col, int row,
                           const AssociateStats *parent_stats,
                           int parent_path_length,
                           bool fixed_pitch,
                           float max_char_wh_ratio,
                           WERD_RES *word_res,
                           bool debug,
                           AssociateStats *stats);

  // Returns the width cost for fixed-pitch text.
  static float FixedPitchWidthCost(float norm_width, float right_gap,
                                   bool end_pos, float max_char_wh_ratio);

  // Returns the gap cost for fixed-pitch text (penalizes vertically
  // overlapping components).
  static inline float FixedPitchGapCost(float norm_gap, bool end_pos) {
    return (norm_gap < 0.05 && !end_pos) ? 5.0f : 0.0f;
  }
};
} // namespace tesseract
#endif
| 1080228-arabicocr11 | wordrec/associate.h | C++ | asf20 | 5,048 |
///////////////////////////////////////////////////////////////////////
// File: segsearch.h
// Description: Segmentation search functions.
// Author: Daria Antonova
// Created: Mon Jun 23 11:26:43 PDT 2008
//
// (C) Copyright 2009, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#include "wordrec.h"
#include "associate.h"
#include "language_model.h"
#include "matrix.h"
#include "params.h"
#include "lm_pain_points.h"
#include "ratngs.h"
namespace tesseract {
// Convenience wrapper: runs a full segmentation search on word_res with a
// locally scoped BestChoiceBundle and no blamer information.
void Wordrec::DoSegSearch(WERD_RES* word_res) {
  // The bundle is sized to match the word's ratings matrix.
  BestChoiceBundle choice_bundle(word_res->ratings->dimension());
  SegSearch(word_res, &choice_bundle, NULL);
}
// Runs the full segmentation search for word_res:
//   1) an initial Viterbi pass over the existing ratings matrix
//      (InitialSegSearch),
//   2) optional improvement by chopping blobs,
//   3) an iterative loop that dequeues "pain points" (candidate matrix
//      cells), classifies them and re-runs the Viterbi update, stopping
//      once SegSearchDone() reports too many futile classifications.
// best_choice_bundle accumulates the best path found so far.
// blamer_bundle may be NULL; when present it records where an incorrect
// best choice was introduced and can trigger a guided search for the truth.
void Wordrec::SegSearch(WERD_RES* word_res,
                        BestChoiceBundle* best_choice_bundle,
                        BlamerBundle* blamer_bundle) {
  // Priority queue of candidate (col, row) cells to classify next.
  LMPainPoints pain_points(segsearch_max_pain_points,
                           segsearch_max_char_wh_ratio,
                           assume_fixed_pitch_char_segment,
                           &getDict(), segsearch_debug_level);
  // Compute scaling factor that will help us recover blob outline length
  // from classifier rating and certainty for the blob.
  float rating_cert_scale = -1.0 * getDict().certainty_scale / rating_scale;
  GenericVector<SegSearchPending> pending;
  InitialSegSearch(word_res, &pain_points, &pending, best_choice_bundle,
                   blamer_bundle);
  if (!SegSearchDone(0)) {  // find a better choice
    if (chop_enable && word_res->chopped_word != NULL) {
      improve_by_chopping(rating_cert_scale, word_res, best_choice_bundle,
                          blamer_bundle, &pain_points, &pending);
    }
    if (chop_debug)
      print_seams("Final seam list:", word_res->seam_array);
    if (blamer_bundle != NULL &&
        !blamer_bundle->ChoiceIsCorrect(word_res->best_choice)) {
      blamer_bundle->SetChopperBlame(word_res, wordrec_debug_blamer);
    }
  }
  // Keep trying to find a better path by fixing the "pain points".
  MATRIX_COORD pain_point;
  float pain_point_priority;
  int num_futile_classifications = 0;
  STRING blamer_debug;
  while (wordrec_enable_assoc &&
      (!SegSearchDone(num_futile_classifications) ||
       (blamer_bundle != NULL &&
        blamer_bundle->GuidedSegsearchStillGoing()))) {
    // Get the next valid "pain point": pop entries until one maps to a
    // valid cell that is not already classified.
    bool found_nothing = true;
    LMPainPointsType pp_type;
    while ((pp_type = pain_points.Deque(&pain_point, &pain_point_priority)) !=
           LM_PPTYPE_NUM) {
      if (!pain_point.Valid(*word_res->ratings)) {
        // Widen the matrix band so this (col, row) becomes addressable.
        word_res->ratings->IncreaseBandSize(
            pain_point.row - pain_point.col + 1);
      }
      if (pain_point.Valid(*word_res->ratings) &&
          !word_res->ratings->Classified(pain_point.col, pain_point.row,
                                         getDict().WildcardID())) {
        found_nothing = false;
        break;
      }
    }
    if (found_nothing) {
      if (segsearch_debug_level > 0) tprintf("Pain points queue is empty\n");
      break;
    }
    // Classify the chosen cell, then propagate the change through the
    // search starting at its column.
    ProcessSegSearchPainPoint(pain_point_priority, pain_point,
                              LMPainPoints::PainPointDescription(pp_type),
                              &pending, word_res, &pain_points, blamer_bundle);
    UpdateSegSearchNodes(rating_cert_scale, pain_point.col, &pending,
                         word_res, &pain_points, best_choice_bundle,
                         blamer_bundle);
    // A classification that did not improve the best choice counts as
    // futile; enough of these in a row terminates the loop.
    if (!best_choice_bundle->updated) ++num_futile_classifications;
    if (segsearch_debug_level > 0) {
      tprintf("num_futile_classifications %d\n", num_futile_classifications);
    }
    best_choice_bundle->updated = false;  // reset updated
    // See if it's time to terminate SegSearch or time for starting a guided
    // search for the true path to find the blame for the incorrect best_choice.
    if (SegSearchDone(num_futile_classifications) &&
        blamer_bundle != NULL &&
        blamer_bundle->GuidedSegsearchNeeded(word_res->best_choice)) {
      InitBlamerForSegSearch(word_res, &pain_points, blamer_bundle,
                             &blamer_debug);
    }
  }  // end while loop exploring alternative paths
  if (blamer_bundle != NULL) {
    blamer_bundle->FinishSegSearch(word_res->best_choice,
                                   wordrec_debug_blamer, &blamer_debug);
  }
  if (segsearch_debug_level > 0) {
    tprintf("Done with SegSearch (AcceptableChoiceFound: %d)\n",
            language_model_->AcceptableChoiceFound());
  }
}
// Setup and run just the initial segsearch on an established matrix,
// without doing any additional chopping or joining.
void Wordrec::WordSearch(WERD_RES* word_res) {
  // A pain-point heap is required by InitialSegSearch, although no pain
  // points are processed in this single-pass variant.
  LMPainPoints candidate_points(segsearch_max_pain_points,
                                segsearch_max_char_wh_ratio,
                                assume_fixed_pitch_char_segment,
                                &getDict(), segsearch_debug_level);
  GenericVector<SegSearchPending> pending_cols;
  BestChoiceBundle choice_bundle(word_res->ratings->dimension());
  // Single initial Viterbi pass, no blamer.
  InitialSegSearch(word_res, &candidate_points, &pending_cols, &choice_bundle,
                   NULL);
  if (segsearch_debug_level > 0) {
    tprintf("Ending ratings matrix%s:\n",
            wordrec_enable_assoc ? " (with assoc)" : "");
    word_res->ratings->print(getDict().getUnicharset());
  }
}
// Setup and run just the initial segsearch on an established matrix,
// without doing any additional chopping or joining.
// (Internal factored version that can be used as part of the main SegSearch.)
// Seeds pain_points, initializes the language model and the per-column
// pending vector, then runs one UpdateSegSearchNodes pass from column 0.
// All four out-parameters are caller-owned.
void Wordrec::InitialSegSearch(WERD_RES* word_res, LMPainPoints* pain_points,
                               GenericVector<SegSearchPending>* pending,
                               BestChoiceBundle* best_choice_bundle,
                               BlamerBundle* blamer_bundle) {
  if (segsearch_debug_level > 0) {
    tprintf("Starting SegSearch on ratings matrix%s:\n",
            wordrec_enable_assoc ? " (with assoc)" : "");
    word_res->ratings->print(getDict().getUnicharset());
  }
  // Seed the heap with the initial pain points for this word.
  pain_points->GenerateInitial(word_res);
  // Compute scaling factor that will help us recover blob outline length
  // from classifier rating and certainty for the blob.
  float rating_cert_scale = -1.0 * getDict().certainty_scale / rating_scale;
  language_model_->InitForWord(prev_word_best_choice_,
                               assume_fixed_pitch_char_segment,
                               segsearch_max_char_wh_ratio, rating_cert_scale);
  // Initialize blamer-related information: map character boxes recorded in
  // blamer_bundle->norm_truth_word to the corresponding i,j indices in the
  // ratings matrix. We expect this step to succeed, since when running the
  // chopper we checked that the correct chops are present.
  if (blamer_bundle != NULL) {
    blamer_bundle->SetupCorrectSegmentation(word_res->chopped_word,
                                            wordrec_debug_blamer);
  }
  // pending[col] tells whether there is update work to do to combine
  // best_choice_bundle->beam[col - 1] with some BLOB_CHOICEs in matrix[col, *].
  // As the language model state is updated, pending entries are modified to
  // minimize duplication of work. It is important that during the update the
  // children are considered in the non-decreasing order of their column, since
  // this guarantees that all the parents would be up to date before an update
  // of a child is done.
  pending->init_to_size(word_res->ratings->dimension(), SegSearchPending());
  // Search the ratings matrix for the initial best path.
  (*pending)[0].SetColumnClassified();
  UpdateSegSearchNodes(rating_cert_scale, 0, pending, word_res,
                       pain_points, best_choice_bundle, blamer_bundle);
}
// Re-runs the Viterbi update over the ratings matrix from starting_col to
// the right.  For every column with pending work, each (col, row) cell's
// language model state is combined with the parent state in beam[col - 1];
// a successful update marks column row + 1 pending, so changes propagate
// strictly left-to-right.  Afterwards, new pain points are generated from
// the current best path, and all pending entries and VSE updated flags are
// reset for the next round.
void Wordrec::UpdateSegSearchNodes(
    float rating_cert_scale,
    int starting_col,
    GenericVector<SegSearchPending>* pending,
    WERD_RES *word_res,
    LMPainPoints *pain_points,
    BestChoiceBundle *best_choice_bundle,
    BlamerBundle *blamer_bundle) {
  MATRIX *ratings = word_res->ratings;
  ASSERT_HOST(ratings->dimension() == pending->size());
  ASSERT_HOST(ratings->dimension() == best_choice_bundle->beam.size());
  for (int col = starting_col; col < ratings->dimension(); ++col) {
    if (!(*pending)[col].WorkToDo()) continue;
    // Rows reachable from this column are limited by the matrix band,
    // unless the pending entry restricts the work to a single row.
    int first_row = col;
    int last_row = MIN(ratings->dimension() - 1,
                       col + ratings->bandwidth() - 1);
    if ((*pending)[col].SingleRow() >= 0) {
      first_row = last_row = (*pending)[col].SingleRow();
    }
    if (segsearch_debug_level > 0) {
      tprintf("\n\nUpdateSegSearchNodes: col=%d, rows=[%d,%d], alljust=%d\n",
              col, first_row, last_row,
              (*pending)[col].IsRowJustClassified(MAX_INT32));
    }
    // Iterate over the pending list for this column.
    for (int row = first_row; row <= last_row; ++row) {
      // Update language model state of this child+parent pair.
      BLOB_CHOICE_LIST *current_node = ratings->get(col, row);
      LanguageModelState *parent_node =
          col == 0 ? NULL : best_choice_bundle->beam[col - 1];
      if (current_node != NULL &&
          language_model_->UpdateState((*pending)[col].IsRowJustClassified(row),
                                       col, row, current_node, parent_node,
                                       pain_points, word_res,
                                       best_choice_bundle, blamer_bundle) &&
          row + 1 < ratings->dimension()) {
        // Since the language model state of this entry changed, process all
        // the child column.
        (*pending)[row + 1].RevisitWholeColumn();
        if (segsearch_debug_level > 0) {
          tprintf("Added child col=%d to pending\n", row + 1);
        }
      }  // end if UpdateState.
    }  // end for row.
  }  // end for col.
  if (best_choice_bundle->best_vse != NULL) {
    ASSERT_HOST(word_res->StatesAllValid());
    if (best_choice_bundle->best_vse->updated) {
      // Seed new pain points from the (changed) best path and from any
      // recorded ambiguities along it.
      pain_points->GenerateFromPath(rating_cert_scale,
                                    best_choice_bundle->best_vse, word_res);
      if (!best_choice_bundle->fixpt.empty()) {
        pain_points->GenerateFromAmbigs(best_choice_bundle->fixpt,
                                        best_choice_bundle->best_vse, word_res);
      }
    }
  }
  // The segsearch is completed. Reset all updated flags on all VSEs and reset
  // all pendings.
  for (int col = 0; col < pending->size(); ++col) {
    (*pending)[col].Clear();
    ViterbiStateEntry_IT
        vse_it(&best_choice_bundle->beam[col]->viterbi_state_entries);
    for (vse_it.mark_cycle_pt(); !vse_it.cycled_list(); vse_it.forward()) {
      vse_it.data()->updated = false;
    }
  }
}
// Classifies the blob span given by pain_point and merges the resulting
// BLOB_CHOICEs into the ratings matrix (growing the matrix band first if the
// cell is not yet addressable).  On success, generates follow-up shape pain
// points joining the new classification with its left and right neighbors,
// and marks the pain point's column pending so the next
// UpdateSegSearchNodes pass picks it up.  pain_point_type is a description
// string used only for debug output.
void Wordrec::ProcessSegSearchPainPoint(
    float pain_point_priority,
    const MATRIX_COORD &pain_point, const char* pain_point_type,
    GenericVector<SegSearchPending>* pending, WERD_RES *word_res,
    LMPainPoints *pain_points, BlamerBundle *blamer_bundle) {
  if (segsearch_debug_level > 0) {
    tprintf("Classifying pain point %s priority=%.4f, col=%d, row=%d\n",
            pain_point_type, pain_point_priority,
            pain_point.col, pain_point.row);
  }
  ASSERT_HOST(pain_points != NULL);
  MATRIX *ratings = word_res->ratings;
  // Classify blob [pain_point.col pain_point.row]
  if (!pain_point.Valid(*ratings)) {
    ratings->IncreaseBandSize(pain_point.row + 1 - pain_point.col);
  }
  ASSERT_HOST(pain_point.Valid(*ratings));
  BLOB_CHOICE_LIST *classified = classify_piece(word_res->seam_array,
                                                pain_point.col, pain_point.row,
                                                pain_point_type,
                                                word_res->chopped_word,
                                                blamer_bundle);
  BLOB_CHOICE_LIST *lst = ratings->get(pain_point.col, pain_point.row);
  if (lst == NULL) {
    // Empty cell: the matrix takes ownership of the new list.
    ratings->put(pain_point.col, pain_point.row, classified);
  } else {
    // We can not delete old BLOB_CHOICEs, since they might contain
    // ViterbiStateEntries that are parents of other "active" entries.
    // Thus if the matrix cell already contains classifications we add
    // the new ones to the beginning of the list.
    BLOB_CHOICE_IT it(lst);
    it.add_list_before(classified);
    delete classified;  // safe to delete, since empty after add_list_before()
    classified = NULL;
  }
  if (segsearch_debug_level > 0) {
    print_ratings_list("Updated ratings matrix with a new entry:",
                       ratings->get(pain_point.col, pain_point.row),
                       getDict().getUnicharset());
    ratings->print(getDict().getUnicharset());
  }
  // Insert initial "pain points" to join the newly classified blob
  // with its left and right neighbors.
  // Note: classified is non-NULL here only in the empty-cell branch above.
  if (classified != NULL && !classified->empty()) {
    if (pain_point.col > 0) {
      pain_points->GeneratePainPoint(
          pain_point.col - 1, pain_point.row, LM_PPTYPE_SHAPE, 0.0,
          true, segsearch_max_char_wh_ratio, word_res);
    }
    if (pain_point.row + 1 < ratings->dimension()) {
      pain_points->GeneratePainPoint(
          pain_point.col, pain_point.row + 1, LM_PPTYPE_SHAPE, 0.0,
          true, segsearch_max_char_wh_ratio, word_res);
    }
  }
  // Make sure the column is revisited by the next Viterbi update.
  (*pending)[pain_point.col].SetBlobClassified(pain_point.row);
}
// Resets enough of the results so that the Viterbi search is re-run.
// Needed when the n-gram model is enabled, as the multi-length comparison
// implementation will re-value existing paths to worse values.
void Wordrec::ResetNGramSearch(WERD_RES* word_res,
                               BestChoiceBundle* best_choice_bundle,
                               GenericVector<SegSearchPending>* pending) {
  // TODO(rays) More refactoring required here.
  // Wipe all per-column Viterbi state out of the beam.
  const int beam_size = best_choice_bundle->beam.size();
  for (int i = 0; i < beam_size; ++i) {
    best_choice_bundle->beam[i]->Clear();
  }
  // Drop recorded word choices and forget the previous best entry.
  word_res->ClearWordChoices();
  best_choice_bundle->best_vse = NULL;
  // Column 0 must be revisited; every other pending entry starts empty.
  (*pending)[0].SetColumnClassified();
  for (int i = 1; i < pending->size(); ++i) {
    (*pending)[i].Clear();
  }
}
// Prepares blamer_bundle to run a guided segmentation search: clears the
// pain-point heap and hands the bundle a callback through which it can
// generate its own pain points along the correct segmentation.
void Wordrec::InitBlamerForSegSearch(WERD_RES *word_res,
                                     LMPainPoints *pain_points,
                                     BlamerBundle *blamer_bundle,
                                     STRING *blamer_debug) {
  // Discard any queued pain points; the guided search seeds its own.
  pain_points->Clear();
  TessResultCallback2<bool, int, int>* blamer_pp_cb =
      NewPermanentTessCallback(pain_points, &LMPainPoints::GenerateForBlamer,
                               static_cast<double>(segsearch_max_char_wh_ratio),
                               word_res);
  blamer_bundle->InitForSegSearch(word_res->best_choice, word_res->ratings,
                                  getDict().WildcardID(), wordrec_debug_blamer,
                                  blamer_debug, blamer_pp_cb);
  // The callback wrapper is only needed for the InitForSegSearch call.
  delete blamer_pp_cb;
}
} // namespace tesseract
| 1080228-arabicocr11 | wordrec/segsearch.cpp | C++ | asf20 | 15,569 |
/* -*-C-*-
********************************************************************************
*
* File: outlines.h (Formerly outlines.h)
* Description: Combinatorial Splitter
* Author: Mark Seaman, OCR Technology
* Created: Thu Jul 27 11:27:55 1989
* Modified: Wed May 15 17:28:47 1991 (Mark Seaman) marks@hpgrlt
* Language: C
* Package: N/A
* Status: Experimental (Do Not Distribute)
*
* (c) Copyright 1989, Hewlett-Packard Company.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
*********************************************************************************/
#ifndef OUTLINES_H
#define OUTLINES_H
#include "blobs.h"
#include "chop.h"
#include <math.h>
/*----------------------------------------------------------------------
C o n s t a n t s
----------------------------------------------------------------------*/
#define LARGE_DISTANCE 100000 /* Used for closest dist */
#define MIN_BLOB_SIZE 10 /* Big units */
#define MAX_ASPECT_RATIO 2.5 /* Widest character */
/*----------------------------------------------------------------------
 M a c r o s
----------------------------------------------------------------------*/
/**********************************************************************
 * same_point
 *
 * Return TRUE if the point values are within chop_same_distance of
 * each other on both axes. The parameters must be of type TPOINT
 * (anything with integer x and y fields). NOTE: as macros, these
 * evaluate their arguments more than once; do not pass expressions
 * with side effects.
 **********************************************************************/
#define same_point(p1,p2) \
((abs (p1.x - p2.x) < chop_same_distance) && \
(abs (p1.y - p2.y) < chop_same_distance))
/**********************************************************************
 * dist_square
 *
 * Return the square of the Euclidean distance between these two
 * points (avoids the sqrt; fine for comparisons). The parameters must
 * be of type TPOINT.
 **********************************************************************/
#define dist_square(p1,p2) \
((p2.x - p1.x) * (p2.x - p1.x) + \
(p2.y - p1.y) * (p2.y - p1.y))
/**********************************************************************
 * closest
 *
 * The expression provides the EDGEPT* that is closest to the point in
 * question. All three parameters must be EDGEPT pointers; p1 and/or
 * p2 may be NULL, in which case the other (possibly NULL) candidate
 * is returned.
 **********************************************************************/
#define closest(test_p,p1,p2) \
(p1 ? \
(p2 ? \
((dist_square (test_p->pos, p1->pos) < \
dist_square (test_p->pos, p2->pos)) ? \
p1 : \
p2) : \
p1) : \
p2)
/**********************************************************************
 * edgept_dist
 *
 * Return the distance (squared) between the two edge points.
 **********************************************************************/
#define edgept_dist(p1,p2) \
(dist_square ((p1)->pos, (p2)->pos))
/**********************************************************************
 * is_exterior_point
 *
 * Return TRUE if the point supplied is an exterior projection from the
 * outline: either it coincides with a neighbor of edge, or inserting
 * it reduces the turn angle at edge by more than 20 (units per
 * angle_change -- presumably degrees; confirm against its definition).
 **********************************************************************/
#define is_exterior_point(edge,point) \
(same_point (edge->prev->pos, point->pos) || \
same_point (edge->next->pos, point->pos) || \
(angle_change (edge->prev, edge, edge->next) - \
angle_change (edge->prev, edge, point) > 20))
/**********************************************************************
 * is_equal
 *
 * Return TRUE if the TPOINTs have exactly equal coordinates
 * (contrast with same_point, which allows a tolerance).
 **********************************************************************/
#define is_equal(p1,p2) \
(((p1).x == (p2).x) && ((p1).y == (p2).y))
/**********************************************************************
 * is_on_line
 *
 * Return TRUE if the point is within the bounding box spanned by the
 * two end points (checked per-axis via within_range). The two end
 * points are included as part of the line. The parameters must be of
 * type TPOINT.
 **********************************************************************/
#define is_on_line(p,p0,p1) \
(within_range ((p).x, (p0).x, (p1).x) && \
within_range ((p).y, (p0).y, (p1).y))
/**********************************************************************
 * within_range
 *
 * Return TRUE if the first number lies between the second two numbers
 * (inclusive, in either order). Return FALSE otherwise.
 **********************************************************************/
#define within_range(x,x0,x1) \
(((x0 <= x) && (x <= x1)) || ((x1 <= x) && (x <= x0)))
#endif
| 1080228-arabicocr11 | wordrec/outlines.h | C | asf20 | 5,127 |
///////////////////////////////////////////////////////////////////////
// File: associate.cpp
// Description: Functions for scoring segmentation paths according to
// their character widths, gap widths and seam cuts.
// Author: Daria Antonova
// Created: Mon Mar 8 11:26:43 PDT 2010
//
// (C) Copyright 2010, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#include <stdio.h>
#ifdef __UNIX__
#include <assert.h>
#endif
#include <math.h>
#include "associate.h"
#include "normalis.h"
#include "pageres.h"
namespace tesseract {
const float AssociateUtils::kMaxFixedPitchCharAspectRatio = 2.0f;
const float AssociateUtils::kMinGap = 0.03f;
void AssociateUtils::ComputeStats(int col, int row,
const AssociateStats *parent_stats,
int parent_path_length,
bool fixed_pitch,
float max_char_wh_ratio,
WERD_RES *word_res,
bool debug,
AssociateStats *stats) {
stats->Clear();
ASSERT_HOST(word_res != NULL);
if (word_res->blob_widths.empty()) {
return;
}
if (debug) {
tprintf("AssociateUtils::ComputeStats() for col=%d, row=%d%s\n",
col, row, fixed_pitch ? " (fixed pitch)" : "");
}
float normalizing_height = kBlnXHeight;
ROW* blob_row = word_res->blob_row;
// TODO(rays/daria) Can unicharset.script_has_xheight be useful here?
if (fixed_pitch && blob_row != NULL) {
// For fixed pitch language like CJK, we use the full text height
// as the normalizing factor so we are not dependent on xheight
// calculation.
if (blob_row->body_size() > 0.0f) {
normalizing_height = word_res->denorm.y_scale() * blob_row->body_size();
} else {
normalizing_height = word_res->denorm.y_scale() *
(blob_row->x_height() + blob_row->ascenders());
}
if (debug) {
tprintf("normalizing height = %g (scale %g xheight %g ascenders %g)\n",
normalizing_height, word_res->denorm.y_scale(),
blob_row->x_height(), blob_row->ascenders());
}
}
float wh_ratio = word_res->GetBlobsWidth(col, row) / normalizing_height;
if (wh_ratio > max_char_wh_ratio) stats->bad_shape = true;
// Compute the gap sum for this shape. If there are only negative or only
// positive gaps, record their sum in stats->gap_sum. However, if there is
// a mixture, record only the sum of the positive gaps.
// TODO(antonova): explain fragment.
int negative_gap_sum = 0;
for (int c = col; c < row; ++c) {
int gap = word_res->GetBlobsGap(c);
(gap > 0) ? stats->gap_sum += gap : negative_gap_sum += gap;
}
if (stats->gap_sum == 0) stats->gap_sum = negative_gap_sum;
if (debug) {
tprintf("wh_ratio=%g (max_char_wh_ratio=%g) gap_sum=%d %s\n",
wh_ratio, max_char_wh_ratio, stats->gap_sum,
stats->bad_shape ? "bad_shape" : "");
}
// Compute shape_cost (for fixed pitch mode).
if (fixed_pitch) {
bool end_row = (row == (word_res->ratings->dimension() - 1));
// Ensure that the blob has gaps on the left and the right sides
// (except for beginning and ending punctuation) and that there is
// no cutting through ink at the blob boundaries.
if (col > 0) {
float left_gap = word_res->GetBlobsGap(col - 1) / normalizing_height;
SEAM *left_seam = word_res->seam_array[col - 1];
if ((!end_row && left_gap < kMinGap) || left_seam->priority > 0.0f) {
stats->bad_shape = true;
}
if (debug) {
tprintf("left_gap %g, left_seam %g %s\n", left_gap, left_seam->priority,
stats->bad_shape ? "bad_shape" : "");
}
}
float right_gap = 0.0f;
if (!end_row) {
right_gap = word_res->GetBlobsGap(row) / normalizing_height;
SEAM *right_seam = word_res->seam_array[row];
if (right_gap < kMinGap || right_seam->priority > 0.0f) {
stats->bad_shape = true;
if (right_gap < kMinGap) stats->bad_fixed_pitch_right_gap = true;
}
if (debug) {
tprintf("right_gap %g right_seam %g %s\n",
right_gap, right_seam->priority,
stats->bad_shape ? "bad_shape" : "");
}
}
// Impose additional segmentation penalties if blob widths or gaps
// distribution don't fit a fixed-pitch model.
// Since we only know the widths and gaps of the path explored so far,
// the means and variances are computed for the path so far (not
// considering characters to the right of the last character on the path).
stats->full_wh_ratio = wh_ratio + right_gap;
if (parent_stats != NULL) {
stats->full_wh_ratio_total =
(parent_stats->full_wh_ratio_total + stats->full_wh_ratio);
float mean =
stats->full_wh_ratio_total / static_cast<float>(parent_path_length+1);
stats->full_wh_ratio_var =
parent_stats->full_wh_ratio_var + pow(mean-stats->full_wh_ratio, 2);
} else {
stats->full_wh_ratio_total = stats->full_wh_ratio;
}
if (debug) {
tprintf("full_wh_ratio %g full_wh_ratio_total %g full_wh_ratio_var %g\n",
stats->full_wh_ratio, stats->full_wh_ratio_total,
stats->full_wh_ratio_var);
}
stats->shape_cost =
FixedPitchWidthCost(wh_ratio, right_gap, end_row, max_char_wh_ratio);
// For some reason Tesseract prefers to treat the whole CJ words
// as one blob when the initial segmentation is particularly bad.
// This hack is to avoid favoring such states.
if (col == 0 && end_row && wh_ratio > max_char_wh_ratio) {
stats->shape_cost += 10;
}
stats->shape_cost += stats->full_wh_ratio_var;
if (debug) tprintf("shape_cost %g\n", stats->shape_cost);
}
}
// Returns the width cost of a blob in fixed-pitch mode:
//  - too-wide blobs (above max_char_wh_ratio) pay their width linearly,
//    with an extra quadratic penalty above kMaxFixedPitchCharAspectRatio
//    (discourages merging CJK characters);
//  - too-skinny blobs (width plus trailing gap under 0.5) pay the deficit,
//    except in the last position where narrow punctuation is expected.
float AssociateUtils::FixedPitchWidthCost(float norm_width,
                                          float right_gap,
                                          bool end_pos,
                                          float max_char_wh_ratio) {
  float penalty = 0.0f;
  if (norm_width > max_char_wh_ratio) penalty += norm_width;
  if (norm_width > kMaxFixedPitchCharAspectRatio) {
    penalty += norm_width * norm_width;  // extra penalty for merging CJK chars
  }
  // Penalize skinny blobs, except for punctuation in the last position.
  const float occupied = norm_width + right_gap;
  if (!end_pos && occupied < 0.5f) {
    penalty += 1.0f - occupied;
  }
  return penalty;
}
} // namespace tesseract
| 1080228-arabicocr11 | wordrec/associate.cpp | C++ | asf20 | 7,101 |
/* -*-C-*-
********************************************************************************
*
* File: outlines.c (Formerly outlines.c)
* Description: Combinatorial Splitter
* Author: Mark Seaman, OCR Technology
* Created: Thu Jul 27 08:59:01 1989
* Modified: Wed Jul 10 14:56:49 1991 (Mark Seaman) marks@hpgrlt
* Language: C
* Package: N/A
* Status: Experimental (Do Not Distribute)
*
* (c) Copyright 1989, Hewlett-Packard Company.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
********************************************************************************
* Revision 1.2 89/09/15 09:24:41 09:24:41 marks (Mark Seaman)
* First released version of Combinatorial splitter code
**/
/*----------------------------------------------------------------------
I n c l u d e s
----------------------------------------------------------------------*/
#include "outlines.h"
#include "wordrec.h"
#ifdef __UNIX__
#include <assert.h>
#endif
namespace tesseract {
/*----------------------------------------------------------------------
F u n c t i o n s
----------------------------------------------------------------------*/
/**********************************************************************
 * crosses_outline
 *
 * Walk every edge of the given closed outline and report TRUE as soon
 * as the segment p0->p1 crosses one of them; FALSE if none do.
 **********************************************************************/
int Wordrec::crosses_outline(EDGEPT *p0,        /* Start of line */
                             EDGEPT *p1,        /* End of line */
                             EDGEPT *outline) { /* Outline to check */
  EDGEPT *edge = outline;
  do {
    if (is_crossed(p0->pos, p1->pos, edge->pos, edge->next->pos))
      return TRUE;
    edge = edge->next;
  } while (edge != outline);
  return FALSE;
}
/**********************************************************************
 * is_crossed
 *
 * Return TRUE when the two line segments cross each other. Find out
 * where the projected lines would cross and then check to see if the
 * point of intersection lies on both of the line segments. If it does
 * then these two segments cross.
 *
 * Implementation: each cross product below tests which side of one
 * segment an endpoint of the other lies on. The segments properly
 * intersect iff, for each segment, the other's endpoints lie on
 * strictly opposite sides (matched-sign pairs in the final
 * expression). A zero cross product (collinear / touching endpoint)
 * therefore reports FALSE.
 **********************************************************************/
int Wordrec::is_crossed(TPOINT a0, TPOINT a1, TPOINT b0, TPOINT b1) {
  int b0a1xb0b1, b0b1xb0a0;
  int a1b1xa1a0, a1a0xa1b0;
  /* Difference vectors, named from->to. */
  TPOINT b0a1, b0a0, a1b1, b0b1, a1a0;
  b0a1.x = a1.x - b0.x;
  b0a0.x = a0.x - b0.x;
  a1b1.x = b1.x - a1.x;
  b0b1.x = b1.x - b0.x;
  a1a0.x = a0.x - a1.x;
  b0a1.y = a1.y - b0.y;
  b0a0.y = a0.y - b0.y;
  a1b1.y = b1.y - a1.y;
  b0b1.y = b1.y - b0.y;
  a1a0.y = a0.y - a1.y;
  b0a1xb0b1 = CROSS (b0a1, b0b1);
  b0b1xb0a0 = CROSS (b0b1, b0a0);
  a1b1xa1a0 = CROSS (a1b1, a1a0);
  /*a1a0xa1b0=CROSS(a1a0,a1b0); */
  /* a1b0 is not computed directly; it equals -b0a1, so negate instead. */
  a1a0xa1b0 = -CROSS (a1a0, b0a1);
  return ((b0a1xb0b1 > 0 && b0b1xb0a0 > 0)
    || (b0a1xb0b1 < 0 && b0b1xb0a0 < 0))
    && ((a1b1xa1a0 > 0 && a1a0xa1b0 > 0) || (a1b1xa1a0 < 0 && a1a0xa1b0 < 0));
}
/**********************************************************************
 * is_same_edgept
 *
 * TRUE iff both arguments refer to the identical edge point (pointer
 * identity, not coordinate equality).
 **********************************************************************/
int Wordrec::is_same_edgept(EDGEPT *p1, EDGEPT *p2) {
  return p1 == p2;
}
/**********************************************************************
 * near_point
 *
 * Find the point on a line segment that is closest to a point not on
 * the line segment. Return that point in near_pt. Returns whether
 * near_pt was newly created.
 *
 * The foot of the perpendicular from point onto the infinite line
 * through line_pt_0/line_pt_1 is computed first. If that foot lies on
 * the segment (and does not coincide with either endpoint), a new
 * EDGEPT is made via make_edgept -- presumably linked between the two
 * line points; confirm against make_edgept -- and true is returned.
 * Otherwise the nearer existing endpoint is returned and false.
 **********************************************************************/
bool Wordrec::near_point(EDGEPT *point,
                         EDGEPT *line_pt_0, EDGEPT *line_pt_1,
                         EDGEPT **near_pt) {
  TPOINT p;
  float slope;
  float intercept;
  float x0 = line_pt_0->pos.x;
  float x1 = line_pt_1->pos.x;
  float y0 = line_pt_0->pos.y;
  float y1 = line_pt_1->pos.y;
  if (x0 == x1) {
    /* Handle vertical line: foot has the line's x and the point's y. */
    p.x = (inT16) x0;
    p.y = point->pos.y;
  }
  else {
    /* Slope and intercept */
    slope = (y0 - y1) / (x0 - x1);
    intercept = y1 - x1 * slope;
    /* Find perpendicular foot; coordinates truncate to integer TPOINT. */
    p.x = (inT16) ((point->pos.x + (point->pos.y - intercept) * slope) /
                   (slope * slope + 1));
    p.y = (inT16) (slope * p.x + intercept);
  }
  if (is_on_line (p, line_pt_0->pos, line_pt_1->pos) &&
    (!same_point (p, line_pt_0->pos)) && (!same_point (p, line_pt_1->pos))) {
    /* Intersection on line */
    *near_pt = make_edgept(p.x, p.y, line_pt_1, line_pt_0);
    return true;
  } else {  /* Intersection not on line: pick the closer endpoint. */
    *near_pt = closest(point, line_pt_0, line_pt_1);
    return false;
  }
}
/**********************************************************************
 * reverse_outline
 *
 * Reverse the direction of a closed outline (clockwise <->
 * counter-clockwise) by exchanging the next and prev links of every
 * edge point, recomputing each point's vec to aim at its new
 * successor.
 **********************************************************************/
void Wordrec::reverse_outline(EDGEPT *outline) {
  EDGEPT *pt = outline;
  do {
    /* Exchange the two links. */
    EDGEPT *old_prev = pt->prev;
    pt->prev = pt->next;
    pt->next = old_prev;
    /* vec points at the (new) next point. */
    pt->vec.x = pt->next->pos.x - pt->pos.x;
    pt->vec.y = pt->next->pos.y - pt->pos.y;
    /* prev now holds the original next, so this walks forward. */
    pt = pt->prev;
  } while (pt != outline);
}
} // namespace tesseract
| 1080228-arabicocr11 | wordrec/outlines.cpp | C | asf20 | 6,142 |
/* -*-C-*-
********************************************************************************
*
* File: chopper.h (Formerly chopper.h)
* Description:
* Author: Mark Seaman, SW Productivity
* Created: Fri Oct 16 14:37:00 1987
* Modified: Wed May 15 14:24:26 1991 (Mark Seaman) marks@hpgrlt
* Language: C
* Package: N/A
* Status: Reusable Software Component
*
* (c) Copyright 1987, Hewlett-Packard Company.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
*********************************************************************************/
#ifndef CHOPPER_H
#define CHOPPER_H
#include "cutil.h"
#include "matrix.h"
#include "seam.h"
#include "stopper.h"
void preserve_outline(EDGEPT *start);
void preserve_outline_tree(TESSLINE *srcline);
EDGEPT *restore_outline(EDGEPT *start);
void restore_outline_tree(TESSLINE *srcline);
int any_shared_split_points(const GenericVector<SEAM*>& seams, SEAM *seam);
int check_blob(TBLOB *blob);
inT16 check_seam_order(TBLOB *blob, SEAM *seam);
inT16 total_containment(TBLOB *blob1, TBLOB *blob2);
#endif
| 1080228-arabicocr11 | wordrec/chopper.h | C | asf20 | 1,604 |
/* -*-C-*-
********************************************************************************
*
* File: gradechop.c (Formerly gradechop.c)
* Description:
* Author: Mark Seaman, OCR Technology
* Created: Fri Oct 16 14:37:00 1987
* Modified: Tue Jul 30 16:06:27 1991 (Mark Seaman) marks@hpgrlt
* Language: C
* Package: N/A
* Status: Reusable Software Component
*
* (c) Copyright 1987, Hewlett-Packard Company.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
*********************************************************************************/
/*----------------------------------------------------------------------
I n c l u d e s
----------------------------------------------------------------------*/
#include "gradechop.h"
#include "wordrec.h"
#include "olutil.h"
#include "chop.h"
#include "ndminx.h"
#include <math.h>
/*----------------------------------------------------------------------
T y p e s
----------------------------------------------------------------------*/
#define CENTER_GRADE_CAP 25.0
/*----------------------------------------------------------------------
M a c r o s
----------------------------------------------------------------------*/
/**********************************************************************
 * find_bounds_loop
 *
 * Helper macro for set_outline_bounds: walks the circular edge-point
 * list from point1 toward point2, folding each point's x coordinate
 * into x_min / x_max (both seeded from point2's x).  Relies on a local
 * EDGEPT* named this_point being in scope at the expansion site, and on
 * the caller supplying the terminating semicolon.
 **********************************************************************/
#define find_bounds_loop(point1,point2,x_min,x_max) \
x_min = point2->pos.x; \
x_max = point2->pos.x; \
\
this_point = point1; \
do { \
x_min = MIN (this_point->pos.x, x_min); \
x_max = MAX (this_point->pos.x, x_max); \
this_point = this_point->next; \
} \
while (this_point != point2 && this_point != point1) \
namespace tesseract {
/*----------------------------------------------------------------------
F u n c t i o n s
----------------------------------------------------------------------*/
/**********************************************************************
 * full_split_priority
 *
 * Compute the additional priority for this split from the x-bounds of
 * the two outline stretches it would create (part of the priority has
 * already been calculated elsewhere).  A split whose halves extend
 * beyond both the supplied xmin and xmax limits is rejected outright
 * with a prohibitive priority of 999.0.
 **********************************************************************/
PRIORITY Wordrec::full_split_priority(SPLIT *split, inT16 xmin, inT16 xmax) {
  BOUNDS_RECT rect;
  set_outline_bounds(split->point1, split->point2, rect);

  // Reject splits that span past both given limits.
  if (xmin < MIN(rect[0], rect[2]) && xmax > MAX(rect[1], rect[3]))
    return 999.0;

  // Sum the bounding-box based grading components.
  PRIORITY priority = grade_overlap(rect);
  priority += grade_center_of_blob(rect);
  priority += grade_width_change(rect);
  return priority;
}
/**********************************************************************
 * grade_center_of_blob
 *
 * Grade the split on how close it falls to the center of the original
 * blob, i.e. on how similar the two resulting widths are.
 *   0 = "perfect", larger = worse (capped at CENTER_GRADE_CAP).
 * No penalty is applied when both pieces are already wide.
 **********************************************************************/
PRIORITY Wordrec::grade_center_of_blob(register BOUNDS_RECT rect) {
  const int first_width = rect[1] - rect[0];
  const int second_width = rect[3] - rect[2];

  // Both pieces wide enough: centering does not matter.
  if (first_width > chop_centered_maxwidth &&
      second_width > chop_centered_maxwidth) {
    return 0.0;
  }

  // Penalize the absolute width difference, scaled by the knob.
  PRIORITY grade = (first_width >= second_width)
                       ? first_width - second_width
                       : second_width - first_width;
  grade *= chop_center_knob;
  if (grade > CENTER_GRADE_CAP)
    grade = CENTER_GRADE_CAP;
  return (grade > 0.0) ? grade : 0.0;
}
/**********************************************************************
 * grade_overlap
 *
 * Grade the split on the x-overlap of the resultant blobs.
 *   0 = "perfect", 100 = total overlap (one piece inside the other).
 * Overlap beyond half the narrower piece is penalized extra.
 **********************************************************************/
PRIORITY Wordrec::grade_overlap(register BOUNDS_RECT rect) {
  inT16 second_width = rect[3] - rect[2];
  inT16 first_width = rect[1] - rect[0];
  inT16 overlap = MIN(rect[1], rect[3]) - MAX(rect[0], rect[2]);

  inT16 narrower = MIN(second_width, first_width);
  if (overlap == narrower)
    return 100.0;  /* Total overlap */

  // Extra penalty when the overlap exceeds half the narrower width.
  inT16 excess = 2 * overlap - narrower;
  if (excess > 0)
    overlap += excess;

  PRIORITY grade = overlap * chop_overlap_knob;
  return (grade > 0.0) ? grade : 0.0;
}
/**********************************************************************
 * grade_split_length
 *
 * Grade the split on the (x/y-weighted) length between its two edge
 * points: longer splits score worse.
 *   0 = "perfect", larger = worse.
 **********************************************************************/
PRIORITY Wordrec::grade_split_length(register SPLIT *split) {
  float weighted_dist = weighted_edgept_dist(split->point1, split->point2,
                                             chop_x_y_weight);
  if (weighted_dist <= 0)
    return 0.0;  // Degenerate split: no length penalty.

  PRIORITY grade = sqrt(weighted_dist) * chop_split_dist_knob;
  return (grade > 0.0) ? grade : 0.0;
}
/**********************************************************************
 * grade_sharpness
 *
 * Grade the split on the sharpness of its two end points.
 *   0 = "perfect", larger = worse.
 * The summed point priorities (values 0 to -360 each) are shifted by
 * 360 so sharper point pairs produce lower grades.
 **********************************************************************/
PRIORITY Wordrec::grade_sharpness(register SPLIT *split) {
  PRIORITY grade =
      point_priority(split->point1) + point_priority(split->point2);
  if (grade < -360.0) {
    grade = 0;
  } else {
    grade += 360.0;
  }
  return grade * chop_sharpness_knob;
}
/**********************************************************************
 * grade_width_change
 *
 * Grade the split on the change in width of the resultant blobs: the
 * further the total span exceeds the wider piece, the lower the
 * penalty.
 *   0 = "perfect", larger = worse.
 **********************************************************************/
PRIORITY Wordrec::grade_width_change(register BOUNDS_RECT rect) {
  inT32 second_width = rect[3] - rect[2];
  inT32 first_width = rect[1] - rect[0];

  inT32 total_span = MAX(rect[1], rect[3]) - MIN(rect[0], rect[2]);
  PRIORITY grade = 20 - (total_span - MAX(second_width, first_width));
  grade *= chop_width_change_knob;
  return (grade > 0.0) ? grade : 0.0;
}
/**********************************************************************
 * set_outline_bounds
 *
 * Fill rect with the x extents of the two outline stretches delimited
 * by point1 and point2: rect[0]/rect[1] hold the min/max x of the walk
 * from point1 to point2, rect[2]/rect[3] of the walk back from point2
 * to point1.
 **********************************************************************/
void Wordrec::set_outline_bounds(register EDGEPT *point1,
                                 register EDGEPT *point2,
                                 BOUNDS_RECT rect) {
  register EDGEPT *this_point;  // cursor required by find_bounds_loop
  register inT16 x_min;
  register inT16 x_max;

  // First half: point1 -> point2.
  find_bounds_loop(point1, point2, x_min, x_max);
  rect[0] = x_min;
  rect[1] = x_max;

  // Second half: point2 -> point1.
  find_bounds_loop(point2, point1, x_min, x_max);
  rect[2] = x_min;
  rect[3] = x_max;
}
} // namespace tesseract
| 1080228-arabicocr11 | wordrec/gradechop.cpp | C | asf20 | 7,647 |
///////////////////////////////////////////////////////////////////////
// File: lm_pain_points.h
// Description: Functions that utilize the knowledge about the properties
// of the paths explored by the segmentation search in order
// to generate "pain points" - the locations in the ratings
// matrix which should be classified next.
// Author: Rika Antonova
// Created: Mon Jun 20 11:26:43 PST 2012
//
// (C) Copyright 2012, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#ifndef TESSERACT_WORDREC_PAIN_POINTS_H_
#define TESSERACT_WORDREC_PAIN_POINTS_H_
#include "associate.h"
#include "dict.h"
#include "genericheap.h"
#include "lm_state.h"
namespace tesseract {
// Heap of pain points used for determining where to chop/join.
typedef GenericHeap<MatrixCoordPair> PainPointHeap;

// Types of pain points (ordered in the decreasing level of importance).
enum LMPainPointsType {
  LM_PPTYPE_BLAMER,  // requested for the blamer (see GenerateForBlamer)
  LM_PPTYPE_AMBIG,   // from dangerous ambiguities (GenerateFromAmbigs)
  LM_PPTYPE_PATH,    // from explored search paths (GenerateFromPath)
  LM_PPTYPE_SHAPE,   // shape-based points
  LM_PPTYPE_NUM      // count of types; also used as an "empty" sentinel
};

// Printable names indexed by LMPainPointsType.
// NOTE(review): the second entry reads "LM_PPTYPE_AMBIGS" (trailing 'S')
// while the enum constant is LM_PPTYPE_AMBIG; debug output only, but
// confirm the mismatch is intentional.
static const char * const LMPainPointsTypeName[] = {
  "LM_PPTYPE_BLAMER",
  "LM_PPTYPE_AMBIGS",
  "LM_PPTYPE_PATH",
  "LM_PPTYPE_SHAPE",
};
// Generates and stores the per-type priority heaps of pain points used
// to drive further classification during segmentation search.  Holds a
// borrowed (non-owned) pointer to the Dict; the destructor does not
// delete it.
class LMPainPoints {
 public:
  // Amount by which pain-point priorities are adjusted (value defined
  // in the .cpp file).
  static const float kDefaultPainPointPriorityAdjustment;
  // If there is a significant drop in character ngram probability or a
  // dangerous ambiguity make the thresholds on what blob combinations
  // can be classified looser.
  static const float kLooseMaxCharWhRatio;
  // Returns a description of the type of a pain point.
  static const char* PainPointDescription(LMPainPointsType type) {
    return LMPainPointsTypeName[type];
  }

  LMPainPoints(int max, float rat, bool fp, const Dict *d, int deb) :
    max_heap_size_(max), max_char_wh_ratio_(rat), fixed_pitch_(fp),
    dict_(d), debug_level_(deb) {}
  ~LMPainPoints() {}  // dict_ is not owned, so nothing to free

  // Returns true if the heap of pain points of pp_type is not empty().
  inline bool HasPainPoints(LMPainPointsType pp_type) const {
    return !pain_points_heaps_[pp_type].empty();
  }

  // Dequeues the next pain point from the pain points queue and copies
  // its contents and priority to *pp and *priority.
  // Returns LM_PPTYPE_NUM if pain points queue is empty, otherwise the type.
  LMPainPointsType Deque(MATRIX_COORD *pp, float *priority);

  // Clears all per-type pain points heaps.
  void Clear() {
    for (int h = 0; h < LM_PPTYPE_NUM; ++h) pain_points_heaps_[h].clear();
  }

  // For each cell, generate a "pain point" if the cell is not classified
  // and has a left or right neighbor that was classified.
  void GenerateInitial(WERD_RES *word_res);

  // Generate pain points from the given path.
  void GenerateFromPath(float rating_cert_scale, ViterbiStateEntry *vse,
                        WERD_RES *word_res);

  // Generate pain points from dangerous ambiguities in best choice.
  void GenerateFromAmbigs(const DANGERR &fixpt, ViterbiStateEntry *vse,
                          WERD_RES *word_res);

  // Generate a pain point for the blamer (convenience wrapper around
  // GeneratePainPoint with LM_PPTYPE_BLAMER and zero special priority).
  bool GenerateForBlamer(double max_char_wh_ratio, WERD_RES *word_res,
                         int col, int row) {
    return GeneratePainPoint(col, row, LM_PPTYPE_BLAMER, 0.0, false,
                             max_char_wh_ratio, word_res);
  }

  // Adds a pain point to classify chunks_record->ratings(col, row).
  // Returns true if a new pain point was added to an appropriate heap.
  // Pain point priority is set to special_priority for pain points of
  // LM_PPTYPE_AMBIG or LM_PPTYPE_PATH, for other pain points
  // AssociateStats::gap_sum is used.
  bool GeneratePainPoint(int col, int row, LMPainPointsType pp_type,
                         float special_priority, bool ok_to_extend,
                         float max_char_wh_ratio,
                         WERD_RES *word_res);

  // Adjusts the pain point coordinates to cope with expansion of the ratings
  // matrix due to a split of the blob with the given index.
  void RemapForSplit(int index);

 private:
  // Priority queues containing pain points generated by the language model
  // The priority is set by the language model components, adjustments like
  // seam cost and width priority are factored into the priority.
  PainPointHeap pain_points_heaps_[LM_PPTYPE_NUM];
  // Maximum number of points to keep in the heap.
  int max_heap_size_;
  // Maximum character width/height ratio.
  float max_char_wh_ratio_;
  // Set to true if fixed pitch should be assumed.
  bool fixed_pitch_;
  // Cached pointer to dictionary (not owned).
  const Dict *dict_;
  // Debug level for print statements.
  int debug_level_;
};
} // namespace tesseract
#endif // TESSERACT_WORDREC_PAIN_POINTS_H_
| 1080228-arabicocr11 | wordrec/lm_pain_points.h | C++ | asf20 | 5,249 |
# Header search paths for the sibling tesseract modules wordrec uses.
AM_CPPFLAGS += \
-I$(top_srcdir)/ccstruct -I$(top_srcdir)/ccutil \
-I$(top_srcdir)/cutil -I$(top_srcdir)/classify \
-I$(top_srcdir)/dict \
-I$(top_srcdir)/viewer

# When visibility support is enabled, hide all symbols by default;
# TESS_EXPORTS marks this library as the exporting side.
if VISIBILITY
AM_CPPFLAGS += -DTESS_EXPORTS \
-fvisibility=hidden -fvisibility-inlines-hidden
endif

# Internal headers -- not installed.
noinst_HEADERS = \
associate.h chop.h \
chopper.h drawfx.h findseam.h gradechop.h \
language_model.h lm_consistency.h lm_pain_points.h lm_state.h \
makechop.h measure.h \
olutil.h outlines.h params_model.h plotedges.h \
render.h \
wordrec.h

if !USING_MULTIPLELIBS
# Monolithic build: objects get folded into the single libtesseract.
noinst_LTLIBRARIES = libtesseract_wordrec.la
else
# Split-library build: install libtesseract_wordrec with its own
# version info and explicit links to the sibling module libraries.
lib_LTLIBRARIES = libtesseract_wordrec.la
libtesseract_wordrec_la_LDFLAGS = -version-info $(GENERIC_LIBRARY_VERSION)
libtesseract_wordrec_la_LIBADD = \
../ccstruct/libtesseract_ccstruct.la \
../ccutil/libtesseract_ccutil.la \
../cutil/libtesseract_cutil.la \
../classify/libtesseract_classify.la \
../dict/libtesseract_dict.la \
../viewer/libtesseract_viewer.la
endif

libtesseract_wordrec_la_SOURCES = \
associate.cpp chop.cpp chopper.cpp \
drawfx.cpp findseam.cpp gradechop.cpp \
language_model.cpp lm_consistency.cpp lm_pain_points.cpp lm_state.cpp \
makechop.cpp \
olutil.cpp outlines.cpp params_model.cpp pieces.cpp \
plotedges.cpp render.cpp segsearch.cpp \
tface.cpp wordclass.cpp wordrec.cpp
| 1080228-arabicocr11 | wordrec/Makefile.am | Makefile | asf20 | 1,379 |
///////////////////////////////////////////////////////////////////////
// File: lm_consistency.cpp
// Description: Struct for recording consistency of the paths representing
// OCR hypotheses.
// Author: Rika Antonova
// Created: Mon Jun 20 11:26:43 PST 2012
//
// (C) Copyright 2012, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
////////////////////////////////////////////////////////////////////////
#include "lm_consistency.h"
#include "associate.h"
#include "dict.h"
#include "ratngs.h"
namespace tesseract {
// Updates the x-height consistency state of this path with the next
// blob choice b.  Buckets the character by vertical position (subscript
// / normal / superscript, from b->yshift()), tracks per-bucket counts
// and the intersected range of feasible x-heights, and derives
// xht_decision (XH_GOOD / XH_SUBNORMAL / XH_INCONSISTENT).
void LMConsistencyInfo::ComputeXheightConsistency(
    const BLOB_CHOICE *b, bool is_punc) {
  if (xht_decision == XH_INCONSISTENT)
    return;  // It isn't going to get any better.

  // Compute xheight consistency.
  bool parent_null = xht_sp < 0;  // true for the first character on the path
  int parent_sp = xht_sp;         // previous character's position bucket
  // Bucket this character by its y-shift.
  if (b->yshift() > LMConsistencyInfo::kShiftThresh) {
    xht_sp = LMConsistencyInfo::kSUP;
  } else if (b->yshift() < -LMConsistencyInfo::kShiftThresh) {
    xht_sp = LMConsistencyInfo::kSUB;
  } else {
    xht_sp = LMConsistencyInfo::kNORM;
  }
  xht_count[xht_sp]++;
  if (is_punc) xht_count_punc[xht_sp]++;
  if (!parent_null) {
    // Accumulate how often consecutive characters change vertical position.
    xpos_entropy += abs(parent_sp - xht_sp);
  }
  // TODO(eger): Figure out a better way to account for small caps.
  // For the first character not y-shifted, we only care if it is too small.
  // Too large is common in drop caps and small caps.
  // inT16 small_xht = b->min_xheight();
  // if (parent_vse == NULL && sp == LanguageModelConsistencyInfo::kNORM) {
  //   small_xht = 0;
  // }
  IntersectRange(b->min_xheight(), b->max_xheight(),
                 &(xht_lo[xht_sp]), &(xht_hi[xht_sp]));

  // Compute xheight inconsistency kinds.
  if (parent_null) {
    if (xht_count[kNORM] == 1) {
      xht_decision = XH_GOOD;
    } else {
      xht_decision = XH_SUBNORMAL;
    }
    return;
  }

  // When we intersect the ranges of xheights in pixels for all characters in
  // each position (subscript, normal, superscript),
  // How much range must be left? 0? [exactly one pixel height for xheight] 1?
  // TODO(eger): Extend this code to take a prior for the rest of the line.
  const int kMinIntersectedXHeightRange = 0;
  for (int i = 0; i < kNumPos; i++) {
    if (xht_lo[i] > xht_hi[i] - kMinIntersectedXHeightRange) {
      xht_decision = XH_INCONSISTENT;
      return;
    }
  }

  // Reject as improbable anything where there's much punctuation in subscript
  // or superscript regions.
  if (xht_count_punc[kSUB] > xht_count[kSUB] * 0.4 ||
      xht_count_punc[kSUP] > xht_count[kSUP] * 0.4) {
    xht_decision = XH_INCONSISTENT;
    return;
  }

  // Now check that the subscript and superscript aren't too small relative to
  // the mainline.
  double mainline_xht = static_cast<double>(xht_lo[kNORM]);
  double kMinSizeRatio = 0.4;
  if (mainline_xht > 0.0 &&
      (static_cast<double>(xht_hi[kSUB]) / mainline_xht < kMinSizeRatio ||
       static_cast<double>(xht_hi[kSUP]) / mainline_xht < kMinSizeRatio)) {
    xht_decision = XH_INCONSISTENT;
    return;
  }
  // TODO(eger): Check into inconsistency of super/subscript y offsets.
  if (xpos_entropy > kMaxEntropy) {
    xht_decision = XH_INCONSISTENT;
    return;
  }

  // No sub/superscripts at all: fully consistent.
  if (xht_count[kSUB] == 0 && xht_count[kSUP] == 0) {
    xht_decision = XH_GOOD;
    return;
  }
  xht_decision = XH_SUBNORMAL;
}
} // namespace tesseract
| 1080228-arabicocr11 | wordrec/lm_consistency.cpp | C++ | asf20 | 3,898 |
///////////////////////////////////////////////////////////////////////
// File: lm_state.cpp
// Description: Structures and functionality for capturing the state of
// segmentation search guided by the language model.
// Author: Rika Antonova
// Created: Mon Jun 20 11:26:43 PST 2012
//
// (C) Copyright 2012, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#include "lm_state.h"
namespace tesseract {
ELISTIZE(ViterbiStateEntry);
// Prints a one-line debug summary of this entry prefixed by msg:
// ratings sum, path length and cost, plus whichever of top-choice
// flags, inconsistency counts, dawg/ngram info and shape cost are
// present, ending with the x-height consistency decision.
void ViterbiStateEntry::Print(const char *msg) const {
  tprintf("%s ViterbiStateEntry", msg);
  if (updated) tprintf("(NEW)");  // entry created/updated this pass
  if (this->debug_str != NULL) {
    tprintf(" str=%s", this->debug_str->string());
  }
  tprintf(" with ratings_sum=%.4f length=%d cost=%.6f",
          this->ratings_sum, this->length, this->cost);
  if (this->top_choice_flags) {
    tprintf(" top_choice_flags=0x%x", this->top_choice_flags);
  }
  if (!this->Consistent()) {
    tprintf(" inconsistent=(punc %d case %d chartype %d script %d font %d)",
            this->consistency_info.NumInconsistentPunc(),
            this->consistency_info.NumInconsistentCase(),
            this->consistency_info.NumInconsistentChartype(),
            this->consistency_info.inconsistent_script,
            this->consistency_info.inconsistent_font);
  }
  if (this->dawg_info) tprintf(" permuter=%d", this->dawg_info->permuter);
  if (this->ngram_info) {
    tprintf(" ngram_cl_cost=%g context=%s ngram pruned=%d",
            this->ngram_info->ngram_and_classifier_cost,
            this->ngram_info->context.string(),
            this->ngram_info->pruned);
  }
  if (this->associate_stats.shape_cost > 0.0f) {
    tprintf(" shape_cost=%g", this->associate_stats.shape_cost);
  }
  tprintf(" %s",
          XHeightConsistencyEnumName[this->consistency_info.xht_decision]);
  tprintf("\n");
}
// Resets the Viterbi search state to its initial conditions:
// no entries, zero lengths, and an unbounded pruning cost.
void LanguageModelState::Clear() {
  viterbi_state_entries.clear();
  viterbi_state_entries_length = 0;
  viterbi_state_entries_prunable_length = 0;
  viterbi_state_entries_prunable_max_cost = MAX_FLOAT32;
}
// Prints all Viterbi state entries in this state, preceded by the heap
// limits (max prunable cost, prunable length, total length), with msg
// as the header prefix.
void LanguageModelState::Print(const char *msg) {
  tprintf("%s VSEs (max_cost=%g prn_len=%d tot_len=%d):\n",
          msg, viterbi_state_entries_prunable_max_cost,
          viterbi_state_entries_prunable_length, viterbi_state_entries_length);
  ViterbiStateEntry_IT vit(&viterbi_state_entries);
  for (vit.mark_cycle_pt(); !vit.cycled_list(); vit.forward()) {
    vit.data()->Print("");
  }
}
} // namespace tesseract
| 1080228-arabicocr11 | wordrec/lm_state.cpp | C++ | asf20 | 3,116 |
/* -*-C-*-
********************************************************************************
*
* File: pieces.c (Formerly pieces.c)
* Description:
* Author: Mark Seaman, OCR Technology
* Created: Fri Oct 16 14:37:00 1987
* Modified: Mon May 20 12:12:35 1991 (Mark Seaman) marks@hpgrlt
* Language: C
* Package: N/A
* Status: Reusable Software Component
*
* (c) Copyright 1987, Hewlett-Packard Company.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
*********************************************************************************/
/*----------------------------------------------------------------------
I n c l u d e s
----------------------------------------------------------------------*/
#include "blobs.h"
#include "freelist.h"
#include "helpers.h"
#include "matrix.h"
#include "ndminx.h"
#include "ratngs.h"
#include "seam.h"
#include "wordrec.h"
// Include automatically generated configuration file if running autoconf.
#ifdef HAVE_CONFIG_H
#include "config_auto.h"
#endif
/*----------------------------------------------------------------------
F u n c t i o n s
----------------------------------------------------------------------*/
/**********************************************************************
* classify_piece
*
* Create a larger piece from a collection of smaller ones. Classify
* it and return the results. Take the large piece apart to leave
* the collection of small pieces un modified.
**********************************************************************/
namespace tesseract {
// Temporarily joins blobs [start, end] of word into one larger blob,
// classifies it, then breaks the blob apart again so the collection of
// small pieces is left unmodified.  Returns the classifier's choice
// list, with each choice tagged with its ratings-matrix cell.
BLOB_CHOICE_LIST *Wordrec::classify_piece(const GenericVector<SEAM*>& seams,
                                          inT16 start,
                                          inT16 end,
                                          const char* description,
                                          TWERD *word,
                                          BlamerBundle *blamer_bundle) {
  // Only join/break when the range actually spans multiple pieces.
  if (end > start) join_pieces(seams, start, end, word);
  BLOB_CHOICE_LIST *choices = classify_blob(word->blobs[start], description,
                                            White, blamer_bundle);
  // Set the matrix_cell_ entries in all the BLOB_CHOICES.
  BLOB_CHOICE_IT bc_it(choices);
  for (bc_it.mark_cycle_pt(); !bc_it.cycled_list(); bc_it.forward()) {
    bc_it.data()->set_matrix_cell(start, end);
  }
  if (end > start) break_pieces(seams, start, end, word);
  return (choices);
}
// qsort-style comparator: orders choices by ascending unichar id.
// The void pointers point at BLOB_CHOICE* elements of the array.
template<class BLOB_CHOICE>
int SortByUnicharID(const void *void1, const void *void2) {
  const BLOB_CHOICE *first =
      *reinterpret_cast<const BLOB_CHOICE * const *>(void1);
  const BLOB_CHOICE *second =
      *reinterpret_cast<const BLOB_CHOICE * const *>(void2);
  return first->unichar_id() - second->unichar_id();
}
// qsort-style comparator: orders choices by descending rating.
// The void pointers point at BLOB_CHOICE* elements of the array.
// Fix: the previous version returned -1 for equal ratings, so
// compare(a, b) and compare(b, a) could both claim "greater" -- an
// inconsistent ordering, which qsort-style sorts require to be
// consistent.  Equal ratings now compare as 0.
template<class BLOB_CHOICE>
int SortByRating(const void *void1, const void *void2) {
  const BLOB_CHOICE *p1 = *reinterpret_cast<const BLOB_CHOICE * const *>(void1);
  const BLOB_CHOICE *p2 = *reinterpret_cast<const BLOB_CHOICE * const *>(void2);
  if (p1->rating() < p2->rating())
    return 1;   // p1 is worse rated: sort it after p2.
  if (p2->rating() < p1->rating())
    return -1;  // p1 is better rated: sort it before p2.
  return 0;     // Equal ratings: consistent tie.
}
/**********************************************************************
* fill_filtered_fragment_list
*
* Filter the fragment list so that the filtered_choices only contain
* fragments that are in the correct position. choices is the list
* that we are going to filter. fragment_pos is the position in the
* fragment that we are looking for and num_frag_parts is the the
* total number of pieces. The result will be appended to
* filtered_choices.
**********************************************************************/
// Copies into filtered_choices those entries of choices that are
// fragments in position fragment_pos of a character split into
// num_frag_parts pieces.  Each copied choice has its unichar id
// replaced with the id of the whole character the fragment belongs to,
// and the result is sorted by that id (required by the merge step in
// merge_and_put_fragment_lists).
void Wordrec::fill_filtered_fragment_list(BLOB_CHOICE_LIST *choices,
                                          int fragment_pos,
                                          int num_frag_parts,
                                          BLOB_CHOICE_LIST *filtered_choices) {
  BLOB_CHOICE_IT filtered_choices_it(filtered_choices);
  BLOB_CHOICE_IT choices_it(choices);

  for (choices_it.mark_cycle_pt(); !choices_it.cycled_list();
       choices_it.forward()) {
    UNICHAR_ID choice_unichar_id = choices_it.data()->unichar_id();
    const CHAR_FRAGMENT *frag = unicharset.get_fragment(choice_unichar_id);

    // Keep only fragments in the wanted position with the wanted total.
    if (frag != NULL && frag->get_pos() == fragment_pos &&
        frag->get_total() == num_frag_parts) {
      // Recover the unichar_id of the unichar that this fragment is
      // a part of
      BLOB_CHOICE *b = new BLOB_CHOICE(*choices_it.data());
      int original_unichar = unicharset.unichar_to_id(frag->get_unichar());
      b->set_unichar_id(original_unichar);
      filtered_choices_it.add_to_end(b);
    }
  }
  filtered_choices->sort(SortByUnicharID<BLOB_CHOICE>);
}
/**********************************************************************
* merge_and_put_fragment_lists
*
* Merge the fragment lists in choice_lists and append it to the
* ratings matrix.
**********************************************************************/
// Merges the num_frag_parts fragment lists in choice_lists (each sorted
// by unichar id) and appends the merged whole-character choices to
// ratings cell (row, column).  Fragments merge only when all parts
// belong to the same unichar; the merged rating is the sum of the part
// ratings and the merged certainty is the minimum.
void Wordrec::merge_and_put_fragment_lists(inT16 row, inT16 column,
                                           inT16 num_frag_parts,
                                           BLOB_CHOICE_LIST *choice_lists,
                                           MATRIX *ratings) {
  BLOB_CHOICE_IT *choice_lists_it = new BLOB_CHOICE_IT[num_frag_parts];

  for (int i = 0; i < num_frag_parts; i++) {
    choice_lists_it[i].set_to_list(&choice_lists[i]);
    choice_lists_it[i].mark_cycle_pt();
  }

  // Reuse the existing cell list if present, otherwise make a new one.
  BLOB_CHOICE_LIST *merged_choice = ratings->get(row, column);
  if (merged_choice == NULL)
    merged_choice = new BLOB_CHOICE_LIST;

  bool end_of_list = false;
  BLOB_CHOICE_IT merged_choice_it(merged_choice);
  while (!end_of_list) {
    // Find the maximum unichar_id of the current entry the iterators
    // are pointing at
    UNICHAR_ID max_unichar_id = choice_lists_it[0].data()->unichar_id();
    for (int i = 0; i < num_frag_parts; i++) {
      UNICHAR_ID unichar_id = choice_lists_it[i].data()->unichar_id();
      if (max_unichar_id < unichar_id) {
        max_unichar_id = unichar_id;
      }
    }

    // Move the each iterators until it gets to an entry that has a
    // value greater than or equal to max_unichar_id
    for (int i = 0; i < num_frag_parts; i++) {
      UNICHAR_ID unichar_id = choice_lists_it[i].data()->unichar_id();
      while (!choice_lists_it[i].cycled_list() &&
             unichar_id < max_unichar_id) {
        choice_lists_it[i].forward();
        unichar_id = choice_lists_it[i].data()->unichar_id();
      }
      if (choice_lists_it[i].cycled_list()) {
        end_of_list = true;
        break;
      }
    }

    if (end_of_list)
      break;

    // Checks if the fragments are parts of the same character
    UNICHAR_ID first_unichar_id = choice_lists_it[0].data()->unichar_id();
    bool same_unichar = true;
    for (int i = 1; i < num_frag_parts; i++) {
      UNICHAR_ID unichar_id = choice_lists_it[i].data()->unichar_id();
      if (unichar_id != first_unichar_id) {
        same_unichar = false;
        break;
      }
    }

    if (same_unichar) {
      // Add the merged character to the result
      UNICHAR_ID merged_unichar_id = first_unichar_id;
      inT16 merged_fontinfo_id = choice_lists_it[0].data()->fontinfo_id();
      inT16 merged_fontinfo_id2 = choice_lists_it[0].data()->fontinfo_id2();
      float merged_min_xheight = choice_lists_it[0].data()->min_xheight();
      float merged_max_xheight = choice_lists_it[0].data()->max_xheight();
      float positive_yshift = 0, negative_yshift = 0;
      int merged_script_id = choice_lists_it[0].data()->script_id();
      BlobChoiceClassifier classifier = choice_lists_it[0].data()->classifier();
      // Ratings sum; certainty is the minimum over all parts.
      float merged_rating = 0, merged_certainty = 0;
      for (int i = 0; i < num_frag_parts; i++) {
        float rating = choice_lists_it[i].data()->rating();
        float certainty = choice_lists_it[i].data()->certainty();
        if (i == 0 || certainty < merged_certainty)
          merged_certainty = certainty;
        merged_rating += rating;
        choice_lists_it[i].forward();
        if (choice_lists_it[i].cycled_list())
          end_of_list = true;
        // NOTE(review): data() is read here even when the iterator just
        // cycled; this relies on the circular list keeping the element
        // accessible after forward() -- confirm against the list impl.
        IntersectRange(choice_lists_it[i].data()->min_xheight(),
                       choice_lists_it[i].data()->max_xheight(),
                       &merged_min_xheight, &merged_max_xheight);
        float yshift = choice_lists_it[i].data()->yshift();
        if (yshift > positive_yshift) positive_yshift = yshift;
        if (yshift < negative_yshift) negative_yshift = yshift;
      }
      // Shift in both directions cancels to 0; otherwise keep whichever
      // direction was observed.
      float merged_yshift = positive_yshift != 0
          ? (negative_yshift != 0 ? 0 : positive_yshift)
          : negative_yshift;
      merged_choice_it.add_to_end(new BLOB_CHOICE(merged_unichar_id,
                                                  merged_rating,
                                                  merged_certainty,
                                                  merged_fontinfo_id,
                                                  merged_fontinfo_id2,
                                                  merged_script_id,
                                                  merged_min_xheight,
                                                  merged_max_xheight,
                                                  merged_yshift,
                                                  classifier));
    }
  }

  if (classify_debug_level)
    print_ratings_list("Merged Fragments", merged_choice,
                       unicharset);

  // NOTE(review): the delete path assumes an empty merged_choice was
  // freshly allocated above (a list obtained from ratings->get() is
  // non-empty) -- confirm, otherwise the matrix keeps a dangling entry.
  if (merged_choice->empty())
    delete merged_choice;
  else
    ratings->put(row, column, merged_choice);

  delete [] choice_lists_it;
}
/**********************************************************************
* get_fragment_lists
*
* Recursively go through the ratings matrix to find lists of fragments
* to be merged in the function merge_and_put_fragment_lists.
* current_frag is the postion of the piece we are looking for.
* current_row is the row in the rating matrix we are currently at.
* start is the row we started initially, so that we can know where
* to append the results to the matrix. num_frag_parts is the total
* number of pieces we are looking for and num_blobs is the size of the
* ratings matrix.
**********************************************************************/
// Recursively walks the ratings matrix collecting, for each blob range,
// the per-position fragment lists, and merges them into the matrix when
// all num_frag_parts positions are filled.  current_frag is the piece
// position we are looking for, current_row the current matrix row,
// start the row the recursion began at (target row for the merge), and
// num_blobs the matrix dimension.
void Wordrec::get_fragment_lists(inT16 current_frag, inT16 current_row,
                                 inT16 start, inT16 num_frag_parts,
                                 inT16 num_blobs, MATRIX *ratings,
                                 BLOB_CHOICE_LIST *choice_lists) {
  // All fragment positions collected: merge into cell (start, row - 1).
  if (current_frag == num_frag_parts) {
    merge_and_put_fragment_lists(start, current_row - 1, num_frag_parts,
                                 choice_lists, ratings);
    return;
  }

  for (inT16 x = current_row; x < num_blobs; x++) {
    BLOB_CHOICE_LIST *choices = ratings->get(current_row, x);
    if (choices == NULL)
      continue;

    // Collect fragments at position current_frag from this cell; if any
    // are found, recurse for the next position starting past column x.
    fill_filtered_fragment_list(choices, current_frag, num_frag_parts,
                                &choice_lists[current_frag]);
    if (!choice_lists[current_frag].empty()) {
      get_fragment_lists(current_frag + 1, x + 1, start, num_frag_parts,
                         num_blobs, ratings, choice_lists);
      choice_lists[current_frag].clear();  // reset for the next column
    }
  }
}
/**********************************************************************
* merge_fragments
*
* Try to merge fragments in the ratings matrix and put the result in
* the corresponding row and column
**********************************************************************/
// Tries to merge character fragments found in the ratings matrix into
// whole characters (for every start row and every supported number of
// fragment parts), then removes the raw fragment choices from the
// matrix so only merged characters remain.
void Wordrec::merge_fragments(MATRIX *ratings, inT16 num_blobs) {
  BLOB_CHOICE_LIST choice_lists[CHAR_FRAGMENT::kMaxChunks];
  for (inT16 start = 0; start < num_blobs; start++) {
    for (int frag_parts = 2; frag_parts <= CHAR_FRAGMENT::kMaxChunks;
         frag_parts++) {
      get_fragment_lists(0, start, start, frag_parts, num_blobs,
                         ratings, choice_lists);
    }
  }

  // Delete fragments from the rating matrix
  for (inT16 x = 0; x < num_blobs; x++) {
    for (inT16 y = x; y < num_blobs; y++) {
      BLOB_CHOICE_LIST *choices = ratings->get(x, y);
      if (choices != NULL) {
        BLOB_CHOICE_IT choices_it(choices);
        for (choices_it.mark_cycle_pt(); !choices_it.cycled_list();
             choices_it.forward()) {
          UNICHAR_ID choice_unichar_id = choices_it.data()->unichar_id();
          const CHAR_FRAGMENT *frag =
              unicharset.get_fragment(choice_unichar_id);
          // extract() unlinks the element safely during iteration.
          if (frag != NULL)
            delete choices_it.extract();
        }
      }
    }
  }
}
} // namespace tesseract
| 1080228-arabicocr11 | wordrec/pieces.cpp | C | asf20 | 13,118 |
/* -*-C-*-
********************************************************************************
*
* File: olutil.h (Formerly olutil.h)
* Description:
* Author: Mark Seaman, SW Productivity
* Created: Fri Oct 16 14:37:00 1987
* Modified: Wed Jul 10 14:21:55 1991 (Mark Seaman) marks@hpgrlt
* Language: C
* Package: N/A
* Status: Reusable Software Component
*
* (c) Copyright 1987, Hewlett-Packard Company.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
*********************************************************************************/
#ifndef OLUTIL_H
#define OLUTIL_H
/*----------------------------------------------------------------------
I n c l u d e s
----------------------------------------------------------------------*/
#include "blobs.h"
/*----------------------------------------------------------------------
M a c r o s
----------------------------------------------------------------------*/
/**********************************************************************
* is_inside_angle
*
* Return true if the edgept supplied as input is an inside angle. This
* is determined by the angular change of the vectors from point to
* point.
**********************************************************************/
#define is_inside_angle(pt) \
(angle_change ((pt)->prev, (pt), (pt)->next) < chop_inside_angle)
/**********************************************************************
* same_outline_bounds
*
* Return TRUE if these two outlines have the same bounds.
**********************************************************************/
#define same_outline_bounds(outline,other_outline) \
(outline->topleft.x == other_outline->topleft.x && \
outline->topleft.y == other_outline->topleft.y && \
outline->botright.x == other_outline->botright.x && \
outline->botright.y == other_outline->botright.y) \
/**********************************************************************
 * weighted_edgept_dist
 *
 * Return the weighted squared distance between the two edge points:
 * the squared x component is scaled by chop_x_y_weight before being
 * added to the (unweighted) squared y component.
 **********************************************************************/
#define weighted_edgept_dist(p1,p2,chop_x_y_weight)  \
(((p1)->pos.x - (p2)->pos.x) *  \
((p1)->pos.x - (p2)->pos.x) * chop_x_y_weight +  \
((p1)->pos.y - (p2)->pos.y) *  \
((p1)->pos.y - (p2)->pos.y))
/*----------------------------------------------------------------------
              F u n c t i o n s
----------------------------------------------------------------------*/
// Ensures blob1/blob2 outline data are consistent with their order
// (exact semantics in olutil.cpp -- not visible here).
void correct_blob_order(TBLOB *blob1, TBLOB *blob2);
// Removes outlines of the blob that duplicate another outline's bounds.
void eliminate_duplicate_outlines(TBLOB *blob);
// Initializes/normalizes the outline data of the blob (see olutil.cpp).
void setup_blob_outlines(TBLOB *blob);
#endif
| 1080228-arabicocr11 | wordrec/olutil.h | C | asf20 | 3,254 |
/**********************************************************************
* File: tface.c (Formerly tface.c)
* Description: C side of the Tess/tessedit C/C++ interface.
* Author: Ray Smith
* Created: Mon Apr 27 11:57:06 BST 1992
*
* (C) Copyright 1992, Hewlett-Packard Ltd.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
#include "callcpp.h"
#include "chop.h"
#include "chopper.h"
#include "danerror.h"
#include "fxdefs.h"
#include "globals.h"
#include "gradechop.h"
#include "pageres.h"
#include "wordrec.h"
#include "featdefs.h"
#include "params_model.h"
#include <math.h>
#ifdef __UNIX__
#include <unistd.h>
#endif
namespace tesseract {
/**
 * @name program_editup
 *
 * Initialize the recognizer components.  Records the image file name
 * (when textbase is non-NULL), builds the feature definitions and
 * extractors, initializes the adaptive classifier, and optionally loads
 * the dictionary dawgs.
 * @param textbase image file base name; ignored when NULL
 * @param init_classifier passed through to InitAdaptiveClassifier
 * @param init_dict if true, load the dictionary from the global cache
 */
void Wordrec::program_editup(const char *textbase,
bool init_classifier,
bool init_dict) {
if (textbase != NULL) imagefile = textbase;
InitFeatureDefs(&feature_defs_);
SetupExtractors(&feature_defs_);
InitAdaptiveClassifier(init_classifier);
if (init_dict) getDict().Load(Dict::GlobalDawgCache());
// Save the initial ok-split threshold: set_pass1() overwrites
// chop_ok_split with 70.0 and set_pass2() restores this saved value.
pass2_ok_split = chop_ok_split;
}
/**
 * @name end_recog
 *
 * Shut down the recognizer by delegating to program_editdown().
 * Always returns 0.
 */
int Wordrec::end_recog() {
  program_editdown(0);
  return 0;
}
/**
 * @name program_editdown
 *
 * This function holds any necessary post processing for the Wise Owl
 * program: releases the adaptive classifier and ends the dictionary.
 * The elasped_time argument is accepted for interface compatibility but
 * is not used.
 */
void Wordrec::program_editdown(inT32 elasped_time) {
EndAdaptiveClassifier();
getDict().End();
}
/**
 * @name set_pass1
 *
 * Get ready to do some pass 1 stuff: tighten the ok-split threshold to
 * a fixed 70.0, switch the params model to its pass-1 weights, and run
 * the pass-1 setup.
 */
void Wordrec::set_pass1() {
chop_ok_split.set_value(70.0);
language_model_->getParamsModel().SetPass(ParamsModel::PTRAIN_PASS1);
SettupPass1();
}
/**
 * @name set_pass2
 *
 * Get ready to do some pass 2 stuff: restore the ok-split threshold
 * saved by program_editup(), switch the params model to its pass-2
 * weights, and run the pass-2 setup.
 */
void Wordrec::set_pass2() {
chop_ok_split.set_value(pass2_ok_split);
language_model_->getParamsModel().SetPass(ParamsModel::PTRAIN_PASS2);
SettupPass2();
}
/**
 * @name cc_recog
 *
 * Recognize a word: reset the dictionary's hyphenation state according
 * to whether the word ends its line, run the main chopping/segmentation
 * search, then emit word-choice debug output and sanity-check that the
 * word's states are all valid.
 */
void Wordrec::cc_recog(WERD_RES *word) {
getDict().reset_hyphen_vars(word->word->flag(W_EOL));
chop_word_main(word);
word->DebugWordChoices(getDict().stopper_debug_level >= 1,
getDict().word_to_debug.string());
ASSERT_HOST(word->StatesAllValid());
}
/**
 * @name dict_word()
 *
 * Test the dictionaries, returning NO_PERM (0) if not found, or one
 * of the PermuterType values if found, according to the dictionary.
 * Thin wrapper around Dict::valid_word().
 */
int Wordrec::dict_word(const WERD_CHOICE &word) {
return getDict().valid_word(word);
}
/**
 * @name call_matcher
 *
 * Called from Tess with a blob in tess form.  The blob may need rotating
 * to the correct orientation for classification; if so, a temporary
 * normalized copy is classified and then discarded.
 * @return a freshly allocated rating list, owned by the caller.
 */
BLOB_CHOICE_LIST *Wordrec::call_matcher(TBLOB *tessblob) {
  // NULL means no normalization was required for this blob.
  TBLOB *normalized = tessblob->ClassifyNormalizeIfNeeded();
  TBLOB *blob_to_classify = (normalized != NULL) ? normalized : tessblob;
  BLOB_CHOICE_LIST *ratings = new BLOB_CHOICE_LIST();  // matcher result
  AdaptiveClassifier(blob_to_classify, ratings);
  // Dispose of the temporary rotated copy, if one was created.
  if (normalized != NULL) {
    delete normalized;
  }
  return ratings;
}
} // namespace tesseract
| 1080228-arabicocr11 | wordrec/tface.cpp | C++ | asf20 | 3,879 |
/* -*-C-*-
********************************************************************************
*
* File: wordclass.c (Formerly wordclass.c)
* Description: Word classifier
* Author: Mark Seaman, OCR Technology
* Created: Tue Jan 30 14:03:25 1990
* Modified: Fri Jul 12 16:03:06 1991 (Mark Seaman) marks@hpgrlt
* Language: C
* Package: N/A
* Status: Experimental (Do Not Distribute)
*
* (c) Copyright 1990, Hewlett-Packard Company.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
*********************************************************************************/
/*----------------------------------------------------------------------
I N C L U D E S
----------------------------------------------------------------------*/
#include <assert.h>
#include <stdio.h>
#include "associate.h"
#include "render.h"
#include "callcpp.h"
#include "wordrec.h"
// Include automatically generated configuration file if running autoconf.
#ifdef HAVE_CONFIG_H
#include "config_auto.h"
#endif
/*----------------------------------------------------------------------
F u n c t i o n s
----------------------------------------------------------------------*/
namespace tesseract {
/**
 * @name classify_blob
 *
 * Classify the given blob by running the adaptive classifier through
 * call_matcher() and return the resulting choice list (owned by the
 * caller).  When a blamer bundle is supplied, also checks whether a
 * truth blob with the same bounding box was misclassified and records
 * classifier blame.  Debug rendering/printing happens only when
 * graphics are enabled and the relevant debug flags are set.
 * @param blob Current blob
 * @param string The string to display in ScrollView
 * @param color The colour to use when displayed with ScrollView
 * @param blamer_bundle if non-NULL, receives classifier blame info
 */
BLOB_CHOICE_LIST *Wordrec::classify_blob(TBLOB *blob,
const char *string, C_COL color,
BlamerBundle *blamer_bundle) {
#ifndef GRAPHICS_DISABLED
if (wordrec_display_all_blobs)
display_blob(blob, color);
#endif
// TODO(rays) collapse with call_matcher and move all to wordrec.cpp.
BLOB_CHOICE_LIST* choices = call_matcher(blob);
// If a blob with the same bounding box as one of the truth character
// bounding boxes is not classified as the corresponding truth character
// blame character classifier for incorrect answer.
if (blamer_bundle != NULL) {
blamer_bundle->BlameClassifier(getDict().getUnicharset(),
blob->bounding_box(),
*choices,
wordrec_debug_blamer);
}
#ifndef GRAPHICS_DISABLED
if (classify_debug_level && string)
print_ratings_list(string, choices, getDict().getUnicharset());
if (wordrec_blob_pause)
window_wait(blob_window);
#endif
return choices;
}
} // namespace tesseract;
| 1080228-arabicocr11 | wordrec/wordclass.cpp | C | asf20 | 3,301 |
/* -*-C-*-
********************************************************************************
*
* File: measure.h (Formerly measure.h)
* Description: Statistics for a group of single measurements
* Author: Mark Seaman, SW Productivity
* Created: Fri Oct 16 14:37:00 1987
* Modified: Mon Apr 8 09:42:28 1991 (Mark Seaman) marks@hpgrlt
* Language: C
* Package: N/A
* Status: Reusable Software Component
*
* (c) Copyright 1987, Hewlett-Packard Company.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
********************************************************************************
*/
#ifndef MEASURE_H
#define MEASURE_H
/*
----------------------------------------------------------------------
I n c l u d e s
----------------------------------------------------------------------
*/
#include <math.h>
/*
----------------------------------------------------------------------
T y p e s
----------------------------------------------------------------------
*/
typedef struct
{
long num_samples;
float sum_of_samples;
float sum_of_squares;
} MEASUREMENT;
/*
----------------------------------------------------------------------
M a c r o s
----------------------------------------------------------------------
*/
/**********************************************************************
* add_sample
*
* Add one more sample to a measurement.
**********************************************************************/
#define ADD_SAMPLE(m,s) \
(m.sum_of_samples += (float) (s), \
m.sum_of_squares += (float) (s) * (float) (s), \
++m.num_samples)
/**********************************************************************
* mean
*
* Return the mean value of the measurement.
**********************************************************************/
#define MEAN(m) \
((m).num_samples ? \
((float) ((m).sum_of_samples / (m).num_samples)) : \
0)
/**********************************************************************
* new_measurement
*
* Initalize a record to hold a measurement of a group of individual
* samples.
**********************************************************************/
#define new_measurement(m) \
((m).num_samples = 0, \
(m).sum_of_samples = 0, \
(m).sum_of_squares = 0)
/**********************************************************************
* number_of_samples
*
* Return the number of samples in a measurement.
**********************************************************************/
#define number_of_samples(m) \
((m).num_samples)
/**********************************************************************
* standard_deviation
*
* Return the standard deviation of the measurement.
**********************************************************************/
#define standard_deviation(m) \
((float) sqrt (VARIANCE (m)))
/**********************************************************************
* variance
*
* Return the variance of the measurement.
**********************************************************************/
#define VARIANCE(m) \
(((m).num_samples > 1) ? \
((float) \
(((m).num_samples * (m).sum_of_squares - \
(m).sum_of_samples * (m).sum_of_samples) / \
(((m).num_samples - 1) * (m).num_samples))) : \
0)
/**********************************************************************
* print_summary
*
* Summarize a MEASUREMENT record.
**********************************************************************/
#define print_summary(string,measure) \
cprintf ("\t%-20s \tn = %d, \tm = %4.2f, \ts = %4.2f\n ", \
string, \
number_of_samples (measure), \
MEAN (measure), \
standard_deviation (measure))
#endif
| 1080228-arabicocr11 | wordrec/measure.h | C | asf20 | 4,575 |
/* -*-C-*-
********************************************************************************
*
* File: makechop.h (Formerly makechop.h)
* Description:
* Author: Mark Seaman, SW Productivity
* Created: Fri Oct 16 14:37:00 1987
* Modified: Mon Jul 29 13:33:23 1991 (Mark Seaman) marks@hpgrlt
* Language: C
* Package: N/A
* Status: Reusable Software Component
*
* (c) Copyright 1987, Hewlett-Packard Company.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
*********************************************************************************/
#ifndef MAKECHOP_H
#define MAKECHOP_H
/*----------------------------------------------------------------------
              I n c l u d e s
----------------------------------------------------------------------*/
#include "chop.h"
#include "olutil.h"
/*----------------------------------------------------------------------
              M a c r o s
---------------------------------------------------------------------*/
/**********************************************************************
 * is_split_outline
 *
 * Check to see if both sides of the split fall within the bounding
 * box of this outline.
 * NOTE: the stray line-continuation backslash that used to follow the
 * closing parenthesis has been removed; it spliced the following
 * comment block into this macro's definition.
 **********************************************************************/
#define is_split_outline(outline,split) \
(outline->Contains(split->point1->pos) && \
outline->Contains(split->point2->pos))
/*----------------------------------------------------------------------
        Public Function Prototypes
----------------------------------------------------------------------*/
void apply_seam(TBLOB *blob, TBLOB *other_blob, bool italic_blob, SEAM *seam);
void form_two_blobs(TBLOB *blob, TBLOB *other_blob, bool italic_blob,
                    const TPOINT& location);
void make_double_split(TBLOB *blob, TBLOB *other_blob, bool italic_blob,
                       SEAM *seam);
void make_single_split(TESSLINE *outlines, SPLIT *split);
void make_split_blobs(TBLOB *blob, TBLOB *other_blob, bool italic_blob,
                      SEAM *seam);
void make_triple_split(TBLOB *blob, TBLOB *other_blob, bool italic_blob,
                       SEAM *seam);
void undo_seam(TBLOB *blob, TBLOB *other_blob, SEAM *seam);
void undo_single_split(TBLOB *blob, SPLIT *split);
#endif
| 1080228-arabicocr11 | wordrec/makechop.h | C | asf20 | 2,766 |
/* -*-C-*-
********************************************************************************
*
* File: findseam.h (Formerly findseam.h)
* Description:
* Author: Mark Seaman, SW Productivity
* Created: Fri Oct 16 14:37:00 1987
* Modified: Thu May 16 17:05:17 1991 (Mark Seaman) marks@hpgrlt
* Language: C
* Package: N/A
* Status: Reusable Software Component
*
* (c) Copyright 1987, Hewlett-Packard Company.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
*********************************************************************************/
#ifndef FINDSEAM_H
#define FINDSEAM_H
/*----------------------------------------------------------------------
              I n c l u d e s
----------------------------------------------------------------------*/
#include "seam.h"
#include "genericheap.h"
#include "kdpair.h"
#include "chop.h"
// The SeamPair elements own their SEAMs and delete them upon destruction.
// (priority, SEAM*) pair ordered by increasing key, and a heap of them.
typedef tesseract::KDPtrPairInc<float, SEAM> SeamPair;
typedef tesseract::GenericHeap<SeamPair> SeamQueue;
// Decreasing-order counterpart pair/heap.  NOTE(review): the intended
// role of SeamPile vs SeamQueue is defined at the use sites elsewhere.
typedef tesseract::KDPtrPairDec<float, SEAM> SeamDecPair;
typedef tesseract::GenericHeap<SeamDecPair> SeamPile;
#endif
| 1080228-arabicocr11 | wordrec/findseam.h | C | asf20 | 1,697 |
///////////////////////////////////////////////////////////////////////
// File: lm_state.h
// Description: Structures and functionality for capturing the state of
// segmentation search guided by the language model.
//
// Author: Rika Antonova
// Created: Mon Jun 20 11:26:43 PST 2012
//
// (C) Copyright 2012, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#ifndef TESSERACT_WORDREC_LANGUAGE_MODEL_DEFS_H_
#define TESSERACT_WORDREC_LANGUAGE_MODEL_DEFS_H_
#include "associate.h"
#include "elst.h"
#include "dawg.h"
#include "lm_consistency.h"
#include "matrix.h"
#include "ratngs.h"
#include "stopper.h"
#include "strngs.h"
namespace tesseract {
// Used for expressing various language model flags (packed into a
// single unsigned byte).
typedef unsigned char LanguageModelFlagsType;
// The following structs are used for storing the state of the language model
// in the segmentation search graph. In this graph the nodes are BLOB_CHOICEs
// and the links are the relationships between the underlying blobs (see
// segsearch.h for a more detailed description).
// Each of the BLOB_CHOICEs contains LanguageModelState struct, which has
// a list of N best paths (list of ViterbiStateEntry) explored by the Viterbi
// search leading up to and including this BLOB_CHOICE.
// Each ViterbiStateEntry contains information from various components of the
// language model: dawgs in which the path is found, character ngram model
// probability of the path, script/chartype/font consistency info, state for
// language-specific heuristics (e.g. hyphenated and compound words, lower/upper
// case preferences, etc).
// Each ViterbiStateEntry also contains the parent pointer, so that the path
// that it represents (WERD_CHOICE) can be constructed by following these
// parent pointers.
// Struct for storing additional information used by Dawg language model
// component. It stores the set of active dawgs in which the sequence of
// letters on a path can be found.
// Ownership: the constructor deep-copies *a into active_dawgs and the
// destructor deletes it.  NOTE(review): no copy constructor/assignment
// is declared, so copying an instance would double-delete — avoid
// copying.
struct LanguageModelDawgInfo {
LanguageModelDawgInfo(DawgPositionVector *a, PermuterType pt) : permuter(pt) {
active_dawgs = new DawgPositionVector(*a);
}
~LanguageModelDawgInfo() {
delete active_dawgs;
}
// Owned deep copy of the caller's active-dawg vector.
DawgPositionVector *active_dawgs;
PermuterType permuter;
};
// Struct for storing additional information used by Ngram language model
// component.  Plain value type: all members are copied/owned by value.
struct LanguageModelNgramInfo {
LanguageModelNgramInfo(const char *c, int l, bool p, float nc, float ncc)
: context(c), context_unichar_step_len(l), pruned(p), ngram_cost(nc),
ngram_and_classifier_cost(ncc) {}
STRING context; // context string
// Length of the context measured by advancing using UNICHAR::utf8_step()
// (should be at most the order of the character ngram model used).
int context_unichar_step_len;
// The paths with pruned set are pruned out from the perspective of the
// character ngram model. They are explored further because they represent
// a dictionary match or a top choice. Thus ngram_info is still computed
// for them in order to calculate the combined cost.
bool pruned;
// -ln(P_ngram_model(path))
float ngram_cost;
// -[ ln(P_classifier(path)) + scale_factor * ln(P_ngram_model(path)) ]
float ngram_and_classifier_cost;
};
// Struct for storing the information about a path in the segmentation graph
// explored by Viterbi search.
struct ViterbiStateEntry : public ELIST_LINK {
// Extends the path ending at parent entry pe with blob choice b.
// Path aggregates (ratings_sum, min_certainty, adapted, length,
// outline_length and the optional debug string) are seeded from b and
// then accumulated from the parent when pe is non-NULL.
// Takes ownership of d and n (deleted in the destructor).
ViterbiStateEntry(ViterbiStateEntry *pe,
BLOB_CHOICE *b, float c, float ol,
const LMConsistencyInfo &ci,
const AssociateStats &as,
LanguageModelFlagsType tcf,
LanguageModelDawgInfo *d,
LanguageModelNgramInfo *n,
const char *debug_uch)
: cost(c), curr_b(b), parent_vse(pe), competing_vse(NULL),
ratings_sum(b->rating()),
min_certainty(b->certainty()), adapted(b->IsAdapted()), length(1),
outline_length(ol), consistency_info(ci), associate_stats(as),
top_choice_flags(tcf), dawg_info(d), ngram_info(n),
updated(true) {
// A debug string is only kept when debug output was requested.
debug_str = (debug_uch == NULL) ? NULL : new STRING();
if (pe != NULL) {
ratings_sum += pe->ratings_sum;
if (pe->min_certainty < min_certainty) {
min_certainty = pe->min_certainty;
}
adapted += pe->adapted;
length += pe->length;
outline_length += pe->outline_length;
// NOTE: assumes the parent also carries a debug_str when debugging
// is on (same debug_uch convention along the whole path).
if (debug_uch != NULL) *debug_str += *(pe->debug_str);
}
if (debug_str != NULL && debug_uch != NULL) *debug_str += debug_uch;
}
~ViterbiStateEntry() {
delete dawg_info;
delete ngram_info;
delete debug_str;
}
// Comparator function for sorting ViterbiStateEntry_LISTs in
// non-increasing order of costs.
// NOTE: never returns 0, so entries with equal costs are ordered
// arbitrarily by the sort.
static int Compare(const void *e1, const void *e2) {
const ViterbiStateEntry *ve1 =
*reinterpret_cast<const ViterbiStateEntry * const *>(e1);
const ViterbiStateEntry *ve2 =
*reinterpret_cast<const ViterbiStateEntry * const *>(e2);
return (ve1->cost < ve2->cost) ? -1 : 1;
}
// Dawg-matched paths tolerate case inconsistencies: a path with dawg
// info counts as consistent as long as no case inconsistencies were
// recorded; otherwise the full consistency check applies.
inline bool Consistent() const {
if (dawg_info != NULL && consistency_info.NumInconsistentCase() == 0) {
return true;
}
return consistency_info.Consistent();
}
// Returns true if this VSE has an alphanumeric character as its classifier
// result.
bool HasAlnumChoice(const UNICHARSET& unicharset) {
if (curr_b == NULL) return false;
UNICHAR_ID unichar_id = curr_b->unichar_id();
if (unicharset.get_isalpha(unichar_id) ||
unicharset.get_isdigit(unichar_id))
return true;
return false;
}
void Print(const char *msg) const;
// The cost is an adjusted ratings sum, that is adjusted by all the language
// model components that use Viterbi search.
float cost;
// Pointers to BLOB_CHOICE and parent ViterbiStateEntry (not owned by this).
BLOB_CHOICE *curr_b;
ViterbiStateEntry *parent_vse;
// Pointer to a case-competing ViterbiStateEntry in the same list that
// represents a path ending in the same letter of the opposite case.
ViterbiStateEntry *competing_vse;
// Various information about the characters on the path represented
// by this ViterbiStateEntry.
float ratings_sum; // sum of ratings of character on the path
float min_certainty; // minimum certainty on the path
int adapted; // number of BLOB_CHOICES from adapted templates
int length; // number of characters on the path
float outline_length; // length of the outline so far
LMConsistencyInfo consistency_info; // path consistency info
AssociateStats associate_stats; // character widths/gaps/seams
// Flags for marking the entry as a top choice path with
// the smallest rating or lower/upper case letters).
LanguageModelFlagsType top_choice_flags;
// Extra information maintained by Dawg laguage model component
// (owned by ViterbiStateEntry).
LanguageModelDawgInfo *dawg_info;
// Extra information maintained by Ngram laguage model component
// (owned by ViterbiStateEntry).
LanguageModelNgramInfo *ngram_info;
bool updated; // set to true if the entry has just been created/updated
// UTF8 string representing the path corresponding to this vse.
// Populated only in when language_model_debug_level > 0.
STRING *debug_str;
};
ELISTIZEH(ViterbiStateEntry);
// Struct to store information maintained by various language model components.
// One instance exists per node of the segmentation search (see the file
// comment above); it holds the list of surviving Viterbi paths ending at
// that node plus pruning bookkeeping.
struct LanguageModelState {
LanguageModelState() :
viterbi_state_entries_prunable_length(0),
viterbi_state_entries_prunable_max_cost(MAX_FLOAT32),
viterbi_state_entries_length(0) {}
~LanguageModelState() {}
// Clears the viterbi search state back to its initial conditions.
void Clear();
void Print(const char *msg);
// Storage for the Viterbi state.
ViterbiStateEntry_LIST viterbi_state_entries;
// Number and max cost of prunable paths in viterbi_state_entries.
int viterbi_state_entries_prunable_length;
float viterbi_state_entries_prunable_max_cost;
// Total number of entries in viterbi_state_entries.
int viterbi_state_entries_length;
};
// Bundle together all the things pertaining to the best choice/state.
struct BestChoiceBundle {
// Builds one empty LanguageModelState per ratings-matrix row.
// NOTE(review): the states are heap-allocated into a PointerVector,
// which presumably deletes its elements on destruction — confirm in
// the PointerVector implementation.
explicit BestChoiceBundle(int matrix_dimension)
: updated(false), best_vse(NULL) {
beam.reserve(matrix_dimension);
for (int i = 0; i < matrix_dimension; ++i)
beam.push_back(new LanguageModelState);
}
~BestChoiceBundle() {}
// Flag to indicate whether anything was changed.
bool updated;
// Places to try to fix the word suggested by ambiguity checking.
DANGERR fixpt;
// The beam. One LanguageModelState containing a list of ViterbiStateEntry per
// row in the ratings matrix containing all VSEs whose BLOB_CHOICE is
// somewhere in the corresponding row.
PointerVector<LanguageModelState> beam;
// Best ViterbiStateEntry and BLOB_CHOICE.
ViterbiStateEntry *best_vse;
};
} // namespace tesseract
#endif // TESSERACT_WORDREC_LANGUAGE_MODEL_DEFS_H_
| 1080228-arabicocr11 | wordrec/lm_state.h | C++ | asf20 | 9,544 |
/* -*-C-*-
********************************************************************************
*
* File: plotedges.c (Formerly plotedges.c)
* Description: Graphics routines for "Edges" and "Outlines" windows
* Author: Mark Seaman, OCR Technology
* Created: Fri Jul 28 13:14:48 1989
* Modified: Tue Jul 9 17:22:22 1991 (Mark Seaman) marks@hpgrlt
* Language: C
* Package: N/A
* Status: Experimental (Do Not Distribute)
*
* (c) Copyright 1989, Hewlett-Packard Company.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
*********************************************************************************/
#ifdef __UNIX__
#include <assert.h>
#endif
#include "plotedges.h"
#include "render.h"
#include "split.h"
// Include automatically generated configuration file if running autoconf.
#ifdef HAVE_CONFIG_H
#include "config_auto.h"
#endif
#ifndef GRAPHICS_DISABLED
/*----------------------------------------------------------------------
V a r i a b l e s
----------------------------------------------------------------------*/
ScrollView *edge_window = NULL;
/*----------------------------------------------------------------------
F u n c t i o n s
----------------------------------------------------------------------*/
/**********************************************************************
 * display_edgepts
 *
 * Render each outline's edge points, in white, into the shared edges
 * window.  The window is created lazily on first use and cleared on
 * every subsequent call.
 **********************************************************************/
void display_edgepts(LIST outlines) {
  if (edge_window == NULL) {
    edge_window = c_create_window("Edges", 750, 150,
                                  400, 128, -400.0, 400.0, 0.0, 256.0);
  } else {
    c_clear_window(edge_window);
  }
  void *window = edge_window;
  iterate(outlines) {
    render_edgepts(window, (EDGEPT *) first_node(outlines), White);
  }
}
/**********************************************************************
 * draw_blob_edges
 *
 * If split display is enabled, collect the edge-point loops of every
 * outline in the blob and render them in the edges window.
 **********************************************************************/
void draw_blob_edges(TBLOB *blob) {
  if (!wordrec_display_splits)
    return;
  LIST edge_list = NIL_LIST;
  for (TESSLINE *ol = blob->outlines; ol != NULL; ol = ol->next)
    push_on(edge_list, ol->loop);
  display_edgepts(edge_list);
  destroy(edge_list);
}
/**********************************************************************
 * mark_outline
 *
 * Draw a small red mark at the location of the given edge point in the
 * edges window.
 **********************************************************************/
void mark_outline(EDGEPT *edgept) {  /* Start of point list */
  void *window = edge_window;
  float x = edgept->pos.x;
  float y = edgept->pos.y;
  // Pen offsets tracing the mark, each relative to the previous point.
  static const float kDeltas[4][2] = {
    {-4, -12}, {-2, 4}, {-4, 2}, {10, 6}
  };
  c_line_color_index(window, Red);
  c_move(window, x, y);
  for (int i = 0; i < 4; ++i) {
    x += kDeltas[i][0];
    y += kDeltas[i][1];
    c_draw(window, x, y);
  }
  c_make_current(window);
}
/**********************************************************************
 * mark_split
 *
 * Draw the given split as a green line segment between its two edge
 * points in the edges window.
 **********************************************************************/
void mark_split(SPLIT *split) {
  void *window = edge_window;
  c_line_color_index(window, Green);
  c_move(window, (float) split->point1->pos.x, (float) split->point1->pos.y);
  c_draw(window, (float) split->point2->pos.x, (float) split->point2->pos.y);
  c_make_current(window);
}
#endif // GRAPHICS_DISABLED
| 1080228-arabicocr11 | wordrec/plotedges.cpp | C | asf20 | 4,263 |
/* -*-C-*-
********************************************************************************
*
* File: olutil.c (Formerly olutil.c)
* Description:
* Author: Mark Seaman, OCR Technology
* Created: Fri Oct 16 14:37:00 1987
* Modified: Fri May 17 13:11:24 1991 (Mark Seaman) marks@hpgrlt
* Language: C
* Package: N/A
* Status: Reusable Software Component
*
* (c) Copyright 1987, Hewlett-Packard Company.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
*********************************************************************************/
/*----------------------------------------------------------------------
I n c l u d e s
----------------------------------------------------------------------*/
#include "olutil.h"
#include "structures.h"
#include "blobs.h"
#include "const.h"
#ifdef __UNIX__
#include <assert.h>
#endif
/*----------------------------------------------------------------------
F u n c t i o n s
----------------------------------------------------------------------*/
/**********************************************************************
 * correct_blob_order
 *
 * Ensure the two blobs are in left-to-right order: if blob1's origin
 * lies to the right of blob2's, exchange their outline lists.
 **********************************************************************/
void correct_blob_order(TBLOB *blob1, TBLOB *blob2) {
  TPOINT origin1;
  TPOINT origin2;
  blob_origin(blob1, &origin1);
  blob_origin(blob2, &origin2);
  if (origin1.x > origin2.x) {
    TESSLINE *saved_outlines = blob1->outlines;
    blob1->outlines = blob2->outlines;
    blob2->outlines = saved_outlines;
  }
}
/**********************************************************************
 * eliminate_duplicate_outlines
 *
 * Find and delete any duplicate outline records in this blob.
 * Duplicates are detected purely by identical bounding boxes
 * (same_outline_bounds); the later of the two outlines is unlinked
 * from the list and deleted.
 **********************************************************************/
void eliminate_duplicate_outlines(TBLOB *blob) {
TESSLINE *outline;
TESSLINE *other_outline;
TESSLINE *last_outline;
for (outline = blob->outlines; outline; outline = outline->next) {
// Scan the rest of the list for outlines matching this one's bounds,
// tracking the predecessor so the duplicate can be unlinked in place.
for (last_outline = outline, other_outline = outline->next;
other_outline;
last_outline = other_outline, other_outline = other_outline->next) {
if (same_outline_bounds (outline, other_outline)) {
last_outline->next = other_outline->next;
// This doesn't leak - the outlines share the EDGEPTs.
other_outline->loop = NULL;
delete other_outline;
// Resume iteration from the predecessor so the node that moved into
// the deleted slot is also examined.
other_outline = last_outline;
// If it is part of a cut, then it can't be a hole any more.
outline->is_hole = false;
}
}
}
}
/**********************************************************************
 * setup_blob_outlines
 *
 * Refresh the cached bounding box of every outline in the blob.
 **********************************************************************/
void setup_blob_outlines(TBLOB *blob) {
  for (TESSLINE *ol = blob->outlines; ol != NULL; ol = ol->next) {
    ol->ComputeBoundingBox();
  }
}
| 1080228-arabicocr11 | wordrec/olutil.cpp | C | asf20 | 3,567 |
///////////////////////////////////////////////////////////////////////
// File: lm_consistency.h
// Description: Struct for recording consistency of the paths representing
// OCR hypotheses.
// Author: Rika Antonova
// Created: Mon Jun 20 11:26:43 PST 2012
//
// (C) Copyright 2012, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
////////////////////////////////////////////////////////////////////////
#ifndef TESSERACT_WORDREC_CONSISTENCY_H_
#define TESSERACT_WORDREC_CONSISTENCY_H_

// The #includes must sit inside the include guard; previously they were
// above it, which defeated the guard for the included headers and risked
// infinite recursion if any of them includes this header back.
#include "dawg.h"
#include "dict.h"
#include "host.h"
#include "ratngs.h"

namespace tesseract {

// Printable names for XHeightConsistencyEnum values, in enum order.
static const char * const XHeightConsistencyEnumName[] = {
  "XH_GOOD",
  "XH_SUBNORMAL",
  "XH_INCONSISTENT",
};
// Struct for keeping track of the consistency of the path.
// An instance accumulates, along one search path, counts of character
// classes, case, script, font and x-height position; the NumInconsistent*()
// accessors derive inconsistency counts from those tallies.
struct LMConsistencyInfo {
  // Coarse classification of the most recent character on the path.
  enum ChartypeEnum { CT_NONE, CT_ALPHA, CT_DIGIT, CT_OTHER};
  // How much do characters have to be shifted away from normal parameters
  // before we say they're not normal?
  static const int kShiftThresh = 1;
  // How much shifting from subscript to superscript and back
  // before we declare shenanigans?
  static const int kMaxEntropy = 1;
  // Script positions - order important for entropy calculation.
  static const int kSUB = 0, kNORM = 1, kSUP = 2;
  static const int kNumPos = 3;

  // Copies state from parent_info when non-NULL (extending an existing
  // path); otherwise initializes the empty-path state.
  explicit LMConsistencyInfo(const LMConsistencyInfo* parent_info) {
    if (parent_info == NULL) {
      // Initialize from scratch.
      num_alphas = 0;
      num_digits = 0;
      num_punc = 0;
      num_other = 0;
      chartype = CT_NONE;
      punc_ref = NO_EDGE;
      invalid_punc = false;
      num_non_first_upper = 0;
      num_lower = 0;
      script_id = 0;
      inconsistent_script = false;
      num_inconsistent_spaces = 0;
      inconsistent_font = false;
      // Initialize XHeight stats.
      for (int i = 0; i < kNumPos; i++) {
        xht_count[i] = 0;
        xht_count_punc[i] = 0;
        xht_lo[i] = 0;
        xht_hi[i] = 256;  // kBlnCellHeight
      }
      xht_sp = -1;  // This invalid value indicates that there was no parent.
      xpos_entropy = 0;
      xht_decision = XH_GOOD;
    } else {
      // Copy parent info
      *this = *parent_info;
    }
  }
  // If the punctuation pattern is invalid, every punctuation character on
  // the path counts as inconsistent.
  inline int NumInconsistentPunc() const {
    return invalid_punc ? num_punc : 0;
  }
  // The minority case (upper vs. lower) counts as inconsistent.
  inline int NumInconsistentCase() const {
    return (num_non_first_upper > num_lower) ? num_lower : num_non_first_upper;
  }
  // Inconsistent punctuation, plus "other" characters, plus the minority of
  // alphas vs. digits.
  inline int NumInconsistentChartype() const {
    return (NumInconsistentPunc() + num_other +
        ((num_alphas > num_digits) ? num_digits : num_alphas));
  }
  // True only when no inconsistency of any kind has been recorded.
  inline bool Consistent() const {
    return (NumInconsistentPunc() == 0 && NumInconsistentCase() == 0 &&
            NumInconsistentChartype() == 0 && !inconsistent_script &&
            !inconsistent_font && !InconsistentXHeight());
  }
  inline int NumInconsistentSpaces() const {
    return num_inconsistent_spaces;
  }
  // Nonzero iff the x-height decision has settled on XH_INCONSISTENT.
  inline int InconsistentXHeight() const {
    return xht_decision == XH_INCONSISTENT;
  }
  void ComputeXheightConsistency(const BLOB_CHOICE *b, bool is_punc);
  // Lower bound of the normal-position x-height range, or 0 when the
  // x-height evidence is inconsistent.
  float BodyMinXHeight() const {
    if (InconsistentXHeight())
      return 0.0f;
    return xht_lo[kNORM];
  }
  // Upper bound of the normal-position x-height range, or MAX_INT16 when
  // the x-height evidence is inconsistent.
  float BodyMaxXHeight() const {
    if (InconsistentXHeight())
      return static_cast<float>(MAX_INT16);
    return xht_hi[kNORM];
  }

  // Character-class tallies along the path.
  int num_alphas;
  int num_digits;
  int num_punc;
  int num_other;
  ChartypeEnum chartype;
  // Position in the punctuation-pattern dawg; NO_EDGE-based.
  EDGE_REF punc_ref;
  bool invalid_punc;
  // Case tallies.
  int num_non_first_upper;
  int num_lower;
  // Script consistency.
  int script_id;
  bool inconsistent_script;
  int num_inconsistent_spaces;
  bool inconsistent_font;
  // Metrics clumped by position.
  float xht_lo[kNumPos];
  float xht_hi[kNumPos];
  inT16 xht_count[kNumPos];
  inT16 xht_count_punc[kNumPos];
  inT16 xht_sp;
  inT16 xpos_entropy;
  XHeightConsistencyEnum xht_decision;
};
} // namespace tesseract
#endif // TESSERACT_WORDREC_CONSISTENCY_H_
| 1080228-arabicocr11 | wordrec/lm_consistency.h | C++ | asf20 | 4,424 |
/**********************************************************************
* File: pango_font_info.h
* Description: Font-related objects and helper functions
* Author: Ranjith Unnikrishnan
* Created: Mon Nov 18 2013
*
* (C) Copyright 2013, Google Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
**********************************************************************/
#ifndef TESSERACT_TRAINING_PANGO_FONT_INFO_H_
#define TESSERACT_TRAINING_PANGO_FONT_INFO_H_
#include <string>
#include <utility>
#include <vector>
#include "hashfn.h"
#include "host.h"
#include "util.h"
#include "pango/pango-font.h"
typedef signed int char32;
namespace tesseract {
// Data holder class for a font, intended to avoid having to work with Pango or
// FontConfig-specific objects directly.
class PangoFontInfo {
 public:
  // Broad stylistic classification of the font.
  enum FontTypeEnum {
    UNKNOWN,
    SERIF,
    SANS_SERIF,
    DECORATIVE,
  };
  PangoFontInfo();
  // Initialize from parsing a font description name, defined as a string of the
  // format:
  //   "FamilyName [FaceName] [PointSize]"
  // where a missing FaceName implies the default regular face.
  // eg. "Arial Italic 12", "Verdana"
  //
  // FaceName is a combination of:
  //   [StyleName] [Variant] [Weight] [Stretch]
  // with (all optional) Pango-defined values of:
  //   StyleName: Oblique, Italic
  //   Variant  : Small-Caps
  //   Weight   : Ultra-Light, Light, Medium, Semi-Bold, Bold, Ultra-Bold, Heavy
  //   Stretch  : Ultra-Condensed, Extra-Condensed, Condensed, Semi-Condensed,
  //              Semi-Expanded, Expanded, Extra-Expanded, Ultra-Expanded.
  explicit PangoFontInfo(const string& name);
  bool ParseFontDescriptionName(const string& name);
  // Returns true if the font has codepoint coverage for the specified text.
  bool CoversUTF8Text(const char* utf8_text, int byte_length) const;
  // Modifies string to remove unicode points that are not covered by the
  // font. Returns the number of characters dropped.
  int DropUncoveredChars(string* utf8_text) const;
  // Returns true if the entire string can be rendered by the font with full
  // character coverage and no unknown glyph or dotted-circle glyph
  // substitutions on encountering a badly formed unicode sequence.
  // If true, returns individual graphemes. Any whitespace characters in the
  // original string are also included in the list.
  bool CanRenderString(const char* utf8_word, int len,
                       vector<string>* graphemes) const;
  bool CanRenderString(const char* utf8_word, int len) const;
  // Retrieves the x_bearing and x_advance for the given utf8 character in the
  // font. Returns false if the glyph for the character could not be found in
  // the font.
  // Ref: http://freetype.sourceforge.net/freetype2/docs/glyphs/glyphs-3.html
  bool GetSpacingProperties(const string& utf8_char,
                            int* x_bearing, int* x_advance) const;
  // Accessors.
  // Note: the top-level 'const' previously attached to the by-value return
  // types below was meaningless (cv-qualifiers are ignored on non-class
  // rvalues and trigger -Wignored-qualifiers); it has been removed.
  string DescriptionName() const;
  // Font Family name eg. "Arial"
  const string& family_name() const    { return family_name_; }
  // Size in points (1/72"), rounded to the nearest integer.
  int font_size() const { return font_size_; }
  bool is_bold() const { return is_bold_; }
  bool is_italic() const { return is_italic_; }
  bool is_smallcaps() const { return is_smallcaps_; }
  bool is_monospace() const { return is_monospace_; }
  bool is_fraktur() const { return is_fraktur_; }
  FontTypeEnum font_type() const { return font_type_; }

  int resolution() const { return resolution_; }
  void set_resolution(const int resolution) {
    resolution_ = resolution;
  }

 private:
  friend class FontUtils;
  void Clear();
  bool ParseFontDescription(const PangoFontDescription* desc);
  // Returns the PangoFont structure corresponding to the closest available font
  // in the font map.
  PangoFont* ToPangoFont() const;

  // Font properties set automatically from parsing the font description name.
  string family_name_;
  int font_size_;
  bool is_bold_;
  bool is_italic_;
  bool is_smallcaps_;
  bool is_monospace_;
  bool is_fraktur_;
  FontTypeEnum font_type_;
  // The Pango description that was used to initialize the instance.
  PangoFontDescription* desc_;
  // Default output resolution to assume for GetSpacingProperties() and any
  // other methods that returns pixel values.
  int resolution_;

 private:
  // Not copyable.
  PangoFontInfo(const PangoFontInfo&);
  void operator=(const PangoFontInfo&);
};
// Static utility methods for querying font availability and font-selection
// based on codepoint coverage.
// All members are static; the class is never instantiated.
class FontUtils {
 public:
  // Returns true if the font of the given description name is available in the
  // target directory specified by --fonts_dir
  static bool IsAvailableFont(const char* font_desc);
  // Outputs description names of available fonts.
  static const vector<string>& ListAvailableFonts();
  // Picks font among available fonts that covers and can render the given word,
  // and returns the font description name and the decomposition of the word to
  // graphemes. Returns false if no suitable font was found.
  static bool SelectFont(const char* utf8_word, const int utf8_len,
                         string* font_name, vector<string>* graphemes);
  // Picks font among all_fonts that covers and can render the given word,
  // and returns the font description name and the decomposition of the word to
  // graphemes. Returns false if no suitable font was found.
  static bool SelectFont(const char* utf8_word, const int utf8_len,
                         const vector<string>& all_fonts,
                         string* font_name, vector<string>* graphemes);
  // Returns a bitmask where the value of true at index 'n' implies that unicode
  // value 'n' is renderable by at least one available font.
  static void GetAllRenderableCharacters(vector<bool>* unichar_bitmap);
  // Variant of the above function that inspects only the provided font names.
  static void GetAllRenderableCharacters(const vector<string>& font_names,
                                         vector<bool>* unichar_bitmap);
  // Variant restricted to a single font.
  static void GetAllRenderableCharacters(const string& font_name,
                                         vector<bool>* unichar_bitmap);
  // NOTE: The following utilities were written to be backward compatible with
  // StringRender.
  // BestFonts returns a font name and a bit vector of the characters it
  // can render for the fonts that score within some fraction of the best
  // font on the characters in the given hash map.
  // In the flags vector, each flag is set according to whether the
  // corresponding character (in order of iterating ch_map) can be rendered.
  // The return string is a list of the acceptable fonts that were used.
  static string BestFonts(const unordered_map<char32, inT64>& ch_map,
      vector<std::pair<const char*, vector<bool> > >* font_flag);
  // FontScore returns the weighted renderability score of the given
  // hash map character table in the given font. The unweighted score
  // is also returned in raw_score.
  // The values in the bool vector ch_flags correspond to whether the
  // corresponding character (in order of iterating ch_map) can be rendered.
  static int FontScore(const unordered_map<char32, inT64>& ch_map,
                       const string& fontname, int* raw_score,
                       vector<bool>* ch_flags);
};
} // namespace tesseract
#endif // TESSERACT_TRAINING_PANGO_FONT_INFO_H_
| 1080228-arabicocr11 | training/pango_font_info.h | C++ | asf20 | 8,049 |
/**********************************************************************
* File: tlog.h
* Description: Variant of printf with logging level controllable by a
* commandline flag.
* Author: Ranjith Unnikrishnan
* Created: Wed Nov 20 2013
*
* (C) Copyright 2013, Google Inc.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
#ifndef TESSERACT_TRAINING_TLOG_H_
#define TESSERACT_TRAINING_TLOG_H_
#include "commandlineflags.h"
#include "errcode.h"
#include "tprintf.h"
DECLARE_INT_PARAM_FLAG(tlog_level);
// Variant guarded by the numeric logging level parameter FLAGS_tlog_level
// (default 0). Code using ParseCommandLineFlags() can control its value using
// the --tlog_level commandline argument. Otherwise it must be specified in a
// config file like other params.
// Wrapped in do { } while (0) so each macro expands to a single statement
// and composes safely with unbraced if/else; 'level' is parenthesized to
// survive expression arguments.
#define tlog(level, ...)                        \
  do {                                          \
    if (FLAGS_tlog_level >= (level)) {          \
      tprintf_internal(__VA_ARGS__);            \
    }                                           \
  } while (0)

#define TLOG_IS_ON(level) (FLAGS_tlog_level >= (level))

// Prints the message, then aborts through Tesseract's error mechanism with
// the source location of the macro invocation.
#define TLOG_FATAL(msg...)                                  \
  do {                                                      \
    tprintf(msg);                                           \
    ASSERT_FAILED.error("", ABORT, "in file %s, line %d",   \
                        __FILE__, __LINE__);                \
  } while (0)
#endif // TESSERACT_TRAINING_TLOG_H_
| 1080228-arabicocr11 | training/tlog.h | C | asf20 | 2,065 |
/**********************************************************************
* File: fileio.h
* Description: File I/O utilities.
* Author: Samuel Charron
* Created: Tuesday, July 9, 2013
*
* (C) Copyright 2013, Google Inc.
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy
* of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required
* by applicable law or agreed to in writing, software distributed under the
* License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
* OF ANY KIND, either express or implied. See the License for the specific
* language governing permissions and limitations under the License.
*
**********************************************************************/
#ifndef TESSERACT_TRAINING_FILEIO_H_
#define TESSERACT_TRAINING_FILEIO_H_
#include <stddef.h>
#include <cstdio>
#include <string>
#ifdef USE_STD_NAMESPACE
using std::string;
#endif
namespace tesseract {
// A class to manipulate FILE*s.
// All methods are static; this is a stateless convenience wrapper around
// C stdio, with *OrDie variants that terminate the program on failure.
class File {
 public:
  // Try to open the file 'filename' in mode 'mode'.
  // Stop the program if it cannot open it.
  static FILE* OpenOrDie(const string& filename, const string& mode);
  // Non-fatal variant of the above.
  // NOTE(review): presumably returns NULL on failure - confirm against the
  // implementation.
  static FILE* Open(const string& filename, const string& mode);
  // Try to open the file 'filename' and to write 'str' in it.
  // Stop the program if it fails.
  static void WriteStringToFileOrDie(const string& str, const string& filename);
  // Return true if the file 'filename' is readable.
  static bool Readable(const string& filename);
  // Read the whole file into '*out'; the OrDie variant terminates on failure,
  // the boolean variant reports it.
  static void ReadFileToStringOrDie(const string& filename, string* out);
  static bool ReadFileToString(const string& filename, string* out);

  // Helper methods
  // Concatenate file paths removing any extra intervening '/' symbols.
  static string JoinPath(const string& prefix, const string& suffix);
  // Delete a filename or all filenames matching a glob pattern.
  static bool Delete(const char* pathname);
  static bool DeleteMatchingFiles(const char* pattern);
};
// A class to manipulate Files for reading.
// Wraps an already-open FILE* for line-oriented reading.
// NOTE(review): CloseFile() closes the wrapped stream, which suggests the
// buffer takes ownership of it - confirm with the implementation/callers.
class InputBuffer {
 public:
  explicit InputBuffer(FILE* stream);
  // 'size' is ignored.
  InputBuffer(FILE* stream, size_t size);
  ~InputBuffer();
  // Read data until end-of-file or a \n is read.
  // The data is stored in '*out', excluding the \n if present.
  // Return false if an error occurs or at end-of-file, true otherwise.
  bool ReadLine(string* out);
  // Close the FILE* used by InputBuffer.
  // Return false if an error occurs, true otherwise.
  bool CloseFile();

 private:
  FILE* stream_;    // The wrapped stream.
  int filesize_;    // Size of the underlying file.
};
// A class to manipulate Files for writing.
// Wraps an already-open FILE* for string output; counterpart of InputBuffer.
class OutputBuffer {
 public:
  explicit OutputBuffer(FILE* stream);
  // 'size' is ignored.
  OutputBuffer(FILE* stream, size_t size);
  ~OutputBuffer();
  // Write string 'str' to the open FILE*.
  void WriteString(const string& str);
  // Close the FILE* used by InputBuffer.
  // Return false if an error occurs, true otherwise.
  bool CloseFile();

 private:
  FILE* stream_;    // The wrapped stream.
};
} // namespace tesseract
#endif // TESSERACT_TRAINING_FILEIO_H_
| 1080228-arabicocr11 | training/fileio.h | C++ | asf20 | 3,189 |
///////////////////////////////////////////////////////////////////////
// File: wordlist2dawg.cpp
// Description: Program to generate a DAWG from a word list file
// Author: Thomas Kielbus
// Created: Thu May 10 18:11:42 PDT 2007
//
// (C) Copyright 2006, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
// Given a file that contains a list of words (one word per line) this program
// generates the corresponding squished DAWG file.
#include <stdio.h>
#include "classify.h"
#include "dawg.h"
#include "dict.h"
#include "emalloc.h"
#include "freelist.h"
#include "helpers.h"
#include "serialis.h"
#include "trie.h"
#include "unicharset.h"
// Builds a squished DAWG from a word list (default and -r modes), or checks
// a word list against an existing DAWG (-t mode).
// Usage:
//   wordlist2dawg word_list dawg unicharset                 (argc == 4)
//   wordlist2dawg -t word_list dawg unicharset              (argc == 5)
//   wordlist2dawg -r policy word_list dawg unicharset       (argc == 6)
int main(int argc, char** argv) {
  if (!(argc == 4 || (argc == 5 && strcmp(argv[1], "-t") == 0) ||
        (argc == 6 && strcmp(argv[1], "-r") == 0))) {
    printf("Usage: %s [-t | -r [reverse policy] ] word_list_file"
           " dawg_file unicharset_file\n", argv[0]);
    return 1;
  }
  tesseract::Classify *classify = new tesseract::Classify();
  int argv_index = 0;
  if (argc == 5) ++argv_index;  // Skip "-t".
  tesseract::Trie::RTLReversePolicy reverse_policy =
      tesseract::Trie::RRP_DO_NO_REVERSE;
  if (argc == 6) {
    ++argv_index;  // Skip "-r".
    // Initialized to 0 (== RRP_DO_NO_REVERSE) so a failed parse of the
    // policy argument leaves a well-defined default instead of reading an
    // uninitialized int.
    int tmp_int = 0;
    if (sscanf(argv[++argv_index], "%d", &tmp_int) != 1) {
      tprintf("Failed to parse reverse policy '%s'; using default\n",
              argv[argv_index]);
    }
    reverse_policy = static_cast<tesseract::Trie::RTLReversePolicy>(tmp_int);
    tprintf("Set reverse_policy to %s\n",
            tesseract::Trie::get_reverse_policy_name(reverse_policy));
  }
  // (An unreachable "argc == 7" adjustment was removed: the validation
  // above only admits argc of 4, 5 or 6.)
  const char* wordlist_filename = argv[++argv_index];
  const char* dawg_filename = argv[++argv_index];
  const char* unicharset_file = argv[++argv_index];
  tprintf("Loading unicharset from '%s'\n", unicharset_file);
  if (!classify->getDict().getUnicharset().load_from_file(unicharset_file)) {
    tprintf("Failed to load unicharset from '%s'\n", unicharset_file);
    delete classify;
    return 1;
  }
  const UNICHARSET &unicharset = classify->getDict().getUnicharset();
  if (argc == 4 || argc == 6) {
    // Build mode: read the word list into a trie and squish it into a DAWG.
    tesseract::Trie trie(
        // the first 3 arguments are not used in this case
        tesseract::DAWG_TYPE_WORD, "", SYSTEM_DAWG_PERM,
        unicharset.size(), classify->getDict().dawg_debug_level);
    tprintf("Reading word list from '%s'\n", wordlist_filename);
    if (!trie.read_and_add_word_list(wordlist_filename, unicharset,
                                     reverse_policy)) {
      tprintf("Failed to add word list from '%s'\n", wordlist_filename);
      exit(1);
    }
    tprintf("Reducing Trie to SquishedDawg\n");
    tesseract::SquishedDawg *dawg = trie.trie_to_dawg();
    if (dawg != NULL && dawg->NumEdges() > 0) {
      tprintf("Writing squished DAWG to '%s'\n", dawg_filename);
      dawg->write_squished_dawg(dawg_filename);
    } else {
      tprintf("Dawg is empty, skip producing the output file\n");
    }
    delete dawg;
  } else if (argc == 5) {
    // Check mode: verify every word of the list against an existing DAWG.
    tprintf("Loading dawg DAWG from '%s'\n", dawg_filename);
    tesseract::SquishedDawg words(
        dawg_filename,
        // these 3 arguments are not used in this case
        tesseract::DAWG_TYPE_WORD, "", SYSTEM_DAWG_PERM,
        classify->getDict().dawg_debug_level);
    tprintf("Checking word list from '%s'\n", wordlist_filename);
    words.check_for_words(wordlist_filename, unicharset, true);
  } else {  // should never get here
    tprintf("Invalid command-line options\n");
    exit(1);
  }
  delete classify;
  return 0;
}
| 1080228-arabicocr11 | training/wordlist2dawg.cpp | C++ | asf20 | 4,008 |
// Copyright 2011 Google Inc. All Rights Reserved.
// Author: rays@google.com (Ray Smith)
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Filename: shapeclustering.cpp
// Purpose: Generates a master shape table to merge similarly-shaped
// training data of whole, partial or multiple characters.
// Author: Ray Smith
#ifdef HAVE_CONFIG_H
#include "config_auto.h"
#endif
#ifndef USE_STD_NAMESPACE
#include "base/commandlineflags.h"
#endif
#include "commontraining.h"
#include "mastertrainer.h"
#include "params.h"
#include "strngs.h"
INT_PARAM_FLAG(display_cloud_font, -1,
"Display cloud of this font, canonical_class1");
INT_PARAM_FLAG(display_canonical_font, -1,
"Display canonical sample of this font, canonical_class2");
STRING_PARAM_FLAG(canonical_class1, "", "Class to show ambigs for");
STRING_PARAM_FLAG(canonical_class2, "", "Class to show ambigs for");
// Loads training data, if requested displays debug information, otherwise
// creates the master shape table by shape clustering and writes it to a file.
// If FLAGS_display_cloud_font is set, then the cloud features of
// FLAGS_canonical_class1/FLAGS_display_cloud_font are shown in green ON TOP
// OF the red canonical features of FLAGS_canonical_class2/
// FLAGS_display_canonical_font, so as to show which canonical features are
// NOT in the cloud.
// Otherwise, if FLAGS_canonical_class1 is set, prints a table of font-wise
// cluster distances between FLAGS_canonical_class1 and FLAGS_canonical_class2.
int main(int argc, char **argv) {
  ParseArguments(&argc, &argv);
  STRING file_prefix;
  tesseract::MasterTrainer* trainer = tesseract::LoadTrainingData(
      argc, argv, false, NULL, &file_prefix);
  if (!trainer)
    return 1;
  if (FLAGS_display_cloud_font >= 0) {
#ifndef GRAPHICS_DISABLED
    trainer->DisplaySamples(FLAGS_canonical_class1.c_str(),
                            FLAGS_display_cloud_font,
                            FLAGS_canonical_class2.c_str(),
                            FLAGS_display_canonical_font);
#endif  // GRAPHICS_DISABLED
  } else if (!FLAGS_canonical_class1.empty()) {
    trainer->DebugCanonical(FLAGS_canonical_class1.c_str(),
                            FLAGS_canonical_class2.c_str());
  } else {
    // Normal mode: cluster the shapes and write the master shape table.
    trainer->SetupMasterShapes();
    WriteShapeTable(file_prefix, trainer->master_shapes());
  }
  // Single exit point so the trainer is freed on every path (the debug
  // branches previously returned without deleting it).
  delete trainer;
  return 0;
} /* main */
| 1080228-arabicocr11 | training/shapeclustering.cpp | C++ | asf20 | 2,921 |
// Copyright 2008 Google Inc. All Rights Reserved.
// Author: scharron@google.com (Samuel Charron)
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "commontraining.h"
#include "allheaders.h"
#include "ccutil.h"
#include "classify.h"
#include "cluster.h"
#include "clusttool.h"
#include "efio.h"
#include "emalloc.h"
#include "featdefs.h"
#include "fontinfo.h"
#include "freelist.h"
#include "globals.h"
#include "intfeaturespace.h"
#include "mastertrainer.h"
#include "mf.h"
#include "ndminx.h"
#include "oldlist.h"
#include "params.h"
#include "shapetable.h"
#include "tessdatamanager.h"
#include "tessopt.h"
#include "tprintf.h"
#include "unicity_table.h"
#include <math.h>
using tesseract::CCUtil;
using tesseract::FontInfo;
using tesseract::IntFeatureSpace;
using tesseract::ParamUtils;
using tesseract::ShapeTable;
// Global Variables.
// global variable to hold configuration parameters to control clustering
// -M 0.625 -B 0.05 -I 1.0 -C 1e-6.
CLUSTERCONFIG Config = { elliptical, 0.625, 0.05, 1.0, 1e-6, 0 };
FEATURE_DEFS_STRUCT feature_defs;
CCUtil ccutil;
INT_PARAM_FLAG(debug_level, 0, "Level of Trainer debugging");
INT_PARAM_FLAG(load_images, 0, "Load images with tr files");
STRING_PARAM_FLAG(configfile, "", "File to load more configs from");
STRING_PARAM_FLAG(D, "", "Directory to write output files to");
STRING_PARAM_FLAG(F, "font_properties", "File listing font properties");
STRING_PARAM_FLAG(X, "", "File listing font xheights");
STRING_PARAM_FLAG(U, "unicharset", "File to load unicharset from");
STRING_PARAM_FLAG(O, "", "File to write unicharset to");
STRING_PARAM_FLAG(T, "", "File to load trainer from");
STRING_PARAM_FLAG(output_trainer, "", "File to write trainer to");
STRING_PARAM_FLAG(test_ch, "", "UTF8 test character string");
DOUBLE_PARAM_FLAG(clusterconfig_min_samples_fraction, Config.MinSamples,
"Min number of samples per proto as % of total");
DOUBLE_PARAM_FLAG(clusterconfig_max_illegal, Config.MaxIllegal,
"Max percentage of samples in a cluster which have more"
" than 1 feature in that cluster");
DOUBLE_PARAM_FLAG(clusterconfig_independence, Config.Independence,
"Desired independence between dimensions");
DOUBLE_PARAM_FLAG(clusterconfig_confidence, Config.Confidence,
"Desired confidence in prototypes created");
/*
** Parameters:
**   argc number of command line arguments to parse
**   argv command line arguments
** Globals:
**   Config current clustering parameters
** Operation:
**   This routine parses the command line arguments that were
**   passed to the program and uses them to set relevant
**   training-related global parameters
** Return: none
** Exceptions: Illegal options terminate the program.
*/

// Clamps a clustering-config flag value into its legal range [0.0, 1.0].
static double ClampFraction(double fraction) {
  return MAX(0.0, MIN(1.0, fraction));
}

void ParseArguments(int* argc, char ***argv) {
  STRING usage;
  if (*argc) {
    usage += (*argv)[0];
  }
  usage += " [.tr files ...]";
  tesseract::ParseCommandLineFlags(usage.c_str(), argc, argv, true);
  // Record the index of the first non-flag argument to 1, since we set
  // remove_flags to true when parsing the flags.
  tessoptind = 1;
  // Copy the clamped clustering flags into the global cluster config.
  Config.MinSamples =
      ClampFraction(double(FLAGS_clusterconfig_min_samples_fraction));
  Config.MaxIllegal = ClampFraction(double(FLAGS_clusterconfig_max_illegal));
  Config.Independence =
      ClampFraction(double(FLAGS_clusterconfig_independence));
  Config.Confidence = ClampFraction(double(FLAGS_clusterconfig_confidence));
  // Set additional parameters from config file if specified.
  if (!FLAGS_configfile.empty()) {
    tesseract::ParamUtils::ReadParamsFile(
        FLAGS_configfile.c_str(),
        tesseract::SET_PARAM_CONSTRAINT_NON_INIT_ONLY,
        ccutil.params());
  }
}
namespace tesseract {
// Attempts to load a shape table from <file_prefix><kShapeTableFileSuffix>.
// Returns the newly allocated table on success; returns NULL (after printing
// a warning or error) when the file is absent or cannot be deserialized.
ShapeTable* LoadShapeTable(const STRING& file_prefix) {
  STRING shape_table_file = file_prefix;
  shape_table_file += kShapeTableFileSuffix;
  FILE* shape_fp = fopen(shape_table_file.string(), "rb");
  if (shape_fp == NULL) {
    tprintf("Warning: No shape table file present: %s\n",
            shape_table_file.string());
    return NULL;
  }
  ShapeTable* shape_table = new ShapeTable;
  if (shape_table->DeSerialize(false, shape_fp)) {
    tprintf("Read shape table %s of %d shapes\n",
            shape_table_file.string(), shape_table->NumShapes());
  } else {
    tprintf("Error: Failed to read shape table %s\n",
            shape_table_file.string());
    delete shape_table;
    shape_table = NULL;
  }
  fclose(shape_fp);
  return shape_table;
}
// Serializes shape_table to <file_prefix><kShapeTableFileSuffix>.
// Failures to create or write the file are reported on stderr.
void WriteShapeTable(const STRING& file_prefix, const ShapeTable& shape_table) {
  STRING shape_table_file = file_prefix;
  shape_table_file += kShapeTableFileSuffix;
  FILE* fp = fopen(shape_table_file.string(), "wb");
  if (fp == NULL) {
    fprintf(stderr, "Error creating shape table: %s\n",
            shape_table_file.string());
    return;
  }
  if (!shape_table.Serialize(fp)) {
    fprintf(stderr, "Error writing shape table: %s\n",
            shape_table_file.string());
  }
  fclose(fp);
}
// Creates a MasterTrainer and loads the training data into it:
// Initializes feature_defs and IntegerFX.
// Loads the shape_table if shape_table != NULL.
// Loads initial unicharset from -U command-line option.
// If FLAGS_T is set, loads the majority of data from there, else:
//   Loads font info from -F option.
//   Loads xheights from -X option.
//   Loads samples from .tr files in remaining command-line args.
//   Deletes outliers and computes canonical samples.
//   If FLAGS_output_trainer is set, saves the trainer for future use.
// Computes canonical and cloud features.
// If shape_table is not NULL, but failed to load, make a fake flat one,
// as shape clustering was not run.
// Returns the new trainer, or NULL on failure (caller owns the result).
MasterTrainer* LoadTrainingData(int argc, const char* const * argv,
                                bool replication,
                                ShapeTable** shape_table,
                                STRING* file_prefix) {
  InitFeatureDefs(&feature_defs);
  InitIntegerFX();
  // Output files are written under FLAGS_D when given.
  *file_prefix = "";
  if (!FLAGS_D.empty()) {
    *file_prefix += FLAGS_D.c_str();
    *file_prefix += "/";
  }
  // If we are shape clustering (NULL shape_table) or we successfully load
  // a shape_table written by a previous shape clustering, then
  // shape_analysis will be true, meaning that the MasterTrainer will replace
  // some members of the unicharset with their fragments.
  bool shape_analysis = false;
  if (shape_table != NULL) {
    *shape_table = LoadShapeTable(*file_prefix);
    if (*shape_table != NULL)
      shape_analysis = true;
  } else {
    shape_analysis = true;
  }
  MasterTrainer* trainer = new MasterTrainer(NM_CHAR_ANISOTROPIC,
                                             shape_analysis,
                                             replication,
                                             FLAGS_debug_level);
  IntFeatureSpace fs;
  fs.Init(kBoostXYBuckets, kBoostXYBuckets, kBoostDirBuckets);
  if (FLAGS_T.empty()) {
    // No saved trainer: build everything from the command-line inputs.
    trainer->LoadUnicharset(FLAGS_U.c_str());
    // Get basic font information from font_properties.
    if (!FLAGS_F.empty()) {
      if (!trainer->LoadFontInfo(FLAGS_F.c_str())) {
        delete trainer;
        return NULL;
      }
    }
    if (!FLAGS_X.empty()) {
      if (!trainer->LoadXHeights(FLAGS_X.c_str())) {
        delete trainer;
        return NULL;
      }
    }
    trainer->SetFeatureSpace(fs);
    const char* page_name;
    // Load training data from .tr files on the command line.
    while ((page_name = GetNextFilename(argc, argv)) != NULL) {
      tprintf("Reading %s ...\n", page_name);
      trainer->ReadTrainingSamples(page_name, feature_defs, false);

      // If there is a file with [lang].[fontname].exp[num].fontinfo present,
      // read font spacing information in to fontinfo_table.
      // Buffer size is an exact fit: (pagename_len - 2) kept characters
      // + strlen("fontinfo") (8) + 1 for the terminating NUL.
      int pagename_len = strlen(page_name);
      char *fontinfo_file_name = new char[pagename_len + 7];
      strncpy(fontinfo_file_name, page_name, pagename_len - 2);  // remove "tr"
      strcpy(fontinfo_file_name + pagename_len - 2, "fontinfo");  // +"fontinfo"
      trainer->AddSpacingInfo(fontinfo_file_name);
      delete[] fontinfo_file_name;

      // Load the images into memory if required by the classifier.
      if (FLAGS_load_images) {
        STRING image_name = page_name;
        // Chop off the tr and replace with tif. Extension must be tif!
        image_name.truncate_at(image_name.length() - 2);
        image_name += "tif";
        trainer->LoadPageImages(image_name.string());
      }
    }
    trainer->PostLoadCleanup();
    // Write the master trainer if required.
    if (!FLAGS_output_trainer.empty()) {
      FILE* fp = fopen(FLAGS_output_trainer.c_str(), "wb");
      if (fp == NULL) {
        tprintf("Can't create saved trainer data!\n");
      } else {
        trainer->Serialize(fp);
        fclose(fp);
      }
    }
  } else {
    // Restore a previously saved trainer instead of re-reading samples.
    bool success = false;
    tprintf("Loading master trainer from file:%s\n",
            FLAGS_T.c_str());
    FILE* fp = fopen(FLAGS_T.c_str(), "rb");
    if (fp == NULL) {
      tprintf("Can't read file %s to initialize master trainer\n",
              FLAGS_T.c_str());
    } else {
      success = trainer->DeSerialize(false, fp);
      fclose(fp);
    }
    if (!success) {
      tprintf("Deserialize of master trainer failed!\n");
      delete trainer;
      return NULL;
    }
    trainer->SetFeatureSpace(fs);
  }
  trainer->PreTrainingSetup();
  // Optionally save the (possibly fragment-modified) unicharset.
  if (!FLAGS_O.empty() &&
      !trainer->unicharset().save_to_file(FLAGS_O.c_str())) {
    fprintf(stderr, "Failed to save unicharset to file %s\n", FLAGS_O.c_str());
    delete trainer;
    return NULL;
  }
  if (shape_table != NULL) {
    // If we previously failed to load a shapetable, then shape clustering
    // wasn't run so make a flat one now.
    if (*shape_table == NULL) {
      *shape_table = new ShapeTable;
      trainer->SetupFlatShapeTable(*shape_table);
      tprintf("Flat shape table summary: %s\n",
              (*shape_table)->SummaryStr().string());
    }
    (*shape_table)->set_unicharset(trainer->unicharset());
  }
  return trainer;
}
} // namespace tesseract.
/*---------------------------------------------------------------------------*/
// Returns the next non-option command line argument, or NULL when all
// arguments have been consumed.  Advances the global tessoptind cursor that
// the tessopt call maintains, so this must only be called after all option
// arguments have been parsed and removed with ParseArguments.
const char *GetNextFilename(int argc, const char* const * argv) {
  return (tessoptind < argc) ? argv[tessoptind++] : NULL;
} /* GetNextFilename */
/*---------------------------------------------------------------------------*/
// Searches List (a list of LABELEDLISTs) for an entry whose Label string
// compares equal to Label.  Returns the matching labeled list, or NULL if
// no entry matches.
LABELEDLIST FindList (
	LIST	List,
	char	*Label)
{
  iterate (List)
  {
    LABELEDLIST entry = (LABELEDLIST) first_node (List);
    if (strcmp (entry->Label, Label) == 0)
      return entry;
  }
  return NULL;
} /* FindList */
/*---------------------------------------------------------------------------*/
// Allocates a new, empty LABELEDLIST carrying a heap-allocated copy of
// Label.  The sample list starts empty and both sample counters start at 0.
LABELEDLIST NewLabeledList (
	const char	*Label)
{
  LABELEDLIST labeled = (LABELEDLIST) Emalloc (sizeof (LABELEDLISTNODE));
  labeled->Label = (char*) Emalloc (strlen (Label) + 1);
  strcpy (labeled->Label, Label);
  labeled->List = NIL_LIST;
  labeled->SampleCount = 0;
  labeled->font_sample_count = 0;
  return labeled;
} /* NewLabeledList */
/*---------------------------------------------------------------------------*/
// TODO(rays) This is now used only by cntraining. Convert cntraining to use
// the new method or get rid of it entirely.
void ReadTrainingSamples(const FEATURE_DEFS_STRUCT& feature_defs,
const char *feature_name, int max_samples,
UNICHARSET* unicharset,
FILE* file, LIST* training_samples) {
/*
** Parameters:
** file open text file to read samples from
** Globals: none
** Operation:
** This routine reads training samples from a file and
** places them into a data structure which organizes the
** samples by FontName and CharName. It then returns this
** data structure.
** Return: none
** Exceptions: none
** History: Fri Aug 18 13:11:39 1989, DSJ, Created.
** Tue May 17 1998 simplifications to structure, illiminated
** font, and feature specification levels of structure.
*/
char buffer[2048];
char unichar[UNICHAR_LEN + 1];
LABELEDLIST char_sample;
FEATURE_SET feature_samples;
CHAR_DESC char_desc;
int i;
int feature_type = ShortNameToFeatureType(feature_defs, feature_name);
// Zero out the font_sample_count for all the classes.
LIST it = *training_samples;
iterate(it) {
char_sample = reinterpret_cast<LABELEDLIST>(first_node(it));
char_sample->font_sample_count = 0;
}
while (fgets(buffer, 2048, file) != NULL) {
if (buffer[0] == '\n')
continue;
sscanf(buffer, "%*s %s", unichar);
if (unicharset != NULL && !unicharset->contains_unichar(unichar)) {
unicharset->unichar_insert(unichar);
if (unicharset->size() > MAX_NUM_CLASSES) {
tprintf("Error: Size of unicharset in training is "
"greater than MAX_NUM_CLASSES\n");
exit(1);
}
}
char_sample = FindList(*training_samples, unichar);
if (char_sample == NULL) {
char_sample = NewLabeledList(unichar);
*training_samples = push(*training_samples, char_sample);
}
char_desc = ReadCharDescription(feature_defs, file);
feature_samples = char_desc->FeatureSets[feature_type];
if (char_sample->font_sample_count < max_samples || max_samples <= 0) {
char_sample->List = push(char_sample->List, feature_samples);
char_sample->SampleCount++;
char_sample->font_sample_count++;
} else {
FreeFeatureSet(feature_samples);
}
for (i = 0; i < char_desc->NumFeatureSets; i++) {
if (feature_type != i)
FreeFeatureSet(char_desc->FeatureSets[i]);
}
free(char_desc);
}
} // ReadTrainingSamples
/*---------------------------------------------------------------------------*/
// Deallocates every feature set held by every labeled list in CharList,
// then the labeled lists themselves, and finally the list container.
// CharList must not be used after this call.
void FreeTrainingSamples(LIST CharList) {
  iterate(CharList) {          // walk every class in the list
    LABELEDLIST labeled = (LABELEDLIST) first_node(CharList);
    LIST features = labeled->List;
    iterate(features) {        // walk every sample of the class
      FreeFeatureSet((FEATURE_SET) first_node(features));
    }
    FreeLabeledList(labeled);
  }
  destroy(CharList);
} /* FreeTrainingSamples */
/*---------------------------------------------------------------------------*/
void FreeLabeledList(LABELEDLIST LabeledList) {
/*
**	Parameters:
**		LabeledList	labeled list to be freed
**	Globals: none
**	Operation:
**		This routine deallocates the list structure, the label string,
**		and the LABELEDLIST node itself.  It does NOT free any memory
**		consumed by the items stored in the list; free those first
**		(see FreeTrainingSamples).
**	Return: none
**	Exceptions: none
**	History: Fri Aug 18 17:52:45 1989, DSJ, Created.
*/
  destroy(LabeledList->List);  // frees the list cells, not the payloads
  free(LabeledList->Label);
  free(LabeledList);           // must come last: releases the node itself
} /* FreeLabeledList */
/*---------------------------------------------------------------------------*/
CLUSTERER *SetUpForClustering(const FEATURE_DEFS_STRUCT &FeatureDefs,
                              LABELEDLIST char_sample,
                              const char* program_feature_type) {
/*
**	Parameters:
**		FeatureDefs: feature definitions supplying the parameter
**			descriptions for program_feature_type.
**		char_sample: LABELEDLIST that holds all the feature information for a
**		given character.
**		program_feature_type: short name of the feature type to cluster.
**	Globals:
**		None
**	Operation:
**		This routine reads samples from a LABELEDLIST and enters
**		those samples into a clusterer data structure.  This
**		data structure is then returned to the caller.  Each feature
**		set in char_sample contributes its features as samples tagged
**		with that set's index (CharID).
**	Return:
**		Pointer to new clusterer data structure.
**	Exceptions:
**		None
**	History:
**		8/16/89, DSJ, Created.
*/
  uinT16 N;
  int i, j;
  // Scratch buffer reused for every sample; allocated lazily on first use
  // and released after the loop.
  FLOAT32 *Sample = NULL;
  CLUSTERER *Clusterer;
  inT32 CharID;
  LIST FeatureList = NULL;
  FEATURE_SET FeatureSet = NULL;
  int desc_index = ShortNameToFeatureType(FeatureDefs, program_feature_type);
  N = FeatureDefs.FeatureDesc[desc_index]->NumParams;
  Clusterer = MakeClusterer(N, FeatureDefs.FeatureDesc[desc_index]->ParamDesc);
  FeatureList = char_sample->List;
  CharID = 0;
  iterate(FeatureList) {
    FeatureSet = (FEATURE_SET) first_node(FeatureList);
    // NOTE(review): iterates up to MaxNumFeatures rather than the set's
    // actual NumFeatures -- confirm all slots up to MaxNumFeatures are
    // populated for this feature type.
    for (i = 0; i < FeatureSet->MaxNumFeatures; i++) {
      if (Sample == NULL)
        Sample = (FLOAT32 *)Emalloc(N * sizeof(FLOAT32));
      for (j = 0; j < N; j++)
        Sample[j] = FeatureSet->Features[i]->Params[j];
      MakeSample (Clusterer, Sample, CharID);
    }
    CharID++;
  }
  if ( Sample != NULL ) free( Sample );
  return( Clusterer );
} /* SetUpForClustering */
/*------------------------------------------------------------------------*/
// Two-pass cleanup of insignificant ("red") prototypes:
//  1. Each insignificant, unmerged proto is folded into its nearest live
//     neighbor if that neighbor lies within 0.125 in feature space.  Merging
//     into another insignificant proto pools their samples; matching a
//     significant ("green") proto simply retires the red one.
//  2. Any proto that now holds at least Config->MinSamples *
//     Clusterer->NumChar samples is promoted to significant.
// The label parameter only enables debug output when it equals FLAGS_test_ch.
void MergeInsignificantProtos(LIST ProtoList, const char* label,
                              CLUSTERER *Clusterer, CLUSTERCONFIG *Config) {
  PROTOTYPE *Prototype;
  bool debug = strcmp(FLAGS_test_ch.c_str(), label) == 0;
  LIST pProtoList = ProtoList;
  iterate(pProtoList) {
    Prototype = (PROTOTYPE *) first_node (pProtoList);
    if (Prototype->Significant || Prototype->Merged)
      continue;
    // Only neighbors closer than this threshold are merge candidates.
    FLOAT32 best_dist = 0.125;
    PROTOTYPE* best_match = NULL;
    // Find the nearest alive prototype.
    LIST list_it = ProtoList;
    iterate(list_it) {
      PROTOTYPE* test_p = (PROTOTYPE *) first_node (list_it);
      if (test_p != Prototype && !test_p->Merged) {
        FLOAT32 dist = ComputeDistance(Clusterer->SampleSize,
                                       Clusterer->ParamDesc,
                                       Prototype->Mean, test_p->Mean);
        if (dist < best_dist) {
          best_match = test_p;
          best_dist = dist;
        }
      }
    }
    if (best_match != NULL && !best_match->Significant) {
      // Pool this proto's samples into the nearby insignificant neighbor.
      if (debug)
        tprintf("Merging red clusters (%d+%d) at %g,%g and %g,%g\n",
                best_match->NumSamples, Prototype->NumSamples,
                best_match->Mean[0], best_match->Mean[1],
                Prototype->Mean[0], Prototype->Mean[1]);
      best_match->NumSamples = MergeClusters(Clusterer->SampleSize,
                                             Clusterer->ParamDesc,
                                             best_match->NumSamples,
                                             Prototype->NumSamples,
                                             best_match->Mean,
                                             best_match->Mean, Prototype->Mean);
      Prototype->NumSamples = 0;
      Prototype->Merged = 1;
    } else if (best_match != NULL) {
      // The nearest neighbor is already significant: retire the red proto.
      if (debug)
        tprintf("Red proto at %g,%g matched a green one at %g,%g\n",
                Prototype->Mean[0], Prototype->Mean[1],
                best_match->Mean[0], best_match->Mean[1]);
      Prototype->Merged = 1;
    }
  }
  // Mark significant those that now have enough samples.
  int min_samples = (inT32) (Config->MinSamples * Clusterer->NumChar);
  pProtoList = ProtoList;
  iterate(pProtoList) {
    Prototype = (PROTOTYPE *) first_node (pProtoList);
    // Process insignificant protos that do not match a green one
    if (!Prototype->Significant && Prototype->NumSamples >= min_samples &&
        !Prototype->Merged) {
      if (debug)
        tprintf("Red proto at %g,%g becoming green\n",
                Prototype->Mean[0], Prototype->Mean[1]);
      Prototype->Significant = true;
    }
  }
} /* MergeInsignificantProtos */
/*-----------------------------------------------------------------------------*/
// Releases the per-prototype elliptical Variance/Magnitude/Weight arrays of
// every proto in ProtoList and nulls the pointers, so a later FreeProtoList
// (or reuse) does not double-free or dangle.
void CleanUpUnusedData(
	LIST ProtoList)
{
  iterate(ProtoList) {
    PROTOTYPE* proto = (PROTOTYPE *) first_node (ProtoList);
    if (proto->Variance.Elliptical != NULL) {
      memfree(proto->Variance.Elliptical);
      proto->Variance.Elliptical = NULL;
    }
    if (proto->Magnitude.Elliptical != NULL) {
      memfree(proto->Magnitude.Elliptical);
      proto->Magnitude.Elliptical = NULL;
    }
    if (proto->Weight.Elliptical != NULL) {
      memfree(proto->Weight.Elliptical);
      proto->Weight.Elliptical = NULL;
    }
  }
}
/*------------------------------------------------------------------------*/
LIST RemoveInsignificantProtos(
LIST ProtoList,
BOOL8 KeepSigProtos,
BOOL8 KeepInsigProtos,
int N)
{
LIST NewProtoList = NIL_LIST;
LIST pProtoList;
PROTOTYPE* Proto;
PROTOTYPE* NewProto;
int i;
pProtoList = ProtoList;
iterate(pProtoList)
{
Proto = (PROTOTYPE *) first_node (pProtoList);
if ((Proto->Significant && KeepSigProtos) ||
(!Proto->Significant && KeepInsigProtos))
{
NewProto = (PROTOTYPE *)Emalloc(sizeof(PROTOTYPE));
NewProto->Mean = (FLOAT32 *)Emalloc(N * sizeof(FLOAT32));
NewProto->Significant = Proto->Significant;
NewProto->Style = Proto->Style;
NewProto->NumSamples = Proto->NumSamples;
NewProto->Cluster = NULL;
NewProto->Distrib = NULL;
for (i=0; i < N; i++)
NewProto->Mean[i] = Proto->Mean[i];
if (Proto->Variance.Elliptical != NULL)
{
NewProto->Variance.Elliptical = (FLOAT32 *)Emalloc(N * sizeof(FLOAT32));
for (i=0; i < N; i++)
NewProto->Variance.Elliptical[i] = Proto->Variance.Elliptical[i];
}
else
NewProto->Variance.Elliptical = NULL;
//---------------------------------------------
if (Proto->Magnitude.Elliptical != NULL)
{
NewProto->Magnitude.Elliptical = (FLOAT32 *)Emalloc(N * sizeof(FLOAT32));
for (i=0; i < N; i++)
NewProto->Magnitude.Elliptical[i] = Proto->Magnitude.Elliptical[i];
}
else
NewProto->Magnitude.Elliptical = NULL;
//------------------------------------------------
if (Proto->Weight.Elliptical != NULL)
{
NewProto->Weight.Elliptical = (FLOAT32 *)Emalloc(N * sizeof(FLOAT32));
for (i=0; i < N; i++)
NewProto->Weight.Elliptical[i] = Proto->Weight.Elliptical[i];
}
else
NewProto->Weight.Elliptical = NULL;
NewProto->TotalMagnitude = Proto->TotalMagnitude;
NewProto->LogMagnitude = Proto->LogMagnitude;
NewProtoList = push_last(NewProtoList, NewProto);
}
}
FreeProtoList(&ProtoList);
return (NewProtoList);
} /* RemoveInsignificantProtos */
/*----------------------------------------------------------------------------*/
// Searches List (a list of MERGE_CLASSes) for an entry whose Label string
// compares equal to Label.  Returns the match, or NULL if none exists.
MERGE_CLASS FindClass (
	LIST	List,
	const char	*Label)
{
  iterate (List)
  {
    MERGE_CLASS candidate = (MERGE_CLASS) first_node (List);
    if (strcmp (candidate->Label, Label) == 0)
      return candidate;
  }
  return NULL;
} /* FindClass */
/*---------------------------------------------------------------------------*/
// Allocates a new MERGE_CLASS node with a heap-allocated copy of Label and
// a freshly created class sized MAX_NUM_PROTOS x MAX_NUM_CONFIGS.
MERGE_CLASS NewLabeledClass (
	const char	*Label)
{
  MERGE_CLASS merge_class = new MERGE_CLASS_NODE;
  merge_class->Label = (char*) Emalloc (strlen (Label) + 1);
  strcpy (merge_class->Label, Label);
  merge_class->Class = NewClass (MAX_NUM_PROTOS, MAX_NUM_CONFIGS);
  return merge_class;
} /* NewLabeledClass */
/*-----------------------------------------------------------------------------*/
void FreeLabeledClassList (
	LIST	ClassList)
/*
**	Parameters:
**		ClassList	list of labeled classes to free
**	Globals: none
**	Operation:
**		This routine deallocates all of the space allocated to
**		the specified list of labeled classes: each entry's label,
**		its class, the MERGE_CLASS node, and finally the list cells.
**	Return: none
**	Exceptions: none
**	History: Fri Aug 18 17:44:27 1989, DSJ, Created.
*/
{
	MERGE_CLASS	MergeClass;
	iterate (ClassList) 		/* iterate through all of the classes */
	{
		MergeClass = (MERGE_CLASS) first_node (ClassList);
		free (MergeClass->Label);
		FreeClass(MergeClass->Class);
		delete MergeClass;
	}
	destroy (ClassList);
}	/* FreeLabeledClassList */
/** SetUpForFloat2Int **************************************************/
/** SetUpForFloat2Int **************************************************/
// Converts the merged float classes in LabeledClassList into an array of
// CLASS_STRUCTs indexed by unichar id (array size = unicharset.size(),
// caller owns and must delete[]).  For each class: protos are copied with
// their (X, Y, Angle) run through Normalize() to produce the A/B/C line
// coefficients, the font_set is moved across, and each configuration bit
// vector is duplicated.
CLASS_STRUCT* SetUpForFloat2Int(const UNICHARSET& unicharset,
                                LIST LabeledClassList) {
	MERGE_CLASS	MergeClass;
	CLASS_TYPE	Class;
	int		NumProtos;
	int		NumConfigs;
	int		NumWords;
	int		i, j;
	float		Values[3];
	PROTO	NewProto;
	PROTO	OldProto;
	BIT_VECTOR	NewConfig;
	BIT_VECTOR	OldConfig;
	//	printf("Float2Int ...\n");
	CLASS_STRUCT* float_classes = new CLASS_STRUCT[unicharset.size()];
	iterate(LabeledClassList)
	{
		UnicityTableEqEq<int>   font_set;
		MergeClass = (MERGE_CLASS) first_node (LabeledClassList);
		// Destination slot is selected by the class label's unichar id.
		Class = &float_classes[unicharset.unichar_to_id(MergeClass->Label)];
		NumProtos = MergeClass->Class->NumProtos;
		NumConfigs = MergeClass->Class->NumConfigs;
		// Move (not copy) the font set out of the merge class.
		font_set.move(&MergeClass->Class->font_set);
		Class->NumProtos = NumProtos;
		Class->MaxNumProtos = NumProtos;
		Class->Prototypes = (PROTO) Emalloc (sizeof(PROTO_STRUCT) * NumProtos);
		for(i=0; i < NumProtos; i++)
		{
			NewProto = ProtoIn(Class, i);
			OldProto = ProtoIn(MergeClass->Class, i);
			// Normalize() turns (X, Y, Angle) into the A/B/C coefficients
			// in place; the raw X/Y/Length/Angle are copied unchanged.
			Values[0] = OldProto->X;
			Values[1] = OldProto->Y;
			Values[2] = OldProto->Angle;
			Normalize(Values);
			NewProto->X = OldProto->X;
			NewProto->Y = OldProto->Y;
			NewProto->Length = OldProto->Length;
			NewProto->Angle = OldProto->Angle;
			NewProto->A = Values[0];
			NewProto->B = Values[1];
			NewProto->C = Values[2];
		}
		Class->NumConfigs = NumConfigs;
		Class->MaxNumConfigs = NumConfigs;
		Class->font_set.move(&font_set);
		Class->Configurations = (BIT_VECTOR*) Emalloc (sizeof(BIT_VECTOR) * NumConfigs);
		NumWords = WordsInVectorOfSize(NumProtos);
		for(i=0; i < NumConfigs; i++)
		{
			// Deep-copy each configuration bit vector word by word.
			NewConfig = NewBitVector(NumProtos);
			OldConfig = MergeClass->Class->Configurations[i];
			for(j=0; j < NumWords; j++)
				NewConfig[j] = OldConfig[j];
			Class->Configurations[i] = NewConfig;
		}
	}
	return float_classes;
}  // SetUpForFloat2Int
/*--------------------------------------------------------------------------*/
/*
**	Operation:
**		In-place conversion of a micro-feature triple.  On entry
**		Values[0]=x, Values[1]=y, Values[2]=angle (fraction of a full
**		turn).  On exit Values holds the normalized line coefficients
**		derived from the slope tan(angle*2*PI) and intercept through
**		(x, y):  [0]=Slope*Normalizer, [1]=-Normalizer,
**		[2]=Intercept*Normalizer with Normalizer=1/sqrt(Slope^2+1).
**	Parameters:
**		Values	array of 3 floats, converted in place.
**	Return: none
*/
void Normalize (
   float  *Values)
{
  // Deprecated 'register' storage class removed; it is ignored by modern
  // compilers and removed from the language in C++17.
  float Slope;
  float Intercept;
  float Normalizer;
  Slope      = tan (Values [2] * 2 * PI);
  Intercept  = Values [1] - Slope * Values [0];
  Normalizer = 1 / sqrt (Slope * Slope + 1.0);
  Values [0] = Slope * Normalizer;
  Values [1] = - Normalizer;
  Values [2] = Intercept * Normalizer;
} // Normalize
/*-------------------------------------------------------------------------*/
// Frees every labeled list of norm protos held in CharList, then the list
// container itself.  CharList must not be used after this call.
void FreeNormProtoList (
	LIST	CharList)
{
  iterate (CharList) {
    FreeLabeledList ((LABELEDLIST) first_node (CharList));
  }
  destroy (CharList);
}  // FreeNormProtoList
/*---------------------------------------------------------------------------*/
// Wraps the prototypes of ProtoList in a new labeled list tagged CharName
// and pushes that labeled list onto the front of *NormProtoList.  The
// prototypes themselves are shared, not copied.
void AddToNormProtosList(
	LIST* NormProtoList,
	LIST ProtoList,
	char* CharName)
{
  LABELEDLIST labeled = NewLabeledList(CharName);
  iterate(ProtoList) {
    PROTOTYPE* proto = (PROTOTYPE *) first_node (ProtoList);
    labeled->List = push(labeled->List, proto);
  }
  *NormProtoList = push(*NormProtoList, labeled);
}
/*---------------------------------------------------------------------------*/
// Counts the prototypes in ProtoList, including significant ones only when
// CountSigProtos is set and insignificant ones only when CountInsigProtos
// is set.
int NumberOfProtos(
	LIST ProtoList,
	BOOL8 CountSigProtos,
	BOOL8 CountInsigProtos)
{
  int count = 0;
  iterate(ProtoList) {
    PROTOTYPE* proto = (PROTOTYPE *) first_node (ProtoList);
    if (proto->Significant) {
      if (CountSigProtos)
        ++count;
    } else if (CountInsigProtos) {
      ++count;
    }
  }
  return count;
}
| 1080228-arabicocr11 | training/commontraining.cpp | C++ | asf20 | 29,792 |
/******************************************************************************
** Filename: mftraining.c
** Purpose: Separates training pages into files for each character.
**	Purpose:	Separates training pages into files for each character.
the feature type mf.
** Author: Dan Johnson
**	Revision:   Christy Russon
** Environment: HPUX 6.5
** Library: HPUX 6.5
** History: Fri Aug 18 08:53:50 1989, DSJ, Created.
** 5/25/90, DSJ, Adapted to multiple feature types.
** Tuesday, May 17, 1998 Changes made to make feature specific and
** simplify structures. First step in simplifying training process.
**
** (c) Copyright Hewlett-Packard Company, 1988.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
******************************************************************************/
/**----------------------------------------------------------------------------
Include Files and Type Defines
----------------------------------------------------------------------------**/
#ifdef HAVE_CONFIG_H
#include "config_auto.h"
#endif
#include <string.h>
#include <stdio.h>
#define _USE_MATH_DEFINES
#include <math.h>
#ifdef _WIN32
#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif
#endif
#include "classify.h"
#include "cluster.h"
#include "clusttool.h"
#include "commontraining.h"
#include "danerror.h"
#include "efio.h"
#include "emalloc.h"
#include "featdefs.h"
#include "fontinfo.h"
#include "genericvector.h"
#include "indexmapbidi.h"
#include "intproto.h"
#include "mastertrainer.h"
#include "mergenf.h"
#include "mf.h"
#include "ndminx.h"
#include "ocrfeatures.h"
#include "oldlist.h"
#include "protos.h"
#include "shapetable.h"
#include "tessopt.h"
#include "tprintf.h"
#include "unicity_table.h"
using tesseract::Classify;
using tesseract::FontInfo;
using tesseract::FontSpacingInfo;
using tesseract::IndexMapBiDi;
using tesseract::MasterTrainer;
using tesseract::Shape;
using tesseract::ShapeTable;
#define PROGRAM_FEATURE_TYPE "mf"
// Max length of a fake shape label.
const int kMaxShapeLabelLength = 10;
DECLARE_STRING_PARAM_FLAG(test_ch);
/**----------------------------------------------------------------------------
Public Function Prototypes
----------------------------------------------------------------------------**/
int main (
int argc,
char **argv);
/*----------------------------------------------------------------------------
Public Code
-----------------------------------------------------------------------------*/
#ifndef GRAPHICS_DISABLED
// Debug visualization: draws every prototype in protolist as a line segment
// centered on its mean position, oriented by its angle and scaled by its
// length.  Color encodes status: green = significant, blue = zero samples,
// magenta = merged, red = otherwise.  Also tprintf's the green and red
// protos.  The ch parameter is currently unused.
static void DisplayProtoList(const char* ch, LIST protolist) {
  void* window = c_create_window("Char samples", 50, 200,
                                 520, 520, -130.0, 130.0, -130.0, 130.0);
  LIST proto = protolist;
  iterate(proto) {
    PROTOTYPE* prototype = reinterpret_cast<PROTOTYPE *>(first_node(proto));
    if (prototype->Significant)
      c_line_color_index(window, Green);
    else if (prototype->NumSamples == 0)
      c_line_color_index(window, Blue);
    else if (prototype->Merged)
      c_line_color_index(window, Magenta);
    else
      c_line_color_index(window, Red);
    float x = CenterX(prototype->Mean);
    float y = CenterY(prototype->Mean);
    // Angle is stored as a fraction of a full turn; convert to radians.
    double angle = OrientationOf(prototype->Mean) * 2 * M_PI;
    float dx = static_cast<float>(LengthOf(prototype->Mean) * cos(angle) / 2);
    float dy = static_cast<float>(LengthOf(prototype->Mean) * sin(angle) / 2);
    // NOTE(review): the *256 appears to scale normalized coordinates into
    // the window's coordinate space -- confirm against the window bounds.
    c_move(window, (x - dx) * 256, (y - dy) * 256);
    c_draw(window, (x + dx) * 256, (y + dy) * 256);
    if (prototype->Significant)
      tprintf("Green proto at (%g,%g)+(%g,%g) %d samples\n",
              x, y, dx, dy, prototype->NumSamples);
    else if (prototype->NumSamples > 0 && !prototype->Merged)
      tprintf("Red proto at (%g,%g)+(%g,%g) %d samples\n",
              x, y, dx, dy, prototype->NumSamples);
  }
  c_make_current(window);
}
#endif // GRAPHICS_DISABLED
// Helper to run clustering on a single config.
// Mostly copied from the old mftraining, but with renamed variables.
// Clusters the samples of one shape (shape_id) into prototypes and merges
// the result into the MERGE_CLASS for class_label inside mf_classes,
// adding one new config that references the protos.  Uses the file-global
// clustering Config and feature_defs.  Returns the (possibly extended)
// mf_classes list.
static LIST ClusterOneConfig(int shape_id, const char* class_label,
                             LIST mf_classes,
                             const ShapeTable& shape_table,
                             MasterTrainer* trainer) {
  int num_samples;
  CLUSTERER *clusterer = trainer->SetupForClustering(shape_table,
                                                     feature_defs,
                                                     shape_id,
                                                     &num_samples);
  Config.MagicSamples = num_samples;
  LIST proto_list = ClusterSamples(clusterer, &Config);
  // Drop the elliptical arrays that the cluster output no longer needs.
  CleanUpUnusedData(proto_list);
  // Merge protos where reasonable to make more of them significant by
  // representing almost all samples of the class/font.
  MergeInsignificantProtos(proto_list, class_label, clusterer, &Config);
#ifndef GRAPHICS_DISABLED
  if (strcmp(FLAGS_test_ch.c_str(), class_label) == 0)
    DisplayProtoList(FLAGS_test_ch.c_str(), proto_list);
#endif  // GRAPHICS_DISABLED
  // Delete the protos that will not be used in the inttemp output file.
  proto_list = RemoveInsignificantProtos(proto_list, true,
                                         false,
                                         clusterer->SampleSize);
  FreeClusterer(clusterer);
  // Find or create the merge class for this label.
  MERGE_CLASS merge_class = FindClass(mf_classes, class_label);
  if (merge_class == NULL) {
    merge_class = NewLabeledClass(class_label);
    mf_classes = push(mf_classes, merge_class);
  }
  int config_id = AddConfigToClass(merge_class->Class);
  merge_class->Class->font_set.push_back(shape_id);
  LIST proto_it = proto_list;
  iterate(proto_it) {
    PROTOTYPE* prototype = reinterpret_cast<PROTOTYPE*>(first_node(proto_it));
    // See if proto can be approximated by existing proto.
    int p_id = FindClosestExistingProto(merge_class->Class,
                                        merge_class->NumMerged, prototype);
    if (p_id == NO_PROTO) {
      // Need to make a new proto, as it doesn't match anything.
      p_id = AddProtoToClass(merge_class->Class);
      MakeNewFromOld(ProtoIn(merge_class->Class, p_id), prototype);
      merge_class->NumMerged[p_id] = 1;
    } else {
      PROTO_STRUCT dummy_proto;
      MakeNewFromOld(&dummy_proto, prototype);
      // Merge with the similar proto, weighting the existing proto by the
      // number of protos already merged into it.
      ComputeMergedProto(ProtoIn(merge_class->Class, p_id), &dummy_proto,
                         static_cast<FLOAT32>(merge_class->NumMerged[p_id]),
                         1.0,
                         ProtoIn(merge_class->Class, p_id));
      merge_class->NumMerged[p_id]++;
    }
    AddProtoToConfig(p_id, merge_class->Class->Configurations[config_id]);
  }
  FreeProtoList(&proto_list);
  return mf_classes;
}
// Helper to setup the config map.
// Setup an index mapping from the shapes in the shape table to the classes
// that will be trained. In keeping with the original design, each shape
// with the same list of unichars becomes a different class and the configs
// represent the different combinations of fonts.
// Builds config_map so that shapes with identical unichar lists collapse to
// a single compact id.  Each surviving compact id becomes one trained class;
// the merged sparse ids become that class's font configs.
static void SetupConfigMap(ShapeTable* shape_table, IndexMapBiDi* config_map) {
  const int num_configs = shape_table->NumShapes();
  config_map->Init(num_configs, true);
  config_map->Setup();
  for (int first = 0; first < num_configs; ++first) {
    // Skip ids that an earlier pass already merged away.
    if (config_map->SparseToCompact(first) != first) continue;
    Shape* exemplar = shape_table->MutableShape(first);
    // Merge every later shape whose unichar list equals the exemplar's.
    for (int other = first + 1; other < num_configs; ++other) {
      if (shape_table->MutableShape(other)->IsEqualUnichars(exemplar))
        config_map->Merge(first, other);
    }
  }
  config_map->CompleteMerges();
}
/*---------------------------------------------------------------------------*/
int main (int argc, char **argv) {
/*
**	Parameters:
**		argc	number of command line arguments
**		argv	array of command line arguments
**	Globals: none
**	Operation:
**		This program reads in a text file consisting of feature
**		samples from a training page in the following format:
**
**			FontName UTF8-char-str xmin ymin xmax ymax page-number
**			 NumberOfFeatureTypes(N)
**			   FeatureTypeName1 NumberOfFeatures(M)
**			      Feature1
**			      ...
**			      FeatureM
**			   FeatureTypeName2 NumberOfFeatures(M)
**			      Feature1
**			      ...
**			      FeatureM
**			   ...
**			   FeatureTypeNameN NumberOfFeatures(M)
**			      Feature1
**			      ...
**			      FeatureM
**			FontName CharName ...
**
**		The result of this program is a binary inttemp file used by
**		the OCR engine.
**	Return: 0 on success, 1 if training data failed to load.
**	Exceptions: none
**	History:	Fri Aug 18 08:56:17 1989, DSJ, Created.
**			Mon May 18 1998, Christy Russson, Revision started.
*/
  ParseArguments(&argc, &argv);
  ShapeTable* shape_table = NULL;
  STRING file_prefix;
  // Load the training data.
  MasterTrainer* trainer = tesseract::LoadTrainingData(argc, argv,
                                                       false,
                                                       &shape_table,
                                                       &file_prefix);
  if (trainer == NULL)
    return 1;  // Failed.
  // Setup an index mapping from the shapes in the shape table to the classes
  // that will be trained. In keeping with the original design, each shape
  // with the same list of unichars becomes a different class and the configs
  // represent the different combinations of fonts.
  IndexMapBiDi config_map;
  SetupConfigMap(shape_table, &config_map);
  WriteShapeTable(file_prefix, *shape_table);
  // If the shape_table is flat, then either we didn't run shape clustering, or
  // it did nothing, so we just output the trainer's unicharset.
  // Otherwise shape_set will hold a fake unicharset with an entry for each
  // shape in the shape table, and we will output that instead.
  UNICHARSET shape_set;
  const UNICHARSET* unicharset = &trainer->unicharset();
  // If we ran shapeclustering (and it worked) then at least one shape will
  // have multiple unichars, so we have to build a fake unicharset.
  if (shape_table->AnyMultipleUnichars()) {
    unicharset = &shape_set;
    // Now build a fake unicharset for the compact shape space to keep the
    // output modules happy that we are doing things correctly.
    int num_shapes = config_map.CompactSize();
    for (int s = 0; s < num_shapes; ++s) {
      char shape_label[kMaxShapeLabelLength + 1];
      snprintf(shape_label, kMaxShapeLabelLength, "sh%04d", s);
      shape_set.unichar_insert(shape_label);
    }
  }
  // Now train each config separately.
  int num_configs = shape_table->NumShapes();
  LIST mf_classes = NIL_LIST;
  for (int s = 0; s < num_configs; ++s) {
    int unichar_id, font_id;
    if (unicharset == &shape_set) {
      // Using fake unichar_ids from the config_map/shape_set.
      unichar_id = config_map.SparseToCompact(s);
    } else {
      // Get the real unichar_id from the shape table/unicharset.
      shape_table->GetFirstUnicharAndFont(s, &unichar_id, &font_id);
    }
    const char* class_label = unicharset->id_to_unichar(unichar_id);
    mf_classes = ClusterOneConfig(s, class_label, mf_classes, *shape_table,
                                  trainer);
  }
  // Output file names share the prefix computed by LoadTrainingData.
  STRING inttemp_file = file_prefix;
  inttemp_file += "inttemp";
  STRING pffmtable_file = file_prefix;
  pffmtable_file += "pffmtable";
  CLASS_STRUCT* float_classes = SetUpForFloat2Int(*unicharset, mf_classes);
  // Now write the inttemp and pffmtable.
  trainer->WriteInttempAndPFFMTable(trainer->unicharset(), *unicharset,
                                    *shape_table, float_classes,
                                    inttemp_file.string(),
                                    pffmtable_file.string());
  delete [] float_classes;
  FreeLabeledClassList(mf_classes);
  delete trainer;
  delete shape_table;
  printf("Done!\n");
  if (!FLAGS_test_ch.empty()) {
    // If we are displaying debug window(s), wait for the user to look at them.
    printf("Hit return to exit...\n");
    while (getchar() != '\n');
  }
  return 0;
} /* main */
| 1080228-arabicocr11 | training/mftraining.cpp | C++ | asf20 | 12,728 |
/******************************************************************************
** Filename: MergeNF.c
** Purpose: Program for merging similar nano-feature protos
** Author: Dan Johnson
** History: Wed Nov 21 09:55:23 1990, DSJ, Created.
**
** (c) Copyright Hewlett-Packard Company, 1988.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
******************************************************************************/
/**----------------------------------------------------------------------------
Include Files and Type Defines
----------------------------------------------------------------------------**/
#include "protos.h"
#include "cluster.h"
#include "ocrfeatures.h"
#include "callcpp.h"
#include "picofeat.h"
#define WORST_MATCH_ALLOWED (0.9)
#define WORST_EVIDENCE (1.0)
#define MAX_LENGTH_MISMATCH (2.0 * GetPicoFeatureLength ())
#define PROTO_SUFFIX ".mf.p"
#define CONFIG_SUFFIX ".cl"
#define NO_PROTO (-1)
#define XPOSITION 0
#define YPOSITION 1
#define MFLENGTH 2
#define ORIENTATION 3
/* Axis-aligned bounding rectangle in floating-point feature coordinates,
   used by ComputePaddedBoundingBox/PointInside below. */
typedef struct
{
  FLOAT32 MinX, MaxX, MinY, MaxY;
} FRECT;
/**----------------------------------------------------------------------------
Public Macros
----------------------------------------------------------------------------**/
#define CenterX(M) ( (M)[XPOSITION] )
#define CenterY(M) ( (M)[YPOSITION] )
#define LengthOf(M) ( (M)[MFLENGTH] )
#define OrientationOf(M) ( (M)[ORIENTATION] )
/**----------------------------------------------------------------------------
Public Function Prototypes
----------------------------------------------------------------------------**/
FLOAT32 CompareProtos (
PROTO p1,
PROTO p2);
void ComputeMergedProto (
PROTO p1,
PROTO p2,
FLOAT32 w1,
FLOAT32 w2,
PROTO MergedProto);
int FindClosestExistingProto (
CLASS_TYPE Class,
int NumMerged[],
PROTOTYPE *Prototype);
void MakeNewFromOld (
PROTO New,
PROTOTYPE *Old);
FLOAT32 SubfeatureEvidence (
FEATURE Feature,
PROTO Proto);
double EvidenceOf (
register double Similarity);
BOOL8 DummyFastMatch (
FEATURE Feature,
PROTO Proto);
void ComputePaddedBoundingBox (
PROTO Proto,
FLOAT32 TangentPad,
FLOAT32 OrthogonalPad,
FRECT *BoundingBox);
BOOL8 PointInside (
FRECT *Rectangle,
FLOAT32 X,
FLOAT32 Y);
| 1080228-arabicocr11 | training/mergenf.h | C | asf20 | 2,871 |
/**********************************************************************
* File: boxchar.h
* Description: Simple class to associate a Tesseract classification unit with
* its bounding box so that the boxes can be rotated as the image
* is rotated for degradation. Also includes routines to output
* the character-tagged boxes to a boxfile.
* Author: Ray Smith
* Created: Mon Nov 18 2013
*
* (C) Copyright 2013, Google Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
**********************************************************************/
#ifndef TESSERACT_TRAINING_BOXCHAR_H_
#define TESSERACT_TRAINING_BOXCHAR_H_
#include <string>
#include <vector>
#include "allheaders.h" // from Leptonica
#ifdef USE_STD_NAMESPACE
using std::string;
using std::vector;
#endif
struct Box;  // Forward declaration; the definition comes from Leptonica.

namespace tesseract {

// Associates one UTF-8 classification unit with its (optional) bounding box.
class BoxChar {
 public:
  // Copies the first len bytes of utf8_str into ch_. box_ starts out unset
  // until AddBox() is called.
  BoxChar(const char* utf8_str, int len);
  ~BoxChar();

  // Accessors.
  const string& ch() const { return ch_; }
  const Box* box() const { return box_; }
  // NOTE(review): returning const int& for a plain int is unidiomatic but
  // kept as-is for interface compatibility.
  const int& page() const { return page_; }

  // Set the box_ member.
  void AddBox(int x, int y, int width, int height);
  void set_page(int page) { page_ = page; }
  string* mutable_ch() { return &ch_; }
  Box* mutable_box() { return box_; }

  // Shifts every box in *boxes by (xshift, yshift).
  static void TranslateBoxes(int xshift, int yshift,
                             vector<BoxChar*>* boxes);
  // Rotate the vector of boxes between start and end by the given rotation.
  // The rotation is in radians clockwise about the given center.
  static void RotateBoxes(float rotation,
                          int xcenter,
                          int ycenter,
                          int start_box,
                          int end_box,
                          vector<BoxChar*>* boxes);
  // Create a tesseract box file from the vector of boxes. The image height
  // is needed to convert to tesseract coordinates.
  static void WriteTesseractBoxFile(const string& name, int height,
                                    const vector<BoxChar*>& boxes);

 private:
  string ch_;  // UTF-8 text of the unit.
  Box* box_;   // Owned; presumably released in ~BoxChar -- confirm in .cpp.
  int page_;   // Page number this box belongs to.
};
}  // namespace tesseract
#endif // TESSERACT_TRAINING_BOXCHAR_H_
| 1080228-arabicocr11 | training/boxchar.h | C++ | asf20 | 2,703 |
/**********************************************************************
* File: pango_font_info.cpp
* Description: Font-related objects and helper functions
* Author: Ranjith Unnikrishnan
* Created: Mon Nov 18 2013
*
* (C) Copyright 2013, Google Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
**********************************************************************/
// Include automatically generated configuration file if running autoconf.
#ifdef HAVE_CONFIG_H
#include "config_auto.h"
#endif
#ifdef MINGW
// workaround for stdlib.h and putenv
#undef __STRICT_ANSI__
#include "strcasestr.h"
#endif // MINGW
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <sys/param.h>
#include <algorithm>
#include "pango_font_info.h"
#include "commandlineflags.h"
#include "fileio.h"
#include "normstrngs.h"
#include "tlog.h"
#include "unichar.h"
#include "util.h"
#include "pango/pango.h"
#include "pango/pangocairo.h"
#include "pango/pangofc-font.h"
STRING_PARAM_FLAG(fonts_dir, "/auto/ocr-data/tesstraining/fonts",
"Overrides system default font location");
STRING_PARAM_FLAG(fontconfig_tmpdir, "/tmp",
"Overrides fontconfig default temporary dir");
BOOL_PARAM_FLAG(fontconfig_refresh_cache, false,
"Does a one-time deletion of cache files from the "
"fontconfig_tmpdir before initializing fontconfig.");
#ifndef USE_STD_NAMESPACE
#include "ocr/trainingdata/typesetting/legacy_fonts.h"
BOOL_PARAM_FLAG(use_only_legacy_fonts, false,
"Overrides --fonts_dir and sets the known universe of fonts to"
"the list in legacy_fonts.h");
#else
using std::pair;
#endif
namespace tesseract {
// Default assumed output resolution. Required only for providing font metrics
// in pixels.
const int kDefaultResolution = 300;
PangoFontInfo::PangoFontInfo() : desc_(NULL), resolution_(kDefaultResolution) {
Clear();
}
PangoFontInfo::PangoFontInfo(const string& desc)
: desc_(NULL), resolution_(kDefaultResolution) {
if (!ParseFontDescriptionName(desc)) {
tprintf("ERROR: Could not parse %s\n", desc.c_str());
Clear();
}
}
void PangoFontInfo::Clear() {
font_size_ = 0;
is_bold_ = false;
is_italic_ = false;
is_smallcaps_ = false;
is_monospace_ = false;
family_name_.clear();
font_type_ = UNKNOWN;
if (desc_) {
pango_font_description_free(desc_);
desc_ = NULL;
}
}
string PangoFontInfo::DescriptionName() const {
if (!desc_) return "";
char* desc_str = pango_font_description_to_string(desc_);
string desc_name(desc_str);
g_free(desc_str);
return desc_name;
}
// Initializes Fontconfig for use by writing a fake fonts.conf file into the
// FLAGS_fontconfig_tmpdir directory, that points to the supplied
// FLAGS_fonts_dir, and then overrides the FONTCONFIG_PATH environment variable
// to point to this fonts.conf file. Runs the real work at most once per
// process; subsequent calls (or an empty FLAGS_fonts_dir) are no-ops.
static void InitFontconfig() {
  static bool init_fontconfig = false;  // Process-wide one-shot guard.
  if (init_fontconfig || FLAGS_fonts_dir.empty()) {
    init_fontconfig = true;
    return;
  }
  if (FLAGS_fontconfig_refresh_cache) {
    tprintf("Deleting cache files from %s\n", FLAGS_fontconfig_tmpdir.c_str());
    File::DeleteMatchingFiles(File::JoinPath(
        FLAGS_fontconfig_tmpdir.c_str(), "*cache-2").c_str());
  }
  tprintf("Initializing fontconfig\n");
  const int MAX_FONTCONF_FILESIZE = 1024;
  char fonts_conf_template[MAX_FONTCONF_FILESIZE];
  // NOTE(review): snprintf silently truncates if the two paths exceed the
  // 1024-byte buffer, which would yield a malformed fonts.conf -- assumed
  // not to happen with realistic path lengths; confirm.
  snprintf(fonts_conf_template, MAX_FONTCONF_FILESIZE,
           "<?xml version=\"1.0\"?>\n"
           "<!DOCTYPE fontconfig SYSTEM \"fonts.dtd\">\n"
           "<fontconfig>\n"
           "<dir>%s</dir>\n"
           "<cachedir>%s</cachedir>\n"
           "<config></config>\n"
           "</fontconfig>", FLAGS_fonts_dir.c_str(),
           FLAGS_fontconfig_tmpdir.c_str());
  string fonts_conf_file = File::JoinPath(FLAGS_fontconfig_tmpdir.c_str(),
                                          "fonts.conf");
  File::WriteStringToFileOrDie(fonts_conf_template, fonts_conf_file);
#ifdef _WIN32
  // NOTE(review): passing a pointer into a local std::string is safe only
  // because MSVCRT's _putenv (which MinGW's putenv maps to) copies the
  // string; on POSIX putenv this would dangle -- confirm the mapping.
  std::string env("FONTCONFIG_PATH=");
  env.append(FLAGS_fontconfig_tmpdir.c_str());
  putenv(env.c_str());
  putenv("LANG=en_US.utf8");
#else
  setenv("FONTCONFIG_PATH", FLAGS_fontconfig_tmpdir.c_str(), true);
  // Fix the locale so that the reported font names are consistent.
  setenv("LANG", "en_US.utf8", true);
#endif  // _WIN32
  init_fontconfig = true;
}
// Fills *families with the array of font families known to the default
// Pango/Cairo font map and *n_families with its length. The caller owns the
// returned array (free with g_free) but not the family objects it points to.
static void ListFontFamilies(PangoFontFamily*** families,
                             int* n_families) {
  InitFontconfig();
  PangoFontMap* font_map = pango_cairo_font_map_get_default();
  DISABLE_HEAP_LEAK_CHECK;
  pango_font_map_list_families(font_map, families, n_families);
}

// Inspects whether a given font family is monospace. If the font is not
// available, it cannot make a decision and returns false by default.
static bool IsMonospaceFontFamily(const char* family_name) {
  PangoFontFamily** families = 0;
  int n_families = 0;
  bool is_monospace = false;
  ListFontFamilies(&families, &n_families);
  ASSERT_HOST(n_families > 0);
  bool found = false;
  // Case-insensitive match, since Pango reports canonical capitalization
  // that may differ from the query string.
  for (int i = 0; i < n_families; ++i) {
    if (!strcasecmp(family_name, pango_font_family_get_name(families[i]))) {
      is_monospace = pango_font_family_is_monospace(families[i]);
      found = true;
      break;
    }
  }
  if (!found) {
    tlog(1, "Could not find monospace property of family %s\n", family_name);
  }
  g_free(families);  // Frees only the array; the families are owned by Pango.
  return is_monospace;
}
// Parses the font attributes (family, size, weight, style, variant) out of
// desc into this object, taking a private copy of desc. Returns false and
// leaves the object cleared if the description carries no family name.
bool PangoFontInfo::ParseFontDescription(const PangoFontDescription *desc) {
  Clear();
  const char* family = pango_font_description_get_family(desc);
  if (!family) {
    char* desc_str = pango_font_description_to_string(desc);
    tprintf("WARNING: Could not parse family name from description: '%s'\n",
            desc_str);
    g_free(desc_str);
    return false;
  }
  family_name_ = string(family);
  desc_ = pango_font_description_copy(desc);
  is_monospace_ = IsMonospaceFontFamily(family);
  // Set font size in points: Pango reports size in pango units unless the
  // description uses absolute (device) units.
  font_size_ = pango_font_description_get_size(desc);
  if (!pango_font_description_get_size_is_absolute(desc)) {
    font_size_ /= PANGO_SCALE;
  }
  PangoStyle style = pango_font_description_get_style(desc);
  is_italic_ = (PANGO_STYLE_ITALIC == style ||
                PANGO_STYLE_OBLIQUE == style);
  is_smallcaps_ = (pango_font_description_get_variant(desc)
                   == PANGO_VARIANT_SMALL_CAPS);
  is_bold_ = (pango_font_description_get_weight(desc) >= PANGO_WEIGHT_BOLD);
  // We dont have a way to detect whether a font is of type Fraktur. The fonts
  // we currently use all have "Fraktur" in their family name, so we do a
  // fragile but functional check for that here.
  is_fraktur_ = (strcasestr(family, "Fraktur") != NULL);
  return true;
}

// Convenience wrapper: parses a description from its string form, eg.
// "Verdana Bold 12". Returns false on failure.
bool PangoFontInfo::ParseFontDescriptionName(const string& name) {
  PangoFontDescription *desc = pango_font_description_from_string(name.c_str());
  bool success = ParseFontDescription(desc);
  pango_font_description_free(desc);
  return success;
}
// Returns the PangoFont structure corresponding to the closest available font
// in the font map. Note that if the font is wholly missing, this could
// correspond to a completely different font family and face.
PangoFont* PangoFontInfo::ToPangoFont() const {
  InitFontconfig();
  PangoFontMap* font_map = pango_cairo_font_map_get_default();
  PangoContext* context = pango_context_new();
  // Apply our assumed resolution so font metrics come back in consistent
  // pixel units.
  pango_cairo_context_set_resolution(context, resolution_);
  pango_context_set_font_map(context, font_map);
  PangoFont* font = NULL;
  {
    // Pango/Fontconfig cache data here that is never freed before exit.
    DISABLE_HEAP_LEAK_CHECK;
    font = pango_font_map_load_font(font_map, context, desc_);
  }
  g_object_unref(context);
  return font;
}
// Returns true if this font has an exact glyph for every non-whitespace,
// non-zero-width codepoint in the first byte_length bytes of utf8_text.
// NOTE(review): the PangoFont/PangoCoverage references acquired here are not
// released; presumably acceptable for short-lived training tools -- confirm.
bool PangoFontInfo::CoversUTF8Text(const char* utf8_text, int byte_length) const {
  PangoFont* font = ToPangoFont();
  PangoCoverage* coverage = pango_font_get_coverage(font, NULL);
  for (UNICHAR::const_iterator it = UNICHAR::begin(utf8_text, byte_length);
       it != UNICHAR::end(utf8_text, byte_length);
       ++it) {
    // Whitespace and zero-width characters never count against coverage.
    if (IsWhitespace(*it) || pango_is_zero_width(*it))
      continue;
    if (pango_coverage_get(coverage, *it) != PANGO_COVERAGE_EXACT) {
      char tmp[5];
      int len = it.get_utf8(tmp);
      tmp[len] = '\0';
      tlog(2, "'%s' (U+%x) not covered by font\n", tmp, *it);
      return false;
    }
  }
  return true;
}

// Removes, in place, every codepoint of *utf8_text that this font has no
// exact glyph for (whitespace and zero-width characters are always kept).
// Returns the number of dropped codepoints.
int PangoFontInfo::DropUncoveredChars(string* utf8_text) const {
  PangoFont* font = ToPangoFont();
  PangoCoverage* coverage = pango_font_get_coverage(font, NULL);
  int num_dropped_chars = 0;
  // Maintain two iterators that point into the string. For space efficiency, we
  // will repeatedly copy one covered UTF8 character from one to the other, and
  // at the end resize the string to the right length.
  // Writing through this const_cast pointer is safe because `out` never gets
  // ahead of the read iterator.
  char* out = const_cast<char*>(utf8_text->c_str());
  const UNICHAR::const_iterator it_begin =
      UNICHAR::begin(utf8_text->c_str(), utf8_text->length());
  const UNICHAR::const_iterator it_end =
      UNICHAR::end(utf8_text->c_str(), utf8_text->length());
  for (UNICHAR::const_iterator it = it_begin; it != it_end; ++it) {
    // Skip bad utf-8.
    if (!it.is_legal())
      continue;  // One suitable error message will still be issued.
    if (!IsWhitespace(*it) && !pango_is_zero_width(*it) &&
        pango_coverage_get(coverage, *it) != PANGO_COVERAGE_EXACT) {
      if (TLOG_IS_ON(2)) {
        char tmp[5];
        int len = it.get_utf8(tmp);
        tmp[len] = '\0';
        tlog(2, "'%s' (U+%x) not covered by font\n", tmp, *it);
      }
      ++num_dropped_chars;
      continue;
    }
    strncpy(out, it.utf8_data(), it.utf8_len());
    out += it.utf8_len();
  }
  utf8_text->resize(out - utf8_text->c_str());
  return num_dropped_chars;
}
// Computes spacing metrics for utf8_char rendered in this font. Returns
// false if any codepoint in utf8_char has no glyph in the font. For
// multi-codepoint strings, *x_bearing is the left-most bearing and
// *x_advance the total advance of the glyphs laid out sequentially
// (no shaping or kerning is applied).
bool PangoFontInfo::GetSpacingProperties(const string& utf8_char,
                                         int* x_bearing, int* x_advance) const {
  // Convert to equivalent PangoFont structure
  PangoFont* font = ToPangoFont();
  // Find the glyph index in the font for the supplied utf8 character.
  int total_advance = 0;
  int min_bearing = 0;
  // Handle multi-unicode strings by reporting the left-most position of the
  // x-bearing, and right-most position of the x-advance if the string were to
  // be rendered.
  const UNICHAR::const_iterator it_begin = UNICHAR::begin(utf8_char.c_str(),
                                                          utf8_char.length());
  const UNICHAR::const_iterator it_end = UNICHAR::end(utf8_char.c_str(),
                                                      utf8_char.length());
  for (UNICHAR::const_iterator it = it_begin; it != it_end; ++it) {
    PangoGlyph glyph_index = pango_fc_font_get_glyph(
        reinterpret_cast<PangoFcFont*>(font), *it);
    if (!glyph_index) {
      // Glyph for given unicode character doesn't exist in font.
      return false;
    }
    // Find the ink glyph extents for the glyph
    PangoRectangle ink_rect, logical_rect;
    pango_font_get_glyph_extents(font, glyph_index, &ink_rect, &logical_rect);
    pango_extents_to_pixels(&ink_rect, NULL);
    pango_extents_to_pixels(&logical_rect, NULL);
    // Bearing of this glyph relative to the start of the whole string.
    int bearing = total_advance + PANGO_LBEARING(ink_rect);
    if (it == it_begin || bearing < min_bearing) {
      min_bearing = bearing;
    }
    total_advance += PANGO_RBEARING(logical_rect);
  }
  *x_bearing = min_bearing;
  *x_advance = total_advance;
  return true;
}
bool PangoFontInfo::CanRenderString(const char* utf8_word, int len) const {
vector<string> graphemes;
return CanRenderString(utf8_word, len, &graphemes);
}
// Returns true if this font can render every grapheme of utf8_word without
// producing an unknown glyph or the U+25CC dotted-circle placeholder that
// marks an illegal grapheme sequence. If graphemes is non-NULL it receives
// the grapheme clusters Pango produced on success, and is cleared on failure.
bool PangoFontInfo::CanRenderString(const char* utf8_word, int len,
                                    vector<string>* graphemes) const {
  if (graphemes) graphemes->clear();
  // We check for font coverage of the text first, as otherwise Pango could
  // (undesirably) fall back to another font that does have the required
  // coverage.
  if (!CoversUTF8Text(utf8_word, len)) {
    return false;
  }
  // U+25CC dotted circle character that often (but not always) gets rendered
  // when there is an illegal grapheme sequence.
  const char32 kDottedCircleGlyph = 9676;
  bool bad_glyph = false;
  PangoFontMap* font_map = pango_cairo_font_map_get_default();
  PangoContext* context = pango_context_new();
  pango_context_set_font_map(context, font_map);
  PangoLayout* layout;
  {
    // Pango is not releasing the cached layout.
    DISABLE_HEAP_LEAK_CHECK;
    layout = pango_layout_new(context);
  }
  if (desc_) {
    pango_layout_set_font_description(layout, desc_);
  } else {
    PangoFontDescription *desc = pango_font_description_from_string(
        DescriptionName().c_str());
    pango_layout_set_font_description(layout, desc);
    pango_font_description_free(desc);
  }
  pango_layout_set_text(layout, utf8_word, len);
  PangoLayoutIter* run_iter = NULL;
  {  // Fontconfig caches some information here that is not freed before exit.
    DISABLE_HEAP_LEAK_CHECK;
    run_iter = pango_layout_get_iter(layout);
  }
  do {
    PangoLayoutRun* run = pango_layout_iter_get_run_readonly(run_iter);
    if (!run) {
      tlog(2, "Found end of line NULL run marker\n");
      continue;
    }
    PangoGlyph dotted_circle_glyph;
    PangoFont* font = run->item->analysis.font;
    // Glyph id of U+25CC in the font actually chosen for this run, used to
    // detect illegal-sequence placeholders below.
    dotted_circle_glyph = pango_fc_font_get_glyph(
        reinterpret_cast<PangoFcFont*>(font), kDottedCircleGlyph);
    if (TLOG_IS_ON(2)) {
      PangoFontDescription* desc = pango_font_describe(font);
      char* desc_str = pango_font_description_to_string(desc);
      tlog(2, "Desc of font in run: %s\n", desc_str);
      g_free(desc_str);
      pango_font_description_free(desc);
    }
    PangoGlyphItemIter cluster_iter;
    gboolean have_cluster;
    // Walk the grapheme clusters of this run.
    for (have_cluster = pango_glyph_item_iter_init_start(&cluster_iter,
                                                         run, utf8_word);
         have_cluster && !bad_glyph;
         have_cluster = pango_glyph_item_iter_next_cluster(&cluster_iter)) {
      const int start_byte_index = cluster_iter.start_index;
      const int end_byte_index = cluster_iter.end_index;
      int start_glyph_index = cluster_iter.start_glyph;
      int end_glyph_index = cluster_iter.end_glyph;
      string cluster_text = string(utf8_word + start_byte_index,
                                   end_byte_index - start_byte_index);
      if (graphemes) graphemes->push_back(cluster_text);
      if (IsUTF8Whitespace(cluster_text.c_str())) {
        tlog(2, "Skipping whitespace\n");
        continue;
      }
      if (TLOG_IS_ON(2)) {
        printf("start_byte=%d end_byte=%d start_glyph=%d end_glyph=%d ",
               start_byte_index, end_byte_index,
               start_glyph_index, end_glyph_index);
      }
      // Glyph indices run backwards for right-to-left runs, hence the
      // computed step direction.
      for (int i = start_glyph_index,
               step = (end_glyph_index > start_glyph_index) ? 1 : -1;
           !bad_glyph && i != end_glyph_index; i+= step) {
        const bool unknown_glyph =
            (cluster_iter.glyph_item->glyphs->glyphs[i].glyph &
             PANGO_GLYPH_UNKNOWN_FLAG);
        const bool illegal_glyph =
            (cluster_iter.glyph_item->glyphs->glyphs[i].glyph ==
             dotted_circle_glyph);
        bad_glyph = unknown_glyph || illegal_glyph;
        if (TLOG_IS_ON(2)) {
          printf("(%d=%d)", cluster_iter.glyph_item->glyphs->glyphs[i].glyph,
                 bad_glyph ? 1 : 0);
        }
      }
      if (TLOG_IS_ON(2)) {
        printf(" '%s'\n", cluster_text.c_str());
      }
      if (bad_glyph)
        tlog(1, "Found illegal glyph!\n");
    }
  } while (!bad_glyph && pango_layout_iter_next_run(run_iter));
  pango_layout_iter_free(run_iter);
  g_object_unref(context);
  g_object_unref(layout);
  if (bad_glyph && graphemes) graphemes->clear();
  return !bad_glyph;
}
// ------------------------ FontUtils ------------------------------------
// Returns whether the specified font description is available in the fonts
// directory.
//
// The generated list of font families and faces includes "synthesized" font
// faces that are not truly loadable. Pango versions >=1.18 have a
// pango_font_face_is_synthesized method that can be used to prune the list.
// Until then, we are restricted to using a hack where we try to load the font
// from the font_map, and then check what we loaded to see if it has the
// description we expected. If it is not, then the font is deemed unavailable.
/* static */
bool FontUtils::IsAvailableFont(const char* input_query_desc) {
  string query_desc(input_query_desc);
  if (PANGO_VERSION <= 12005) {
    // Strip commas and any ' Medium' substring in the name, which old Pango
    // versions cannot handle.
    query_desc.erase(std::remove(query_desc.begin(), query_desc.end(), ','),
                     query_desc.end());
    const string kMediumStr = " Medium";
    std::size_t found = query_desc.find(kMediumStr);
    if (found != std::string::npos) {
      query_desc.erase(found, kMediumStr.length());
    }
  }
  PangoFontDescription *desc = pango_font_description_from_string(
      query_desc.c_str());
  PangoFont* selected_font = NULL;
  {
    InitFontconfig();
    PangoFontMap* font_map = pango_cairo_font_map_get_default();
    PangoContext* context = pango_context_new();
    pango_context_set_font_map(context, font_map);
    {
      DISABLE_HEAP_LEAK_CHECK;
      selected_font = pango_font_map_load_font(font_map, context, desc);
    }
    g_object_unref(context);
  }
  if (selected_font == NULL) {
    pango_font_description_free(desc);
    return false;
  }
  PangoFontDescription* selected_desc = pango_font_describe(selected_font);
  // The font is available only if what the font map actually loaded matches
  // the description we asked for.
  bool equal = pango_font_description_equal(desc, selected_desc);
  tlog(3, "query weight = %d \t selected weight =%d\n",
       pango_font_description_get_weight(desc),
       pango_font_description_get_weight(selected_desc));
  char* selected_desc_str = pango_font_description_to_string(selected_desc);
  // Bug fix: the second conversion was written as 's' (missing '%'), so
  // selected_desc_str was passed but never printed.
  tlog(2, "query_desc: '%s' Selected: '%s'\n", query_desc.c_str(),
       selected_desc_str);
  g_free(selected_desc_str);
  pango_font_description_free(selected_desc);
  g_object_unref(selected_font);
  pango_font_description_free(desc);
  return equal;
}
// Returns true if query is one of the generic family aliases ("Sans",
// "Serif", "Monospace") that name a fallback rather than a real font and
// should be skipped when enumerating fonts.
static bool ShouldIgnoreFontFamilyName(const char* query) {
  static const char* kIgnoredFamilyNames[]
      = { "Sans", "Serif", "Monospace", NULL };
  for (const char** name = kIgnoredFamilyNames; *name != NULL; ++name) {
    if (strcmp(*name, query) == 0) {
      return true;
    }
  }
  return false;
}
// Outputs description names of available fonts.
/* static */
const vector<string>& FontUtils::ListAvailableFonts() {
  // Computed once on first call and cached for the process lifetime.
  static vector<string> available_fonts_;  // cache list
  if (available_fonts_.size()) {
    return available_fonts_;
  }
#ifndef USE_STD_NAMESPACE
  if (FLAGS_use_only_legacy_fonts) {
    // Restrict view to list of fonts in legacy_fonts.h
    tprintf("Using list of legacy fonts only\n");
    const int kNumFontLists = 4;
    for (int i = 0; i < kNumFontLists; ++i) {
      for (int j = 0; kFontlists[i][j] != NULL; ++j) {
        available_fonts_.push_back(kFontlists[i][j]);
      }
    }
    return available_fonts_;
  }
#endif
  PangoFontFamily** families = 0;
  int n_families = 0;
  ListFontFamilies(&families, &n_families);
  for (int i = 0; i < n_families; ++i) {
    const char* family_name = pango_font_family_get_name(families[i]);
    tlog(2, "Listing family %s\n", family_name);
    // Skip generic aliases like "Sans" that do not name a concrete font.
    if (ShouldIgnoreFontFamilyName(family_name))
      continue;
    int n_faces;
    PangoFontFace** faces = NULL;
    pango_font_family_list_faces(families[i], &faces, &n_faces);
    for (int j = 0; j < n_faces; ++j) {
      PangoFontDescription* desc = pango_font_face_describe(faces[j]);
      char* desc_str = pango_font_description_to_string(desc);
      // Keep only faces that genuinely load (prunes synthesized faces;
      // see IsAvailableFont above).
      if (IsAvailableFont(desc_str)) {
        available_fonts_.push_back(desc_str);
      }
      pango_font_description_free(desc);
      g_free(desc_str);
    }
    g_free(faces);
  }
  g_free(families);
  sort(available_fonts_.begin(), available_fonts_.end());
  return available_fonts_;
}
// Fills *unichar_bitmap, indexed by codepoint up to U+10FFFF, with whether
// the given coverage map exactly covers each interchange-valid codepoint.
static void CharCoverageMapToBitmap(PangoCoverage* coverage,
                                    vector<bool>* unichar_bitmap) {
  const int kMinUnicodeValue = 33;  // Skip control characters and space.
  const int kMaxUnicodeValue = 0x10FFFF;
  unichar_bitmap->resize(kMaxUnicodeValue + 1, false);
  // Mark off characters that the font can render.
  for (int i = kMinUnicodeValue; i <= kMaxUnicodeValue; ++i) {
    if (IsInterchangeValid(i)) {
      (*unichar_bitmap)[i]
          = (pango_coverage_get(coverage, i) == PANGO_COVERAGE_EXACT);
    }
  }
}
/* static */
// Union of renderable characters over every available font.
void FontUtils::GetAllRenderableCharacters(vector<bool>* unichar_bitmap) {
  const vector<string>& all_fonts = ListAvailableFonts();
  return GetAllRenderableCharacters(all_fonts, unichar_bitmap);
}
/* static */
// Renderable characters for the single font named font_name.
// NOTE(review): the PangoCoverage obtained here is never unreffed --
// presumably acceptable for one-shot training tools; confirm.
void FontUtils::GetAllRenderableCharacters(const string& font_name,
                                           vector<bool>* unichar_bitmap) {
  PangoFontInfo font_info(font_name);
  PangoCoverage* coverage = pango_font_get_coverage(
      font_info.ToPangoFont(), NULL);
  CharCoverageMapToBitmap(coverage, unichar_bitmap);
}
/* static */
// Computes the union of the coverage maps of all the given fonts and marks
// each interchange-valid codepoint renderable by at least one of them in
// *unichar_bitmap (indexed by codepoint).
void FontUtils::GetAllRenderableCharacters(const vector<string>& fonts,
                                           vector<bool>* unichar_bitmap) {
  // Form the union of coverage maps from the fonts
  PangoCoverage* all_coverage = pango_coverage_new();
  // Bug fix: fonts.size() is a size_t; passing it through "..." for %d is
  // undefined behavior on 64-bit platforms, so cast explicitly.
  tlog(1, "Processing %d fonts\n", static_cast<int>(fonts.size()));
  for (size_t i = 0; i < fonts.size(); ++i) {
    PangoFontInfo font_info(fonts[i]);
    PangoCoverage* coverage = pango_font_get_coverage(
        font_info.ToPangoFont(), NULL);
    // Mark off characters that any font can render.
    pango_coverage_max(all_coverage, coverage);
  }
  CharCoverageMapToBitmap(all_coverage, unichar_bitmap);
  pango_coverage_unref(all_coverage);
}
// Utilities written to be backward compatible with StringRender
/* static */
// Scores how well font `fontname` covers the character histogram ch_map.
// Returns the occurrence-weighted count of covered characters; *raw_score
// receives the unweighted count of distinct covered characters. If ch_flags
// is non-NULL it gets one coverage bit per histogram entry, in map iteration
// order. Whitespace always counts as covered.
int FontUtils::FontScore(const unordered_map<char32, inT64>& ch_map,
                         const string& fontname,
                         int* raw_score,
                         vector<bool>* ch_flags) {
  PangoFontInfo font_info;
  if (!font_info.ParseFontDescriptionName(fontname)) {
    tprintf("ERROR: Could not parse %s\n", fontname.c_str());
  }
  PangoFont* font = font_info.ToPangoFont();
  PangoCoverage* coverage = pango_font_get_coverage(font, NULL);
  if (ch_flags) {
    ch_flags->clear();
    ch_flags->reserve(ch_map.size());
  }
  *raw_score = 0;
  int ok_chars = 0;
  for (unordered_map<char32, inT64>::const_iterator it = ch_map.begin();
       it != ch_map.end(); ++it) {
    bool covered = (IsWhitespace(it->first) ||
                    (pango_coverage_get(coverage, it->first)
                     == PANGO_COVERAGE_EXACT));
    if (covered) {
      ++(*raw_score);
      // NOTE(review): inT64 counts accumulate into an int; could overflow
      // for very large corpora -- confirm acceptable.
      ok_chars += it->second;
    }
    if (ch_flags) {
      ch_flags->push_back(covered);
    }
  }
  return ok_chars;
}
/* static */
// Selects the set of available fonts that score well on the character
// histogram ch_map. Each selected entry in *fonts pairs the font name with
// its per-character coverage flags from FontScore(). Returns the selected
// font names joined with newlines.
string FontUtils::BestFonts(const unordered_map<char32, inT64>& ch_map,
    vector<pair<const char*, vector<bool> > >* fonts) {
  const double kMinOKFraction = 0.99;
  // Weighted fraction of characters that must be renderable in a font to make
  // it OK even if the raw count is not good.
  const double kMinWeightedFraction = 0.99995;
  fonts->clear();
  vector<vector<bool> > font_flags;
  vector<int> font_scores;
  vector<int> raw_scores;
  int most_ok_chars = 0;
  int best_raw_score = 0;
  const vector<string>& font_names = FontUtils::ListAvailableFonts();
  // First pass: score every font and remember the best scores seen.
  for (int i = 0; i < font_names.size(); ++i) {
    vector<bool> ch_flags;
    int raw_score = 0;
    int ok_chars = FontScore(ch_map, font_names[i], &raw_score, &ch_flags);
    most_ok_chars = MAX(ok_chars, most_ok_chars);
    best_raw_score = MAX(raw_score, best_raw_score);
    font_flags.push_back(ch_flags);
    font_scores.push_back(ok_chars);
    raw_scores.push_back(raw_score);
  }
  // Now select the fonts with a score above a threshold fraction
  // of both the raw and weighted best scores. To prevent bogus fonts being
  // selected for CJK, we require a high fraction (kMinOKFraction = 0.99) of
  // BOTH weighted and raw scores.
  // In low character-count scripts, the issue is more getting enough fonts,
  // when only 1 or 2 might have all those rare dingbats etc in them, so we
  // allow a font with a very high weighted (coverage) score
  // (kMinWeightedFraction = 0.99995) to be used even if its raw score is poor.
  // NOTE(review): if no font covers anything, most_ok_chars/best_raw_score
  // stay 0 and the percentage computations below divide by zero (inf in
  // double arithmetic) -- confirm this path cannot occur in practice.
  int least_good_enough = static_cast<int>(most_ok_chars * kMinOKFraction);
  int least_raw_enough = static_cast<int>(best_raw_score * kMinOKFraction);
  int override_enough = static_cast<int>(most_ok_chars * kMinWeightedFraction);
  string font_list;
  for (int i = 0; i < font_names.size(); ++i) {
    int score = font_scores[i];
    int raw_score = raw_scores[i];
    if ((score >= least_good_enough && raw_score >= least_raw_enough) ||
        score >= override_enough) {
      fonts->push_back(make_pair(font_names[i].c_str(), font_flags[i]));
      tlog(1, "OK font %s = %.4f%%, raw = %d = %.2f%%\n",
           font_names[i].c_str(),
           100.0 * score / most_ok_chars,
           raw_score, 100.0 * raw_score / best_raw_score);
      font_list += font_names[i];
      font_list += "\n";
    } else if (score >= least_good_enough || raw_score >= least_raw_enough) {
      tlog(1, "Runner-up font %s = %.4f%%, raw = %d = %.2f%%\n",
           font_names[i].c_str(),
           100.0 * score / most_ok_chars,
           raw_score, 100.0 * raw_score / best_raw_score);
    }
  }
  return font_list;
}
/* static */
// Convenience overload: searches the full list of available fonts.
bool FontUtils::SelectFont(const char* utf8_word, const int utf8_len,
                           string* font_name, vector<string>* graphemes) {
  return SelectFont(utf8_word, utf8_len, ListAvailableFonts(), font_name,
                    graphemes);
}
/* static */
// Returns true and sets *font_name (and *graphemes) from the first font in
// all_fonts that can render utf8_word without bad glyphs. Clears both
// outputs and returns false if no font qualifies.
bool FontUtils::SelectFont(const char* utf8_word, const int utf8_len,
                           const vector<string>& all_fonts,
                           string* font_name, vector<string>* graphemes) {
  if (font_name) font_name->clear();
  if (graphemes) graphemes->clear();
  for (int i = 0; i < all_fonts.size(); ++i) {
    PangoFontInfo font;
    vector<string> found_graphemes;
    ASSERT_HOST_MSG(font.ParseFontDescriptionName(all_fonts[i]),
                    "Could not parse font desc name %s\n",
                    all_fonts[i].c_str());
    if (font.CanRenderString(utf8_word, utf8_len, &found_graphemes)) {
      if (graphemes) graphemes->swap(found_graphemes);
      if (font_name) *font_name = all_fonts[i];
      return true;
    }
  }
  return false;
}
} // namespace tesseract
| 1080228-arabicocr11 | training/pango_font_info.cpp | C++ | asf20 | 27,006 |
#include "commandlineflags.h"
#ifdef USE_STD_NAMESPACE
namespace tesseract {
bool IntFlagExists(const char* flag_name, inT32* value) {
STRING full_flag_name("FLAGS_");
full_flag_name += flag_name;
GenericVector<IntParam*> empty;
IntParam *p = ParamUtils::FindParam<IntParam>(
full_flag_name.string(), GlobalParams()->int_params, empty);
if (p == NULL) return false;
*value = (inT32)(*p);
return true;
}
bool DoubleFlagExists(const char* flag_name, double* value) {
STRING full_flag_name("FLAGS_");
full_flag_name += flag_name;
GenericVector<DoubleParam*> empty;
DoubleParam *p = ParamUtils::FindParam<DoubleParam>(
full_flag_name.string(), GlobalParams()->double_params, empty);
if (p == NULL) return false;
*value = static_cast<double>(*p);
return true;
}
bool BoolFlagExists(const char* flag_name, bool* value) {
STRING full_flag_name("FLAGS_");
full_flag_name += flag_name;
GenericVector<BoolParam*> empty;
BoolParam *p = ParamUtils::FindParam<BoolParam>(
full_flag_name.string(), GlobalParams()->bool_params, empty);
if (p == NULL) return false;
*value = (BOOL8)(*p);
return true;
}
bool StringFlagExists(const char* flag_name, const char** value) {
STRING full_flag_name("FLAGS_");
full_flag_name += flag_name;
GenericVector<StringParam*> empty;
StringParam *p = ParamUtils::FindParam<StringParam>(
full_flag_name.string(), GlobalParams()->string_params, empty);
*value = (p != NULL) ? p->string() : NULL;
return p != NULL;
}
void SetIntFlagValue(const char* flag_name, const inT32 new_val) {
STRING full_flag_name("FLAGS_");
full_flag_name += flag_name;
GenericVector<IntParam*> empty;
IntParam *p = ParamUtils::FindParam<IntParam>(
full_flag_name.string(), GlobalParams()->int_params, empty);
ASSERT_HOST(p != NULL);
p->set_value(new_val);
}
void SetDoubleFlagValue(const char* flag_name, const double new_val) {
STRING full_flag_name("FLAGS_");
full_flag_name += flag_name;
GenericVector<DoubleParam*> empty;
DoubleParam *p = ParamUtils::FindParam<DoubleParam>(
full_flag_name.string(), GlobalParams()->double_params, empty);
ASSERT_HOST(p != NULL);
p->set_value(new_val);
}
void SetBoolFlagValue(const char* flag_name, const bool new_val) {
STRING full_flag_name("FLAGS_");
full_flag_name += flag_name;
GenericVector<BoolParam*> empty;
BoolParam *p = ParamUtils::FindParam<BoolParam>(
full_flag_name.string(), GlobalParams()->bool_params, empty);
ASSERT_HOST(p != NULL);
p->set_value(new_val);
}
void SetStringFlagValue(const char* flag_name, const char* new_val) {
STRING full_flag_name("FLAGS_");
full_flag_name += flag_name;
GenericVector<StringParam*> empty;
StringParam *p = ParamUtils::FindParam<StringParam>(
full_flag_name.string(), GlobalParams()->string_params, empty);
ASSERT_HOST(p != NULL);
p->set_value(STRING(new_val));
}
// Parses str as a base-10 integer into *val. Returns true only if the whole
// string was consumed AND at least one character was converted.
// Bug fix: the previous version only checked *endptr == '\0', so an empty
// (or whitespace-only) string was reported as a valid parse of 0.
bool SafeAtoi(const char* str, int* val) {
  char *endptr = NULL;
  *val = strtol(str, &endptr, 10);
  // endptr == str means strtol performed no conversion at all.
  return endptr != NULL && endptr != str && *endptr == '\0';
}

// Parses str as a floating-point number into *val. Returns true only if the
// whole string was consumed AND at least one character was converted.
// Bug fix: same empty-string acceptance issue as SafeAtoi above.
bool SafeAtod(const char* str, double* val) {
  char *endptr = NULL;
  *val = strtod(str, &endptr);
  return endptr != NULL && endptr != str && *endptr == '\0';
}
// Prints, one per line, the name, help text, type and default value of every
// registered command-line flag (global parameters whose names begin with
// "FLAGS_"), grouped as ints, doubles, bools, then strings.
void PrintCommandLineFlags() {
  const char* kFlagNamePrefix = "FLAGS_";
  const int kFlagNamePrefixLen = strlen(kFlagNamePrefix);
  // Int flags.
  for (int i = 0; i < GlobalParams()->int_params.size(); ++i) {
    if (!strncmp(GlobalParams()->int_params[i]->name_str(),
                 kFlagNamePrefix, kFlagNamePrefixLen)) {
      tprintf("  --%s  %s  (type:int default:%d)\n",
              GlobalParams()->int_params[i]->name_str() + kFlagNamePrefixLen,
              GlobalParams()->int_params[i]->info_str(),
              inT32(*(GlobalParams()->int_params[i])));
    }
  }
  // Double flags.
  for (int i = 0; i < GlobalParams()->double_params.size(); ++i) {
    if (!strncmp(GlobalParams()->double_params[i]->name_str(),
                 kFlagNamePrefix, kFlagNamePrefixLen)) {
      tprintf("  --%s  %s  (type:double default:%g)\n",
              GlobalParams()->double_params[i]->name_str() + kFlagNamePrefixLen,
              GlobalParams()->double_params[i]->info_str(),
              static_cast<double>(*(GlobalParams()->double_params[i])));
    }
  }
  // Bool flags.
  for (int i = 0; i < GlobalParams()->bool_params.size(); ++i) {
    if (!strncmp(GlobalParams()->bool_params[i]->name_str(),
                 kFlagNamePrefix, kFlagNamePrefixLen)) {
      tprintf("  --%s  %s  (type:bool default:%s)\n",
              GlobalParams()->bool_params[i]->name_str() + kFlagNamePrefixLen,
              GlobalParams()->bool_params[i]->info_str(),
              (BOOL8(*(GlobalParams()->bool_params[i])) ? "true" : "false"));
    }
  }
  // String flags.
  for (int i = 0; i < GlobalParams()->string_params.size(); ++i) {
    if (!strncmp(GlobalParams()->string_params[i]->name_str(),
                 kFlagNamePrefix, kFlagNamePrefixLen)) {
      tprintf("  --%s  %s  (type:string default:%s)\n",
              GlobalParams()->string_params[i]->name_str() + kFlagNamePrefixLen,
              GlobalParams()->string_params[i]->info_str(),
              GlobalParams()->string_params[i]->string());
    }
  }
}
// Parses command-line flags of the forms --flag=value and --flag value (and,
// for booleans, bare --flag), updating the corresponding registered
// "FLAGS_..." parameters. Prints usage and exits for --help/--helpshort, and
// exits with an error for malformed or unknown flags. Parsing stops at the
// first argument that does not begin with '-'. If remove_flags is true,
// *argv/*argc are adjusted to skip the consumed flag arguments while keeping
// argv[0] as the program name.
void ParseCommandLineFlags(const char* usage,
                           int* argc, char*** argv,
                           const bool remove_flags) {
  // Index of the first non-flag argument when the loop exits. Bug fix: this
  // was 'unsigned int', causing signed/unsigned comparisons against *argc
  // (an int) throughout the loop.
  int i = 1;
  for (i = 1; i < *argc; ++i) {
    const char* current_arg = (*argv)[i];
    // If argument does not start with a hyphen then break.
    if (current_arg[0] != '-') {
      break;
    }
    // Position current_arg after starting hyphens. We treat a sequence of
    // consecutive hyphens of any length identically.
    while (*current_arg == '-') {
      ++current_arg;
    }
    // If this is asking for usage, print the help message and abort.
    if (!strcmp(current_arg, "help") ||
        !strcmp(current_arg, "helpshort")) {
      tprintf("USAGE: %s\n", usage);
      PrintCommandLineFlags();
      exit(0);
    }
    // Find the starting position of the value if it was specified in this
    // string.
    const char* equals_position = strchr(current_arg, '=');
    const char* rhs = NULL;
    if (equals_position != NULL) {
      rhs = equals_position + 1;
    }
    // Extract the flag name.
    STRING lhs;
    if (equals_position == NULL) {
      lhs = current_arg;
    } else {
      lhs.assign(current_arg, equals_position - current_arg);
    }
    if (!lhs.length()) {
      tprintf("ERROR: Bad argument: %s\n", (*argv)[i]);
      exit(1);
    }
    // Find the flag name in the list of global flags.
    // inT32 flag
    inT32 int_val;
    if (IntFlagExists(lhs.string(), &int_val)) {
      if (rhs != NULL) {
        if (!strlen(rhs)) {
          // Bad input of the format --int_flag=
          tprintf("ERROR: Bad argument: %s\n", (*argv)[i]);
          exit(1);
        }
        if (!SafeAtoi(rhs, &int_val)) {
          tprintf("ERROR: Could not parse int from %s in flag %s\n",
                  rhs, (*argv)[i]);
          exit(1);
        }
      } else {
        // We need to parse the next argument
        if (i + 1 >= *argc) {
          tprintf("ERROR: Could not find value argument for flag %s\n",
                  lhs.string());
          exit(1);
        } else {
          ++i;
          if (!SafeAtoi((*argv)[i], &int_val)) {
            tprintf("ERROR: Could not parse inT32 from %s\n", (*argv)[i]);
            exit(1);
          }
        }
      }
      SetIntFlagValue(lhs.string(), int_val);
      continue;
    }
    // double flag
    double double_val;
    if (DoubleFlagExists(lhs.string(), &double_val)) {
      if (rhs != NULL) {
        if (!strlen(rhs)) {
          // Bad input of the format --double_flag=
          tprintf("ERROR: Bad argument: %s\n", (*argv)[i]);
          exit(1);
        }
        if (!SafeAtod(rhs, &double_val)) {
          tprintf("ERROR: Could not parse double from %s in flag %s\n",
                  rhs, (*argv)[i]);
          exit(1);
        }
      } else {
        // We need to parse the next argument
        if (i + 1 >= *argc) {
          tprintf("ERROR: Could not find value argument for flag %s\n",
                  lhs.string());
          exit(1);
        } else {
          ++i;
          if (!SafeAtod((*argv)[i], &double_val)) {
            tprintf("ERROR: Could not parse double from %s\n", (*argv)[i]);
            exit(1);
          }
        }
      }
      SetDoubleFlagValue(lhs.string(), double_val);
      continue;
    }
    // Bool flag. Allow input forms --flag (equivalent to --flag=true),
    // --flag=false, --flag=true, --flag=0 and --flag=1
    bool bool_val;
    if (BoolFlagExists(lhs.string(), &bool_val)) {
      if (rhs == NULL) {
        // --flag form
        bool_val = true;
      } else {
        if (!strlen(rhs)) {
          // Bad input of the format --bool_flag=
          tprintf("ERROR: Bad argument: %s\n", (*argv)[i]);
          exit(1);
        }
        if (!strcmp(rhs, "false") || !strcmp(rhs, "0")) {
          bool_val = false;
        } else if (!strcmp(rhs, "true") || !strcmp(rhs, "1")) {
          bool_val = true;
        } else {
          tprintf("ERROR: Could not parse bool from flag %s\n", (*argv)[i]);
          exit(1);
        }
      }
      SetBoolFlagValue(lhs.string(), bool_val);
      continue;
    }
    // string flag
    const char* string_val;
    if (StringFlagExists(lhs.string(), &string_val)) {
      if (rhs != NULL) {
        string_val = rhs;
      } else {
        // Pick the next argument
        if (i + 1 >= *argc) {
          tprintf("ERROR: Could not find string value for flag %s\n",
                  lhs.string());
          exit(1);
        } else {
          string_val = (*argv)[++i];
        }
      }
      SetStringFlagValue(lhs.string(), string_val);
      continue;
    }
    // Flag was not found. Exit with an error message.
    tprintf("ERROR: Non-existent flag %s\n", (*argv)[i]);
    exit(1);
  }  // for each argv
  if (remove_flags) {
    // Keep argv[0] (program name) and shift past the consumed flags.
    (*argv)[i - 1] = (*argv)[0];
    (*argv) += (i - 1);
    (*argc) -= (i - 1);
  }
}
} // namespace tesseract
#else
#include "base/init_google.h"
namespace tesseract {
// Non-USE_STD_NAMESPACE build: delegate all flag parsing to InitGoogle(),
// which provides the same contract (parse flags, optionally strip them
// from argc/argv) as the hand-rolled parser in the other branch.
void ParseCommandLineFlags(const char* usage,
                           int* argc, char*** argv,
                           const bool remove_flags) {
  InitGoogle(usage, argc, argv, remove_flags);
}
} // namespace tesseract
#endif
| 1080228-arabicocr11 | training/commandlineflags.cpp | C++ | asf20 | 10,372 |
/**********************************************************************
* File: commandlineflags.h
* Description: Header file for commandline flag parsing.
* Author: Ranjith Unnikrishnan
* Created: July 2013
*
* (C) Copyright 2013, Google Inc.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
#ifndef TESSERACT_TRAINING_COMMANDLINEFLAGS_H_
#define TESSERACT_TRAINING_COMMANDLINEFLAGS_H_
#ifdef USE_STD_NAMESPACE
#include <stdlib.h>
#include "tprintf.h"
#include "params.h"
// Standalone build: map the FLAGS_* macros onto Tesseract's own Param
// system (INT_VAR / double_VAR / BOOL_VAR / STRING_VAR from params.h).
// Each DEFINE-style macro creates a global named FLAGS_<name>; each
// DECLARE-style macro produces the matching extern declaration.
#define INT_PARAM_FLAG(name, val, comment) \
  INT_VAR(FLAGS_##name, val, comment)
#define DECLARE_INT_PARAM_FLAG(name) \
  extern INT_VAR_H(FLAGS_##name, 0, "")
#define DOUBLE_PARAM_FLAG(name, val, comment) \
  double_VAR(FLAGS_##name, val, comment)
// NOTE(review): the default here is "" rather than a numeric 0 — this
// appears to rely on double_VAR_H ignoring the value in declarations;
// confirm against params.h before changing.
#define DECLARE_DOUBLE_PARAM_FLAG(name) \
  extern double_VAR_H(FLAGS_##name, "", "")
#define BOOL_PARAM_FLAG(name, val, comment) \
  BOOL_VAR(FLAGS_##name, val, comment)
#define DECLARE_BOOL_PARAM_FLAG(name) \
  extern BOOL_VAR_H(FLAGS_##name, 0, "")
#define STRING_PARAM_FLAG(name, val, comment) \
  STRING_VAR(FLAGS_##name, val, comment)
#define DECLARE_STRING_PARAM_FLAG(name) \
  extern STRING_VAR_H(FLAGS_##name, "", "")
#else
#include "base/commandlineflags.h"
// Google-internal build: map the same macros straight onto gflags-style
// DEFINE_*/DECLARE_* so callers are source-compatible in both builds.
#define INT_PARAM_FLAG(name, val, comment) \
  DEFINE_int32(name, val, comment)
#define DECLARE_INT_PARAM_FLAG(name) \
  DECLARE_int32(name)
#define DOUBLE_PARAM_FLAG(name, val, comment) \
  DEFINE_double(name, val, comment)
#define DECLARE_DOUBLE_PARAM_FLAG(name) \
  DECLARE_double(name)
#define BOOL_PARAM_FLAG(name, val, comment) \
  DEFINE_bool(name, val, comment)
#define DECLARE_BOOL_PARAM_FLAG(name) \
  DECLARE_bool(name)
#define STRING_PARAM_FLAG(name, val, comment) \
  DEFINE_string(name, val, comment)
#define DECLARE_STRING_PARAM_FLAG(name) \
  DECLARE_string(name)
#endif
namespace tesseract {
// Parse commandline flags and values. Prints the usage string and exits on
// input of --help or --helpshort.
//
// If remove_flags is true, the argv pointer is advanced so that (*argv)[1]
// points to the first non-flag argument, (*argv)[0] points to the same string
// as before, and argc is decremented to reflect the new shorter length of argv.
// eg. If the input *argv is
// { "program", "--foo=4", "--bar=true", "file1", "file2" } with *argc = 5, the
// output *argv is { "program", "file1", "file2" } with *argc = 3
void ParseCommandLineFlags(const char* usage, int* argc,
char*** argv, const bool remove_flags);
}
#endif // TESSERACT_TRAINING_COMMANDLINEFLAGS_H_
| 1080228-arabicocr11 | training/commandlineflags.h | C++ | asf20 | 3,165 |
/**********************************************************************
* File: icuerrorcode.h
* Description: Wrapper class for UErrorCode, with conversion operators for
* direct use in ICU C and C++ APIs.
* Author: Fredrik Roubert
* Created: Thu July 4 2013
*
* Features:
* - The constructor initializes the internal UErrorCode to U_ZERO_ERROR,
* removing one common source of errors.
* - Same use in C APIs taking a UErrorCode* (pointer) and C++ taking
* UErrorCode& (reference), via conversion operators.
* - Automatic checking for success when it goes out of scope. On failure,
* the destructor will log an error message and exit.
*
* Most of ICU will handle errors gracefully and provide sensible fallbacks.
* Using IcuErrorCode, it is therefore possible to write very compact code
* that does sensible things on failure and provides logging for debugging.
*
* Example:
* IcuErrorCode icuerrorcode;
* return collator.compareUTF8(a, b, icuerrorcode) == UCOL_EQUAL;
*
* (C) Copyright 2013, Google Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
**********************************************************************/
#ifndef TESSERACT_CCUTIL_ICUERRORCODE_H_
#define TESSERACT_CCUTIL_ICUERRORCODE_H_
#include "tprintf.h"
#include "unicode/errorcode.h" // From libicu
namespace tesseract {
// RAII wrapper around ICU's UErrorCode: initializes to U_ZERO_ERROR and,
// if still a failure when it goes out of scope, logs the error name and
// terminates the process. See the file header for usage examples.
class IcuErrorCode : public icu::ErrorCode {
 public:
  IcuErrorCode() {}
  virtual ~IcuErrorCode() {
    // Any unhandled ICU failure is fatal; report before exiting.
    if (isFailure()) {
      handleFailure();
    }
  }

 protected:
  // Logs the ICU error name and exits with the error code as the status.
  // The trailing newline terminates the log line (the original message
  // lacked one, leaving the error glued to subsequent output).
  virtual void handleFailure() const {
    tprintf("ICU ERROR: %s\n", errorName());
    exit(errorCode);
  }

 private:
  // Disallow implicit copying of object.
  IcuErrorCode(const IcuErrorCode&);
  void operator=(const IcuErrorCode&);
};
} // namespace tesseract
#endif // TESSERACT_CCUTIL_ICUERRORCODE_H_
| 1080228-arabicocr11 | training/icuerrorcode.h | C++ | asf20 | 2,339 |
/**********************************************************************
* File: degradeimage.cpp
* Description: Function to degrade an image (usually of text) as if it
* has been printed and then scanned.
* Authors: Ray Smith
* Created: Tue Nov 19 2013
*
* (C) Copyright 2013, Google Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
**********************************************************************/
#include "degradeimage.h"
#include <stdlib.h>
#include "allheaders.h" // from leptonica
#include "helpers.h" // For TRand.
namespace tesseract {
// Tuning constants used by DegradeImage() below.
// Rotation is +/- kRotationRange radians.
const float kRotationRange = 0.02f;
// Number of grey levels to shift by for each exposure step.
const int kExposureFactor = 16;
// Salt and pepper noise is +/- kSaltnPepper.
const int kSaltnPepper = 5;
// Min sum of width + height on which to operate the ramp.
const int kMinRampSize = 1000;
// Degrade the pix as if by a print/copy/scan cycle with exposure > 0
// corresponding to darkening on the copier and <0 lighter and 0 not copied.
// Exposures in [-2,2] are most useful, with -3 and 3 being extreme.
// If rotation is NULL, rotation is skipped. If *rotation is non-zero, the pix
// is rotated by *rotation else it is randomly rotated and *rotation is
// modified.
//
// HOW IT WORKS:
// Most of the process is really dictated by the fact that the minimum
// available convolution is 3X3, which is too big really to simulate a
// good quality print/scan process. (2X2 would be better.)
// 1 pixel wide inputs are heavily smeared by the 3X3 convolution, making the
// images generally biased to being too light, so most of the work is to make
// them darker. 3 levels of thickening/darkening are achieved with 2 dilations,
// (using a greyscale erosion) one heavy (by being before convolution) and one
// light (after convolution).
// With no dilation, after covolution, the images are so light that a heavy
// constant offset is required to make the 0 image look reasonable. A simple
// constant offset multiple of exposure to undo this value is enough to achieve
// all the required lightening. This gives the advantage that exposure level 1
// with a single dilation gives a good impression of the broken-yet-too-dark
// problem that is often seen in scans.
// A small random rotation gives some varying greyscale values on the edges,
// and some random salt and pepper noise on top helps to realistically jaggy-up
// the edges.
// Finally a greyscale ramp provides a continuum of effects between exposure
// levels.
// Takes ownership of input (it is destroyed here) and returns a new 8-bpp
// degraded Pix. See the long comment above for the overall algorithm.
Pix* DegradeImage(Pix* input, int exposure, TRand* randomizer,
                  float* rotation) {
  // Work in 8-bit grey throughout; the original input is consumed.
  Pix* pix = pixConvertTo8(input, false);
  pixDestroy(&input);
  input = pix;
  int width = pixGetWidth(input);
  int height = pixGetHeight(input);
  if (exposure >= 2) {
    // An erosion simulates the spreading darkening of a dark copy.
    // This is backwards to binary morphology,
    // see http://www.leptonica.com/grayscale-morphology.html
    pix = input;
    input = pixErodeGray(pix, 3, 3);
    pixDestroy(&pix);
  }
  // A convolution is essential to any mode as no scanner produces an
  // image as sharp as the electronic image.
  pix = pixBlockconv(input, 1, 1);
  pixDestroy(&input);
  // A small random rotation helps to make the edges jaggy in a realistic way.
  if (rotation != NULL) {
    float radians_clockwise = 0.0f;
    // NOTE(review): float compared against zero — any non-zero *rotation
    // (however tiny) is used verbatim instead of a random angle.
    if (*rotation) {
      radians_clockwise = *rotation;
    } else if (randomizer != NULL) {
      radians_clockwise = randomizer->SignedRand(kRotationRange);
    }
    input = pixRotate(pix, radians_clockwise,
                      L_ROTATE_AREA_MAP, L_BRING_IN_WHITE,
                      0, 0);
    // Rotate the boxes to match.
    *rotation = radians_clockwise;
    pixDestroy(&pix);
  } else {
    input = pix;
  }
  if (exposure >= 3 || exposure == 1) {
    // Erosion after the convolution is not as heavy as before, so it is
    // good for level 1 and in addition as a level 3.
    // This is backwards to binary morphology,
    // see http://www.leptonica.com/grayscale-morphology.html
    pix = input;
    input = pixErodeGray(pix, 3, 3);
    pixDestroy(&pix);
  }
  // The convolution really needed to be 2x2 to be realistic enough, but
  // we only have 3x3, so we have to bias the image darker or lose thin
  // strokes.
  int erosion_offset = 0;
  // For light and 0 exposure, there is no dilation, so compensate for the
  // convolution with a big darkening bias which is undone for lighter
  // exposures.
  if (exposure <= 0)
    erosion_offset = -3 * kExposureFactor;
  // Add in a general offset of the greyscales for the exposure level so
  // a threshold of 128 gives a reasonable binary result.
  erosion_offset -= exposure * kExposureFactor;
  // Add a gradual fade over the page and a small amount of salt and pepper
  // noise to simulate noise in the sensor/paper fibres and varying
  // illumination.
  l_uint32* data = pixGetData(input);
  for (int y = 0; y < height; ++y) {
    for (int x = 0; x < width; ++x) {
      int pixel = GET_DATA_BYTE(data, x);
      if (randomizer != NULL)
        pixel += randomizer->IntRand() % (kSaltnPepper*2 + 1) - kSaltnPepper;
      // Diagonal ramp: darker at top-left, lighter at bottom-right.
      if (height + width > kMinRampSize)
        pixel -= (2*x + y) * 32 / (height + width);
      pixel += erosion_offset;
      // Clamp to the valid 8-bit grey range.
      if (pixel < 0)
        pixel = 0;
      if (pixel > 255)
        pixel = 255;
      SET_DATA_BYTE(data, x, pixel);
    }
    // Advance one raster row (wpl = words per line).
    data += input->wpl;
  }
  return input;
}
} // namespace tesseract
| 1080228-arabicocr11 | training/degradeimage.cpp | C++ | asf20 | 6,032 |
///////////////////////////////////////////////////////////////////////
// File: ambiguous_words.cpp
// Description: A program that takes a text file with a list of words as
// input (one per line) and outputs a file with the words
// that were found in the dictionary followed by the words
// that are ambiguous to them.
// Author: Rika Antonova
// Created: Fri Oct 21 11:26:43 PDT 2011
//
// (C) Copyright 2011, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
//
#include <stdio.h>
#include "baseapi.h"
#include "helpers.h"
#include "strngs.h"
#include "dict.h"
#include "tesseractclass.h"
// Reads a wordlist (one word per line), runs each word through
// Dict::NoDangerousAmbig(), which records ambiguities to the file named by
// the output_ambig_words_file variable. Usage:
//   ambiguous_words [-l lang] tessdata_dir wordlist_file output_file
int main(int argc, char** argv) {
  // Parse input arguments: either 3 positional args, or "-l lang" + 3.
  if (argc != 4 && (argc != 6 || strcmp(argv[1], "-l") != 0)) {
    // Fixed typo: "ambiguious" -> "ambiguous".
    printf("Usage: %s [-l lang] tessdata_dir wordlist_file"
           " output_ambiguous_wordlist_file\n", argv[0]);
    return 1;
  }
  int argv_offset = 0;
  STRING lang;
  if (argc == 6) {
    lang = argv[2];
    argv_offset = 2;
  } else {
    lang = "eng";  // Default language when -l is not given.
  }
  const char *tessdata_dir = argv[++argv_offset];
  const char *input_file_str = argv[++argv_offset];
  const char *output_file_str = argv[++argv_offset];
  // Initialize Tesseract; route ambiguity output to output_file_str via
  // the output_ambig_words_file variable.
  tesseract::TessBaseAPI api;
  GenericVector<STRING> vars_vec;
  GenericVector<STRING> vars_values;
  vars_vec.push_back("output_ambig_words_file");
  vars_values.push_back(output_file_str);
  api.Init(tessdata_dir, lang.string(), tesseract::OEM_TESSERACT_ONLY,
           NULL, 0, &vars_vec, &vars_values, false);
  tesseract::Dict &dict = api.tesseract()->getDict();
  FILE *input_file = fopen(input_file_str, "rb");
  if (input_file == NULL) {
    tprintf("Failed to open input wordlist file %s\n", input_file_str);
    exit(1);
  }
  char str[CHARS_PER_LINE];
  // Read word list and call Dict::NoDangerousAmbig() for each word
  // to record ambiguities in the output file.
  while (fgets(str, CHARS_PER_LINE, input_file) != NULL) {
    chomp_string(str);  // remove newline
    WERD_CHOICE word(str, dict.getUnicharset());
    dict.NoDangerousAmbig(&word, NULL, false, NULL);
  }
  // Clean up.
  fclose(input_file);
  return 0;
}
| 1080228-arabicocr11 | training/ambiguous_words.cpp | C++ | asf20 | 2,739 |
// Copyright 2008 Google Inc. All Rights Reserved.
// Author: scharron@google.com (Samuel Charron)
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef TESSERACT_TRAINING_COMMONTRAINING_H__
#define TESSERACT_TRAINING_COMMONTRAINING_H__
#include "cluster.h"
#include "commandlineflags.h"
#include "featdefs.h"
#include "intproto.h"
#include "oldlist.h"
namespace tesseract {
class Classify;
class MasterTrainer;
class ShapeTable;
}
//////////////////////////////////////////////////////////////////////////////
// Globals ///////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////
extern FEATURE_DEFS_STRUCT feature_defs;
// Must be defined in the file that "implements" commonTraining facilities.
extern CLUSTERCONFIG Config;
//////////////////////////////////////////////////////////////////////////////
// Structs ///////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////
// A labeled list of training samples. Built by ReadTrainingSamples() and
// consumed by the clustering helpers below.
typedef struct
{
  char *Label;            // owned label string for this sample group
  int SampleCount;        // total number of samples in List
  int font_sample_count;  // samples from the current font
  LIST List;              // the samples themselves
}
LABELEDLISTNODE, *LABELEDLIST;
// A labeled class used when merging prototypes into CLASS_TYPE structures.
typedef struct
{
  char* Label;                     // owned label string for the class
  int NumMerged[MAX_NUM_PROTOS];   // merge counts per prototype slot
  CLASS_TYPE Class;                // the merged class being built
}MERGE_CLASS_NODE;
typedef MERGE_CLASS_NODE* MERGE_CLASS;
//////////////////////////////////////////////////////////////////////////////
// Functions /////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////
void ParseArguments(int* argc, char*** argv);
namespace tesseract {
// Helper loads shape table from the given file.
ShapeTable* LoadShapeTable(const STRING& file_prefix);
// Helper to write the shape_table.
void WriteShapeTable(const STRING& file_prefix, const ShapeTable& shape_table);
// Creates a MasterTraininer and loads the training data into it:
// Initializes feature_defs and IntegerFX.
// Loads the shape_table if shape_table != NULL.
// Loads initial unicharset from -U command-line option.
// If FLAGS_input_trainer is set, loads the majority of data from there, else:
// Loads font info from -F option.
// Loads xheights from -X option.
// Loads samples from .tr files in remaining command-line args.
// Deletes outliers and computes canonical samples.
// If FLAGS_output_trainer is set, saves the trainer for future use.
// Computes canonical and cloud features.
// If shape_table is not NULL, but failed to load, make a fake flat one,
// as shape clustering was not run.
MasterTrainer* LoadTrainingData(int argc, const char* const * argv,
bool replication,
ShapeTable** shape_table,
STRING* file_prefix);
} // namespace tesseract.
// Returns the next non-flag filename from the command line, or NULL.
const char *GetNextFilename(int argc, const char* const * argv);
// Lookup / construction helpers for LABELEDLIST collections.
LABELEDLIST FindList(
    LIST List,
    char *Label);
LABELEDLIST NewLabeledList(
    const char *Label);
// Reads .tr-format samples from file into training_samples, capped at
// max_samples per class.
void ReadTrainingSamples(const FEATURE_DEFS_STRUCT& feature_defs,
                         const char *feature_name, int max_samples,
                         UNICHARSET* unicharset,
                         FILE* file, LIST* training_samples);
void WriteTrainingSamples(
    const FEATURE_DEFS_STRUCT &FeatureDefs,
    char *Directory,
    LIST CharList,
    const char *program_feature_type);
void FreeTrainingSamples(
    LIST CharList);
void FreeLabeledList(
    LABELEDLIST LabeledList);
void FreeLabeledClassList(
    LIST ClassListList);
// Builds a CLUSTERER loaded with the features of CharSample, ready for
// ClusterSamples().
CLUSTERER *SetUpForClustering(
    const FEATURE_DEFS_STRUCT &FeatureDefs,
    LABELEDLIST CharSample,
    const char *program_feature_type);
LIST RemoveInsignificantProtos(
    LIST ProtoList,
    BOOL8 KeepSigProtos,
    BOOL8 KeepInsigProtos,
    int N);
void CleanUpUnusedData(
    LIST ProtoList);
void MergeInsignificantProtos(
    LIST ProtoList,
    const char *label,
    CLUSTERER *Clusterer,
    CLUSTERCONFIG *Config);
// Lookup / construction helpers for MERGE_CLASS collections.
MERGE_CLASS FindClass(
    LIST List,
    const char *Label);
MERGE_CLASS NewLabeledClass(
    const char *Label);
// NOTE(review): duplicate declaration — FreeTrainingSamples is already
// declared above; harmless but one of the two could be removed.
void FreeTrainingSamples(
    LIST CharList);
CLASS_STRUCT* SetUpForFloat2Int(const UNICHARSET& unicharset,
                                LIST LabeledClassList);
void Normalize(
    float *Values);
void FreeNormProtoList(
    LIST CharList);
void AddToNormProtosList(
    LIST* NormProtoList,
    LIST ProtoList,
    char *CharName);
int NumberOfProtos(
    LIST ProtoList,
    BOOL8 CountSigProtos,
    BOOL8 CountInsigProtos);
void allocNormProtos();
| 1080228-arabicocr11 | training/commontraining.h | C++ | asf20 | 5,275 |
#!/bin/bash
# (C) Copyright 2014, Google Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This script provides an easy way to execute various phases of training
# Tesseract. For a detailed description of the phases, see
# https://code.google.com/p/tesseract-ocr/wiki/TrainingTesseract3
#
# USAGE:
#
# tesstrain.sh
# --bin_dir PATH # Location of training program.
# --fontlist FONTS_STR # A plus-separated list of fontnames to train on.
# --fonts_dir FONTS_PATH # Path to font files.
# --lang LANG_CODE # ISO 639 code.
# --langdata_dir DATADIR # Path to tesseract/training/langdata directory.
# --output_dir OUTPUTDIR # Location of output traineddata file.
# --overwrite # Safe to overwrite files in output_dir.
# --run_shape_clustering # Run shape clustering (use for Indic langs).
#
# OPTIONAL flags for input data. If unspecified we will look for them in
# the langdata_dir directory.
# --training_text TEXTFILE # Text to render and use for training.
# --wordlist WORDFILE # Word list for the language ordered by
# # decreasing frequency.
#
# OPTIONAL flag to specify location of existing traineddata files, required
# during feature extraction. If unspecified will use TESSDATA_PREFIX defined in
# the current environment.
# --tessdata_dir TESSDATADIR # Path to tesseract/tessdata directory.
#
# NOTE:
# The font names specified in --fontlist need to be recognizable by Pango using
# fontconfig. An easy way to list the canonical names of all fonts available on
# your system is to run text2image with --list_available_fonts and the
# appropriate --fonts_dir path.
# Default values for the command-line flags parsed by parse_flags() below;
# each can be overridden on the command line.
FONTS=(
    "Arial" \
    "Times New Roman," \
)
# NOTE(review): the trailing comma in "Times New Roman," is presumably the
# fontconfig/Pango canonical form expected by text2image — verify before
# "fixing" it.
FONTS_DIR="/usr/share/fonts/truetype/"
OUTPUT_DIR="/tmp/tesstrain/tessdata"
OVERWRITE=0
RUN_SHAPE_CLUSTERING=0
EXTRACT_FONT_PROPERTIES=1
WORKSPACE_DIR="/tmp/tesstrain"
# Logging helper functions.
# Log a message to stdout and append it to ${LOG_FILE}.
# "$*" is quoted so whitespace in the message is preserved and glob
# characters (e.g. "*.tr" in a logged command line) are not expanded.
# ${LOG_FILE} is deliberately left unquoted: before parse_flags() runs it
# is unset, and an unquoted empty expansion makes tee copy stdin to stdout
# without trying to open a file named "".
tlog() {
  echo -e "$*" 2>&1 1>&2 | tee -a ${LOG_FILE}
}
# Log an error message (to stdout and ${LOG_FILE}) and exit the script.
# The message is quoted to preserve whitespace and suppress glob expansion;
# ${LOG_FILE} stays unquoted so an unset log file degrades gracefully.
err() {
  echo -e "ERROR: $*" 2>&1 1>&2 | tee -a ${LOG_FILE}
  exit 1
}
# Helper function to run a command and append its output to a log. Aborts early
# if the program file is not found.
# Usage: run_cmd CMD ARG1 ARG2...
# Helper function to run a command and append its output to a log. Aborts early
# if the program file is not found.
# Usage: run_cmd CMD ARG1 ARG2...
run_cmd() {
  local cmd=$1
  shift
  if [[ ! -x ${cmd} ]]; then
    err "File ${cmd} not found"
  fi
  tlog "[$(date)] ${cmd} $@"
  ${cmd} "$@" 2>&1 1>&2 | tee -a ${LOG_FILE}
  # Check completion status. $? would be tee's exit status (the last
  # command in the pipeline); PIPESTATUS[0] holds the status of ${cmd}.
  if [[ ${PIPESTATUS[0]} -gt 0 ]]; then
    err "Program $(basename ${cmd}) failed. Abort."
  fi
}
# Check if all the given files exist, or exit otherwise.
# Used to check required input files and produced output files in each phase.
# Usage: check_file_readable FILE1 FILE2...
# Check if all the given files exist, or exit otherwise.
# Used to check required input files and produced output files in each phase.
# Usage: check_file_readable FILE1 FILE2...
check_file_readable() {
  # "$@" (quoted) keeps each argument intact even if a path contains
  # whitespace; the original unquoted $@ would split such paths.
  for file in "$@"; do
    if [[ ! -r ${file} ]]; then
      err "${file} does not exist or is not readable"
    fi
  done
}
# Write a file (with name specified in $2) with records that account for
# n% (specified in $3) of the total weights of records in the input file
# (input file name specified in $1). The input file should have one record
# per line along with its weight separated by \t. The records should be
# sorted in non-ascending order of frequency.
# If $4 is true the first record is skipped.
# USAGE: discard_tail INPUT_FILE OUTPUT_FILE PERCENTAGE
discard_tail() {
  local infile=$1
  local outfile=$2
  local pct=$3
  local skip_first=$4
  # NOTE(review): [[ ${skip_first} ]] is true for ANY non-empty string
  # (including "0"); callers pass 1 or nothing — confirm before reuse.
  local more_arg="1";
  if [[ ${skip_first} ]]; then
    more_arg="2"
  fi
  # Sum the tab-separated weights (column 2), ignoring the space record.
  local sum=$(tail -n +${more_arg} ${infile} \
    | awk 'BEGIN {FS = "\t"} {if ($1 != " ") {s=s+$2}}; END {print s}')
  if [[ ${sum} == "" ]]; then sum=0
  fi
  # Weight budget: pct% of the total (integer arithmetic).
  local limit=$((${sum}*${pct}/100))
  # Emit records (column 1) until the budget is spent; the space record
  # passes through without consuming budget. Appends to outfile.
  tail -n +${more_arg} ${infile} | awk 'BEGIN {FS = "\t"}
    {if (s > 0) {print $1; if ($1 != " ") {s=s-$2;}}}' s=${limit} \
    >> ${outfile}
}
# Set global path variables that are based on parsed flags.
# Set global path variables that are based on parsed flags.
# Each training binary lives directly under ${BINDIR}; derive all the
# *_EXE globals from a single VAR:binary mapping table.
set_prog_paths() {
  if [[ -z ${BINDIR} ]]; then
    err "Need to specify location of program files"
  fi
  local pair
  for pair in \
      CN_TRAINING_EXE:cntraining \
      COMBINE_TESSDATA_EXE:combine_tessdata \
      MF_TRAINING_EXE:mftraining \
      SET_UNICHARSET_PROPERTIES_EXE:set_unicharset_properties \
      SHAPE_TRAINING_EXE:shapeclustering \
      TESSERACT_EXE:tesseract \
      TEXT2IMAGE_EXE:text2image \
      UNICHARSET_EXTRACTOR_EXE:unicharset_extractor \
      WORDLIST2DAWG_EXE:wordlist2dawg ; do
    # ${pair%%:*} is the variable name, ${pair#*:} the binary name.
    eval "${pair%%:*}=\"\${BINDIR}/${pair#*:}\""
  done
}
# Sets the named variable to given value. Aborts if the value is missing or
# if it looks like a flag.
# Usage: parse_value VAR_NAME VALUE
# Sets the named variable to given value. Aborts if the value is missing or
# if it looks like a flag.
# Usage: parse_value VAR_NAME VALUE
parse_value() {
  local val="$2"
  if [[ -z $val ]]; then
    # err already exits; the bare "exit" that followed it was unreachable.
    err "Missing value for variable $1"
  fi
  if [[ ${val:0:2} == "--" ]]; then
    err "Invalid value $val passed for variable $1"
  fi
  eval $1=\"$val\"
}
# Does simple command-line parsing and initialization.
# Does simple command-line parsing and initialization.
# Walks the global ARGV array, dispatching on each --flag; value-taking
# flags consume the following element (j = i+1).
parse_flags() {
  local i=0
  while test $i -lt ${#ARGV[@]}; do
    local j=$((i+1))
    case ${ARGV[$i]} in
      --)
        break;;
      --bin_dir)
        parse_value "BINDIR" ${ARGV[$j]}
        i=$j ;;
      --fontlist)   # Expect a plus-separated list of names
        if [[ -z ${ARGV[$j]} ]] || [[ ${ARGV[$j]:0:2} == "--" ]]; then
          err "Invalid value passed to --fontlist"
        fi
        # Temporarily split on '+' to fill the FONTS array, then restore IFS.
        local ofs=$IFS
        IFS='+'
        FONTS=( ${ARGV[$j]} )
        IFS=$ofs
        i=$j ;;
      --fonts_dir)
        parse_value "FONTS_DIR" ${ARGV[$j]}
        i=$j ;;
      --lang)
        parse_value "LANG_CODE" ${ARGV[$j]}
        i=$j ;;
      --langdata_dir)
        parse_value "LANGDATA_ROOT" ${ARGV[$j]}
        i=$j ;;
      --output_dir)
        parse_value "OUTPUT_DIR" ${ARGV[$j]}
        i=$j ;;
      --overwrite)
        OVERWRITE=1 ;;
      --extract_font_properties)
        EXTRACT_FONT_PROPERTIES=1 ;;
      --noextract_font_properties)
        EXTRACT_FONT_PROPERTIES=0 ;;
      --run_shape_clustering)
        RUN_SHAPE_CLUSTERING=1 ;;
      --tessdata_dir)
        parse_value "TESSDATA_DIR" ${ARGV[$j]}
        i=$j ;;
      --training_text)
        parse_value "TRAINING_TEXT" "${ARGV[$j]}"
        i=$j ;;
      --wordlist)
        parse_value "WORDLIST_FILE" ${ARGV[$j]}
        i=$j ;;
      *)
        err "Unrecognized argument ${ARGV[$i]}" ;;
    esac
    i=$((i+1))
  done
  # Validate the required flags.
  if [[ -z ${LANG_CODE} ]]; then
    err "Need to specify a language --lang"
  fi
  if [[ -z ${BINDIR} ]]; then
    err "Need to specify path to built binaries --bin_dir"
  fi
  if [[ -z ${LANGDATA_ROOT} ]]; then
    err "Need to specify path to language files --langdata_dir"
  fi
  if [[ -z ${TESSDATA_DIR} ]]; then
    if [[ -z ${TESSDATA_PREFIX} ]]; then
      err "Need to specify a --tessdata_dir or have a "\
"TESSDATA_PREFIX variable defined in your environment"
    else
      TESSDATA_DIR="${TESSDATA_PREFIX}"
    fi
  fi
  set_prog_paths
  # Location where intermediate files will be created.
  TRAINING_DIR=${WORKSPACE_DIR}/${LANG_CODE}
  # Location of log file for the whole run.
  LOG_FILE=${TRAINING_DIR}/tesstrain.log
  # Take training text and wordlist from the langdata directory if not
  # specified in the command-line.
  if [[ -z ${TRAINING_TEXT} ]]; then
    TRAINING_TEXT=${LANGDATA_ROOT}/${LANG_CODE}/${LANG_CODE}.training_text
  fi
  if [[ -z ${WORDLIST_FILE} ]]; then
    WORDLIST_FILE=${LANGDATA_ROOT}/${LANG_CODE}/${LANG_CODE}.wordlist.clean
  fi
  # Derived input/output paths used by the later phases.
  WORD_BIGRAMS_FILE=${LANGDATA_ROOT}/${LANG_CODE}/${LANG_CODE}.word.bigrams.clean
  NUMBERS_FILE=${LANGDATA_ROOT}/${LANG_CODE}/${LANG_CODE}.numbers
  PUNC_FILE=${LANGDATA_ROOT}/${LANG_CODE}/${LANG_CODE}.punc
  BIGRAM_FREQS_FILE=${TRAINING_TEXT}.bigram_freqs
  UNIGRAM_FREQS_FILE=${TRAINING_TEXT}.unigram_freqs
  TRAIN_NGRAMS_FILE=${TRAINING_TEXT}.train_ngrams
}
# Phase I : Generate (I)mages from training text for each font.
# Phase I : Generate (I)mages from training text for each font.
# Renders ${TRAINING_TEXT} (and optionally an ngram page) with text2image,
# producing a .tif/.box pair per font in ${TRAINING_DIR}.
phaseI_generate_image() {
  tlog "\n=== Phase I: Generating training images ==="
  if [[ -z ${TRAINING_TEXT} ]] || [[ ! -r ${TRAINING_TEXT} ]]; then
    err "Could not find training text file ${TRAINING_TEXT}"
  fi
  BOX_PADDING="0"
  CHAR_SPACING="0.0"
  EXPOSURE="0"
  LEADING="32"
  NGRAM_CHAR_SPACING="0.0"
  # Fixed: this previously tested ${BIGRAM_FREQS}, an undefined variable,
  # so the ngram branch never ran; the file is ${BIGRAM_FREQS_FILE}.
  if (( ${EXTRACT_FONT_PROPERTIES} )) && [[ -r ${BIGRAM_FREQS_FILE} ]]; then
    # Parse .bigram_freqs file and compose a .train_ngrams file with text
    # for tesseract to recognize during training. Take only the ngrams whose
    # combined weight accounts for 99% of all the bigrams in the language.
    TMP_FILE="${TRAINING_DIR}/_tmp"
    cat ${BIGRAM_FREQS_FILE} > ${TMP_FILE}
    NGRAM_FRAC=$(cat ${BIGRAM_FREQS_FILE} \
      | awk '{s=s+$2}; END {print (s/100)*p}' p=99)
    cat ${BIGRAM_FREQS_FILE} | sort -rnk2 \
      | awk '{s=s+$2; if (s <= x) {printf "%s ", $1; } }' \
      x=${NGRAM_FRAC} > ${TRAIN_NGRAMS_FILE}
    check_file_readable ${TRAIN_NGRAMS_FILE}
  fi
  for font in "${FONTS[@]}"; do
    tlog "Rendering using ${font}"
    # Filesystem-safe font name: spaces -> underscores, commas dropped.
    fontname=$(echo ${font} | tr ' ' '_' | sed 's/,//g')
    outbase=${TRAINING_DIR}/${LANG_CODE}.${fontname}.exp${EXPOSURE}
    common_args="--leading=${LEADING} --fonts_dir=${FONTS_DIR} "
    common_args+=" --box_padding=${BOX_PADDING} --strip_unrenderable_words"
    run_cmd ${TEXT2IMAGE_EXE} ${common_args} \
      --char_spacing=${CHAR_SPACING} --exposure=${EXPOSURE} \
      --font="${font}" --outputbase=${outbase} --text=${TRAINING_TEXT}
    check_file_readable ${outbase}.box ${outbase}.tif
    if (( ${EXTRACT_FONT_PROPERTIES} )) &&
        [[ -r ${TRAIN_NGRAMS_FILE} ]]; then
      tlog "Rendering ngrams using ${font}"
      outbase=${TRAINING_DIR}/ngrams/${LANG_CODE}.ngrams.${fontname}.exp${EXPOSURE}
      run_cmd ${TEXT2IMAGE_EXE} ${common_args} \
        --char_spacing=${NGRAM_CHAR_SPACING} --exposure=${EXPOSURE} \
        --font="${font}" --outputbase=${outbase} \
        --box_padding=${BOX_PADDING} --render_ngrams=1 \
        --text=${TRAIN_NGRAMS_FILE}
      check_file_readable ${outbase}.box ${outbase}.tif
    fi
  done
}
# Phase UP : Generate (U)nicharset and (P)roperties file.
# Phase UP : Generate (U)nicharset and (P)roperties file.
# Extracts the unicharset from all .box files, renames it to
# ${LANG_CODE}.unicharset, and annotates it with script properties and
# x-heights. Sets the UNICHARSET_FILE and XHEIGHTS_FILE globals used by
# later phases.
phaseUP_generate_unicharset() {
  tlog "\n=== Phase UP: Generating unicharset and unichar properties files ==="
  box_files=$(ls ${TRAINING_DIR}/*.box)
  run_cmd ${UNICHARSET_EXTRACTOR_EXE} -D "${TRAINING_DIR}/" ${box_files}
  # unicharset_extractor always writes to "unicharset"; rename to the
  # language-specific file.
  outfile=${TRAINING_DIR}/unicharset
  UNICHARSET_FILE="${TRAINING_DIR}/${LANG_CODE}.unicharset"
  check_file_readable ${outfile}
  mv ${outfile} ${UNICHARSET_FILE}
  XHEIGHTS_FILE="${TRAINING_DIR}/${LANG_CODE}.xheights"
  check_file_readable ${UNICHARSET_FILE}
  run_cmd ${SET_UNICHARSET_PROPERTIES_EXE} \
    -U ${UNICHARSET_FILE} -O ${UNICHARSET_FILE} -X ${XHEIGHTS_FILE} \
    --script_dir=${LANGDATA_ROOT}
  check_file_readable ${XHEIGHTS_FILE}
}
# Phase D : Generate (D)awg files from unicharset file and wordlist files
# Phase D : Generate (D)awg files from unicharset file and wordlist files.
# Requires UNICHARSET_FILE to have been set by phaseUP_generate_unicharset.
# Each DAWG is only built if its source wordlist is readable.
phaseD_generate_dawg() {
  tlog "\n=== Phase D: Generating Dawg files ==="
  # Output files
  WORD_DAWG=${TRAINING_DIR}/${LANG_CODE}.word-dawg
  FREQ_DAWG=${TRAINING_DIR}/${LANG_CODE}.freq-dawg
  PUNC_DAWG=${TRAINING_DIR}/${LANG_CODE}.punc-dawg
  NUMBER_DAWG=${TRAINING_DIR}/${LANG_CODE}.number-dawg
  BIGRAM_DAWG=${TRAINING_DIR}/${LANG_CODE}.bigram-dawg
  # Word DAWG
  local freq_wordlist_file=${TRAINING_DIR}/${LANG_CODE}.wordlist.clean.freq
  if [[ -r ${WORDLIST_FILE} ]]; then
    tlog "Generating word Dawg"
    check_file_readable ${UNICHARSET_FILE}
    run_cmd ${WORDLIST2DAWG_EXE} -r 1 ${WORDLIST_FILE} ${WORD_DAWG} \
      ${UNICHARSET_FILE}
    check_file_readable ${WORD_DAWG}
    # The wordlist is ordered by decreasing frequency, so the first
    # FREQ_DAWG_SIZE lines are the most frequent words.
    FREQ_DAWG_SIZE=100
    head -n ${FREQ_DAWG_SIZE} ${WORDLIST_FILE} > ${freq_wordlist_file}
  fi
  # Freq-word DAWG
  if [[ -r ${freq_wordlist_file} ]]; then
    check_file_readable ${UNICHARSET_FILE}
    tlog "Generating frequent-word Dawg"
    run_cmd ${WORDLIST2DAWG_EXE} -r 1 ${freq_wordlist_file} ${FREQ_DAWG} \
      ${UNICHARSET_FILE}
    check_file_readable ${FREQ_DAWG}
  fi
  # Punctuation DAWG: prefer the language-specific punc file (top 99% by
  # weight), falling back to the shared common.punc.
  local punc_clean="${LANGDATA_ROOT}/common.punc"
  if [[ -r ${PUNC_FILE} ]]; then
    local top_punc_file=${TRAINING_DIR}/${LANG_CODE}.punc.top
    head -n 1 ${PUNC_FILE} | awk 'BEGIN {FS = "\t"} {print $1}' \
      > ${top_punc_file}
    discard_tail ${PUNC_FILE} ${top_punc_file} 99 1
    punc_clean="${top_punc_file}"
  fi
  # -r arguments to WORDLIST2DAWG_EXE denote RTL reverse policy
  # (see Trie::RTLReversePolicy enum in third_party/tesseract/dict/trie.h).
  # We specify 0/RRP_DO_NO_REVERSE when generating number DAWG,
  # 1/RRP_REVERSE_IF_HAS_RTL for freq and word DAWGS,
  # 2/RRP_FORCE_REVERSE for the punctuation DAWG.
  local punc_reverse_policy=0;
  if [[ ${LANG_CODE} == "heb" || ${LANG_CODE} == "ara" ]]; then
    punc_reverse_policy=2
  fi
  if [[ -r ${punc_clean} ]]; then
    run_cmd ${WORDLIST2DAWG_EXE} -r ${punc_reverse_policy} \
      ${punc_clean} ${PUNC_DAWG} ${UNICHARSET_FILE}
    check_file_readable ${PUNC_DAWG}
  fi
  # Numbers DAWG: top 85% of the numbers file by weight.
  if [[ -r ${NUMBERS_FILE} ]]; then
    local top_num_file=${TRAINING_DIR}/${LANG_CODE}.numbers.top
    head -n 1 ${NUMBERS_FILE} | awk 'BEGIN {FS = "\t"} {print $1}' \
      > ${top_num_file}
    discard_tail ${NUMBERS_FILE} ${top_num_file} 85 1
    run_cmd ${WORDLIST2DAWG_EXE} -r 0 \
      ${top_num_file} ${NUMBER_DAWG} ${UNICHARSET_FILE}
    check_file_readable ${NUMBER_DAWG}
  fi
  # Bigram dawg
  if [[ -r ${WORD_BIGRAMS_FILE} ]]; then
    run_cmd ${WORDLIST2DAWG_EXE} -r 1 \
      ${WORD_BIGRAMS_FILE} ${BIGRAM_DAWG} ${UNICHARSET_FILE}
    check_file_readable ${BIGRAM_DAWG}
  fi
}
# Phase E : (E)xtract .tr feature files from .tif/.box files
# Phase E : (E)xtract .tr feature files from .tif/.box files.
# Runs tesseract in box.train mode on every rendered page; TESSDATA_PREFIX
# is temporarily pointed at ${TESSDATA_DIR} and restored afterwards.
phaseE_extract_features() {
  tlog "\n=== Phase E: Extracting features ==="
  local box_config="box.train"
  TRAIN_EXPOSURES='0'
  # Collect the .tif pages for each exposure level.
  for exposure in ${TRAIN_EXPOSURES}; do
    img_files=${img_files}' '$(ls ${TRAINING_DIR}/*.exp${exposure}.tif)
  done
  # Use any available language-specific configs.
  local config=""
  if [[ -r ${LANGDATA_ROOT}/${LANG_CODE}/${LANG_CODE}.config ]]; then
    config=${LANGDATA_ROOT}/${LANG_CODE}/${LANG_CODE}.config
  fi
  OLD_TESSDATA_PREFIX=${TESSDATA_PREFIX}
  export TESSDATA_PREFIX=${TESSDATA_DIR}
  tlog "Using TESSDATA_PREFIX=${TESSDATA_PREFIX}"
  for img_file in ${img_files}; do
    # ${img_file%.*} strips the .tif extension to form the output base.
    run_cmd ${TESSERACT_EXE} ${img_file} ${img_file%.*} \
      ${box_config} ${config}
  done
  export TESSDATA_PREFIX=${OLD_TESSDATA_PREFIX}
}
# Phase C : (C)luster feature prototypes in .tr into normproto file (cnTraining)
# phaseC_cluster_prototypes ${TRAINING_DIR}/${LANG_CODE}.normproto
phaseC_cluster_prototypes() {
    tlog "\n=== Phase C: Clustering feature prototypes (cnTraining) ==="
    # cnTraining drops a file literally named "normproto" into the -D
    # directory; run it over every .tr file, then rename the output so it
    # carries the language code.
    run_cmd ${CN_TRAINING_EXE} -D "${TRAINING_DIR}/" \
        $(ls ${TRAINING_DIR}/*.tr)
    check_file_readable ${TRAINING_DIR}/normproto
    local renamed_proto=${TRAINING_DIR}/${LANG_CODE}.normproto
    mv ${TRAINING_DIR}/normproto ${renamed_proto}
}
# Phase S : (S)hape clustering
phaseS_cluster_shapes() {
# Shape clustering is opt-in; bail out unless the flag was set.
if (( ! ${RUN_SHAPE_CLUSTERING} )); then
return
fi
# NOTE(review): check_file_readable presumably aborts when the file is
# missing, which would make the else-branch below unreachable -- confirm.
check_file_readable ${LANGDATA_ROOT}/font_properties
local font_props=${LANGDATA_ROOT}/font_properties
if [[ -r ${font_props} ]]; then
font_props="-F ${font_props}"
else
font_props=""
fi
# Also pass x-heights data when the file exists and is non-empty.
if [[ -r ${TRAINING_DIR}/${LANG_CODE}.xheights ]] &&\
[[ -s ${TRAINING_DIR}/${LANG_CODE}.xheights ]]; then
font_props=${font_props}" -X ${TRAINING_DIR}/${LANG_CODE}.xheights"
fi
# Cluster the .tr features into a shapetable + merged unicharset.
run_cmd ${SHAPE_TRAINING_EXE} \
-D "${TRAINING_DIR}/" \
-U ${TRAINING_DIR}/${LANG_CODE}.unicharset \
-O ${TRAINING_DIR}/${LANG_CODE}.mfunicharset \
${font_props} \
$(ls ${TRAINING_DIR}/*.tr)
check_file_readable ${TRAINING_DIR}/shapetable \
${TRAINING_DIR}/${LANG_CODE}.mfunicharset
}
# Phase M : Clustering microfeatures (mfTraining)
phaseM_cluster_microfeatures() {
    tlog "\n=== Phase M : Clustering microfeatures (mfTraining) ==="
    # Declare font_props local: it was silently leaking into the global
    # scope, unlike the identical variable in phaseS_cluster_shapes.
    local font_props=${LANGDATA_ROOT}/font_properties
    if [[ -r ${font_props} ]]; then
        font_props="-F ${font_props}"
    else
        font_props=""
    fi
    # Also pass x-heights data when the file exists and is non-empty.
    if [[ -r ${TRAINING_DIR}/${LANG_CODE}.xheights ]] && \
        [[ -s ${TRAINING_DIR}/${LANG_CODE}.xheights ]]; then
        font_props=${font_props}" -X ${TRAINING_DIR}/${LANG_CODE}.xheights"
    fi
    run_cmd ${MF_TRAINING_EXE} \
        -D "${TRAINING_DIR}/" \
        -U ${TRAINING_DIR}/${LANG_CODE}.unicharset \
        -O ${TRAINING_DIR}/${LANG_CODE}.mfunicharset \
        ${font_props} \
        $(ls ${TRAINING_DIR}/*.tr)
    # mfTraining writes fixed file names into -D; verify them, then stamp
    # each one with the language code for combine_tessdata.
    check_file_readable ${TRAINING_DIR}/inttemp ${TRAINING_DIR}/shapetable \
        ${TRAINING_DIR}/pffmtable ${TRAINING_DIR}/${LANG_CODE}.mfunicharset
    mv ${TRAINING_DIR}/inttemp ${TRAINING_DIR}/${LANG_CODE}.inttemp
    mv ${TRAINING_DIR}/shapetable ${TRAINING_DIR}/${LANG_CODE}.shapetable
    mv ${TRAINING_DIR}/pffmtable ${TRAINING_DIR}/${LANG_CODE}.pffmtable
    mv ${TRAINING_DIR}/${LANG_CODE}.mfunicharset ${TRAINING_DIR}/${LANG_CODE}.unicharset
}
# Phase B : ambiguities training. Installs a hand-curated unicharambigs
# file from the langdata repo when one exists; generation of ambiguities
# is not implemented yet.
phaseB_generate_ambiguities() {
    tlog "\n=== Phase B : ambiguities training ==="
    local ambigs_src=${LANGDATA_ROOT}/${LANG_CODE}/${LANG_CODE}.unicharambigs
    # Check for manually created ambiguities data.
    if [[ ! -r ${ambigs_src} ]]; then
        tlog "No unicharambigs file found!"
        # TODO: Add support for generating ambiguities automatically.
        return
    fi
    tlog "Found file ${ambigs_src}"
    cp ${ambigs_src} ${TRAINING_DIR}/${LANG_CODE}.unicharambigs
    # Make it writable, as it may be read-only in the client.
    chmod u+w ${TRAINING_DIR}/${LANG_CODE}.unicharambigs
}
# Assembles the final ${LANG_CODE}.traineddata: stages optional langdata
# inputs into TRAINING_DIR, runs combine_tessdata, then copies the result
# into OUTPUT_DIR (refusing to clobber unless --overwrite was given).
make_traineddata() {
    tlog "\n=== Making final traineddata file ==="
    local lang_prefix=${LANGDATA_ROOT}/${LANG_CODE}/${LANG_CODE}
    # Combine available files for this language from the langdata dir.
    # The four optional inputs follow an identical copy-and-make-writable
    # pattern, so handle them in one loop instead of four copied branches.
    local ext
    for ext in config cube-unicharset cube-word-dawg params-model; do
        if [[ -r ${lang_prefix}.${ext} ]]; then
            tlog "Copying ${lang_prefix}.${ext} to ${TRAINING_DIR}"
            cp ${lang_prefix}.${ext} ${TRAINING_DIR}
            # Make the copy writable, as the source may be read-only.
            chmod u+w ${TRAINING_DIR}/${LANG_CODE}.${ext}
        fi
    done
    # Compose the traineddata file.
    run_cmd ${COMBINE_TESSDATA_EXE} ${TRAINING_DIR}/${LANG_CODE}.
    # Copy it to the output dir, overwriting only if allowed by the cmdline flag.
    if [[ ! -d ${OUTPUT_DIR} ]]; then
        tlog "Creating new directory ${OUTPUT_DIR}"
        mkdir -p ${OUTPUT_DIR}
    fi
    local destfile=${OUTPUT_DIR}/${LANG_CODE}.traineddata
    if [[ -f ${destfile} ]] && (( ! ${OVERWRITE} )); then
        err "File ${destfile} exists and no --overwrite specified"
    fi
    # Log message fixed to match the cp below (it previously said "Moving").
    tlog "Copying ${TRAINING_DIR}/${LANG_CODE}.traineddata to ${OUTPUT_DIR}"
    cp -f ${TRAINING_DIR}/${LANG_CODE}.traineddata ${destfile}
}
# ---- Main script ----
# Parse flags, reset the workspace, then run the training phases in order:
# I (image), UP (unicharset), D (dawg), E (feature extraction),
# C (prototype clustering), S (shape clustering), M (microfeatures),
# B (ambiguities), and finally traineddata assembly.
ARGV=("$@")
parse_flags
tlog "\n=== Starting training for language '${LANG_CODE}'"
tlog "Cleaning workspace directory ${TRAINING_DIR}..."
mkdir -p ${TRAINING_DIR}
# Wipe artifacts of any previous run so stale files cannot be picked up.
rm -fr ${TRAINING_DIR}/*
phaseI_generate_image
phaseUP_generate_unicharset
phaseD_generate_dawg
phaseE_extract_features
phaseC_cluster_prototypes
phaseS_cluster_shapes
phaseM_cluster_microfeatures
phaseB_generate_ambiguities
make_traineddata
tlog "\nCompleted training for language '${LANG_CODE}'\n"
| 1080228-arabicocr11 | training/tesstrain.sh | Shell | asf20 | 21,190 |
/**********************************************************************
* File: normstrngs.h
* Description: Utilities to normalize and manipulate UTF-32 and
* UTF-8 strings.
* Author: Ranjith Unnikrishnan
* Created: Thu July 4 2013
*
* (C) Copyright 2013, Google Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
**********************************************************************/
#ifndef TESSERACT_CCUTIL_NORMSTRNGS_H_
#define TESSERACT_CCUTIL_NORMSTRNGS_H_

#include "genericvector.h"
#include "strngs.h"

// A single UTF-32 code point.
typedef signed int char32;

namespace tesseract {

// UTF-8 to UTF-32 conversion function.
void UTF8ToUTF32(const char* utf8_str, GenericVector<char32>* str32);

// UTF-32 to UTF-8 conversion function.
void UTF32ToUTF8(const GenericVector<char32>& str32, STRING* utf8_str);

// Normalize a single char32 using NFKC + OCR-specific transformations.
// NOTE that proper NFKC may require multiple characters as input. The
// assumption of this function is that the input is already as fully composed
// as it can be, but may require some compatibility normalizations or just
// OCR evaluation related normalizations.
void NormalizeChar32(char32 ch, GenericVector<char32>* str);

// Normalize a UTF8 string. Same as above, but for UTF8-encoded strings, that
// can contain multiple UTF32 code points.
STRING NormalizeUTF8String(const char* str8);

// Apply just the OCR-specific normalizations and return the normalized char.
char32 OCRNormalize(char32 ch);

// Returns true if the OCRNormalized ch1 and ch2 are the same.
bool IsOCREquivalent(char32 ch1, char32 ch2);

// Returns true if the value lies in the range of valid unicodes.
bool IsValidCodepoint(const char32 ch);

// Returns true if a code point has the White_Space Unicode property.
bool IsWhitespace(const char32 ch);

// Returns true if every char in the given (null-terminated) string has the
// White_Space Unicode property.
bool IsUTF8Whitespace(const char* text);

// Returns the length of bytes of the prefix of 'text' that have the White_Space
// unicode property.
int SpanUTF8Whitespace(const char* text);

// Returns the length of bytes of the prefix of 'text' that DO NOT have the
// White_Space unicode property.
int SpanUTF8NotWhitespace(const char* text);

// Returns true if the char is interchange valid i.e. no C0 or C1 control codes
// (other than CR LF HT FF) and no non-characters.
bool IsInterchangeValid(const char32 ch);

// Same as above but restricted to 7-bit ASCII.
bool IsInterchangeValid7BitAscii(const char32 ch);

// Convert a full-width UTF-8 string to half-width.
char32 FullwidthToHalfwidth(const char32 ch);

}  // namespace tesseract

#endif  // TESSERACT_CCUTIL_NORMSTRNGS_H_
| 1080228-arabicocr11 | training/normstrngs.h | C++ | asf20 | 3,218 |
/**********************************************************************
* File: stringrenderer.cpp
* Description: Class for rendering UTF-8 text to an image, and retrieving
* bounding boxes around each grapheme cluster.
* Author: Ranjith Unnikrishnan
* Created: Mon Nov 18 2013
*
* (C) Copyright 2013, Google Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
**********************************************************************/
#include "stringrenderer.h"
#include <stdio.h>
#include <string.h>
#include <algorithm>
#include <map>
#include <utility>
#include <vector>
#include "allheaders.h" // from leptonica
#include "boxchar.h"
#include "ligature_table.h"
#include "normstrngs.h"
#include "pango/pango-font.h"
#include "pango/pango-glyph-item.h"
#include "tlog.h"
#include "unichar.h"
#include "unicode/uchar.h" // from libicu
#include "util.h"
#ifdef USE_STD_NAMESPACE
using std::map;
using std::max;
using std::min;
using std::swap;
#endif
namespace tesseract {
// Default rendering resolution, passed to set_resolution() at construction.
static const int kDefaultOutputResolution = 300;

// Word joiner (U+2060) inserted after letters in ngram mode, as per
// recommendation in http://unicode.org/reports/tr14/ to avoid line-breaks at
// hyphens and other non-alpha characters.
static const char* kWordJoinerUTF8 = "\u2060";
static const char32 kWordJoiner = 0x2060;
// Returns true if the code point is a Unicode mark (non-spacing, enclosing,
// or combining-spacing), i.e. it combines with a preceding base character.
static bool IsCombiner(int ch) {
  switch (u_charType(ch)) {
    case U_NON_SPACING_MARK:
    case U_ENCLOSING_MARK:
    case U_COMBINING_SPACING_MARK:
      return true;
    default:
      return false;
  }
}
// Encodes a single UTF-32 code point as a UTF-8 byte string.
static string EncodeAsUTF8(const char32 ch32) {
  const UNICHAR uni_ch(ch32);
  const char* utf8_bytes = uni_ch.utf8();
  return string(utf8_bytes, uni_ch.utf8_len());
}
// Returns true with probability 'prob'.
static bool RandBool(const double prob, TRand* rand) {
  // Handle the degenerate probabilities without drawing a random number.
  if (prob == 0.0) return false;
  if (prob == 1.0) return true;
  return rand->UnsignedRand(1.0) < prob;
}
/* static */
// Converts a cairo ARGB32 image surface into a newly allocated 32-bpp
// leptonica Pix by copying the surface's pixel rows. Returns NULL (after
// logging) if the surface is not CAIRO_FORMAT_ARGB32. Caller owns the Pix.
Pix* CairoARGB32ToPixFormat(cairo_surface_t *surface) {
  if (cairo_image_surface_get_format(surface) != CAIRO_FORMAT_ARGB32) {
    printf("Unexpected surface format %d\n",
           cairo_image_surface_get_format(surface));
    return NULL;
  }
  const int width = cairo_image_surface_get_width(surface);
  const int height = cairo_image_surface_get_height(surface);
  Pix* pix = pixCreate(width, height, 32);
  int byte_stride = cairo_image_surface_get_stride(surface);
  for (int i = 0; i < height; ++i) {
    // NOTE(review): the destination is offset by one byte and the final row
    // copies one byte less -- presumably a channel-alignment shim between
    // cairo's ARGB byte layout and the Pix pixel layout; confirm before
    // changing anything here.
    memcpy(reinterpret_cast<unsigned char*>(pix->data + i * pix->wpl) + 1,
           cairo_image_surface_get_data(surface) + i * byte_stride,
           byte_stride - ((i == height - 1) ? 1 : 0));
  }
  return pix;
}
// Constructs a renderer that lays out text with the given Pango font
// description onto pages of the given pixel dimensions. Margins, spacing,
// underline probabilities and the other rendering knobs start at the
// defaults below and are adjusted through setters.
StringRenderer::StringRenderer(const string& font_desc, int page_width,
                               int page_height)
    : page_width_(page_width),
      page_height_(page_height),
      h_margin_(50),
      v_margin_(50),
      char_spacing_(0),
      leading_(0),
      vertical_text_(false),
      gravity_hint_strong_(false),
      render_fullwidth_latin_(false),
      underline_start_prob_(0),
      underline_continuation_prob_(0),
      underline_style_(PANGO_UNDERLINE_SINGLE),
      drop_uncovered_chars_(true),
      strip_unrenderable_words_(false),
      add_ligatures_(false),
      output_word_boxes_(false),
      surface_(NULL),
      cr_(NULL),
      layout_(NULL),
      start_box_(0),
      page_(0),
      box_padding_(0),
      total_chars_(0),
      font_index_(0),
      last_offset_(0) {
  // Default pen color is black.
  pen_color_[0] = 0.0;
  pen_color_[1] = 0.0;
  pen_color_[2] = 0.0;
  set_font(font_desc);
  set_resolution(kDefaultOutputResolution);
  page_boxes_ = NULL;
}
// Sets the font from a Pango font description string, keeping the current
// resolution. Returns false if the description could not be parsed.
bool StringRenderer::set_font(const string& desc) {
  const bool parsed_ok = font_.ParseFontDescriptionName(desc);
  font_.set_resolution(resolution_);
  return parsed_ok;
}
// Sets the rendering resolution and propagates it to the font.
void StringRenderer::set_resolution(const int resolution) {
  resolution_ = resolution;
  font_.set_resolution(resolution);
}
// Frees all accumulated boxchars/page boxes and the pango/cairo objects.
StringRenderer::~StringRenderer() {
  ClearBoxes();
  FreePangoCairo();
}
// (Re)creates the cairo image surface, cairo context and pango layout for a
// fresh page, applying vertical-text gravity settings when requested.
void StringRenderer::InitPangoCairo() {
  FreePangoCairo();
  surface_ = cairo_image_surface_create(CAIRO_FORMAT_ARGB32, page_width_,
                                        page_height_);
  cr_ = cairo_create(surface_);
  {
    DISABLE_HEAP_LEAK_CHECK;
    layout_ = pango_cairo_create_layout(cr_);
  }
  if (vertical_text_) {
    // Vertical text is laid out horizontally with EAST gravity and rotated
    // into place later (see RenderToImage).
    PangoContext* context = pango_layout_get_context(layout_);
    pango_context_set_base_gravity(context, PANGO_GRAVITY_EAST);
    if (gravity_hint_strong_) {
      pango_context_set_gravity_hint(context, PANGO_GRAVITY_HINT_STRONG);
    }
    pango_layout_context_changed(layout_);
  }
  SetLayoutProperties();
}
// Applies the current font, resolution, wrapping width, character spacing
// and leading to the pango layout.
void StringRenderer::SetLayoutProperties() {
  string font_desc = font_.DescriptionName();
  // Specify the font via a description name
  PangoFontDescription *desc =
      pango_font_description_from_string(font_desc.c_str());
  // Assign the font description to the layout
  pango_layout_set_font_description(layout_, desc);
  pango_font_description_free(desc);  // free the description
  pango_cairo_context_set_resolution(pango_layout_get_context(layout_),
                                     resolution_);
  // Usable area is the page minus the margins on both sides.
  int max_width = page_width_ - 2 * h_margin_;
  int max_height = page_height_ - 2 * v_margin_;
  tlog(3, "max_width = %d, max_height = %d\n", max_width, max_height);
  if (vertical_text_) {
    // Vertical text is laid out horizontally and rotated later, so the
    // wrapping dimensions swap roles.
    swap(max_width, max_height);
  }
  pango_layout_set_width(layout_, max_width * PANGO_SCALE);
  pango_layout_set_wrap(layout_, PANGO_WRAP_WORD);
  // Adjust character spacing
  PangoAttrList* attr_list = pango_attr_list_new();
  if (char_spacing_) {
    PangoAttribute* spacing_attr = pango_attr_letter_spacing_new(
        static_cast<int>(char_spacing_ * PANGO_SCALE + 0.5));
    // Apply the spacing attribute to the whole text range.
    spacing_attr->start_index = 0;
    spacing_attr->end_index = static_cast<guint>(-1);
    pango_attr_list_change(attr_list, spacing_attr);
  }
  pango_layout_set_attributes(layout_, attr_list);
  pango_attr_list_unref(attr_list);
  // Adjust line spacing
  if (leading_) {
    pango_layout_set_spacing(layout_, leading_ * PANGO_SCALE);
  }
}
// Releases the pango layout, the cairo context and the image surface, in
// that order, nulling each pointer so the call is safely idempotent.
void StringRenderer::FreePangoCairo() {
  if (layout_ != NULL) {
    g_object_unref(layout_);
    layout_ = NULL;
  }
  if (cr_ != NULL) {
    cairo_destroy(cr_);
    cr_ = NULL;
  }
  if (surface_ != NULL) {
    cairo_surface_destroy(surface_);
    surface_ = NULL;
  }
}
// Randomly attaches pango underline attributes to whole words of
// 'page_text': an underline starts at a word with probability
// underline_start_prob_ and is extended across each following word with
// probability underline_continuation_prob_.
void StringRenderer::SetWordUnderlineAttributes(const string& page_text) {
  if (underline_start_prob_ == 0) return;
  PangoAttrList* attr_list = pango_layout_get_attributes(layout_);
  const char* text = page_text.c_str();
  int offset = 0;
  TRand rand;
  bool started_underline = false;
  PangoAttribute* und_attr = nullptr;
  while (offset < page_text.length()) {
    // Skip leading whitespace to find the next word.
    offset += SpanUTF8Whitespace(text + offset);
    if (offset == page_text.length()) break;
    int word_start = offset;
    int word_len = SpanUTF8NotWhitespace(text + offset);
    offset += word_len;
    if (started_underline) {
      // Should we continue the underline to the next word?
      if (RandBool(underline_continuation_prob_, &rand)) {
        // Continue the current underline to this word.
        und_attr->end_index = word_start + word_len;
      } else {
        // Otherwise end the current underline attribute at the end of the
        // previous word.
        pango_attr_list_insert(attr_list, und_attr);
        started_underline = false;
        und_attr = nullptr;
      }
    }
    if (!started_underline && RandBool(underline_start_prob_, &rand)) {
      // Start a new underline attribute
      und_attr = pango_attr_underline_new(underline_style_);
      und_attr->start_index = word_start;
      und_attr->end_index = word_start + word_len;
      started_underline = true;
    }
  }
  // Finish the current underline attribute at the end of the page.
  if (started_underline) {
    und_attr->end_index = page_text.length();
    pango_attr_list_insert(attr_list, und_attr);
  }
}
// Returns offset in utf8 bytes to first page.
// Lays out up to kMaxUnicodeBufLength code points of 'text' and walks the
// resulting lines, returning the byte offset of the first line that would
// overflow the usable page height (or the buffered length if it all fits).
int StringRenderer::FindFirstPageBreakOffset(const char* text,
                                             int text_length) {
  if (!text_length) return 0;
  const int max_height = (page_height_ - 2 * v_margin_);
  const int max_width = (page_width_ - 2 * h_margin_);
  // Vertical text is laid out horizontally and rotated later, so width
  // plays the role of the layout height.
  const int max_layout_height = vertical_text_ ? max_width : max_height;
  // Cap the amount of text laid out in one shot to bound layout cost.
  UNICHAR::const_iterator it = UNICHAR::begin(text, text_length);
  const UNICHAR::const_iterator it_end = UNICHAR::end(text, text_length);
  const int kMaxUnicodeBufLength = 15000;
  for (int i = 0; i < kMaxUnicodeBufLength && it != it_end; ++it, ++i);
  int buf_length = it.utf8_data() - text;
  tlog(1, "len = %d buf_len = %d\n", text_length, buf_length);
  pango_layout_set_text(layout_, text, buf_length);
  PangoLayoutIter* line_iter = NULL;
  {  // Fontconfig caches some info here that is not freed before exit.
    DISABLE_HEAP_LEAK_CHECK;
    line_iter = pango_layout_get_iter(layout_);
  }
  bool first_page = true;
  int page_top = 0;
  int offset = buf_length;
  do {
    // Get bounding box of the current line
    PangoRectangle line_ink_rect;
    pango_layout_iter_get_line_extents(line_iter, &line_ink_rect, NULL);
    pango_extents_to_pixels(&line_ink_rect, NULL);
    PangoLayoutLine* line = pango_layout_iter_get_line_readonly(line_iter);
    if (first_page) {
      // Measure page height relative to the top of the first line.
      page_top = line_ink_rect.y;
      first_page = false;
    }
    int line_bottom = line_ink_rect.y + line_ink_rect.height;
    if (line_bottom - page_top > max_layout_height) {
      offset = line->start_index;
      tlog(1, "Found offset = %d\n", offset);
      break;
    }
  } while (pango_layout_iter_next_line(line_iter));
  pango_layout_iter_free(line_iter);
  return offset;
}
// Returns all boxchars accumulated so far, across all rendered pages.
const vector<BoxChar*>& StringRenderer::GetBoxes() const {
  return boxchars_;
}

// Returns the per-page bounding boxes accumulated so far.
Boxa* StringRenderer::GetPageBoxes() const {
  return page_boxes_;
}

// Rotates the boxes of the most recently rendered page (those added since
// start_box_) about the page center.
void StringRenderer::RotatePageBoxes(float rotation) {
  BoxChar::RotateBoxes(rotation, page_width_ / 2, page_height_ / 2,
                       start_box_, boxchars_.size(), &boxchars_);
}

// Deletes all accumulated boxchars and the page bounding boxes.
void StringRenderer::ClearBoxes() {
  for (int i = 0; i < boxchars_.size(); ++i)
    delete boxchars_[i];
  boxchars_.clear();
  boxaDestroy(&page_boxes_);
}

// Writes all accumulated boxes to a Tesseract-format box file.
void StringRenderer::WriteAllBoxes(const string& filename) const {
  BoxChar::WriteTesseractBoxFile(filename, page_height_, boxchars_);
}
// Returns cluster strings in logical order.
// Walks the pango layout run-by-run and cluster-by-cluster, collecting the
// text of each grapheme cluster keyed (and therefore sorted) by its byte
// offset into the layout text. Whitespace clusters are canonicalized to a
// single space, and ligatures are re-applied when add_ligatures_ is set.
// Returns true if at least one cluster string was collected.
bool StringRenderer::GetClusterStrings(vector<string>* cluster_text) {
  map<int, string> start_byte_to_text;
  PangoLayoutIter* run_iter = pango_layout_get_iter(layout_);
  const char* full_text = pango_layout_get_text(layout_);
  do {
    PangoLayoutRun* run = pango_layout_iter_get_run_readonly(run_iter);
    if (!run) {
      // End of line NULL run marker
      tlog(2, "Found end of line marker\n");
      continue;
    }
    PangoGlyphItemIter cluster_iter;
    gboolean have_cluster;
    for (have_cluster = pango_glyph_item_iter_init_start(&cluster_iter,
                                                         run, full_text);
         have_cluster;
         have_cluster = pango_glyph_item_iter_next_cluster(&cluster_iter)) {
      const int start_byte_index = cluster_iter.start_index;
      const int end_byte_index = cluster_iter.end_index;
      string text = string(full_text + start_byte_index,
                           end_byte_index - start_byte_index);
      if (IsUTF8Whitespace(text.c_str())) {
        tlog(2, "Found whitespace\n");
        text = " ";
      }
      tlog(2, "start_byte=%d end_byte=%d : '%s'\n", start_byte_index,
           end_byte_index, text.c_str());
      if (add_ligatures_) {
        // Make sure the output box files have ligatured text in case the font
        // decided to use an unmapped glyph.
        text = LigatureTable::Get()->AddLigatures(text, NULL);
      }
      start_byte_to_text[start_byte_index] = text;
    }
  } while (pango_layout_iter_next_run(run_iter));
  pango_layout_iter_free(run_iter);
  // Emit the cluster strings in increasing byte-offset (logical) order.
  cluster_text->clear();
  for (map<int, string>::const_iterator it = start_byte_to_text.begin();
       it != start_byte_to_text.end(); ++it) {
    cluster_text->push_back(it->second);
  }
  // Explicit emptiness test instead of the previous implicit (and
  // warning-prone) size_t -> bool narrowing of 'return size();'.
  return !cluster_text->empty();
}
// Merges an array of BoxChars into words based on the identification of
// BoxChars containing the space character as inter-word separators.
//
// Sometime two adjacent characters in the sequence may be detected as lying on
// different lines based on their spatial positions. This may be the result of a
// newline character at end of the last word on a line in the source text, or of
// a discretionary line-break created by Pango at intra-word locations like
// hyphens. When this is detected the word is split at that location into
// multiple BoxChars. Otherwise, each resulting BoxChar will contain a word and
// its bounding box.
static void MergeBoxCharsToWords(vector<BoxChar*>* boxchars) {
  vector<BoxChar*> result;
  bool started_word = false;
  for (int i = 0; i < boxchars->size(); ++i) {
    // Space (or box-less) BoxChars pass straight through and terminate any
    // in-progress word.
    if (boxchars->at(i)->ch() == " " ||
        boxchars->at(i)->box() == NULL) {
      result.push_back(boxchars->at(i));
      boxchars->at(i) = NULL;
      started_word = false;
      continue;
    }
    if (!started_word) {
      // Begin new word
      started_word = true;
      result.push_back(boxchars->at(i));
      boxchars->at(i) = NULL;
    } else {
      BoxChar* last_boxchar = result.back();
      // Compute bounding box union
      const Box* box = boxchars->at(i)->box();
      Box* last_box = last_boxchar->mutable_box();
      int left = min(last_box->x, box->x);
      int right = max(last_box->x + last_box->w, box->x + box->w);
      int top = min(last_box->y, box->y);
      int bottom = max(last_box->y + last_box->h, box->y + box->h);
      // Conclude that the word was broken to span multiple lines based on the
      // size of the merged bounding box in relation to those of the individual
      // characters seen so far.
      if (right - left > last_box->w + 5 * box->w) {
        // Added the trailing newline that every other tlog call in this file
        // has, so this message does not run into the next log line.
        tlog(1, "Found line break after '%s'\n", last_boxchar->ch().c_str());
        // Insert a fake interword space and start a new word with the current
        // boxchar.
        result.push_back(new BoxChar(" ", 1));
        result.push_back(boxchars->at(i));
        boxchars->at(i) = NULL;
        continue;
      }
      // Append to last word
      last_boxchar->mutable_ch()->append(boxchars->at(i)->ch());
      last_box->x = left;
      last_box->w = right - left;
      last_box->y = top;
      last_box->h = bottom - top;
      delete boxchars->at(i);
      boxchars->at(i) = NULL;
    }
  }
  boxchars->swap(result);
}
// Computes the bounding box and text of every grapheme cluster in the
// current layout, appends the results (in byte order) to boxchars_, and
// accumulates this page's overall bounding box into page_boxes_.
void StringRenderer::ComputeClusterBoxes() {
  const char* text = pango_layout_get_text(layout_);
  PangoLayoutIter* cluster_iter = pango_layout_get_iter(layout_);
  // Do a first pass to store cluster start indexes.
  vector<int> cluster_start_indices;
  do {
    cluster_start_indices.push_back(pango_layout_iter_get_index(cluster_iter));
    tlog(3, "Added %d\n", cluster_start_indices.back());
  } while (pango_layout_iter_next_cluster(cluster_iter));
  pango_layout_iter_free(cluster_iter);
  cluster_start_indices.push_back(strlen(text));
  tlog(3, "Added last index %d\n", cluster_start_indices.back());
  // Sort the indices and create a map from start to end indices.
  sort(cluster_start_indices.begin(), cluster_start_indices.end());
  map<int, int> cluster_start_to_end_index;
  for (int i = 0; i < cluster_start_indices.size() - 1; ++i) {
    cluster_start_to_end_index[cluster_start_indices[i]]
        = cluster_start_indices[i + 1];
  }
  // Iterate again to compute cluster boxes and their text with the obtained
  // cluster extent information.
  cluster_iter = pango_layout_get_iter(layout_);
  // Store BoxChars* sorted by their byte start positions
  map<int, BoxChar*> start_byte_to_box;
  do {
    PangoRectangle cluster_rect;
    pango_layout_iter_get_cluster_extents(cluster_iter, &cluster_rect,
                                          NULL);
    pango_extents_to_pixels(&cluster_rect, NULL);
    const int start_byte_index = pango_layout_iter_get_index(cluster_iter);
    const int end_byte_index = cluster_start_to_end_index[start_byte_index];
    string cluster_text = string(text + start_byte_index,
                                 end_byte_index - start_byte_index);
    if (cluster_text.size() && cluster_text[0] == '\n') {
      tlog(2, "Skipping newlines at start of text.\n");
      continue;
    }
    if (!cluster_rect.width || !cluster_rect.height ||
        IsUTF8Whitespace(cluster_text.c_str())) {
      // Whitespace (or zero-extent) clusters are recorded as a " " BoxChar
      // that carries no bounding box.
      tlog(2, "Skipping whitespace with boxdim (%d,%d) '%s'\n",
           cluster_rect.width, cluster_rect.height, cluster_text.c_str());
      BoxChar* boxchar = new BoxChar(" ", 1);
      boxchar->set_page(page_);
      start_byte_to_box[start_byte_index] = boxchar;
      continue;
    }
    // Prepare a boxchar for addition at this byte position.
    tlog(2, "[%d %d], %d, %d : start_byte=%d end_byte=%d : '%s'\n",
         cluster_rect.x, cluster_rect.y,
         cluster_rect.width, cluster_rect.height,
         start_byte_index, end_byte_index,
         cluster_text.c_str());
    ASSERT_HOST_MSG(cluster_rect.width,
                    "cluster_text:%s start_byte_index:%d\n",
                    cluster_text.c_str(), start_byte_index);
    ASSERT_HOST_MSG(cluster_rect.height,
                    "cluster_text:%s start_byte_index:%d\n",
                    cluster_text.c_str(), start_byte_index);
    if (box_padding_) {
      // Pad the box on all sides, clamping x/y at the page origin.
      cluster_rect.x = max(0, cluster_rect.x - box_padding_);
      cluster_rect.width += 2 * box_padding_;
      cluster_rect.y = max(0, cluster_rect.y - box_padding_);
      cluster_rect.height += 2 * box_padding_;
    }
    if (add_ligatures_) {
      // Make sure the output box files have ligatured text in case the font
      // decided to use an unmapped glyph.
      cluster_text = LigatureTable::Get()->AddLigatures(cluster_text, NULL);
    }
    BoxChar* boxchar = new BoxChar(cluster_text.c_str(), cluster_text.size());
    boxchar->set_page(page_);
    boxchar->AddBox(cluster_rect.x, cluster_rect.y,
                    cluster_rect.width, cluster_rect.height);
    start_byte_to_box[start_byte_index] = boxchar;
  } while (pango_layout_iter_next_cluster(cluster_iter));
  pango_layout_iter_free(cluster_iter);
  // There is a subtle bug in the cluster text reported by the PangoLayoutIter
  // on ligatured characters (eg. The word "Lam-Aliph" in arabic). To work
  // around this, we use text reported using the PangoGlyphIter which is
  // accurate.
  // TODO(ranjith): Revisit whether this is still needed in newer versions of
  // pango.
  vector<string> cluster_text;
  if (GetClusterStrings(&cluster_text)) {
    ASSERT_HOST(cluster_text.size() == start_byte_to_box.size());
    int ind = 0;
    for (map<int, BoxChar*>::iterator it = start_byte_to_box.begin();
         it != start_byte_to_box.end(); ++it, ++ind) {
      it->second->mutable_ch()->swap(cluster_text[ind]);
    }
  }
  // Append to the boxchars list in byte order.
  vector<BoxChar*> page_boxchars;
  page_boxchars.reserve(start_byte_to_box.size());
  string last_ch;  // NOTE(review): appears unused.
  for (map<int, BoxChar*>::const_iterator it = start_byte_to_box.begin();
       it != start_byte_to_box.end(); ++it) {
    if (it->second->ch() == kWordJoinerUTF8) {
      // Skip zero-width joiner characters (ZWJs) here.
      delete it->second;
    } else {
      page_boxchars.push_back(it->second);
    }
  }
  CorrectBoxPositionsToLayout(&page_boxchars);
  if (render_fullwidth_latin_) {
    for (map<int, BoxChar*>::iterator it = start_byte_to_box.begin();
         it != start_byte_to_box.end(); ++it) {
      // Convert fullwidth Latin characters to their halfwidth forms.
      string half(ConvertFullwidthLatinToBasicLatin(it->second->ch()));
      it->second->mutable_ch()->swap(half);
    }
  }
  // Merge the character boxes into word boxes if we are rendering n-grams.
  if (output_word_boxes_) {
    MergeBoxCharsToWords(&page_boxchars);
  }
  boxchars_.insert(boxchars_.end(), page_boxchars.begin(), page_boxchars.end());
  // Compute the page bounding box
  Box* page_box = NULL;
  Boxa* all_boxes = NULL;
  for (int i = 0; i < page_boxchars.size(); ++i) {
    if (page_boxchars[i]->box() == NULL) continue;
    if (all_boxes == NULL)
      all_boxes = boxaCreate(0);
    boxaAddBox(all_boxes, page_boxchars[i]->mutable_box(), L_CLONE);
  }
  // NOTE(review): if the page produced no boxes, all_boxes is still NULL
  // here -- presumably boxaGetExtent tolerates a NULL boxa; confirm.
  boxaGetExtent(all_boxes, NULL, NULL, &page_box);
  boxaDestroy(&all_boxes);
  if (page_boxes_ == NULL)
    page_boxes_ = boxaCreate(0);
  boxaAddBox(page_boxes_, page_box, L_INSERT);
}
// Translates (and, for vertical text, rotates) box coordinates from layout
// space into page space, mirroring the cairo translate/rotate applied when
// the page was rendered.
void StringRenderer::CorrectBoxPositionsToLayout(vector<BoxChar*>* boxchars) {
  if (vertical_text_) {
    const double rotation = - pango_gravity_to_rotation(
        pango_context_get_base_gravity(pango_layout_get_context(layout_)));
    BoxChar::TranslateBoxes(page_width_ - h_margin_, v_margin_, boxchars);
    BoxChar::RotateBoxes(rotation, page_width_ - h_margin_, v_margin_,
                         0, boxchars->size(), boxchars);
  } else {
    // Horizontal text only needs the margin offset applied.
    BoxChar::TranslateBoxes(h_margin_, v_margin_, boxchars);
  }
}
// Removes from 'utf8_text' every word that the current font cannot render,
// preserving all inter-word whitespace verbatim. Returns the number of
// words that were dropped.
int StringRenderer::StripUnrenderableWords(string* utf8_text) const {
  string kept;
  const char* text = utf8_text->c_str();
  const int length = utf8_text->length();
  int num_dropped = 0;
  int pos = 0;
  while (pos < length) {
    // Whitespace runs are always copied through unchanged.
    const int space_len = SpanUTF8Whitespace(text + pos);
    kept.append(text + pos, space_len);
    pos += space_len;
    if (pos == length) break;
    // Keep the next word only if the font can actually render it.
    const int word_len = SpanUTF8NotWhitespace(text + pos);
    if (font_.CanRenderString(text + pos, word_len)) {
      kept.append(text + pos, word_len);
    } else {
      ++num_dropped;
    }
    pos += word_len;
  }
  utf8_text->swap(kept);
  if (num_dropped > 0) {
    tprintf("Stripped %d unrenderable words\n", num_dropped);
  }
  return num_dropped;
}
int StringRenderer::RenderToGrayscaleImage(const char* text, int text_length,
Pix** pix) {
Pix *orig_pix = NULL;
int offset = RenderToImage(text, text_length, &orig_pix);
if (orig_pix) {
*pix = pixConvertTo8(orig_pix, false);
pixDestroy(&orig_pix);
}
return offset;
}
int StringRenderer::RenderToBinaryImage(const char* text, int text_length,
int threshold, Pix** pix) {
Pix *orig_pix = NULL;
int offset = RenderToImage(text, text_length, &orig_pix);
if (orig_pix) {
Pix* gray_pix = pixConvertTo8(orig_pix, false);
pixDestroy(&orig_pix);
*pix = pixThresholdToBinary(gray_pix, threshold);
pixDestroy(&gray_pix);
} else {
*pix = orig_pix;
}
return offset;
}
// Add word joiner (WJ) characters between adjacent non-space characters except
// immediately before a combiner.
/* static */
string StringRenderer::InsertWordJoiners(const string& text) {
  string out_str;
  const UNICHAR::const_iterator it_end = UNICHAR::end(text.c_str(),
                                                      text.length());
  for (UNICHAR::const_iterator it = UNICHAR::begin(text.c_str(), text.length());
       it != it_end; ++it) {
    // Add the symbol to the output string.
    out_str.append(it.utf8_data(), it.utf8_len());
    // Check the next symbol.
    UNICHAR::const_iterator next_it = it;
    ++next_it;
    bool next_char_is_boundary = (next_it == it_end || *next_it == ' ');
    bool next_char_is_combiner = (next_it == it_end) ?
        false : IsCombiner(*next_it);
    // Insert the joiner only between two non-space, non-newline symbols,
    // and never right before a combining mark.
    if (*it != ' ' && *it != '\n' && !next_char_is_boundary &&
        !next_char_is_combiner) {
      out_str += kWordJoinerUTF8;
    }
  }
  return out_str;
}
// Convert halfwidth Basic Latin characters to their fullwidth forms.
// Symbols that are not printable, non-space 7-bit ASCII pass through
// unchanged.
string StringRenderer::ConvertBasicLatinToFullwidthLatin(const string& str) {
  string full_str;
  const UNICHAR::const_iterator it_end = UNICHAR::end(str.c_str(),
                                                      str.length());
  for (UNICHAR::const_iterator it = UNICHAR::begin(str.c_str(), str.length());
       it != it_end; ++it) {
    // Convert printable and non-space 7-bit ASCII characters to
    // their fullwidth forms.
    if (IsInterchangeValid7BitAscii(*it) && isprint(*it) && !isspace(*it)) {
      // Convert by adding 0xFEE0 to the codepoint of 7-bit ASCII.
      char32 full_char = *it + 0xFEE0;
      full_str.append(EncodeAsUTF8(full_char));
    } else {
      full_str.append(it.utf8_data(), it.utf8_len());
    }
  }
  return full_str;
}
// Convert fullwidth Latin characters to their halfwidth forms.
// A character is converted only when its halfwidth form is printable,
// non-space 7-bit ASCII; everything else passes through unchanged.
string StringRenderer::ConvertFullwidthLatinToBasicLatin(const string& str) {
  string half_str;
  UNICHAR::const_iterator it_end = UNICHAR::end(str.c_str(), str.length());
  for (UNICHAR::const_iterator it = UNICHAR::begin(str.c_str(), str.length());
       it != it_end; ++it) {
    char32 half_char = FullwidthToHalfwidth(*it);
    // Convert fullwidth Latin characters to their halfwidth forms
    // only if halfwidth forms are printable and non-space 7-bit ASCII.
    if (IsInterchangeValid7BitAscii(half_char) &&
        isprint(half_char) && !isspace(half_char)) {
      half_str.append(EncodeAsUTF8(half_char));
    } else {
      half_str.append(it.utf8_data(), it.utf8_len());
    }
  }
  return half_str;
}
// Renders as much of text as fits on one page into *pix (if pix != NULL),
// honoring the renderer's margin/rotation/ligature/underline settings, and
// records character bounding boxes in boxchars_.
// Returns offset to end of text substring rendered in this method.
int StringRenderer::RenderToImage(const char* text, int text_length,
                                  Pix** pix) {
  // Release any image from a previous call so *pix can be reassigned below.
  if (pix && *pix) pixDestroy(pix);
  InitPangoCairo();
  // Determine how much of the text fits on this page; 0 means nothing to do.
  const int page_offset = FindFirstPageBreakOffset(text, text_length);
  if (!page_offset) {
    return 0;
  }
  // Boxes produced by this page start at this index in boxchars_.
  start_box_ = boxchars_.size();
  if (!vertical_text_) {
    // Translate by the specified margin
    cairo_translate(cr_, h_margin_, v_margin_);
  } else {
    // Vertical text rendering is achieved by a two-step process of first
    // performing regular horizontal layout with character orientation set to
    // EAST, and then translating and rotating the layout before rendering onto
    // the desired image surface. The settings required for the former step are
    // done within InitPangoCairo().
    //
    // Translate to the top-right margin of page
    cairo_translate(cr_, page_width_ - h_margin_, v_margin_);
    // Rotate the layout
    double rotation = - pango_gravity_to_rotation(
        pango_context_get_base_gravity(pango_layout_get_context(layout_)));
    tlog(2, "Rotating by %f radians\n", rotation);
    cairo_rotate(cr_, rotation);
    pango_cairo_update_layout(cr_, layout_);
  }
  // Take only the portion of text that fits on this page, then apply the
  // configured text transformations in order.
  string page_text(text, page_offset);
  if (render_fullwidth_latin_) {
    // Convert Basic Latin to their fullwidth forms.
    page_text = ConvertBasicLatinToFullwidthLatin(page_text);
  }
  if (strip_unrenderable_words_) {
    StripUnrenderableWords(&page_text);
  }
  // Optionally drop characters the current font cannot render.
  if (drop_uncovered_chars_ &&
      !font_.CoversUTF8Text(page_text.c_str(), page_text.length())) {
    int num_dropped = font_.DropUncoveredChars(&page_text);
    if (num_dropped) {
      tprintf("WARNING: Dropped %d uncovered characters\n", num_dropped);
    }
  }
  if (add_ligatures_) {
    // Add ligatures wherever possible, including custom ligatures.
    page_text = LigatureTable::Get()->AddLigatures(page_text, &font_);
  }
  if (underline_start_prob_ > 0) {
    SetWordUnderlineAttributes(page_text);
  }
  pango_layout_set_text(layout_, page_text.c_str(), page_text.length());
  if (pix) {
    // Set a white background for the target image surface.
    cairo_set_source_rgb(cr_, 1.0, 1.0, 1.0);  // sets drawing colour to white
    // Fill the surface with the active colour (if you don't do this, you will
    // be given a surface with a transparent background to draw on)
    cairo_paint(cr_);
    // Set the ink color to the configured pen color.
    cairo_set_source_rgb(cr_, pen_color_[0], pen_color_[1], pen_color_[2]);
    // If the target surface or transformation properties of the cairo instance
    // have changed, update the pango layout to reflect this
    pango_cairo_update_layout(cr_, layout_);
    {
      DISABLE_HEAP_LEAK_CHECK;  // for Fontconfig
      // Draw the pango layout onto the cairo surface
      pango_cairo_show_layout(cr_, layout_);
    }
    *pix = CairoARGB32ToPixFormat(surface_);
  }
  // Boxes are computed even when no pix was requested.
  ComputeClusterBoxes();
  FreePangoCairo();
  // Update internal state variables.
  ++page_;
  return page_offset;
}
// Render a string to an image, returning it as an 8 bit pix. Behaves as
// RenderString, except that it ignores the font set at construction and works
// through all the fonts, returning 0 until they are exhausted, at which point
// it returns the value it should have returned all along, but no pix this time.
// Fonts that don't contain a given proportion of the characters in the string
// get skipped.
// Fonts that work each get rendered and the font name gets added
// to the image.
// NOTE that no boxes are produced by this function.
//
// Example usage: To render a null terminated char-array "txt"
//
// int offset = 0;
// do {
// Pix *pix;
// offset += renderer.RenderAllFontsToImage(min_proportion, txt + offset,
// strlen(txt + offset), NULL, &pix);
// ...
// } while (offset < strlen(text));
//
// Renders the text with the next font (starting at font_index_) that covers
// at least min_coverage of its characters, overlaying a title line naming
// the font and its coverage stats. Returns 0 while fonts remain to be tried;
// once the font list is exhausted it resets its state and returns the offset
// saved from the last successful render. See the usage comment above.
int StringRenderer::RenderAllFontsToImage(double min_coverage,
                                          const char* text, int text_length,
                                          string* font_used, Pix** image) {
  // Select a suitable font to render the title with.
  const char kTitleTemplate[] = "%s : %d hits = %.2f%%, raw = %d = %.2f%%";
  string title_font;
  if (!FontUtils::SelectFont(kTitleTemplate, strlen(kTitleTemplate),
                             &title_font, NULL)) {
    tprintf("WARNING: Could not find a font to render image title with!\n");
    title_font = "Arial";
  }
  title_font += " 8";
  tlog(1, "Selected title font: %s\n", title_font.c_str());
  if (font_used) font_used->clear();
  // Remember the caller's font so it can be restored after the title render.
  string orig_font = font_.DescriptionName();
  if (char_map_.empty()) {
    total_chars_ = 0;
    // Fill the hash table and use that for computing which fonts to use.
    for (UNICHAR::const_iterator it = UNICHAR::begin(text, text_length);
         it != UNICHAR::end(text, text_length); ++it) {
      ++total_chars_;
      ++char_map_[*it];
    }
    tprintf("Total chars = %d\n", total_chars_);
  }
  const vector<string>& all_fonts = FontUtils::ListAvailableFonts();
  // font_index_ persists across calls so each call resumes with the next font.
  for (int i = font_index_; i < all_fonts.size(); ++i) {
    ++font_index_;
    int raw_score = 0;
    int ok_chars = FontUtils::FontScore(char_map_, all_fonts[i], &raw_score,
                                        NULL);
    // A font qualifies if it covers at least min_coverage of the characters.
    if (ok_chars > 0 && ok_chars >= total_chars_ * min_coverage) {
      set_font(all_fonts[i]);
      int offset = RenderToBinaryImage(text, text_length, 128, image);
      ClearBoxes();  // Get rid of them as they are garbage.
      const int kMaxTitleLength = 1024;
      char title[kMaxTitleLength];
      snprintf(title, kMaxTitleLength, kTitleTemplate,
               all_fonts[i].c_str(), ok_chars,
               100.0 * ok_chars / total_chars_, raw_score,
               100.0 * raw_score / char_map_.size());
      tprintf("%s\n", title);
      // This is a good font! Store the offset to return once we've tried all
      // the fonts.
      if (offset) {
        last_offset_ = offset;
        if (font_used) *font_used = all_fonts[i];
      }
      // Add the font to the image.
      // NOTE(review): assumes *image is non-NULL after RenderToBinaryImage
      // above — confirm it always produces an image when image != NULL.
      set_font(title_font);
      v_margin_ /= 8;
      Pix* title_image = NULL;
      RenderToBinaryImage(title, strlen(title), 128, &title_image);
      pixOr(*image, *image, title_image);
      pixDestroy(&title_image);
      v_margin_ *= 8;
      set_font(orig_font);
      // We return the real offset only after cycling through the list of fonts.
      return 0;
    } else {
      tprintf("Font %s failed with %d hits = %.2f%%\n",
              all_fonts[i].c_str(), ok_chars, 100.0 * ok_chars / total_chars_);
    }
  }
  // All fonts tried: reset per-text state and report the saved offset.
  *image = NULL;
  font_index_ = 0;
  char_map_.clear();
  return last_offset_;
}
} // namespace tesseract
| 1080228-arabicocr11 | training/stringrenderer.cpp | C++ | asf20 | 32,524 |
// Copyright 2011 Google Inc. All Rights Reserved.
// Author: rays@google.com (Ray Smith)
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Filename: classifier_tester.cpp
// Purpose: Tests a character classifier on data as formatted for training,
// but doesn't have to be the same as the training data.
// Author: Ray Smith
#include <stdio.h>
#ifndef USE_STD_NAMESPACE
#include "base/commandlineflags.h"
#endif
#include "baseapi.h"
#include "commontraining.h"
#include "cubeclassifier.h"
#include "mastertrainer.h"
#include "params.h"
#include "strngs.h"
#include "tessclassifier.h"
STRING_PARAM_FLAG(classifier, "", "Classifier to test");
STRING_PARAM_FLAG(lang, "eng", "Language to test");
STRING_PARAM_FLAG(tessdata_dir, "", "Directory of traineddata files");
DECLARE_INT_PARAM_FLAG(debug_level);
DECLARE_STRING_PARAM_FLAG(T);
// Classifiers selectable via the -classifier flag; order must match the
// parallel names[] table below (see the usage comment before main()).
enum ClassifierName {
  CN_PRUNER,    // Tesseract class pruner only.
  CN_FULL,      // Tesseract full classifier.
  CN_CUBE,      // Cube classifier.
  CN_CUBETESS,  // Tesseract class pruner with rescoring by Cube.
  CN_COUNT      // Number of entries; also used as the "invalid" sentinel.
};
// Command-line spellings for the enum above; NULL-terminated parallel array.
const char* names[] = {"pruner", "full", "cube", "cubetess", NULL };
// Decodes classifer_name, initializes a TessBaseAPI (stored in *api; caller
// owns and must delete it) with the engine mode that classifier requires,
// applies any -T config files, and returns a newly allocated ShapeClassifier
// wrapping it, or NULL on any failure.
// NOTE(review): on the error paths after *api is allocated, *api is not
// freed here — callers exit immediately, so the leak is benign in practice.
static tesseract::ShapeClassifier* InitializeClassifier(
    const char* classifer_name, const UNICHARSET& unicharset,
    int argc, char **argv,
    tesseract::TessBaseAPI** api) {
  // Decode the classifier string.
  ClassifierName classifier = CN_COUNT;
  for (int c = 0; c < CN_COUNT; ++c) {
    if (strcmp(classifer_name, names[c]) == 0) {
      classifier = static_cast<ClassifierName>(c);
      break;
    }
  }
  if (classifier == CN_COUNT) {
    fprintf(stderr, "Invalid classifier name:%s\n", FLAGS_classifier.c_str());
    return NULL;
  }
  // We need to initialize tesseract to test.
  *api = new tesseract::TessBaseAPI;
  // Cube-based classifiers need the combined engine; the rest use plain
  // Tesseract.
  tesseract::OcrEngineMode engine_mode = tesseract::OEM_TESSERACT_ONLY;
  if (classifier == CN_CUBE || classifier == CN_CUBETESS)
    engine_mode = tesseract::OEM_TESSERACT_CUBE_COMBINED;
  tesseract::Tesseract* tesseract = NULL;
  tesseract::Classify* classify = NULL;
  if (classifier == CN_CUBE || classifier == CN_CUBETESS ||
      classifier == CN_PRUNER || classifier == CN_FULL) {
    (*api)->SetVariable("cube_debug_level", "2");
    if ((*api)->Init(FLAGS_tessdata_dir.c_str(), FLAGS_lang.c_str(),
                     engine_mode) < 0) {
      fprintf(stderr, "Tesseract initialization failed!\n");
      return NULL;
    }
    tesseract = const_cast<tesseract::Tesseract*>((*api)->tesseract());
    classify = reinterpret_cast<tesseract::Classify*>(tesseract);
    // The shape table maps classifier outputs back to unichars; without it
    // no classifier below can be tested.
    if (classify->shape_table() == NULL) {
      fprintf(stderr, "Tesseract must contain a ShapeTable!\n");
      return NULL;
    }
  }
  tesseract::ShapeClassifier* shape_classifier = NULL;
  // Apply any config files given via the -T flag before constructing the
  // classifier.
  if (!FLAGS_T.empty()) {
    const char* config_name;
    while ((config_name = GetNextFilename(argc, argv)) != NULL) {
      tprintf("Reading config file %s ...\n", config_name);
      (*api)->ReadConfigFile(config_name);
    }
  }
  if (classifier == CN_PRUNER) {
    shape_classifier = new tesseract::TessClassifier(true, classify);
  } else if (classifier == CN_FULL) {
    shape_classifier = new tesseract::TessClassifier(false, classify);
  } else if (classifier == CN_CUBE) {
    shape_classifier = new tesseract::CubeClassifier(tesseract);
  } else if (classifier == CN_CUBETESS) {
    shape_classifier = new tesseract::CubeTessClassifier(tesseract);
  } else {
    fprintf(stderr, "%s tester not yet implemented\n", classifer_name);
    return NULL;
  }
  tprintf("Testing classifier %s:\n", classifer_name);
  return shape_classifier;
}
// This program has complex setup requirements, so here is some help:
// Two different modes, tr files and serialized mastertrainer.
// From tr files:
// classifier_tester -U unicharset -F font_properties -X xheights
// -classifier x -lang lang [-output_trainer trainer] *.tr
// From a serialized trainer:
// classifier_tester -input_trainer trainer [-lang lang] -classifier x
//
// In the first case, the unicharset must be the unicharset from within
// the classifier under test, and the font_properties and xheights files must
// match the files used during training.
// In the second case, the trainer file must have been prepared from
// some previous run of shapeclustering, mftraining, or classifier_tester
// using the same conditions as above, ie matching unicharset/font_properties.
//
// Available values of classifier (x above) are:
// pruner : Tesseract class pruner only.
// full : Tesseract full classifier.
// cube : Cube classifier. (Not possible with an input trainer.)
// cubetess : Tesseract class pruner with rescoring by Cube. (Not possible
// with an input trainer.)
// Entry point: loads training/eval samples, builds the classifier named by
// -classifier, and reports its unichar-top1 error rate on the samples.
// Returns 0 on success, 1 on initialization failure.
// Fixes over the original: checks LoadTrainingData for NULL before
// dereferencing trainer, initializes api to NULL so cleanup is safe, and
// frees trainer/api on the classifier-init failure path.
int main(int argc, char **argv) {
  ParseArguments(&argc, &argv);
  STRING file_prefix;
  tesseract::MasterTrainer* trainer = tesseract::LoadTrainingData(
      argc, argv, false, NULL, &file_prefix);
  if (trainer == NULL) {
    fprintf(stderr, "Failed to load training data!\n");
    return 1;
  }
  tesseract::TessBaseAPI* api = NULL;
  // Decode the classifier string.
  tesseract::ShapeClassifier* shape_classifier = InitializeClassifier(
      FLAGS_classifier.c_str(), trainer->unicharset(), argc, argv, &api);
  if (shape_classifier == NULL) {
    fprintf(stderr, "Classifier init failed!:%s\n", FLAGS_classifier.c_str());
    delete api;
    delete trainer;
    return 1;
  }
  // We want to test junk as well if it is available.
  // trainer->IncludeJunk();
  // We want to test with replicated samples too.
  trainer->ReplicateAndRandomizeSamplesIfRequired();
  trainer->TestClassifierOnSamples(tesseract:: CT_UNICHAR_TOP1_ERR,
                                   MAX(3, FLAGS_debug_level), false,
                                   shape_classifier, NULL);
  delete shape_classifier;
  delete api;
  delete trainer;
  return 0;
} /* main */
| 1080228-arabicocr11 | training/classifier_tester.cpp | C++ | asf20 | 6,092 |
///////////////////////////////////////////////////////////////////////
// File: unicharset_extractor.cpp
// Description: Unicode character/ligature set extractor.
// Author: Thomas Kielbus
// Created: Wed Jun 28 17:05:01 PDT 2006
//
// (C) Copyright 2006, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
// Given a list of box files on the command line, this program generates a file
// containing a unicharset, a list of all the characters used by Tesseract
//
// The file contains the size of the set on the first line, and then one
// unichar per line.
#include <stdio.h>
#if defined(HAVE_WCHAR_T) || defined(_WIN32) || defined(GOOGLE3)
#include <wchar.h>
#include <wctype.h>
#define USING_WCTYPE
#endif
#include <locale.h>
#include "boxread.h"
#include "rect.h"
#include "strngs.h"
#include "tessopt.h"
#include "unichar.h"
#include "unicharset.h"
static const char* const kUnicharsetFileName = "unicharset";
// Encodes the Unicode code point wc as UTF-8 and returns the id of that
// string in unicharset.
UNICHAR_ID wc_to_unichar_id(const UNICHARSET &unicharset, int wc) {
  UNICHAR uch(wc);
  char* utf8 = uch.utf8_str();  // heap-allocated; freed below
  const UNICHAR_ID unichar_id = unicharset.unichar_to_id(utf8);
  delete[] utf8;
  return unichar_id;
}
// Set character properties using wctype if we have it.
// Contributed by piggy@gmail.com.
// Modified by Ray to use UNICHAR for unicode conversion
// and to check for wctype using autoconf/presence of windows.
// Sets alpha/lower/upper/digit/punctuation and other_case properties for
// the unichar represented by c_string, based on its FIRST code point only.
// A no-op when wctype support is unavailable.
void set_properties(UNICHARSET *unicharset, const char* const c_string) {
#ifdef USING_WCTYPE
  UNICHAR_ID id;
  int wc;
  // Convert the string to a unichar id.
  id = unicharset->unichar_to_id(c_string);
  // Set the other_case property to be this unichar id by default.
  unicharset->set_other_case(id, id);
  int step = UNICHAR::utf8_step(c_string);
  if (step == 0)
    return; // Invalid utf-8.
  // Get the next Unicode code point in the string.
  UNICHAR ch(c_string, step);
  wc = ch.first_uni();
  /* Copy the properties. */
  if (iswalpha(wc)) {
    unicharset->set_isalpha(id, 1);
    if (iswlower(wc)) {
      unicharset->set_islower(id, 1);
      // Cross-link to the uppercase form, inserting it if necessary.
      unicharset->set_other_case(id, wc_to_unichar_id(*unicharset,
                                                      towupper(wc)));
    }
    if (iswupper(wc)) {
      unicharset->set_isupper(id, 1);
      unicharset->set_other_case(id, wc_to_unichar_id(*unicharset,
                                                      towlower(wc)));
    }
  }
  if (iswdigit(wc))
    unicharset->set_isdigit(id, 1);
  if(iswpunct(wc))
    unicharset->set_ispunctuation(id, 1);
#endif
}
// Entry point: reads one or more box files, inserts every unichar found into
// a UNICHARSET with its character properties, and writes the result to
// <output_directory>/unicharset. Returns 0 on success, -1 on I/O failure.
// Fix over the original: each opened box file is now fclose()d (they were
// leaked, one FILE* per input file).
int main(int argc, char** argv) {
  int option;
  const char* output_directory = ".";
  STRING unicharset_file_name;
  // Special characters are now included by default.
  UNICHARSET unicharset;
  setlocale(LC_ALL, "");
  // Print usage
  if (argc <= 1) {
    printf("Usage: %s [-D DIRECTORY] FILE...\n", argv[0]);
    exit(1);
  }
  // Parse arguments
  while ((option = tessopt(argc, argv, "D" )) != EOF) {
    switch (option) {
      case 'D':
        output_directory = tessoptarg;
        ++tessoptind;
        break;
    }
  }
  // Save file name
  unicharset_file_name = output_directory;
  unicharset_file_name += "/";
  unicharset_file_name += kUnicharsetFileName;
  // Load box files
  for (; tessoptind < argc; ++tessoptind) {
    printf("Extracting unicharset from %s\n", argv[tessoptind]);
    FILE* box_file = fopen(argv[tessoptind], "rb");
    if (box_file == NULL) {
      printf("Cannot open box file %s\n", argv[tessoptind]);
      return -1;
    }
    TBOX box;
    STRING unichar_string;
    int line_number = 0;
    while (ReadNextBox(&line_number, box_file, &unichar_string, &box)) {
      unicharset.unichar_insert(unichar_string.string());
      set_properties(&unicharset, unichar_string.string());
    }
    fclose(box_file);  // was leaked in the original
  }
  // Write unicharset file
  if (unicharset.save_to_file(unicharset_file_name.string())) {
    printf("Wrote unicharset file %s.\n", unicharset_file_name.string());
  }
  else {
    printf("Cannot save unicharset file %s.\n", unicharset_file_name.string());
    return -1;
  }
  return 0;
}
| 1080228-arabicocr11 | training/unicharset_extractor.cpp | C++ | asf20 | 4,630 |
///////////////////////////////////////////////////////////////////////
// File: combine_tessdata
// Description: Creates a unified traineddata file from several
// data files produced by the training process.
// Author: Daria Antonova
// Created: Wed Jun 03 11:26:43 PST 2009
//
// (C) Copyright 2009, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#include "tessdatamanager.h"
// Main program to combine/extract/overwrite tessdata components
// in [lang].traineddata files.
//
// To combine all the individual tessdata components (unicharset, DAWGs,
// classifier templates, ambiguities, language configs) located at, say,
// /home/$USER/temp/eng.* run:
//
// combine_tessdata /home/$USER/temp/eng.
//
// The result will be a combined tessdata file /home/$USER/temp/eng.traineddata
//
// Specify option -e if you would like to extract individual components
// from a combined traineddata file. For example, to extract language config
// file and the unicharset from tessdata/eng.traineddata run:
//
// combine_tessdata -e tessdata/eng.traineddata
// /home/$USER/temp/eng.config /home/$USER/temp/eng.unicharset
//
// The desired config file and unicharset will be written to
// /home/$USER/temp/eng.config /home/$USER/temp/eng.unicharset
//
// Specify option -o to overwrite individual components of the given
// [lang].traineddata file. For example, to overwrite language config
// and unichar ambiguities files in tessdata/eng.traineddata use:
//
// combine_tessdata -o tessdata/eng.traineddata
// /home/$USER/temp/eng.config /home/$USER/temp/eng.unicharambigs
//
// As a result, tessdata/eng.traineddata will contain the new language config
// and unichar ambigs, plus all the original DAWGs, classifier templates, etc.
//
// Note: the file names of the files to extract to and to overwrite from should
// have the appropriate file suffixes (extensions) indicating their tessdata
// component type (.unicharset for the unicharset, .unicharambigs for unichar
// ambigs, etc). See k*FileSuffix variable in ccutil/tessdatamanager.h.
//
// Specify option -u to unpack all the components to the specified path:
//
// combine_tessdata -u tessdata/eng.traineddata /home/$USER/temp/eng.
//
// This will create /home/$USER/temp/eng.* files with individual tessdata
// components from tessdata/eng.traineddata.
//
// Entry point: dispatches on argv to one of four modes (combine, -e extract,
// -u unpack, -o overwrite) documented in the comment block above.
// Returns 0 on success, 1 on usage error (overwrite mode exits(1) if the
// temporary rename fails).
// Fixes over the original: corrected the user-visible spelling
// "sucessfully" -> "successfully", and made the success return explicit
// (the original fell off the end of main).
int main(int argc, char **argv) {
  int i;
  if (argc == 2) {
    // Combine mode: argv[1] is the language data path prefix.
    printf("Combining tessdata files\n");
    STRING lang = argv[1];
    char* last = &argv[1][strlen(argv[1])-1];
    // Ensure the prefix ends with '.' so component suffixes append cleanly.
    if (*last != '.')
      lang += '.';
    STRING output_file = lang;
    output_file += kTrainedDataSuffix;
    if (!tesseract::TessdataManager::CombineDataFiles(
        lang.string(), output_file.string())) {
      printf("Error combining tessdata files into %s\n",
             output_file.string());
    } else {
      printf("Output %s created successfully.\n", output_file.string());
    }
  } else if (argc >= 4 && (strcmp(argv[1], "-e") == 0 ||
                           strcmp(argv[1], "-u") == 0)) {
    // Extract (-e: named components) or unpack (-u: all components) mode.
    // Initialize TessdataManager with the data in the given traineddata file.
    tesseract::TessdataManager tm;
    tm.Init(argv[2], 0);
    printf("Extracting tessdata components from %s\n", argv[2]);
    if (strcmp(argv[1], "-e") == 0) {
      for (i = 3; i < argc; ++i) {
        if (tm.ExtractToFile(argv[i])) {
          printf("Wrote %s\n", argv[i]);
        } else {
          printf("Not extracting %s, since this component"
                 " is not present\n", argv[i]);
        }
      }
    } else {  // extract all the components
      for (i = 0; i < tesseract::TESSDATA_NUM_ENTRIES; ++i) {
        STRING filename = argv[3];
        char* last = &argv[3][strlen(argv[3])-1];
        if (*last != '.')
          filename += '.';
        filename += tesseract::kTessdataFileSuffixes[i];
        if (tm.ExtractToFile(filename.string())) {
          printf("Wrote %s\n", filename.string());
        }
      }
    }
    tm.End();
  } else if (argc >= 4 && strcmp(argv[1], "-o") == 0) {
    // Overwrite mode: swap listed components into the traineddata file.
    // Rename the current traineddata file to a temporary name.
    const char *new_traineddata_filename = argv[2];
    STRING traineddata_filename = new_traineddata_filename;
    traineddata_filename += ".__tmp__";
    if (rename(new_traineddata_filename, traineddata_filename.string()) != 0) {
      tprintf("Failed to create a temporary file %s\n",
              traineddata_filename.string());
      exit(1);
    }
    // Initialize TessdataManager with the data in the given traineddata file.
    tesseract::TessdataManager tm;
    tm.Init(traineddata_filename.string(), 0);
    // Write the updated traineddata file.
    tm.OverwriteComponents(new_traineddata_filename, argv+3, argc-3);
    tm.End();
  } else {
    printf("Usage for combining tessdata components:\n"
           "  %s language_data_path_prefix\n"
           "  (e.g. %s tessdata/eng.)\n\n", argv[0], argv[0]);
    printf("Usage for extracting tessdata components:\n"
           "  %s -e traineddata_file [output_component_file...]\n"
           "  (e.g. %s -e eng.traineddata eng.unicharset)\n\n",
           argv[0], argv[0]);
    printf("Usage for overwriting tessdata components:\n"
           "  %s -o traineddata_file [input_component_file...]\n"
           "  (e.g. %s -o eng.traineddata eng.unicharset)\n\n",
           argv[0], argv[0]);
    printf("Usage for unpacking all tessdata components:\n"
           "  %s -u traineddata_file output_path_prefix\n"
           "  (e.g. %s -u eng.traineddata tmp/eng.)\n", argv[0], argv[0]);
    return 1;
  }
  return 0;
}
| 1080228-arabicocr11 | training/combine_tessdata.cpp | C++ | asf20 | 6,157 |
/**********************************************************************
* File: degradeimage.h
* Description: Function to degrade an image (usually of text) as if it
* has been printed and then scanned.
* Authors: Ray Smith
* Created: Tue Nov 19 2013
*
* (C) Copyright 2013, Google Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
**********************************************************************/
#ifndef TESSERACT_TRAINING_DEGRADEIMAGE_H_
#define TESSERACT_TRAINING_DEGRADEIMAGE_H_
struct Pix;
namespace tesseract {
class TRand;
// Degrade the pix as if by a print/copy/scan cycle with exposure > 0
// corresponding to darkening on the copier and <0 lighter and 0 not copied.
// If rotation is not NULL, the clockwise rotation in radians is saved there.
// The input pix must be 8 bit grey. (Binary with values 0 and 255 is OK.)
// The input image is destroyed and a different image returned.
struct Pix* DegradeImage(struct Pix* input, int exposure, TRand* randomizer,
float* rotation);
} // namespace tesseract
#endif // TESSERACT_TRAINING_DEGRADEIMAGE_H_
| 1080228-arabicocr11 | training/degradeimage.h | C++ | asf20 | 1,638 |
/**********************************************************************
* File: normstrngs.cpp
* Description: Utilities to normalize and manipulate UTF-32 and
* UTF-8 strings.
* Author: Ranjith Unnikrishnan
* Created: Thu July 4 2013
*
* (C) Copyright 2013, Google Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
**********************************************************************/
#include "normstrngs.h"
#include "icuerrorcode.h"
#include "unichar.h"
#include "unicode/normalizer2.h" // From libicu
#include "unicode/translit.h" // From libicu
#include "unicode/unorm2.h" // From libicu
namespace tesseract {
// Decodes the NUL-terminated UTF-8 string utf8_str into UTF-32 code points
// appended to *str32 (which is cleared first).
// Fix over the original: utf8_step() returns 0 for an invalid lead byte,
// and the original's `ch += step` then never advanced, looping forever.
// Decoding now stops at the first invalid sequence instead.
void UTF8ToUTF32(const char* utf8_str, GenericVector<char32>* str32) {
  str32->clear();
  const int len = strlen(utf8_str);
  str32->reserve(len);
  int step = 0;
  for (int ch = 0; ch < len; ch += step) {
    step = UNICHAR::utf8_step(utf8_str + ch);
    if (step <= 0) break;  // invalid UTF-8: stop rather than loop forever
    UNICHAR uni_ch(utf8_str + ch, step);
    (*str32) += uni_ch.first_uni();
  }
}
// Encodes the UTF-32 code points in str32 as UTF-8 into *utf8_str, which is
// reset to empty first. Code points that fail to encode are skipped.
void UTF32ToUTF8(const GenericVector<char32>& str32, STRING* utf8_str) {
  utf8_str->ensure(str32.length());
  utf8_str->assign("", 0);
  for (int i = 0; i < str32.length(); ++i) {
    UNICHAR uni_ch(str32[i]);
    char* utf8 = uni_ch.utf8_str();  // heap-allocated, NULL on failure
    if (utf8 == NULL) continue;
    (*utf8_str) += utf8;
    delete[] utf8;
  }
}
// Returns true if ch is any Unicode character that OCR normalization treats
// as a hyphen/minus.
bool is_hyphen_punc(const char32 ch) {
  switch (ch) {
    case '-':
    case 0x2010:  // hyphen
    case 0x2011:  // non-breaking hyphen
    case 0x2012:  // figure dash
    case 0x2013:  // en dash
    case 0x2014:  // em dash
    case 0x2015:  // horizontal bar
    case 0x207b:  // superscript minus
    case 0x208b:  // subscript minus
    case 0x2212:  // minus sign
    case 0xfe58:  // small em dash
    case 0xfe63:  // small hyphen-minus
    case 0xff0d:  // fullwidth hyphen-minus
      return true;
    default:
      return false;
  }
}
// Returns true if ch is any Unicode character that OCR normalization treats
// as a single quote / apostrophe.
bool is_single_quote(const char32 ch) {
  switch (ch) {
    case '\'':
    case '`':
    case 0x2018:  // left single quotation mark (English, others)
    case 0x2019:  // right single quotation mark (Danish, Finnish, Swedish,
                  // Norwegian)
    // We may have to introduce a comma set with 0x201a
    case 0x201B:  // single high-reversed-9 quotation mark (PropList.txt)
    case 0x2032:  // prime
    case 0x300C:  // left corner bracket (East Asian languages)
    case 0xFF07:  // fullwidth apostrophe
      return true;
    default:
      return false;
  }
}
// Returns true if ch is any Unicode character that OCR normalization treats
// as a double quote.
bool is_double_quote(const char32 ch) {
  switch (ch) {
    case '"':
    case 0x201C:  // left double quotation mark (English, others)
    case 0x201D:  // right double quotation mark (Danish, Finnish, Swedish,
                  // Norwegian)
    case 0x201F:  // double high-reversed-9 quotation mark (PropList.txt)
    case 0x2033:  // double prime
    case 0x301D:  // reversed double prime quotation mark (East Asian,
                  // horizontal)
    case 0x301E:  // close double prime (East Asian, written horizontally)
    case 0xFF02:  // fullwidth quotation mark
      return true;
    default:
      return false;
  }
}
// Normalizes a UTF-8 string: decodes it to UTF-32, runs NormalizeChar32 on
// each code point, concatenates the results, and re-encodes as UTF-8.
STRING NormalizeUTF8String(const char* str8) {
  GenericVector<char32> str32;
  UTF8ToUTF32(str8, &str32);
  GenericVector<char32> out_str32;
  GenericVector<char32> norm_str;  // reused per-code-point scratch buffer
  for (int i = 0; i < str32.length(); ++i) {
    norm_str.clear();
    NormalizeChar32(str32[i], &norm_str);
    for (int j = 0; j < norm_str.length(); ++j)
      out_str32.push_back(norm_str[j]);
  }
  STRING out_str8;
  UTF32ToUTF8(out_str32, &out_str8);
  return out_str8;
}
// Normalizes the single code point ch using ICU's NFKC composition, applies
// OCRNormalize() to each resulting code point, and appends the result to
// *str (cleared first). If NFKC would introduce a space character, the
// normalization is treated as a no-op and the original ch is emitted alone.
void NormalizeChar32(char32 ch, GenericVector<char32>* str) {
  IcuErrorCode error_code;
  // getInstance returns a cached instance owned by ICU; it is not deleted.
  const icu::Normalizer2* nfkc = icu::Normalizer2::getInstance(
      NULL, "nfkc", UNORM2_COMPOSE, error_code);
  error_code.assertSuccess();
  error_code.reset();
  icu::UnicodeString uch_str(static_cast<UChar32>(ch));
  icu::UnicodeString norm_str = nfkc->normalize(uch_str, error_code);
  error_code.assertSuccess();
  str->clear();
  for (int i = 0; i < norm_str.length(); ++i) {
    // If any spaces were added by NFKC, pretend normalization is a nop.
    if (norm_str[i] == ' ') {
      str->clear();
      str->push_back(ch);
      break;
    } else {
      str->push_back(OCRNormalize(static_cast<char32>(norm_str[i])));
    }
  }
}
// Applies just the OCR-specific normalizations: collapses all hyphen/dash
// variants to '-', single quotes to '\'', double quotes to '"'; returns
// every other code point unchanged.
char32 OCRNormalize(char32 ch) {
  if (is_hyphen_punc(ch)) return '-';
  if (is_single_quote(ch)) return '\'';
  if (is_double_quote(ch)) return '"';
  return ch;
}
// Returns true if the two code points OCR-normalize to the same character.
bool IsOCREquivalent(char32 ch1, char32 ch2) {
  const char32 norm1 = OCRNormalize(ch1);
  const char32 norm2 = OCRNormalize(ch2);
  return norm1 == norm2;
}
// Returns true if ch is a valid Unicode scalar value, i.e. lies in
// [0, 0xD800) or [0xE000, 0x10FFFF] (excludes the surrogate range).
bool IsValidCodepoint(const char32 ch) {
  if (static_cast<uinT32>(ch) < 0xD800) return true;  // below surrogates
  return ch >= 0xE000 && ch <= 0x10FFFF;              // above surrogates
}
// Returns true if ch is Unicode whitespace per ICU's u_isUWhiteSpace.
// Asserts (fatally, via ASSERT_HOST_MSG) that ch is a valid codepoint.
bool IsWhitespace(const char32 ch) {
  ASSERT_HOST_MSG(IsValidCodepoint(ch),
                  "Invalid Unicode codepoint: 0x%x\n", ch);
  return u_isUWhiteSpace(static_cast<UChar32>(ch));
}
// Returns true if the entire NUL-terminated UTF-8 string consists of
// whitespace characters (the leading whitespace span covers every byte).
bool IsUTF8Whitespace(const char* text) {
  const int len = strlen(text);
  return SpanUTF8Whitespace(text) == len;
}
int SpanUTF8Whitespace(const char* text) {
int n_white = 0;
for (UNICHAR::const_iterator it = UNICHAR::begin(text, strlen(text));
it != UNICHAR::end(text, strlen(text));
++it) {
if (!IsWhitespace(*it)) break;
n_white += it.utf8_len();
}
return n_white;
}
int SpanUTF8NotWhitespace(const char* text) {
int n_notwhite = 0;
for (UNICHAR::const_iterator it = UNICHAR::begin(text, strlen(text));
it != UNICHAR::end(text, strlen(text));
++it) {
if (IsWhitespace(*it)) break;
n_notwhite += it.utf8_len();
}
return n_notwhite;
}
// Returns true if ch is interchange-valid: a valid Unicode scalar value
// that is neither a noncharacter nor an ISO control character (the common
// whitespace controls \n \f \t \r are allowed).
// Improvement over the original: the 17 hand-written plane-end range checks
// (U+FFFE/U+FFFF, U+1FFFE/U+1FFFF, ... U+10FFFE/U+10FFFF) collapse to the
// standard bitmask test (ch & 0xFFFE) == 0xFFFE, which matches exactly the
// last two code points of every plane; combined with the IsValidCodepoint
// guard this accepts/rejects the identical set of code points.
bool IsInterchangeValid(const char32 ch) {
  if (!IsValidCodepoint(ch)) return false;
  // Noncharacters: the contiguous block U+FDD0..U+FDEF ...
  if (ch >= 0xFDD0 && ch <= 0xFDEF) return false;
  // ... plus U+xFFFE and U+xFFFF at the end of every plane.
  if ((ch & 0xFFFE) == 0xFFFE) return false;
  // Reject ISO controls except the common whitespace controls.
  return !u_isISOControl(static_cast<UChar32>(ch)) ||
      ch == '\n' || ch == '\f' || ch == '\t' || ch == '\r';
}
// Returns true if ch is in the 7-bit ASCII range and is interchange-valid,
// i.e. not an ISO control character other than \n \f \t \r.
// NOTE(review): the test `ch <= 128` also admits U+0080; that code point is
// an ISO control and is rejected by the second clause, so the off-by-one is
// harmless — but `< 128` would state the intent more clearly.
bool IsInterchangeValid7BitAscii(const char32 ch) {
  return IsValidCodepoint(ch) &&
      ch <= 128 &&
      (!u_isISOControl(static_cast<UChar32>(ch)) ||
       ch == '\n' || ch == '\f' || ch == '\t' || ch == '\r');
}
// Maps a fullwidth/halfwidth-forms code point (or U+3000 ideographic space)
// to its halfwidth equivalent via ICU's Fullwidth-Halfwidth transliterator;
// any other code point is returned unchanged.
// NOTE(review): a new Transliterator is created and destroyed on every call,
// which is expensive — consider caching if this appears on a hot path.
char32 FullwidthToHalfwidth(const char32 ch) {
  // Return unchanged if not in the fullwidth-halfwidth Unicode block.
  if (ch < 0xFF00 || ch > 0xFFEF || !IsValidCodepoint(ch)) {
    // U+3000 (ideographic space) lies outside the block but still converts.
    if (ch != 0x3000) return ch;
  }
  // Special case for fullwidth left and right "white parentheses".
  if (ch == 0xFF5F) return 0x2985;
  if (ch == 0xFF60) return 0x2986;
  // Construct a full-to-half width transliterator.
  IcuErrorCode error_code;
  icu::UnicodeString uch_str(static_cast<UChar32>(ch));
  const icu::Transliterator* fulltohalf = icu::Transliterator::createInstance(
      "Fullwidth-Halfwidth", UTRANS_FORWARD, error_code);
  error_code.assertSuccess();
  error_code.reset();
  fulltohalf->transliterate(uch_str);
  delete fulltohalf;
  // The transliteration of a single code point must be non-empty.
  ASSERT_HOST(uch_str.length() != 0);
  return uch_str[0];
}
} // namespace tesseract
| 1080228-arabicocr11 | training/normstrngs.cpp | C++ | asf20 | 8,513 |
AUTOMAKE_OPTIONS = subdir-objects
AM_CPPFLAGS += \
-DUSE_STD_NAMESPACE -DPANGO_ENABLE_ENGINE\
-I$(top_srcdir)/ccmain -I$(top_srcdir)/api \
-I$(top_srcdir)/ccutil -I$(top_srcdir)/ccstruct \
-I$(top_srcdir)/viewer \
-I$(top_srcdir)/textord -I$(top_srcdir)/dict \
-I$(top_srcdir)/classify -I$(top_srcdir)/display \
-I$(top_srcdir)/wordrec -I$(top_srcdir)/cutil
# Extra files to ship in the distribution tarball (not built or installed).
EXTRA_DIST = tesstrain.sh
# Select the ICU link libraries; the Windows ICU import libraries use
# different names than the Unix ones.
if T_WIN
# try static build
#AM_LDFLAGS += -all-static
#libic=-lsicuin -licudt -lsicuuc
libicu=-licuin -licuuc
else
libicu=-licui18n -licuuc
endif
# TODO: training programs cannot be linked to shared library created
# with -fvisibility
if VISIBILITY
AM_LDFLAGS += -all-static
endif
# Private headers used by the training tools (not installed).
noinst_HEADERS = \
    boxchar.h commandlineflags.h commontraining.h degradeimage.h \
    fileio.h icuerrorcode.h ligature_table.h normstrngs.h \
    mergenf.h pango_font_info.h stringrenderer.h \
    tessopt.h tlog.h util.h
# Convenience libtool libraries shared by the training binaries below.
noinst_LTLIBRARIES = libtesseract_training.la libtesseract_tessopt.la
libtesseract_training_la_LIBADD = \
    ../cutil/libtesseract_cutil.la
# ../api/libtesseract.la
libtesseract_training_la_SOURCES = \
    boxchar.cpp commandlineflags.cpp commontraining.cpp degradeimage.cpp \
    fileio.cpp ligature_table.cpp normstrngs.cpp pango_font_info.cpp \
    stringrenderer.cpp tlog.cpp
libtesseract_tessopt_la_SOURCES = \
    tessopt.cpp
# The installed training tools.
bin_PROGRAMS = ambiguous_words classifier_tester cntraining combine_tessdata \
    dawg2wordlist mftraining set_unicharset_properties shapeclustering \
    text2image unicharset_extractor wordlist2dawg
# Each tool follows the same pattern: link the training convenience
# libraries, then either the per-module tesseract libraries
# (USING_MULTIPLELIBS) or the single combined libtesseract.
ambiguous_words_SOURCES = ambiguous_words.cpp
ambiguous_words_LDADD = \
    libtesseract_training.la \
    libtesseract_tessopt.la
if USING_MULTIPLELIBS
ambiguous_words_LDADD += \
    ../api/libtesseract_api.la \
    ../textord/libtesseract_textord.la \
    ../classify/libtesseract_classify.la \
    ../dict/libtesseract_dict.la \
    ../ccstruct/libtesseract_ccstruct.la \
    ../cutil/libtesseract_cutil.la \
    ../viewer/libtesseract_viewer.la \
    ../ccmain/libtesseract_main.la \
    ../cube/libtesseract_cube.la \
    ../neural_networks/runtime/libtesseract_neural.la \
    ../wordrec/libtesseract_wordrec.la \
    ../ccutil/libtesseract_ccutil.la
else
ambiguous_words_LDADD += \
    ../api/libtesseract.la
endif
classifier_tester_SOURCES = classifier_tester.cpp
#classifier_tester_LDFLAGS = -static
classifier_tester_LDADD = \
    libtesseract_training.la \
    libtesseract_tessopt.la
if USING_MULTIPLELIBS
classifier_tester_LDADD += \
    ../api/libtesseract_api.la \
    ../textord/libtesseract_textord.la \
    ../classify/libtesseract_classify.la \
    ../dict/libtesseract_dict.la \
    ../ccstruct/libtesseract_ccstruct.la \
    ../cutil/libtesseract_cutil.la \
    ../viewer/libtesseract_viewer.la \
    ../ccmain/libtesseract_main.la \
    ../cube/libtesseract_cube.la \
    ../neural_networks/runtime/libtesseract_neural.la \
    ../wordrec/libtesseract_wordrec.la \
    ../ccutil/libtesseract_ccutil.la
else
classifier_tester_LDADD += \
    ../api/libtesseract.la
endif
# combine_tessdata does not use the training convenience libraries.
combine_tessdata_SOURCES = combine_tessdata.cpp
#combine_tessdata_LDFLAGS = -static
if USING_MULTIPLELIBS
combine_tessdata_LDADD = \
    ../ccutil/libtesseract_ccutil.la
else
combine_tessdata_LDADD = \
    ../api/libtesseract.la
endif
cntraining_SOURCES = cntraining.cpp
#cntraining_LDFLAGS = -static
cntraining_LDADD = \
    libtesseract_training.la \
    libtesseract_tessopt.la
if USING_MULTIPLELIBS
cntraining_LDADD += \
    ../textord/libtesseract_textord.la \
    ../classify/libtesseract_classify.la \
    ../dict/libtesseract_dict.la \
    ../ccstruct/libtesseract_ccstruct.la \
    ../cutil/libtesseract_cutil.la \
    ../viewer/libtesseract_viewer.la \
    ../ccmain/libtesseract_main.la \
    ../cube/libtesseract_cube.la \
    ../neural_networks/runtime/libtesseract_neural.la \
    ../wordrec/libtesseract_wordrec.la \
    ../ccutil/libtesseract_ccutil.la
else
cntraining_LDADD += \
    ../api/libtesseract.la
endif
dawg2wordlist_SOURCES = dawg2wordlist.cpp
#dawg2wordlist_LDFLAGS = -static
dawg2wordlist_LDADD = \
    libtesseract_tessopt.la
if USING_MULTIPLELIBS
dawg2wordlist_LDADD += \
    ../classify/libtesseract_classify.la \
    ../dict/libtesseract_dict.la \
    ../ccstruct/libtesseract_ccstruct.la \
    ../cutil/libtesseract_cutil.la \
    ../viewer/libtesseract_viewer.la \
    ../ccmain/libtesseract_main.la \
    ../cube/libtesseract_cube.la \
    ../neural_networks/runtime/libtesseract_neural.la \
    ../wordrec/libtesseract_wordrec.la \
    ../textord/libtesseract_textord.la \
    ../ccutil/libtesseract_ccutil.la
else
dawg2wordlist_LDADD += \
    ../api/libtesseract.la
endif
mftraining_SOURCES = mftraining.cpp mergenf.cpp
#mftraining_LDFLAGS = -static
mftraining_LDADD = \
    libtesseract_training.la \
    libtesseract_tessopt.la
if USING_MULTIPLELIBS
mftraining_LDADD += \
    ../textord/libtesseract_textord.la \
    ../classify/libtesseract_classify.la \
    ../dict/libtesseract_dict.la \
    ../ccstruct/libtesseract_ccstruct.la \
    ../cutil/libtesseract_cutil.la \
    ../viewer/libtesseract_viewer.la \
    ../ccmain/libtesseract_main.la \
    ../cube/libtesseract_cube.la \
    ../neural_networks/runtime/libtesseract_neural.la \
    ../wordrec/libtesseract_wordrec.la \
    ../ccutil/libtesseract_ccutil.la
else
mftraining_LDADD += \
    ../api/libtesseract.la
endif
set_unicharset_properties_SOURCES = set_unicharset_properties.cpp
#set_unicharset_properties_LDFLAGS = $(pkg-config --libs icu-uc)
# Also links the ICU libraries selected at the top of this file.
set_unicharset_properties_LDADD = \
    libtesseract_training.la \
    libtesseract_tessopt.la \
    $(libicu)
if USING_MULTIPLELIBS
set_unicharset_properties_LDADD += \
    ../textord/libtesseract_textord.la \
    ../classify/libtesseract_classify.la \
    ../dict/libtesseract_dict.la \
    ../ccstruct/libtesseract_ccstruct.la \
    ../cutil/libtesseract_cutil.la \
    ../viewer/libtesseract_viewer.la \
    ../ccmain/libtesseract_main.la \
    ../cube/libtesseract_cube.la \
    ../neural_networks/runtime/libtesseract_neural.la \
    ../wordrec/libtesseract_wordrec.la \
    ../ccutil/libtesseract_ccutil.la
else
set_unicharset_properties_LDADD += \
    ../api/libtesseract.la
endif
shapeclustering_SOURCES = shapeclustering.cpp
#shapeclustering_LDFLAGS = -static
shapeclustering_LDADD = \
    libtesseract_training.la \
    libtesseract_tessopt.la
if USING_MULTIPLELIBS
shapeclustering_LDADD += \
    ../textord/libtesseract_textord.la \
    ../classify/libtesseract_classify.la \
    ../dict/libtesseract_dict.la \
    ../ccstruct/libtesseract_ccstruct.la \
    ../cutil/libtesseract_cutil.la \
    ../viewer/libtesseract_viewer.la \
    ../ccmain/libtesseract_main.la \
    ../cube/libtesseract_cube.la \
    ../neural_networks/runtime/libtesseract_neural.la \
    ../wordrec/libtesseract_wordrec.la \
    ../ccutil/libtesseract_ccutil.la
else
shapeclustering_LDADD += \
    ../api/libtesseract.la
endif
text2image_SOURCES = text2image.cpp
#text2image_LDFLAGS = -static
text2image_LDADD = \
    libtesseract_training.la \
    libtesseract_tessopt.la
if USING_MULTIPLELIBS
text2image_LDADD += \
    ../textord/libtesseract_textord.la \
    ../classify/libtesseract_classify.la \
    ../dict/libtesseract_dict.la \
    ../ccstruct/libtesseract_ccstruct.la \
    ../cutil/libtesseract_cutil.la \
    ../viewer/libtesseract_viewer.la \
    ../ccmain/libtesseract_main.la \
    ../cube/libtesseract_cube.la \
    ../neural_networks/runtime/libtesseract_neural.la \
    ../wordrec/libtesseract_wordrec.la \
    ../ccutil/libtesseract_ccutil.la
else
text2image_LDADD += \
    ../api/libtesseract.la
endif
# text2image additionally needs ICU plus the Pango/Cairo rendering stack.
text2image_LDADD += $(libicu) -lpango-1.0 -lpangocairo-1.0 \
    -lgobject-2.0 -lglib-2.0 -lcairo -lpangoft2-1.0
unicharset_extractor_SOURCES = unicharset_extractor.cpp
#unicharset_extractor_LDFLAGS = -static
unicharset_extractor_LDADD = \
    libtesseract_tessopt.la
if USING_MULTIPLELIBS
unicharset_extractor_LDADD += \
    ../ccutil/libtesseract_ccutil.la \
    ../ccstruct/libtesseract_ccstruct.la
else
unicharset_extractor_LDADD += \
    ../api/libtesseract.la
endif
wordlist2dawg_SOURCES = wordlist2dawg.cpp
#wordlist2dawg_LDFLAGS = -static
wordlist2dawg_LDADD = \
    libtesseract_tessopt.la
if USING_MULTIPLELIBS
wordlist2dawg_LDADD += \
    ../classify/libtesseract_classify.la \
    ../dict/libtesseract_dict.la \
    ../ccstruct/libtesseract_ccstruct.la \
    ../cutil/libtesseract_cutil.la \
    ../viewer/libtesseract_viewer.la \
    ../ccmain/libtesseract_main.la \
    ../cube/libtesseract_cube.la \
    ../neural_networks/runtime/libtesseract_neural.la \
    ../wordrec/libtesseract_wordrec.la \
    ../textord/libtesseract_textord.la \
    ../ccutil/libtesseract_ccutil.la
else
wordlist2dawg_LDADD += \
    ../api/libtesseract.la
endif
# Windows: every binary links winsock, and the vs2010 port contributes a
# strcasestr implementation plus its header to the training library.
if T_WIN
ambiguous_words_LDADD += -lws2_32
classifier_tester_LDADD += -lws2_32
cntraining_LDADD += -lws2_32
combine_tessdata_LDADD += -lws2_32
dawg2wordlist_LDADD += -lws2_32
mftraining_LDADD += -lws2_32
set_unicharset_properties_LDADD += -lws2_32
shapeclustering_LDADD += -lws2_32
unicharset_extractor_LDADD += -lws2_32
text2image_LDADD += -lws2_32
wordlist2dawg_LDADD += -lws2_32
AM_CPPFLAGS += -I$(top_srcdir)/vs2010/port
noinst_HEADERS += ../vs2010/port/strcasestr.h
libtesseract_training_la_SOURCES += ../vs2010/port/strcasestr.cpp
endif
# Link the OpenCL runtime into every tool when OpenCL support is enabled.
if USE_OPENCL
ambiguous_words_LDADD += $(OPENCL_LIB)
classifier_tester_LDADD += $(OPENCL_LIB)
cntraining_LDADD += $(OPENCL_LIB)
combine_tessdata_LDADD += $(OPENCL_LIB)
dawg2wordlist_LDADD += $(OPENCL_LIB)
mftraining_LDADD += $(OPENCL_LIB)
set_unicharset_properties_LDADD += $(OPENCL_LIB)
shapeclustering_LDADD += $(OPENCL_LIB)
text2image_LDADD += $(OPENCL_LIB)
unicharset_extractor_LDADD += $(OPENCL_LIB)
wordlist2dawg_LDADD += $(OPENCL_LIB)
endif
| 1080228-arabicocr11 | training/Makefile.am | Makefile | asf20 | 9,784 |
/**********************************************************************
* File: boxchar.cpp
* Description: Simple class to associate a Tesseract classification unit with
* its bounding box so that the boxes can be rotated as the image
* is rotated for degradation. Also includes routines to output
* the character-tagged boxes to a boxfile.
* Author: Ray Smith
* Created: Mon Nov 18 2013
*
* (C) Copyright 2013, Google Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
**********************************************************************/
#include "boxchar.h"
#include <stddef.h>
#include "fileio.h"
#include "ndminx.h"
namespace tesseract {
// Takes a UTF-8 string (not necessarily NUL-terminated) of the given byte
// length; the bounding box starts out unset.
BoxChar::BoxChar(const char* utf8_str, int len)
    : ch_(utf8_str, len) {
  box_ = NULL;
}
// Releases the Leptonica box, if one was set. (boxDestroy tolerates a NULL
// box per Leptonica's documented semantics -- verify against the version in
// use.)
BoxChar::~BoxChar() {
  boxDestroy(&box_);
}
void BoxChar::AddBox(int x, int y, int width, int height) {
box_ = boxCreate(x, y, width, height);
}
/* static */
// Shifts every set (non-NULL) box in 'boxes' by (xshift, yshift) in place;
// entries whose box has not been set are skipped.
void BoxChar::TranslateBoxes(int xshift, int yshift,
                             vector<BoxChar*>* boxes) {
  // size_t index avoids the signed/unsigned comparison warning against
  // vector::size().
  for (size_t i = 0; i < boxes->size(); ++i) {
    BOX* box = (*boxes)[i]->box_;
    if (box != NULL) {
      box->x += xshift;
      box->y += yshift;
    }
  }
}
// Rotate the boxes in [start_box, end_box) by the given rotation.
// The rotation is in radians clockwise about the given center.
/* static */
void BoxChar::RotateBoxes(float rotation,
                          int xcenter,
                          int ycenter,
                          int start_box,
                          int end_box,
                          vector<BoxChar*>* boxes) {
  // Gather the set boxes into a Boxa so Leptonica can rotate them in one
  // call. L_CLONE inserts ref-counted handles rather than copies (per
  // Leptonica's clone semantics).
  Boxa* orig = boxaCreate(0);
  for (int i = start_box; i < end_box; ++i) {
    BOX* box = (*boxes)[i]->box_;
    if (box) boxaAddBox(orig, box, L_CLONE);
  }
  Boxa* rotated = boxaRotate(orig, xcenter, ycenter, rotation);
  boxaDestroy(&orig);
  // Copy the rotated boxes back in order. box_ind indexes 'rotated', which
  // only contains entries for the non-NULL boxes collected above, so it
  // advances only when a box is replaced.
  for (int i = start_box, box_ind = 0; i < end_box; ++i) {
    if ((*boxes)[i]->box_) {
      boxDestroy(&((*boxes)[i]->box_));
      (*boxes)[i]->box_ = boxaGetBox(rotated, box_ind++, L_CLONE);
    }
  }
  boxaDestroy(&rotated);
}
// Maximum length of one line emitted into a box file.
const int kMaxLineLength = 1024;

// Helper appends a tab box to the string to indicate a newline. We can't use
// an actual newline as the file format is line-based text.
static void AppendTabBox(const Box* box, int height, int page, string* output) {
  char line[kMaxLineLength];
  // The tab box sits immediately to the right of 'box' and is 10 pixels
  // wide; y coordinates are flipped to the bottom-left origin of box files.
  const int left = box->x + box->w;
  const int bottom = height - box->y - box->h;
  const int right = left + 10;
  const int top = height - box->y;
  const int nbytes = snprintf(line, kMaxLineLength, "\t %d %d %d %d %d\n",
                              left, bottom, right, top, page);
  output->append(line, nbytes);
}
/* static */
// Writes all the boxes to 'filename' in Tesseract box-file format:
// one "<utf8> <left> <bottom> <right> <top> <page>" line per character,
// with y flipped via 'height' to the bottom-left origin box files use.
// Inserts synthetic tab boxes at line wraps and space boxes between words.
// Dies on failure to write (via File::WriteStringToFileOrDie).
void BoxChar::WriteTesseractBoxFile(const string& filename, int height,
                                    const vector<BoxChar*>& boxes) {
  string output;
  char buffer[kMaxLineLength];
  for (int i = 0; i < boxes.size(); ++i) {
    const Box* box = boxes[i]->box_;
    if (box != NULL) {
      // A box whose right edge is left of its predecessor's left edge, on
      // the same page, means the text wrapped: emit a tab box first.
      if (i > 0 && boxes[i - 1]->box_ != NULL &&
          boxes[i - 1]->page_ == boxes[i]->page_ &&
          box->x + box->w < boxes[i - 1]->box_->x) {
        // We are on a newline. Output a tab character to indicate the newline.
        AppendTabBox(boxes[i - 1]->box_, height, boxes[i]->page_, &output);
      }
      int nbytes = snprintf(buffer, kMaxLineLength,
                            "%s %d %d %d %d %d\n",
                            boxes[i]->ch_.c_str(),
                            box->x, height - box->y - box->h,
                            box->x + box->w, height - box->y,
                            boxes[i]->page_);
      output.append(buffer, nbytes);
    } else if (i > 0 && boxes[i - 1]->box_ != NULL) {
      // A NULL box is a space (or run of spaces). Look ahead to the next
      // real box to decide whether the gap is a word space or a line wrap.
      int j = i + 1;
      // Find the next non-null box, as there may be multiple spaces.
      while (j < boxes.size() && boxes[j]->box_ == NULL) ++j;
      if (j < boxes.size() && boxes[i - 1]->page_ == boxes[j]->page_) {
        const Box* prev = boxes[i - 1]->box_;
        const Box* next = boxes[j]->box_;
        if (next->x + next->w < prev->x) {
          // We are on a newline. Output a tab character to indicate it.
          AppendTabBox(prev, height, boxes[j]->page_, &output);
        } else {
          // Space between words: span the horizontal gap between the
          // neighboring boxes and their combined vertical extent.
          int nbytes = snprintf(buffer, kMaxLineLength,
                                " %d %d %d %d %d\n",
                                prev->x + prev->w,
                                height - MAX(prev->y + prev->h,
                                             next->y + next->h),
                                next->x, height - MIN(prev->y, next->y),
                                boxes[i - 1]->page_);
          output.append(buffer, nbytes);
        }
      }
    }
  }
  File::WriteStringToFileOrDie(output, filename);
}
} // namespace tesseract
| 1080228-arabicocr11 | training/boxchar.cpp | C++ | asf20 | 5,290 |
/******************************************************************************
** Filename: MergeNF.c
** Purpose: Program for merging similar nano-feature protos
** Author: Dan Johnson
** History: Wed Nov 21 09:55:23 1990, DSJ, Created.
**
** (c) Copyright Hewlett-Packard Company, 1988.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
******************************************************************************/
#include "mergenf.h"
#include "host.h"
#include "efio.h"
#include "clusttool.h"
#include "cluster.h"
#include "oldlist.h"
#include "protos.h"
#include "ndminx.h"
#include "ocrfeatures.h"
#include "const.h"
#include "featdefs.h"
#include "intproto.h"
#include "params.h"
#include <stdio.h>
#include <string.h>
#include <math.h>
/*-------------------once in subfeat---------------------------------*/
// Tunable parameters used by SubfeatureEvidence/EvidenceOf below.
double_VAR(training_angle_match_scale, 1.0, "Angle Match Scale ...");
double_VAR(training_similarity_midpoint, 0.0075, "Similarity Midpoint ...");
double_VAR(training_similarity_curl, 2.0, "Similarity Curl ...");
/*-----------------------------once in fasttrain----------------------------------*/
// Padding parameters used by DummyFastMatch/ComputePaddedBoundingBox below.
double_VAR(training_tangent_bbox_pad, 0.5, "Tangent bounding box pad ...");
double_VAR(training_orthogonal_bbox_pad, 2.5, "Orthogonal bounding box pad ...");
double_VAR(training_angle_pad, 45.0, "Angle pad ...");
/**
 * Compare protos p1 and p2 and return an estimate of the
 * worst evidence rating that will result for any part of p1
 * that is compared to p2. In other words, if p1 were broken
 * into pico-features and each pico-feature was matched to p2,
 * what is the worst evidence rating that will be achieved for
 * any pico-feature.
 *
 * @param p1, p2 protos to be compared
 *
 * Globals: none
 *
 * @return Worst possible result when matching p1 to p2.
 * @note Exceptions: none
 * @note History: Mon Nov 26 08:27:53 1990, DSJ, Created.
 */
FLOAT32 CompareProtos(PROTO p1, PROTO p2) {
  FEATURE Feature;
  FLOAT32 WorstEvidence = WORST_EVIDENCE;
  FLOAT32 Evidence;
  FLOAT32 Angle, Length;

  /* if p1 and p2 are not close in length, don't let them match */
  Length = fabs(p1->Length - p2->Length);
  if (Length > MAX_LENGTH_MISMATCH)
    return (0.0);

  /* create a dummy pico-feature to be used for comparisons */
  Feature = NewFeature(&PicoFeatDesc);
  Feature->Params[PicoFeatDir] = p1->Angle;
  /* convert angle to radians */
  Angle = p1->Angle * 2.0 * PI;
  /* find distance from center of p1 to 1/2 picofeat from end */
  Length = p1->Length / 2.0 - GetPicoFeatureLength() / 2.0;
  if (Length < 0) Length = 0;

  /* Place the dummy pico-feature at each end of p1 in turn and match it to
     p2. The two iterations replace the previous copy-pasted blocks; the
     logic and cleanup are identical, just deduplicated. If either end fails
     the fast match, the protos cannot match at all. */
  for (int end = 0; end < 2; ++end) {
    const FLOAT32 Direction = (end == 0) ? 1.0 : -1.0;
    Feature->Params[PicoFeatX] = p1->X + Direction * cos(Angle) * Length;
    Feature->Params[PicoFeatY] = p1->Y + Direction * sin(Angle) * Length;
    if (!DummyFastMatch(Feature, p2)) {
      FreeFeature(Feature);
      return 0.0;
    }
    Evidence = SubfeatureEvidence(Feature, p2);
    if (Evidence < WorstEvidence)
      WorstEvidence = Evidence;
  }

  FreeFeature(Feature);
  return (WorstEvidence);
} /* CompareProtos */
/**
 * Computes a proto which is the weighted average of protos p1 and p2
 * and stores it in MergedProto.
 *
 * @param p1, p2 protos to be merged
 * @param w1, w2 weight of each proto
 * @param MergedProto place to put resulting merged proto
 *
 * Globals: none
 *
 * @return none (results are returned in MergedProto)
 */
void ComputeMergedProto (PROTO p1,
                         PROTO p2,
                         FLOAT32 w1,
                         FLOAT32 w2,
                         PROTO MergedProto) {
  // Normalize the two weights so they sum to 1.
  const FLOAT32 TotalWeight = w1 + w2;
  const FLOAT32 Frac1 = w1 / TotalWeight;
  const FLOAT32 Frac2 = w2 / TotalWeight;

  // Blend each geometric attribute by the normalized weights.
  MergedProto->X = p1->X * Frac1 + p2->X * Frac2;
  MergedProto->Y = p1->Y * Frac1 + p2->Y * Frac2;
  MergedProto->Length = p1->Length * Frac1 + p2->Length * Frac2;
  MergedProto->Angle = p1->Angle * Frac1 + p2->Angle * Frac2;

  // Recompute the derived A/B/C line coefficients for the new geometry.
  FillABC(MergedProto);
} /* ComputeMergedProto */
/**
 * Searches all prototypes in Class and returns the id of the proto that
 * would best approximate Prototype; NO_PROTO if nothing comes close
 * enough (better than WORST_MATCH_ALLOWED).
 *
 * @param Class class to search for matching old proto in
 * @param NumMerged # of protos merged into each proto of Class
 * @param Prototype new proto to find match for
 *
 * Globals: none
 *
 * @return Id of closest proto in Class or NO_PROTO.
 */
int FindClosestExistingProto(CLASS_TYPE Class, int NumMerged[],
                             PROTOTYPE *Prototype) {
  PROTO_STRUCT NewProto;
  PROTO_STRUCT MergedProto;

  MakeNewFromOld(&NewProto, Prototype);

  int BestProto = NO_PROTO;
  FLOAT32 BestMatch = WORST_MATCH_ALLOWED;
  for (int Pid = 0; Pid < Class->NumProtos; Pid++) {
    PROTO Proto = ProtoIn(Class, Pid);
    // Tentatively merge the candidate into this existing proto, weighting
    // the existing proto by how many protos it has already absorbed, then
    // score both originals against the merge result. The worse of the two
    // scores is the quality of this merge.
    ComputeMergedProto(Proto, &NewProto,
                       (FLOAT32) NumMerged[Pid], 1.0, &MergedProto);
    const FLOAT32 OldMatch = CompareProtos(Proto, &MergedProto);
    const FLOAT32 NewMatch = CompareProtos(&NewProto, &MergedProto);
    const FLOAT32 Match = MIN(OldMatch, NewMatch);
    if (Match > BestMatch) {
      BestProto = Pid;
      BestMatch = Match;
    }
  }
  return BestProto;
} /* FindClosestExistingProto */
/**
 * This fills in the fields of the New proto based on the
 * fields of the Old proto.
 *
 * @param New new proto to be filled in
 * @param Old old proto to be converted
 *
 * Globals: none
 *
 * Exceptions: none
 * History: Mon Nov 26 09:45:39 1990, DSJ, Created.
 */
void MakeNewFromOld(PROTO New, PROTOTYPE *Old) {
  // Extract position, length and orientation from the cluster mean vector.
  New->X = CenterX(Old->Mean);
  New->Y = CenterY(Old->Mean);
  New->Length = LengthOf(Old->Mean);
  New->Angle = OrientationOf(Old->Mean);
  // Recompute the derived A/B/C line coefficients from the geometry above.
  FillABC(New);
} /* MakeNewFromOld */
/*-------------------once in subfeat---------------------------------*/
/**
 * @name SubfeatureEvidence
 *
 * Returns the evidence that Feature matches Proto, combining the angular
 * mismatch with the perpendicular distance of the feature from the
 * proto's line.
 */
FLOAT32 SubfeatureEvidence(FEATURE Feature, PROTO Proto) {
  // Angle difference, wrapped into [-0.5, 0.5] (angles are expressed as
  // fractions of a full turn), then scaled by the tunable match scale.
  float AngleDelta = Proto->Angle - Feature->Params[PicoFeatDir];
  if (AngleDelta < -0.5) AngleDelta += 1.0;
  if (AngleDelta > 0.5) AngleDelta -= 1.0;
  AngleDelta *= training_angle_match_scale;

  // Signed distance of the feature point from the proto's line Ax + By + C.
  const float LineDistance = Proto->A * Feature->Params[PicoFeatX] +
                             Proto->B * Feature->Params[PicoFeatY] +
                             Proto->C;

  return EvidenceOf(LineDistance * LineDistance + AngleDelta * AngleDelta);
}
/**
 * @name EvidenceOf
 *
 * Return the new type of evidence number corresponding to this
 * distance value. This number is no longer based on the chi squared
 * approximation. The equation that represents the transform is:
 * 1 / (1 + (sim / midpoint) ^ curl)
 */
double EvidenceOf (double Similarity) {
  double Scaled = Similarity / training_similarity_midpoint;
  // Special-case the common integer exponents to avoid a pow() call.
  if (training_similarity_curl == 2)
    Scaled *= Scaled;
  else if (training_similarity_curl == 3)
    Scaled = Scaled * Scaled * Scaled;
  else
    Scaled = pow(Scaled, training_similarity_curl);
  return 1.0 / (1.0 + Scaled);
}
/**
 * Returns TRUE if Feature would be matched by a fast match table built
 * from Proto: its direction must lie within training_angle_pad degrees of
 * the proto's, and its position must fall inside the proto's padded
 * bounding box.
 *
 * @param Feature feature to be "fast matched" to proto
 * @param Proto proto being "fast matched" against
 *
 * Globals:
 * - training_tangent_bbox_pad bounding box pad tangent to proto
 * - training_orthogonal_bbox_pad bounding box pad orthogonal to proto
 *
 * @return TRUE if feature could match Proto.
 */
BOOL8 DummyFastMatch (
    FEATURE Feature,
    PROTO Proto)
{
  // Convert the angle pad from degrees to fractions of a full turn.
  const FLOAT32 MaxAngleError = training_angle_pad / 360.0;

  // Absolute angular difference, wrapped so it never exceeds half a turn.
  FLOAT32 AngleError = fabs(Proto->Angle - Feature->Params[PicoFeatDir]);
  if (AngleError > 0.5)
    AngleError = 1.0 - AngleError;
  if (AngleError > MaxAngleError)
    return FALSE;

  // Direction is acceptable; accept iff the feature position lies inside
  // the proto's padded bounding box.
  FRECT BoundingBox;
  ComputePaddedBoundingBox(Proto,
                           training_tangent_bbox_pad * GetPicoFeatureLength(),
                           training_orthogonal_bbox_pad * GetPicoFeatureLength(),
                           &BoundingBox);
  return PointInside(&BoundingBox, Feature->Params[PicoFeatX],
                     Feature->Params[PicoFeatY]);
} /* DummyFastMatch */
/**
 * Computes a bounding box that encloses the specified proto plus padding,
 * where the padding is given separately for the tangential and orthogonal
 * directions of the proto segment.
 *
 * @param Proto proto to compute bounding box for
 * @param TangentPad amount of pad to add in direction of segment
 * @param OrthogonalPad amount of pad to add orthogonal to segment
 * @param[out] BoundingBox place to put results
 *
 * Globals: none
 *
 * @return none (results are returned in BoundingBox)
 */
void ComputePaddedBoundingBox (PROTO Proto, FLOAT32 TangentPad,
                               FLOAT32 OrthogonalPad, FRECT *BoundingBox) {
  // Half the padded segment length, and the segment direction cosines
  // (angle is stored as a fraction of a full turn, hence * 2 * PI).
  const FLOAT32 HalfLength = Proto->Length / 2.0 + TangentPad;
  const FLOAT32 Angle = Proto->Angle * 2.0 * PI;
  const FLOAT32 CosOfAngle = fabs(cos(Angle));
  const FLOAT32 SinOfAngle = fabs(sin(Angle));

  // Horizontal extent: the larger of the segment's x-projection and the
  // orthogonal pad's x-projection.
  FLOAT32 Pad = MAX(CosOfAngle * HalfLength, SinOfAngle * OrthogonalPad);
  BoundingBox->MinX = Proto->X - Pad;
  BoundingBox->MaxX = Proto->X + Pad;

  // Vertical extent, computed the same way with the projections swapped.
  Pad = MAX(SinOfAngle * HalfLength, CosOfAngle * OrthogonalPad);
  BoundingBox->MinY = Proto->Y - Pad;
  BoundingBox->MaxY = Proto->Y + Pad;
} /* ComputePaddedBoundingBox */
/**
 * Return TRUE if point (X,Y) lies inside (or on the border of) Rectangle.
 *
 * Globals: none
 *
 * @return TRUE if point (X,Y) is inside of Rectangle.
 */
BOOL8 PointInside(FRECT *Rectangle, FLOAT32 X, FLOAT32 Y) {
  const bool inside = Rectangle->MinX <= X && X <= Rectangle->MaxX &&
                      Rectangle->MinY <= Y && Y <= Rectangle->MaxY;
  return inside ? TRUE : FALSE;
} /* PointInside */
| 1080228-arabicocr11 | training/mergenf.cpp | C++ | asf20 | 11,125 |