2007-03-08 04:03:40 +08:00
|
|
|
/* -*-C-*-
|
|
|
|
********************************************************************************
|
|
|
|
*
|
|
|
|
* File: trie.c (Formerly trie.c)
|
|
|
|
* Description: Functions to build a trie data structure.
|
|
|
|
* Author: Mark Seaman, OCR Technology
|
|
|
|
* Created: Fri Oct 16 14:37:00 1987
|
|
|
|
* Modified: Fri Jul 26 12:18:10 1991 (Mark Seaman) marks@hpgrlt
|
|
|
|
* Language: C
|
|
|
|
* Package: N/A
|
|
|
|
* Status: Reusable Software Component
|
|
|
|
*
|
|
|
|
* (c) Copyright 1987, Hewlett-Packard Company.
|
|
|
|
** Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
** you may not use this file except in compliance with the License.
|
|
|
|
** You may obtain a copy of the License at
|
|
|
|
** http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
** Unless required by applicable law or agreed to in writing, software
|
|
|
|
** distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
** See the License for the specific language governing permissions and
|
|
|
|
** limitations under the License.
|
|
|
|
*
|
|
|
|
*********************************************************************************/
|
|
|
|
/*----------------------------------------------------------------------
|
|
|
|
I n c l u d e s
|
|
|
|
----------------------------------------------------------------------*/
|
2010-07-22 02:11:00 +08:00
|
|
|
#ifdef _MSC_VER
|
|
|
|
#pragma warning(disable:4244) // Conversion warnings
|
|
|
|
#pragma warning(disable:4800) // int/bool warnings
|
|
|
|
#endif
|
2007-03-08 04:03:40 +08:00
|
|
|
#include "trie.h"
|
|
|
|
|
2009-07-11 10:20:33 +08:00
|
|
|
#include "callcpp.h"
|
|
|
|
#include "dawg.h"
|
|
|
|
#include "dict.h"
|
|
|
|
#include "freelist.h"
|
2010-11-24 02:34:14 +08:00
|
|
|
#include "genericvector.h"
|
2009-07-11 10:20:33 +08:00
|
|
|
#include "helpers.h"
|
2013-09-23 23:26:50 +08:00
|
|
|
#include "kdpair.h"
|
2009-07-11 10:20:33 +08:00
|
|
|
|
|
|
|
namespace tesseract {
|
|
|
|
|
2012-02-02 10:56:18 +08:00
|
|
|
// String values of the RTLReversePolicy enum, in enum order.
// NOTE(review): "RRP_DO_NO_REVERSE" (not "DO_NOT") is the literal value;
// it is presumably matched against config files, so do not "fix" the typo.
const char kDoNotReverse[] = "RRP_DO_NO_REVERSE";
const char kReverseIfHasRTL[] = "RRP_REVERSE_IF_HAS_RTL";
const char kForceReverse[] = "RRP_FORCE_REVERSE";

// Lookup table indexed by RTLReversePolicy; order must match the enum.
const char * const RTLReversePolicyNames[] = {
  kDoNotReverse,
  kReverseIfHasRTL,
  kForceReverse
};
|
|
|
|
|
2010-11-24 02:34:14 +08:00
|
|
|
// Unicode private-ish codepoints (U+2000..U+2005) used as sentinel
// "unichars" that stand for character classes in user patterns.
// initialize_patterns() inserts these into the unicharset and caches
// their ids in the corresponding *_pattern_ members.
const char Trie::kAlphaPatternUnicode[] = "\u2000";
const char Trie::kDigitPatternUnicode[] = "\u2001";
const char Trie::kAlphanumPatternUnicode[] = "\u2002";
const char Trie::kPuncPatternUnicode[] = "\u2003";
const char Trie::kLowerPatternUnicode[] = "\u2004";
const char Trie::kUpperPatternUnicode[] = "\u2005";
|
|
|
|
|
2012-02-02 10:56:18 +08:00
|
|
|
// Returns the string name of the given reversal policy.
// NOTE(review): no bounds check — callers are assumed to pass a valid
// RTLReversePolicy enum value.
const char *Trie::get_reverse_policy_name(RTLReversePolicy reverse_policy) {
  return RTLReversePolicyNames[reverse_policy];
}
|
|
|
|
|
2011-03-22 05:46:35 +08:00
|
|
|
// Reset the Trie to empty: frees all nodes and the root back-edge
// freelist, zeroes the edge count, then re-creates the mandatory
// root node (node 0).
void Trie::clear() {
  nodes_.delete_data_pointers();  // nodes_ owns its TRIE_NODE_RECORDs
  nodes_.clear();
  root_back_freelist_.clear();
  num_edges_ = 0;
  new_dawg_node();  // Need to allocate node 0.
}
|
|
|
|
|
2009-07-11 10:20:33 +08:00
|
|
|
// Searches node_ref's edge vector (forward or backward per direction) for
// an edge matching (next_node, word_end, unichar_id). On success stores a
// pointer to the edge record in *edge_ptr and its index in *edge_index and
// returns true; returns false if node_ref is NO_EDGE or no edge matches.
// The root node's forward edges are kept sorted (see add_edge_linkage), so
// that one case uses binary search; all other vectors use a linear scan.
bool Trie::edge_char_of(NODE_REF node_ref, NODE_REF next_node,
                        int direction, bool word_end, UNICHAR_ID unichar_id,
                        EDGE_RECORD **edge_ptr, EDGE_INDEX *edge_index) const {
  if (debug_level_ == 3) {
    tprintf("edge_char_of() given node_ref " REFFORMAT " next_node " REFFORMAT
            " direction %d word_end %d unichar_id %d, exploring node:\n",
            node_ref, next_node, direction, word_end, unichar_id);
    if (node_ref != NO_EDGE) {
      print_node(node_ref, nodes_[node_ref]->forward_edges.size());
    }
  }
  if (node_ref == NO_EDGE) return false;
  assert(node_ref < nodes_.size());
  EDGE_VECTOR &vec = (direction == FORWARD_EDGE) ?
    nodes_[node_ref]->forward_edges : nodes_[node_ref]->backward_edges;
  int vec_size = vec.size();
  if (node_ref == 0 && direction == FORWARD_EDGE) {  // binary search
    EDGE_INDEX start = 0;
    EDGE_INDEX end = vec_size - 1;
    EDGE_INDEX k;
    int compare;
    while (start <= end) {
      k = (start + end) >> 1;  // (start + end) / 2
      compare = given_greater_than_edge_rec(next_node, word_end,
                                            unichar_id, vec[k]);
      if (compare == 0) {  // given == vec[k]
        *edge_ptr = &(vec[k]);
        *edge_index = k;
        return true;
      } else if (compare == 1) {  // given > vec[k]
        start = k + 1;
      } else {  // given < vec[k]
        end = k - 1;
      }
    }
  } else {  // linear search
    for (int i = 0; i < vec_size; ++i) {
      EDGE_RECORD &edge_rec = vec[i];
      if (edge_rec_match(next_node, word_end, unichar_id,
                         next_node_from_edge_rec(edge_rec),
                         end_of_word_from_edge_rec(edge_rec),
                         unichar_id_from_edge_rec(edge_rec))) {
        *edge_ptr = &(edge_rec);
        *edge_index = i;
        return true;
      }
    }
  }
  return false;  // not found
}
|
|
|
|
|
2010-11-24 02:34:14 +08:00
|
|
|
// Adds an edge record from node1 to node2 in the given direction with the
// given flags and unichar_id, incrementing num_edges_. Always returns true.
// Special cases for the root node (node 0):
//  - forward edges are kept sorted (insertion position found by linear scan);
//  - backward edges reuse a slot from root_back_freelist_ when available,
//    avoiding an O(n) insert into the huge root back-edge list (see the
//    optimization notes above trie_to_dawg).
bool Trie::add_edge_linkage(NODE_REF node1, NODE_REF node2, bool marker_flag,
                            int direction, bool word_end,
                            UNICHAR_ID unichar_id) {
  EDGE_VECTOR *vec = (direction == FORWARD_EDGE) ?
    &(nodes_[node1]->forward_edges) : &(nodes_[node1]->backward_edges);
  int search_index;
  if (node1 == 0 && direction == FORWARD_EDGE) {
    search_index = 0;  // find the index to make the add sorted
    while (search_index < vec->size() &&
           given_greater_than_edge_rec(node2, word_end, unichar_id,
                                       (*vec)[search_index]) == 1) {
      search_index++;
    }
  } else {
    search_index = vec->size();  // add is unsorted, so index does not matter
  }
  EDGE_RECORD edge_rec;
  link_edge(&edge_rec, node2, marker_flag, direction, word_end, unichar_id);
  if (node1 == 0 && direction == BACKWARD_EDGE &&
      !root_back_freelist_.empty()) {
    // Reuse a previously killed slot in the root's backward edge vector.
    EDGE_INDEX edge_index = root_back_freelist_.pop_back();
    (*vec)[edge_index] = edge_rec;
  } else if (search_index < vec->size()) {
    vec->insert(edge_rec, search_index);
  } else {
    vec->push_back(edge_rec);
  }
  if (debug_level_ > 1) {
    tprintf("new edge in nodes_[" REFFORMAT "]: ", node1);
    print_edge_rec(edge_rec);
    tprintf("\n");
  }
  num_edges_++;
  return true;
}
|
2007-03-08 04:03:40 +08:00
|
|
|
|
2009-07-11 10:20:33 +08:00
|
|
|
// Marks the given forward edge (edge_ptr, ending at the_next_node) and its
// matching backward edge as end-of-word, optionally setting the repetition
// marker flag on both. The backward edge must exist (asserted).
void Trie::add_word_ending(EDGE_RECORD *edge_ptr,
                           NODE_REF the_next_node,
                           bool marker_flag,
                           UNICHAR_ID unichar_id) {
  EDGE_RECORD *back_edge_ptr;
  EDGE_INDEX back_edge_index;
  ASSERT_HOST(edge_char_of(the_next_node, NO_EDGE, BACKWARD_EDGE, false,
                           unichar_id, &back_edge_ptr, &back_edge_index));
  if (marker_flag) {
    *back_edge_ptr |= (MARKER_FLAG << flag_start_bit_);
    *edge_ptr |= (MARKER_FLAG << flag_start_bit_);
  }
  // Mark both directions as end of word.
  *back_edge_ptr |= (WERD_END_FLAG << flag_start_bit_);
  *edge_ptr |= (WERD_END_FLAG << flag_start_bit_);
}
|
2007-03-08 04:03:40 +08:00
|
|
|
|
2012-02-02 10:56:18 +08:00
|
|
|
// Adds word to the trie, following existing edges for the longest matching
// prefix and creating new nodes/edges for the remainder. If repetitions is
// non-NULL it must be parallel to the word and marks unichars that may
// repeat (the marker flag is set on those edges). Returns false (and resets
// the whole trie via clear()) if the word is empty, contains out-of-range
// unichar ids, or node/edge creation fails.
bool Trie::add_word_to_dawg(const WERD_CHOICE &word,
                            const GenericVector<bool> *repetitions) {
  if (word.length() <= 0) return false;  // can't add empty words
  if (repetitions != NULL) ASSERT_HOST(repetitions->size() == word.length());
  // Make sure the word does not contain invalid unchar ids.
  for (int i = 0; i < word.length(); ++i) {
    if (word.unichar_id(i) < 0 ||
        word.unichar_id(i) >= unicharset_size_) return false;
  }

  EDGE_RECORD *edge_ptr;
  NODE_REF last_node = 0;        // node reached by the prefix added so far
  NODE_REF the_next_node;
  bool marker_flag = false;
  EDGE_INDEX edge_index;
  int i;
  inT32 still_finding_chars = true;  // still walking an existing prefix
  inT32 word_end = false;
  bool add_failed = false;
  bool found;

  if (debug_level_ > 1) word.print("\nAdding word: ");

  UNICHAR_ID unichar_id;
  // Process all but the last unichar; the last one carries the word-end
  // flag and is handled separately below.
  for (i = 0; i < word.length() - 1; ++i) {
    unichar_id = word.unichar_id(i);
    marker_flag = (repetitions != NULL) ? (*repetitions)[i] : false;
    if (debug_level_ > 1) tprintf("Adding letter %d\n", unichar_id);
    if (still_finding_chars) {
      found = edge_char_of(last_node, NO_EDGE, FORWARD_EDGE, word_end,
                           unichar_id, &edge_ptr, &edge_index);
      if (found && debug_level_ > 1) {
        tprintf("exploring edge " REFFORMAT " in node " REFFORMAT "\n",
                edge_index, last_node);
      }
      if (!found) {
        still_finding_chars = false;
      } else if (next_node_from_edge_rec(*edge_ptr) == 0) {
        // We hit the end of an existing word, but the new word is longer.
        // In this case we have to disconnect the existing word from the
        // backwards root node, mark the current position as end-of-word
        // and add new nodes for the increased length. Disconnecting the
        // existing word from the backwards root node requires a linear
        // search, so it is much faster to add the longest words first,
        // to avoid having to come here.
        word_end = true;
        still_finding_chars = false;
        remove_edge(last_node, 0, word_end, unichar_id);
      } else {
        // We have to add a new branch here for the new word.
        if (marker_flag) set_marker_flag_in_edge_rec(edge_ptr);
        last_node = next_node_from_edge_rec(*edge_ptr);
      }
    }
    if (!still_finding_chars) {
      // Past the shared prefix: create a fresh node and link it in.
      the_next_node = new_dawg_node();
      if (debug_level_ > 1)
        tprintf("adding node " REFFORMAT "\n", the_next_node);
      if (the_next_node == 0) {
        add_failed = true;
        break;
      }
      if (!add_new_edge(last_node, the_next_node,
                        marker_flag, word_end, unichar_id)) {
        add_failed = true;
        break;
      }
      word_end = false;
      last_node = the_next_node;
    }
  }
  // Handle the last unichar of the word.
  the_next_node = 0;
  unichar_id = word.unichar_id(i);
  marker_flag = (repetitions != NULL) ? (*repetitions)[i] : false;
  if (debug_level_ > 1) tprintf("Adding letter %d\n", unichar_id);
  if (still_finding_chars &&
      edge_char_of(last_node, NO_EDGE, FORWARD_EDGE, false,
                   unichar_id, &edge_ptr, &edge_index)) {
    // An extension of this word already exists in the trie, so we
    // only have to add the ending flags in both directions.
    add_word_ending(edge_ptr, next_node_from_edge_rec(*edge_ptr),
                    marker_flag, unichar_id);
  } else {
    // Add a link to node 0. All leaves connect to node 0 so the back links can
    // be used in reduction to a dawg. This root backward node has one edge
    // entry for every word, (except prefixes of longer words) so it is huge.
    if (!add_failed &&
        !add_new_edge(last_node, the_next_node, marker_flag, true, unichar_id))
      add_failed = true;
  }
  if (add_failed) {
    tprintf("Re-initializing document dictionary...\n");
    clear();
    return false;
  } else {
    return true;
  }
}
|
|
|
|
|
2009-07-11 10:20:33 +08:00
|
|
|
// Allocates a fresh, empty trie node, appends it to nodes_ (which owns
// the pointer) and returns its index as the new node's NODE_REF.
NODE_REF Trie::new_dawg_node() {
  nodes_.push_back(new TRIE_NODE_RECORD());
  return nodes_.size() - 1;
}
|
|
|
|
|
2013-09-23 23:26:50 +08:00
|
|
|
// Sort function to sort words by decreasing order of length.
|
|
|
|
static int sort_strings_by_dec_length(const void* v1, const void* v2) {
|
|
|
|
const STRING* s1 = reinterpret_cast<const STRING*>(v1);
|
|
|
|
const STRING* s2 = reinterpret_cast<const STRING*>(v2);
|
|
|
|
return s2->length() - s1->length();
|
|
|
|
}
|
|
|
|
|
|
|
|
// Reads a word list from filename (applying the RTL reversal policy),
// sorts it by decreasing word length (fast path for trie construction —
// see notes above trie_to_dawg), and adds every word to this trie.
// Returns false if the file cannot be read or any word fails to insert.
bool Trie::read_and_add_word_list(const char *filename,
                                  const UNICHARSET &unicharset,
                                  Trie::RTLReversePolicy reverse_policy) {
  GenericVector<STRING> word_list;
  if (!read_word_list(filename, unicharset, reverse_policy, &word_list))
    return false;
  word_list.sort(sort_strings_by_dec_length);
  return add_word_list(word_list, unicharset);
}
|
|
|
|
|
2009-07-11 10:20:33 +08:00
|
|
|
// Reads one word per line from filename into *words, encoding each line
// with the given unicharset and applying the RTL reversal policy.
// Lines that are empty or contain characters not in the unicharset are
// skipped (logged at debug_level_). Returns false only if the file
// cannot be opened.
// NOTE(review): lines are truncated at CHARS_PER_LINE by fgets; longer
// lines would be split — assumed not to occur in practice.
bool Trie::read_word_list(const char *filename,
                          const UNICHARSET &unicharset,
                          Trie::RTLReversePolicy reverse_policy,
                          GenericVector<STRING>* words) {
  FILE *word_file;
  char string[CHARS_PER_LINE];
  int word_count = 0;

  word_file = fopen(filename, "rb");
  if (word_file == NULL) return false;

  while (fgets(string, CHARS_PER_LINE, word_file) != NULL) {
    chomp_string(string);  // remove newline
    WERD_CHOICE word(string, unicharset);
    // Reverse (and mirror) the unichar ids when the policy requires it.
    if ((reverse_policy == RRP_REVERSE_IF_HAS_RTL &&
         word.has_rtl_unichar_id()) ||
        reverse_policy == RRP_FORCE_REVERSE) {
      word.reverse_and_mirror_unichar_ids();
    }
    ++word_count;
    if (debug_level_ && word_count % 10000 == 0)
      tprintf("Read %d words so far\n", word_count);
    if (word.length() != 0 && !word.contains_unichar_id(INVALID_UNICHAR_ID)) {
      words->push_back(word.unichar_string());
    } else if (debug_level_) {
      tprintf("Skipping invalid word %s\n", string);
      if (debug_level_ >= 3) word.print();
    }
  }
  if (debug_level_)
    tprintf("Read %d words total.\n", word_count);
  fclose(word_file);
  return true;
}
|
|
|
|
|
2013-09-23 23:26:50 +08:00
|
|
|
// Adds every word in words to the trie (skipping words already present).
// Returns false as soon as a word fails to verify as present after
// insertion; returns true if all words are added successfully.
bool Trie::add_word_list(const GenericVector<STRING>& words,
                         const UNICHARSET &unicharset) {
  for (int i = 0; i < words.size(); ++i) {
    WERD_CHOICE word(words[i].string(), unicharset);
    if (!word_in_dawg(word)) {
      add_word_to_dawg(word);
      // Verify the insertion actually took effect.
      if (!word_in_dawg(word)) {
        tprintf("Error: word '%s' not in DAWG after adding it\n",
                words[i].string());
        return false;
      }
    }
  }
  return true;
}
|
|
|
|
|
2010-11-24 02:34:14 +08:00
|
|
|
// Inserts the six character-class sentinel unichars into *unicharset and
// caches their ids in the *_pattern_ members. Must be called before
// read_pattern_list(). Also records the (now enlarged) unicharset size
// used to validate unichar ids in add_word_to_dawg().
void Trie::initialize_patterns(UNICHARSET *unicharset) {
  unicharset->unichar_insert(kAlphaPatternUnicode);
  alpha_pattern_ = unicharset->unichar_to_id(kAlphaPatternUnicode);
  unicharset->unichar_insert(kDigitPatternUnicode);
  digit_pattern_ = unicharset->unichar_to_id(kDigitPatternUnicode);
  unicharset->unichar_insert(kAlphanumPatternUnicode);
  alphanum_pattern_ = unicharset->unichar_to_id(kAlphanumPatternUnicode);
  unicharset->unichar_insert(kPuncPatternUnicode);
  punc_pattern_ = unicharset->unichar_to_id(kPuncPatternUnicode);
  unicharset->unichar_insert(kLowerPatternUnicode);
  lower_pattern_ = unicharset->unichar_to_id(kLowerPatternUnicode);
  unicharset->unichar_insert(kUpperPatternUnicode);
  upper_pattern_ = unicharset->unichar_to_id(kUpperPatternUnicode);
  initialized_patterns_ = true;
  unicharset_size_ = unicharset->size();
}
|
|
|
|
|
|
|
|
// Appends to *vec the pattern unichar ids whose character class matches
// unichar_id, as classified by unicharset (alpha implies alphanum plus
// lower/upper; digit implies alphanum; punctuation adds punc).
// A unichar may match several classes, so several ids can be appended.
void Trie::unichar_id_to_patterns(UNICHAR_ID unichar_id,
                                  const UNICHARSET &unicharset,
                                  GenericVector<UNICHAR_ID> *vec) const {
  bool is_alpha = unicharset.get_isalpha(unichar_id);
  if (is_alpha) {
    vec->push_back(alpha_pattern_);
    vec->push_back(alphanum_pattern_);
    if (unicharset.get_islower(unichar_id)) {
      vec->push_back(lower_pattern_);
    } else if (unicharset.get_isupper(unichar_id)) {
      vec->push_back(upper_pattern_);
    }
  }
  if (unicharset.get_isdigit(unichar_id)) {
    vec->push_back(digit_pattern_);
    // Avoid appending alphanum twice for chars that are alpha AND digit.
    if (!is_alpha) vec->push_back(alphanum_pattern_);
  }
  if (unicharset.get_ispunctuation(unichar_id)) {
    vec->push_back(punc_pattern_);
  }
}
|
|
|
|
|
|
|
|
// Maps a user-pattern character-class letter to the corresponding cached
// pattern unichar id:
//   'c' alpha, 'd' digit, 'n' alphanumeric, 'p' punctuation,
//   'a' lowercase, 'A' uppercase.
// Any other character yields INVALID_UNICHAR_ID.
UNICHAR_ID Trie::character_class_to_pattern(char ch) {
  switch (ch) {
    case 'c':
      return alpha_pattern_;
    case 'd':
      return digit_pattern_;
    case 'n':
      return alphanum_pattern_;
    case 'p':
      return punc_pattern_;
    case 'a':
      return lower_pattern_;
    case 'A':
      return upper_pattern_;
    default:
      return INVALID_UNICHAR_ID;
  }
}
|
|
|
|
|
|
|
|
// Reads user patterns (one per line) from filename and inserts each as a
// word into the trie, with a parallel repetition vector. Pattern syntax:
// literal unichars, backslash-escaped character classes (\c \d \n \p \a \A,
// see character_class_to_pattern), "\\" for a literal backslash, and a
// trailing "\*" marking the preceding unichar as repeatable. A pattern must
// start with at least kSaneNumConcreteChars concrete characters before any
// class escape. Invalid patterns are logged and skipped.
// Requires initialize_patterns() to have been called; returns false if it
// was not or the file cannot be opened.
bool Trie::read_pattern_list(const char *filename,
                             const UNICHARSET &unicharset) {
  if (!initialized_patterns_) {
    tprintf("please call initialize_patterns() before read_pattern_list()\n");
    return false;
  }

  FILE *pattern_file = fopen(filename, "rb");
  if (pattern_file == NULL) {
    tprintf("Error opening pattern file %s\n", filename);
    return false;
  }

  int pattern_count = 0;
  char string[CHARS_PER_LINE];
  while (fgets(string, CHARS_PER_LINE, pattern_file) != NULL) {
    chomp_string(string);  // remove newline
    // Parse the pattern and construct a unichar id vector.
    // Record the number of repetitions of each unichar in the parallel vector.
    WERD_CHOICE word(&unicharset);
    GenericVector<bool> repetitions_vec;
    const char *str_ptr = string;
    int step = unicharset.step(str_ptr);  // bytes in next unichar; 0 = invalid
    bool failed = false;
    while (step > 0) {
      UNICHAR_ID curr_unichar_id = INVALID_UNICHAR_ID;
      if (step == 1 && *str_ptr == '\\') {
        ++str_ptr;
        if (*str_ptr == '\\') {  // regular '\' unichar that was escaped
          curr_unichar_id = unicharset.unichar_to_id(str_ptr, step);
        } else {
          if (word.length() < kSaneNumConcreteChars) {
            tprintf("Please provide at least %d concrete characters at the"
                    " beginning of the pattern\n", kSaneNumConcreteChars);
            failed = true;
            break;
          }
          // Parse character class from expression.
          curr_unichar_id = character_class_to_pattern(*str_ptr);
        }
      } else {
        curr_unichar_id = unicharset.unichar_to_id(str_ptr, step);
      }
      if (curr_unichar_id == INVALID_UNICHAR_ID) {
        failed = true;
        break;  // failed to parse this pattern
      }
      word.append_unichar_id(curr_unichar_id, 1, 0.0, 0.0);
      repetitions_vec.push_back(false);
      str_ptr += step;
      step = unicharset.step(str_ptr);
      // Check if there is a repetition pattern specified after this unichar.
      if (step == 1 && *str_ptr == '\\' && *(str_ptr+1) == '*') {
        repetitions_vec[repetitions_vec.size()-1] = true;
        str_ptr += 2;
        step = unicharset.step(str_ptr);
      }
    }
    if (failed) {
      tprintf("Invalid user pattern %s\n", string);
      continue;
    }
    // Insert the pattern into the trie.
    if (debug_level_ > 2) {
      tprintf("Inserting expanded user pattern %s\n",
              word.debug_string().string());
    }
    if (!this->word_in_dawg(word)) {
      this->add_word_to_dawg(word, &repetitions_vec);
      if (!this->word_in_dawg(word)) {
        tprintf("Error: failed to insert pattern '%s'\n", string);
      }
    }
    ++pattern_count;
  }
  if (debug_level_) {
    tprintf("Read %d valid patterns from %s\n", pattern_count, filename);
  }
  fclose(pattern_file);
  return true;
}
|
|
|
|
|
2009-07-11 10:20:33 +08:00
|
|
|
// Removes the edge from node1 to node2 in the given direction (the edge
// must exist — asserted) and decrements num_edges_. Backward edges on the
// root node are not physically removed: they are zeroed with KillEdge()
// and their slot is pushed on root_back_freelist_ for reuse, because
// GenericVector::remove on the huge root back-edge vector would be O(n)
// (see the optimization notes above trie_to_dawg).
void Trie::remove_edge_linkage(NODE_REF node1, NODE_REF node2, int direction,
                               bool word_end, UNICHAR_ID unichar_id) {
  EDGE_RECORD *edge_ptr = NULL;
  EDGE_INDEX edge_index = 0;
  ASSERT_HOST(edge_char_of(node1, node2, direction, word_end,
                           unichar_id, &edge_ptr, &edge_index));
  if (debug_level_ > 1) {
    tprintf("removed edge in nodes_[" REFFORMAT "]: ", node1);
    print_edge_rec(*edge_ptr);
    tprintf("\n");
  }
  if (direction == FORWARD_EDGE) {
    nodes_[node1]->forward_edges.remove(edge_index);
  } else if (node1 == 0) {
    KillEdge(&nodes_[node1]->backward_edges[edge_index]);
    root_back_freelist_.push_back(edge_index);
  } else {
    nodes_[node1]->backward_edges.remove(edge_index);
  }
  --num_edges_;
}
|
|
|
|
|
2013-09-23 23:26:50 +08:00
|
|
|
// Some optimizations employed in add_word_to_dawg and trie_to_dawg:
|
|
|
|
// 1 Avoid insertion sorting or bubble sorting the tail root node
|
|
|
|
// (back links on node 0, a list of all the leaves.). The node is
|
|
|
|
// huge, and sorting it with n^2 time is terrible.
|
|
|
|
// 2 Avoid using GenericVector::remove on the tail root node.
|
|
|
|
// (a) During add of words to the trie, zero-out the unichars and
|
|
|
|
// keep a freelist of spaces to re-use.
|
|
|
|
// (b) During reduction, just zero-out the unichars of deleted back
|
|
|
|
// links, skipping zero entries while searching.
|
|
|
|
// 3 Avoid linear search of the tail root node. This has to be done when
|
|
|
|
// a suffix is added to an existing word. Adding words by decreasing
|
|
|
|
// length avoids this problem entirely. Words can still be added in
|
|
|
|
// any order, but it is faster to add the longest first.
|
2009-07-11 10:20:33 +08:00
|
|
|
// Reduces this trie to a DAWG and converts it to a newly-allocated
// SquishedDawg (caller takes ownership). Reduction merges redundant
// suffix nodes starting from the root's backward edges; the packed
// EDGE_ARRAY then contains only forward edges of non-empty nodes, with
// node references remapped to their positions in the array.
SquishedDawg *Trie::trie_to_dawg() {
  root_back_freelist_.clear();  // Will be invalided by trie_to_dawg.
  if (debug_level_ > 2) {
    print_all("Before reduction:", MAX_NODE_EDGES_DISPLAY);
  }
  // Per-node "already reduced" markers, all initially clear.
  NODE_MARKER reduced_nodes = new bool[nodes_.size()];
  for (int i = 0; i < nodes_.size(); i++) reduced_nodes[i] = 0;
  this->reduce_node_input(0, reduced_nodes);
  delete[] reduced_nodes;

  if (debug_level_ > 2) {
    print_all("After reduction:", MAX_NODE_EDGES_DISPLAY);
  }
  // Build a translation map from node indices in nodes_ vector to
  // their target indices in EDGE_ARRAY.
  NODE_REF *node_ref_map = new NODE_REF[nodes_.size() + 1];
  int i, j;
  node_ref_map[0] = 0;
  for (i = 0; i < nodes_.size(); ++i) {
    node_ref_map[i+1] = node_ref_map[i] + nodes_[i]->forward_edges.size();
  }
  int num_forward_edges = node_ref_map[i];

  // Convert nodes_ vector into EDGE_ARRAY translating the next node references
  // in edges using node_ref_map. Empty nodes and backward edges are dropped.
  // NOTE(review): memalloc is the project's C-style allocator; the buffer's
  // ownership passes to the SquishedDawg below — presumably freed there.
  EDGE_ARRAY edge_array =
    (EDGE_ARRAY)memalloc(num_forward_edges * sizeof(EDGE_RECORD));
  EDGE_ARRAY edge_array_ptr = edge_array;
  for (i = 0; i < nodes_.size(); ++i) {
    TRIE_NODE_RECORD *node_ptr = nodes_[i];
    int end = node_ptr->forward_edges.size();
    for (j = 0; j < end; ++j) {
      EDGE_RECORD &edge_rec = node_ptr->forward_edges[j];
      NODE_REF node_ref = next_node_from_edge_rec(edge_rec);
      ASSERT_HOST(node_ref < nodes_.size());
      UNICHAR_ID unichar_id = unichar_id_from_edge_rec(edge_rec);
      link_edge(edge_array_ptr, node_ref_map[node_ref], false, FORWARD_EDGE,
                end_of_word_from_edge_rec(edge_rec), unichar_id);
      // The last edge of each node gets the marker flag in the packed form.
      if (j == end - 1) set_marker_flag_in_edge_rec(edge_array_ptr);
      ++edge_array_ptr;
    }
  }
  delete[] node_ref_map;

  return new SquishedDawg(edge_array, num_forward_edges, type_, lang_,
                          perm_, unicharset_size_, debug_level_);
}
|
|
|
|
|
2009-07-11 10:20:33 +08:00
|
|
|
// Merges the subtries reached by edge1 and edge2 out of node: every edge
// going to/from edge2's target (next_node2) is redirected to edge1's
// target (next_node1), then next_node2's edge vectors are emptied. The
// backward link from node to next_node2 itself is zeroed by the caller
// (reduce_lettered_edges). Always returns true.
bool Trie::eliminate_redundant_edges(NODE_REF node,
                                     const EDGE_RECORD &edge1,
                                     const EDGE_RECORD &edge2) {
  if (debug_level_ > 1) {
    tprintf("\nCollapsing node %d:\n", node);
    print_node(node, MAX_NODE_EDGES_DISPLAY);
    tprintf("Candidate edges: ");
    print_edge_rec(edge1);
    tprintf(", ");
    print_edge_rec(edge2);
    tprintf("\n\n");
  }
  NODE_REF next_node1 = next_node_from_edge_rec(edge1);
  NODE_REF next_node2 = next_node_from_edge_rec(edge2);
  TRIE_NODE_RECORD *next_node2_ptr = nodes_[next_node2];
  // Translate all edges going to/from next_node2 to go to/from next_node1.
  EDGE_RECORD *edge_ptr = NULL;
  EDGE_INDEX edge_index;
  int i;
  // The backward link in node to next_node2 will be zeroed out by the caller.
  // Copy all the backward links in next_node2 to node next_node1
  for (i = 0; i < next_node2_ptr->backward_edges.size(); ++i) {
    const EDGE_RECORD &bkw_edge = next_node2_ptr->backward_edges[i];
    NODE_REF curr_next_node = next_node_from_edge_rec(bkw_edge);
    UNICHAR_ID curr_unichar_id = unichar_id_from_edge_rec(bkw_edge);
    int curr_word_end = end_of_word_from_edge_rec(bkw_edge);
    bool marker_flag = marker_flag_from_edge_rec(bkw_edge);
    add_edge_linkage(next_node1, curr_next_node, marker_flag, BACKWARD_EDGE,
                     curr_word_end, curr_unichar_id);
    // Relocate the corresponding forward edge in curr_next_node
    ASSERT_HOST(edge_char_of(curr_next_node, next_node2, FORWARD_EDGE,
                             curr_word_end, curr_unichar_id,
                             &edge_ptr, &edge_index));
    set_next_node_in_edge_rec(edge_ptr, next_node1);
  }
  int next_node2_num_edges = (next_node2_ptr->forward_edges.size() +
                              next_node2_ptr->backward_edges.size());
  if (debug_level_ > 1) {
    tprintf("removed %d edges from node " REFFORMAT "\n",
            next_node2_num_edges, next_node2);
  }
  // Empty next_node2; its records are now represented by next_node1.
  next_node2_ptr->forward_edges.clear();
  next_node2_ptr->backward_edges.clear();
  num_edges_ -= next_node2_num_edges;
  return true;
}
|
2007-03-08 04:03:40 +08:00
|
|
|
|
2009-07-11 10:20:33 +08:00
|
|
|
// Scans node's backward edges from edge_index over the run of edges that
// carry the given unichar_id (the vector is sorted by unichar id, with
// dead/zeroed entries interspersed) and merges pairs of eliminable edges
// with matching end-of-word flags via eliminate_redundant_edges(). Merged
// edges are zeroed with KillEdge() rather than removed (see optimization
// notes above trie_to_dawg). Returns true if any merge was performed, so
// the caller can iterate until a fixed point.
bool Trie::reduce_lettered_edges(EDGE_INDEX edge_index,
                                 UNICHAR_ID unichar_id,
                                 NODE_REF node,
                                 EDGE_VECTOR* backward_edges,
                                 NODE_MARKER reduced_nodes) {
  if (debug_level_ > 1)
    tprintf("reduce_lettered_edges(edge=" REFFORMAT ")\n", edge_index);
  // Compare each of the edge pairs with the given unichar_id.
  bool did_something = false;
  for (int i = edge_index; i < backward_edges->size() - 1; ++i) {
    // Find the first edge that can be eliminated.
    UNICHAR_ID curr_unichar_id = INVALID_UNICHAR_ID;
    while (i < backward_edges->size()) {
      if (!DeadEdge((*backward_edges)[i])) {
        curr_unichar_id = unichar_id_from_edge_rec((*backward_edges)[i]);
        // Sorted order: a different id means the run for unichar_id ended.
        if (curr_unichar_id != unichar_id) return did_something;
        if (can_be_eliminated((*backward_edges)[i])) break;
      }
      ++i;
    }
    if (i == backward_edges->size()) break;
    const EDGE_RECORD &edge_rec = (*backward_edges)[i];
    // Compare it to the rest of the edges with the given unichar_id.
    for (int j = i + 1; j < backward_edges->size(); ++j) {
      const EDGE_RECORD &next_edge_rec = (*backward_edges)[j];
      if (DeadEdge(next_edge_rec)) continue;
      UNICHAR_ID next_id = unichar_id_from_edge_rec(next_edge_rec);
      if (next_id != unichar_id) break;
      if (end_of_word_from_edge_rec(next_edge_rec) ==
          end_of_word_from_edge_rec(edge_rec) &&
          can_be_eliminated(next_edge_rec) &&
          eliminate_redundant_edges(node, edge_rec, next_edge_rec)) {
        // The merge target must be re-examined, so clear its reduced mark.
        reduced_nodes[next_node_from_edge_rec(edge_rec)] = 0;
        did_something = true;
        KillEdge(&(*backward_edges)[j]);
      }
    }
  }
  return did_something;
}
|
|
|
|
|
2009-07-11 10:20:33 +08:00
|
|
|
// Sorts the given edge vector in increasing order of unichar id, so that
// edges carrying the same character end up adjacent to each other.
void Trie::sort_edges(EDGE_VECTOR *edges) {
  const int num_edges = edges->size();
  if (num_edges <= 1) return;  // already trivially sorted
  // Key each edge record by its unichar id; sorting the (key, record)
  // pairs then orders the records by character.
  GenericVector<KDPairInc<UNICHAR_ID, EDGE_RECORD> > keyed_edges;
  keyed_edges.reserve(num_edges);
  for (int e = 0; e < num_edges; ++e) {
    const EDGE_RECORD &rec = (*edges)[e];
    keyed_edges.push_back(KDPairInc<UNICHAR_ID, EDGE_RECORD>(
        unichar_id_from_edge_rec(rec), rec));
  }
  keyed_edges.sort();
  // Write the records back into the original vector in sorted order.
  for (int e = 0; e < num_edges; ++e)
    (*edges)[e] = keyed_edges[e].data;
}
|
2007-07-18 09:05:40 +08:00
|
|
|
|
2009-07-11 10:20:33 +08:00
|
|
|
// Reduces the edges of the given node: sorts its backward edges, merges
// redundant edges with equal unichar ids via reduce_lettered_edges(), and
// recurses into every not-yet-reduced node reachable through the surviving
// backward edges. reduced_nodes marks nodes already processed so each node
// is reduced at most once.
void Trie::reduce_node_input(NODE_REF node,
                             NODE_MARKER reduced_nodes) {
  EDGE_VECTOR &backward_edges = nodes_[node]->backward_edges;
  sort_edges(&backward_edges);
  if (debug_level_ > 1) {
    tprintf("reduce_node_input(node=" REFFORMAT ")\n", node);
    print_node(node, MAX_NODE_EDGES_DISPLAY);
  }

  EDGE_INDEX edge_index = 0;
  while (edge_index < backward_edges.size()) {
    // BUG FIX: the original `if (DeadEdge(...)) continue;` never advanced
    // edge_index, so a dead edge at the top of this loop spun forever.
    // Advance past dead edges before continuing.
    if (DeadEdge(backward_edges[edge_index])) {
      ++edge_index;
      continue;
    }
    UNICHAR_ID unichar_id =
      unichar_id_from_edge_rec(backward_edges[edge_index]);
    // Repeatedly merge edges for this unichar_id until a full pass makes
    // no further change.
    while (reduce_lettered_edges(edge_index, unichar_id, node,
                                 &backward_edges, reduced_nodes));
    // Skip ahead to the first live edge with a different unichar_id.
    while (++edge_index < backward_edges.size()) {
      UNICHAR_ID id = unichar_id_from_edge_rec(backward_edges[edge_index]);
      if (!DeadEdge(backward_edges[edge_index]) && id != unichar_id) break;
    }
  }
  reduced_nodes[node] = true;  // mark as reduced

  if (debug_level_ > 1) {
    tprintf("Node " REFFORMAT " after reduction:\n", node);
    print_node(node, MAX_NODE_EDGES_DISPLAY);
  }

  // Recurse into children that have not been reduced yet (node 0 is never
  // recursed into).
  for (int i = 0; i < backward_edges.size(); ++i) {
    if (DeadEdge(backward_edges[i])) continue;
    NODE_REF next_node = next_node_from_edge_rec(backward_edges[i]);
    if (next_node != 0 && !reduced_nodes[next_node]) {
      reduce_node_input(next_node, reduced_nodes);
    }
  }
}
|
2007-07-18 09:05:40 +08:00
|
|
|
|
2009-07-11 10:20:33 +08:00
|
|
|
// Prints the forward edges (first line) and backward edges (second,
// tab-indented line) of the given node, showing at most max_num_edges
// edges per direction and an ellipsis when edges were truncated.
void Trie::print_node(NODE_REF node, int max_num_edges) const {
  if (node == NO_EDGE) return;  // nothing to print
  TRIE_NODE_RECORD *node_ptr = nodes_[node];
  int num_fwd = node_ptr->forward_edges.size();
  int num_bkw = node_ptr->backward_edges.size();
  for (int dir = 0; dir < 2; ++dir) {
    const bool forward = (dir == 0);
    EDGE_VECTOR *vec = forward ? &(node_ptr->forward_edges)
                               : &(node_ptr->backward_edges);
    if (forward) {
      tprintf(REFFORMAT " (%d %d): ", node, num_fwd, num_bkw);
    } else {
      tprintf("\t");
    }
    const int num_edges = forward ? num_fwd : num_bkw;
    int i = 0;
    for (; i < num_edges && i < max_num_edges; ++i) {
      if (DeadEdge((*vec)[i])) continue;  // dead edges are not shown
      print_edge_rec((*vec)[i]);
      tprintf(" ");
    }
    // An ellipsis signals that max_num_edges cut the listing short.
    if (i < num_edges) tprintf("...");
    tprintf("\n");
  }
}
|
2013-09-23 23:26:50 +08:00
|
|
|
|
2009-07-11 10:20:33 +08:00
|
|
|
} // namespace tesseract
|