Mirror of https://github.com/tesseract-ocr/tesseract.git (synced 2024-11-23 18:49:08 +08:00)
Fix compiler warnings (mostly -Wsign-compare)
Signed-off-by: Stefan Weil <sw@weilnetz.de>
parent c7653bf59f
commit 0c20d3f843
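Note: the -Wsign-compare warning fires when a signed loop index is compared against an unsigned container size such as std::vector::size(). A minimal sketch of the pattern and of the two fix styles used throughout this commit (hypothetical example, not code from the repository):

#include <cstdio>
#include <string>
#include <vector>

int main() {
  std::vector<std::string> langs = {"eng", "deu", "fra"};

  // Warns under -Wsign-compare: langs.size() is an unsigned std::size_t,
  // while a plain int index is signed.
  // for (int i = 0; i < langs.size(); ++i) { ... }

  // Fix 1: give the index an unsigned type so both sides of the comparison agree.
  for (unsigned i = 0; i < langs.size(); ++i) {
    std::printf("%u : %s\n", i, langs[i].c_str());
  }

  // Fix 2: drop the index entirely with a range-based for loop.
  for (auto &lang : langs) {
    std::printf("%s\n", lang.c_str());
  }
  return 0;
}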
@@ -935,7 +935,7 @@ bool TessBaseAPI::ProcessPagesFileList(FILE *flist, std::string *buf, const char
 int tessedit_page_number) {
 if (!flist && !buf)
 return false;
-int page = (tessedit_page_number >= 0) ? tessedit_page_number : 0;
+unsigned page = (tessedit_page_number >= 0) ? tessedit_page_number : 0;
 char pagename[MAX_PATH];

 std::vector<std::string> lines;
@@ -958,7 +958,7 @@ bool TessBaseAPI::ProcessPagesFileList(FILE *flist, std::string *buf, const char
 }

 // Skip to the requested page number.
-for (int i = 0; i < page; i++) {
+for (unsigned i = 0; i < page; i++) {
 if (flist) {
 if (fgets(pagename, sizeof(pagename), flist) == nullptr)
 break;
@@ -986,7 +986,7 @@ bool TessBaseAPI::ProcessPagesFileList(FILE *flist, std::string *buf, const char
 tprintf("Image file %s cannot be read!\n", pagename);
 return false;
 }
-tprintf("Page %d : %s\n", page, pagename);
+tprintf("Page %u : %s\n", page, pagename);
 bool r = ProcessPage(pix, page, pagename, retry_config, timeout_millisec, renderer);
 pixDestroy(&pix);
 if (!r)

@@ -236,8 +236,8 @@ char **TessBaseAPIGetLoadedLanguagesAsVector(const TessBaseAPI *handle) {
 std::vector<std::string> languages;
 handle->GetLoadedLanguagesAsVector(&languages);
 char **arr = new char *[languages.size() + 1];
-for (int index = 0; index < languages.size(); ++index) {
-arr[index] = strdup(languages[index].c_str());
+for (auto &language : languages) {
+arr[&language - &languages[0]] = strdup(language.c_str());
 }
 arr[languages.size()] = nullptr;
 return arr;
@@ -247,8 +247,8 @@ char **TessBaseAPIGetAvailableLanguagesAsVector(const TessBaseAPI *handle) {
 std::vector<std::string> languages;
 handle->GetAvailableLanguagesAsVector(&languages);
 char **arr = new char *[languages.size() + 1];
-for (int index = 0; index < languages.size(); ++index) {
-arr[index] = strdup(languages[index].c_str());
+for (auto &language : languages) {
+arr[&language - &languages[0]] = strdup(language.c_str());
 }
 arr[languages.size()] = nullptr;
 return arr;

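The two hunks above keep the original array index while switching to a range-based loop by recovering it from pointer arithmetic: for a contiguous std::vector, &element - &vec[0] is the element's position. A small sketch of that pattern under assumed names (illustrative only, not repository code):

#include <cstring>
#include <string>
#include <vector>

// Copies a vector of strings into a new NULL-terminated array of C strings.
// Each element's index is recovered as the distance between its address and
// the address of the vector's first element (valid because std::vector
// storage is contiguous).
char **CopyToCStringArray(const std::vector<std::string> &items) {
  char **arr = new char *[items.size() + 1];
  for (auto &item : items) {
    arr[&item - &items[0]] = strdup(item.c_str());
  }
  arr[items.size()] = nullptr;
  return arr;
}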
@@ -380,13 +380,13 @@ bool Tesseract::ResegmentCharBox(PAGE_RES *page_res, const TBOX *prev_box, const
 // this box.
 if (applybox_debug > 1) {
 tprintf("Best state = ");
-for (int j = 0; j < word_res->best_state.size(); ++j) {
-tprintf("%d ", word_res->best_state[j]);
+for (auto best_state : word_res->best_state) {
+tprintf("%d ", best_state);
 }
 tprintf("\n");
 tprintf("Correct text = [[ ");
-for (int j = 0; j < word_res->correct_text.size(); ++j) {
-tprintf("%s ", word_res->correct_text[j].c_str());
+for (auto &correct_text : word_res->correct_text) {
+tprintf("%s ", correct_text.c_str());
 }
 tprintf("]]\n");
 }
@@ -561,8 +561,8 @@ bool Tesseract::FindSegmentation(const std::vector<UNICHAR_ID> &target_text, WER
 // Build the original segmentation and if it is the same length as the
 // truth, assume it will do.
 int blob_count = 1;
-for (int s = 0; s < word_res->seam_array.size(); ++s) {
-SEAM *seam = word_res->seam_array[s];
+for (auto s : word_res->seam_array) {
+SEAM *seam = s;
 if (!seam->HasAnySplits()) {
 word_res->best_state.push_back(blob_count);
 blob_count = 1;
@@ -577,8 +577,8 @@ bool Tesseract::FindSegmentation(const std::vector<UNICHAR_ID> &target_text, WER
 }
 }
 word_res->correct_text.clear();
-for (int i = 0; i < target_text.size(); ++i) {
-word_res->correct_text.push_back(unicharset.id_to_unichar(target_text[i]));
+for (auto &text : target_text) {
+word_res->correct_text.push_back(unicharset.id_to_unichar(text));
 }
 return true;
 }
@@ -602,7 +602,7 @@ void Tesseract::SearchForText(const std::vector<BLOB_CHOICE_LIST *> *choices, in
 int text_index, float rating, std::vector<int> *segmentation,
 float *best_rating, std::vector<int> *best_segmentation) {
 const UnicharAmbigsVector &table = getDict().getUnicharAmbigs().dang_ambigs();
-for (int length = 1; length <= choices[choices_pos].size(); ++length) {
+for (unsigned length = 1; length <= choices[choices_pos].size(); ++length) {
 // Rating of matching choice or worst choice if no match.
 float choice_rating = 0.0f;
 // Find the corresponding best BLOB_CHOICE.
@@ -746,12 +746,12 @@ void Tesseract::CorrectClassifyWords(PAGE_RES *page_res) {
 PAGE_RES_IT pr_it(page_res);
 for (WERD_RES *word_res = pr_it.word(); word_res != nullptr; word_res = pr_it.forward()) {
 auto *choice = new WERD_CHOICE(word_res->uch_set, word_res->correct_text.size());
-for (int i = 0; i < word_res->correct_text.size(); ++i) {
+for (auto &correct_text : word_res->correct_text) {
 // The part before the first space is the real ground truth, and the
 // rest is the bounding box location and page number.
-std::vector<std::string> tokens = split(word_res->correct_text[i], ' ');
+std::vector<std::string> tokens = split(correct_text, ' ');
 UNICHAR_ID char_id = unicharset.unichar_to_id(tokens[0].c_str());
-choice->append_unichar_id_space_allocated(char_id, word_res->best_state[i], 0.0f, 0.0f);
+choice->append_unichar_id_space_allocated(char_id, word_res->best_state[&correct_text - &word_res->correct_text[0]], 0.0f, 0.0f);
 }
 word_res->ClearWordChoices();
 word_res->LogNewRawChoice(choice);

@@ -152,7 +152,7 @@ void Tesseract::SetupAllWordsPassN(int pass_n, const TBOX *target_word_box, cons
 }
 }
 // Setup all the words for recognition with polygonal approximation.
-for (int w = 0; w < words->size(); ++w) {
+for (unsigned w = 0; w < words->size(); ++w) {
 SetupWordPassN(pass_n, &(*words)[w]);
 if (w > 0)
 (*words)[w].prev_word = &(*words)[w - 1];
@@ -173,7 +173,7 @@ void Tesseract::SetupWordPassN(int pass_n, WordData *word) {
 word->word->x_height = word->row->x_height();
 }
 word->lang_words.truncate(0);
-for (int s = 0; s <= sub_langs_.size(); ++s) {
+for (unsigned s = 0; s <= sub_langs_.size(); ++s) {
 // The sub_langs_.size() entry is for the master language.
 Tesseract *lang_t = s < sub_langs_.size() ? sub_langs_[s] : this;
 auto *word_res = new WERD_RES;
@@ -199,7 +199,7 @@ bool Tesseract::RecogAllWordsPassN(int pass_n, ETEXT_DESC *monitor, PAGE_RES_IT
 // added. The results will be significantly different with adaption on, and
 // deterioration will need investigation.
 pr_it->restart_page();
-for (int w = 0; w < words->size(); ++w) {
+for (unsigned w = 0; w < words->size(); ++w) {
 WordData *word = &(*words)[w];
 if (w > 0)
 word->prev_word = &(*words)[w - 1];
@@ -302,11 +302,11 @@ bool Tesseract::recog_all_words(PAGE_RES *page_res, ETEXT_DESC *monitor,
 StartBackupAdaptiveClassifier();
 }
 // Now check the sub-langs as well.
-for (int i = 0; i < sub_langs_.size(); ++i) {
-if (sub_langs_[i]->AdaptiveClassifierIsFull()) {
-sub_langs_[i]->SwitchAdaptiveClassifier();
-} else if (!sub_langs_[i]->AdaptiveClassifierIsEmpty()) {
-sub_langs_[i]->StartBackupAdaptiveClassifier();
+for (auto &lang : sub_langs_) {
+if (lang->AdaptiveClassifierIsFull()) {
+lang->SwitchAdaptiveClassifier();
+} else if (!lang->AdaptiveClassifierIsEmpty()) {
+lang->StartBackupAdaptiveClassifier();
 }
 }

@@ -556,7 +556,7 @@ void Tesseract::bigram_correction_pass(PAGE_RES *page_res) {
 if (tessedit_bigram_debug > 1) {
 std::string bigrams_list;
 const int kMaxChoicesToPrint = 20;
-for (int i = 0; i < overrides_word1.size() && i < kMaxChoicesToPrint; i++) {
+for (unsigned i = 0; i < overrides_word1.size() && i < kMaxChoicesToPrint; i++) {
 if (i > 0) {
 bigrams_list += ", ";
 }
@@ -679,8 +679,8 @@ void Tesseract::blamer_pass(PAGE_RES *page_res) {
 }
 if (page_res->misadaption_log.size() > 0) {
 tprintf("Misadaption log:\n");
-for (int i = 0; i < page_res->misadaption_log.size(); ++i) {
-tprintf("%s\n", page_res->misadaption_log[i].c_str());
+for (auto &log : page_res->misadaption_log) {
+tprintf("%s\n", log.c_str());
 }
 }
 }
@@ -838,8 +838,8 @@ static int SelectBestWords(double rating_ratio, double certainty_margin, bool de
 }
 // Transfer from out_words to best_words.
 best_words->clear();
-for (int i = 0; i < out_words.size(); ++i)
-best_words->push_back(out_words[i]);
+for (auto &out_word : out_words)
+best_words->push_back(out_word);
 return num_new - num_best;
 }

@@ -908,7 +908,7 @@ bool Tesseract::ReassignDiacritics(int pass, PAGE_RES_IT *pr_it, bool *make_next
 std::vector<C_OUTLINE *> wanted_outlines;
 int num_overlapped = 0;
 int num_overlapped_used = 0;
-for (int i = 0; i < overlapped_any_blob.size(); ++i) {
+for (unsigned i = 0; i < overlapped_any_blob.size(); ++i) {
 if (overlapped_any_blob[i]) {
 ++num_overlapped;
 if (word_wanted[i])
@@ -923,7 +923,7 @@ bool Tesseract::ReassignDiacritics(int pass, PAGE_RES_IT *pr_it, bool *make_next
 AssignDiacriticsToNewBlobs(outlines, pass, real_word, pr_it, &word_wanted, &target_blobs);
 int non_overlapped = 0;
 int non_overlapped_used = 0;
-for (int i = 0; i < word_wanted.size(); ++i) {
+for (unsigned i = 0; i < word_wanted.size(); ++i) {
 if (word_wanted[i])
 ++non_overlapped_used;
 if (outlines[i] != nullptr)
@@ -967,7 +967,7 @@ void Tesseract::AssignDiacriticsToOverlappingBlobs(const std::vector<C_OUTLINE *
 const TBOX blob_box = blob->bounding_box();
 blob_wanted.resize(outlines.size(), false);
 int num_blob_outlines = 0;
-for (int i = 0; i < outlines.size(); ++i) {
+for (unsigned i = 0; i < outlines.size(); ++i) {
 if (blob_box.major_x_overlap(outlines[i]->bounding_box()) && !(*word_wanted)[i]) {
 blob_wanted[i] = true;
 (*overlapped_any_blob)[i] = true;
@@ -985,7 +985,7 @@ void Tesseract::AssignDiacriticsToOverlappingBlobs(const std::vector<C_OUTLINE *
 if (0 < num_blob_outlines && num_blob_outlines < noise_maxperblob) {
 if (SelectGoodDiacriticOutlines(pass, noise_cert_basechar, pr_it, blob, outlines,
 num_blob_outlines, &blob_wanted)) {
-for (int i = 0; i < blob_wanted.size(); ++i) {
+for (unsigned i = 0; i < blob_wanted.size(); ++i) {
 if (blob_wanted[i]) {
 // Claim the outline and record where it is going.
 (*word_wanted)[i] = true;
@@ -1007,7 +1007,7 @@ void Tesseract::AssignDiacriticsToNewBlobs(const std::vector<C_OUTLINE *> &outli
 word_wanted->resize(outlines.size(), false);
 target_blobs->resize(outlines.size(), nullptr);
 // Check for outlines that need to be turned into stand-alone blobs.
-for (int i = 0; i < outlines.size(); ++i) {
+for (unsigned i = 0; i < outlines.size(); ++i) {
 if (outlines[i] == nullptr)
 continue;
 // Get a set of adjacent outlines that don't overlap any existing blob.
@@ -1039,7 +1039,7 @@ void Tesseract::AssignDiacriticsToNewBlobs(const std::vector<C_OUTLINE *> &outli
 num_blob_outlines, &blob_wanted)) {
 if (debug_noise_removal)
 tprintf("Added to left blob\n");
-for (int j = 0; j < blob_wanted.size(); ++j) {
+for (unsigned j = 0; j < blob_wanted.size(); ++j) {
 if (blob_wanted[j]) {
 (*word_wanted)[j] = true;
 (*target_blobs)[j] = left_blob;
@@ -1052,7 +1052,7 @@ void Tesseract::AssignDiacriticsToNewBlobs(const std::vector<C_OUTLINE *> &outli
 num_blob_outlines, &blob_wanted)) {
 if (debug_noise_removal)
 tprintf("Added to right blob\n");
-for (int j = 0; j < blob_wanted.size(); ++j) {
+for (unsigned j = 0; j < blob_wanted.size(); ++j) {
 if (blob_wanted[j]) {
 (*word_wanted)[j] = true;
 (*target_blobs)[j] = right_blob;
@@ -1062,7 +1062,7 @@ void Tesseract::AssignDiacriticsToNewBlobs(const std::vector<C_OUTLINE *> &outli
 num_blob_outlines, &blob_wanted)) {
 if (debug_noise_removal)
 tprintf("Fitted between blobs\n");
-for (int j = 0; j < blob_wanted.size(); ++j) {
+for (unsigned j = 0; j < blob_wanted.size(); ++j) {
 if (blob_wanted[j]) {
 (*word_wanted)[j] = true;
 (*target_blobs)[j] = nullptr;
@@ -1098,7 +1098,7 @@ bool Tesseract::SelectGoodDiacriticOutlines(int pass, float certainty_threshold,
 float best_cert = ClassifyBlobPlusOutlines(test_outlines, outlines, pass, pr_it, blob, all_str);
 if (debug_noise_removal) {
 TBOX ol_box;
-for (int i = 0; i < test_outlines.size(); ++i) {
+for (unsigned i = 0; i < test_outlines.size(); ++i) {
 if (test_outlines[i])
 ol_box += outlines[i]->bounding_box();
 }
@@ -1113,14 +1113,14 @@ bool Tesseract::SelectGoodDiacriticOutlines(int pass, float certainty_threshold,
 (blob == nullptr || best_cert < target_cert || blob != nullptr)) {
 // Find the best bit to zero out.
 best_index = -1;
-for (int i = 0; i < outlines.size(); ++i) {
+for (unsigned i = 0; i < outlines.size(); ++i) {
 if (test_outlines[i]) {
 test_outlines[i] = false;
 std::string str;
 float cert = ClassifyBlobPlusOutlines(test_outlines, outlines, pass, pr_it, blob, str);
 if (debug_noise_removal) {
 TBOX ol_box;
-for (int j = 0; j < outlines.size(); ++j) {
+for (unsigned j = 0; j < outlines.size(); ++j) {
 if (test_outlines[j])
 ol_box += outlines[j]->bounding_box();
 tprintf("%c", test_outlines[j] ? 'T' : 'F');
@@ -1147,8 +1147,8 @@ bool Tesseract::SelectGoodDiacriticOutlines(int pass, float certainty_threshold,
 *ok_outlines = best_outlines;
 if (debug_noise_removal) {
 tprintf("%s noise combination ", blob ? "Adding" : "New");
-for (int i = 0; i < best_outlines.size(); ++i) {
-tprintf("%c", best_outlines[i] ? 'T' : 'F');
+for (auto best_outline : best_outlines) {
+tprintf("%c", best_outline ? 'T' : 'F');
 }
 tprintf(" yields certainty %g, beating target of %g\n", best_cert, target_cert);
 }
@@ -1171,7 +1171,7 @@ float Tesseract::ClassifyBlobPlusOutlines(const std::vector<bool> &ok_outlines,
 ol_it.set_to_list(blob->out_list());
 first_to_keep = ol_it.data();
 }
-for (int i = 0; i < ok_outlines.size(); ++i) {
+for (unsigned i = 0; i < ok_outlines.size(); ++i) {
 if (ok_outlines[i]) {
 // This outline is to be added.
 if (blob == nullptr) {
@@ -1278,7 +1278,7 @@ void Tesseract::classify_word_and_language(int pass_n, PAGE_RES_IT *pr_it, WordD
 most_recently_used_ = word->tesseract;
 return;
 }
-int sub = sub_langs_.size();
+auto sub = sub_langs_.size();
 if (most_recently_used_ != this) {
 // Get the index of the most_recently_used_.
 for (sub = 0; sub < sub_langs_.size() && most_recently_used_ != sub_langs_[sub]; ++sub) {
@@ -1294,7 +1294,7 @@ void Tesseract::classify_word_and_language(int pass_n, PAGE_RES_IT *pr_it, WordD
 &word_data->lang_words[sub_langs_.size()], &best_words) > 0) {
 best_lang_tess = this;
 }
-for (int i = 0; !WordsAcceptable(best_words) && i < sub_langs_.size(); ++i) {
+for (unsigned i = 0; !WordsAcceptable(best_words) && i < sub_langs_.size(); ++i) {
 if (most_recently_used_ != sub_langs_[i] &&
 sub_langs_[i]->RetryWithLanguage(*word_data, recognizer, debug, &word_data->lang_words[i],
 &best_words) > 0) {
@@ -1876,10 +1876,10 @@ void Tesseract::set_word_fonts(WERD_RES *word) {
 if (choice == nullptr)
 continue;
 auto &fonts = choice->fonts();
-for (int f = 0; f < fonts.size(); ++f) {
-const int fontinfo_id = fonts[f].fontinfo_id;
+for (auto &f : fonts) {
+const int fontinfo_id = f.fontinfo_id;
 if (0 <= fontinfo_id && fontinfo_id < fontinfo_size) {
-font_total_score[fontinfo_id] += fonts[f].score;
+font_total_score[fontinfo_id] += f.score;
 }
 }
 }

@@ -75,14 +75,14 @@ bool Tesseract::TrainLineRecognizer(const char *input_imagename, const std::stri
 // appends them to the given training_data.
 void Tesseract::TrainFromBoxes(const std::vector<TBOX> &boxes, const std::vector<std::string> &texts,
 BLOCK_LIST *block_list, DocumentData *training_data) {
-int box_count = boxes.size();
+auto box_count = boxes.size();
 // Process all the text lines in this page, as defined by the boxes.
-int end_box = 0;
+unsigned end_box = 0;
 // Don't let \t, which marks newlines in the box file, get into the line
 // content, as that makes the line unusable in training.
 while (end_box < texts.size() && texts[end_box] == "\t")
 ++end_box;
-for (int start_box = end_box; start_box < box_count; start_box = end_box) {
+for (auto start_box = end_box; start_box < box_count; start_box = end_box) {
 // Find the textline of boxes starting at start and their bounding box.
 TBOX line_box = boxes[start_box];
 std::string line_str = texts[start_box];

@@ -352,8 +352,8 @@ ColumnFinder *Tesseract::SetupPageSegAndDetectOrientation(PageSegMode pageseg_mo
 // We are running osd as part of layout analysis, so constrain the
 // scripts to those allowed by *this.
 AddAllScriptsConverted(unicharset, osd_tess->unicharset, &osd_scripts);
-for (int s = 0; s < sub_langs_.size(); ++s) {
-AddAllScriptsConverted(sub_langs_[s]->unicharset, osd_tess->unicharset, &osd_scripts);
+for (auto &lang : sub_langs_) {
+AddAllScriptsConverted(lang->unicharset, osd_tess->unicharset, &osd_scripts);
 }
 }
 os_detect_blobs(&osd_scripts, &osd_blobs, osr, osd_tess);

@@ -2259,26 +2259,26 @@ void DetectParagraphs(int debug_level, std::vector<RowInfo> *row_infos,

 std::vector<Interval> leftovers;
 LeftoverSegments(rows, &leftovers, 0, rows.size());
-for (int i = 0; i < leftovers.size(); i++) {
+for (auto &leftover : leftovers) {
 // Pass 2a:
 // Find any strongly evidenced start-of-paragraph lines. If they're
 // followed by two lines that look like body lines, make a paragraph
 // model for that and see if that model applies throughout the text
 // (that is, "smear" it).
-StrongEvidenceClassify(debug_level, &rows, leftovers[i].begin, leftovers[i].end, &theory);
+StrongEvidenceClassify(debug_level, &rows, leftover.begin, leftover.end, &theory);

 // Pass 2b:
 // If we had any luck in pass 2a, we got part of the page and didn't
 // know how to classify a few runs of rows. Take the segments that
 // didn't find a model and reprocess them individually.
 std::vector<Interval> leftovers2;
-LeftoverSegments(rows, &leftovers2, leftovers[i].begin, leftovers[i].end);
+LeftoverSegments(rows, &leftovers2, leftover.begin, leftover.end);
 bool pass2a_was_useful =
 leftovers2.size() > 1 ||
 (leftovers2.size() == 1 && (leftovers2[0].begin != 0 || leftovers2[0].end != rows.size()));
 if (pass2a_was_useful) {
-for (int j = 0; j < leftovers2.size(); j++) {
-StrongEvidenceClassify(debug_level, &rows, leftovers2[j].begin, leftovers2[j].end, &theory);
+for (auto &leftover2 : leftovers2) {
+StrongEvidenceClassify(debug_level, &rows, leftover2.begin, leftover2.end, &theory);
 }
 }
 }
@@ -2290,8 +2290,8 @@ void DetectParagraphs(int debug_level, std::vector<RowInfo> *row_infos,
 // and geometric clues to form matching models for. Let's see if
 // the geometric clues are simple enough that we could just use those.
 LeftoverSegments(rows, &leftovers, 0, rows.size());
-for (int i = 0; i < leftovers.size(); i++) {
-GeometricClassify(debug_level, &rows, leftovers[i].begin, leftovers[i].end, &theory);
+for (auto &leftover : leftovers) {
+GeometricClassify(debug_level, &rows, leftover.begin, leftover.end, &theory);
 }

 // Undo any flush models for which there's little evidence.
@@ -2302,8 +2302,8 @@ void DetectParagraphs(int debug_level, std::vector<RowInfo> *row_infos,
 // Pass 4:
 // Take everything that's still not marked up well and clear all markings.
 LeftoverSegments(rows, &leftovers, 0, rows.size());
-for (int i = 0; i < leftovers.size(); i++) {
-for (int j = leftovers[i].begin; j < leftovers[i].end; j++) {
+for (auto &leftover : leftovers) {
+for (int j = leftover.begin; j < leftover.end; j++) {
 rows[j].SetUnknown();
 }
 }
@@ -2498,16 +2498,16 @@ void DetectParagraphs(int debug_level, bool after_text_recognition,
 if (!row_infos.empty()) {
 int min_lmargin = row_infos[0].pix_ldistance;
 int min_rmargin = row_infos[0].pix_rdistance;
-for (int i = 1; i < row_infos.size(); i++) {
+for (unsigned i = 1; i < row_infos.size(); i++) {
 if (row_infos[i].pix_ldistance < min_lmargin)
 min_lmargin = row_infos[i].pix_ldistance;
 if (row_infos[i].pix_rdistance < min_rmargin)
 min_rmargin = row_infos[i].pix_rdistance;
 }
 if (min_lmargin > 0 || min_rmargin > 0) {
-for (int i = 0; i < row_infos.size(); i++) {
-row_infos[i].pix_ldistance -= min_lmargin;
-row_infos[i].pix_rdistance -= min_rmargin;
+for (auto &row_info : row_infos) {
+row_info.pix_ldistance -= min_lmargin;
+row_info.pix_rdistance -= min_rmargin;
 }
 }
 }
@@ -2524,10 +2524,10 @@ void DetectParagraphs(int debug_level, bool after_text_recognition,

 // Now stitch in the row_owners into the rows.
 row = *block_start;
-for (int i = 0; i < row_owners.size(); i++) {
+for (auto &row_owner : row_owners) {
 while (!row.PageResIt()->row())
 row.Next(RIL_TEXTLINE);
-row.PageResIt()->row()->row->set_para(row_owners[i]);
+row.PageResIt()->row()->row->set_para(row_owner);
 row.Next(RIL_TEXTLINE);
 }
 }

@@ -209,21 +209,20 @@ SVMenuNode *ParamsEditor::BuildListOfAllLeaves(tesseract::Tesseract *tess) {
 std::map<const char *, int> amount;

 // Add all parameters to a list.
-int v, i;
 int num_iterations = (tess->params() == nullptr) ? 1 : 2;
-for (v = 0; v < num_iterations; ++v) {
+for (int v = 0; v < num_iterations; ++v) {
 tesseract::ParamsVectors *vec = (v == 0) ? GlobalParams() : tess->params();
-for (i = 0; i < vec->int_params.size(); ++i) {
-vc_it.add_after_then_move(new ParamContent(vec->int_params[i]));
+for (auto &param : vec->int_params) {
+vc_it.add_after_then_move(new ParamContent(param));
 }
-for (i = 0; i < vec->bool_params.size(); ++i) {
-vc_it.add_after_then_move(new ParamContent(vec->bool_params[i]));
+for (auto &param : vec->bool_params) {
+vc_it.add_after_then_move(new ParamContent(param));
 }
-for (i = 0; i < vec->string_params.size(); ++i) {
-vc_it.add_after_then_move(new ParamContent(vec->string_params[i]));
+for (auto &param : vec->string_params) {
+vc_it.add_after_then_move(new ParamContent(param));
 }
-for (i = 0; i < vec->double_params.size(); ++i) {
-vc_it.add_after_then_move(new ParamContent(vec->double_params[i]));
+for (auto &param : vec->double_params) {
+vc_it.add_after_then_move(new ParamContent(param));
 }
 }

@@ -219,8 +219,8 @@ public:
 pixDestroy(&pix_original_);
 pix_original_ = original_pix;
 // Clone to sublangs as well.
-for (int i = 0; i < sub_langs_.size(); ++i) {
-sub_langs_[i]->set_pix_original(original_pix ? pixClone(original_pix) : nullptr);
+for (auto &lang : sub_langs_) {
+lang->set_pix_original(original_pix ? pixClone(original_pix) : nullptr);
 }
 }
 // Returns a pointer to a Pix representing the best available resolution image
@@ -286,8 +286,8 @@ public:
 bool AnyTessLang() const {
 if (tessedit_ocr_engine_mode != OEM_LSTM_ONLY)
 return true;
-for (int i = 0; i < sub_langs_.size(); ++i) {
-if (sub_langs_[i]->tessedit_ocr_engine_mode != OEM_LSTM_ONLY)
+for (auto &lang : sub_langs_) {
+if (lang->tessedit_ocr_engine_mode != OEM_LSTM_ONLY)
 return true;
 }
 return false;
@@ -296,8 +296,8 @@ public:
 bool AnyLSTMLang() const {
 if (tessedit_ocr_engine_mode != OEM_TESSERACT_ONLY)
 return true;
-for (int i = 0; i < sub_langs_.size(); ++i) {
-if (sub_langs_[i]->tessedit_ocr_engine_mode != OEM_TESSERACT_ONLY) {
+for (auto &lang : sub_langs_) {
+if (lang->tessedit_ocr_engine_mode != OEM_TESSERACT_ONLY) {
 return true;
 }
 }

@@ -83,13 +83,13 @@ struct FontInfo {
 // (FontInfo class takes ownership of the pointer).
 // Note: init_spacing should be called before calling this function.
 void add_spacing(UNICHAR_ID uch_id, FontSpacingInfo *spacing_info) {
-ASSERT_HOST(spacing_vec != nullptr && spacing_vec->size() > uch_id);
+ASSERT_HOST(static_cast<size_t>(uch_id) < spacing_vec->size());
 (*spacing_vec)[uch_id] = spacing_info;
 }

 // Returns the pointer to FontSpacingInfo for the given UNICHAR_ID.
 const FontSpacingInfo *get_spacing(UNICHAR_ID uch_id) const {
-return (spacing_vec == nullptr || spacing_vec->size() <= uch_id) ? nullptr
+return (spacing_vec == nullptr || spacing_vec->size() <= static_cast<size_t>(uch_id)) ? nullptr
 : (*spacing_vec)[uch_id];
 }

@@ -100,7 +100,7 @@ struct FontInfo {
 const FontSpacingInfo *fsi = this->get_spacing(uch_id);
 if (prev_fsi == nullptr || fsi == nullptr)
 return false;
-int i = 0;
+size_t i = 0;
 for (; i < prev_fsi->kerned_unichar_ids.size(); ++i) {
 if (prev_fsi->kerned_unichar_ids[i] == uch_id)
 break;

@@ -102,15 +102,15 @@ public:
 int score1 = 0, score2 = 0;
 fontinfo_id_ = -1;
 fontinfo_id2_ = -1;
-for (int f = 0; f < fonts_.size(); ++f) {
-if (fonts_[f].score > score1) {
+for (auto &f : fonts_) {
+if (f.score > score1) {
 score2 = score1;
 fontinfo_id2_ = fontinfo_id_;
-score1 = fonts_[f].score;
-fontinfo_id_ = fonts_[f].fontinfo_id;
-} else if (fonts_[f].score > score2) {
-score2 = fonts_[f].score;
-fontinfo_id2_ = fonts_[f].fontinfo_id;
+score1 = f.score;
+fontinfo_id_ = f.fontinfo_id;
+} else if (f.score > score2) {
+score2 = f.score;
+fontinfo_id2_ = f.fontinfo_id;
 }
 }
 }

@@ -234,7 +234,7 @@ bool DeSerialize(bool swap, FILE *fp, std::vector<T> &data) {
 return false;
 }
 if (swap) {
-for (int i = 0; i < size; ++i) {
+for (uint32_t i = 0; i < size; ++i) {
 ReverseN(&data[i], sizeof(T));
 }
 }

@@ -38,15 +38,15 @@ public:
 ObjectCache() = default;
 ~ObjectCache() {
 std::lock_guard<std::mutex> guard(mu_);
-for (int i = 0; i < cache_.size(); i++) {
-if (cache_[i].count > 0) {
+for (auto &it : cache_) {
+if (it.count > 0) {
 tprintf(
 "ObjectCache(%p)::~ObjectCache(): WARNING! LEAK! object %p "
 "still has count %d (id %s)\n",
-this, cache_[i].object, cache_[i].count, cache_[i].id.c_str());
+this, it.object, it.count, it.id.c_str());
 } else {
-delete cache_[i].object;
-cache_[i].object = nullptr;
+delete it.object;
+it.object = nullptr;
 }
 }
 }
@@ -60,11 +60,11 @@ public:
 T *Get(const std::string &id, std::function<T *()> loader) {
 T *retval = nullptr;
 std::lock_guard<std::mutex> guard(mu_);
-for (int i = 0; i < cache_.size(); i++) {
-if (id == cache_[i].id) {
-retval = cache_[i].object;
-if (cache_[i].object != nullptr) {
-cache_[i].count++;
+for (auto &it : cache_) {
+if (id == it.id) {
+retval = it.object;
+if (it.object != nullptr) {
+it.count++;
 }
 return retval;
 }
@@ -83,9 +83,9 @@ public:
 if (t == nullptr)
 return false;
 std::lock_guard<std::mutex> guard(mu_);
-for (int i = 0; i < cache_.size(); i++) {
-if (cache_[i].object == t) {
---cache_[i].count;
+for (auto &it : cache_) {
+if (it.object == t) {
+--it.count;
 return true;
 }
 }

@@ -68,9 +68,9 @@ public:
 // Uses a linear search.
 void Add(T value, int count) {
 // Linear search for value.
-for (int i = 0; i < counts_.size(); ++i) {
-if (counts_[i].value == value) {
-counts_[i].count += count;
+for (auto &it : counts_) {
+if (it.value == value) {
+it.count += count;
 return;
 }
 }
@@ -83,11 +83,11 @@ public:
 // If the array is empty, returns -INT32_MAX and max_value is unchanged.
 int MaxCount(T *max_value) const {
 int best_count = -INT32_MAX;
-for (int i = 0; i < counts_.size(); ++i) {
-if (counts_[i].count > best_count) {
-best_count = counts_[i].count;
+for (auto &it : counts_) {
+if (it.count > best_count) {
+best_count = it.count;
 if (max_value != nullptr)
-*max_value = counts_[i].value;
+*max_value = it.value;
 }
 }
 return best_count;

@@ -212,7 +212,7 @@ int UNICHARSET::step(const char *str) const {
 // Return whether the given UTF-8 string is encodable with this UNICHARSET.
 // If not encodable, write the first byte offset which cannot be converted
 // into the second (return) argument.
-bool UNICHARSET::encodable_string(const char *str, int *first_bad_position) const {
+bool UNICHARSET::encodable_string(const char *str, unsigned *first_bad_position) const {
 std::vector<UNICHAR_ID> encoding;
 return encode_string(str, true, &encoding, nullptr, first_bad_position);
 }
@@ -228,13 +228,13 @@ bool UNICHARSET::encodable_string(const char *str, int *first_bad_position) cons
 // Use CleanupString to perform the cleaning.
 bool UNICHARSET::encode_string(const char *str, bool give_up_on_failure,
 std::vector<UNICHAR_ID> *encoding, std::vector<char> *lengths,
-int *encoded_length) const {
+unsigned *encoded_length) const {
 std::vector<UNICHAR_ID> working_encoding;
 std::vector<char> working_lengths;
 std::vector<char> best_lengths;
 encoding->clear(); // Just in case str is empty.
-int str_length = strlen(str);
-int str_pos = 0;
+auto str_length = strlen(str);
+unsigned str_pos = 0;
 bool perfect = true;
 while (str_pos < str_length) {
 encode_string(str, str_pos, str_length, &working_encoding, &working_lengths, &str_pos, encoding,
@@ -366,8 +366,8 @@ bool UNICHARSET::get_isprivate(UNICHAR_ID unichar_id) const {

 // Sets all ranges to empty, so they can be expanded to set the values.
 void UNICHARSET::set_ranges_empty() {
-for (int id = 0; id < unichars.size(); ++id) {
-unichars[id].properties.SetRangesEmpty();
+for (auto &uc : unichars) {
+uc.properties.SetRangesEmpty();
 }
 }

@@ -404,7 +404,7 @@ void UNICHARSET::PartialSetPropertiesFromOther(int start_index, const UNICHARSET
 // src unicharset with ranges in it. The unicharsets don't have to be the
 // same, and graphemes are correctly accounted for.
 void UNICHARSET::ExpandRangesFromOther(const UNICHARSET &src) {
-for (int ch = 0; ch < unichars.size(); ++ch) {
+for (unsigned ch = 0; ch < unichars.size(); ++ch) {
 const char *utf8 = id_to_unichar(ch);
 UNICHAR_PROPERTIES properties;
 if (src.GetStrProperties(utf8, &properties)) {
@@ -418,7 +418,7 @@ void UNICHARSET::ExpandRangesFromOther(const UNICHARSET &src) {
 // ids will not be present in this if not in src. Does NOT reorder the set!
 void UNICHARSET::CopyFrom(const UNICHARSET &src) {
 clear();
-for (int ch = 0; ch < src.unichars.size(); ++ch) {
+for (unsigned ch = 0; ch < src.unichars.size(); ++ch) {
 const UNICHAR_PROPERTIES &src_props = src.unichars[ch].properties;
 const char *utf8 = src.id_to_unichar(ch);
 unichar_insert_backwards_compatible(utf8);
@@ -434,7 +434,7 @@ void UNICHARSET::CopyFrom(const UNICHARSET &src) {
 // ExpandRangesFromOther.
 void UNICHARSET::AppendOtherUnicharset(const UNICHARSET &src) {
 int initial_used = unichars.size();
-for (int ch = 0; ch < src.unichars.size(); ++ch) {
+for (unsigned ch = 0; ch < src.unichars.size(); ++ch) {
 const UNICHAR_PROPERTIES &src_props = src.unichars[ch].properties;
 const char *utf8 = src.id_to_unichar(ch);
 int id = unichars.size();
@@ -470,7 +470,7 @@ bool UNICHARSET::SizesDistinct(UNICHAR_ID id1, UNICHAR_ID id2) const {
 // See unicharset.h for definition of the args.
 void UNICHARSET::encode_string(const char *str, int str_index, int str_length,
 std::vector<UNICHAR_ID> *encoding, std::vector<char> *lengths,
-int *best_total_length, std::vector<UNICHAR_ID> *best_encoding,
+unsigned *best_total_length, std::vector<UNICHAR_ID> *best_encoding,
 std::vector<char> *best_lengths) const {
 if (str_index > *best_total_length) {
 // This is the best result so far.
@@ -519,8 +519,8 @@ bool UNICHARSET::GetStrProperties(const char *utf8_str, UNICHAR_PROPERTIES *prop
 std::vector<UNICHAR_ID> encoding;
 if (!encode_string(utf8_str, true, &encoding, nullptr, nullptr))
 return false; // Some part was invalid.
-for (int i = 0; i < encoding.size(); ++i) {
-int id = encoding[i];
+for (auto it : encoding) {
+int id = it;
 const UNICHAR_PROPERTIES &src_props = unichars[id].properties;
 // Logical OR all the bools.
 if (src_props.isalpha)
@@ -888,7 +888,7 @@ void UNICHARSET::post_load_setup() {
 // not the common script, as that still contains some "alphas".
 int *script_counts = new int[script_table_size_used];
 memset(script_counts, 0, sizeof(*script_counts) * script_table_size_used);
-for (int id = 0; id < unichars.size(); ++id) {
+for (unsigned id = 0; id < unichars.size(); ++id) {
 if (get_isalpha(id)) {
 ++script_counts[get_script(id)];
 }
@@ -908,7 +908,7 @@ void UNICHARSET::post_load_setup() {
 bool UNICHARSET::major_right_to_left() const {
 int ltr_count = 0;
 int rtl_count = 0;
-for (int id = 0; id < unichars.size(); ++id) {
+for (unsigned id = 0; id < unichars.size(); ++id) {
 int dir = get_direction(id);
 if (dir == UNICHARSET::U_LEFT_TO_RIGHT)
 ltr_count++;
@@ -927,33 +927,33 @@ void UNICHARSET::set_black_and_whitelist(const char *blacklist, const char *whit
 const char *unblacklist) {
 bool def_enabled = whitelist == nullptr || whitelist[0] == '\0';
 // Set everything to default
-for (int ch = 0; ch < unichars.size(); ++ch)
-unichars[ch].properties.enabled = def_enabled;
+for (auto &uc : unichars)
+uc.properties.enabled = def_enabled;
 if (!def_enabled) {
 // Enable the whitelist.
 std::vector<UNICHAR_ID> encoding;
 encode_string(whitelist, false, &encoding, nullptr, nullptr);
-for (int i = 0; i < encoding.size(); ++i) {
-if (encoding[i] != INVALID_UNICHAR_ID)
-unichars[encoding[i]].properties.enabled = true;
+for (auto it : encoding) {
+if (it != INVALID_UNICHAR_ID)
+unichars[it].properties.enabled = true;
 }
 }
 if (blacklist != nullptr && blacklist[0] != '\0') {
 // Disable the blacklist.
 std::vector<UNICHAR_ID> encoding;
 encode_string(blacklist, false, &encoding, nullptr, nullptr);
-for (int i = 0; i < encoding.size(); ++i) {
-if (encoding[i] != INVALID_UNICHAR_ID)
-unichars[encoding[i]].properties.enabled = false;
+for (auto it : encoding) {
+if (it != INVALID_UNICHAR_ID)
+unichars[it].properties.enabled = false;
 }
 }
 if (unblacklist != nullptr && unblacklist[0] != '\0') {
 // Re-enable the unblacklist.
 std::vector<UNICHAR_ID> encoding;
 encode_string(unblacklist, false, &encoding, nullptr, nullptr);
-for (int i = 0; i < encoding.size(); ++i) {
-if (encoding[i] != INVALID_UNICHAR_ID)
-unichars[encoding[i]].properties.enabled = true;
+for (auto it : encoding) {
+if (it != INVALID_UNICHAR_ID)
+unichars[it].properties.enabled = true;
 }
 }
 }

@@ -220,7 +220,7 @@ public:
 // Returns true if the given UTF-8 string is encodable with this UNICHARSET.
 // If not encodable, write the first byte offset which cannot be converted
 // into the second (return) argument.
-bool encodable_string(const char *str, int *first_bad_position) const;
+bool encodable_string(const char *str, unsigned *first_bad_position) const;

 // Encodes the given UTF-8 string with this UNICHARSET.
 // Any part of the string that cannot be encoded (because the utf8 can't
@@ -238,7 +238,7 @@ public:
 // that do not belong in the unicharset, or encoding may fail.
 // Use CleanupString to perform the cleaning.
 bool encode_string(const char *str, bool give_up_on_failure, std::vector<UNICHAR_ID> *encoding,
-std::vector<char> *lengths, int *encoded_length) const;
+std::vector<char> *lengths, unsigned *encoded_length) const;

 // Return the unichar representation corresponding to the given UNICHAR_ID
 // within the UNICHARSET.
@@ -294,7 +294,7 @@ public:
 // Return true if the given unichar id exists within the set.
 // Relies on the fact that unichar ids are contiguous in the unicharset.
 bool contains_unichar_id(UNICHAR_ID unichar_id) const {
-return unichar_id != INVALID_UNICHAR_ID && unichar_id < unichars.size() && unichar_id >= 0;
+return static_cast<size_t>(unichar_id) < unichars.size();
 }

 // Return true if the given unichar representation exists within the set.
@@ -1000,7 +1000,7 @@ private:
 // best_lengths (may be null) contains the lengths of best_encoding.
 void encode_string(const char *str, int str_index, int str_length,
 std::vector<UNICHAR_ID> *encoding, std::vector<char> *lengths,
-int *best_total_length, std::vector<UNICHAR_ID> *best_encoding,
+unsigned *best_total_length, std::vector<UNICHAR_ID> *best_encoding,
 std::vector<char> *best_lengths) const;

 // Gets the properties for a grapheme string, combining properties for

@@ -107,7 +107,7 @@ struct ADAPT_RESULTS {
     best_unichar_id = INVALID_UNICHAR_ID;
     best_match_index = -1;
     best_rating = WORST_POSSIBLE_RATING;
-    for (int i = 0; i < match.size(); ++i) {
+    for (unsigned i = 0; i < match.size(); ++i) {
       if (match[i].rating > best_rating) {
         best_rating = match[i].rating;
         best_unichar_id = match[i].unichar_id;
@@ -145,7 +145,7 @@ inline bool MarginalMatch(float confidence, float matcher_great_threshold) {
 // Returns the index of the given id in results, if present, or the size of the
 // vector (index it will go at) if not present.
 static int FindScoredUnichar(UNICHAR_ID id, const ADAPT_RESULTS &results) {
-  for (int i = 0; i < results.match.size(); i++) {
+  for (unsigned i = 0; i < results.match.size(); i++) {
     if (results.match[i].unichar_id == id)
       return i;
   }
@@ -1112,7 +1112,7 @@ void Classify::ExpandShapesAndApplyCorrections(ADAPT_CLASS *classes, bool debug,
     if (!unicharset.get_enabled(unichar_id))
       continue;
     // Find the mapped_result for unichar_id.
-    int r = 0;
+    unsigned r = 0;
     for (r = 0; r < mapped_results.size() && mapped_results[r].unichar_id != unichar_id;
          ++r) {
     }
@@ -1127,11 +1127,11 @@ void Classify::ExpandShapesAndApplyCorrections(ADAPT_CLASS *classes, bool debug,
         }
       }
     }
-    for (int m = 0; m < mapped_results.size(); ++m) {
-      mapped_results[m].rating = ComputeCorrectedRating(
-          debug, mapped_results[m].unichar_id, cp_rating, int_result->rating,
+    for (auto &m : mapped_results) {
+      m.rating = ComputeCorrectedRating(
+          debug, m.unichar_id, cp_rating, int_result->rating,
           int_result->feature_misses, bottom, top, blob_length, matcher_multiplier, cn_factors);
-      AddNewResult(mapped_results[m], final_results);
+      AddNewResult(m, final_results);
     }
     return;
   }
@@ -1252,8 +1252,8 @@ int Classify::CharNormClassifier(TBLOB *blob, const TrainingSample &sample,
   std::vector<UnicharRating> unichar_results;
   static_classifier_->UnicharClassifySample(sample, blob->denorm().pix(), 0, -1, &unichar_results);
   // Convert results to the format used internally by AdaptiveClassifier.
-  for (int r = 0; r < unichar_results.size(); ++r) {
-    AddNewResult(unichar_results[r], adapt_results);
+  for (auto &r : unichar_results) {
+    AddNewResult(r, adapt_results);
   }
   return sample.num_features();
 } /* CharNormClassifier */
@@ -1289,16 +1289,16 @@ int Classify::CharNormTrainingSample(bool pruner_only, int keep_this, const Trai
   }
   if (pruner_only) {
     // Convert pruner results to output format.
-    for (int i = 0; i < adapt_results->CPResults.size(); ++i) {
-      int class_id = adapt_results->CPResults[i].Class;
-      results->push_back(UnicharRating(class_id, 1.0f - adapt_results->CPResults[i].Rating));
+    for (auto &it : adapt_results->CPResults) {
+      int class_id = it.Class;
+      results->push_back(UnicharRating(class_id, 1.0f - it.Rating));
     }
   } else {
     MasterMatcher(PreTrainedTemplates, num_features, sample.features(), char_norm_array, nullptr,
                   matcher_debug_flags, classify_integer_matcher_multiplier, blob_box,
                   adapt_results->CPResults, adapt_results);
     // Convert master matcher results to output format.
-    for (int i = 0; i < adapt_results->match.size(); i++) {
+    for (unsigned i = 0; i < adapt_results->match.size(); i++) {
       results->push_back(adapt_results->match[i]);
     }
     if (results->size() > 1) {
@@ -1358,8 +1358,8 @@ void Classify::ConvertMatchesToChoices(const DENORM &denorm, const TBOX &box,
   }
 
   float best_certainty = -FLT_MAX;
-  for (int i = 0; i < Results->match.size(); i++) {
-    const UnicharRating &result = Results->match[i];
+  for (auto &it : Results->match) {
+    const UnicharRating &result = it;
     bool adapted = result.adapted;
     bool current_is_frag = (unicharset.get_fragment(result.unichar_id) != nullptr);
     if (temp_it.length() + 1 == max_matches && !contains_nonfrag && current_is_frag) {
@@ -1504,7 +1504,6 @@ void Classify::DoAdaptiveMatch(TBLOB *Blob, ADAPT_RESULTS *Results) {
 UNICHAR_ID *Classify::GetAmbiguities(TBLOB *Blob, CLASS_ID CorrectClass) {
   auto *Results = new ADAPT_RESULTS();
   UNICHAR_ID *Ambiguities;
-  int i;
 
   Results->Initialize();
   INT_FX_RESULT_STRUCT fx_info;
@@ -1526,6 +1525,7 @@ UNICHAR_ID *Classify::GetAmbiguities(TBLOB *Blob, CLASS_ID CorrectClass) {
   Ambiguities = new UNICHAR_ID[Results->match.size() + 1];
   if (Results->match.size() > 1 ||
       (Results->match.size() == 1 && Results->match[0].unichar_id != CorrectClass)) {
+    unsigned i;
     for (i = 0; i < Results->match.size(); i++)
       Ambiguities[i] = Results->match[i].unichar_id;
     Ambiguities[i] = -1;
@@ -1888,9 +1888,9 @@ int MakeTempProtoPerm(void *item1, void *item2) {
  * Globals: none
  */
 void Classify::PrintAdaptiveMatchResults(const ADAPT_RESULTS &results) {
-  for (int i = 0; i < results.match.size(); ++i) {
-    tprintf("%s ", unicharset.debug_str(results.match[i].unichar_id).c_str());
-    results.match[i].Print();
+  for (auto &it : results.match) {
+    tprintf("%s ", unicharset.debug_str(it.unichar_id).c_str());
+    it.Print();
   }
 } /* PrintAdaptiveMatchResults */
 
@@ -1908,7 +1908,7 @@ void Classify::PrintAdaptiveMatchResults(const ADAPT_RESULTS &results) {
  * - matcher_bad_match_pad defines a "bad match"
  */
 void Classify::RemoveBadMatches(ADAPT_RESULTS *Results) {
-  int Next, NextGood;
+  unsigned Next, NextGood;
   float BadMatchThreshold;
   static const char *romans = "i v x I V X";
   BadMatchThreshold = Results->best_rating - matcher_bad_match_pad;
@@ -1965,7 +1965,7 @@ void Classify::RemoveBadMatches(ADAPT_RESULTS *Results) {
  * @param Results contains matches to be filtered
  */
 void Classify::RemoveExtraPuncs(ADAPT_RESULTS *Results) {
-  int Next, NextGood;
+  unsigned Next, NextGood;
   int punc_count; /*no of garbage characters */
   int digit_count;
   /*garbage characters */
@@ -110,10 +110,10 @@ public:
    */
   void unichar_ids_of(NODE_REF node, NodeChildVector *vec, bool word_end) const override {
     const EDGE_VECTOR &forward_edges = nodes_[static_cast<int>(node)]->forward_edges;
-    for (int i = 0; i < forward_edges.size(); ++i) {
-      if (!word_end || end_of_word_from_edge_rec(forward_edges[i])) {
+    for (auto &edge : forward_edges) {
+      if (!word_end || end_of_word_from_edge_rec(edge)) {
         vec->push_back(
-            NodeChild(unichar_id_from_edge_rec(forward_edges[i]), make_edge_ref(node, i)));
+            NodeChild(unichar_id_from_edge_rec(edge), make_edge_ref(node, &edge - &forward_edges[0])));
       }
     }
   }
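A short note on the unichar_ids_of() rewrite above: the range-for still needs the element's position for make_edge_ref(), and `&edge - &forward_edges[0]` recovers it by pointer arithmetic, which is valid because vector elements are stored contiguously. A stand-alone sketch of that idiom, with hypothetical names:

    #include <cstddef>
    #include <cstdio>
    #include <vector>

    int main() {
      std::vector<int> values = {10, 20, 30};
      for (auto &v : values) {
        // Address difference from the first element gives the index
        // without keeping a separate loop counter.
        std::ptrdiff_t index = &v - &values[0];
        std::printf("values[%td] = %d\n", index, v);
      }
      return 0;
    }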
@@ -320,7 +320,7 @@ protected:
   // At most max_num_edges will be printed for each node.
   void print_all(const char *msg, int max_num_edges) {
     tprintf("\n__________________________\n%s\n", msg);
-    for (int i = 0; i < nodes_.size(); ++i)
+    for (size_t i = 0; i < nodes_.size(); ++i)
       print_node(i, max_num_edges);
     tprintf("__________________________\n");
   }
@@ -143,8 +143,8 @@ public:
     learning_rate_ *= factor;
     if (network_->TestFlag(NF_LAYER_SPECIFIC_LR)) {
       std::vector<std::string> layers = EnumerateLayers();
-      for (int i = 0; i < layers.size(); ++i) {
-        ScaleLayerLearningRate(layers[i], factor);
+      for (auto &layer : layers) {
+        ScaleLayerLearningRate(layer, factor);
       }
     }
   }
@@ -214,7 +214,8 @@ public:
 template <typename T>
 class Stack {
 public:
-  Stack() : stack_top_(0) {}
+  Stack() {
+  }
 
   ~Stack() {
     for (auto data : stack_) {
@@ -241,9 +242,9 @@ public:
   void Return(T *item) {
     std::lock_guard<std::mutex> lock(mutex_);
     // Linear search will do.
-    int index = stack_top_ - 1;
-    while (index >= 0 && stack_[index] != item)
-      --index;
+    int index = stack_top_;
+    while (--index >= 0 && stack_[index] != item) {
+    }
     if (index >= 0)
       flags_[index] = false;
     while (stack_top_ > 0 && !flags_[stack_top_ - 1])
@@ -253,7 +254,7 @@ public:
 private:
   std::vector<T *> stack_;
   std::vector<bool> flags_;
-  int stack_top_;
+  unsigned stack_top_ = 0;
   std::mutex mutex_;
 }; // class Stack.
 
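On the Return() change above: stack_top_ is now unsigned, so the search position is first copied into a signed int and then pre-decremented, which keeps the `index >= 0` tests meaningful and makes the loop stop at -1 when the item is not on the stack. A minimal sketch of the same search pattern outside the class (hypothetical helper, not the real Stack API):

    #include <vector>

    // Returns the position of item among the first `top` entries,
    // or -1 if it is not present; `top` is unsigned like stack_top_.
    template <typename T>
    int FindFromTop(const std::vector<T *> &stack, unsigned top, T *item) {
      int index = top;  // copy into a signed index before decrementing
      while (--index >= 0 && stack[index] != item) {
        // keep scanning downwards
      }
      return index;  // -1 when the item was not found
    }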
@@ -2,7 +2,6 @@
 // File: parallel.h
 // Description: Runs networks in parallel on the same input.
 // Author: Ray Smith
-// Created: Thu May 02 08:02:06 PST 2013
 //
 // (C) Copyright 2013, Google Inc.
 // Licensed under the Apache License, Version 2.0 (the "License");
@@ -52,8 +51,8 @@ public:
     if (type_ == NT_REPLICATED) {
       spec += "R" + std::to_string(stack_.size()) + "(" + stack_[0]->spec();
     } else {
-      for (int i = 0; i < stack_.size(); ++i)
-        spec += stack_[i]->spec();
+      for (auto &it : stack_)
+        spec += it->spec();
     }
     spec += ")";
   }
@@ -427,10 +427,11 @@ void RecodeBeamSearch::extractSymbolChoices(const UNICHARSET *unicharset) {
       if (ratings[i] < ratings[bestPos])
         bestPos = i;
     }
+    // TODO: bestCode is currently unused (see commit 2dd5d0d60).
     int bestCode = -10;
-    for (int i = 0; i < best_nodes.size(); ++i) {
-      if (best_nodes[i]->unichar_id == unichar_ids[bestPos]) {
-        bestCode = best_nodes[i]->code;
+    for (auto &node : best_nodes) {
+      if (node->unichar_id == unichar_ids[bestPos]) {
+        bestCode = node->code;
       }
     }
     // Exclude the best choice for the followup decoding.
@@ -54,9 +54,9 @@ public:
       to = 'y';
     }
     // Change the from char to the to char.
-    for (int i = 0; i < net_spec.length(); ++i) {
-      if (net_spec[i] == from)
-        net_spec[i] = to;
+    for (auto &it : net_spec) {
+      if (it == from)
+        it = to;
     }
     spec += net_spec;
     return spec;
@@ -36,8 +36,8 @@ public:
 
   std::string spec() const override {
     std::string spec("[");
-    for (int i = 0; i < stack_.size(); ++i)
-      spec += stack_[i]->spec();
+    for (auto &it : stack_)
+      spec += it->spec();
     spec += "]";
     return spec;
   }
@@ -127,37 +127,37 @@ static bool SafeAtod(const char *str, double *val) {
 static void PrintCommandLineFlags() {
   const char *kFlagNamePrefix = "FLAGS_";
   const int kFlagNamePrefixLen = strlen(kFlagNamePrefix);
-  for (int i = 0; i < GlobalParams()->int_params.size(); ++i) {
-    if (!strncmp(GlobalParams()->int_params[i]->name_str(), kFlagNamePrefix, kFlagNamePrefixLen)) {
+  for (auto &param : GlobalParams()->int_params) {
+    if (!strncmp(param->name_str(), kFlagNamePrefix, kFlagNamePrefixLen)) {
       printf(" --%s %s (type:int default:%d)\n",
-             GlobalParams()->int_params[i]->name_str() + kFlagNamePrefixLen,
-             GlobalParams()->int_params[i]->info_str(), int32_t(*(GlobalParams()->int_params[i])));
+             param->name_str() + kFlagNamePrefixLen,
+             param->info_str(), int32_t(*param));
     }
   }
-  for (int i = 0; i < GlobalParams()->double_params.size(); ++i) {
-    if (!strncmp(GlobalParams()->double_params[i]->name_str(), kFlagNamePrefix,
+  for (auto &param : GlobalParams()->double_params) {
+    if (!strncmp(param->name_str(), kFlagNamePrefix,
                  kFlagNamePrefixLen)) {
       printf(" --%s %s (type:double default:%g)\n",
-             GlobalParams()->double_params[i]->name_str() + kFlagNamePrefixLen,
-             GlobalParams()->double_params[i]->info_str(),
-             static_cast<double>(*(GlobalParams()->double_params[i])));
+             param->name_str() + kFlagNamePrefixLen,
+             param->info_str(),
+             static_cast<double>(*param));
     }
   }
-  for (int i = 0; i < GlobalParams()->bool_params.size(); ++i) {
-    if (!strncmp(GlobalParams()->bool_params[i]->name_str(), kFlagNamePrefix, kFlagNamePrefixLen)) {
+  for (auto &param : GlobalParams()->bool_params) {
+    if (!strncmp(param->name_str(), kFlagNamePrefix, kFlagNamePrefixLen)) {
       printf(" --%s %s (type:bool default:%s)\n",
-             GlobalParams()->bool_params[i]->name_str() + kFlagNamePrefixLen,
-             GlobalParams()->bool_params[i]->info_str(),
-             bool(*(GlobalParams()->bool_params[i])) ? "true" : "false");
+             param->name_str() + kFlagNamePrefixLen,
+             param->info_str(),
+             bool(*param) ? "true" : "false");
     }
   }
-  for (int i = 0; i < GlobalParams()->string_params.size(); ++i) {
-    if (!strncmp(GlobalParams()->string_params[i]->name_str(), kFlagNamePrefix,
+  for (auto &param : GlobalParams()->string_params) {
+    if (!strncmp(param->name_str(), kFlagNamePrefix,
                  kFlagNamePrefixLen)) {
      printf(" --%s %s (type:string default:%s)\n",
-             GlobalParams()->string_params[i]->name_str() + kFlagNamePrefixLen,
-             GlobalParams()->string_params[i]->info_str(),
-             GlobalParams()->string_params[i]->c_str());
+             param->name_str() + kFlagNamePrefixLen,
+             param->info_str(),
+             param->c_str());
     }
   }
 }
@@ -229,8 +229,8 @@ float CTC::CalculateBiasFraction() {
   for (int l = 0; l < num_labels_; ++l) {
     ++truth_counts[labels_[l]];
   }
-  for (int l = 0; l < output_labels.size(); ++l) {
-    ++output_counts[output_labels[l]];
+  for (auto l : output_labels) {
+    ++output_counts[l];
   }
   // Count the number of true and false positive non-nulls and truth labels.
   int true_pos = 0, false_pos = 0, total_labels = 0;
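In the rewritten loop above the loop variable `l` is now the label value itself rather than a position, so it indexes output_counts directly. A stand-alone sketch of that counting pattern with made-up data:

    #include <cstdio>
    #include <vector>

    int main() {
      std::vector<int> output_labels = {2, 5, 2, 7, 5, 2};
      std::vector<int> output_counts(10, 0);
      for (auto l : output_labels) {
        ++output_counts[l];  // l is a label value, not a loop index
      }
      std::printf("label 2 occurs %d times\n", output_counts[2]);
      return 0;
    }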
@@ -197,10 +197,10 @@ Pix *PrepareDistortedPix(const Pix *pix, bool perspective, bool invert, bool whi
   if (perspective)
     GeneratePerspectiveDistortion(0, 0, randomizer, &distorted, boxes);
   if (boxes != nullptr) {
-    for (int b = 0; b < boxes->size(); ++b) {
-      (*boxes)[b].scale(1.0f / box_reduction);
-      if ((*boxes)[b].width() <= 0)
-        (*boxes)[b].set_right((*boxes)[b].left() + 1);
+    for (auto &b : *boxes) {
+      b.scale(1.0f / box_reduction);
+      if (b.width() <= 0)
+        b.set_right(b.left() + 1);
     }
   }
   if (invert && randomizer->SignedRand(1.0) < -0)
@@ -232,16 +232,16 @@ void GeneratePerspectiveDistortion(int width, int height, TRand *randomizer, Pix
   }
   if (boxes != nullptr) {
     // Transform the boxes.
-    for (int b = 0; b < boxes->size(); ++b) {
+    for (auto &b : *boxes) {
       int x1, y1, x2, y2;
-      const TBOX &box = (*boxes)[b];
+      const TBOX &box = b;
       projectiveXformSampledPt(box_coeffs, box.left(), height - box.top(), &x1, &y1);
       projectiveXformSampledPt(box_coeffs, box.right(), height - box.bottom(), &x2, &y2);
       TBOX new_box1(x1, height - y2, x2, height - y1);
       projectiveXformSampledPt(box_coeffs, box.left(), height - box.bottom(), &x1, &y1);
       projectiveXformSampledPt(box_coeffs, box.right(), height - box.top(), &x2, &y2);
       TBOX new_box2(x1, height - y1, x2, height - y2);
-      (*boxes)[b] = new_box1.bounding_union(new_box2);
+      b = new_box1.bounding_union(new_box2);
     }
   }
   free(im_coeffs);
@@ -718,7 +718,7 @@ bool LSTMTrainer::EncodeString(const std::string &str, const UNICHARSET &unichar
     tprintf("Empty truth string!\n");
     return false;
   }
-  int err_index;
+  unsigned err_index;
   std::vector<int> internal_labels;
   labels->clear();
   if (!simple_text)
@@ -822,7 +822,7 @@ Trainability LSTMTrainer::PrepareForBackward(const ImageData *trainingdata, Netw
       std::reverse(truth_labels.begin(), truth_labels.end());
     }
   }
-  int w = 0;
+  unsigned w = 0;
   while (w < truth_labels.size() &&
          (truth_labels[w] == UNICHAR_SPACE || truth_labels[w] == null_char_))
     ++w;
@@ -98,7 +98,7 @@ TEST(UnicharsetTest, Multibyte) {
   EXPECT_THAT(v, ElementsAreArray({3, 4, 4, 5, 8, 7}));
   // With the fi ligature the fi is picked out.
   std::vector<char> lengths;
-  int encoded_length;
+  unsigned encoded_length;
   std::string src_str = "\u0627\u062c\ufb01\u0635\u062b";
   // src_str has to be pre-cleaned for lengths to be correct.
   std::string cleaned = u.CleanupString(src_str.c_str());