Mirror of https://github.com/tesseract-ocr/tesseract.git
Removed unused parameters
The following parameters are no longer used anywhere:

* use_definite_ambigs_for_classifier
* max_viterbi_list_size
* word_to_debug_lengths
* fragments_debug
* tessedit_redo_xheight
* debug_acceptable_wds
* tessedit_matcher_log
* tessedit_test_adaption_mode
* docqual_excuse_outline_errs
* crunch_pot_garbage
* suspect_space_level
* tessedit_consistent_reps
* wordrec_display_all_words
* wordrec_no_block
* wordrec_worst_state
* fragments_guide_chopper
* segment_adjust_debug
* classify_adapt_feature_thresh (classify_adapt_feature_threshold still exists)
* classify_adapt_proto_thresh (classify_adapt_proto_threshold still exists)
* classify_min_norm_scale_x
* classify_max_norm_scale_x
* classify_min_norm_scale_y
* classify_max_norm_scale_y
* il1_adaption_test
* textord_blob_size_bigile
* textord_blob_size_smallile
* editor_debug_config_file
* textord_tabfind_show_color_fit

The list was generated by a Python script, and each parameter occurrence was checked manually (a sketch of such a script follows below).
commit aa2ab68e29 (parent 8095e6c1c3)
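The actual script is not part of this commit; the following is a minimal sketch of how such an unused-parameter scan could look, assuming a simple regex pass over the *_VAR / *_MEMBER / *_VAR_H definition macros visible in the diff below. The names and structure here are illustrative only, not the script that was actually used.

```python
#!/usr/bin/env python3
"""Sketch of an unused-parameter scan (hypothetical, not the script behind
this commit). It collects every name defined through the *_VAR / *_MEMBER /
*_VAR_H / *_INIT_MEMBER macros and reports names that never appear outside
a definition or declaration line."""
import os
import re
import sys

SRC_EXTS = {".cpp", ".h"}
# Matches definition macros such as INT_VAR(name, ...), BOOL_MEMBER(name, ...),
# double_VAR_H(name, ...), STRING_INIT_MEMBER(name, ...).
DEF_RE = re.compile(
    r"\b(?:INT|BOOL|STRING|double)_(?:INIT_MEMBER|MEMBER|VAR_H|VAR)\s*\(\s*(\w+)")

def source_texts(root):
    """Yield (path, text) for every C++ source/header file under root."""
    for dirpath, _, names in os.walk(root):
        for name in names:
            if os.path.splitext(name)[1] in SRC_EXTS:
                path = os.path.join(dirpath, name)
                with open(path, encoding="utf-8", errors="ignore") as f:
                    yield path, f.read()

def main(root="."):
    texts = dict(source_texts(root))
    # Every parameter name that is defined somewhere in the tree.
    params = {name for text in texts.values() for name in DEF_RE.findall(text)}
    for param in sorted(params):
        word = re.compile(r"\b%s\b" % re.escape(param))
        # A parameter is a removal candidate if every line mentioning it
        # is itself a definition/declaration line.
        used = any(
            word.search(line) and not DEF_RE.search(line)
            for text in texts.values()
            for line in text.splitlines())
        if not used:
            print(param)

if __name__ == "__main__":
    main(sys.argv[1] if len(sys.argv) > 1 else ".")
```

Any name reported this way still needs manual verification, since a parameter may also be referenced only from traineddata or config files, as the config hunks at the end of this diff show.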
@@ -141,8 +141,6 @@ INT_VAR(editor_word_ypos, 510, "Word window Y Pos");
INT_VAR(editor_word_height, 240, "Word window height");
INT_VAR(editor_word_width, 655, "Word window width");

static STRING_VAR(editor_debug_config_file, "", "Config file to apply to single words");

/**
 * show_point()
 *
@@ -137,8 +137,6 @@ Tesseract::Tesseract()
"Don't bother with word plausibility", this->params()),
BOOL_MEMBER(tessedit_fix_hyphens, true, "Crunch double hyphens?",
this->params()),
BOOL_MEMBER(tessedit_redo_xheight, true, "Check/Correct x-height",
this->params()),
BOOL_MEMBER(tessedit_enable_doc_dict, true,
"Add words to the document dictionary", this->params()),
BOOL_MEMBER(tessedit_debug_fonts, false, "Output font info per char",
@@ -182,8 +180,6 @@ Tesseract::Tesseract()
INT_MEMBER(noise_maxperword, 16, "Max diacritics to apply to a word",
this->params()),
INT_MEMBER(debug_x_ht_level, 0, "Reestimate debug", this->params()),
BOOL_MEMBER(debug_acceptable_wds, false, "Dump word pass/fail chk",
this->params()),
STRING_MEMBER(chs_leading_punct, "('`\"", "Leading punctuation",
this->params()),
STRING_MEMBER(chs_trailing_punct1, ").,;:?!", "1st Trailing punctuation",
@@ -206,10 +202,6 @@ Tesseract::Tesseract()
"Do minimal rejection on pass 1 output", this->params()),
BOOL_MEMBER(tessedit_test_adaption, false, "Test adaption criteria",
this->params()),
BOOL_MEMBER(tessedit_matcher_log, false, "Log matcher activity",
this->params()),
INT_MEMBER(tessedit_test_adaption_mode, 3,
"Adaptation decision algorithm for tess", this->params()),
BOOL_MEMBER(test_pt, false, "Test for point", this->params()),
double_MEMBER(test_pt_x, 99999.99, "xcoord", this->params()),
double_MEMBER(test_pt_y, 99999.99, "ycoord", this->params()),
@@ -227,8 +219,6 @@ Tesseract::Tesseract()
this->params()),
STRING_MEMBER(outlines_2, "ij!?%\":;", "Non standard number of outlines",
this->params()),
BOOL_MEMBER(docqual_excuse_outline_errs, false,
"Allow outline errs in unrejection?", this->params()),
BOOL_MEMBER(tessedit_good_quality_unrej, true,
"Reduce rejection on good docs", this->params()),
BOOL_MEMBER(tessedit_use_reject_spaces, true, "Reject spaces?",
@@ -291,8 +281,6 @@ Tesseract::Tesseract()
this->params()),
double_MEMBER(crunch_pot_poor_cert, -8.0, "POTENTIAL crunch cert lt this",
this->params()),
BOOL_MEMBER(crunch_pot_garbage, true, "POTENTIAL crunch garbage",
this->params()),
double_MEMBER(crunch_del_rating, 60, "POTENTIAL crunch rating lt this",
this->params()),
double_MEMBER(crunch_del_cert, -10.0, "POTENTIAL crunch cert lt this",
@@ -410,8 +398,6 @@ Tesseract::Tesseract()
STRING_MEMBER(unrecognised_char, "|",
"Output char for unidentified blobs", this->params()),
INT_MEMBER(suspect_level, 99, "Suspect marker level", this->params()),
INT_MEMBER(suspect_space_level, 100,
"Min suspect level for rejecting spaces", this->params()),
INT_MEMBER(suspect_short_words, 2,
"Don't suspect dict wds longer than this", this->params()),
BOOL_MEMBER(suspect_constrain_1Il, false, "UNLV keep 1Il chars rejected",
@@ -428,8 +414,6 @@ Tesseract::Tesseract()
"Make output have exactly one word per WERD", this->params()),
BOOL_MEMBER(tessedit_zero_kelvin_rejection, false,
"Don't reject ANYTHING AT ALL", this->params()),
BOOL_MEMBER(tessedit_consistent_reps, true,
"Force all rep chars the same", this->params()),
INT_MEMBER(tessedit_reject_mode, 0, "Rejection algorithm",
this->params()),
BOOL_MEMBER(tessedit_rejection_debug, false, "Adaption debug",
@@ -837,7 +837,6 @@ class Tesseract : public Wordrec {
BOOL_VAR_H(tessedit_unrej_any_wd, false,
"Don't bother with word plausibility");
BOOL_VAR_H(tessedit_fix_hyphens, true, "Crunch double hyphens?");
BOOL_VAR_H(tessedit_redo_xheight, true, "Check/Correct x-height");
BOOL_VAR_H(tessedit_enable_doc_dict, true,
"Add words to the document dictionary");
BOOL_VAR_H(tessedit_debug_fonts, false, "Output font info per char");
@@ -868,7 +867,6 @@ class Tesseract : public Wordrec {
INT_VAR_H(noise_maxperblob, 8, "Max diacritics to apply to a blob");
INT_VAR_H(noise_maxperword, 16, "Max diacritics to apply to a word");
INT_VAR_H(debug_x_ht_level, 0, "Reestimate debug");
BOOL_VAR_H(debug_acceptable_wds, false, "Dump word pass/fail chk");
STRING_VAR_H(chs_leading_punct, "('`\"", "Leading punctuation");
STRING_VAR_H(chs_trailing_punct1, ").,;:?!", "1st Trailing punctuation");
STRING_VAR_H(chs_trailing_punct2, ")'`\"", "2nd Trailing punctuation");
@@ -883,9 +881,6 @@ class Tesseract : public Wordrec {
BOOL_VAR_H(tessedit_minimal_rej_pass1, false,
"Do minimal rejection on pass 1 output");
BOOL_VAR_H(tessedit_test_adaption, false, "Test adaption criteria");
BOOL_VAR_H(tessedit_matcher_log, false, "Log matcher activity");
INT_VAR_H(tessedit_test_adaption_mode, 3,
"Adaptation decision algorithm for tess");
BOOL_VAR_H(test_pt, false, "Test for point");
double_VAR_H(test_pt_x, 99999.99, "xcoord");
double_VAR_H(test_pt_y, 99999.99, "ycoord");
@@ -897,8 +892,6 @@ class Tesseract : public Wordrec {
BOOL_VAR_H(lstm_use_matrix, 1, "Use ratings matrix/beam searct with lstm");
STRING_VAR_H(outlines_odd, "%| ", "Non standard number of outlines");
STRING_VAR_H(outlines_2, "ij!?%\":;", "Non standard number of outlines");
BOOL_VAR_H(docqual_excuse_outline_errs, false,
"Allow outline errs in unrejection?");
BOOL_VAR_H(tessedit_good_quality_unrej, true,
"Reduce rejection on good docs");
BOOL_VAR_H(tessedit_use_reject_spaces, true, "Reject spaces?");
@@ -943,7 +936,6 @@ class Tesseract : public Wordrec {
double_VAR_H(crunch_poor_garbage_rate, 60, "crunch garbage rating lt this");
double_VAR_H(crunch_pot_poor_rate, 40, "POTENTIAL crunch rating lt this");
double_VAR_H(crunch_pot_poor_cert, -8.0, "POTENTIAL crunch cert lt this");
BOOL_VAR_H(crunch_pot_garbage, true, "POTENTIAL crunch garbage");
double_VAR_H(crunch_del_rating, 60, "POTENTIAL crunch rating lt this");
double_VAR_H(crunch_del_cert, -10.0, "POTENTIAL crunch cert lt this");
double_VAR_H(crunch_del_min_ht, 0.7, "Del if word ht lt xht x this");
@@ -1017,7 +1009,6 @@ class Tesseract : public Wordrec {
"Specify minimum characters to try during OSD");
STRING_VAR_H(unrecognised_char, "|", "Output char for unidentified blobs");
INT_VAR_H(suspect_level, 99, "Suspect marker level");
INT_VAR_H(suspect_space_level, 100, "Min suspect level for rejecting spaces");
INT_VAR_H(suspect_short_words, 2, "Don't Suspect dict wds longer than this");
BOOL_VAR_H(suspect_constrain_1Il, false, "UNLV keep 1Il chars rejected");
double_VAR_H(suspect_rating_per_ch, 999.9, "Don't touch bad rating limit");
@@ -1028,7 +1019,6 @@ class Tesseract : public Wordrec {
"Make output have exactly one word per WERD");
BOOL_VAR_H(tessedit_zero_kelvin_rejection, false,
"Don't reject ANYTHING AT ALL");
BOOL_VAR_H(tessedit_consistent_reps, true, "Force all rep chars the same");
INT_VAR_H(tessedit_reject_mode, 0, "Rejection algorithm");
BOOL_VAR_H(tessedit_rejection_debug, false, "Adaption debug");
BOOL_VAR_H(tessedit_flip_0O, true, "Contextual 0O O0 flips");
@@ -17,8 +17,6 @@ CCUtil::CCUtil() :
params_(),
INT_INIT_MEMBER(ambigs_debug_level, 0, "Debug level for unichar ambiguities",
&params_),
BOOL_MEMBER(use_definite_ambigs_for_classifier, false, "Use definite"
" ambiguities when running character classifier", &params_),
BOOL_MEMBER(use_ambigs_for_adaption, false, "Use ambigs for deciding"
" whether to adapt to a character", &params_) {
}
@@ -69,8 +69,6 @@ class CCUtil {
// These have to be declared and initialized after params_ member, since
// params_ should be initialized before parameters are added to it.
INT_VAR_H(ambigs_debug_level, 0, "Debug level for unichar ambiguities");
BOOL_VAR_H(use_definite_ambigs_for_classifier, false,
"Use definite ambiguities when running character classifier");
BOOL_VAR_H(use_ambigs_for_adaption, false,
"Use ambigs for deciding whether to adapt to a character");
};
@@ -70,16 +70,6 @@ Classify::Classify()
this->params()),
double_MEMBER(classify_char_norm_range, 0.2,
"Character Normalization Range ...", this->params()),
double_MEMBER(classify_min_norm_scale_x, 0.0, "Min char x-norm scale ...",
this->params()), /* PREV DEFAULT 0.1 */
double_MEMBER(classify_max_norm_scale_x, 0.325,
"Max char x-norm scale ...",
this->params()), /* PREV DEFAULT 0.3 */
double_MEMBER(classify_min_norm_scale_y, 0.0, "Min char y-norm scale ...",
this->params()), /* PREV DEFAULT 0.1 */
double_MEMBER(classify_max_norm_scale_y, 0.325,
"Max char y-norm scale ...",
this->params()), /* PREV DEFAULT 0.3 */
double_MEMBER(classify_max_rating_ratio, 1.5,
"Veto ratio between classifier ratings", this->params()),
double_MEMBER(classify_max_certainty_margin, 5.5,
@@ -173,8 +163,6 @@ Classify::Classify()
"Class Pruner CutoffStrength: ", this->params()),
INT_MEMBER(classify_integer_matcher_multiplier, 10,
"Integer Matcher Multiplier 0-255: ", this->params()),
INT_MEMBER(il1_adaption_test, 0,
"Don't adapt to i/I at beginning of word", this->params()),
BOOL_MEMBER(classify_bln_numeric_mode, 0,
"Assume the input is numbers [0-9].", this->params()),
double_MEMBER(speckle_large_max_size, 0.30, "Max large speckle size",
@@ -434,10 +434,6 @@ class Classify : public CCStruct {
INT_VAR_H(classify_norm_method, character, "Normalization Method ...");
double_VAR_H(classify_char_norm_range, 0.2,
"Character Normalization Range ...");
double_VAR_H(classify_min_norm_scale_x, 0.0, "Min char x-norm scale ...");
double_VAR_H(classify_max_norm_scale_x, 0.325, "Max char x-norm scale ...");
double_VAR_H(classify_min_norm_scale_y, 0.0, "Min char y-norm scale ...");
double_VAR_H(classify_max_norm_scale_y, 0.325, "Max char y-norm scale ...");
double_VAR_H(classify_max_rating_ratio, 1.5,
"Veto ratio between classifier ratings");
double_VAR_H(classify_max_certainty_margin, 5.5,
@@ -508,7 +504,6 @@ class Classify : public CCStruct {
INT_VAR_H(classify_integer_matcher_multiplier, 10,
"Integer Matcher Multiplier 0-255: ");

INT_VAR_H(il1_adaption_test, 0, "Don't adapt to i/I at beginning of word");
BOOL_VAR_H(classify_bln_numeric_mode, 0,
"Assume the input is numbers [0-9].");
double_VAR_H(speckle_large_max_size, 0.30, "Max large speckle size");
@@ -47,15 +47,6 @@ struct CP_RESULT_STRUCT {
CLASS_ID Class;
};

/*----------------------------------------------------------------------------
Variables
-----------------------------------------------------------------------------*/

extern INT_VAR_H(classify_adapt_proto_thresh, 230,
"Threshold for good protos during adaptive 0-255: ");

extern INT_VAR_H(classify_adapt_feature_thresh, 230,
"Threshold for good features during adaptive 0-255: ");

/**----------------------------------------------------------------------------
Public Function Prototypes
@@ -105,8 +105,6 @@ Dict::Dict(CCUtil* ccutil)
getCCUtil()->params()),
INT_MEMBER(hyphen_debug_level, 0, "Debug level for hyphenated words.",
getCCUtil()->params()),
INT_MEMBER(max_viterbi_list_size, 10, "Maximum size of viterbi list.",
getCCUtil()->params()),
BOOL_MEMBER(use_only_first_uft8_step, false,
"Use only the first UTF8 step of the given string"
" when computing log probabilities.",
@@ -140,11 +138,6 @@ Dict::Dict(CCUtil* ccutil)
"Word for which stopper debug"
" information should be printed to stdout",
getCCUtil()->params()),
STRING_MEMBER(word_to_debug_lengths, "",
"Lengths of unichars in word_to_debug",
getCCUtil()->params()),
INT_MEMBER(fragments_debug, 0, "Debug character fragments",
getCCUtil()->params()),
BOOL_MEMBER(segment_nonalphabetic_script, false,
"Don't use any alphabetic-specific tricks."
" Set to true in the traineddata config file for"
@@ -621,7 +621,6 @@ class Dict {
INT_VAR_H(dawg_debug_level, 0, "Set to 1 for general debug info"
", to 2 for more details, to 3 to see all the debug messages");
INT_VAR_H(hyphen_debug_level, 0, "Debug level for hyphenated words.");
INT_VAR_H(max_viterbi_list_size, 10, "Maximum size of viterbi list.");
BOOL_VAR_H(use_only_first_uft8_step, false,
"Use only the first UTF8 step of the given string"
" when computing log probabilities.");
@@ -643,9 +642,6 @@ class Dict {
INT_VAR_H(tessedit_truncate_wordchoice_log, 10, "Max words to keep in list");
STRING_VAR_H(word_to_debug, "", "Word for which stopper debug information"
" should be printed to stdout");
STRING_VAR_H(word_to_debug_lengths, "",
"Lengths of unichars in word_to_debug");
INT_VAR_H(fragments_debug, 0, "Debug character fragments");
BOOL_VAR_H(segment_nonalphabetic_script, false,
"Don't use any alphabetic-specific tricks."
"Set to true in the traineddata config file for"
@@ -28,8 +28,6 @@

namespace tesseract {

static BOOL_VAR(textord_tabfind_show_color_fit, false, "Show stroke widths");

// Max pad factor used to search the neighbourhood of a partition to smooth
// partition types.
const int kMaxPadFactor = 6;
@@ -185,12 +185,8 @@ Textord::Textord(CCStruct* ccstruct)
ccstruct_->params()),
INT_MEMBER(textord_baseline_debug, 0, "Baseline debug level",
ccstruct_->params()),
double_MEMBER(textord_blob_size_bigile, 95, "Percentile for large blobs",
ccstruct_->params()),
double_MEMBER(textord_noise_area_ratio, 0.7,
"Fraction of bounding box for noise", ccstruct_->params()),
double_MEMBER(textord_blob_size_smallile, 20,
"Percentile for small blobs", ccstruct_->params()),
double_MEMBER(textord_initialx_ile, 0.75,
"Ile of sizes for xheight guess", ccstruct_->params()),
double_MEMBER(textord_initialasc_ile, 0.90,
@@ -375,10 +375,8 @@ class Textord {
BOOL_VAR_H(textord_show_boxes, false, "Display boxes");
INT_VAR_H(textord_max_noise_size, 7, "Pixel size of noise");
INT_VAR_H(textord_baseline_debug, 0, "Baseline debug level");
double_VAR_H(textord_blob_size_bigile, 95, "Percentile for large blobs");
double_VAR_H(textord_noise_area_ratio, 0.7,
"Fraction of bounding box for noise");
double_VAR_H(textord_blob_size_smallile, 20, "Percentile for small blobs");
double_VAR_H(textord_initialx_ile, 0.75, "Ile of sizes for xheight guess");
double_VAR_H(textord_initialasc_ile, 0.90, "Ile of sizes for xheight guess");
INT_VAR_H(textord_noise_sizefraction, 10, "Fraction of size for maxima");
@@ -38,8 +38,6 @@ C_COL color_list[] = {

BOOL_VAR(wordrec_display_all_blobs, 0, "Display Blobs");

BOOL_VAR(wordrec_display_all_words, 0, "Display Words");

BOOL_VAR(wordrec_blob_pause, 0, "Blob pause");

/*----------------------------------------------------------------------
@@ -37,8 +37,6 @@ extern C_COL color_list[]; /* Colors for outlines */

extern BOOL_VAR_H(wordrec_display_all_blobs, 0, "Display Blobs");

extern BOOL_VAR_H(wordrec_display_all_words, 0, "Display Words");

extern BOOL_VAR_H(wordrec_blob_pause, 0, "Blob pause");

#define NUM_COLORS 6
@@ -49,19 +49,12 @@ Wordrec::Wordrec() :
BOOL_MEMBER(merge_fragments_in_matrix, true,
"Merge the fragments in the ratings matrix and delete them"
" after merging", params()),
BOOL_MEMBER(wordrec_no_block, false, "Don't output block information",
params()),
BOOL_MEMBER(wordrec_enable_assoc, true, "Associator Enable",
params()),
BOOL_MEMBER(force_word_assoc, false,
"force associator to run regardless of what enable_assoc is."
" This is used for CJK where component grouping is necessary.",
CCUtil::params()),
double_MEMBER(wordrec_worst_state, 1.0, "Worst segmentation state",
params()),
BOOL_MEMBER(fragments_guide_chopper, false,
"Use information from fragments to guide chopping process",
params()),
INT_MEMBER(repair_unchopped_blobs, 1, "Fix blobs that aren't chopped",
params()),
double_MEMBER(tessedit_certainty_threshold, -2.25, "Good blob limit",
@@ -104,8 +97,6 @@ Wordrec::Wordrec() :
params()),
INT_MEMBER(chop_x_y_weight, 3, "X / Y length weight",
params()),
INT_MEMBER(segment_adjust_debug, 0, "Segmentation adjustment debug",
params()),
BOOL_MEMBER(assume_fixed_pitch_char_segment, false,
"include fixed-pitch heuristics in char segmentation",
params()),
@@ -195,14 +195,10 @@ class Wordrec : public Classify {
BOOL_VAR_H(merge_fragments_in_matrix, true,
"Merge the fragments in the ratings matrix and delete them "
"after merging");
BOOL_VAR_H(wordrec_no_block, false, "Don't output block information");
BOOL_VAR_H(wordrec_enable_assoc, true, "Associator Enable");
BOOL_VAR_H(force_word_assoc, false,
"force associator to run regardless of what enable_assoc is."
"This is used for CJK where component grouping is necessary.");
double_VAR_H(wordrec_worst_state, 1, "Worst segmentation state");
BOOL_VAR_H(fragments_guide_chopper, false,
"Use information from fragments to guide chopping process");
INT_VAR_H(repair_unchopped_blobs, 1, "Fix blobs that aren't chopped");
double_VAR_H(tessedit_certainty_threshold, -2.25, "Good blob limit");
INT_VAR_H(chop_debug, 0, "Chop debug");
@@ -225,7 +221,6 @@ class Wordrec : public Classify {
double_VAR_H(chop_ok_split, 100.0, "OK split limit");
double_VAR_H(chop_good_split, 50.0, "Good split limit");
INT_VAR_H(chop_x_y_weight, 3, "X / Y length weight");
INT_VAR_H(segment_adjust_debug, 0, "Segmentation adjustment debug");
BOOL_VAR_H(assume_fixed_pitch_char_segment, false,
"include fixed-pitch heuristics in char segmentation");
INT_VAR_H(wordrec_debug_level, 0, "Debug level for wordrec");
@@ -4,7 +4,6 @@ textord_fast_pitch_test T
tessedit_zero_rejection T
tessedit_minimal_rejection F
tessedit_write_rep_codes F
il1_adaption_test 1
edges_children_fix F
edges_childarea 0.65
edges_boxarea 0.9
@@ -4,7 +4,6 @@ textord_fast_pitch_test T
tessedit_zero_rejection T
tessedit_minimal_rejection F
tessedit_write_rep_codes F
il1_adaption_test 1
edges_children_fix F
edges_childarea 0.65
edges_boxarea 0.9
@@ -4,7 +4,6 @@ textord_fast_pitch_test T
tessedit_zero_rejection T
tessedit_minimal_rejection F
tessedit_write_rep_codes F
il1_adaption_test 1
edges_children_fix F
edges_childarea 0.65
edges_boxarea 0.9
@@ -7,7 +7,6 @@ matcher_debug_flags 6
matcher_debug_level 1

wordrec_display_splits 0
wordrec_display_all_words 1
wordrec_display_all_blobs 1
wordrec_display_segmentations 2
classify_debug_level 1
@@ -3,7 +3,6 @@
#################################################

wordrec_display_splits 0
wordrec_display_all_words 1
wordrec_display_all_blobs 1
wordrec_display_segmentations 2
classify_debug_level 1