diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md index ecc0b1808..e29b0550f 100644 --- a/.github/ISSUE_TEMPLATE.md +++ b/.github/ISSUE_TEMPLATE.md @@ -1,4 +1,4 @@ -Before you submit an issue, please review [the guidelines for this repository](https://github.com/tesseract-ocr/tesseract/blob/master/CONTRIBUTING.md). +Before you submit an issue, please review [the guidelines for this repository](https://github.com/tesseract-ocr/tesseract/blob/main/CONTRIBUTING.md). Please report an issue only for a BUG, not for asking questions. diff --git a/.github/workflows/autotools-macos.yml b/.github/workflows/autotools-macos.yml index 4134d45a0..52d8d5566 100644 --- a/.github/workflows/autotools-macos.yml +++ b/.github/workflows/autotools-macos.yml @@ -5,6 +5,7 @@ on: #push: schedule: - cron: 0 20 * * * + workflow_dispatch: jobs: brew: @@ -42,7 +43,7 @@ jobs: - name: Configure Tesseract run: | - ./configure '--disable-shared' '--disable-openmp' '--disable-doc' '--with-pic' 'CXX=${{ matrix.config.cxx }}' 'CXXFLAGS=-g -O2' "PKG_CONFIG_PATH=$(brew --prefix)/opt/icu4c/lib/pkgconfig:$(brew --prefix)/opt/libarchive/lib/pkgconfig:$(brew --prefix)/opt/libffi/lib/pkgconfig" + ./configure '--disable-shared' '--disable-openmp' '--disable-doc' '--with-pic' 'CXX=${{ matrix.config.cxx }}' 'CXXFLAGS=-g -O2' - name: Make and Install Tesseract run: | @@ -92,7 +93,7 @@ jobs: run: | export "PKG_CONFIG_PATH=/usr/local/lib/pkgconfig" cd test - ${{ matrix.config.cxx }} -o basicapitest testing/basicapitest.cpp -I/usr/local/include -L/usr/local/lib `pkg-config --cflags --libs tesseract lept ` -pthread -std=c++11 + ${{ matrix.config.cxx }} -o basicapitest testing/basicapitest.cpp $(pkg-config --cflags --libs tesseract lept) -pthread -std=c++11 -framework accelerate ./basicapitest - name: Display Compiler Version @@ -130,6 +131,9 @@ jobs: - name: Install Macports run: | curl -LO https://raw.githubusercontent.com/GiovanniBussi/macports-ci/master/macports-ci; source ./macports-ci install 
+ # --remove-brew does not remove the Homebrew entries in bin, + # so remove them now. + rm -v $(brew --prefix)/bin/* - name: Install Dependencies run: | @@ -189,7 +193,7 @@ jobs: run: | export "PKG_CONFIG_PATH=/usr/local/lib/pkgconfig" cd test - ${{ matrix.config.cxx }} -o basicapitest testing/basicapitest.cpp -I/opt/local/include -L/opt/local/lib -I/usr/local/include -L/usr/local/lib `pkg-config --cflags --libs tesseract lept ` -pthread -std=c++11 + ${{ matrix.config.cxx }} -o basicapitest testing/basicapitest.cpp -I/opt/local/include -L/opt/local/lib $(pkg-config --cflags --libs tesseract lept) -pthread -std=c++11 -framework Accelerate ./basicapitest - name: Display Compiler Version @@ -202,4 +206,3 @@ jobs: run: | cat test-suite.log if: always() - diff --git a/.github/workflows/cifuzz.yml b/.github/workflows/cifuzz.yml new file mode 100644 index 000000000..fd5044a0f --- /dev/null +++ b/.github/workflows/cifuzz.yml @@ -0,0 +1,33 @@ +name: CIFuzz +# OSS-Fuzz CI +# See https://google.github.io/oss-fuzz/getting-started/continuous-integration/ +on: + pull_request: + branches: + - main + paths: + - '**.cpp' + - '**.h' +jobs: + Fuzzing: + runs-on: ubuntu-latest + steps: + - name: Build Fuzzers + id: build + uses: google/oss-fuzz/infra/cifuzz/actions/build_fuzzers@master + with: + oss-fuzz-project-name: 'tesseract-ocr' + language: c++ + dry-run: false + - name: Run Fuzzers + uses: google/oss-fuzz/infra/cifuzz/actions/run_fuzzers@master + with: + oss-fuzz-project-name: 'tesseract-ocr' + fuzz-seconds: 600 + dry-run: false + - name: Upload Crash + uses: actions/upload-artifact@v1 + if: failure() && steps.build.outcome == 'success' + with: + name: artifacts + path: ./out/artifacts diff --git a/.github/workflows/cmake-win64.yml b/.github/workflows/cmake-win64.yml index 569cade00..e205a696a 100644 --- a/.github/workflows/cmake-win64.yml +++ b/.github/workflows/cmake-win64.yml @@ -133,8 +133,8 @@ jobs: git clone --depth 1 https://github.com/tesseract-ocr/tessconfigs mkdir 
d:/a/local/share move tessconfigs d:/a/local/share - curl -L https://github.com/tesseract-ocr/tessdata/raw/master/eng.traineddata --output d:/a/local/share/tessconfigs/eng.traineddata - curl -L https://github.com/tesseract-ocr/tessdata/raw/master/osd.traineddata --output d:/a/local/share/tessconfigs/osd.traineddata + curl -L https://github.com/tesseract-ocr/tessdata/raw/main/eng.traineddata --output d:/a/local/share/tessconfigs/eng.traineddata + curl -L https://github.com/tesseract-ocr/tessdata/raw/main/osd.traineddata --output d:/a/local/share/tessconfigs/osd.traineddata set TESSDATA_PREFIX=d:/a/local/share/tessconfigs set PATH=d:/a/local/bin;%PATH% tesseract -v diff --git a/.github/workflows/cmake.yml b/.github/workflows/cmake.yml index 1f66b1d76..688ecadbd 100644 --- a/.github/workflows/cmake.yml +++ b/.github/workflows/cmake.yml @@ -17,9 +17,15 @@ jobs: - { name: macos-10.15-clang-12-cmake, os: macos-10.15, cxx: clang++ } # default - { name: macos-10.15-clang-11-cmake, os: macos-10.15, cxx: '$(brew --prefix llvm)/bin/clang++' } #installed - - { name: macos-10.15-gcc-8-cmake, os: macos-10.15, cxx: g++-8 } #installed - { name: macos-10.15-gcc-9-cmake, os: macos-10.15, cxx: g++-9 } #installed - { name: macos-10.15-gcc-10-cmake, os: macos-10.15, cxx: g++-10 } #installed + - { name: macos-10.15-gcc-11-cmake, os: macos-10.15, cxx: g++-11 } #installed + + - { name: macos-11-clang-12-cmake, os: macos-11, cxx: clang++ } # default + - { name: macos-11-clang-11-cmake, os: macos-11, cxx: '$(brew --prefix llvm)/bin/clang++' } #installed + - { name: macos-11-gcc-9-cmake, os: macos-11, cxx: g++-9 } #installed + - { name: macos-11-gcc-10-cmake, os: macos-11, cxx: g++-10 } #installed + - { name: macos-11-gcc-11-cmake, os: macos-11, cxx: g++-11 } #installed - { name: ubuntu-18.04-clang-7-cmake, os: ubuntu-18.04, cxx: clang++-7 } - { name: ubuntu-18.04-clang-8-cmake, os: ubuntu-18.04, cxx: clang++-8 } #installed @@ -47,20 +53,22 @@ jobs: sudo apt-get install ${{ matrix.config.cxx 
}} -y if: runner.os == 'Linux' -# sudo apt-get install libarchive-dev libcurl4-openssl-dev libcurl4 curl -y - name: Install dependencies on Linux run: | sudo apt-get install autoconf libleptonica-dev -y + sudo apt-get install libarchive-dev libcurl4-openssl-dev -y sudo apt-get install libpango1.0-dev -y sudo apt-get install cabextract -y sudo apt-get install ninja-build -y + cmake --version if: runner.os == 'Linux' - name: Install dependencies on macOS run: | brew install autoconf automake brew install leptonica - brew install cairo pango icu4c + brew install libarchive + brew install pango brew install cabextract brew install ninja ninja --version @@ -137,16 +145,24 @@ jobs: build/inst/bin/tesseract test/testing/eurotext.tif - -l fra --oem 1 --tessdata-dir ../tessdata_best build/inst/bin/tesseract test/testing/arabic.tif - -l ara --oem 1 --psm 6 --tessdata-dir ../tessdata - - name: Build and run basicapitest + - name: Build and run basicapitest (Linux) run: | export "PKG_CONFIG_PATH=$GITHUB_WORKSPACE/build/inst/lib/pkgconfig/:$PKG_CONFIG_PATH" cd test - ${{ matrix.config.cxx }} -o basicapitest testing/basicapitest.cpp "-I$GITHUB_WORKSPACE/build/inst/include" "-L$GITHUB_WORKSPACE/build/inst/lib" `pkg-config --cflags --libs tesseract lept ` -pthread -std=c++11 + ${{ matrix.config.cxx }} -o basicapitest testing/basicapitest.cpp "-I$GITHUB_WORKSPACE/build/inst/include" "-L$GITHUB_WORKSPACE/build/inst/lib" $(pkg-config --cflags --libs tesseract lept libarchive libcurl) -pthread -std=c++11 ./basicapitest + if: runner.os == 'Linux' + + - name: Build and run basicapitest (macOS) + run: | + export "PKG_CONFIG_PATH=$GITHUB_WORKSPACE/build/inst/lib/pkgconfig/:$(brew --prefix)/opt/libarchive/lib/pkgconfig:$(brew --prefix)/Library/Homebrew/os/mac/pkgconfig/11:$PKG_CONFIG_PATH" + cd test + ${{ matrix.config.cxx }} -o basicapitest testing/basicapitest.cpp "-I$GITHUB_WORKSPACE/build/inst/include" "-L$GITHUB_WORKSPACE/build/inst/lib" $(pkg-config --cflags --libs tesseract lept 
libarchive libcurl) -pthread -std=c++11 + ./basicapitest + if: runner.os == 'macOS' - name: Display Compiler Version run: | ${{ matrix.config.cxx }} --version git log -3 --pretty=format:'%h %ad %s | %an' if: always() - diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml new file mode 100644 index 000000000..828494caa --- /dev/null +++ b/.github/workflows/codeql-analysis.yml @@ -0,0 +1,69 @@ +# For most projects, this workflow file will not need changing; you simply need +# to commit it to your repository. +# +# You may wish to alter this file to override the set of languages analyzed, +# or to provide custom queries or build logic. +# +# ******** NOTE ******** +# We have attempted to detect the languages in your repository. Please check +# the `language` matrix defined below to confirm you have the correct set of +# supported CodeQL languages. +# +name: "CodeQL" + +on: + push: + branches: [ main ] + pull_request: + # The branches below must be a subset of the branches above + branches: [ main ] + schedule: + - cron: '34 23 * * 2' + +jobs: + analyze: + name: Analyze + runs-on: ubuntu-latest + permissions: + actions: read + contents: read + security-events: write + + strategy: + fail-fast: false + matrix: + language: [ 'cpp' ] + # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ] + # Learn more: + # https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed + + steps: + - name: Checkout repository + uses: actions/checkout@v2 + + - name: Install dependencies + run: | + sudo apt-get update + sudo apt-get install autoconf libleptonica-dev -y + sudo apt-get install libpango1.0-dev -y + sudo apt-get install cabextract libarchive-dev -y + sudo apt-get install libcurl4-openssl-dev libcurl4 curl -y + + # Initializes the CodeQL tools for scanning. 
+ - name: Initialize CodeQL + uses: github/codeql-action/init@v1 + with: + languages: ${{ matrix.language }} + # If you wish to specify custom queries, you can do so here or in a config file. + # By default, queries listed here will override any specified in a config file. + # Prefix the list here with "+" to use these queries and those in the config file. + # queries: ./path/to/local/query, your-org/your-repo/queries@main + + - name: Build + run: | + ./autogen.sh + ./configure + make all training + + - name: Perform CodeQL Analysis + uses: github/codeql-action/analyze@v1 diff --git a/.github/workflows/msys2-4.1.1.yml b/.github/workflows/msys2-4.1.1.yml index 5e32ccbfd..47da44a43 100644 --- a/.github/workflows/msys2-4.1.1.yml +++ b/.github/workflows/msys2-4.1.1.yml @@ -29,4 +29,3 @@ jobs: tesseract -v text2image -v lstmtraining -v - \ No newline at end of file diff --git a/.github/workflows/msys2.yml b/.github/workflows/msys2.yml index bf0316f03..047746287 100644 --- a/.github/workflows/msys2.yml +++ b/.github/workflows/msys2.yml @@ -1,5 +1,5 @@ name: msys2 -# msys2 build for tesseract -head from master branch. +# msys2 build for tesseract -head from main branch. 
on: #push: schedule: diff --git a/.github/workflows/sw.yml b/.github/workflows/sw.yml index e755052cf..fb953c365 100644 --- a/.github/workflows/sw.yml +++ b/.github/workflows/sw.yml @@ -13,7 +13,7 @@ jobs: strategy: fail-fast: false matrix: - os: [windows-latest, ubuntu-20.04, macOS-latest] + os: [windows-latest, windows-2022, ubuntu-20.04, macOS-latest] steps: - uses: actions/checkout@v2 @@ -22,50 +22,50 @@ jobs: - uses: egorpugin/sw-action@master - name: build - if: matrix.os == 'windows-latest' + if: matrix.os == 'windows-latest' || matrix.os == 'windows-2022' run: ./sw -static -shared -platform x86,x64 -config d,r build - name: build - if: matrix.os != 'windows-latest' + if: matrix.os != 'windows-latest' && matrix.os != 'windows-2022' run: ./sw -static -shared -config d,r build -Dwith-tests=1 - name: download test data run: git clone https://github.com/egorpugin/tessdata tessdata_unittest - name: copy fonts - if: matrix.os != 'windows-latest' + if: matrix.os != 'windows-latest' && matrix.os != 'windows-2022' run: cp tessdata_unittest/fonts/* test/testing/ - name: copy fonts - if: matrix.os == 'windows-latest' + if: matrix.os == 'windows-latest' || matrix.os == 'windows-2022' run: Copy-Item -Path "tessdata_unittest\fonts\*" -Destination "test\testing" -Recurse shell: pwsh - name: test - if: matrix.os != 'windows-latest' + if: matrix.os != 'windows-latest' && matrix.os != 'windows-2022' run: ./sw -static -shared -config "d,r" test -Dwith-tests=1 "-Dskip-tests=lstm,lstm_recode" continue-on-error: true - name: test-nightly - if: matrix.os != 'windows-latest' && github.event.schedule=='0 0 * * *' + if: matrix.os != 'windows-latest' && matrix.os != 'windows-2022' && github.event.schedule=='0 0 * * *' run: ./sw -static -shared -config "d,r" test -Dwith-tests=1 continue-on-error: true # windows tests hang here for some reason, investigate #- name: test - #if: matrix.os == 'windows-latest' + #if: matrix.os == 'windows-latest' || matrix.os == 'windows-2022' #run: ./sw 
test -Dwith-tests=1 "-Dskip-tests=lstm,lstm_recode" #continue-on-error: true - name: Upload Unit Test Results - if: always() && matrix.os != 'windows-latest' + if: always() && matrix.os != 'windows-latest' && matrix.os != 'windows-2022' uses: actions/upload-artifact@v2 with: name: Test Results (${{ matrix.os }}) path: .sw/test/results.xml - name: Publish Test Report - if: always() && matrix.os != 'windows-latest' + if: always() && matrix.os != 'windows-latest' && matrix.os != 'windows-2022' uses: mikepenz/action-junit-report@v1 with: check_name: test (${{ matrix.os }}) diff --git a/.github/workflows/unittest-disablelegacy.yml b/.github/workflows/unittest-disablelegacy.yml index 812fa0a44..9683f6bbc 100644 --- a/.github/workflows/unittest-disablelegacy.yml +++ b/.github/workflows/unittest-disablelegacy.yml @@ -5,7 +5,7 @@ name: unittest-disablelegacy on: #push: schedule: - - cron: 0 0 1 * * + - cron: 0 10 * * * jobs: linux: @@ -14,7 +14,8 @@ jobs: fail-fast: false matrix: compiler: [ g++, clang++ ] - os: [ ubuntu-18.04, ubuntu-20.04 ] + #os: [ ubuntu-18.04, ubuntu-20.04 ] + os: [ ubuntu-20.04 ] steps: - uses: actions/checkout@v2 @@ -25,7 +26,8 @@ jobs: run: | sudo apt-get install autoconf libleptonica-dev libpango1.0-dev -y sudo apt-get install cabextract -y - + #sudo apt-get install libc++-7-dev libc++abi-7-dev -y + - name: Setup run: | mkdir -p m4 diff --git a/.github/workflows/unittest-macos.yml b/.github/workflows/unittest-macos.yml index 11e35f622..4a3c722a8 100644 --- a/.github/workflows/unittest-macos.yml +++ b/.github/workflows/unittest-macos.yml @@ -25,7 +25,7 @@ jobs: run: | brew install autoconf automake libarchive brew install leptonica cairo pango - brew install cabextract abseil + brew install cabextract - name: Setup run: | @@ -36,8 +36,7 @@ jobs: run: | ./configure '--disable-shared' '--with-pic' \ 'CXX=${{ matrix.config.cxx }}' \ - 'CXXFLAGS=-g -O2 -fsanitize=address,undefined' \ - "PKG_CONFIG_PATH=$(brew --prefix)/opt/icu4c/lib/pkgconfig:$(brew 
--prefix)/opt/libarchive/lib/pkgconfig:$(brew --prefix)/opt/libffi/lib/pkgconfig" + 'CXXFLAGS=-g -O2 -fsanitize=address,undefined' - name: Make and Install Tesseract run: | diff --git a/.github/workflows/vcpkg-4.1.1.yml b/.github/workflows/vcpkg-4.1.1.yml index b8875fd02..a27c7cc42 100644 --- a/.github/workflows/vcpkg-4.1.1.yml +++ b/.github/workflows/vcpkg-4.1.1.yml @@ -13,7 +13,7 @@ jobs: strategy: fail-fast: false matrix: - os: [ubuntu-latest, windows-latest, macos-latest ] + os: [ubuntu-latest, windows-latest] steps: - name: Checkout Tesseract Source (for test images) @@ -56,7 +56,7 @@ jobs: git clone https://github.com/egorpugin/tessdata tessdata_unittest mv tessdata_unittest/* ../ if: runner.os == 'Windows' - + - name: Create CMakeLists.txt file for basicapitest shell: bash run: | @@ -75,7 +75,7 @@ jobs: EOF cat CMakeLists.txt if: runner.os == 'Windows' - + - name: Configure basicapitest run: | cd test @@ -87,7 +87,7 @@ jobs: cd test cmake --build . --config Release if: runner.os == 'Windows' - + - name: Run basicapitest (Windows) run: | cd test diff --git a/.github/workflows/vcpkg.yml b/.github/workflows/vcpkg.yml index b65bc32a7..fc3319103 100644 --- a/.github/workflows/vcpkg.yml +++ b/.github/workflows/vcpkg.yml @@ -15,7 +15,7 @@ jobs: os: [windows-2019] steps: - - name: Checkout Tesseract Source (--head from master branch) + - name: Checkout Tesseract Source (--head from main branch) uses: actions/checkout@v2 with: submodules: recursive @@ -35,7 +35,7 @@ jobs: run: | vcpkg/vcpkg install leptonica:x64-windows - - name: Configure and Build Tesseract (--head from master branch) with cmake + - name: Configure and Build Tesseract (--head from main branch) with cmake run: | cmake . 
-B build -DCMAKE_BUILD_TYPE=Release -DSW_BUILD=OFF -DOPENMP_BUILD=OFF -DBUILD_TRAINING_TOOLS=OFF "-DCMAKE_TOOLCHAIN_FILE=${env:GITHUB_WORKSPACE}/vcpkg/scripts/buildsystems/vcpkg.cmake" cmake --build build --config Release --target install @@ -57,10 +57,10 @@ jobs: include_directories(${Leptonica_INCLUDE_DIRS}) add_executable( basicapitest testing/basicapitest.cpp ) target_link_libraries(basicapitest ${Leptonica_LIBRARIES}) - target_link_libraries(basicapitest ${Tesseract_LIBRARIES}) + target_link_libraries(basicapitest Tesseract::libtesseract) add_library(libtesseract UNKNOWN IMPORTED) set_property(TARGET libtesseract PROPERTY IMPORTED_LOCATION D:/a/tesseract/tesseract/build/Release/tesseract50.lib) - target_link_libraries(basicapitest libtesseract) + target_link_libraries(basicapitest Tesseract::libtesseract) EOF cat CMakeLists.txt diff --git a/.gitmodules b/.gitmodules index a20e2768f..7d15927d0 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,8 +1,5 @@ -[submodule "abseil"] - path = abseil - url = https://github.com/abseil/abseil-cpp.git [submodule "googletest"] - path = googletest + path = unittest/third_party/googletest url = https://github.com/google/googletest.git [submodule "test"] path = test diff --git a/.travis.yml b/.travis.yml index d1f8c5bd2..19b9bcbfb 100644 --- a/.travis.yml +++ b/.travis.yml @@ -21,7 +21,7 @@ cache: before_install: - sudo apt-get install libleptonica-dev libpango1.0-dev libtiff5-dev -y - + install: script: @@ -30,7 +30,7 @@ script: - cmake .. 
-DSW_BUILD=OFF - make - sudo make install - + #after_script: # let those commands trigger build errors - tesseract -v - text2image -v diff --git a/CMakeLists.txt b/CMakeLists.txt index 947693e98..8d2292912 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -2,25 +2,25 @@ # tesseract # -############################################################################### +# ############################################################################## # # cmake settings # -############################################################################### +# ############################################################################## -cmake_minimum_required(VERSION 3.7 FATAL_ERROR) +cmake_minimum_required(VERSION 3.10 FATAL_ERROR) # In-source builds are disabled. -if ("${CMAKE_CURRENT_SOURCE_DIR}" STREQUAL "${CMAKE_CURRENT_BINARY_DIR}") - message(FATAL_ERROR - "CMake generation is not possible within the source directory!" - "\n Remove the CMakeCache.txt file and try again from another folder, e.g.:" - "\n " - "\n rm CMakeCache.txt" - "\n mkdir build" - "\n cd build" - "\n cmake .." - ) +if("${CMAKE_CURRENT_SOURCE_DIR}" STREQUAL "${CMAKE_CURRENT_BINARY_DIR}") + message( + FATAL_ERROR + "CMake generation is not possible within the source directory!" 
+ "\n Remove the CMakeCache.txt file and try again from another folder, " + "e.g.:\n " + "\n rm CMakeCache.txt" + "\n mkdir build" + "\n cd build" + "\n cmake ..") endif() set(CMAKE_MODULE_PATH "${CMAKE_MODULE_PATH};${CMAKE_CURRENT_SOURCE_DIR}/cmake") @@ -33,32 +33,34 @@ set_property(GLOBAL PROPERTY USE_FOLDERS ON) set_property(GLOBAL PROPERTY PREDEFINED_TARGETS_FOLDER "CMake Targets") if(NOT ${CMAKE_VERSION} VERSION_LESS "3.15.0") - if (WIN32) - cmake_policy(SET CMP0091 NEW) - message(STATUS "Setting policy CMP0091 to NEW") - endif() + if(WIN32) + cmake_policy(SET CMP0091 NEW) + message(STATUS "Setting policy CMP0091 to NEW") + endif() endif() -############################################################################### +# ############################################################################## # # project settings # -############################################################################### +# ############################################################################## project(tesseract C CXX) # Get version with components from VERSION file. 
file(STRINGS "VERSION" VERSION_PLAIN) string(REGEX REPLACE "^([^.]*)\\..*" "\\1" VERSION_MAJOR ${VERSION_PLAIN}) -string(REGEX REPLACE "^[^.]*\\.([^.]*)\\..*" "\\1" VERSION_MINOR ${VERSION_PLAIN}) -string(REGEX REPLACE "^[^.]*\\.[^.]*\\.([0-9]*).*" "\\1" VERSION_PATCH ${VERSION_PLAIN}) +string(REGEX REPLACE "^[^.]*\\.([^.]*)\\..*" "\\1" VERSION_MINOR + ${VERSION_PLAIN}) +string(REGEX REPLACE "^[^.]*\\.[^.]*\\.([0-9]*).*" "\\1" VERSION_PATCH + ${VERSION_PLAIN}) if(EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/.git) - execute_process(COMMAND git --git-dir ${CMAKE_CURRENT_SOURCE_DIR}/.git describe --abbrev=4 - OUTPUT_VARIABLE GIT_REV) - string(REGEX REPLACE "\n$" "" PACKAGE_VERSION "${GIT_REV}") + execute_process(COMMAND git --git-dir ${CMAKE_CURRENT_SOURCE_DIR}/.git + describe --abbrev=4 OUTPUT_VARIABLE GIT_REV) + string(REGEX REPLACE "\n$" "" PACKAGE_VERSION "${GIT_REV}") endif() if(NOT PACKAGE_VERSION) - set(PACKAGE_VERSION ${VERSION_PLAIN}) + set(PACKAGE_VERSION ${VERSION_PLAIN}) endif() # Provide also same macro names as autoconf (see configure.ac). 
@@ -68,56 +70,66 @@ set(GENERIC_MICRO_VERSION ${VERSION_PATCH}) set(MINIMUM_LEPTONICA_VERSION 1.74) -############################################################################### +# ############################################################################## # # options # -############################################################################### +# ############################################################################## -message( "Configuring tesseract version ${PACKAGE_VERSION}...") +message("Configuring tesseract version ${PACKAGE_VERSION}...") -if (WIN32) - option(SW_BUILD "Build with sw" ON) +if(WIN32) + option(SW_BUILD "Build with sw" ON) else() - option(SW_BUILD "Build with sw" OFF) + option(SW_BUILD "Build with sw" OFF) endif() -option(OPENMP_BUILD "Build with openmp support" OFF) # see issue #1662 +option(OPENMP_BUILD "Build with openmp support" OFF) # see issue #1662 option(GRAPHICS_DISABLED "Disable disable graphics (ScrollView)" OFF) option(DISABLED_LEGACY_ENGINE "Disable the legacy OCR engine" OFF) option(ENABLE_LTO "Enable link-time optimization" OFF) +option(FAST_FLOAT "Enable float for LSTM" ON) option(BUILD_TRAINING_TOOLS "Build training tools" ON) option(BUILD_TESTS "Build tests" OFF) option(USE_SYSTEM_ICU "Use system ICU" OFF) +option(DISABLE_ARCHIVE "Disable build with libarchive (if available)" OFF) +option(DISABLE_CURL "Disable build with libcurl (if available)" OFF) +option(INSTALL_CONFIGS "Install tesseract configs" ON) + if(NOT ${CMAKE_VERSION} VERSION_LESS "3.15.0") - if(WIN32 AND MSVC) - option(WIN32_MT_BUILD "Build with MT flag for MSVC" OFF) - endif() + if(WIN32 AND MSVC) + option(WIN32_MT_BUILD "Build with MT flag for MSVC" OFF) + endif() endif() -############################################################################### +# ############################################################################## # # compiler and linker # -############################################################################### +# 
############################################################################## if(CMAKE_CXX_COMPILER_ID MATCHES "Clang") set(CLANG 1) endif() if(NOT CMAKE_BUILD_TYPE) - message(STATUS "Setting build type to 'Release' as none was specified.") - set(CMAKE_BUILD_TYPE Release CACHE STRING "Choose the type of build." FORCE) - set_property(CACHE CMAKE_BUILD_TYPE PROPERTY STRINGS "Debug" "Release") + message(STATUS "Setting build type to 'Release' as none was specified.") + set(CMAKE_BUILD_TYPE + Release + CACHE STRING "Choose the type of build." FORCE) + set_property(CACHE CMAKE_BUILD_TYPE PROPERTY STRINGS "Debug" "Release") endif() include(CheckCXXCompilerFlag) set(CMAKE_CXX_STANDARD 17) set(CMAKE_CXX_STANDARD_REQUIRED ON) -set(CMAKE_CXX_EXTENSIONS OFF) +if(NOT CMAKE_CXX_COMPILER_ID STREQUAL "GNU") + # cygwin gnu c++ needs to use -std=gnu++17 instead of -std=c++17 + set(CMAKE_CXX_EXTENSIONS OFF) +endif() -if (BUILD_SHARED_LIBS) - set(CMAKE_CXX_VISIBILITY_PRESET hidden) +if(BUILD_SHARED_LIBS) + set(CMAKE_CXX_VISIBILITY_PRESET hidden) endif() # LTO @@ -125,321 +137,377 @@ cmake_policy(SET CMP0069 NEW) include(CheckIPOSupported) check_ipo_supported(RESULT LTO_SUPPORTED OUTPUT error) if(LTO_SUPPORTED) - message(STATUS "IPO / LTO supported") + message(STATUS "IPO / LTO supported") else() - message(STATUS "IPO / LTO not supported: <${error}>") + message(STATUS "IPO / LTO not supported: <${error}>") endif() -CHECK_CXX_COMPILER_FLAG("-march=native" COMPILER_SUPPORTS_MARCH_NATIVE) +check_cxx_compiler_flag("-march=native" COMPILER_SUPPORTS_MARCH_NATIVE) if(COMPILER_SUPPORTS_MARCH_NATIVE) - set(MARCH_NATIVE_FLAGS "${MARCH_NATIVE_FLAGS} -march=native") - if(NOT CLANG AND MSVC) - # clang-cl does not know this argument - set(MARCH_NATIVE_FLAGS "${MARCH_NATIVE_FLAGS} -mtune=native") - endif() - set(MARCH_NATIVE_OPT ON) + set(MARCH_NATIVE_FLAGS "${MARCH_NATIVE_FLAGS} -march=native") + if(NOT CLANG AND MSVC) + # clang-cl does not know this argument + set(MARCH_NATIVE_FLAGS 
"${MARCH_NATIVE_FLAGS} -mtune=native") + endif() + set(MARCH_NATIVE_OPT ON) endif() -if (CMAKE_SYSTEM_PROCESSOR MATCHES "arm64|aarch64.*|AARCH64.*") - set(AARCH64 TRUE) -endif() +message("CMAKE_SYSTEM_PROCESSOR=<${CMAKE_SYSTEM_PROCESSOR}>") -if(AARCH64) +if(CMAKE_SYSTEM_PROCESSOR MATCHES "x86|x86_64|AMD64|amd64|i386|i686") -add_definitions("-DHAVE_NEON") -set(HAVE_NEON TRUE) + set(HAVE_NEON FALSE) -else() - -CHECK_CXX_COMPILER_FLAG("-mavx" HAVE_AVX) -if(HAVE_AVX) + check_cxx_compiler_flag("-mavx" HAVE_AVX) + if(HAVE_AVX) set(AVX_COMPILE_FLAGS "-mavx") add_definitions("-DHAVE_AVX") -endif(HAVE_AVX) + endif(HAVE_AVX) -CHECK_CXX_COMPILER_FLAG("-mavx2" HAVE_AVX2) -if(HAVE_AVX2) + check_cxx_compiler_flag("-mavx2" HAVE_AVX2) + if(HAVE_AVX2) set(AVX2_COMPILE_FLAGS "-mavx2") add_definitions("-DHAVE_AVX2") -endif() + endif() -CHECK_CXX_COMPILER_FLAG("-mfma" HAVE_FMA) -if(HAVE_FMA) + check_cxx_compiler_flag("-mfma" HAVE_FMA) + if(HAVE_FMA) set(FMA_COMPILE_FLAGS "-mfma") add_definitions("-DHAVE_FMA") -endif() + endif() -CHECK_CXX_COMPILER_FLAG("-msse4.1" HAVE_SSE4_1) -if(HAVE_SSE4_1) + check_cxx_compiler_flag("-msse4.1" HAVE_SSE4_1) + if(HAVE_SSE4_1) set(SSE4_1_COMPILE_FLAGS "-msse4.1") add_definitions("-DHAVE_SSE4_1") -endif() + endif() -if(NOT APPLE) - # NEON support relies on getauxval, which is not available on OSX, only on Linux and Android - CHECK_CXX_COMPILER_FLAG("-mfpu=neon" HAVE_NEON) - if(HAVE_NEON) - set(NEON_COMPILE_FLAGS "-mfpu=neon") - add_definitions("-DHAVE_NEON") - endif() -endif(NOT APPLE) - -if(MSVC) + if(MSVC) if(NOT HAVE_AVX) - set(AVX_COMPILE_FLAGS "/arch:AVX") - set(HAVE_AVX ON) - add_definitions("-DHAVE_AVX") + set(AVX_COMPILE_FLAGS "/arch:AVX") + set(HAVE_AVX ON) + add_definitions("-DHAVE_AVX") endif() if(NOT HAVE_AVX2) - set(AVX2_COMPILE_FLAGS "/arch:AVX2") - set(HAVE_AVX2 ON) - add_definitions("-DHAVE_AVX2") - set(FMA_COMPILE_FLAGS "-D__FMA__") - set(HAVE_FMA ON) - add_definitions("-DHAVE_FMA") + set(AVX2_COMPILE_FLAGS "/arch:AVX2") + set(HAVE_AVX2 
ON) + add_definitions("-DHAVE_AVX2") + set(FMA_COMPILE_FLAGS "-D__FMA__") + set(HAVE_FMA ON) + add_definitions("-DHAVE_FMA") endif() if(NOT HAVE_SSE4_1) - set(SSE4_1_COMPILE_FLAGS "-D__SSE4_1__") - set(HAVE_SSE4_1 ON) - add_definitions("-DHAVE_SSE4_1") + set(SSE4_1_COMPILE_FLAGS "-D__SSE4_1__") + set(HAVE_SSE4_1 ON) + add_definitions("-DHAVE_SSE4_1") endif() # clang with MSVC compatibility if(CLANG) - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-microsoft-unqualified-friend") - if(HAVE_FMA) - set(FMA_COMPILE_FLAGS "-mfma ${FMA_COMPILE_FLAGS}") - endif(HAVE_FMA) - if(HAVE_SSE4_1) - set(SSE4_1_COMPILE_FLAGS "-msse4.1 ${SSE4_1_COMPILE_FLAGS}") - endif(HAVE_SSE4_1) + set(CMAKE_CXX_FLAGS + "${CMAKE_CXX_FLAGS} -Wno-microsoft-unqualified-friend") + if(HAVE_FMA) + set(FMA_COMPILE_FLAGS "-mfma ${FMA_COMPILE_FLAGS}") + endif(HAVE_FMA) + if(HAVE_SSE4_1) + set(SSE4_1_COMPILE_FLAGS "-msse4.1 ${SSE4_1_COMPILE_FLAGS}") + endif(HAVE_SSE4_1) endif(CLANG) -endif(MSVC) + endif(MSVC) -endif(AARCH64) +elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "arm64|aarch64.*|AARCH64.*") -# auto optimize - used only for information about available vectors -include(OptimizeForArchitecture) -OptimizeForArchitecture() -# remove global definition to eliminate effect on build -foreach(_flag ${_enable_vector_unit_list}) - string(TOUPPER "${_flag}" _flag) - string(REPLACE "." 
"_" _flag "__${_flag}__") - remove_definitions("-D${_flag}") -endforeach(_flag) -foreach(flag ${Vc_ARCHITECTURE_FLAGS}) - set(Vc_CXX_FLAGS "${Vc_CXX_FLAGS} ${flag}") -endforeach() + set(HAVE_AVX FALSE) + set(HAVE_AVX2 FALSE) + set(HAVE_FMA FALSE) + set(HAVE_SSE4_1 FALSE) + + add_definitions("-DHAVE_NEON") + set(HAVE_NEON TRUE) + +elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "arm.*") + + set(HAVE_AVX FALSE) + set(HAVE_AVX2 FALSE) + set(HAVE_FMA FALSE) + set(HAVE_SSE4_1 FALSE) + + check_cxx_compiler_flag("-mfpu=neon" HAVE_NEON) + if(HAVE_NEON) + set(NEON_COMPILE_FLAGS "-mfpu=neon") + add_definitions("-DHAVE_NEON") + endif() + +else() + + set(HAVE_AVX FALSE) + set(HAVE_AVX2 FALSE) + set(HAVE_FMA FALSE) + set(HAVE_NEON FALSE) + set(HAVE_SSE4_1 FALSE) + +endif(CMAKE_SYSTEM_PROCESSOR MATCHES "x86|x86_64|AMD64|amd64|i386|i686") # Compiler specific environments if(CMAKE_COMPILER_IS_GNUCXX OR MINGW) - set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -Wall -DDEBUG -pedantic -Og") + set(CMAKE_CXX_FLAGS_DEBUG + "${CMAKE_CXX_FLAGS_DEBUG} -Wall -DDEBUG -pedantic -Og") elseif(MSVC) - add_definitions(-D_CRT_SECURE_NO_WARNINGS) - add_definitions(-D_CRT_NONSTDC_NO_DEPRECATE) # strdup - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /utf-8") - if (NOT CLANG) - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /MP") - endif() - set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} /wd4244 /wd4305 /wd4267") - # Don't use /Wall because it generates too many warnings. 
- set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} /W0 /bigobj") - # MT flag - if(WIN32_MT_BUILD) - set(CMAKE_MSVC_RUNTIME_LIBRARY "MultiThreaded$<$:Debug>") - message (STATUS "Building with static CRT.") - endif() + add_definitions(-D_CRT_SECURE_NO_WARNINGS) + add_definitions(-D_CRT_NONSTDC_NO_DEPRECATE) # strdup + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /utf-8") + if(NOT CLANG) + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /MP") + endif() + # Hide some warnings for release target wd4244 'argument': conversion from + # 'uint64_t' to 'unsigned int', possible loss of data wd4251 needs to have + # dll-interface wd4267 return': conversion from 'size_t' to 'int', possible + # loss of data wd4275 non dll-interface class wd4305 ...truncation from + # 'double' to 'float' + set(CMAKE_CXX_FLAGS_RELEASE + "${CMAKE_CXX_FLAGS_RELEASE} /wd4244 /wd4305 /wd4267 /wd4251 /wd4275") + # Don't use /Wall because it generates too many warnings. + set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} /W0 /bigobj") + # MT flag + if(WIN32_MT_BUILD) + set(CMAKE_MSVC_RUNTIME_LIBRARY "MultiThreaded$<$:Debug>") + message(STATUS "Building with static CRT.") + endif() endif() -if(CLANG) # clang all platforms - set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} -Wno-unused-command-line-argument") - set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -Wall -DDEBUG -pedantic -O0") +if(CLANG) # clang all platforms + set(CMAKE_CXX_FLAGS_RELEASE + "${CMAKE_CXX_FLAGS_RELEASE} -Wno-unused-command-line-argument") + set(CMAKE_CXX_FLAGS_DEBUG + "${CMAKE_CXX_FLAGS_DEBUG} -Wall -DDEBUG -pedantic -O0") endif() -if (OPENMP_BUILD) - find_package(OpenMP QUIET) - if (OpenMP_FOUND) - message(">> ${OpenMP_FOUND} ${OpenMP_VERSION}") - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${OpenMP_CXX_FLAGS}") - if(${CMAKE_VERSION} VERSION_LESS "3.9.0") - add_library(OpenMP::OpenMP_CXX IMPORTED INTERFACE) - endif() - endif() - # 
https://stackoverflow.com/questions/12399422/how-to-set-linker-flags-for-openmp-in-cmakes-try-compile-function - if (NOT OpenMP_FOUND AND CLANG AND WIN32) - # workaroung because find_package(OpenMP) does not work for clang-cl - # https://gitlab.kitware.com/cmake/cmake/issues/19404 - check_include_file_cxx(omp.h HAVE_OMP_H_INCLUDE) - find_library(OpenMP_LIBRARY NAMES omp libomp.lib) - message(">> OpenMP_LIBRARY: ${OpenMP_LIBRARY}") - if (MSVC) - set(OpenMP_CXX_FLAGS "${OpenMP_CXX_FLAGS} /openmp") - else() - set(OpenMP_CXX_FLAGS "${OpenMP_CXX_FLAGS} -fopenmp") - endif() - set(OpenMP_FOUND 1) - add_definitions(-D_OPENMP=201107) # 3.1 version is supported from Clang 3.8.0 +if(OPENMP_BUILD + AND MSVC + AND "${MSVC_VERSION}" LESS 1929) + set(OPENMP_BUILD OFF) +endif() +if(OPENMP_BUILD) + find_package(OpenMP QUIET) + if(OpenMP_FOUND) + message(">> OpenMP_FOUND ${OpenMP_FOUND} ${OpenMP_VERSION}") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${OpenMP_CXX_FLAGS}") + add_library(OpenMP::OpenMP_CXX IMPORTED INTERFACE) + endif() + # https://stackoverflow.com/questions/12399422 + # how-to-set-linker-flags-for-openmp-in-cmakes-try-compile-function + if(NOT OpenMP_FOUND + AND CLANG + AND WIN32) + # workaroung because find_package(OpenMP) does not work for clang-cl + # https://gitlab.kitware.com/cmake/cmake/issues/19404 + check_include_file_cxx(omp.h HAVE_OMP_H_INCLUDE) + find_library(OpenMP_LIBRARY NAMES omp libomp.lib) + message(">> OpenMP_LIBRARY: ${OpenMP_LIBRARY}") + if(MSVC) + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /openmp") + else() + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fopenmp") endif() + set(OpenMP_FOUND 1) + # OpenMP 3.1 is fully supported from Clang 3.8.0 + add_definitions(-D_OPENMP=201107) + endif() + if(MSVC) + string(REPLACE "/openmp" "/openmp:llvm -openmp:experimental" CMAKE_CXX_FLAGS ${CMAKE_CXX_FLAGS}) + string(REPLACE "-openmp" "/openmp:llvm -openmp:experimental" CMAKE_CXX_FLAGS ${CMAKE_CXX_FLAGS}) + endif() endif() -if (CYGWIN) - add_definitions(-D__CYGWIN__) 
+if(CYGWIN) + add_definitions(-D__CYGWIN__) elseif(UNIX) - if (NOT ANDROID) - set(LIB_pthread pthread) - endif() + if(NOT ANDROID) + set(LIB_pthread pthread) + endif() elseif(WIN32) - set(LIB_Ws2_32 Ws2_32) + set(LIB_Ws2_32 Ws2_32) endif() add_definitions("-DCMAKE_BUILD") -############################################################################### +# ############################################################################## # # packages # -############################################################################### +# ############################################################################## -if (SW_BUILD) - find_package(SW REQUIRED) - if (BUILD_SHARED_LIBS) - set(SW_BUILD_SHARED_LIBS 1) - else() - set(SW_BUILD_SHARED_LIBS 0) - endif() - sw_add_package( - org.sw.demo.danbloomberg.leptonica - org.sw.demo.libarchive.libarchive - ) - if (BUILD_TRAINING_TOOLS) - sw_add_package( - org.sw.demo.gnome.pango.pangocairo - org.sw.demo.unicode.icu.i18n - ) - endif() - sw_execute() +if(SW_BUILD) + find_package(SW REQUIRED) + if(BUILD_SHARED_LIBS) + set(SW_BUILD_SHARED_LIBS 1) + else() + set(SW_BUILD_SHARED_LIBS 0) + endif() + sw_add_package(org.sw.demo.danbloomberg.leptonica + org.sw.demo.libarchive.libarchive) + if(BUILD_TRAINING_TOOLS) + sw_add_package(org.sw.demo.gnome.pango.pangocairo + org.sw.demo.unicode.icu.i18n) + endif() + sw_execute() else() - find_package(PkgConfig) - if(PKG_CONFIG_EXECUTABLE AND NOT Leptonica_DIR) - pkg_check_modules(Leptonica REQUIRED lept>=${MINIMUM_LEPTONICA_VERSION}) - link_directories(${Leptonica_LIBRARY_DIRS}) - else() - find_package(Leptonica ${MINIMUM_LEPTONICA_VERSION} REQUIRED CONFIG) - endif() - if (NOT Leptonica_FOUND) - message(FATAL_ERROR "Cannot find required library Leptonica. 
Quitting!") - endif(NOT Leptonica_FOUND) + find_package(PkgConfig) + if(BUILD_TRAINING_TOOLS AND NOT PKG_CONFIG_FOUND) + message(WARNING "Building of Training Tools requires PkgConfig") + set(BUILD_TRAINING_TOOLS OFF) + endif(BUILD_TRAINING_TOOLS AND NOT PKG_CONFIG_FOUND) + # Check for required library. option -DLeptonica_DIR=path => cmake hint where + # to find leptonica + find_package(Leptonica ${MINIMUM_LEPTONICA_VERSION} CONFIG) + if(NOT Leptonica_FOUND AND PKG_CONFIG_EXECUTABLE) + pkg_check_modules(Leptonica lept>=${MINIMUM_LEPTONICA_VERSION}) + link_directories(${Leptonica_LIBRARY_DIRS}) + endif() + if(NOT Leptonica_FOUND) + message(FATAL_ERROR "Cannot find required library Leptonica. Quitting!") + endif(NOT Leptonica_FOUND) + include_directories(${Leptonica_INCLUDE_DIRS}) - # Check for optional libarchive. - if(PKG_CONFIG_EXECUTABLE) - pkg_check_modules(LibArchive libarchive) - else() - find_package(LibArchive) + # Check for optional libraries. + if(WIN32) + find_package(TIFF) # for tesseract + if(NOT TIFF_FOUND AND PKG_CONFIG_EXECUTABLE) + # try PKG_CONFIG to find libtiff if cmake failed + pkg_check_modules(TIFF libtiff-4) + endif() + if(TIFF_FOUND) + set(HAVE_TIFFIO_H ON) + include_directories(${TIFF_INCLUDE_DIRS}) + endif(TIFF_FOUND) + endif(WIN32) + if(DISABLE_ARCHIVE) + set(HAVE_LIBARCHIVE OFF) + else(DISABLE_ARCHIVE) + find_package(LibArchive) + if(NOT LibArchive_FOUND AND PKG_CONFIG_EXECUTABLE) + # try PKG_CONFIG to find libarchive if cmake failed + pkg_check_modules(LibArchive libarchive) endif() if(LibArchive_FOUND) - set(HAVE_LIBARCHIVE ON) + set(HAVE_LIBARCHIVE ON) + include_directories(${LibArchive_INCLUDE_DIRS}) + endif(LibArchive_FOUND) + endif(DISABLE_ARCHIVE) + if(DISABLE_CURL) + set(HAVE_LIBCURL OFF) + else(DISABLE_CURL) + find_package(CURL) + if(NOT CURL_FOUND AND PKG_CONFIG_EXECUTABLE) + # try PKG_CONFIG to find libcurl if cmake failed + pkg_check_modules(CURL libcurl) endif() + if(CURL_FOUND) + set(HAVE_LIBCURL ON) + 
include_directories(${CURL_INCLUDE_DIRS}) + endif(CURL_FOUND) + endif(DISABLE_CURL) endif() find_package(OpenCL QUIET) - -############################################################################### +# ############################################################################## # # configure # -############################################################################### +# ############################################################################## -if (NOT MSVC) - set(MARCH_NATIVE_FLAGS "${MARCH_NATIVE_FLAGS} -O3 -ffast-math") +if(NOT MSVC) + set(MARCH_NATIVE_FLAGS "${MARCH_NATIVE_FLAGS} -O3 -ffast-math") endif() +if(NOT DEFINED CMAKE_INSTALL_LIBDIR) + set(CMAKE_INSTALL_LIBDIR lib) +endif(NOT DEFINED CMAKE_INSTALL_LIBDIR) + set(AUTOCONFIG_SRC ${CMAKE_CURRENT_BINARY_DIR}/config_auto.h.in) set(AUTOCONFIG ${CMAKE_CURRENT_BINARY_DIR}/config_auto.h) add_definitions(-DHAVE_CONFIG_H) if(GRAPHICS_DISABLED) - message("ScrollView debugging disabled.") + message("ScrollView debugging disabled.") endif() -set (CMAKE_REQUIRED_INCLUDES ${CMAKE_REQUIRED_INCLUDES} "${CMAKE_PREFIX_PATH}/include" "${CMAKE_INSTALL_PREFIX}/include") +set(CMAKE_REQUIRED_INCLUDES + ${CMAKE_REQUIRED_INCLUDES} "${CMAKE_PREFIX_PATH}/include" + "${CMAKE_INSTALL_PREFIX}/include") include(Configure) configure_file(${AUTOCONFIG_SRC} ${AUTOCONFIG} @ONLY) set(INCLUDE_DIR "${CMAKE_INSTALL_PREFIX}/include") +set(LIBRARY_DIRS "${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_LIBDIR}") -configure_file( - ${CMAKE_CURRENT_SOURCE_DIR}/include/tesseract/version.h.in - ${CMAKE_CURRENT_BINARY_DIR}/include/tesseract/version.h @ONLY) +configure_file(${CMAKE_CURRENT_SOURCE_DIR}/include/tesseract/version.h.in + ${CMAKE_CURRENT_BINARY_DIR}/include/tesseract/version.h @ONLY) include(CMakePackageConfigHelpers) include(GenerateExportHeader) -configure_package_config_file( - cmake/templates/TesseractConfig.cmake.in - ${CMAKE_CURRENT_BINARY_DIR}/cmake/tesseract/TesseractConfig.cmake - INSTALL_DESTINATION lib/cmake/tesseract - 
PATH_VARS INCLUDE_DIR) -write_basic_package_version_file( - ${CMAKE_CURRENT_BINARY_DIR}/cmake/tesseract/TesseractConfigVersion.cmake - VERSION ${PACKAGE_VERSION} - COMPATIBILITY SameMajorVersion) # show summary of configuration if(${CMAKE_BUILD_TYPE} MATCHES Debug) - set(COMPILER_FLAGS "${CMAKE_CXX_FLAGS} ${CMAKE_CXX_FLAGS_DEBUG}") + set(COMPILER_FLAGS "${CMAKE_CXX_FLAGS} ${CMAKE_CXX_FLAGS_DEBUG}") elseif(${CMAKE_BUILD_TYPE} MATCHES Release) - set(COMPILER_FLAGS "${CMAKE_CXX_FLAGS} ${CMAKE_CXX_FLAGS_RELEASE}") - if (LTO_SUPPORTED AND ENABLE_LTO) - set(CMAKE_INTERPROCEDURAL_OPTIMIZATION TRUE) - else() - set(CMAKE_INTERPROCEDURAL_OPTIMIZATION FALSE) - endif() # LTO_SUPPORTED + set(COMPILER_FLAGS "${CMAKE_CXX_FLAGS} ${CMAKE_CXX_FLAGS_RELEASE}") + if(LTO_SUPPORTED AND ENABLE_LTO) + set(CMAKE_INTERPROCEDURAL_OPTIMIZATION TRUE) + else() + set(CMAKE_INTERPROCEDURAL_OPTIMIZATION FALSE) + endif() # LTO_SUPPORTED endif() -message( STATUS ) -message( STATUS "General configuration for Tesseract ${PACKAGE_VERSION}") -message( STATUS "--------------------------------------------------------") -message( STATUS "Build type: ${CMAKE_BUILD_TYPE}") -message( STATUS "Compiler: ${CMAKE_CXX_COMPILER_ID}") -message( STATUS "Used standard: C++${CMAKE_CXX_STANDARD}") -message( STATUS "CXX compiler options: ${COMPILER_FLAGS}") -get_directory_property( DirCompDefs COMPILE_DEFINITIONS) -message( STATUS "Compile definitions = ${DirCompDefs}") -message( STATUS "Linker options: ${CMAKE_EXE_LINKER_FLAGS} ${CMAKE_EXE_LINKER_FLAGS_${CMAKE_BUILD_TYPE_UP}}") -message( STATUS "Install directory: ${CMAKE_INSTALL_PREFIX}") -message( STATUS "Architecture flags: ${Vc_ARCHITECTURE_FLAGS}") -message( STATUS "Vector unit list: ${_enable_vector_unit_list}") -message( STATUS "HAVE_AVX: ${HAVE_AVX}") -message( STATUS "HAVE_AVX2: ${HAVE_AVX2}") -message( STATUS "HAVE_FMA: ${HAVE_FMA}") -message( STATUS "HAVE_SSE4_1: ${HAVE_SSE4_1}") -message( STATUS "MARCH_NATIVE_OPT: ${MARCH_NATIVE_OPT}") -message( STATUS 
"HAVE_NEON: ${HAVE_NEON}") -message( STATUS "Link-time optimization: ${CMAKE_INTERPROCEDURAL_OPTIMIZATION}") -message( STATUS "--------------------------------------------------------") -message( STATUS "Build with sw [SW_BUILD]: ${SW_BUILD}") -message( STATUS "Build with openmp support [OPENMP_BUILD]: ${OPENMP_BUILD}") -message( STATUS "Disable disable graphics (ScrollView) [GRAPHICS_DISABLED]: ${GRAPHICS_DISABLED}") -message( STATUS "Disable the legacy OCR engine [DISABLED_LEGACY_ENGINE]: ${DISABLED_LEGACY_ENGINE}") -message( STATUS "Build training tools [BUILD_TRAINING_TOOLS]: ${BUILD_TRAINING_TOOLS}") -message( STATUS "Build tests [BUILD_TESTS]: ${BUILD_TESTS}") -message( STATUS "Use system ICU Library [USE_SYSTEM_ICU]: ${USE_SYSTEM_ICU}") -message( STATUS "--------------------------------------------------------") -message( STATUS ) +message(STATUS) +message(STATUS "General configuration for Tesseract ${PACKAGE_VERSION}") +message(STATUS "--------------------------------------------------------") +message(STATUS "Build type: ${CMAKE_BUILD_TYPE}") +message(STATUS "Compiler: ${CMAKE_CXX_COMPILER_ID}") +message(STATUS "Used standard: C++${CMAKE_CXX_STANDARD}") +message(STATUS "CXX compiler options: ${COMPILER_FLAGS}") +get_directory_property(DirCompDefs COMPILE_DEFINITIONS) +message(STATUS "Compile definitions = ${DirCompDefs}") +message(STATUS "Linker options: ${CMAKE_EXE_LINKER_FLAGS} " + "${CMAKE_EXE_LINKER_FLAGS_${CMAKE_BUILD_TYPE_UP}}") +message(STATUS "Install directory: ${CMAKE_INSTALL_PREFIX}") +message(STATUS "HAVE_AVX: ${HAVE_AVX}") +message(STATUS "HAVE_AVX2: ${HAVE_AVX2}") +message(STATUS "HAVE_FMA: ${HAVE_FMA}") +message(STATUS "HAVE_SSE4_1: ${HAVE_SSE4_1}") +message(STATUS "MARCH_NATIVE_OPT: ${MARCH_NATIVE_OPT}") +message(STATUS "HAVE_NEON: ${HAVE_NEON}") +message(STATUS "Link-time optimization: ${CMAKE_INTERPROCEDURAL_OPTIMIZATION}") +message(STATUS "--------------------------------------------------------") +message(STATUS "Build with sw 
[SW_BUILD]: ${SW_BUILD}") +message(STATUS "Build with openmp support [OPENMP_BUILD]: ${OPENMP_BUILD}") +message(STATUS "Build with libarchive support [HAVE_LIBARCHIVE]: " + "${HAVE_LIBARCHIVE}") +message(STATUS "Build with libcurl support [HAVE_LIBCURL]: ${HAVE_LIBCURL}") +message(STATUS "Enable float for LSTM [FAST_FLOAT]: ${FAST_FLOAT}") +message(STATUS "Disable disable graphics (ScrollView) [GRAPHICS_DISABLED]: " + "${GRAPHICS_DISABLED}") +message(STATUS "Disable the legacy OCR engine [DISABLED_LEGACY_ENGINE]: " + "${DISABLED_LEGACY_ENGINE}") +message(STATUS "Build training tools [BUILD_TRAINING_TOOLS]: " + "${BUILD_TRAINING_TOOLS}") +message(STATUS "Build tests [BUILD_TESTS]: ${BUILD_TESTS}") +message(STATUS "Use system ICU Library [USE_SYSTEM_ICU]: ${USE_SYSTEM_ICU}") +message(STATUS "Install tesseract configs [INSTALL_CONFIGS]: ${INSTALL_CONFIGS}") +message(STATUS "--------------------------------------------------------") +message(STATUS) -############################################################################### +# ############################################################################## # # build # -############################################################################### +# ############################################################################## include(BuildFunctions) include(SourceGroups) add_definitions(-D_SILENCE_STDEXT_HASH_DEPRECATION_WARNINGS=1) -include_directories(${Leptonica_INCLUDE_DIRS}) -include_directories(${LibArchive_INCLUDE_DIRS}) - include_directories(${CMAKE_CURRENT_BINARY_DIR}) include_directories(${CMAKE_CURRENT_BINARY_DIR}/include) if(ANDROID_TOOLCHAIN) @@ -447,157 +515,163 @@ if(ANDROID_TOOLCHAIN) add_compile_definitions(__ANDROID_API_FUTURE__) endif() -######################################## +# ############################################################################## # LIBRARY tesseract -######################################## +# 
############################################################################## -file(GLOB tesseract_src - src/ccmain/*.cpp - src/ccstruct/*.cpp - src/ccutil/*.cpp - src/classify/*.cpp - src/cutil/*.cpp - src/dict/*.cpp - src/lstm/*.cpp - src/opencl/*.cpp - src/textord/*.cpp - src/viewer/*.cpp - src/wordrec/*.cpp -) +file( + GLOB + TESSERACT_SRC + src/ccmain/*.cpp + src/ccstruct/*.cpp + src/ccutil/*.cpp + src/classify/*.cpp + src/cutil/*.cpp + src/dict/*.cpp + src/lstm/*.cpp + src/opencl/*.cpp + src/textord/*.cpp + src/viewer/*.cpp + src/wordrec/*.cpp) -if (DISABLED_LEGACY_ENGINE) - function(prepend_path srcs path) - set(tmp, "") - foreach(src IN LISTS ${srcs}) - list(APPEND tmp ${path}/${src}) - endforeach(src ${srcs}) - set(${srcs} ${tmp} PARENT_SCOPE) - endfunction() +if(DISABLED_LEGACY_ENGINE) + # prepend path to list of source files + function(prepend_path srcs path) + set(tmp, "") + foreach(src IN LISTS ${srcs}) + list(APPEND tmp ${path}/${src}) + endforeach(src ${srcs}) + set(${srcs} + ${tmp} + PARENT_SCOPE) + endfunction() - SET(tesseract_src_legacy - src/ccmain/adaptions.cpp - src/ccmain/docqual.cpp - src/ccmain/equationdetect.cpp - src/ccmain/fixspace.cpp - src/ccmain/fixxht.cpp - src/ccmain/osdetect.cpp - src/ccmain/par_control.cpp - src/ccmain/recogtraining.cpp - src/ccmain/superscript.cpp - src/ccmain/tessbox.cpp - src/ccmain/tfacepp.cpp - src/ccstruct/fontinfo.cpp - src/ccstruct/params_training_featdef.cpp - src/ccutil/ambigs.cpp - src/ccutil/bitvector.cpp - src/ccutil/indexmapbidi.cpp - src/ccutil/universalambigs.cpp - src/classify/adaptive.cpp - src/classify/adaptmatch.cpp - src/classify/blobclass.cpp - src/classify/cluster.cpp - src/classify/clusttool.cpp - src/classify/cutoffs.cpp - src/classify/featdefs.cpp - src/classify/float2int.cpp - src/classify/fpoint.cpp - src/classify/intfeaturespace.cpp - src/classify/intfx.cpp - src/classify/intmatcher.cpp - src/classify/intproto.cpp - src/classify/kdtree.cpp - src/classify/mf.cpp - 
src/classify/mfoutline.cpp - src/classify/mfx.cpp - src/classify/normfeat.cpp - src/classify/normmatch.cpp - src/classify/ocrfeatures.cpp - src/classify/outfeat.cpp - src/classify/picofeat.cpp - src/classify/protos.cpp - src/classify/shapeclassifier.cpp - src/classify/shapetable.cpp - src/classify/tessclassifier.cpp - src/classify/trainingsample.cpp - src/dict/permdawg.cpp - src/dict/hyphen.cpp - src/wordrec/associate.cpp - src/wordrec/chop.cpp - src/wordrec/chopper.cpp - src/wordrec/drawfx.cpp - src/wordrec/findseam.cpp - src/wordrec/gradechop.cpp - src/wordrec/language_model.cpp - src/wordrec/lm_consistency.cpp - src/wordrec/lm_pain_points.cpp - src/wordrec/lm_state.cpp - src/wordrec/outlines.cpp - src/wordrec/params_model.cpp - src/wordrec/pieces.cpp - src/wordrec/plotedges.cpp - src/wordrec/render.cpp - src/wordrec/segsearch.cpp - src/wordrec/wordclass.cpp - ) - prepend_path(tesseract_src_legacy "${CMAKE_CURRENT_SOURCE_DIR}") - list(REMOVE_ITEM tesseract_src ${tesseract_src_legacy}) +set(TESSERACT_SRC_LEGACY + src/ccmain/adaptions.cpp + src/ccmain/docqual.cpp + src/ccmain/equationdetect.cpp + src/ccmain/fixspace.cpp + src/ccmain/fixxht.cpp + src/ccmain/osdetect.cpp + src/ccmain/par_control.cpp + src/ccmain/recogtraining.cpp + src/ccmain/superscript.cpp + src/ccmain/tessbox.cpp + src/ccmain/tfacepp.cpp + src/ccstruct/fontinfo.cpp + src/ccstruct/params_training_featdef.cpp + src/ccutil/ambigs.cpp + src/ccutil/bitvector.cpp + src/ccutil/indexmapbidi.cpp + src/classify/adaptive.cpp + src/classify/adaptmatch.cpp + src/classify/blobclass.cpp + src/classify/cluster.cpp + src/classify/clusttool.cpp + src/classify/cutoffs.cpp + src/classify/featdefs.cpp + src/classify/float2int.cpp + src/classify/fpoint.cpp + src/classify/intfeaturespace.cpp + src/classify/intfx.cpp + src/classify/intmatcher.cpp + src/classify/intproto.cpp + src/classify/kdtree.cpp + src/classify/mf.cpp + src/classify/mfoutline.cpp + src/classify/mfx.cpp + src/classify/normfeat.cpp + 
src/classify/normmatch.cpp + src/classify/ocrfeatures.cpp + src/classify/outfeat.cpp + src/classify/picofeat.cpp + src/classify/protos.cpp + src/classify/shapeclassifier.cpp + src/classify/shapetable.cpp + src/classify/tessclassifier.cpp + src/classify/trainingsample.cpp + src/dict/permdawg.cpp + src/dict/hyphen.cpp + src/wordrec/associate.cpp + src/wordrec/chop.cpp + src/wordrec/chopper.cpp + src/wordrec/drawfx.cpp + src/wordrec/findseam.cpp + src/wordrec/gradechop.cpp + src/wordrec/language_model.cpp + src/wordrec/lm_consistency.cpp + src/wordrec/lm_pain_points.cpp + src/wordrec/lm_state.cpp + src/wordrec/outlines.cpp + src/wordrec/params_model.cpp + src/wordrec/pieces.cpp + src/wordrec/plotedges.cpp + src/wordrec/render.cpp + src/wordrec/segsearch.cpp + src/wordrec/wordclass.cpp) + prepend_path(TESSERACT_SRC_LEGACY "${CMAKE_CURRENT_SOURCE_DIR}") + list(REMOVE_ITEM TESSERACT_SRC ${TESSERACT_SRC_LEGACY}) endif(DISABLED_LEGACY_ENGINE) -list(APPEND arch_files - src/arch/dotproduct.cpp - src/arch/simddetect.cpp - src/arch/intsimdmatrix.cpp -) +list(APPEND arch_files src/arch/dotproduct.cpp src/arch/simddetect.cpp + src/arch/intsimdmatrix.cpp) if(MARCH_NATIVE_FLAGS) - set_source_files_properties(src/arch/dotproduct.cpp - PROPERTIES COMPILE_FLAGS ${MARCH_NATIVE_FLAGS}) + set_source_files_properties(src/arch/dotproduct.cpp + PROPERTIES COMPILE_FLAGS ${MARCH_NATIVE_FLAGS}) endif(MARCH_NATIVE_FLAGS) if(HAVE_AVX) - list(APPEND arch_files_opt src/arch/dotproductavx.cpp) - set_source_files_properties(src/arch/dotproductavx.cpp - PROPERTIES COMPILE_FLAGS ${AVX_COMPILE_FLAGS}) + list(APPEND arch_files_opt src/arch/dotproductavx.cpp) + set_source_files_properties(src/arch/dotproductavx.cpp + PROPERTIES COMPILE_FLAGS ${AVX_COMPILE_FLAGS}) endif(HAVE_AVX) if(HAVE_AVX2) - list(APPEND arch_files_opt src/arch/intsimdmatrixavx2.cpp src/arch/dotproductavx.cpp) - set_source_files_properties(src/arch/intsimdmatrixavx2.cpp - PROPERTIES COMPILE_FLAGS ${AVX2_COMPILE_FLAGS}) + list(APPEND 
arch_files_opt src/arch/intsimdmatrixavx2.cpp + src/arch/dotproductavx.cpp) + set_source_files_properties(src/arch/intsimdmatrixavx2.cpp + PROPERTIES COMPILE_FLAGS ${AVX2_COMPILE_FLAGS}) endif(HAVE_AVX2) if(HAVE_FMA) - list(APPEND arch_files_opt src/arch/dotproductfma.cpp) - set_source_files_properties(src/arch/dotproductfma.cpp - PROPERTIES COMPILE_FLAGS ${FMA_COMPILE_FLAGS}) + list(APPEND arch_files_opt src/arch/dotproductfma.cpp) + set_source_files_properties(src/arch/dotproductfma.cpp + PROPERTIES COMPILE_FLAGS ${FMA_COMPILE_FLAGS}) endif(HAVE_FMA) if(HAVE_SSE4_1) - list(APPEND arch_files_opt src/arch/dotproductsse.cpp src/arch/intsimdmatrixsse.cpp) - set_source_files_properties(src/arch/dotproductsse.cpp src/arch/intsimdmatrixsse.cpp - PROPERTIES COMPILE_FLAGS ${SSE4_1_COMPILE_FLAGS}) + list(APPEND arch_files_opt src/arch/dotproductsse.cpp + src/arch/intsimdmatrixsse.cpp) + set_source_files_properties( + src/arch/dotproductsse.cpp src/arch/intsimdmatrixsse.cpp + PROPERTIES COMPILE_FLAGS ${SSE4_1_COMPILE_FLAGS}) endif(HAVE_SSE4_1) if(HAVE_NEON) - list(APPEND arch_files_opt src/arch/intsimdmatrixneon.cpp) - if(NEON_COMPILE_FLAGS) - set_source_files_properties(src/arch/intsimdmatrixneon.cpp - PROPERTIES COMPILE_FLAGS ${NEON_COMPILE_FLAGS}) - endif() + list(APPEND arch_files_opt src/arch/dotproductneon.cpp + src/arch/intsimdmatrixneon.cpp) + if(NEON_COMPILE_FLAGS) + set_source_files_properties( + src/arch/dotproductneon.cpp src/arch/intsimdmatrixneon.cpp + PROPERTIES COMPILE_FLAGS ${NEON_COMPILE_FLAGS}) + endif() endif(HAVE_NEON) -file(GLOB_RECURSE tesseract_hdr - include/* - src/arch/*.h - src/ccmain/*.h - src/ccstruct/*.h - src/ccutil/*.h - src/classify/*.h - src/cutil/*.h - src/dict/*.h - src/lstm/*.h - src/opencl/*.h - src/textord/*.h - src/viewer/*.h - src/wordrec/*.h -) +file( + GLOB_RECURSE + TESSERACT_HDR + include/* + src/arch/*.h + src/ccmain/*.h + src/ccstruct/*.h + src/ccutil/*.h + src/classify/*.h + src/cutil/*.h + src/dict/*.h + src/lstm/*.h + 
src/opencl/*.h + src/textord/*.h + src/viewer/*.h + src/wordrec/*.h) -set(tesseract_src ${tesseract_src} +set(TESSERACT_SRC + ${TESSERACT_SRC} src/api/baseapi.cpp src/api/capi.cpp src/api/renderer.cpp @@ -605,138 +679,225 @@ set(tesseract_src ${tesseract_src} src/api/hocrrenderer.cpp src/api/lstmboxrenderer.cpp src/api/pdfrenderer.cpp - src/api/wordstrboxrenderer.cpp -) + src/api/wordstrboxrenderer.cpp) -set(libtessfiles ${tesseract_src} ${arch_files} ${arch_files_opt} ${tesseract_hdr}) +set(TESSERACT_CONFIGS + tessdata/configs/alto + tessdata/configs/ambigs.train + tessdata/configs/api_config + tessdata/configs/bazaar + tessdata/configs/bigram + tessdata/configs/box.train + tessdata/configs/box.train.stderr + tessdata/configs/digits + tessdata/configs/get.images + tessdata/configs/hocr + tessdata/configs/inter + tessdata/configs/kannada + tessdata/configs/linebox + tessdata/configs/logfile + tessdata/configs/lstm.train + tessdata/configs/lstmbox + tessdata/configs/lstmdebug + tessdata/configs/makebox + tessdata/configs/pdf + tessdata/configs/quiet + tessdata/configs/rebox + tessdata/configs/strokewidth + tessdata/configs/tsv + tessdata/configs/txt + tessdata/configs/unlv + tessdata/configs/wordstrbox) -source_group(TREE ${CMAKE_CURRENT_SOURCE_DIR} FILES ${libtessfiles}) +set(TESSERACT_TESSCONFIGS + tessdata/tessconfigs/batch + tessdata/tessconfigs/batch.nochop + tessdata/tessconfigs/matdemo + tessdata/tessconfigs/msdemo + tessdata/tessconfigs/nobatch + tessdata/tessconfigs/segdemo) -add_library (libtesseract ${libtessfiles}) -target_include_directories (libtesseract - PUBLIC $ - PRIVATE src +set(LIBTESSFILES ${TESSERACT_SRC} ${arch_files} ${arch_files_opt} + ${TESSERACT_HDR}) - PUBLIC $ - PUBLIC $ - PUBLIC $ - PUBLIC $ - PUBLIC $ - PUBLIC $ - PUBLIC $ - PUBLIC $ - PUBLIC $ - PUBLIC $ - PUBLIC $ - PUBLIC $ - PUBLIC $ -) -if (BUILD_SHARED_LIBS) -target_compile_definitions (libtesseract +source_group(TREE ${CMAKE_CURRENT_SOURCE_DIR} FILES ${LIBTESSFILES}) + 
+add_library(libtesseract ${LIBTESSFILES}) +target_include_directories( + libtesseract + BEFORE + PRIVATE src + PUBLIC $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $) +if(BUILD_SHARED_LIBS) + target_compile_definitions( + libtesseract PRIVATE -DTESS_EXPORTS - INTERFACE -DTESS_IMPORTS -) -#generate_export_header (libtesseract EXPORT_MACRO_NAME TESS_API) + INTERFACE -DTESS_IMPORTS) + # generate_export_header (libtesseract EXPORT_MACRO_NAME TESS_API) endif() -target_link_libraries (libtesseract PRIVATE ${LIB_Ws2_32} ${LIB_pthread}) +target_link_libraries(libtesseract PRIVATE ${LIB_Ws2_32} ${LIB_pthread}) if(OpenMP_CXX_FOUND) - target_link_libraries(libtesseract PUBLIC OpenMP::OpenMP_CXX) + target_link_libraries(libtesseract PUBLIC OpenMP::OpenMP_CXX) endif() -set_target_properties (libtesseract PROPERTIES VERSION ${VERSION_MAJOR}.${VERSION_MINOR}.${VERSION_PATCH}) -set_target_properties (libtesseract PROPERTIES SOVERSION ${VERSION_MAJOR}.${VERSION_MINOR}.${VERSION_PATCH}) -if (WIN32) -set_target_properties (libtesseract PROPERTIES OUTPUT_NAME tesseract${VERSION_MAJOR}${VERSION_MINOR}) -set_target_properties (libtesseract PROPERTIES DEBUG_OUTPUT_NAME tesseract${VERSION_MAJOR}${VERSION_MINOR}d) +if(LibArchive_FOUND) + target_link_libraries(libtesseract PUBLIC ${LibArchive_LIBRARIES}) +endif(LibArchive_FOUND) +if(CURL_FOUND) + if(NOT CURL_LIBRARIES) + target_link_libraries(libtesseract PUBLIC CURL::libcurl) + else() + target_link_libraries(libtesseract PUBLIC ${CURL_LIBRARIES}) + endif() +endif(CURL_FOUND) + +set_target_properties(libtesseract + PROPERTIES VERSION ${VERSION_MAJOR}.${VERSION_MINOR}.${VERSION_PATCH}) +set_target_properties(libtesseract + PROPERTIES SOVERSION ${VERSION_MAJOR}.${VERSION_MINOR}.${VERSION_PATCH}) + +if(WIN32) + set_target_properties(libtesseract + PROPERTIES OUTPUT_NAME tesseract${VERSION_MAJOR}${VERSION_MINOR}) + set_target_properties(libtesseract + PROPERTIES DEBUG_OUTPUT_NAME tesseract${VERSION_MAJOR}${VERSION_MINOR}d) else() 
-set_target_properties (libtesseract PROPERTIES OUTPUT_NAME tesseract) + set_target_properties(libtesseract PROPERTIES OUTPUT_NAME tesseract) endif() -if (SW_BUILD) - target_link_libraries (libtesseract PUBLIC - org.sw.demo.danbloomberg.leptonica - org.sw.demo.libarchive.libarchive - ) - file(WRITE ${CMAKE_CURRENT_BINARY_DIR}/TesseractTargets.cmake "include(${CMAKE_CURRENT_BINARY_DIR}/cppan.cmake)\n") - export(TARGETS libtesseract APPEND FILE ${CMAKE_CURRENT_BINARY_DIR}/TesseractTargets.cmake NAMESPACE Tesseract::) +if(SW_BUILD) + target_link_libraries(libtesseract PUBLIC org.sw.demo.danbloomberg.leptonica + org.sw.demo.libarchive.libarchive) + file(WRITE ${CMAKE_CURRENT_BINARY_DIR}/TesseractTargets.cmake + "include(${CMAKE_CURRENT_BINARY_DIR}/cppan.cmake)\n") + export( + TARGETS libtesseract + APPEND + FILE ${CMAKE_CURRENT_BINARY_DIR}/TesseractTargets.cmake + NAMESPACE Tesseract::) else() - target_link_libraries (libtesseract PUBLIC - ${Leptonica_LIBRARIES} - ${LibArchive_LIBRARIES} - ) - export(TARGETS libtesseract FILE ${CMAKE_CURRENT_BINARY_DIR}/TesseractTargets.cmake NAMESPACE Tesseract::) + target_link_libraries(libtesseract PUBLIC ${Leptonica_LIBRARIES}) + export( + TARGETS libtesseract + FILE ${CMAKE_CURRENT_BINARY_DIR}/TesseractTargets.cmake + NAMESPACE Tesseract::) endif() -if (WIN32 AND CLANG AND OPENMP_BUILD) - # Workaround for "libomp.lib is not automatically added on Windows" - # see: http://lists.llvm.org/pipermail/openmp-dev/2015-August/000857.html - target_link_libraries (libtesseract PRIVATE ${OpenMP_LIBRARY}) +if(WIN32 + AND CLANG + AND OPENMP_BUILD) + # Workaround for "libomp.lib is not automatically added on Windows" see: + # http://lists.llvm.org/pipermail/openmp-dev/2015-August/000857.html + target_link_libraries(libtesseract PRIVATE ${OpenMP_LIBRARY}) endif() -######################################## -# EXECUTABLE tesseractmain -######################################## +# 
############################################################################## +# EXECUTABLE tesseract +# ############################################################################## -add_executable (tesseract src/api/tesseractmain.cpp) -target_link_libraries (tesseract libtesseract) -if (HAVE_TIFFIO_H) - target_link_libraries(tesseract tiff) +add_executable(tesseract src/tesseract.cpp) +target_link_libraries(tesseract libtesseract) +if(HAVE_TIFFIO_H AND WIN32) + target_link_libraries(tesseract ${TIFF_LIBRARIES}) endif() -if (OPENMP_BUILD AND UNIX) -target_link_libraries (tesseract pthread) +if(OPENMP_BUILD AND UNIX) + target_link_libraries(tesseract pthread) endif() -######################################## +# ############################################################################## -if (BUILD_TESTS AND EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/googletest/CMakeLists.txt) - add_subdirectory(googletest) +if(BUILD_TESTS AND EXISTS + ${CMAKE_CURRENT_SOURCE_DIR}/unittest/third_party/googletest/CMakeLists.txt +) + add_subdirectory(unittest/third_party/googletest) endif() -if (BUILD_TRAINING_TOOLS) -add_subdirectory(src/training) +if(BUILD_TRAINING_TOOLS) + add_subdirectory(src/training) endif() get_target_property(tesseract_NAME libtesseract NAME) get_target_property(tesseract_VERSION libtesseract VERSION) get_target_property(tesseract_OUTPUT_NAME libtesseract OUTPUT_NAME) -configure_file(tesseract.pc.cmake ${CMAKE_CURRENT_BINARY_DIR}/tesseract.pc @ONLY) -install(FILES ${CMAKE_CURRENT_BINARY_DIR}/tesseract.pc DESTINATION lib/pkgconfig) -install(TARGETS tesseract RUNTIME DESTINATION bin LIBRARY DESTINATION lib ARCHIVE DESTINATION lib) -install(TARGETS libtesseract EXPORT TesseractTargets RUNTIME DESTINATION bin LIBRARY DESTINATION lib ARCHIVE DESTINATION lib) -install(EXPORT TesseractTargets NAMESPACE Tesseract:: DESTINATION lib/cmake/tesseract) -install(DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/cmake DESTINATION lib) -install(FILES - include/tesseract/baseapi.h - 
include/tesseract/capi.h - include/tesseract/renderer.h - ${CMAKE_CURRENT_BINARY_DIR}/include/tesseract/version.h +configure_file(tesseract.pc.cmake ${CMAKE_CURRENT_BINARY_DIR}/tesseract.pc + @ONLY) - include/tesseract/ltrresultiterator.h - include/tesseract/pageiterator.h - include/tesseract/resultiterator.h - include/tesseract/osdetect.h +configure_package_config_file( + cmake/templates/TesseractConfig.cmake.in + ${CMAKE_CURRENT_BINARY_DIR}/cmake/tesseract/TesseractConfig.cmake + INSTALL_DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/tesseract + PATH_VARS INCLUDE_DIR LIBRARY_DIRS) +write_basic_package_version_file( + ${CMAKE_CURRENT_BINARY_DIR}/cmake/tesseract/TesseractConfigVersion.cmake + VERSION ${PACKAGE_VERSION} + COMPATIBILITY SameMajorVersion) - include/tesseract/publictypes.h +install(FILES ${CMAKE_CURRENT_BINARY_DIR}/tesseract.pc + DESTINATION ${CMAKE_INSTALL_LIBDIR}/pkgconfig) +install(TARGETS tesseract DESTINATION bin) +install( + TARGETS libtesseract + EXPORT TesseractTargets + RUNTIME DESTINATION bin + LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} + ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR}) +install( + EXPORT TesseractTargets + NAMESPACE Tesseract:: + DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/tesseract) +install(DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/cmake + DESTINATION ${CMAKE_INSTALL_LIBDIR}) - include/tesseract/ocrclass.h - include/tesseract/export.h - include/tesseract/unichar.h +install( + FILES include/tesseract/baseapi.h + include/tesseract/capi.h + include/tesseract/renderer.h + ${CMAKE_CURRENT_BINARY_DIR}/include/tesseract/version.h + include/tesseract/ltrresultiterator.h + include/tesseract/pageiterator.h + include/tesseract/resultiterator.h + include/tesseract/osdetect.h + include/tesseract/publictypes.h + include/tesseract/ocrclass.h + include/tesseract/export.h + include/tesseract/unichar.h + # ${CMAKE_CURRENT_BINARY_DIR}/src/endianness.h + DESTINATION include/tesseract) - #${CMAKE_CURRENT_BINARY_DIR}/src/endianness.h - DESTINATION 
include/tesseract) - -######################################## -# uninstall target -######################################## -if(NOT TARGET uninstall) - configure_file( - "${CMAKE_CURRENT_SOURCE_DIR}/cmake/templates/cmake_uninstall.cmake.in" - "${CMAKE_CURRENT_BINARY_DIR}/cmake_uninstall.cmake" - IMMEDIATE @ONLY) - - add_custom_target(uninstall - COMMAND ${CMAKE_COMMAND} -P ${CMAKE_CURRENT_BINARY_DIR}/cmake_uninstall.cmake) +if(INSTALL_CONFIGS) +install(FILES ${TESSERACT_CONFIGS} + DESTINATION ${CMAKE_INSTALL_PREFIX}/share/tessdata/configs) +install(FILES ${TESSERACT_TESSCONFIGS} + DESTINATION ${CMAKE_INSTALL_PREFIX}/share/tessdata/tessconfigs) endif() -############################################################################### +# ############################################################################## +# uninstall target +# ############################################################################## +if(NOT TARGET uninstall) + configure_file( + "${CMAKE_CURRENT_SOURCE_DIR}/cmake/templates/cmake_uninstall.cmake.in" + "${CMAKE_CURRENT_BINARY_DIR}/cmake_uninstall.cmake" IMMEDIATE @ONLY) + + add_custom_target( + uninstall + COMMENT "Uninstall installed files" + COMMAND ${CMAKE_COMMAND} -P + ${CMAKE_CURRENT_BINARY_DIR}/cmake_uninstall.cmake) +endif() + +# ############################################################################## diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 2cc8f1d07..394f8397b 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -69,7 +69,7 @@ your question has been asked (and has been answered) many times before... You should always make sure your changes build and run successfully. -For that, your clone needs to have all submodules (`abseil`, `googletest`, `test`) included. To do so, either specify `--recurse-submodules` during the initial clone, or run `git submodule update --init --recursive NAME` for each `NAME` later. 
If `configure` already created those directories (blocking the clone), remove them first (or `make distclean`), then clone and reconfigure. +For that, your clone needs to have all submodules (`googletest`, `test`) included. To do so, either specify `--recurse-submodules` during the initial clone, or run `git submodule update --init --recursive NAME` for each `NAME` later. If `configure` already created those directories (blocking the clone), remove them first (or `make distclean`), then clone and reconfigure. Have a look at [the README](./README.md) and [testing README](./test/testing/README.md) and the [documentation](https://tesseract-ocr.github.io/tessdoc/Compiling-%E2%80%93-GitInstallation.html#unit-test-builds) on installation. diff --git a/ChangeLog b/ChangeLog index df7551846..76455c04f 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,51 @@ +2022-01-06 - V5.0.1 + * Add SPDX-License-Identifier to public include files. + * Support redirections when running OCR on a URL. + * Lots of fixes and improvements for cmake builds. + Distributions should use the autoconf build. + * Fix broken msys2 build with gcc 11. + * Fix parameter certainty_scale (was duplicated). + * Fix some compiler warnings and clean code. + * Correctly detect amd64 and i386 on FreeBSD. + * Add libarchive and libcurl in continuous integration actions. + * Update submodule googletest to release v1.11.0. 
+ +2021-11-22 - V5.0.0 + * Faster training and recognition by default (float instead of + double calculations) + * More options for binarization + * Improved support for ARM NEON + * Modernized code + * Removed proprietary data types like GenericVector and STRING + from public API + * pdf.ttf no longer needed, now integrated into the code + * Faster flat build with automake + * New options for combine_tessdata to show details of traineddata files + * Improved training messages + * Improved unit tests and fuzzing tests + * Lots of bug fixes + +2021-11-15 - V4.1.3 + * Fix build regression for autoconf build + +2021-11-14 - V4.1.2 + * Add RowAttributes getter to PageIterator + * Allow line images with larger width for training + * Fix memory leaks + * Improve build process + * Don't output empty ALTO sourceImageInformation (issue #2700) + * Extend URI support for Tesseract with libcurl + * Abort LSTM training with integer model (fixes issue #1573) + * Update documentation + * Make automake builds less noisy by default + * Don't use -march=native in automake builds + +2019-12-26 - V4.1.1 + * Implemented sw build (cppan is deprecated) + * Improved cmake build + * Code cleanup and optimization + * A lot of bug fixes... + 2019-07-07 - V4.1.0 * Added new renders Alto, LSTMBox, WordStrBox. * Added character boxes in hOCR output. @@ -150,7 +198,7 @@ * `OcrEngineMode` in `Init` replaces `AccuracyVSpeed` to control cube. * Greatly improved segmentation search with consequent accuracy and speed improvements, especially for Chinese. * Added `PageIterator` and `ResultIterator` as cleaner ways to get the full results out of Tesseract, that are not currently provided by any of the `TessBaseAPI::Get*` methods. All other methods, such as the `ETEXT_STRUCT` in particular are deprecated and will be deleted in the future. - * ApplyBoxes totally rewritten to make training easier.
It can now cope with touching/overlapping training characters, and a new boxfile format allows word boxes instead of character boxes, BUT to use that you have to have already boostrapped the language with character boxes. "Cyclic dependency" on traineddata. + * ApplyBoxes totally rewritten to make training easier. It can now cope with touching/overlapping training characters, and a new boxfile format allows word boxes instead of character boxes, BUT to use that you have to have already bootstrapped the language with character boxes. "Cyclic dependency" on traineddata. * Auto orientation and script detection added to page layout analysis. * Deleted *lots* of dead code. * Fixxht module replaced with scalable data-driven module. diff --git a/Makefile.am b/Makefile.am index e95f18ea1..239e1c9e5 100644 --- a/Makefile.am +++ b/Makefile.am @@ -1,16 +1,10 @@ ## run autogen.sh to create Makefile.in from this file - -# Be less noisy by default. Can be overridden with `make V=1`. -V = 0 - ACLOCAL_AMFLAGS = -I m4 .PHONY: doc html install-langs ScrollView.jar install-jars pdf training CLEANFILES = -DEFAULT_INCLUDES = - SUBDIRS = . 
tessdata EXTRA_DIST = README.md LICENSE @@ -80,7 +74,7 @@ AM_CPPFLAGS += -I$(top_srcdir)/include AM_CPPFLAGS += -I$(top_builddir)/include if VISIBILITY AM_CPPFLAGS += -DTESS_EXPORTS -AM_CPPFLAGS += -fvisibility=hidden -fvisibility-inlines-hidden +AM_CPPFLAGS += -fvisibility=hidden -fvisibility-inlines-hidden -fPIC endif AM_CPPFLAGS += $(OPENCL_CPPFLAGS) @@ -144,13 +138,15 @@ noinst_HEADERS += src/arch/simddetect.h noinst_LTLIBRARIES += libtesseract_native.la libtesseract_native_la_CXXFLAGS = -O3 -ffast-math -if MARCH_NATIVE_OPT -libtesseract_native_la_CXXFLAGS += -march=native -mtune=native +if OPENMP_SIMD +libtesseract_native_la_CXXFLAGS += -fopenmp-simd -DOPENMP_SIMD endif +libtesseract_native_la_CXXFLAGS += -I$(top_srcdir)/src/ccutil libtesseract_native_la_SOURCES = src/arch/dotproduct.cpp if HAVE_AVX libtesseract_avx_la_CXXFLAGS = -mavx +libtesseract_avx_la_CXXFLAGS += -I$(top_srcdir)/src/ccutil libtesseract_avx_la_SOURCES = src/arch/dotproductavx.cpp libtesseract_la_LIBADD += libtesseract_avx.la noinst_LTLIBRARIES += libtesseract_avx.la @@ -158,6 +154,7 @@ endif if HAVE_AVX2 libtesseract_avx2_la_CXXFLAGS = -mavx2 +libtesseract_avx2_la_CXXFLAGS += -I$(top_srcdir)/src/ccutil libtesseract_avx2_la_SOURCES = src/arch/intsimdmatrixavx2.cpp libtesseract_la_LIBADD += libtesseract_avx2.la noinst_LTLIBRARIES += libtesseract_avx2.la @@ -165,6 +162,7 @@ endif if HAVE_FMA libtesseract_fma_la_CXXFLAGS = -mfma +libtesseract_fma_la_CXXFLAGS += -I$(top_srcdir)/src/ccutil libtesseract_fma_la_SOURCES = src/arch/dotproductfma.cpp libtesseract_la_LIBADD += libtesseract_fma.la noinst_LTLIBRARIES += libtesseract_fma.la @@ -172,6 +170,7 @@ endif if HAVE_SSE4_1 libtesseract_sse_la_CXXFLAGS = -msse4.1 +libtesseract_sse_la_CXXFLAGS += -I$(top_srcdir)/src/ccutil libtesseract_sse_la_SOURCES = src/arch/dotproductsse.cpp src/arch/intsimdmatrixsse.cpp libtesseract_la_LIBADD += libtesseract_sse.la noinst_LTLIBRARIES += libtesseract_sse.la @@ -179,7 +178,13 @@ endif if HAVE_NEON 
libtesseract_neon_la_CXXFLAGS = $(NEON_CXXFLAGS) +libtesseract_neon_la_CXXFLAGS += -O3 +if OPENMP_SIMD +libtesseract_neon_la_CXXFLAGS += -fopenmp-simd -DOPENMP_SIMD +endif +libtesseract_neon_la_CXXFLAGS += -I$(top_srcdir)/src/ccutil libtesseract_neon_la_SOURCES = src/arch/intsimdmatrixneon.cpp +libtesseract_neon_la_SOURCES += src/arch/dotproductneon.cpp libtesseract_la_LIBADD += libtesseract_neon.la noinst_LTLIBRARIES += libtesseract_neon.la endif @@ -281,7 +286,6 @@ noinst_HEADERS += src/ccstruct/seam.h noinst_HEADERS += src/ccstruct/split.h noinst_HEADERS += src/ccstruct/statistc.h noinst_HEADERS += src/ccstruct/stepblob.h -noinst_HEADERS += src/ccstruct/tabletransfer.h noinst_HEADERS += src/ccstruct/werd.h if !DISABLED_LEGACY_ENGINE noinst_HEADERS += src/ccstruct/fontinfo.h @@ -334,7 +338,7 @@ endif libtesseract_ccutil_la_CPPFLAGS = $(AM_CPPFLAGS) libtesseract_ccutil_la_CPPFLAGS += $(libarchive_CFLAGS) if !NO_TESSDATA_PREFIX -libtesseract_ccutil_la_CPPFLAGS += -DTESSDATA_PREFIX=@datadir@ +libtesseract_ccutil_la_CPPFLAGS += -DTESSDATA_PREFIX='"@datadir@"' endif noinst_HEADERS += src/ccutil/ccutil.h @@ -389,7 +393,6 @@ if !DISABLED_LEGACY_ENGINE libtesseract_ccutil_la_SOURCES += src/ccutil/ambigs.cpp libtesseract_ccutil_la_SOURCES += src/ccutil/bitvector.cpp libtesseract_ccutil_la_SOURCES += src/ccutil/indexmapbidi.cpp -libtesseract_ccutil_la_SOURCES += src/ccutil/universalambigs.cpp endif # Rules for src/classify. @@ -501,7 +504,7 @@ libtesseract_lstm_la_CPPFLAGS += -DINCLUDE_TENSORFLOW libtesseract_lstm_la_CPPFLAGS += -I/usr/include/tensorflow endif if !NO_TESSDATA_PREFIX -libtesseract_lstm_la_CPPFLAGS += -DTESSDATA_PREFIX=@datadir@ +libtesseract_lstm_la_CPPFLAGS += -DTESSDATA_PREFIX='"@datadir@"' endif noinst_HEADERS += src/lstm/convolve.h @@ -712,13 +715,16 @@ endif # Rules for tesseract executable. 
bin_PROGRAMS = tesseract -tesseract_SOURCES = src/api/tesseractmain.cpp +tesseract_SOURCES = src/tesseract.cpp tesseract_CPPFLAGS = tesseract_CPPFLAGS += -I$(top_srcdir)/src/arch tesseract_CPPFLAGS += -I$(top_srcdir)/src/ccstruct tesseract_CPPFLAGS += -I$(top_srcdir)/src/ccutil tesseract_CPPFLAGS += -I$(top_srcdir)/src/dict tesseract_CPPFLAGS += -I$(top_srcdir)/src/viewer +if OPENCL +tesseract_CPPFLAGS += -I$(top_srcdir)/src/opencl +endif tesseract_CPPFLAGS += $(AM_CPPFLAGS) if VISIBILITY tesseract_CPPFLAGS += -DTESS_IMPORTS @@ -1143,65 +1149,34 @@ unittest_CPPFLAGS += $(pangocairo_CFLAGS) endif # ENABLE_TRAINING unittest_CPPFLAGS += -I$(top_srcdir)/src/viewer unittest_CPPFLAGS += -I$(top_srcdir)/src/wordrec -unittest_CPPFLAGS += -I$(top_srcdir)/abseil +unittest_CPPFLAGS += -I$(top_srcdir)/unittest if TENSORFLOW unittest_CPPFLAGS += -DINCLUDE_TENSORFLOW -unittest_CPPFLAGS += -I$(top_srcdir)/unittest unittest_CPPFLAGS += -I/usr/include/tensorflow endif # TENSORFLOW # Build googletest: check_LTLIBRARIES = libgtest.la libgtest_main.la libgmock.la libgmock_main.la -libgtest_la_SOURCES = googletest/googletest/src/gtest-all.cc -libgtest_la_CPPFLAGS = -I$(top_srcdir)/googletest/googletest/include -I$(top_srcdir)/googletest/googletest -pthread -libgtest_main_la_SOURCES = googletest/googletest/src/gtest_main.cc +libgtest_la_SOURCES = unittest/third_party/googletest/googletest/src/gtest-all.cc +libgtest_la_CPPFLAGS = -I$(top_srcdir)/unittest/third_party/googletest/googletest/include +libgtest_la_CPPFLAGS += -I$(top_srcdir)/unittest/third_party/googletest/googletest +libgtest_la_CPPFLAGS += -pthread +libgtest_main_la_SOURCES = unittest/third_party/googletest/googletest/src/gtest_main.cc libgtest_main_la_CPPFLAGS = $(libgtest_la_CPPFLAGS) -# Build Abseil (needed for some unit tests). 
-check_LTLIBRARIES += libabseil.la -libabseil_la_SOURCES = -libabseil_la_SOURCES += abseil/absl/base/internal/cycleclock.cc -libabseil_la_SOURCES += abseil/absl/base/internal/raw_logging.cc -libabseil_la_SOURCES += abseil/absl/base/internal/spinlock.cc -libabseil_la_SOURCES += abseil/absl/base/internal/spinlock_wait.cc -libabseil_la_SOURCES += abseil/absl/base/internal/sysinfo.cc -libabseil_la_SOURCES += abseil/absl/base/internal/throw_delegate.cc -libabseil_la_SOURCES += abseil/absl/base/internal/unscaledcycleclock.cc -libabseil_la_SOURCES += abseil/absl/numeric/int128.cc -libabseil_la_SOURCES += abseil/absl/strings/ascii.cc -libabseil_la_SOURCES += abseil/absl/strings/charconv.cc -libabseil_la_SOURCES += abseil/absl/strings/internal/charconv_bigint.cc -libabseil_la_SOURCES += abseil/absl/strings/internal/charconv_parse.cc -libabseil_la_SOURCES += abseil/absl/strings/internal/memutil.cc -libabseil_la_SOURCES += abseil/absl/strings/internal/str_format/arg.cc -libabseil_la_SOURCES += abseil/absl/strings/internal/str_format/bind.cc -libabseil_la_SOURCES += abseil/absl/strings/internal/str_format/extension.cc -libabseil_la_SOURCES += abseil/absl/strings/internal/str_format/float_conversion.cc -libabseil_la_SOURCES += abseil/absl/strings/internal/str_format/output.cc -libabseil_la_SOURCES += abseil/absl/strings/internal/str_format/parser.cc -libabseil_la_SOURCES += abseil/absl/strings/match.cc -libabseil_la_SOURCES += abseil/absl/strings/numbers.cc -libabseil_la_SOURCES += abseil/absl/strings/str_cat.cc -libabseil_la_SOURCES += abseil/absl/strings/str_split.cc -libabseil_la_SOURCES += abseil/absl/strings/string_view.cc -libabseil_la_SOURCES += abseil/absl/time/clock.cc -libabseil_la_SOURCES += abseil/absl/time/duration.cc -libabseil_la_CPPFLAGS = -I$(top_srcdir)/abseil +GMOCK_INCLUDES = -I$(top_srcdir)/unittest/third_party/googletest/googlemock/include \ + -I$(top_srcdir)/unittest/third_party/googletest/googlemock \ + 
-I$(top_srcdir)/unittest/third_party/googletest/googletest/include \ + -I$(top_srcdir)/unittest/third_party/googletest/googletest -GMOCK_INCLUDES = -I$(top_srcdir)/googletest/googlemock/include \ - -I$(top_srcdir)/googletest/googlemock \ - -I$(top_srcdir)/googletest/googletest/include \ - -I$(top_srcdir)/googletest/googletest - -libgmock_la_SOURCES = googletest/googlemock/src/gmock-all.cc +libgmock_la_SOURCES = unittest/third_party/googletest/googlemock/src/gmock-all.cc libgmock_la_CPPFLAGS = $(GMOCK_INCLUDES) \ -pthread -libgmock_main_la_SOURCES = googletest/googlemock/src/gmock_main.cc +libgmock_main_la_SOURCES = unittest/third_party/googletest/googlemock/src/gmock_main.cc libgmock_main_la_CPPFLAGS = $(GMOCK_INCLUDES) \ -pthread # Build unittests -ABSEIL_LIBS = libabseil.la GTEST_LIBS = libgtest.la libgtest_main.la -lpthread GMOCK_LIBS = libgmock.la libgmock_main.la TESS_LIBS = $(GTEST_LIBS) @@ -1209,8 +1184,8 @@ TESS_LIBS += libtesseract.la $(libarchive_LIBS) TESS_LIBS += $(TENSORFLOW_LIBS) TRAINING_LIBS = libtesseract_training.la TRAINING_LIBS += $(TESS_LIBS) -unittest_CPPFLAGS += -isystem $(top_srcdir)/googletest/googletest/include -unittest_CPPFLAGS += -isystem $(top_srcdir)/googletest/googlemock/include +unittest_CPPFLAGS += -isystem $(top_srcdir)/unittest/third_party/googletest/googletest/include +unittest_CPPFLAGS += -isystem $(top_srcdir)/unittest/third_party/googletest/googlemock/include check_PROGRAMS = apiexample_test if ENABLE_TRAINING @@ -1323,12 +1298,11 @@ endif # !DISABLED_LEGACY_ENGINE baseapi_test_SOURCES = unittest/baseapi_test.cc baseapi_test_CPPFLAGS = $(unittest_CPPFLAGS) -baseapi_test_LDADD = $(ABSEIL_LIBS) $(TRAINING_LIBS) $(LEPTONICA_LIBS) +baseapi_test_LDADD = $(TRAINING_LIBS) $(LEPTONICA_LIBS) baseapi_thread_test_SOURCES = unittest/baseapi_thread_test.cc baseapi_thread_test_CPPFLAGS = $(unittest_CPPFLAGS) -baseapi_thread_test_LDADD = $(ABSEIL_LIBS) -baseapi_thread_test_LDADD += $(TESS_LIBS) $(LEPTONICA_LIBS) +baseapi_thread_test_LDADD = 
$(TESS_LIBS) $(LEPTONICA_LIBS) if !DISABLED_LEGACY_ENGINE bitvector_test_SOURCES = unittest/bitvector_test.cc @@ -1364,7 +1338,7 @@ endif # !DISABLED_LEGACY_ENGINE fileio_test_SOURCES = unittest/fileio_test.cc fileio_test_CPPFLAGS = $(unittest_CPPFLAGS) -fileio_test_LDADD = $(ABSEIL_LIBS) $(TRAINING_LIBS) +fileio_test_LDADD = $(TRAINING_LIBS) heap_test_SOURCES = unittest/heap_test.cc heap_test_CPPFLAGS = $(unittest_CPPFLAGS) @@ -1372,7 +1346,7 @@ heap_test_LDADD = $(TESS_LIBS) imagedata_test_SOURCES = unittest/imagedata_test.cc imagedata_test_CPPFLAGS = $(unittest_CPPFLAGS) -imagedata_test_LDADD = $(ABSEIL_LIBS) $(TRAINING_LIBS) +imagedata_test_LDADD = $(TRAINING_LIBS) if !DISABLED_LEGACY_ENGINE indexmapbidi_test_SOURCES = unittest/indexmapbidi_test.cc @@ -1398,7 +1372,7 @@ intsimdmatrix_test_LDADD = $(TESS_LIBS) lang_model_test_SOURCES = unittest/lang_model_test.cc lang_model_test_CPPFLAGS = $(unittest_CPPFLAGS) -lang_model_test_LDADD = $(ABSEIL_LIBS) $(TRAINING_LIBS) $(ICU_I18N_LIBS) $(ICU_UC_LIBS) +lang_model_test_LDADD = $(TRAINING_LIBS) $(ICU_I18N_LIBS) $(ICU_UC_LIBS) layout_test_SOURCES = unittest/layout_test.cc layout_test_CPPFLAGS = $(unittest_CPPFLAGS) @@ -1425,24 +1399,24 @@ loadlang_test_LDADD = $(TESS_LIBS) $(LEPTONICA_LIBS) lstm_recode_test_SOURCES = unittest/lstm_recode_test.cc lstm_recode_test_CPPFLAGS = $(unittest_CPPFLAGS) -lstm_recode_test_LDADD = $(ABSEIL_LIBS) $(TRAINING_LIBS) +lstm_recode_test_LDADD = $(TRAINING_LIBS) lstm_squashed_test_SOURCES = unittest/lstm_squashed_test.cc lstm_squashed_test_CPPFLAGS = $(unittest_CPPFLAGS) -lstm_squashed_test_LDADD = $(ABSEIL_LIBS) $(TRAINING_LIBS) +lstm_squashed_test_LDADD = $(TRAINING_LIBS) lstm_test_SOURCES = unittest/lstm_test.cc lstm_test_CPPFLAGS = $(unittest_CPPFLAGS) -lstm_test_LDADD = $(ABSEIL_LIBS) $(TRAINING_LIBS) +lstm_test_LDADD = $(TRAINING_LIBS) lstmtrainer_test_SOURCES = unittest/lstmtrainer_test.cc lstmtrainer_test_CPPFLAGS = $(unittest_CPPFLAGS) -lstmtrainer_test_LDADD = $(ABSEIL_LIBS) 
$(TRAINING_LIBS) $(LEPTONICA_LIBS) +lstmtrainer_test_LDADD = $(TRAINING_LIBS) $(LEPTONICA_LIBS) if !DISABLED_LEGACY_ENGINE mastertrainer_test_SOURCES = unittest/mastertrainer_test.cc mastertrainer_test_CPPFLAGS = $(unittest_CPPFLAGS) -mastertrainer_test_LDADD = $(ABSEIL_LIBS) $(TRAINING_LIBS) $(LEPTONICA_LIBS) +mastertrainer_test_LDADD = $(TRAINING_LIBS) $(LEPTONICA_LIBS) endif # !DISABLED_LEGACY_ENGINE matrix_test_SOURCES = unittest/matrix_test.cc @@ -1459,7 +1433,7 @@ normstrngs_test_SOURCES += unittest/third_party/utf/rune.c normstrngs_test_SOURCES += unittest/util/utf8/unilib.cc endif # TENSORFLOW normstrngs_test_CPPFLAGS = $(unittest_CPPFLAGS) -normstrngs_test_LDADD = $(ABSEIL_LIBS) $(TRAINING_LIBS) $(ICU_I18N_LIBS) $(ICU_UC_LIBS) +normstrngs_test_LDADD = $(TRAINING_LIBS) $(ICU_I18N_LIBS) $(ICU_UC_LIBS) nthitem_test_SOURCES = unittest/nthitem_test.cc nthitem_test_CPPFLAGS = $(unittest_CPPFLAGS) @@ -1482,14 +1456,14 @@ pango_font_info_test_SOURCES += unittest/util/utf8/unicodetext.cc pango_font_info_test_SOURCES += unittest/util/utf8/unilib.cc endif # TENSORFLOW pango_font_info_test_CPPFLAGS = $(unittest_CPPFLAGS) -pango_font_info_test_LDADD = $(ABSEIL_LIBS) $(TRAINING_LIBS) $(LEPTONICA_LIBS) +pango_font_info_test_LDADD = $(TRAINING_LIBS) $(LEPTONICA_LIBS) pango_font_info_test_LDADD += $(ICU_I18N_LIBS) pango_font_info_test_LDADD += $(pangocairo_LIBS) pango_font_info_test_LDADD += $(pangoft2_LIBS) paragraphs_test_SOURCES = unittest/paragraphs_test.cc paragraphs_test_CPPFLAGS = $(unittest_CPPFLAGS) -paragraphs_test_LDADD = $(ABSEIL_LIBS) $(TESS_LIBS) +paragraphs_test_LDADD = $(TESS_LIBS) if !DISABLED_LEGACY_ENGINE params_model_test_SOURCES = unittest/params_model_test.cc @@ -1504,11 +1478,11 @@ progress_test_LDADD = $(GTEST_LIBS) $(GMOCK_LIBS) $(TESS_LIBS) $(LEPTONICA_LIBS) qrsequence_test_SOURCES = unittest/qrsequence_test.cc qrsequence_test_CPPFLAGS = $(unittest_CPPFLAGS) -qrsequence_test_LDADD = $(ABSEIL_LIBS) $(TESS_LIBS) +qrsequence_test_LDADD = $(TESS_LIBS) 
recodebeam_test_SOURCES = unittest/recodebeam_test.cc recodebeam_test_CPPFLAGS = $(unittest_CPPFLAGS) -recodebeam_test_LDADD = $(ABSEIL_LIBS) $(TRAINING_LIBS) $(ICU_I18N_LIBS) $(ICU_UC_LIBS) +recodebeam_test_LDADD = $(TRAINING_LIBS) $(ICU_I18N_LIBS) $(ICU_UC_LIBS) rect_test_SOURCES = unittest/rect_test.cc rect_test_CPPFLAGS = $(unittest_CPPFLAGS) @@ -1516,7 +1490,7 @@ rect_test_LDADD = $(TESS_LIBS) resultiterator_test_SOURCES = unittest/resultiterator_test.cc resultiterator_test_CPPFLAGS = $(unittest_CPPFLAGS) -resultiterator_test_LDADD = $(ABSEIL_LIBS) $(TRAINING_LIBS) +resultiterator_test_LDADD = $(TRAINING_LIBS) resultiterator_test_LDADD += $(LEPTONICA_LIBS) $(ICU_I18N_LIBS) $(ICU_UC_LIBS) scanutils_test_SOURCES = unittest/scanutils_test.cc @@ -1526,7 +1500,7 @@ scanutils_test_LDADD = $(TRAINING_LIBS) if !DISABLED_LEGACY_ENGINE shapetable_test_SOURCES = unittest/shapetable_test.cc shapetable_test_CPPFLAGS = $(unittest_CPPFLAGS) -shapetable_test_LDADD = $(ABSEIL_LIBS) $(TRAINING_LIBS) +shapetable_test_LDADD = $(TRAINING_LIBS) endif # !DISABLED_LEGACY_ENGINE stats_test_SOURCES = unittest/stats_test.cc @@ -1539,7 +1513,7 @@ stridemap_test_LDADD = $(TESS_LIBS) stringrenderer_test_SOURCES = unittest/stringrenderer_test.cc stringrenderer_test_CPPFLAGS = $(unittest_CPPFLAGS) -stringrenderer_test_LDADD = $(ABSEIL_LIBS) $(TRAINING_LIBS) $(LEPTONICA_LIBS) +stringrenderer_test_LDADD = $(TRAINING_LIBS) $(LEPTONICA_LIBS) stringrenderer_test_LDADD += $(ICU_I18N_LIBS) $(ICU_UC_LIBS) stringrenderer_test_LDADD += $(pangocairo_LIBS) $(pangoft2_LIBS) stringrenderer_test_LDADD += $(cairo_LIBS) $(pango_LIBS) @@ -1557,17 +1531,15 @@ tabvector_test_CPPFLAGS = $(unittest_CPPFLAGS) tabvector_test_LDADD = $(TESS_LIBS) tatweel_test_SOURCES = unittest/tatweel_test.cc -if TENSORFLOW tatweel_test_SOURCES += unittest/third_party/utf/rune.c tatweel_test_SOURCES += unittest/util/utf8/unicodetext.cc tatweel_test_SOURCES += unittest/util/utf8/unilib.cc -endif # TENSORFLOW tatweel_test_CPPFLAGS = 
$(unittest_CPPFLAGS) tatweel_test_LDADD = $(TRAINING_LIBS) textlineprojection_test_SOURCES = unittest/textlineprojection_test.cc textlineprojection_test_CPPFLAGS = $(unittest_CPPFLAGS) -textlineprojection_test_LDADD = $(ABSEIL_LIBS) $(TRAINING_LIBS) $(LEPTONICA_LIBS) +textlineprojection_test_LDADD = $(TRAINING_LIBS) $(LEPTONICA_LIBS) tfile_test_SOURCES = unittest/tfile_test.cc tfile_test_CPPFLAGS = $(unittest_CPPFLAGS) @@ -1579,7 +1551,7 @@ unichar_test_LDADD = $(TRAINING_LIBS) $(ICU_UC_LIBS) unicharcompress_test_SOURCES = unittest/unicharcompress_test.cc unicharcompress_test_CPPFLAGS = $(unittest_CPPFLAGS) -unicharcompress_test_LDADD = $(ABSEIL_LIBS) $(TRAINING_LIBS) $(ICU_UC_LIBS) +unicharcompress_test_LDADD = $(TRAINING_LIBS) $(ICU_UC_LIBS) unicharset_test_SOURCES = unittest/unicharset_test.cc unicharset_test_CPPFLAGS = $(unittest_CPPFLAGS) @@ -1587,19 +1559,19 @@ unicharset_test_LDADD = $(TRAINING_LIBS) $(ICU_UC_LIBS) validate_grapheme_test_SOURCES = unittest/validate_grapheme_test.cc validate_grapheme_test_CPPFLAGS = $(unittest_CPPFLAGS) -validate_grapheme_test_LDADD = $(ABSEIL_LIBS) $(TRAINING_LIBS) $(ICU_I18N_LIBS) $(ICU_UC_LIBS) +validate_grapheme_test_LDADD = $(TRAINING_LIBS) $(ICU_I18N_LIBS) $(ICU_UC_LIBS) validate_indic_test_SOURCES = unittest/validate_indic_test.cc validate_indic_test_CPPFLAGS = $(unittest_CPPFLAGS) -validate_indic_test_LDADD = $(ABSEIL_LIBS) $(TRAINING_LIBS) $(ICU_I18N_LIBS) $(ICU_UC_LIBS) +validate_indic_test_LDADD = $(TRAINING_LIBS) $(ICU_I18N_LIBS) $(ICU_UC_LIBS) validate_khmer_test_SOURCES = unittest/validate_khmer_test.cc validate_khmer_test_CPPFLAGS = $(unittest_CPPFLAGS) -validate_khmer_test_LDADD = $(ABSEIL_LIBS) $(TRAINING_LIBS) $(ICU_I18N_LIBS) $(ICU_UC_LIBS) +validate_khmer_test_LDADD = $(TRAINING_LIBS) $(ICU_I18N_LIBS) $(ICU_UC_LIBS) validate_myanmar_test_SOURCES = unittest/validate_myanmar_test.cc validate_myanmar_test_CPPFLAGS = $(unittest_CPPFLAGS) -validate_myanmar_test_LDADD = $(ABSEIL_LIBS) $(TRAINING_LIBS) 
$(ICU_I18N_LIBS) $(ICU_UC_LIBS) +validate_myanmar_test_LDADD = $(TRAINING_LIBS) $(ICU_I18N_LIBS) $(ICU_UC_LIBS) validator_test_SOURCES = unittest/validator_test.cc validator_test_CPPFLAGS = $(unittest_CPPFLAGS) diff --git a/README.md b/README.md index de0f5b68a..df339663d 100644 --- a/README.md +++ b/README.md @@ -8,9 +8,24 @@ [![Total Alerts](https://img.shields.io/lgtm/alerts/g/tesseract-ocr/tesseract.svg?logo=lgtm&logoWidth=18)](https://lgtm.com/projects/g/tesseract-ocr/tesseract/alerts) [![OSS-Fuzz](https://img.shields.io/badge/oss--fuzz-fuzzing-brightgreen)](https://bugs.chromium.org/p/oss-fuzz/issues/list?sort=-opened&can=2&q=proj:tesseract-ocr)
-[![GitHub license](https://img.shields.io/badge/license-Apache--2.0-blue.svg)](https://raw.githubusercontent.com/tesseract-ocr/tesseract/master/LICENSE) +[![GitHub license](https://img.shields.io/badge/license-Apache--2.0-blue.svg)](https://raw.githubusercontent.com/tesseract-ocr/tesseract/main/LICENSE) [![Downloads](https://img.shields.io/badge/download-all%20releases-brightgreen.svg)](https://github.com/tesseract-ocr/tesseract/releases/) +Table of Contents +================= + +* [Tesseract OCR](#tesseract-ocr) + * [About](#about) + * [Brief history](#brief-history) + * [Installing Tesseract](#installing-tesseract) + * [Running Tesseract](#running-tesseract) + * [For developers](#for-developers) + * [Support](#support) + * [License](#license) + * [Dependencies](#dependencies) + * [Latest Version of README](#latest-version-of-readme) + + ## About This package contains an **OCR engine** - `libtesseract` and a **command line program** - `tesseract`. @@ -22,12 +37,12 @@ It also needs [traineddata](https://tesseract-ocr.github.io/tessdoc/Data-Files.h those from the tessdata repository. The lead developer is Ray Smith. The maintainer is Zdenko Podobny. -For a list of contributors see [AUTHORS](https://github.com/tesseract-ocr/tesseract/blob/master/AUTHORS) +For a list of contributors see [AUTHORS](https://github.com/tesseract-ocr/tesseract/blob/main/AUTHORS) and GitHub's log of [contributors](https://github.com/tesseract-ocr/tesseract/graphs/contributors). Tesseract has **unicode (UTF-8) support**, and can **recognize more than 100 languages** "out of the box". -Tesseract supports **various output formats**: plain text, hOCR (HTML), PDF, invisible-text-only PDF, TSV. The master branch also has experimental support for ALTO (XML) output. +Tesseract supports **various output formats**: plain text, hOCR (HTML), PDF, invisible-text-only PDF, TSV. The main branch also has experimental support for ALTO (XML) output. 
You should note that in many cases, in order to get better OCR results, you'll need to **[improve the quality](https://tesseract-ocr.github.io/tessdoc/ImproveQuality.html) of the image** you are giving Tesseract. @@ -45,23 +60,24 @@ at Hewlett-Packard Co, Greeley Colorado between 1985 and 1994, with some more changes made in 1996 to port to Windows, and some C++izing in 1998. In 2005 Tesseract was open sourced by HP. From 2006 until November 2018 it was developed by Google. -The latest (LSTM based) stable version is **[4.1.1](https://github.com/tesseract-ocr/tesseract/releases/tag/4.1.1)**, released on December 26, 2019. -Latest source code is available from [master branch on GitHub](https://github.com/tesseract-ocr/tesseract/tree/master). +Major version 5 is the current stable version and started with release +[5.0.0](https://github.com/tesseract-ocr/tesseract/releases/tag/5.0.0) on November 30, 2021. +Newer minor versions and bugfix versions are available from +[GitHub](https://github.com/tesseract-ocr/tesseract/releases/). + +Latest source code is available from [main branch on GitHub](https://github.com/tesseract-ocr/tesseract/tree/main). Open issues can be found in [issue tracker](https://github.com/tesseract-ocr/tesseract/issues), and [planning documentation](https://tesseract-ocr.github.io/tessdoc/Planning.html). -The latest 3.0x version is **[3.05.02](https://github.com/tesseract-ocr/tesseract/releases/tag/3.05.02)**, released on June 19, 2018. Latest source code for 3.05 is available from [3.05 branch on GitHub](https://github.com/tesseract-ocr/tesseract/tree/3.05). -There is no development for this version, but it can be used for special cases (e.g. see [Regression of features from 3.0x](https://tesseract-ocr.github.io/tessdoc/Planning.html#regression-of-features-from-30x)). 
- See **[Release Notes](https://tesseract-ocr.github.io/tessdoc/ReleaseNotes.html)** -and **[Change Log](https://github.com/tesseract-ocr/tesseract/blob/master/ChangeLog)** for more details of the releases. +and **[Change Log](https://github.com/tesseract-ocr/tesseract/blob/main/ChangeLog)** for more details of the releases. ## Installing Tesseract You can either [Install Tesseract via pre-built binary package](https://tesseract-ocr.github.io/tessdoc/Home.html) or [build it from source](https://tesseract-ocr.github.io/tessdoc/Compiling.html). -C++17 support is required for building. +A C++ compiler with good C++17 support is required for building Tesseract from source. ## Running Tesseract @@ -75,8 +91,8 @@ Examples can be found in the [documentation](https://tesseract-ocr.github.io/tes ## For developers -Developers can use `libtesseract` [C](https://github.com/tesseract-ocr/tesseract/blob/master/include/tesseract/capi.h) or -[C++](https://github.com/tesseract-ocr/tesseract/blob/master/include/tesseract/baseapi.h) API to build their own application. +Developers can use `libtesseract` [C](https://github.com/tesseract-ocr/tesseract/blob/main/include/tesseract/capi.h) or +[C++](https://github.com/tesseract-ocr/tesseract/blob/main/include/tesseract/baseapi.h) API to build their own application. If you need bindings to `libtesseract` for other programming languages, please see the [wrapper](https://tesseract-ocr.github.io/tessdoc/AddOns.html#tesseract-wrappers) section in the AddOns documentation. @@ -84,7 +100,7 @@ Documentation of Tesseract generated from source code by doxygen can be found on ## Support -Before you submit an issue, please review **[the guidelines for this repository](https://github.com/tesseract-ocr/tesseract/blob/master/CONTRIBUTING.md)**. +Before you submit an issue, please review **[the guidelines for this repository](https://github.com/tesseract-ocr/tesseract/blob/main/CONTRIBUTING.md)**. 
For support, first read the [documentation](https://tesseract-ocr.github.io/tessdoc/), particularly the [FAQ](https://tesseract-ocr.github.io/tessdoc/FAQ.html) to see if your problem is addressed there. @@ -127,4 +143,4 @@ It is suggested to use leptonica with built-in support for [zlib](https://zlib.n For the latest online version of the README.md see: -https://github.com/tesseract-ocr/tesseract/blob/master/README.md +https://github.com/tesseract-ocr/tesseract/blob/main/README.md diff --git a/VERSION b/VERSION index 2a683af48..6b244dcd6 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -5.0.0-alpha-20210401 +5.0.1 diff --git a/abseil b/abseil deleted file mode 160000 index e1d388e7e..000000000 --- a/abseil +++ /dev/null @@ -1 +0,0 @@ -Subproject commit e1d388e7e74803050423d035e4374131b9b57919 diff --git a/autogen.sh b/autogen.sh index 8c1090d15..362bcc6f9 100755 --- a/autogen.sh +++ b/autogen.sh @@ -82,6 +82,10 @@ echo "Running $LIBTOOLIZE" $LIBTOOLIZE -f -c || bail_out $LIBTOOLIZE --automake || bail_out +# Run aclocal a 2nd time because glibtoolize created additional m4 files. +echo "Running aclocal" +aclocal -I config || bail_out + # --- Step 3: Generate configure and include/miaconfig.h from: # . configure.ac # diff --git a/cmake/AddCompilerFlag.cmake b/cmake/AddCompilerFlag.cmake deleted file mode 100644 index f31e4e035..000000000 --- a/cmake/AddCompilerFlag.cmake +++ /dev/null @@ -1,130 +0,0 @@ -# - Add a given compiler flag to flags variables. -# AddCompilerFlag( []) -# or -# AddCompilerFlag( [C_FLAGS ] [CXX_FLAGS ] [C_RESULT ] -# [CXX_RESULT ]) - -#============================================================================= -# Copyright 2010-2015 Matthias Kretz -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright notice, -# this list of conditions and the following disclaimer. 
-# -# * Redistributions in binary form must reproduce the above copyright notice, -# this list of conditions and the following disclaimer in the documentation -# and/or other materials provided with the distribution. -# -# * Neither the names of contributing organizations nor the -# names of its contributors may be used to endorse or promote products -# derived from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS ``AS IS'' -# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE FOR -# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-#============================================================================= - -get_filename_component(_currentDir "${CMAKE_CURRENT_LIST_FILE}" PATH) -include(CheckCCompilerFlag) -include(CheckCXXCompilerFlag) - -macro(AddCompilerFlag _flag) - string(REGEX REPLACE "[-.+/:= ]" "_" _flag_esc "${_flag}") - - set(_c_flags "CMAKE_C_FLAGS") - set(_cxx_flags "CMAKE_CXX_FLAGS") - set(_c_result tmp) - set(_cxx_result tmp) - if(${ARGC} EQUAL 2) - message(WARNING "Deprecated use of the AddCompilerFlag macro.") - unset(_c_result) - set(_cxx_result ${ARGV1}) - elseif(${ARGC} GREATER 2) - set(state 0) - unset(_c_flags) - unset(_cxx_flags) - unset(_c_result) - unset(_cxx_result) - foreach(_arg ${ARGN}) - if("x${_arg}" STREQUAL "xC_FLAGS") - set(state 1) - if(NOT DEFINED _c_result) - set(_c_result tmp0) - endif() - elseif("x${_arg}" STREQUAL "xCXX_FLAGS") - set(state 2) - if(NOT DEFINED _cxx_result) - set(_cxx_result tmp1) - endif() - elseif("x${_arg}" STREQUAL "xC_RESULT") - set(state 3) - elseif("x${_arg}" STREQUAL "xCXX_RESULT") - set(state 4) - elseif(state EQUAL 1) - set(_c_flags "${_arg}") - elseif(state EQUAL 2) - set(_cxx_flags "${_arg}") - elseif(state EQUAL 3) - set(_c_result "${_arg}") - elseif(state EQUAL 4) - set(_cxx_result "${_arg}") - else() - message(FATAL_ERROR "Syntax error for AddCompilerFlag") - endif() - endforeach() - endif() - - set(_c_code "int main() { return 0; }") - set(_cxx_code "int main() { return 0; }") - if("${_flag}" STREQUAL "-mfma") - # Compiling with FMA3 support may fail only at the assembler level. 
- # In that case we need to have such an instruction in the test code - set(_c_code "#include - __m128 foo(__m128 x) { return _mm_fmadd_ps(x, x, x); } - int main() { return 0; }") - set(_cxx_code "${_c_code}") - elseif("${_flag}" STREQUAL "-stdlib=libc++") - # Compiling with libc++ not only requires a compiler that understands it, but also - # the libc++ headers itself - set(_cxx_code "#include - #include - int main() { return 0; }") - else() - set(_cxx_code "#include - int main() { return 0; }") - endif() - - if(DEFINED _c_result) - check_c_compiler_flag("${_flag}" check_c_compiler_flag_${_flag_esc} "${_c_code}") - set(${_c_result} ${check_c_compiler_flag_${_flag_esc}}) - endif() - if(DEFINED _cxx_result) - check_cxx_compiler_flag("${_flag}" check_cxx_compiler_flag_${_flag_esc} "${_cxx_code}") - set(${_cxx_result} ${check_cxx_compiler_flag_${_flag_esc}}) - endif() - - macro(my_append _list _flag _special) - if("x${_list}" STREQUAL "x${_special}") - set(${_list} "${${_list}} ${_flag}") - else() - list(APPEND ${_list} "${_flag}") - endif() - endmacro() - - if(check_c_compiler_flag_${_flag_esc} AND DEFINED _c_flags) - my_append(${_c_flags} "${_flag}" CMAKE_C_FLAGS) - endif() - if(check_cxx_compiler_flag_${_flag_esc} AND DEFINED _cxx_flags) - my_append(${_cxx_flags} "${_flag}" CMAKE_CXX_FLAGS) - endif() -endmacro(AddCompilerFlag) diff --git a/cmake/Configure.cmake b/cmake/Configure.cmake index 15d01e041..12448f708 100644 --- a/cmake/Configure.cmake +++ b/cmake/Configure.cmake @@ -94,7 +94,6 @@ set(include_files_list CL/cl.h OpenCL/cl.h pango-1.0/pango/pango-features.h - tiffio.h unicode/uchar.h ) check_includes(include_files_list) @@ -105,18 +104,27 @@ set(types_list ) check_types(types_list) +list(APPEND CMAKE_REQUIRED_DEFINITIONS -D_GNU_SOURCE) +list(APPEND CMAKE_REQUIRED_LIBRARIES -lm) +set(functions_list + feenableexcept +) +check_functions(functions_list) + file(APPEND ${AUTOCONFIG_SRC} " /* Version number */ #cmakedefine PACKAGE_VERSION \"${PACKAGE_VERSION}\" 
#cmakedefine GRAPHICS_DISABLED ${GRAPHICS_DISABLED} +#cmakedefine FAST_FLOAT ${FAST_FLOAT} #cmakedefine DISABLED_LEGACY_ENGINE ${DISABLED_LEGACY_ENGINE} +#cmakedefine HAVE_TIFFIO_H ${HAVE_TIFFIO_H} #cmakedefine HAVE_LIBARCHIVE ${HAVE_LIBARCHIVE} +#cmakedefine HAVE_LIBCURL ${HAVE_LIBCURL} ") if(TESSDATA_PREFIX) - add_definitions(-DTESSDATA_PREFIX=${TESSDATA_PREFIX}) file(APPEND ${AUTOCONFIG_SRC} " -#cmakedefine TESSDATA_PREFIX ${TESSDATA_PREFIX} +#cmakedefine TESSDATA_PREFIX \"${TESSDATA_PREFIX}\" ") endif() diff --git a/cmake/OptimizeForArchitecture.cmake b/cmake/OptimizeForArchitecture.cmake deleted file mode 100644 index 075956c2e..000000000 --- a/cmake/OptimizeForArchitecture.cmake +++ /dev/null @@ -1,581 +0,0 @@ -# Determine the host CPU feature set and determine the best set of compiler -# flags to enable all supported SIMD relevant features. Alternatively, the -# target CPU can be explicitly selected (for generating more generic binaries -# or for targeting a different system). -# Compilers provide e.g. the -march=native flag to achieve a similar result. -# This fails to address the need for building for a different microarchitecture -# than the current host. -# The script tries to deduce all settings from the model and family numbers of -# the CPU instead of reading the CPUID flags from e.g. /proc/cpuinfo. This makes -# the detection more independent from the CPUID code in the kernel (e.g. avx2 is -# not listed on older kernels). -# -# Usage: -# OptimizeForArchitecture() -# If either of Vc_SSE_INTRINSICS_BROKEN, Vc_AVX_INTRINSICS_BROKEN, -# Vc_AVX2_INTRINSICS_BROKEN is defined and set, the OptimizeForArchitecture -# macro will consequently disable the relevant features via compiler flags. 
- -#============================================================================= -# Copyright 2010-2016 Matthias Kretz -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright notice, -# this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright notice, -# this list of conditions and the following disclaimer in the documentation -# and/or other materials provided with the distribution. -# * Neither the names of contributing organizations nor the -# names of its contributors may be used to endorse or promote products -# derived from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS ``AS IS'' -# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE FOR -# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-#============================================================================= - -get_filename_component(_currentDir "${CMAKE_CURRENT_LIST_FILE}" PATH) -include("${_currentDir}/AddCompilerFlag.cmake") -include(CheckIncludeFileCXX) - -macro(_my_find _list _value _ret) - list(FIND ${_list} "${_value}" _found) - if(_found EQUAL -1) - set(${_ret} FALSE) - else(_found EQUAL -1) - set(${_ret} TRUE) - endif(_found EQUAL -1) -endmacro(_my_find) - -macro(AutodetectHostArchitecture) - set(TARGET_ARCHITECTURE "generic") - set(Vc_ARCHITECTURE_FLAGS) - set(_vendor_id) - set(_cpu_family) - set(_cpu_model) - if(CMAKE_SYSTEM_NAME STREQUAL "Linux") - file(READ "/proc/cpuinfo" _cpuinfo) - string(REGEX REPLACE ".*vendor_id[ \t]*:[ \t]+([a-zA-Z0-9_-]+).*" "\\1" _vendor_id "${_cpuinfo}") - string(REGEX REPLACE ".*cpu family[ \t]*:[ \t]+([a-zA-Z0-9_-]+).*" "\\1" _cpu_family "${_cpuinfo}") - string(REGEX REPLACE ".*model[ \t]*:[ \t]+([a-zA-Z0-9_-]+).*" "\\1" _cpu_model "${_cpuinfo}") - string(REGEX REPLACE ".*flags[ \t]*:[ \t]+([^\n]+).*" "\\1" _cpu_flags "${_cpuinfo}") - elseif(CMAKE_SYSTEM_NAME STREQUAL "Darwin") - exec_program("/usr/sbin/sysctl -n machdep.cpu.vendor machdep.cpu.model machdep.cpu.family machdep.cpu.features" OUTPUT_VARIABLE _sysctl_output_string) - string(REPLACE "\n" ";" _sysctl_output ${_sysctl_output_string}) - list(GET _sysctl_output 0 _vendor_id) - list(GET _sysctl_output 1 _cpu_model) - list(GET _sysctl_output 2 _cpu_family) - list(GET _sysctl_output 3 _cpu_flags) - - string(TOLOWER "${_cpu_flags}" _cpu_flags) - string(REPLACE "." 
"_" _cpu_flags "${_cpu_flags}") - elseif(CMAKE_SYSTEM_NAME STREQUAL "Windows") - get_filename_component(_vendor_id "[HKEY_LOCAL_MACHINE\\Hardware\\Description\\System\\CentralProcessor\\0;VendorIdentifier]" NAME CACHE) - get_filename_component(_cpu_id "[HKEY_LOCAL_MACHINE\\Hardware\\Description\\System\\CentralProcessor\\0;Identifier]" NAME CACHE) - mark_as_advanced(_vendor_id _cpu_id) - string(REGEX REPLACE ".* Family ([0-9]+) .*" "\\1" _cpu_family "${_cpu_id}") - string(REGEX REPLACE ".* Model ([0-9]+) .*" "\\1" _cpu_model "${_cpu_id}") - endif(CMAKE_SYSTEM_NAME STREQUAL "Linux") - if(_vendor_id STREQUAL "GenuineIntel") - if(_cpu_family EQUAL 6) - # taken from the Intel ORM - # http://www.intel.com/content/www/us/en/processors/architectures-software-developer-manuals.html - # CPUID Signature Values of Of Recent Intel Microarchitectures - # 4E 5E | Skylake microarchitecture - # 3D 47 56 | Broadwell microarchitecture - # 3C 45 46 3F | Haswell microarchitecture - # 3A 3E | Ivy Bridge microarchitecture - # 2A 2D | Sandy Bridge microarchitecture - # 25 2C 2F | Intel microarchitecture Westmere - # 1A 1E 1F 2E | Intel microarchitecture Nehalem - # 17 1D | Enhanced Intel Core microarchitecture - # 0F | Intel Core microarchitecture - # - # Intel SDM Vol. 3C 35-1 / December 2016: - # 57 | Xeon Phi 3200, 5200, 7200 [Knights Landing] - # 85 | Future Xeon Phi - # 8E 9E | 7th gen. Core [Kaby Lake] - # 55 | Future Xeon [Skylake w/ AVX512] - # 4E 5E | 6th gen. Core / E3 v5 [Skylake w/o AVX512] - # 56 | Xeon D-1500 [Broadwell] - # 4F | Xeon E5 v4, E7 v4, i7-69xx [Broadwell] - # 47 | 5th gen. Core / Xeon E3 v4 [Broadwell] - # 3D | M-5xxx / 5th gen. [Broadwell] - # 3F | Xeon E5 v3, E7 v3, i7-59xx [Haswell-E] - # 3C 45 46 | 4th gen. Core, Xeon E3 v3 [Haswell] - # 3E | Xeon E5 v2, E7 v2, i7-49xx [Ivy Bridge-E] - # 3A | 3rd gen. Core, Xeon E3 v2 [Ivy Bridge] - # 2D | Xeon E5, i7-39xx [Sandy Bridge] - # 2F | Xeon E7 - # 2A | Xeon E3, 2nd gen. 
Core [Sandy Bridge] - # 2E | Xeon 7500, 6500 series - # 25 2C | Xeon 3600, 5600 series, Core i7, i5 and i3 - # - # Values from the Intel SDE: - # 5C | Goldmont - # 5A | Silvermont - # 57 | Knights Landing - # 66 | Cannonlake - # 55 | Skylake Server - # 4E | Skylake Client - # 3C | Broadwell (likely a bug in the SDE) - # 3C | Haswell - if(_cpu_model EQUAL 87) # 57 - set(TARGET_ARCHITECTURE "knl") # Knights Landing - elseif(_cpu_model EQUAL 92) - set(TARGET_ARCHITECTURE "goldmont") - elseif(_cpu_model EQUAL 90 OR _cpu_model EQUAL 76) - set(TARGET_ARCHITECTURE "silvermont") - elseif(_cpu_model EQUAL 102) - set(TARGET_ARCHITECTURE "cannonlake") - elseif(_cpu_model EQUAL 142 OR _cpu_model EQUAL 158) # 8E, 9E - set(TARGET_ARCHITECTURE "kaby-lake") - elseif(_cpu_model EQUAL 85) # 55 - set(TARGET_ARCHITECTURE "skylake-avx512") - elseif(_cpu_model EQUAL 78 OR _cpu_model EQUAL 94) # 4E, 5E - set(TARGET_ARCHITECTURE "skylake") - elseif(_cpu_model EQUAL 61 OR _cpu_model EQUAL 71 OR _cpu_model EQUAL 79 OR _cpu_model EQUAL 86) # 3D, 47, 4F, 56 - set(TARGET_ARCHITECTURE "broadwell") - elseif(_cpu_model EQUAL 60 OR _cpu_model EQUAL 69 OR _cpu_model EQUAL 70 OR _cpu_model EQUAL 63) - set(TARGET_ARCHITECTURE "haswell") - elseif(_cpu_model EQUAL 58 OR _cpu_model EQUAL 62) - set(TARGET_ARCHITECTURE "ivy-bridge") - elseif(_cpu_model EQUAL 42 OR _cpu_model EQUAL 45) - set(TARGET_ARCHITECTURE "sandy-bridge") - elseif(_cpu_model EQUAL 37 OR _cpu_model EQUAL 44 OR _cpu_model EQUAL 47) - set(TARGET_ARCHITECTURE "westmere") - elseif(_cpu_model EQUAL 26 OR _cpu_model EQUAL 30 OR _cpu_model EQUAL 31 OR _cpu_model EQUAL 46) - set(TARGET_ARCHITECTURE "nehalem") - elseif(_cpu_model EQUAL 23 OR _cpu_model EQUAL 29) - set(TARGET_ARCHITECTURE "penryn") - elseif(_cpu_model EQUAL 15) - set(TARGET_ARCHITECTURE "merom") - elseif(_cpu_model EQUAL 28) - set(TARGET_ARCHITECTURE "atom") - elseif(_cpu_model EQUAL 14) - set(TARGET_ARCHITECTURE "core") - elseif(_cpu_model LESS 14) - message(WARNING "Your CPU 
(family ${_cpu_family}, model ${_cpu_model}) is not known. Auto-detection of optimization flags failed and will use the generic CPU settings with SSE2.") - set(TARGET_ARCHITECTURE "generic") - else() - message(WARNING "Your CPU (family ${_cpu_family}, model ${_cpu_model}) is not known. Auto-detection of optimization flags failed and will use the 65nm Core 2 CPU settings.") - set(TARGET_ARCHITECTURE "merom") - endif() - elseif(_cpu_family EQUAL 7) # Itanium (not supported) - message(WARNING "Your CPU (Itanium: family ${_cpu_family}, model ${_cpu_model}) is not supported by OptimizeForArchitecture.cmake.") - elseif(_cpu_family EQUAL 15) # NetBurst - list(APPEND _available_vector_units_list "sse" "sse2") - if(_cpu_model GREATER 2) # Not sure whether this must be 3 or even 4 instead - list(APPEND _available_vector_units_list "sse" "sse2" "sse3") - endif(_cpu_model GREATER 2) - endif(_cpu_family EQUAL 6) - elseif(_vendor_id STREQUAL "AuthenticAMD") - if(_cpu_family EQUAL 23) - set(TARGET_ARCHITECTURE "zen") - elseif(_cpu_family EQUAL 22) # 16h - set(TARGET_ARCHITECTURE "AMD 16h") - elseif(_cpu_family EQUAL 21) # 15h - if(_cpu_model LESS 2) - set(TARGET_ARCHITECTURE "bulldozer") - else() - set(TARGET_ARCHITECTURE "piledriver") - endif() - elseif(_cpu_family EQUAL 20) # 14h - set(TARGET_ARCHITECTURE "AMD 14h") - elseif(_cpu_family EQUAL 18) # 12h - elseif(_cpu_family EQUAL 16) # 10h - set(TARGET_ARCHITECTURE "barcelona") - elseif(_cpu_family EQUAL 15) - set(TARGET_ARCHITECTURE "k8") - if(_cpu_model GREATER 64) # I don't know the right number to put here. 
This is just a guess from the hardware I have access to - set(TARGET_ARCHITECTURE "k8-sse3") - endif(_cpu_model GREATER 64) - endif() - endif(_vendor_id STREQUAL "GenuineIntel") -endmacro() - -macro(OptimizeForArchitecture) - if("${CMAKE_SYSTEM_PROCESSOR}" MATCHES "(x86|AMD64)") - OptimizeForArchitectureX86() - else() - message(STATUS "No support for auto-detection of the target instruction set/extension") - set(TARGET_ARCHITECTURE "unused" CACHE STRING "CPU architecture to optimize for. (unused)") - endif() -endmacro() - -macro(OptimizeForArchitectureX86) - set(TARGET_ARCHITECTURE "auto" CACHE STRING "CPU architecture to optimize for. \ -Using an incorrect setting here can result in crashes of the resulting binary because of invalid instructions used. \ -Setting the value to \"auto\" will try to optimize for the architecture where cmake is called. \ -Other supported values are: \"none\", \"generic\", \"core\", \"merom\" (65nm Core2), \ -\"penryn\" (45nm Core2), \"nehalem\", \"westmere\", \"sandy-bridge\", \"ivy-bridge\", \ -\"haswell\", \"broadwell\", \"skylake\", \"skylake-xeon\", \"kaby-lake\", \"cannonlake\", \"silvermont\", \ -\"goldmont\", \"knl\" (Knights Landing), \"atom\", \"k8\", \"k8-sse3\", \"barcelona\", \ -\"istanbul\", \"magny-cours\", \"bulldozer\", \"interlagos\", \"piledriver\", \ -\"AMD 14h\", \"AMD 16h\", \"zen\".") - set(_force) - if(NOT _last_target_arch STREQUAL "${TARGET_ARCHITECTURE}") - message(STATUS "target changed from \"${_last_target_arch}\" to \"${TARGET_ARCHITECTURE}\"") - set(_force FORCE) - endif() - set(_last_target_arch "${TARGET_ARCHITECTURE}" CACHE STRING "" FORCE) - mark_as_advanced(_last_target_arch) - string(TOLOWER "${TARGET_ARCHITECTURE}" TARGET_ARCHITECTURE) - - set(_march_flag_list) - set(_available_vector_units_list) - - if(TARGET_ARCHITECTURE STREQUAL "auto") - AutodetectHostArchitecture() - message(STATUS "Detected CPU: ${TARGET_ARCHITECTURE}") - endif(TARGET_ARCHITECTURE STREQUAL "auto") - - macro(_nehalem) - 
list(APPEND _march_flag_list "nehalem") - list(APPEND _march_flag_list "corei7") - list(APPEND _march_flag_list "core2") - list(APPEND _available_vector_units_list "sse" "sse2" "sse3" "ssse3" "sse4.1" "sse4.2") - endmacro() - macro(_westmere) - list(APPEND _march_flag_list "westmere") - _nehalem() - endmacro() - macro(_sandybridge) - list(APPEND _march_flag_list "sandybridge") - list(APPEND _march_flag_list "corei7-avx") - _westmere() - list(APPEND _available_vector_units_list "sse" "sse2" "sse3" "ssse3" "sse4.1" "sse4.2" "avx") - endmacro() - macro(_ivybridge) - list(APPEND _march_flag_list "ivybridge") - list(APPEND _march_flag_list "core-avx-i") - _sandybridge() - list(APPEND _available_vector_units_list "rdrnd" "f16c") - endmacro() - macro(_haswell) - list(APPEND _march_flag_list "haswell") - list(APPEND _march_flag_list "core-avx2") - _ivybridge() - list(APPEND _available_vector_units_list "avx2" "fma" "bmi" "bmi2") - endmacro() - macro(_broadwell) - list(APPEND _march_flag_list "broadwell") - _haswell() - endmacro() - macro(_skylake) - list(APPEND _march_flag_list "skylake") - _broadwell() - endmacro() - macro(_skylake_avx512) - list(APPEND _march_flag_list "skylake-avx512") - _skylake() - list(APPEND _available_vector_units_list "avx512f" "avx512cd" "avx512dq" "avx512bw" "avx512vl") - endmacro() - macro(_cannonlake) - list(APPEND _march_flag_list "cannonlake") - _skylake_avx512() - list(APPEND _available_vector_units_list "avx512ifma" "avx512vbmi") - endmacro() - macro(_knightslanding) - list(APPEND _march_flag_list "knl") - _broadwell() - list(APPEND _available_vector_units_list "avx512f" "avx512pf" "avx512er" "avx512cd") - endmacro() - macro(_silvermont) - list(APPEND _march_flag_list "silvermont") - _westmere() - list(APPEND _available_vector_units_list "rdrnd") - endmacro() - macro(_goldmont) - list(APPEND _march_flag_list "goldmont") - _silvermont() - endmacro() - - if(TARGET_ARCHITECTURE STREQUAL "core") - list(APPEND _march_flag_list "core2") - 
list(APPEND _available_vector_units_list "sse" "sse2" "sse3") - elseif(TARGET_ARCHITECTURE STREQUAL "merom") - list(APPEND _march_flag_list "merom") - list(APPEND _march_flag_list "core2") - list(APPEND _available_vector_units_list "sse" "sse2" "sse3" "ssse3") - elseif(TARGET_ARCHITECTURE STREQUAL "penryn") - list(APPEND _march_flag_list "penryn") - list(APPEND _march_flag_list "core2") - list(APPEND _available_vector_units_list "sse" "sse2" "sse3" "ssse3") - message(STATUS "Sadly the Penryn architecture exists in variants with SSE4.1 and without SSE4.1.") - if(_cpu_flags MATCHES "sse4_1") - message(STATUS "SSE4.1: enabled (auto-detected from this computer's CPU flags)") - list(APPEND _available_vector_units_list "sse4.1") - else() - message(STATUS "SSE4.1: disabled (auto-detected from this computer's CPU flags)") - endif() - elseif(TARGET_ARCHITECTURE STREQUAL "knl") - _knightslanding() - elseif(TARGET_ARCHITECTURE STREQUAL "cannonlake") - _cannonlake() - elseif(TARGET_ARCHITECTURE STREQUAL "kaby-lake") - _skylake() - elseif(TARGET_ARCHITECTURE STREQUAL "skylake-xeon" OR TARGET_ARCHITECTURE STREQUAL "skylake-avx512") - _skylake_avx512() - elseif(TARGET_ARCHITECTURE STREQUAL "skylake") - _skylake() - elseif(TARGET_ARCHITECTURE STREQUAL "broadwell") - _broadwell() - elseif(TARGET_ARCHITECTURE STREQUAL "haswell") - _haswell() - elseif(TARGET_ARCHITECTURE STREQUAL "ivy-bridge") - _ivybridge() - elseif(TARGET_ARCHITECTURE STREQUAL "sandy-bridge") - _sandybridge() - elseif(TARGET_ARCHITECTURE STREQUAL "westmere") - _westmere() - elseif(TARGET_ARCHITECTURE STREQUAL "nehalem") - _nehalem() - elseif(TARGET_ARCHITECTURE STREQUAL "goldmont") - _goldmont() - elseif(TARGET_ARCHITECTURE STREQUAL "silvermont") - _silvermont() - elseif(TARGET_ARCHITECTURE STREQUAL "atom") - list(APPEND _march_flag_list "atom") - list(APPEND _march_flag_list "core2") - list(APPEND _available_vector_units_list "sse" "sse2" "sse3" "ssse3") - elseif(TARGET_ARCHITECTURE STREQUAL "k8") - list(APPEND 
_march_flag_list "k8") - list(APPEND _available_vector_units_list "sse" "sse2") - elseif(TARGET_ARCHITECTURE STREQUAL "k8-sse3") - list(APPEND _march_flag_list "k8-sse3") - list(APPEND _march_flag_list "k8") - list(APPEND _available_vector_units_list "sse" "sse2" "sse3") - elseif(TARGET_ARCHITECTURE STREQUAL "AMD 16h") - list(APPEND _march_flag_list "btver2") - list(APPEND _march_flag_list "btver1") - list(APPEND _available_vector_units_list "sse" "sse2" "sse3" "ssse3" "sse4a" "sse4.1" "sse4.2" "avx" "f16c") - elseif(TARGET_ARCHITECTURE STREQUAL "AMD 14h") - list(APPEND _march_flag_list "btver1") - list(APPEND _available_vector_units_list "sse" "sse2" "sse3" "ssse3" "sse4a") - elseif(TARGET_ARCHITECTURE STREQUAL "zen") - list(APPEND _march_flag_list "znver1") - _skylake() - list(APPEND _available_vector_units_list "sse4a") - elseif(TARGET_ARCHITECTURE STREQUAL "piledriver") - list(APPEND _march_flag_list "bdver2") - list(APPEND _march_flag_list "bdver1") - list(APPEND _march_flag_list "bulldozer") - list(APPEND _march_flag_list "barcelona") - list(APPEND _march_flag_list "core2") - list(APPEND _available_vector_units_list "sse" "sse2" "sse3" "ssse3" "sse4a" "sse4.1" "sse4.2" "avx" "xop" "fma4" "fma" "f16c") - elseif(TARGET_ARCHITECTURE STREQUAL "interlagos") - list(APPEND _march_flag_list "bdver1") - list(APPEND _march_flag_list "bulldozer") - list(APPEND _march_flag_list "barcelona") - list(APPEND _march_flag_list "core2") - list(APPEND _available_vector_units_list "sse" "sse2" "sse3" "ssse3" "sse4a" "sse4.1" "sse4.2" "avx" "xop" "fma4") - elseif(TARGET_ARCHITECTURE STREQUAL "bulldozer") - list(APPEND _march_flag_list "bdver1") - list(APPEND _march_flag_list "bulldozer") - list(APPEND _march_flag_list "barcelona") - list(APPEND _march_flag_list "core2") - list(APPEND _available_vector_units_list "sse" "sse2" "sse3" "ssse3" "sse4a" "sse4.1" "sse4.2" "avx" "xop" "fma4") - elseif(TARGET_ARCHITECTURE STREQUAL "barcelona") - list(APPEND _march_flag_list "barcelona") - 
list(APPEND _march_flag_list "core2") - list(APPEND _available_vector_units_list "sse" "sse2" "sse3" "sse4a") - elseif(TARGET_ARCHITECTURE STREQUAL "istanbul") - list(APPEND _march_flag_list "barcelona") - list(APPEND _march_flag_list "core2") - list(APPEND _available_vector_units_list "sse" "sse2" "sse3" "sse4a") - elseif(TARGET_ARCHITECTURE STREQUAL "magny-cours") - list(APPEND _march_flag_list "barcelona") - list(APPEND _march_flag_list "core2") - list(APPEND _available_vector_units_list "sse" "sse2" "sse3" "sse4a") - elseif(TARGET_ARCHITECTURE STREQUAL "generic") - list(APPEND _march_flag_list "generic") - elseif(TARGET_ARCHITECTURE STREQUAL "none") - # add this clause to remove it from the else clause - else(TARGET_ARCHITECTURE STREQUAL "core") - message(FATAL_ERROR "Unknown target architecture: \"${TARGET_ARCHITECTURE}\". Please set TARGET_ARCHITECTURE to a supported value.") - endif(TARGET_ARCHITECTURE STREQUAL "core") - - if(NOT TARGET_ARCHITECTURE STREQUAL "none") - set(_disable_vector_unit_list) - set(_enable_vector_unit_list) - if(DEFINED Vc_AVX_INTRINSICS_BROKEN AND Vc_AVX_INTRINSICS_BROKEN) - UserWarning("AVX disabled per default because of old/broken toolchain") - set(_avx_broken true) - set(_avx2_broken true) - set(_fma4_broken true) - set(_xop_broken true) - else() - set(_avx_broken false) - if(DEFINED Vc_FMA4_INTRINSICS_BROKEN AND Vc_FMA4_INTRINSICS_BROKEN) - UserWarning("FMA4 disabled per default because of old/broken toolchain") - set(_fma4_broken true) - else() - set(_fma4_broken false) - endif() - if(DEFINED Vc_XOP_INTRINSICS_BROKEN AND Vc_XOP_INTRINSICS_BROKEN) - UserWarning("XOP disabled per default because of old/broken toolchain") - set(_xop_broken true) - else() - set(_xop_broken false) - endif() - if(DEFINED Vc_AVX2_INTRINSICS_BROKEN AND Vc_AVX2_INTRINSICS_BROKEN) - UserWarning("AVX2 disabled per default because of old/broken toolchain") - set(_avx2_broken true) - else() - set(_avx2_broken false) - endif() - endif() - - 
macro(_enable_or_disable _name _flag _documentation _broken) - if(_broken) - set(_found false) - else() - _my_find(_available_vector_units_list "${_flag}" _found) - endif() - set(USE_${_name} ${_found} CACHE BOOL "${documentation}" ${_force}) - mark_as_advanced(USE_${_name}) - if(USE_${_name}) - list(APPEND _enable_vector_unit_list "${_flag}") - else() - list(APPEND _disable_vector_unit_list "${_flag}") - endif() - endmacro() - _enable_or_disable(SSE2 "sse2" "Use SSE2. If SSE2 instructions are not enabled the SSE implementation will be disabled." false) - _enable_or_disable(SSE3 "sse3" "Use SSE3. If SSE3 instructions are not enabled they will be emulated." false) - _enable_or_disable(SSSE3 "ssse3" "Use SSSE3. If SSSE3 instructions are not enabled they will be emulated." false) - _enable_or_disable(SSE4_1 "sse4.1" "Use SSE4.1. If SSE4.1 instructions are not enabled they will be emulated." false) - _enable_or_disable(SSE4_2 "sse4.2" "Use SSE4.2. If SSE4.2 instructions are not enabled they will be emulated." false) - _enable_or_disable(SSE4a "sse4a" "Use SSE4a. If SSE4a instructions are not enabled they will be emulated." false) - _enable_or_disable(AVX "avx" "Use AVX. This will all floating-point vector sizes relative to SSE." _avx_broken) - _enable_or_disable(FMA "fma" "Use FMA." _avx_broken) - _enable_or_disable(BMI2 "bmi2" "Use BMI2." _avx_broken) - _enable_or_disable(AVX2 "avx2" "Use AVX2. This will double all of the vector sizes relative to SSE." _avx2_broken) - _enable_or_disable(XOP "xop" "Use XOP." _xop_broken) - _enable_or_disable(FMA4 "fma4" "Use FMA4." _fma4_broken) - _enable_or_disable(AVX512F "avx512f" "Use AVX512F. This will double all floating-point vector sizes relative to AVX2." false) - _enable_or_disable(AVX512VL "avx512vl" "Use AVX512VL. This enables 128- and 256-bit vector length instructions with EVEX coding (improved write-masking & more vector registers)." _avx2_broken) - _enable_or_disable(AVX512PF "avx512pf" "Use AVX512PF. 
This enables prefetch instructions for gathers and scatters." false) - _enable_or_disable(AVX512ER "avx512er" "Use AVX512ER. This enables exponential and reciprocal instructions." false) - _enable_or_disable(AVX512CD "avx512cd" "Use AVX512CD." false) - _enable_or_disable(AVX512DQ "avx512dq" "Use AVX512DQ." false) - _enable_or_disable(AVX512BW "avx512bw" "Use AVX512BW." false) - _enable_or_disable(AVX512IFMA "avx512ifma" "Use AVX512IFMA." false) - _enable_or_disable(AVX512VBMI "avx512vbmi" "Use AVX512VBMI." false) - - if(MSVC) - # MSVC on 32 bit can select /arch:SSE2 (since 2010 also /arch:AVX) - # MSVC on 64 bit cannot select anything (should have changed with MSVC 2010) - _my_find(_enable_vector_unit_list "avx2" _found) - if(_found) - AddCompilerFlag("/arch:AVX2" CXX_FLAGS Vc_ARCHITECTURE_FLAGS CXX_RESULT _found) - endif() - if(NOT _found) - _my_find(_enable_vector_unit_list "avx" _found) - if(_found) - AddCompilerFlag("/arch:AVX" CXX_FLAGS Vc_ARCHITECTURE_FLAGS CXX_RESULT _found) - endif() - endif() - if(NOT _found) - _my_find(_enable_vector_unit_list "sse2" _found) - if(_found) - AddCompilerFlag("/arch:SSE2" CXX_FLAGS Vc_ARCHITECTURE_FLAGS) - endif() - endif() - foreach(_flag ${_enable_vector_unit_list}) - string(TOUPPER "${_flag}" _flag) - string(REPLACE "." 
"_" _flag "__${_flag}__") - add_definitions("-D${_flag}") - endforeach(_flag) - elseif(CMAKE_CXX_COMPILER MATCHES "/(icpc|icc)$") # ICC (on Linux) - set(OFA_map_knl "-xMIC-AVX512") - set(OFA_map_cannonlake "-xCORE-AVX512") - set(OFA_map_skylake-avx512 "-xCORE-AVX512") - set(OFA_map_skylake "-xCORE-AVX2") - set(OFA_map_broadwell "-xCORE-AVX2") - set(OFA_map_haswell "-xCORE-AVX2") - set(OFA_map_ivybridge "-xCORE-AVX-I") - set(OFA_map_sandybridge "-xAVX") - set(OFA_map_westmere "-xSSE4.2") - set(OFA_map_nehalem "-xSSE4.2") - set(OFA_map_penryn "-xSSSE3") - set(OFA_map_merom "-xSSSE3") - set(OFA_map_core2 "-xSSE3") - set(_ok FALSE) - foreach(arch ${_march_flag_list}) - if(DEFINED OFA_map_${arch}) - AddCompilerFlag(${OFA_map_${arch}} CXX_FLAGS Vc_ARCHITECTURE_FLAGS CXX_RESULT _ok) - if(_ok) - break() - endif() - endif() - endforeach() - if(NOT _ok) - # This is the Intel compiler, so SSE2 is a very reasonable baseline. - message(STATUS "Did not recognize the requested architecture flag, falling back to SSE2") - AddCompilerFlag("-xSSE2" CXX_FLAGS Vc_ARCHITECTURE_FLAGS) - endif() - else() # not MSVC and not ICC => GCC, Clang, Open64 - foreach(_flag ${_march_flag_list}) - AddCompilerFlag("-march=${_flag}" CXX_RESULT _good CXX_FLAGS Vc_ARCHITECTURE_FLAGS) - if(_good) - break() - endif(_good) - endforeach(_flag) - foreach(_flag ${_enable_vector_unit_list}) - AddCompilerFlag("-m${_flag}" CXX_RESULT _result) - if(_result) - set(_header FALSE) - if(_flag STREQUAL "sse3") - set(_header "pmmintrin.h") - elseif(_flag STREQUAL "ssse3") - set(_header "tmmintrin.h") - elseif(_flag STREQUAL "sse4.1") - set(_header "smmintrin.h") - elseif(_flag STREQUAL "sse4.2") - set(_header "smmintrin.h") - elseif(_flag STREQUAL "sse4a") - set(_header "ammintrin.h") - elseif(_flag STREQUAL "avx") - set(_header "immintrin.h") - elseif(_flag STREQUAL "avx2") - set(_header "immintrin.h") - elseif(_flag STREQUAL "fma4") - set(_header "x86intrin.h") - elseif(_flag STREQUAL "xop") - set(_header 
"x86intrin.h") - endif() - set(_resultVar "HAVE_${_header}") - string(REPLACE "." "_" _resultVar "${_resultVar}") - if(_header) - CHECK_INCLUDE_FILE_CXX("${_header}" ${_resultVar} "-m${_flag}") - if(NOT ${_resultVar}) - set(_useVar "USE_${_flag}") - string(TOUPPER "${_useVar}" _useVar) - string(REPLACE "." "_" _useVar "${_useVar}") - message(STATUS "disabling ${_useVar} because ${_header} is missing") - set(${_useVar} FALSE) - list(APPEND _disable_vector_unit_list "${_flag}") - endif() - endif() - if(NOT _header OR ${_resultVar}) - list(APPEND Vc_ARCHITECTURE_FLAGS "-m${_flag}") - endif() - endif() - endforeach(_flag) - foreach(_flag ${_disable_vector_unit_list}) - AddCompilerFlag("-mno-${_flag}" CXX_FLAGS Vc_ARCHITECTURE_FLAGS) - endforeach(_flag) - endif() - endif() -endmacro() diff --git a/cmake/templates/TesseractConfig.cmake.in b/cmake/templates/TesseractConfig.cmake.in index 8e32e6608..8e336ba63 100644 --- a/cmake/templates/TesseractConfig.cmake.in +++ b/cmake/templates/TesseractConfig.cmake.in @@ -10,8 +10,13 @@ # target_link_libraries(MY_TARGET_NAME Tesseract::libtesseract) # # This file will define the following variables: -# - Tesseract_LIBRARIES : The list of all imported targets for OpenCV modules. +# - Tesseract_LIBRARIES : The list of all imported targets. # - Tesseract_INCLUDE_DIRS : The Tesseract include directories. +# - Tesseract_LIBRARY_DIRS : The Tesseract library directories. 
+# - Tesseract_VERSION : The version of this Tesseract build: "@VERSION_PLAIN@" +# - Tesseract_VERSION_MAJOR : Major version part of Tesseract_VERSION: "@VERSION_MAJOR@" +# - Tesseract_VERSION_MINOR : Minor version part of Tesseract_VERSION: "@VERSION_MINOR@" +# - Tesseract_VERSION_PATCH : Patch version part of Tesseract_VERSION: "@VERSION_PATCH@" # # =================================================================================== @@ -22,7 +27,13 @@ include(${CMAKE_CURRENT_LIST_DIR}/TesseractTargets.cmake) @PACKAGE_INIT@ +SET(Tesseract_VERSION @VERSION_PLAIN@) +SET(Tesseract_VERSION_MAJOR @VERSION_MAJOR@) +SET(Tesseract_VERSION_MINOR @VERSION_MINOR@) +SET(Tesseract_VERSION_PATCH @VERSION_PATCH@) + set_and_check(Tesseract_INCLUDE_DIRS "@PACKAGE_INCLUDE_DIR@") -set(Tesseract_LIBRARIES libtesseract) +set_and_check(Tesseract_LIBRARY_DIRS "@PACKAGE_LIBRARY_DIRS@") +set(Tesseract_LIBRARIES @tesseract_OUTPUT_NAME@) check_required_components(Tesseract) diff --git a/configure.ac b/configure.ac index 37a6bf452..5f7f899cf 100644 --- a/configure.ac +++ b/configure.ac @@ -5,7 +5,7 @@ # ---------------------------------------- # Initialization # ---------------------------------------- -AC_PREREQ([2.63]) +AC_PREREQ([2.69]) AC_INIT([tesseract], [m4_esyscmd_s([test -d .git && git describe --abbrev=4 || cat VERSION])], [https://github.com/tesseract-ocr/tesseract/issues],, @@ -20,16 +20,16 @@ AC_LANG_COMPILER_REQUIRE CXXFLAGS=${CXXFLAGS:-""} AC_CONFIG_MACRO_DIR([m4]) AC_CONFIG_AUX_DIR([config]) -AC_CONFIG_SRCDIR([src/api/tesseractmain.cpp]) +AC_CONFIG_SRCDIR([src/tesseract.cpp]) AC_PREFIX_DEFAULT([/usr/local]) # Automake configuration. Do not require README file (we use README.md). -AM_INIT_AUTOMAKE([foreign subdir-objects]) +AM_INIT_AUTOMAKE([foreign subdir-objects nostdinc]) # Define date of package, etc. Could be useful in auto-generated # documentation. 
-PACKAGE_YEAR=2021 -PACKAGE_DATE="04/01" +PACKAGE_YEAR=2022 +PACKAGE_DATE="01/07" abs_top_srcdir=`AS_DIRNAME([$0])` @@ -75,6 +75,10 @@ AM_CONDITIONAL([T_WIN], false) AM_CONDITIONAL([GRAPHICS_DISABLED], false) AC_SUBST([AM_CPPFLAGS]) +# Be less noisy by default. +# Can be overridden with `configure --disable-silent-rules` or with `make V=1`. +AM_SILENT_RULES([yes]) + OPENCL_INC="/opt/AMDAPP/include" OPENCL_LIBS="-lOpenCL" ############################# @@ -184,8 +188,11 @@ case "${host_cpu}" in esac -AX_CHECK_COMPILE_FLAG([-march=native], [arch_native=true], [arch_native=false], [$WERROR]) -AM_CONDITIONAL([MARCH_NATIVE_OPT], $arch_native) +# check whether feenableexcept is supported. some C libraries (e.g. uclibc) don't. +AC_CHECK_FUNCS([feenableexcept]) + +AX_CHECK_COMPILE_FLAG([-fopenmp-simd], [openmp_simd=true], [openmp_simd=false], [$WERROR]) +AM_CONDITIONAL([OPENMP_SIMD], $openmp_simd) AC_ARG_WITH([extra-includes], [AS_HELP_STRING([--with-extra-includes=DIR], @@ -205,6 +212,14 @@ AC_ARG_WITH([extra-libraries], AC_MSG_ERROR([Cannot stat directory $withval]) fi]) +AC_MSG_CHECKING([--enable-float32 argument]) +AC_ARG_ENABLE([float32], + AS_HELP_STRING([--disable-float32], [disable float and enable double for LSTM])) +AC_MSG_RESULT([$enable_float32]) +if test "$enable_float32" != "no"; then + AC_DEFINE([FAST_FLOAT], [1], [Enable float for LSTM]) +fi + AC_MSG_CHECKING([--enable-graphics argument]) AC_ARG_ENABLE([graphics], AS_HELP_STRING([--disable-graphics], [disable graphics (ScrollView)])) @@ -284,7 +299,7 @@ m4_define([MY_CHECK_FRAMEWORK], ]) if test "$my_cv_framework_$1"="yes"; then AC_DEFINE(AS_TR_CPP([HAVE_FRAMEWORK_$1]), 1, - [Define if you have the $1 framework]) + [Define if you have the $1 framework]) AS_TR_CPP([FRAMEWORK_$1])="-framework $1" AC_SUBST(AS_TR_CPP([FRAMEWORK_$1])) fi] @@ -295,13 +310,14 @@ OPENCL_CPPFLAGS='' OPENCL_LDFLAGS='' case "${host_os}" in *darwin* | *-macos10*) - echo "checking for OpenCL framework" - MY_CHECK_FRAMEWORK([OpenCL]) - if 
test $my_cv_framework_OpenCL = yes; then - have_opencl_lib=true + MY_CHECK_FRAMEWORK([Accelerate]) + if test $my_cv_framework_Accelerate = yes; then + AM_CPPFLAGS="-DHAVE_FRAMEWORK_ACCELERATE $AM_CPPFLAGS" + AM_LDFLAGS="$AM_LDFLAGS -framework Accelerate" fi + MY_CHECK_FRAMEWORK([OpenCL]) if test "$enable_opencl" = "yes"; then - if !($have_opencl_lib); then + if test $my_cv_framework_OpenCL = no; then AC_MSG_ERROR([Required OpenCL library not found!]) fi AM_CPPFLAGS="-DUSE_OPENCL $AM_CPPFLAGS" @@ -430,6 +446,15 @@ esac AC_SEARCH_LIBS([pthread_create], [pthread]) +# Set PKG_CONFIG_PATH for MacOS with Homebrew unless it is already set. +AC_CHECK_PROG([have_brew], brew, true, false) +if $have_brew; then + brew_prefix=$(brew --prefix) + if test -z "$PKG_CONFIG_PATH"; then + PKG_CONFIG_PATH=$brew_prefix/opt/icu4c/lib/pkgconfig:$brew_prefix/opt/libarchive/lib/pkgconfig + export PKG_CONFIG_PATH + fi +fi # ---------------------------------------- # Check for programs needed to build documentation. @@ -449,9 +474,7 @@ AS_IF([test "$enable_doc" != "no"], [ if $have_asciidoc && $have_xsltproc; then AM_CONDITIONAL([ASCIIDOC], true) XML_CATALOG_FILES= - AC_CHECK_PROG([have_brew], brew, true, false) if $have_brew; then - brew_prefix=$(brew --prefix) catalog_file=$brew_prefix/etc/xml/catalog if test -f $catalog_file; then AM_CONDITIONAL([HAVE_XML_CATALOG_FILES], true) diff --git a/doc/combine_tessdata.1.asc b/doc/combine_tessdata.1.asc index 0ce1a1ffa..b47f8a2b4 100644 --- a/doc/combine_tessdata.1.asc +++ b/doc/combine_tessdata.1.asc @@ -177,7 +177,7 @@ lang.lstm-recoder:: lang.version:: (Optional) Version string for the traineddata file. First appeared in version 4.0 of Tesseract. - Old version of traineddata files will report Version string:Pre-4.0.0. + Old version of traineddata files will report Version:Pre-4.0.0. 4.0 version of traineddata files may include the network spec used for LSTM training as part of version string. 
diff --git a/doc/tesseract.1.asc b/doc/tesseract.1.asc index a54bba026..7e097da56 100644 --- a/doc/tesseract.1.asc +++ b/doc/tesseract.1.asc @@ -107,7 +107,7 @@ OPTIONS * *pdf* -- Output PDF ('OUTPUTBASE'`.pdf`). * *tsv* -- Output TSV ('OUTPUTBASE'`.tsv`). * *txt* -- Output plain text ('OUTPUTBASE'`.txt`). - * *get.images* -- Write processed input images to file (`tessinput.tif`). + * *get.images* -- Write processed input images to file ('OUTPUTBASE'`.processedPAGENUMBER.tif`). * *logfile* -- Redirect debug messages to file (`tesseract.log`). * *lstm.train* -- Output files used by LSTM training ('OUTPUTBASE'`.lstmf`). * *makebox* -- Write box file ('OUTPUTBASE'`.box`). @@ -432,7 +432,7 @@ Version 2.00 brought Unicode (UTF-8) support, six languages, and the ability to train Tesseract. Tesseract was included in UNLV's Fourth Annual Test of OCR Accuracy. -See . +See . Since Tesseract 2.00, scripts are now included to allow anyone to reproduce some of these tests. See for more @@ -482,7 +482,7 @@ Romano, Ray Smith, Rika Antonova, Robert Moss, Samuel Charron, Sheelagh Lloyd, Shobhit Saxena, and Thomas Kielbus. For a list of contributors see -. +. COPYING ------- diff --git a/googletest b/googletest deleted file mode 160000 index 703bd9caa..000000000 --- a/googletest +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 703bd9caab50b139428cea1aaff9974ebee5742e diff --git a/include/tesseract/baseapi.h b/include/tesseract/baseapi.h index d2acbcee8..dd9fe4a29 100644 --- a/include/tesseract/baseapi.h +++ b/include/tesseract/baseapi.h @@ -1,4 +1,4 @@ -/////////////////////////////////////////////////////////////////////// +// SPDX-License-Identifier: Apache-2.0 // File: baseapi.h // Description: Simple API for calling tesseract. // Author: Ray Smith @@ -13,8 +13,6 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
-// -/////////////////////////////////////////////////////////////////////// #ifndef TESSERACT_API_BASEAPI_H_ #define TESSERACT_API_BASEAPI_H_ @@ -32,7 +30,6 @@ #include #include -#include // for std::tuple #include // for std::vector struct Pix; @@ -155,7 +152,7 @@ public: /** * Print Tesseract fonts table to the given file. */ - void PrintFontsTable(FILE* fp) const; + void PrintFontsTable(FILE *fp) const; #endif @@ -248,14 +245,6 @@ public: */ void GetAvailableLanguagesAsVector(std::vector *langs) const; - /** - * Init only the lang model component of Tesseract. The only functions - * that work after this init are SetVariable and IsValidWord. - * WARNING: temporary! This function will be removed from here and placed - * in a separate API at some future time. - */ - int InitLangMod(const char *datapath, const char *language); - /** * Init only for page layout analysis. Use only for calls to SetImage and * AnalysePage. Calls that attempt recognition will generate an error. @@ -539,31 +528,6 @@ public: */ char *GetUTF8Text(); - size_t GetNumberOfTables() const; - - /// Return the i-th table bounding box coordinates - /// - /// Gives the (top_left.x, top_left.y, bottom_right.x, bottom_right.y) - /// coordinates of the i-th table. - std::tuple GetTableBoundingBox( - unsigned - i ///< Index of the table, for upper limit \see GetNumberOfTables() - ); - - /// Get bounding boxes of the rows of a table - /// return values are (top_left.x, top_left.y, bottom_right.x, bottom_right.y) - std::vector > GetTableRows( - unsigned - i ///< Index of the table, for upper limit \see GetNumberOfTables() - ); - - /// Get bounding boxes of the cols of a table - /// return values are (top_left.x, top_left.y, bottom_right.x, bottom_right.y) - std::vector > GetTableCols( - unsigned - i ///< Index of the table, for upper limit \see GetNumberOfTables() - ); - /** * Make a HTML-formatted string with hOCR markup from the internal * data structures. 
diff --git a/include/tesseract/capi.h b/include/tesseract/capi.h index eae114d46..a8514fd01 100644 --- a/include/tesseract/capi.h +++ b/include/tesseract/capi.h @@ -1,4 +1,4 @@ -/////////////////////////////////////////////////////////////////////// +// SPDX-License-Identifier: Apache-2.0 // File: capi.h // Description: C-API TessBaseAPI // @@ -12,8 +12,6 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// -/////////////////////////////////////////////////////////////////////// #ifndef API_CAPI_H_ #define API_CAPI_H_ @@ -240,8 +238,6 @@ TESS_API char **TessBaseAPIGetLoadedLanguagesAsVector( TESS_API char **TessBaseAPIGetAvailableLanguagesAsVector( const TessBaseAPI *handle); -TESS_API int TessBaseAPIInitLangMod(TessBaseAPI *handle, const char *datapath, - const char *language); TESS_API void TessBaseAPIInitForAnalysePage(TessBaseAPI *handle); TESS_API void TessBaseAPIReadConfigFile(TessBaseAPI *handle, diff --git a/include/tesseract/export.h b/include/tesseract/export.h index d9b533a11..d238b628f 100644 --- a/include/tesseract/export.h +++ b/include/tesseract/export.h @@ -1,4 +1,4 @@ -/////////////////////////////////////////////////////////////////////// +// SPDX-License-Identifier: Apache-2.0 // File: export.h // Description: Place holder // @@ -12,8 +12,6 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
-// -/////////////////////////////////////////////////////////////////////// #ifndef TESSERACT_PLATFORM_H_ #define TESSERACT_PLATFORM_H_ diff --git a/include/tesseract/ltrresultiterator.h b/include/tesseract/ltrresultiterator.h index 1ff45ee40..eb06d0141 100644 --- a/include/tesseract/ltrresultiterator.h +++ b/include/tesseract/ltrresultiterator.h @@ -1,4 +1,4 @@ -/////////////////////////////////////////////////////////////////////// +// SPDX-License-Identifier: Apache-2.0 // File: ltrresultiterator.h // Description: Iterator for tesseract results in strict left-to-right // order that avoids using tesseract internal data structures. @@ -14,8 +14,6 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// -/////////////////////////////////////////////////////////////////////// #ifndef TESSERACT_CCMAIN_LTR_RESULT_ITERATOR_H_ #define TESSERACT_CCMAIN_LTR_RESULT_ITERATOR_H_ @@ -93,10 +91,6 @@ public: // The number should be interpreted as a percent probability. (0.0f-100.0f) float Confidence(PageIteratorLevel level) const; - // Returns the attributes of the current row. - void RowAttributes(float *row_height, float *descenders, - float *ascenders) const; - // ============= Functions that refer to words only ============. // Returns the font attributes of the current word. If iterating at a higher diff --git a/include/tesseract/ocrclass.h b/include/tesseract/ocrclass.h index 46654c1e0..a55e65286 100644 --- a/include/tesseract/ocrclass.h +++ b/include/tesseract/ocrclass.h @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: Apache-2.0 /********************************************************************** * File: ocrclass.h * Description: Class definitions and constants for the OCR API. 
diff --git a/include/tesseract/osdetect.h b/include/tesseract/osdetect.h index 40444c3c5..34bfb557d 100644 --- a/include/tesseract/osdetect.h +++ b/include/tesseract/osdetect.h @@ -1,4 +1,4 @@ -/////////////////////////////////////////////////////////////////////// +// SPDX-License-Identifier: Apache-2.0 // File: osdetect.h // Description: Orientation and script detection. // Author: Samuel Charron @@ -14,8 +14,6 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// -/////////////////////////////////////////////////////////////////////// #ifndef TESSERACT_CCMAIN_OSDETECT_H_ #define TESSERACT_CCMAIN_OSDETECT_H_ diff --git a/include/tesseract/pageiterator.h b/include/tesseract/pageiterator.h index 670314aaf..687397150 100644 --- a/include/tesseract/pageiterator.h +++ b/include/tesseract/pageiterator.h @@ -1,4 +1,4 @@ -/////////////////////////////////////////////////////////////////////// +// SPDX-License-Identifier: Apache-2.0 // File: pageiterator.h // Description: Iterator for tesseract page structure that avoids using // tesseract internal data structures. @@ -14,8 +14,6 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// -/////////////////////////////////////////////////////////////////////// #ifndef TESSERACT_CCMAIN_PAGEITERATOR_H_ #define TESSERACT_CCMAIN_PAGEITERATOR_H_ @@ -263,6 +261,10 @@ public: bool Baseline(PageIteratorLevel level, int *x1, int *y1, int *x2, int *y2) const; + // Returns the attributes of the current row. + void RowAttributes(float *row_height, float *descenders, + float *ascenders) const; + /** * Returns orientation for the block the iterator points to. 
* orientation, writing_direction, textline_order: see publictypes.h diff --git a/include/tesseract/publictypes.h b/include/tesseract/publictypes.h index f5a264700..0069cf28e 100644 --- a/include/tesseract/publictypes.h +++ b/include/tesseract/publictypes.h @@ -1,4 +1,4 @@ -/////////////////////////////////////////////////////////////////////// +// SPDX-License-Identifier: Apache-2.0 // File: publictypes.h // Description: Types used in both the API and internally // Author: Ray Smith @@ -13,8 +13,6 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// -/////////////////////////////////////////////////////////////////////// #ifndef TESSERACT_CCSTRUCT_PUBLICTYPES_H_ #define TESSERACT_CCSTRUCT_PUBLICTYPES_H_ @@ -278,18 +276,6 @@ enum OcrEngineMode { OEM_COUNT // Number of OEMs }; -/** - * Except when Otsu is chosen - * Leptonica is used for thresholding - */ -enum class ThresholdMethod { - Otsu, // Legacy Tesseract's Otsu thresholding - AdaptiveOtsu, - Sauvola, - - Max, // Number of Thresholding methods -}; - } // namespace tesseract. #endif // TESSERACT_CCSTRUCT_PUBLICTYPES_H_ diff --git a/include/tesseract/renderer.h b/include/tesseract/renderer.h index dad799559..aba79eac1 100644 --- a/include/tesseract/renderer.h +++ b/include/tesseract/renderer.h @@ -1,4 +1,4 @@ -/////////////////////////////////////////////////////////////////////// +// SPDX-License-Identifier: Apache-2.0 // File: renderer.h // Description: Rendering interface to inject into TessBaseAPI // @@ -12,8 +12,6 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
-// -/////////////////////////////////////////////////////////////////////// #ifndef TESSERACT_API_RENDERER_H_ #define TESSERACT_API_RENDERER_H_ @@ -141,13 +139,12 @@ protected: void AppendData(const char *s, int len); private: + TessResultRenderer *next_; // Can link multiple renderers together + FILE *fout_; // output file pointer const char *file_extension_; // standard extension for generated output std::string title_; // title of document being rendered int imagenum_; // index of last image added - - FILE *fout_; // output file pointer - TessResultRenderer *next_; // Can link multiple renderers together - bool happy_; // I get grumpy when the disk fills up, etc. + bool happy_; // I get grumpy when the disk fills up, etc. }; /** @@ -189,6 +186,9 @@ protected: bool BeginDocumentHandler() override; bool AddImageHandler(TessBaseAPI *api) override; bool EndDocumentHandler() override; + +private: + bool begin_document; }; /** diff --git a/include/tesseract/resultiterator.h b/include/tesseract/resultiterator.h index 72ec2802c..3e4d5807e 100644 --- a/include/tesseract/resultiterator.h +++ b/include/tesseract/resultiterator.h @@ -1,4 +1,4 @@ -/////////////////////////////////////////////////////////////////////// +// SPDX-License-Identifier: Apache-2.0 // File: resultiterator.h // Description: Iterator for tesseract results that is capable of // iterating in proper reading order over Bi Directional @@ -15,8 +15,6 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
-// -/////////////////////////////////////////////////////////////////////// #ifndef TESSERACT_CCMAIN_RESULT_ITERATOR_H_ #define TESSERACT_CCMAIN_RESULT_ITERATOR_H_ diff --git a/include/tesseract/unichar.h b/include/tesseract/unichar.h index effcf3e63..015109d74 100644 --- a/include/tesseract/unichar.h +++ b/include/tesseract/unichar.h @@ -1,4 +1,4 @@ -/////////////////////////////////////////////////////////////////////// +// SPDX-License-Identifier: Apache-2.0 // File: unichar.h // Description: Unicode character/ligature class. // Author: Ray Smith @@ -13,8 +13,6 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// -/////////////////////////////////////////////////////////////////////// #ifndef TESSERACT_CCUTIL_UNICHAR_H_ #define TESSERACT_CCUTIL_UNICHAR_H_ @@ -32,7 +30,6 @@ namespace tesseract { // at least 4. Must not exceed 31 without changing the coding of length. #define UNICHAR_LEN 30 -// TODO(rays) Move these to the tesseract namespace. // A UNICHAR_ID is the unique id of a unichar. 
using UNICHAR_ID = int; @@ -100,10 +97,10 @@ public: // for (UNICHAR::const_iterator it = UNICHAR::begin(str, str_len); // it != UNICHAR::end(str, len); // ++it) { - // tprintf("UCS-4 symbol code = %d\n", *it); + // printf("UCS-4 symbol code = %d\n", *it); // char buf[5]; // int char_len = it.get_utf8(buf); buf[char_len] = '\0'; - // tprintf("Char = %s\n", buf); + // printf("Char = %s\n", buf); // } class TESS_API const_iterator { using CI = const_iterator; diff --git a/include/tesseract/version.h.in b/include/tesseract/version.h.in index b283bc2ec..6bac5d665 100644 --- a/include/tesseract/version.h.in +++ b/include/tesseract/version.h.in @@ -1,4 +1,4 @@ -/////////////////////////////////////////////////////////////////////// +// SPDX-License-Identifier: Apache-2.0 // File: version.h // Description: Version information // @@ -12,8 +12,6 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// -/////////////////////////////////////////////////////////////////////// #ifndef TESSERACT_API_VERSION_H_ #define TESSERACT_API_VERSION_H_ diff --git a/src/api/altorenderer.cpp b/src/api/altorenderer.cpp index c0afcb07c..089189ef5 100644 --- a/src/api/altorenderer.cpp +++ b/src/api/altorenderer.cpp @@ -55,7 +55,17 @@ static void AddBoxToAlto(const ResultIterator *it, PageIteratorLevel level, /// Append the ALTO XML for the beginning of the document /// bool TessAltoRenderer::BeginDocumentHandler() { - AppendString( + // Delay the XML output because we need the name of the image file. 
+ begin_document = true; + return true; +} + +/// +/// Append the ALTO XML for the layout of the image +/// +bool TessAltoRenderer::AddImageHandler(TessBaseAPI *api) { + if (begin_document) { + AppendString( "\n" "\n" "\t\t\t"); - AppendString(title()); + AppendString(api->GetInputName()); - AppendString( + AppendString( "\n" "\t\t\n" "\t\t\n" "\t\t\t\n" "\t\t\t\t\n" "\t\t\t\t\ttesseract "); - AppendString(TessBaseAPI::Version()); - AppendString( + AppendString(TessBaseAPI::Version()); + AppendString( "\n" "\t\t\t\t\n" "\t\t\t\n" "\t\t\n" "\t\n" "\t\n"); + begin_document = false; + } - return true; -} - -/// -/// Append the ALTO XML for the layout of the image -/// -bool TessAltoRenderer::AddImageHandler(TessBaseAPI *api) { const std::unique_ptr text(api->GetAltoText(imagenum())); if (text == nullptr) { return false; @@ -112,7 +117,8 @@ bool TessAltoRenderer::EndDocumentHandler() { } TessAltoRenderer::TessAltoRenderer(const char *outputbase) - : TessResultRenderer(outputbase, "xml") {} + : TessResultRenderer(outputbase, "xml"), + begin_document(false) {} /// /// Make an XML-formatted string with ALTO markup from the internal diff --git a/src/api/baseapi.cpp b/src/api/baseapi.cpp index 8127b5cd6..114c67875 100644 --- a/src/api/baseapi.cpp +++ b/src/api/baseapi.cpp @@ -30,8 +30,8 @@ #include "elst.h" // for ELIST_ITERATOR, ELISTIZE, ELISTIZEH #include "environ.h" // for l_uint8 #ifndef DISABLED_LEGACY_ENGINE -# include "equationdetect.h" // for EquationDetect -#endif +#include "equationdetect.h" // for EquationDetect, destructor of equ_detect_ +#endif // ndef DISABLED_LEGACY_ENGINE #include "errcode.h" // for ASSERT_HOST #include "helpers.h" // for IntCastRounded, chomp_string #include "host.h" // for MAX_PATH @@ -56,7 +56,6 @@ #include "tesseractclass.h" // for Tesseract #include "tprintf.h" // for tprintf #include "werd.h" // for WERD, WERD_IT, W_FUZZY_NON, W_FUZZY_SP -#include "tabletransfer.h" // for detected tables from tablefind.h #include "thresholder.h" // 
for ImageThresholder #include @@ -100,6 +99,9 @@ namespace tesseract { static BOOL_VAR(stream_filelist, false, "Stream a filelist from stdin"); static STRING_VAR(document_title, "", "Title of output document (used for hOCR and PDF output)"); +#ifdef HAVE_LIBCURL +static INT_VAR(curl_timeout, 0, "Timeout for curl in seconds"); +#endif /** Minimum sensible image size to be worth running tesseract. */ const int kMinRectSize = 10; @@ -109,17 +111,17 @@ const char kTesseractReject = '~'; const char kUNLVReject = '~'; /** Character used by UNLV as a suspect marker. */ const char kUNLVSuspect = '^'; -/** - * Filename used for input image file, from which to derive a name to search - * for a possible UNLV zone file, if none is specified by SetInputName. - */ -static const char *kInputFile = "noname.tif"; /** * Temp file used for storing current parameters before applying retry values. */ static const char *kOldVarsFile = "failed_vars.txt"; #ifndef DISABLED_LEGACY_ENGINE +/** + * Filename used for input image file, from which to derive a name to search + * for a possible UNLV zone file, if none is specified by SetInputName. + */ +static const char *kInputFile = "noname.tif"; static const char kUnknownFontName[] = "UnknownFont"; static STRING_VAR(classify_font_name, kUnknownFontName, @@ -377,9 +379,8 @@ int TessBaseAPI::Init(const char *data, int data_size, const char *language, Ocr char **configs, int configs_size, const std::vector *vars_vec, const std::vector *vars_values, bool set_only_non_debug_params, FileReader reader) { - // Default language is "eng". if (language == nullptr) { - language = "eng"; + language = ""; } if (data == nullptr) { data = ""; @@ -420,7 +421,7 @@ int TessBaseAPI::Init(const char *data, int data_size, const char *language, Ocr // Update datapath and language requested for the last valid initialization. 
datapath_ = datapath; - if ((strcmp(datapath_.c_str(), "") == 0) && (strcmp(tesseract_->datadir.c_str(), "") != 0)) { + if (datapath_.empty() && !tesseract_->datadir.empty()) { datapath_ = tesseract_->datadir; } @@ -475,25 +476,6 @@ void TessBaseAPI::GetAvailableLanguagesAsVector(std::vector *langs) } } -// TODO(amit): Adapt to lstm -#ifndef DISABLED_LEGACY_ENGINE -/** - * Init only the lang model component of Tesseract. The only functions - * that work after this init are SetVariable and IsValidWord. - * WARNING: temporary! This function will be removed from here and placed - * in a separate API at some future time. - */ -int TessBaseAPI::InitLangMod(const char *datapath, const char *language) { - if (tesseract_ == nullptr) { - tesseract_ = new Tesseract; - } else { - ParamUtils::ResetToDefaults(tesseract_->params()); - } - TessdataManager mgr; - return tesseract_->init_tesseract_lm(datapath, nullptr, language, &mgr); -} -#endif // ndef DISABLED_LEGACY_ENGINE - /** * Init only for page layout analysis. Use only for calls to SetImage and * AnalysePage. Calls that attempt recognition will generate an error. @@ -1143,7 +1125,7 @@ bool TessBaseAPI::ProcessPagesInternal(const char *filename, const char *retry_c if (stdInput) { buf.assign((std::istreambuf_iterator(std::cin)), (std::istreambuf_iterator())); data = reinterpret_cast(buf.data()); - } else if (strncmp(filename, "http:", 5) == 0 || strncmp(filename, "https:", 6) == 0) { + } else if (strstr(filename, "://") != nullptr) { // Get image or image list by URL. #ifdef HAVE_LIBCURL CURL *curl = curl_easy_init(); @@ -1161,6 +1143,27 @@ bool TessBaseAPI::ProcessPagesInternal(const char *filename, const char *retry_c if (curlcode != CURLE_OK) { return error("curl_easy_setopt"); } + // Follow HTTP, HTTPS, FTP and FTPS redirects. 
+ curlcode = curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1); + if (curlcode != CURLE_OK) { + return error("curl_easy_setopt"); + } + // Allow no more than 8 redirections to prevent endless loops. + curlcode = curl_easy_setopt(curl, CURLOPT_MAXREDIRS, 8); + if (curlcode != CURLE_OK) { + return error("curl_easy_setopt"); + } + int timeout = curl_timeout; + if (timeout > 0) { + curlcode = curl_easy_setopt(curl, CURLOPT_NOSIGNAL, 1L); + if (curlcode != CURLE_OK) { + return error("curl_easy_setopt"); + } + curlcode = curl_easy_setopt(curl, CURLOPT_TIMEOUT, timeout); + if (curlcode != CURLE_OK) { + return error("curl_easy_setopt"); + } + } curlcode = curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, WriteMemoryCallback); if (curlcode != CURLE_OK) { return error("curl_easy_setopt"); @@ -1277,7 +1280,13 @@ bool TessBaseAPI::ProcessPage(Pix *pix, int page_index, const char *filename, if (tesseract_->tessedit_write_images) { Pix *page_pix = GetThresholdedImage(); - pixWrite("tessinput.tif", page_pix, IFF_TIFF_G4); + std::string output_filename = output_file_ + ".processed"; + if (page_index > 0) { + output_filename += std::to_string(page_index); + } + output_filename += ".tif"; + pixWrite(output_filename.c_str(), page_pix, IFF_TIFF_G4); + pixDestroy(&page_pix); } if (failed && retry_config != nullptr && retry_config[0] != '\0') { @@ -1370,66 +1379,6 @@ char *TessBaseAPI::GetUTF8Text() { return result; } -size_t TessBaseAPI::GetNumberOfTables() const -{ - return constUniqueInstance>().size(); -} - -std::tuple TessBaseAPI::GetTableBoundingBox(unsigned i) -{ - const auto &t = constUniqueInstance>(); - - if (i >= t.size()) { - return std::tuple(0, 0, 0, 0); - } - - const int height = tesseract_->ImageHeight(); - - return std::make_tuple( - t[i].box.left(), height - t[i].box.top(), - t[i].box.right(), height - t[i].box.bottom()); -} - -std::vector> TessBaseAPI::GetTableRows(unsigned i) -{ - const auto &t = constUniqueInstance>(); - - if (i >= t.size()) { - return std::vector>(); - 
} - - std::vector> rows(t[i].rows.size()); - const int height = tesseract_->ImageHeight(); - - for (unsigned j = 0; j < t[i].rows.size(); ++j) { - rows[j] = - std::make_tuple(t[i].rows[j].left(), height - t[i].rows[j].top(), - t[i].rows[j].right(), height - t[i].rows[j].bottom()); - } - - return rows; -} - -std::vector > TessBaseAPI::GetTableCols(unsigned i) -{ - const auto &t = constUniqueInstance>(); - - if (i >= t.size()) { - return std::vector>(); - } - - std::vector> cols(t[i].cols.size()); - const int height = tesseract_->ImageHeight(); - - for (unsigned j = 0; j < t[i].cols.size(); ++j) { - cols[j] = - std::make_tuple(t[i].cols[j].left(), height - t[i].cols[j].top(), - t[i].cols[j].right(), height - t[i].cols[j].bottom()); - } - - return cols; -} - static void AddBoxToTSV(const PageIterator *it, PageIteratorLevel level, std::string &text) { int left, top, right, bottom; it->BoundingBox(level, &left, &top, &right, &bottom); @@ -1966,15 +1915,17 @@ void TessBaseAPI::End() { delete paragraph_models_; paragraph_models_ = nullptr; } +#ifndef DISABLED_LEGACY_ENGINE if (osd_tesseract_ == tesseract_) { osd_tesseract_ = nullptr; } - delete tesseract_; - tesseract_ = nullptr; delete osd_tesseract_; osd_tesseract_ = nullptr; delete equ_detect_; equ_detect_ = nullptr; +#endif // ndef DISABLED_LEGACY_ENGINE + delete tesseract_; + tesseract_ = nullptr; input_file_.clear(); output_file_.clear(); datapath_.clear(); @@ -2127,7 +2078,7 @@ bool TessBaseAPI::Threshold(Pix **pix) { tesseract_->set_pix_grey(nullptr); } } else { - auto [ok, pix_grey, pix_binary, pix_thresholds] = thresholder_->Threshold(thresholding_method); + auto [ok, pix_grey, pix_binary, pix_thresholds] = thresholder_->Threshold(this, thresholding_method); if (!ok) { return false; @@ -2196,6 +2147,7 @@ int TessBaseAPI::FindLines() { Tesseract *osd_tess = osd_tesseract_; OSResults osr; +#ifndef DISABLED_LEGACY_ENGINE if (PSM_OSD_ENABLED(tesseract_->tessedit_pageseg_mode) && osd_tess == nullptr) { if 
(strcmp(language_.c_str(), "osd") == 0) { osd_tess = tesseract_; @@ -2221,6 +2173,7 @@ int TessBaseAPI::FindLines() { } } } +#endif // ndef DISABLED_LEGACY_ENGINE if (tesseract_->SegmentPage(input_file_.c_str(), block_list_, osd_tess, &osr) < 0) { return -1; @@ -2252,8 +2205,6 @@ void TessBaseAPI::ClearResults() { delete paragraph_models_; paragraph_models_ = nullptr; } - - uniqueInstance>().clear(); } /** diff --git a/src/api/capi.cpp b/src/api/capi.cpp index 12459a3aa..87aa38373 100644 --- a/src/api/capi.cpp +++ b/src/api/capi.cpp @@ -254,12 +254,6 @@ char **TessBaseAPIGetAvailableLanguagesAsVector(const TessBaseAPI *handle) { return arr; } -#ifndef DISABLED_LEGACY_ENGINE -int TessBaseAPIInitLangMod(TessBaseAPI *handle, const char *datapath, const char *language) { - return handle->InitLangMod(datapath, language); -} -#endif - void TessBaseAPIInitForAnalysePage(TessBaseAPI *handle) { handle->InitForAnalysePage(); } diff --git a/src/api/hocrrenderer.cpp b/src/api/hocrrenderer.cpp index b7541c3e0..a3b042a31 100644 --- a/src/api/hocrrenderer.cpp +++ b/src/api/hocrrenderer.cpp @@ -37,7 +37,8 @@ static tesseract::Orientation GetBlockTextOrientation(const PageIterator *it) { tesseract::WritingDirection writing_direction; tesseract::TextlineOrder textline_order; float deskew_angle; - it->Orientation(&orientation, &writing_direction, &textline_order, &deskew_angle); + it->Orientation(&orientation, &writing_direction, &textline_order, + &deskew_angle); return orientation; } @@ -49,7 +50,8 @@ static tesseract::Orientation GetBlockTextOrientation(const PageIterator *it) { * method currently only inserts a 'textangle' property to indicate the rotation * direction and does not add any baseline information to the hocr string. 
*/ -static void AddBaselineCoordsTohOCR(const PageIterator *it, PageIteratorLevel level, +static void AddBaselineCoordsTohOCR(const PageIterator *it, + PageIteratorLevel level, std::stringstream &hocr_str) { tesseract::Orientation orientation = GetBlockTextOrientation(it); if (orientation != ORIENTATION_PAGE_UP) { @@ -82,7 +84,8 @@ static void AddBaselineCoordsTohOCR(const PageIterator *it, PageIteratorLevel le double p1 = (y2 - y1) / static_cast(x2 - x1); double p0 = y1 - p1 * x1; - hocr_str << "; baseline " << round(p1 * 1000.0) / 1000.0 << " " << round(p0 * 1000.0) / 1000.0; + hocr_str << "; baseline " << round(p1 * 1000.0) / 1000.0 << " " + << round(p0 * 1000.0) / 1000.0; } static void AddBoxTohOCR(const ResultIterator *it, PageIteratorLevel level, @@ -91,7 +94,8 @@ static void AddBoxTohOCR(const ResultIterator *it, PageIteratorLevel level, it->BoundingBox(level, &left, &top, &right, &bottom); // This is the only place we use double quotes instead of single quotes, // but it may too late to change for consistency - hocr_str << " title=\"bbox " << left << " " << top << " " << right << " " << bottom; + hocr_str << " title=\"bbox " << left << " " << top << " " << right << " " + << bottom; // Add baseline coordinates & heights for textlines only. if (level == RIL_TEXTLINE) { AddBaselineCoordsTohOCR(it, level, hocr_str); @@ -99,8 +103,8 @@ static void AddBoxTohOCR(const ResultIterator *it, PageIteratorLevel level, float row_height, descenders, ascenders; // row attributes it->RowAttributes(&row_height, &descenders, &ascenders); // TODO(rays): Do we want to limit these to a single decimal place? - hocr_str << "; x_size " << row_height << "; x_descenders " << -descenders << "; x_ascenders " - << ascenders; + hocr_str << "; x_size " << row_height << "; x_descenders " << -descenders + << "; x_ascenders " << ascenders; } hocr_str << "\">"; } @@ -128,7 +132,8 @@ char *TessBaseAPI::GetHOCRText(int page_number) { * Returned string must be freed with the delete [] operator. 
*/ char *TessBaseAPI::GetHOCRText(ETEXT_DESC *monitor, int page_number) { - if (tesseract_ == nullptr || (page_res_ == nullptr && Recognize(monitor) < 0)) { + if (tesseract_ == nullptr || + (page_res_ == nullptr && Recognize(monitor) < 0)) { return nullptr; } @@ -147,13 +152,16 @@ char *TessBaseAPI::GetHOCRText(ETEXT_DESC *monitor, int page_number) { #ifdef _WIN32 // convert input name from ANSI encoding to utf-8 - int str16_len = MultiByteToWideChar(CP_ACP, 0, input_file_.c_str(), -1, nullptr, 0); + int str16_len = + MultiByteToWideChar(CP_ACP, 0, input_file_.c_str(), -1, nullptr, 0); wchar_t *uni16_str = new WCHAR[str16_len]; - str16_len = MultiByteToWideChar(CP_ACP, 0, input_file_.c_str(), -1, uni16_str, str16_len); - int utf8_len = - WideCharToMultiByte(CP_UTF8, 0, uni16_str, str16_len, nullptr, 0, nullptr, nullptr); + str16_len = MultiByteToWideChar(CP_ACP, 0, input_file_.c_str(), -1, uni16_str, + str16_len); + int utf8_len = WideCharToMultiByte(CP_UTF8, 0, uni16_str, str16_len, nullptr, + 0, nullptr, nullptr); char *utf8_str = new char[utf8_len]; - WideCharToMultiByte(CP_UTF8, 0, uni16_str, str16_len, utf8_str, utf8_len, nullptr, nullptr); + WideCharToMultiByte(CP_UTF8, 0, uni16_str, str16_len, utf8_str, utf8_len, + nullptr, nullptr); input_file_ = utf8_str; delete[] uni16_str; delete[] utf8_str; @@ -173,8 +181,11 @@ char *TessBaseAPI::GetHOCRText(ETEXT_DESC *monitor, int page_number) { } else { hocr_str << "unknown"; } - hocr_str << "\"; bbox " << rect_left_ << " " << rect_top_ << " " << rect_width_ << " " - << rect_height_ << "; ppageno " << page_number << "'>\n"; + + hocr_str << "\"; bbox " << rect_left_ << " " << rect_top_ << " " + << rect_width_ << " " << rect_height_ << "; ppageno " << page_number + << "; scan_res " << GetSourceYResolution() << " " + << GetSourceYResolution() << "'>\n"; std::unique_ptr res_it(GetIterator()); while (!res_it->Empty(RIL_BLOCK)) { @@ -227,7 +238,8 @@ char *TessBaseAPI::GetHOCRText(ETEXT_DESC *monitor, int page_number) { // 
Now, process the word... int32_t lstm_choice_mode = tesseract_->lstm_choice_mode; - std::vector>>> *rawTimestepMap = nullptr; + std::vector>>> + *rawTimestepMap = nullptr; std::vector>> *CTCMap = nullptr; if (lstm_choice_mode) { CTCMap = res_it->GetBestLSTMSymbolChoices(); @@ -241,10 +253,12 @@ char *TessBaseAPI::GetHOCRText(ETEXT_DESC *monitor, int page_number) { int pointsize, font_id; const char *font_name; res_it->BoundingBox(RIL_WORD, &left, &top, &right, &bottom); - font_name = res_it->WordFontAttributes(&bold, &italic, &underlined, &monospace, &serif, - &smallcaps, &pointsize, &font_id); - hocr_str << " title='bbox " << left << " " << top << " " << right << " " << bottom - << "; x_wconf " << static_cast(res_it->Confidence(RIL_WORD)); + font_name = + res_it->WordFontAttributes(&bold, &italic, &underlined, &monospace, + &serif, &smallcaps, &pointsize, &font_id); + hocr_str << " title='bbox " << left << " " << top << " " << right << " " + << bottom << "; x_wconf " + << static_cast(res_it->Confidence(RIL_WORD)); if (font_info) { if (font_name) { hocr_str << "; x_font " << HOcrEscape(font_name).c_str(); @@ -284,31 +298,36 @@ char *TessBaseAPI::GetHOCRText(ETEXT_DESC *monitor, int page_number) { hocr_str << ""; } do { - const std::unique_ptr grapheme(res_it->GetUTF8Text(RIL_SYMBOL)); + const std::unique_ptr grapheme( + res_it->GetUTF8Text(RIL_SYMBOL)); if (grapheme && grapheme[0] != 0) { if (hocr_boxes) { res_it->BoundingBox(RIL_SYMBOL, &left, &top, &right, &bottom); - hocr_str << "\n "; + hocr_str << "\n "; } hocr_str << HOcrEscape(grapheme.get()).c_str(); if (hocr_boxes) { hocr_str << ""; tesseract::ChoiceIterator ci(*res_it); if (lstm_choice_mode == 1 && ci.Timesteps() != nullptr) { - std::vector>> *symbol = ci.Timesteps(); + std::vector>> *symbol = + ci.Timesteps(); hocr_str << "\n "; - for (auto timestep : *symbol) { + << "symbol_" << page_id << "_" << wcnt << "_" << scnt + << "'>"; + for (const auto ×tep : *symbol) { hocr_str << "\n "; + << "timestep" << 
page_id << "_" << wcnt << "_" << tcnt + << "'>"; for (auto conf : timestep) { hocr_str << "\n " << HOcrEscape(conf.first).c_str() << ""; ++ccnt; @@ -319,19 +338,20 @@ char *TessBaseAPI::GetHOCRText(ETEXT_DESC *monitor, int page_number) { hocr_str << "\n "; ++scnt; } else if (lstm_choice_mode == 2) { - tesseract::ChoiceIterator ci(*res_it); hocr_str << "\n "; + << "lstm_choices_" << page_id << "_" << wcnt << "_" << tcnt + << "'>"; do { const char *choice = ci.GetUTF8Text(); float choiceconf = ci.Confidence(); if (choice != nullptr) { hocr_str << "\n " << HOcrEscape(choice).c_str() - << ""; + << "choice_" << page_id << "_" << wcnt << "_" << ccnt + << "'" + << " title='x_confs " << choiceconf << "'>" + << HOcrEscape(choice).c_str() << ""; ccnt++; } } while (ci.Next()); @@ -350,18 +370,20 @@ char *TessBaseAPI::GetHOCRText(ETEXT_DESC *monitor, int page_number) { } // If the lstm choice mode is required it is added here if (lstm_choice_mode == 1 && !hocr_boxes && rawTimestepMap != nullptr) { - for (auto symbol : *rawTimestepMap) { + for (const auto &symbol : *rawTimestepMap) { hocr_str << "\n "; - for (auto timestep : symbol) { + for (const auto ×tep : symbol) { hocr_str << "\n "; + << "timestep" << page_id << "_" << wcnt << "_" << tcnt + << "'>"; for (auto conf : timestep) { hocr_str << "\n " << HOcrEscape(conf.first).c_str() << ""; ++ccnt; @@ -373,11 +395,12 @@ char *TessBaseAPI::GetHOCRText(ETEXT_DESC *monitor, int page_number) { ++scnt; } } else if (lstm_choice_mode == 2 && !hocr_boxes && CTCMap != nullptr) { - for (auto timestep : *CTCMap) { + for (const auto ×tep : *CTCMap) { if (timestep.size() > 0) { hocr_str << "\n "; + << "lstm_choices_" << page_id << "_" << wcnt << "_" << tcnt + << "'>"; for (auto &j : timestep) { float conf = 100 - tesseract_->lstm_rating_coefficient * j.second; if (conf < 0.0f) { @@ -388,9 +411,10 @@ char *TessBaseAPI::GetHOCRText(ETEXT_DESC *monitor, int page_number) { } hocr_str << "\n " << HOcrEscape(j.first).c_str() - << ""; + << 
"choice_" << page_id << "_" << wcnt << "_" << ccnt + << "'" + << " title='x_confs " << conf << "'>" + << HOcrEscape(j.first).c_str() << ""; ccnt++; } hocr_str << ""; diff --git a/src/api/renderer.cpp b/src/api/renderer.cpp index aa7dd09e7..4f9ec0038 100644 --- a/src/api/renderer.cpp +++ b/src/api/renderer.cpp @@ -31,11 +31,11 @@ namespace tesseract { * Base Renderer interface implementation **********************************************************************/ TessResultRenderer::TessResultRenderer(const char *outputbase, const char *extension) - : file_extension_(extension) + : next_(nullptr) + , fout_(stdout) + , file_extension_(extension) , title_("") , imagenum_(-1) - , fout_(stdout) - , next_(nullptr) , happy_(true) { if (strcmp(outputbase, "-") && strcmp(outputbase, "stdout")) { std::string outfile = std::string(outputbase) + "." + extension; @@ -139,13 +139,13 @@ bool TessTextRenderer::AddImageHandler(TessBaseAPI *api) { return false; } - AppendString(utf8.get()); - const char *pageSeparator = api->GetStringVariable("page_separator"); - if (pageSeparator != nullptr && *pageSeparator != '\0') { + if (pageSeparator != nullptr && *pageSeparator != '\0' && imagenum() > 0) { AppendString(pageSeparator); } + AppendString(utf8.get()); + return true; } diff --git a/src/arch/dotproduct.cpp b/src/arch/dotproduct.cpp index 62bcc00ce..bb9fa9879 100644 --- a/src/arch/dotproduct.cpp +++ b/src/arch/dotproduct.cpp @@ -1,5 +1,5 @@ /////////////////////////////////////////////////////////////////////// -// File: dotproduct.h +// File: dotproduct.cpp // Description: Native dot product function. // // (C) Copyright 2018, Google Inc. @@ -19,9 +19,12 @@ namespace tesseract { // Computes and returns the dot product of the two n-vectors u and v. 
-double DotProductNative(const double *u, const double *v, int n) { - double total = 0.0; - for (int k = 0; k < n; ++k) { +TFloat DotProductNative(const TFloat *u, const TFloat *v, int n) { + TFloat total = 0; +#if defined(OPENMP_SIMD) || defined(_OPENMP) +#pragma omp simd reduction(+:total) +#endif + for (int k = 0; k < n; k++) { total += u[k] * v[k]; } return total; diff --git a/src/arch/dotproduct.h b/src/arch/dotproduct.h index bbdf6df9b..4ee8ddd4d 100644 --- a/src/arch/dotproduct.h +++ b/src/arch/dotproduct.h @@ -17,19 +17,24 @@ #ifndef TESSERACT_ARCH_DOTPRODUCT_H_ #define TESSERACT_ARCH_DOTPRODUCT_H_ +#include "tesstypes.h" + namespace tesseract { // Computes and returns the dot product of the n-vectors u and v. -double DotProductNative(const double *u, const double *v, int n); +TFloat DotProductNative(const TFloat *u, const TFloat *v, int n); // Uses Intel AVX intrinsics to access the SIMD instruction set. -double DotProductAVX(const double *u, const double *v, int n); +TFloat DotProductAVX(const TFloat *u, const TFloat *v, int n); // Use Intel FMA. -double DotProductFMA(const double *u, const double *v, int n); +TFloat DotProductFMA(const TFloat *u, const TFloat *v, int n); // Uses Intel SSE intrinsics to access the SIMD instruction set. -double DotProductSSE(const double *u, const double *v, int n); +TFloat DotProductSSE(const TFloat *u, const TFloat *v, int n); + +// Use NEON intrinsics. +TFloat DotProductNEON(const TFloat *u, const TFloat *v, int n); } // namespace tesseract. diff --git a/src/arch/dotproductavx.cpp b/src/arch/dotproductavx.cpp index 3f243906d..98e829959 100644 --- a/src/arch/dotproductavx.cpp +++ b/src/arch/dotproductavx.cpp @@ -29,6 +29,28 @@ namespace tesseract { // Computes and returns the dot product of the n-vectors u and v. // Uses Intel AVX intrinsics to access the SIMD instruction set. 
+#if defined(FAST_FLOAT) +float DotProductAVX(const float *u, const float *v, int n) { + const unsigned quot = n / 8; + const unsigned rem = n % 8; + __m256 t0 = _mm256_setzero_ps(); + for (unsigned k = 0; k < quot; k++) { + __m256 f0 = _mm256_loadu_ps(u); + __m256 f1 = _mm256_loadu_ps(v); + f0 = _mm256_mul_ps(f0, f1); + t0 = _mm256_add_ps(t0, f0); + u += 8; + v += 8; + } + alignas(32) float tmp[8]; + _mm256_store_ps(tmp, t0); + float result = tmp[0] + tmp[1] + tmp[2] + tmp[3] + tmp[4] + tmp[5] + tmp[6] + tmp[7]; + for (unsigned k = 0; k < rem; k++) { + result += *u++ * *v++; + } + return result; +} +#else double DotProductAVX(const double *u, const double *v, int n) { const unsigned quot = n / 8; const unsigned rem = n % 8; @@ -57,6 +79,7 @@ double DotProductAVX(const double *u, const double *v, int n) { } return result; } +#endif } // namespace tesseract. diff --git a/src/arch/dotproductfma.cpp b/src/arch/dotproductfma.cpp index ede46298e..01a788948 100644 --- a/src/arch/dotproductfma.cpp +++ b/src/arch/dotproductfma.cpp @@ -29,6 +29,34 @@ namespace tesseract { // Computes and returns the dot product of the n-vectors u and v. // Uses Intel FMA intrinsics to access the SIMD instruction set. 
+#if defined(FAST_FLOAT) +float DotProductFMA(const float *u, const float *v, int n) { + const unsigned quot = n / 16; + const unsigned rem = n % 16; + __m256 t0 = _mm256_setzero_ps(); + __m256 t1 = _mm256_setzero_ps(); + for (unsigned k = 0; k < quot; k++) { + __m256 f0 = _mm256_loadu_ps(u); + __m256 f1 = _mm256_loadu_ps(v); + t0 = _mm256_fmadd_ps(f0, f1, t0); + u += 8; + v += 8; + __m256 f2 = _mm256_loadu_ps(u); + __m256 f3 = _mm256_loadu_ps(v); + t1 = _mm256_fmadd_ps(f2, f3, t1); + u += 8; + v += 8; + } + t0 = _mm256_hadd_ps(t0, t1); + alignas(32) float tmp[8]; + _mm256_store_ps(tmp, t0); + float result = tmp[0] + tmp[1] + tmp[2] + tmp[3] + tmp[4] + tmp[5] + tmp[6] + tmp[7]; + for (unsigned k = 0; k < rem; k++) { + result += *u++ * *v++; + } + return result; +} +#else double DotProductFMA(const double *u, const double *v, int n) { const unsigned quot = n / 8; const unsigned rem = n % 8; @@ -55,6 +83,7 @@ double DotProductFMA(const double *u, const double *v, int n) { } return result; } +#endif } // namespace tesseract. diff --git a/src/arch/dotproductneon.cpp b/src/arch/dotproductneon.cpp new file mode 100644 index 000000000..af68b12d8 --- /dev/null +++ b/src/arch/dotproductneon.cpp @@ -0,0 +1,71 @@ +/////////////////////////////////////////////////////////////////////// +// File: dotproductneon.cpp +// Description: Dot product function for ARM NEON. +// Author: Stefan Weil +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+/////////////////////////////////////////////////////////////////////// + +#if defined(__ARM_NEON) + +#include +#include "dotproduct.h" + +namespace tesseract { + +// Documentation: +// https://developer.arm.com/architectures/instruction-sets/intrinsics/ + +#if defined(FAST_FLOAT) && defined(__ARM_ARCH_ISA_A64) + +float DotProductNEON(const float *u, const float *v, int n) { + float32x4_t result0123 = vdupq_n_f32(0.0f); + float32x4_t result4567 = vdupq_n_f32(0.0f); + while (n > 7) { + // Calculate 8 dot products per iteration. + float32x4_t u0 = vld1q_f32(u); + float32x4_t v0 = vld1q_f32(v); + float32x4_t u4 = vld1q_f32(u + 4); + float32x4_t v4 = vld1q_f32(v + 4); + result0123 = vfmaq_f32(result0123, u0, v0); + result4567 = vfmaq_f32(result4567, u4, v4); + u += 8; + v += 8; + n -= 8; + } + float total = vaddvq_f32(result0123); + total += vaddvq_f32(result4567); + while (n > 0) { + total += *u++ * *v++; + n--; + } + return total; +} + +#else + +// Computes and returns the dot product of the two n-vectors u and v. +TFloat DotProductNEON(const TFloat *u, const TFloat *v, int n) { + TFloat total = 0; +#if defined(OPENMP_SIMD) || defined(_OPENMP) +#pragma omp simd reduction(+:total) +#endif + for (int k = 0; k < n; k++) { + total += u[k] * v[k]; + } + return total; +} + +#endif + +} // namespace tesseract + +#endif /* __ARM_NEON */ diff --git a/src/arch/dotproductsse.cpp b/src/arch/dotproductsse.cpp index 1dbd18fb8..9122e9d1b 100644 --- a/src/arch/dotproductsse.cpp +++ b/src/arch/dotproductsse.cpp @@ -30,6 +30,66 @@ namespace tesseract { // Computes and returns the dot product of the n-vectors u and v. // Uses Intel SSE intrinsics to access the SIMD instruction set. +#if defined(FAST_FLOAT) +float DotProductSSE(const float *u, const float *v, int n) { + int max_offset = n - 4; + int offset = 0; + // Accumulate a set of 4 sums in sum, by loading pairs of 4 values from u and + // v, and multiplying them together in parallel. 
+ __m128 sum = _mm_setzero_ps(); + if (offset <= max_offset) { + offset = 4; + // Aligned load is reputedly faster but requires 16 byte aligned input. + if ((reinterpret_cast(u) & 15) == 0 && + (reinterpret_cast(v) & 15) == 0) { + // Use aligned load. + sum = _mm_load_ps(u); + __m128 floats2 = _mm_load_ps(v); + // Multiply. + sum = _mm_mul_ps(sum, floats2); + while (offset <= max_offset) { + __m128 floats1 = _mm_load_ps(u + offset); + floats2 = _mm_load_ps(v + offset); + floats1 = _mm_mul_ps(floats1, floats2); + sum = _mm_add_ps(sum, floats1); + offset += 4; + } + } else { + // Use unaligned load. + sum = _mm_loadu_ps(u); + __m128 floats2 = _mm_loadu_ps(v); + // Multiply. + sum = _mm_mul_ps(sum, floats2); + while (offset <= max_offset) { + __m128 floats1 = _mm_loadu_ps(u + offset); + floats2 = _mm_loadu_ps(v + offset); + floats1 = _mm_mul_ps(floats1, floats2); + sum = _mm_add_ps(sum, floats1); + offset += 4; + } + } + } + // Add the 4 sums in sum horizontally. +#if 0 + alignas(32) float tmp[4]; + _mm_store_ps(tmp, sum); + float result = tmp[0] + tmp[1] + tmp[2] + tmp[3]; +#else + __m128 zero = _mm_setzero_ps(); + // https://www.felixcloutier.com/x86/haddps + sum = _mm_hadd_ps(sum, zero); + sum = _mm_hadd_ps(sum, zero); + // Extract the low result. + float result = _mm_cvtss_f32(sum); +#endif + // Add on any left-over products. + while (offset < n) { + result += u[offset] * v[offset]; + ++offset; + } + return result; +} +#else double DotProductSSE(const double *u, const double *v, int n) { int max_offset = n - 2; int offset = 0; @@ -39,7 +99,8 @@ double DotProductSSE(const double *u, const double *v, int n) { if (offset <= max_offset) { offset = 2; // Aligned load is reputedly faster but requires 16 byte aligned input. - if ((reinterpret_cast(u) & 15) == 0 && (reinterpret_cast(v) & 15) == 0) { + if ((reinterpret_cast(u) & 15) == 0 && + (reinterpret_cast(v) & 15) == 0) { // Use aligned load. 
sum = _mm_load_pd(u); __m128d floats2 = _mm_load_pd(v); @@ -78,6 +139,7 @@ double DotProductSSE(const double *u, const double *v, int n) { } return result; } +#endif } // namespace tesseract. diff --git a/src/arch/intsimdmatrix.cpp b/src/arch/intsimdmatrix.cpp index 5d113542c..fa4afa7c8 100644 --- a/src/arch/intsimdmatrix.cpp +++ b/src/arch/intsimdmatrix.cpp @@ -76,7 +76,7 @@ void IntSimdMatrix::Init(const GENERIC_2D_ARRAY &w, std::vector // u is imagined to have an extra element at the end with value 1, to // implement the bias, but it doesn't actually have it. void IntSimdMatrix::MatrixDotVector(const GENERIC_2D_ARRAY &w, - const std::vector &scales, const int8_t *u, double *v) { + const std::vector &scales, const int8_t *u, TFloat *v) { int num_out = w.dim1(); int num_in = w.dim2() - 1; // Base implementation. diff --git a/src/arch/intsimdmatrix.h b/src/arch/intsimdmatrix.h index c2947b06f..d93f928db 100644 --- a/src/arch/intsimdmatrix.h +++ b/src/arch/intsimdmatrix.h @@ -23,6 +23,8 @@ #include #include +#include "tesstypes.h" + namespace tesseract { template @@ -78,8 +80,8 @@ struct TESS_API IntSimdMatrix { // u is imagined to have an extra element at the end with value 1, to // implement the bias, but it doesn't actually have it. // Computes the base C++ implementation. - static void MatrixDotVector(const GENERIC_2D_ARRAY &w, const std::vector &scales, - const int8_t *u, double *v); + static void MatrixDotVector(const GENERIC_2D_ARRAY &w, const std::vector &scales, + const int8_t *u, TFloat *v); // Rounds the input up to a multiple of the given factor. static int Roundup(int input, int factor) { @@ -95,8 +97,8 @@ struct TESS_API IntSimdMatrix { // RoundInputs above. // The input will be over-read to the extent of the padding. There are no // alignment requirements. 
- using MatrixDotVectorFunction = void (*)(int, int, const int8_t *, const double *, const int8_t *, - double *); + using MatrixDotVectorFunction = void (*)(int, int, const int8_t *, const TFloat *, const int8_t *, + TFloat *); MatrixDotVectorFunction matrixDotVectorFunction; // Number of 32 bit outputs held in each register. @@ -113,7 +115,7 @@ struct TESS_API IntSimdMatrix { static const IntSimdMatrix *intSimdMatrix; // Only available with NEON. static const IntSimdMatrix intSimdMatrixNEON; - // Only available with AVX2 / SSE. + // Only available with AVX2 / AVX / FMA / SSE. static const IntSimdMatrix intSimdMatrixAVX2; static const IntSimdMatrix intSimdMatrixSSE; }; diff --git a/src/arch/intsimdmatrixavx2.cpp b/src/arch/intsimdmatrixavx2.cpp index ce5d8ea9f..b333cf51a 100644 --- a/src/arch/intsimdmatrixavx2.cpp +++ b/src/arch/intsimdmatrixavx2.cpp @@ -15,14 +15,13 @@ // limitations under the License. /////////////////////////////////////////////////////////////////////// +#include "intsimdmatrix.h" + #if !defined(__AVX2__) # if defined(__i686__) || defined(__x86_64__) # error Implementation only for AVX2 capable architectures # endif #else - -# include "intsimdmatrix.h" - # include # include # include @@ -86,6 +85,243 @@ static inline __m128i load64_to_128(const int8_t *wi_) { return _mm_set_epi64x(0, wi[0]); } +#if defined(FAST_FLOAT) + +static inline void ExtractResults8(__m256i result, const int8_t *wi, + const float *scales, float *v) { + __m128i w128 = load64_to_128(wi); // 8x8bit vals in bottom of 128bit reg + __m256i w256 = _mm256_cvtepi8_epi32(w128); // 8x32bit vals in 256bit reg + __m256i bias_scale = _mm256_set_epi32(127, 127, 127, 127, 127, 127, 127, 127); + __m256 scale01234567 = _mm256_loadu_ps(scales); + w256 = _mm256_mullo_epi32(w256, bias_scale); // 8x32 + result = _mm256_add_epi32(result, w256); // result += bias * 127 + __m256 res01234567 = _mm256_cvtepi32_ps(result); + result = _mm256_permute4x64_epi64(result, 2 + (3 << 2)); + res01234567 = 
_mm256_mul_ps(res01234567, scale01234567); + _mm256_storeu_ps(v, res01234567); +} + +static inline void ExtractResults16(__m256i result0, __m256i result1, + const int8_t *&wi, const float *&scales, + float *&v) { + __m128i w8 = _mm_loadu_si128(reinterpret_cast(wi)); + // 8x8bit vals in bottom of 128bit reg + const __m256i bias_scale = + _mm256_set_epi32(127, 127, 127, 127, 127, 127, 127, 127); + __m256i w256 = _mm256_cvtepi8_epi32(w8); // 8x32bit vals in 256bit reg + __m256 scale01234567 = _mm256_loadu_ps(scales); + w256 = _mm256_mullo_epi32(w256, bias_scale); // 8x32 + result0 = _mm256_add_epi32(result0, w256); // result += bias * 127 + __m256 res01234567 = _mm256_cvtepi32_ps(result0); + result0 = _mm256_permute4x64_epi64(result0, 2 + (3 << 2)); + res01234567 = _mm256_mul_ps(res01234567, scale01234567); + _mm256_storeu_ps(v, res01234567); + w8 = _mm_shuffle_epi32(w8, 2 + (3 << 2)); + w256 = _mm256_cvtepi8_epi32(w8); // 8x32bit vals in 256bit reg + scale01234567 = _mm256_loadu_ps(scales + 8); + w256 = _mm256_mullo_epi32(w256, bias_scale); // 8x32 + result1 = _mm256_add_epi32(result1, w256); // result += bias * 127 + res01234567 = _mm256_cvtepi32_ps(result1); + result1 = _mm256_permute4x64_epi64(result1, 2 + (3 << 2)); + res01234567 = _mm256_mul_ps(res01234567, scale01234567); + _mm256_storeu_ps(v + 8, res01234567); + wi += 16; + scales += 16; + v += 16; +} + +// Computes part of matrix.vector v = Wu. Computes N=64 results. +// The weights *must* be arranged so that consecutive reads from wi +// provides (num_in/kNumInputsPerGroup groups of (N output dim groups of +// (kNumInputsPerGroup inputs))). After that there must be N consecutive +// bias weights, before continuing with any more weights. +// u must be padded out with zeros to +// kNumInputsPerGroup*ceil(num_in/kNumInputsPerGroup) elements. 
+static void PartialMatrixDotVector64(const int8_t *wi, const float *scales, const int8_t *u, + int num_in, float *v) { + // Register containing 16-bit ones for horizontal add with 16->32 bit + // conversion. + __m256i ones = _mm256_set_epi16(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1); + __m256i shift_id = _mm256_set_epi32(0, 7, 6, 5, 4, 3, 2, 1); + // Initialize all the results to 0. + __m256i result0 = _mm256_setzero_si256(); + __m256i result1 = _mm256_setzero_si256(); + __m256i result2 = _mm256_setzero_si256(); + __m256i result3 = _mm256_setzero_si256(); + __m256i result4 = _mm256_setzero_si256(); + __m256i result5 = _mm256_setzero_si256(); + __m256i result6 = _mm256_setzero_si256(); + __m256i result7 = _mm256_setzero_si256(); + // Iterate over the input (u), one registerful at a time. + for (int j = 0; j < num_in;) { + __m256i inputs = _mm256_loadu_si256(reinterpret_cast(u + j)); + // Inputs are processed in groups of kNumInputsPerGroup, replicated + // kNumInputGroups times. + for (int ig = 0; ig < kNumInputGroups && j < num_in; ++ig, j += kNumInputsPerGroup) { + // Replicate the low 32 bits (4 inputs) 8 times. + __m256i rep_input = _mm256_broadcastd_epi32(_mm256_castsi256_si128(inputs)); + // Rotate the inputs in groups of 4, so the next 4 inputs are ready. + inputs = _mm256_permutevar8x32_epi32(inputs, shift_id); + __m256i weights, reps; + // Mul-add, with horizontal add of the 4 inputs to each of the results. 
+ MultiplyGroup(rep_input, ones, wi, weights, reps, result0); + MultiplyGroup(rep_input, ones, wi, weights, reps, result1); + MultiplyGroup(rep_input, ones, wi, weights, reps, result2); + MultiplyGroup(rep_input, ones, wi, weights, reps, result3); + MultiplyGroup(rep_input, ones, wi, weights, reps, result4); + MultiplyGroup(rep_input, ones, wi, weights, reps, result5); + MultiplyGroup(rep_input, ones, wi, weights, reps, result6); + MultiplyGroup(rep_input, ones, wi, weights, reps, result7); + } + } + ExtractResults16(result0, result1, wi, scales, v); + ExtractResults16(result2, result3, wi, scales, v); + ExtractResults16(result4, result5, wi, scales, v); + ExtractResults16(result6, result7, wi, scales, v); +} + +// Computes part of matrix.vector v = Wu. Computes N=32 results. +// For details see PartialMatrixDotVector64 with N=32. +static void PartialMatrixDotVector32(const int8_t *wi, const float *scales, const int8_t *u, + int num_in, float *v) { + // Register containing 16-bit ones for horizontal add with 16->32 bit + // conversion. + __m256i ones = _mm256_set_epi16(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1); + __m256i shift_id = _mm256_set_epi32(0, 7, 6, 5, 4, 3, 2, 1); + // Initialize all the results to 0. + __m256i result0 = _mm256_setzero_si256(); + __m256i result1 = _mm256_setzero_si256(); + __m256i result2 = _mm256_setzero_si256(); + __m256i result3 = _mm256_setzero_si256(); + // Iterate over the input (u), one registerful at a time. + for (int j = 0; j < num_in;) { + __m256i inputs = _mm256_loadu_si256(reinterpret_cast(u + j)); + // Inputs are processed in groups of kNumInputsPerGroup, replicated + // kNumInputGroups times. + for (int ig = 0; ig < kNumInputGroups && j < num_in; ++ig, j += kNumInputsPerGroup) { + // Replicate the low 32 bits (4 inputs) 8 times. + __m256i rep_input = _mm256_broadcastd_epi32(_mm256_castsi256_si128(inputs)); + // Rotate the inputs in groups of 4, so the next 4 inputs are ready. 
+ inputs = _mm256_permutevar8x32_epi32(inputs, shift_id); + __m256i weights, reps; + // Mul-add, with horizontal add of the 4 inputs to each of the results. + MultiplyGroup(rep_input, ones, wi, weights, reps, result0); + MultiplyGroup(rep_input, ones, wi, weights, reps, result1); + MultiplyGroup(rep_input, ones, wi, weights, reps, result2); + MultiplyGroup(rep_input, ones, wi, weights, reps, result3); + } + } + ExtractResults16(result0, result1, wi, scales, v); + ExtractResults16(result2, result3, wi, scales, v); +} + +// Computes part of matrix.vector v = Wu. Computes N=16 results. +// For details see PartialMatrixDotVector64 with N=16. +static void PartialMatrixDotVector16(const int8_t *wi, const float *scales, const int8_t *u, + int num_in, float *v) { + // Register containing 16-bit ones for horizontal add with 16->32 bit + // conversion. + __m256i ones = _mm256_set_epi16(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1); + __m256i shift_id = _mm256_set_epi32(0, 7, 6, 5, 4, 3, 2, 1); + // Initialize all the results to 0. + __m256i result0 = _mm256_setzero_si256(); + __m256i result1 = _mm256_setzero_si256(); + // Iterate over the input (u), one registerful at a time. + for (int j = 0; j < num_in;) { + __m256i inputs = _mm256_loadu_si256(reinterpret_cast(u + j)); + // Inputs are processed in groups of kNumInputsPerGroup, replicated + // kNumInputGroups times. + for (int ig = 0; ig < kNumInputGroups && j < num_in; ++ig, j += kNumInputsPerGroup) { + // Replicate the low 32 bits (4 inputs) 8 times. + __m256i rep_input = _mm256_broadcastd_epi32(_mm256_castsi256_si128(inputs)); + // Rotate the inputs in groups of 4, so the next 4 inputs are ready. + inputs = _mm256_permutevar8x32_epi32(inputs, shift_id); + __m256i weights, reps; + // Mul-add, with horizontal add of the 4 inputs to each of the results. 
+ MultiplyGroup(rep_input, ones, wi, weights, reps, result0); + MultiplyGroup(rep_input, ones, wi, weights, reps, result1); + } + } + ExtractResults16(result0, result1, wi, scales, v); +} + +// Computes part of matrix.vector v = Wu. Computes N=8 results. +// For details see PartialMatrixDotVector64 with N=8. +static inline void PartialMatrixDotVector8(const int8_t *wi, const float *scales, const int8_t *u, + int num_in, float *v) { + // Register containing 16-bit ones for horizontal add with 16->32 bit + // conversion. + __m256i ones = _mm256_set_epi16(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1); + __m256i shift_id = _mm256_set_epi32(0, 7, 6, 5, 4, 3, 2, 1); + // Initialize all the results to 0. + __m256i result0 = _mm256_setzero_si256(); + // Iterate over the input (u), one registerful at a time. + for (int j = 0; j < num_in;) { + __m256i inputs = _mm256_loadu_si256(reinterpret_cast(u + j)); + // Inputs are processed in groups of kNumInputsPerGroup, replicated + // kNumInputGroups times. + for (int ig = 0; ig < kNumInputGroups && j < num_in; ++ig, j += kNumInputsPerGroup) { + // Replicate the low 32 bits (4 inputs) 8 times. + __m256i rep_input = _mm256_broadcastd_epi32(_mm256_castsi256_si128(inputs)); + // Rotate the inputs in groups of 4, so the next 4 inputs are ready. + inputs = _mm256_permutevar8x32_epi32(inputs, shift_id); + __m256i weights, reps; + // Mul-add, with horizontal add of the 4 inputs to each of the results. + MultiplyGroup(rep_input, ones, wi, weights, reps, result0); + } + } + ExtractResults8(result0, wi, scales, v); +} + +static void matrixDotVector(int dim1, int dim2, const int8_t *wi, const float *scales, + const int8_t *u, float *v) { + const int num_out = dim1; + const int num_in = dim2 - 1; + // Each call to a partial_func_ produces group_size outputs, except the + // last one, which can produce less. 
+ const int rounded_num_in = IntSimdMatrix::Roundup(num_in, kNumInputsPerGroup); + const int rounded_num_out = IntSimdMatrix::Roundup(num_out, kNumOutputsPerRegister); + int group_size = kNumOutputsPerRegister * kMaxOutputRegisters; + int output = 0; + + int w_step = (rounded_num_in + 1) * group_size; + + // Run with this group size, until it would produce too much output, then + // switch to a smaller size. + for (; output + group_size <= rounded_num_out; output += group_size) { + PartialMatrixDotVector64(wi, scales, u, rounded_num_in, v); + wi += w_step; + scales += group_size; + v += group_size; + } + group_size /= 2; + w_step /= 2; + + if (output + group_size <= rounded_num_out) { + PartialMatrixDotVector32(wi, scales, u, rounded_num_in, v); + wi += w_step; + scales += group_size; + v += group_size; + output += group_size; + } + group_size /= 2; + w_step /= 2; + + if (output + group_size <= rounded_num_out) { + PartialMatrixDotVector16(wi, scales, u, rounded_num_in, v); + wi += w_step; + scales += group_size; + v += group_size; + output += group_size; + } + group_size /= 2; + w_step /= 2; + + if (output + group_size <= rounded_num_out) { + PartialMatrixDotVector8(wi, scales, u, rounded_num_in, v); + } +} +#else static inline void ExtractResults8(__m256i result, const int8_t *wi, const double *scales, double *v) { __m128i w128 = load64_to_128(wi); // 8x8bit vals in bottom of 128bit reg @@ -330,6 +566,7 @@ static void matrixDotVector(int dim1, int dim2, const int8_t *wi, const double * PartialMatrixDotVector8(wi, scales, u, rounded_num_in, v); } } +#endif const IntSimdMatrix IntSimdMatrix::intSimdMatrixAVX2 = { // Function. @@ -341,7 +578,8 @@ const IntSimdMatrix IntSimdMatrix::intSimdMatrixAVX2 = { // Number of 8 bit inputs in the inputs register. kNumInputsPerRegister, // Number of inputs in each weight group. - kNumInputsPerGroup}; + kNumInputsPerGroup +}; } // namespace tesseract. 
diff --git a/src/arch/intsimdmatrixneon.cpp b/src/arch/intsimdmatrixneon.cpp index cd44c639d..6dadd8878 100644 --- a/src/arch/intsimdmatrixneon.cpp +++ b/src/arch/intsimdmatrixneon.cpp @@ -19,6 +19,7 @@ #if defined(__ARM_NEON) # include "intsimdmatrix.h" +# include "tesstypes.h" # include # include @@ -52,9 +53,9 @@ constexpr int kNumInputsPerGroup = 8; // u must be padded out with zeros to // kNumInputsPerGroup*ceil(num_in/kNumInputsPerGroup) elements. static inline void PartialMatrixDotVector8(const int8_t *__restrict wi, - const double *__restrict scales, + const TFloat *__restrict scales, const int8_t *__restrict u, int num_in, - double *__restrict v, int num_out) { + TFloat *__restrict v, int num_out) { // Initialize all the results to 0. int32x4_t result0123 = {0, 0, 0, 0}; int32x4_t result4567 = {0, 0, 0, 0}; @@ -163,8 +164,8 @@ static inline void PartialMatrixDotVector8(const int8_t *__restrict wi, } } -static void matrixDotVector(int dim1, int dim2, const int8_t *wi, const double *scales, - const int8_t *u, double *v) { +static void matrixDotVector(int dim1, int dim2, const int8_t *wi, const TFloat *scales, + const int8_t *u, TFloat *v) { const int num_out = dim1; const int num_in = dim2 - 1; // Each call to a partial_func_ produces group_size outputs, except the @@ -196,7 +197,8 @@ const IntSimdMatrix IntSimdMatrix::intSimdMatrixNEON = { // Number of 8 bit inputs in the inputs register. kNumInputsPerRegister, // Number of inputs in each weight group. - kNumInputsPerGroup}; + kNumInputsPerGroup +}; } // namespace tesseract. diff --git a/src/arch/intsimdmatrixsse.cpp b/src/arch/intsimdmatrixsse.cpp index 7af6f81be..98fecc88f 100644 --- a/src/arch/intsimdmatrixsse.cpp +++ b/src/arch/intsimdmatrixsse.cpp @@ -69,15 +69,15 @@ static int32_t IntDotProductSSE(const int8_t *u, const int8_t *v, int n) { } // Computes part of matrix.vector v = Wu. Computes 1 result. 
-static void PartialMatrixDotVector1(const int8_t *wi, const double *scales, const int8_t *u, - int num_in, double *v) { - double total = IntDotProductSSE(u, wi, num_in); +static void PartialMatrixDotVector1(const int8_t *wi, const TFloat *scales, const int8_t *u, + int num_in, TFloat *v) { + TFloat total = IntDotProductSSE(u, wi, num_in); // Add in the bias and correct for integer values. *v = (total + wi[num_in] * INT8_MAX) * *scales; } -static void matrixDotVector(int dim1, int dim2, const int8_t *wi, const double *scales, - const int8_t *u, double *v) { +static void matrixDotVector(int dim1, int dim2, const int8_t *wi, const TFloat *scales, + const int8_t *u, TFloat *v) { const int num_out = dim1; const int num_in = dim2 - 1; int output = 0; @@ -99,7 +99,8 @@ const IntSimdMatrix IntSimdMatrix::intSimdMatrixSSE = { // Number of 8 bit inputs in the inputs register. 1, // Number of inputs in each weight group. - 1}; + 1 +}; } // namespace tesseract. diff --git a/src/arch/simddetect.cpp b/src/arch/simddetect.cpp index a14bd19ac..bb0efcf5d 100644 --- a/src/arch/simddetect.cpp +++ b/src/arch/simddetect.cpp @@ -25,6 +25,21 @@ #include "simddetect.h" #include "tprintf.h" // for tprintf +#if !defined(__clang__) && defined(__GNUC__) && (__GNUC__ < 12) +// The GNU compiler g++ fails to compile with the Accelerate framework +// (tested with versions 10 and 11), so unconditionally disable it. +#undef HAVE_FRAMEWORK_ACCELERATE +#endif + +#if defined(HAVE_FRAMEWORK_ACCELERATE) + +// Use Apple Accelerate framework. 
+// https://developer.apple.com/documentation/accelerate/simd + +#include + +#endif + #if defined(HAVE_AVX) || defined(HAVE_AVX2) || defined(HAVE_FMA) || defined(HAVE_SSE4_1) # define HAS_CPUID #endif @@ -83,9 +98,22 @@ bool SIMDDetect::fma_available_; bool SIMDDetect::sse_available_; #endif +#if defined(HAVE_FRAMEWORK_ACCELERATE) +static TFloat DotProductAccelerate(const TFloat* u, const TFloat* v, int n) { + TFloat total = 0; + const int stride = 1; +#if defined(FAST_FLOAT) + vDSP_dotpr(u, stride, v, stride, &total, n); +#else + vDSP_dotprD(u, stride, v, stride, &total, n); +#endif + return total; +} +#endif + // Computes and returns the dot product of the two n-vectors u and v. -static double DotProductGeneric(const double *u, const double *v, int n) { - double total = 0.0; +static TFloat DotProductGeneric(const TFloat *u, const TFloat *v, int n) { + TFloat total = 0; for (int k = 0; k < n; ++k) { total += u[k] * v[k]; } @@ -93,8 +121,8 @@ static double DotProductGeneric(const double *u, const double *v, int n) { } // Compute dot product using std::inner_product. -static double DotProductStdInnerProduct(const double *u, const double *v, int n) { - return std::inner_product(u, u + n, v, 0.0); +static TFloat DotProductStdInnerProduct(const TFloat *u, const TFloat *v, int n) { + return std::inner_product(u, u + n, v, static_cast(0)); } static void SetDotProduct(DotProductFunction f, const IntSimdMatrix *m = nullptr) { @@ -215,64 +243,90 @@ SIMDDetect::SIMDDetect() { #if defined(HAVE_NEON) || defined(__aarch64__) } else if (neon_available_) { // NEON detected. - SetDotProduct(DotProduct, &IntSimdMatrix::intSimdMatrixNEON); + SetDotProduct(DotProductNEON, &IntSimdMatrix::intSimdMatrixNEON); #endif } + + const char *dotproduct_env = getenv("DOTPRODUCT"); + if (dotproduct_env != nullptr) { + // Override automatic settings by value from environment variable. 
+ dotproduct = dotproduct_env; + Update(); + } } void SIMDDetect::Update() { // Select code for calculation of dot product based on the // value of the config variable if that value is not empty. const char *dotproduct_method = "generic"; - if (!strcmp(dotproduct.c_str(), "auto")) { + if (dotproduct == "auto") { // Automatic detection. Nothing to be done. - } else if (!strcmp(dotproduct.c_str(), "generic")) { + } else if (dotproduct == "generic") { // Generic code selected by config variable. SetDotProduct(DotProductGeneric); dotproduct_method = "generic"; - } else if (!strcmp(dotproduct.c_str(), "native")) { + } else if (dotproduct == "native") { // Native optimized code selected by config variable. - SetDotProduct(DotProductNative); + SetDotProduct(DotProductNative, IntSimdMatrix::intSimdMatrix); dotproduct_method = "native"; #if defined(HAVE_AVX2) - } else if (!strcmp(dotproduct.c_str(), "avx2")) { + } else if (dotproduct == "avx2") { // AVX2 selected by config variable. SetDotProduct(DotProductAVX, &IntSimdMatrix::intSimdMatrixAVX2); dotproduct_method = "avx2"; #endif #if defined(HAVE_AVX) - } else if (!strcmp(dotproduct.c_str(), "avx")) { + } else if (dotproduct == "avx") { // AVX selected by config variable. SetDotProduct(DotProductAVX, &IntSimdMatrix::intSimdMatrixSSE); dotproduct_method = "avx"; #endif #if defined(HAVE_FMA) - } else if (!strcmp(dotproduct.c_str(), "fma")) { + } else if (dotproduct == "fma") { // FMA selected by config variable. SetDotProduct(DotProductFMA, IntSimdMatrix::intSimdMatrix); dotproduct_method = "fma"; #endif #if defined(HAVE_SSE4_1) - } else if (!strcmp(dotproduct.c_str(), "sse")) { + } else if (dotproduct == "sse") { // SSE selected by config variable. 
SetDotProduct(DotProductSSE, &IntSimdMatrix::intSimdMatrixSSE); dotproduct_method = "sse"; #endif - } else if (!strcmp(dotproduct.c_str(), "std::inner_product")) { +#if defined(HAVE_FRAMEWORK_ACCELERATE) + } else if (dotproduct == "accelerate") { + SetDotProduct(DotProductAccelerate, IntSimdMatrix::intSimdMatrix); +#endif +#if defined(HAVE_NEON) || defined(__aarch64__) + } else if (dotproduct == "neon" && neon_available_) { + // NEON selected by config variable. + SetDotProduct(DotProductNEON, &IntSimdMatrix::intSimdMatrixNEON); + dotproduct_method = "neon"; +#endif + } else if (dotproduct == "std::inner_product") { // std::inner_product selected by config variable. - SetDotProduct(DotProductStdInnerProduct); + SetDotProduct(DotProductStdInnerProduct, IntSimdMatrix::intSimdMatrix); dotproduct_method = "std::inner_product"; } else { // Unsupported value of config variable. tprintf("Warning, ignoring unsupported config variable value: dotproduct=%s\n", dotproduct.c_str()); tprintf( - "Support values for dotproduct: auto generic native" + "Supported values for dotproduct: auto generic native" +#if defined(HAVE_AVX2) + " avx2" +#endif #if defined(HAVE_AVX) " avx" #endif +#if defined(HAVE_FMA) + " fma" +#endif #if defined(HAVE_SSE4_1) " sse" +#endif +#if defined(HAVE_FRAMEWORK_ACCELERATE) + " accelerate" #endif " std::inner_product.\n"); } diff --git a/src/arch/simddetect.h b/src/arch/simddetect.h index e986a1eca..409319465 100644 --- a/src/arch/simddetect.h +++ b/src/arch/simddetect.h @@ -18,11 +18,12 @@ #define TESSERACT_ARCH_SIMDDETECT_H_ #include +#include "tesstypes.h" namespace tesseract { // Function pointer for best calculation of dot product. -using DotProductFunction = double (*)(const double *, const double *, int); +using DotProductFunction = TFloat (*)(const TFloat *, const TFloat *, int); extern DotProductFunction DotProduct; // Architecture detector. 
Add code here to detect any other architectures for diff --git a/src/ccmain/applybox.cpp b/src/ccmain/applybox.cpp index f5559448b..708114234 100644 --- a/src/ccmain/applybox.cpp +++ b/src/ccmain/applybox.cpp @@ -243,7 +243,7 @@ void Tesseract::MaximallyChopWord(const std::vector &boxes, BLOCK *block, std::vector blob_choices; ASSERT_HOST(!word_res->chopped_word->blobs.empty()); auto rating = static_cast(INT8_MAX); - for (int i = 0; i < word_res->chopped_word->NumBlobs(); ++i) { + for (unsigned i = 0; i < word_res->chopped_word->NumBlobs(); ++i) { // The rating and certainty are not quite arbitrary. Since // select_blob_to_chop uses the worst certainty to choose, they all have // to be different, so starting with INT8_MAX, subtract 1/8 for each blob @@ -257,7 +257,7 @@ void Tesseract::MaximallyChopWord(const std::vector &boxes, BLOCK *block, rating -= 0.125f; } const double e = exp(1.0); // The base of natural logs. - int blob_number; + unsigned blob_number; int right_chop_index = 0; if (!assume_fixed_pitch_char_segment) { // We only chop if the language is not fixed pitch like CJK. 
@@ -613,8 +613,8 @@ bool Tesseract::FindSegmentation(const std::vector &target_text, WER /// @param best_rating /// @param best_segmentation void Tesseract::SearchForText(const std::vector *choices, int choices_pos, - int choices_length, const std::vector &target_text, - int text_index, float rating, std::vector *segmentation, + unsigned choices_length, const std::vector &target_text, + unsigned text_index, float rating, std::vector *segmentation, float *best_rating, std::vector *best_segmentation) { const UnicharAmbigsVector &table = getDict().getUnicharAmbigs().dang_ambigs(); for (unsigned length = 1; length <= choices[choices_pos].size(); ++length) { @@ -625,12 +625,12 @@ void Tesseract::SearchForText(const std::vector *choices, in for (choice_it.mark_cycle_pt(); !choice_it.cycled_list(); choice_it.forward()) { const BLOB_CHOICE *choice = choice_it.data(); choice_rating = choice->rating(); - UNICHAR_ID class_id = choice->unichar_id(); + auto class_id = choice->unichar_id(); if (class_id == target_text[text_index]) { break; } // Search ambigs table. - if (class_id < table.size() && table[class_id] != nullptr) { + if (static_cast(class_id) < table.size() && table[class_id] != nullptr) { AmbigSpec_IT spec_it(table[class_id]); for (spec_it.mark_cycle_pt(); !spec_it.cycled_list(); spec_it.forward()) { const AmbigSpec *ambig_spec = spec_it.data(); diff --git a/src/ccmain/control.cpp b/src/ccmain/control.cpp index b439aa649..944ed1efd 100644 --- a/src/ccmain/control.cpp +++ b/src/ccmain/control.cpp @@ -45,9 +45,11 @@ #include "werdit.h" const char *const kBackUpConfigFile = "tempconfigdata.config"; +#ifndef DISABLED_LEGACY_ENGINE // Min believable x-height for any text when refitting as a fraction of // original x-height const double kMinRefitXHeightFraction = 0.5; +#endif // ! DISABLED_LEGACY_ENGINE /** * Make a word from the selected blobs and run Tess on them. 
@@ -227,7 +229,7 @@ bool Tesseract::RecogAllWordsPassN(int pass_n, ETEXT_DESC *monitor, PAGE_RES_IT } } if (word->word->tess_failed) { - int s; + unsigned s; for (s = 0; s < word->lang_words.size() && word->lang_words[s]->tess_failed; ++s) { } // If all are failed, skip it. Image words are skipped by this test. @@ -727,7 +729,7 @@ void Tesseract::script_pos_pass(PAGE_RES *page_res) { // Scan for upper/lower. int num_upper = 0; int num_lower = 0; - for (int i = 0; i < word->best_choice->length(); ++i) { + for (unsigned i = 0; i < word->best_choice->length(); ++i) { if (word->uch_set->get_isupper(word->best_choice->unichar_id(i))) { ++num_upper; } else if (word->uch_set->get_islower(word->best_choice->unichar_id(i))) { @@ -743,7 +745,7 @@ void Tesseract::script_pos_pass(PAGE_RES *page_res) { } // Helper finds the gap between the index word and the next. -static void WordGap(const PointerVector &words, int index, int *right, int *next_left) { +static void WordGap(const PointerVector &words, unsigned index, int *right, int *next_left) { *right = -INT32_MAX; *next_left = INT32_MAX; if (index < words.size()) { @@ -756,13 +758,13 @@ static void WordGap(const PointerVector &words, int index, int *right, // Factored helper computes the rating, certainty, badness and validity of // the permuter of the words in [first_index, end_index). 
-static void EvaluateWordSpan(const PointerVector &words, int first_index, int end_index, +static void EvaluateWordSpan(const PointerVector &words, unsigned first_index, unsigned end_index, float *rating, float *certainty, bool *bad, bool *valid_permuter) { if (end_index <= first_index) { *bad = true; *valid_permuter = false; } - for (int index = first_index; index < end_index && index < words.size(); ++index) { + for (unsigned index = first_index; index < end_index && index < words.size(); ++index) { WERD_CHOICE *choice = words[index]->best_choice; if (choice == nullptr) { *bad = true; @@ -790,11 +792,11 @@ static int SelectBestWords(double rating_ratio, double certainty_margin, bool de // boundary at the end. std::vector out_words; // Index into each word vector (best, new). - int b = 0, n = 0; + unsigned b = 0, n = 0; int num_best = 0, num_new = 0; while (b < best_words->size() || n < new_words->size()) { // Start of the current run in each. - int start_b = b, start_n = n; + auto start_b = b, start_n = n; while (b < best_words->size() || n < new_words->size()) { int b_right = -INT32_MAX; int next_b_left = INT32_MAX; @@ -884,7 +886,7 @@ int Tesseract::RetryWithLanguage(const WordData &word_data, WordRecognizer recog *in_word = nullptr; } if (debug) { - for (int i = 0; i < new_words.size(); ++i) { + for (unsigned i = 0; i < new_words.size(); ++i) { new_words[i]->DebugTopChoice("Lang result"); } } @@ -896,7 +898,7 @@ int Tesseract::RetryWithLanguage(const WordData &word_data, WordRecognizer recog // Helper returns true if all the words are acceptable. 
static bool WordsAcceptable(const PointerVector &words) { - for (int w = 0; w < words.size(); ++w) { + for (unsigned w = 0; w < words.size(); ++w) { if (words[w]->tess_failed || !words[w]->tess_accepted) { return false; } @@ -982,9 +984,12 @@ void Tesseract::AssignDiacriticsToOverlappingBlobs(const std::vector *overlapped_any_blob, std::vector *target_blobs) { std::vector blob_wanted; - word_wanted->resize(outlines.size(), false); - overlapped_any_blob->resize(outlines.size(), false); - target_blobs->resize(outlines.size(), nullptr); + word_wanted->clear(); + word_wanted->resize(outlines.size()); + overlapped_any_blob->clear(); + overlapped_any_blob->resize(outlines.size()); + target_blobs->clear(); + target_blobs->resize(outlines.size()); // For each real blob, find the outlines that seriously overlap it. // A single blob could be several merged characters, so there can be quite // a few outlines overlapping, and the full engine needs to be used to chop @@ -993,7 +998,8 @@ void Tesseract::AssignDiacriticsToOverlappingBlobs(const std::vectorbounding_box(); - blob_wanted.resize(outlines.size(), false); + blob_wanted.clear(); + blob_wanted.resize(outlines.size()); int num_blob_outlines = 0; for (unsigned i = 0; i < outlines.size(); ++i) { if (blob_box.major_x_overlap(outlines[i]->bounding_box()) && !(*word_wanted)[i]) { @@ -1032,15 +1038,18 @@ void Tesseract::AssignDiacriticsToNewBlobs(const std::vector &outli std::vector *word_wanted, std::vector *target_blobs) { std::vector blob_wanted; - word_wanted->resize(outlines.size(), false); - target_blobs->resize(outlines.size(), nullptr); + word_wanted->clear(); + word_wanted->resize(outlines.size()); + target_blobs->clear(); + target_blobs->resize(outlines.size()); // Check for outlines that need to be turned into stand-alone blobs. for (unsigned i = 0; i < outlines.size(); ++i) { if (outlines[i] == nullptr) { continue; } // Get a set of adjacent outlines that don't overlap any existing blob. 
- blob_wanted.resize(outlines.size(), false); + blob_wanted.clear(); + blob_wanted.resize(outlines.size()); int num_blob_outlines = 0; TBOX total_ol_box(outlines[i]->bounding_box()); while (i < outlines.size() && outlines[i] != nullptr) { @@ -1590,10 +1599,10 @@ void Tesseract::match_word_pass_n(int pass_n, WERD_RES *word, ROW *row, BLOCK *b word->fix_hyphens(); } /* Don't trust fix_quotes! - though I think I've fixed the bug */ - if (word->best_choice->length() != word->box_word->length()) { + if (static_cast(word->best_choice->length()) != word->box_word->length()) { tprintf( "POST FIX_QUOTES FAIL String:\"%s\"; Strlen=%d;" - " #Blobs=%d\n", + " #Blobs=%u\n", word->best_choice->debug_string().c_str(), word->best_choice->length(), word->box_word->length()); } @@ -1614,7 +1623,7 @@ void Tesseract::match_word_pass_n(int pass_n, WERD_RES *word, ROW *row, BLOCK *b static BLOB_CHOICE *FindBestMatchingChoice(UNICHAR_ID char_id, WERD_RES *word_res) { // Find the corresponding best BLOB_CHOICE from any position in the word_res. BLOB_CHOICE *best_choice = nullptr; - for (int i = 0; i < word_res->best_choice->length(); ++i) { + for (unsigned i = 0; i < word_res->best_choice->length(); ++i) { BLOB_CHOICE *choice = FindMatchingChoice(char_id, word_res->GetBlobChoices(i)); if (choice != nullptr) { if (best_choice == nullptr || choice->rating() < best_choice->rating()) { @@ -1630,7 +1639,7 @@ static BLOB_CHOICE *FindBestMatchingChoice(UNICHAR_ID char_id, WERD_RES *word_re // in the best_choice. 
static void CorrectRepcharChoices(BLOB_CHOICE *blob_choice, WERD_RES *word_res) { WERD_CHOICE *word = word_res->best_choice; - for (int i = 0; i < word_res->best_choice->length(); ++i) { + for (unsigned i = 0; i < word_res->best_choice->length(); ++i) { BLOB_CHOICE *choice = FindMatchingChoice(blob_choice->unichar_id(), word_res->GetBlobChoices(i)); if (choice == nullptr) { @@ -1639,7 +1648,7 @@ static void CorrectRepcharChoices(BLOB_CHOICE *blob_choice, WERD_RES *word_res) } } // Correct any incorrect results in word. - for (int i = 0; i < word->length(); ++i) { + for (unsigned i = 0; i < word->length(); ++i) { if (word->unichar_id(i) != blob_choice->unichar_id()) { word->set_unichar_id(blob_choice->unichar_id(), i); } @@ -1659,7 +1668,7 @@ void Tesseract::fix_rep_char(PAGE_RES_IT *page_res_it) { // Find the frequency of each unique character in the word. SortHelper rep_ch(word.length()); - for (int i = 0; i < word.length(); ++i) { + for (unsigned i = 0; i < word.length(); ++i) { rep_ch.Add(word.unichar_id(i), 1); } @@ -1888,6 +1897,7 @@ bool Tesseract::check_debug_pt(WERD_RES *word, int location) { * * Find the modal font and remove from the stats. */ +#ifndef DISABLED_LEGACY_ENGINE static void find_modal_font( // good chars in word STATS *fonts, // font stats int16_t *font_out, // output font @@ -1907,6 +1917,7 @@ static void find_modal_font( // good chars in word *font_count = 0; } } +#endif // ! 
DISABLED_LEGACY_ENGINE /** * set_word_fonts @@ -1944,7 +1955,7 @@ void Tesseract::set_word_fonts(WERD_RES *word) { if (tessedit_debug_fonts) { tprintf("Examining fonts in %s\n", word->best_choice->debug_string().c_str()); } - for (int b = 0; b < word->best_choice->length(); ++b) { + for (unsigned b = 0; b < word->best_choice->length(); ++b) { const BLOB_CHOICE *choice = word->GetBlobChoice(b); if (choice == nullptr) { continue; diff --git a/src/ccmain/docqual.cpp b/src/ccmain/docqual.cpp index 422a25638..8941f8f0d 100644 --- a/src/ccmain/docqual.cpp +++ b/src/ccmain/docqual.cpp @@ -64,7 +64,7 @@ int16_t Tesseract::word_outline_errs(WERD_RES *word) { int16_t err_count = 0; if (word->rebuild_word != nullptr) { - for (int b = 0; b < word->rebuild_word->NumBlobs(); ++b) { + for (unsigned b = 0; b < word->rebuild_word->NumBlobs(); ++b) { TBLOB *blob = word->rebuild_word->blobs[b]; err_count += count_outline_errs(word->best_choice->unichar_string()[i], blob->NumOutlines()); i++; @@ -911,7 +911,7 @@ bool Tesseract::noise_outlines(TWERD *word) { int16_t max_dimension; float small_limit = kBlnXHeight * crunch_small_outlines_size; - for (int b = 0; b < word->NumBlobs(); ++b) { + for (unsigned b = 0; b < word->NumBlobs(); ++b) { TBLOB *blob = word->blobs[b]; for (TESSLINE *ol = blob->outlines; ol != nullptr; ol = ol->next) { outline_count++; diff --git a/src/ccmain/equationdetect.cpp b/src/ccmain/equationdetect.cpp index a5058eab4..a50ce6d1f 100644 --- a/src/ccmain/equationdetect.cpp +++ b/src/ccmain/equationdetect.cpp @@ -35,6 +35,7 @@ #include #include +#include #include #include @@ -189,11 +190,11 @@ void EquationDetect::IdentifySpecialText(BLOBNBOX *blobnbox, const int height_th const float kConfScoreTh = -5.0f, kConfDiffTh = 1.8; // The scores here are negative, so the max/min == fabs(min/max). 
// float ratio = fmax(lang_score, equ_score) / fmin(lang_score, equ_score); - const float diff = fabs(lang_score - equ_score); + const float diff = std::fabs(lang_score - equ_score); BlobSpecialTextType type = BSTT_NONE; // Classification. - if (fmax(lang_score, equ_score) < kConfScoreTh) { + if (std::fmax(lang_score, equ_score) < kConfScoreTh) { // If both score are very small, then mark it as unclear. type = BSTT_UNCLEAR; } else if (diff > kConfDiffTh && equ_score > lang_score) { @@ -727,7 +728,7 @@ int EquationDetect::CountAlignment(const std::vector &sorted_vec, const int if (sorted_vec.empty()) { return 0; } - const int kDistTh = static_cast(round(0.03f * resolution_)); + const int kDistTh = static_cast(std::round(0.03f * resolution_)); auto pos = std::upper_bound(sorted_vec.begin(), sorted_vec.end(), val); if (pos > sorted_vec.begin()) { --pos; @@ -742,7 +743,7 @@ int EquationDetect::CountAlignment(const std::vector &sorted_vec, const int // Search right side. index = pos + 1 - sorted_vec.begin(); - while (index < sorted_vec.size() && sorted_vec[index++] - val < kDistTh) { + while (static_cast(index) < sorted_vec.size() && sorted_vec[index++] - val < kDistTh) { count++; } @@ -772,7 +773,7 @@ void EquationDetect::IdentifyInlinePartsHorizontal() { ASSERT_HOST(cps_super_bbox_); std::vector new_seeds; const int kMarginDiffTh = IntCastRounded(0.5 * lang_tesseract_->source_resolution()); - const int kGapTh = static_cast(round(1.0f * lang_tesseract_->source_resolution())); + const int kGapTh = static_cast(std::round(1.0f * lang_tesseract_->source_resolution())); ColPartitionGridSearch search(part_grid_); search.SetUniqueMode(true); // The center x coordinate of the cp_super_bbox_. @@ -923,8 +924,8 @@ bool EquationDetect::IsInline(const bool search_bottom, const int textparts_line // Check if neighbor and part is inline similar. const float kHeightRatioTh = 0.5; const int kYGapTh = textparts_linespacing > 0 - ? 
textparts_linespacing + static_cast(round(0.02f * resolution_)) - : static_cast(round(0.05f * resolution_)); // Default value. + ? textparts_linespacing + static_cast(std::round(0.02f * resolution_)) + : static_cast(std::round(0.05f * resolution_)); // Default value. if (part_box.x_overlap(neighbor_box) && // Location feature. part_box.y_gap(neighbor_box) <= kYGapTh && // Line spacing. // Geo feature. @@ -978,9 +979,9 @@ EquationDetect::IndentType EquationDetect::IsIndented(ColPartition *part) { ColPartitionGridSearch search(part_grid_); ColPartition *neighbor = nullptr; const TBOX &part_box(part->bounding_box()); - const int kXGapTh = static_cast(round(0.5f * resolution_)); - const int kRadiusTh = static_cast(round(3.0f * resolution_)); - const int kYGapTh = static_cast(round(0.5f * resolution_)); + const int kXGapTh = static_cast(std::round(0.5f * resolution_)); + const int kRadiusTh = static_cast(std::round(3.0f * resolution_)); + const int kYGapTh = static_cast(std::round(0.5f * resolution_)); // Here we use a simple approximation algorithm: from the center of part, We // perform the radius search, and check if we can find a neighboring partition @@ -1080,7 +1081,7 @@ void EquationDetect::ExpandSeedHorizontal(const bool search_left, ColPartition * std::vector *parts_to_merge) { ASSERT_HOST(seed != nullptr && parts_to_merge != nullptr); const float kYOverlapTh = 0.6; - const int kXGapTh = static_cast(round(0.2f * resolution_)); + const int kXGapTh = static_cast(std::round(0.2f * resolution_)); ColPartitionGridSearch search(part_grid_); const TBOX &seed_box(seed->bounding_box()); @@ -1132,7 +1133,7 @@ void EquationDetect::ExpandSeedVertical(const bool search_bottom, ColPartition * std::vector *parts_to_merge) { ASSERT_HOST(seed != nullptr && parts_to_merge != nullptr && cps_super_bbox_ != nullptr); const float kXOverlapTh = 0.4; - const int kYGapTh = static_cast(round(0.2f * resolution_)); + const int kYGapTh = static_cast(std::round(0.2f * resolution_)); 
ColPartitionGridSearch search(part_grid_); const TBOX &seed_box(seed->bounding_box()); @@ -1210,8 +1211,8 @@ void EquationDetect::ExpandSeedVertical(const bool search_bottom, ColPartition * } bool EquationDetect::IsNearSmallNeighbor(const TBOX &seed_box, const TBOX &part_box) const { - const int kXGapTh = static_cast(round(0.25f * resolution_)); - const int kYGapTh = static_cast(round(0.05f * resolution_)); + const int kXGapTh = static_cast(std::round(0.25f * resolution_)); + const int kYGapTh = static_cast(std::round(0.05f * resolution_)); // Check geometric feature. if (part_box.height() > seed_box.height() || part_box.width() > seed_box.width()) { @@ -1266,7 +1267,7 @@ void EquationDetect::ProcessMathBlockSatelliteParts() { int med_height = text_box.height(); if (text_parts.size() % 2 == 0 && text_parts.size() > 1) { const TBOX &text_box = text_parts[text_parts.size() / 2 - 1]->bounding_box(); - med_height = static_cast(round(0.5f * (text_box.height() + med_height))); + med_height = static_cast(std::round(0.5f * (text_box.height() + med_height))); } // Iterate every text_parts and check if it is a math block satellite. 
@@ -1348,7 +1349,7 @@ bool EquationDetect::IsMathBlockSatellite(ColPartition *part, ColPartition *EquationDetect::SearchNNVertical(const bool search_bottom, const ColPartition *part) { ASSERT_HOST(part); ColPartition *nearest_neighbor = nullptr, *neighbor = nullptr; - const int kYGapTh = static_cast(round(resolution_ * 0.5f)); + const int kYGapTh = static_cast(std::round(resolution_ * 0.5f)); ColPartitionGridSearch search(part_grid_); search.SetUniqueMode(true); @@ -1383,7 +1384,7 @@ bool EquationDetect::IsNearMathNeighbor(const int y_gap, const ColPartition *nei if (!neighbor) { return false; } - const int kYGapTh = static_cast(round(resolution_ * 0.1f)); + const int kYGapTh = static_cast(std::round(resolution_ * 0.1f)); return neighbor->type() == PT_EQUATION && y_gap <= kYGapTh; } diff --git a/src/ccmain/fixspace.cpp b/src/ccmain/fixspace.cpp index 71fbade3b..dee79395b 100644 --- a/src/ccmain/fixspace.cpp +++ b/src/ccmain/fixspace.cpp @@ -262,7 +262,7 @@ int16_t Tesseract::eval_word_spacing(WERD_RES_LIST &word_res_list) { int16_t total_score = 0; int16_t word_count = 0; int16_t done_word_count = 0; - int16_t i; + int i; int16_t offset; int16_t prev_word_score = 0; bool prev_word_done = false; @@ -684,7 +684,6 @@ void Tesseract::break_noisiest_blob_word(WERD_RES_LIST &words) { int16_t Tesseract::worst_noise_blob(WERD_RES *word_res, float *worst_noise_score) { float noise_score[512]; - int i; int min_noise_blob; // 1st contender int max_noise_blob; // last contender int non_noise_count; @@ -697,7 +696,7 @@ int16_t Tesseract::worst_noise_blob(WERD_RES *word_res, float *worst_noise_score } // Normalised. 
- int blob_count = word_res->box_word->length(); + auto blob_count = word_res->box_word->length(); ASSERT_HOST(blob_count <= 512); if (blob_count < 5) { return -1; // too short to split @@ -712,7 +711,7 @@ int16_t Tesseract::worst_noise_blob(WERD_RES *word_res, float *worst_noise_score } #endif - for (i = 0; i < blob_count && i < word_res->rebuild_word->NumBlobs(); i++) { + for (unsigned i = 0; i < blob_count && i < word_res->rebuild_word->NumBlobs(); i++) { TBLOB *blob = word_res->rebuild_word->blobs[i]; if (word_res->reject_map[i].accepted()) { noise_score[i] = non_noise_limit; @@ -731,7 +730,8 @@ int16_t Tesseract::worst_noise_blob(WERD_RES *word_res, float *worst_noise_score /* Now find the worst one which is far enough away from the end of the word */ non_noise_count = 0; - for (i = 0; i < blob_count && non_noise_count < fixsp_non_noise_limit; i++) { + int i; + for (i = 0; static_cast(i) < blob_count && non_noise_count < fixsp_non_noise_limit; i++) { if (noise_score[i] >= non_noise_limit) { non_noise_count++; } @@ -760,7 +760,7 @@ int16_t Tesseract::worst_noise_blob(WERD_RES *word_res, float *worst_noise_score *worst_noise_score = small_limit; worst_noise_blob = -1; - for (i = min_noise_blob; i <= max_noise_blob; i++) { + for (auto i = min_noise_blob; i <= max_noise_blob; i++) { if (noise_score[i] < *worst_noise_score) { worst_noise_blob = i; *worst_noise_score = noise_score[i]; @@ -838,7 +838,6 @@ int16_t Tesseract::fp_eval_word_spacing(WERD_RES_LIST &word_res_list) { WERD_RES_IT word_it(&word_res_list); WERD_RES *word; int16_t score = 0; - int16_t i; float small_limit = kBlnXHeight * fixsp_small_outlines_size; for (word_it.mark_cycle_pt(); !word_it.cycled_list(); word_it.forward()) { @@ -849,9 +848,9 @@ int16_t Tesseract::fp_eval_word_spacing(WERD_RES_LIST &word_res_list) { if (word->done || word->tess_accepted || word->best_choice->permuter() == SYSTEM_DAWG_PERM || word->best_choice->permuter() == FREQ_DAWG_PERM || word->best_choice->permuter() == 
USER_DAWG_PERM || safe_dict_word(word) > 0) { - int num_blobs = word->rebuild_word->NumBlobs(); + auto num_blobs = word->rebuild_word->NumBlobs(); UNICHAR_ID space = word->uch_set->unichar_to_id(" "); - for (i = 0; i < word->best_choice->length() && i < num_blobs; ++i) { + for (unsigned i = 0; i < word->best_choice->length() && i < num_blobs; ++i) { TBLOB *blob = word->rebuild_word->blobs[i]; if (word->best_choice->unichar_id(i) == space || blob_noise_score(blob) < small_limit) { score -= 1; // penalise possibly erroneous non-space diff --git a/src/ccmain/fixxht.cpp b/src/ccmain/fixxht.cpp index 20f154986..80ea08313 100644 --- a/src/ccmain/fixxht.cpp +++ b/src/ccmain/fixxht.cpp @@ -23,6 +23,7 @@ #include #include +#include #include namespace tesseract { @@ -205,7 +206,7 @@ float Tesseract::ComputeCompatibleXheight(WERD_RES *word_res, float *baseline_sh new_xht / word_res->denorm.y_scale()); } // The xheight must change by at least x_ht_min_change to be used. - if (fabs(new_xht - kBlnXHeight) >= x_ht_min_change) { + if (std::fabs(new_xht - kBlnXHeight) >= x_ht_min_change) { return new_xht / word_res->denorm.y_scale(); } else { return bottom_shift != 0 ? 
word_res->x_height : 0.0f; diff --git a/src/ccmain/linerec.cpp b/src/ccmain/linerec.cpp index 4f85fd670..9d2055a79 100644 --- a/src/ccmain/linerec.cpp +++ b/src/ccmain/linerec.cpp @@ -151,8 +151,7 @@ ImageData *Tesseract::GetLineData(const TBOX &line_box, const std::vector line_boxes.push_back(box); line_texts.push_back(texts[b]); } - std::vector page_numbers; - page_numbers.resize(line_boxes.size(), applybox_page); + std::vector page_numbers(line_boxes.size(), applybox_page); image_data->AddBoxes(line_boxes, line_texts, page_numbers); return image_data; } @@ -270,22 +269,14 @@ void Tesseract::SearchWords(PointerVector *words) { if (stopper_dict == nullptr) { stopper_dict = &getDict(); } - bool any_nonspace_delimited = false; - for (int w = 0; w < words->size(); ++w) { - WERD_RES *word = (*words)[w]; - if (word->best_choice != nullptr && word->best_choice->ContainsAnyNonSpaceDelimited()) { - any_nonspace_delimited = true; - break; - } - } - for (int w = 0; w < words->size(); ++w) { + for (unsigned w = 0; w < words->size(); ++w) { WERD_RES *word = (*words)[w]; if (word->best_choice == nullptr) { // It is a dud. word->SetupFake(lstm_recognizer_->GetUnicharset()); } else { // Set the best state. 
- for (int i = 0; i < word->best_choice->length(); ++i) { + for (unsigned i = 0; i < word->best_choice->length(); ++i) { int length = word->best_choice->state(i); word->best_state.push_back(length); } diff --git a/src/ccmain/ltrresultiterator.cpp b/src/ccmain/ltrresultiterator.cpp index ccbd38625..0073d3dd8 100644 --- a/src/ccmain/ltrresultiterator.cpp +++ b/src/ccmain/ltrresultiterator.cpp @@ -147,14 +147,6 @@ float LTRResultIterator::Confidence(PageIteratorLevel level) const { return 0.0f; } -void LTRResultIterator::RowAttributes(float *row_height, float *descenders, - float *ascenders) const { - *row_height = - it_->row()->row->x_height() + it_->row()->row->ascenders() - it_->row()->row->descenders(); - *descenders = it_->row()->row->descenders(); - *ascenders = it_->row()->row->ascenders(); -} - // Returns the font attributes of the current word. If iterating at a higher // level object than words, eg textlines, then this will return the // attributes of the first word in that textline. 
@@ -335,10 +327,10 @@ char *LTRResultIterator::WordNormedUTF8Text() const { WERD_CHOICE *best_choice = it_->word()->best_choice; const UNICHARSET *unicharset = it_->word()->uch_set; ASSERT_HOST(best_choice != nullptr); - for (int i = 0; i < best_choice->length(); ++i) { + for (unsigned i = 0; i < best_choice->length(); ++i) { ocr_text += unicharset->get_normed_unichar(best_choice->unichar_id(i)); } - int length = ocr_text.length() + 1; + auto length = ocr_text.length() + 1; char *result = new char[length]; strncpy(result, ocr_text.c_str(), length); return result; @@ -404,7 +396,7 @@ ChoiceIterator::ChoiceIterator(const LTRResultIterator &result_it) { strcmp(word_res_->CTC_symbol_choices[0][0].first, " ")) { blanks_before_word_ = 0; } - auto index = *tstep_index_; + unsigned index = *tstep_index_; index += blanks_before_word_; if (index < word_res_->CTC_symbol_choices.size()) { LSTM_choices_ = &word_res_->CTC_symbol_choices[index]; @@ -432,7 +424,8 @@ ChoiceIterator::~ChoiceIterator() { // are none left. 
bool ChoiceIterator::Next() { if (oemLSTM_ && LSTM_choices_ != nullptr && !LSTM_choices_->empty()) { - if (LSTM_choice_it_ != LSTM_choices_->end() && next(LSTM_choice_it_) == LSTM_choices_->end()) { + if (LSTM_choice_it_ == LSTM_choices_->end() || + next(LSTM_choice_it_) == LSTM_choices_->end()) { return false; } else { ++LSTM_choice_it_; @@ -484,7 +477,7 @@ float ChoiceIterator::Confidence() const { // Returns the set of timesteps which belong to the current symbol std::vector>> *ChoiceIterator::Timesteps() const { - int offset = *tstep_index_ + blanks_before_word_; + unsigned offset = *tstep_index_ + blanks_before_word_; if (offset >= word_res_->segmented_timesteps.size() || !oemLSTM_) { return nullptr; } diff --git a/src/ccmain/osdetect.cpp b/src/ccmain/osdetect.cpp index 5f545a849..daee2b405 100644 --- a/src/ccmain/osdetect.cpp +++ b/src/ccmain/osdetect.cpp @@ -381,7 +381,7 @@ bool OrientationDetector::detect_blob(BLOB_CHOICE_LIST *scores) { for (choice_it.mark_cycle_pt(); !choice_it.cycled_list() && choice == nullptr; choice_it.forward()) { int choice_script = choice_it.data()->script_id(); - int s = 0; + unsigned s = 0; for (s = 0; s < allowed_scripts_->size(); ++s) { if ((*allowed_scripts_)[s] == choice_script) { choice = choice_it.data(); @@ -428,7 +428,7 @@ bool OrientationDetector::detect_blob(BLOB_CHOICE_LIST *scores) { // Normalize the orientation scores for the blob and use them to // update the aggregated orientation score. for (int i = 0; total_blob_o_score != 0 && i < 4; ++i) { - osr_->orientations[i] += log(blob_o_score[i] / total_blob_o_score); + osr_->orientations[i] += std::log(blob_o_score[i] / total_blob_o_score); } // TODO(ranjith) Add an early exit test, based on min_orientation_margin, @@ -477,7 +477,7 @@ void ScriptDetector::detect_blob(BLOB_CHOICE_LIST *scores) { int id = choice->script_id(); if (allowed_scripts_ != nullptr && !allowed_scripts_->empty()) { // Check that the choice is in an allowed script. 
- int s = 0; + size_t s = 0; for (s = 0; s < allowed_scripts_->size(); ++s) { if ((*allowed_scripts_)[s] == id) { break; diff --git a/src/ccmain/output.cpp b/src/ccmain/output.cpp index a45784dc7..73f732945 100644 --- a/src/ccmain/output.cpp +++ b/src/ccmain/output.cpp @@ -101,7 +101,6 @@ void Tesseract::write_results(PAGE_RES_IT &page_res_it, bool force_eol) { // override tilde crunch? WERD_RES *word = page_res_it.word(); const UNICHARSET &uchset = *word->uch_set; - int i; bool need_reject = false; UNICHAR_ID space = uchset.unichar_to_id(" "); @@ -181,7 +180,7 @@ void Tesseract::write_results(PAGE_RES_IT &page_res_it, if (!word->word->flag(W_REP_CHAR) || !tessedit_write_rep_codes) { if (tessedit_zero_rejection) { /* OVERRIDE ALL REJECTION MECHANISMS - ONLY REJECT TESS FAILURES */ - for (i = 0; i < word->best_choice->length(); ++i) { + for (unsigned i = 0; i < word->best_choice->length(); ++i) { if (word->reject_map[i].rejected()) { word->reject_map[i].setrej_minimal_rej_accept(); } @@ -189,7 +188,7 @@ void Tesseract::write_results(PAGE_RES_IT &page_res_it, } if (tessedit_minimal_rejection) { /* OVERRIDE ALL REJECTION MECHANISMS - ONLY REJECT TESS FAILURES */ - for (i = 0; i < word->best_choice->length(); ++i) { + for (unsigned i = 0; i < word->best_choice->length(); ++i) { if ((word->best_choice->unichar_id(i) != space) && word->reject_map[i].rejected()) { word->reject_map[i].setrej_minimal_rej_accept(); } @@ -365,7 +364,7 @@ void Tesseract::set_unlv_suspects(WERD_RES *word_res) { int16_t Tesseract::count_alphas(const WERD_CHOICE &word) { int count = 0; - for (int i = 0; i < word.length(); ++i) { + for (unsigned i = 0; i < word.length(); ++i) { if (word.unicharset()->get_isalpha(word.unichar_id(i))) { count++; } @@ -375,7 +374,7 @@ int16_t Tesseract::count_alphas(const WERD_CHOICE &word) { int16_t Tesseract::count_alphanums(const WERD_CHOICE &word) { int count = 0; - for (int i = 0; i < word.length(); ++i) { + for (unsigned i = 0; i < word.length(); ++i) { if 
(word.unicharset()->get_isalpha(word.unichar_id(i)) || word.unicharset()->get_isdigit(word.unichar_id(i))) { count++; diff --git a/src/ccmain/pageiterator.cpp b/src/ccmain/pageiterator.cpp index fd893ac84..e8d528b66 100644 --- a/src/ccmain/pageiterator.cpp +++ b/src/ccmain/pageiterator.cpp @@ -27,22 +27,23 @@ namespace tesseract { -PageIterator::PageIterator(PAGE_RES *page_res, Tesseract *tesseract, int scale, int scaled_yres, - int rect_left, int rect_top, int rect_width, int rect_height) - : page_res_(page_res) - , tesseract_(tesseract) - , word_(nullptr) - , word_length_(0) - , blob_index_(0) - , cblob_it_(nullptr) - , include_upper_dots_(false) - , include_lower_dots_(false) - , scale_(scale) - , scaled_yres_(scaled_yres) - , rect_left_(rect_left) - , rect_top_(rect_top) - , rect_width_(rect_width) - , rect_height_(rect_height) { +PageIterator::PageIterator(PAGE_RES *page_res, Tesseract *tesseract, int scale, + int scaled_yres, int rect_left, int rect_top, + int rect_width, int rect_height) + : page_res_(page_res), + tesseract_(tesseract), + word_(nullptr), + word_length_(0), + blob_index_(0), + cblob_it_(nullptr), + include_upper_dots_(false), + include_lower_dots_(false), + scale_(scale), + scaled_yres_(scaled_yres), + rect_left_(rect_left), + rect_top_(rect_top), + rect_width_(rect_width), + rect_height_(rect_height) { it_ = new PAGE_RES_IT(page_res); PageIterator::Begin(); } @@ -58,20 +59,20 @@ PageIterator::~PageIterator() { * objects at a higher level. 
*/ PageIterator::PageIterator(const PageIterator &src) - : page_res_(src.page_res_) - , tesseract_(src.tesseract_) - , word_(nullptr) - , word_length_(src.word_length_) - , blob_index_(src.blob_index_) - , cblob_it_(nullptr) - , include_upper_dots_(src.include_upper_dots_) - , include_lower_dots_(src.include_lower_dots_) - , scale_(src.scale_) - , scaled_yres_(src.scaled_yres_) - , rect_left_(src.rect_left_) - , rect_top_(src.rect_top_) - , rect_width_(src.rect_width_) - , rect_height_(src.rect_height_) { + : page_res_(src.page_res_), + tesseract_(src.tesseract_), + word_(nullptr), + word_length_(src.word_length_), + blob_index_(src.blob_index_), + cblob_it_(nullptr), + include_upper_dots_(src.include_upper_dots_), + include_lower_dots_(src.include_lower_dots_), + scale_(src.scale_), + scaled_yres_(src.scaled_yres_), + rect_left_(src.rect_left_), + rect_top_(src.rect_top_), + rect_width_(src.rect_width_), + rect_height_(src.rect_height_) { it_ = new PAGE_RES_IT(*src.it_); BeginWord(src.blob_index_); } @@ -201,8 +202,9 @@ bool PageIterator::IsAtBeginningOf(PageIteratorLevel level) const { case RIL_BLOCK: return blob_index_ == 0 && it_->block() != it_->prev_block(); case RIL_PARA: - return blob_index_ == 0 && (it_->block() != it_->prev_block() || - it_->row()->row->para() != it_->prev_row()->row->para()); + return blob_index_ == 0 && + (it_->block() != it_->prev_block() || + it_->row()->row->para() != it_->prev_row()->row->para()); case RIL_TEXTLINE: return blob_index_ == 0 && it_->row() != it_->prev_row(); case RIL_WORD: @@ -217,7 +219,8 @@ bool PageIterator::IsAtBeginningOf(PageIteratorLevel level) const { * Returns whether the iterator is positioned at the last element in a * given level. (e.g. 
the last word in a line, the last line in a block) */ -bool PageIterator::IsAtFinalElement(PageIteratorLevel level, PageIteratorLevel element) const { +bool PageIterator::IsAtFinalElement(PageIteratorLevel level, + PageIteratorLevel element) const { if (Empty(element)) { return true; // Already at the end! } @@ -280,7 +283,8 @@ int PageIterator::Cmp(const PageIterator &other) const { * See comment on coordinate system above. * Returns false if there is no such object at the current position. */ -bool PageIterator::BoundingBoxInternal(PageIteratorLevel level, int *left, int *top, int *right, +bool PageIterator::BoundingBoxInternal(PageIteratorLevel level, int *left, + int *top, int *right, int *bottom) const { if (Empty(level)) { return false; @@ -289,16 +293,19 @@ bool PageIterator::BoundingBoxInternal(PageIteratorLevel level, int *left, int * PARA *para = nullptr; switch (level) { case RIL_BLOCK: - box = it_->block()->block->restricted_bounding_box(include_upper_dots_, include_lower_dots_); + box = it_->block()->block->restricted_bounding_box(include_upper_dots_, + include_lower_dots_); break; case RIL_PARA: para = it_->row()->row->para(); // Fall through. 
case RIL_TEXTLINE: - box = it_->row()->row->restricted_bounding_box(include_upper_dots_, include_lower_dots_); + box = it_->row()->row->restricted_bounding_box(include_upper_dots_, + include_lower_dots_); break; case RIL_WORD: - box = it_->word()->word->restricted_bounding_box(include_upper_dots_, include_lower_dots_); + box = it_->word()->word->restricted_bounding_box(include_upper_dots_, + include_lower_dots_); break; case RIL_SYMBOL: if (cblob_it_ == nullptr) { @@ -311,8 +318,10 @@ bool PageIterator::BoundingBoxInternal(PageIteratorLevel level, int *left, int * PageIterator other = *this; other.Begin(); do { - if (other.it_->block() && other.it_->block()->block == it_->block()->block && - other.it_->row() && other.it_->row()->row && other.it_->row()->row->para() == para) { + if (other.it_->block() && + other.it_->block()->block == it_->block()->block && + other.it_->row() && other.it_->row()->row && + other.it_->row()->row->para() == para) { box = box.bounding_union(other.it_->row()->row->bounding_box()); } } while (other.Next(RIL_TEXTLINE)); @@ -337,23 +346,26 @@ bool PageIterator::BoundingBoxInternal(PageIteratorLevel level, int *left, int * * See comment on coordinate system above. * Returns false if there is no such object at the current position. */ -bool PageIterator::BoundingBox(PageIteratorLevel level, int *left, int *top, int *right, - int *bottom) const { +bool PageIterator::BoundingBox(PageIteratorLevel level, int *left, int *top, + int *right, int *bottom) const { return BoundingBox(level, 0, left, top, right, bottom); } -bool PageIterator::BoundingBox(PageIteratorLevel level, const int padding, int *left, int *top, - int *right, int *bottom) const { +bool PageIterator::BoundingBox(PageIteratorLevel level, const int padding, + int *left, int *top, int *right, + int *bottom) const { if (!BoundingBoxInternal(level, left, top, right, bottom)) { return false; } // Convert to the coordinate system of the original image. 
- *left = ClipToRange(*left / scale_ + rect_left_ - padding, rect_left_, rect_left_ + rect_width_); - *top = ClipToRange(*top / scale_ + rect_top_ - padding, rect_top_, rect_top_ + rect_height_); - *right = ClipToRange((*right + scale_ - 1) / scale_ + rect_left_ + padding, *left, - rect_left_ + rect_width_); - *bottom = ClipToRange((*bottom + scale_ - 1) / scale_ + rect_top_ + padding, *top, - rect_top_ + rect_height_); + *left = ClipToRange(*left / scale_ + rect_left_ - padding, rect_left_, + rect_left_ + rect_width_); + *top = ClipToRange(*top / scale_ + rect_top_ - padding, rect_top_, + rect_top_ + rect_height_); + *right = ClipToRange((*right + scale_ - 1) / scale_ + rect_left_ + padding, + *left, rect_left_ + rect_width_); + *bottom = ClipToRange((*bottom + scale_ - 1) / scale_ + rect_top_ + padding, + *top, rect_top_ + rect_height_); return true; } @@ -440,7 +452,8 @@ Pix *PageIterator::GetBinaryImage(PageIteratorLevel level) const { if (!BoundingBoxInternal(level, &left, &top, &right, &bottom)) { return nullptr; } - if (level == RIL_SYMBOL && cblob_it_ != nullptr && cblob_it_->data()->area() != 0) { + if (level == RIL_SYMBOL && cblob_it_ != nullptr && + cblob_it_->data()->area() != 0) { return cblob_it_->data()->render(); } Box *box = boxCreate(left, top, right - left, bottom - top); @@ -453,9 +466,9 @@ Pix *PageIterator::GetBinaryImage(PageIteratorLevel level) const { int mask_x = left - mask_box.left(); int mask_y = top - (tesseract_->ImageHeight() - mask_box.top()); // AND the mask and pix, putting the result in pix. 
- pixRasterop(pix, std::max(0, -mask_x), std::max(0, -mask_y), pixGetWidth(pix), - pixGetHeight(pix), PIX_SRC & PIX_DST, mask, std::max(0, mask_x), - std::max(0, mask_y)); + pixRasterop(pix, std::max(0, -mask_x), std::max(0, -mask_y), + pixGetWidth(pix), pixGetHeight(pix), PIX_SRC & PIX_DST, mask, + std::max(0, mask_x), std::max(0, mask_y)); mask.destroy(); } return pix; @@ -472,8 +485,8 @@ Pix *PageIterator::GetBinaryImage(PageIteratorLevel level) const { * If you do not supply an original image, you will get a binary one. * Use pixDestroy to delete the image after use. */ -Pix *PageIterator::GetImage(PageIteratorLevel level, int padding, Pix *original_img, int *left, - int *top) const { +Pix *PageIterator::GetImage(PageIteratorLevel level, int padding, + Pix *original_img, int *left, int *top) const { int right, bottom; if (!BoundingBox(level, left, top, &right, &bottom)) { return nullptr; @@ -500,10 +513,12 @@ Pix *PageIterator::GetImage(PageIteratorLevel level, int padding, Pix *original_ int width = pixGetWidth(grey_pix); int height = pixGetHeight(grey_pix); Image resized_mask = pixCreate(width, height, 1); - pixRasterop(resized_mask, std::max(0, -mask_x), std::max(0, -mask_y), width, height, PIX_SRC, - mask, std::max(0, mask_x), std::max(0, mask_y)); + pixRasterop(resized_mask, std::max(0, -mask_x), std::max(0, -mask_y), width, + height, PIX_SRC, mask, std::max(0, mask_x), + std::max(0, mask_y)); mask.destroy(); - pixDilateBrick(resized_mask, resized_mask, 2 * padding + 1, 2 * padding + 1); + pixDilateBrick(resized_mask, resized_mask, 2 * padding + 1, + 2 * padding + 1); pixInvert(resized_mask, resized_mask); pixSetMasked(grey_pix, resized_mask, UINT32_MAX); resized_mask.destroy(); @@ -516,14 +531,15 @@ Pix *PageIterator::GetImage(PageIteratorLevel level, int padding, Pix *original_ * The baseline is the line that passes through (x1, y1) and (x2, y2). * WARNING: with vertical text, baselines may be vertical! 
*/ -bool PageIterator::Baseline(PageIteratorLevel level, int *x1, int *y1, int *x2, int *y2) const { +bool PageIterator::Baseline(PageIteratorLevel level, int *x1, int *y1, int *x2, + int *y2) const { if (it_->word() == nullptr) { return false; // Already at the end! } ROW *row = it_->row()->row; WERD *word = it_->word()->word; - TBOX box = - (level == RIL_WORD || level == RIL_SYMBOL) ? word->bounding_box() : row->bounding_box(); + TBOX box = (level == RIL_WORD || level == RIL_SYMBOL) ? word->bounding_box() + : row->bounding_box(); int left = box.left(); ICOORD startpt(left, static_cast(row->base_line(left) + 0.5)); int right = box.right(); @@ -538,6 +554,14 @@ bool PageIterator::Baseline(PageIteratorLevel level, int *x1, int *y1, int *x2, return true; } +void PageIterator::RowAttributes(float *row_height, float *descenders, + float *ascenders) const { + *row_height = it_->row()->row->x_height() + it_->row()->row->ascenders() - + it_->row()->row->descenders(); + *descenders = it_->row()->row->descenders(); + *ascenders = it_->row()->row->ascenders(); +} + void PageIterator::Orientation(tesseract::Orientation *orientation, tesseract::WritingDirection *writing_direction, tesseract::TextlineOrder *textline_order, @@ -564,23 +588,26 @@ void PageIterator::Orientation(tesseract::Orientation *orientation, // Writing direction bool is_vertical_text = (block->classify_rotation().x() == 0.0); bool right_to_left = block->right_to_left(); - *writing_direction = is_vertical_text ? WRITING_DIRECTION_TOP_TO_BOTTOM - : (right_to_left ? WRITING_DIRECTION_RIGHT_TO_LEFT - : WRITING_DIRECTION_LEFT_TO_RIGHT); + *writing_direction = is_vertical_text + ? WRITING_DIRECTION_TOP_TO_BOTTOM + : (right_to_left ? WRITING_DIRECTION_RIGHT_TO_LEFT + : WRITING_DIRECTION_LEFT_TO_RIGHT); // Textline Order const bool is_mongolian = false; // TODO(eger): fix me - *textline_order = is_vertical_text ? (is_mongolian ? 
TEXTLINE_ORDER_LEFT_TO_RIGHT - : TEXTLINE_ORDER_RIGHT_TO_LEFT) - : TEXTLINE_ORDER_TOP_TO_BOTTOM; + *textline_order = is_vertical_text + ? (is_mongolian ? TEXTLINE_ORDER_LEFT_TO_RIGHT + : TEXTLINE_ORDER_RIGHT_TO_LEFT) + : TEXTLINE_ORDER_TOP_TO_BOTTOM; // Deskew angle FCOORD skew = block->skew(); // true horizontal for textlines *deskew_angle = -skew.angle(); } -void PageIterator::ParagraphInfo(tesseract::ParagraphJustification *just, bool *is_list_item, - bool *is_crown, int *first_line_indent) const { +void PageIterator::ParagraphInfo(tesseract::ParagraphJustification *just, + bool *is_list_item, bool *is_crown, + int *first_line_indent) const { *just = tesseract::JUSTIFICATION_UNKNOWN; if (!it_->row() || !it_->row()->row || !it_->row()->row->para() || !it_->row()->row->para()->model) { @@ -612,12 +639,14 @@ void PageIterator::BeginWord(int offset) { // is already baseline denormalized. word_length_ = word_res->best_choice->length(); if (word_res->box_word != nullptr) { - if (word_res->box_word->length() != word_length_) { - tprintf("Corrupted word! best_choice[len=%d] = %s, box_word[len=%d]: ", word_length_, - word_res->best_choice->unichar_string().c_str(), word_res->box_word->length()); + if (word_res->box_word->length() != static_cast(word_length_)) { + tprintf("Corrupted word! best_choice[len=%d] = %s, box_word[len=%d]: ", + word_length_, word_res->best_choice->unichar_string().c_str(), + word_res->box_word->length()); word_res->box_word->bounding_box().print(); } - ASSERT_HOST(word_res->box_word->length() == word_length_); + ASSERT_HOST(word_res->box_word->length() == + static_cast(word_length_)); } word_ = nullptr; // We will be iterating the box_word. 
diff --git a/src/ccmain/par_control.cpp b/src/ccmain/par_control.cpp index 4dbefd4c4..efad33558 100644 --- a/src/ccmain/par_control.cpp +++ b/src/ccmain/par_control.cpp @@ -40,10 +40,10 @@ void Tesseract::PrerecAllWordsPar(const std::vector &words) { std::vector blobs; for (const auto &w : words) { if (w.word->ratings != nullptr && w.word->ratings->get(0, 0) == nullptr) { - for (int s = 0; s < w.lang_words.size(); ++s) { + for (size_t s = 0; s < w.lang_words.size(); ++s) { Tesseract *sub = s < sub_langs_.size() ? sub_langs_[s] : this; const WERD_RES &word = *w.lang_words[s]; - for (int b = 0; b < word.chopped_word->NumBlobs(); ++b) { + for (unsigned b = 0; b < word.chopped_word->NumBlobs(); ++b) { blobs.emplace_back(b, sub, word); } } diff --git a/src/ccmain/paragraphs.cpp b/src/ccmain/paragraphs.cpp index f817bafdd..601afe9f3 100644 --- a/src/ccmain/paragraphs.cpp +++ b/src/ccmain/paragraphs.cpp @@ -73,7 +73,7 @@ static int Epsilon(int space_pix) { static bool AcceptableRowArgs(int debug_level, int min_num_rows, const char *function_name, const std::vector *rows, int row_start, int row_end) { - if (row_start < 0 || row_end > rows->size() || row_start > row_end) { + if (row_start < 0 || static_cast(row_end) > rows->size() || row_start > row_end) { tprintf("Invalid arguments rows[%d, %d) while rows is of size %zu.\n", row_start, row_end, rows->size()); return false; @@ -94,8 +94,8 @@ static bool AcceptableRowArgs(int debug_level, int min_num_rows, const char *fun static void PrintTable(const std::vector> &rows, const char *colsep) { std::vector max_col_widths; for (const auto &row : rows) { - int num_columns = row.size(); - for (int c = 0; c < num_columns; c++) { + auto num_columns = row.size(); + for (size_t c = 0; c < num_columns; c++) { int num_unicodes = 0; for (char i : row[c]) { if ((i & 0xC0) != 0x80) { @@ -113,6 +113,7 @@ static void PrintTable(const std::vector> &rows, const } std::vector col_width_patterns; + 
col_width_patterns.reserve(max_col_widths.size()); for (int max_col_width : max_col_widths) { col_width_patterns.push_back(std::string("%-") + std::to_string(max_col_width) + "s"); } @@ -285,7 +286,7 @@ bool AsciiLikelyListItem(const std::string &word) { // ========== Brain Dead Language Model (Tesseract Version) ================ // Return the first Unicode Codepoint from werd[pos]. -int UnicodeFor(const UNICHARSET *u, const WERD_CHOICE *werd, int pos) { +static int UnicodeFor(const UNICHARSET *u, const WERD_CHOICE *werd, unsigned pos) { if (!u || !werd || pos > werd->length()) { return 0; } @@ -297,33 +298,32 @@ int UnicodeFor(const UNICHARSET *u, const WERD_CHOICE *werd, int pos) { class UnicodeSpanSkipper { public: UnicodeSpanSkipper(const UNICHARSET *unicharset, const WERD_CHOICE *word) - : u_(unicharset), word_(word) { - wordlen_ = word->length(); + : u_(unicharset), word_(word), wordlen_(word->length()) { } // Given an input position, return the first position >= pos not punc. - int SkipPunc(int pos); + unsigned SkipPunc(unsigned pos); // Given an input position, return the first position >= pos not digit. - int SkipDigits(int pos); + unsigned SkipDigits(unsigned pos); // Given an input position, return the first position >= pos not roman. - int SkipRomans(int pos); + unsigned SkipRomans(unsigned pos); // Given an input position, return the first position >= pos not alpha. 
- int SkipAlpha(int pos); + unsigned SkipAlpha(unsigned pos); private: const UNICHARSET *u_; const WERD_CHOICE *word_; - int wordlen_; + unsigned wordlen_; }; -int UnicodeSpanSkipper::SkipPunc(int pos) { +unsigned UnicodeSpanSkipper::SkipPunc(unsigned pos) { while (pos < wordlen_ && u_->get_ispunctuation(word_->unichar_id(pos))) { pos++; } return pos; } -int UnicodeSpanSkipper::SkipDigits(int pos) { +unsigned UnicodeSpanSkipper::SkipDigits(unsigned pos) { while (pos < wordlen_ && (u_->get_isdigit(word_->unichar_id(pos)) || IsDigitLike(UnicodeFor(u_, word_, pos)))) { pos++; @@ -331,7 +331,7 @@ int UnicodeSpanSkipper::SkipDigits(int pos) { return pos; } -int UnicodeSpanSkipper::SkipRomans(int pos) { +unsigned UnicodeSpanSkipper::SkipRomans(unsigned pos) { const char *kRomans = "ivxlmdIVXLMD"; while (pos < wordlen_) { int ch = UnicodeFor(u_, word_, pos); @@ -343,7 +343,7 @@ int UnicodeSpanSkipper::SkipRomans(int pos) { return pos; } -int UnicodeSpanSkipper::SkipAlpha(int pos) { +unsigned UnicodeSpanSkipper::SkipAlpha(unsigned pos) { while (pos < wordlen_ && u_->get_isalpha(word_->unichar_id(pos))) { pos++; } @@ -386,13 +386,13 @@ static bool UniLikelyListItem(const UNICHARSET *u, const WERD_CHOICE *werd) { UnicodeSpanSkipper m(u, werd); int num_segments = 0; - int pos = 0; + unsigned pos = 0; while (pos < werd->length() && num_segments < 3) { - int numeral_start = m.SkipPunc(pos); + auto numeral_start = m.SkipPunc(pos); if (numeral_start > pos + 1) { break; } - int numeral_end = m.SkipRomans(numeral_start); + auto numeral_end = m.SkipRomans(numeral_start); if (numeral_end == numeral_start) { numeral_end = m.SkipDigits(numeral_start); if (numeral_end == numeral_start) { @@ -2314,14 +2314,14 @@ void CanonicalizeDetectionResults(std::vector *row_owners, PARA_LIST *pa void DetectParagraphs(int debug_level, std::vector *row_infos, std::vector *row_owners, PARA_LIST *paragraphs, std::vector *models) { - std::vector rows; ParagraphTheory theory(models); // Initialize 
row_owners to be a bunch of nullptr pointers. + row_owners->clear(); row_owners->resize(row_infos->size()); // Set up row scratch registers for the main algorithm. - rows.resize(row_infos->size(), RowScratchRegisters()); + std::vector rows(row_infos->size()); for (unsigned i = 0; i < row_infos->size(); i++) { rows[i].Init((*row_infos)[i]); } @@ -2353,7 +2353,7 @@ void DetectParagraphs(int debug_level, std::vector *row_infos, LeftoverSegments(rows, &leftovers2, leftover.begin, leftover.end); bool pass2a_was_useful = leftovers2.size() > 1 || - (leftovers2.size() == 1 && (leftovers2[0].begin != 0 || leftovers2[0].end != rows.size())); + (leftovers2.size() == 1 && (leftovers2[0].begin != 0 || static_cast(leftovers2[0].end) != rows.size())); if (pass2a_was_useful) { for (auto &leftover2 : leftovers2) { StrongEvidenceClassify(debug_level, &rows, leftover2.begin, leftover2.end, &theory); diff --git a/src/ccmain/paragraphs_internal.h b/src/ccmain/paragraphs_internal.h index 516fd553e..91d01110b 100644 --- a/src/ccmain/paragraphs_internal.h +++ b/src/ccmain/paragraphs_internal.h @@ -34,9 +34,6 @@ class WERD_CHOICE; TESS_API bool AsciiLikelyListItem(const std::string &word); -// Return the first Unicode Codepoint from werd[pos]. -int UnicodeFor(const UNICHARSET *u, const WERD_CHOICE *werd, int pos); - // Set right word attributes given either a unicharset and werd or a utf8 // string. 
TESS_API diff --git a/src/ccmain/paramsd.cpp b/src/ccmain/paramsd.cpp index 38d0d5cb1..83fe6bd2a 100644 --- a/src/ccmain/paramsd.cpp +++ b/src/ccmain/paramsd.cpp @@ -278,7 +278,7 @@ void ParamsEditor::Notify(const SVEvent *sve) { } else { ParamContent *vc = ParamContent::GetParamContentById(sve->command_id); vc->SetValue(param); - sv_window_->AddMessage("Setting %s to %s", vc->GetName(), vc->GetValue().c_str()); + sv_window_->AddMessageF("Setting %s to %s", vc->GetName(), vc->GetValue().c_str()); } } } @@ -336,11 +336,7 @@ void ParamsEditor::WriteParams(char *filename, bool changes_only) { fp = fopen(filename, "wb"); // can we write to it? if (fp == nullptr) { - sv_window_->AddMessage( - "Can't write to file " - "%s" - "", - filename); + sv_window_->AddMessageF("Can't write to file %s", filename); return; } for (auto &iter : vcMap) { diff --git a/src/ccmain/pgedit.cpp b/src/ccmain/pgedit.cpp index d4151fded..9e4902bbb 100644 --- a/src/ccmain/pgedit.cpp +++ b/src/ccmain/pgedit.cpp @@ -122,13 +122,6 @@ INT_VAR(editor_image_ypos, 10, "Editor image Y Pos"); static INT_VAR(editor_image_menuheight, 50, "Add to image height for menu bar"); INT_VAR(editor_image_word_bb_color, ScrollView::BLUE, "Word bounding box colour"); INT_VAR(editor_image_blob_bb_color, ScrollView::YELLOW, "Blob bounding box colour"); -INT_VAR(editor_image_text_color, ScrollView::WHITE, "Correct text colour"); - -STRING_VAR(editor_dbwin_name, "EditorDBWin", "Editor debug window name"); -INT_VAR(editor_dbwin_xpos, 50, "Editor debug window X Pos"); -INT_VAR(editor_dbwin_ypos, 500, "Editor debug window Y Pos"); -INT_VAR(editor_dbwin_height, 24, "Editor debug window height"); -INT_VAR(editor_dbwin_width, 80, "Editor debug window width"); STRING_VAR(editor_word_name, "BlnWords", "BL normalized word window"); INT_VAR(editor_word_xpos, 60, "Word window X Pos"); diff --git a/src/ccmain/pgedit.h b/src/ccmain/pgedit.h index a7c9d9b73..1f41e8572 100644 --- a/src/ccmain/pgedit.h +++ b/src/ccmain/pgedit.h @@ -43,25 
+43,16 @@ private: #endif // !GRAPHICS_DISABLED extern BLOCK_LIST *current_block_list; -extern STRING_VAR_H(editor_image_win_name, "EditorImage", "Editor image window name"); -extern INT_VAR_H(editor_image_xpos, 590, "Editor image X Pos"); -extern INT_VAR_H(editor_image_ypos, 10, "Editor image Y Pos"); -extern INT_VAR_H(editor_image_height, 680, "Editor image height"); -extern INT_VAR_H(editor_image_width, 655, "Editor image width"); -extern INT_VAR_H(editor_image_word_bb_color, BLUE, "Word bounding box colour"); -extern INT_VAR_H(editor_image_blob_bb_color, YELLOW, "Blob bounding box colour"); -extern INT_VAR_H(editor_image_text_color, WHITE, "Correct text colour"); -extern STRING_VAR_H(editor_dbwin_name, "EditorDBWin", "Editor debug window name"); -extern INT_VAR_H(editor_dbwin_xpos, 50, "Editor debug window X Pos"); -extern INT_VAR_H(editor_dbwin_ypos, 500, "Editor debug window Y Pos"); -extern INT_VAR_H(editor_dbwin_height, 24, "Editor debug window height"); -extern INT_VAR_H(editor_dbwin_width, 80, "Editor debug window width"); -extern STRING_VAR_H(editor_word_name, "BlnWords", "BL normalised word window"); -extern INT_VAR_H(editor_word_xpos, 60, "Word window X Pos"); -extern INT_VAR_H(editor_word_ypos, 510, "Word window Y Pos"); -extern INT_VAR_H(editor_word_height, 240, "Word window height"); -extern INT_VAR_H(editor_word_width, 655, "Word window width"); -extern double_VAR_H(editor_smd_scale_factor, 1.0, "Scaling for smd image"); +extern STRING_VAR_H(editor_image_win_name); +extern INT_VAR_H(editor_image_xpos); +extern INT_VAR_H(editor_image_ypos); +extern INT_VAR_H(editor_image_word_bb_color); +extern INT_VAR_H(editor_image_blob_bb_color); +extern STRING_VAR_H(editor_word_name); +extern INT_VAR_H(editor_word_xpos); +extern INT_VAR_H(editor_word_ypos); +extern INT_VAR_H(editor_word_height); +extern INT_VAR_H(editor_word_width); } // namespace tesseract diff --git a/src/ccmain/reject.cpp b/src/ccmain/reject.cpp index f184b00d3..e3d53925e 100644 --- 
a/src/ccmain/reject.cpp +++ b/src/ccmain/reject.cpp @@ -94,9 +94,6 @@ void Tesseract::set_done(WERD_RES *word, int16_t pass) { * Sets a reject map for the word. *************************************************************************/ void Tesseract::make_reject_map(WERD_RES *word, ROW *row, int16_t pass) { - int i; - int offset; - flip_0O(word); check_debug_pt(word, -1); // For trap only set_done(word, pass); // Set acceptance @@ -145,7 +142,7 @@ void Tesseract::make_reject_map(WERD_RES *word, ROW *row, int16_t pass) { // PASSED TEST } else if (best_choice->permuter() == NUMBER_PERM) { if (rej_alphas_in_number_perm) { - for (i = 0, offset = 0; best_choice->unichar_string()[offset] != '\0'; + for (int i = 0, offset = 0; best_choice->unichar_string()[offset] != '\0'; offset += best_choice->unichar_lengths()[i++]) { if (word->reject_map[i].accepted() && word->uch_set->get_isalpha(best_choice->unichar_string().c_str() + offset, @@ -210,7 +207,7 @@ void Tesseract::reject_I_1_L(WERD_RES *word) { void reject_poor_matches(WERD_RES *word) { float threshold = compute_reject_threshold(word->best_choice); - for (int i = 0; i < word->best_choice->length(); ++i) { + for (unsigned i = 0; i < word->best_choice->length(); ++i) { if (word->best_choice->unichar_id(i) == UNICHAR_SPACE) { word->reject_map[i].setrej_tess_failure(); } else if (word->best_choice->certainty(i) < threshold) { @@ -232,16 +229,16 @@ float compute_reject_threshold(WERD_CHOICE *word) { float bestgap = 0.0f; // biggest gap float gapstart; // bottom of gap - int blob_count = word->length(); + auto blob_count = word->length(); std::vector ratings; ratings.reserve(blob_count); - for (int i = 0; i < blob_count; ++i) { + for (unsigned i = 0; i < blob_count; ++i) { ratings.push_back(word->certainty(i)); } std::sort(ratings.begin(), ratings.end()); gapstart = ratings[0] - 1; // all reject if none better if (blob_count >= 3) { - for (int index = 0; index < blob_count - 1; index++) { + for (unsigned index = 0; index < 
blob_count - 1; index++) { if (ratings[index + 1] - ratings[index] > bestgap) { bestgap = ratings[index + 1] - ratings[index]; // find biggest @@ -514,14 +511,12 @@ bool Tesseract::word_contains_non_1_digit(const char *word, const char *word_len * Don't unreject LONE accepted 1Il conflict set chars *************************************************************************/ void Tesseract::dont_allow_1Il(WERD_RES *word) { - int i = 0; - int offset; int word_len = word->reject_map.length(); const char *s = word->best_choice->unichar_string().c_str(); const char *lengths = word->best_choice->unichar_lengths().c_str(); bool accepted_1Il = false; - for (i = 0, offset = 0; i < word_len; offset += word->best_choice->unichar_lengths()[i++]) { + for (int i = 0, offset = 0; i < word_len; offset += word->best_choice->unichar_lengths()[i++]) { if (word->reject_map[i].accepted()) { if (conflict_set_I_l_1.contains(s[offset])) { accepted_1Il = true; @@ -537,7 +532,7 @@ void Tesseract::dont_allow_1Il(WERD_RES *word) { return; // Nothing to worry about } - for (i = 0, offset = 0; i < word_len; offset += word->best_choice->unichar_lengths()[i++]) { + for (int i = 0, offset = 0; i < word_len; offset += word->best_choice->unichar_lengths()[i++]) { if (conflict_set_I_l_1.contains(s[offset]) && word->reject_map[i].accepted()) { word->reject_map[i].setrej_postNN_1Il(); } @@ -547,7 +542,7 @@ void Tesseract::dont_allow_1Il(WERD_RES *word) { int16_t Tesseract::count_alphanums(WERD_RES *word_res) { int count = 0; const WERD_CHOICE *best_choice = word_res->best_choice; - for (int i = 0; i < word_res->reject_map.length(); ++i) { + for (unsigned i = 0; i < word_res->reject_map.length(); ++i) { if ((word_res->reject_map[i].accepted()) && (word_res->uch_set->get_isalpha(best_choice->unichar_id(i)) || word_res->uch_set->get_isdigit(best_choice->unichar_id(i)))) { @@ -568,9 +563,6 @@ void Tesseract::reject_mostly_rejects(WERD_RES *word) { } bool Tesseract::repeated_nonalphanum_wd(WERD_RES *word, ROW 
*row) { - int16_t char_quality; - int16_t accepted_char_quality; - if (word->best_choice->unichar_lengths().length() <= 1) { return false; } @@ -580,15 +572,17 @@ bool Tesseract::repeated_nonalphanum_wd(WERD_RES *word, ROW *row) { } UNICHAR_ID uch_id = word->best_choice->unichar_id(0); - for (int i = 1; i < word->best_choice->length(); ++i) { + for (unsigned i = 1; i < word->best_choice->length(); ++i) { if (word->best_choice->unichar_id(i) != uch_id) { return false; } } + int16_t char_quality; + int16_t accepted_char_quality; word_char_quality(word, &char_quality, &accepted_char_quality); - if ((word->best_choice->unichar_lengths().length() == char_quality) && + if ((word->best_choice->unichar_lengths().length() == static_cast(char_quality)) && (char_quality == accepted_char_quality)) { return true; } else { @@ -607,7 +601,6 @@ int16_t Tesseract::safe_dict_word(const WERD_RES *werd_res) { // in word_res->best_choice. void Tesseract::flip_hyphens(WERD_RES *word_res) { WERD_CHOICE *best_choice = word_res->best_choice; - int i; int prev_right = -9999; int next_left; TBOX out_box; @@ -617,9 +610,9 @@ void Tesseract::flip_hyphens(WERD_RES *word_res) { return; } - int num_blobs = word_res->rebuild_word->NumBlobs(); + auto num_blobs = word_res->rebuild_word->NumBlobs(); UNICHAR_ID unichar_dash = word_res->uch_set->unichar_to_id("-"); - for (i = 0; i < best_choice->length() && i < num_blobs; ++i) { + for (unsigned i = 0; i < best_choice->length() && i < num_blobs; ++i) { TBLOB *blob = word_res->rebuild_word->blobs[i]; out_box = blob->bounding_box(); if (i + 1 == num_blobs) { @@ -666,15 +659,14 @@ void Tesseract::flip_hyphens(WERD_RES *word_res) { // in word_res->best_choice. 
void Tesseract::flip_0O(WERD_RES *word_res) { WERD_CHOICE *best_choice = word_res->best_choice; - int i; TBOX out_box; if (!tessedit_flip_0O) { return; } - int num_blobs = word_res->rebuild_word->NumBlobs(); - for (i = 0; i < best_choice->length() && i < num_blobs; ++i) { + auto num_blobs = word_res->rebuild_word->NumBlobs(); + for (unsigned i = 0; i < best_choice->length() && i < num_blobs; ++i) { TBLOB *blob = word_res->rebuild_word->blobs[i]; if (word_res->uch_set->get_isupper(best_choice->unichar_id(i)) || word_res->uch_set->get_isdigit(best_choice->unichar_id(i))) { @@ -691,7 +683,7 @@ void Tesseract::flip_0O(WERD_RES *word_res) { unichar_O == INVALID_UNICHAR_ID || !word_res->uch_set->get_enabled(unichar_O)) { return; // 0 or O are not present/enabled in unicharset } - for (i = 1; i < best_choice->length(); ++i) { + for (unsigned i = 1; i < best_choice->length(); ++i) { if (best_choice->unichar_id(i) == unichar_0 || best_choice->unichar_id(i) == unichar_O) { /* A0A */ if ((i + 1) < best_choice->length() && diff --git a/src/ccmain/resultiterator.cpp b/src/ccmain/resultiterator.cpp index db133501c..bf3676f07 100644 --- a/src/ccmain/resultiterator.cpp +++ b/src/ccmain/resultiterator.cpp @@ -228,7 +228,7 @@ void ResultIterator::CalculateBlobOrder(std::vector *blob_indices) const { i = j; } } - ASSERT_HOST(blob_indices->size() == word_length_); + ASSERT_HOST(blob_indices->size() == static_cast(word_length_)); } static void PrintScriptDirs(const std::vector &dirs) { @@ -501,7 +501,7 @@ bool ResultIterator::Next(PageIteratorLevel level) { case RIL_SYMBOL: { std::vector blob_order; CalculateBlobOrder(&blob_order); - int next_blob = 0; + unsigned next_blob = 0; while (next_blob < blob_order.size() && blob_index_ != blob_order[next_blob]) { next_blob++; } @@ -731,10 +731,12 @@ void ResultIterator::IterateAndAppendUTF8TextlineText(std::string *text) { std::vector textline_order; std::vector dirs; CalculateTextlineOrder(current_paragraph_is_ltr_, *this, &dirs, 
&textline_order); - tprintf("Strong Script dirs [%p/P=%s]: ", it_->row(), + tprintf("Strong Script dirs [%p/P=%s]: ", + static_cast(it_->row()), current_paragraph_is_ltr_ ? "ltr" : "rtl"); PrintScriptDirs(dirs); - tprintf("Logical textline order [%p/P=%s]: ", it_->row(), + tprintf("Logical textline order [%p/P=%s]: ", + static_cast(it_->row()), current_paragraph_is_ltr_ ? "ltr" : "rtl"); for (int i : textline_order) { tprintf("%d ", i); diff --git a/src/ccmain/superscript.cpp b/src/ccmain/superscript.cpp index 562c9e922..68ff974b1 100644 --- a/src/ccmain/superscript.cpp +++ b/src/ccmain/superscript.cpp @@ -502,13 +502,13 @@ WERD_RES *Tesseract::TrySuperscriptSplits(int num_chopped_leading, float leading */ bool Tesseract::BelievableSuperscript(bool debug, const WERD_RES &word, float certainty_threshold, int *left_ok, int *right_ok) const { - int initial_ok_run_count = 0; - int ok_run_count = 0; + unsigned initial_ok_run_count = 0; + unsigned ok_run_count = 0; float worst_certainty = 0.0f; const WERD_CHOICE &wc = *word.best_choice; const UnicityTable &fontinfo_table = get_fontinfo_table(); - for (int i = 0; i < wc.length(); i++) { + for (unsigned i = 0; i < wc.length(); i++) { TBLOB *blob = word.rebuild_word->blobs[i]; UNICHAR_ID unichar_id = wc.unichar_id(i); float char_certainty = wc.certainty(i); diff --git a/src/ccmain/tessedit.cpp b/src/ccmain/tessedit.cpp index b75666110..ad9299401 100644 --- a/src/ccmain/tessedit.cpp +++ b/src/ccmain/tessedit.cpp @@ -23,6 +23,8 @@ # include "config_auto.h" #endif +#include // for std::regex_match + #include "control.h" #include "matchdefs.h" #include "pageres.h" @@ -73,15 +75,12 @@ void Tesseract::read_config_file(const char *filename, SetParamConstraint constr // from the language-specific config file (stored in [lang].traineddata), from // the config files specified on the command line or left as the default // OEM_TESSERACT_ONLY if none of the configs specify this variable. 
-bool Tesseract::init_tesseract_lang_data(const std::string &arg0, const std::string &textbase, +bool Tesseract::init_tesseract_lang_data(const std::string &arg0, const std::string &language, OcrEngineMode oem, char **configs, int configs_size, const std::vector *vars_vec, const std::vector *vars_values, bool set_only_non_debug_params, TessdataManager *mgr) { - // Set the basename, compute the data directory. - main_setup(arg0, textbase); - // Set the language data path prefix lang = !language.empty() ? language : "eng"; language_data_path_prefix = datadir; @@ -247,6 +246,15 @@ static bool IsStrInList(const std::string &str, const std::vector & void Tesseract::ParseLanguageString(const std::string &lang_str, std::vector *to_load, std::vector *not_to_load) { std::string remains(lang_str); + // Look whether the model file uses a prefix which must be applied to + // included model files as well. + std::regex e("(.*)/[^/]*"); + std::cmatch cm; + std::string prefix; + if (std::regex_match(lang.c_str(), cm, e, std::regex_constants::match_default)) { + // A prefix was found. + prefix = cm[1].str() + "/"; + } while (!remains.empty()) { // Find the start of the lang code and which vector to add to. const char *start = remains.c_str(); @@ -268,6 +276,7 @@ void Tesseract::ParseLanguageString(const std::string &lang_str, std::vectorpush_back(lang_code); @@ -291,19 +300,26 @@ int Tesseract::init_tesseract(const std::string &arg0, const std::string &textba for (auto *lang : sub_langs_) { delete lang; } + + // Set the basename, compute the data directory. + main_setup(arg0, textbase); + sub_langs_.clear(); // Find the first loadable lang and load into this. // Add any languages that this language requires bool loaded_primary = false; // Load the rest into sub_langs_. 
- for (unsigned lang_index = 0; lang_index < langs_to_load.size(); ++lang_index) { - if (!IsStrInList(langs_to_load[lang_index], langs_not_to_load)) { - const char *lang_str = langs_to_load[lang_index].c_str(); + // A range based for loop does not work here because langs_to_load + // might be changed in the loop when a new submodel is found. + for (auto &lang_to_load : langs_to_load) { + if (!IsStrInList(lang_to_load, langs_not_to_load)) { + const char *lang_str = lang_to_load.c_str(); Tesseract *tess_to_init; if (!loaded_primary) { tess_to_init = this; } else { tess_to_init = new Tesseract; + tess_to_init->main_setup(arg0, textbase); } int result = tess_to_init->init_tesseract_internal(arg0, textbase, lang_str, oem, configs, @@ -316,7 +332,7 @@ int Tesseract::init_tesseract(const std::string &arg0, const std::string &textba if (result < 0) { tprintf("Failed loading language '%s'\n", lang_str); } else { - ParseLanguageString(tess_to_init->tessedit_load_sublangs.c_str(), &langs_to_load, + ParseLanguageString(tess_to_init->tessedit_load_sublangs, &langs_to_load, &langs_not_to_load); loaded_primary = true; } @@ -327,13 +343,13 @@ int Tesseract::init_tesseract(const std::string &arg0, const std::string &textba } else { sub_langs_.push_back(tess_to_init); // Add any languages that this language requires - ParseLanguageString(tess_to_init->tessedit_load_sublangs.c_str(), &langs_to_load, + ParseLanguageString(tess_to_init->tessedit_load_sublangs, &langs_to_load, &langs_not_to_load); } } } } - if (!loaded_primary) { + if (!loaded_primary && !langs_to_load.empty()) { tprintf("Tesseract couldn't load any languages!\n"); return -1; // Couldn't load any language! 
} @@ -384,7 +400,7 @@ int Tesseract::init_tesseract_internal(const std::string &arg0, const std::strin const std::vector *vars_vec, const std::vector *vars_values, bool set_only_non_debug_params, TessdataManager *mgr) { - if (!init_tesseract_lang_data(arg0, textbase, language, oem, configs, configs_size, vars_vec, + if (!init_tesseract_lang_data(arg0, language, oem, configs, configs_size, vars_vec, vars_values, set_only_non_debug_params, mgr)) { return -1; } @@ -412,7 +428,7 @@ static void CollectFonts(const UnicityTable &new_fonts, // Helper assigns an id to lang_fonts using the index in all_fonts table. static void AssignIds(const UnicityTable &all_fonts, UnicityTable *lang_fonts) { for (int i = 0; i < lang_fonts->size(); ++i) { - int index = all_fonts.get_id(lang_fonts->at(i)); + auto index = all_fonts.get_index(lang_fonts->at(i)); lang_fonts->at(i).universal_id = index; } } @@ -438,19 +454,6 @@ void Tesseract::SetupUniversalFontIds() { font_table_size_ = all_fonts.size(); } -// init the LM component -int Tesseract::init_tesseract_lm(const std::string &arg0, const std::string &textbase, - const std::string &language, TessdataManager *mgr) { - if (!init_tesseract_lang_data(arg0, textbase, language, OEM_TESSERACT_ONLY, nullptr, 0, nullptr, - nullptr, false, mgr)) { - return -1; - } - getDict().SetupForLoad(Dict::GlobalDawgCache()); - getDict().Load(lang, mgr); - getDict().FinishLoad(); - return 0; -} - #endif // ndef DISABLED_LEGACY_ENGINE void Tesseract::end_tesseract() { diff --git a/src/ccmain/tesseractclass.cpp b/src/ccmain/tesseractclass.cpp index 3ddabf537..8f9f451f8 100644 --- a/src/ccmain/tesseractclass.cpp +++ b/src/ccmain/tesseractclass.cpp @@ -46,6 +46,7 @@ # include "equationdetect.h" #endif #include "lstmrecognizer.h" +#include "thresholder.h" // for ThresholdMethod namespace tesseract { @@ -75,10 +76,40 @@ Tesseract::Tesseract() " (Values from PageSegMode enum in tesseract/publictypes.h)", this->params()) , INT_MEMBER(thresholding_method, - 
static_cast(tesseract::ThresholdMethod::Otsu), - "Thresholding " - "method: 0 = Otsu, 1 = Adaptive Otsu, 2 = Sauvola", + static_cast(ThresholdMethod::Otsu), + "Thresholding method: 0 = Otsu, 1 = LeptonicaOtsu, 2 = " + "Sauvola", this->params()) + , BOOL_MEMBER(thresholding_debug, false, + "Debug the thresholding process", + this->params()) + , double_MEMBER(thresholding_window_size, 0.33, + "Window size for measuring local statistics (to be " + "multiplied by image DPI). " + "This parameter is used by the Sauvola thresolding method", + this->params()) + , double_MEMBER(thresholding_kfactor, 0.34, + "Factor for reducing threshold due to variance. " + "This parameter is used by the Sauvola thresolding method." + " Normal range: 0.2-0.5", + this->params()) + , double_MEMBER(thresholding_tile_size, 0.33, + "Desired tile size (to be multiplied by image DPI). " + "This parameter is used by the LeptonicaOtsu thresolding " + "method", + this->params()) + , double_MEMBER(thresholding_smooth_kernel_size, 0.0, + "Size of convolution kernel applied to threshold array " + "(to be multiplied by image DPI). Use 0 for no smoothing. " + "This parameter is used by the LeptonicaOtsu thresolding " + "method", + this->params()) + , double_MEMBER(thresholding_score_fraction, 0.1, + "Fraction of the max Otsu score. " + "This parameter is used by the LeptonicaOtsu thresolding " + "method. " + "For standard Otsu use 0.0, otherwise 0.1 is recommended", + this->params()) , INT_INIT_MEMBER(tessedit_ocr_engine_mode, tesseract::OEM_DEFAULT, "Which OCR engine(s) to run (Tesseract, LSTM, both)." 
" Defaults to loading and running the most accurate" @@ -369,7 +400,9 @@ Tesseract::Tesseract() "instance is not going to be used for OCR but say only " "for layout analysis.", this->params()) +#ifndef DISABLED_LEGACY_ENGINE , BOOL_MEMBER(textord_equation_detect, false, "Turn on equation detector", this->params()) +#endif // ndef DISABLED_LEGACY_ENGINE , BOOL_MEMBER(textord_tabfind_vertical_text, true, "Enable vertical detection", this->params()) , BOOL_MEMBER(textord_tabfind_force_vertical_text, false, "Force using vertical text page mode", this->params()) @@ -403,7 +436,7 @@ Tesseract::Tesseract() "information is lost due to the cut off at 0. The standard value is " "5", this->params()) - , BOOL_MEMBER(pageseg_apply_music_mask, true, + , BOOL_MEMBER(pageseg_apply_music_mask, false, "Detect music staff and remove intersecting components", this->params()) , @@ -421,7 +454,9 @@ Tesseract::Tesseract() , reskew_(1.0f, 0.0f) , most_recently_used_(this) , font_table_size_(0) +#ifndef DISABLED_LEGACY_ENGINE , equ_detect_(nullptr) +#endif // ndef DISABLED_LEGACY_ENGINE , lstm_recognizer_(nullptr) , train_line_page_num_(0) {} diff --git a/src/ccmain/tesseractclass.h b/src/ccmain/tesseractclass.h index 63853fcad..94681ab61 100644 --- a/src/ccmain/tesseractclass.h +++ b/src/ccmain/tesseractclass.h @@ -69,7 +69,9 @@ class WERD_RES; class ColumnFinder; class DocumentData; +#ifndef DISABLED_LEGACY_ENGINE class EquationDetect; +#endif // ndef DISABLED_LEGACY_ENGINE class ImageData; class LSTMRecognizer; class Tesseract; @@ -110,7 +112,7 @@ class Tesseract; // NOTE: that each level contains members that correspond to global // data that is defined (and used) at that level, not necessarily where // the type is defined so for instance: -// BOOL_VAR_H(textord_show_blobs, false, "Display unsorted blobs"); +// BOOL_VAR_H(textord_show_blobs); // goes inside the Textord class, not the cc_util class. // A collection of various variables for statistics and debugging. 
@@ -189,8 +191,10 @@ public: // Clear the document dictionary for this and all subclassifiers. void ResetDocumentDictionary(); +#ifndef DISABLED_LEGACY_ENGINE // Set the equation detector. void SetEquationDetect(EquationDetect *detector); +#endif // ndef DISABLED_LEGACY_ENGINE // Simple accessors. const FCOORD &reskew() const { @@ -523,13 +527,10 @@ public: // instances of the same font loaded. void SetupUniversalFontIds(); - int init_tesseract_lm(const std::string &arg0, const std::string &textbase, - const std::string &language, TessdataManager *mgr); - void recognize_page(std::string &image_name); void end_tesseract(); - bool init_tesseract_lang_data(const std::string &arg0, const std::string &textbase, + bool init_tesseract_lang_data(const std::string &arg0, const std::string &language, OcrEngineMode oem, char **configs, int configs_size, const std::vector *vars_vec, const std::vector *vars_values, @@ -593,7 +594,7 @@ public: void recog_word_recursive(WERD_RES *word); void recog_word(WERD_RES *word); void split_and_recog_word(WERD_RES *word); - void split_word(WERD_RES *word, int split_pt, WERD_RES **right_piece, + void split_word(WERD_RES *word, unsigned split_pt, WERD_RES **right_piece, BlamerBundle **orig_blamer_bundle) const; void join_words(WERD_RES *word, WERD_RES *word2, BlamerBundle *orig_bb) const; //// fixspace.cpp /////////////////////////////////////////////////////// @@ -722,8 +723,8 @@ public: // vector holding classification results for a sequence of consecutive // blobs, with index 0 being a single blob, index 1 being 2 blobs etc. void SearchForText(const std::vector *choices, int choices_pos, - int choices_length, const std::vector &target_text, - int text_index, float rating, std::vector *segmentation, + unsigned choices_length, const std::vector &target_text, + unsigned text_index, float rating, std::vector *segmentation, float *best_rating, std::vector *best_segmentation); // Counts up the labelled words and the blobs within. 
// Deletes all unused or emptied words, counting the unused ones. @@ -748,282 +749,217 @@ public: float ComputeCompatibleXheight(WERD_RES *word_res, float *baseline_shift); //// Data members /////////////////////////////////////////////////////// // TODO(ocr-team): Find and remove obsolete parameters. - BOOL_VAR_H(tessedit_resegment_from_boxes, false, "Take segmentation and labeling from box file"); - BOOL_VAR_H(tessedit_resegment_from_line_boxes, false, - "Conversion of word/line box file to char box file"); - BOOL_VAR_H(tessedit_train_from_boxes, false, "Generate training data from boxed chars"); - BOOL_VAR_H(tessedit_make_boxes_from_boxes, false, "Generate more boxes from boxed chars"); - BOOL_VAR_H(tessedit_train_line_recognizer, false, - "Break input into lines and remap boxes if present"); - BOOL_VAR_H(tessedit_dump_pageseg_images, false, - "Dump intermediate images made during page segmentation"); - BOOL_VAR_H(tessedit_do_invert, true, "Try inverting the image in `LSTMRecognizeWord`"); - INT_VAR_H(tessedit_pageseg_mode, PSM_SINGLE_BLOCK, - "Page seg mode: 0=osd only, 1=auto+osd, 2=auto, 3=col, 4=block," - " 5=line, 6=word, 7=char" - " (Values from PageSegMode enum in tesseract/publictypes.h)"); - INT_VAR_H(thresholding_method, - static_cast(tesseract::ThreshMethod::Otsu), "Thresholding " - "method: 0 = Otsu, 1 = Adaptive Otsu, 2 = Sauvola"); - INT_VAR_H(tessedit_ocr_engine_mode, tesseract::OEM_DEFAULT, - "Which OCR engine(s) to run (Tesseract, LSTM, both). 
Defaults" - " to loading and running the most accurate available."); - STRING_VAR_H(tessedit_char_blacklist, "", "Blacklist of chars not to recognize"); - STRING_VAR_H(tessedit_char_whitelist, "", "Whitelist of chars to recognize"); - STRING_VAR_H(tessedit_char_unblacklist, "", "List of chars to override tessedit_char_blacklist"); - BOOL_VAR_H(tessedit_ambigs_training, false, "Perform training for ambiguities"); - INT_VAR_H(pageseg_devanagari_split_strategy, tesseract::ShiroRekhaSplitter::NO_SPLIT, - "Whether to use the top-line splitting process for Devanagari " - "documents while performing page-segmentation."); - INT_VAR_H(ocr_devanagari_split_strategy, tesseract::ShiroRekhaSplitter::NO_SPLIT, - "Whether to use the top-line splitting process for Devanagari " - "documents while performing ocr."); - STRING_VAR_H(tessedit_write_params_to_file, "", "Write all parameters to the given file."); - BOOL_VAR_H(tessedit_adaption_debug, false, "Generate and print debug information for adaption"); - INT_VAR_H(bidi_debug, 0, "Debug level for BiDi"); - INT_VAR_H(applybox_debug, 1, "Debug level"); - INT_VAR_H(applybox_page, 0, "Page number to apply boxes from"); - STRING_VAR_H(applybox_exposure_pattern, ".exp", - "Exposure value follows this pattern in the image" - " filename. The name of the image files are expected" - " to be in the form [lang].[fontname].exp[num].tif"); - BOOL_VAR_H(applybox_learn_chars_and_char_frags_mode, false, - "Learn both character fragments (as is done in the" - " special low exposure mode) as well as unfragmented" - " characters."); - BOOL_VAR_H(applybox_learn_ngrams_mode, false, - "Each bounding box is assumed to contain ngrams. 
Only" - " learn the ngrams whose outlines overlap horizontally."); - BOOL_VAR_H(tessedit_display_outwords, false, "Draw output words"); - BOOL_VAR_H(tessedit_dump_choices, false, "Dump char choices"); - BOOL_VAR_H(tessedit_timing_debug, false, "Print timing stats"); - BOOL_VAR_H(tessedit_fix_fuzzy_spaces, true, "Try to improve fuzzy spaces"); - BOOL_VAR_H(tessedit_unrej_any_wd, false, "Don't bother with word plausibility"); - BOOL_VAR_H(tessedit_fix_hyphens, true, "Crunch double hyphens?"); - BOOL_VAR_H(tessedit_enable_doc_dict, true, "Add words to the document dictionary"); - BOOL_VAR_H(tessedit_debug_fonts, false, "Output font info per char"); - INT_VAR_H(tessedit_font_id, 0, "Disable font detection and use the font" - " corresponding to the ID specified instead"); - BOOL_VAR_H(tessedit_debug_block_rejection, false, "Block and Row stats"); - BOOL_VAR_H(tessedit_enable_bigram_correction, true, - "Enable correction based on the word bigram dictionary."); - BOOL_VAR_H(tessedit_enable_dict_correction, false, - "Enable single word correction based on the dictionary."); - INT_VAR_H(tessedit_bigram_debug, 0, - "Amount of debug output for bigram " - "correction."); - BOOL_VAR_H(enable_noise_removal, true, - "Remove and conditionally reassign small outlines when they" - " confuse layout analysis, determining diacritics vs noise"); - INT_VAR_H(debug_noise_removal, 0, "Debug reassignment of small outlines"); + BOOL_VAR_H(tessedit_resegment_from_boxes); + BOOL_VAR_H(tessedit_resegment_from_line_boxes); + BOOL_VAR_H(tessedit_train_from_boxes); + BOOL_VAR_H(tessedit_make_boxes_from_boxes); + BOOL_VAR_H(tessedit_train_line_recognizer); + BOOL_VAR_H(tessedit_dump_pageseg_images); + BOOL_VAR_H(tessedit_do_invert); + INT_VAR_H(tessedit_pageseg_mode); + INT_VAR_H(thresholding_method); + BOOL_VAR_H(thresholding_debug); + double_VAR_H(thresholding_window_size); + double_VAR_H(thresholding_kfactor); + double_VAR_H(thresholding_tile_size); + 
double_VAR_H(thresholding_smooth_kernel_size); + double_VAR_H(thresholding_score_fraction); + INT_VAR_H(tessedit_ocr_engine_mode); + STRING_VAR_H(tessedit_char_blacklist); + STRING_VAR_H(tessedit_char_whitelist); + STRING_VAR_H(tessedit_char_unblacklist); + BOOL_VAR_H(tessedit_ambigs_training); + INT_VAR_H(pageseg_devanagari_split_strategy); + INT_VAR_H(ocr_devanagari_split_strategy); + STRING_VAR_H(tessedit_write_params_to_file); + BOOL_VAR_H(tessedit_adaption_debug); + INT_VAR_H(bidi_debug); + INT_VAR_H(applybox_debug); + INT_VAR_H(applybox_page); + STRING_VAR_H(applybox_exposure_pattern); + BOOL_VAR_H(applybox_learn_chars_and_char_frags_mode); + BOOL_VAR_H(applybox_learn_ngrams_mode); + BOOL_VAR_H(tessedit_display_outwords); + BOOL_VAR_H(tessedit_dump_choices); + BOOL_VAR_H(tessedit_timing_debug); + BOOL_VAR_H(tessedit_fix_fuzzy_spaces); + BOOL_VAR_H(tessedit_unrej_any_wd); + BOOL_VAR_H(tessedit_fix_hyphens); + BOOL_VAR_H(tessedit_enable_doc_dict); + BOOL_VAR_H(tessedit_debug_fonts); + INT_VAR_H(tessedit_font_id); + BOOL_VAR_H(tessedit_debug_block_rejection); + BOOL_VAR_H(tessedit_enable_bigram_correction); + BOOL_VAR_H(tessedit_enable_dict_correction); + INT_VAR_H(tessedit_bigram_debug); + BOOL_VAR_H(enable_noise_removal); + INT_VAR_H(debug_noise_removal); // Worst (min) certainty, for which a diacritic is allowed to make the base // character worse and still be included. - double_VAR_H(noise_cert_basechar, -8.0, "Hingepoint for base char certainty"); + double_VAR_H(noise_cert_basechar); // Worst (min) certainty, for which a non-overlapping diacritic is allowed to // make the base character worse and still be included. - double_VAR_H(noise_cert_disjoint, -2.5, "Hingepoint for disjoint certainty"); + double_VAR_H(noise_cert_disjoint); // Worst (min) certainty, for which a diacritic is allowed to make a new // stand-alone blob. 
- double_VAR_H(noise_cert_punc, -2.5, "Threshold for new punc char certainty"); + double_VAR_H(noise_cert_punc); // Factor of certainty margin for adding diacritics to not count as worse. - double_VAR_H(noise_cert_factor, 0.375, "Scaling on certainty diff from Hingepoint"); - INT_VAR_H(noise_maxperblob, 8, "Max diacritics to apply to a blob"); - INT_VAR_H(noise_maxperword, 16, "Max diacritics to apply to a word"); - INT_VAR_H(debug_x_ht_level, 0, "Reestimate debug"); - STRING_VAR_H(chs_leading_punct, "('`\"", "Leading punctuation"); - STRING_VAR_H(chs_trailing_punct1, ").,;:?!", "1st Trailing punctuation"); - STRING_VAR_H(chs_trailing_punct2, ")'`\"", "2nd Trailing punctuation"); - double_VAR_H(quality_rej_pc, 0.08, "good_quality_doc lte rejection limit"); - double_VAR_H(quality_blob_pc, 0.0, "good_quality_doc gte good blobs limit"); - double_VAR_H(quality_outline_pc, 1.0, "good_quality_doc lte outline error limit"); - double_VAR_H(quality_char_pc, 0.95, "good_quality_doc gte good char limit"); - INT_VAR_H(quality_min_initial_alphas_reqd, 2, "alphas in a good word"); - INT_VAR_H(tessedit_tess_adaption_mode, 0x27, "Adaptation decision algorithm for tess"); - BOOL_VAR_H(tessedit_minimal_rej_pass1, false, "Do minimal rejection on pass 1 output"); - BOOL_VAR_H(tessedit_test_adaption, false, "Test adaption criteria"); - BOOL_VAR_H(test_pt, false, "Test for point"); - double_VAR_H(test_pt_x, 99999.99, "xcoord"); - double_VAR_H(test_pt_y, 99999.99, "ycoord"); - INT_VAR_H(multilang_debug_level, 0, "Print multilang debug info."); - INT_VAR_H(paragraph_debug_level, 0, "Print paragraph debug info."); - BOOL_VAR_H(paragraph_text_based, true, - "Run paragraph detection on the post-text-recognition " - "(more accurate)"); - BOOL_VAR_H(lstm_use_matrix, 1, "Use ratings matrix/beam searct with lstm"); - STRING_VAR_H(outlines_odd, "%| ", "Non standard number of outlines"); - STRING_VAR_H(outlines_2, "ij!?%\":;", "Non standard number of outlines"); - 
BOOL_VAR_H(tessedit_good_quality_unrej, true, "Reduce rejection on good docs"); - BOOL_VAR_H(tessedit_use_reject_spaces, true, "Reject spaces?"); - double_VAR_H(tessedit_reject_doc_percent, 65.00, "%rej allowed before rej whole doc"); - double_VAR_H(tessedit_reject_block_percent, 45.00, "%rej allowed before rej whole block"); - double_VAR_H(tessedit_reject_row_percent, 40.00, "%rej allowed before rej whole row"); - double_VAR_H(tessedit_whole_wd_rej_row_percent, 70.00, - "Number of row rejects in whole word rejects" - "which prevents whole row rejection"); - BOOL_VAR_H(tessedit_preserve_blk_rej_perfect_wds, true, - "Only rej partially rejected words in block rejection"); - BOOL_VAR_H(tessedit_preserve_row_rej_perfect_wds, true, - "Only rej partially rejected words in row rejection"); - BOOL_VAR_H(tessedit_dont_blkrej_good_wds, false, "Use word segmentation quality metric"); - BOOL_VAR_H(tessedit_dont_rowrej_good_wds, false, "Use word segmentation quality metric"); - INT_VAR_H(tessedit_preserve_min_wd_len, 2, "Only preserve wds longer than this"); - BOOL_VAR_H(tessedit_row_rej_good_docs, true, "Apply row rejection to good docs"); - double_VAR_H(tessedit_good_doc_still_rowrej_wd, 1.1, - "rej good doc wd if more than this fraction rejected"); - BOOL_VAR_H(tessedit_reject_bad_qual_wds, true, "Reject all bad quality wds"); - BOOL_VAR_H(tessedit_debug_doc_rejection, false, "Page stats"); - BOOL_VAR_H(tessedit_debug_quality_metrics, false, "Output data to debug file"); - BOOL_VAR_H(bland_unrej, false, "unrej potential with no checks"); - double_VAR_H(quality_rowrej_pc, 1.1, "good_quality_doc gte good char limit"); - BOOL_VAR_H(unlv_tilde_crunching, false, "Mark v.bad words for tilde crunch"); - BOOL_VAR_H(hocr_font_info, false, "Add font info to hocr output"); - BOOL_VAR_H(hocr_char_boxes, false, "Add coordinates for each character to hocr output"); - BOOL_VAR_H(crunch_early_merge_tess_fails, true, "Before word crunch?"); - BOOL_VAR_H(crunch_early_convert_bad_unlv_chs, 
false, "Take out ~^ early?"); - double_VAR_H(crunch_terrible_rating, 80.0, "crunch rating lt this"); - BOOL_VAR_H(crunch_terrible_garbage, true, "As it says"); - double_VAR_H(crunch_poor_garbage_cert, -9.0, "crunch garbage cert lt this"); - double_VAR_H(crunch_poor_garbage_rate, 60, "crunch garbage rating lt this"); - double_VAR_H(crunch_pot_poor_rate, 40, "POTENTIAL crunch rating lt this"); - double_VAR_H(crunch_pot_poor_cert, -8.0, "POTENTIAL crunch cert lt this"); - double_VAR_H(crunch_del_rating, 60, "POTENTIAL crunch rating lt this"); - double_VAR_H(crunch_del_cert, -10.0, "POTENTIAL crunch cert lt this"); - double_VAR_H(crunch_del_min_ht, 0.7, "Del if word ht lt xht x this"); - double_VAR_H(crunch_del_max_ht, 3.0, "Del if word ht gt xht x this"); - double_VAR_H(crunch_del_min_width, 3.0, "Del if word width lt xht x this"); - double_VAR_H(crunch_del_high_word, 1.5, "Del if word gt xht x this above bl"); - double_VAR_H(crunch_del_low_word, 0.5, "Del if word gt xht x this below bl"); - double_VAR_H(crunch_small_outlines_size, 0.6, "Small if lt xht x this"); - INT_VAR_H(crunch_rating_max, 10, "For adj length in rating per ch"); - INT_VAR_H(crunch_pot_indicators, 1, "How many potential indicators needed"); - BOOL_VAR_H(crunch_leave_ok_strings, true, "Don't touch sensible strings"); - BOOL_VAR_H(crunch_accept_ok, true, "Use acceptability in okstring"); - BOOL_VAR_H(crunch_leave_accept_strings, false, "Don't pot crunch sensible strings"); - BOOL_VAR_H(crunch_include_numerals, false, "Fiddle alpha figures"); - INT_VAR_H(crunch_leave_lc_strings, 4, "Don't crunch words with long lower case strings"); - INT_VAR_H(crunch_leave_uc_strings, 4, "Don't crunch words with long lower case strings"); - INT_VAR_H(crunch_long_repetitions, 3, "Crunch words with long repetitions"); - INT_VAR_H(crunch_debug, 0, "As it says"); - INT_VAR_H(fixsp_non_noise_limit, 1, "How many non-noise blbs either side?"); - double_VAR_H(fixsp_small_outlines_size, 0.28, "Small if lt xht x this"); - 
BOOL_VAR_H(tessedit_prefer_joined_punct, false, "Reward punctuation joins"); - INT_VAR_H(fixsp_done_mode, 1, "What constitutes done for spacing"); - INT_VAR_H(debug_fix_space_level, 0, "Contextual fixspace debug"); - STRING_VAR_H(numeric_punctuation, ".,", "Punct. chs expected WITHIN numbers"); - INT_VAR_H(x_ht_acceptance_tolerance, 8, "Max allowed deviation of blob top outside of font data"); - INT_VAR_H(x_ht_min_change, 8, "Min change in xht before actually trying it"); - INT_VAR_H(superscript_debug, 0, "Debug level for sub & superscript fixer"); - double_VAR_H(superscript_worse_certainty, 2.0, - "How many times worse " - "certainty does a superscript position glyph need to be for us " - "to try classifying it as a char with a different baseline?"); - double_VAR_H(superscript_bettered_certainty, 0.97, - "What reduction in " - "badness do we think sufficient to choose a superscript over " - "what we'd thought. For example, a value of 0.6 means we want " - "to reduce badness of certainty by 40%"); - double_VAR_H(superscript_scaledown_ratio, 0.4, - "A superscript scaled down more than this is unbelievably " - "small. 
For example, 0.3 means we expect the font size to " - "be no smaller than 30% of the text line font size."); - double_VAR_H(subscript_max_y_top, 0.5, - "Maximum top of a character measured as a multiple of x-height " - "above the baseline for us to reconsider whether it's a " - "subscript."); - double_VAR_H(superscript_min_y_bottom, 0.3, - "Minimum bottom of a character measured as a multiple of " - "x-height above the baseline for us to reconsider whether it's " - "a superscript."); - BOOL_VAR_H(tessedit_write_block_separators, false, "Write block separators in output"); - BOOL_VAR_H(tessedit_write_rep_codes, false, "Write repetition char code"); - BOOL_VAR_H(tessedit_write_unlv, false, "Write .unlv output file"); - BOOL_VAR_H(tessedit_create_txt, false, "Write .txt output file"); - BOOL_VAR_H(tessedit_create_hocr, false, "Write .html hOCR output file"); - BOOL_VAR_H(tessedit_create_alto, false, "Write .xml ALTO output file"); - BOOL_VAR_H(tessedit_create_lstmbox, false, "Write .box file for LSTM training"); - BOOL_VAR_H(tessedit_create_tsv, false, "Write .tsv output file"); - BOOL_VAR_H(tessedit_create_wordstrbox, false, "Write WordStr format .box output file"); - BOOL_VAR_H(tessedit_create_pdf, false, "Write .pdf output file"); - BOOL_VAR_H(textonly_pdf, false, "Create PDF with only one invisible text layer"); - INT_VAR_H(jpg_quality, 85, "Set JPEG quality level"); - INT_VAR_H(user_defined_dpi, 0, "Specify DPI for input image"); - INT_VAR_H(min_characters_to_try, 50, "Specify minimum characters to try during OSD"); - STRING_VAR_H(unrecognised_char, "|", "Output char for unidentified blobs"); - INT_VAR_H(suspect_level, 99, "Suspect marker level"); - INT_VAR_H(suspect_short_words, 2, "Don't Suspect dict wds longer than this"); - BOOL_VAR_H(suspect_constrain_1Il, false, "UNLV keep 1Il chars rejected"); - double_VAR_H(suspect_rating_per_ch, 999.9, "Don't touch bad rating limit"); - double_VAR_H(suspect_accept_rating, -999.9, "Accept good rating limit"); - 
BOOL_VAR_H(tessedit_minimal_rejection, false, "Only reject tess failures"); - BOOL_VAR_H(tessedit_zero_rejection, false, "Don't reject ANYTHING"); - BOOL_VAR_H(tessedit_word_for_word, false, "Make output have exactly one word per WERD"); - BOOL_VAR_H(tessedit_zero_kelvin_rejection, false, "Don't reject ANYTHING AT ALL"); - INT_VAR_H(tessedit_reject_mode, 0, "Rejection algorithm"); - BOOL_VAR_H(tessedit_rejection_debug, false, "Adaption debug"); - BOOL_VAR_H(tessedit_flip_0O, true, "Contextual 0O O0 flips"); - double_VAR_H(tessedit_lower_flip_hyphen, 1.5, "Aspect ratio dot/hyphen test"); - double_VAR_H(tessedit_upper_flip_hyphen, 1.8, "Aspect ratio dot/hyphen test"); - BOOL_VAR_H(rej_trust_doc_dawg, false, "Use DOC dawg in 11l conf. detector"); - BOOL_VAR_H(rej_1Il_use_dict_word, false, "Use dictword test"); - BOOL_VAR_H(rej_1Il_trust_permuter_type, true, "Don't double check"); - BOOL_VAR_H(rej_use_tess_accepted, true, "Individual rejection control"); - BOOL_VAR_H(rej_use_tess_blanks, true, "Individual rejection control"); - BOOL_VAR_H(rej_use_good_perm, true, "Individual rejection control"); - BOOL_VAR_H(rej_use_sensible_wd, false, "Extend permuter check"); - BOOL_VAR_H(rej_alphas_in_number_perm, false, "Extend permuter check"); - double_VAR_H(rej_whole_of_mostly_reject_word_fract, 0.85, "if >this fract"); - INT_VAR_H(tessedit_image_border, 2, "Rej blbs near image edge limit"); - STRING_VAR_H(ok_repeated_ch_non_alphanum_wds, "-?*\075", "Allow NN to unrej"); - STRING_VAR_H(conflict_set_I_l_1, "Il1[]", "Il1 conflict set"); - INT_VAR_H(min_sane_x_ht_pixels, 8, "Reject any x-ht lt or eq than this"); - BOOL_VAR_H(tessedit_create_boxfile, false, "Output text with boxes"); - INT_VAR_H(tessedit_page_number, -1, "-1 -> All pages, else specific page to process"); - BOOL_VAR_H(tessedit_write_images, false, "Capture the image from the IPE"); - BOOL_VAR_H(interactive_display_mode, false, "Run interactively?"); - STRING_VAR_H(file_type, ".tif", "Filename extension"); - 
BOOL_VAR_H(tessedit_override_permuter, true, "According to dict_word"); - STRING_VAR_H(tessedit_load_sublangs, "", "List of languages to load with this one"); - BOOL_VAR_H(tessedit_use_primary_params_model, false, - "In multilingual mode use params model of the primary language"); + double_VAR_H(noise_cert_factor); + INT_VAR_H(noise_maxperblob); + INT_VAR_H(noise_maxperword); + INT_VAR_H(debug_x_ht_level); + STRING_VAR_H(chs_leading_punct); + STRING_VAR_H(chs_trailing_punct1); + STRING_VAR_H(chs_trailing_punct2); + double_VAR_H(quality_rej_pc); + double_VAR_H(quality_blob_pc); + double_VAR_H(quality_outline_pc); + double_VAR_H(quality_char_pc); + INT_VAR_H(quality_min_initial_alphas_reqd); + INT_VAR_H(tessedit_tess_adaption_mode); + BOOL_VAR_H(tessedit_minimal_rej_pass1); + BOOL_VAR_H(tessedit_test_adaption); + BOOL_VAR_H(test_pt); + double_VAR_H(test_pt_x); + double_VAR_H(test_pt_y); + INT_VAR_H(multilang_debug_level); + INT_VAR_H(paragraph_debug_level); + BOOL_VAR_H(paragraph_text_based); + BOOL_VAR_H(lstm_use_matrix); + STRING_VAR_H(outlines_odd); + STRING_VAR_H(outlines_2); + BOOL_VAR_H(tessedit_good_quality_unrej); + BOOL_VAR_H(tessedit_use_reject_spaces); + double_VAR_H(tessedit_reject_doc_percent); + double_VAR_H(tessedit_reject_block_percent); + double_VAR_H(tessedit_reject_row_percent); + double_VAR_H(tessedit_whole_wd_rej_row_percent); + BOOL_VAR_H(tessedit_preserve_blk_rej_perfect_wds); + BOOL_VAR_H(tessedit_preserve_row_rej_perfect_wds); + BOOL_VAR_H(tessedit_dont_blkrej_good_wds); + BOOL_VAR_H(tessedit_dont_rowrej_good_wds); + INT_VAR_H(tessedit_preserve_min_wd_len); + BOOL_VAR_H(tessedit_row_rej_good_docs); + double_VAR_H(tessedit_good_doc_still_rowrej_wd); + BOOL_VAR_H(tessedit_reject_bad_qual_wds); + BOOL_VAR_H(tessedit_debug_doc_rejection); + BOOL_VAR_H(tessedit_debug_quality_metrics); + BOOL_VAR_H(bland_unrej); + double_VAR_H(quality_rowrej_pc); + BOOL_VAR_H(unlv_tilde_crunching); + BOOL_VAR_H(hocr_font_info); + BOOL_VAR_H(hocr_char_boxes); + 
BOOL_VAR_H(crunch_early_merge_tess_fails); + BOOL_VAR_H(crunch_early_convert_bad_unlv_chs); + double_VAR_H(crunch_terrible_rating); + BOOL_VAR_H(crunch_terrible_garbage); + double_VAR_H(crunch_poor_garbage_cert); + double_VAR_H(crunch_poor_garbage_rate); + double_VAR_H(crunch_pot_poor_rate); + double_VAR_H(crunch_pot_poor_cert); + double_VAR_H(crunch_del_rating); + double_VAR_H(crunch_del_cert); + double_VAR_H(crunch_del_min_ht); + double_VAR_H(crunch_del_max_ht); + double_VAR_H(crunch_del_min_width); + double_VAR_H(crunch_del_high_word); + double_VAR_H(crunch_del_low_word); + double_VAR_H(crunch_small_outlines_size); + INT_VAR_H(crunch_rating_max); + INT_VAR_H(crunch_pot_indicators); + BOOL_VAR_H(crunch_leave_ok_strings); + BOOL_VAR_H(crunch_accept_ok); + BOOL_VAR_H(crunch_leave_accept_strings); + BOOL_VAR_H(crunch_include_numerals); + INT_VAR_H(crunch_leave_lc_strings); + INT_VAR_H(crunch_leave_uc_strings); + INT_VAR_H(crunch_long_repetitions); + INT_VAR_H(crunch_debug); + INT_VAR_H(fixsp_non_noise_limit); + double_VAR_H(fixsp_small_outlines_size); + BOOL_VAR_H(tessedit_prefer_joined_punct); + INT_VAR_H(fixsp_done_mode); + INT_VAR_H(debug_fix_space_level); + STRING_VAR_H(numeric_punctuation); + INT_VAR_H(x_ht_acceptance_tolerance); + INT_VAR_H(x_ht_min_change); + INT_VAR_H(superscript_debug); + double_VAR_H(superscript_worse_certainty); + double_VAR_H(superscript_bettered_certainty); + double_VAR_H(superscript_scaledown_ratio); + double_VAR_H(subscript_max_y_top); + double_VAR_H(superscript_min_y_bottom); + BOOL_VAR_H(tessedit_write_block_separators); + BOOL_VAR_H(tessedit_write_rep_codes); + BOOL_VAR_H(tessedit_write_unlv); + BOOL_VAR_H(tessedit_create_txt); + BOOL_VAR_H(tessedit_create_hocr); + BOOL_VAR_H(tessedit_create_alto); + BOOL_VAR_H(tessedit_create_lstmbox); + BOOL_VAR_H(tessedit_create_tsv); + BOOL_VAR_H(tessedit_create_wordstrbox); + BOOL_VAR_H(tessedit_create_pdf); + BOOL_VAR_H(textonly_pdf); + INT_VAR_H(jpg_quality); + INT_VAR_H(user_defined_dpi); + 
INT_VAR_H(min_characters_to_try); + STRING_VAR_H(unrecognised_char); + INT_VAR_H(suspect_level); + INT_VAR_H(suspect_short_words); + BOOL_VAR_H(suspect_constrain_1Il); + double_VAR_H(suspect_rating_per_ch); + double_VAR_H(suspect_accept_rating); + BOOL_VAR_H(tessedit_minimal_rejection); + BOOL_VAR_H(tessedit_zero_rejection); + BOOL_VAR_H(tessedit_word_for_word); + BOOL_VAR_H(tessedit_zero_kelvin_rejection); + INT_VAR_H(tessedit_reject_mode); + BOOL_VAR_H(tessedit_rejection_debug); + BOOL_VAR_H(tessedit_flip_0O); + double_VAR_H(tessedit_lower_flip_hyphen); + double_VAR_H(tessedit_upper_flip_hyphen); + BOOL_VAR_H(rej_trust_doc_dawg); + BOOL_VAR_H(rej_1Il_use_dict_word); + BOOL_VAR_H(rej_1Il_trust_permuter_type); + BOOL_VAR_H(rej_use_tess_accepted); + BOOL_VAR_H(rej_use_tess_blanks); + BOOL_VAR_H(rej_use_good_perm); + BOOL_VAR_H(rej_use_sensible_wd); + BOOL_VAR_H(rej_alphas_in_number_perm); + double_VAR_H(rej_whole_of_mostly_reject_word_fract); + INT_VAR_H(tessedit_image_border); + STRING_VAR_H(ok_repeated_ch_non_alphanum_wds); + STRING_VAR_H(conflict_set_I_l_1); + INT_VAR_H(min_sane_x_ht_pixels); + BOOL_VAR_H(tessedit_create_boxfile); + INT_VAR_H(tessedit_page_number); + BOOL_VAR_H(tessedit_write_images); + BOOL_VAR_H(interactive_display_mode); + STRING_VAR_H(file_type); + BOOL_VAR_H(tessedit_override_permuter); + STRING_VAR_H(tessedit_load_sublangs); + BOOL_VAR_H(tessedit_use_primary_params_model); // Min acceptable orientation margin (difference in scores between top and 2nd // choice in OSResults::orientations) to believe the page orientation. - double_VAR_H(min_orientation_margin, 7.0, "Min acceptable orientation margin"); - BOOL_VAR_H(textord_tabfind_show_vlines, false, "Debug line finding"); - BOOL_VAR_H(textord_use_cjk_fp_model, false, "Use CJK fixed pitch model"); - BOOL_VAR_H(poly_allow_detailed_fx, false, "Allow feature extractors to see the original outline"); - BOOL_VAR_H(tessedit_init_config_only, false, - "Only initialize with the config file. 
Useful if the instance is " - "not going to be used for OCR but say only for layout analysis."); - BOOL_VAR_H(textord_equation_detect, false, "Turn on equation detector"); - BOOL_VAR_H(textord_tabfind_vertical_text, true, "Enable vertical detection"); - BOOL_VAR_H(textord_tabfind_force_vertical_text, false, "Force using vertical text page mode"); - double_VAR_H(textord_tabfind_vertical_text_ratio, 0.5, - "Fraction of textlines deemed vertical to use vertical page " - "mode"); - double_VAR_H(textord_tabfind_aligned_gap_fraction, 0.75, - "Fraction of height used as a minimum gap for aligned blobs."); - INT_VAR_H(tessedit_parallelize, 0, "Run in parallel where possible"); - BOOL_VAR_H(preserve_interword_spaces, false, "Preserve multiple interword spaces"); - STRING_VAR_H(page_separator, "\f", "Page separator (default is form feed control character)"); - INT_VAR_H(lstm_choice_mode, 0, - "Allows to include alternative symbols choices in the hOCR " - "output. " - "Valid input values are 0, 1 and 2. 0 is the default value. " - "With 1 the alternative symbol choices per timestep are included. " - "With 2 the alternative symbol choices are extracted from the CTC " - "process instead of the lattice. The choices are mapped per " - "character."); - INT_VAR_H(lstm_choice_iterations, 5, - "Sets the number of cascading iterations for the Beamsearch in " - "lstm_choice_mode. Note that lstm_choice_mode must be set to " - "a value greater than 0 to produce results."); - double_VAR_H(lstm_rating_coefficient, 5, - "Sets the rating coefficient for the lstm choices. The smaller " - "the coefficient, the better are the ratings for each choice " - "and less information is lost due to the cut off at 0. 
The " - "standard value is 5."); - BOOL_VAR_H(pageseg_apply_music_mask, true, - "Detect music staff and remove intersecting components"); + double_VAR_H(min_orientation_margin); + BOOL_VAR_H(textord_tabfind_show_vlines); + BOOL_VAR_H(textord_use_cjk_fp_model); + BOOL_VAR_H(poly_allow_detailed_fx); + BOOL_VAR_H(tessedit_init_config_only); +#ifndef DISABLED_LEGACY_ENGINE + BOOL_VAR_H(textord_equation_detect); +#endif // ndef DISABLED_LEGACY_ENGINE + BOOL_VAR_H(textord_tabfind_vertical_text); + BOOL_VAR_H(textord_tabfind_force_vertical_text); + double_VAR_H(textord_tabfind_vertical_text_ratio); + double_VAR_H(textord_tabfind_aligned_gap_fraction); + INT_VAR_H(tessedit_parallelize); + BOOL_VAR_H(preserve_interword_spaces); + STRING_VAR_H(page_separator); + INT_VAR_H(lstm_choice_mode); + INT_VAR_H(lstm_choice_iterations); + double_VAR_H(lstm_rating_coefficient); + BOOL_VAR_H(pageseg_apply_music_mask); //// ambigsrecog.cpp ///////////////////////////////////////////////////////// FILE *init_recog_training(const char *filename); @@ -1071,8 +1007,10 @@ private: Tesseract *most_recently_used_; // The size of the font table, ie max possible font id + 1. int font_table_size_; +#ifndef DISABLED_LEGACY_ENGINE // Equation detector. Note: this pointer is NOT owned by the class. EquationDetect *equ_detect_; +#endif // ndef DISABLED_LEGACY_ENGINE // LSTM recognizer, if available. LSTMRecognizer *lstm_recognizer_; // Output "page" number (actually line number) using TrainLineRecognizer. 
diff --git a/src/ccmain/tfacepp.cpp b/src/ccmain/tfacepp.cpp index 088459530..8aabf645e 100644 --- a/src/ccmain/tfacepp.cpp +++ b/src/ccmain/tfacepp.cpp @@ -47,14 +47,7 @@ void Tesseract::recog_word(WERD_RES *word) { ASSERT_HOST(!word->chopped_word->blobs.empty()); recog_word_recursive(word); word->SetupBoxWord(); - if (word->best_choice->length() != word->box_word->length()) { - tprintf( - "recog_word ASSERT FAIL String:\"%s\"; " - "Strlen=%d; #Blobs=%d\n", - word->best_choice->debug_string().c_str(), word->best_choice->length(), - word->box_word->length()); - } - ASSERT_HOST(word->best_choice->length() == word->box_word->length()); + ASSERT_HOST(static_cast(word->best_choice->length()) == word->box_word->length()); // Check that the ratings matrix size matches the sum of all the // segmentation states. if (!word->StatesAllValid()) { @@ -82,7 +75,7 @@ void Tesseract::recog_word(WERD_RES *word) { // Factored out from control.cpp ASSERT_HOST((word->best_choice == nullptr) == (word->raw_choice == nullptr)); if (word->best_choice == nullptr || word->best_choice->empty() || - static_cast(strspn(word->best_choice->unichar_string().c_str(), " ")) == + strspn(word->best_choice->unichar_string().c_str(), " ") == word->best_choice->length()) { word->tess_failed = true; word->reject_map.initialise(word->box_word->length()); @@ -99,7 +92,7 @@ void Tesseract::recog_word(WERD_RES *word) { * Convert the output back to editor form. **********************************************************************/ void Tesseract::recog_word_recursive(WERD_RES *word) { - int word_length = word->chopped_word->NumBlobs(); // no of blobs + auto word_length = word->chopped_word->NumBlobs(); // no of blobs if (word_length > MAX_UNDIVIDED_LENGTH) { return split_and_recog_word(word); } @@ -134,7 +127,7 @@ void Tesseract::split_and_recog_word(WERD_RES *word) { // Find the biggest blob gap in the chopped_word. 
int bestgap = -INT32_MAX; int split_index = 0; - for (int b = 1; b < word->chopped_word->NumBlobs(); ++b) { + for (unsigned b = 1; b < word->chopped_word->NumBlobs(); ++b) { TBOX prev_box = word->chopped_word->blobs[b - 1]->bounding_box(); TBOX blob_box = word->chopped_word->blobs[b]->bounding_box(); int gap = blob_box.left() - prev_box.right(); @@ -167,7 +160,7 @@ void Tesseract::split_and_recog_word(WERD_RES *word) { * and will now be owned by the caller. New blamer bundles are forged for the * two pieces. **********************************************************************/ -void Tesseract::split_word(WERD_RES *word, int split_pt, WERD_RES **right_piece, +void Tesseract::split_word(WERD_RES *word, unsigned split_pt, WERD_RES **right_piece, BlamerBundle **orig_blamer_bundle) const { ASSERT_HOST(split_pt > 0 && split_pt < word->chopped_word->NumBlobs()); @@ -181,7 +174,7 @@ void Tesseract::split_word(WERD_RES *word, int split_pt, WERD_RES **right_piece, TWERD *chopped = word->chopped_word; auto *chopped2 = new TWERD; chopped2->blobs.reserve(chopped->NumBlobs() - split_pt); - for (int i = split_pt; i < chopped->NumBlobs(); ++i) { + for (auto i = split_pt; i < chopped->NumBlobs(); ++i) { chopped2->blobs.push_back(chopped->blobs[i]); } chopped->blobs.resize(split_pt); diff --git a/src/ccmain/thresholder.cpp b/src/ccmain/thresholder.cpp index 4fc8f075f..f8da90ccf 100644 --- a/src/ccmain/thresholder.cpp +++ b/src/ccmain/thresholder.cpp @@ -25,8 +25,10 @@ #endif #include +#include // for api->GetIntVariable() -#include // for uint32_t +#include // for std::max, std::min +#include // for uint32_t #include #include @@ -186,7 +188,8 @@ void ImageThresholder::SetImage(const Image pix) { } std::tuple ImageThresholder::Threshold( - ThresholdMethod method) { + TessBaseAPI *api, + ThresholdMethod method) { Image pix_binary = nullptr; Image pix_thresholds = nullptr; @@ -196,19 +199,83 @@ std::tuple ImageThresholder::Threshold( Image original = GetPixRect(); pix_binary = 
original.copy(); original.destroy(); - return std::make_tuple(false, nullptr, pix_binary, nullptr); + return std::make_tuple(true, nullptr, pix_binary, nullptr); } auto pix_grey = GetPixRectGrey(); int r; + + l_int32 pix_w, pix_h; + pixGetDimensions(pix_grey, &pix_w, &pix_h, nullptr); + + bool thresholding_debug; + api->GetBoolVariable("thresholding_debug", &thresholding_debug); + if (thresholding_debug) { + tprintf("\nimage width: %d height: %d ppi: %d\n", pix_w, pix_h, yres_); + } + if (method == ThresholdMethod::Sauvola) { - r = pixSauvolaBinarizeTiled(pix_grey, 25, 0.40, 300, 300, pix_thresholds, - pix_binary); - } else { - // AdaptiveOtsu. - r = pixOtsuAdaptiveThreshold(pix_grey, 300, 300, 0, 0, 0.1, - pix_thresholds, pix_binary); + int window_size; + double window_size_factor; + api->GetDoubleVariable("thresholding_window_size", &window_size_factor); + window_size = window_size_factor * yres_; + window_size = std::max(7, window_size); + window_size = std::min(pix_w < pix_h ? pix_w - 3 : pix_h - 3, window_size); + int half_window_size = window_size / 2; + + // factor for image division into tiles; >= 1 + l_int32 nx, ny; + // tiles size will be approx. 
250 x 250 pixels + nx = std::max(1, (pix_w + 125) / 250); + ny = std::max(1, (pix_h + 125) / 250); + auto xrat = pix_w / nx; + auto yrat = pix_h / ny; + if (xrat < half_window_size + 2) { + nx = pix_w / (half_window_size + 2); + } + if (yrat < half_window_size + 2) { + ny = pix_h / (half_window_size + 2); + } + + double kfactor; + api->GetDoubleVariable("thresholding_kfactor", &kfactor); + kfactor = std::max(0.0, kfactor); + + if (thresholding_debug) { + tprintf("window size: %d kfactor: %.3f nx:%d ny: %d\n", window_size, kfactor, nx, ny); + } + + r = pixSauvolaBinarizeTiled(pix_grey, half_window_size, kfactor, nx, ny, + (PIX**)pix_thresholds, + (PIX**)pix_binary); + } else { // if (method == ThresholdMethod::LeptonicaOtsu) + int tile_size; + double tile_size_factor; + api->GetDoubleVariable("thresholding_tile_size", &tile_size_factor); + tile_size = tile_size_factor * yres_; + tile_size = std::max(16, tile_size); + + int smooth_size; + double smooth_size_factor; + api->GetDoubleVariable("thresholding_smooth_kernel_size", + &smooth_size_factor); + smooth_size_factor = std::max(0.0, smooth_size_factor); + smooth_size = smooth_size_factor * yres_; + int half_smooth_size = smooth_size / 2; + + double score_fraction; + api->GetDoubleVariable("thresholding_score_fraction", &score_fraction); + + if (thresholding_debug) { + tprintf("tile size: %d smooth_size: %d score_fraction: %.2f\n", tile_size, smooth_size, score_fraction); + } + + r = pixOtsuAdaptiveThreshold(pix_grey, tile_size, tile_size, + half_smooth_size, half_smooth_size, + score_fraction, + (PIX**)pix_thresholds, + (PIX**)pix_binary); } bool ok = (r == 0); diff --git a/src/ccmain/thresholder.h b/src/ccmain/thresholder.h index 973e98fa1..e20c065bc 100644 --- a/src/ccmain/thresholder.h +++ b/src/ccmain/thresholder.h @@ -20,7 +20,6 @@ #define TESSERACT_CCMAIN_THRESHOLDER_H_ #include -#include #include // for std::vector @@ -28,6 +27,15 @@ struct Pix; namespace tesseract { +enum class ThresholdMethod { + Otsu, // 
Tesseract's legacy Otsu + LeptonicaOtsu, // Leptonica's Otsu + Sauvola, // Leptonica's Sauvola + Max, // Number of Thresholding methods +}; + +class TessBaseAPI; + /// Base class for all tesseract image thresholding classes. /// Specific classes can add new thresholding methods by /// overriding ThresholdToPix. @@ -121,7 +129,7 @@ public: /// Returns false on error. virtual bool ThresholdToPix(Image *pix); - virtual std::tuple Threshold( + virtual std::tuple Threshold(TessBaseAPI *api, ThresholdMethod method); // Gets a pix that contains an 8 bit threshold value at each pixel. The diff --git a/src/ccstruct/blamer.cpp b/src/ccstruct/blamer.cpp index bb257f98d..92260054e 100644 --- a/src/ccstruct/blamer.cpp +++ b/src/ccstruct/blamer.cpp @@ -72,7 +72,7 @@ void BlamerBundle::SetWordTruth(const UNICHARSET &unicharset, const char *truth_ std::vector lengths; unicharset.encode_string(truth_str, false, &encoding, &lengths, nullptr); int total_length = 0; - for (int i = 0; i < encoding.size(); total_length += lengths[i++]) { + for (size_t i = 0; i < encoding.size(); total_length += lengths[i++]) { std::string uch(truth_str + total_length); uch.resize(lengths[i] - total_length); UNICHAR_ID id = encoding[i]; @@ -119,7 +119,7 @@ bool BlamerBundle::ChoiceIsCorrect(const WERD_CHOICE *word_choice) const { } const UNICHARSET *uni_set = word_choice->unicharset(); std::string normed_choice_str; - for (int i = 0; i < word_choice->length(); ++i) { + for (unsigned i = 0; i < word_choice->length(); ++i) { normed_choice_str += uni_set->get_normed_unichar(word_choice->unichar_id(i)); } std::string truth_str = TruthString(); @@ -155,7 +155,7 @@ void BlamerBundle::SetupNormTruthWord(const DENORM &denorm) { TPOINT botright; TPOINT norm_topleft; TPOINT norm_botright; - for (int b = 0; b < truth_word_.length(); ++b) { + for (unsigned b = 0; b < truth_word_.length(); ++b) { const TBOX &box = truth_word_.BlobBox(b); topleft.x = box.left(); topleft.y = box.top(); @@ -175,8 +175,7 @@ void 
BlamerBundle::SplitBundle(int word1_right, int word2_left, bool debug, Blam BlamerBundle *bundle2) const { std::string debug_str; // Find truth boxes that correspond to the split in the blobs. - int b; - int begin2_truth_index = -1; + unsigned begin2_truth_index = 0; if (incorrect_result_reason_ != IRR_NO_TRUTH && truth_has_char_boxes_) { debug_str = "Looking for truth split at"; debug_str += " end1_x " + std::to_string(word1_right); @@ -184,7 +183,7 @@ void BlamerBundle::SplitBundle(int word1_right, int word2_left, bool debug, Blam debug_str += "\nnorm_truth_word boxes:\n"; if (norm_truth_word_.length() > 1) { norm_truth_word_.BlobBox(0).print_to_str(debug_str); - for (b = 1; b < norm_truth_word_.length(); ++b) { + for (unsigned b = 1; b < norm_truth_word_.length(); ++b) { norm_truth_word_.BlobBox(b).print_to_str(debug_str); if ((abs(word1_right - norm_truth_word_.BlobBox(b - 1).right()) < norm_box_tolerance_) && (abs(word2_left - norm_truth_word_.BlobBox(b).left()) < norm_box_tolerance_)) { @@ -204,7 +203,7 @@ void BlamerBundle::SplitBundle(int word1_right, int word2_left, bool debug, Blam bundle2->truth_has_char_boxes_ = true; bundle2->norm_box_tolerance_ = norm_box_tolerance_; BlamerBundle *curr_bb = bundle1; - for (b = 0; b < norm_truth_word_.length(); ++b) { + for (unsigned b = 0; b < norm_truth_word_.length(); ++b) { if (b == begin2_truth_index) { curr_bb = bundle2; } @@ -264,7 +263,7 @@ void BlamerBundle::BlameClassifier(const UNICHARSET &unicharset, const TBOX &blo return; // Nothing to do here. 
} - for (int b = 0; b < norm_truth_word_.length(); ++b) { + for (unsigned b = 0; b < norm_truth_word_.length(); ++b) { const TBOX &truth_box = norm_truth_word_.BlobBox(b); // Note that we are more strict on the bounding box boundaries here // than in other places (chopper, segmentation search), since we do @@ -311,10 +310,9 @@ void BlamerBundle::SetChopperBlame(const WERD_RES *word, bool debug) { if (NoTruth() || !truth_has_char_boxes_ || word->chopped_word->blobs.empty()) { return; } - std::string debug_str; bool missing_chop = false; int num_blobs = word->chopped_word->blobs.size(); - int box_index = 0; + unsigned box_index = 0; int blob_index = 0; int16_t truth_x = -1; while (box_index < truth_word_.length() && blob_index < num_blobs) { @@ -367,7 +365,7 @@ void BlamerBundle::BlameClassifierOrLangModel(const WERD_RES *word, const UNICHA if (valid_permuter) { // Find out whether best choice is a top choice. best_choice_is_dict_and_top_choice_ = true; - for (int i = 0; i < word->best_choice->length(); ++i) { + for (unsigned i = 0; i < word->best_choice->length(); ++i) { BLOB_CHOICE_IT blob_choice_it(word->GetBlobChoices(i)); ASSERT_HOST(!blob_choice_it.empty()); BLOB_CHOICE *first_choice = nullptr; @@ -415,7 +413,7 @@ void BlamerBundle::SetupCorrectSegmentation(const TWERD *word, bool debug) { } int blob_index = 0; int16_t next_box_x = word->blobs[blob_index]->bounding_box().right(); - for (int truth_idx = 0; blob_index < num_blobs && truth_idx < norm_truth_word_.length(); + for (unsigned truth_idx = 0; blob_index < num_blobs && truth_idx < norm_truth_word_.length(); ++blob_index) { ++next_box_col; int16_t curr_box_x = next_box_x; @@ -442,7 +440,7 @@ void BlamerBundle::SetupCorrectSegmentation(const TWERD *word, bool debug) { } if (blob_index < num_blobs || // trailing blobs correct_segmentation_cols_.size() != norm_truth_word_.length()) { - debug_str += + debug_str += "Blamer failed to find correct segmentation" " (tolerance=" + 
std::to_string(norm_box_tolerance_); @@ -478,7 +476,7 @@ void BlamerBundle::InitForSegSearch(const WERD_CHOICE *best_choice, MATRIX *rati // Fill pain points for any unclassifed blob corresponding to the // correct segmentation state. debug_str += "Correct segmentation:\n"; - for (int idx = 0; idx < correct_segmentation_cols_.size(); ++idx) { + for (unsigned idx = 0; idx < correct_segmentation_cols_.size(); ++idx) { debug_str += "col=" + std::to_string(correct_segmentation_cols_[idx]); debug_str += " row=" + std::to_string(correct_segmentation_rows_[idx]); debug_str += "\n"; diff --git a/src/ccstruct/blobbox.cpp b/src/ccstruct/blobbox.cpp index f6f1c4d26..6539a723d 100644 --- a/src/ccstruct/blobbox.cpp +++ b/src/ccstruct/blobbox.cpp @@ -33,6 +33,7 @@ #include // for pixGetHeight, pixGetPixel #include // for max, min +#include #include // for INT32_MAX, INT16_MAX #define PROJECTION_MARGIN 10 // arbitrary @@ -133,7 +134,7 @@ void BLOBNBOX::chop( // chop blobs BLOBNBOX_IT blob_it; // blob iterator // get no of chops - blobcount = static_cast(floor(box.width() / xheight)); + blobcount = static_cast(std::floor(box.width() / xheight)); if (blobcount > 1 && cblob_ptr != nullptr) { // width of each blobwidth = static_cast(box.width() + 1) / blobcount; @@ -150,12 +151,12 @@ void BLOBNBOX::chop( // chop blobs UpdateRange(test_ymin, test_ymax, &ymin, &ymax); } while (blob != end_it->data()); if (ymin < ymax) { - leftx = static_cast(floor(rightx - blobwidth)); + leftx = static_cast(std::floor(rightx - blobwidth)); if (leftx < box.left()) { leftx = box.left(); // clip to real box } - bl = ICOORD(leftx, static_cast(floor(ymin))); - tr = ICOORD(static_cast(ceil(rightx)), static_cast(ceil(ymax))); + bl = ICOORD(leftx, static_cast(std::floor(ymin))); + tr = ICOORD(static_cast(std::ceil(rightx)), static_cast(std::ceil(ymax))); if (blobindex == 0) { box = TBOX(bl, tr); // change box } else { diff --git a/src/ccstruct/blobbox.h b/src/ccstruct/blobbox.h index f2b935e2e..5fafb3da4 
100644 --- a/src/ccstruct/blobbox.h +++ b/src/ccstruct/blobbox.h @@ -740,8 +740,11 @@ public: TO_ROW_IT row_it = &row_list; for (row_it.mark_cycle_pt(); !row_it.cycled_list(); row_it.forward()) { auto row = row_it.data(); - tprintf("Row range (%g,%g), para_c=%g, blobcount=%" PRId32 "\n", row->min_y(), row->max_y(), - row->parallel_c(), row->blob_list()->length()); + tprintf("Row range (%g,%g), para_c=%g, blobcount=%" PRId32 "\n", + static_cast(row->min_y()), + static_cast(row->max_y()), + static_cast(row->parallel_c()), + row->blob_list()->length()); } } @@ -803,7 +806,6 @@ private: }; ELISTIZEH(TO_BLOCK) -extern double_VAR_H(textord_error_weight, 3, "Weighting for error in believability"); void find_cblob_limits( // get y limits C_BLOB *blob, // blob to search float leftx, // x limits diff --git a/src/ccstruct/blobs.cpp b/src/ccstruct/blobs.cpp index c61280ff8..9ab0fe78c 100644 --- a/src/ccstruct/blobs.cpp +++ b/src/ccstruct/blobs.cpp @@ -558,7 +558,9 @@ void TBLOB::GetPreciseBoundingBox(TBOX *precise_box) const { // Eg x_coords[1] is a collection of the x-coords of edges at y=bottom + 1. void TBLOB::GetEdgeCoords(const TBOX &box, std::vector> &x_coords, std::vector> &y_coords) const { + x_coords.clear(); x_coords.resize(box.height()); + y_coords.clear(); y_coords.resize(box.width()); CollectEdges(box, nullptr, nullptr, &x_coords, &y_coords); // Sort the output vectors. @@ -869,12 +871,15 @@ TBOX TWERD::bounding_box() const { // Merges the blobs from start to end, not including end, and deletes // the blobs between start and end. -void TWERD::MergeBlobs(int start, int end) { - if (start >= blobs.size() - 1) { +void TWERD::MergeBlobs(unsigned start, unsigned end) { + if (end > blobs.size()) { + end = blobs.size(); + } + if (start >= end) { return; // Nothing to do. 
} TESSLINE *outline = blobs[start]->outlines; - for (int i = start + 1; i < end && i < blobs.size(); ++i) { + for (auto i = start + 1; i < end; ++i) { TBLOB *next_blob = blobs[i]; // Take the outlines from the next blob. if (outline == nullptr) { @@ -893,7 +898,7 @@ void TWERD::MergeBlobs(int start, int end) { } // Remove dead blobs from the vector. // TODO: optimize. - for (int i = start + 1; i < end && start + 1 < blobs.size(); ++i) { + for (auto i = start + 1; i < end && start + 1 < blobs.size(); ++i) { blobs.erase(blobs.begin() + start + 1); } } @@ -925,8 +930,8 @@ bool divisible_blob(TBLOB *blob, bool italic_blob, TPOINT *location) { if (outline1->is_hole) { continue; // Holes do not count as separable. } - TPOINT mid_pt1(static_cast((outline1->topleft.x + outline1->botright.x) / 2), - static_cast((outline1->topleft.y + outline1->botright.y) / 2)); + TPOINT mid_pt1((outline1->topleft.x + outline1->botright.x) / 2, + (outline1->topleft.y + outline1->botright.y) / 2); int mid_prod1 = mid_pt1.cross(vertical); int min_prod1, max_prod1; outline1->MinMaxCrossProduct(vertical, &min_prod1, &max_prod1); @@ -934,8 +939,8 @@ bool divisible_blob(TBLOB *blob, bool italic_blob, TPOINT *location) { if (outline2->is_hole) { continue; // Holes do not count as separable. 
} - TPOINT mid_pt2(static_cast((outline2->topleft.x + outline2->botright.x) / 2), - static_cast((outline2->topleft.y + outline2->botright.y) / 2)); + TPOINT mid_pt2((outline2->topleft.x + outline2->botright.x) / 2, + (outline2->topleft.y + outline2->botright.y) / 2); int mid_prod2 = mid_pt2.cross(vertical); int min_prod2, max_prod2; outline2->MinMaxCrossProduct(vertical, &min_prod2, &max_prod2); @@ -972,8 +977,8 @@ void divide_blobs(TBLOB *blob, TBLOB *other_blob, bool italic_blob, const TPOINT int location_prod = location.cross(vertical); while (outline != nullptr) { - TPOINT mid_pt(static_cast((outline->topleft.x + outline->botright.x) / 2), - static_cast((outline->topleft.y + outline->botright.y) / 2)); + TPOINT mid_pt((outline->topleft.x + outline->botright.x) / 2, + (outline->topleft.y + outline->botright.y) / 2); int mid_prod = mid_pt.cross(vertical); if (mid_prod < location_prod) { // Outline is in left blob. diff --git a/src/ccstruct/blobs.h b/src/ccstruct/blobs.h index 02ae2e883..e7993ffc3 100644 --- a/src/ccstruct/blobs.h +++ b/src/ccstruct/blobs.h @@ -28,7 +28,7 @@ #include // for OcrEngineMode -#include // for int16_t +#include "tesstypes.h" // for TDimension struct Pix; @@ -46,8 +46,8 @@ class WERD; ----------------------------------------------------------------------*/ struct TPOINT { - TPOINT() : x(0), y(0) {} - TPOINT(int16_t vx, int16_t vy) : x(vx), y(vy) {} + TPOINT() = default; + TPOINT(TDimension vx, TDimension vy) : x(vx), y(vy) {} TPOINT(const ICOORD &ic) : x(ic.x()), y(ic.y()) {} void operator+=(const TPOINT &other) { @@ -86,8 +86,8 @@ struct TPOINT { return x * x + y * y; } - int16_t x; // absolute x coord. - int16_t y; // absolute y coord. + TDimension x = 0; // absolute x coord. + TDimension y = 0; // absolute y coord. }; using VECTOR = TPOINT; // structure for coordinates. 
@@ -196,7 +196,7 @@ struct EDGEPT { bool is_hidden = false; uint8_t runlength = 0; int8_t dir = 0; - int8_t fixed = 0; + bool fixed = false; EDGEPT *next = nullptr; // anticlockwise element EDGEPT *prev = nullptr; // clockwise element C_OUTLINE *src_outline = nullptr; // Outline it came from. @@ -446,14 +446,14 @@ struct TWERD { void ComputeBoundingBoxes(); // Returns the number of blobs in the word. - int NumBlobs() const { + unsigned NumBlobs() const { return blobs.size(); } TBOX bounding_box() const; // Merges the blobs from start to end, not including end, and deletes // the blobs between start and end. - void MergeBlobs(int start, int end); + void MergeBlobs(unsigned start, unsigned end); #ifndef GRAPHICS_DISABLED void plot(ScrollView *window); diff --git a/src/ccstruct/boxword.cpp b/src/ccstruct/boxword.cpp index 99e345e83..7627a21cd 100644 --- a/src/ccstruct/boxword.cpp +++ b/src/ccstruct/boxword.cpp @@ -46,7 +46,7 @@ void BoxWord::CopyFrom(const BoxWord &src) { length_ = src.length_; boxes_.clear(); boxes_.reserve(length_); - for (int i = 0; i < length_; ++i) { + for (unsigned i = 0; i < length_; ++i) { boxes_.push_back(src.boxes_[i]); } } @@ -60,10 +60,11 @@ BoxWord *BoxWord::CopyFromNormalized(TWERD *tessword) { // Allocate memory. boxword->boxes_.reserve(boxword->length_); - for (int b = 0; b < boxword->length_; ++b) { + for (unsigned b = 0; b < boxword->length_; ++b) { TBLOB *tblob = tessword->blobs[b]; TBOX blob_box; - for (TESSLINE *outline = tblob->outlines; outline != nullptr; outline = outline->next) { + for (TESSLINE *outline = tblob->outlines; outline != nullptr; + outline = outline->next) { EDGEPT *edgept = outline->loop; // Iterate over the edges. do { @@ -89,10 +90,11 @@ BoxWord *BoxWord::CopyFromNormalized(TWERD *tessword) { // expanding slightly, then clipping to the blobs from the original_word // that overlap. If not null, the block provides the inverse rotation. 
void BoxWord::ClipToOriginalWord(const BLOCK *block, WERD *original_word) { - for (int i = 0; i < length_; ++i) { + for (unsigned i = 0; i < length_; ++i) { TBOX box = boxes_[i]; // Expand by a single pixel, as the poly approximation error is 1 pixel. - box = TBOX(box.left() - 1, box.bottom() - 1, box.right() + 1, box.top() + 1); + box = + TBOX(box.left() - 1, box.bottom() - 1, box.right() + 1, box.top() + 1); // Now find the original box that matches. TBOX original_box; C_BLOB_IT b_it(original_word->cblob_list()); @@ -106,16 +108,19 @@ void BoxWord::ClipToOriginalWord(const BLOCK *block, WERD *original_word) { } } if (!original_box.null_box()) { - if (NearlyEqual(original_box.left(), box.left(), kBoxClipTolerance)) { + if (NearlyEqual(original_box.left(), box.left(), + kBoxClipTolerance)) { box.set_left(original_box.left()); } - if (NearlyEqual(original_box.right(), box.right(), kBoxClipTolerance)) { + if (NearlyEqual(original_box.right(), box.right(), + kBoxClipTolerance)) { box.set_right(original_box.right()); } if (NearlyEqual(original_box.top(), box.top(), kBoxClipTolerance)) { box.set_top(original_box.top()); } - if (NearlyEqual(original_box.bottom(), box.bottom(), kBoxClipTolerance)) { + if (NearlyEqual(original_box.bottom(), box.bottom(), + kBoxClipTolerance)) { box.set_bottom(original_box.bottom()); } } @@ -130,18 +135,18 @@ void BoxWord::ClipToOriginalWord(const BLOCK *block, WERD *original_word) { // Merges the boxes from start to end, not including end, and deletes // the boxes between start and end. 
-void BoxWord::MergeBoxes(int start, int end) { - start = ClipToRange(start, 0, length_); - end = ClipToRange(end, 0, length_); +void BoxWord::MergeBoxes(unsigned start, unsigned end) { + start = ClipToRange(start, 0U, length_); + end = ClipToRange(end, 0U, length_); if (end <= start + 1) { return; } - for (int i = start + 1; i < end; ++i) { + for (unsigned i = start + 1; i < end; ++i) { boxes_[start] += boxes_[i]; } int shrinkage = end - 1 - start; length_ -= shrinkage; - for (int i = start + 1; i < length_; ++i) { + for (unsigned i = start + 1; i < length_; ++i) { boxes_[i] = boxes_[i + shrinkage]; } boxes_.resize(length_); @@ -149,7 +154,7 @@ void BoxWord::MergeBoxes(int start, int end) { // Inserts a new box before the given index. // Recomputes the bounding box. -void BoxWord::InsertBox(int index, const TBOX &box) { +void BoxWord::InsertBox(unsigned index, const TBOX &box) { if (index < length_) { boxes_.insert(boxes_.begin() + index, box); } else { @@ -161,15 +166,15 @@ void BoxWord::InsertBox(int index, const TBOX &box) { // Changes the box at the given index to the new box. // Recomputes the bounding box. -void BoxWord::ChangeBox(int index, const TBOX &box) { +void BoxWord::ChangeBox(unsigned index, const TBOX &box) { boxes_[index] = box; ComputeBoundingBox(); } // Deletes the box with the given index, and shuffles up the rest. // Recomputes the bounding box. -void BoxWord::DeleteBox(int index) { - ASSERT_HOST(0 <= index && index < length_); +void BoxWord::DeleteBox(unsigned index) { + ASSERT_HOST(index < length_); boxes_.erase(boxes_.begin() + index); --length_; ComputeBoundingBox(); @@ -185,7 +190,7 @@ void BoxWord::DeleteAllBoxes() { // Computes the bounding box of the word. 
void BoxWord::ComputeBoundingBox() { bbox_ = TBOX(); - for (int i = 0; i < length_; ++i) { + for (unsigned i = 0; i < length_; ++i) { bbox_ += boxes_[i]; } } @@ -193,8 +198,9 @@ void BoxWord::ComputeBoundingBox() { // This and other putatively are the same, so call the (permanent) callback // for each blob index where the bounding boxes match. // The callback is deleted on completion. -void BoxWord::ProcessMatchedBlobs(const TWERD &other, std::function cb) const { - for (int i = 0; i < length_ && i < other.NumBlobs(); ++i) { +void BoxWord::ProcessMatchedBlobs(const TWERD &other, + const std::function &cb) const { + for (unsigned i = 0; i < length_ && i < other.NumBlobs(); ++i) { TBOX blob_box = other.blobs[i]->bounding_box(); if (blob_box == boxes_[i]) { cb(i); diff --git a/src/ccstruct/boxword.h b/src/ccstruct/boxword.h index 1e2211d5f..7966fad35 100644 --- a/src/ccstruct/boxword.h +++ b/src/ccstruct/boxword.h @@ -52,19 +52,19 @@ public: // Merges the boxes from start to end, not including end, and deletes // the boxes between start and end. - void MergeBoxes(int start, int end); + void MergeBoxes(unsigned start, unsigned end); // Inserts a new box before the given index. // Recomputes the bounding box. - void InsertBox(int index, const TBOX &box); + void InsertBox(unsigned index, const TBOX &box); // Changes the box at the given index to the new box. // Recomputes the bounding box. - void ChangeBox(int index, const TBOX &box); + void ChangeBox(unsigned index, const TBOX &box); // Deletes the box with the given index, and shuffles up the rest. // Recomputes the bounding box. - void DeleteBox(int index); + void DeleteBox(unsigned index); // Deletes all the boxes stored in BoxWord. void DeleteAllBoxes(); @@ -72,15 +72,16 @@ public: // This and other putatively are the same, so call the (permanent) callback // for each blob index where the bounding boxes match. // The callback is deleted on completion. 
- void ProcessMatchedBlobs(const TWERD &other, std::function cb) const; + void ProcessMatchedBlobs(const TWERD &other, + const std::function &cb) const; const TBOX &bounding_box() const { return bbox_; } - int length() const { + unsigned length() const { return length_; } - const TBOX &BlobBox(int index) const { + const TBOX &BlobBox(unsigned index) const { return boxes_[index]; } @@ -88,7 +89,7 @@ private: void ComputeBoundingBox(); TBOX bbox_; - int length_; + unsigned length_; std::vector boxes_; }; diff --git a/src/ccstruct/detlinefit.cpp b/src/ccstruct/detlinefit.cpp index 377459604..d100aa502 100644 --- a/src/ccstruct/detlinefit.cpp +++ b/src/ccstruct/detlinefit.cpp @@ -145,7 +145,7 @@ double DetLineFit::ConstrainedFit(const FCOORD &direction, double min_dist, doub if (debug) { tprintf("Constrained fit to dir %g, %g = %d, %d :%zu distances:\n", direction.x(), direction.y(), line_pt->x(), line_pt->y(), distances_.size()); - for (int i = 0; i < distances_.size(); ++i) { + for (unsigned i = 0; i < distances_.size(); ++i) { tprintf("%d: %d, %d -> %g\n", i, distances_[i].data().x(), distances_[i].data().y(), distances_[i].key()); } @@ -260,7 +260,7 @@ void DetLineFit::ComputeDistances(const ICOORD &start, const ICOORD &end) { // Compute the distance of each point from the line. 
int prev_abs_dist = 0; int prev_dot = 0; - for (int i = 0; i < pts_.size(); ++i) { + for (unsigned i = 0; i < pts_.size(); ++i) { ICOORD pt_vector = pts_[i].pt; pt_vector -= start; int dot = line_vector % pt_vector; diff --git a/src/ccstruct/fontinfo.cpp b/src/ccstruct/fontinfo.cpp index f7678dc39..ab5786749 100644 --- a/src/ccstruct/fontinfo.cpp +++ b/src/ccstruct/fontinfo.cpp @@ -83,7 +83,7 @@ bool FontInfoTable::SetContainsMultipleFontProperties( } int first_font = font_set[0].fontinfo_id; uint32_t properties = at(first_font).properties; - for (int f = 1; f < font_set.size(); ++f) { + for (unsigned f = 1; f < font_set.size(); ++f) { if (at(font_set[f].fontinfo_id).properties != properties) { return true; } @@ -95,7 +95,7 @@ bool FontInfoTable::SetContainsMultipleFontProperties( void FontInfoTable::MoveSpacingInfoFrom(FontInfoTable *other) { using namespace std::placeholders; // for _1, _2 set_clear_callback(std::bind(FontInfoDeleteCallback, _1)); - for (int i = 0; i < other->size(); ++i) { + for (unsigned i = 0; i < other->size(); ++i) { std::vector *spacing_vec = other->at(i).spacing_vec; if (spacing_vec != nullptr) { int target_index = get_index(other->at(i)); @@ -117,7 +117,7 @@ void FontInfoTable::MoveTo(UnicityTable *target) { target->clear(); using namespace std::placeholders; // for _1, _2 target->set_clear_callback(std::bind(FontInfoDeleteCallback, _1)); - for (int i = 0; i < size(); ++i) { + for (unsigned i = 0; i < size(); ++i) { // Bit copy the FontInfo and steal all the pointers. target->push_back(at(i)); at(i).name = nullptr; diff --git a/src/ccstruct/fontinfo.h b/src/ccstruct/fontinfo.h index 70edfa09b..1a84a5673 100644 --- a/src/ccstruct/fontinfo.h +++ b/src/ccstruct/fontinfo.h @@ -77,8 +77,7 @@ struct FontInfo { // Reserves unicharset_size spots in spacing_vec. 
void init_spacing(int unicharset_size) { - spacing_vec = new std::vector(); - spacing_vec->resize(unicharset_size); + spacing_vec = new std::vector(unicharset_size); } // Adds the given pointer to FontSpacingInfo to spacing_vec member // (FontInfo class takes ownership of the pointer). diff --git a/src/ccstruct/image.cpp b/src/ccstruct/image.cpp index bb24b1109..27a7facc7 100644 --- a/src/ccstruct/image.cpp +++ b/src/ccstruct/image.cpp @@ -22,7 +22,7 @@ namespace tesseract { Image Image::clone() const { - return pixClone(pix_); + return pix_ ? pixClone(pix_) : nullptr; } Image Image::copy() const { diff --git a/src/ccstruct/image.h b/src/ccstruct/image.h index a159b8d5b..52e57e45a 100644 --- a/src/ccstruct/image.h +++ b/src/ccstruct/image.h @@ -35,7 +35,7 @@ public: bool operator!=(decltype(nullptr)) const { return pix_ != nullptr; } explicit operator bool() const { return pix_ != nullptr; } operator Pix *() const { return pix_; } - operator Pix **() { return &pix_; } + explicit operator Pix **() { return &pix_; } Pix *operator->() const { return pix_; } // api diff --git a/src/ccstruct/imagedata.cpp b/src/ccstruct/imagedata.cpp index 2f97c6891..a094a2ac8 100644 --- a/src/ccstruct/imagedata.cpp +++ b/src/ccstruct/imagedata.cpp @@ -43,7 +43,8 @@ const int kMaxReadAhead = 8; ImageData::ImageData() : page_number_(-1), vertical_text_(false) {} // Takes ownership of the pix and destroys it. -ImageData::ImageData(bool vertical, Image pix) : page_number_(0), vertical_text_(vertical) { +ImageData::ImageData(bool vertical, Image pix) + : page_number_(0), vertical_text_(vertical) { SetPix(pix); } ImageData::~ImageData() { @@ -55,8 +56,8 @@ ImageData::~ImageData() { // Builds and returns an ImageData from the basic data. Note that imagedata, // truth_text, and box_text are all the actual file data, NOT filenames. 
ImageData *ImageData::Build(const char *name, int page_number, const char *lang, - const char *imagedata, int imagedatasize, const char *truth_text, - const char *box_text) { + const char *imagedata, int imagedatasize, + const char *truth_text, const char *box_text) { auto *image_data = new ImageData(); image_data->imagefilename_ = name; image_data->page_number_ = page_number; @@ -67,7 +68,8 @@ ImageData *ImageData::Build(const char *name, int page_number, const char *lang, memcpy(&image_data->image_data_[0], imagedata, imagedatasize); if (!image_data->AddBoxes(box_text)) { if (truth_text == nullptr || truth_text[0] == '\0') { - tprintf("Error: No text corresponding to page %d from image %s!\n", page_number, name); + tprintf("Error: No text corresponding to page %d from image %s!\n", + page_number, name); delete image_data; return nullptr; } @@ -210,8 +212,9 @@ Image ImageData::GetPix() const { // The return value is the scaled Pix, which must be pixDestroyed after use, // and scale_factor (if not nullptr) is set to the scale factor that was applied // to the image to achieve the target_height. -Image ImageData::PreScale(int target_height, int max_height, float *scale_factor, int *scaled_width, - int *scaled_height, std::vector *boxes) const { +Image ImageData::PreScale(int target_height, int max_height, + float *scale_factor, int *scaled_width, + int *scaled_height, std::vector *boxes) const { int input_width = 0; int input_height = 0; Image src_pix = GetPix(); @@ -231,8 +234,8 @@ Image ImageData::PreScale(int target_height, int max_height, float *scale_factor // Get the scaled image. 
Image pix = pixScale(src_pix, im_factor, im_factor); if (pix == nullptr) { - tprintf("Scaling pix of size %d, %d by factor %g made null pix!!\n", input_width, input_height, - im_factor); + tprintf("Scaling pix of size %d, %d by factor %g made null pix!!\n", + input_width, input_height, im_factor); src_pix.destroy(); return nullptr; } @@ -278,9 +281,9 @@ void ImageData::Display() const { } int width = pixGetWidth(pix); int height = pixGetHeight(pix); - auto *win = - new ScrollView("Imagedata", 100, 100, 2 * (width + 2 * kTextSize), - 2 * (height + 4 * kTextSize), width + 10, height + 3 * kTextSize, true); + auto *win = new ScrollView("Imagedata", 100, 100, 2 * (width + 2 * kTextSize), + 2 * (height + 4 * kTextSize), width + 10, + height + 3 * kTextSize, true); win->Draw(pix, 0, height - 1); pix.destroy(); // Draw the boxes. @@ -292,7 +295,7 @@ void ImageData::Display() const { } win->TextAttributes("Arial", text_size, false, false, false); if (!boxes_.empty()) { - for (int b = 0; b < boxes_.size(); ++b) { + for (unsigned b = 0; b < boxes_.size(); ++b) { boxes_[b].plot(win); win->Text(boxes_[b].left(), height + kTextSize, box_texts_[b].c_str()); } @@ -309,10 +312,11 @@ void ImageData::Display() const { // Adds the supplied boxes and transcriptions that correspond to the correct // page number. -void ImageData::AddBoxes(const std::vector &boxes, const std::vector &texts, +void ImageData::AddBoxes(const std::vector &boxes, + const std::vector &texts, const std::vector &box_pages) { // Copy the boxes and make the transcription. - for (int i = 0; i < box_pages.size(); ++i) { + for (unsigned i = 0; i < box_pages.size(); ++i) { if (page_number_ >= 0 && box_pages[i] != page_number_) { continue; } @@ -346,7 +350,8 @@ Image ImageData::GetPixInternal(const std::vector &image_data) { Image pix = nullptr; if (!image_data.empty()) { // Convert the array to an image. 
- const auto *u_data = reinterpret_cast(&image_data[0]); + const auto *u_data = + reinterpret_cast(&image_data[0]); pix = pixReadMem(u_data, image_data.size()); } return pix; @@ -361,23 +366,25 @@ bool ImageData::AddBoxes(const char *box_text) { std::vector texts; std::vector box_pages; if (ReadMemBoxes(page_number_, /*skip_blanks*/ false, box_text, - /*continue_on_failure*/ true, &boxes, &texts, nullptr, &box_pages)) { + /*continue_on_failure*/ true, &boxes, &texts, nullptr, + &box_pages)) { AddBoxes(boxes, texts, box_pages); return true; } else { - tprintf("Error: No boxes for page %d from image %s!\n", page_number_, imagefilename_.c_str()); + tprintf("Error: No boxes for page %d from image %s!\n", page_number_, + imagefilename_.c_str()); } } return false; } DocumentData::DocumentData(const std::string &name) - : document_name_(name) - , pages_offset_(-1) - , total_pages_(-1) - , memory_used_(0) - , max_memory_(0) - , reader_(nullptr) {} + : document_name_(name), + pages_offset_(-1), + total_pages_(-1), + memory_used_(0), + max_memory_(0), + reader_(nullptr) {} DocumentData::~DocumentData() { if (thread.joinable()) { @@ -392,15 +399,16 @@ DocumentData::~DocumentData() { // Reads all the pages in the given lstmf filename to the cache. The reader // is used to read the file. -bool DocumentData::LoadDocument(const char *filename, int start_page, int64_t max_memory, - FileReader reader) { +bool DocumentData::LoadDocument(const char *filename, int start_page, + int64_t max_memory, FileReader reader) { SetDocument(filename, max_memory, reader); pages_offset_ = start_page; return ReCachePages(); } // Sets up the document, without actually loading it. 
-void DocumentData::SetDocument(const char *filename, int64_t max_memory, FileReader reader) { +void DocumentData::SetDocument(const char *filename, int64_t max_memory, + FileReader reader) { std::lock_guard lock_p(pages_mutex_); std::lock_guard lock(general_mutex_); document_name_ = filename; @@ -435,19 +443,23 @@ void DocumentData::LoadPageInBackground(int index) { if (IsPageAvailable(index, &page)) { return; } - std::lock_guard lock(pages_mutex_); - if (pages_offset_ == index) { - return; + { + std::lock_guard lock(pages_mutex_); + if (pages_offset_ == index) { + return; + } + pages_offset_ = index; + for (auto page : pages_) { + delete page; + } + pages_.clear(); } - pages_offset_ = index; - for (auto page : pages_) { - delete page; - } - pages_.clear(); if (thread.joinable()) { thread.join(); } - thread = std::thread(&tesseract::DocumentData::ReCachePages, this); + // Don't run next statement asynchronously because that would + // create too many threads on Linux (see issue #3111). + ReCachePages(); } // Returns a pointer to the page with the given index, modulo the total @@ -481,7 +493,8 @@ bool DocumentData::IsPageAvailable(int index, ImageData **page) { } if (num_pages > 0) { index = Modulo(index, num_pages); - if (pages_offset_ <= index && index < pages_offset_ + pages_.size()) { + if (pages_offset_ <= index && + static_cast(index) < pages_offset_ + pages_.size()) { *page = pages_[index - pages_offset_]; // Page is available already. 
return true; } @@ -501,8 +514,8 @@ int64_t DocumentData::UnCache() { pages_offset_ = -1; set_total_pages(-1); set_memory_used(0); - tprintf("Unloaded document %s, saving %" PRId64 " memory\n", document_name_.c_str(), - memory_saved); + tprintf("Unloaded document %s, saving %" PRId64 " memory\n", + document_name_.c_str(), memory_saved); return memory_saved; } @@ -534,8 +547,8 @@ bool DocumentData::ReCachePages() { } pages_.clear(); TFile fp; - if (!fp.Open(document_name_.c_str(), reader_) || !fp.DeSerializeSize(&loaded_pages) || - loaded_pages <= 0) { + if (!fp.Open(document_name_.c_str(), reader_) || + !fp.DeSerializeSize(&loaded_pages) || loaded_pages <= 0) { tprintf("Deserialize header failed: %s\n", document_name_.c_str()); return false; } @@ -548,7 +561,8 @@ bool DocumentData::ReCachePages() { if (!fp.DeSerialize(&non_null)) { break; } - if (page < pages_offset_ || (max_memory_ > 0 && memory_used() > max_memory_)) { + if (page < pages_offset_ || + (max_memory_ > 0 && memory_used() > max_memory_)) { if (non_null && !ImageData::SkipDeSerialize(&fp)) { break; } @@ -570,16 +584,17 @@ bool DocumentData::ReCachePages() { } } if (page < loaded_pages) { - tprintf("Deserialize failed: %s read %d/%d lines\n", document_name_.c_str(), page, - loaded_pages); + tprintf("Deserialize failed: %s read %d/%d lines\n", document_name_.c_str(), + page, loaded_pages); for (auto page : pages_) { delete page; } pages_.clear(); } else if (loaded_pages > 1) { // Avoid lots of messages for training with single line images. 
- tprintf("Loaded %zu/%d lines (%d-%zu) of document %s\n", pages_.size(), loaded_pages, - pages_offset_ + 1, pages_offset_ + pages_.size(), document_name_.c_str()); + tprintf("Loaded %zu/%d lines (%d-%zu) of document %s\n", pages_.size(), + loaded_pages, pages_offset_ + 1, pages_offset_ + pages_.size(), + document_name_.c_str()); } set_total_pages(loaded_pages); return !pages_.empty(); @@ -597,7 +612,8 @@ DocumentCache::~DocumentCache() { // Adds all the documents in the list of filenames, counting memory. // The reader is used to read the files. bool DocumentCache::LoadDocuments(const std::vector &filenames, - CachingStrategy cache_strategy, FileReader reader) { + CachingStrategy cache_strategy, + FileReader reader) { cache_strategy_ = cache_strategy; int64_t fair_share_memory = 0; // In the round-robin case, each DocumentData handles restricting its content @@ -606,7 +622,7 @@ bool DocumentCache::LoadDocuments(const std::vector &filenames, if (cache_strategy_ == CS_ROUND_ROBIN) { fair_share_memory = max_memory_ / filenames.size(); } - for (auto filename : filenames) { + for (const auto &filename : filenames) { auto *document = new DocumentData(filename); document->SetDocument(filename.c_str(), fair_share_memory, reader); AddToCache(document); @@ -628,7 +644,8 @@ bool DocumentCache::AddToCache(DocumentData *data) { } // Finds and returns a document by name. 
-DocumentData *DocumentCache::FindDocument(const std::string &document_name) const { +DocumentData *DocumentCache::FindDocument( + const std::string &document_name) const { for (auto *document : documents_) { if (document->document_name() == document_name) { return document; @@ -692,7 +709,8 @@ const ImageData *DocumentCache::GetPageSequential(int serial) { } } int doc_index = serial / num_pages_per_doc_ % num_docs; - const ImageData *doc = documents_[doc_index]->GetPage(serial % num_pages_per_doc_); + const ImageData *doc = + documents_[doc_index]->GetPage(serial % num_pages_per_doc_); // Count up total memory. Background loading makes it more complicated to // keep a running count. int64_t total_memory = 0; @@ -706,7 +724,8 @@ const ImageData *DocumentCache::GetPageSequential(int serial) { // we create a hole between them and then un-caching the backmost occupied // will work for both. int num_in_front = CountNeighbourDocs(doc_index, 1); - for (int offset = num_in_front - 2; offset > 1 && total_memory >= max_memory_; --offset) { + for (int offset = num_in_front - 2; + offset > 1 && total_memory >= max_memory_; --offset) { int next_index = (doc_index + offset) % num_docs; total_memory -= documents_[next_index]->UnCache(); } @@ -714,7 +733,8 @@ const ImageData *DocumentCache::GetPageSequential(int serial) { // we take away the document that a 2nd reader is using, it will put it // back and make a hole between. 
int num_behind = CountNeighbourDocs(doc_index, -1); - for (int offset = num_behind; offset < 0 && total_memory >= max_memory_; ++offset) { + for (int offset = num_behind; offset < 0 && total_memory >= max_memory_; + ++offset) { int next_index = (doc_index + offset + num_docs) % num_docs; total_memory -= documents_[next_index]->UnCache(); } diff --git a/src/ccstruct/linlsq.cpp b/src/ccstruct/linlsq.cpp index 2bfa2b4e0..237b401cf 100644 --- a/src/ccstruct/linlsq.cpp +++ b/src/ccstruct/linlsq.cpp @@ -81,7 +81,7 @@ void LLSQ::add(const LLSQ &other) { void LLSQ::remove(double x, double y) { // delete an element if (total_weight <= 0.0) { // illegal - EMPTY_LLSQ.error("LLSQ::remove", ABORT, nullptr); + EMPTY_LLSQ.error("LLSQ::remove", ABORT); } total_weight--; // count elements sigx -= x; // update accumulators diff --git a/src/ccstruct/matrix.h b/src/ccstruct/matrix.h index 074196712..a97912ad7 100644 --- a/src/ccstruct/matrix.h +++ b/src/ccstruct/matrix.h @@ -417,7 +417,7 @@ public: // Accumulates the element-wise sums of squares of src into *this. void SumSquares(const GENERIC_2D_ARRAY &src, const T &decay_factor) { - T update_factor = 1.0 - decay_factor; + T update_factor = 1 - decay_factor; int size = num_elements(); for (int i = 0; i < size; ++i) { array_[i] = array_[i] * decay_factor + update_factor * src.array_[i] * src.array_[i]; diff --git a/src/ccstruct/mod128.cpp b/src/ccstruct/mod128.cpp index 4b862ef53..27280cd71 100644 --- a/src/ccstruct/mod128.cpp +++ b/src/ccstruct/mod128.cpp @@ -1,7 +1,7 @@ /********************************************************************** * File: mod128.cpp (Formerly dir128.c) * Description: Code to convert a DIR128 to an ICOORD. - * Author: Ray Smith + * Author: Ray Smith * * (C) Copyright 1991, Hewlett-Packard Ltd. 
** Licensed under the Apache License, Version 2.0 (the "License"); @@ -20,7 +20,7 @@ namespace tesseract { -static const int16_t idirtab[] = { +static const TDimension idirtab[] = { 1000, 0, 998, 49, 995, 98, 989, 146, 980, 195, 970, 242, 956, 290, 941, 336, 923, 382, 903, 427, 881, 471, 857, 514, 831, 555, 803, 595, 773, 634, 740, 671, 707, 707, 671, 740, 634, 773, 595, 803, 555, 831, 514, 857, 471, diff --git a/src/ccstruct/mod128.h b/src/ccstruct/mod128.h index fda360fb7..b815f8186 100644 --- a/src/ccstruct/mod128.h +++ b/src/ccstruct/mod128.h @@ -1,8 +1,7 @@ /********************************************************************** * File: mod128.h (Formerly dir128.h) * Description: Header for class which implements modulo arithmetic. - * Author: Ray Smith - * Created: Tue Mar 26 17:48:13 GMT 1991 + * Author: Ray Smith * * (C) Copyright 1991, Hewlett-Packard Ltd. ** Licensed under the Apache License, Version 2.0 (the "License"); @@ -81,7 +80,6 @@ public: return dir; } -private: int8_t dir; // a direction }; diff --git a/src/ccstruct/normalis.cpp b/src/ccstruct/normalis.cpp index 599de79c5..077a7c75a 100644 --- a/src/ccstruct/normalis.cpp +++ b/src/ccstruct/normalis.cpp @@ -226,7 +226,9 @@ static void ComputeEdgeDensityProfiles(const TBOX &box, const GENERIC_2D_ARRAY &hx, std::vector &hy) { int width = box.width(); int height = box.height(); + hx.clear(); hx.resize(width + 1); + hy.clear(); hy.resize(height + 1); double total = 0.0; for (int iy = 0; iy < height; ++iy) { diff --git a/src/ccstruct/ocrblock.cpp b/src/ccstruct/ocrblock.cpp index 8a7fa6fa1..0ab809c59 100644 --- a/src/ccstruct/ocrblock.cpp +++ b/src/ccstruct/ocrblock.cpp @@ -31,13 +31,14 @@ namespace tesseract { * * Constructor for a simple rectangular block. 
*/ -BLOCK::BLOCK(const char *name, ///< filename - bool prop, ///< proportional - int16_t kern, ///< kerning - int16_t space, ///< spacing - int16_t xmin, ///< bottom left - int16_t ymin, int16_t xmax, ///< top right - int16_t ymax) +BLOCK::BLOCK(const char *name, ///< filename + bool prop, ///< proportional + int16_t kern, ///< kerning + int16_t space, ///< spacing + TDimension xmin, ///< bottom left + TDimension ymin, + TDimension xmax, ///< top right + TDimension ymax) : pdblk(xmin, ymin, xmax, ymax) , filename(name) , re_rotation_(1.0f, 0.0f) diff --git a/src/ccstruct/ocrblock.h b/src/ccstruct/ocrblock.h index bde794d9b..88753b4b4 100644 --- a/src/ccstruct/ocrblock.h +++ b/src/ccstruct/ocrblock.h @@ -39,10 +39,10 @@ public: bool prop, ///< proportional int16_t kern, ///< kerning int16_t space, ///< spacing - int16_t xmin, ///< bottom left - int16_t ymin, - int16_t xmax, ///< top right - int16_t ymax); + TDimension xmin, ///< bottom left + TDimension ymin, + TDimension xmax, ///< top right + TDimension ymax); ~BLOCK() = default; diff --git a/src/ccstruct/otsuthr.cpp b/src/ccstruct/otsuthr.cpp index 689739416..ddceecfb1 100644 --- a/src/ccstruct/otsuthr.cpp +++ b/src/ccstruct/otsuthr.cpp @@ -75,7 +75,7 @@ int OtsuThreshold(Image src_pix, int left, int top, int width, int height, std:: // or to be a convincing background we must have a large fraction of H. // In between we assume this channel contains no thresholding information. int hi_value = best_omega_0 < H * 0.5; - (*thresholds)[ch] = best_t; + thresholds[ch] = best_t; if (best_omega_0 > H * 0.75) { any_good_hivalue = true; hi_values[ch] = 0; diff --git a/src/ccstruct/pageres.cpp b/src/ccstruct/pageres.cpp index 1005dc3be..65ea748ff 100644 --- a/src/ccstruct/pageres.cpp +++ b/src/ccstruct/pageres.cpp @@ -65,7 +65,8 @@ const double kMaxWordGapRatio = 2.0; // which words to keep, based on the adjustment factors of the two words. // TODO(rays) This is horrible. Replace with an enhance params training model. 
static double StopperAmbigThreshold(double f1, double f2) { - return (f2 - f1) * kStopperAmbiguityThresholdGain - kStopperAmbiguityThresholdOffset; + return (f2 - f1) * kStopperAmbiguityThresholdGain - + kStopperAmbiguityThresholdOffset; } /************************************************************************* @@ -79,7 +80,8 @@ PAGE_RES::PAGE_RES(bool merge_similar_words, BLOCK_LIST *the_block_list, BLOCK_IT block_it(the_block_list); BLOCK_RES_IT block_res_it(&block_res_list); for (block_it.mark_cycle_pt(); !block_it.cycled_list(); block_it.forward()) { - block_res_it.add_to_end(new BLOCK_RES(merge_similar_words, block_it.data())); + block_res_it.add_to_end( + new BLOCK_RES(merge_similar_words, block_it.data())); } prev_word_best_choice = prev_word_best_choice_ptr; } @@ -127,7 +129,8 @@ ROW_RES::ROW_RES(bool merge_similar_words, ROW *the_row) { row = the_row; bool add_next_word = false; TBOX union_box; - float line_height = the_row->x_height() + the_row->ascenders() - the_row->descenders(); + float line_height = + the_row->x_height() + the_row->ascenders() - the_row->descenders(); for (word_it.mark_cycle_pt(); !word_it.cycled_list(); word_it.forward()) { auto *word_res = new WERD_RES(word_it.data()); word_res->x_height = the_row->x_height(); @@ -298,14 +301,17 @@ void WERD_RES::InitForRetryRecognition(const WERD_RES &source) { // norm_box is used to override the word bounding box to determine the // normalization scale and offset. // Returns false if the word is empty and sets up fake results. 
-bool WERD_RES::SetupForRecognition(const UNICHARSET &unicharset_in, tesseract::Tesseract *tess, - Image pix, int norm_mode, const TBOX *norm_box, bool numeric_mode, - bool use_body_size, bool allow_detailed_fx, ROW *row, +bool WERD_RES::SetupForRecognition(const UNICHARSET &unicharset_in, + tesseract::Tesseract *tess, Image pix, + int norm_mode, const TBOX *norm_box, + bool numeric_mode, bool use_body_size, + bool allow_detailed_fx, ROW *row, const BLOCK *block) { auto norm_mode_hint = static_cast(norm_mode); tesseract = tess; POLY_BLOCK *pb = block != nullptr ? block->pdblk.poly_block() : nullptr; - if ((norm_mode_hint != tesseract::OEM_LSTM_ONLY && word->cblob_list()->empty()) || + if ((norm_mode_hint != tesseract::OEM_LSTM_ONLY && + word->cblob_list()->empty()) || (pb != nullptr && !pb->IsText())) { // Empty words occur when all the blobs have been moved to the rej_blobs // list, which seems to occur frequently in junk. @@ -317,9 +323,12 @@ bool WERD_RES::SetupForRecognition(const UNICHARSET &unicharset_in, tesseract::T SetupWordScript(unicharset_in); chopped_word = TWERD::PolygonalCopy(allow_detailed_fx, word); float word_xheight = - use_body_size && row != nullptr && row->body_size() > 0.0f ? row->body_size() : x_height; - chopped_word->BLNormalize(block, row, pix, word->flag(W_INVERSE), word_xheight, baseline_shift, - numeric_mode, norm_mode_hint, norm_box, &denorm); + use_body_size && row != nullptr && row->body_size() > 0.0f + ? 
row->body_size() + : x_height; + chopped_word->BLNormalize(block, row, pix, word->flag(W_INVERSE), + word_xheight, baseline_shift, numeric_mode, + norm_mode_hint, norm_box, &denorm); blob_row = row; SetupBasicsFromChoppedWord(unicharset_in); SetupBlamerBundle(); @@ -398,7 +407,8 @@ void WERD_RES::SetupBlobWidthsAndGaps() { TBOX box = blob->bounding_box(); blob_widths.push_back(box.width()); if (b + 1 < num_blobs) { - blob_gaps.push_back(chopped_word->blobs[b + 1]->bounding_box().left() - box.right()); + blob_gaps.push_back(chopped_word->blobs[b + 1]->bounding_box().left() - + box.right()); } } } @@ -449,19 +459,19 @@ bool WERD_RES::IsAmbiguous() { // Returns true if the ratings matrix size matches the sum of each of the // segmentation states. bool WERD_RES::StatesAllValid() { - int ratings_dim = ratings->dimension(); + unsigned ratings_dim = ratings->dimension(); if (raw_choice->TotalOfStates() != ratings_dim) { - tprintf("raw_choice has total of states = %d vs ratings dim of %d\n", + tprintf("raw_choice has total of states = %u vs ratings dim of %u\n", raw_choice->TotalOfStates(), ratings_dim); return false; } WERD_CHOICE_IT it(&best_choices); - int index = 0; + unsigned index = 0; for (it.mark_cycle_pt(); !it.cycled_list(); it.forward(), ++index) { WERD_CHOICE *choice = it.data(); if (choice->TotalOfStates() != ratings_dim) { - tprintf("Cooked #%d has total of states = %d vs ratings dim of %d\n", index, - choice->TotalOfStates(), ratings_dim); + tprintf("Cooked #%u has total of states = %u vs ratings dim of %u\n", + index, choice->TotalOfStates(), ratings_dim); return false; } } @@ -471,7 +481,8 @@ bool WERD_RES::StatesAllValid() { // Prints a list of words found if debug is true or the word result matches // the word_to_debug. 
void WERD_RES::DebugWordChoices(bool debug, const char *word_to_debug) { - if (debug || (word_to_debug != nullptr && *word_to_debug != '\0' && best_choice != nullptr && + if (debug || (word_to_debug != nullptr && *word_to_debug != '\0' && + best_choice != nullptr && best_choice->unichar_string() == std::string(word_to_debug))) { if (raw_choice != nullptr) { raw_choice->print("\nBest Raw Choice"); @@ -490,8 +501,8 @@ void WERD_RES::DebugWordChoices(bool debug, const char *word_to_debug) { // Prints the top choice along with the accepted/done flags. void WERD_RES::DebugTopChoice(const char *msg) const { - tprintf("Best choice: accepted=%d, adaptable=%d, done=%d : ", tess_accepted, tess_would_adapt, - done); + tprintf("Best choice: accepted=%d, adaptable=%d, done=%d : ", tess_accepted, + tess_would_adapt, done); if (best_choice == nullptr) { tprintf("\n"); } else { @@ -516,25 +527,26 @@ void WERD_RES::FilterWordChoices(int debug_level) { int index = 0; for (it.forward(); !it.at_first(); it.forward(), ++index) { WERD_CHOICE *choice = it.data(); - float threshold = StopperAmbigThreshold(best_choice->adjust_factor(), choice->adjust_factor()); + float threshold = StopperAmbigThreshold(best_choice->adjust_factor(), + choice->adjust_factor()); // i, j index the blob choice in choice, best_choice. // chunk is an index into the chopped_word blobs (AKA chunks). // Since the two words may use different segmentations of the chunks, we // iterate over the chunks to find out whether a comparable blob // classification is much worse than the best result. - int i = 0, j = 0, chunk = 0; + unsigned i = 0, j = 0, chunk = 0; // Each iteration of the while deals with 1 chunk. On entry choice_chunk // and best_chunk are the indices of the first chunk in the NEXT blob, // i.e. we don't have to increment i, j while chunk < choice_chunk and // best_chunk respectively. 
- int choice_chunk = choice->state(0), best_chunk = best_choice->state(0); + auto choice_chunk = choice->state(0), best_chunk = best_choice->state(0); while (i < choice->length() && j < best_choice->length()) { if (choice->unichar_id(i) != best_choice->unichar_id(j) && choice->certainty(i) - best_choice->certainty(j) < threshold) { if (debug_level >= 2) { choice->print("WorstCertaintyDiffWorseThan"); tprintf( - "i %d j %d Choice->Blob[i].Certainty %.4g" + "i %u j %u Choice->Blob[i].Certainty %.4g" " WorstOtherChoiceCertainty %g Threshold %g\n", i, j, choice->certainty(i), best_choice->certainty(j), threshold); tprintf("Discarding bad choice #%d\n", index); @@ -555,13 +567,15 @@ void WERD_RES::FilterWordChoices(int debug_level) { } } -void WERD_RES::ComputeAdaptionThresholds(float certainty_scale, float min_rating, float max_rating, - float rating_margin, float *thresholds) { +void WERD_RES::ComputeAdaptionThresholds(float certainty_scale, + float min_rating, float max_rating, + float rating_margin, + float *thresholds) { int chunk = 0; int end_chunk = best_choice->state(0); int end_raw_chunk = raw_choice->state(0); int raw_blob = 0; - for (int i = 0; i < best_choice->length(); i++, thresholds++) { + for (unsigned i = 0; i < best_choice->length(); i++, thresholds++) { float avg_rating = 0.0f; int num_error_chunks = 0; @@ -612,26 +626,29 @@ bool WERD_RES::LogNewRawChoice(WERD_CHOICE *word_choice) { // The best_choices list is kept in sorted order by rating. Duplicates are // removed, and the list is kept no longer than max_num_choices in length. // Returns true if the word_choice is still a valid pointer. -bool WERD_RES::LogNewCookedChoice(int max_num_choices, bool debug, WERD_CHOICE *word_choice) { +bool WERD_RES::LogNewCookedChoice(int max_num_choices, bool debug, + WERD_CHOICE *word_choice) { if (best_choice != nullptr) { // Throw out obviously bad choices to save some work. // TODO(rays) Get rid of this! 
This piece of code produces different // results according to the order in which words are found, which is an // undesirable behavior. It would be better to keep all the choices and // prune them later when more information is available. - float max_certainty_delta = - StopperAmbigThreshold(best_choice->adjust_factor(), word_choice->adjust_factor()); + float max_certainty_delta = StopperAmbigThreshold( + best_choice->adjust_factor(), word_choice->adjust_factor()); if (max_certainty_delta > -kStopperAmbiguityThresholdOffset) { max_certainty_delta = -kStopperAmbiguityThresholdOffset; } - if (word_choice->certainty() - best_choice->certainty() < max_certainty_delta) { + if (word_choice->certainty() - best_choice->certainty() < + max_certainty_delta) { if (debug) { std::string bad_string; word_choice->string_and_lengths(&bad_string, nullptr); tprintf( "Discarding choice \"%s\" with an overly low certainty" " %.3f vs best choice certainty %.3f (Threshold: %.3f)\n", - bad_string.c_str(), word_choice->certainty(), best_choice->certainty(), + bad_string.c_str(), word_choice->certainty(), + best_choice->certainty(), max_certainty_delta + best_choice->certainty()); } delete word_choice; @@ -664,8 +681,8 @@ bool WERD_RES::LogNewCookedChoice(int max_num_choices, bool debug, WERD_CHOICE * } else { // Old is better. 
if (debug) { - tprintf("Discarding duplicate choice \"%s\", rating %g vs %g\n", new_str.c_str(), - word_choice->rating(), choice->rating()); + tprintf("Discarding duplicate choice \"%s\", rating %g vs %g\n", + new_str.c_str(), word_choice->rating(), choice->rating()); } delete word_choice; return false; @@ -720,8 +737,8 @@ void WERD_RES::PrintBestChoices() const { } alternates_str += it.data()->unichar_string(); } - tprintf("Alternates for \"%s\": {\"%s\"}\n", best_choice->unichar_string().c_str(), - alternates_str.c_str()); + tprintf("Alternates for \"%s\": {\"%s\"}\n", + best_choice->unichar_string().c_str(), alternates_str.c_str()); } // Returns the sum of the widths of the blob between start_blob and last_blob @@ -737,8 +754,8 @@ int WERD_RES::GetBlobsWidth(int start_blob, int last_blob) const { return result; } // Returns the width of a gap between the specified blob and the next one. -int WERD_RES::GetBlobsGap(int blob_index) const { - if (blob_index < 0 || blob_index >= blob_gaps.size()) { +int WERD_RES::GetBlobsGap(unsigned blob_index) const { + if (blob_index >= blob_gaps.size()) { return 0; } return blob_gaps[blob_index]; @@ -748,8 +765,8 @@ int WERD_RES::GetBlobsGap(int blob_index) const { // best choice word taken from the appropriate cell in the ratings MATRIX. // Borrowed pointer, so do not delete. May return nullptr if there is no // BLOB_CHOICE matching the unichar_id at the given index. 
-BLOB_CHOICE *WERD_RES::GetBlobChoice(int index) const { - if (index < 0 || index >= best_choice->length()) { +BLOB_CHOICE *WERD_RES::GetBlobChoice(unsigned index) const { + if (index >= best_choice->length()) { return nullptr; } BLOB_CHOICE_LIST *choices = GetBlobChoices(index); @@ -826,16 +843,18 @@ void WERD_RES::RebuildBestState() { } best_state.clear(); int start = 0; - for (int i = 0; i < best_choice->length(); ++i) { + for (unsigned i = 0; i < best_choice->length(); ++i) { int length = best_choice->state(i); best_state.push_back(length); if (length > 1) { - SEAM::JoinPieces(seam_array, chopped_word->blobs, start, start + length - 1); + SEAM::JoinPieces(seam_array, chopped_word->blobs, start, + start + length - 1); } TBLOB *blob = chopped_word->blobs[start]; rebuild_word->blobs.push_back(new TBLOB(*blob)); if (length > 1) { - SEAM::BreakPieces(seam_array, chopped_word->blobs, start, start + length - 1); + SEAM::BreakPieces(seam_array, chopped_word->blobs, start, + start + length - 1); } start += length; } @@ -847,10 +866,10 @@ void WERD_RES::CloneChoppedToRebuild() { delete rebuild_word; rebuild_word = new TWERD(*chopped_word); SetupBoxWord(); - int word_len = box_word->length(); + auto word_len = box_word->length(); best_state.reserve(word_len); correct_text.reserve(word_len); - for (int i = 0; i < word_len; ++i) { + for (unsigned i = 0; i < word_len; ++i) { best_state.push_back(1); correct_text.emplace_back(""); } @@ -886,14 +905,14 @@ void WERD_RES::SetAllScriptPositions(tesseract::ScriptPos position) { // providing a single classifier result for each blob. // The BLOB_CHOICEs are consumed and the word takes ownership. // The number of blobs in the box_word must match blob_count. -void WERD_RES::FakeClassifyWord(int blob_count, BLOB_CHOICE **choices) { +void WERD_RES::FakeClassifyWord(unsigned blob_count, BLOB_CHOICE **choices) { // Setup the WERD_RES. 
ASSERT_HOST(box_word != nullptr); ASSERT_HOST(blob_count == box_word->length()); ClearWordChoices(); ClearRatings(); ratings = new MATRIX(blob_count, 1); - for (int c = 0; c < blob_count; ++c) { + for (unsigned c = 0; c < blob_count; ++c) { auto *choice_list = new BLOB_CHOICE_LIST; BLOB_CHOICE_IT choice_it(choice_list); choice_it.add_after_then_move(choices[c]); @@ -901,6 +920,7 @@ void WERD_RES::FakeClassifyWord(int blob_count, BLOB_CHOICE **choices) { } FakeWordFromRatings(TOP_CHOICE_PERM); reject_map.initialise(blob_count); + best_state.clear(); best_state.resize(blob_count, 1); done = true; } @@ -924,7 +944,8 @@ void WERD_RES::FakeWordFromRatings(PermuterType permuter) { rating = choice->rating(); certainty = choice->certainty(); } - word_choice->append_unichar_id_space_allocated(unichar_id, 1, rating, certainty); + word_choice->append_unichar_id_space_allocated(unichar_id, 1, rating, + certainty); } LogNewRawChoice(word_choice); // Ownership of word_choice taken by word here. @@ -935,7 +956,7 @@ void WERD_RES::FakeWordFromRatings(PermuterType permuter) { void WERD_RES::BestChoiceToCorrectText() { correct_text.clear(); ASSERT_HOST(best_choice != nullptr); - for (int i = 0; i < best_choice->length(); ++i) { + for (unsigned i = 0; i < best_choice->length(); ++i) { UNICHAR_ID choice_id = best_choice->unichar_id(i); const char *blob_choice = uch_set->id_to_unichar(choice_id); correct_text.emplace_back(blob_choice); @@ -947,14 +968,17 @@ void WERD_RES::BestChoiceToCorrectText() { // callback box_cb is nullptr or returns true, setting the merged blob // result to the class returned from class_cb. // Returns true if anything was merged. 
-bool WERD_RES::ConditionalBlobMerge(std::function class_cb, - std::function box_cb) { +bool WERD_RES::ConditionalBlobMerge( + const std::function &class_cb, + const std::function &box_cb) { ASSERT_HOST(best_choice->empty() || ratings != nullptr); bool modified = false; - for (int i = 0; i + 1 < best_choice->length(); ++i) { - UNICHAR_ID new_id = class_cb(best_choice->unichar_id(i), best_choice->unichar_id(i + 1)); + for (unsigned i = 0; i + 1 < best_choice->length(); ++i) { + UNICHAR_ID new_id = + class_cb(best_choice->unichar_id(i), best_choice->unichar_id(i + 1)); if (new_id != INVALID_UNICHAR_ID && - (box_cb == nullptr || box_cb(box_word->BlobBox(i), box_word->BlobBox(i + 1)))) { + (box_cb == nullptr || + box_cb(box_word->BlobBox(i), box_word->BlobBox(i + 1)))) { // Raw choice should not be fixed. best_choice->set_unichar_id(new_id, i); modified = true; @@ -978,7 +1002,7 @@ bool WERD_RES::ConditionalBlobMerge(std::functionlength()) { reject_map.remove_pos(index); } @@ -1002,8 +1026,9 @@ static int is_simple_quote(const char *signed_str, int length) { // Standard 1 byte quotes. return (length == 1 && (*str == '\'' || *str == '`')) || // UTF-8 3 bytes curved quotes. 
- (length == 3 && ((*str == 0xe2 && *(str + 1) == 0x80 && *(str + 2) == 0x98) || - (*str == 0xe2 && *(str + 1) == 0x80 && *(str + 2) == 0x99))); + (length == 3 && + ((*str == 0xe2 && *(str + 1) == 0x80 && *(str + 2) == 0x98) || + (*str == 0xe2 && *(str + 1) == 0x80 && *(str + 2) == 0x99))); } // Callback helper for fix_quotes returns a double quote if both @@ -1011,7 +1036,8 @@ static int is_simple_quote(const char *signed_str, int length) { UNICHAR_ID WERD_RES::BothQuotes(UNICHAR_ID id1, UNICHAR_ID id2) { const char *ch = uch_set->id_to_unichar(id1); const char *next_ch = uch_set->id_to_unichar(id2); - if (is_simple_quote(ch, strlen(ch)) && is_simple_quote(next_ch, strlen(next_ch))) { + if (is_simple_quote(ch, strlen(ch)) && + is_simple_quote(next_ch, strlen(next_ch))) { return uch_set->unichar_to_id("\""); } return INVALID_UNICHAR_ID; @@ -1019,7 +1045,8 @@ UNICHAR_ID WERD_RES::BothQuotes(UNICHAR_ID id1, UNICHAR_ID id2) { // Change pairs of quotes to double quotes. void WERD_RES::fix_quotes() { - if (!uch_set->contains_unichar("\"") || !uch_set->get_enabled(uch_set->unichar_to_id("\""))) { + if (!uch_set->contains_unichar("\"") || + !uch_set->get_enabled(uch_set->unichar_to_id("\""))) { return; // Don't create it if it is disallowed. } @@ -1048,7 +1075,8 @@ bool WERD_RES::HyphenBoxesOverlap(const TBOX &box1, const TBOX &box2) { // Change pairs of hyphens to a single hyphen if the bounding boxes touch // Typically a long dash which has been segmented. void WERD_RES::fix_hyphens() { - if (!uch_set->contains_unichar("-") || !uch_set->get_enabled(uch_set->unichar_to_id("-"))) { + if (!uch_set->contains_unichar("-") || + !uch_set->get_enabled(uch_set->unichar_to_id("-"))) { return; // Don't create it if it is disallowed. 
} @@ -1070,8 +1098,9 @@ UNICHAR_ID WERD_RES::BothSpaces(UNICHAR_ID id1, UNICHAR_ID id2) { // Change pairs of tess failures to a single one void WERD_RES::merge_tess_fails() { using namespace std::placeholders; // for _1, _2 - if (ConditionalBlobMerge(std::bind(&WERD_RES::BothSpaces, this, _1, _2), nullptr)) { - int len = best_choice->length(); + if (ConditionalBlobMerge(std::bind(&WERD_RES::BothSpaces, this, _1, _2), + nullptr)) { + unsigned len = best_choice->length(); ASSERT_HOST(reject_map.length() == len); ASSERT_HOST(box_word->length() == len); } @@ -1082,7 +1111,7 @@ void WERD_RES::merge_tess_fails() { bool WERD_RES::PiecesAllNatural(int start, int count) const { // all seams must have no splits. for (int index = start; index < start + count - 1; ++index) { - if (index >= 0 && index < seam_array.size()) { + if (index >= 0 && static_cast(index) < seam_array.size()) { SEAM *seam = seam_array[index]; if (seam != nullptr && seam->HasAnySplits()) { return false; @@ -1177,7 +1206,8 @@ int PAGE_RES_IT::cmp(const PAGE_RES_IT &other) const { } WERD_RES_IT word_res_it(&row_res->word_res_list); - for (word_res_it.mark_cycle_pt(); !word_res_it.cycled_list(); word_res_it.forward()) { + for (word_res_it.mark_cycle_pt(); !word_res_it.cycled_list(); + word_res_it.forward()) { if (word_res_it.data() == word_res) { return -1; } else if (word_res_it.data() == other.word_res) { @@ -1189,7 +1219,8 @@ int PAGE_RES_IT::cmp(const PAGE_RES_IT &other) const { // we both point to the same block, but different rows. ROW_RES_IT row_res_it(&block_res->row_res_list); - for (row_res_it.mark_cycle_pt(); !row_res_it.cycled_list(); row_res_it.forward()) { + for (row_res_it.mark_cycle_pt(); !row_res_it.cycled_list(); + row_res_it.forward()) { if (row_res_it.data() == row_res) { return -1; } else if (row_res_it.data() == other.row_res) { @@ -1201,7 +1232,8 @@ int PAGE_RES_IT::cmp(const PAGE_RES_IT &other) const { // We point to different blocks. 
BLOCK_RES_IT block_res_it(&page_res->block_res_list); - for (block_res_it.mark_cycle_pt(); !block_res_it.cycled_list(); block_res_it.forward()) { + for (block_res_it.mark_cycle_pt(); !block_res_it.cycled_list(); + block_res_it.forward()) { if (block_res_it.data() == block_res) { return -1; } else if (block_res_it.data() == other.block_res) { @@ -1217,7 +1249,8 @@ int PAGE_RES_IT::cmp(const PAGE_RES_IT &other) const { // before the current position. The simple fields of the WERD_RES are copied // from clone_res and the resulting WERD_RES is returned for further setup // with best_choice etc. -WERD_RES *PAGE_RES_IT::InsertSimpleCloneWord(const WERD_RES &clone_res, WERD *new_word) { +WERD_RES *PAGE_RES_IT::InsertSimpleCloneWord(const WERD_RES &clone_res, + WERD *new_word) { // Make a WERD_RES for the new_word. auto *new_res = new WERD_RES(new_word); new_res->CopySimpleFields(clone_res); @@ -1244,7 +1277,8 @@ WERD_RES *PAGE_RES_IT::InsertSimpleCloneWord(const WERD_RES &clone_res, WERD *ne // are likely very poor, if they come from LSTM, where it only outputs the // character at one pixel within it, so we find the midpoints between them. static void ComputeBlobEnds(const WERD_RES &word, const TBOX &clip_box, - C_BLOB_LIST *next_word_blobs, std::vector *blob_ends) { + C_BLOB_LIST *next_word_blobs, + std::vector *blob_ends) { C_BLOB_IT blob_it(word.word->cblob_list()); for (int length : word.best_state) { // Get the bounding box of the fake blobs @@ -1271,17 +1305,18 @@ static void ComputeBlobEnds(const WERD_RES &word, const TBOX &clip_box, // Helper computes the bounds of a word by restricting it to existing words // that significantly overlap. 
-static TBOX ComputeWordBounds(const tesseract::PointerVector &words, int w_index, - TBOX prev_box, WERD_RES_IT w_it) { +static TBOX ComputeWordBounds(const tesseract::PointerVector &words, + int w_index, TBOX prev_box, WERD_RES_IT w_it) { constexpr int kSignificantOverlapFraction = 4; TBOX clipped_box; TBOX current_box = words[w_index]->word->bounding_box(); TBOX next_box; - if (w_index + 1 < words.size() && words[w_index + 1] != nullptr && - words[w_index + 1]->word != nullptr) { + if (static_cast(w_index + 1) < words.size() && + words[w_index + 1] != nullptr && words[w_index + 1]->word != nullptr) { next_box = words[w_index + 1]->word->bounding_box(); } - for (w_it.forward(); !w_it.at_first() && w_it.data()->part_of_combo; w_it.forward()) { + for (w_it.forward(); !w_it.at_first() && w_it.data()->part_of_combo; + w_it.forward()) { if (w_it.data() == nullptr || w_it.data()->word == nullptr) { continue; } @@ -1316,14 +1351,19 @@ static TBOX ComputeWordBounds(const tesseract::PointerVector &words, i // Helper moves the blob from src to dest. If it isn't contained by clip_box, // the blob is replaced by a fake that is contained. 
-static TBOX MoveAndClipBlob(C_BLOB_IT *src_it, C_BLOB_IT *dest_it, const TBOX &clip_box) { +static TBOX MoveAndClipBlob(C_BLOB_IT *src_it, C_BLOB_IT *dest_it, + const TBOX &clip_box) { C_BLOB *src_blob = src_it->extract(); TBOX box = src_blob->bounding_box(); if (!clip_box.contains(box)) { - int left = ClipToRange(box.left(), clip_box.left(), clip_box.right() - 1); - int right = ClipToRange(box.right(), clip_box.left() + 1, clip_box.right()); - int top = ClipToRange(box.top(), clip_box.bottom() + 1, clip_box.top()); - int bottom = ClipToRange(box.bottom(), clip_box.bottom(), clip_box.top() - 1); + int left = + ClipToRange(box.left(), clip_box.left(), clip_box.right() - 1); + int right = + ClipToRange(box.right(), clip_box.left() + 1, clip_box.right()); + int top = + ClipToRange(box.top(), clip_box.bottom() + 1, clip_box.top()); + int bottom = + ClipToRange(box.bottom(), clip_box.bottom(), clip_box.top() - 1); box = TBOX(left, bottom, right, top); delete src_blob; src_blob = C_BLOB::FakeBlob(box); @@ -1335,7 +1375,8 @@ static TBOX MoveAndClipBlob(C_BLOB_IT *src_it, C_BLOB_IT *dest_it, const TBOX &c // Replaces the current WERD/WERD_RES with the given words. The given words // contain fake blobs that indicate the position of the characters. These are // replaced with real blobs from the current word as much as possible. -void PAGE_RES_IT::ReplaceCurrentWord(tesseract::PointerVector *words) { +void PAGE_RES_IT::ReplaceCurrentWord( + tesseract::PointerVector *words) { if (words->empty()) { DeleteCurrentWord(); return; @@ -1382,7 +1423,7 @@ void PAGE_RES_IT::ReplaceCurrentWord(tesseract::PointerVector *words) C_BLOB_IT rej_b_it(input_word->word->rej_cblob_list()); rej_b_it.sort(&C_BLOB::SortByXMiddle); TBOX clip_box; - for (int w = 0; w < words->size(); ++w) { + for (size_t w = 0; w < words->size(); ++w) { WERD_RES *word_w = (*words)[w]; clip_box = ComputeWordBounds(*words, w, clip_box, wr_it_of_current_word); // Compute blob boundaries. 
@@ -1400,15 +1441,17 @@ void PAGE_RES_IT::ReplaceCurrentWord(tesseract::PointerVector *words) C_BLOB_IT dest_it(word_w->word->cblob_list()); // Build the box word as we move the blobs. auto *box_word = new tesseract::BoxWord; - for (int i = 0; i < blob_ends.size(); ++i, fake_b_it.forward()) { + for (size_t i = 0; i < blob_ends.size(); ++i, fake_b_it.forward()) { int end_x = blob_ends[i]; TBOX blob_box; // Add the blobs up to end_x. - while (!src_b_it.empty() && src_b_it.data()->bounding_box().x_middle() < end_x) { + while (!src_b_it.empty() && + src_b_it.data()->bounding_box().x_middle() < end_x) { blob_box += MoveAndClipBlob(&src_b_it, &dest_it, clip_box); src_b_it.forward(); } - while (!rej_b_it.empty() && rej_b_it.data()->bounding_box().x_middle() < end_x) { + while (!rej_b_it.empty() && + rej_b_it.data()->bounding_box().x_middle() < end_x) { blob_box += MoveAndClipBlob(&rej_b_it, &dest_it, clip_box); rej_b_it.forward(); } @@ -1483,13 +1526,14 @@ void PAGE_RES_IT::MakeCurrentWordFuzzy() { // The next word should be the corresponding part of combo, but we have // already stepped past it, so find it by search. WERD_RES_IT wr_it(&row()->word_res_list); - for (wr_it.mark_cycle_pt(); !wr_it.cycled_list() && wr_it.data() != word_res; - wr_it.forward()) { + for (wr_it.mark_cycle_pt(); + !wr_it.cycled_list() && wr_it.data() != word_res; wr_it.forward()) { } wr_it.forward(); ASSERT_HOST(wr_it.data()->part_of_combo); real_word = wr_it.data()->word; - ASSERT_HOST(!real_word->flag(W_FUZZY_SP) && !real_word->flag(W_FUZZY_NON)); + ASSERT_HOST(!real_word->flag(W_FUZZY_SP) && + !real_word->flag(W_FUZZY_NON)); real_word->set_flag(W_FUZZY_SP, true); } } @@ -1530,7 +1574,8 @@ void PAGE_RES_IT::ResetWordIterator() { // cycled_list state correctly. 
word_res_it.move_to_first(); for (word_res_it.mark_cycle_pt(); - !word_res_it.cycled_list() && word_res_it.data() != next_word_res; word_res_it.forward()) { + !word_res_it.cycled_list() && word_res_it.data() != next_word_res; + word_res_it.forward()) { if (!word_res_it.data()->part_of_combo) { if (prev_row_res == row_res) { prev_word_res = word_res; @@ -1623,8 +1668,9 @@ WERD_RES *PAGE_RES_IT::internal_forward(bool new_block, bool empty_ok) { foundword: // Update prev_word_best_choice pointer. if (page_res != nullptr && page_res->prev_word_best_choice != nullptr) { - *page_res->prev_word_best_choice = - (new_block || prev_word_res == nullptr) ? nullptr : prev_word_res->best_choice; + *page_res->prev_word_best_choice = (new_block || prev_word_res == nullptr) + ? nullptr + : prev_word_res->best_choice; } return word_res; } @@ -1652,8 +1698,9 @@ WERD_RES *PAGE_RES_IT::restart_row() { *************************************************************************/ WERD_RES *PAGE_RES_IT::forward_paragraph() { - while (block_res == next_block_res && (next_row_res != nullptr && next_row_res->row != nullptr && - row_res->row->para() == next_row_res->row->para())) { + while (block_res == next_block_res && + (next_row_res != nullptr && next_row_res->row != nullptr && + row_res->row->para() == next_row_res->row->para())) { internal_forward(false, true); } return internal_forward(false, true); diff --git a/src/ccstruct/pageres.h b/src/ccstruct/pageres.h index 96202d55c..48e70b73d 100644 --- a/src/ccstruct/pageres.h +++ b/src/ccstruct/pageres.h @@ -19,24 +19,24 @@ #ifndef PAGERES_H #define PAGERES_H -#include "blamer.h" // for BlamerBundle (ptr only), IRR_NUM_REASONS -#include "clst.h" // for CLIST_ITERATOR, CLISTIZEH +#include "blamer.h" // for BlamerBundle (ptr only), IRR_NUM_REASONS +#include "clst.h" // for CLIST_ITERATOR, CLISTIZEH +#include "elst.h" // for ELIST_ITERATOR, ELIST_LINK, ELISTIZEH #include "genericvector.h" // for PointerVector -#include "elst.h" // for 
ELIST_ITERATOR, ELIST_LINK, ELISTIZEH -#include "matrix.h" // for MATRIX -#include "normalis.h" // for DENORM -#include "ratngs.h" // for WERD_CHOICE, BLOB_CHOICE (ptr only) -#include "rect.h" // for TBOX -#include "rejctmap.h" // for REJMAP -#include "unicharset.h" // for UNICHARSET, UNICHARSET::Direction, UNI... -#include "werd.h" // for WERD, W_BOL, W_EOL +#include "matrix.h" // for MATRIX +#include "normalis.h" // for DENORM +#include "ratngs.h" // for WERD_CHOICE, BLOB_CHOICE (ptr only) +#include "rect.h" // for TBOX +#include "rejctmap.h" // for REJMAP +#include "unicharset.h" // for UNICHARSET, UNICHARSET::Direction, UNI... +#include "werd.h" // for WERD, W_BOL, W_EOL #include // for UNICHAR_ID, INVALID_UNICHAR_ID -#include // for int32_t, int16_t -#include // for std::function -#include // for std::pair -#include // for std::vector +#include // for int32_t, int16_t +#include // for std::function +#include // for std::pair +#include // for std::vector #include // for int8_t @@ -96,6 +96,7 @@ public: rej_count = 0; rejected = false; prev_word_best_choice = nullptr; + blame_reasons.clear(); blame_reasons.resize(IRR_NUM_REASONS); } @@ -217,7 +218,8 @@ public: // Stores the lstm choices of every timestep std::vector>> timesteps; // Stores the lstm choices of every timestep segmented by character - std::vector>>> segmented_timesteps; + std::vector>>> + segmented_timesteps; // Symbolchoices acquired during CTC std::vector>> CTC_symbol_choices; // Stores if the timestep vector starts with a space @@ -356,12 +358,12 @@ public: // This matters for mirrorable characters such as parentheses. We recognize // characters purely based on their shape on the page, and by default produce // the corresponding unicode for a left-to-right context. 
- const char *BestUTF8(int blob_index, bool in_rtl_context) const { - if (blob_index < 0 || best_choice == nullptr || blob_index >= best_choice->length()) { + const char *BestUTF8(unsigned blob_index, bool in_rtl_context) const { + if (best_choice == nullptr || blob_index >= best_choice->length()) { return nullptr; } UNICHAR_ID id = best_choice->unichar_id(blob_index); - if (id < 0 || id >= uch_set->size()) { + if (static_cast(id) >= uch_set->size()) { return nullptr; } UNICHAR_ID mirrored = uch_set->get_mirror(id); @@ -371,35 +373,37 @@ public: return uch_set->id_to_unichar_ext(id); } // Returns the UTF-8 string for the given blob index in the raw_choice word. - const char *RawUTF8(int blob_index) const { - if (blob_index < 0 || blob_index >= raw_choice->length()) { + const char *RawUTF8(unsigned blob_index) const { + if (blob_index >= raw_choice->length()) { return nullptr; } UNICHAR_ID id = raw_choice->unichar_id(blob_index); - if (id < 0 || id >= uch_set->size()) { + if (static_cast(id) >= uch_set->size()) { return nullptr; } return uch_set->id_to_unichar(id); } - UNICHARSET::Direction SymbolDirection(int blob_index) const { - if (best_choice == nullptr || blob_index >= best_choice->length() || blob_index < 0) { + UNICHARSET::Direction SymbolDirection(unsigned blob_index) const { + if (best_choice == nullptr || blob_index >= best_choice->length()) { return UNICHARSET::U_OTHER_NEUTRAL; } return uch_set->get_direction(best_choice->unichar_id(blob_index)); } bool AnyRtlCharsInWord() const { - if (uch_set == nullptr || best_choice == nullptr || best_choice->length() < 1) { + if (uch_set == nullptr || best_choice == nullptr || + best_choice->length() < 1) { return false; } - for (int id = 0; id < best_choice->length(); id++) { - int unichar_id = best_choice->unichar_id(id); - if (unichar_id < 0 || unichar_id >= uch_set->size()) { + for (unsigned id = 0; id < best_choice->length(); id++) { + unsigned unichar_id = best_choice->unichar_id(id); + if (unichar_id >= 
uch_set->size()) { continue; // Ignore illegal chars. } UNICHARSET::Direction dir = uch_set->get_direction(unichar_id); - if (dir == UNICHARSET::U_RIGHT_TO_LEFT || dir == UNICHARSET::U_RIGHT_TO_LEFT_ARABIC) { + if (dir == UNICHARSET::U_RIGHT_TO_LEFT || + dir == UNICHARSET::U_RIGHT_TO_LEFT_ARABIC) { return true; } } @@ -407,16 +411,18 @@ public: } bool AnyLtrCharsInWord() const { - if (uch_set == nullptr || best_choice == nullptr || best_choice->length() < 1) { + if (uch_set == nullptr || best_choice == nullptr || + best_choice->length() < 1) { return false; } - for (int id = 0; id < best_choice->length(); id++) { - int unichar_id = best_choice->unichar_id(id); - if (unichar_id < 0 || unichar_id >= uch_set->size()) { + for (unsigned id = 0; id < best_choice->length(); id++) { + unsigned unichar_id = best_choice->unichar_id(id); + if (unichar_id >= uch_set->size()) { continue; // Ignore illegal chars. } UNICHARSET::Direction dir = uch_set->get_direction(unichar_id); - if (dir == UNICHARSET::U_LEFT_TO_RIGHT || dir == UNICHARSET::U_ARABIC_NUMBER) { + if (dir == UNICHARSET::U_LEFT_TO_RIGHT || + dir == UNICHARSET::U_ARABIC_NUMBER) { return true; } } @@ -462,9 +468,11 @@ public: // of any of the above flags. It should really be a tesseract::OcrEngineMode // but is declared as int for ease of use with tessedit_ocr_engine_mode. // Returns false if the word is empty and sets up fake results. 
- bool SetupForRecognition(const UNICHARSET &unicharset_in, tesseract::Tesseract *tesseract, - Image pix, int norm_mode, const TBOX *norm_box, bool numeric_mode, - bool use_body_size, bool allow_detailed_fx, ROW *row, + bool SetupForRecognition(const UNICHARSET &unicharset_in, + tesseract::Tesseract *tesseract, Image pix, + int norm_mode, const TBOX *norm_box, + bool numeric_mode, bool use_body_size, + bool allow_detailed_fx, ROW *row, const BLOCK *block); // Set up the seam array, bln_boxes, best_choice, and raw_choice to empty @@ -528,8 +536,9 @@ public: // min_rating limits how tight to make a template. // max_rating limits how loose to make a template. // rating_margin denotes the amount of margin to put in template. - void ComputeAdaptionThresholds(float certainty_scale, float min_rating, float max_rating, - float rating_margin, float *thresholds); + void ComputeAdaptionThresholds(float certainty_scale, float min_rating, + float max_rating, float rating_margin, + float *thresholds); // Saves a copy of the word_choice if it has the best unadjusted rating. // Returns true if the word_choice was the new best. @@ -540,7 +549,8 @@ public: // The best_choices list is kept in sorted order by rating. Duplicates are // removed, and the list is kept no longer than max_num_choices in length. // Returns true if the word_choice is still a valid pointer. - bool LogNewCookedChoice(int max_num_choices, bool debug, WERD_CHOICE *word_choice); + bool LogNewCookedChoice(int max_num_choices, bool debug, + WERD_CHOICE *word_choice); // Prints a brief list of all the best choices. void PrintBestChoices() const; @@ -549,13 +559,13 @@ public: // inclusive. int GetBlobsWidth(int start_blob, int last_blob) const; // Returns the width of a gap between the specified blob and the next one. 
- int GetBlobsGap(int blob_index) const; + int GetBlobsGap(unsigned blob_index) const; // Returns the BLOB_CHOICE corresponding to the given index in the // best choice word taken from the appropriate cell in the ratings MATRIX. // Borrowed pointer, so do not delete. May return nullptr if there is no // BLOB_CHOICE matching the unichar_id at the given index. - BLOB_CHOICE *GetBlobChoice(int index) const; + BLOB_CHOICE *GetBlobChoice(unsigned index) const; // Returns the BLOB_CHOICE_LIST corresponding to the given index in the // best choice word taken from the appropriate cell in the ratings MATRIX. @@ -601,7 +611,7 @@ public: // providing a single classifier result for each blob. // The BLOB_CHOICEs are consumed and the word takes ownership. // The number of blobs in the box_word must match blob_count. - void FakeClassifyWord(int blob_count, BLOB_CHOICE **choices); + void FakeClassifyWord(unsigned blob_count, BLOB_CHOICE **choices); // Creates a WERD_CHOICE for the word using the top choices from the leading // diagonal of the ratings matrix. @@ -615,12 +625,13 @@ public: // callback box_cb is nullptr or returns true, setting the merged blob // result to the class returned from class_cb. // Returns true if anything was merged. - bool ConditionalBlobMerge(std::function class_cb, - std::function box_cb); + bool ConditionalBlobMerge( + const std::function &class_cb, + const std::function &box_cb); // Merges 2 adjacent blobs in the result (index and index+1) and corrects // all the data to account for the change. - void MergeAdjacentBlobs(int index); + void MergeAdjacentBlobs(unsigned index); // Callback helper for fix_quotes returns a double quote if both // arguments are quote, otherwise INVALID_UNICHAR_ID. @@ -682,7 +693,8 @@ public: // Do two PAGE_RES_ITs point at the same word? // This is much cheaper than cmp(). 
bool operator==(const PAGE_RES_IT &other) const { - return word_res == other.word_res && row_res == other.row_res && block_res == other.block_res; + return word_res == other.word_res && row_res == other.row_res && + block_res == other.block_res; } bool operator!=(const PAGE_RES_IT &other) const { diff --git a/src/ccstruct/pdblock.cpp b/src/ccstruct/pdblock.cpp index 7ee76e658..7cabb573a 100644 --- a/src/ccstruct/pdblock.cpp +++ b/src/ccstruct/pdblock.cpp @@ -42,9 +42,10 @@ constexpr ERRCODE LOSTBLOCKLINE("Can't find rectangle for line"); * Constructor for a simple rectangular block. **********************************************************************/ PDBLK::PDBLK( // rectangular block - int16_t xmin, // bottom left - int16_t ymin, int16_t xmax, // top right - int16_t ymax) + TDimension xmin, // bottom left + TDimension ymin, + TDimension xmax, // top right + TDimension ymax) : box(ICOORD(xmin, ymin), ICOORD(xmax, ymax)) { // boundaries ICOORDELT_IT left_it = &leftside; @@ -349,9 +350,9 @@ void BLOCK_RECT_IT::forward() { // next rectangle * Get the the start and width of a line in the block. 
**********************************************************************/ -int16_t BLOCK_LINE_IT::get_line( // get a line - int16_t y, // line to get - int16_t &xext // output extent +TDimension BLOCK_LINE_IT::get_line( // get a line + TDimension y, // line to get + TDimension &xext // output extent ) { ICOORD bleft; // bounding box ICOORD tright; // of block & rect diff --git a/src/ccstruct/pdblock.h b/src/ccstruct/pdblock.h index b7b2b8dd6..f08bfbac2 100644 --- a/src/ccstruct/pdblock.h +++ b/src/ccstruct/pdblock.h @@ -41,10 +41,10 @@ public: index_ = 0; } /// simple constructor - PDBLK(int16_t xmin, ///< bottom left - int16_t ymin, - int16_t xmax, ///< top right - int16_t ymax); + PDBLK(TDimension xmin, ///< bottom left + TDimension ymin, + TDimension xmax, ///< top right + TDimension ymax); /// set vertex lists ///@param left list of left vertices @@ -145,8 +145,8 @@ public: } private: - int16_t ymin = 0; ///< bottom of rectangle - int16_t ymax = 0; ///< top of rectangle + TDimension ymin = 0; ///< bottom of rectangle + TDimension ymax = 0; ///< top of rectangle PDBLK *block = nullptr; ///< block to iterate ICOORDELT_IT left_it; ///< boundary iterators ICOORDELT_IT right_it; @@ -172,7 +172,7 @@ public: /// get a line ///@param y line to get ///@param xext output extent - int16_t get_line(int16_t y, int16_t &xext); + TDimension get_line(TDimension y, TDimension &xext); private: PDBLK *block; ///< block to iterate diff --git a/src/ccstruct/points.h b/src/ccstruct/points.h index 61d1a162e..c8c46ad16 100644 --- a/src/ccstruct/points.h +++ b/src/ccstruct/points.h @@ -21,6 +21,7 @@ #include "elst.h" #include "errcode.h" // for ASSERT_HOST +#include "tesstypes.h" // for TDimension #include // for DLLSYM @@ -43,7 +44,7 @@ public: /// constructor ///@param xin x value ///@param yin y value - ICOORD(int16_t xin, int16_t yin) { + ICOORD(TDimension xin, TDimension yin) { xcoord = xin; ycoord = yin; } @@ -54,20 +55,20 @@ public: bool Serialize(TFile *f) const; /// access 
function - int16_t x() const { + TDimension x() const { return xcoord; } /// access_function - int16_t y() const { + TDimension y() const { return ycoord; } /// rewrite function - void set_x(int16_t xin) { + void set_x(TDimension xin) { xcoord = xin; // write new value } /// rewrite function - void set_y(int16_t yin) { // value to set + void set_y(TDimension yin) { // value to set ycoord = yin; } @@ -128,15 +129,15 @@ public: /// cross product friend int32_t operator*(const ICOORD &, const ICOORD &); /// multiply - friend ICOORD operator*(const ICOORD &, int16_t); + friend ICOORD operator*(const ICOORD &, TDimension); /// multiply - friend ICOORD operator*(int16_t, const ICOORD &); + friend ICOORD operator*(TDimension, const ICOORD &); /// multiply - friend ICOORD &operator*=(ICOORD &, int16_t); + friend ICOORD &operator*=(ICOORD &, TDimension); /// divide - friend ICOORD operator/(const ICOORD &, int16_t); + friend ICOORD operator/(const ICOORD &, TDimension); /// divide - friend ICOORD &operator/=(ICOORD &, int16_t); + friend ICOORD &operator/=(ICOORD &, TDimension); /// rotate ///@param vec by vector void rotate(const FCOORD &vec); @@ -155,8 +156,8 @@ public: bool DeSerialize(bool swap, FILE *fp); protected: - int16_t xcoord; ///< x value - int16_t ycoord; ///< y value + TDimension xcoord; ///< x value + TDimension ycoord; ///< y value }; class ICOORDELT : public ELIST_LINK, @@ -171,7 +172,7 @@ public: /// constructor ///@param xin x value ///@param yin y value - ICOORDELT(int16_t xin, int16_t yin) { + ICOORDELT(TDimension xin, TDimension yin) { xcoord = xin; ycoord = yin; } @@ -438,7 +439,7 @@ inline int32_t operator*( // cross product inline ICOORD operator*( // scalar multiply const ICOORD &op1, // operands - int16_t scale) { + TDimension scale) { ICOORD result; // output result.xcoord = op1.xcoord * scale; @@ -447,7 +448,7 @@ inline ICOORD operator*( // scalar multiply } inline ICOORD operator*( // scalar multiply - int16_t scale, + TDimension scale, const 
ICOORD &op1 // operands ) { ICOORD result; // output @@ -465,7 +466,7 @@ inline ICOORD operator*( // scalar multiply inline ICOORD &operator*=( // scalar multiply ICOORD &op1, // operands - int16_t scale) { + TDimension scale) { op1.xcoord *= scale; op1.ycoord *= scale; return op1; @@ -479,7 +480,7 @@ inline ICOORD &operator*=( // scalar multiply inline ICOORD operator/( // scalar divide const ICOORD &op1, // operands - int16_t scale) { + TDimension scale) { ICOORD result; // output result.xcoord = op1.xcoord / scale; @@ -495,7 +496,7 @@ inline ICOORD operator/( // scalar divide inline ICOORD &operator/=( // scalar divide ICOORD &op1, // operands - int16_t scale) { + TDimension scale) { op1.xcoord /= scale; op1.ycoord /= scale; return op1; @@ -509,8 +510,8 @@ inline ICOORD &operator/=( // scalar divide inline void ICOORD::rotate( // rotate by vector const FCOORD &vec) { - auto tmp = static_cast(std::floor(xcoord * vec.x() - ycoord * vec.y() + 0.5f)); - ycoord = static_cast(std::floor(ycoord * vec.x() + xcoord * vec.y() + 0.5f)); + auto tmp = static_cast(std::floor(xcoord * vec.x() - ycoord * vec.y() + 0.5f)); + ycoord = static_cast(std::floor(ycoord * vec.x() + xcoord * vec.y() + 0.5f)); xcoord = tmp; } diff --git a/src/ccstruct/polyaprx.cpp b/src/ccstruct/polyaprx.cpp index a81cbc779..10753cbcc 100644 --- a/src/ccstruct/polyaprx.cpp +++ b/src/ccstruct/polyaprx.cpp @@ -1,5 +1,5 @@ /********************************************************************** - * File: polyaprx.cpp (Formerly polygon.c) + * File: polyaprx.cpp * Description: Code for polygonal approximation from old edgeprog. 
* Author: Ray Smith * @@ -34,7 +34,8 @@ namespace tesseract { #define FASTEDGELENGTH 256 static BOOL_VAR(poly_debug, false, "Debug old poly"); -static BOOL_VAR(poly_wide_objects_better, true, "More accurate approx on wide things"); +static BOOL_VAR(poly_wide_objects_better, true, + "More accurate approx on wide things"); #define fixed_dist 20 // really an int_variable #define approx_dist 15 // really an int_variable @@ -43,60 +44,99 @@ const int par1 = 4500 / (approx_dist * approx_dist); const int par2 = 6750 / (approx_dist * approx_dist); /********************************************************************** - * tesspoly_outline - * - * Approximate an outline from chain codes form using the old tess algorithm. - * If allow_detailed_fx is true, the EDGEPTs in the returned TBLOB - * contain pointers to the input C_OUTLINEs that enable higher-resolution - * feature extraction that does not use the polygonal approximation. + *cutline(first,last,area) straightens out a line by partitioning + *and joining the ends by a straight line* **********************************************************************/ -TESSLINE *ApproximateOutline(bool allow_detailed_fx, C_OUTLINE *c_outline) { - EDGEPT stack_edgepts[FASTEDGELENGTH]; // converted path - EDGEPT *edgepts = stack_edgepts; +static void cutline( // recursive refine + EDGEPT *first, // ends of line + EDGEPT *last, int area // area of object +) { + EDGEPT *edge; // current edge + TPOINT vecsum; // vector sum + int vlen; // approx length of vecsum + TPOINT vec; // accumulated vector + EDGEPT *maxpoint; // worst point + int maxperp; // max deviation + int perp; // perp distance + int ptcount; // no of points + int squaresum; // sum of perps - // Use heap memory if the stack buffer is not big enough. 
- if (c_outline->pathlength() > FASTEDGELENGTH) { - edgepts = new EDGEPT[c_outline->pathlength()]; + edge = first; // start of line + if (edge->next == last) { + return; // simple line } - // bounding box - const auto &loop_box = c_outline->bounding_box(); - int32_t area = loop_box.height(); - if (!poly_wide_objects_better && loop_box.width() > area) { - area = loop_box.width(); + // vector sum + vecsum.x = last->pos.x - edge->pos.x; + vecsum.y = last->pos.y - edge->pos.y; + if (vecsum.x == 0 && vecsum.y == 0) { + // special case + vecsum.x = -edge->prev->vec.x; + vecsum.y = -edge->prev->vec.y; } - area *= area; - edgesteps_to_edgepts(c_outline, edgepts); - fix2(edgepts, area); - EDGEPT *edgept = poly2(edgepts, area); // 2nd approximation. - EDGEPT *startpt = edgept; - EDGEPT *result = nullptr; - EDGEPT *prev_result = nullptr; + // absolute value + vlen = vecsum.x > 0 ? vecsum.x : -vecsum.x; + if (vecsum.y > vlen) { + vlen = vecsum.y; // maximum + } else if (-vecsum.y > vlen) { + vlen = -vecsum.y; // absolute value + } + + vec.x = edge->vec.x; // accumulated vector + vec.y = edge->vec.y; + maxperp = 0; // none yet + squaresum = ptcount = 0; + edge = edge->next; // move to actual point + maxpoint = edge; // in case there isn't one do { - auto *new_pt = new EDGEPT; - new_pt->pos = edgept->pos; - new_pt->prev = prev_result; - if (prev_result == nullptr) { - result = new_pt; - } else { - prev_result->next = new_pt; - new_pt->prev = prev_result; + perp = vec.cross(vecsum); // get perp distance + if (perp != 0) { + perp *= perp; // squared deviation } - if (allow_detailed_fx) { - new_pt->src_outline = edgept->src_outline; - new_pt->start_step = edgept->start_step; - new_pt->step_count = edgept->step_count; + squaresum += perp; // sum squares + ptcount++; // count points + if (poly_debug) { + tprintf("Cutline:Final perp=%d\n", perp); } - prev_result = new_pt; - edgept = edgept->next; - } while (edgept != startpt); - prev_result->next = result; - result->prev = 
prev_result; - if (edgepts != stack_edgepts) { - delete[] edgepts; + if (perp > maxperp) { + maxperp = perp; + maxpoint = edge; // find greatest deviation + } + vec.x += edge->vec.x; // accumulate vectors + vec.y += edge->vec.y; + edge = edge->next; + } while (edge != last); // test all line + + perp = vecsum.length(); + ASSERT_HOST(perp != 0); + + if (maxperp < 256 * INT16_MAX) { + maxperp <<= 8; + maxperp /= perp; // true max perp + } else { + maxperp /= perp; + maxperp <<= 8; // avoid overflow + } + if (squaresum < 256 * INT16_MAX) { + // mean squared perp + perp = (squaresum << 8) / (perp * ptcount); + } else { + // avoid overflow + perp = (squaresum / perp << 8) / ptcount; + } + + if (poly_debug) { + tprintf("Cutline:A=%d, max=%.2f(%.2f%%), msd=%.2f(%.2f%%)\n", area, + maxperp / 256.0, maxperp * 200.0 / area, perp / 256.0, + perp * 300.0 / area); + } + if (maxperp * par1 >= 10 * area || perp * par2 >= 10 * area || vlen >= 126) { + maxpoint->fixed = true; + // partitions + cutline(first, maxpoint, area); + cutline(maxpoint, last, area); } - return TESSLINE::BuildFromOutlineList(result); } /********************************************************************** @@ -105,9 +145,9 @@ TESSLINE *ApproximateOutline(bool allow_detailed_fx, C_OUTLINE *c_outline) { * Convert a C_OUTLINE to EDGEPTs. 
**********************************************************************/ -EDGEPT *edgesteps_to_edgepts( // convert outline - C_OUTLINE *c_outline, // input - EDGEPT edgepts[] // output is array +static EDGEPT *edgesteps_to_edgepts( // convert outline + C_OUTLINE *c_outline, // input + EDGEPT edgepts[] // output is array ) { int32_t length; // steps in path ICOORD pos; // current coords @@ -131,7 +171,8 @@ EDGEPT *edgesteps_to_edgepts( // convert outline do { dir = c_outline->step_dir(stepindex); vec = c_outline->step(stepindex); - if (stepindex < length - 1 && c_outline->step_dir(stepindex + 1) - dir == -32) { + if (stepindex < length - 1 && + c_outline->step_dir(stepindex + 1) - dir == -32) { dir += 128 - 16; vec += c_outline->step(stepindex + 1); stepinc = 2; @@ -192,7 +233,8 @@ EDGEPT *edgesteps_to_edgepts( // convert outline epdir &= 7; edgepts[epindex].dir = epdir; edgepts[0].prev = &edgepts[epindex]; - ASSERT_HOST(pos.x() == c_outline->start_pos().x() && pos.y() == c_outline->start_pos().y()); + ASSERT_HOST(pos.x() == c_outline->start_pos().x() && + pos.y() == c_outline->start_pos().y()); return &edgepts[0]; } @@ -200,14 +242,13 @@ EDGEPT *edgesteps_to_edgepts( // convert outline *fix2(start,area) fixes points on the outline according to a trial method* **********************************************************************/ -void fix2( // polygonal approx - EDGEPT *start, /*loop to approimate */ +static void fix2( // polygonal approx + EDGEPT *start, // loop to approximate int area) { - EDGEPT *edgept; /*current point */ + EDGEPT *edgept; // current point EDGEPT *edgept1; - EDGEPT *loopstart; /*modified start of loop */ - EDGEPT *linestart; /*start of line segment */ - int stopped; /*completed flag */ + EDGEPT *loopstart; // modified start of loop + EDGEPT *linestart; // start of line segment int fixed_count; // no of fixed points int8_t dir; int d01, d12, d23, gapmin; @@ -215,29 +256,30 @@ void fix2( // polygonal approx EDGEPT *edgefix, *startfix; EDGEPT 
*edgefix0, *edgefix1, *edgefix2, *edgefix3; - edgept = start; /*start of loop */ + edgept = start; // start of loop while (((edgept->dir - edgept->prev->dir + 1) & 7) < 3 && (dir = (edgept->prev->dir - edgept->next->dir) & 7) != 2 && dir != 6) { - edgept = edgept->next; /*find suitable start */ + edgept = edgept->next; // find suitable start } - loopstart = edgept; /*remember start */ + loopstart = edgept; // remember start - stopped = 0; /*not finished yet */ - edgept->fixed = true; //fix it + // completed flag + bool stopped = false; + edgept->fixed = true; // fix it do { - linestart = edgept; /*possible start of line */ - auto dir1 = edgept->dir; //first direction - //length of dir1 + linestart = edgept; // possible start of line + auto dir1 = edgept->dir; // first direction + // length of dir1 auto sum1 = edgept->runlength; edgept = edgept->next; - auto dir2 = edgept->dir; //2nd direction - //length in dir2 + auto dir2 = edgept->dir; // 2nd direction + // length in dir2 auto sum2 = edgept->runlength; if (((dir1 - dir2 + 1) & 7) < 3) { while (edgept->prev->dir == edgept->next->dir) { - edgept = edgept->next; /*look at next */ + edgept = edgept->next; // look at next if (edgept->dir == dir1) { - /*sum lengths */ + // sum lengths sum1 += edgept->runlength; } else { sum2 += edgept->runlength; @@ -245,11 +287,12 @@ void fix2( // polygonal approx } if (edgept == loopstart) { - stopped = 1; /*finished */ + // finished + stopped = true; } if (sum2 + sum1 > 2 && linestart->prev->dir == dir2 && (linestart->prev->runlength > linestart->runlength || sum2 > sum1)) { - /*start is back one */ + // start is back one linestart = linestart->prev; linestart->fixed = true; } @@ -262,10 +305,10 @@ void fix2( // polygonal approx edgept = edgept->next; } } - /*sharp bend */ + // sharp bend edgept->fixed = true; } - /*do whole loop */ + // do whole loop while (edgept != loopstart && !stopped); edgept = start; @@ -283,13 +326,13 @@ void fix2( // polygonal approx edgept = start; do { - 
/*single fixed step */ + // single fixed step if (edgept->fixed && edgept->runlength == 1 - /*and neighbours free */ + // and neighbours free && edgept->next->fixed && !edgept->prev->fixed - /*same pair of dirs */ + // same pair of dirs && !edgept->next->next->fixed && edgept->prev->dir == edgept->next->dir && edgept->prev->prev->dir == edgept->next->next->dir && @@ -298,10 +341,10 @@ void fix2( // polygonal approx edgept->fixed = false; edgept->next->fixed = false; } - edgept = edgept->next; /*do all points */ - } while (edgept != start); /*until finished */ + edgept = edgept->next; // do all points + } while (edgept != start); // until finished - stopped = 0; + stopped = false; if (area < 450) { area = 450; } @@ -373,7 +416,7 @@ void fix2( // polygonal approx edgept = edgept->next; while (!edgept->fixed) { if (edgept == startfix) { - stopped = 1; + stopped = true; } edgept = edgept->next; } @@ -387,60 +430,61 @@ void fix2( // polygonal approx *using the points which have been fixed by the first approximation* **********************************************************************/ -EDGEPT *poly2( // second poly - EDGEPT *startpt, /*start of loop */ - int area /*area of blob box */ +static EDGEPT *poly2( // second poly + EDGEPT *startpt, // start of loop + int area // area of blob box ) { - EDGEPT *edgept; /*current outline point */ - EDGEPT *loopstart; /*starting point */ - EDGEPT *linestart; /*start of line */ - int edgesum; /*correction count */ + EDGEPT *edgept; // current outline point + EDGEPT *loopstart; // starting point + EDGEPT *linestart; // start of line + int edgesum; // correction count if (area < 1200) { - area = 1200; /*minimum value */ + area = 1200; // minimum value } - loopstart = nullptr; /*not found it yet */ - edgept = startpt; /*start of loop */ + loopstart = nullptr; // not found it yet + edgept = startpt; // start of loop do { // current point fixed and next not if (edgept->fixed && !edgept->next->fixed) { - loopstart = edgept; /*start of 
repoly */ + loopstart = edgept; // start of repoly break; } - edgept = edgept->next; /*next point */ - } while (edgept != startpt); /*until found or finished */ + edgept = edgept->next; // next point + } while (edgept != startpt); // until found or finished if (loopstart == nullptr && !startpt->fixed) { - /*fixed start of loop */ + // fixed start of loop startpt->fixed = true; - loopstart = startpt; /*or start of loop */ + loopstart = startpt; // or start of loop } if (loopstart) { do { - edgept = loopstart; /*first to do */ + edgept = loopstart; // first to do do { linestart = edgept; - edgesum = 0; /*sum of lengths */ + edgesum = 0; // sum of lengths do { - /*sum lengths */ + // sum lengths edgesum += edgept->runlength; - edgept = edgept->next; /*move on */ + edgept = edgept->next; // move on } while (!edgept->fixed && edgept != loopstart && edgesum < 126); if (poly_debug) { - tprintf("Poly2:starting at (%d,%d)+%d=(%d,%d),%d to (%d,%d)\n", linestart->pos.x, - linestart->pos.y, linestart->dir, linestart->vec.x, linestart->vec.y, - edgesum, edgept->pos.x, edgept->pos.y); + tprintf("Poly2:starting at (%d,%d)+%d=(%d,%d),%d to (%d,%d)\n", + linestart->pos.x, linestart->pos.y, linestart->dir, + linestart->vec.x, linestart->vec.y, edgesum, edgept->pos.x, + edgept->pos.y); } - /*reapproximate */ + // reapproximate cutline(linestart, edgept, area); while (edgept->next->fixed && edgept != loopstart) { - edgept = edgept->next; /*look for next non-fixed */ + edgept = edgept->next; // look for next non-fixed } } - /*do all the loop */ + // do all the loop while (edgept != loopstart); edgesum = 0; do { @@ -466,106 +510,68 @@ EDGEPT *poly2( // second poly linestart->vec.y = edgept->pos.y - linestart->pos.y; } while (edgept != loopstart); } else { - edgept = startpt; /*start of loop */ + edgept = startpt; // start of loop } - loopstart = edgept; /*new start */ - return loopstart; /*correct exit */ + loopstart = edgept; // new start + return loopstart; // correct exit } 
/********************************************************************** - *cutline(first,last,area) straightens out a line by partitioning - *and joining the ends by a straight line* + * tesspoly_outline + * + * Approximate an outline from chain codes form using the old tess algorithm. + * If allow_detailed_fx is true, the EDGEPTs in the returned TBLOB + * contain pointers to the input C_OUTLINEs that enable higher-resolution + * feature extraction that does not use the polygonal approximation. **********************************************************************/ -void cutline( // recursive refine - EDGEPT *first, /*ends of line */ - EDGEPT *last, int area /*area of object */ -) { - EDGEPT *edge; /*current edge */ - TPOINT vecsum; /*vector sum */ - int vlen; /*approx length of vecsum */ - TPOINT vec; /*accumulated vector */ - EDGEPT *maxpoint; /*worst point */ - int maxperp; /*max deviation */ - int perp; /*perp distance */ - int ptcount; /*no of points */ - int squaresum; /*sum of perps */ +TESSLINE *ApproximateOutline(bool allow_detailed_fx, C_OUTLINE *c_outline) { + EDGEPT stack_edgepts[FASTEDGELENGTH]; // converted path + EDGEPT *edgepts = stack_edgepts; - edge = first; /*start of line */ - if (edge->next == last) { - return; /*simple line */ + // Use heap memory if the stack buffer is not big enough. + if (c_outline->pathlength() > FASTEDGELENGTH) { + edgepts = new EDGEPT[c_outline->pathlength()]; } - /*vector sum */ - vecsum.x = last->pos.x - edge->pos.x; - vecsum.y = last->pos.y - edge->pos.y; - if (vecsum.x == 0 && vecsum.y == 0) { - /*special case */ - vecsum.x = -edge->prev->vec.x; - vecsum.y = -edge->prev->vec.y; + // bounding box + const auto &loop_box = c_outline->bounding_box(); + int32_t area = loop_box.height(); + if (!poly_wide_objects_better && loop_box.width() > area) { + area = loop_box.width(); } - /*absolute value */ - vlen = vecsum.x > 0 ? 
vecsum.x : -vecsum.x; - if (vecsum.y > vlen) { - vlen = vecsum.y; /*maximum */ - } else if (-vecsum.y > vlen) { - vlen = -vecsum.y; /*absolute value */ - } - - vec.x = edge->vec.x; /*accumulated vector */ - vec.y = edge->vec.y; - maxperp = 0; /*none yet */ - squaresum = ptcount = 0; - edge = edge->next; /*move to actual point */ - maxpoint = edge; /*in case there isn't one */ + area *= area; + edgesteps_to_edgepts(c_outline, edgepts); + fix2(edgepts, area); + EDGEPT *edgept = poly2(edgepts, area); // 2nd approximation. + EDGEPT *startpt = edgept; + EDGEPT *result = nullptr; + EDGEPT *prev_result = nullptr; do { - perp = vec.cross(vecsum); // get perp distance - if (perp != 0) { - perp *= perp; /*squared deviation */ + auto *new_pt = new EDGEPT; + new_pt->pos = edgept->pos; + new_pt->prev = prev_result; + if (prev_result == nullptr) { + result = new_pt; + } else { + prev_result->next = new_pt; + new_pt->prev = prev_result; } - squaresum += perp; /*sum squares */ - ptcount++; /*count points */ - if (poly_debug) { - tprintf("Cutline:Final perp=%d\n", perp); + if (allow_detailed_fx) { + new_pt->src_outline = edgept->src_outline; + new_pt->start_step = edgept->start_step; + new_pt->step_count = edgept->step_count; } - if (perp > maxperp) { - maxperp = perp; - maxpoint = edge; /*find greatest deviation */ - } - vec.x += edge->vec.x; /*accumulate vectors */ - vec.y += edge->vec.y; - edge = edge->next; - } while (edge != last); /*test all line */ - - perp = vecsum.length(); - ASSERT_HOST(perp != 0); - - if (maxperp < 256 * INT16_MAX) { - maxperp <<= 8; - maxperp /= perp; /*true max perp */ - } else { - maxperp /= perp; - maxperp <<= 8; /*avoid overflow */ - } - if (squaresum < 256 * INT16_MAX) { - /*mean squared perp */ - perp = (squaresum << 8) / (perp * ptcount); - } else { - /*avoid overflow */ - perp = (squaresum / perp << 8) / ptcount; - } - - if (poly_debug) { - tprintf("Cutline:A=%d, max=%.2f(%.2f%%), msd=%.2f(%.2f%%)\n", area, maxperp / 256.0, - maxperp * 200.0 / 
area, perp / 256.0, perp * 300.0 / area); - } - if (maxperp * par1 >= 10 * area || perp * par2 >= 10 * area || vlen >= 126) { - maxpoint->fixed = true; - /*partitions */ - cutline(first, maxpoint, area); - cutline(maxpoint, last, area); + prev_result = new_pt; + edgept = edgept->next; + } while (edgept != startpt); + prev_result->next = result; + result->prev = prev_result; + if (edgepts != stack_edgepts) { + delete[] edgepts; } + return TESSLINE::BuildFromOutlineList(result); } } // namespace tesseract diff --git a/src/ccstruct/polyaprx.h b/src/ccstruct/polyaprx.h index b9fa9ce39..f2f99ea97 100644 --- a/src/ccstruct/polyaprx.h +++ b/src/ccstruct/polyaprx.h @@ -1,8 +1,7 @@ /********************************************************************** - * File: polyaprx.h (Formerly polygon.h) + * File: polyaprx.h * Description: Code for polygonal approximation from old edgeprog. * Author: Ray Smith - * Created: Thu Nov 25 11:42:04 GMT 1993 * * (C) Copyright 1993, Hewlett-Packard Ltd. ** Licensed under the Apache License, Version 2.0 (the "License"); @@ -23,26 +22,10 @@ namespace tesseract { class C_OUTLINE; -struct EDGEPT; struct TESSLINE; // convert a chain-coded input to the old OUTLINE approximation TESSLINE *ApproximateOutline(bool allow_detailed_fx, C_OUTLINE *c_outline); -EDGEPT *edgesteps_to_edgepts( // convert outline - C_OUTLINE *c_outline, // input - EDGEPT edgepts[] // output is array -); -void fix2( // polygonal approx - EDGEPT *start, /*loop to approimate */ - int area); -EDGEPT *poly2( // second poly - EDGEPT *startpt, /*start of loop */ - int area /*area of blob box */ -); -void cutline( // recursive refine - EDGEPT *first, /*ends of line */ - EDGEPT *last, int area /*area of object */ -); } // namespace tesseract diff --git a/src/ccstruct/polyblk.cpp b/src/ccstruct/polyblk.cpp index a7125445f..8c21fd8dc 100644 --- a/src/ccstruct/polyblk.cpp +++ b/src/ccstruct/polyblk.cpp @@ -198,8 +198,8 @@ void POLY_BLOCK::rotate(FCOORD rotation) { pos.set_x(pt->x()); 
pos.set_y(pt->y()); pos.rotate(rotation); - pt->set_x(static_cast(floor(pos.x() + 0.5))); - pt->set_y(static_cast(floor(pos.y() + 0.5))); + pt->set_x(static_cast(floor(pos.x() + 0.5))); + pt->set_y(static_cast(floor(pos.y() + 0.5))); pts.forward(); } while (!pts.at_first()); compute_bb(); @@ -270,15 +270,12 @@ void POLY_BLOCK::plot(ScrollView *window, int32_t num) { } void POLY_BLOCK::fill(ScrollView *window, ScrollView::Color colour) { - int16_t y; - int16_t width; - PB_LINE_IT *lines; ICOORDELT_IT s_it; - lines = new PB_LINE_IT(this); + std::unique_ptr lines(new PB_LINE_IT(this)); window->Pen(colour); - for (y = this->bounding_box()->bottom(); y <= this->bounding_box()->top(); y++) { + for (auto y = this->bounding_box()->bottom(); y <= this->bounding_box()->top(); y++) { const std::unique_ptr segments(lines->get_line(y)); if (!segments->empty()) { s_it.set_to_list(segments.get()); @@ -286,14 +283,12 @@ void POLY_BLOCK::fill(ScrollView *window, ScrollView::Color colour) { // Note different use of ICOORDELT, x coord is x coord of pixel // at the start of line segment, y coord is length of line segment // Last pixel is start pixel + length. 
- width = s_it.data()->y(); + auto width = s_it.data()->y(); window->SetCursor(s_it.data()->x(), y); window->DrawTo(s_it.data()->x() + static_cast(width), y); } } } - - delete lines; } #endif @@ -339,7 +334,7 @@ bool POLY_BLOCK::overlap(POLY_BLOCK *other) { return false; } -ICOORDELT_LIST *PB_LINE_IT::get_line(int16_t y) { +ICOORDELT_LIST *PB_LINE_IT::get_line(TDimension y) { ICOORDELT_IT v, r; ICOORDELT_LIST *result; ICOORDELT *x, *current, *previous; @@ -356,7 +351,7 @@ ICOORDELT_LIST *PB_LINE_IT::get_line(int16_t y) { float fx = 0.5f + previous->x() + (current->x() - previous->x()) * (fy - previous->y()) / (current->y() - previous->y()); - x = new ICOORDELT(static_cast(fx), 0); + x = new ICOORDELT(static_cast(fx), 0); r.add_to_end(x); } } diff --git a/src/ccstruct/polyblk.h b/src/ccstruct/polyblk.h index eeb3aa048..e302993a0 100644 --- a/src/ccstruct/polyblk.h +++ b/src/ccstruct/polyblk.h @@ -106,7 +106,7 @@ public: // Each element of the returned list is the start (x) and extent(y) of // a run inside the region. // Delete the returned list after use. - ICOORDELT_LIST *get_line(int16_t y); + ICOORDELT_LIST *get_line(TDimension y); private: POLY_BLOCK *block; diff --git a/src/ccstruct/ratngs.cpp b/src/ccstruct/ratngs.cpp index 5801cd9b4..04c6b3e9f 100644 --- a/src/ccstruct/ratngs.cpp +++ b/src/ccstruct/ratngs.cpp @@ -28,6 +28,7 @@ #include "unicharset.h" #include +#include #include #include @@ -149,7 +150,7 @@ BLOB_CHOICE &BLOB_CHOICE::operator=(const BLOB_CHOICE &other) { // Returns true if *this and other agree on the baseline and x-height // to within some tolerance based on a given estimate of the x-height. 
bool BLOB_CHOICE::PosAndSizeAgree(const BLOB_CHOICE &other, float x_height, bool debug) const { - double baseline_diff = fabs(yshift() - other.yshift()); + double baseline_diff = std::fabs(yshift() - other.yshift()); if (baseline_diff > kMaxBaselineDrift * x_height) { if (debug) { tprintf("Baseline diff %g for %d v %d\n", baseline_diff, unichar_id_, other.unichar_id_); @@ -243,7 +244,7 @@ void WERD_CHOICE::init(const char *src_string, const char *src_lengths, float sr this->init(src_lengths ? strlen(src_lengths) : src_string_len); length_ = reserved_; int offset = 0; - for (int i = 0; i < length_; ++i) { + for (unsigned i = 0; i < length_; ++i) { int unichar_length = src_lengths ? src_lengths[i] : 1; unichar_ids_[i] = unicharset_->unichar_to_id(src_string + offset, unichar_length); state_[i] = 1; @@ -270,7 +271,7 @@ const char *WERD_CHOICE::permuter_name() const { // Returns the BLOB_CHOICE_LIST corresponding to the given index in the word, // taken from the appropriate cell in the ratings MATRIX. // Borrowed pointer, so do not delete. -BLOB_CHOICE_LIST *WERD_CHOICE::blob_choices(int index, MATRIX *ratings) const { +BLOB_CHOICE_LIST *WERD_CHOICE::blob_choices(unsigned index, MATRIX *ratings) const { MATRIX_COORD coord = MatrixCoord(index); BLOB_CHOICE_LIST *result = ratings->get(coord.col, coord.row); if (result == nullptr) { @@ -282,9 +283,9 @@ BLOB_CHOICE_LIST *WERD_CHOICE::blob_choices(int index, MATRIX *ratings) const { // Returns the MATRIX_COORD corresponding to the location in the ratings // MATRIX for the given index into the word. 
-MATRIX_COORD WERD_CHOICE::MatrixCoord(int index) const { +MATRIX_COORD WERD_CHOICE::MatrixCoord(unsigned index) const { int col = 0; - for (int i = 0; i < index; ++i) { + for (unsigned i = 0; i < index; ++i) { col += state_[i]; } int row = col + state_[index] - 1; @@ -293,7 +294,7 @@ MATRIX_COORD WERD_CHOICE::MatrixCoord(int index) const { // Sets the entries for the given index from the BLOB_CHOICE, assuming // unit fragment lengths, but setting the state for this index to blob_count. -void WERD_CHOICE::set_blob_choice(int index, int blob_count, const BLOB_CHOICE *blob_choice) { +void WERD_CHOICE::set_blob_choice(unsigned index, int blob_count, const BLOB_CHOICE *blob_choice) { unichar_ids_[index] = blob_choice->unichar_id(); script_pos_[index] = tesseract::SP_NORMAL; state_[index] = blob_count; @@ -306,7 +307,7 @@ void WERD_CHOICE::set_blob_choice(int index, int blob_count, const BLOB_CHOICE * * Returns true if unichar_ids_ contain the given unichar_id, false otherwise. */ bool WERD_CHOICE::contains_unichar_id(UNICHAR_ID unichar_id) const { - for (int i = 0; i < length_; ++i) { + for (unsigned i = 0; i < length_; ++i) { if (unichar_ids_[i] == unichar_id) { return true; } @@ -321,8 +322,8 @@ bool WERD_CHOICE::contains_unichar_id(UNICHAR_ID unichar_id) const { * and updates length_ and fragment_lengths_ to reflect this change. * Note: this function does not modify rating_ and certainty_. */ -void WERD_CHOICE::remove_unichar_ids(int start, int num) { - ASSERT_HOST(start >= 0 && start + num <= length_); +void WERD_CHOICE::remove_unichar_ids(unsigned start, int num) { + ASSERT_HOST(start + num <= length_); // Accumulate the states to account for the merged blobs. 
for (int i = 0; i < num; ++i) { if (start > 0) { @@ -331,7 +332,7 @@ void WERD_CHOICE::remove_unichar_ids(int start, int num) { state_[start + num] += state_[start + i]; } } - for (int i = start; i + num < length_; ++i) { + for (unsigned i = start; i + num < length_; ++i) { unichar_ids_[i] = unichar_ids_[i + num]; script_pos_[i] = script_pos_[i + num]; state_[i] = state_[i + num]; @@ -346,7 +347,7 @@ void WERD_CHOICE::remove_unichar_ids(int start, int num) { * Reverses and mirrors unichars in unichar_ids. */ void WERD_CHOICE::reverse_and_mirror_unichar_ids() { - for (int i = 0; i < length_ / 2; ++i) { + for (unsigned i = 0; i < length_ / 2; ++i) { UNICHAR_ID tmp_id = unichar_ids_[i]; unichar_ids_[i] = unicharset_->get_mirror(unichar_ids_[length_ - 1 - i]); unichar_ids_[length_ - 1 - i] = unicharset_->get_mirror(tmp_id); @@ -363,16 +364,15 @@ void WERD_CHOICE::reverse_and_mirror_unichar_ids() { * enclose the core portion of this word -- the part after stripping * punctuation from the left and right. 
*/ -void WERD_CHOICE::punct_stripped(int *start, int *end) const { +void WERD_CHOICE::punct_stripped(unsigned *start, unsigned *end) const { *start = 0; - *end = length() - 1; + *end = length(); while (*start < length() && unicharset()->get_ispunctuation(unichar_id(*start))) { (*start)++; } - while (*end > -1 && unicharset()->get_ispunctuation(unichar_id(*end))) { + while (*end > 0 && unicharset()->get_ispunctuation(unichar_id(*end - 1))) { (*end)--; } - (*end)++; } void WERD_CHOICE::GetNonSuperscriptSpan(int *pstart, int *pend) const { @@ -390,14 +390,14 @@ void WERD_CHOICE::GetNonSuperscriptSpan(int *pstart, int *pend) const { *pend = end; } -WERD_CHOICE WERD_CHOICE::shallow_copy(int start, int end) const { - ASSERT_HOST(start >= 0 && start <= length_); - ASSERT_HOST(end >= 0 && end <= length_); +WERD_CHOICE WERD_CHOICE::shallow_copy(unsigned start, unsigned end) const { + ASSERT_HOST(start <= length_); + ASSERT_HOST(end <= length_); if (end < start) { end = start; } WERD_CHOICE retval(unicharset_, end - start); - for (int i = start; i < end; i++) { + for (auto i = start; i < end; i++) { retval.append_unichar_id_space_allocated(unichar_ids_[i], state_[i], 0.0f, certainties_[i]); } return retval; @@ -409,8 +409,7 @@ WERD_CHOICE WERD_CHOICE::shallow_copy(int start, int end) const { * Returns true if unichar_ids contain at least one "strongly" RTL unichar. 
*/ bool WERD_CHOICE::has_rtl_unichar_id() const { - int i; - for (i = 0; i < length_; ++i) { + for (unsigned i = 0; i < length_; ++i) { UNICHARSET::Direction dir = unicharset_->get_direction(unichar_ids_[i]); if (dir == UNICHARSET::U_RIGHT_TO_LEFT || dir == UNICHARSET::U_RIGHT_TO_LEFT_ARABIC) { return true; @@ -430,7 +429,7 @@ void WERD_CHOICE::string_and_lengths(std::string *word_str, std::string *word_le if (word_lengths_str != nullptr) { *word_lengths_str = ""; } - for (int i = 0; i < length_; ++i) { + for (unsigned i = 0; i < length_; ++i) { const char *ch = unicharset_->id_to_unichar_ext(unichar_ids_[i]); *word_str += ch; if (word_lengths_str != nullptr) { @@ -466,7 +465,7 @@ WERD_CHOICE &WERD_CHOICE::operator+=(const WERD_CHOICE &second) { this->double_the_size(); } const std::vector &other_unichar_ids = second.unichar_ids(); - for (int i = 0; i < second.length(); ++i) { + for (unsigned i = 0; i < second.length(); ++i) { unichar_ids_[length_ + i] = other_unichar_ids[i]; state_[length_ + i] = second.state_[i]; certainties_[length_ + i] = second.certainties_[i]; @@ -504,7 +503,7 @@ WERD_CHOICE &WERD_CHOICE::operator=(const WERD_CHOICE &source) { unicharset_ = source.unicharset_; const std::vector &other_unichar_ids = source.unichar_ids(); - for (int i = 0; i < source.length(); ++i) { + for (unsigned i = 0; i < source.length(); ++i) { unichar_ids_[i] = other_unichar_ids[i]; state_[i] = source.state_[i]; certainties_[i] = source.certainties_[i]; @@ -528,17 +527,17 @@ WERD_CHOICE &WERD_CHOICE::operator=(const WERD_CHOICE &source) { // NOTE: blobs_list should be the chopped_word blobs. (Fully segemented.) void WERD_CHOICE::SetScriptPositions(bool small_caps, TWERD *word, int debug) { // Initialize to normal. 
- for (int i = 0; i < length_; ++i) { + for (unsigned i = 0; i < length_; ++i) { script_pos_[i] = tesseract::SP_NORMAL; } if (word->blobs.empty() || word->NumBlobs() != TotalOfStates()) { return; } - int position_counts[4] = {0, 0, 0, 0}; + unsigned position_counts[4] = {0, 0, 0, 0}; int chunk_index = 0; - for (int blob_index = 0; blob_index < length_; ++blob_index, ++chunk_index) { + for (unsigned blob_index = 0; blob_index < length_; ++blob_index, ++chunk_index) { TBLOB *tblob = word->blobs[chunk_index]; int uni_id = unichar_id(blob_index); TBOX blob_box = tblob->bounding_box(); @@ -557,18 +556,19 @@ void WERD_CHOICE::SetScriptPositions(bool small_caps, TWERD *word, int debug) { } // If almost everything looks like a superscript or subscript, // we most likely just got the baseline wrong. - if (position_counts[tesseract::SP_SUBSCRIPT] > 0.75 * length_ || - position_counts[tesseract::SP_SUPERSCRIPT] > 0.75 * length_) { + if (4 * position_counts[tesseract::SP_SUBSCRIPT] > 3 * length_ || + 4 * position_counts[tesseract::SP_SUPERSCRIPT] > 3 * length_) { if (debug >= 2) { tprintf( "Most characters of %s are subscript or superscript.\n" "That seems wrong, so I'll assume we got the baseline wrong\n", unichar_string().c_str()); } - for (int i = 0; i < length_; i++) { + for (unsigned i = 0; i < length_; i++) { ScriptPos sp = script_pos_[i]; if (sp == tesseract::SP_SUBSCRIPT || sp == tesseract::SP_SUPERSCRIPT) { - position_counts[sp]--; + ASSERT_HOST(position_counts[sp] > 0); + position_counts[sp]--; position_counts[tesseract::SP_NORMAL]++; script_pos_[i] = tesseract::SP_NORMAL; } @@ -578,7 +578,7 @@ void WERD_CHOICE::SetScriptPositions(bool small_caps, TWERD *word, int debug) { if ((debug >= 1 && position_counts[tesseract::SP_NORMAL] < length_) || debug >= 2) { tprintf("SetScriptPosition on %s\n", unichar_string().c_str()); int chunk_index = 0; - for (int blob_index = 0; blob_index < length_; ++blob_index) { + for (unsigned blob_index = 0; blob_index < length_; 
++blob_index) { if (debug >= 2 || script_pos_[blob_index] != tesseract::SP_NORMAL) { TBLOB *tblob = word->blobs[chunk_index]; ScriptPositionOf(true, *unicharset_, tblob->bounding_box(), unichar_id(blob_index)); @@ -590,7 +590,7 @@ void WERD_CHOICE::SetScriptPositions(bool small_caps, TWERD *word, int debug) { // Sets all the script_pos_ positions to the given position. void WERD_CHOICE::SetAllScriptPositions(tesseract::ScriptPos position) { - for (int i = 0; i < length_; ++i) { + for (unsigned i = 0; i < length_; ++i) { script_pos_[i] = position; } } @@ -629,13 +629,9 @@ ScriptPos WERD_CHOICE::ScriptPositionOf(bool print_debug, const UNICHARSET &unic // Returns the script-id (eg Han) of the dominant script in the word. int WERD_CHOICE::GetTopScriptID() const { - int max_script = unicharset_->get_script_table_size(); - int *sid = new int[max_script]; - int x; - for (x = 0; x < max_script; x++) { - sid[x] = 0; - } - for (x = 0; x < length_; ++x) { + unsigned max_script = unicharset_->get_script_table_size(); + std::vector sid(max_script); + for (unsigned x = 0; x < length_; ++x) { int script_id = unicharset_->get_script(unichar_id(x)); sid[script_id]++; } @@ -652,8 +648,8 @@ int WERD_CHOICE::GetTopScriptID() const { } // Note that high script ID overrides lower one on a tie, thus biasing // towards non-Common script (if sorted that way in unicharset file). - int max_sid = 0; - for (x = 1; x < max_script; x++) { + unsigned max_sid = 0; + for (unsigned x = 1; x < max_script; x++) { if (sid[x] >= sid[max_sid]) { max_sid = x; } @@ -661,14 +657,13 @@ int WERD_CHOICE::GetTopScriptID() const { if (sid[max_sid] < length_ / 2) { max_sid = unicharset_->null_sid(); } - delete[] sid; return max_sid; } // Fixes the state_ for a chop at the given blob_posiiton. 
void WERD_CHOICE::UpdateStateForSplit(int blob_position) { int total_chunks = 0; - for (int i = 0; i < length_; ++i) { + for (unsigned i = 0; i < length_; ++i) { total_chunks += state_[i]; if (total_chunks > blob_position) { ++state_[i]; @@ -678,9 +673,9 @@ void WERD_CHOICE::UpdateStateForSplit(int blob_position) { } // Returns the sum of all the state elements, being the total number of blobs. -int WERD_CHOICE::TotalOfStates() const { - int total_chunks = 0; - for (int i = 0; i < length_; ++i) { +unsigned WERD_CHOICE::TotalOfStates() const { + unsigned total_chunks = 0; + for (unsigned i = 0; i < length_; ++i) { total_chunks += state_[i]; } return total_chunks; @@ -693,25 +688,25 @@ int WERD_CHOICE::TotalOfStates() const { */ void WERD_CHOICE::print(const char *msg) const { tprintf("%s : ", msg); - for (int i = 0; i < length_; ++i) { + for (unsigned i = 0; i < length_; ++i) { tprintf("%s", unicharset_->id_to_unichar(unichar_ids_[i])); } tprintf(" : R=%g, C=%g, F=%g, Perm=%d, xht=[%g,%g], ambig=%d\n", rating_, certainty_, adjust_factor_, permuter_, min_x_height_, max_x_height_, dangerous_ambig_found_); tprintf("pos"); - for (int i = 0; i < length_; ++i) { + for (unsigned i = 0; i < length_; ++i) { tprintf("\t%s", ScriptPosToString(script_pos_[i])); } tprintf("\nstr"); - for (int i = 0; i < length_; ++i) { + for (unsigned i = 0; i < length_; ++i) { tprintf("\t%s", unicharset_->id_to_unichar(unichar_ids_[i])); } tprintf("\nstate:"); - for (int i = 0; i < length_; ++i) { + for (unsigned i = 0; i < length_; ++i) { tprintf("\t%d ", state_[i]); } tprintf("\nC"); - for (int i = 0; i < length_; ++i) { + for (unsigned i = 0; i < length_; ++i) { tprintf("\t%.3f", certainties_[i]); } tprintf("\n"); @@ -720,7 +715,7 @@ void WERD_CHOICE::print(const char *msg) const { // Prints the segmentation state with an introductory message. 
void WERD_CHOICE::print_state(const char *msg) const { tprintf("%s", msg); - for (int i = 0; i < length_; ++i) { + for (unsigned i = 0; i < length_; ++i) { tprintf(" %d", state_[i]); } tprintf("\n"); @@ -738,9 +733,10 @@ void WERD_CHOICE::DisplaySegmentation(TWERD *word) { static std::vector prev_drawn_state; bool already_done = prev_drawn_state.size() == length_; if (!already_done) { + prev_drawn_state.clear(); prev_drawn_state.resize(length_); } - for (int i = 0; i < length_; ++i) { + for (unsigned i = 0; i < length_; ++i) { if (prev_drawn_state[i] != state_[i]) { already_done = false; } @@ -759,7 +755,7 @@ void WERD_CHOICE::DisplaySegmentation(TWERD *word) { TBOX bbox; int blob_index = 0; - for (int c = 0; c < length_; ++c) { + for (unsigned c = 0; c < length_; ++c) { auto color = static_cast(c % kNumColors + 3); for (int i = 0; i < state_[c]; ++i, ++blob_index) { TBLOB *blob = word->blobs[blob_index]; @@ -779,14 +775,14 @@ bool EqualIgnoringCaseAndTerminalPunct(const WERD_CHOICE &word1, const WERD_CHOI if (word2.unicharset() != uchset) { return false; } - int w1start, w1end; + unsigned w1start, w1end; word1.punct_stripped(&w1start, &w1end); - int w2start, w2end; + unsigned w2start, w2end; word2.punct_stripped(&w2start, &w2end); if (w1end - w1start != w2end - w2start) { return false; } - for (int i = 0; i < w1end - w1start; i++) { + for (unsigned i = 0; i < w1end - w1start; i++) { if (uchset->to_lower(word1.unichar_id(w1start + i)) != uchset->to_lower(word2.unichar_id(w2start + i))) { return false; diff --git a/src/ccstruct/ratngs.h b/src/ccstruct/ratngs.h index a3195a257..5de2f52aa 100644 --- a/src/ccstruct/ratngs.h +++ b/src/ccstruct/ratngs.h @@ -59,8 +59,8 @@ public: unichar_id_ = UNICHAR_SPACE; fontinfo_id_ = -1; fontinfo_id2_ = -1; - rating_ = 10.0; - certainty_ = -1.0; + rating_ = 10.0f; + certainty_ = -1.0f; script_id_ = -1; min_xheight_ = 0.0f; max_xheight_ = 0.0f; @@ -170,13 +170,17 @@ public: bool PosAndSizeAgree(const BLOB_CHOICE &other, float 
x_height, bool debug) const; void print(const UNICHARSET *unicharset) const { - tprintf("r%.2f c%.2f x[%g,%g]: %d %s", rating_, certainty_, min_xheight_, max_xheight_, + tprintf("r%.2f c%.2f x[%g,%g]: %d %s", + static_cast(rating_), + static_cast(certainty_), + static_cast(min_xheight_), + static_cast(max_xheight_), unichar_id_, (unicharset == nullptr) ? "" : unicharset->debug_str(unichar_id_).c_str()); } void print_full() const { print(nullptr); tprintf(" script=%d, font1=%d, font2=%d, yshift=%g, classifier=%d\n", script_id_, fontinfo_id_, - fontinfo_id2_, yshift_, classifier_); + fontinfo_id2_, static_cast(yshift_), classifier_); } // Sort function for sorting BLOB_CHOICEs in increasing order of rating. static int SortByRating(const void *p1, const void *p2) { @@ -280,7 +284,7 @@ public: bool empty() const { return length_ == 0; } - inline int length() const { + inline unsigned length() const { return length_; } float adjust_factor() const { @@ -292,15 +296,15 @@ public: inline const std::vector &unichar_ids() const { return unichar_ids_; } - inline UNICHAR_ID unichar_id(int index) const { + inline UNICHAR_ID unichar_id(unsigned index) const { assert(index < length_); return unichar_ids_[index]; } - inline int state(int index) const { + inline unsigned state(unsigned index) const { return state_[index]; } - ScriptPos BlobPosition(int index) const { - if (index < 0 || index >= length_) { + ScriptPos BlobPosition(unsigned index) const { + if (index >= length_) { return SP_NORMAL; } return script_pos_[index]; @@ -311,7 +315,7 @@ public: inline float certainty() const { return certainty_; } - inline float certainty(int index) const { + inline float certainty(unsigned index) const { return certainties_[index]; } inline float min_x_height() const { @@ -331,13 +335,13 @@ public: // Returns the BLOB_CHOICE_LIST corresponding to the given index in the word, // taken from the appropriate cell in the ratings MATRIX. // Borrowed pointer, so do not delete. 
- BLOB_CHOICE_LIST *blob_choices(int index, MATRIX *ratings) const; + BLOB_CHOICE_LIST *blob_choices(unsigned index, MATRIX *ratings) const; // Returns the MATRIX_COORD corresponding to the location in the ratings // MATRIX for the given index into the word. - MATRIX_COORD MatrixCoord(int index) const; + MATRIX_COORD MatrixCoord(unsigned index) const; - inline void set_unichar_id(UNICHAR_ID unichar_id, int index) { + inline void set_unichar_id(UNICHAR_ID unichar_id, unsigned index) { assert(index < length_); unichar_ids_[index] = unichar_id; } @@ -359,7 +363,7 @@ public: // Note: this function should only be used if all the fields // are populated manually with set_* functions (rather than // (copy)constructors and append_* functions). - inline void set_length(int len) { + inline void set_length(unsigned len) { ASSERT_HOST(reserved_ >= len); length_ = len; } @@ -379,7 +383,7 @@ public: /// Initializes WERD_CHOICE - reserves length slots in unichar_ids_ and /// fragment_length_ arrays. Sets other values to default (blank) values. - inline void init(int reserved) { + inline void init(unsigned reserved) { reserved_ = reserved; if (reserved > 0) { unichar_ids_.resize(reserved); @@ -431,7 +435,7 @@ public: void append_unichar_id(UNICHAR_ID unichar_id, int blob_count, float rating, float certainty); inline void set_unichar_id(UNICHAR_ID unichar_id, int blob_count, float rating, float certainty, - int index) { + unsigned index) { assert(index < length_); unichar_ids_[index] = unichar_id; state_[index] = blob_count; @@ -444,14 +448,14 @@ public: } // Sets the entries for the given index from the BLOB_CHOICE, assuming // unit fragment lengths, but setting the state for this index to blob_count. 
- void set_blob_choice(int index, int blob_count, const BLOB_CHOICE *blob_choice); + void set_blob_choice(unsigned index, int blob_count, const BLOB_CHOICE *blob_choice); bool contains_unichar_id(UNICHAR_ID unichar_id) const; - void remove_unichar_ids(int index, int num); + void remove_unichar_ids(unsigned index, int num); inline void remove_last_unichar_id() { --length_; } - inline void remove_unichar_id(int index) { + inline void remove_unichar_id(unsigned index) { this->remove_unichar_ids(index, 1); } bool has_rtl_unichar_id() const; @@ -460,7 +464,7 @@ public: // Returns the half-open interval of unichar_id indices [start, end) which // enclose the core portion of this word -- the part after stripping // punctuation from the left and right. - void punct_stripped(int *start_core, int *end_core) const; + void punct_stripped(unsigned *start_core, unsigned *end_core) const; // Returns the indices [start, end) containing the core of the word, stripped // of any superscript digits on either side. (i.e., the non-footnote part @@ -469,12 +473,12 @@ public: // Return a copy of this WERD_CHOICE with the choices [start, end). // The result is useful only for checking against a dictionary. - WERD_CHOICE shallow_copy(int start, int end) const; + WERD_CHOICE shallow_copy(unsigned start, unsigned end) const; void string_and_lengths(std::string *word_str, std::string *word_lengths_str) const; std::string debug_string() const { std::string word_str; - for (int i = 0; i < length_; ++i) { + for (unsigned i = 0; i < length_; ++i) { word_str += unicharset_->debug_str(unichar_ids_[i]); word_str += " "; } @@ -482,7 +486,7 @@ public: } // Returns true if any unichar_id in the word is a non-space-delimited char. bool ContainsAnyNonSpaceDelimited() const { - for (int i = 0; i < length_; ++i) { + for (unsigned i = 0; i < length_; ++i) { if (!unicharset_->IsSpaceDelimited(unichar_ids_[i])) { return true; } @@ -491,7 +495,7 @@ public: } // Returns true if the word is all spaces. 
bool IsAllSpaces() const { - for (int i = 0; i < length_; ++i) { + for (unsigned i = 0; i < length_; ++i) { if (unichar_ids_[i] != UNICHAR_SPACE) { return false; } @@ -552,7 +556,7 @@ public: void UpdateStateForSplit(int blob_position); // Returns the sum of all the state elements, being the total number of blobs. - int TotalOfStates() const; + unsigned TotalOfStates() const; void print() const { this->print(""); @@ -591,8 +595,8 @@ private: std::vector script_pos_; // Normal/Sub/Superscript of each unichar. std::vector state_; // Number of blobs in each unichar. std::vector certainties_; // Certainty of each unichar. - int reserved_; // size of the above arrays - int length_; // word length + unsigned reserved_; // size of the above arrays + unsigned length_; // word length // Factor that was used to adjust the rating. float adjust_factor_; // Rating is the sum of the ratings of the individual blobs in the word. diff --git a/src/ccstruct/rect.cpp b/src/ccstruct/rect.cpp index 994885958..e7bee8638 100644 --- a/src/ccstruct/rect.cpp +++ b/src/ccstruct/rect.cpp @@ -83,10 +83,10 @@ void TBOX::rotate_large(const FCOORD &vec) { TBOX TBOX::intersection( // shared area box const TBOX &box) const { - int16_t left; - int16_t bottom; - int16_t right; - int16_t top; + TDimension left; + TDimension bottom; + TDimension right; + TDimension top; if (overlap(box)) { if (box.bot_left.x() > bot_left.x()) { left = box.bot_left.x(); diff --git a/src/ccstruct/rect.h b/src/ccstruct/rect.h index 9917c94c4..ab4f57beb 100644 --- a/src/ccstruct/rect.h +++ b/src/ccstruct/rect.h @@ -21,6 +21,7 @@ #include "points.h" // for ICOORD, FCOORD #include "scrollview.h" // for ScrollView, ScrollView::Color +#include "tesstypes.h" // for TDimension #include "tprintf.h" // for tprintf #include // for DLLSYM @@ -50,7 +51,7 @@ public: // in the right order. 
//********************************************************************* TBOX( // constructor - int16_t left, int16_t bottom, int16_t right, int16_t top) + TDimension left, TDimension bottom, TDimension right, TDimension top) : bot_left(left, bottom), top_right(right, top) {} TBOX( // box around FCOORD @@ -64,28 +65,28 @@ public: return bot_left == other.bot_left && top_right == other.top_right; } - int16_t top() const { // coord of top + TDimension top() const { // coord of top return top_right.y(); } void set_top(int y) { top_right.set_y(y); } - int16_t bottom() const { // coord of bottom + TDimension bottom() const { // coord of bottom return bot_left.y(); } void set_bottom(int y) { bot_left.set_y(y); } - int16_t left() const { // coord of left + TDimension left() const { // coord of left return bot_left.x(); } void set_left(int x) { bot_left.set_x(x); } - int16_t right() const { // coord of right + TDimension right() const { // coord of right return top_right.x(); } void set_right(int x) { @@ -114,7 +115,7 @@ public: return top_right; } - int16_t height() const { // how high is it? + TDimension height() const { // how high is it? if (!null_box()) { return top_right.y() - bot_left.y(); } else { @@ -122,7 +123,7 @@ public: } } - int16_t width() const { // how high is it? + TDimension width() const { // how high is it? 
if (!null_box()) { return top_right.x() - bot_left.x(); } else { @@ -147,22 +148,22 @@ public: } void move_bottom_edge( // move one edge - const int16_t y) { // by +/- y + const TDimension y) { // by +/- y bot_left += ICOORD(0, y); } void move_left_edge( // move one edge - const int16_t x) { // by +/- x + const TDimension x) { // by +/- x bot_left += ICOORD(x, 0); } void move_right_edge( // move one edge - const int16_t x) { // by +/- x + const TDimension x) { // by +/- x top_right += ICOORD(x, 0); } void move_top_edge( // move one edge - const int16_t y) { // by +/- y + const TDimension y) { // by +/- y top_right += ICOORD(0, y); } @@ -174,33 +175,33 @@ public: void move( // move box const FCOORD vec) { // by float vector - bot_left.set_x(static_cast(std::floor(bot_left.x() + vec.x()))); + bot_left.set_x(static_cast(std::floor(bot_left.x() + vec.x()))); // round left - bot_left.set_y(static_cast(std::floor(bot_left.y() + vec.y()))); + bot_left.set_y(static_cast(std::floor(bot_left.y() + vec.y()))); // round down - top_right.set_x(static_cast(std::ceil(top_right.x() + vec.x()))); + top_right.set_x(static_cast(std::ceil(top_right.x() + vec.x()))); // round right - top_right.set_y(static_cast(std::ceil(top_right.y() + vec.y()))); + top_right.set_y(static_cast(std::ceil(top_right.y() + vec.y()))); // round up } void scale( // scale box const float f) { // by multiplier // round left - bot_left.set_x(static_cast(std::floor(bot_left.x() * f))); + bot_left.set_x(static_cast(std::floor(bot_left.x() * f))); // round down - bot_left.set_y(static_cast(std::floor(bot_left.y() * f))); + bot_left.set_y(static_cast(std::floor(bot_left.y() * f))); // round right - top_right.set_x(static_cast(std::ceil(top_right.x() * f))); + top_right.set_x(static_cast(std::ceil(top_right.x() * f))); // round up - top_right.set_y(static_cast(std::ceil(top_right.y() * f))); + top_right.set_y(static_cast(std::ceil(top_right.y() * f))); } void scale( // scale box const FCOORD vec) { // by float 
vector - bot_left.set_x(static_cast(std::floor(bot_left.x() * vec.x()))); - bot_left.set_y(static_cast(std::floor(bot_left.y() * vec.y()))); - top_right.set_x(static_cast(std::ceil(top_right.x() * vec.x()))); - top_right.set_y(static_cast(std::ceil(top_right.y() * vec.y()))); + bot_left.set_x(static_cast(std::floor(bot_left.x() * vec.x()))); + bot_left.set_y(static_cast(std::floor(bot_left.y() * vec.y()))); + top_right.set_x(static_cast(std::ceil(top_right.x() * vec.x()))); + top_right.set_y(static_cast(std::ceil(top_right.y() * vec.y()))); } // rotate doesn't enlarge the box - it just rotates the bottom-left @@ -330,9 +331,9 @@ inline TBOX::TBOX( // constructor const FCOORD pt // floating centre ) { bot_left = - ICOORD(static_cast(std::floor(pt.x())), static_cast(std::floor(pt.y()))); + ICOORD(static_cast(std::floor(pt.x())), static_cast(std::floor(pt.y()))); top_right = - ICOORD(static_cast(std::ceil(pt.x())), static_cast(std::ceil(pt.y()))); + ICOORD(static_cast(std::ceil(pt.x())), static_cast(std::ceil(pt.y()))); } /********************************************************************** @@ -416,7 +417,7 @@ inline bool TBOX::x_overlap(const TBOX &box) const { **********************************************************************/ inline bool TBOX::major_x_overlap(const TBOX &box) const { - int16_t overlap = box.width(); + TDimension overlap = box.width(); if (this->left() > box.left()) { overlap -= this->left() - box.left(); } @@ -442,7 +443,7 @@ inline bool TBOX::y_overlap(const TBOX &box) const { **********************************************************************/ inline bool TBOX::major_y_overlap(const TBOX &box) const { - int16_t overlap = box.height(); + TDimension overlap = box.height(); if (this->bottom() > box.bottom()) { overlap -= this->bottom() - box.bottom(); } diff --git a/src/ccstruct/rejctmap.cpp b/src/ccstruct/rejctmap.cpp index 7335f812d..e95c06941 100644 --- a/src/ccstruct/rejctmap.cpp +++ b/src/ccstruct/rejctmap.cpp @@ -58,22 +58,20 @@ void 
REJ::full_print(FILE *fp) const { REJMAP &REJMAP::operator=(const REJMAP &source) { initialise(source.len); - for (int i = 0; i < len; i++) { + for (unsigned i = 0; i < len; i++) { ptr[i] = source.ptr[i]; } return *this; } -void REJMAP::initialise(int16_t length) { +void REJMAP::initialise(uint16_t length) { ptr = std::make_unique(length); len = length; } int16_t REJMAP::accept_count() const { // How many accepted? - int i; int16_t count = 0; - - for (i = 0; i < len; i++) { + for (unsigned i = 0; i < len; i++) { if (ptr[i].accepted()) { count++; } @@ -82,7 +80,7 @@ int16_t REJMAP::accept_count() const { // How many accepted? } bool REJMAP::recoverable_rejects() const { // Any non perm rejs? - for (int i = 0; i < len; i++) { + for (unsigned i = 0; i < len; i++) { if (ptr[i].recoverable()) { return true; } @@ -91,7 +89,7 @@ bool REJMAP::recoverable_rejects() const { // Any non perm rejs? } bool REJMAP::quality_recoverable_rejects() const { // Any potential rejs? - for (int i = 0; i < len; i++) { + for (unsigned i = 0; i < len; i++) { if (ptr[i].accept_if_good_quality()) { return true; } @@ -100,9 +98,8 @@ bool REJMAP::quality_recoverable_rejects() const { // Any potential rejs? 
} void REJMAP::remove_pos( // Cut out an element - int16_t pos // element to remove + uint16_t pos // element to remove ) { - ASSERT_HOST(pos >= 0); ASSERT_HOST(pos < len); ASSERT_HOST(len > 0); @@ -113,45 +110,34 @@ void REJMAP::remove_pos( // Cut out an element } void REJMAP::print(FILE *fp) const { - int i; - char buff[512]; - - for (i = 0; i < len; i++) { - buff[i] = ptr[i].display_char(); + fputc('"', fp); + for (unsigned i = 0; i < len; i++) { + fputc( ptr[i].display_char(), fp); } - buff[i] = '\0'; - fprintf(fp, "\"%s\"", buff); + fputc('"', fp); } void REJMAP::full_print(FILE *fp) const { - int i; - - for (i = 0; i < len; i++) { + for (unsigned i = 0; i < len; i++) { ptr[i].full_print(fp); fprintf(fp, "\n"); } } void REJMAP::rej_word_small_xht() { // Reject whole word - int i; - - for (i = 0; i < len; i++) { + for (unsigned i = 0; i < len; i++) { ptr[i].setrej_small_xht(); } } void REJMAP::rej_word_tess_failure() { // Reject whole word - int i; - - for (i = 0; i < len; i++) { + for (unsigned i = 0; i < len; i++) { ptr[i].setrej_tess_failure(); } } void REJMAP::rej_word_not_tess_accepted() { // Reject whole word - int i; - - for (i = 0; i < len; i++) { + for (unsigned i = 0; i < len; i++) { if (ptr[i].accepted()) { ptr[i].setrej_not_tess_accepted(); } @@ -159,9 +145,7 @@ void REJMAP::rej_word_not_tess_accepted() { // Reject whole word } void REJMAP::rej_word_contains_blanks() { // Reject whole word - int i; - - for (i = 0; i < len; i++) { + for (unsigned i = 0; i < len; i++) { if (ptr[i].accepted()) { ptr[i].setrej_contains_blanks(); } @@ -169,9 +153,7 @@ void REJMAP::rej_word_contains_blanks() { // Reject whole word } void REJMAP::rej_word_bad_permuter() { // Reject whole word - int i; - - for (i = 0; i < len; i++) { + for (unsigned i = 0; i < len; i++) { if (ptr[i].accepted()) { ptr[i].setrej_bad_permuter(); } @@ -179,9 +161,7 @@ void REJMAP::rej_word_bad_permuter() { // Reject whole word } void REJMAP::rej_word_xht_fixup() { // Reject whole word - int i; 
- - for (i = 0; i < len; i++) { + for (unsigned i = 0; i < len; i++) { if (ptr[i].accepted()) { ptr[i].setrej_xht_fixup(); } @@ -189,9 +169,7 @@ void REJMAP::rej_word_xht_fixup() { // Reject whole word } void REJMAP::rej_word_no_alphanums() { // Reject whole word - int i; - - for (i = 0; i < len; i++) { + for (unsigned i = 0; i < len; i++) { if (ptr[i].accepted()) { ptr[i].setrej_no_alphanums(); } @@ -199,9 +177,7 @@ void REJMAP::rej_word_no_alphanums() { // Reject whole word } void REJMAP::rej_word_mostly_rej() { // Reject whole word - int i; - - for (i = 0; i < len; i++) { + for (unsigned i = 0; i < len; i++) { if (ptr[i].accepted()) { ptr[i].setrej_mostly_rej(); } @@ -209,9 +185,7 @@ void REJMAP::rej_word_mostly_rej() { // Reject whole word } void REJMAP::rej_word_bad_quality() { // Reject whole word - int i; - - for (i = 0; i < len; i++) { + for (unsigned i = 0; i < len; i++) { if (ptr[i].accepted()) { ptr[i].setrej_bad_quality(); } @@ -219,9 +193,7 @@ void REJMAP::rej_word_bad_quality() { // Reject whole word } void REJMAP::rej_word_doc_rej() { // Reject whole word - int i; - - for (i = 0; i < len; i++) { + for (unsigned i = 0; i < len; i++) { if (ptr[i].accepted()) { ptr[i].setrej_doc_rej(); } @@ -229,9 +201,7 @@ void REJMAP::rej_word_doc_rej() { // Reject whole word } void REJMAP::rej_word_block_rej() { // Reject whole word - int i; - - for (i = 0; i < len; i++) { + for (unsigned i = 0; i < len; i++) { if (ptr[i].accepted()) { ptr[i].setrej_block_rej(); } @@ -239,9 +209,7 @@ void REJMAP::rej_word_block_rej() { // Reject whole word } void REJMAP::rej_word_row_rej() { // Reject whole word - int i; - - for (i = 0; i < len; i++) { + for (unsigned i = 0; i < len; i++) { if (ptr[i].accepted()) { ptr[i].setrej_row_rej(); } diff --git a/src/ccstruct/rejctmap.h b/src/ccstruct/rejctmap.h index 678d6ba1a..beeb53737 100644 --- a/src/ccstruct/rejctmap.h +++ b/src/ccstruct/rejctmap.h @@ -309,10 +309,10 @@ public: class REJMAP { std::unique_ptr ptr; // ptr to the chars - 
int16_t len; // Number of chars + uint16_t len = 0; // Number of chars public: - REJMAP() : len(0) {} + REJMAP() = default; REJMAP(const REJMAP &rejmap) { *this = rejmap; @@ -321,16 +321,16 @@ public: REJMAP &operator=(const REJMAP &source); // Sets up the ptr array to length, whatever it was before. - void initialise(int16_t length); + void initialise(uint16_t length); REJ &operator[]( // access function - int16_t index) const // map index + uint16_t index) const // map index { ASSERT_HOST(index < len); return ptr[index]; // no bounds checks } - int32_t length() const { // map length + uint16_t length() const { // map length return len; } @@ -340,8 +340,8 @@ public: return len - accept_count(); } - void remove_pos( // Cut out an element - int16_t pos); // element to remove + // Cut out an element. + void remove_pos(uint16_t pos); void print(FILE *fp) const; diff --git a/src/ccstruct/seam.cpp b/src/ccstruct/seam.cpp index 431c0bc34..4f299d789 100644 --- a/src/ccstruct/seam.cpp +++ b/src/ccstruct/seam.cpp @@ -143,7 +143,7 @@ void SEAM::UndoSeam(TBLOB *blob, TBLOB *other_blob) const { // Prints everything in *this SEAM. 
void SEAM::Print(const char *label) const { tprintf("%s", label); - tprintf(" %6.2f @ (%d,%d), p=%d, n=%d ", priority_, location_.x, location_.y, widthp_, widthn_); + tprintf(" %6.2f @ (%d,%d), p=%u, n=%u ", priority_, location_.x, location_.y, widthp_, widthn_); for (int s = 0; s < num_splits_; ++s) { splits_[s].Print(); if (s + 1 < num_splits_) { @@ -263,7 +263,7 @@ void start_seam_list(TWERD *word, std::vector *seam_array) { seam_array->clear(); TPOINT location; - for (int b = 1; b < word->NumBlobs(); ++b) { + for (unsigned b = 1; b < word->NumBlobs(); ++b) { TBOX bbox = word->blobs[b - 1]->bounding_box(); TBOX nbox = word->blobs[b]->bounding_box(); location.x = (bbox.right() + nbox.left()) / 2; diff --git a/src/ccstruct/seam.h b/src/ccstruct/seam.h index 2c1e343a5..73acaea72 100644 --- a/src/ccstruct/seam.h +++ b/src/ccstruct/seam.h @@ -34,10 +34,10 @@ class SEAM { public: // A seam with no splits SEAM(float priority, const TPOINT &location) - : priority_(priority), location_(location), widthp_(0), widthn_(0), num_splits_(0) {} + : priority_(priority), location_(location), num_splits_(0) {} // A seam with a single split point. SEAM(float priority, const TPOINT &location, const SPLIT &split) - : priority_(priority), location_(location), widthp_(0), widthn_(0), num_splits_(1) { + : priority_(priority), location_(location), num_splits_(1) { splits_[0] = split; } // Default copy constructor, operator= and destructor are OK! @@ -191,8 +191,8 @@ private: // A range such that all splits in *this SEAM are contained within blobs in // the range [index - widthn_,index + widthp_] where index is the index of // this SEAM in the seams vector. - int8_t widthp_; - int8_t widthn_; + uint8_t widthp_ = 0; + uint8_t widthn_ = 0; // Number of splits_ that are used. uint8_t num_splits_; // Set of pairs of points that are the ends of each split in the SEAM. 
diff --git a/src/ccstruct/split.cpp b/src/ccstruct/split.cpp index bc581751b..891a40839 100644 --- a/src/ccstruct/split.cpp +++ b/src/ccstruct/split.cpp @@ -135,7 +135,7 @@ bool SPLIT::IsLittleChunk(int min_points, int min_area) const { * * Create an EDGEPT and hook it into an existing list of edge points. **********************************************************************/ -EDGEPT *make_edgept(int x, int y, EDGEPT *next, EDGEPT *prev) { +EDGEPT *make_edgept(TDimension x, TDimension y, EDGEPT *next, EDGEPT *prev) { EDGEPT *this_edgept; /* Create point */ this_edgept = new EDGEPT; diff --git a/src/ccstruct/split.h b/src/ccstruct/split.h index 5ad28bace..529526d29 100644 --- a/src/ccstruct/split.h +++ b/src/ccstruct/split.h @@ -111,12 +111,12 @@ struct SPLIT { V a r i a b l e s ----------------------------------------------------------------------*/ -extern BOOL_VAR_H(wordrec_display_splits, 0, "Display splits"); +extern BOOL_VAR_H(wordrec_display_splits); /*---------------------------------------------------------------------- F u n c t i o n s ----------------------------------------------------------------------*/ -EDGEPT *make_edgept(int x, int y, EDGEPT *next, EDGEPT *prev); +EDGEPT *make_edgept(TDimension x, TDimension y, EDGEPT *next, EDGEPT *prev); void remove_edgept(EDGEPT *point); diff --git a/src/ccstruct/statistc.cpp b/src/ccstruct/statistc.cpp index dd08012dc..70c10fc35 100644 --- a/src/ccstruct/statistc.cpp +++ b/src/ccstruct/statistc.cpp @@ -521,12 +521,12 @@ int STATS::top_n_modes(int max_modes, std::vector> &modes) break; } } - if (total_count > least_count || modes.size() < max_modes) { + if (total_count > least_count || modes.size() < static_cast(max_modes)) { // We definitely want this mode, so if we have enough discard the least. - if (modes.size() == max_modes) { + if (modes.size() == static_cast(max_modes)) { modes.resize(max_modes - 1); } - int target_index = 0; + size_t target_index = 0; // Linear search for the target insertion point. 
while (target_index < modes.size() && modes[target_index].data() >= total_count) { ++target_index; diff --git a/src/ccstruct/statistc.h b/src/ccstruct/statistc.h index 36fbb9187..4689d3dde 100644 --- a/src/ccstruct/statistc.h +++ b/src/ccstruct/statistc.h @@ -73,6 +73,9 @@ public: double median() const; // get median of samples // Returns the count of the given value. int32_t pile_count(int32_t value) const { + if (buckets_ == nullptr) { + return 0; + } if (value <= rangemin_) { return buckets_[0]; } diff --git a/src/ccstruct/stepblob.cpp b/src/ccstruct/stepblob.cpp index 513619d76..4c61b6c65 100644 --- a/src/ccstruct/stepblob.cpp +++ b/src/ccstruct/stepblob.cpp @@ -45,14 +45,14 @@ static void position_outline( // put in place C_OUTLINE *outline, // thing to place C_OUTLINE_LIST *destlist // desstination list ) { - C_OUTLINE *dest_outline; // outline from dest list C_OUTLINE_IT it = destlist; // iterator // iterator on children C_OUTLINE_IT child_it = outline->child(); if (!it.empty()) { do { - dest_outline = it.data(); // get destination + // outline from dest list + C_OUTLINE *dest_outline = it.data(); // get destination // encloses dest if (*dest_outline < *outline) { // take off list @@ -248,13 +248,12 @@ C_BLOB *C_BLOB::FakeBlob(const TBOX &box) { **********************************************************************/ TBOX C_BLOB::bounding_box() const { // bounding box - C_OUTLINE *outline; // current outline // This is a read-only iteration of the outlines. 
C_OUTLINE_IT it = const_cast(&outlines); TBOX box; // bounding box for (it.mark_cycle_pt(); !it.cycled_list(); it.forward()) { - outline = it.data(); + C_OUTLINE *outline = it.data(); box += outline->bounding_box(); } return box; @@ -267,13 +266,11 @@ TBOX C_BLOB::bounding_box() const { // bounding box **********************************************************************/ int32_t C_BLOB::area() { // area - C_OUTLINE *outline; // current outline C_OUTLINE_IT it = &outlines; // outlines of blob - int32_t total; // total area + int32_t total = 0; // total area - total = 0; for (it.mark_cycle_pt(); !it.cycled_list(); it.forward()) { - outline = it.data(); + C_OUTLINE *outline = it.data(); total += outline->area(); } return total; @@ -286,13 +283,11 @@ int32_t C_BLOB::area() { // area **********************************************************************/ int32_t C_BLOB::perimeter() { - C_OUTLINE *outline; // current outline C_OUTLINE_IT it = &outlines; // outlines of blob - int32_t total; // total perimeter + int32_t total = 0; // total perimeter - total = 0; for (it.mark_cycle_pt(); !it.cycled_list(); it.forward()) { - outline = it.data(); + C_OUTLINE *outline = it.data(); total += outline->perimeter(); } return total; @@ -305,13 +300,11 @@ int32_t C_BLOB::perimeter() { **********************************************************************/ int32_t C_BLOB::outer_area() { // area - C_OUTLINE *outline; // current outline C_OUTLINE_IT it = &outlines; // outlines of blob - int32_t total; // total area + int32_t total = 0; // total area - total = 0; for (it.mark_cycle_pt(); !it.cycled_list(); it.forward()) { - outline = it.data(); + C_OUTLINE *outline = it.data(); total += outline->outer_area(); } return total; @@ -327,13 +320,11 @@ int32_t C_BLOB::outer_area() { // area int32_t C_BLOB::count_transitions( // area int32_t threshold // on size ) { - C_OUTLINE *outline; // current outline C_OUTLINE_IT it = &outlines; // outlines of blob - int32_t total; // total area + 
int32_t total = 0; // total area - total = 0; for (it.mark_cycle_pt(); !it.cycled_list(); it.forward()) { - outline = it.data(); + C_OUTLINE *outline = it.data(); total += outline->count_transitions(threshold); } return total; @@ -431,8 +422,7 @@ int16_t C_BLOB::EstimateBaselinePosition() { return bottom; // This is only for non-CJK blobs. } // Get the minimum y coordinate at each x-coordinate. - std::vector y_mins; - y_mins.resize(width + 1, box.top()); + std::vector y_mins(width + 1, box.top()); C_OUTLINE_IT it(&outlines); for (it.mark_cycle_pt(); !it.cycled_list(); it.forward()) { C_OUTLINE *outline = it.data(); diff --git a/src/ccstruct/tabletransfer.h b/src/ccstruct/tabletransfer.h deleted file mode 100644 index 1a74ddc91..000000000 --- a/src/ccstruct/tabletransfer.h +++ /dev/null @@ -1,67 +0,0 @@ -/****************************************************************************** - * File: tabletransfer.h - * Description: Infrastructure for the transfer of table detection results - * Author: Stefan Brechtken - * - * (C) Copyright 2021, Stefan Brechtken - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * http://www.apache.org/licenses/LICENSE-2.0 - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - ****************************************************************************/ - -#ifndef TESSERACT_CCSTRUCT_TABLETRANSFER_H_ -#define TESSERACT_CCSTRUCT_TABLETRANSFER_H_ -#include -#include -#include "rect.h" - -namespace tesseract { - -/// Structure for data transfer from table detector -struct TessTable { - tesseract::TBOX box; - std::vector rows; - std::vector cols; -}; - -/** \brief You can use this small template function to ensure that one and - * only one object of type T exists. It implements the Singleton Pattern. - * - * T must be default-constructable. - * Usage examples: - * A& a = uniqueInstance(); - * a.xyz(); - * uniqueInstance(make_unique(42)); // replace instance - * a.foo(); - * or - * uniqueInstance().xyz(); - */ -template -T& uniqueInstance(std::unique_ptr new_instance = nullptr) -{ - static std::unique_ptr _instance = std::make_unique(); - - if (new_instance) { - _instance = std::move(new_instance); - } - - return *_instance.get(); -} - -/// return const version of \see uniqueInstance -template -const T& constUniqueInstance(std::unique_ptr new_instance = nullptr) -{ - return uniqueInstance(std::move(new_instance)); -} - -} // namespace tesseract - -#endif // TESSERACT_CCSTRUCT_TABLETRANSFER_H_ diff --git a/src/ccstruct/werd.cpp b/src/ccstruct/werd.cpp index 31ed738b1..22f9cda71 100644 --- a/src/ccstruct/werd.cpp +++ b/src/ccstruct/werd.cpp @@ -121,7 +121,7 @@ WERD::WERD(C_BLOB_LIST *blob_list, ///< In word order while (!end_it.at_last()) { end_it.forward(); // move to last } - (reinterpret_cast(&cblobs))->assign_to_sublist(&start_it, &end_it); + cblobs.assign_to_sublist(&start_it, &end_it); // move to our list blanks = clone->blanks; // fprintf(stderr,"Wrong constructor!!!!\n"); diff --git a/src/ccutil/ambigs.cpp b/src/ccutil/ambigs.cpp index a5c50f152..b59566105 100644 --- a/src/ccutil/ambigs.cpp +++ b/src/ccutil/ambigs.cpp @@ -49,7 +49,7 @@ AmbigSpec::AmbigSpec() { // Initializes the ambigs by adding a nullptr pointer to each table. 
void UnicharAmbigs::InitUnicharAmbigs(const UNICHARSET &unicharset, bool use_ambigs_for_adaption) { - for (int i = 0; i < unicharset.size(); ++i) { + for (unsigned i = 0; i < unicharset.size(); ++i) { replace_ambigs_.push_back(nullptr); dang_ambigs_.push_back(nullptr); one_to_one_definite_ambigs_.push_back(nullptr); @@ -72,7 +72,6 @@ void UnicharAmbigs::LoadUniversal(const UNICHARSET &encoder_set, UNICHARSET *uni void UnicharAmbigs::LoadUnicharAmbigs(const UNICHARSET &encoder_set, TFile *ambig_file, int debug_level, bool use_ambigs_for_adaption, UNICHARSET *unicharset) { - int i, j; UnicharIdVector *adaption_ambigs_entry; if (debug_level) { tprintf("Reading ambiguities\n"); @@ -91,7 +90,7 @@ void UnicharAmbigs::LoadUnicharAmbigs(const UNICHARSET &encoder_set, TFile *ambi // Determine the version of the ambigs file. int version = 0; - ASSERT_HOST(ambig_file->FGets(buffer, kBufferSize) != nullptr && strlen(buffer) > 0); + ASSERT_HOST(ambig_file->FGets(buffer, kBufferSize) != nullptr && buffer[0] != '\0'); if (*buffer == 'v') { version = static_cast(strtol(buffer + 1, nullptr, 10)); ++line_num; @@ -130,7 +129,7 @@ void UnicharAmbigs::LoadUnicharAmbigs(const UNICHARSET &encoder_set, TFile *ambi // Silently ignore invalid strings, as before, so it is safe to use a // universal ambigs file. if (unicharset->encode_string(replacement_string, true, &encoding, nullptr, nullptr)) { - for (i = 0; i < test_ambig_part_size; ++i) { + for (int i = 0; i < test_ambig_part_size; ++i) { if (ambigs_for_adaption_[test_unichar_ids[i]] == nullptr) { ambigs_for_adaption_[test_unichar_ids[i]] = new UnicharIdVector(); } @@ -139,10 +138,10 @@ void UnicharAmbigs::LoadUnicharAmbigs(const UNICHARSET &encoder_set, TFile *ambi ASSERT_HOST(id_to_insert != INVALID_UNICHAR_ID); // Add the new unichar id to adaption_ambigs_entry (only if the // vector does not already contain it) keeping it in sorted order. 
+ size_t j; for (j = 0; j < adaption_ambigs_entry->size() && (*adaption_ambigs_entry)[j] > id_to_insert; ++j) { - ; } if (j < adaption_ambigs_entry->size()) { if ((*adaption_ambigs_entry)[j] != id_to_insert) { @@ -160,12 +159,12 @@ void UnicharAmbigs::LoadUnicharAmbigs(const UNICHARSET &encoder_set, TFile *ambi // Fill in reverse_ambigs_for_adaption from ambigs_for_adaption vector. if (use_ambigs_for_adaption) { - for (i = 0; i < ambigs_for_adaption_.size(); ++i) { + for (size_t i = 0; i < ambigs_for_adaption_.size(); ++i) { adaption_ambigs_entry = ambigs_for_adaption_[i]; if (adaption_ambigs_entry == nullptr) { continue; } - for (j = 0; j < adaption_ambigs_entry->size(); ++j) { + for (size_t j = 0; j < adaption_ambigs_entry->size(); ++j) { UNICHAR_ID ambig_id = (*adaption_ambigs_entry)[j]; if (reverse_ambigs_for_adaption_[ambig_id] == nullptr) { reverse_ambigs_for_adaption_[ambig_id] = new UnicharIdVector(); @@ -179,7 +178,7 @@ void UnicharAmbigs::LoadUnicharAmbigs(const UNICHARSET &encoder_set, TFile *ambi if (debug_level > 1) { for (int tbl = 0; tbl < 2; ++tbl) { const UnicharAmbigsVector &print_table = (tbl == 0) ? replace_ambigs_ : dang_ambigs_; - for (i = 0; i < print_table.size(); ++i) { + for (size_t i = 0; i < print_table.size(); ++i) { AmbigSpec_LIST *lst = print_table[i]; if (lst == nullptr) { continue; @@ -202,12 +201,12 @@ void UnicharAmbigs::LoadUnicharAmbigs(const UNICHARSET &encoder_set, TFile *ambi for (int vec_id = 0; vec_id < 2; ++vec_id) { const std::vector &vec = (vec_id == 0) ? ambigs_for_adaption_ : reverse_ambigs_for_adaption_; - for (i = 0; i < vec.size(); ++i) { + for (size_t i = 0; i < vec.size(); ++i) { adaption_ambigs_entry = vec[i]; if (adaption_ambigs_entry != nullptr) { tprintf("%sAmbigs for adaption for %s:\n", (vec_id == 0) ? 
"" : "Reverse ", unicharset->debug_str(i).c_str()); - for (j = 0; j < adaption_ambigs_entry->size(); ++j) { + for (size_t j = 0; j < adaption_ambigs_entry->size(); ++j) { tprintf("%s ", unicharset->debug_str((*adaption_ambigs_entry)[j]).c_str()); } tprintf("\n"); @@ -246,7 +245,7 @@ bool UnicharAmbigs::ParseAmbiguityLine(int line_num, int version, int debug_leve return false; } // Copy encoded string to output. - for (int i = 0; i < unichars.size(); ++i) { + for (size_t i = 0; i < unichars.size(); ++i) { test_unichar_ids[i] = unichars[i]; } test_unichar_ids[unichars.size()] = INVALID_UNICHAR_ID; diff --git a/src/ccutil/ccutil.h b/src/ccutil/ccutil.h index 940ed72c3..e64199315 100644 --- a/src/ccutil/ccutil.h +++ b/src/ccutil/ccutil.h @@ -72,9 +72,8 @@ public: // Member parameters. // These have to be declared and initialized after params_ member, since // params_ should be initialized before parameters are added to it. - INT_VAR_H(ambigs_debug_level, 0, "Debug level for unichar ambiguities"); - BOOL_VAR_H(use_ambigs_for_adaption, false, - "Use ambigs for deciding whether to adapt to a character"); + INT_VAR_H(ambigs_debug_level); + BOOL_VAR_H(use_ambigs_for_adaption); }; } // namespace tesseract diff --git a/src/ccutil/clst.cpp b/src/ccutil/clst.cpp index 4a720b13e..c80eed833 100644 --- a/src/ccutil/clst.cpp +++ b/src/ccutil/clst.cpp @@ -89,7 +89,7 @@ void CLIST::assign_to_sublist( // to this list constexpr ERRCODE LIST_NOT_EMPTY("Destination list must be empty before extracting a sublist"); if (!empty()) { - LIST_NOT_EMPTY.error("CLIST.assign_to_sublist", ABORT, nullptr); + LIST_NOT_EMPTY.error("CLIST.assign_to_sublist", ABORT); } last = start_it->extract_sublist(end_it); @@ -246,9 +246,9 @@ void *CLIST_ITERATOR::data_relative( // get data + or - ... 
#ifndef NDEBUG if (!list) - NO_LIST.error("CLIST_ITERATOR::data_relative", ABORT, nullptr); + NO_LIST.error("CLIST_ITERATOR::data_relative", ABORT); if (list->empty()) - EMPTY_LIST.error("CLIST_ITERATOR::data_relative", ABORT, nullptr); + EMPTY_LIST.error("CLIST_ITERATOR::data_relative", ABORT); if (offset < -1) BAD_PARAMETER.error("CLIST_ITERATOR::data_relative", ABORT, "offset < -l"); #endif @@ -308,7 +308,7 @@ link */ /* Error if either current element is deleted */ if (!current || !other_it->current) { - DONT_EXCHANGE_DELETED.error("CLIST_ITERATOR.exchange", ABORT, nullptr); + DONT_EXCHANGE_DELETED.error("CLIST_ITERATOR.exchange", ABORT); } /* Now handle the 4 cases: doubleton list; non-doubleton adjacent elements @@ -389,12 +389,12 @@ CLIST_LINK *CLIST_ITERATOR::extract_sublist( // from this current constexpr ERRCODE DONT_EXTRACT_DELETED("Can't extract a sublist marked by deleted points"); if (list != other_it->list) - BAD_EXTRACTION_PTS.error("CLIST_ITERATOR.extract_sublist", ABORT, nullptr); + BAD_EXTRACTION_PTS.error("CLIST_ITERATOR.extract_sublist", ABORT); if (list->empty()) - EMPTY_LIST.error("CLIST_ITERATOR::extract_sublist", ABORT, nullptr); + EMPTY_LIST.error("CLIST_ITERATOR::extract_sublist", ABORT); if (!current || !other_it->current) - DONT_EXTRACT_DELETED.error("CLIST_ITERATOR.extract_sublist", ABORT, nullptr); + DONT_EXTRACT_DELETED.error("CLIST_ITERATOR.extract_sublist", ABORT); #endif ex_current_was_last = other_it->ex_current_was_last = false; @@ -404,7 +404,7 @@ CLIST_LINK *CLIST_ITERATOR::extract_sublist( // from this current temp_it.mark_cycle_pt(); do { // walk sublist if (temp_it.cycled_list()) { // can't find end pt - BAD_SUBLIST.error("CLIST_ITERATOR.extract_sublist", ABORT, nullptr); + BAD_SUBLIST.error("CLIST_ITERATOR.extract_sublist", ABORT); } if (temp_it.at_last()) { diff --git a/src/ccutil/clst.h b/src/ccutil/clst.h index 4c1b059ac..53b458f19 100644 --- a/src/ccutil/clst.h +++ b/src/ccutil/clst.h @@ -190,7 +190,7 @@ public: void 
*data() { // get current data #ifndef NDEBUG if (!list) { - NO_LIST.error("CLIST_ITERATOR::data", ABORT, nullptr); + NO_LIST.error("CLIST_ITERATOR::data", ABORT); } #endif return current->data; @@ -523,7 +523,7 @@ inline void *CLIST_ITERATOR::extract() { #ifndef NDEBUG if (!current) { // list empty or // element extracted - NULL_CURRENT.error("CLIST_ITERATOR::extract", ABORT, nullptr); + NULL_CURRENT.error("CLIST_ITERATOR::extract", ABORT); } #endif @@ -576,7 +576,7 @@ inline void *CLIST_ITERATOR::move_to_first() { inline void CLIST_ITERATOR::mark_cycle_pt() { #ifndef NDEBUG if (!list) { - NO_LIST.error("CLIST_ITERATOR::mark_cycle_pt", ABORT, nullptr); + NO_LIST.error("CLIST_ITERATOR::mark_cycle_pt", ABORT); } #endif @@ -666,7 +666,7 @@ inline void CLIST_ITERATOR::add_to_end( // element to add void *new_data) { #ifndef NDEBUG if (!list) { - NO_LIST.error("CLIST_ITERATOR::add_to_end", ABORT, nullptr); + NO_LIST.error("CLIST_ITERATOR::add_to_end", ABORT); } if (!new_data) { BAD_PARAMETER.error("CLIST_ITERATOR::add_to_end", ABORT, "new_data is nullptr"); diff --git a/src/ccutil/elst.cpp b/src/ccutil/elst.cpp index 4ee9e733a..2cac5fd14 100644 --- a/src/ccutil/elst.cpp +++ b/src/ccutil/elst.cpp @@ -70,7 +70,7 @@ void ELIST::assign_to_sublist( // to this list constexpr ERRCODE LIST_NOT_EMPTY("Destination list must be empty before extracting a sublist"); if (!empty()) { - LIST_NOT_EMPTY.error("ELIST.assign_to_sublist", ABORT, nullptr); + LIST_NOT_EMPTY.error("ELIST.assign_to_sublist", ABORT); } last = start_it->extract_sublist(end_it); @@ -169,7 +169,7 @@ ELIST_LINK *ELIST::add_sorted_and_find(int comparator(const void *, const void * ELIST_LINK *ELIST_ITERATOR::forward() { #ifndef NDEBUG if (!list) - NO_LIST.error("ELIST_ITERATOR::forward", ABORT, nullptr); + NO_LIST.error("ELIST_ITERATOR::forward", ABORT); #endif if (list->empty()) { return nullptr; @@ -189,13 +189,17 @@ ELIST_LINK *ELIST_ITERATOR::forward() { } #ifndef NDEBUG if (!current) - 
NULL_DATA.error("ELIST_ITERATOR::forward", ABORT, nullptr); + NULL_DATA.error("ELIST_ITERATOR::forward", ABORT); #endif next = current->next; #ifndef NDEBUG - if (!next) - NULL_NEXT.error("ELIST_ITERATOR::forward", ABORT, "This is: %p Current is: %p", this, current); + if (!next) { + NULL_NEXT.error("ELIST_ITERATOR::forward", ABORT, + "This is: %p Current is: %p", + static_cast(this), + static_cast(current)); + } #endif return current; } @@ -214,9 +218,9 @@ ELIST_LINK *ELIST_ITERATOR::data_relative( // get data + or - ... #ifndef NDEBUG if (!list) - NO_LIST.error("ELIST_ITERATOR::data_relative", ABORT, nullptr); + NO_LIST.error("ELIST_ITERATOR::data_relative", ABORT); if (list->empty()) - EMPTY_LIST.error("ELIST_ITERATOR::data_relative", ABORT, nullptr); + EMPTY_LIST.error("ELIST_ITERATOR::data_relative", ABORT); if (offset < -1) BAD_PARAMETER.error("ELIST_ITERATOR::data_relative", ABORT, "offset < -l"); #endif @@ -231,7 +235,7 @@ ELIST_LINK *ELIST_ITERATOR::data_relative( // get data + or - ... #ifndef NDEBUG if (!ptr) - NULL_DATA.error("ELIST_ITERATOR::data_relative", ABORT, nullptr); + NULL_DATA.error("ELIST_ITERATOR::data_relative", ABORT); #endif return ptr; @@ -248,7 +252,7 @@ ELIST_LINK *ELIST_ITERATOR::data_relative( // get data + or - ... 
ELIST_LINK *ELIST_ITERATOR::move_to_last() { #ifndef NDEBUG if (!list) - NO_LIST.error("ELIST_ITERATOR::move_to_last", ABORT, nullptr); + NO_LIST.error("ELIST_ITERATOR::move_to_last", ABORT); #endif while (current != list->last) { @@ -276,7 +280,7 @@ void ELIST_ITERATOR::exchange( // positions of 2 links #ifndef NDEBUG if (!list) - NO_LIST.error("ELIST_ITERATOR::exchange", ABORT, nullptr); + NO_LIST.error("ELIST_ITERATOR::exchange", ABORT); if (!other_it) BAD_PARAMETER.error("ELIST_ITERATOR::exchange", ABORT, "other_it nullptr"); if (!(other_it->list)) @@ -293,7 +297,7 @@ link */ /* Error if either current element is deleted */ if (!current || !other_it->current) { - DONT_EXCHANGE_DELETED.error("ELIST_ITERATOR.exchange", ABORT, nullptr); + DONT_EXCHANGE_DELETED.error("ELIST_ITERATOR.exchange", ABORT); } /* Now handle the 4 cases: doubleton list; non-doubleton adjacent elements @@ -379,14 +383,14 @@ ELIST_LINK *ELIST_ITERATOR::extract_sublist( // from this current if (!other_it) BAD_PARAMETER.error("ELIST_ITERATOR::extract_sublist", ABORT, "other_it nullptr"); if (!list) - NO_LIST.error("ELIST_ITERATOR::extract_sublist", ABORT, nullptr); + NO_LIST.error("ELIST_ITERATOR::extract_sublist", ABORT); if (list != other_it->list) - BAD_EXTRACTION_PTS.error("ELIST_ITERATOR.extract_sublist", ABORT, nullptr); + BAD_EXTRACTION_PTS.error("ELIST_ITERATOR.extract_sublist", ABORT); if (list->empty()) - EMPTY_LIST.error("ELIST_ITERATOR::extract_sublist", ABORT, nullptr); + EMPTY_LIST.error("ELIST_ITERATOR::extract_sublist", ABORT); if (!current || !other_it->current) - DONT_EXTRACT_DELETED.error("ELIST_ITERATOR.extract_sublist", ABORT, nullptr); + DONT_EXTRACT_DELETED.error("ELIST_ITERATOR.extract_sublist", ABORT); #endif ex_current_was_last = other_it->ex_current_was_last = false; @@ -396,7 +400,7 @@ ELIST_LINK *ELIST_ITERATOR::extract_sublist( // from this current temp_it.mark_cycle_pt(); do { // walk sublist if (temp_it.cycled_list()) { // can't find end pt - 
BAD_SUBLIST.error("ELIST_ITERATOR.extract_sublist", ABORT, nullptr); + BAD_SUBLIST.error("ELIST_ITERATOR.extract_sublist", ABORT); } if (temp_it.at_last()) { diff --git a/src/ccutil/elst.h b/src/ccutil/elst.h index 7bc294fc4..ae6e19816 100644 --- a/src/ccutil/elst.h +++ b/src/ccutil/elst.h @@ -121,7 +121,7 @@ public: // destroy all links void internal_clear(void (*zapper)(void *)); - bool empty() const { + bool empty() const { return !last; } @@ -231,10 +231,10 @@ public: ELIST_LINK *data() { // get current data #ifndef NDEBUG if (!list) { - NO_LIST.error("ELIST_ITERATOR::data", ABORT, nullptr); + NO_LIST.error("ELIST_ITERATOR::data", ABORT); } if (!current) { - NULL_DATA.error("ELIST_ITERATOR::data", ABORT, nullptr); + NULL_DATA.error("ELIST_ITERATOR::data", ABORT); } #endif return current; @@ -256,7 +256,7 @@ public: bool empty() const { // is list empty? #ifndef NDEBUG if (!list) { - NO_LIST.error("ELIST_ITERATOR::empty", ABORT, nullptr); + NO_LIST.error("ELIST_ITERATOR::empty", ABORT); } #endif return list->empty(); @@ -334,13 +334,13 @@ inline void ELIST_ITERATOR::add_after_then_move( // element to add ELIST_LINK *new_element) { #ifndef NDEBUG if (!list) { - NO_LIST.error("ELIST_ITERATOR::add_after_then_move", ABORT, nullptr); + NO_LIST.error("ELIST_ITERATOR::add_after_then_move", ABORT); } if (!new_element) { BAD_PARAMETER.error("ELIST_ITERATOR::add_after_then_move", ABORT, "new_element is nullptr"); } if (new_element->next) { - STILL_LINKED.error("ELIST_ITERATOR::add_after_then_move", ABORT, nullptr); + STILL_LINKED.error("ELIST_ITERATOR::add_after_then_move", ABORT); } #endif @@ -381,13 +381,13 @@ inline void ELIST_ITERATOR::add_after_stay_put( // element to add ELIST_LINK *new_element) { #ifndef NDEBUG if (!list) { - NO_LIST.error("ELIST_ITERATOR::add_after_stay_put", ABORT, nullptr); + NO_LIST.error("ELIST_ITERATOR::add_after_stay_put", ABORT); } if (!new_element) { BAD_PARAMETER.error("ELIST_ITERATOR::add_after_stay_put", ABORT, "new_element is 
nullptr"); } if (new_element->next) { - STILL_LINKED.error("ELIST_ITERATOR::add_after_stay_put", ABORT, nullptr); + STILL_LINKED.error("ELIST_ITERATOR::add_after_stay_put", ABORT); } #endif @@ -430,13 +430,13 @@ inline void ELIST_ITERATOR::add_before_then_move( // element to add ELIST_LINK *new_element) { #ifndef NDEBUG if (!list) { - NO_LIST.error("ELIST_ITERATOR::add_before_then_move", ABORT, nullptr); + NO_LIST.error("ELIST_ITERATOR::add_before_then_move", ABORT); } if (!new_element) { BAD_PARAMETER.error("ELIST_ITERATOR::add_before_then_move", ABORT, "new_element is nullptr"); } if (new_element->next) { - STILL_LINKED.error("ELIST_ITERATOR::add_before_then_move", ABORT, nullptr); + STILL_LINKED.error("ELIST_ITERATOR::add_before_then_move", ABORT); } #endif @@ -473,13 +473,13 @@ inline void ELIST_ITERATOR::add_before_stay_put( // element to add ELIST_LINK *new_element) { #ifndef NDEBUG if (!list) { - NO_LIST.error("ELIST_ITERATOR::add_before_stay_put", ABORT, nullptr); + NO_LIST.error("ELIST_ITERATOR::add_before_stay_put", ABORT); } if (!new_element) { BAD_PARAMETER.error("ELIST_ITERATOR::add_before_stay_put", ABORT, "new_element is nullptr"); } if (new_element->next) { - STILL_LINKED.error("ELIST_ITERATOR::add_before_stay_put", ABORT, nullptr); + STILL_LINKED.error("ELIST_ITERATOR::add_before_stay_put", ABORT); } #endif @@ -517,7 +517,7 @@ inline void ELIST_ITERATOR::add_before_stay_put( // element to add inline void ELIST_ITERATOR::add_list_after(ELIST *list_to_add) { #ifndef NDEBUG if (!list) { - NO_LIST.error("ELIST_ITERATOR::add_list_after", ABORT, nullptr); + NO_LIST.error("ELIST_ITERATOR::add_list_after", ABORT); } if (!list_to_add) { BAD_PARAMETER.error("ELIST_ITERATOR::add_list_after", ABORT, "list_to_add is nullptr"); @@ -564,7 +564,7 @@ inline void ELIST_ITERATOR::add_list_after(ELIST *list_to_add) { inline void ELIST_ITERATOR::add_list_before(ELIST *list_to_add) { #ifndef NDEBUG if (!list) { - NO_LIST.error("ELIST_ITERATOR::add_list_before", ABORT, 
nullptr); + NO_LIST.error("ELIST_ITERATOR::add_list_before", ABORT); } if (!list_to_add) { BAD_PARAMETER.error("ELIST_ITERATOR::add_list_before", ABORT, "list_to_add is nullptr"); @@ -612,11 +612,11 @@ inline ELIST_LINK *ELIST_ITERATOR::extract() { #ifndef NDEBUG if (!list) { - NO_LIST.error("ELIST_ITERATOR::extract", ABORT, nullptr); + NO_LIST.error("ELIST_ITERATOR::extract", ABORT); } if (!current) { // list empty or // element extracted - NULL_CURRENT.error("ELIST_ITERATOR::extract", ABORT, nullptr); + NULL_CURRENT.error("ELIST_ITERATOR::extract", ABORT); } #endif @@ -649,7 +649,7 @@ inline ELIST_LINK *ELIST_ITERATOR::extract() { inline ELIST_LINK *ELIST_ITERATOR::move_to_first() { #ifndef NDEBUG if (!list) { - NO_LIST.error("ELIST_ITERATOR::move_to_first", ABORT, nullptr); + NO_LIST.error("ELIST_ITERATOR::move_to_first", ABORT); } #endif @@ -673,7 +673,7 @@ inline ELIST_LINK *ELIST_ITERATOR::move_to_first() { inline void ELIST_ITERATOR::mark_cycle_pt() { #ifndef NDEBUG if (!list) { - NO_LIST.error("ELIST_ITERATOR::mark_cycle_pt", ABORT, nullptr); + NO_LIST.error("ELIST_ITERATOR::mark_cycle_pt", ABORT); } #endif @@ -695,7 +695,7 @@ inline void ELIST_ITERATOR::mark_cycle_pt() { inline bool ELIST_ITERATOR::at_first() const { #ifndef NDEBUG if (!list) { - NO_LIST.error("ELIST_ITERATOR::at_first", ABORT, nullptr); + NO_LIST.error("ELIST_ITERATOR::at_first", ABORT); } #endif @@ -715,7 +715,7 @@ inline bool ELIST_ITERATOR::at_first() const { inline bool ELIST_ITERATOR::at_last() const { #ifndef NDEBUG if (!list) { - NO_LIST.error("ELIST_ITERATOR::at_last", ABORT, nullptr); + NO_LIST.error("ELIST_ITERATOR::at_last", ABORT); } #endif @@ -735,7 +735,7 @@ inline bool ELIST_ITERATOR::at_last() const { inline bool ELIST_ITERATOR::cycled_list() const { #ifndef NDEBUG if (!list) { - NO_LIST.error("ELIST_ITERATOR::cycled_list", ABORT, nullptr); + NO_LIST.error("ELIST_ITERATOR::cycled_list", ABORT); } #endif @@ -754,7 +754,7 @@ inline void ELIST_ITERATOR::sort( // sort elements 
const void *, const void *)) { #ifndef NDEBUG if (!list) { - NO_LIST.error("ELIST_ITERATOR::sort", ABORT, nullptr); + NO_LIST.error("ELIST_ITERATOR::sort", ABORT); } #endif @@ -776,13 +776,13 @@ inline void ELIST_ITERATOR::add_to_end( // element to add ELIST_LINK *new_element) { #ifndef NDEBUG if (!list) { - NO_LIST.error("ELIST_ITERATOR::add_to_end", ABORT, nullptr); + NO_LIST.error("ELIST_ITERATOR::add_to_end", ABORT); } if (!new_element) { BAD_PARAMETER.error("ELIST_ITERATOR::add_to_end", ABORT, "new_element is nullptr"); } if (new_element->next) { - STILL_LINKED.error("ELIST_ITERATOR::add_to_end", ABORT, nullptr); + STILL_LINKED.error("ELIST_ITERATOR::add_to_end", ABORT); } #endif diff --git a/src/ccutil/elst2.cpp b/src/ccutil/elst2.cpp index 22ef6d9f7..64d22fdb6 100644 --- a/src/ccutil/elst2.cpp +++ b/src/ccutil/elst2.cpp @@ -71,7 +71,7 @@ void ELIST2::assign_to_sublist( // to this list constexpr ERRCODE LIST_NOT_EMPTY("Destination list must be empty before extracting a sublist"); if (!empty()) { - LIST_NOT_EMPTY.error("ELIST2.assign_to_sublist", ABORT, nullptr); + LIST_NOT_EMPTY.error("ELIST2.assign_to_sublist", ABORT); } last = start_it->extract_sublist(end_it); @@ -162,7 +162,7 @@ void ELIST2::add_sorted(int comparator(const void *, const void *), ELIST2_LINK ELIST2_LINK *ELIST2_ITERATOR::forward() { #ifndef NDEBUG if (!list) - NO_LIST.error("ELIST2_ITERATOR::forward", ABORT, nullptr); + NO_LIST.error("ELIST2_ITERATOR::forward", ABORT); #endif if (list->empty()) { return nullptr; @@ -183,15 +183,18 @@ ELIST2_LINK *ELIST2_ITERATOR::forward() { #ifndef NDEBUG if (!current) - NULL_DATA.error("ELIST2_ITERATOR::forward", ABORT, nullptr); + NULL_DATA.error("ELIST2_ITERATOR::forward", ABORT); #endif next = current->next; #ifndef NDEBUG - if (!next) - NULL_NEXT.error("ELIST2_ITERATOR::forward", ABORT, "This is: %p Current is: %p", this, - current); + if (!next) { + NULL_NEXT.error("ELIST2_ITERATOR::forward", ABORT, + "This is: %p Current is: %p", + 
static_cast(this), + static_cast(current)); + } #endif return current; @@ -207,7 +210,7 @@ ELIST2_LINK *ELIST2_ITERATOR::forward() { ELIST2_LINK *ELIST2_ITERATOR::backward() { #ifndef NDEBUG if (!list) - NO_LIST.error("ELIST2_ITERATOR::backward", ABORT, nullptr); + NO_LIST.error("ELIST2_ITERATOR::backward", ABORT); #endif if (list->empty()) { return nullptr; @@ -228,10 +231,13 @@ ELIST2_LINK *ELIST2_ITERATOR::backward() { #ifndef NDEBUG if (!current) - NULL_DATA.error("ELIST2_ITERATOR::backward", ABORT, nullptr); - if (!prev) - NULL_PREV.error("ELIST2_ITERATOR::backward", ABORT, "This is: %p Current is: %p", this, - current); + NULL_DATA.error("ELIST2_ITERATOR::backward", ABORT); + if (!prev) { + NULL_PREV.error("ELIST2_ITERATOR::backward", ABORT, + "This is: %p Current is: %p", + static_cast(this), + static_cast(current)); + } #endif prev = current->prev; @@ -251,9 +257,9 @@ ELIST2_LINK *ELIST2_ITERATOR::data_relative( // get data + or - .. #ifndef NDEBUG if (!list) - NO_LIST.error("ELIST2_ITERATOR::data_relative", ABORT, nullptr); + NO_LIST.error("ELIST2_ITERATOR::data_relative", ABORT); if (list->empty()) - EMPTY_LIST.error("ELIST2_ITERATOR::data_relative", ABORT, nullptr); + EMPTY_LIST.error("ELIST2_ITERATOR::data_relative", ABORT); #endif if (offset < 0) { @@ -268,7 +274,7 @@ ELIST2_LINK *ELIST2_ITERATOR::data_relative( // get data + or - .. 
#ifndef NDEBUG if (!ptr) - NULL_DATA.error("ELIST2_ITERATOR::data_relative", ABORT, nullptr); + NULL_DATA.error("ELIST2_ITERATOR::data_relative", ABORT); #endif return ptr; @@ -292,7 +298,7 @@ void ELIST2_ITERATOR::exchange( // positions of 2 links #ifndef NDEBUG if (!list) - NO_LIST.error("ELIST2_ITERATOR::exchange", ABORT, nullptr); + NO_LIST.error("ELIST2_ITERATOR::exchange", ABORT); if (!other_it) BAD_PARAMETER.error("ELIST2_ITERATOR::exchange", ABORT, "other_it nullptr"); if (!(other_it->list)) @@ -309,7 +315,7 @@ link */ /* Error if either current element is deleted */ if (!current || !other_it->current) { - DONT_EXCHANGE_DELETED.error("ELIST2_ITERATOR.exchange", ABORT, nullptr); + DONT_EXCHANGE_DELETED.error("ELIST2_ITERATOR.exchange", ABORT); } /* Now handle the 4 cases: doubleton list; non-doubleton adjacent elements @@ -407,14 +413,14 @@ ELIST2_LINK *ELIST2_ITERATOR::extract_sublist( // from this current if (!other_it) BAD_PARAMETER.error("ELIST2_ITERATOR::extract_sublist", ABORT, "other_it nullptr"); if (!list) - NO_LIST.error("ELIST2_ITERATOR::extract_sublist", ABORT, nullptr); + NO_LIST.error("ELIST2_ITERATOR::extract_sublist", ABORT); if (list != other_it->list) - BAD_EXTRACTION_PTS.error("ELIST2_ITERATOR.extract_sublist", ABORT, nullptr); + BAD_EXTRACTION_PTS.error("ELIST2_ITERATOR.extract_sublist", ABORT); if (list->empty()) - EMPTY_LIST.error("ELIST2_ITERATOR::extract_sublist", ABORT, nullptr); + EMPTY_LIST.error("ELIST2_ITERATOR::extract_sublist", ABORT); if (!current || !other_it->current) - DONT_EXTRACT_DELETED.error("ELIST2_ITERATOR.extract_sublist", ABORT, nullptr); + DONT_EXTRACT_DELETED.error("ELIST2_ITERATOR.extract_sublist", ABORT); #endif ex_current_was_last = other_it->ex_current_was_last = false; @@ -424,7 +430,7 @@ ELIST2_LINK *ELIST2_ITERATOR::extract_sublist( // from this current temp_it.mark_cycle_pt(); do { // walk sublist if (temp_it.cycled_list()) { // can't find end pt - BAD_SUBLIST.error("ELIST2_ITERATOR.extract_sublist", 
ABORT, nullptr); + BAD_SUBLIST.error("ELIST2_ITERATOR.extract_sublist", ABORT); } if (temp_it.at_last()) { diff --git a/src/ccutil/elst2.h b/src/ccutil/elst2.h index 59e2c7924..736c68893 100644 --- a/src/ccutil/elst2.h +++ b/src/ccutil/elst2.h @@ -191,10 +191,10 @@ public: ELIST2_LINK *data() { // get current data #ifndef NDEBUG if (!current) { - NULL_DATA.error("ELIST2_ITERATOR::data", ABORT, nullptr); + NULL_DATA.error("ELIST2_ITERATOR::data", ABORT); } if (!list) { - NO_LIST.error("ELIST2_ITERATOR::data", ABORT, nullptr); + NO_LIST.error("ELIST2_ITERATOR::data", ABORT); } #endif return current; @@ -219,7 +219,7 @@ public: bool empty() const { // is list empty? #ifndef NDEBUG if (!list) { - NO_LIST.error("ELIST2_ITERATOR::empty", ABORT, nullptr); + NO_LIST.error("ELIST2_ITERATOR::empty", ABORT); } #endif return list->empty(); @@ -301,13 +301,13 @@ inline void ELIST2_ITERATOR::add_after_then_move( // element to add ELIST2_LINK *new_element) { #ifndef NDEBUG if (!list) { - NO_LIST.error("ELIST2_ITERATOR::add_after_then_move", ABORT, nullptr); + NO_LIST.error("ELIST2_ITERATOR::add_after_then_move", ABORT); } if (!new_element) { BAD_PARAMETER.error("ELIST2_ITERATOR::add_after_then_move", ABORT, "new_element is nullptr"); } if (new_element->next) { - STILL_LINKED.error("ELIST2_ITERATOR::add_after_then_move", ABORT, nullptr); + STILL_LINKED.error("ELIST2_ITERATOR::add_after_then_move", ABORT); } #endif @@ -352,13 +352,13 @@ inline void ELIST2_ITERATOR::add_after_stay_put( // element to add ELIST2_LINK *new_element) { #ifndef NDEBUG if (!list) { - NO_LIST.error("ELIST2_ITERATOR::add_after_stay_put", ABORT, nullptr); + NO_LIST.error("ELIST2_ITERATOR::add_after_stay_put", ABORT); } if (!new_element) { BAD_PARAMETER.error("ELIST2_ITERATOR::add_after_stay_put", ABORT, "new_element is nullptr"); } if (new_element->next) { - STILL_LINKED.error("ELIST2_ITERATOR::add_after_stay_put", ABORT, nullptr); + STILL_LINKED.error("ELIST2_ITERATOR::add_after_stay_put", ABORT); } #endif 
@@ -405,13 +405,13 @@ inline void ELIST2_ITERATOR::add_before_then_move( // element to add ELIST2_LINK *new_element) { #ifndef NDEBUG if (!list) { - NO_LIST.error("ELIST2_ITERATOR::add_before_then_move", ABORT, nullptr); + NO_LIST.error("ELIST2_ITERATOR::add_before_then_move", ABORT); } if (!new_element) { BAD_PARAMETER.error("ELIST2_ITERATOR::add_before_then_move", ABORT, "new_element is nullptr"); } if (new_element->next) { - STILL_LINKED.error("ELIST2_ITERATOR::add_before_then_move", ABORT, nullptr); + STILL_LINKED.error("ELIST2_ITERATOR::add_before_then_move", ABORT); } #endif @@ -453,13 +453,13 @@ inline void ELIST2_ITERATOR::add_before_stay_put( // element to add ELIST2_LINK *new_element) { #ifndef NDEBUG if (!list) { - NO_LIST.error("ELIST2_ITERATOR::add_before_stay_put", ABORT, nullptr); + NO_LIST.error("ELIST2_ITERATOR::add_before_stay_put", ABORT); } if (!new_element) { BAD_PARAMETER.error("ELIST2_ITERATOR::add_before_stay_put", ABORT, "new_element is nullptr"); } if (new_element->next) { - STILL_LINKED.error("ELIST2_ITERATOR::add_before_stay_put", ABORT, nullptr); + STILL_LINKED.error("ELIST2_ITERATOR::add_before_stay_put", ABORT); } #endif @@ -502,7 +502,7 @@ inline void ELIST2_ITERATOR::add_before_stay_put( // element to add inline void ELIST2_ITERATOR::add_list_after(ELIST2 *list_to_add) { #ifndef NDEBUG if (!list) { - NO_LIST.error("ELIST2_ITERATOR::add_list_after", ABORT, nullptr); + NO_LIST.error("ELIST2_ITERATOR::add_list_after", ABORT); } if (!list_to_add) { BAD_PARAMETER.error("ELIST2_ITERATOR::add_list_after", ABORT, "list_to_add is nullptr"); @@ -553,7 +553,7 @@ inline void ELIST2_ITERATOR::add_list_after(ELIST2 *list_to_add) { inline void ELIST2_ITERATOR::add_list_before(ELIST2 *list_to_add) { #ifndef NDEBUG if (!list) { - NO_LIST.error("ELIST2_ITERATOR::add_list_before", ABORT, nullptr); + NO_LIST.error("ELIST2_ITERATOR::add_list_before", ABORT); } if (!list_to_add) { BAD_PARAMETER.error("ELIST2_ITERATOR::add_list_before", ABORT, 
"list_to_add is nullptr"); @@ -605,11 +605,11 @@ inline ELIST2_LINK *ELIST2_ITERATOR::extract() { #ifndef NDEBUG if (!list) { - NO_LIST.error("ELIST2_ITERATOR::extract", ABORT, nullptr); + NO_LIST.error("ELIST2_ITERATOR::extract", ABORT); } if (!current) { // list empty or // element extracted - NULL_CURRENT.error("ELIST2_ITERATOR::extract", ABORT, nullptr); + NULL_CURRENT.error("ELIST2_ITERATOR::extract", ABORT); } #endif @@ -646,7 +646,7 @@ inline ELIST2_LINK *ELIST2_ITERATOR::extract() { inline ELIST2_LINK *ELIST2_ITERATOR::move_to_first() { #ifndef NDEBUG if (!list) { - NO_LIST.error("ELIST2_ITERATOR::move_to_first", ABORT, nullptr); + NO_LIST.error("ELIST2_ITERATOR::move_to_first", ABORT); } #endif @@ -666,7 +666,7 @@ inline ELIST2_LINK *ELIST2_ITERATOR::move_to_first() { inline ELIST2_LINK *ELIST2_ITERATOR::move_to_last() { #ifndef NDEBUG if (!list) { - NO_LIST.error("ELIST2_ITERATOR::move_to_last", ABORT, nullptr); + NO_LIST.error("ELIST2_ITERATOR::move_to_last", ABORT); } #endif @@ -690,7 +690,7 @@ inline ELIST2_LINK *ELIST2_ITERATOR::move_to_last() { inline void ELIST2_ITERATOR::mark_cycle_pt() { #ifndef NDEBUG if (!list) { - NO_LIST.error("ELIST2_ITERATOR::mark_cycle_pt", ABORT, nullptr); + NO_LIST.error("ELIST2_ITERATOR::mark_cycle_pt", ABORT); } #endif @@ -712,7 +712,7 @@ inline void ELIST2_ITERATOR::mark_cycle_pt() { inline bool ELIST2_ITERATOR::at_first() const { #ifndef NDEBUG if (!list) { - NO_LIST.error("ELIST2_ITERATOR::at_first", ABORT, nullptr); + NO_LIST.error("ELIST2_ITERATOR::at_first", ABORT); } #endif @@ -732,7 +732,7 @@ inline bool ELIST2_ITERATOR::at_first() const { inline bool ELIST2_ITERATOR::at_last() const { #ifndef NDEBUG if (!list) { - NO_LIST.error("ELIST2_ITERATOR::at_last", ABORT, nullptr); + NO_LIST.error("ELIST2_ITERATOR::at_last", ABORT); } #endif @@ -752,7 +752,7 @@ inline bool ELIST2_ITERATOR::at_last() const { inline bool ELIST2_ITERATOR::cycled_list() const { #ifndef NDEBUG if (!list) { - 
NO_LIST.error("ELIST2_ITERATOR::cycled_list", ABORT, nullptr); + NO_LIST.error("ELIST2_ITERATOR::cycled_list", ABORT); } #endif @@ -771,7 +771,7 @@ inline void ELIST2_ITERATOR::sort( // sort elements const void *, const void *)) { #ifndef NDEBUG if (!list) { - NO_LIST.error("ELIST2_ITERATOR::sort", ABORT, nullptr); + NO_LIST.error("ELIST2_ITERATOR::sort", ABORT); } #endif @@ -793,13 +793,13 @@ inline void ELIST2_ITERATOR::add_to_end( // element to add ELIST2_LINK *new_element) { #ifndef NDEBUG if (!list) { - NO_LIST.error("ELIST2_ITERATOR::add_to_end", ABORT, nullptr); + NO_LIST.error("ELIST2_ITERATOR::add_to_end", ABORT); } if (!new_element) { BAD_PARAMETER.error("ELIST2_ITERATOR::add_to_end", ABORT, "new_element is nullptr"); } if (new_element->next) { - STILL_LINKED.error("ELIST2_ITERATOR::add_to_end", ABORT, nullptr); + STILL_LINKED.error("ELIST2_ITERATOR::add_to_end", ABORT); } #endif diff --git a/src/ccutil/errcode.cpp b/src/ccutil/errcode.cpp index fb8f8c11a..dddc1231b 100644 --- a/src/ccutil/errcode.cpp +++ b/src/ccutil/errcode.cpp @@ -91,8 +91,12 @@ void ERRCODE::error( // handle error #endif abort(); default: - BADERRACTION.error("error", ABORT, nullptr); + BADERRACTION.error("error", ABORT); } } +void ERRCODE::error(const char *caller, TessErrorLogCode action) const { + error(caller, action, nullptr); +} + } // namespace tesseract diff --git a/src/ccutil/errcode.h b/src/ccutil/errcode.h index 4c64defba..51cd4f05b 100644 --- a/src/ccutil/errcode.h +++ b/src/ccutil/errcode.h @@ -37,6 +37,10 @@ enum TessErrorLogCode { #define MEMORY_ABORT 2 #define FILE_ABORT 3 +#if !defined(__GNUC__) && !defined(__attribute__) +# define __attribute__(attr) // compiler without support for __attribute__ +#endif + class TESS_API ERRCODE { // error handler class const char *message; // error message public: @@ -44,7 +48,8 @@ public: const char *caller, // function location TessErrorLogCode action, // action to take const char *format, ... 
// fprintf format - ) const; + ) const __attribute__((format(printf, 4, 5))); + void error(const char *caller, TessErrorLogCode action) const; constexpr ERRCODE(const char *string) : message(string) {} // initialize with string }; diff --git a/src/ccutil/genericvector.h b/src/ccutil/genericvector.h index aa5210c38..105b79a52 100644 --- a/src/ccutil/genericvector.h +++ b/src/ccutil/genericvector.h @@ -71,7 +71,7 @@ public: } // Return the size used. - int size() const { + unsigned size() const { return size_used_; } // Workaround to avoid g++ -Wsign-compare warnings. @@ -103,12 +103,6 @@ public: // Return the index of the T object. int get_index(const T &object) const; - // Return true if T is in the array - bool contains(const T &object) const; - - // Return true if the index is valid - T contains_index(int index) const; - // Push an element in the end of the array int push_back(T object); void operator+=(const T &t); @@ -308,7 +302,7 @@ inline bool SaveDataToFile(const GenericVector &data, const char *filename if (fp == nullptr) { return false; } - bool result = static_cast(fwrite(&data[0], 1, data.size(), fp)) == data.size(); + bool result = fwrite(&data[0], 1, data.size(), fp) == data.size(); fclose(fp); return result; } @@ -373,7 +367,7 @@ public: } PointerVector &operator+=(const PointerVector &other) { this->reserve(this->size_used_ + other.size_used_); - for (int i = 0; i < other.size(); ++i) { + for (unsigned i = 0; i < other.size(); ++i) { this->push_back(new T(*other.data_[i])); } return *this; @@ -615,12 +609,6 @@ void GenericVector::remove(int index) { size_used_--; } -// Return true if the index is valindex -template -T GenericVector::contains_index(int index) const { - return index >= 0 && index < size_used_; -} - // Return the index of the T object. 
template int GenericVector::get_index(const T &object) const { @@ -632,12 +620,6 @@ int GenericVector::get_index(const T &object) const { return -1; } -// Return true if T is in the array -template -bool GenericVector::contains(const T &object) const { - return get_index(object) != -1; -} - // Add an element in the array template int GenericVector::push_back(T object) { @@ -681,7 +663,7 @@ void GenericVector::operator+=(const T &t) { template GenericVector &GenericVector::operator+=(const GenericVector &other) { this->reserve(size_used_ + other.size_used_); - for (int i = 0; i < other.size(); ++i) { + for (unsigned i = 0; i < other.size(); ++i) { this->operator+=(other.data_[i]); } return *this; @@ -757,7 +739,7 @@ bool GenericVector::read(TFile *f, std::function cb) { } } } else { - if (f->FReadEndian(data_, sizeof(T), size_used_) != size_used_) { + if (f->FReadEndian(data_, sizeof(T), size_used_) != static_cast(size_used_)) { return false; } } diff --git a/src/ccutil/helpers.h b/src/ccutil/helpers.h index afdf78b23..d0084ec5f 100644 --- a/src/ccutil/helpers.h +++ b/src/ccutil/helpers.h @@ -21,6 +21,7 @@ #define TESSERACT_CCUTIL_HELPERS_H_ #include +#include // for INT_MIN, INT_MAX #include // std::isfinite #include #include @@ -173,6 +174,8 @@ inline int DivRounded(int a, int b) { // Return a double cast to int with rounding. inline int IntCastRounded(double x) { assert(std::isfinite(x)); + assert(x < INT_MAX); + assert(x > INT_MIN); return x >= 0.0 ? static_cast(x + 0.5) : -static_cast(-x + 0.5); } @@ -249,7 +252,7 @@ bool Serialize(FILE *fp, const std::vector &data) { uint32_t size = data.size(); if (fwrite(&size, sizeof(size), 1, fp) != 1) { return false; - } else if constexpr (std::is_class_v) { + } else if constexpr (std::is_class::value) { // Serialize a tesseract class. 
for (auto &item : data) { if (!item.Serialize(fp)) { diff --git a/src/ccutil/indexmapbidi.cpp b/src/ccutil/indexmapbidi.cpp index baf3a4475..fb5d20e6e 100644 --- a/src/ccutil/indexmapbidi.cpp +++ b/src/ccutil/indexmapbidi.cpp @@ -123,7 +123,7 @@ void IndexMapBiDi::Setup() { } compact_map_.clear(); compact_map_.resize(compact_size, -1); - for (int i = 0; i < sparse_map_.size(); ++i) { + for (size_t i = 0; i < sparse_map_.size(); ++i) { if (sparse_map_[i] >= 0) { compact_map_[sparse_map_[i]] = i; } @@ -187,7 +187,7 @@ void IndexMapBiDi::CompleteMerges() { // Re-generate the compact_map leaving holes for unused indices. compact_map_.clear(); compact_map_.resize(compact_size, -1); - for (int i = 0; i < sparse_map_.size(); ++i) { + for (size_t i = 0; i < sparse_map_.size(); ++i) { if (sparse_map_[i] >= 0) { if (compact_map_[sparse_map_[i]] == -1) { compact_map_[sparse_map_[i]] = i; @@ -198,7 +198,7 @@ void IndexMapBiDi::CompleteMerges() { // index went to in the compacted map. std::vector tmp_compact_map(compact_size, -1); compact_size = 0; - for (int i = 0; i < compact_map_.size(); ++i) { + for (size_t i = 0; i < compact_map_.size(); ++i) { if (compact_map_[i] >= 0) { tmp_compact_map[i] = compact_size; compact_map_[compact_size++] = compact_map_[i]; @@ -222,8 +222,8 @@ bool IndexMapBiDi::Serialize(FILE *fp) const { // then each additional sparse entry needs to be stored. // Normally we store only the compact map to save space. 
std::vector remaining_pairs; - for (int i = 0; i < sparse_map_.size(); ++i) { - if (sparse_map_[i] >= 0 && compact_map_[sparse_map_[i]] != i) { + for (unsigned i = 0; i < sparse_map_.size(); ++i) { + if (sparse_map_[i] >= 0 && static_cast(compact_map_[sparse_map_[i]]) != i) { remaining_pairs.push_back(i); remaining_pairs.push_back(sparse_map_[i]); } @@ -243,10 +243,10 @@ bool IndexMapBiDi::DeSerialize(bool swap, FILE *fp) { } sparse_map_.clear(); sparse_map_.resize(sparse_size_, -1); - for (int i = 0; i < compact_map_.size(); ++i) { + for (unsigned i = 0; i < compact_map_.size(); ++i) { sparse_map_[compact_map_[i]] = i; } - for (int i = 0; i < remaining_pairs.size(); ++i) { + for (size_t i = 0; i < remaining_pairs.size(); ++i) { int sparse_index = remaining_pairs[i++]; sparse_map_[sparse_index] = remaining_pairs[i]; } diff --git a/src/ccutil/mainblk.cpp b/src/ccutil/mainblk.cpp index 6e665b5fe..7135d23a2 100644 --- a/src/ccutil/mainblk.cpp +++ b/src/ccutil/mainblk.cpp @@ -26,12 +26,6 @@ #include "fileerr.h" namespace tesseract { -/********************************************************************** - * main_setup - * - * Main for mithras demo program. Read the arguments and set up globals. - **********************************************************************/ - /** * @brief CCUtil::main_setup - set location of tessdata and name of image * @@ -68,12 +62,8 @@ void CCUtil::main_setup(const std::string &argv0, const std::string &basename) { #endif /* _WIN32 */ #if defined(TESSDATA_PREFIX) } else { -/* Use tessdata prefix which was compiled in. */ -# define _STR(a) # a -# define _XSTR(a) _STR(a) - datadir = _XSTR(TESSDATA_PREFIX) "/tessdata"; -# undef _XSTR -# undef _STR + // Use tessdata prefix which was compiled in. 
+ datadir = TESSDATA_PREFIX "/tessdata"; #endif } diff --git a/src/ccutil/object_cache.h b/src/ccutil/object_cache.h index 7764bb291..8d442e4c8 100644 --- a/src/ccutil/object_cache.h +++ b/src/ccutil/object_cache.h @@ -43,7 +43,8 @@ public: tprintf( "ObjectCache(%p)::~ObjectCache(): WARNING! LEAK! object %p " "still has count %d (id %s)\n", - this, it.object, it.count, it.id.c_str()); + static_cast(this), static_cast(it.object), + it.count, it.id.c_str()); } else { delete it.object; it.object = nullptr; diff --git a/src/ccutil/params.h b/src/ccutil/params.h index 02186cdf8..ba8043b56 100644 --- a/src/ccutil/params.h +++ b/src/ccutil/params.h @@ -345,13 +345,13 @@ ParamsVectors *GlobalParams(); * (there is no such guarantee for parameters defined with the other macros). *************************************************************************/ -#define INT_VAR_H(name, val, comment) ::tesseract::IntParam name +#define INT_VAR_H(name) ::tesseract::IntParam name -#define BOOL_VAR_H(name, val, comment) ::tesseract::BoolParam name +#define BOOL_VAR_H(name) ::tesseract::BoolParam name -#define STRING_VAR_H(name, val, comment) ::tesseract::StringParam name +#define STRING_VAR_H(name) ::tesseract::StringParam name -#define double_VAR_H(name, val, comment) ::tesseract::DoubleParam name +#define double_VAR_H(name) ::tesseract::DoubleParam name #define INT_VAR(name, val, comment) \ ::tesseract::IntParam name(val, #name, comment, false, ::tesseract::GlobalParams()) diff --git a/src/ccutil/serialis.cpp b/src/ccutil/serialis.cpp index ceeacb462..d9c9a8d41 100644 --- a/src/ccutil/serialis.cpp +++ b/src/ccutil/serialis.cpp @@ -55,13 +55,13 @@ bool SaveDataToFile(const std::vector &data, const char *filename) { if (fp == nullptr) { return false; } - bool result = static_cast(fwrite(&data[0], 1, data.size(), fp)) == data.size(); + bool result = fwrite(&data[0], 1, data.size(), fp) == data.size(); fclose(fp); return result; } -TFile::TFile() - : data_(nullptr), offset_(0), 
data_is_owned_(false), is_writing_(false), swap_(false) {} +TFile::TFile() { +} TFile::~TFile() { if (data_is_owned_) { @@ -152,7 +152,7 @@ bool TFile::Open(const char *filename, FileReader reader) { } } -bool TFile::Open(const char *data, int size) { +bool TFile::Open(const char *data, size_t size) { offset_ = 0; if (!data_is_owned_) { data_ = new std::vector; @@ -181,7 +181,7 @@ bool TFile::Open(FILE *fp, int64_t end_offset) { return false; } } - int size = end_offset - current_pos; + size_t size = end_offset - current_pos; is_writing_ = false; swap_ = false; if (!data_is_owned_) { @@ -189,7 +189,7 @@ bool TFile::Open(FILE *fp, int64_t end_offset) { data_is_owned_ = true; } data_->resize(size); // TODO: optimize no init - return static_cast(fread(&(*data_)[0], 1, size, fp)) == size; + return fread(&(*data_)[0], 1, size, fp) == size; } char *TFile::FGets(char *buffer, int buffer_size) { @@ -207,21 +207,20 @@ char *TFile::FGets(char *buffer, int buffer_size) { return size > 0 ? buffer : nullptr; } -int TFile::FReadEndian(void *buffer, size_t size, int count) { - int num_read = FRead(buffer, size, count); +size_t TFile::FReadEndian(void *buffer, size_t size, size_t count) { + auto num_read = FRead(buffer, size, count); if (swap_ && size != 1) { char *char_buffer = static_cast(buffer); - for (int i = 0; i < num_read; ++i, char_buffer += size) { + for (size_t i = 0; i < num_read; ++i, char_buffer += size) { ReverseN(char_buffer, size); } } return num_read; } -int TFile::FRead(void *buffer, size_t size, int count) { +size_t TFile::FRead(void *buffer, size_t size, size_t count) { ASSERT_HOST(!is_writing_); ASSERT_HOST(size > 0); - ASSERT_HOST(count >= 0); size_t required_size; if (SIZE_MAX / size <= count) { // Avoid integer overflow. 
@@ -270,10 +269,9 @@ bool TFile::CloseWrite(const char *filename, FileWriter writer) { } } -int TFile::FWrite(const void *buffer, size_t size, int count) { +size_t TFile::FWrite(const void *buffer, size_t size, size_t count) { ASSERT_HOST(is_writing_); ASSERT_HOST(size > 0); - ASSERT_HOST(count >= 0); ASSERT_HOST(SIZE_MAX / size > count); size_t total = size * count; const char *buf = static_cast(buffer); diff --git a/src/ccutil/serialis.h b/src/ccutil/serialis.h index a07ed394e..2b985762f 100644 --- a/src/ccutil/serialis.h +++ b/src/ccutil/serialis.h @@ -76,7 +76,7 @@ public: // Note that mixed read/write is not supported. bool Open(const char *filename, FileReader reader); // From an existing memory buffer. - bool Open(const char *data, int size); + bool Open(const char *data, size_t size); // From an open file and an end offset. bool Open(FILE *fp, int64_t end_offset); // Sets the value of the swap flag, so that FReadEndian does the right thing. @@ -92,7 +92,7 @@ public: //bool DeSerialize(std::vector &data); template bool DeSerialize(T *data, size_t count = 1) { - return FReadEndian(data, sizeof(T), count) == static_cast(count); + return FReadEndian(data, sizeof(T), count) == count; } template bool DeSerialize(std::vector &data) { @@ -104,7 +104,7 @@ public: } else if (size > 50000000) { // Arbitrarily limit the number of elements to protect against bad data. return false; - } else if constexpr (std::is_same_v) { + } else if constexpr (std::is_same::value) { // Deserialize a string. // TODO: optimize. data.resize(size); @@ -113,7 +113,7 @@ public: return false; } } - } else if constexpr (std::is_class_v) { + } else if constexpr (std::is_class::value) { // Deserialize a tesseract class. // TODO: optimize. data.resize(size); @@ -122,7 +122,7 @@ public: return false; } } - } else if constexpr (std::is_pointer_v) { + } else if constexpr (std::is_pointer::value) { // Deserialize pointers. // TODO: optimize. 
data.resize(size); @@ -155,7 +155,7 @@ public: bool Serialize(const std::vector &data); template bool Serialize(const T *data, size_t count = 1) { - return FWrite(data, sizeof(T), count) == static_cast(count); + return FWrite(data, sizeof(T), count) == count; } template bool Serialize(const std::vector &data) { @@ -163,21 +163,21 @@ public: uint32_t size = data.size(); if (!Serialize(&size)) { return false; - } else if constexpr (std::is_same_v) { + } else if constexpr (std::is_same::value) { // Serialize strings. for (auto string : data) { if (!Serialize(string)) { return false; } } - } else if constexpr (std::is_class_v) { + } else if constexpr (std::is_class::value) { // Serialize a tesseract class. for (auto &item : data) { if (!item.Serialize(this)) { return false; } } - } else if constexpr (std::is_pointer_v) { + } else if constexpr (std::is_pointer::value) { // Serialize pointers. for (auto &item : data) { uint8_t non_null = (item != nullptr); @@ -207,9 +207,9 @@ public: // Replicates fread, followed by a swap of the bytes if needed, returning the // number of items read. If swap_ is true then the count items will each have // size bytes reversed. - int FReadEndian(void *buffer, size_t size, int count); + size_t FReadEndian(void *buffer, size_t size, size_t count); // Replicates fread, returning the number of items read. - int FRead(void *buffer, size_t size, int count); + size_t FRead(void *buffer, size_t size, size_t count); // Resets the TFile as if it has been Opened, but nothing read. // Only allowed while reading! void Rewind(); @@ -222,19 +222,19 @@ public: // Replicates fwrite, returning the number of items written. // To use fprintf, use snprintf and FWrite. - int FWrite(const void *buffer, size_t size, int count); + size_t FWrite(const void *buffer, size_t size, size_t count); private: // The buffered data from the file. - std::vector *data_; + std::vector *data_ = nullptr; // The number of bytes used so far. 
- int offset_; + unsigned offset_ = 0; // True if the data_ pointer is owned by *this. - bool data_is_owned_; + bool data_is_owned_ = false; // True if the TFile is open for writing. - bool is_writing_; + bool is_writing_ = false; // True if bytes need to be swapped in FReadEndian. - bool swap_; + bool swap_ = false; }; } // namespace tesseract. diff --git a/src/ccutil/tessdatamanager.cpp b/src/ccutil/tessdatamanager.cpp index 279cf7acc..ae8a62a1a 100644 --- a/src/ccutil/tessdatamanager.cpp +++ b/src/ccutil/tessdatamanager.cpp @@ -211,7 +211,7 @@ void TessdataManager::Clear() { // Prints a directory of contents. void TessdataManager::Directory() const { - tprintf("Version string:%s\n", VersionString().c_str()); + tprintf("Version:%s\n", VersionString().c_str()); auto offset = TESSDATA_NUM_ENTRIES * sizeof(int64_t); for (unsigned i = 0; i < TESSDATA_NUM_ENTRIES; ++i) { if (!entries_[i].empty()) { diff --git a/src/ccutil/tesstypes.h b/src/ccutil/tesstypes.h new file mode 100644 index 000000000..6866225f2 --- /dev/null +++ b/src/ccutil/tesstypes.h @@ -0,0 +1,44 @@ +/////////////////////////////////////////////////////////////////////// +// File: tesstypes.h +// Description: Simple data types used by Tesseract code. +// Author: Stefan Weil +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+/////////////////////////////////////////////////////////////////////// + +#ifndef TESSERACT_TESSTYPES_H +#define TESSERACT_TESSTYPES_H + +#ifdef HAVE_CONFIG_H +# include "config_auto.h" // FAST_FLOAT +#endif + +#include // for int16_t, int32_t + +namespace tesseract { + +// Image dimensions (width and height, coordinates). +#if defined(LARGE_IMAGES) +using TDimension = int32_t; +#else +using TDimension = int16_t; +#endif + +// Floating point data type used for LSTM calculations. +#if defined(FAST_FLOAT) +using TFloat = float; +#else +using TFloat = double; +#endif + +} + +#endif // TESSERACT_TESSTYPES_H diff --git a/src/ccutil/tprintf.cpp b/src/ccutil/tprintf.cpp index 4e392db6e..3c5f7e2fc 100644 --- a/src/ccutil/tprintf.cpp +++ b/src/ccutil/tprintf.cpp @@ -25,6 +25,7 @@ #include "params.h" +#include // for INT_MAX #include #include @@ -32,6 +33,8 @@ namespace tesseract { #define MAX_MSG_LEN 2048 +INT_VAR(log_level, INT_MAX, "Logging level"); + static STRING_VAR(debug_file, "", "File to send tprintf output to"); // Trace printf diff --git a/src/ccutil/tprintf.h b/src/ccutil/tprintf.h index 5f76687a4..6d15f33f1 100644 --- a/src/ccutil/tprintf.h +++ b/src/ccutil/tprintf.h @@ -2,7 +2,6 @@ * File: tprintf.h * Description: Trace version of printf - portable between UX and NT * Author: Phil Cheatle - * Created: Wed Jun 28 15:01:15 BST 1995 * * (C) Copyright 1995, Hewlett-Packard Ltd. ** Licensed under the Apache License, Version 2.0 (the "License"); @@ -20,13 +19,22 @@ #ifndef TESSERACT_CCUTIL_TPRINTF_H #define TESSERACT_CCUTIL_TPRINTF_H +#include "params.h" // for BOOL_VAR_H #include // for TESS_API namespace tesseract { +#if !defined(__GNUC__) && !defined(__attribute__) +# define __attribute__(attr) // compiler without support for __attribute__ +#endif + +// Disable some log messages by setting log_level > 0. +extern TESS_API INT_VAR_H(log_level); + // Main logging function. 
extern TESS_API void tprintf( // Trace printf - const char *format, ...); // Message + const char *format, ...) // Message + __attribute__((format(printf, 1, 2))); } // namespace tesseract diff --git a/src/ccutil/unicharcompress.cpp b/src/ccutil/unicharcompress.cpp index c94722143..d5efccabc 100644 --- a/src/ccutil/unicharcompress.cpp +++ b/src/ccutil/unicharcompress.cpp @@ -61,7 +61,7 @@ static bool DecodeRadicalLine(std::string &radical_data_line, RSMap *radical_map return false; } std::unique_ptr> radicals(new std::vector); - for (int i = 1; i < entries.size(); ++i) { + for (size_t i = 1; i < entries.size(); ++i) { int radical = strtol(&entries[i][0], &end, 10); if (*end != '\0') { return false; @@ -78,7 +78,7 @@ static bool DecodeRadicalLine(std::string &radical_data_line, RSMap *radical_map // is unlikely to want to use it again. static bool DecodeRadicalTable(std::string &radical_data, RSMap *radical_map) { std::vector lines = split(radical_data, '\n'); - for (int i = 0; i < lines.size(); ++i) { + for (unsigned i = 0; i < lines.size(); ++i) { if (!DecodeRadicalLine(lines[i], radical_map)) { tprintf("Invalid format in radical table at line %d: %s\n", i, lines[i].c_str()); return false; @@ -132,10 +132,10 @@ bool UnicharCompress::ComputeEncoding(const UNICHARSET &unicharset, int null_id, // to measure the number of radicals and strokes, initially we use the same // code range for all 3 Han code positions, and fix them after. int han_offset = hangul_offset + kTotalJamos; - for (int u = 0; u <= unicharset.size(); ++u) { + for (unsigned u = 0; u <= unicharset.size(); ++u) { // We special-case allow null_id to be equal to unicharset.size() in case // there is no space in unicharset for it. - if (u == unicharset.size() && u != null_id) { + if (u == unicharset.size() && static_cast(u) != null_id) { break; // Finished } RecodedCharID code; @@ -173,7 +173,7 @@ bool UnicharCompress::ComputeEncoding(const UNICHARSET &unicharset, int null_id, // Special cases. 
if (u == UNICHAR_SPACE) { code.Set(0, 0); // Space. - } else if (u == null_id || + } else if (static_cast(u) == null_id || (unicharset.has_special_codes() && u < SPECIAL_UNICHAR_CODES_COUNT)) { code.Set(0, direct_set.unichar_to_id(kNullChar)); } else { @@ -207,7 +207,7 @@ bool UnicharCompress::ComputeEncoding(const UNICHARSET &unicharset, int null_id, int code_offset = 0; for (int i = 0; i < RecodedCharID::kMaxCodeLen; ++i) { int max_offset = 0; - for (int u = 0; u < unicharset.size(); ++u) { + for (unsigned u = 0; u < unicharset.size(); ++u) { RecodedCharID *code = &encoder_[u]; if (code->length() <= i) { continue; @@ -229,7 +229,7 @@ bool UnicharCompress::ComputeEncoding(const UNICHARSET &unicharset, int null_id, // passes them through unchanged. void UnicharCompress::SetupPassThrough(const UNICHARSET &unicharset) { std::vector codes; - for (int u = 0; u < unicharset.size(); ++u) { + for (unsigned u = 0; u < unicharset.size(); ++u) { RecodedCharID code; code.Set(0, u); codes.push_back(code); @@ -265,10 +265,10 @@ void UnicharCompress::DefragmentCodeValues(int encoded_null) { } // Compute offsets based on code use. int offset = 0; - for (int i = 0; i < offsets.size(); ++i) { + for (unsigned i = 0; i < offsets.size(); ++i) { // If not used, decrement everything above here. // We are moving encoded_null to the end, so it is not "used". - if (offsets[i] == 0 || i == encoded_null) { + if (offsets[i] == 0 || i == static_cast(encoded_null)) { --offset; } else { offsets[i] = offset; @@ -292,8 +292,8 @@ void UnicharCompress::DefragmentCodeValues(int encoded_null) { // Encodes a single unichar_id. 
Returns the length of the code, or zero if // invalid input, and the encoding itself -int UnicharCompress::EncodeUnichar(int unichar_id, RecodedCharID *code) const { - if (unichar_id < 0 || unichar_id >= encoder_.size()) { +int UnicharCompress::EncodeUnichar(unsigned unichar_id, RecodedCharID *code) const { + if (unichar_id >= encoder_.size()) { return 0; } *code = encoder_[unichar_id]; @@ -338,7 +338,7 @@ bool UnicharCompress::DeSerialize(TFile *fp) { // See the class comment above for details. std::string UnicharCompress::GetEncodingAsString(const UNICHARSET &unicharset) const { std::string encoding; - for (int c = 0; c < encoder_.size(); ++c) { + for (unsigned c = 0; c < encoder_.size(); ++c) { const RecodedCharID &code = encoder_[c]; if (0 < c && c < SPECIAL_UNICHAR_CODES_COUNT && code == encoder_[c - 1]) { // Don't show the duplicate entry. @@ -395,8 +395,9 @@ void UnicharCompress::ComputeCodeRange() { // Initializes the decoding hash_map from the encoding array. void UnicharCompress::SetupDecoder() { Cleanup(); - is_valid_start_.resize(code_range_, false); - for (int c = 0; c < encoder_.size(); ++c) { + is_valid_start_.clear(); + is_valid_start_.resize(code_range_); + for (unsigned c = 0; c < encoder_.size(); ++c) { const RecodedCharID &code = encoder_[c]; decoder_[code] = c; is_valid_start_[code(0)] = true; diff --git a/src/ccutil/unicharcompress.h b/src/ccutil/unicharcompress.h index 581b09ef4..2e81bbde5 100644 --- a/src/ccutil/unicharcompress.h +++ b/src/ccutil/unicharcompress.h @@ -174,7 +174,7 @@ public: // Encodes a single unichar_id. Returns the length of the code, (or zero if // invalid input), and the encoding itself in code. - int EncodeUnichar(int unichar_id, RecodedCharID *code) const; + int EncodeUnichar(unsigned unichar_id, RecodedCharID *code) const; // Decodes code, returning the original unichar-id, or // INVALID_UNICHAR_ID if the input is invalid. 
int DecodeUnichar(const RecodedCharID &code) const; diff --git a/src/ccutil/unicharset.cpp b/src/ccutil/unicharset.cpp index 0aa313224..2d0c920eb 100644 --- a/src/ccutil/unicharset.cpp +++ b/src/ccutil/unicharset.cpp @@ -58,24 +58,26 @@ const double kMinXHeightFraction = 0.25; const double kMinCapHeightFraction = 0.05; /*static */ -const char *UNICHARSET::kCustomLigatures[][2] = {{"ct", "\uE003"}, // c + t -> U+E003 - {"ſh", "\uE006"}, // long-s + h -> U+E006 - {"ſi", "\uE007"}, // long-s + i -> U+E007 - {"ſl", "\uE008"}, // long-s + l -> U+E008 - {"ſſ", "\uE009"}, // long-s + long-s -> U+E009 - {nullptr, nullptr}}; +const char *UNICHARSET::kCustomLigatures[][2] = { + {"ct", "\uE003"}, // c + t -> U+E003 + {"ſh", "\uE006"}, // long-s + h -> U+E006 + {"ſi", "\uE007"}, // long-s + i -> U+E007 + {"ſl", "\uE008"}, // long-s + l -> U+E008 + {"ſſ", "\uE009"}, // long-s + long-s -> U+E009 + {nullptr, nullptr}}; // List of mappings to make when ingesting strings from the outside. // The substitutions clean up text that should exist for rendering of // synthetic data, but not in the recognition set. -const char *UNICHARSET::kCleanupMaps[][2] = {{"\u0640", ""}, // TATWEEL is deleted. - {"\ufb01", "fi"}, // fi ligature->fi pair. - {"\ufb02", "fl"}, // fl ligature->fl pair. - {nullptr, nullptr}}; +const char *UNICHARSET::kCleanupMaps[][2] = { + {"\u0640", ""}, // TATWEEL is deleted. + {"\ufb01", "fi"}, // fi ligature->fi pair. + {"\ufb02", "fl"}, // fl ligature->fl pair. + {nullptr, nullptr}}; // List of strings for the SpecialUnicharCodes. Keep in sync with the enum. -const char *UNICHARSET::kSpecialUnicharCodes[SPECIAL_UNICHAR_CODES_COUNT] = {" ", "Joined", - "|Broken|0|1"}; +const char *UNICHARSET::kSpecialUnicharCodes[SPECIAL_UNICHAR_CODES_COUNT] = { + " ", "Joined", "|Broken|0|1"}; const char *UNICHARSET::null_script = "NULL"; @@ -137,7 +139,8 @@ bool UNICHARSET::UNICHAR_PROPERTIES::AnyRangeEmpty() const { } // Expands the ranges with the ranges from the src properties. 
-void UNICHARSET::UNICHAR_PROPERTIES::ExpandRangesFrom(const UNICHAR_PROPERTIES &src) { +void UNICHARSET::UNICHAR_PROPERTIES::ExpandRangesFrom( + const UNICHAR_PROPERTIES &src) { UpdateRange(src.min_bottom, &min_bottom, &max_bottom); UpdateRange(src.max_bottom, &min_bottom, &max_bottom); UpdateRange(src.min_top, &min_top, &max_top); @@ -164,7 +167,8 @@ void UNICHARSET::UNICHAR_PROPERTIES::CopyFrom(const UNICHAR_PROPERTIES &src) { fragment = saved_fragment; } -UNICHARSET::UNICHARSET() : ids(), script_table(nullptr), script_table_size_used(0) { +UNICHARSET::UNICHARSET() + : ids(), script_table(nullptr), script_table_size_used(0) { clear(); for (int i = 0; i < SPECIAL_UNICHAR_CODES_COUNT; ++i) { unichar_insert(kSpecialUnicharCodes[i]); @@ -180,13 +184,15 @@ UNICHARSET::~UNICHARSET() { UNICHAR_ID UNICHARSET::unichar_to_id(const char *const unichar_repr) const { - std::string cleaned = old_style_included_ ? unichar_repr : CleanupString(unichar_repr); + std::string cleaned = + old_style_included_ ? unichar_repr : CleanupString(unichar_repr); return ids.contains(cleaned.data(), cleaned.size()) ? ids.unichar_to_id(cleaned.data(), cleaned.size()) : INVALID_UNICHAR_ID; } -UNICHAR_ID UNICHARSET::unichar_to_id(const char *const unichar_repr, int length) const { +UNICHAR_ID UNICHARSET::unichar_to_id(const char *const unichar_repr, + int length) const { assert(length > 0 && length <= UNICHAR_LEN); std::string cleaned(unichar_repr, length); if (!old_style_included_) { @@ -215,7 +221,8 @@ int UNICHARSET::step(const char *str) const { // Return whether the given UTF-8 string is encodable with this UNICHARSET. // If not encodable, write the first byte offset which cannot be converted // into the second (return) argument. 
-bool UNICHARSET::encodable_string(const char *str, unsigned *first_bad_position) const { +bool UNICHARSET::encodable_string(const char *str, + unsigned *first_bad_position) const { std::vector encoding; return encode_string(str, true, &encoding, nullptr, first_bad_position); } @@ -230,7 +237,8 @@ bool UNICHARSET::encodable_string(const char *str, unsigned *first_bad_position) // that do not belong in the unicharset, or encoding may fail. // Use CleanupString to perform the cleaning. bool UNICHARSET::encode_string(const char *str, bool give_up_on_failure, - std::vector *encoding, std::vector *lengths, + std::vector *encoding, + std::vector *lengths, unsigned *encoded_length) const { std::vector working_encoding; std::vector working_lengths; @@ -240,8 +248,8 @@ bool UNICHARSET::encode_string(const char *str, bool give_up_on_failure, unsigned str_pos = 0; bool perfect = true; while (str_pos < str_length) { - encode_string(str, str_pos, str_length, &working_encoding, &working_lengths, &str_pos, encoding, - &best_lengths); + encode_string(str, str_pos, str_length, &working_encoding, &working_lengths, + &str_pos, encoding, &best_lengths); if (str_pos < str_length) { // This is a non-match. Skip one utf-8 character. perfect = false; @@ -272,7 +280,7 @@ const char *UNICHARSET::id_to_unichar(UNICHAR_ID id) const { if (id == INVALID_UNICHAR_ID) { return INVALID_UNICHAR; } - ASSERT_HOST(id < this->size()); + ASSERT_HOST(static_cast(id) < this->size()); return unichars[id].representation; } @@ -280,7 +288,7 @@ const char *UNICHARSET::id_to_unichar_ext(UNICHAR_ID id) const { if (id == INVALID_UNICHAR_ID) { return INVALID_UNICHAR; } - ASSERT_HOST(id < this->size()); + ASSERT_HOST(static_cast(id) < this->size()); // Resolve from the kCustomLigatures table if this is a private encoding. 
if (get_isprivate(id)) { const char *ch = id_to_unichar(id); @@ -357,8 +365,9 @@ void UNICHARSET::set_normed_ids(UNICHAR_ID unichar_id) { unichars[unichar_id].properties.normed_ids.clear(); if (unichar_id == UNICHAR_SPACE && id_to_unichar(unichar_id)[0] == ' ') { unichars[unichar_id].properties.normed_ids.push_back(UNICHAR_SPACE); - } else if (!encode_string(unichars[unichar_id].properties.normed.c_str(), true, - &unichars[unichar_id].properties.normed_ids, nullptr, nullptr)) { + } else if (!encode_string(unichars[unichar_id].properties.normed.c_str(), + true, &unichars[unichar_id].properties.normed_ids, + nullptr, nullptr)) { unichars[unichar_id].properties.normed_ids.clear(); unichars[unichar_id].properties.normed_ids.push_back(unichar_id); } @@ -383,8 +392,9 @@ void UNICHARSET::set_ranges_empty() { // Sets all the properties for this unicharset given a src unicharset with // everything set. The unicharsets don't have to be the same, and graphemes // are correctly accounted for. -void UNICHARSET::PartialSetPropertiesFromOther(int start_index, const UNICHARSET &src) { - for (int ch = start_index; ch < unichars.size(); ++ch) { +void UNICHARSET::PartialSetPropertiesFromOther(int start_index, + const UNICHARSET &src) { + for (unsigned ch = start_index; ch < unichars.size(); ++ch) { const char *utf8 = id_to_unichar(ch); UNICHAR_PROPERTIES properties; if (src.GetStrProperties(utf8, &properties)) { @@ -464,8 +474,10 @@ void UNICHARSET::AppendOtherUnicharset(const UNICHARSET &src) { // Returns true if the acceptable ranges of the tops of the characters do // not overlap, making their x-height calculations distinct. 
bool UNICHARSET::SizesDistinct(UNICHAR_ID id1, UNICHAR_ID id2) const { - int overlap = std::min(unichars[id1].properties.max_top, unichars[id2].properties.max_top) - - std::max(unichars[id1].properties.min_top, unichars[id2].properties.min_top); + int overlap = std::min(unichars[id1].properties.max_top, + unichars[id2].properties.max_top) - + std::max(unichars[id1].properties.min_top, + unichars[id2].properties.min_top); return overlap <= 0; } @@ -478,10 +490,12 @@ bool UNICHARSET::SizesDistinct(UNICHAR_ID id1, UNICHAR_ID id2) const { // the overall process of encoding a partially failed string more efficient. // See unicharset.h for definition of the args. void UNICHARSET::encode_string(const char *str, int str_index, int str_length, - std::vector *encoding, std::vector *lengths, - unsigned *best_total_length, std::vector *best_encoding, + std::vector *encoding, + std::vector *lengths, + unsigned *best_total_length, + std::vector *best_encoding, std::vector *best_lengths) const { - if (str_index > *best_total_length) { + if (str_index > static_cast(*best_total_length)) { // This is the best result so far. *best_total_length = str_index; *best_encoding = *encoding; @@ -504,9 +518,9 @@ void UNICHARSET::encode_string(const char *str, int str_index, int str_length, UNICHAR_ID id = ids.unichar_to_id(str + str_index, length); encoding->push_back(id); lengths->push_back(length); - encode_string(str, str_index + length, str_length, encoding, lengths, best_total_length, - best_encoding, best_lengths); - if (*best_total_length == str_length) { + encode_string(str, str_index + length, str_length, encoding, lengths, + best_total_length, best_encoding, best_lengths); + if (static_cast(*best_total_length) == str_length) { return; // Tail recursion success! } // Failed with that length, truncate back and try again. @@ -526,7 +540,8 @@ void UNICHARSET::encode_string(const char *str, int str_index, int str_length, // Returns false if no valid match was found in the unicharset. 
// NOTE that script_id, mirror, and other_case refer to this unicharset on // return and will need translation if the target unicharset is different. -bool UNICHARSET::GetStrProperties(const char *utf8_str, UNICHAR_PROPERTIES *props) const { +bool UNICHARSET::GetStrProperties(const char *utf8_str, + UNICHAR_PROPERTIES *props) const { props->Init(); props->SetRangesEmpty(); int total_unicodes = 0; @@ -636,22 +651,27 @@ char UNICHARSET::get_chartype(UNICHAR_ID id) const { return 0; } -void UNICHARSET::unichar_insert(const char *const unichar_repr, OldUncleanUnichars old_style) { +void UNICHARSET::unichar_insert(const char *const unichar_repr, + OldUncleanUnichars old_style) { if (old_style == OldUncleanUnichars::kTrue) { old_style_included_ = true; } - std::string cleaned = old_style_included_ ? unichar_repr : CleanupString(unichar_repr); + std::string cleaned = + old_style_included_ ? unichar_repr : CleanupString(unichar_repr); if (!cleaned.empty() && !ids.contains(cleaned.data(), cleaned.size())) { const char *str = cleaned.c_str(); std::vector encoding; - if (!old_style_included_ && encode_string(str, true, &encoding, nullptr, nullptr)) { + if (!old_style_included_ && + encode_string(str, true, &encoding, nullptr, nullptr)) { return; } - auto &u = unichars.emplace_back(); + unichars.emplace_back(); + auto &u = unichars.back(); int index = 0; do { if (index >= UNICHAR_LEN) { - fprintf(stderr, "Utf8 buffer too big, size>%d for %s\n", UNICHAR_LEN, unichar_repr); + fprintf(stderr, "Utf8 buffer too big, size>%d for %s\n", UNICHAR_LEN, + unichar_repr); return; } u.representation[index++] = *str++; @@ -673,11 +693,13 @@ void UNICHARSET::unichar_insert(const char *const unichar_repr, OldUncleanUnicha } bool UNICHARSET::contains_unichar(const char *const unichar_repr) const { - std::string cleaned = old_style_included_ ? unichar_repr : CleanupString(unichar_repr); + std::string cleaned = + old_style_included_ ? 
unichar_repr : CleanupString(unichar_repr); return ids.contains(cleaned.data(), cleaned.size()); } -bool UNICHARSET::contains_unichar(const char *const unichar_repr, int length) const { +bool UNICHARSET::contains_unichar(const char *const unichar_repr, + int length) const { if (length == 0) { return false; } @@ -688,16 +710,17 @@ bool UNICHARSET::contains_unichar(const char *const unichar_repr, int length) co return ids.contains(cleaned.data(), cleaned.size()); } -bool UNICHARSET::eq(UNICHAR_ID unichar_id, const char *const unichar_repr) const { +bool UNICHARSET::eq(UNICHAR_ID unichar_id, + const char *const unichar_repr) const { return strcmp(this->id_to_unichar(unichar_id), unichar_repr) == 0; } bool UNICHARSET::save_to_string(std::string &str) const { const int kFileBufSize = 1024; char buffer[kFileBufSize + 1]; - snprintf(buffer, kFileBufSize, "%d\n", this->size()); + snprintf(buffer, kFileBufSize, "%zu\n", this->size()); str = buffer; - for (UNICHAR_ID id = 0; id < this->size(); ++id) { + for (unsigned id = 0; id < this->size(); ++id) { int min_bottom, max_bottom, min_top, max_top; get_top_bottom(id, &min_bottom, &max_bottom, &min_top, &max_top); float width, width_sd; @@ -709,17 +732,20 @@ bool UNICHARSET::save_to_string(std::string &str) const { unsigned int properties = this->get_properties(id); if (strcmp(this->id_to_unichar(id), " ") == 0) { snprintf(buffer, kFileBufSize, "%s %x %s %d\n", "NULL", properties, - this->get_script_from_script_id(this->get_script(id)), this->get_other_case(id)); + this->get_script_from_script_id(this->get_script(id)), + this->get_other_case(id)); str += buffer; } else { std::ostringstream stream; stream.imbue(std::locale::classic()); - stream << this->id_to_unichar(id) << ' ' << properties << ' ' << min_bottom << ',' - << max_bottom << ',' << min_top << ',' << max_top << ',' << width << ',' << width_sd - << ',' << bearing << ',' << bearing_sd << ',' << advance << ',' << advance_sd << ' ' + stream << this->id_to_unichar(id) << 
' ' << properties << ' ' + << min_bottom << ',' << max_bottom << ',' << min_top << ',' + << max_top << ',' << width << ',' << width_sd << ',' << bearing + << ',' << bearing_sd << ',' << advance << ',' << advance_sd << ' ' << this->get_script_from_script_id(this->get_script(id)) << ' ' - << this->get_other_case(id) << ' ' << this->get_direction(id) << ' ' - << this->get_mirror(id) << ' ' << this->get_normed_unichar(id) << "\t# " + << this->get_other_case(id) << ' ' << this->get_direction(id) + << ' ' << this->get_mirror(id) << ' ' + << this->get_normed_unichar(id) << "\t# " << this->debug_str(id).c_str() << '\n'; str += stream.str().c_str(); } @@ -741,24 +767,28 @@ private: bool UNICHARSET::load_from_file(FILE *file, bool skip_fragments) { LocalFilePointer lfp(file); using namespace std::placeholders; // for _1, _2 - std::function fgets_cb = std::bind(&LocalFilePointer::fgets, &lfp, _1, _2); + std::function fgets_cb = + std::bind(&LocalFilePointer::fgets, &lfp, _1, _2); bool success = load_via_fgets(fgets_cb, skip_fragments); return success; } bool UNICHARSET::load_from_file(tesseract::TFile *file, bool skip_fragments) { using namespace std::placeholders; // for _1, _2 - std::function fgets_cb = std::bind(&tesseract::TFile::FGets, file, _1, _2); + std::function fgets_cb = + std::bind(&tesseract::TFile::FGets, file, _1, _2); bool success = load_via_fgets(fgets_cb, skip_fragments); return success; } -bool UNICHARSET::load_via_fgets(std::function fgets_cb, bool skip_fragments) { +bool UNICHARSET::load_via_fgets( + const std::function &fgets_cb, bool skip_fragments) { int unicharset_size; char buffer[256]; this->clear(); - if (fgets_cb(buffer, sizeof(buffer)) == nullptr || sscanf(buffer, "%d", &unicharset_size) != 1) { + if (fgets_cb(buffer, sizeof(buffer)) == nullptr || + sscanf(buffer, "%d", &unicharset_size) != 1) { return false; } for (UNICHAR_ID id = 0; id < unicharset_size; ++id) { @@ -800,27 +830,30 @@ bool UNICHARSET::load_via_fgets(std::function fgets_cb, boo 
auto position = stream.tellg(); stream.seekg(position); char c1, c2, c3, c4, c5, c6, c7, c8, c9; - stream >> min_bottom >> c1 >> max_bottom >> c2 >> min_top >> c3 >> max_top >> c4 >> width >> - c5 >> width_sd >> c6 >> bearing >> c7 >> bearing_sd >> c8 >> advance >> c9 >> advance_sd >> - std::setw(63) >> script >> other_case >> direction >> mirror >> std::setw(63) >> normed; - if (stream.fail() || c1 != ',' || c2 != ',' || c3 != ',' || c4 != ',' || c5 != ',' || - c6 != ',' || c7 != ',' || c8 != ',' || c9 != ',') { + stream >> min_bottom >> c1 >> max_bottom >> c2 >> min_top >> c3 >> + max_top >> c4 >> width >> c5 >> width_sd >> c6 >> bearing >> c7 >> + bearing_sd >> c8 >> advance >> c9 >> advance_sd >> std::setw(63) >> + script >> other_case >> direction >> mirror >> std::setw(63) >> normed; + if (stream.fail() || c1 != ',' || c2 != ',' || c3 != ',' || c4 != ',' || + c5 != ',' || c6 != ',' || c7 != ',' || c8 != ',' || c9 != ',') { stream.clear(); stream.seekg(position); - stream >> min_bottom >> c1 >> max_bottom >> c2 >> min_top >> c3 >> max_top >> c4 >> width >> - c5 >> width_sd >> c6 >> bearing >> c7 >> bearing_sd >> c8 >> advance >> c9 >> - advance_sd >> std::setw(63) >> script >> other_case >> direction >> mirror; - if (stream.fail() || c1 != ',' || c2 != ',' || c3 != ',' || c4 != ',' || c5 != ',' || - c6 != ',' || c7 != ',' || c8 != ',' || c9 != ',') { + stream >> min_bottom >> c1 >> max_bottom >> c2 >> min_top >> c3 >> + max_top >> c4 >> width >> c5 >> width_sd >> c6 >> bearing >> c7 >> + bearing_sd >> c8 >> advance >> c9 >> advance_sd >> std::setw(63) >> + script >> other_case >> direction >> mirror; + if (stream.fail() || c1 != ',' || c2 != ',' || c3 != ',' || c4 != ',' || + c5 != ',' || c6 != ',' || c7 != ',' || c8 != ',' || c9 != ',') { stream.clear(); stream.seekg(position); - stream >> min_bottom >> c1 >> max_bottom >> c2 >> min_top >> c3 >> max_top >> - std::setw(63) >> script >> other_case >> direction >> mirror; + stream >> min_bottom >> c1 >> 
max_bottom >> c2 >> min_top >> c3 >> + max_top >> std::setw(63) >> script >> other_case >> direction >> + mirror; if (stream.fail() || c1 != ',' || c2 != ',' || c3 != ',') { stream.clear(); stream.seekg(position); - stream >> min_bottom >> c1 >> max_bottom >> c2 >> min_top >> c3 >> max_top >> - std::setw(63) >> script >> other_case; + stream >> min_bottom >> c1 >> max_bottom >> c2 >> min_top >> c3 >> + max_top >> std::setw(63) >> script >> other_case; if (stream.fail() || c1 != ',' || c2 != ',' || c3 != ',') { stream.clear(); stream.seekg(position); @@ -883,7 +916,7 @@ void UNICHARSET::post_load_setup() { int x_height_alphas = 0; int cap_height_alphas = 0; top_bottom_set_ = false; - for (UNICHAR_ID id = 0; id < unichars.size(); ++id) { + for (unsigned id = 0; id < unichars.size(); ++id) { int min_bottom = 0; int max_bottom = UINT8_MAX; int min_top = 0; @@ -909,8 +942,9 @@ void UNICHARSET::post_load_setup() { script_has_upper_lower_ = net_case_alphas > 0; script_has_xheight_ = - script_has_upper_lower_ || (x_height_alphas > cap_height_alphas * kMinXHeightFraction && - cap_height_alphas > x_height_alphas * kMinCapHeightFraction); + script_has_upper_lower_ || + (x_height_alphas > cap_height_alphas * kMinXHeightFraction && + cap_height_alphas > x_height_alphas * kMinCapHeightFraction); null_sid_ = get_script_id_from_name(null_script); ASSERT_HOST(null_sid_ == 0); @@ -954,7 +988,8 @@ bool UNICHARSET::major_right_to_left() const { if (dir == UNICHARSET::U_LEFT_TO_RIGHT) { ltr_count++; } - if (dir == UNICHARSET::U_RIGHT_TO_LEFT || dir == UNICHARSET::U_RIGHT_TO_LEFT_ARABIC || + if (dir == UNICHARSET::U_RIGHT_TO_LEFT || + dir == UNICHARSET::U_RIGHT_TO_LEFT_ARABIC || dir == UNICHARSET::U_ARABIC_NUMBER) { rtl_count++; } @@ -966,7 +1001,8 @@ bool UNICHARSET::major_right_to_left() const { // An empty or nullptr whitelist enables everything (minus any blacklist). // An empty or nullptr blacklist disables nothing. // An empty or nullptr blacklist has no effect. 
-void UNICHARSET::set_black_and_whitelist(const char *blacklist, const char *whitelist, +void UNICHARSET::set_black_and_whitelist(const char *blacklist, + const char *whitelist, const char *unblacklist) { bool def_enabled = whitelist == nullptr || whitelist[0] == '\0'; // Set everything to default @@ -1012,7 +1048,7 @@ bool UNICHARSET::AnyRepeatedUnicodes() const { if (has_special_codes()) { start_id = SPECIAL_UNICHAR_CODES_COUNT; } - for (int id = start_id; id < unichars.size(); ++id) { + for (unsigned id = start_id; id < unichars.size(); ++id) { // Convert to unicodes. std::vector unicodes = UNICHAR::UTF8ToUTF32(get_normed_unichar(id)); for (size_t u = 1; u < unicodes.size(); ++u) { @@ -1037,7 +1073,8 @@ int UNICHARSET::add_script(const char *script) { assert(script_table_size_used == script_table_size_reserved); script_table_size_reserved += script_table_size_reserved; char **new_script_table = new char *[script_table_size_reserved]; - memcpy(new_script_table, script_table, script_table_size_used * sizeof(char *)); + memcpy(new_script_table, script_table, + script_table_size_used * sizeof(char *)); delete[] script_table; script_table = new_script_table; } @@ -1048,7 +1085,8 @@ int UNICHARSET::add_script(const char *script) { // Returns the string that represents a fragment // with the given unichar, pos and total. -std::string CHAR_FRAGMENT::to_string(const char *unichar, int pos, int total, bool natural) { +std::string CHAR_FRAGMENT::to_string(const char *unichar, int pos, int total, + bool natural) { if (total == 1) { return std::string(unichar); } @@ -1056,8 +1094,8 @@ std::string CHAR_FRAGMENT::to_string(const char *unichar, int pos, int total, bo result += kSeparator; result += unichar; char buffer[kMaxLen]; - snprintf(buffer, kMaxLen, "%c%d%c%d", kSeparator, pos, natural ? kNaturalFlag : kSeparator, - total); + snprintf(buffer, kMaxLen, "%c%d%c%d", kSeparator, pos, + natural ? 
kNaturalFlag : kSeparator, total); result += buffer; return result; } diff --git a/src/ccutil/unicharset.h b/src/ccutil/unicharset.h index 5d265f711..dd0ff8f3d 100644 --- a/src/ccutil/unicharset.h +++ b/src/ccutil/unicharset.h @@ -85,7 +85,8 @@ public: // Returns the string that represents a fragment // with the given unichar, pos and total. - static std::string to_string(const char *unichar, int pos, int total, bool natural); + static std::string to_string(const char *unichar, int pos, int total, + bool natural); // Returns the string that represents this fragment. std::string to_string() const { return to_string(unichar, pos, total, natural); @@ -93,19 +94,22 @@ public: // Checks whether a fragment has the same unichar, // position and total as the given inputs. - inline bool equals(const char *other_unichar, int other_pos, int other_total) const { - return (strcmp(this->unichar, other_unichar) == 0 && this->pos == other_pos && - this->total == other_total); + inline bool equals(const char *other_unichar, int other_pos, + int other_total) const { + return (strcmp(this->unichar, other_unichar) == 0 && + this->pos == other_pos && this->total == other_total); } inline bool equals(const CHAR_FRAGMENT *other) const { - return this->equals(other->get_unichar(), other->get_pos(), other->get_total()); + return this->equals(other->get_unichar(), other->get_pos(), + other->get_total()); } // Checks whether a given fragment is a continuation of this fragment. // Assumes that the given fragment pointer is not nullptr. inline bool is_continuation_of(const CHAR_FRAGMENT *fragment) const { return (strcmp(this->unichar, fragment->get_unichar()) == 0 && - this->total == fragment->get_total() && this->pos == fragment->get_pos() + 1); + this->total == fragment->get_total() && + this->pos == fragment->get_pos() + 1); } // Returns true if this fragment is a beginning fragment. 
@@ -237,8 +241,10 @@ public: // WARNING: Caller must guarantee that str has already been cleaned of codes // that do not belong in the unicharset, or encoding may fail. // Use CleanupString to perform the cleaning. - bool encode_string(const char *str, bool give_up_on_failure, std::vector *encoding, - std::vector *lengths, unsigned *encoded_length) const; + bool encode_string(const char *str, bool give_up_on_failure, + std::vector *encoding, + std::vector *lengths, + unsigned *encoded_length) const; // Return the unichar representation corresponding to the given UNICHAR_ID // within the UNICHARSET. @@ -272,7 +278,8 @@ public: // TATWEEL characters are kept and n-grams are allowed. Otherwise TATWEEL // characters are ignored/skipped as if they don't exist and n-grams that // can already be encoded are not added. - void unichar_insert(const char *const unichar_repr, OldUncleanUnichars old_style); + void unichar_insert(const char *const unichar_repr, + OldUncleanUnichars old_style); void unichar_insert(const char *const unichar_repr) { unichar_insert(unichar_repr, OldUncleanUnichars::kFalse); } @@ -283,7 +290,7 @@ public: if (cleaned != unichar_repr) { unichar_insert(unichar_repr, OldUncleanUnichars::kTrue); } else { - int old_size = size(); + auto old_size = size(); unichar_insert(unichar_repr, OldUncleanUnichars::kFalse); if (size() == old_size) { unichar_insert(unichar_repr, OldUncleanUnichars::kTrue); @@ -345,7 +352,7 @@ public: } // Return the size of the set (the number of different UNICHAR it holds). - int size() const { + size_t size() const { return unichars.size(); } @@ -365,7 +372,8 @@ public: // Returns true if the operation is successful. 
bool save_to_file(FILE *file) const { std::string str; - return save_to_string(str) && tesseract::Serialize(file, &str[0], str.length()); + return save_to_string(str) && + tesseract::Serialize(file, &str[0], str.length()); } bool save_to_file(tesseract::TFile *file) const { @@ -575,8 +583,8 @@ public: // baseline-normalized coordinates, ie, where the baseline is // kBlnBaselineOffset and the meanline is kBlnBaselineOffset + kBlnXHeight // (See normalis.h for the definitions). - void get_top_bottom(UNICHAR_ID unichar_id, int *min_bottom, int *max_bottom, int *min_top, - int *max_top) const { + void get_top_bottom(UNICHAR_ID unichar_id, int *min_bottom, int *max_bottom, + int *min_top, int *max_top) const { if (INVALID_UNICHAR_ID == unichar_id) { *min_bottom = *min_top = 0; *max_bottom = *max_top = 256; // kBlnCellHeight @@ -588,20 +596,24 @@ public: *min_top = unichars[unichar_id].properties.min_top; *max_top = unichars[unichar_id].properties.max_top; } - void set_top_bottom(UNICHAR_ID unichar_id, int min_bottom, int max_bottom, int min_top, - int max_top) { - unichars[unichar_id].properties.min_bottom = ClipToRange(min_bottom, 0, UINT8_MAX); - unichars[unichar_id].properties.max_bottom = ClipToRange(max_bottom, 0, UINT8_MAX); - unichars[unichar_id].properties.min_top = ClipToRange(min_top, 0, UINT8_MAX); - unichars[unichar_id].properties.max_top = ClipToRange(max_top, 0, UINT8_MAX); + void set_top_bottom(UNICHAR_ID unichar_id, int min_bottom, int max_bottom, + int min_top, int max_top) { + unichars[unichar_id].properties.min_bottom = + ClipToRange(min_bottom, 0, UINT8_MAX); + unichars[unichar_id].properties.max_bottom = + ClipToRange(max_bottom, 0, UINT8_MAX); + unichars[unichar_id].properties.min_top = + ClipToRange(min_top, 0, UINT8_MAX); + unichars[unichar_id].properties.max_top = + ClipToRange(max_top, 0, UINT8_MAX); } // Returns the width stats (as mean, sd) of the given unichar relative to the // median advance of all characters in the character set. 
- void get_width_stats(UNICHAR_ID unichar_id, float *width, float *width_sd) const { + void get_width_stats(UNICHAR_ID unichar_id, float *width, + float *width_sd) const { if (INVALID_UNICHAR_ID == unichar_id) { *width = 0.0f; *width_sd = 0.0f; - ; return; } ASSERT_HOST(contains_unichar_id(unichar_id)); @@ -614,7 +626,8 @@ public: } // Returns the stats of the x-bearing (as mean, sd) of the given unichar // relative to the median advance of all characters in the character set. - void get_bearing_stats(UNICHAR_ID unichar_id, float *bearing, float *bearing_sd) const { + void get_bearing_stats(UNICHAR_ID unichar_id, float *bearing, + float *bearing_sd) const { if (INVALID_UNICHAR_ID == unichar_id) { *bearing = *bearing_sd = 0.0f; return; @@ -623,13 +636,15 @@ public: *bearing = unichars[unichar_id].properties.bearing; *bearing_sd = unichars[unichar_id].properties.bearing_sd; } - void set_bearing_stats(UNICHAR_ID unichar_id, float bearing, float bearing_sd) { + void set_bearing_stats(UNICHAR_ID unichar_id, float bearing, + float bearing_sd) { unichars[unichar_id].properties.bearing = bearing; unichars[unichar_id].properties.bearing_sd = bearing_sd; } // Returns the stats of the x-advance of the given unichar (as mean, sd) // relative to the median advance of all characters in the character set. 
- void get_advance_stats(UNICHAR_ID unichar_id, float *advance, float *advance_sd) const { + void get_advance_stats(UNICHAR_ID unichar_id, float *advance, + float *advance_sd) const { if (INVALID_UNICHAR_ID == unichar_id) { *advance = *advance_sd = 0; return; @@ -638,7 +653,8 @@ public: *advance = unichars[unichar_id].properties.advance; *advance_sd = unichars[unichar_id].properties.advance_sd; } - void set_advance_stats(UNICHAR_ID unichar_id, float advance, float advance_sd) { + void set_advance_stats(UNICHAR_ID unichar_id, float advance, + float advance_sd) { unichars[unichar_id].properties.advance = advance; unichars[unichar_id].properties.advance_sd = advance_sd; } @@ -654,8 +670,9 @@ public: return true; } int script_id = get_script(unichar_id); - return script_id != han_sid_ && script_id != thai_sid_ && script_id != hangul_sid_ && - script_id != hiragana_sid_ && script_id != katakana_sid_; + return script_id != han_sid_ && script_id != thai_sid_ && + script_id != hangul_sid_ && script_id != hiragana_sid_ && + script_id != katakana_sid_; } // Return the script name of the given unichar. @@ -738,7 +755,8 @@ public: // at these codes and they should not be used. bool has_special_codes() const { return get_fragment(UNICHAR_BROKEN) != nullptr && - strcmp(id_to_unichar(UNICHAR_BROKEN), kSpecialUnicharCodes[UNICHAR_BROKEN]) == 0; + strcmp(id_to_unichar(UNICHAR_BROKEN), + kSpecialUnicharCodes[UNICHAR_BROKEN]) == 0; } // Returns true if there are any repeated unicodes in the normalized @@ -800,7 +818,8 @@ public: // Return a pointer to the CHAR_FRAGMENT class struct if the given // unichar representation represents a character fragment. 
const CHAR_FRAGMENT *get_fragment(const char *const unichar_repr) const { - if (unichar_repr == nullptr || unichar_repr[0] == '\0' || !ids.contains(unichar_repr, false)) { + if (unichar_repr == nullptr || unichar_repr[0] == '\0' || + !ids.contains(unichar_repr, false)) { return nullptr; } return get_fragment(unichar_to_id(unichar_repr)); @@ -1020,8 +1039,9 @@ private: // best_encoding contains the encoding that used the longest part of str. // best_lengths (may be null) contains the lengths of best_encoding. void encode_string(const char *str, int str_index, int str_length, - std::vector *encoding, std::vector *lengths, - unsigned *best_total_length, std::vector *best_encoding, + std::vector *encoding, + std::vector *lengths, unsigned *best_total_length, + std::vector *best_encoding, std::vector *best_lengths) const; // Gets the properties for a grapheme string, combining properties for @@ -1034,7 +1054,8 @@ private: // Load ourselves from a "file" where our only interface to the file is // an implementation of fgets(). This is the parsing primitive accessed by // the public routines load_from_file(). - bool load_via_fgets(std::function fgets_cb, bool skip_fragments); + bool load_via_fgets(const std::function &fgets_cb, + bool skip_fragments); // List of mappings to make when ingesting strings from the outside. // The substitutions clean up text that should exists for rendering of diff --git a/src/ccutil/unicity_table.h b/src/ccutil/unicity_table.h index b12511df4..8e2ccde85 100644 --- a/src/ccutil/unicity_table.h +++ b/src/ccutil/unicity_table.h @@ -37,14 +37,20 @@ template class UnicityTable { public: /// Clear the structures and deallocate internal structures. - ~UnicityTable(); + ~UnicityTable() { + clear(); + } /// Reserve some memory. If there is size or more elements, the table will /// then allocate size * 2 elements. - void reserve(int size); + void reserve(int size) { + table_.reserve(size); + } /// Return the size used. 
- int size() const; + int size() const { + return table_.size(); + } /// Return the object from an id. const T &at(int id) const { @@ -59,16 +65,19 @@ public: /// Return the id of the T object. /// This method NEEDS a compare_callback to be passed to /// set_compare_callback. - int get_id(T object) const; - - /// Return true if T is in the table - bool contains(T object) const; - - /// Return true if the id is valid - T contains_id(int id) const; + int get_index(T object) const { + return table_.get_index(object); + } /// Add an element in the table - int push_back(T object); + int push_back(T object) { + auto idx = get_index(object); + if (idx == -1) { + table_.push_back(object); + idx = size(); + } + return idx; + } /// Add a callback to be called to delete the elements when the table took /// their ownership. @@ -80,11 +89,15 @@ public: /// All the owned Callbacks are also deleted. /// If you don't want the Callbacks to be deleted, before calling clear, set /// the callback to nullptr. - void clear(); + void clear() { + table_.clear(); + } /// This method clear the current object, then, does a shallow copy of /// its argument, and finally invalidate its argument. - void move(UnicityTable *from); + void move(UnicityTable *from) { + table_.move(&from->table_); + } /// Read/Write the table to a file. This does _NOT_ read/write the callbacks. /// The Callback given must be permanent since they will be called more than @@ -101,65 +114,6 @@ private: GenericVector table_; }; -template -UnicityTable::~UnicityTable() { - clear(); -} - -template -int UnicityTable::size() const { - return table_.size(); -} - -// Reserve some memory. If there is size or more elements, the table will -// then allocate size * 2 elements. -template -void UnicityTable::reserve(int size) { - table_.reserve(size); -} - -// Return true if the id is valid -template -T UnicityTable::contains_id(int id) const { - return table_.contains_index(id); -} - -// Return the id of the T object. 
-template -int UnicityTable::get_id(T object) const { - return table_.get_index(object); -} - -// Return true if T is in the table -template -bool UnicityTable::contains(T object) const { - return get_id(object) != -1; -} - -// Add an element in the table -template -int UnicityTable::push_back(T object) { - int idx = get_id(object); - if (idx == -1) { - table_.push_back(object); - idx = size(); - } - return idx; -} - -// Clear the table, calling the callback function if any. -template -void UnicityTable::clear() { - table_.clear(); -} - -// This method clear the current object, then, does a shallow copy of -// its argument, and finally invalidate its argument. -template -void UnicityTable::move(UnicityTable *from) { - table_.move(&from->table_); -} - } // namespace tesseract #endif // TESSERACT_CCUTIL_UNICITY_TABLE_H_ diff --git a/src/ccutil/universalambigs.cpp b/src/ccutil/universalambigs.cpp deleted file mode 100644 index a058fbe81..000000000 --- a/src/ccutil/universalambigs.cpp +++ /dev/null @@ -1,19038 +0,0 @@ -/////////////////////////////////////////////////////////////////////// -// File: universalambigs.cpp -// Description: Data for a universal ambigs file that is useful for -// any language. -// Author: Ray Smith -// -// (C) Copyright 2013, Google Inc. -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// -/////////////////////////////////////////////////////////////////////// - -#include "universalambigs.h" - -namespace tesseract { - -const char kUniversalAmbigsFile[] = { - "v2\n" - "'' \" 1\n" - "`' \" 1\n" - "'` \" 1\n" - "‘' \" 1\n" - "'‘ \" 1\n" - "’' \" 1\n" - "'’ \" 1\n" - "`` \" 1\n" - "`‘ \" 1\n" - "‘` \" 1\n" - "`’ \" 1\n" - "’` \" 1\n" - "‘‘ “ 1\n" - "‘’ \" 1\n" - "’‘ \" 1\n" - "’’ ” 1\n" - ",, „ 1\n" - "m rn 0\n" - "rn m 0\n" - "m in 0\n" - "in m 0\n" - "d cl 0\n" - "cl d 0\n" - "nn rm 0\n" - "rm nn 0\n" - "n ri 0\n" - "ri n 0\n" - "li h 0\n" - "lr h 0\n" - "ii u 0\n" - "ii n 0\n" - "ni m 0\n" - "iii m 0\n" - "ll H 0\n" - "I-I H 0\n" - "vv w 0\n" - "VV W 0\n" - "t f 0\n" - "f t 0\n" - "a o 0\n" - "o a 0\n" - "e c 0\n" - "c e 0\n" - "rr n 0\n" - "E fi 0\n" - "l< k 0\n" - "ld ki 0\n" - "lx h 0\n" - "xn m 0\n" - "ux in 0\n" - "r t 0\n" - "d tl 0\n" - "di th 0\n" - "ur in 0\n" - "un im 0\n" - "u a 0\n" - "o ó 0\n" - "ó o 0\n" - "i í 0\n" - "í i 0\n" - "a á 0\n" - "á a 0\n" - "e é 0\n" - "é e 0\n" - "u ú 0\n" - "ú u 0\n" - "n ñ 0\n" - "ñ n 0\n" - "0 o 0\n" - "d tr 0\n" - "n tr 0\n" - "ñ fi 0\n" - "u ti 0\n" - "ñ ti 0\n" - "d ti 0\n" - "d tí 0\n" - "d rí 0\n" - "a à 0\n" - "e è 0\n" - "n ij 0\n" - "g ij 0\n" - "o ò 0\n" - "E É 0\n" - "E È 0\n" - "u ü 0\n" - "xnE an 1\n" - "mYx me 1\n" - "qtE nt 1\n" - "Tlb le 1\n" - "vxN va 1\n" - "gjQ ng 1\n" - "jpF ij 1\n" - "Yrl le 1\n" - "aqY an 1\n" - "zvJ va 1\n" - "fbL be 1\n" - "Nvk va 1\n" - "fJp pr 1\n" - "wxC wa 1\n" - "cuJ qu 1\n" - "Qzt ta 1\n" - "qKw wa 1\n" - "scJ st 1\n" - "pXp po 1\n" - "Vqi ti 1\n" - "Uxk ka 1\n" - "kJv ka 1\n" - "Ykd ka 1\n" - "vpX va 1\n" - "iBv ti 1\n" - "zRb sz 1\n" - "yTm mi 1\n" - "mKp pr 1\n" - "Vzq qu 1\n" - "Xtp ti 1\n" - "mvD va 1\n" - "mDq me 1\n" - "jxP ij 1\n" - "Bxv va 1\n" - "oIu qu 1\n" - "Rvc va 1\n" - "uCj qu 1\n" - "oAo vo 1\n" - "quB tu 1\n" - "btV ti 1\n" - "Lmc me 1\n" - "tVw ti 1\n" - "Yxv va 1\n" - "Hxm me 1\n" - "dVh th 1\n" - "xYc ch 1\n" - "uPj tu 1\n" - "fTf fo 
1\n" - "Rjw ij 1\n" - "xdA di 1\n" - "jzN ij 1\n" - "mxL me 1\n" - "ygJ ng 1\n" - "Vvg va 1\n" - "rjK ij 1\n" - "yuV tu 1\n" - "sWk ku 1\n" - "Pgz sz 1\n" - "jHm me 1\n" - "zkU ku 1\n" - "gvG va 1\n" - "hdP th 1\n" - "mVb me 1\n" - "Qgd di 1\n" - "zcZ ch 1\n" - "zqj ij 1\n" - "zsJ sz 1\n" - "dfN di 1\n" - "dgW di 1\n" - "wNr ri 1\n" - "zvC va 1\n" - "qYw qu 1\n" - "uHy tu 1\n" - "tNq th 1\n" - "lxJ li 1\n" - "Hbk ku 1\n" - "xsG st 1\n" - "vSb va 1\n" - "xFb bu 1\n" - "Ntg th 1\n" - "oBj ij 1\n" - "qkv qu 1\n" - "bVj ij 1\n" - "zjT ij 1\n" - "bvX va 1\n" - "oZf to 1\n" - "kcU ko 1\n" - "fFm me 1\n" - "Xbj ij 1\n" - "Kqv va 1\n" - "Rwj ij 1\n" - "dvJ va 1\n" - "znJ sz 1\n" - "qqV qu 1\n" - "pxM po 1\n" - "eBj ij 1\n" - "mJx me 1\n" - "xnM ng 1\n" - "aCq va 1\n" - "pHj ij 1\n" - "tfQ th 1\n" - "wqn qu 1\n" - "mSs is 1\n" - "sBw st 1\n" - "Fhn th 1\n" - "zNb sz 1\n" - "Mvb va 1\n" - "bVt th 1\n" - "qHt th 1\n" - "qLv qu 1\n" - "kgF ng 1\n" - "vxW va 1\n" - "cdY ch 1\n" - "Xrz sz 1\n" - "Efh th 1\n" - "lqI qu 1\n" - "Lzq qu 1\n" - "zhX th 1\n" - "ghZ th 1\n" - "lFg ng 1\n" - "vVc va 1\n" - "lMr er 1\n" - "Tqj qu 1\n" - "jAx ij 1\n" - "iMt th 1\n" - "Nlv va 1\n" - "zbP sz 1\n" - "kVx ka 1\n" - "eQl te 1\n" - "sWb st 1\n" - "Bqy qu 1\n" - "dXk ka 1\n" - "vUc va 1\n" - "vOb va 1\n" - "uHf qu 1\n" - "qNr qu 1\n" - "uFz qu 1\n" - "Mlr er 1\n" - "kmZ ka 1\n" - "sRt th 1\n" - "Wqv qu 1\n" - "hfK th 1\n" - "vxQ va 1\n" - "lCq qu 1\n" - "fYw wa 1\n" - "tfS th 1\n" - "qdO qu 1\n" - "dQd de 1\n" - "xdX de 1\n" - "mNx me 1\n" - "kFz sz 1\n" - "wjS ij 1\n" - "yPp pr 1\n" - "wcW ch 1\n" - "Njz sz 1\n" - "dVp de 1\n" - "dqD qu 1\n" - "rJs sz 1\n" - "xpH po 1\n" - "xqR qu 1\n" - "gVr er 1\n" - "Btq th 1\n" - "nmB nt 1\n" - "zcM sz 1\n" - "cfG ch 1\n" - "mfO me 1\n" - "Yhc th 1\n" - "bZm me 1\n" - "mzB sz 1\n" - "vRw va 1\n" - "yDh th 1\n" - "Zgf ng 1\n" - "kqT qu 1\n" - "Iuz qu 1\n" - "rbW er 1\n" - "Jmq qu 1\n" - "Kvj va 1\n" - "zcD ch 1\n" - "xgC ng 1\n" - "jCx ij 1\n" - "bWg ng 1\n" 
- "ywW wa 1\n" - "Jkc ch 1\n" - "xGs sz 1\n" - "vbH va 1\n" - "lTz sz 1\n" - "eCb er 1\n" - "jVv va 1\n" - "jDq qu 1\n" - "joQ po 1\n" - "qtM th 1\n" - "Rqk qu 1\n" - "Hvg va 1\n" - "uAz qu 1\n" - "mfW me 1\n" - "tgS th 1\n" - "cqD qu 1\n" - "sfY sz 1\n" - "Yhv th 1\n" - "uqM qu 1\n" - "xpK pr 1\n" - "Jzh th 1\n" - "cQk ch 1\n" - "tjO th 1\n" - "qxZ qu 1\n" - "zPv sz 1\n" - "qNk qu 1\n" - "lvQ va 1\n" - "kGw ka 1\n" - "xuD qu 1\n" - "Jvy va 1\n" - "jYe te 1\n" - "fZu qu 1\n" - "qYo qu 1\n" - "vhI th 1\n" - "fxY fo 1\n" - "yPf fo 1\n" - "fGj ij 1\n" - "dmT me 1\n" - "vfX va 1\n" - "xQt th 1\n" - "cxS ch 1\n" - "vzA va 1\n" - "qaA qu 1\n" - "Jbx be 1\n" - "kVd ka 1\n" - "Xjv va 1\n" - "hkI th 1\n" - "vQu qu 1\n" - "vhK th 1\n" - "Dvj va 1\n" - "Vbm me 1\n" - "fpN pr 1\n" - "pkG ka 1\n" - "bLc ch 1\n" - "tJc th 1\n" - "wwJ wa 1\n" - "Zrw er 1\n" - "wdW de 1\n" - "Wgf ng 1\n" - "Pqz qu 1\n" - "wgN ng 1\n" - "zHt th 1\n" - "xTl le 1\n" - "Dvt th 1\n" - "wmU me 1\n" - "xhm th 1\n" - "hCx th 1\n" - "vwV va 1\n" - "zvL va 1\n" - "nGf nt 1\n" - "jjC ij 1\n" - "Ucg ch 1\n" - "pWf pr 1\n" - "jxG ij 1\n" - "Mqn qu 1\n" - "yvW va 1\n" - "lWk ka 1\n" - "mdO me 1\n" - "qNm qu 1\n" - "Rwg ng 1\n" - "xfv va 1\n" - "uOw qu 1\n" - "xhZ th 1\n" - "jLr er 1\n" - "fBy fo 1\n" - "nUj nt 1\n" - "lTg ng 1\n" - "jlP ij 1\n" - "wrR er 1\n" - "rXw er 1\n" - "eVw ve 1\n" - "zWn ng 1\n" - "mJs sz 1\n" - "Mgy ng 1\n" - "uZq qu 1\n" - "Tdg ng 1\n" - "mqI qu 1\n" - "Dhp th 1\n" - "pmK me 1\n" - "Ssf sz 1\n" - "sWl sz 1\n" - "iqK qu 1\n" - "gjG ng 1\n" - "djB ij 1\n" - "wKv va 1\n" - "wvI va 1\n" - "tcU th 1\n" - "tkG th 1\n" - "zUe te 1\n" - "lUh th 1\n" - "nBg nt 1\n" - "dHx de 1\n" - "Wbz sz 1\n" - "vuQ qu 1\n" - "Hpl le 1\n" - "oVj ij 1\n" - "vBb va 1\n" - "Tdz sz 1\n" - "pfV pr 1\n" - "qgN qu 1\n" - "pcU ch 1\n" - "gcN ch 1\n" - "vkA va 1\n" - "cQf ch 1\n" - "Yzx sz 1\n" - "ypF pr 1\n" - "vBw va 1\n" - "pPd de 1\n" - "qmU qu 1\n" - "eWf ve 1\n" - "jZr er 1\n" - "Hwl le 1\n" - "yyI ny 1\n" - 
"Zfh th 1\n" - "Lgw ng 1\n" - "uqp qu 1\n" - "xOj ij 1\n" - "dkJ ko 1\n" - "dqM qu 1\n" - "sbW is 1\n" - "zMp sz 1\n" - "nJz ng 1\n" - "kMc ko 1\n" - "zqW qu 1\n" - "vQk va 1\n" - "eqD qu 1\n" - "hFn th 1\n" - "vcZ ch 1\n" - "xGk ka 1\n" - "kzf sz 1\n" - "xZx xe 1\n" - "qvN qu 1\n" - "ykY ka 1\n" - "brH er 1\n" - "Wrh th 1\n" - "wjE ij 1\n" - "kjQ ka 1\n" - "fLj ij 1\n" - "mgE ng 1\n" - "xwI wa 1\n" - "iDw ti 1\n" - "Btx th 1\n" - "vPz va 1\n" - "yqH qu 1\n" - "wFe er 1\n" - "lQy le 1\n" - "gBp ng 1\n" - "jdY de 1\n" - "tvQ th 1\n" - "ljO le 1\n" - "Nsq qu 1\n" - "xdO de 1\n" - "gzW ng 1\n" - "wtM th 1\n" - "qfR qu 1\n" - "jZh th 1\n" - "Wcb ch 1\n" - "dvQ va 1\n" - "jHb ij 1\n" - "xbM be 1\n" - "nWg nt 1\n" - "Ywj ij 1\n" - "Xwj ij 1\n" - "pxK pr 1\n" - "ybQ be 1\n" - "Wvm va 1\n" - "Lgz ng 1\n" - "btS th 1\n" - "jRl le 1\n" - "qqJ qu 1\n" - "Cnq qu 1\n" - "Fmw me 1\n" - "dvP va 1\n" - "vqB qu 1\n" - "djI de 1\n" - "jVq qu 1\n" - "fvZ va 1\n" - "Cwt th 1\n" - "Uyb be 1\n" - "Ffc ch 1\n" - "soX sz 1\n" - "qhR th 1\n" - "fWz sz 1\n" - "vrX va 1\n" - "eOq qu 1\n" - "bwZ be 1\n" - "dnV ng 1\n" - "Gbw be 1\n" - "xGd de 1\n" - "mnZ ng 1\n" - "bpN pr 1\n" - "dzX de 1\n" - "Bxq qu 1\n" - "zpx sz 1\n" - "dqZ qu 1\n" - "xTf fo 1\n" - "wPv va 1\n" - "cxq qu 1\n" - "hdT th 1\n" - "ywX wa 1\n" - "Uvv va 1\n" - "rKp er 1\n" - "sdF de 1\n" - "Jcg ch 1\n" - "xzO sz 1\n" - "xTt th 1\n" - "djP de 1\n" - "gTn ng 1\n" - "Gtp th 1\n" - "xgA ng 1\n" - "bdL de 1\n" - "wzO sz 1\n" - "fhI th 1\n" - "Wmp me 1\n" - "Qdt th 1\n" - "uYq qu 1\n" - "pbJ pr 1\n" - "jRd de 1\n" - "Xsx sz 1\n" - "zgI ng 1\n" - "qhY th 1\n" - "Ggj ng 1\n" - "Fjq qu 1\n" - "Qwk ka 1\n" - "zxW sz 1\n" - "vCc ch 1\n" - "ccL ch 1\n" - "Kxs sz 1\n" - "mYr er 1\n" - "rQt er 1\n" - "Zxs sz 1\n" - "hdQ th 1\n" - "dwH de 1\n" - "Yml le 1\n" - "qVz qu 1\n" - "Rvl va 1\n" - "yHk ka 1\n" - "Wjt th 1\n" - "hMw th 1\n" - "pzU sz 1\n" - "gcL ch 1\n" - "qOa qu 1\n" - "eqI qu 1\n" - "iYp ti 1\n" - "vCq qu 1\n" - "uoV ro 1\n" - "fZx 
fo 1\n" - "qQd qu 1\n" - "qdE qu 1\n" - "qWx qu 1\n" - "Ykj ij 1\n" - "Fpj ij 1\n" - "zGv va 1\n" - "rwO er 1\n" - "Qzq qu 1\n" - "Kqb qu 1\n" - "zgT ng 1\n" - "jsZ sz 1\n" - "aHq qu 1\n" - "yjL ij 1\n" - "Ycw ch 1\n" - "bnP an 1\n" - "vWn an 1\n" - "zyY sz 1\n" - "zRs st 1\n" - "wuP qu 1\n" - "vjB va 1\n" - "jrT er 1\n" - "vwJ va 1\n" - "dVj de 1\n" - "zvW va 1\n" - "dZk de 1\n" - "nrG an 1\n" - "qsU qu 1\n" - "Pvs va 1\n" - "lLh th 1\n" - "qCz qu 1\n" - "dvV de 1\n" - "Pjw ij 1\n" - "Kmj ij 1\n" - "Jfh th 1\n" - "nwY an 1\n" - "gwC ng 1\n" - "vGb va 1\n" - "qWr qu 1\n" - "qpW qu 1\n" - "dKk de 1\n" - "yWb be 1\n" - "jmN ij 1\n" - "gpV ng 1\n" - "qzS qu 1\n" - "oZh th 1\n" - "Qmt th 1\n" - "mNk me 1\n" - "ypM pr 1\n" - "lwH le 1\n" - "zHs sz 1\n" - "jzC jo 1\n" - "oJh th 1\n" - "Lqh th 1\n" - "hXg th 1\n" - "xEf fo 1\n" - "uWx qu 1\n" - "kvT va 1\n" - "zsG sz 1\n" - "lSx le 1\n" - "qKb qu 1\n" - "Qye de 1\n" - "xHk ka 1\n" - "Cwp pr 1\n" - "zmJ sz 1\n" - "xuL qu 1\n" - "bdH de 1\n" - "Pbw wa 1\n" - "qdX qu 1\n" - "lVc ch 1\n" - "bqL qu 1\n" - "wNs sz 1\n" - "vzN va 1\n" - "qjA qu 1\n" - "Zhf th 1\n" - "ypJ pr 1\n" - "xMq qu 1\n" - "bTk ka 1\n" - "tLf th 1\n" - "xgR ng 1\n" - "kQz sz 1\n" - "Rjp ij 1\n" - "xhG th 1\n" - "bCc ch 1\n" - "hbF th 1\n" - "rxQ er 1\n" - "qVp qu 1\n" - "bkY ka 1\n" - "qPl qu 1\n" - "jQk ij 1\n" - "Ovq qu 1\n" - "sVv va 1\n" - "pmU me 1\n" - "uFv qu 1\n" - "xaZ va 1\n" - "gGn an 1\n" - "pgI ng 1\n" - "zTj sz 1\n" - "lvC va 1\n" - "wGv va 1\n" - "rNv va 1\n" - "Qtq th 1\n" - "vNh th 1\n" - "lPv va 1\n" - "Jdq qu 1\n" - "Xdj de 1\n" - "yqk qu 1\n" - "iwY ti 1\n" - "Nmq qu 1\n" - "fTp pr 1\n" - "qzQ qu 1\n" - "pjA ij 1\n" - "pvH va 1\n" - "xLj ij 1\n" - "qWh th 1\n" - "vVq qu 1\n" - "gQd de 1\n" - "svY va 1\n" - "fLf fo 1\n" - "qzB qu 1\n" - "Dxg ng 1\n" - "uzY qu 1\n" - "gVz sz 1\n" - "hZb th 1\n" - "Gpx pr 1\n" - "xqh th 1\n" - "gcX ch 1\n" - "Hxd de 1\n" - "tUq th 1\n" - "bKp pr 1\n" - "iGx ti 1\n" - "xvQ va 1\n" - "lxA le 1\n" - "sjH st 
1\n" - "Gqo qu 1\n" - "dgQ de 1\n" - "yDk ka 1\n" - "Znv va 1\n" - "vfU va 1\n" - "vuD qu 1\n" - "oQj ij 1\n" - "bhD th 1\n" - "qLj qu 1\n" - "mdY de 1\n" - "rZb er 1\n" - "kDv va 1\n" - "fsK sz 1\n" - "Kqf qu 1\n" - "yWl le 1\n" - "mVw me 1\n" - "mcV ch 1\n" - "tDf th 1\n" - "lAo le 1\n" - "fzR sz 1\n" - "Xrq qu 1\n" - "jrZ er 1\n" - "qmN qu 1\n" - "Jnp an 1\n" - "jhC th 1\n" - "kqR qu 1\n" - "dWn de 1\n" - "Wmw me 1\n" - "Rgy ng 1\n" - "uvN qu 1\n" - "jiY ti 1\n" - "xWc ch 1\n" - "yJr er 1\n" - "oHq qu 1\n" - "yvw va 1\n" - "Ydn de 1\n" - "Nvq qu 1\n" - "Gmv va 1\n" - "xxZ xe 1\n" - "Xdf de 1\n" - "xYh th 1\n" - "Vnv an 1\n" - "jNz sz 1\n" - "Wnq qu 1\n" - "Xwk ka 1\n" - "qWz qu 1\n" - "mQs sz 1\n" - "Vxb be 1\n" - "xwG wa 1\n" - "wvp va 1\n" - "gmV ng 1\n" - "Rzq qu 1\n" - "Cpw pr 1\n" - "Gyy ny 1\n" - "xzA sz 1\n" - "wGx wa 1\n" - "bqS qu 1\n" - "whR th 1\n" - "jPc ch 1\n" - "iqG qu 1\n" - "djK de 1\n" - "cVk ch 1\n" - "rwT er 1\n" - "Vhn th 1\n" - "Hfw wa 1\n" - "bnJ an 1\n" - "Cpd de 1\n" - "Nmd de 1\n" - "dnO an 1\n" - "qWc qu 1\n" - "aVq qu 1\n" - "qOn qu 1\n" - "Qlr er 1\n" - "qnN qu 1\n" - "rLq qu 1\n" - "wtE th 1\n" - "jgR ng 1\n" - "Yqp qu 1\n" - "Hwg ng 1\n" - "nWk an 1\n" - "wqB qu 1\n" - "fAp pr 1\n" - "hZv th 1\n" - "Kzp sz 1\n" - "fNk ka 1\n" - "Tkd de 1\n" - "uYm qu 1\n" - "kcR ch 1\n" - "xNl le 1\n" - "kHk ka 1\n" - "bJk ka 1\n" - "jjD ij 1\n" - "Nlq qu 1\n" - "dhB th 1\n" - "jXl le 1\n" - "nwB an 1\n" - "Hzb sz 1\n" - "qQz qu 1\n" - "fKc ch 1\n" - "jVw ij 1\n" - "ylU le 1\n" - "Lzj sz 1\n" - "sXu qu 1\n" - "wBw wa 1\n" - "Iqg qu 1\n" - "wjV ij 1\n" - "wxt th 1\n" - "jzK sz 1\n" - "rDd de 1\n" - "uQy qu 1\n" - "qGw qu 1\n" - "tbU th 1\n" - "kUo ka 1\n" - "dVm de 1\n" - "Ddn an 1\n" - "vqC vo 1\n" - "jkZ ij 1\n" - "Lvz va 1\n" - "tPy th 1\n" - "Vfj ij 1\n" - "Qhb th 1\n" - "whB th 1\n" - "Fqf qu 1\n" - "hCv th 1\n" - "Fjf ij 1\n" - "Qfr er 1\n" - "zwF sz 1\n" - "Fwf wa 1\n" - "pvU va 1\n" - "whC th 1\n" - "hTk th 1\n" - "dlQ de 1\n" - "wzL sz 1\n" 
- "zqS qu 1\n" - "qtP th 1\n" - "yhC th 1\n" - "yjB ij 1\n" - "iTd de 1\n" - "kLx ka 1\n" - "Rqi qu 1\n" - "qjS qu 1\n" - "vjI va 1\n" - "pGz sz 1\n" - "wnV an 1\n" - "lQx le 1\n" - "uvS qu 1\n" - "Zge de 1\n" - "gJv ng 1\n" - "Ydb de 1\n" - "wDh th 1\n" - "zwV sz 1\n" - "hNm th 1\n" - "zwQ sz 1\n" - "fRr er 1\n" - "wVr er 1\n" - "nKg an 1\n" - "Tgg ng 1\n" - "bYp pr 1\n" - "lBn an 1\n" - "zjp sz 1\n" - "qAf qu 1\n" - "zmK me 1\n" - "wqK qu 1\n" - "vjT va 1\n" - "Lql qu 1\n" - "snC an 1\n" - "fzY sz 1\n" - "vqU qu 1\n" - "mGb me 1\n" - "fkP ka 1\n" - "wQg ng 1\n" - "Fqt th 1\n" - "bVm me 1\n" - "Wcx ch 1\n" - "wpY wa 1\n" - "lFv va 1\n" - "gwD ng 1\n" - "gWp ng 1\n" - "fjT ij 1\n" - "pFt th 1\n" - "iIp in 1\n" - "tbD th 1\n" - "Xqc qu 1\n" - "Qkc ch 1\n" - "qeZ qu 1\n" - "qPb qu 1\n" - "gwL ng 1\n" - "fHi in 1\n" - "xwP wa 1\n" - "xvB va 1\n" - "jSw ij 1\n" - "pzF sz 1\n" - "wYp wa 1\n" - "dDx de 1\n" - "nBx an 1\n" - "cNv ch 1\n" - "Ubm me 1\n" - "xXu qu 1\n" - "dRl de 1\n" - "dBz de 1\n" - "Xvh th 1\n" - "Xld de 1\n" - "mwY me 1\n" - "whQ th 1\n" - "Mzl le 1\n" - "Aqj qu 1\n" - "uDp qu 1\n" - "cjZ ch 1\n" - "Vkf ka 1\n" - "uGq qu 1\n" - "hBs th 1\n" - "qLh th 1\n" - "tfW th 1\n" - "cPn an 1\n" - "xoN on 1\n" - "Ydx de 1\n" - "Lxk ka 1\n" - "ccZ ch 1\n" - "uJh th 1\n" - "sVp sz 1\n" - "wrE er 1\n" - "xgP ng 1\n" - "hPp th 1\n" - "euU qu 1\n" - "sZh th 1\n" - "qnK qu 1\n" - "Bgh th 1\n" - "slQ le 1\n" - "gxA ng 1\n" - "jLd de 1\n" - "znD an 1\n" - "kXk ka 1\n" - "tfV th 1\n" - "Vwl le 1\n" - "xWd do 1\n" - "xnH an 1\n" - "cOq ch 1\n" - "Lkk ka 1\n" - "Nvy va 1\n" - "xIh th 1\n" - "xkK ka 1\n" - "rMr er 1\n" - "rmQ er 1\n" - "bPn an 1\n" - "fAa an 1\n" - "vQv va 1\n" - "fHr er 1\n" - "Pmv va 1\n" - "vzJ sz 1\n" - "wTg ng 1\n" - "bWc ch 1\n" - "Zwg ng 1\n" - "gKx ng 1\n" - "Gbq qu 1\n" - "wMk ka 1\n" - "Nfx fo 1\n" - "fAo on 1\n" - "dHb de 1\n" - "lxH le 1\n" - "dqO qu 1\n" - "Tlq qu 1\n" - "Yjj ij 1\n" - "Iyh th 1\n" - "uoY qu 1\n" - "mhH th 1\n" - "lMj le 1\n" - 
"fzF sz 1\n" - "frR er 1\n" - "yNl le 1\n" - "aPv an 1\n" - "ywG wa 1\n" - "Cmw me 1\n" - "svK va 1\n" - "srO er 1\n" - "Uhz th 1\n" - "vPn an 1\n" - "zTq qu 1\n" - "kzH sz 1\n" - "Iox on 1\n" - "fQa an 1\n" - "wZr er 1\n" - "nqU an 1\n" - "wPb wa 1\n" - "Tzg ng 1\n" - "pnR an 1\n" - "vfJ va 1\n" - "vyX va 1\n" - "fLz sz 1\n" - "zjP sz 1\n" - "pmR me 1\n" - "ePq qu 1\n" - "jyT ij 1\n" - "mjP ij 1\n" - "fsH sz 1\n" - "vwB va 1\n" - "Ynr an 1\n" - "Tqh th 1\n" - "Lvv va 1\n" - "tCf th 1\n" - "wpB wa 1\n" - "wXh th 1\n" - "mhX th 1\n" - "kYd de 1\n" - "Dpg ng 1\n" - "ygR ng 1\n" - "Rfp pr 1\n" - "Jyq qu 1\n" - "yxq qu 1\n" - "pPc ch 1\n" - "aOj an 1\n" - "Zww wa 1\n" - "fFx fo 1\n" - "bDh th 1\n" - "qKx qu 1\n" - "wHx wa 1\n" - "hrX th 1\n" - "rFh th 1\n" - "lLx le 1\n" - "aYj an 1\n" - "kCs sz 1\n" - "lWt th 1\n" - "pdY de 1\n" - "swI sz 1\n" - "bLw wa 1\n" - "Mzx sz 1\n" - "cKk ch 1\n" - "hMz th 1\n" - "Jcu qu 1\n" - "wjB ij 1\n" - "Mqe qu 1\n" - "rxW er 1\n" - "gZv ng 1\n" - "Rfn an 1\n" - "pwD wa 1\n" - "lhX th 1\n" - "fVg ng 1\n" - "vfW va 1\n" - "lxP le 1\n" - "Yyj ij 1\n" - "hPg th 1\n" - "Uxq qu 1\n" - "bdO de 1\n" - "bRz sz 1\n" - "dXq qu 1\n" - "Rjq qu 1\n" - "fgV ng 1\n" - "xAf fo 1\n" - "wXn an 1\n" - "Kvv va 1\n" - "svL va 1\n" - "fWv va 1\n" - "drQ er 1\n" - "Lpv va 1\n" - "qKp qu 1\n" - "eCv er 1\n" - "xwH wa 1\n" - "cvC ch 1\n" - "kUf ka 1\n" - "oPx on 1\n" - "tjJ th 1\n" - "bBk ka 1\n" - "vpI va 1\n" - "gzY ng 1\n" - "oZs on 1\n" - "pKc ch 1\n" - "xKs sz 1\n" - "qcH qu 1\n" - "Vfm me 1\n" - "svM va 1\n" - "Vjx ij 1\n" - "lVw le 1\n" - "wWf wa 1\n" - "Xpx pr 1\n" - "lcA ch 1\n" - "tLc th 1\n" - "lDg ng 1\n" - "Xjh th 1\n" - "Xdh th 1\n" - "rKm er 1\n" - "fnW an 1\n" - "Tcb ch 1\n" - "qgX qu 1\n" - "qZo qu 1\n" - "eJv er 1\n" - "Yxy ny 1\n" - "kfM ka 1\n" - "qKe qu 1\n" - "vMf va 1\n" - "dgY de 1\n" - "gGd ng 1\n" - "Vcj ch 1\n" - "Sfw wa 1\n" - "xDk ka 1\n" - "fTc ch 1\n" - "qRw qu 1\n" - "tOa th 1\n" - "guQ qu 1\n" - "mgJ ng 1\n" - "bRd de 1\n" - "kYq 
qu 1\n" - "xwD wa 1\n" - "vXs va 1\n" - "zlC le 1\n" - "kmH ka 1\n" - "jhZ th 1\n" - "Wxo on 1\n" - "vtX th 1\n" - "iWm in 1\n" - "qVx qu 1\n" - "Hjv va 1\n" - "Pxs sz 1\n" - "bYi in 1\n" - "wgG ng 1\n" - "Jvs va 1\n" - "gHh th 1\n" - "Kzy sz 1\n" - "xjI ij 1\n" - "uVb qu 1\n" - "Pzq qu 1\n" - "hxC th 1\n" - "wPy wa 1\n" - "bXh th 1\n" - "jzY sz 1\n" - "fqJ qu 1\n" - "qxX qu 1\n" - "vfB va 1\n" - "pPm me 1\n" - "bpC pr 1\n" - "hFv th 1\n" - "Cql qu 1\n" - "dwI de 1\n" - "Tcq ch 1\n" - "Zjx ij 1\n" - "wOz sz 1\n" - "Jfj ij 1\n" - "iZr in 1\n" - "Vxf fo 1\n" - "Lpx pr 1\n" - "fHt th 1\n" - "hFy th 1\n" - "lcD ch 1\n" - "vMc ch 1\n" - "xyU ny 1\n" - "mGq qu 1\n" - "wJv va 1\n" - "zKs sz 1\n" - "lMm le 1\n" - "mqU qu 1\n" - "vHg ng 1\n" - "lGc ch 1\n" - "eIj te 1\n" - "Vdh th 1\n" - "rCk er 1\n" - "wQh th 1\n" - "Ywf wa 1\n" - "zUf sz 1\n" - "qZs qu 1\n" - "vNt th 1\n" - "Dxj ij 1\n" - "cYr ch 1\n" - "dKt th 1\n" - "vDp va 1\n" - "qnF an 1\n" - "Lsj sz 1\n" - "xHv va 1\n" - "jCt th 1\n" - "bnX an 1\n" - "fBx fo 1\n" - "jVt th 1\n" - "qOy qu 1\n" - "uqD qu 1\n" - "Rfw wa 1\n" - "cjS ch 1\n" - "ufX qu 1\n" - "fvI va 1\n" - "Owx wa 1\n" - "gXw ng 1\n" - "oCv va 1\n" - "Mrx er 1\n" - "cIb ch 1\n" - "fJj ij 1\n" - "kqM qu 1\n" - "zqL qu 1\n" - "rPz er 1\n" - "iwW in 1\n" - "cMp ch 1\n" - "lVt th 1\n" - "vTb va 1\n" - "Iwf wa 1\n" - "xlZ le 1\n" - "vjQ va 1\n" - "iPb in 1\n" - "Whk th 1\n" - "Wvh th 1\n" - "mzD sz 1\n" - "Hqk qu 1\n" - "jqB qu 1\n" - "qhM th 1\n" - "prR er 1\n" - "nlV an 1\n" - "qYk qu 1\n" - "zVp sz 1\n" - "vpO va 1\n" - "Rvr er 1\n" - "scY ch 1\n" - "qdA qu 1\n" - "vLk va 1\n" - "svI va 1\n" - "mdE de 1\n" - "hBx th 1\n" - "Zrv er 1\n" - "jWt th 1\n" - "fTx fo 1\n" - "Ypc ch 1\n" - "mMk ka 1\n" - "fdq qu 1\n" - "hcK th 1\n" - "xCy ny 1\n" - "fVr er 1\n" - "aPx an 1\n" - "fpU pr 1\n" - "Vkb ka 1\n" - "tbM th 1\n" - "zQt th 1\n" - "gxV ng 1\n" - "Sfg ng 1\n" - "pYl le 1\n" - "gWt th 1\n" - "xEb be 1\n" - "mXy me 1\n" - "lnQ an 1\n" - "qmL qu 1\n" - "Vky ka 
1\n" - "wwX wa 1\n" - "Uwx wa 1\n" - "cfB ch 1\n" - "Gxp pr 1\n" - "fpL pr 1\n" - "jTx ij 1\n" - "cZv ch 1\n" - "zlK le 1\n" - "hBc th 1\n" - "Wqi qu 1\n" - "lGs le 1\n" - "Dqz qu 1\n" - "Jgw ng 1\n" - "gCx ng 1\n" - "cNj ch 1\n" - "cqJ ch 1\n" - "blD le 1\n" - "qXr qu 1\n" - "kXr er 1\n" - "khK th 1\n" - "xZh th 1\n" - "jSs sz 1\n" - "yjx ij 1\n" - "Hwf wa 1\n" - "fXs sz 1\n" - "qgz qu 1\n" - "Xdw de 1\n" - "hcN th 1\n" - "jJd de 1\n" - "cmQ ch 1\n" - "mvV va 1\n" - "Nqe qu 1\n" - "zxS sz 1\n" - "kGt th 1\n" - "tFg th 1\n" - "fzM sz 1\n" - "Xrr er 1\n" - "dcJ ch 1\n" - "dQa an 1\n" - "qNy qu 1\n" - "hxT th 1\n" - "twB th 1\n" - "Bqj qu 1\n" - "prK er 1\n" - "zdC de 1\n" - "yAo on 1\n" - "dLt st 1\n" - "pgF ng 1\n" - "vgW ng 1\n" - "vpN va 1\n" - "Ivx va 1\n" - "vYl le 1\n" - "xRg ng 1\n" - "jPu qu 1\n" - "Oqr qu 1\n" - "vjg ng 1\n" - "dpH de 1\n" - "yDp pr 1\n" - "xfJ fo 1\n" - "fqV qu 1\n" - "eBf er 1\n" - "Zkw ka 1\n" - "qHp qu 1\n" - "Aqz qu 1\n" - "bNw wa 1\n" - "fjX ij 1\n" - "fqS qu 1\n" - "ljK le 1\n" - "Gkf ka 1\n" - "bSf be 1\n" - "Mxg ng 1\n" - "Dqm qu 1\n" - "hKp th 1\n" - "wFq qu 1\n" - "wmJ me 1\n" - "vzT va 1\n" - "rhJ th 1\n" - "nHf an 1\n" - "jJo on 1\n" - "qWy qu 1\n" - "Wvk va 1\n" - "gkB ng 1\n" - "mEw me 1\n" - "Ugx ng 1\n" - "Qmy me 1\n" - "Ljq qu 1\n" - "bGp pr 1\n" - "lHg ng 1\n" - "cGg ch 1\n" - "gFk ng 1\n" - "xnV an 1\n" - "eFy er 1\n" - "Nfm me 1\n" - "hSf th 1\n" - "gXj ng 1\n" - "xHf fo 1\n" - "uqj qu 1\n" - "wXa an 1\n" - "vcT ch 1\n" - "uJw qu 1\n" - "pWx pr 1\n" - "qpQ qu 1\n" - "hqE th 1\n" - "Yfn an 1\n" - "jrI er 1\n" - "cgK ch 1\n" - "yyP ny 1\n" - "Zmg ng 1\n" - "Lkc ch 1\n" - "eUq qu 1\n" - "jrY er 1\n" - "kFs sz 1\n" - "sUq qu 1\n" - "jlZ le 1\n" - "cnV ch 1\n" - "aPj an 1\n" - "mjE ij 1\n" - "pZl le 1\n" - "uFs qu 1\n" - "Knf an 1\n" - "Fpc ch 1\n" - "hfR th 1\n" - "qnC an 1\n" - "Dlq qu 1\n" - "frM er 1\n" - "sfB sz 1\n" - "Gxk ka 1\n" - "Fkj ij 1\n" - "vGk va 1\n" - "gRm ng 1\n" - "rWf er 1\n" - "rYv er 1\n" - "qEd qu 1\n" 
- "qHr qu 1\n" - "Smv va 1\n" - "lFp le 1\n" - "kDs sz 1\n" - "dSd de 1\n" - "rLw er 1\n" - "cnZ an 1\n" - "Wjp ij 1\n" - "pTq qu 1\n" - "Kcx ch 1\n" - "vKs va 1\n" - "bcK ch 1\n" - "vwy va 1\n" - "Ujx ij 1\n" - "Qvr er 1\n" - "dcV ch 1\n" - "xVf fo 1\n" - "uIk qu 1\n" - "jlN le 1\n" - "vwL va 1\n" - "fWp pr 1\n" - "Pxr er 1\n" - "rRb er 1\n" - "bfD be 1\n" - "yCx ny 1\n" - "nJs an 1\n" - "dCm de 1\n" - "cbG ch 1\n" - "gCf ng 1\n" - "tmV th 1\n" - "qeC qu 1\n" - "knS an 1\n" - "gwY ng 1\n" - "Wjl le 1\n" - "mIw me 1\n" - "qjW qu 1\n" - "gwv ng 1\n" - "qJw wa 1\n" - "cnA an 1\n" - "bBm me 1\n" - "gFw ng 1\n" - "wDn an 1\n" - "qgL qu 1\n" - "lUa an 1\n" - "hDn th 1\n" - "kHx ka 1\n" - "wXm me 1\n" - "qyY qu 1\n" - "pkD ka 1\n" - "sLz st 1\n" - "zxF sz 1\n" - "vMx va 1\n" - "plR le 1\n" - "pwZ pr 1\n" - "pYd de 1\n" - "zfL sz 1\n" - "ztK th 1\n" - "mTm me 1\n" - "dCp de 1\n" - "bwx wa 1\n" - "xCs sz 1\n" - "tfF th 1\n" - "Lnq an 1\n" - "dYi in 1\n" - "pWq qu 1\n" - "oIx on 1\n" - "ywE wa 1\n" - "wNk ka 1\n" - "jwO ij 1\n" - "xZz sz 1\n" - "wGm me 1\n" - "cVw ch 1\n" - "bjK ij 1\n" - "Gzg ng 1\n" - "kwz sz 1\n" - "pBn an 1\n" - "cTx ch 1\n" - "rHq qu 1\n" - "Wsg ng 1\n" - "xEh th 1\n" - "yrK er 1\n" - "mMb me 1\n" - "pHw pr 1\n" - "cjN ch 1\n" - "nXn an 1\n" - "bwO wa 1\n" - "flB le 1\n" - "Qqj qu 1\n" - "mKv va 1\n" - "fFn an 1\n" - "wfG wa 1\n" - "wfB wa 1\n" - "Jqk qu 1\n" - "bwK wa 1\n" - "hhI th 1\n" - "lUe er 1\n" - "wFd de 1\n" - "vkT va 1\n" - "xLg ng 1\n" - "fhB th 1\n" - "wmV me 1\n" - "tmF th 1\n" - "Rtc th 1\n" - "dyY de 1\n" - "jyw ij 1\n" - "kRf ka 1\n" - "fXz sz 1\n" - "Znz an 1\n" - "wqX qu 1\n" - "uMx qu 1\n" - "gwV ng 1\n" - "Pbh th 1\n" - "dcM ch 1\n" - "nPz an 1\n" - "cwU ch 1\n" - "vJt th 1\n" - "gyQ ng 1\n" - "fXi in 1\n" - "bsZ sz 1\n" - "Bqi qu 1\n" - "vGn an 1\n" - "knN an 1\n" - "wYq qu 1\n" - "tTb th 1\n" - "bmP me 1\n" - "jpZ ij 1\n" - "Mqw qu 1\n" - "vjM va 1\n" - "qVh th 1\n" - "juY qu 1\n" - "rBk er 1\n" - "juI qu 1\n" - "zEq qu 1\n" - 
"zWg ng 1\n" - "fzH sz 1\n" - "tLx th 1\n" - "Ncf ch 1\n" - "kfN ka 1\n" - "uUo qu 1\n" - "fCs sz 1\n" - "tCv th 1\n" - "sUy sz 1\n" - "pBf pr 1\n" - "jBz sz 1\n" - "vDc ch 1\n" - "qmx qu 1\n" - "qtK th 1\n" - "qcS ch 1\n" - "vPt th 1\n" - "gQm ng 1\n" - "hzR th 1\n" - "dcL ch 1\n" - "xrI er 1\n" - "dvN va 1\n" - "Cwv va 1\n" - "xhQ th 1\n" - "Gzu qu 1\n" - "pdO de 1\n" - "Bqr qu 1\n" - "vLn an 1\n" - "lxf le 1\n" - "vYk va 1\n" - "wSq qu 1\n" - "pkS ka 1\n" - "zKg ng 1\n" - "tPm th 1\n" - "Pmj ij 1\n" - "lWu qu 1\n" - "Xuu qu 1\n" - "jcX ch 1\n" - "xzQ sz 1\n" - "Gzw sz 1\n" - "ePm er 1\n" - "fwW wa 1\n" - "qwA qu 1\n" - "vQt th 1\n" - "bxP be 1\n" - "dmD de 1\n" - "awQ an 1\n" - "fVf fo 1\n" - "bwY wa 1\n" - "Zxt th 1\n" - "Xhk th 1\n" - "gYk ng 1\n" - "zCf sz 1\n" - "yfQ ny 1\n" - "zGw sz 1\n" - "gvE ng 1\n" - "gCv ng 1\n" - "oPf on 1\n" - "zXi in 1\n" - "hvI th 1\n" - "hzS th 1\n" - "mfX me 1\n" - "dPd de 1\n" - "Lrf er 1\n" - "lrG er 1\n" - "mYf me 1\n" - "hNj th 1\n" - "qAj qu 1\n" - "sxQ st 1\n" - "kTl le 1\n" - "qOf qu 1\n" - "Jdx de 1\n" - "swK sz 1\n" - "jQb ij 1\n" - "Dqp qu 1\n" - "cWv ch 1\n" - "dxE de 1\n" - "sXj sz 1\n" - "nvB an 1\n" - "wXf wa 1\n" - "Cqi qu 1\n" - "bzW sz 1\n" - "rRf er 1\n" - "mZj ij 1\n" - "bnF an 1\n" - "qaG an 1\n" - "Bqs qu 1\n" - "lMn an 1\n" - "wHp pr 1\n" - "Ljc ch 1\n" - "Mwf wa 1\n" - "pzK sz 1\n" - "mPb me 1\n" - "qjE qu 1\n" - "wRr er 1\n" - "xZf fo 1\n" - "nqG an 1\n" - "vVb va 1\n" - "pjC ij 1\n" - "uHl qu 1\n" - "jDn an 1\n" - "pqX qu 1\n" - "pqk qu 1\n" - "xgU ng 1\n" - "wJx wa 1\n" - "znK an 1\n" - "rhB th 1\n" - "vDq qu 1\n" - "sJc ch 1\n" - "Xkh th 1\n" - "lnJ an 1\n" - "bRq qu 1\n" - "fzA sz 1\n" - "bQe er 1\n" - "Txw wa 1\n" - "bkG ka 1\n" - "ywZ wa 1\n" - "zWc ch 1\n" - "lhL th 1\n" - "gmF ng 1\n" - "sfQ sz 1\n" - "zmG sz 1\n" - "Ogz ng 1\n" - "xuA qu 1\n" - "qAq qu 1\n" - "zDw sz 1\n" - "lVu qu 1\n" - "xRw wa 1\n" - "xmM me 1\n" - "pxB pr 1\n" - "ztT th 1\n" - "kzJ sz 1\n" - "nFz an 1\n" - "uVz qu 1\n" - "pnQ 
an 1\n" - "pGt th 1\n" - "Xdn an 1\n" - "fVz sz 1\n" - "Mhg th 1\n" - "Xqo qu 1\n" - "sHq qu 1\n" - "jwC ij 1\n" - "vkG va 1\n" - "Xkx ka 1\n" - "tRg th 1\n" - "nvV an 1\n" - "qwG qu 1\n" - "Vhh th 1\n" - "zwO sz 1\n" - "qQb qu 1\n" - "crR ch 1\n" - "Mrq qu 1\n" - "oQe er 1\n" - "mBt th 1\n" - "vUy va 1\n" - "twW th 1\n" - "Qgn an 1\n" - "Nxu qu 1\n" - "qhF th 1\n" - "xpX pr 1\n" - "fvD va 1\n" - "Cvy va 1\n" - "oHj on 1\n" - "Qqo qu 1\n" - "vYd de 1\n" - "xhV th 1\n" - "fZf fo 1\n" - "yKm me 1\n" - "xYq qu 1\n" - "fcU ch 1\n" - "qEp qu 1\n" - "jXd de 1\n" - "mlQ le 1\n" - "Ggz ng 1\n" - "cLp ch 1\n" - "yxU ny 1\n" - "gvJ ng 1\n" - "wqD qu 1\n" - "vsN sz 1\n" - "Ijf ij 1\n" - "jbJ ij 1\n" - "bMx be 1\n" - "kXs sz 1\n" - "grT ng 1\n" - "wOd de 1\n" - "pGw pr 1\n" - "Gkd de 1\n" - "qCj qu 1\n" - "hqY th 1\n" - "rDp er 1\n" - "nQt th 1\n" - "kdV de 1\n" - "bgS ng 1\n" - "Tqo qu 1\n" - "fEj ij 1\n" - "hZs th 1\n" - "jYn an 1\n" - "bPx be 1\n" - "hgY th 1\n" - "Pvy va 1\n" - "fxK fo 1\n" - "Hww wa 1\n" - "xRk ka 1\n" - "dmP de 1\n" - "mcY ch 1\n" - "bxR be 1\n" - "Lsl le 1\n" - "hRl th 1\n" - "iwQ in 1\n" - "Wqx qu 1\n" - "kfV ka 1\n" - "qwN qu 1\n" - "Qpv va 1\n" - "mrO er 1\n" - "iFc ti 1\n" - "wzD sz 1\n" - "qbF qu 1\n" - "xfS fo 1\n" - "Pqh th 1\n" - "xYb be 1\n" - "lDh th 1\n" - "vtG th 1\n" - "Xzu qu 1\n" - "xjK ij 1\n" - "jDx ij 1\n" - "nCj an 1\n" - "mCk ka 1\n" - "qxP qu 1\n" - "oMv on 1\n" - "cgY ch 1\n" - "Wqt th 1\n" - "kkQ ka 1\n" - "tqO th 1\n" - "jnC an 1\n" - "fGq qu 1\n" - "Bfv va 1\n" - "vYi in 1\n" - "pcL ch 1\n" - "Fgp ng 1\n" - "jtR th 1\n" - "vhF th 1\n" - "wUi in 1\n" - "nNj an 1\n" - "jTw ij 1\n" - "qsM qu 1\n" - "aJg an 1\n" - "jQe er 1\n" - "Gnj an 1\n" - "fmM me 1\n" - "zqM qu 1\n" - "gjZ ng 1\n" - "nxH an 1\n" - "cdO ch 1\n" - "aAx an 1\n" - "tUv th 1\n" - "hXk th 1\n" - "qBx qu 1\n" - "tgK th 1\n" - "fZy ny 1\n" - "Jkx ka 1\n" - "pvD va 1\n" - "bmT me 1\n" - "oYx on 1\n" - "hwV th 1\n" - "mjB ij 1\n" - "bYn an 1\n" - "iHx in 1\n" - "lYh th 
1\n" - "qCi in 1\n" - "fhR th 1\n" - "nDf an 1\n" - "hCd th 1\n" - "lxB le 1\n" - "eXj er 1\n" - "fvW va 1\n" - "ccW ch 1\n" - "dTc ch 1\n" - "sqA qu 1\n" - "fNt th 1\n" - "zkM sz 1\n" - "lRv le 1\n" - "qnI an 1\n" - "xwC wa 1\n" - "zqY qu 1\n" - "yQb be 1\n" - "xrC er 1\n" - "xFm me 1\n" - "oeQ er 1\n" - "mLl le 1\n" - "jwT ij 1\n" - "fwD wa 1\n" - "vpE va 1\n" - "flY le 1\n" - "sRg ng 1\n" - "vSd de 1\n" - "wuR qu 1\n" - "wrI er 1\n" - "Ysn st 1\n" - "Vhj th 1\n" - "Cqh th 1\n" - "Ygb ng 1\n" - "hPq th 1\n" - "mkB ka 1\n" - "tRq th 1\n" - "ajQ an 1\n" - "hcR th 1\n" - "vDw va 1\n" - "pQn an 1\n" - "xeU er 1\n" - "vcM ch 1\n" - "zVc ch 1\n" - "bRh th 1\n" - "uFx qu 1\n" - "fbW be 1\n" - "uUv qu 1\n" - "Nhv th 1\n" - "Ykx ka 1\n" - "Wtp th 1\n" - "Mzj sz 1\n" - "npT in 1\n" - "Xqk qu 1\n" - "xwN wa 1\n" - "hXw th 1\n" - "zLb sz 1\n" - "Gxy ny 1\n" - "dDq qu 1\n" - "Bfy ny 1\n" - "fkx ka 1\n" - "jOq qu 1\n" - "Ddk de 1\n" - "Njp ij 1\n" - "xjJ ij 1\n" - "qhS th 1\n" - "Qwm me 1\n" - "yWj ij 1\n" - "nFv an 1\n" - "pLb pr 1\n" - "qbB qu 1\n" - "smX sz 1\n" - "tnZ th 1\n" - "zQh th 1\n" - "Fzb sz 1\n" - "cNb ch 1\n" - "hpV th 1\n" - "Bxz sz 1\n" - "xgG ng 1\n" - "Rlj le 1\n" - "iHq in 1\n" - "swN sz 1\n" - "Njv va 1\n" - "wPk ka 1\n" - "oRv on 1\n" - "pJs sz 1\n" - "kZw ka 1\n" - "vVs st 1\n" - "Vbw wa 1\n" - "Ffh th 1\n" - "mzQ sz 1\n" - "Gvl le 1\n" - "Pgq qu 1\n" - "lPp le 1\n" - "vCv va 1\n" - "kNf ka 1\n" - "bmD me 1\n" - "mWt th 1\n" - "slF le 1\n" - "qiX in 1\n" - "yRt th 1\n" - "lqx qu 1\n" - "qlj qu 1\n" - "sfZ sz 1\n" - "Wfy ny 1\n" - "vrO er 1\n" - "gxT ng 1\n" - "lwE le 1\n" - "qdJ qu 1\n" - "Ypk ka 1\n" - "Qpf pr 1\n" - "Znw an 1\n" - "bfJ be 1\n" - "qQy qu 1\n" - "qAy qu 1\n" - "aqW an 1\n" - "qqI qu 1\n" - "Lwg ng 1\n" - "Nnw an 1\n" - "cLv ch 1\n" - "Wtx th 1\n" - "qcq ch 1\n" - "sjR sz 1\n" - "lWn an 1\n" - "Zmx me 1\n" - "qZg qu 1\n" - "tYz th 1\n" - "gVx ng 1\n" - "mXt th 1\n" - "nwJ an 1\n" - "jwZ ij 1\n" - "lwL le 1\n" - "eGx er 1\n" - "Sqk qu 1\n" 
- "gBg ng 1\n" - "zsS sz 1\n" - "knQ an 1\n" - "Nnf an 1\n" - "qmT qu 1\n" - "Sqp qu 1\n" - "ffQ fo 1\n" - "Vcv ch 1\n" - "fmD me 1\n" - "zYg ng 1\n" - "bAx be 1\n" - "nbW an 1\n" - "gJm ng 1\n" - "Jwn an 1\n" - "mxJ me 1\n" - "xbC be 1\n" - "Rbq qu 1\n" - "xZc ch 1\n" - "bJy be 1\n" - "Xyk ka 1\n" - "zkV sz 1\n" - "uoF qu 1\n" - "bcU ch 1\n" - "cZq ch 1\n" - "rPm er 1\n" - "rGn an 1\n" - "lcL ch 1\n" - "rVt th 1\n" - "Cgw ng 1\n" - "Ctq th 1\n" - "eGv er 1\n" - "Rzs st 1\n" - "Qhz th 1\n" - "sLv va 1\n" - "Vqm qu 1\n" - "ydJ de 1\n" - "xVr er 1\n" - "tLk th 1\n" - "qfy qu 1\n" - "wxV wa 1\n" - "yRq qu 1\n" - "Vxq qu 1\n" - "qYz qu 1\n" - "zhM th 1\n" - "mLn an 1\n" - "Zvt th 1\n" - "Fvm va 1\n" - "hcM th 1\n" - "Mwp wa 1\n" - "cTg ch 1\n" - "lXr er 1\n" - "fQe er 1\n" - "Jbw wa 1\n" - "yfG ny 1\n" - "phK th 1\n" - "gjH ng 1\n" - "Wdg de 1\n" - "pPn an 1\n" - "Bwg ng 1\n" - "znB an 1\n" - "fwJ wa 1\n" - "utQ th 1\n" - "cjC ch 1\n" - "fVd de 1\n" - "cTm ch 1\n" - "wMv va 1\n" - "Kgk ng 1\n" - "nRd an 1\n" - "mMt th 1\n" - "xjQ ij 1\n" - "qYt th 1\n" - "sYj st 1\n" - "jNc ch 1\n" - "qXt th 1\n" - "wzB sz 1\n" - "Sjq qu 1\n" - "qtF th 1\n" - "wYi in 1\n" - "glT ng 1\n" - "Uug ng 1\n" - "uOp qu 1\n" - "iBx in 1\n" - "Rqt th 1\n" - "zWj sz 1\n" - "Hcx ch 1\n" - "jNd de 1\n" - "zQr er 1\n" - "iHd in 1\n" - "Wpx pr 1\n" - "nfY an 1\n" - "Rkz sz 1\n" - "Kqg qu 1\n" - "Gfv va 1\n" - "krC er 1\n" - "Whc th 1\n" - "ljM le 1\n" - "yxG ny 1\n" - "fpW pr 1\n" - "bcF ch 1\n" - "krx er 1\n" - "uDt th 1\n" - "Fzo on 1\n" - "wPn an 1\n" - "Lfj ij 1\n" - "Bkp ka 1\n" - "Xkq qu 1\n" - "jxH ij 1\n" - "vIj va 1\n" - "gTc ch 1\n" - "hEj th 1\n" - "fqB qu 1\n" - "jlD le 1\n" - "tFf th 1\n" - "Nfw wa 1\n" - "Fqe qu 1\n" - "Tzp sz 1\n" - "sJr er 1\n" - "qIt th 1\n" - "dFb de 1\n" - "qzE qu 1\n" - "mVv va 1\n" - "Vqa an 1\n" - "bqM qu 1\n" - "mdJ de 1\n" - "dIp de 1\n" - "Znx an 1\n" - "jkK ij 1\n" - "rfQ er 1\n" - "xkI ku 1\n" - "fIo ro 1\n" - "lqV qu 1\n" - "Qpd de 1\n" - "pAx pr 1\n" - 
"rrQ er 1\n" - "bIu qu 1\n" - "xDw wa 1\n" - "oHx on 1\n" - "wJw wa 1\n" - "Cqv qu 1\n" - "yvB va 1\n" - "yqU qu 1\n" - "rLx er 1\n" - "Fzx sz 1\n" - "dZf de 1\n" - "Nqh th 1\n" - "Rnz an 1\n" - "hTc th 1\n" - "bVb be 1\n" - "Fdm de 1\n" - "vfv va 1\n" - "hwS th 1\n" - "zPt th 1\n" - "Gxv va 1\n" - "Fvt th 1\n" - "mZr er 1\n" - "zVr er 1\n" - "mBc ch 1\n" - "fXq qu 1\n" - "Plw le 1\n" - "Nlx le 1\n" - "jCd de 1\n" - "Kwv va 1\n" - "Jqa an 1\n" - "zGs st 1\n" - "fuV qu 1\n" - "pzL sz 1\n" - "iFx in 1\n" - "fTm me 1\n" - "yWd de 1\n" - "cHv ch 1\n" - "fFk ka 1\n" - "mqd qu 1\n" - "aQk an 1\n" - "uDf qu 1\n" - "Vbf be 1\n" - "pgJ ng 1\n" - "fkN ka 1\n" - "pBm me 1\n" - "Bdv de 1\n" - "jmW ij 1\n" - "Jvv va 1\n" - "Xpk ka 1\n" - "qQc ch 1\n" - "kdG de 1\n" - "qkP qu 1\n" - "cSd ch 1\n" - "Fdc ch 1\n" - "qgK qu 1\n" - "qdH qu 1\n" - "uNv qu 1\n" - "eVt th 1\n" - "dfA de 1\n" - "Hzy sz 1\n" - "lWc ch 1\n" - "vxH va 1\n" - "hxW th 1\n" - "Khp th 1\n" - "xQb be 1\n" - "pwT pr 1\n" - "Lwf wa 1\n" - "zDq qu 1\n" - "kxK ka 1\n" - "mtY th 1\n" - "bhT th 1\n" - "ywR wa 1\n" - "jIa an 1\n" - "Wze er 1\n" - "hqK th 1\n" - "flZ le 1\n" - "qMi in 1\n" - "wpR wa 1\n" - "qHh th 1\n" - "aOw an 1\n" - "dkU de 1\n" - "vRr er 1\n" - "vjX va 1\n" - "cuQ ch 1\n" - "qmJ qu 1\n" - "uuJ ou 1\n" - "yWx ny 1\n" - "hUf th 1\n" - "vzP va 1\n" - "rSx er 1\n" - "qgy qu 1\n" - "Rzf sz 1\n" - "zjB sz 1\n" - "Sjx ij 1\n" - "xfA fo 1\n" - "fHj ij 1\n" - "qkB qu 1\n" - "cdF ch 1\n" - "fWj ij 1\n" - "jbA ij 1\n" - "Bmb me 1\n" - "yjg ng 1\n" - "rxZ er 1\n" - "Vmr er 1\n" - "iIq in 1\n" - "Wgl ng 1\n" - "mRp me 1\n" - "wvS va 1\n" - "Uvy va 1\n" - "ypQ pr 1\n" - "vFw vo 1\n" - "fqE qu 1\n" - "swJ st 1\n" - "Jrx er 1\n" - "cxE ch 1\n" - "lZk le 1\n" - "fVn an 1\n" - "bhZ th 1\n" - "jhR th 1\n" - "vSq qu 1\n" - "yQz sz 1\n" - "fHv va 1\n" - "vuN qu 1\n" - "jpG ij 1\n" - "Pkz sz 1\n" - "gQb ng 1\n" - "pFs st 1\n" - "Gjq qu 1\n" - "hsK th 1\n" - "twx th 1\n" - "yyQ ny 1\n" - "dqF qu 1\n" - "bHh th 1\n" - "qMq 
qu 1\n" - "qKv qu 1\n" - "zLg ng 1\n" - "jmO ij 1\n" - "wBk ka 1\n" - "pjQ ij 1\n" - "xZv va 1\n" - "qIu un 1\n" - "ycY ch 1\n" - "mDf me 1\n" - "yJs st 1\n" - "Isx st 1\n" - "Qqr qu 1\n" - "Fkw ka 1\n" - "Cpj ij 1\n" - "Yvq qu 1\n" - "zjG sz 1\n" - "gGc ch 1\n" - "Xdm de 1\n" - "hBv th 1\n" - "Wxj ij 1\n" - "Ywb ow 1\n" - "Vtq th 1\n" - "tjY th 1\n" - "jDj ij 1\n" - "uGd qu 1\n" - "wvF va 1\n" - "uqg qu 1\n" - "Rwp pr 1\n" - "Bgb ng 1\n" - "mnU an 1\n" - "dpI de 1\n" - "wKd de 1\n" - "yXz sz 1\n" - "kLd de 1\n" - "gYx ng 1\n" - "qxk qu 1\n" - "Hhy th 1\n" - "fpJ pr 1\n" - "cVc ch 1\n" - "kVv va 1\n" - "Jzs st 1\n" - "nDw an 1\n" - "tjF th 1\n" - "bZj ij 1\n" - "mqL qu 1\n" - "hFt th 1\n" - "nNw an 1\n" - "wFv va 1\n" - "gHc ch 1\n" - "qRx qu 1\n" - "Jxh th 1\n" - "Vpv va 1\n" - "nMk an 1\n" - "tjN th 1\n" - "fhQ th 1\n" - "bpD pr 1\n" - "Dfg ng 1\n" - "jyO ij 1\n" - "jhV th 1\n" - "kVk ka 1\n" - "nKc an 1\n" - "jkJ ij 1\n" - "cwS ch 1\n" - "oDf on 1\n" - "mkY ka 1\n" - "gdV ng 1\n" - "Xhb th 1\n" - "jUq qu 1\n" - "aJf an 1\n" - "Qxg ng 1\n" - "xzS sz 1\n" - "vUw va 1\n" - "hTj th 1\n" - "oVt th 1\n" - "zdq qu 1\n" - "fHs st 1\n" - "xKk ka 1\n" - "bFc ch 1\n" - "gWq qu 1\n" - "Yqa an 1\n" - "dmH de 1\n" - "Ttq th 1\n" - "iQc ch 1\n" - "jFh ij 1\n" - "fcY ch 1\n" - "fsR st 1\n" - "iWg in 1\n" - "Xyj ij 1\n" - "Xjs st 1\n" - "xpb pr 1\n" - "lzY le 1\n" - "pzg ng 1\n" - "dVw de 1\n" - "Ijc ch 1\n" - "fvq qu 1\n" - "Vnb an 1\n" - "zdH de 1\n" - "cDd ch 1\n" - "wqI qu 1\n" - "yfU ny 1\n" - "qoH qu 1\n" - "xkw ka 1\n" - "Kck ch 1\n" - "mUq qu 1\n" - "zWm sz 1\n" - "Bfj ij 1\n" - "rQj er 1\n" - "qeW qu 1\n" - "qpC qu 1\n" - "oqM qu 1\n" - "pzO sz 1\n" - "cjQ ch 1\n" - "zTx sz 1\n" - "gRw ng 1\n" - "kdQ de 1\n" - "wbQ wa 1\n" - "Qpj ij 1\n" - "zIc ch 1\n" - "yxN ny 1\n" - "nCk an 1\n" - "Jqz qu 1\n" - "dEq qu 1\n" - "gdE ng 1\n" - "wCg ng 1\n" - "pQt th 1\n" - "vKe er 1\n" - "Tjm ij 1\n" - "Zcy ch 1\n" - "kmR ka 1\n" - "cTp ch 1\n" - "bqE qu 1\n" - "vvZ va 1\n" - "cLw ch 
1\n" - "oIw on 1\n" - "xjG ij 1\n" - "vtU th 1\n" - "hcH th 1\n" - "xgT ng 1\n" - "vqR qu 1\n" - "wuM qu 1\n" - "xsY st 1\n" - "jCu qu 1\n" - "Fbn an 1\n" - "cqH ch 1\n" - "Xjz ij 1\n" - "fgR ng 1\n" - "yiX in 1\n" - "qnO an 1\n" - "wmN me 1\n" - "wgH ng 1\n" - "tbZ th 1\n" - "Xks st 1\n" - "pzC po 1\n" - "lfX le 1\n" - "qBu un 1\n" - "mLw me 1\n" - "pmY me 1\n" - "xqE qu 1\n" - "rjY er 1\n" - "vrH er 1\n" - "Iuf qu 1\n" - "yfD ny 1\n" - "clG ch 1\n" - "cdZ ch 1\n" - "eTd er 1\n" - "lXv le 1\n" - "kpV ka 1\n" - "sZq qu 1\n" - "Wxc ch 1\n" - "vmJ va 1\n" - "hkE th 1\n" - "pUw pr 1\n" - "Cqd qu 1\n" - "wCn an 1\n" - "pxQ pr 1\n" - "Ywp pr 1\n" - "xwb wa 1\n" - "Wjm ij 1\n" - "zqQ qu 1\n" - "gTp ng 1\n" - "uZv qu 1\n" - "mdH de 1\n" - "juQ qu 1\n" - "gVm ng 1\n" - "zjY ij 1\n" - "fhN th 1\n" - "wfD wa 1\n" - "Zjc ch 1\n" - "iPv in 1\n" - "mzW sz 1\n" - "vXm va 1\n" - "fEq qu 1\n" - "Ozq qu 1\n" - "gEp ng 1\n" - "kDj ij 1\n" - "Zlw le 1\n" - "zbR sz 1\n" - "zCt th 1\n" - "woY on 1\n" - "pkT ka 1\n" - "kbI ka 1\n" - "hdW de 1\n" - "Hsx st 1\n" - "zpX sz 1\n" - "zfV sz 1\n" - "Dhk th 1\n" - "wMp pr 1\n" - "hzJ th 1\n" - "Lwp pr 1\n" - "zmN sz 1\n" - "xfq qu 1\n" - "sjQ sz 1\n" - "zkK sz 1\n" - "bBv va 1\n" - "bdE de 1\n" - "Qxn an 1\n" - "jqt th 1\n" - "jhG th 1\n" - "fYv va 1\n" - "xhE th 1\n" - "cbF ch 1\n" - "Jnb an 1\n" - "jxN ij 1\n" - "fYx fo 1\n" - "hJp th 1\n" - "cRt th 1\n" - "qnS an 1\n" - "vLp va 1\n" - "cBd ch 1\n" - "qqU qu 1\n" - "Sdd de 1\n" - "xeZ er 1\n" - "Jwo on 1\n" - "dPf de 1\n" - "fNl le 1\n" - "kIb ka 1\n" - "cbL ch 1\n" - "Qdr er 1\n" - "Mfb be 1\n" - "jJl le 1\n" - "mxY me 1\n" - "lFd le 1\n" - "twT th 1\n" - "kFk ka 1\n" - "crB ch 1\n" - "jRr er 1\n" - "Htz th 1\n" - "pYf pr 1\n" - "rVc er 1\n" - "vRf va 1\n" - "wVq qu 1\n" - "zpA sz 1\n" - "glY le 1\n" - "sNj ij 1\n" - "vKx va 1\n" - "tvB th 1\n" - "Yjf ij 1\n" - "mwP me 1\n" - "Jyb be 1\n" - "tBc th 1\n" - "gSb ng 1\n" - "cMl ch 1\n" - "gjJ ng 1\n" - "dYz de 1\n" - "zPg ng 1\n" - "kqB qu 1\n" 
- "sFv st 1\n" - "xkH ka 1\n" - "fZt th 1\n" - "yhR th 1\n" - "bwN wa 1\n" - "qjG qu 1\n" - "nQm an 1\n" - "qMr qu 1\n" - "jcW ch 1\n" - "qJv qu 1\n" - "gTm ng 1\n" - "kmQ ka 1\n" - "Wlc ch 1\n" - "kYf ka 1\n" - "eJp er 1\n" - "Tkb ka 1\n" - "hfM th 1\n" - "nxY an 1\n" - "pDl le 1\n" - "wcN ch 1\n" - "pQa an 1\n" - "ohZ th 1\n" - "xRz sz 1\n" - "lbV le 1\n" - "lKc ch 1\n" - "wxB wa 1\n" - "Lww wa 1\n" - "fqQ qu 1\n" - "kkZ ka 1\n" - "iwO in 1\n" - "dgU ng 1\n" - "dvO de 1\n" - "pDt th 1\n" - "kvK ka 1\n" - "jlV le 1\n" - "xXd de 1\n" - "ykF ku 1\n" - "iyT in 1\n" - "Ufx fo 1\n" - "nzU an 1\n" - "xbH bu 1\n" - "lSb le 1\n" - "Xpf pr 1\n" - "Uvf va 1\n" - "yyF ny 1\n" - "fxP fo 1\n" - "jYu qu 1\n" - "qjb qu 1\n" - "gxL ng 1\n" - "pwI pr 1\n" - "jUe er 1\n" - "rFc ch 1\n" - "fsF st 1\n" - "cdW ch 1\n" - "Xwp pr 1\n" - "xdH de 1\n" - "jYs ij 1\n" - "bFd de 1\n" - "qIh th 1\n" - "yIg ng 1\n" - "vTd de 1\n" - "wfE wa 1\n" - "qRb qu 1\n" - "yhK th 1\n" - "kMn an 1\n" - "cpB ch 1\n" - "txN th 1\n" - "kPd de 1\n" - "nbB an 1\n" - "skQ st 1\n" - "uKw qu 1\n" - "wQf wa 1\n" - "kWf ka 1\n" - "wqA qu 1\n" - "cwA ch 1\n" - "vJk ka 1\n" - "hcD th 1\n" - "nfK an 1\n" - "uXf qu 1\n" - "cgA ch 1\n" - "Pjd de 1\n" - "Lqs qu 1\n" - "zwC sz 1\n" - "ljN le 1\n" - "vkP ka 1\n" - "Rqp qu 1\n" - "zGx sz 1\n" - "jPg ng 1\n" - "kbT ka 1\n" - "kpQ ka 1\n" - "Mzq qu 1\n" - "Gjs st 1\n" - "kDl le 1\n" - "jwR ij 1\n" - "Wyq qu 1\n" - "qxS qu 1\n" - "qGt th 1\n" - "Wvr er 1\n" - "zNx sz 1\n" - "vCm va 1\n" - "hlD th 1\n" - "vBp va 1\n" - "mJc ch 1\n" - "hFb th 1\n" - "vDm va 1\n" - "pfC pr 1\n" - "Lpy pr 1\n" - "Fhd th 1\n" - "dxS de 1\n" - "wWg ng 1\n" - "Fgn an 1\n" - "nFf an 1\n" - "cxF ch 1\n" - "aVh th 1\n" - "Sqx qu 1\n" - "Vjz ij 1\n" - "znC an 1\n" - "qqv qu 1\n" - "zrZ er 1\n" - "bNl le 1\n" - "nvW an 1\n" - "Qyb be 1\n" - "Fht th 1\n" - "jGv ij 1\n" - "gLp ng 1\n" - "gLb ng 1\n" - "qKj qu 1\n" - "hJd th 1\n" - "Zjg ng 1\n" - "nQq an 1\n" - "npX an 1\n" - "qiO in 1\n" - "vvG va 1\n" - 
"jOx ij 1\n" - "hhE th 1\n" - "vdN de 1\n" - "Czz sz 1\n" - "gjU ng 1\n" - "hVb th 1\n" - "Kcg ch 1\n" - "dvH de 1\n" - "wtD th 1\n" - "jIo on 1\n" - "jQa an 1\n" - "Fyj ij 1\n" - "cpU ch 1\n" - "hxY th 1\n" - "qbD qu 1\n" - "svJ st 1\n" - "vjW ij 1\n" - "gpY ng 1\n" - "qnR an 1\n" - "gQn an 1\n" - "Cvh th 1\n" - "ykB ka 1\n" - "xgB ng 1\n" - "zfD sz 1\n" - "yHw wa 1\n" - "qdG qu 1\n" - "qTn an 1\n" - "lTm le 1\n" - "jgB ng 1\n" - "gxS ng 1\n" - "qPe qu 1\n" - "ppQ pr 1\n" - "yxW ny 1\n" - "Hjk ij 1\n" - "kNk ka 1\n" - "cnJ an 1\n" - "uHd qu 1\n" - "jvH ij 1\n" - "Ggn ng 1\n" - "lbS le 1\n" - "Qcx ch 1\n" - "cqR ch 1\n" - "Jyc ch 1\n" - "wRp pr 1\n" - "nfA an 1\n" - "lXw le 1\n" - "cmJ ch 1\n" - "Ysw st 1\n" - "qQs qu 1\n" - "gsX ng 1\n" - "cIq ch 1\n" - "jjZ ij 1\n" - "Llb le 1\n" - "mMv va 1\n" - "lVh th 1\n" - "Fph th 1\n" - "Zmm me 1\n" - "xMd de 1\n" - "Gwb wa 1\n" - "Qjv ij 1\n" - "lqZ qu 1\n" - "zJh th 1\n" - "Wky ka 1\n" - "hDk th 1\n" - "yLg ng 1\n" - "dYw de 1\n" - "dCq qu 1\n" - "Gmj ij 1\n" - "xTq qu 1\n" - "wkF ka 1\n" - "hFp th 1\n" - "qnB an 1\n" - "xyJ ny 1\n" - "nIj an 1\n" - "xYd de 1\n" - "Wqr qu 1\n" - "xqV qu 1\n" - "wYk ka 1\n" - "Qdz de 1\n" - "fbN be 1\n" - "qwY qu 1\n" - "Ubx be 1\n" - "wtL th 1\n" - "nQw an 1\n" - "jJk ij 1\n" - "Nzs st 1\n" - "dCn an 1\n" - "Nfv va 1\n" - "Hgh th 1\n" - "Hcq ch 1\n" - "Xvb va 1\n" - "sxJ st 1\n" - "wMx wa 1\n" - "qFn an 1\n" - "Gzf sz 1\n" - "qfJ qu 1\n" - "zdQ de 1\n" - "Xgz ng 1\n" - "fkI ka 1\n" - "pvK va 1\n" - "Cqr qu 1\n" - "zFd de 1\n" - "oHm on 1\n" - "aJj an 1\n" - "Fzd de 1\n" - "dWk de 1\n" - "wmE me 1\n" - "sMl le 1\n" - "tBp th 1\n" - "vNw va 1\n" - "Qdh th 1\n" - "whG th 1\n" - "qAp qu 1\n" - "jrM er 1\n" - "rHw er 1\n" - "Lvc ch 1\n" - "gRn an 1\n" - "yjV ij 1\n" - "hRk th 1\n" - "bkV ka 1\n" - "jWm ij 1\n" - "yYz sz 1\n" - "vTy va 1\n" - "dxV de 1\n" - "mKy me 1\n" - "Qlq qu 1\n" - "Upx pr 1\n" - "Qpq qu 1\n" - "Lwm me 1\n" - "yXr er 1\n" - "gTk ng 1\n" - "qnT an 1\n" - "Vlq qu 1\n" - "Qqd 
qu 1\n" - "Zdd de 1\n" - "Xqt th 1\n" - "Dfb be 1\n" - "oeO on 1\n" - "nCx an 1\n" - "lXd le 1\n" - "vHc ch 1\n" - "vAb va 1\n" - "Ybw wa 1\n" - "zDn an 1\n" - "dGk de 1\n" - "plH le 1\n" - "lxG le 1\n" - "Hgp ng 1\n" - "jRz ij 1\n" - "dTs de 1\n" - "mCj ij 1\n" - "lHf le 1\n" - "lLj le 1\n" - "tNb th 1\n" - "mKk ka 1\n" - "gGj ng 1\n" - "jlQ le 1\n" - "Yyg ng 1\n" - "fDv va 1\n" - "zXg ng 1\n" - "qzZ qu 1\n" - "fEg ng 1\n" - "lhS th 1\n" - "mzM sz 1\n" - "xqT qu 1\n" - "Ycj ch 1\n" - "fbF be 1\n" - "Xsj ij 1\n" - "Lnc an 1\n" - "Gqp qu 1\n" - "fjO ij 1\n" - "zhI th 1\n" - "zgH ng 1\n" - "gWc ch 1\n" - "yKf ny 1\n" - "uQd qu 1\n" - "Kwl le 1\n" - "dxG de 1\n" - "Yqw qu 1\n" - "tKc th 1\n" - "cWn an 1\n" - "hcI th 1\n" - "wfY wa 1\n" - "rBp er 1\n" - "cJd ch 1\n" - "sYf sz 1\n" - "Sqj qu 1\n" - "kQv ka 1\n" - "xpF pr 1\n" - "fcX ch 1\n" - "yfK ny 1\n" - "jQo on 1\n" - "gTg ng 1\n" - "Qwn an 1\n" - "Pnx an 1\n" - "yZt th 1\n" - "wPz sz 1\n" - "juX qu 1\n" - "Lxv va 1\n" - "iXr in 1\n" - "pcE ch 1\n" - "Nqy qu 1\n" - "hjI th 1\n" - "hzV th 1\n" - "nmF an 1\n" - "pvW va 1\n" - "eJw er 1\n" - "Iqd qu 1\n" - "gXy ng 1\n" - "wfW wa 1\n" - "Vdw de 1\n" - "qJx qu 1\n" - "Pdq qu 1\n" - "Bjb ij 1\n" - "qLl qu 1\n" - "zdW de 1\n" - "fQr er 1\n" - "xzW sz 1\n" - "vwQ va 1\n" - "rwU er 1\n" - "qPn an 1\n" - "bFw wa 1\n" - "vHl le 1\n" - "hWl th 1\n" - "wgO ng 1\n" - "hLk th 1\n" - "Jkb ka 1\n" - "zBh th 1\n" - "Dhx th 1\n" - "Fgv ng 1\n" - "bpA pr 1\n" - "zxC sz 1\n" - "gfS ng 1\n" - "Mvx va 1\n" - "uPk qu 1\n" - "Vqn an 1\n" - "yqC qu 1\n" - "vMk ka 1\n" - "wqL qu 1\n" - "wrJ er 1\n" - "cdN ch 1\n" - "pwR pr 1\n" - "hMf th 1\n" - "jPf ij 1\n" - "Vbv va 1\n" - "qzF qu 1\n" - "qNc ch 1\n" - "Jbq qu 1\n" - "fTk ka 1\n" - "Zff fo 1\n" - "Fzt th 1\n" - "Kcw ch 1\n" - "eKf er 1\n" - "pqZ qu 1\n" - "Wpb pr 1\n" - "jkF ij 1\n" - "Vxp pr 1\n" - "hGq th 1\n" - "qBc ch 1\n" - "fcT ch 1\n" - "jMq qu 1\n" - "kZv ka 1\n" - "qkG qu 1\n" - "Ifp pr 1\n" - "dRw de 1\n" - "Zlj le 1\n" - "Kwj ij 
1\n" - "fNb be 1\n" - "dYy de 1\n" - "hZl th 1\n" - "wtP th 1\n" - "hPz th 1\n" - "Ykc ch 1\n" - "Jlw le 1\n" - "jNt th 1\n" - "yrW er 1\n" - "gWd ng 1\n" - "yXd de 1\n" - "fQl le 1\n" - "jfF ij 1\n" - "Ejx ij 1\n" - "fGk ka 1\n" - "Zjz ij 1\n" - "wdM de 1\n" - "jlF le 1\n" - "cxZ ch 1\n" - "Zgk ng 1\n" - "mcJ ch 1\n" - "slE le 1\n" - "nYq an 1\n" - "Wfg ng 1\n" - "zJk ka 1\n" - "bvF va 1\n" - "Hnz an 1\n" - "Wkv ka 1\n" - "Mvq qu 1\n" - "Dxh th 1\n" - "Bvt th 1\n" - "sMj ij 1\n" - "wRf wa 1\n" - "vLb va 1\n" - "zGq qu 1\n" - "mFp me 1\n" - "gNb ng 1\n" - "pCg ng 1\n" - "xFs sz 1\n" - "jKf ij 1\n" - "qJb qu 1\n" - "pzI sz 1\n" - "jgG ng 1\n" - "pKs sz 1\n" - "fqD qu 1\n" - "gxQ ng 1\n" - "fvG va 1\n" - "wgF ng 1\n" - "Xxz sz 1\n" - "Lwu qu 1\n" - "dlX le 1\n" - "lPz le 1\n" - "Wqk qu 1\n" - "Xzj ij 1\n" - "uHj qu 1\n" - "uFj qu 1\n" - "jvV ij 1\n" - "jXe le 1\n" - "Zfm me 1\n" - "qIm qu 1\n" - "zbB sz 1\n" - "yZf ny 1\n" - "sKk sz 1\n" - "zpL sz 1\n" - "qKg qu 1\n" - "Ibj ij 1\n" - "iQb in 1\n" - "Fxu qu 1\n" - "Fpb pr 1\n" - "Wva an 1\n" - "fzD sz 1\n" - "bkT ka 1\n" - "Ykt th 1\n" - "njG an 1\n" - "Uvh th 1\n" - "gfT ng 1\n" - "zcI ch 1\n" - "bDq qu 1\n" - "Jdh th 1\n" - "xMg ng 1\n" - "Jby be 1\n" - "lwJ le 1\n" - "sWw sz 1\n" - "Svw va 1\n" - "nrX an 1\n" - "uvV qu 1\n" - "jVr er 1\n" - "tqB th 1\n" - "bVr er 1\n" - "kQl le 1\n" - "fbG be 1\n" - "rqM qu 1\n" - "zHj ij 1\n" - "fhY th 1\n" - "Yzr er 1\n" - "vFf va 1\n" - "Qpg ng 1\n" - "uAq qu 1\n" - "zxP sz 1\n" - "jCn an 1\n" - "qaM an 1\n" - "xlY le 1\n" - "cTf ch 1\n" - "kBf ka 1\n" - "cQc ch 1\n" - "Rbj ij 1\n" - "kVs sz 1\n" - "bGv va 1\n" - "wdN de 1\n" - "gfN ng 1\n" - "bPj ij 1\n" - "gcI ch 1\n" - "gxj ng 1\n" - "rHb er 1\n" - "pVr er 1\n" - "rVj er 1\n" - "vgS ng 1\n" - "Fqz qu 1\n" - "xMk ka 1\n" - "qQm qu 1\n" - "jZc ch 1\n" - "jBc ch 1\n" - "uwY qu 1\n" - "rHf er 1\n" - "czX ch 1\n" - "zcT ch 1\n" - "bFj ij 1\n" - "qcB ch 1\n" - "hfT th 1\n" - "xqO qu 1\n" - "qfp qu 1\n" - "xjU ij 1\n" - "bhR th 1\n" 
- "tWv th 1\n" - "iqE in 1\n" - "gpU ng 1\n" - "iWb in 1\n" - "tlP th 1\n" - "tYq th 1\n" - "bCv va 1\n" - "oKc ch 1\n" - "Sgj ng 1\n" - "hvq th 1\n" - "kfY ka 1\n" - "zbM sz 1\n" - "zvA sz 1\n" - "cHp ch 1\n" - "vvK va 1\n" - "fpZ pr 1\n" - "dfX de 1\n" - "wrK er 1\n" - "xeE er 1\n" - "fkY ka 1\n" - "sbX sz 1\n" - "fcS ch 1\n" - "vKh th 1\n" - "Qlx le 1\n" - "Zqh th 1\n" - "qWg qu 1\n" - "cdL ch 1\n" - "jvG ij 1\n" - "Mgx ng 1\n" - "gwF ng 1\n" - "kdP de 1\n" - "uMr qu 1\n" - "tcD th 1\n" - "qrL qu 1\n" - "Mtm th 1\n" - "bQz sz 1\n" - "Hpx pr 1\n" - "zpI sz 1\n" - "jkR ij 1\n" - "khH th 1\n" - "mSq qu 1\n" - "pFz sz 1\n" - "juO qu 1\n" - "Xyq qu 1\n" - "jGd de 1\n" - "Yzd de 1\n" - "wbC wa 1\n" - "wSb wa 1\n" - "sZd de 1\n" - "Rzx sz 1\n" - "Flx le 1\n" - "bqC qu 1\n" - "lcH ch 1\n" - "wmG me 1\n" - "zCj ij 1\n" - "xaD an 1\n" - "iwH in 1\n" - "qDp qu 1\n" - "sGx sz 1\n" - "Xhy th 1\n" - "eVc ch 1\n" - "wkJ wa 1\n" - "Lcf ch 1\n" - "lgQ ng 1\n" - "Dhh th 1\n" - "zfO sz 1\n" - "kVc ch 1\n" - "hmL th 1\n" - "Owf wa 1\n" - "wZc ch 1\n" - "dnN an 1\n" - "Mzp sz 1\n" - "mYw me 1\n" - "yLh th 1\n" - "Xxr er 1\n" - "qwI qu 1\n" - "Txs sz 1\n" - "yKp pr 1\n" - "bjX ij 1\n" - "pbS pr 1\n" - "zrP er 1\n" - "hJm th 1\n" - "qgA qu 1\n" - "zwY sz 1\n" - "rXk er 1\n" - "nDx an 1\n" - "vGz sz 1\n" - "mQq qu 1\n" - "upY qu 1\n" - "rLn an 1\n" - "Vfk ka 1\n" - "wCv va 1\n" - "cgx ch 1\n" - "kZq qu 1\n" - "Wjw ij 1\n" - "Qax an 1\n" - "grG ng 1\n" - "bJd de 1\n" - "dJx de 1\n" - "cMd ch 1\n" - "Qcs ch 1\n" - "mkK ka 1\n" - "jNx ij 1\n" - "mrY er 1\n" - "Xwx wa 1\n" - "rZl er 1\n" - "gxU ng 1\n" - "Lnv an 1\n" - "ygC ng 1\n" - "Dqh th 1\n" - "lLn an 1\n" - "mnQ an 1\n" - "kjU ij 1\n" - "bvO va 1\n" - "oVm on 1\n" - "vWt th 1\n" - "rGq qu 1\n" - "tbJ th 1\n" - "fSv va 1\n" - "wJn an 1\n" - "fJv va 1\n" - "oQv on 1\n" - "Vws sz 1\n" - "pnU an 1\n" - "Nmh th 1\n" - "cTq ch 1\n" - "Edx de 1\n" - "uqw qu 1\n" - "Yrh th 1\n" - "Qnx an 1\n" - "mJf me 1\n" - "kDq qu 1\n" - "Xhd th 1\n" - 
"nLx an 1\n" - "xkU ka 1\n" - "fqT qu 1\n" - "qYh th 1\n" - "bFv va 1\n" - "xbQ be 1\n" - "vcS ch 1\n" - "qqT qu 1\n" - "gkF ng 1\n" - "zFh th 1\n" - "kpE ka 1\n" - "Gxb be 1\n" - "Ztw th 1\n" - "qIl qu 1\n" - "Qkd de 1\n" - "wdV de 1\n" - "rwP er 1\n" - "aCg an 1\n" - "Zrs er 1\n" - "zmW sz 1\n" - "vfO va 1\n" - "hBj th 1\n" - "tbH th 1\n" - "Dxv va 1\n" - "zdD de 1\n" - "nBw an 1\n" - "lrV er 1\n" - "gQq ng 1\n" - "tlK th 1\n" - "ztP th 1\n" - "yqV qu 1\n" - "nRm an 1\n" - "jVz sz 1\n" - "Crq er 1\n" - "fFg ng 1\n" - "Xjg ng 1\n" - "Cml le 1\n" - "qWj qu 1\n" - "jzO ij 1\n" - "Mdq qu 1\n" - "mtQ th 1\n" - "rGv er 1\n" - "kGn an 1\n" - "mLg ng 1\n" - "uWj qu 1\n" - "Rcq ch 1\n" - "cVp ch 1\n" - "bWk ka 1\n" - "Xzx sz 1\n" - "Wkb ka 1\n" - "xzH sz 1\n" - "quP un 1\n" - "dHv de 1\n" - "Dmq qu 1\n" - "Dgv ng 1\n" - "tgY th 1\n" - "jtM th 1\n" - "tMz th 1\n" - "bHm me 1\n" - "Zfk ka 1\n" - "xZp pr 1\n" - "jkH ij 1\n" - "rNp er 1\n" - "xMv va 1\n" - "wpF pr 1\n" - "djD de 1\n" - "bxV be 1\n" - "hgS th 1\n" - "Pkh th 1\n" - "Dxq qu 1\n" - "mMx me 1\n" - "dGj de 1\n" - "kbH ka 1\n" - "Lhg th 1\n" - "Dvq qu 1\n" - "qrT qu 1\n" - "Ijw ij 1\n" - "wuI qu 1\n" - "Zwn an 1\n" - "dhJ th 1\n" - "qcR ch 1\n" - "whM th 1\n" - "pgP ng 1\n" - "qkR qu 1\n" - "sqR qu 1\n" - "lxY le 1\n" - "vVw va 1\n" - "lKd le 1\n" - "Nly le 1\n" - "yKz sz 1\n" - "qBb qu 1\n" - "wQx wa 1\n" - "kYw ka 1\n" - "fQd de 1\n" - "svW sz 1\n" - "yGp pr 1\n" - "ytB th 1\n" - "jvU ij 1\n" - "kjz ka 1\n" - "jVc ch 1\n" - "Qbz sz 1\n" - "pqM qu 1\n" - "vwu ku 1\n" - "Qww wa 1\n" - "dcZ ch 1\n" - "lhG th 1\n" - "gmS ng 1\n" - "Iqz qu 1\n" - "zZf sz 1\n" - "hLn th 1\n" - "eMf er 1\n" - "xNq qu 1\n" - "mPm um 1\n" - "pMg ng 1\n" - "wzW sz 1\n" - "kRl le 1\n" - "hzK th 1\n" - "fbO be 1\n" - "Xxt th 1\n" - "Fnx an 1\n" - "Bvn an 1\n" - "bjZ ij 1\n" - "tcY th 1\n" - "dmB de 1\n" - "qFe qu 1\n" - "kxB ka 1\n" - "qBz qu 1\n" - "pVp pr 1\n" - "boQ on 1\n" - "xoH on 1\n" - "dWg de 1\n" - "Tdq qu 1\n" - "zNq qu 1\n" - "vYp 
va 1\n" - "pDf pr 1\n" - "lwG le 1\n" - "hDq th 1\n" - "Jdy de 1\n" - "snZ an 1\n" - "mzU sz 1\n" - "zKx sz 1\n" - "rvC er 1\n" - "wuS qu 1\n" - "dnQ an 1\n" - "vCy va 1\n" - "Udw wa 1\n" - "bTl le 1\n" - "qbC qu 1\n" - "tbT th 1\n" - "iDk ka 1\n" - "Whb th 1\n" - "tbX th 1\n" - "tfO th 1\n" - "Tfq qu 1\n" - "dbW de 1\n" - "Bdy de 1\n" - "vjR ij 1\n" - "cbC ch 1\n" - "wuW qu 1\n" - "wCw wa 1\n" - "Wdq qu 1\n" - "vRb va 1\n" - "bWm me 1\n" - "vZw va 1\n" - "dJj de 1\n" - "qZy qu 1\n" - "Jgq ng 1\n" - "zbH sz 1\n" - "hJl th 1\n" - "Xhg th 1\n" - "nVp an 1\n" - "dVc ch 1\n" - "qCc ch 1\n" - "oYg ng 1\n" - "kwH ka 1\n" - "vwN va 1\n" - "zfw sz 1\n" - "vlO le 1\n" - "ztX ti 1\n" - "dKx de 1\n" - "xQs sz 1\n" - "cDl ch 1\n" - "yVv va 1\n" - "zpN sz 1\n" - "xkG ka 1\n" - "eqW qu 1\n" - "jdD di 1\n" - "fQm me 1\n" - "Yhl th 1\n" - "tBf th 1\n" - "qEf qu 1\n" - "whX th 1\n" - "Vgv ng 1\n" - "Lsq qu 1\n" - "dfJ de 1\n" - "Zdp de 1\n" - "rZc ch 1\n" - "tZh ch 1\n" - "mtC th 1\n" - "zxQ sz 1\n" - "Vnj an 1\n" - "sHg ng 1\n" - "wYl le 1\n" - "Bqb qu 1\n" - "yrV er 1\n" - "Ycs ch 1\n" - "jRw ij 1\n" - "iWt th 1\n" - "hVw th 1\n" - "wZs sz 1\n" - "Cqo qu 1\n" - "Gfn an 1\n" - "rBv er 1\n" - "Ojz sz 1\n" - "zGf sz 1\n" - "bZc ch 1\n" - "Fvd de 1\n" - "Zgs ng 1\n" - "Rfg ng 1\n" - "Rww wa 1\n" - "Yrp er 1\n" - "iFp in 1\n" - "bVx be 1\n" - "zfM sz 1\n" - "qdV qu 1\n" - "bGm me 1\n" - "tnJ th 1\n" - "pdR de 1\n" - "gBc ch 1\n" - "gzC ng 1\n" - "Pwc ch 1\n" - "uAw qu 1\n" - "znX an 1\n" - "vgT ng 1\n" - "oAw ko 1\n" - "xBm me 1\n" - "dNf de 1\n" - "Pqs qu 1\n" - "Npd di 1\n" - "oUy ko 1\n" - "fpD pr 1\n" - "Rfx fo 1\n" - "lXm le 1\n" - "qWs qu 1\n" - "gWv vi 1\n" - "Fwv va 1\n" - "Lqj qu 1\n" - "fvQ va 1\n" - "zgB ng 1\n" - "kJl le 1\n" - "vWo on 1\n" - "Xvc ch 1\n" - "yDq qu 1\n" - "bdP de 1\n" - "jVf ij 1\n" - "wPw wa 1\n" - "dwA de 1\n" - "Oqp qu 1\n" - "qiZ in 1\n" - "xdV de 1\n" - "qFg ng 1\n" - "qzI qu 1\n" - "ywL wa 1\n" - "sWv sz 1\n" - "Tpy pr 1\n" - "wbf wa 1\n" - "uPg ng 
1\n" - "Knw an 1\n" - "iuO in 1\n" - "Qdn an 1\n" - "Yfv va 1\n" - "wuK qu 1\n" - "xLn an 1\n" - "yJg ng 1\n" - "Nfk ka 1\n" - "Yql qu 1\n" - "qsH qu 1\n" - "Rzv sz 1\n" - "bIp pr 1\n" - "sQt th 1\n" - "tgC th 1\n" - "qSa an 1\n" - "fxQ fo 1\n" - "hcZ th 1\n" - "wbJ wa 1\n" - "qRl qu 1\n" - "Gcy ch 1\n" - "vZm va 1\n" - "Xzl le 1\n" - "wgR ng 1\n" - "dlO le 1\n" - "tCb th 1\n" - "qmY qu 1\n" - "qZx qu 1\n" - "Lbp pr 1\n" - "Dgq ng 1\n" - "Vkj ij 1\n" - "wqU qu 1\n" - "Mqk qu 1\n" - "wUv va 1\n" - "qgC ng 1\n" - "sbD sz 1\n" - "Sqy qu 1\n" - "bMq qu 1\n" - "Bzt th 1\n" - "sIq qu 1\n" - "cVj ch 1\n" - "wJt th 1\n" - "Xjm ij 1\n" - "Hmg ng 1\n" - "aQd an 1\n" - "iHt th 1\n" - "fMm me 1\n" - "wWc ch 1\n" - "fuE qu 1\n" - "mCf me 1\n" - "qnP an 1\n" - "zLn an 1\n" - "kRt th 1\n" - "Mvl le 1\n" - "mRd de 1\n" - "yfJ ny 1\n" - "xCb be 1\n" - "sQb sz 1\n" - "quC un 1\n" - "Ctc th 1\n" - "pPv va 1\n" - "zjI sz 1\n" - "xmC me 1\n" - "xdJ de 1\n" - "nXv an 1\n" - "vsO sz 1\n" - "pRd de 1\n" - "vbF va 1\n" - "wNl le 1\n" - "kHq qu 1\n" - "rwM er 1\n" - "gxD ng 1\n" - "Qhi th 1\n" - "mqB qu 1\n" - "pnL an 1\n" - "bKb be 1\n" - "iqN in 1\n" - "dkX de 1\n" - "bQd de 1\n" - "bNj ij 1\n" - "Tlk le 1\n" - "Nlg ng 1\n" - "Cxh th 1\n" - "Mqf qu 1\n" - "Pvj ij 1\n" - "zwZ sz 1\n" - "pGb pr 1\n" - "nrF an 1\n" - "bkS ka 1\n" - "dRv de 1\n" - "jJm ij 1\n" - "iqF in 1\n" - "fGc ch 1\n" - "nxW an 1\n" - "xsW sz 1\n" - "mfQ me 1\n" - "fgP ng 1\n" - "jlH le 1\n" - "nrI an 1\n" - "kXv ka 1\n" - "Vpq qu 1\n" - "zMk sz 1\n" - "pHf pr 1\n" - "jdM de 1\n" - "bqJ qu 1\n" - "Ckt th 1\n" - "zKv sz 1\n" - "jzG sz 1\n" - "uIx qu 1\n" - "yNm me 1\n" - "jYt th 1\n" - "fwL wa 1\n" - "dZx de 1\n" - "vgF ng 1\n" - "wXi in 1\n" - "vZt th 1\n" - "Ctf th 1\n" - "xqC qu 1\n" - "qOc ch 1\n" - "ygX ng 1\n" - "kWk ka 1\n" - "grF ng 1\n" - "qnX an 1\n" - "xUi in 1\n" - "pmC me 1\n" - "uzE qu 1\n" - "Ivw va 1\n" - "gvI ng 1\n" - "knZ an 1\n" - "lxZ le 1\n" - "Xwf wa 1\n" - "Dqb qu 1\n" - "yKg ng 1\n" - "Vwg ng 1\n" 
- "xSb be 1\n" - "Hwp pr 1\n" - "yNx ny 1\n" - "yoQ on 1\n" - "cSx ch 1\n" - "Evq qu 1\n" - "tIw th 1\n" - "dfZ de 1\n" - "hzP th 1\n" - "xBk ka 1\n" - "kqr qu 1\n" - "yBm me 1\n" - "lJj le 1\n" - "cjq ch 1\n" - "drW er 1\n" - "qaD an 1\n" - "wDf wa 1\n" - "Lxz sz 1\n" - "zQf fo 1\n" - "Jtq th 1\n" - "qRv qu 1\n" - "Gfc ch 1\n" - "Xbt th 1\n" - "wZb wa 1\n" - "srQ er 1\n" - "gJq ng 1\n" - "jFt th 1\n" - "gNc ch 1\n" - "Rkr er 1\n" - "pzJ sz 1\n" - "lbA le 1\n" - "cBq ch 1\n" - "Kyq qu 1\n" - "xcO ch 1\n" - "zXr er 1\n" - "cVs ch 1\n" - "rYm er 1\n" - "kVm ka 1\n" - "fcZ ch 1\n" - "fzC sz 1\n" - "tKp th 1\n" - "gPz ng 1\n" - "qcL ch 1\n" - "Yjr er 1\n" - "zxU sz 1\n" - "xbT be 1\n" - "nvX an 1\n" - "qmR qu 1\n" - "bxL be 1\n" - "Xww wa 1\n" - "jSf ij 1\n" - "lNf le 1\n" - "zTs sz 1\n" - "kFq qu 1\n" - "qLz qu 1\n" - "rrX er 1\n" - "wXg ng 1\n" - "zvE sz 1\n" - "Hwx wa 1\n" - "qFm qu 1\n" - "cgR ch 1\n" - "pDp pr 1\n" - "Oqb qu 1\n" - "sVc ch 1\n" - "Xtx th 1\n" - "Qwt th 1\n" - "Wfe er 1\n" - "Pcx ch 1\n" - "bpO pr 1\n" - "Cwg ng 1\n" - "wxO wa 1\n" - "bVs sz 1\n" - "jFw ij 1\n" - "fnF an 1\n" - "kxH ka 1\n" - "Yws sz 1\n" - "gdD ng 1\n" - "jWx ij 1\n" - "cTl ch 1\n" - "kmW ka 1\n" - "mhW th 1\n" - "bzT sz 1\n" - "rvJ er 1\n" - "xcJ ch 1\n" - "vkS ka 1\n" - "sXr er 1\n" - "sCv sz 1\n" - "Ntp th 1\n" - "oHh lo 1\n" - "Yvs sz 1\n" - "pVf pr 1\n" - "kEq qu 1\n" - "qfE qu 1\n" - "oWm on 1\n" - "tMw th 1\n" - "zYp sz 1\n" - "nFw an 1\n" - "yQc ch 1\n" - "zQj sz 1\n" - "wKq qu 1\n" - "mKf me 1\n" - "uLr qu 1\n" - "wIb wa 1\n" - "wrH er 1\n" - "pgL ng 1\n" - "Lbt th 1\n" - "zjF sz 1\n" - "qFp qu 1\n" - "zdX de 1\n" - "wTc ch 1\n" - "Jwl le 1\n" - "lxU le 1\n" - "hjA th 1\n" - "iPg in 1\n" - "Xns an 1\n" - "wkW ka 1\n" - "pfP pr 1\n" - "Dyq qu 1\n" - "jWu qu 1\n" - "qzR qu 1\n" - "Yjz sz 1\n" - "twX th 1\n" - "Nwj ij 1\n" - "jbB ij 1\n" - "qwR qu 1\n" - "Ytf th 1\n" - "blX le 1\n" - "xZk ka 1\n" - "Ymw me 1\n" - "wfX wa 1\n" - "Vqy qu 1\n" - "Xqn an 1\n" - "yUw wa 1\n" - 
"jzT jo 1\n" - "kNt th 1\n" - "pmQ me 1\n" - "dXr er 1\n" - "ylq qu 1\n" - "tWz th 1\n" - "Kvr er 1\n" - "bhQ th 1\n" - "uJn an 1\n" - "pbT pr 1\n" - "aBf an 1\n" - "Rhj th 1\n" - "uAx qu 1\n" - "Bgx ng 1\n" - "jqN qu 1\n" - "jdC ij 1\n" - "fBs st 1\n" - "cXk ch 1\n" - "nmM an 1\n" - "xRr er 1\n" - "Hkz sz 1\n" - "dhZ th 1\n" - "Fyp pr 1\n" - "kGm ka 1\n" - "sGq qu 1\n" - "jKh th 1\n" - "vDz sz 1\n" - "vLq qu 1\n" - "lJs le 1\n" - "zNn an 1\n" - "Wgj ng 1\n" - "jmL ij 1\n" - "gVt th 1\n" - "wFz sz 1\n" - "zbD sz 1\n" - "kTd de 1\n" - "dwX de 1\n" - "xRl le 1\n" - "Azv sz 1\n" - "bQh th 1\n" - "qQf qu 1\n" - "yoZ on 1\n" - "jPs sz 1\n" - "jyG ij 1\n" - "kXj ka 1\n" - "yBv va 1\n" - "nwP an 1\n" - "xnA an 1\n" - "bKf be 1\n" - "qbP qu 1\n" - "vGs sz 1\n" - "jjG ij 1\n" - "Kqc ch 1\n" - "zVt th 1\n" - "wSg ng 1\n" - "sWm sz 1\n" - "fDg ng 1\n" - "pHz sz 1\n" - "fYp pr 1\n" - "zrW er 1\n" - "lDx le 1\n" - "hQh th 1\n" - "Bdp de 1\n" - "fqZ qu 1\n" - "oQm on 1\n" - "Qsq qu 1\n" - "xjq qu 1\n" - "Mfv va 1\n" - "zbQ sz 1\n" - "quR un 1\n" - "cMb ch 1\n" - "zqD qu 1\n" - "dXf de 1\n" - "rHh th 1\n" - "jhF th 1\n" - "nNf an 1\n" - "wHb wa 1\n" - "Tpq qu 1\n" - "bjY ij 1\n" - "cJq ch 1\n" - "lCk le 1\n" - "Pfp pr 1\n" - "Oqn an 1\n" - "fmR me 1\n" - "Qpu qu 1\n" - "Ncv ch 1\n" - "qYr qu 1\n" - "sfA sz 1\n" - "frS er 1\n" - "Gpf pr 1\n" - "jmD ij 1\n" - "hwI th 1\n" - "Rbz sz 1\n" - "jhB th 1\n" - "xXj ij 1\n" - "qYd qu 1\n" - "sVf sz 1\n" - "cCz ch 1\n" - "qMl qu 1\n" - "fpK pr 1\n" - "hVy th 1\n" - "lcJ ch 1\n" - "Okj ij 1\n" - "qJg ng 1\n" - "jLp ij 1\n" - "nYf an 1\n" - "npF on 1\n" - "rWk er 1\n" - "mcP ch 1\n" - "nZm an 1\n" - "fYb fo 1\n" - "zbC sz 1\n" - "nBq an 1\n" - "fjy ij 1\n" - "bIx be 1\n" - "twN th 1\n" - "Ggk ng 1\n" - "Czm sz 1\n" - "jtO th 1\n" - "nRl an 1\n" - "jyC ij 1\n" - "yEh th 1\n" - "vmH va 1\n" - "wtQ th 1\n" - "wIf wa 1\n" - "jIf ij 1\n" - "qbM qu 1\n" - "Rwq qu 1\n" - "fqF qu 1\n" - "Wfj ij 1\n" - "jfW ij 1\n" - "wWm me 1\n" - "Wpp pr 1\n" - "Mgj 
ng 1\n" - "dSf de 1\n" - "wYv va 1\n" - "ccI ch 1\n" - "ylT le 1\n" - "Gqh th 1\n" - "Cmz sz 1\n" - "Hfk ka 1\n" - "qBt th 1\n" - "yCf ny 1\n" - "qzO qu 1\n" - "ydF de 1\n" - "Vdt th 1\n" - "pJd de 1\n" - "sfR sz 1\n" - "dlV le 1\n" - "jOd de 1\n" - "nfF an 1\n" - "wTt th 1\n" - "rGk er 1\n" - "xAw wa 1\n" - "vfF va 1\n" - "Dzg ng 1\n" - "kFp ka 1\n" - "jTm ij 1\n" - "nNq an 1\n" - "qcN ch 1\n" - "Jjx ij 1\n" - "tKf th 1\n" - "Zrq qu 1\n" - "hmK th 1\n" - "Mqz qu 1\n" - "xfR fo 1\n" - "wQq qu 1\n" - "mqG qu 1\n" - "xUr er 1\n" - "oiU in 1\n" - "qsS qu 1\n" - "qGg ng 1\n" - "qtO th 1\n" - "tPb th 1\n" - "Rqm qu 1\n" - "vkX ka 1\n" - "Wsb st 1\n" - "cxR ch 1\n" - "fZr er 1\n" - "yQg ng 1\n" - "ziU in 1\n" - "xvW va 1\n" - "aDx an 1\n" - "bQj ij 1\n" - "jxC ij 1\n" - "Twk ka 1\n" - "sQh th 1\n" - "Bfx fo 1\n" - "aGj an 1\n" - "Pgc ch 1\n" - "Hzh th 1\n" - "qgW ng 1\n" - "kdF de 1\n" - "kbY ka 1\n" - "Qjx ij 1\n" - "Hxj ij 1\n" - "tVx th 1\n" - "nxZ an 1\n" - "oVd on 1\n" - "Hlq qu 1\n" - "jKz sz 1\n" - "qAi in 1\n" - "dNl le 1\n" - "pqA qu 1\n" - "eIv er 1\n" - "xmW me 1\n" - "ycK ch 1\n" - "mQd de 1\n" - "hmU th 1\n" - "nlF an 1\n" - "Gkl le 1\n" - "qBq qu 1\n" - "rhQ th 1\n" - "Znk an 1\n" - "Vfp pr 1\n" - "nBn an 1\n" - "qvL qu 1\n" - "aqN an 1\n" - "kLf ka 1\n" - "zJr er 1\n" - "tQw th 1\n" - "sWq qu 1\n" - "bwW wa 1\n" - "vzB sz 1\n" - "yyR ny 1\n" - "qqN qu 1\n" - "wyI ny 1\n" - "jzJ sz 1\n" - "qgI qu 1\n" - "bgQ ng 1\n" - "yLt th 1\n" - "Vqq qu 1\n" - "Xnr an 1\n" - "wHg ng 1\n" - "aQg an 1\n" - "cFh th 1\n" - "zjQ sz 1\n" - "gpD ng 1\n" - "xzN sz 1\n" - "iIw in 1\n" - "dQg ng 1\n" - "pQy pr 1\n" - "Xyx ny 1\n" - "sWc ch 1\n" - "jFd de 1\n" - "bpF pr 1\n" - "Vsv st 1\n" - "Qql qu 1\n" - "wzT sz 1\n" - "sqQ qu 1\n" - "Kzm sz 1\n" - "oFq qu 1\n" - "gkJ ng 1\n" - "hkH th 1\n" - "qLg ng 1\n" - "bmU me 1\n" - "crJ ch 1\n" - "slX le 1\n" - "Tzx sz 1\n" - "qbx qu 1\n" - "kpI ka 1\n" - "xCf fo 1\n" - "Fml le 1\n" - "Qhj th 1\n" - "tQs th 1\n" - "vRd de 1\n" - "Ycb ch 
1\n" - "cjP ch 1\n" - "yuE qu 1\n" - "gIi in 1\n" - "kWg ng 1\n" - "Jwh th 1\n" - "fVy ny 1\n" - "jqy qu 1\n" - "Wzp sz 1\n" - "Cwc ch 1\n" - "qEy qu 1\n" - "jrX er 1\n" - "Kqi in 1\n" - "lYv le 1\n" - "dGv de 1\n" - "Cwj ij 1\n" - "nDv an 1\n" - "Ojm ij 1\n" - "Dnx an 1\n" - "vrF er 1\n" - "Jmr er 1\n" - "zfI sz 1\n" - "bqT qu 1\n" - "Xvj ij 1\n" - "nPp an 1\n" - "aVw an 1\n" - "wBv va 1\n" - "kVb ka 1\n" - "gcH ch 1\n" - "Xbs sz 1\n" - "tRd th 1\n" - "mQz sz 1\n" - "Hxe er 1\n" - "Dnw an 1\n" - "xWg ng 1\n" - "pGc ch 1\n" - "hgI th 1\n" - "ywP wa 1\n" - "nrW an 1\n" - "iVq di 1\n" - "xzE sz 1\n" - "Vxd de 1\n" - "Lzc ch 1\n" - "Jwp pr 1\n" - "gCq ng 1\n" - "Otq th 1\n" - "wvP va 1\n" - "cNr ch 1\n" - "iXq in 1\n" - "Qnl in 1\n" - "tPz th 1\n" - "hIb th 1\n" - "aPg an 1\n" - "zvw sz 1\n" - "nqO an 1\n" - "sqO qu 1\n" - "bjQ ij 1\n" - "lwQ le 1\n" - "pEq qu 1\n" - "bWj ij 1\n" - "swT sz 1\n" - "gmY ng 1\n" - "gRk ng 1\n" - "dZr er 1\n" - "fMr er 1\n" - "lxO le 1\n" - "kbQ ka 1\n" - "yfN ny 1\n" - "ymq qu 1\n" - "jpK ij 1\n" - "Wjn an 1\n" - "fmW me 1\n" - "rKx er 1\n" - "dlH le 1\n" - "kcK ch 1\n" - "vbV va 1\n" - "qNl qu 1\n" - "pHt th 1\n" - "hlT th 1\n" - "lBv le 1\n" - "oaF an 1\n" - "xfM fo 1\n" - "rZd er 1\n" - "jgW ng 1\n" - "Hvh th 1\n" - "Fkf ka 1\n" - "cDc ch 1\n" - "hLh th 1\n" - "qQp qu 1\n" - "zhJ th 1\n" - "ivQ in 1\n" - "Ukq qu 1\n" - "bpV pr 1\n" - "bJq qu 1\n" - "aPw an 1\n" - "sdK de 1\n" - "cGf ch 1\n" - "Ljw ij 1\n" - "qhP th 1\n" - "mFw me 1\n" - "fIu qu 1\n" - "zhB th 1\n" - "fuH qu 1\n" - "bFq qu 1\n" - "Wgk ng 1\n" - "Fqh th 1\n" - "zmf sz 1\n" - "Zpf pr 1\n" - "nFh th 1\n" - "yBw wa 1\n" - "gIj ng 1\n" - "qBf fo 1\n" - "Uwl le 1\n" - "zrM er 1\n" - "yBd de 1\n" - "Rlf le 1\n" - "Pzh ch 1\n" - "rZx er 1\n" - "qVs qu 1\n" - "dxJ de 1\n" - "Lcz ch 1\n" - "gFn an 1\n" - "vIm va 1\n" - "qtG th 1\n" - "qbG qu 1\n" - "bHg ng 1\n" - "xrY er 1\n" - "tBd th 1\n" - "nKq an 1\n" - "Nkt th 1\n" - "jCq qu 1\n" - "byX be 1\n" - "oBp on 1\n" - "Wjz sz 1\n" 
- "zfP sz 1\n" - "aQz an 1\n" - "sjx ij 1\n" - "nfW an 1\n" - "nXw an 1\n" - "bJw wa 1\n" - "aSf an 1\n" - "iRf in 1\n" - "yMd de 1\n" - "fBc ch 1\n" - "vxR va 1\n" - "Llx le 1\n" - "yGs sz 1\n" - "Jsy sz 1\n" - "Lvx va 1\n" - "eFh th 1\n" - "wbM wa 1\n" - "uOq qu 1\n" - "wWl le 1\n" - "bvU va 1\n" - "fnO an 1\n" - "mzI sz 1\n" - "Vcf ch 1\n" - "mhE th 1\n" - "vgQ ng 1\n" - "jgP ng 1\n" - "qbj qu 1\n" - "bZf be 1\n" - "Xtj th 1\n" - "yYq qu 1\n" - "jdK de 1\n" - "jzB sz 1\n" - "Yys sz 1\n" - "wUg ng 1\n" - "yBb be 1\n" - "qjM qu 1\n" - "sXw sz 1\n" - "Xqw qu 1\n" - "cTb ch 1\n" - "jrE er 1\n" - "sNp sz 1\n" - "Zhm th 1\n" - "xVs sz 1\n" - "jGz sz 1\n" - "Jqh th 1\n" - "zTm sz 1\n" - "vhE th 1\n" - "dQi in 1\n" - "Tmv va 1\n" - "qxD qu 1\n" - "fzE sz 1\n" - "vMr er 1\n" - "Cqx qu 1\n" - "twY th 1\n" - "nVz an 1\n" - "lRk le 1\n" - "Owq qu 1\n" - "qYj qu 1\n" - "yQk ka 1\n" - "Nlf le 1\n" - "qDn an 1\n" - "bHw wa 1\n" - "cjA ch 1\n" - "sgU ng 1\n" - "kQi in 1\n" - "yNf ny 1\n" - "lwZ le 1\n" - "vGd de 1\n" - "Vmn an 1\n" - "tpB th 1\n" - "cFd ch 1\n" - "xHm me 1\n" - "bSg ng 1\n" - "hEq th 1\n" - "ewQ er 1\n" - "eWd er 1\n" - "jfR ij 1\n" - "zpY sz 1\n" - "cvQ ch 1\n" - "hXr th 1\n" - "cJw ch 1\n" - "wEp pr 1\n" - "Nxl le 1\n" - "qMf qu 1\n" - "vGc ch 1\n" - "pyQ pr 1\n" - "jpU ij 1\n" - "xoA on 1\n" - "gXn an 1\n" - "qqG qu 1\n" - "pXn an 1\n" - "vlP le 1\n" - "Lzv sz 1\n" - "jxB ij 1\n" - "cJc ch 1\n" - "jcT ch 1\n" - "Wtm th 1\n" - "cLg ch 1\n" - "kUx ka 1\n" - "nFp an 1\n" - "Jsw sz 1\n" - "sBg ng 1\n" - "jFn an 1\n" - "gvC ng 1\n" - "fFy ny 1\n" - "qnA an 1\n" - "Zbb be 1\n" - "Pzx sz 1\n" - "psJ sz 1\n" - "lZq qu 1\n" - "yfP ny 1\n" - "gYv ng 1\n" - "bfC be 1\n" - "dMx de 1\n" - "hlN th 1\n" - "wRl le 1\n" - "qjH qu 1\n" - "Wjc ch 1\n" - "uQp qu 1\n" - "zTb sz 1\n" - "qUr qu 1\n" - "zqp qu 1\n" - "vlR le 1\n" - "jqX qu 1\n" - "swR sz 1\n" - "qMy ny 1\n" - "zkT sz 1\n" - "yqX qu 1\n" - "nlR an 1\n" - "Hqn an 1\n" - "aaJ an 1\n" - "lKw le 1\n" - "bzB sz 1\n" - 
"Vgk ng 1\n" - "aVm an 1\n" - "dnR an 1\n" - "txQ th 1\n" - "Qzi in 1\n" - "zxV sz 1\n" - "xgQ ng 1\n" - "tvZ th 1\n" - "jwN ij 1\n" - "Eqj qu 1\n" - "Bxj ij 1\n" - "hzH th 1\n" - "Qfy ny 1\n" - "Ppj ij 1\n" - "Aqp qu 1\n" - "zJn an 1\n" - "szF st 1\n" - "qfX qu 1\n" - "pzV sz 1\n" - "tgN th 1\n" - "xsS sz 1\n" - "nQz an 1\n" - "tkF th 1\n" - "Qhq th 1\n" - "gJc ch 1\n" - "uOa an 1\n" - "rqW qu 1\n" - "fYz sz 1\n" - "uFc ch 1\n" - "Ncx ch 1\n" - "lMw le 1\n" - "cjI ch 1\n" - "Jcw ch 1\n" - "vEo on 1\n" - "eQy er 1\n" - "Sxc ch 1\n" - "bUx mb 1\n" - "zdJ sz 1\n" - "lpN le 1\n" - "Rkq qu 1\n" - "vvI va 1\n" - "Qmq qu 1\n" - "tgJ th 1\n" - "gfE ng 1\n" - "qcX ch 1\n" - "klT le 1\n" - "bbV be 1\n" - "pmZ me 1\n" - "uqA qu 1\n" - "cYy ch 1\n" - "wmY me 1\n" - "zlB le 1\n" - "zNd sz 1\n" - "cvZ ch 1\n" - "dvL de 1\n" - "wLz sz 1\n" - "qcG ch 1\n" - "Qjl le 1\n" - "nqf an 1\n" - "gxY ng 1\n" - "aqI an 1\n" - "Kqa an 1\n" - "Xqp qu 1\n" - "Yvg ng 1\n" - "qqF qu 1\n" - "yHh th 1\n" - "nHc an 1\n" - "Uqq qu 1\n" - "zfN sz 1\n" - "mXq qu 1\n" - "Fgj ng 1\n" - "Dsx sz 1\n" - "xRv va 1\n" - "wbZ wa 1\n" - "Hnp an 1\n" - "fUx fo 1\n" - "cYd ch 1\n" - "qTg ng 1\n" - "Bgq ng 1\n" - "pCn an 1\n" - "Xmh th 1\n" - "vjJ ij 1\n" - "tdG th 1\n" - "Zhk th 1\n" - "xFn an 1\n" - "dkQ de 1\n" - "Lcg ch 1\n" - "mIu qu 1\n" - "Iwd de 1\n" - "wjw ij 1\n" - "zbX sz 1\n" - "Yhp th 1\n" - "cvH ch 1\n" - "Lcx ch 1\n" - "Wfn an 1\n" - "Nfq qu 1\n" - "qMv qu 1\n" - "Uvw va 1\n" - "Qnh th 1\n" - "nbG an 1\n" - "sFg ng 1\n" - "xlJ le 1\n" - "bPb be 1\n" - "xpI pr 1\n" - "mrV er 1\n" - "Fwu qu 1\n" - "wOy wa 1\n" - "Pmh th 1\n" - "Jhq th 1\n" - "Zbx be 1\n" - "pgY ng 1\n" - "Rbw wa 1\n" - "Awx wa 1\n" - "mcB ch 1\n" - "gkG ng 1\n" - "xkW ka 1\n" - "Pnw in 1\n" - "bNs sz 1\n" - "nXr an 1\n" - "Vmt th 1\n" - "eUv er 1\n" - "yQv va 1\n" - "kxr er 1\n" - "Ksw sz 1\n" - "bpW pr 1\n" - "qeD qu 1\n" - "Qvh th 1\n" - "bRm me 1\n" - "qJm qu 1\n" - "csY ch 1\n" - "qwH qu 1\n" - "Cqc ch 1\n" - "lYq qu 1\n" - "dPp 
de 1\n" - "oAe er 1\n" - "dcS ch 1\n" - "uwU qu 1\n" - "zjL sz 1\n" - "oZx on 1\n" - "kjR ij 1\n" - "cDy ch 1\n" - "fSs sz 1\n" - "eQf le 1\n" - "qBm qu 1\n" - "mLb me 1\n" - "Zrj er 1\n" - "Gkx ka 1\n" - "pkX ka 1\n" - "vTk ka 1\n" - "Zgp ng 1\n" - "dhP th 1\n" - "nPv an 1\n" - "xnQ an 1\n" - "bHp pr 1\n" - "Xgf ng 1\n" - "Cwf wa 1\n" - "lbN le 1\n" - "jNm ij 1\n" - "xNt th 1\n" - "rJp er 1\n" - "oJd on 1\n" - "Ryq qu 1\n" - "lvL le 1\n" - "qvY qu 1\n" - "vwC va 1\n" - "kFj ij 1\n" - "qHd qu 1\n" - "wcB ch 1\n" - "xTs sz 1\n" - "fQz sz 1\n" - "Dlf le 1\n" - "wLt th 1\n" - "Fbh th 1\n" - "rqJ qu 1\n" - "hhO th 1\n" - "xOi in 1\n" - "mqz qu 1\n" - "qmQ me 1\n" - "qQj qu 1\n" - "ovQ on 1\n" - "gfR ng 1\n" - "Pmq qu 1\n" - "Tcj ch 1\n" - "mqQ qu 1\n" - "mwV me 1\n" - "bXw wa 1\n" - "jlA le 1\n" - "fjG ij 1\n" - "jxY ij 1\n" - "qwM qu 1\n" - "kvU ka 1\n" - "Bkq qu 1\n" - "gfA ng 1\n" - "Awc ch 1\n" - "Vmv va 1\n" - "Qhl th 1\n" - "Wmj ij 1\n" - "cMq ch 1\n" - "tHp th 1\n" - "lPb le 1\n" - "vlK le 1\n" - "Ygk ng 1\n" - "gJs ng 1\n" - "tWl th 1\n" - "xVw wa 1\n" - "srN er 1\n" - "Uhb th 1\n" - "vfR va 1\n" - "kFf ka 1\n" - "Jlz le 1\n" - "fKq qu 1\n" - "mRq qu 1\n" - "kWw ka 1\n" - "zvO sz 1\n" - "Xqz qu 1\n" - "dIj de 1\n" - "wJm me 1\n" - "Fqv qu 1\n" - "wNt th 1\n" - "lxL le 1\n" - "xLm me 1\n" - "dqN qu 1\n" - "wRj ij 1\n" - "Ljt th 1\n" - "wRw wa 1\n" - "cxB ch 1\n" - "cjH ch 1\n" - "Vqj qu 1\n" - "qJs qu 1\n" - "cFk ch 1\n" - "xqd qu 1\n" - "Eqh th 1\n" - "qRd qu 1\n" - "vfT va 1\n" - "Zqb qu 1\n" - "mGc ch 1\n" - "Sbd de 1\n" - "iwV in 1\n" - "jfI ij 1\n" - "nWz an 1\n" - "Ljg ng 1\n" - "rjG er 1\n" - "cFb ch 1\n" - "uqZ qu 1\n" - "mVm me 1\n" - "jgK ng 1\n" - "dZh th 1\n" - "Bqx qu 1\n" - "quG un 1\n" - "lCv le 1\n" - "lxW le 1\n" - "gGb ng 1\n" - "gvY ng 1\n" - "mjF ij 1\n" - "ptX th 1\n" - "pYy pr 1\n" - "Yrf er 1\n" - "mVd de 1\n" - "zpR sz 1\n" - "xKw wa 1\n" - "wpM pr 1\n" - "cLk ch 1\n" - "Sqz qu 1\n" - "gWn an 1\n" - "sWz st 1\n" - "srS er 1\n" - "cVx ch 
1\n" - "xNb be 1\n" - "hPb th 1\n" - "bGq qu 1\n" - "tdH th 1\n" - "yJl le 1\n" - "vUk ka 1\n" - "dJz sz 1\n" - "qhI th 1\n" - "mtP th 1\n" - "lGb le 1\n" - "hDx th 1\n" - "zfW sz 1\n" - "Nml le 1\n" - "Hsw st 1\n" - "pfG pr 1\n" - "dMj de 1\n" - "kKq qu 1\n" - "rjS er 1\n" - "Qlg ng 1\n" - "Nfy ny 1\n" - "cqM ch 1\n" - "hWm th 1\n" - "fuO qu 1\n" - "zfF sz 1\n" - "qgH ng 1\n" - "bpZ pr 1\n" - "btY th 1\n" - "uqB qu 1\n" - "qyA qu 1\n" - "Xrp er 1\n" - "ytX th 1\n" - "dHm de 1\n" - "vBg ng 1\n" - "yyN ny 1\n" - "Qrj er 1\n" - "gKd ng 1\n" - "bfU be 1\n" - "Qft th 1\n" - "bqP qu 1\n" - "qOz qu 1\n" - "Xhc th 1\n" - "dqY qu 1\n" - "hjQ th 1\n" - "Yfu qu 1\n" - "aXk an 1\n" - "pbV pr 1\n" - "vjP ij 1\n" - "Ybp pr 1\n" - "Jmb me 1\n" - "qFq qu 1\n" - "yPq qu 1\n" - "yWw wa 1\n" - "vhX th 1\n" - "iwT in 1\n" - "qZf qu 1\n" - "uqU qu 1\n" - "uFk qu 1\n" - "cpW ch 1\n" - "Lpq qu 1\n" - "kfL ka 1\n" - "pQe er 1\n" - "gwz ng 1\n" - "jpM ij 1\n" - "Qkm ka 1\n" - "jgH ng 1\n" - "xjP ij 1\n" - "xgL ng 1\n" - "jLm ij 1\n" - "dxN de 1\n" - "vWs st 1\n" - "Jjh th 1\n" - "hhG th 1\n" - "Yvc ch 1\n" - "xrE er 1\n" - "bZw wa 1\n" - "Lvw va 1\n" - "eNw er 1\n" - "fjB ij 1\n" - "dcQ ch 1\n" - "lZt th 1\n" - "Jwq qu 1\n" - "qPg ng 1\n" - "xMb be 1\n" - "hfD th 1\n" - "jzQ sz 1\n" - "Uuf qu 1\n" - "zGk sz 1\n" - "zCc ch 1\n" - "npC an 1\n" - "tWd th 1\n" - "hjF th 1\n" - "Pzs st 1\n" - "wuA qu 1\n" - "Qhg th 1\n" - "Mqm qu 1\n" - "fsI st 1\n" - "fdU de 1\n" - "Xrm er 1\n" - "qQg ng 1\n" - "bkW ka 1\n" - "dHg ng 1\n" - "rcB ch 1\n" - "hWu th 1\n" - "nIq an 1\n" - "rYq qu 1\n" - "xXv va 1\n" - "wqP qu 1\n" - "xmN me 1\n" - "sJf st 1\n" - "yMf ny 1\n" - "Sfk ka 1\n" - "qzW qu 1\n" - "cvT ch 1\n" - "kmX ka 1\n" - "xqU qu 1\n" - "cnG an 1\n" - "Jpi in 1\n" - "frX er 1\n" - "yLf ny 1\n" - "uyU qu 1\n" - "Ddw de 1\n" - "Tgj ng 1\n" - "qeH qu 1\n" - "fEz sz 1\n" - "pCk ka 1\n" - "qmf qu 1\n" - "rjH er 1\n" - "xMp pr 1\n" - "Ywo on 1\n" - "zgD ng 1\n" - "Pqx qu 1\n" - "nqM on 1\n" - "wdX de 1\n" 
- "Bpz sz 1\n" - "lhM th 1\n" - "Epb pr 1\n" - "bhJ th 1\n" - "kvQ ka 1\n" - "Rsq qu 1\n" - "xbP be 1\n" - "nMm an 1\n" - "xuC qu 1\n" - "wjs sz 1\n" - "fxX fo 1\n" - "hvT th 1\n" - "uPx qu 1\n" - "Jmy me 1\n" - "Qzd de 1\n" - "Nsz st 1\n" - "vWd de 1\n" - "hfX th 1\n" - "jCg ng 1\n" - "yQx ny 1\n" - "whJ th 1\n" - "wrq qu 1\n" - "xgW ng 1\n" - "Jhj th 1\n" - "lhC th 1\n" - "Pwf ow 1\n" - "ljC le 1\n" - "vvB va 1\n" - "mcN ch 1\n" - "yHx ny 1\n" - "bBj ij 1\n" - "qRz qu 1\n" - "glH ng 1\n" - "cZp ch 1\n" - "qJh th 1\n" - "tSg th 1\n" - "xVm me 1\n" - "uWs qu 1\n" - "Vxo on 1\n" - "fjM ij 1\n" - "zhK th 1\n" - "Cjh th 1\n" - "vZr er 1\n" - "bCs sz 1\n" - "rwY er 1\n" - "xEi in 1\n" - "dUv de 1\n" - "fRg ng 1\n" - "Gcu ch 1\n" - "jDf ij 1\n" - "djH de 1\n" - "vlU le 1\n" - "qyG qu 1\n" - "kfq qu 1\n" - "lXg ng 1\n" - "lbC le 1\n" - "Pwg ng 1\n" - "Oae an 1\n" - "pbC pr 1\n" - "dWt th 1\n" - "lzU le 1\n" - "wJz sz 1\n" - "dYj de 1\n" - "cBj ch 1\n" - "fRv va 1\n" - "djG de 1\n" - "mYg ng 1\n" - "Qbc ch 1\n" - "gnX an 1\n" - "wPm me 1\n" - "wvN va 1\n" - "qGm qu 1\n" - "qNh th 1\n" - "mRg ng 1\n" - "Uqv qu 1\n" - "Qxm me 1\n" - "fzX sz 1\n" - "zjM sz 1\n" - "xqA qu 1\n" - "bMs sz 1\n" - "vmL me 1\n" - "Eyx ny 1\n" - "hHj th 1\n" - "jGp ij 1\n" - "mfD me 1\n" - "Jfw wa 1\n" - "Wjh th 1\n" - "bZs sz 1\n" - "Iyk ka 1\n" - "zRn an 1\n" - "cdU ch 1\n" - "mJh th 1\n" - "Qjy ij 1\n" - "Qao an 1\n" - "bXv va 1\n" - "hSg th 1\n" - "rAo er 1\n" - "hLs th 1\n" - "lCs le 1\n" - "qkJ qu 1\n" - "Rxu qu 1\n" - "xdN de 1\n" - "yYx ny 1\n" - "dkN de 1\n" - "Rgw ng 1\n" - "zgL sz 1\n" - "Rcj ch 1\n" - "iWz in 1\n" - "dLk de 1\n" - "mpX me 1\n" - "Gbd de 1\n" - "bnH an 1\n" - "kdM de 1\n" - "wqG qu 1\n" - "vMz sz 1\n" - "zwH sz 1\n" - "wgx ng 1\n" - "Ljk ij 1\n" - "tlG th 1\n" - "tgE th 1\n" - "Wcw ch 1\n" - "Vby be 1\n" - "mVz sz 1\n" - "Hgc ch 1\n" - "gqP ng 1\n" - "hhB th 1\n" - "nFx an 1\n" - "yBf ny 1\n" - "Wmx me 1\n" - "vNb va 1\n" - "Mnv an 1\n" - "Zmc ch 1\n" - "bzS sz 1\n" - 
"yfC ny 1\n" - "Epx pr 1\n" - "ljG le 1\n" - "wUa an 1\n" - "Qgo ng 1\n" - "pqb qu 1\n" - "Jkm ka 1\n" - "Wvy va 1\n" - "Bjp ij 1\n" - "vfZ va 1\n" - "wxT wa 1\n" - "Vxw wa 1\n" - "dRt th 1\n" - "nVq an 1\n" - "iWf in 1\n" - "Smq qu 1\n" - "jwG ij 1\n" - "vcW ch 1\n" - "Qgz ng 1\n" - "Wkq qu 1\n" - "xrL er 1\n" - "tVh ch 1\n" - "Zlr er 1\n" - "zDt th 1\n" - "yxP ny 1\n" - "Yyw wa 1\n" - "zPk sz 1\n" - "Bgg ng 1\n" - "xOk ka 1\n" - "oXq qu 1\n" - "tQf th 1\n" - "fxF fo 1\n" - "dOq qu 1\n" - "Vtp th 1\n" - "jhP th 1\n" - "vhZ th 1\n" - "Gqq qu 1\n" - "dFg ng 1\n" - "eCg ng 1\n" - "kjH ij 1\n" - "vqQ qu 1\n" - "jpL ij 1\n" - "hgZ th 1\n" - "xFd de 1\n" - "Qjd de 1\n" - "xKm me 1\n" - "zQc ch 1\n" - "Nhw th 1\n" - "Kqo qu 1\n" - "hwO th 1\n" - "oYn an 1\n" - "Wnf an 1\n" - "vSc ch 1\n" - "Afq qu 1\n" - "jqJ qu 1\n" - "jEg ng 1\n" - "dKp de 1\n" - "nmK an 1\n" - "wXw wa 1\n" - "vjC ij 1\n" - "dXb de 1\n" - "tQn th 1\n" - "qoR qu 1\n" - "bRf be 1\n" - "yyL ny 1\n" - "kSj ij 1\n" - "Xyu qu 1\n" - "vmA va 1\n" - "Zgm ng 1\n" - "Lbx be 1\n" - "bIv va 1\n" - "Zdq qu 1\n" - "gHn an 1\n" - "bYq qu 1\n" - "Mqd qu 1\n" - "qMk qu 1\n" - "Qsv st 1\n" - "zXx sz 1\n" - "hQf th 1\n" - "wcV ch 1\n" - "Xfz sz 1\n" - "Mhc th 1\n" - "kBz sz 1\n" - "bWp pr 1\n" - "Wzu qu 1\n" - "hWw th 1\n" - "yNp pr 1\n" - "xbZ be 1\n" - "mTb me 1\n" - "Kdf de 1\n" - "pfQ pr 1\n" - "vCd de 1\n" - "Pqf qu 1\n" - "ofZ on 1\n" - "wYd de 1\n" - "Tfc ch 1\n" - "Gnb an 1\n" - "Zdx de 1\n" - "zVj sz 1\n" - "Tqw qu 1\n" - "fzV sz 1\n" - "Igq ng 1\n" - "Qvv vi 1\n" - "Pmf me 1\n" - "qHe qu 1\n" - "ybR be 1\n" - "cFg ch 1\n" - "Kvf va 1\n" - "Zxm me 1\n" - "oVc ch 1\n" - "Yhb th 1\n" - "bwP wa 1\n" - "Vvz sz 1\n" - "sdW de 1\n" - "gFz ng 1\n" - "mRl le 1\n" - "bqN qu 1\n" - "bhU th 1\n" - "tBw th 1\n" - "Hbb be 1\n" - "Jzp sz 1\n" - "zrS er 1\n" - "mkZ me 1\n" - "bKw wa 1\n" - "jPx ij 1\n" - "Xqa an 1\n" - "fGz sz 1\n" - "xLk ka 1\n" - "nrV an 1\n" - "Tmx me 1\n" - "zvZ sz 1\n" - "gWl ng 1\n" - "Yxb be 1\n" - "yWt 
th 1\n" - "lqN qu 1\n" - "tWu th 1\n" - "xZt th 1\n" - "iqI in 1\n" - "cpQ ch 1\n" - "zPf sz 1\n" - "bqG qu 1\n" - "gmI ng 1\n" - "Wkc ch 1\n" - "Zvs sz 1\n" - "qdN qu 1\n" - "hYf th 1\n" - "sBn an 1\n" - "Dwb ow 1\n" - "Wzq qu 1\n" - "Qdw de 1\n" - "svR sz 1\n" - "Nvv va 1\n" - "jRc ch 1\n" - "qDv qu 1\n" - "qGe qu 1\n" - "cwT ch 1\n" - "fTy ny 1\n" - "Cvv va 1\n" - "flQ le 1\n" - "mWg ng 1\n" - "twS th 1\n" - "npM an 1\n" - "Ufq qu 1\n" - "fuG qu 1\n" - "oCj on 1\n" - "txF th 1\n" - "Yft th 1\n" - "qwy qu 1\n" - "Vdz de 1\n" - "Vgq ng 1\n" - "Rkg ng 1\n" - "Pxz sz 1\n" - "mCn an 1\n" - "whZ th 1\n" - "fgB ng 1\n" - "jvW ij 1\n" - "kdL de 1\n" - "Lxi in 1\n" - "svB sz 1\n" - "xuH qu 1\n" - "gFy ng 1\n" - "oVv on 1\n" - "Zhq th 1\n" - "oqG qu 1\n" - "oJp on 1\n" - "gIf ng 1\n" - "bwF wa 1\n" - "vLh th 1\n" - "jgX ng 1\n" - "qKi in 1\n" - "xRh th 1\n" - "qwV qu 1\n" - "mNl le 1\n" - "Gvv va 1\n" - "pQf pr 1\n" - "xbV be 1\n" - "dpZ de 1\n" - "fHq qu 1\n" - "bBd de 1\n" - "vUh th 1\n" - "hzA th 1\n" - "Mnz an 1\n" - "pBt th 1\n" - "oaE an 1\n" - "slK le 1\n" - "Wlg ng 1\n" - "jhK th 1\n" - "xvX va 1\n" - "Ffx fo 1\n" - "gXh th 1\n" - "cWf ch 1\n" - "Gpy pr 1\n" - "xmS me 1\n" - "gZn an 1\n" - "djX de 1\n" - "bkX ka 1\n" - "xlP le 1\n" - "hCt th 1\n" - "Yhj th 1\n" - "gwQ ng 1\n" - "klD le 1\n" - "Rhq th 1\n" - "aEj an 1\n" - "jpY ij 1\n" - "pVn an 1\n" - "nJx an 1\n" - "zdV de 1\n" - "Rvf va 1\n" - "Oqy qu 1\n" - "zpT sz 1\n" - "Pzc ch 1\n" - "qTm qu 1\n" - "jfq ij 1\n" - "ztY th 1\n" - "Zqv qu 1\n" - "nZb an 1\n" - "pHl le 1\n" - "Qcr ch 1\n" - "zVm sz 1\n" - "pNm me 1\n" - "Xhj th 1\n" - "oYy on 1\n" - "Flq qu 1\n" - "lwj le 1\n" - "rwH er 1\n" - "oWq qu 1\n" - "Bwm me 1\n" - "jXs sz 1\n" - "Lkt th 1\n" - "lVn an 1\n" - "jXa an 1\n" - "hkB th 1\n" - "qrQ qu 1\n" - "dqK qu 1\n" - "Zxn an 1\n" - "ygZ ng 1\n" - "Fgt th 1\n" - "nwM an 1\n" - "Wzx sz 1\n" - "qgb ng 1\n" - "Ygv ng 1\n" - "Xdd de 1\n" - "xjM ij 1\n" - "qHb qu 1\n" - "zKz sz 1\n" - "dvM de 1\n" - "Zpx pr 
1\n" - "wPt th 1\n" - "qiA in 1\n" - "jyV ij 1\n" - "jyR ij 1\n" - "Uox on 1\n" - "Qkz ka 1\n" - "Lxq qu 1\n" - "fpq qu 1\n" - "Xmf me 1\n" - "kRx ka 1\n" - "jFk ij 1\n" - "nZc an 1\n" - "hCp th 1\n" - "Hbw wa 1\n" - "zlF le 1\n" - "kqI qu 1\n" - "wWj ij 1\n" - "qKk qu 1\n" - "Jpf pr 1\n" - "lbR le 1\n" - "rbJ er 1\n" - "zfK sz 1\n" - "gVk ng 1\n" - "bZx be 1\n" - "znQ an 1\n" - "gZb ga 1\n" - "wtI th 1\n" - "bvW va 1\n" - "qhG th 1\n" - "xrV er 1\n" - "pYc ch 1\n" - "bQq qu 1\n" - "qpV qu 1\n" - "pFm me 1\n" - "zdO de 1\n" - "Jvj ij 1\n" - "mQl le 1\n" - "xWm me 1\n" - "Dtz th 1\n" - "lKz le 1\n" - "dkI de 1\n" - "fSx fo 1\n" - "yCp pr 1\n" - "whF th 1\n" - "lVm le 1\n" - "yHv va 1\n" - "Plm le 1\n" - "Jpm me 1\n" - "hEw ha 1\n" - "zHz sz 1\n" - "uIj qu 1\n" - "gzB ng 1\n" - "qsV qu 1\n" - "pbX pr 1\n" - "jyY ij 1\n" - "mjq qu 1\n" - "zDd de 1\n" - "Tqc ch 1\n" - "fTg ng 1\n" - "qbh th 1\n" - "Cjq qu 1\n" - "pcW ch 1\n" - "Xhp th 1\n" - "fwR wa 1\n" - "dQm de 1\n" - "xCk ka 1\n" - "yhM th 1\n" - "glQ ng 1\n" - "gVb ng 1\n" - "Pdy de 1\n" - "yOj ij 1\n" - "jZg ng 1\n" - "oqZ qu 1\n" - "bqI qu 1\n" - "jkX ij 1\n" - "Kfh th 1\n" - "xpQ pr 1\n" - "rhX th 1\n" - "wjI ij 1\n" - "Bqf qu 1\n" - "aCp an 1\n" - "ccX ch 1\n" - "vGm ma 1\n" - "paU an 1\n" - "xUh th 1\n" - "gLd ng 1\n" - "tfJ th 1\n" - "fwH wa 1\n" - "Pnq an 1\n" - "kxV ka 1\n" - "Nbk ka 1\n" - "sqE qu 1\n" - "Cjp ij 1\n" - "kcZ ka 1\n" - "Wqj ij 1\n" - "tzY th 1\n" - "nqX an 1\n" - "Yyc ch 1\n" - "Lzd de 1\n" - "xZy ny 1\n" - "sdY de 1\n" - "jXn an 1\n" - "Nbm me 1\n" - "wLr er 1\n" - "Nqr qu 1\n" - "Zwx wa 1\n" - "yvH va 1\n" - "ylC le 1\n" - "qyh th 1\n" - "Jnz an 1\n" - "hHv th 1\n" - "zUq qu 1\n" - "xgI ng 1\n" - "Ztp th 1\n" - "Vvb va 1\n" - "tGn th 1\n" - "Ujq qu 1\n" - "jHs sz 1\n" - "bWq qu 1\n" - "bXr er 1\n" - "hFg th 1\n" - "gdT ng 1\n" - "qHc ch 1\n" - "lCj le 1\n" - "mVg ng 1\n" - "pQq qu 1\n" - "vWl le 1\n" - "yFq qu 1\n" - "djY de 1\n" - "btQ th 1\n" - "vlM le 1\n" - "Iwt th 1\n" - "Pdb de 1\n" 
- "jtQ th 1\n" - "xjR ij 1\n" - "dhW th 1\n" - "zXs sz 1\n" - "fbE be 1\n" - "Hqr qu 1\n" - "vLt th 1\n" - "kbD ka 1\n" - "vUd de 1\n" - "yZc ch 1\n" - "Qke le 1\n" - "fhG th 1\n" - "eHt th 1\n" - "vHj ij 1\n" - "Tfg ng 1\n" - "uoA qu 1\n" - "zCx sz 1\n" - "zLk sz 1\n" - "jdW de 1\n" - "Cgn an 1\n" - "Lrq qu 1\n" - "yOi in 1\n" - "qOw qu 1\n" - "fqs qu 1\n" - "ltQ th 1\n" - "nwU an 1\n" - "zYq qu 1\n" - "Gzs st 1\n" - "nWv an 1\n" - "lNx le 1\n" - "Wql qu 1\n" - "dcD ch 1\n" - "vfD va 1\n" - "qVd qu 1\n" - "Wzz sz 1\n" - "jfH ij 1\n" - "Rrt th 1\n" - "qDr qu 1\n" - "lOh th 1\n" - "wwZ wa 1\n" - "mQw me 1\n" - "nqK an 1\n" - "Uvl le 1\n" - "kRq qu 1\n" - "Vhg th 1\n" - "xsD st 1\n" - "Ldd de 1\n" - "sQv st 1\n" - "qMj qu 1\n" - "hbQ th 1\n" - "cjX ch 1\n" - "nbT an 1\n" - "xNf fo 1\n" - "wCt th 1\n" - "jnX an 1\n" - "tZf th 1\n" - "qCk qu 1\n" - "dHk de 1\n" - "Ccq ch 1\n" - "uMf qu 1\n" - "bvG va 1\n" - "zPz sz 1\n" - "yIy ny 1\n" - "lHx le 1\n" - "fnB an 1\n" - "Ebx be 1\n" - "rGc ch 1\n" - "mgD ng 1\n" - "hJg th 1\n" - "jcG ch 1\n" - "Ybd de 1\n" - "oDq qu 1\n" - "jRx ij 1\n" - "kJf ka 1\n" - "tFv th 1\n" - "Gdv de 1\n" - "fHn an 1\n" - "Uqp qu 1\n" - "cYh th 1\n" - "kHp ka 1\n" - "qhZ th 1\n" - "wZh th 1\n" - "kQt th 1\n" - "hwH th 1\n" - "xzU sz 1\n" - "tQg th 1\n" - "Qbj ij 1\n" - "zVl le 1\n" - "qJd qu 1\n" - "Xrf er 1\n" - "fMv va 1\n" - "qJc ch 1\n" - "Dqy qu 1\n" - "qMs qu 1\n" - "fzl le 1\n" - "Wdx de 1\n" - "Tdw wa 1\n" - "mcT ch 1\n" - "fOd de 1\n" - "Kgj ng 1\n" - "yrT er 1\n" - "bqA qu 1\n" - "snq an 1\n" - "Lzt th 1\n" - "gLw ng 1\n" - "dLq qu 1\n" - "Qzr er 1\n" - "Qrn an 1\n" - "eFn an 1\n" - "Nmw wa 1\n" - "pxE pr 1\n" - "Cqk qu 1\n" - "Wcd ch 1\n" - "fXw wa 1\n" - "fbU be 1\n" - "aeO an 1\n" - "svV st 1\n" - "yVt th 1\n" - "sRp st 1\n" - "rxU er 1\n" - "qhK th 1\n" - "uQw qu 1\n" - "oXw on 1\n" - "Jvw va 1\n" - "kvH ka 1\n" - "zVy sz 1\n" - "rOq qu 1\n" - "cWx ch 1\n" - "iXv in 1\n" - "cBk ch 1\n" - "xkM ka 1\n" - "vHb va 1\n" - "jbW ij 1\n" - 
"mYq qu 1\n" - "fnH an 1\n" - "zRj sz 1\n" - "hvN th 1\n" - "oMh th 1\n" - "yqO qu 1\n" - "fBf fo 1\n" - "oPj on 1\n" - "fFc ch 1\n" - "lVq qu 1\n" - "ptJ th 1\n" - "Ntj th 1\n" - "rwL er 1\n" - "cFz ch 1\n" - "jVd de 1\n" - "Gbv va 1\n" - "oJn an 1\n" - "wkL ka 1\n" - "qoT qu 1\n" - "Qxk ka 1\n" - "rZj ij 1\n" - "Cgd ng 1\n" - "gvW ng 1\n" - "kYv ka 1\n" - "qjR qu 1\n" - "Vnq an 1\n" - "yJt th 1\n" - "xWy ny 1\n" - "bXl le 1\n" - "xVk ka 1\n" - "xuG qu 1\n" - "Hzs st 1\n" - "uDq qu 1\n" - "Ywk ka 1\n" - "Jkh th 1\n" - "Gdm de 1\n" - "qcO ch 1\n" - "hlH th 1\n" - "Jfv va 1\n" - "cLn an 1\n" - "wzG sz 1\n" - "yhF th 1\n" - "kfD ka 1\n" - "kbJ ka 1\n" - "Nqp qu 1\n" - "gYq ng 1\n" - "ztM th 1\n" - "jcD ch 1\n" - "wgY ng 1\n" - "qdT da 1\n" - "vTw va 1\n" - "cNz ch 1\n" - "Jbc ch 1\n" - "Xcj ch 1\n" - "rUw er 1\n" - "gXv ng 1\n" - "dRf de 1\n" - "bJz sz 1\n" - "aqA an 1\n" - "uOz qu 1\n" - "wPj ij 1\n" - "uDw qu 1\n" - "mqF qu 1\n" - "cXr ch 1\n" - "yrL er 1\n" - "nJk an 1\n" - "hsY th 1\n" - "Zqs qu 1\n" - "qeS qu 1\n" - "bLv va 1\n" - "jEo on 1\n" - "pmE me 1\n" - "jIt th 1\n" - "vzZ sz 1\n" - "Qhd th 1\n" - "cnN an 1\n" - "bPq qu 1\n" - "pZw pr 1\n" - "iwR in 1\n" - "oJv ko 1\n" - "ufI qu 1\n" - "wKm me 1\n" - "uWv qu 1\n" - "fCf fo 1\n" - "wBn an 1\n" - "Uyf ny 1\n" - "uVx qu 1\n" - "kKf ka 1\n" - "mrZ er 1\n" - "lXb le 1\n" - "zJm sz 1\n" - "wYr er 1\n" - "Hkw ka 1\n" - "Ewz sz 1\n" - "xJy ny 1\n" - "Emx me 1\n" - "cqL ch 1\n" - "zVk sz 1\n" - "yPb be 1\n" - "zcC ch 1\n" - "Ndq qu 1\n" - "uWf qu 1\n" - "kcM ch 1\n" - "tkB th 1\n" - "yhq th 1\n" - "qaP an 1\n" - "rVs er 1\n" - "dLd de 1\n" - "Sgm ng 1\n" - "Xhx th 1\n" - "xqH qu 1\n" - "Kqy qu 1\n" - "yRw wa 1\n" - "Wdw de 1\n" - "qcQ ch 1\n" - "zbp sz 1\n" - "dtY th 1\n" - "cwB ch 1\n" - "nfV an 1\n" - "cgP ch 1\n" - "pwW pr 1\n" - "pqf qu 1\n" - "Xkp ka 1\n" - "izJ in 1\n" - "cYw ch 1\n" - "iQl in 1\n" - "Qvy va 1\n" - "ylR le 1\n" - "sFp st 1\n" - "Lqg ng 1\n" - "xnP an 1\n" - "gYl ng 1\n" - "wIr er 1\n" - "fqR 
qu 1\n" - "Qpk ka 1\n" - "qXz qu 1\n" - "Lrr er 1\n" - "sjI st 1\n" - "iyX in 1\n" - "Zfq qu 1\n" - "vtH th 1\n" - "cZf ch 1\n" - "hXp th 1\n" - "rJw er 1\n" - "gbP ng 1\n" - "Qug ng 1\n" - "jRt th 1\n" - "lXh th 1\n" - "pVc ch 1\n" - "kGc ch 1\n" - "Nxr er 1\n" - "yKk ka 1\n" - "xAo on 1\n" - "oUx on 1\n" - "nWx an 1\n" - "fwU wa 1\n" - "mKg ng 1\n" - "qhO th 1\n" - "sGg ng 1\n" - "Wwu qu 1\n" - "cnE an 1\n" - "tjS th 1\n" - "Qyd de 1\n" - "yWm me 1\n" - "Qdj de 1\n" - "jSd de 1\n" - "Ioy on 1\n" - "Xpp pr 1\n" - "xJb be 1\n" - "xvT va 1\n" - "cdT ch 1\n" - "khX th 1\n" - "hVp th 1\n" - "cjT ch 1\n" - "Hqf qu 1\n" - "nbP an 1\n" - "Uwb wa 1\n" - "Kcb ch 1\n" - "qsQ qu 1\n" - "tkZ th 1\n" - "zrX er 1\n" - "zbN sz 1\n" - "mYi in 1\n" - "gLx ng 1\n" - "sGc ch 1\n" - "Pbv va 1\n" - "gcV ch 1\n" - "Qjf ij 1\n" - "wvB va 1\n" - "gKp ng 1\n" - "jZy ij 1\n" - "qhW th 1\n" - "vCg ng 1\n" - "Lrk er 1\n" - "fRw wa 1\n" - "cMj ch 1\n" - "ohK th 1\n" - "frK er 1\n" - "dQq qu 1\n" - "Hdj de 1\n" - "Bkx ka 1\n" - "yXv va 1\n" - "fdO de 1\n" - "sWg ng 1\n" - "Xtf th 1\n" - "rUx ar 1\n" - "qHm qu 1\n" - "kQh th 1\n" - "wzU sz 1\n" - "vTt th 1\n" - "zkN sz 1\n" - "Fqp qu 1\n" - "xJc ch 1\n" - "wkQ ka 1\n" - "wxF wa 1\n" - "vRj ij 1\n" - "jzD sz 1\n" - "Zqu un 1\n" - "zWw sz 1\n" - "zgU ng 1\n" - "ugX ng 1\n" - "pmB me 1\n" - "gzA ng 1\n" - "Zjj ij 1\n" - "xIj ij 1\n" - "xoK on 1\n" - "Gqx qu 1\n" - "uLq qu 1\n" - "lGw le 1\n" - "tZq th 1\n" - "zcN ch 1\n" - "yPz sz 1\n" - "rqN qu 1\n" - "pwG pr 1\n" - "vfP va 1\n" - "vIy va 1\n" - "vEj ij 1\n" - "jqD qu 1\n" - "Hxu qu 1\n" - "qLs qu 1\n" - "Jpy pr 1\n" - "pRw pr 1\n" - "fZs st 1\n" - "Vvx va 1\n" - "zkB sz 1\n" - "yGk ka 1\n" - "kvZ ka 1\n" - "cqW ch 1\n" - "wLg ng 1\n" - "Ypg ng 1\n" - "jrR er 1\n" - "vwZ va 1\n" - "gVd ng 1\n" - "iCw ij 1\n" - "Fxw wa 1\n" - "qyZ qu 1\n" - "qgT qu 1\n" - "xLs st 1\n" - "pXg ng 1\n" - "gNv ng 1\n" - "Hgz ng 1\n" - "zJv sz 1\n" - "Hvm va 1\n" - "uXb qu 1\n" - "lLz le 1\n" - "dwP de 1\n" - "gvN ng 
1\n" - "cpF ch 1\n" - "vZj ij 1\n" - "Pfv va 1\n" - "xcI ch 1\n" - "yVp pr 1\n" - "fdC de 1\n" - "pbE pr 1\n" - "jQm ij 1\n" - "Tqt th 1\n" - "wMh th 1\n" - "Gkq qu 1\n" - "tdV th 1\n" - "xIk ka 1\n" - "hHp th 1\n" - "Lsb st 1\n" - "Wvs st 1\n" - "Qcw ch 1\n" - "gfQ ng 1\n" - "Fjt th 1\n" - "xBz sz 1\n" - "fLx fo 1\n" - "zkR sz 1\n" - "kjA ij 1\n" - "Fcw ch 1\n" - "fhT th 1\n" - "qiK qu 1\n" - "wQv va 1\n" - "pXl le 1\n" - "hLg th 1\n" - "jJw ij 1\n" - "sOj st 1\n" - "vWb va 1\n" - "Ajq qu 1\n" - "vKc ch 1\n" - "iIy in 1\n" - "pJy pr 1\n" - "Lqc ch 1\n" - "wBd de 1\n" - "kRb ka 1\n" - "Lcp ch 1\n" - "gfB ng 1\n" - "zVn an 1\n" - "qWf qu 1\n" - "Qyf ny 1\n" - "puF qu 1\n" - "fIe er 1\n" - "wGb wa 1\n" - "jjL ij 1\n" - "hcE th 1\n" - "qhp th 1\n" - "gxN ng 1\n" - "tMd th 1\n" - "Rzt th 1\n" - "cgO ch 1\n" - "vmT va 1\n" - "Dcq ch 1\n" - "qoI qu 1\n" - "Nqz qu 1\n" - "vhM th 1\n" - "gBq ng 1\n" - "jWv ij 1\n" - "xmE me 1\n" - "qcd ch 1\n" - "lYj le 1\n" - "dDc ch 1\n" - "xUa an 1\n" - "kVl le 1\n" - "wqN qu 1\n" - "uuI qu 1\n" - "Wzf sz 1\n" - "yvX va 1\n" - "Pyq qu 1\n" - "wuU qu 1\n" - "hLp th 1\n" - "qqL qu 1\n" - "cVh th 1\n" - "Fgs ng 1\n" - "xjF ij 1\n" - "wkG ka 1\n" - "qJr qu 1\n" - "Gzq qu 1\n" - "Ixv va 1\n" - "hMv th 1\n" - "dfQ de 1\n" - "eOx er 1\n" - "mHq qu 1\n" - "Zkn an 1\n" - "nqW an 1\n" - "nJd an 1\n" - "pEh th 1\n" - "gVg ng 1\n" - "Zyf ny 1\n" - "nmT an 1\n" - "csQ ch 1\n" - "Pkq qu 1\n" - "tdP th 1\n" - "fkz sz 1\n" - "Qnc an 1\n" - "pBj ij 1\n" - "Mjv ij 1\n" - "ymJ me 1\n" - "Mxs st 1\n" - "hbL th 1\n" - "vQh th 1\n" - "xDy ny 1\n" - "djC de 1\n" - "cdQ ch 1\n" - "bnL an 1\n" - "Yjl le 1\n" - "qUc ch 1\n" - "mjW ij 1\n" - "zWs st 1\n" - "xvF va 1\n" - "Gqi qu 1\n" - "fGm me 1\n" - "Xuw qu 1\n" - "qCs qu 1\n" - "Kxm me 1\n" - "lNn an 1\n" - "sdL de 1\n" - "Vtn th 1\n" - "sJj st 1\n" - "kQj ij 1\n" - "xfX fo 1\n" - "Nqk qu 1\n" - "cBs ch 1\n" - "yzP sz 1\n" - "xUv va 1\n" - "lbT le 1\n" - "wyV wa 1\n" - "Xkm ka 1\n" - "Wdv de 1\n" - "qQn an 1\n" 
- "sqZ qu 1\n" - "sfW st 1\n" - "gfM ng 1\n" - "Vlp le 1\n" - "Xjx ij 1\n" - "hIj th 1\n" - "Jws st 1\n" - "xZr er 1\n" - "iKw in 1\n" - "Tbd de 1\n" - "zQv sz 1\n" - "nmZ an 1\n" - "bpE pr 1\n" - "zSv sz 1\n" - "Fgi ng 1\n" - "uIw qu 1\n" - "Zvx va 1\n" - "rqR qu 1\n" - "vjZ ij 1\n" - "Njr er 1\n" - "kwF ka 1\n" - "Ovw va 1\n" - "hwZ th 1\n" - "Mvk ka 1\n" - "Dvf va 1\n" - "xsP st 1\n" - "gZq ng 1\n" - "vXv va 1\n" - "wGt th 1\n" - "qlO qu 1\n" - "fNz sz 1\n" - "Nvw va 1\n" - "zdZ de 1\n" - "vxV va 1\n" - "Nhz th 1\n" - "tZm th 1\n" - "iyS in 1\n" - "qZa an 1\n" - "xrZ er 1\n" - "qly qu 1\n" - "cjM ch 1\n" - "kYj ij 1\n" - "iyF in 1\n" - "Cdq qu 1\n" - "xwE wa 1\n" - "xfV fo 1\n" - "wbF wa 1\n" - "wuO qu 1\n" - "Rlh th 1\n" - "fCj ij 1\n" - "bcZ ch 1\n" - "Gjv ij 1\n" - "gLl ng 1\n" - "wLc ch 1\n" - "zmP sz 1\n" - "cYo ch 1\n" - "Rhk th 1\n" - "grM ng 1\n" - "fDh th 1\n" - "Yyb be 1\n" - "uyW un 1\n" - "kGb ka 1\n" - "iwK in 1\n" - "qkN qu 1\n" - "qXd qu 1\n" - "zCb sz 1\n" - "rQf er 1\n" - "xrO er 1\n" - "Fzh th 1\n" - "wSj ij 1\n" - "yPw wa 1\n" - "Bqw qu 1\n" - "kWc ch 1\n" - "qhX th 1\n" - "kBw ka 1\n" - "yvL va 1\n" - "xcT ch 1\n" - "Fbz sz 1\n" - "cEb ch 1\n" - "vEk ka 1\n" - "uQh th 1\n" - "sHw us 1\n" - "Fvf va 1\n" - "wkO ka 1\n" - "wiY in 1\n" - "sPm st 1\n" - "dFn an 1\n" - "qQx qu 1\n" - "Rsg ng 1\n" - "fUj ij 1\n" - "tLw th 1\n" - "sRk st 1\n" - "zkP sz 1\n" - "mvF va 1\n" - "jYb ij 1\n" - "swY is 1\n" - "rRc ch 1\n" - "rHd er 1\n" - "bDk ka 1\n" - "lWv le 1\n" - "vqv qu 1\n" - "qoN qu 1\n" - "zMl le 1\n" - "pfJ pr 1\n" - "Dmz sz 1\n" - "obQ on 1\n" - "Vfz sz 1\n" - "bVd de 1\n" - "Cjv ij 1\n" - "mKz sz 1\n" - "jjE ij 1\n" - "Aqc ch 1\n" - "Cxn an 1\n" - "vpH va 1\n" - "Lxa an 1\n" - "zpH sz 1\n" - "qoF qu 1\n" - "hRz th 1\n" - "yYw wa 1\n" - "dUx de 1\n" - "Kxl le 1\n" - "xUo on 1\n" - "hDp th 1\n" - "zDf sz 1\n" - "Wsq qu 1\n" - "jzZ sz 1\n" - "mGf me 1\n" - "jjV ij 1\n" - "pfR pr 1\n" - "bPd de 1\n" - "wjq qu 1\n" - "Rjx ij 1\n" - "Lwq qu 1\n" - 
"fqH qu 1\n" - "jRs sz 1\n" - "sfT sz 1\n" - "Grw er 1\n" - "zGn an 1\n" - "ycW ch 1\n" - "lUq qu 1\n" - "pRq qu 1\n" - "nZq an 1\n" - "Svx va 1\n" - "Phf th 1\n" - "Fvj ij 1\n" - "Qlm le 1\n" - "jgS ng 1\n" - "Mmv va 1\n" - "xPd de 1\n" - "qqw qu 1\n" - "rWp er 1\n" - "qIr qu 1\n" - "Cxf fo 1\n" - "wtG th 1\n" - "cKb ch 1\n" - "btL th 1\n" - "pRx pr 1\n" - "zsB sz 1\n" - "nbD an 1\n" - "jKg ng 1\n" - "bhL th 1\n" - "Yhw th 1\n" - "yYr er 1\n" - "jCm ij 1\n" - "xzK sz 1\n" - "pJl le 1\n" - "Qrr er 1\n" - "uvG qu 1\n" - "cfJ ch 1\n" - "iqX in 1\n" - "vNd de 1\n" - "qcM ch 1\n" - "Wvj ij 1\n" - "vmS va 1\n" - "vWp va 1\n" - "aIj an 1\n" - "jmS ij 1\n" - "Fmk ka 1\n" - "iyN in 1\n" - "bZu qu 1\n" - "Kzj sz 1\n" - "Vwd de 1\n" - "Ulx le 1\n" - "rCv er 1\n" - "wvq qu 1\n" - "Qkr ri 1\n" - "fjC ij 1\n" - "tRr th 1\n" - "pCy pr 1\n" - "fbC be 1\n" - "fQc ch 1\n" - "Xkf ka 1\n" - "Dqr qu 1\n" - "fgE ng 1\n" - "vMm va 1\n" - "dPb de 1\n" - "vjL ij 1\n" - "wKc ch 1\n" - "Pyw wa 1\n" - "eXv er 1\n" - "nVw an 1\n" - "Jww wa 1\n" - "Dfq qu 1\n" - "tCc th 1\n" - "qtH th 1\n" - "Xqm qu 1\n" - "Bhc th 1\n" - "tcX th 1\n" - "xKp pr 1\n" - "tfN th 1\n" - "ibZ in 1\n" - "Nzb sz 1\n" - "Wnj an 1\n" - "vXy va 1\n" - "iVf in 1\n" - "dxT de 1\n" - "jxQ ij 1\n" - "Ddv de 1\n" - "mXd de 1\n" - "fUq qu 1\n" - "wgQ ng 1\n" - "Lgj ng 1\n" - "mgY ng 1\n" - "qMw qu 1\n" - "gpJ ng 1\n" - "sZx st 1\n" - "nXz an 1\n" - "Wve er 1\n" - "lVk le 1\n" - "wCb wa 1\n" - "xvI va 1\n" - "mfJ me 1\n" - "tQq th 1\n" - "dTt th 1\n" - "fqk qu 1\n" - "nVt th 1\n" - "wIh th 1\n" - "Qvp va 1\n" - "vfN va 1\n" - "gQs ng 1\n" - "iVp in 1\n" - "jGl le 1\n" - "xMf fo 1\n" - "xvw wi 1\n" - "zIl le 1\n" - "zfR sz 1\n" - "zWv sz 1\n" - "ehV th 1\n" - "dZq qu 1\n" - "tmK th 1\n" - "cLt th 1\n" - "pZb pr 1\n" - "vnJ an 1\n" - "fvk ka 1\n" - "Xhv th 1\n" - "Vjn an 1\n" - "tgI th 1\n" - "xaJ an 1\n" - "mSf me 1\n" - "Xzm sz 1\n" - "dTz de 1\n" - "xXm me 1\n" - "pQz sz 1\n" - "Cqg ng 1\n" - "bSs st 1\n" - "prW er 1\n" - "hDb 
th 1\n" - "sXt th 1\n" - "kcD ch 1\n" - "kgZ ng 1\n" - "Tzt th 1\n" - "zcR ch 1\n" - "Xwu qu 1\n" - "kXg ng 1\n" - "Ywv wi 1\n" - "rpK er 1\n" - "wPs is 1\n" - "Kjz sz 1\n" - "fDb be 1\n" - "jrF er 1\n" - "bbQ be 1\n" - "Qdb de 1\n" - "rKt th 1\n" - "vYf va 1\n" - "vxA va 1\n" - "fhM th 1\n" - "jsU st 1\n" - "zXk sz 1\n" - "uwO qu 1\n" - "jsR st 1\n" - "kHn an 1\n" - "xWv va 1\n" - "vfS va 1\n" - "pIv va 1\n" - "bcW ch 1\n" - "zdM sz 1\n" - "gCz ng 1\n" - "hzN th 1\n" - "bQw wa 1\n" - "ojX on 1\n" - "Vqv qu 1\n" - "qWb qu 1\n" - "Ykb ka 1\n" - "xnJ an 1\n" - "sJz st 1\n" - "hRr th 1\n" - "tXs th 1\n" - "Qeb er 1\n" - "Uwd de 1\n" - "nYg an 1\n" - "Yfx fo 1\n" - "xrG er 1\n" - "eZr le 1\n" - "ufV us 1\n" - "rXm er 1\n" - "qZv qu 1\n" - "vQz sz 1\n" - "Tnq an 1\n" - "Rmj ij 1\n" - "jlM le 1\n" - "cqO ch 1\n" - "xWf fo 1\n" - "jcZ ch 1\n" - "jfV ij 1\n" - "Zmj ij 1\n" - "bxM be 1\n" - "fFd de 1\n" - "gjP ng 1\n" - "hMs th 1\n" - "Ysq qu 1\n" - "qkV qu 1\n" - "Kmc ch 1\n" - "xYy ny 1\n" - "dvX de 1\n" - "rwC er 1\n" - "gwW wa 1\n" - "Qpy pr 1\n" - "jXy ij 1\n" - "qOj qu 1\n" - "Qmz sz 1\n" - "Eqq qu 1\n" - "zJs st 1\n" - "fHy ny 1\n" - "hDt th 1\n" - "sDh th 1\n" - "Vkq qu 1\n" - "yLc ch 1\n" - "vHm va 1\n" - "vnX an 1\n" - "jxS ij 1\n" - "Jtj th 1\n" - "qgE ng 1\n" - "bpH pr 1\n" - "Iqy qu 1\n" - "qMn an 1\n" - "dmE de 1\n" - "Hfq qu 1\n" - "pSb pr 1\n" - "xhI th 1\n" - "Qjt th 1\n" - "yfX ny 1\n" - "vuF qu 1\n" - "wFw wa 1\n" - "znS an 1\n" - "zlV le 1\n" - "lkK le 1\n" - "Fvz sz 1\n" - "qjT qu 1\n" - "zoQ on 1\n" - "Wvx va 1\n" - "hMn th 1\n" - "dMw de 1\n" - "gcF ch 1\n" - "dbB de 1\n" - "Cqj qu 1\n" - "mCv va 1\n" - "pJx pr 1\n" - "Dfv va 1\n" - "sjL st 1\n" - "qiG in 1\n" - "Zls le 1\n" - "Vsf st 1\n" - "Fgd ng 1\n" - "wmD me 1\n" - "Dxo on 1\n" - "qrk qu 1\n" - "pJr er 1\n" - "cLx ch 1\n" - "jdB de 1\n" - "ybM be 1\n" - "mvM va 1\n" - "jtX th 1\n" - "cnB an 1\n" - "wtW th 1\n" - "Ksd st 1\n" - "wql wa 1\n" - "mhU th 1\n" - "oJy on 1\n" - "Ghp th 1\n" - "qoX qu 
1\n" - "xsI st 1\n" - "vFs st 1\n" - "fYe er 1\n" - "lnV an 1\n" - "uXn an 1\n" - "Eoh th 1\n" - "wcM wa 1\n" - "jwK ij 1\n" - "Gke er 1\n" - "uFq qu 1\n" - "Ycg ch 1\n" - "xqy qu 1\n" - "btM th 1\n" - "jHw ij 1\n" - "qeU qu 1\n" - "Qjz sz 1\n" - "nuQ an 1\n" - "Fcx ch 1\n" - "Kqt th 1\n" - "Lqv qu 1\n" - "mwU me 1\n" - "fQs st 1\n" - "kSd de 1\n" - "nYv an 1\n" - "wGj ij 1\n" - "gvZ ng 1\n" - "mqN qu 1\n" - "Fhp th 1\n" - "pMq qu 1\n" - "dBh ch 1\n" - "bXk ka 1\n" - "fqK qu 1\n" - "Yyq qu 1\n" - "Krq qu 1\n" - "Rnv an 1\n" - "uuE qu 1\n" - "Xsz st 1\n" - "fKb be 1\n" - "yIh th 1\n" - "Ncd ch 1\n" - "mLr er 1\n" - "cSs ch 1\n" - "lbE le 1\n" - "xaW an 1\n" - "Rtd th 1\n" - "rbF er 1\n" - "vgR ng 1\n" - "scZ ch 1\n" - "rHp er 1\n" - "eYw er 1\n" - "Lxj ij 1\n" - "qRg ng 1\n" - "jpN ij 1\n" - "rjW er 1\n" - "lgK ng 1\n" - "mCc ch 1\n" - "fGu qu 1\n" - "xzT sz 1\n" - "wQw wa 1\n" - "klJ li 1\n" - "cqk ch 1\n" - "lMh th 1\n" - "pYs st 1\n" - "hQk th 1\n" - "Hxz sz 1\n" - "feY er 1\n" - "fhF th 1\n" - "fBm me 1\n" - "fVt th 1\n" - "zfh th 1\n" - "sbT st 1\n" - "dQy de 1\n" - "Fmc ch 1\n" - "vhL th 1\n" - "Jtb th 1\n" - "Vrx er 1\n" - "yqZ qu 1\n" - "jDm ij 1\n" - "mfV me 1\n" - "oSx on 1\n" - "Jxg ng 1\n" - "wOq qu 1\n" - "dJq qu 1\n" - "Vvc ch 1\n" - "Eqe qu 1\n" - "jqO qu 1\n" - "zxI sz 1\n" - "qKf qu 1\n" - "fdW de 1\n" - "ccM ch 1\n" - "gcW ch 1\n" - "lFn an 1\n" - "Rvq qu 1\n" - "znN an 1\n" - "zbU sz 1\n" - "tNw th 1\n" - "wjK ij 1\n" - "Jbd de 1\n" - "Bfc ch 1\n" - "qeX le 1\n" - "tXk th 1\n" - "slJ le 1\n" - "cKd ch 1\n" - "nCf an 1\n" - "qgV ng 1\n" - "Mhx th 1\n" - "sKf st 1\n" - "hqZ th 1\n" - "Fdt th 1\n" - "qzJ qu 1\n" - "sNn an 1\n" - "tjW th 1\n" - "xcN ch 1\n" - "fcJ ch 1\n" - "djU de 1\n" - "Ygh th 1\n" - "woI on 1\n" - "Yyz sz 1\n" - "kQc ch 1\n" - "hfQ th 1\n" - "nrL an 1\n" - "lQs le 1\n" - "mtF th 1\n" - "wbX wa 1\n" - "gmR ng 1\n" - "Zsq qu 1\n" - "ytQ th 1\n" - "mbF me 1\n" - "fgT ng 1\n" - "cWu ch 1\n" - "gxG ng 1\n" - "hNv th 1\n" - "dfW de 1\n" 
- "zrC er 1\n" - "woX on 1\n" - "wjT ij 1\n" - "Pqw qu 1\n" - "vkf ka 1\n" - "nLz an 1\n" - "cjV ch 1\n" - "fcP ch 1\n" - "vlQ le 1\n" - "Fgq ng 1\n" - "hgP th 1\n" - "Gqy qu 1\n" - "tKs th 1\n" - "Xfv va 1\n" - "yZq qu 1\n" - "yiZ in 1\n" - "rXv er 1\n" - "Ycy ch 1\n" - "fvA va 1\n" - "Tqs qu 1\n" - "hZy th 1\n" - "xwc ch 1\n" - "qVf qu 1\n" - "Mhq th 1\n" - "zSj sz 1\n" - "vhQ th 1\n" - "tzX th 1\n" - "Gvm va 1\n" - "cqU ch 1\n" - "Hhp th 1\n" - "gQk ng 1\n" - "pwL pr 1\n" - "sNw st 1\n" - "qEt th 1\n" - "Nzq qu 1\n" - "zsD st 1\n" - "mDg ng 1\n" - "Rtq th 1\n" - "jLf ij 1\n" - "wTp pr 1\n" - "xJh th 1\n" - "Vqo qu 1\n" - "Zqk qu 1\n" - "qqQ qu 1\n" - "hrY th 1\n" - "Wqo qu 1\n" - "mIy me 1\n" - "Ipk ka 1\n" - "xjC ij 1\n" - "lLp le 1\n" - "hqF th 1\n" - "cWg ch 1\n" - "qYc qu 1\n" - "cjU ch 1\n" - "qXk qu 1\n" - "hqL th 1\n" - "zxT sz 1\n" - "dnX an 1\n" - "zBt th 1\n" - "Qls le 1\n" - "khC th 1\n" - "uqX qu 1\n" - "Zbf be 1\n" - "iDx li 1\n" - "Znp an 1\n" - "Jxq qu 1\n" - "jqY qu 1\n" - "vbU va 1\n" - "qRr qu 1\n" - "qpj qu 1\n" - "wlG le 1\n" - "Wgx ng 1\n" - "Vxj ij 1\n" - "zSw sz 1\n" - "ihW th 1\n" - "kzT sz 1\n" - "aeZ an 1\n" - "hKj th 1\n" - "tWs th 1\n" - "gLc ch 1\n" - "gpK ng 1\n" - "yJz sz 1\n" - "Gvt th 1\n" - "fEo on 1\n" - "sKd st 1\n" - "xhN th 1\n" - "aMq an 1\n" - "ehX th 1\n" - "kfZ ku 1\n" - "Wwc ch 1\n" - "Ymz sz 1\n" - "Vkd de 1\n" - "bzD sz 1\n" - "Xkg ng 1\n" - "Vzz sz 1\n" - "xvV va 1\n" - "pHh th 1\n" - "rKq qu 1\n" - "vmM va 1\n" - "Qxj ij 1\n" - "zNr er 1\n" - "bqB qu 1\n" - "Jqw qu 1\n" - "zqB qu 1\n" - "Xvm va 1\n" - "lBf le 1\n" - "qqB qu 1\n" - "gCs ng 1\n" - "rRg ng 1\n" - "Rnm an 1\n" - "Lzw sz 1\n" - "iwN in 1\n" - "pfN pr 1\n" - "hCw wa 1\n" - "uHz qu 1\n" - "cLc ch 1\n" - "lwD le 1\n" - "qjB qu 1\n" - "Ojy ij 1\n" - "dmV di 1\n" - "cCw ch 1\n" - "lXs le 1\n" - "smR st 1\n" - "mxO me 1\n" - "Jrt th 1\n" - "zjN sz 1\n" - "bBn an 1\n" - "cxQ ch 1\n" - "Kdp de 1\n" - "Dlb le 1\n" - "pqD qu 1\n" - "qqC qu 1\n" - "Spz sz 1\n" - 
"tCd th 1\n" - "gfP ng 1\n" - "uGj qu 1\n" - "xbE be 1\n" - "Xpv va 1\n" - "Xzt th 1\n" - "gqG qu 1\n" - "kqq qu 1\n" - "Kvq qu 1\n" - "qWi qu 1\n" - "mxZ me 1\n" - "qoY qu 1\n" - "Sgf ng 1\n" - "cRv ch 1\n" - "Wgi ng 1\n" - "eDx er 1\n" - "cWw ch 1\n" - "vFq qu 1\n" - "Kxv va 1\n" - "iWp in 1\n" - "fRx fo 1\n" - "wtB th 1\n" - "swW st 1\n" - "grK ng 1\n" - "Hfe er 1\n" - "gfZ ng 1\n" - "xqX qu 1\n" - "oKj on 1\n" - "vfq qu 1\n" - "pWw pr 1\n" - "uWc ch 1\n" - "lCg ng 1\n" - "qkg qu 1\n" - "cDh th 1\n" - "Sfz sz 1\n" - "uYx qu 1\n" - "xvR va 1\n" - "eAo er 1\n" - "pYg ng 1\n" - "dRx de 1\n" - "iWd in 1\n" - "gGx ng 1\n" - "bXz sz 1\n" - "kcP ch 1\n" - "hcJ th 1\n" - "lCf le 1\n" - "gmW ng 1\n" - "Hkf ka 1\n" - "rhL th 1\n" - "jqP qu 1\n" - "rQp er 1\n" - "vCn an 1\n" - "dWj de 1\n" - "Hrx er 1\n" - "sTz st 1\n" - "aVt th 1\n" - "qwK qu 1\n" - "vvE va 1\n" - "wKp pr 1\n" - "xcY ch 1\n" - "vpM va 1\n" - "jlC le 1\n" - "dlG le 1\n" - "oTq qu 1\n" - "iLp in 1\n" - "xsL st 1\n" - "lFz le 1\n" - "vhC th 1\n" - "ylX le 1\n" - "pmO me 1\n" - "Ycc ch 1\n" - "Ynp an 1\n" - "Ybm me 1\n" - "Qln an 1\n" - "bxA be 1\n" - "tFs th 1\n" - "Lqw qu 1\n" - "zcU ch 1\n" - "vfK va 1\n" - "vpQ va 1\n" - "Dtf th 1\n" - "bTj ij 1\n" - "Vvw va 1\n" - "Qbx be 1\n" - "zWk sz 1\n" - "bSx be 1\n" - "zpK sz 1\n" - "wTb wa 1\n" - "mkC ka 1\n" - "cRh th 1\n" - "nBk an 1\n" - "xGv va 1\n" - "hnQ th 1\n" - "aqQ an 1\n" - "zhZ th 1\n" - "zwP sz 1\n" - "vqL qu 1\n" - "scU ch 1\n" - "glS ng 1\n" - "pjE ij 1\n" - "qqD qu 1\n" - "lRx le 1\n" - "qVr qu 1\n" - "Xuh th 1\n" - "brB er 1\n" - "Qyc ch 1\n" - "Sgx ng 1\n" - "dqk qu 1\n" - "bYj ij 1\n" - "mPx me 1\n" - "Fdv de 1\n" - "Xmd de 1\n" - "cPj ch 1\n" - "Pqg qu 1\n" - "vYh th 1\n" - "bJx be 1\n" - "dQt th 1\n" - "fxj ij 1\n" - "Hwq qu 1\n" - "vgC ng 1\n" - "kjK ij 1\n" - "nrC an 1\n" - "vqX qu 1\n" - "Bgk ng 1\n" - "Cbv va 1\n" - "Uww wa 1\n" - "wcJ ch 1\n" - "gBf ng 1\n" - "zTv va 1\n" - "zwX sz 1\n" - "lWg le 1\n" - "qOs qu 1\n" - "fbB be 1\n" - "xqG 
qu 1\n" - "jQj ij 1\n" - "voQ on 1\n" - "yjW ij 1\n" - "qvO qu 1\n" - "xbF be 1\n" - "nWu an 1\n" - "yjQ ij 1\n" - "cjK ch 1\n" - "Sxn an 1\n" - "ybX be 1\n" - "eYg ng 1\n" - "Bmn an 1\n" - "fDt th 1\n" - "jXm ij 1\n" - "nMt th 1\n" - "Sxb be 1\n" - "lHm le 1\n" - "gfY ng 1\n" - "nwG an 1\n" - "gHl ng 1\n" - "Wpm me 1\n" - "wFj ij 1\n" - "hGm th 1\n" - "wwC wa 1\n" - "Mlf le 1\n" - "cJb ch 1\n" - "bnC an 1\n" - "Fvp va 1\n" - "tGc th 1\n" - "fhZ th 1\n" - "Vkh th 1\n" - "jwg ng 1\n" - "xbK be 1\n" - "zVq qu 1\n" - "qTz qu 1\n" - "vrD er 1\n" - "fRt th 1\n" - "fFs st 1\n" - "hWg th 1\n" - "lzE le 1\n" - "lwX le 1\n" - "jHy ij 1\n" - "Qqt th 1\n" - "Dqi in 1\n" - "Tvj ij 1\n" - "gPb ng 1\n" - "dPz sz 1\n" - "zdT sz 1\n" - "mvA va 1\n" - "Zvh th 1\n" - "qaU an 1\n" - "fwQ wa 1\n" - "Rsw st 1\n" - "klB le 1\n" - "vlN le 1\n" - "Gvx va 1\n" - "pdJ de 1\n" - "lcB ch 1\n" - "vTq qu 1\n" - "yhV th 1\n" - "jLv ij 1\n" - "pzR sz 1\n" - "Xyw wa 1\n" - "Xlq qu 1\n" - "Rqw wa 1\n" - "zhP th 1\n" - "sgT ng 1\n" - "gpG ng 1\n" - "tkY th 1\n" - "dqE qu 1\n" - "Qcg ch 1\n" - "bfB be 1\n" - "Wpv va 1\n" - "Wxl le 1\n" - "Xbq qu 1\n" - "yFh th 1\n" - "Rfq qu 1\n" - "hhL th 1\n" - "jxz sz 1\n" - "bKh th 1\n" - "ptU th 1\n" - "cXe ch 1\n" - "zXm sz 1\n" - "Ghw th 1\n" - "dzY sz 1\n" - "dXn an 1\n" - "kxW ka 1\n" - "vVr er 1\n" - "Jxu un 1\n" - "bbX be 1\n" - "rPb er 1\n" - "qCm qu 1\n" - "qiJ qu 1\n" - "Xgw ng 1\n" - "Nhq th 1\n" - "cGp po 1\n" - "hPw th 1\n" - "bTz sz 1\n" - "qIg ng 1\n" - "pJh th 1\n" - "wcE ch 1\n" - "mCb me 1\n" - "bJc ch 1\n" - "nzQ an 1\n" - "yqR qu 1\n" - "xHw wa 1\n" - "bwH wa 1\n" - "qCr qu 1\n" - "Uqe qu 1\n" - "qxM qu 1\n" - "fpO pr 1\n" - "kcN ch 1\n" - "ykV ka 1\n" - "mQb me 1\n" - "Yqs qu 1\n" - "yVk ka 1\n" - "vbX va 1\n" - "mTd de 1\n" - "jXo on 1\n" - "wqJ qu 1\n" - "kKt th 1\n" - "fkS ka 1\n" - "Wvz sz 1\n" - "Iyv va 1\n" - "hGk th 1\n" - "Fze er 1\n" - "bhM th 1\n" - "qvI qu 1\n" - "nXq an 1\n" - "nXc an 1\n" - "kJt th 1\n" - "Nqc ch 1\n" - "Yjc ch 
1\n" - "Fhb th 1\n" - "jyK ij 1\n" - "Jzj sz 1\n" - "yqc ch 1\n" - "wmZ me 1\n" - "zbF sz 1\n" - "spq qu 1\n" - "gPn an 1\n" - "jSg ng 1\n" - "gMh th 1\n" - "fXt th 1\n" - "Fyw wa 1\n" - "Fwg ng 1\n" - "hmN th 1\n" - "hNl th 1\n" - "tqY th 1\n" - "pGm me 1\n" - "mXz sz 1\n" - "qYy qu 1\n" - "Rmq qu 1\n" - "Dqa an 1\n" - "Wkx ka 1\n" - "dpT de 1\n" - "jyJ ij 1\n" - "Jqj qu 1\n" - "wjZ ij 1\n" - "xNr er 1\n" - "qAm qu 1\n" - "hBn th 1\n" - "qpJ qu 1\n" - "ygW ng 1\n" - "jXf ij 1\n" - "rMl er 1\n" - "zgV ng 1\n" - "nLp an 1\n" - "pFx pr 1\n" - "tvG th 1\n" - "zQl le 1\n" - "fdF de 1\n" - "bxK be 1\n" - "Bcx ch 1\n" - "rpY er 1\n" - "sJb st 1\n" - "Kvh th 1\n" - "kNq qu 1\n" - "zHd sz 1\n" - "dzF sz 1\n" - "tJq th 1\n" - "Hfv va 1\n" - "vQd de 1\n" - "pKj ij 1\n" - "fhV th 1\n" - "qZi qu 1\n" - "ohY th 1\n" - "vqq qu 1\n" - "tnQ th 1\n" - "Vqk qu 1\n" - "zJf sz 1\n" - "Jkz sz 1\n" - "Rwf wa 1\n" - "zvM va 1\n" - "bxY be 1\n" - "pXh th 1\n" - "fUy ny 1\n" - "pvE va 1\n" - "Lpk ka 1\n" - "dzV sz 1\n" - "xIf fo 1\n" - "wZw wa 1\n" - "npQ an 1\n" - "pWk ka 1\n" - "jgQ ng 1\n" - "Jqr qu 1\n" - "gmX ng 1\n" - "jfM ij 1\n" - "lWj le 1\n" - "pbN pr 1\n" - "fvF va 1\n" - "sDd st 1\n" - "qdB qu 1\n" - "frL er 1\n" - "uHn an 1\n" - "gwN ng 1\n" - "yBh th 1\n" - "Zzq qu 1\n" - "vDg ng 1\n" - "Qcz ch 1\n" - "qzf qu 1\n" - "wEc ch 1\n" - "pxH pr 1\n" - "fqO qu 1\n" - "Vqe qu 1\n" - "gkD ng 1\n" - "Xfq qu 1\n" - "uXg qu 1\n" - "jCw ij 1\n" - "Pzu qu 1\n" - "gRh th 1\n" - "vqH qu 1\n" - "vvW va 1\n" - "Rfb be 1\n" - "gqJ qu 1\n" - "tgO th 1\n" - "wUy wa 1\n" - "Jkw ka 1\n" - "hSs th 1\n" - "gkW ng 1\n" - "Qgy ng 1\n" - "dJb de 1\n" - "prF er 1\n" - "buX qu 1\n" - "cVg ch 1\n" - "jtU th 1\n" - "fDc ch 1\n" - "Ygc ch 1\n" - "Kqr qu 1\n" - "Uyp pr 1\n" - "lJk le 1\n" - "sxY st 1\n" - "xfY fo 1\n" - "Xkz sz 1\n" - "cgZ ch 1\n" - "cyX ch 1\n" - "gbF ng 1\n" - "zTk sz 1\n" - "hsU th 1\n" - "tlW th 1\n" - "Zzv sz 1\n" - "kqE qu 1\n" - "lpQ po 1\n" - "qJu un 1\n" - "hYi th 1\n" - "zlM le 1\n" 
- "vDt th 1\n" - "Hvn an 1\n" - "Nsf st 1\n" - "bJg ng 1\n" - "fNg ng 1\n" - "kQo on 1\n" - "Kqp qu 1\n" - "bKs st 1\n" - "mHp me 1\n" - "Uyj ij 1\n" - "cxY ch 1\n" - "yIe er 1\n" - "qTj qu 1\n" - "wfP wa 1\n" - "fxI fo 1\n" - "vQa an 1\n" - "fvN va 1\n" - "pwN pr 1\n" - "vaQ an 1\n" - "mxQ me 1\n" - "bdV de 1\n" - "Cgj ng 1\n" - "xjz sz 1\n" - "Wqw qu 1\n" - "wpO pr 1\n" - "woQ on 1\n" - "xYj ij 1\n" - "fpT pr 1\n" - "lNp le 1\n" - "pvX va 1\n" - "pLp pr 1\n" - "Ksg ng 1\n" - "rWg ng 1\n" - "iUy in 1\n" - "bfX be 1\n" - "xsV st 1\n" - "Xnj an 1\n" - "dmW de 1\n" - "oQw on 1\n" - "Zxy ny 1\n" - "Oay an 1\n" - "pjG ij 1\n" - "Zbt th 1\n" - "Hql qu 1\n" - "Zxq qu 1\n" - "jWd de 1\n" - "qUp qu 1\n" - "qxN qu 1\n" - "qCo qu 1\n" - "Yfd de 1\n" - "vvU va 1\n" - "vIk ka 1\n" - "Dfj ij 1\n" - "Zmh th 1\n" - "Cqt th 1\n" - "vQf va 1\n" - "Nbn an 1\n" - "tJs th 1\n" - "Fhx th 1\n" - "dzQ sz 1\n" - "zYj ij 1\n" - "qBw qu 1\n" - "vcV ch 1\n" - "gGt th 1\n" - "iVw in 1\n" - "Fzp sz 1\n" - "bjH ij 1\n" - "cuY ch 1\n" - "jwS ij 1\n" - "Cqp qu 1\n" - "yJv va 1\n" - "kdJ de 1\n" - "kdT de 1\n" - "nqB an 1\n" - "hWs th 1\n" - "qsj qu 1\n" - "hLw th 1\n" - "hdX th 1\n" - "cgV ch 1\n" - "tYc th 1\n" - "eZx er 1\n" - "hfN th 1\n" - "gvw ng 1\n" - "aVp an 1\n" - "gMs ng 1\n" - "Pbf be 1\n" - "mQf me 1\n" - "yUi in 1\n" - "vGf va 1\n" - "xgF ng 1\n" - "zvY sz 1\n" - "wrA er 1\n" - "yrM er 1\n" - "vMj ij 1\n" - "Uyv va 1\n" - "dLp de 1\n" - "Gjj ij 1\n" - "zEi in 1\n" - "Xdg ng 1\n" - "jHf ij 1\n" - "oPz on 1\n" - "xIz sz 1\n" - "bCb be 1\n" - "Dzq qu 1\n" - "Yjn an 1\n" - "gGz ng 1\n" - "mjU ij 1\n" - "Cjx ij 1\n" - "xKc ch 1\n" - "mvO va 1\n" - "Pzb sz 1\n" - "crK ch 1\n" - "xhO th 1\n" - "ylB le 1\n" - "lDk le 1\n" - "zlO le 1\n" - "pgH ng 1\n" - "vQb va 1\n" - "sdZ st 1\n" - "kQm ka 1\n" - "lRh th 1\n" - "oQy on 1\n" - "twC th 1\n" - "Bdj ij 1\n" - "Qjg ng 1\n" - "dnP an 1\n" - "Nnp an 1\n" - "qiP qu 1\n" - "Ccj ch 1\n" - "uHt th 1\n" - "qLx qu 1\n" - "Qsf st 1\n" - "fKx fo 1\n" - 
"fkE ka 1\n" - "jlX le 1\n" - "jZb ij 1\n" - "Vwj ij 1\n" - "zbA sz 1\n" - "Hhd th 1\n" - "cbY ch 1\n" - "Ikf ka 1\n" - "Grx er 1\n" - "jpP ij 1\n" - "Qfh th 1\n" - "xhW th 1\n" - "wmX me 1\n" - "aJb an 1\n" - "sfO st 1\n" - "qXq qu 1\n" - "mXg ng 1\n" - "bnV an 1\n" - "Ypw pr 1\n" - "zCy sz 1\n" - "lhN th 1\n" - "rXn an 1\n" - "fGh th 1\n" - "Wxq qu 1\n" - "cxT ch 1\n" - "Zsg ng 1\n" - "uGv qu 1\n" - "bzM sz 1\n" - "zjS sz 1\n" - "dfS de 1\n" - "gpH ng 1\n" - "qgO ng 1\n" - "kqF qu 1\n" - "qfU qu 1\n" - "qTp qu 1\n" - "vZb va 1\n" - "Ejw ij 1\n" - "zQn an 1\n" - "gYz ng 1\n" - "kjV ij 1\n" - "fWl le 1\n" - "fRk ka 1\n" - "uSj qu 1\n" - "Cxg ng 1\n" - "Lcv ch 1\n" - "bzK sz 1\n" - "wqF qu 1\n" - "qJp qu 1\n" - "rCj er 1\n" - "qvs qu 1\n" - "lwN le 1\n" - "xmR me 1\n" - "btC th 1\n" - "kTx ka 1\n" - "qkU qu 1\n" - "Lhj th 1\n" - "dIx de 1\n" - "vsQ st 1\n" - "gSd ng 1\n" - "wDl le 1\n" - "Vjm ij 1\n" - "pmI me 1\n" - "vWh th 1\n" - "fKv va 1\n" - "xPt th 1\n" - "uoQ qu 1\n" - "Kgh th 1\n" - "gwX ng 1\n" - "sgJ ng 1\n" - "pWj ij 1\n" - "Qff fo 1\n" - "hkJ th 1\n" - "Hqo qu 1\n" - "jwW ij 1\n" - "sQz st 1\n" - "wUw wa 1\n" - "mKx me 1\n" - "oQf on 1\n" - "jVk ij 1\n" - "xwT wa 1\n" - "sTq qu 1\n" - "uqV qu 1\n" - "Qlp le 1\n" - "pMb pr 1\n" - "xKj ij 1\n" - "bpX pr 1\n" - "vQe er 1\n" - "Jjq qu 1\n" - "qKh th 1\n" - "fkJ ka 1\n" - "jbQ ij 1\n" - "mZw me 1\n" - "Xgc ch 1\n" - "vzU sz 1\n" - "pTm me 1\n" - "pNq qu 1\n" - "rwD er 1\n" - "Qdg ng 1\n" - "wqC qu 1\n" - "Yrn an 1\n" - "qww qu 1\n" - "qwU qu 1\n" - "xzF sz 1\n" - "flW le 1\n" - "jzP sz 1\n" - "Wxp pr 1\n" - "rDq qu 1\n" - "dGp de 1\n" - "Ztj th 1\n" - "Uvp va 1\n" - "eGc ch 1\n" - "zZb sz 1\n" - "gQh th 1\n" - "tFd th 1\n" - "Mqg ng 1\n" - "dnD an 1\n" - "hvY th 1\n" - "Iyb be 1\n" - "fDz sz 1\n" - "Kbj ij 1\n" - "vYm va 1\n" - "Wxr er 1\n" - "Kwz sz 1\n" - "hrQ th 1\n" - "yCt th 1\n" - "Hxw wa 1\n" - "hEf th 1\n" - "bdU de 1\n" - "sGj st 1\n" - "Gwt th 1\n" - "bYh th 1\n" - "zmU sz 1\n" - "pDm po 1\n" - "qmC 
qu 1\n" - "dTd de 1\n" - "Qxq qu 1\n" - "uVf qu 1\n" - "qAl qu 1\n" - "jEa an 1\n" - "Kpy pr 1\n" - "Hqv qu 1\n" - "fCk ka 1\n" - "aqZ an 1\n" - "lUo on 1\n" - "Pvo on 1\n" - "Dqf qu 1\n" - "gdM ng 1\n" - "fzL sz 1\n" - "Bhh th 1\n" - "dGd de 1\n" - "wtY th 1\n" - "qTy qu 1\n" - "Uxr er 1\n" - "Vvm va 1\n" - "vHh th 1\n" - "qZc ch 1\n" - "fhC th 1\n" - "xdZ de 1\n" - "hZp th 1\n" - "Pmz sz 1\n" - "cfT ch 1\n" - "pjI ij 1\n" - "mdZ de 1\n" - "jkQ ij 1\n" - "Sdj de 1\n" - "hDf th 1\n" - "eJj er 1\n" - "wjY ij 1\n" - "zLm sz 1\n" - "eFs er 1\n" - "wgj ng 1\n" - "Zmk ka 1\n" - "lvJ le 1\n" - "xYm me 1\n" - "Nzf sz 1\n" - "wJi in 1\n" - "yQs st 1\n" - "pfM pr 1\n" - "dhR th 1\n" - "cmK ch 1\n" - "dhM th 1\n" - "qGb qu 1\n" - "wvQ va 1\n" - "Cgq ng 1\n" - "Jfc ch 1\n" - "bkD ka 1\n" - "fdS de 1\n" - "Ivp va 1\n" - "Gkj ij 1\n" - "zIv sz 1\n" - "Bzl le 1\n" - "gBb ng 1\n" - "Tpj ij 1\n" - "vyY va 1\n" - "Uxs st 1\n" - "kwW ka 1\n" - "gPf ng 1\n" - "pqC qu 1\n" - "cTj ch 1\n" - "yzI sz 1\n" - "Yph th 1\n" - "bvD va 1\n" - "xCc ch 1\n" - "pcQ ch 1\n" - "fZw wa 1\n" - "Zxf fo 1\n" - "wbA wa 1\n" - "bTf be 1\n" - "rxR er 1\n" - "qqE qu 1\n" - "yFp pr 1\n" - "pNf pr 1\n" - "kMv ka 1\n" - "vUq qu 1\n" - "wOh th 1\n" - "hxH th 1\n" - "Xqh th 1\n" - "uIu qu 1\n" - "Fzq qu 1\n" - "Ysd st 1\n" - "ojY on 1\n" - "cEo ch 1\n" - "lwR le 1\n" - "qjF qu 1\n" - "jTp ij 1\n" - "yzT sz 1\n" - "jfO ij 1\n" - "qSg ng 1\n" - "Nck ch 1\n" - "hwF th 1\n" - "Gmq qu 1\n" - "Iiq qu 1\n" - "zwE sz 1\n" - "qQv qu 1\n" - "xVd de 1\n" - "Ywq qu 1\n" - "sFx st 1\n" - "fvB va 1\n" - "qYe le 1\n" - "gwT ng 1\n" - "Wjx ij 1\n" - "bHn an 1\n" - "fMn an 1\n" - "gJg ng 1\n" - "Vkg ng 1\n" - "Fxv va 1\n" - "lHv le 1\n" - "Wpk ka 1\n" - "xAq qu 1\n" - "rxB pr 1\n" - "xuQ qu 1\n" - "pIb pr 1\n" - "bfE be 1\n" - "gRx ng 1\n" - "Bpb pr 1\n" - "bxN be 1\n" - "kgU ng 1\n" - "Pxc ch 1\n" - "cCq ch 1\n" - "Npb pr 1\n" - "lxE le 1\n" - "lCy le 1\n" - "dgX ng 1\n" - "xLf fo 1\n" - "bQt th 1\n" - "qgF ng 1\n" - "pxZ pr 
1\n" - "pPx pr 1\n" - "iYz in 1\n" - "vJl le 1\n" - "kTf ka 1\n" - "qVm qu 1\n" - "gwS ng 1\n" - "zTd sz 1\n" - "pQk ka 1\n" - "xEg ng 1\n" - "fpP pr 1\n" - "qjw qu 1\n" - "Oyw wa 1\n" - "mcO ch 1\n" - "Vjd de 1\n" - "qdg ng 1\n" - "Lfp pr 1\n" - "vZc ch 1\n" - "nOq an 1\n" - "qjn an 1\n" - "sKc ch 1\n" - "wgU ng 1\n" - "hgX th 1\n" - "dMv de 1\n" - "Xcp ch 1\n" - "Fwz sz 1\n" - "pwA pr 1\n" - "Lpj ij 1\n" - "bkP ka 1\n" - "vHn an 1\n" - "Jjy ij 1\n" - "mCq qu 1\n" - "wvM va 1\n" - "Icb ch 1\n" - "kfJ ka 1\n" - "hsQ th 1\n" - "dWd de 1\n" - "fUs st 1\n" - "fLn an 1\n" - "pjN ij 1\n" - "zgQ ng 1\n" - "jLj ij 1\n" - "zqE qu 1\n" - "Qmv va 1\n" - "Zjr er 1\n" - "Zkp ka 1\n" - "iyH in 1\n" - "wuY qu 1\n" - "mzT sz 1\n" - "cwK ch 1\n" - "bCm me 1\n" - "ydG de 1\n" - "xdU de 1\n" - "wTf wa 1\n" - "lHh th 1\n" - "qyD qu 1\n" - "xlV le 1\n" - "qyT qu 1\n" - "tWn th 1\n" - "rMz er 1\n" - "pXv va 1\n" - "Xbz sz 1\n" - "kHm ka 1\n" - "cVd ch 1\n" - "qzH qu 1\n" - "ydN de 1\n" - "qMb qu 1\n" - "yjS ij 1\n" - "gmC ng 1\n" - "zIi in 1\n" - "fpM pr 1\n" - "lcZ ch 1\n" - "qHn an 1\n" - "Jjd de 1\n" - "jlG le 1\n" - "qcK ch 1\n" - "xQm me 1\n" - "vIi in 1\n" - "wBp pr 1\n" - "wcI ch 1\n" - "dJd de 1\n" - "Qbn an 1\n" - "Bjf ij 1\n" - "dpY de 1\n" - "dcF ch 1\n" - "xSj ij 1\n" - "iXj in 1\n" - "Qgb ng 1\n" - "gDt th 1\n" - "xxq qu 1\n" - "xcQ ch 1\n" - "Sqs qu 1\n" - "Qmg ng 1\n" - "gcU ch 1\n" - "Bvv va 1\n" - "pzE sz 1\n" - "wtT th 1\n" - "vbL va 1\n" - "bCt th 1\n" - "Qpo on 1\n" - "mXs me 1\n" - "Zqr qu 1\n" - "Gky ka 1\n" - "Xmr er 1\n" - "Lnz an 1\n" - "vYq qu 1\n" - "yRl le 1\n" - "gmK ng 1\n" - "vwP va 1\n" - "eFg ng 1\n" - "Njd de 1\n" - "klG le 1\n" - "hbE th 1\n" - "kWz sz 1\n" - "qpM qu 1\n" - "oZc ch 1\n" - "jRm ij 1\n" - "wXl le 1\n" -#ifndef _MSC_VER // TODO: Hack to avoid unsupported long string for MS VC. 
- "iyD in 1\n" - "fvL va 1\n" - "rPw er 1\n" - "fdR de 1\n" - "iSg ng 1\n" - "dbQ de 1\n" - "xxQ xe 1\n" - "Djc ch 1\n" - "ygK ng 1\n" - "Rhb th 1\n" - "zgG ng 1\n" - "Yky ka 1\n" - "Cxj ij 1\n" - "wWk ka 1\n" - "lmY le 1\n" - "qrB qu 1\n" - "ywK wa 1\n" - "xqI qu 1\n" - "Twj ij 1\n" - "Xgq ng 1\n" - "dwZ de 1\n" - "nQl an 1\n" - "Ghc th 1\n" - "pnH an 1\n" - "vmU va 1\n" - "qqK qu 1\n" - "cjB ch 1\n" - "gzS ng 1\n" - "Rwz sz 1\n" - "gYr ng 1\n" - "Fgx ng 1\n" - "wdK de 1\n" - "hxZ th 1\n" - "xUx xe 1\n" - "wmT me 1\n" - "yYk ka 1\n" - "fcD ch 1\n" - "hVv th 1\n" - "Sgv ng 1\n" - "zPn an 1\n" - "vYb va 1\n" - "bzE sz 1\n" - "whV th 1\n" - "qNz qu 1\n" - "wtS th 1\n" - "vhY th 1\n" - "nLf an 1\n" - "Lfw wa 1\n" - "gVc ch 1\n" - "gkS ng 1\n" - "Jqb qu 1\n" - "hWx th 1\n" - "zgO ng 1\n" - "tgX th 1\n" - "jPb ij 1\n" - "Wxb be 1\n" - "gqw ng 1\n" - "Cfw wa 1\n" - "woU on 1\n" - "ycJ ch 1\n" - "kwD ka 1\n" - "Sbp pr 1\n" - "qcw ch 1\n" - "Hwr er 1\n" - "bmL me 1\n" - "gwZ ng 1\n" - "yKj ij 1\n" - "fXv va 1\n" - "iKx in 1\n" - "lRz le 1\n" - "cHj ch 1\n" - "fFt th 1\n" - "sJv sz 1\n" - "xmI me 1\n" - "cCd ch 1\n" - "iYd in 1\n" - "yfY ny 1\n" - "xbY be 1\n" - "bmE me 1\n" - "fBv va 1\n" - "dHw de 1\n" - "ycR ch 1\n" - "wvL va 1\n" - "rjL er 1\n" - "sYv sz 1\n" - "Wpn an 1\n" - "zxB sz 1\n" - "yBq qu 1\n" - "gdJ ng 1\n" - "Yjo on 1\n" - "fpQ pr 1\n" - "qOq qu 1\n" - "Wjf ij 1\n" - "qcT ch 1\n" - "Lfh th 1\n" - "cFj ch 1\n" - "lMq qu 1\n" - "wSf wa 1\n" - "wQc ch 1\n" - "zDy sz 1\n" - "qrl qu 1\n" - "pYw pr 1\n" - "Vnf an 1\n" - "Hcj ch 1\n" - "zdU sz 1\n" - "bvP va 1\n" - "Yfj ij 1\n" - "Qkn an 1\n" - "wHm me 1\n" - "qVv qu 1\n" - "gkV ng 1\n" - "vpq qu 1\n" - "hFk th 1\n" - "fWf fo 1\n" - "pYq qu 1\n" - "dNv de 1\n" - "Wwj ij 1\n" - "Fmx me 1\n" - "mDl le 1\n" - "jMg ng 1\n" - "fZk ka 1\n" - "jNp ij 1\n" - "qhf th 1\n" - "Vbg ng 1\n" - "lKx le 1\n" - "iZx in 1\n" - "sjT sz 1\n" - "ijY in 1\n" - "qtV th 1\n" - "yTk ka 1\n" - "Hpz sz 1\n" - "iGq qu 1\n" - "yqW qu 1\n" - 
"hgF th 1\n" - "mFk ka 1\n" - "Oqw qu 1\n" - "dXa an 1\n" - "Zbq qu 1\n" - "lKm le 1\n" - "Svz sz 1\n" - "zKc ch 1\n" - "Vmz sz 1\n" - "mIx me 1\n" - "gKj ng 1\n" - "gTt th 1\n" - "vfC fo 1\n" - "hKg th 1\n" - "hSx th 1\n" - "oKg ng 1\n" - "nQs an 1\n" - "yiG in 1\n" - "qgM ng 1\n" - "kQg ng 1\n" - "Cjd de 1\n" - "jPy ij 1\n" - "Xqe qu 1\n" - "Pzy sz 1\n" - "Ftq th 1\n" - "fcE ch 1\n" - "mkL ka 1\n" - "Hzj sz 1\n" - "bTn an 1\n" - "qXy qu 1\n" - "dmM de 1\n" - "dVx de 1\n" - "Tqn an 1\n" - "xWj ij 1\n" - "qxQ qu 1\n" - "fQx fo 1\n" - "vLl le 1\n" - "Pgk ng 1\n" - "gHk ng 1\n" - "hxV th 1\n" - "tJz th 1\n" - "fMz sz 1\n" - "Ixb be 1\n" - "Cyy ny 1\n" - "pXf pr 1\n" - "pLl le 1\n" - "Twq qu 1\n" - "Dtw th 1\n" - "wRn an 1\n" - "uXl qu 1\n" - "zhq th 1\n" - "wIv va 1\n" - "cjL ch 1\n" - "qxH qu 1\n" - "lDm le 1\n" - "tXv th 1\n" - "gjC ng 1\n" - "Zzd sz 1\n" - "tgT th 1\n" - "hnP th 1\n" - "Kjc ch 1\n" - "gVw ng 1\n" - "xbI be 1\n" - "Zpc ch 1\n" - "bfO be 1\n" - "mSx me 1\n" - "qaF an 1\n" - "aQh th 1\n" - "Hjd de 1\n" - "qXj qu 1\n" - "fqA qu 1\n" - "bvR va 1\n" - "qSn an 1\n" - "cdV ch 1\n" - "pTf pr 1\n" - "Kzc ch 1\n" - "qtI th 1\n" - "egY ng 1\n" - "Rxt th 1\n" - "bhY th 1\n" - "pGh th 1\n" - "jDg ng 1\n" - "foY on 1\n" - "dKs sz 1\n" - "qJt th 1\n" - "Xwz sz 1\n" - "Ixg ng 1\n" - "rMt th 1\n" - "zXu qu 1\n" - "sQy sz 1\n" - "Npz sz 1\n" - "Qfz sz 1\n" - "rLm er 1\n" - "zGm sz 1\n" - "wHz sz 1\n" - "vcY ch 1\n" - "kqZ qu 1\n" - "jDh th 1\n" - "qgG ng 1\n" - "Dqq qu 1\n" - "fmO me 1\n" - "qdW qu 1\n" - "dNw de 1\n" - "rXj er 1\n" - "Jwc ch 1\n" - "mDb me 1\n" - "wMw wa 1\n" - "Yjg ng 1\n" - "fjY ij 1\n" - "iJb in 1\n" - "cdC ch 1\n" - "Yxq qu 1\n" - "Vbk ka 1\n" - "Fpx pr 1\n" - "zhD th 1\n" - "hCs th 1\n" - "dXw de 1\n" - "kDd de 1\n" - "uqT un 1\n" - "Bxw wa 1\n" - "Bjq qu 1\n" - "jGx ij 1\n" - "fXb be 1\n" - "ybF be 1\n" - "dtA th 1\n" - "cVv ch 1\n" - "Cbd de 1\n" - "wtH th 1\n" - "Kdj de 1\n" - "kPs sz 1\n" - "Zvk ka 1\n" - "xPv va 1\n" - "woH on 1\n" - "Xpz 
sz 1\n" - "qXe qu 1\n" - "pTj ij 1\n" - "kwQ ka 1\n" - "kZf ka 1\n" - "Uqj qu 1\n" - "yJh th 1\n" - "hCq th 1\n" - "jMj ij 1\n" - "phY th 1\n" - "kbB ka 1\n" - "Gpz sz 1\n" - "sGz st 1\n" - "fwE wa 1\n" - "Ttf th 1\n" - "Gqm qu 1\n" - "bzN sz 1\n" - "fkO ka 1\n" - "uzW qu 1\n" - "oxQ on 1\n" - "Vgm ng 1\n" - "qmD qu 1\n" - "xqn an 1\n" - "vRl le 1\n" - "Tnr an 1\n" - "zjW sz 1\n" - "vwq qu 1\n" - "jtW th 1\n" - "qnL an 1\n" - "yDx ny 1\n" - "xfQ fo 1\n" - "wxJ wa 1\n" - "nxE an 1\n" - "vQn in 1\n" - "Wkh th 1\n" - "ywD wa 1\n" - "pFf pr 1\n" - "lbK le 1\n" - "vHy va 1\n" - "gVj ng 1\n" - "Oqh th 1\n" - "bcN ch 1\n" - "tWm th 1\n" - "wMc ch 1\n" - "nwQ an 1\n" - "qnM an 1\n" - "Ztx th 1\n" - "nQj an 1\n" - "Vxt th 1\n" - "Uxc ch 1\n" - "pWv va 1\n" - "yRx ny 1\n" - "qKu un 1\n" - "jXg ng 1\n" - "jpX ij 1\n" - "dkG de 1\n" - "Bnf an 1\n" - "Ykf ka 1\n" - "gbW ng 1\n" - "klX le 1\n" - "vkH ka 1\n" - "dKd de 1\n" - "Kpq qu 1\n" - "gqM ng 1\n" - "yBz sz 1\n" - "rPj er 1\n" - "Hzv sz 1\n" - "wYz sz 1\n" - "qGa an 1\n" - "jIs sz 1\n" - "bUj ij 1\n" - "rTt th 1\n" - "nqI an 1\n" - "jfP ij 1\n" - "hRt th 1\n" - "yRr er 1\n" - "jjK ij 1\n" - "tfE th 1\n" - "Qsw st 1\n" - "Fcm ch 1\n" - "bJm me 1\n" - "tXq th 1\n" - "fRl le 1\n" - "gqE ng 1\n" - "wGg ng 1\n" - "gKc ch 1\n" - "yXc ch 1\n" - "zBy sz 1\n" - "lTd le 1\n" - "Wqc ch 1\n" - "Ftf th 1\n" - "wdB de 1\n" - "xnX an 1\n" - "Bqc ch 1\n" - "zqO qu 1\n" - "Qdl le 1\n" - "ojJ on 1\n" - "qZn an 1\n" - "hzW th 1\n" - "ylQ le 1\n" - "Zbw wa 1\n" - "mvL va 1\n" - "Ljb ij 1\n" - "Gqe qu 1\n" - "mfE me 1\n" - "xQq qu 1\n" - "fLv va 1\n" - "xLt th 1\n" - "wBj ij 1\n" - "jUm ij 1\n" - "pdL de 1\n" - "mJv va 1\n" - "dxU de 1\n" - "xqN qu 1\n" - "fpG pr 1\n" - "tlO th 1\n" - "whL th 1\n" - "kDx ka 1\n" - "Rqb qu 1\n" - "uvX qu 1\n" - "vjY ij 1\n" - "crQ ch 1\n" - "xyY ny 1\n" - "yhQ th 1\n" - "yYc ch 1\n" - "Lmg ng 1\n" - "Jsq qu 1\n" - "Gbj ij 1\n" - "aPb an 1\n" - "dwJ de 1\n" - "Xyv va 1\n" - "ucJ ch 1\n" - "dTf de 1\n" - "lBb le 
1\n" - "hKz th 1\n" - "jcR ch 1\n" - "eQc ch 1\n" - "qYi in 1\n" - "Vtb th 1\n" - "Ccg ch 1\n" - "zAe er 1\n" - "gxJ ng 1\n" - "uvC qu 1\n" - "Bhm ma 1\n" - "Zgx ng 1\n" - "yzJ sz 1\n" - "cvJ ch 1\n" - "xTk ka 1\n" - "qdK qu 1\n" - "vwG va 1\n" - "Ymx me 1\n" - "oYw on 1\n" - "jXx ij 1\n" - "ywf wa 1\n" - "vVx vi 1\n" - "Rwm me 1\n" - "Dvk ka 1\n" - "xKt th 1\n" - "qLp qu 1\n" - "Yyv vi 1\n" - "Cqa an 1\n" - "xRf fo 1\n" - "Qqk qu 1\n" - "Jqe qu 1\n" - "yZg ng 1\n" - "vqG qu 1\n" - "hbO th 1\n" - "uVq qu 1\n" - "Rlm le 1\n" - "uZc ch 1\n" - "Ppv va 1\n" - "pVd de 1\n" - "yVd de 1\n" - "zJl le 1\n" - "Yzg ng 1\n" - "Cvq qu 1\n" - "pwS pr 1\n" - "Kkw ka 1\n" - "Wvv va 1\n" - "Fdy de 1\n" - "ppX pr 1\n" - "hvC th 1\n" - "iwG in 1\n" - "rBg ng 1\n" - "hBq th 1\n" - "nYs an 1\n" - "kcO ch 1\n" - "qEe qu 1\n" - "Ybv va 1\n" - "Qsn an 1\n" - "svC st 1\n" - "qkD qu 1\n" - "Qiw in 1\n" - "Gtj th 1\n" - "qAh th 1\n" - "wVy wa 1\n" - "bxT be 1\n" - "Qhs th 1\n" - "tlX th 1\n" - "hbA th 1\n" - "Qfb be 1\n" - "xWl le 1\n" - "xeV er 1\n" - "rqG qu 1\n" - "vqZ qu 1\n" - "jKv ij 1\n" - "iTf in 1\n" - "kwU ka 1\n" - "iFq in 1\n" - "mjZ ij 1\n" - "xgJ ng 1\n" - "zLp sz 1\n" - "qsR qu 1\n" - "zDj sz 1\n" - "pdF de 1\n" - "wxN wa 1\n" - "wGk ka 1\n" - "dUq qu 1\n" - "dJw de 1\n" - "fCb be 1\n" - "Dhz th 1\n" - "yIq qu 1\n" - "aQm an 1\n" - "Yzs st 1\n" - "vHf va 1\n" - "bjV ij 1\n" - "zSq qu 1\n" - "Wqs qu 1\n" - "jrW er 1\n" - "Hzq qu 1\n" - "wWs st 1\n" - "Mkg ng 1\n" - "zgF ng 1\n" - "Cnk an 1\n" - "rDg ng 1\n" - "fzB sz 1\n" - "fOm me 1\n" - "uVt th 1\n" - "Qfi in 1\n" - "Mhj th 1\n" - "uYj qu 1\n" - "Rqx qu 1\n" - "hkY th 1\n" - "wYb wa 1\n" - "tqP th 1\n" - "Jpb pr 1\n" - "bGw wa 1\n" - "xFh th 1\n" - "Xwb wa 1\n" - "Kgt th 1\n" - "Iqc ch 1\n" - "pJm me 1\n" - "Qkq qu 1\n" - "bVh th 1\n" - "yTq qu 1\n" - "zZg ng 1\n" - "cDz ch 1\n" - "qfm qu 1\n" - "afQ an 1\n" - "Qwc ch 1\n" - "bdJ de 1\n" - "qTu un 1\n" - "Ucx ch 1\n" - "Hnx an 1\n" - "Hbh th 1\n" - "gyH ng 1\n" - "tTz th 1\n" 
- "txV th 1\n" - "bdS de 1\n" - "Wgg ng 1\n" - "oqP qu 1\n" - "Rrf er 1\n" - "gYy ng 1\n" - "fMs st 1\n" - "fKd de 1\n" - "Hyx ny 1\n" - "Mxz sz 1\n" - "qHk qu 1\n" - "tfM th 1\n" - "hgQ th 1\n" - "zmO sz 1\n" - "wzS sz 1\n" - "jwQ ij 1\n" - "Fhc ic 1\n" - "xIy ny 1\n" - "fHg ng 1\n" - "wqY qu 1\n" - "bFp pr 1\n" - "Qdq qu 1\n" - "bhV th 1\n" - "bCg ng 1\n" - "Hgr ng 1\n" - "xqL qu 1\n" - "qgS ng 1\n" - "Nqg ng 1\n" - "fQv va 1\n" - "Qzw sz 1\n" - "Ixd de 1\n" - "Cxm me 1\n" - "mxN me 1\n" - "vQi in 1\n" - "cAq ch 1\n" - "eCx er 1\n" - "mqX qu 1\n" - "rqY qu 1\n" - "fVp pr 1\n" - "qoP qu 1\n" - "Gxc ch 1\n" - "vzX sz 1\n" - "fXf fo 1\n" - "Qtc th 1\n" - "ohQ th 1\n" - "Ygy ng 1\n" - "Xnb an 1\n" - "cWm ch 1\n" - "jXw ij 1\n" - "gWj ng 1\n" - "Kmg ng 1\n" - "vvH va 1\n" - "Uew er 1\n" - "qJk qu 1\n" - "Hkd de 1\n" - "xmP me 1\n" - "slR is 1\n" - "Uaq an 1\n" - "zbG sz 1\n" - "vNv va 1\n" - "cVb ch 1\n" - "bGg ng 1\n" - "iwU in 1\n" - "Cnw an 1\n" - "rXd er 1\n" - "vWz sz 1\n" - "tGf th 1\n" - "fbY be 1\n" - "hzp th 1\n" - "uWz qu 1\n" - "bMb be 1\n" - "jzW sz 1\n" - "gLh th 1\n" - "kZc ch 1\n" - "kHg ng 1\n" - "Vwf wa 1\n" - "vtY th 1\n" - "qeA qu 1\n" - "cxG ch 1\n" - "uQz qu 1\n" - "jGc ch 1\n" - "cvA ch 1\n" - "oTm on 1\n" - "pjY ij 1\n" - "bUo on 1\n" - "jwU ij 1\n" - "Jgm ng 1\n" - "tfZ th 1\n" - "xeO er 1\n" - "qBp qu 1\n" - "pBz sz 1\n" - "qSb qu 1\n" - "jyP ij 1\n" - "Fkq qu 1\n" - "njS an 1\n" - "jtA th 1\n" - "Zmf me 1\n" - "Ytm th 1\n" - "Pqc ch 1\n" - "bwJ wa 1\n" - "oWf on 1\n" - "kxJ ka 1\n" - "jHx ij 1\n" - "gcP ch 1\n" - "gBs ng 1\n" - "bkK ka 1\n" - "vdQ de 1\n" - "pjZ ij 1\n" - "Vgf ng 1\n" - "svG st 1\n" - "kGj ij 1\n" - "Wjg ng 1\n" - "Qmk ka 1\n" - "Glv le 1\n" - "tmY th 1\n" - "klY le 1\n" - "Pcj ch 1\n" - "fQw wi 1\n" - "xaO an 1\n" - "jfN ij 1\n" - "qGx qu 1\n" - "qvB qu 1\n" - "hwA th 1\n" - "Xmq qu 1\n" - "Xvt th 1\n" - "Bpq qu 1\n" - "oJq qu 1\n" - "vmZ va 1\n" - "nJp an 1\n" - "zqJ qu 1\n" - "qHf qu 1\n" - "mQg ng 1\n" - "yGz sz 1\n" - 
"hQm th 1\n" - "mBp me 1\n" - "tpJ th 1\n" - "Qkj ij 1\n" - "uUg ng 1\n" - "tdJ th 1\n" - "Jfn an 1\n" - "Lvj ij 1\n" - "iXc ch 1\n" - "pOq qu 1\n" - "bhK th 1\n" - "bMk ka 1\n" - "Fsw st 1\n" - "qAt th 1\n" - "xwJ wa 1\n" - "fPm me 1\n" - "Dfy ny 1\n" - "Zbp pr 1\n" - "Bgw ng 1\n" - "pQp pr 1\n" - "kQp ka 1\n" - "qoV qu 1\n" - "Uqd qu 1\n" - "jYo on 1\n" - "sDf st 1\n" - "xuJ qu 1\n" - "vRk ka 1\n" - "Qsg ng 1\n" - "yTd de 1\n" - "Qxr er 1\n" - "Hvc ch 1\n" - "hZt th 1\n" - "qDu un 1\n" - "fxA fo 1\n" - "xPf fo 1\n" - "wXc ch 1\n" - "jJb ij 1\n" - "pdK de 1\n" - "gpW ng 1\n" - "Qgx ng 1\n" - "kxG ka 1\n" - "dLx de 1\n" - "Bwz sz 1\n" - "Vdx de 1\n" - "yQh th 1\n" - "Wsx st 1\n" - "fSb be 1\n" - "Ukg ng 1\n" - "Pjz sz 1\n" - "rFg ng 1\n" - "fjP ij 1\n" - "kWv ka 1\n" - "Khf th 1\n" - "yGv va 1\n" - "pnD an 1\n" - "jYf ij 1\n" - "mgR ng 1\n" - "rjC er 1\n" - "Xjl le 1\n" - "kzE sz 1\n" - "Qgq ng 1\n" - "zgb ng 1\n" - "mhD th 1\n" - "vkO ka 1\n" - "uwV qu 1\n" - "rPp er 1\n" - "wXd de 1\n" - "gAo ng 1\n" - "kvG ka 1\n" - "vcX ch 1\n" - "xOz sz 1\n" - "Xzq qu 1\n" - "Fmu qu 1\n" - "xGg ng 1\n" - "jjR ij 1\n" - "qkI ku 1\n" - "pqH qu 1\n" - "cnH an 1\n" - "dhT th 1\n" - "mdR de 1\n" - "dDf de 1\n" - "qIq qu 1\n" - "xCj ij 1\n" - "qRk qu 1\n" - "kKc ch 1\n" - "Iuu qu 1\n" - "jqR qu 1\n" - "qEk qu 1\n" - "hfO th 1\n" - "quJ un 1\n" - "nRp an 1\n" - "txI th 1\n" - "yfZ ny 1\n" - "oqT ho 1\n" - "cgX ch 1\n" - "pbL pr 1\n" - "Xmx me 1\n" - "Vjr er 1\n" - "ylY le 1\n" - "dfK de 1\n" - "xgD ng 1\n" - "uwL qu 1\n" - "bPm me 1\n" - "qCy qu 1\n" - "Rpq qu 1\n" - "yqh th 1\n" - "xJt th 1\n" - "lzQ le 1\n" - "fgM ng 1\n" - "Ylc ch 1\n" - "fTz sz 1\n" - "Rjf ij 1\n" - "Rgj jo 1\n" - "Gkt th 1\n" - "fxG fo 1\n" - "mtG th 1\n" - "lgJ ng 1\n" - "tdR th 1\n" - "iHk in 1\n" - "Gqv qu 1\n" - "lDj le 1\n" - "wzZ sz 1\n" - "dFp de 1\n" - "qTt th 1\n" - "Wtg th 1\n" - "cbT ch 1\n" - "dvK de 1\n" - "Ctw th 1\n" - "mdG de 1\n" - "vKj ij 1\n" - "Clf le 1\n" - "wrU er 1\n" - "gmT ng 1\n" - "bXx 
be 1\n" - "zOx sz 1\n" - "Xnf an 1\n" - "rzQ er 1\n" - "vQj ij 1\n" - "kpT ka 1\n" - "fYh th 1\n" - "zLr er 1\n" - "Xgd ng 1\n" - "cZl ch 1\n" - "lFy le 1\n" - "Zng an 1\n" - "aXg an 1\n" - "qbE qu 1\n" - "zcY ch 1\n" - "sqK qu 1\n" - "Blx le 1\n" - "oqJ qu 1\n" - "jPv ij 1\n" - "qZd qu 1\n" - "fdZ de 1\n" - "Bqm qu 1\n" - "cpG ch 1\n" - "xdP de 1\n" - "fuF qu 1\n" - "vbq qu 1\n" - "dhH th 1\n" - "Jwm me 1\n" - "qkO ko 1\n" - "gsY ng 1\n" - "qGh th 1\n" - "Jkv ka 1\n" - "zpg ng 1\n" - "rwK er 1\n" - "Lhq th 1\n" - "zuV qu 1\n" - "bqV qu 1\n" - "Qcv ch 1\n" - "mWd de 1\n" - "cnF an 1\n" - "lWw le 1\n" - "txS th 1\n" - "znE an 1\n" - "fTj ij 1\n" - "lFq qu 1\n" - "wdJ de 1\n" - "eVk er 1\n" - "zjZ sz 1\n" - "fPq qu 1\n" - "cqQ ch 1\n" - "Pcg ch 1\n" - "Ydk de 1\n" - "svE st 1\n" - "Wqb qu 1\n" - "bcV ch 1\n" - "nHx on 1\n" - "wAx wa 1\n" - "hfB th 1\n" - "aMv an 1\n" - "pwO pr 1\n" - "Ywx wa 1\n" - "cbH ch 1\n" - "ojZ on 1\n" - "suU qu 1\n" - "jcU ch 1\n" - "sqY qu 1\n" - "jMr er 1\n" - "pxG pr 1\n" - "rBq qu 1\n" - "vlY le 1\n" - "hyY th 1\n" - "Cvw va 1\n" - "Tqe qu 1\n" - "fSj ij 1\n" - "fVs st 1\n" - "Eqc ch 1\n" - "xnD an 1\n" - "Owp pr 1\n" - "xTb be 1\n" - "wjL ij 1\n" - "Rxv va 1\n" - "nWf an 1\n" - "vHp va 1\n" - "vBk ka 1\n" - "Nqv qu 1\n" - "Lzf sz 1\n" - "bwS wa 1\n" - "Cby be 1\n" - "zRr er 1\n" - "qwJ qu 1\n" - "xnB an 1\n" - "qIc ch 1\n" - "cGk ch 1\n" - "Yji in 1\n" - "gVh th 1\n" - "lDc ch 1\n" - "Qyr er 1\n" - "fcH ch 1\n" - "nxB an 1\n" - "dvw de 1\n" - "gQc ch 1\n" - "mrR er 1\n" - "fnK an 1\n" - "Hlr le 1\n" - "Dnq an 1\n" - "bnU an 1\n" - "qCe qu 1\n" - "Tjv ij 1\n" - "Epq qu 1\n" - "wLf wa 1\n" - "pZj ij 1\n" - "gvR ng 1\n" - "kqK qu 1\n" - "vlG le 1\n" - "vvN va 1\n" - "gbM ng 1\n" - "bNk ka 1\n" - "jzL sz 1\n" - "Wlq qu 1\n" - "aYq an 1\n" - "zdY de 1\n" - "sfG st 1\n" - "qfW qu 1\n" - "kBv ka 1\n" - "btG th 1\n" - "Mqb qu 1\n" - "lrC er 1\n" - "vuE qu 1\n" - "fyJ ny 1\n" - "qmZ qu 1\n" - "Jkq qu 1\n" - "Cmj ij 1\n" - "bXy be 1\n" - "Ymy me 
1\n" - "qxY qu 1\n" - "cNl ch 1\n" - "fzU fo 1\n" - "Rvt th 1\n" - "ylI le 1\n" - "xMs st 1\n" - "Qhm th 1\n" - "dHq qu 1\n" - "dwL de 1\n" - "vYr er 1\n" - "Qxu qu 1\n" - "dNh th 1\n" - "zNc ch 1\n" - "jmP ij 1\n" - "Pbq qu 1\n" - "fqj qu 1\n" - "fUw wa 1\n" - "Hyq qu 1\n" - "Qdx de 1\n" - "zSl le 1\n" - "cWt th 1\n" - "Fke er 1\n" - "Ztz th 1\n" - "uUq qu 1\n" - "nBm an 1\n" - "zJy sz 1\n" - "pdI de 1\n" - "nTd an 1\n" - "Yjb ij 1\n" - "Qjn an 1\n" - "yXj ij 1\n" - "xwB ow 1\n" - "klq qu 1\n" - "hfY th 1\n" - "pDg ng 1\n" - "zZd de 1\n" - "mqO qu 1\n" - "hZr th 1\n" - "cmY ch 1\n" - "gLk ng 1\n" - "Qcj ch 1\n" - "uKj qu 1\n" - "nqD an 1\n" - "yKw wa 1\n" - "bfR be 1\n" - "Rqz qu 1\n" - "jhQ th 1\n" - "vNj ij 1\n" - "Tcf ch 1\n" - "Hbn an 1\n" - "Lwv va 1\n" - "wcZ ch 1\n" - "cdK ch 1\n" - "bpR pr 1\n" - "lWm le 1\n" - "wNq qu 1\n" - "pAj ij 1\n" - "grV ng 1\n" - "qmk qu 1\n" - "cLf ch 1\n" - "iwB in 1\n" - "eqV qu 1\n" - "Wqz qu 1\n" - "Qnj an 1\n" - "uoJ qu 1\n" - "fVj ij 1\n" - "cbU ch 1\n" - "qpT qu 1\n" - "pdZ de 1\n" - "dzW de 1\n" - "Wfw wa 1\n" - "Zqm qu 1\n" - "kJd de 1\n" - "zWf sz 1\n" - "bYg ng 1\n" - "rjQ er 1\n" - "dwB de 1\n" - "Vlx le 1\n" - "zKd de 1\n" - "Lxw wa 1\n" - "Hpw pr 1\n" - "mvR va 1\n" - "qMt th 1\n" - "pWb pr 1\n" - "dcW ch 1\n" - "zEh th 1\n" - "Xrs er 1\n" - "Ftz th 1\n" - "qyL qu 1\n" - "jSn an 1\n" - "Wzh th 1\n" - "Pzf sz 1\n" - "zkW sz 1\n" - "ywY wa 1\n" - "oGb on 1\n" - "jBw ij 1\n" - "Qpz sz 1\n" - "rWm er 1\n" - "smQ st 1\n" - "uGk qu 1\n" - "xkV ka 1\n" - "wJf wa 1\n" - "cjW ch 1\n" - "wNx wa 1\n" - "wjR ij 1\n" - "wDd wa 1\n" - "lrB er 1\n" - "qhJ th 1\n" - "jKp ij 1\n" - "kNn an 1\n" - "tqU th 1\n" - "Jmj ij 1\n" - "bJv va 1\n" - "frN er 1\n" - "uBj qu 1\n" - "Uuv qu 1\n" - "Mzv sz 1\n" - "Djq qu 1\n" - "Qgl le 1\n" - "hdC th 1\n" - "mFh th 1\n" - "vjU ij 1\n" - "prX er 1\n" - "Kvc ch 1\n" - "ryY er 1\n" - "vzQ sz 1\n" - "Ojh th 1\n" - "Qfn an 1\n" - "Vqg ng 1\n" - "aQv an 1\n" - "hHx th 1\n" - "uIg ng 1\n" - "Kpv va 1\n" 
- "dQk ko 1\n" - "Ghq th 1\n" - "cZs ch 1\n" - "nvH an 1\n" - "jwJ ij 1\n" - "dMm de 1\n" - "gjI ng 1\n" - "lPg ng 1\n" - "qBs qu 1\n" - "Vhq th 1\n" - "qLt th 1\n" - "hBd th 1\n" - "Vcu ch 1\n" - "cQd ch 1\n" - "ypX pr 1\n" - "mQv va 1\n" - "vmR va 1\n" - "xfH fo 1\n" - "pqY qu 1\n" - "Xtb th 1\n" - "Vcx ch 1\n" - "tWb th 1\n" - "Pxa an 1\n" - "Qmr er 1\n" - "mdX de 1\n" - "Bxt th 1\n" - "jZv ij 1\n" - "hNp th 1\n" - "ybN be 1\n" - "bkZ ka 1\n" - "nVf an 1\n" - "lKq qu 1\n" - "oJj on 1\n" - "pBv va 1\n" - "hgA th 1\n" - "qxE qu 1\n" - "nvJ an 1\n" - "Xcf ch 1\n" - "Fdb de 1\n" - "zAo on 1\n" - "wQk ka 1\n" - "tmX th 1\n" - "pvZ va 1\n" - "fNw wa 1\n" - "zKk sz 1\n" - "hRx th 1\n" - "Tlj le 1\n" - "iQj in 1\n" - "jmU ij 1\n" - "tbW th 1\n" - "wVh th 1\n" - "Tvh th 1\n" - "nVg an 1\n" - "Lxp pr 1\n" - "vgO ng 1\n" - "dfE de 1\n" - "nVm an 1\n" - "qKy qu 1\n" - "eqZ qu 1\n" - "Tcc ch 1\n" - "cTk ch 1\n" - "fKz sz 1\n" - "Wkz sz 1\n" - "lvZ le 1\n" - "rGp er 1\n" - "kKz sz 1\n" - "Cbf be 1\n" - "jQd de 1\n" - "Zfc ch 1\n" - "hvX th 1\n" - "xgN ng 1\n" - "Kpe er 1\n" - "hzM th 1\n" - "jxZ ij 1\n" - "yqL qu 1\n" - "pgC ng 1\n" - "Fqd qu 1\n" - "tMb th 1\n" - "njQ an 1\n" - "tfB th 1\n" - "gjN ng 1\n" - "wNc ch 1\n" - "Pzj sz 1\n" - "mhO th 1\n" - "qUm qu 1\n" - "Fhh th 1\n" - "Sjd de 1\n" - "hWj th 1\n" - "yhL th 1\n" - "lGp le 1\n" - "dtX th 1\n" - "hwX th 1\n" - "srK er 1\n" - "vqE qu 1\n" - "bcO ch 1\n" - "xQl le 1\n" - "Qqf qu 1\n" - "kJg ng 1\n" - "pXz sz 1\n" - "yuJ qu 1\n" - "Gnp an 1\n" - "Dlc ch 1\n" - "Mxf fo 1\n" - "yNr er 1\n" - "bmV me 1\n" - "fXo on 1\n" - "mwW me 1\n" - "lIj le 1\n" - "Fvq qu 1\n" - "Utq th 1\n" - "jGk ij 1\n" - "wYw wa 1\n" - "wVm me 1\n" - "bTq qu 1\n" - "Ijp ij 1\n" - "znM an 1\n" - "xmO me 1\n" - "gQx ng 1\n" - "dKw de 1\n" - "dUf de 1\n" - "cSb ch 1\n" - "zVb sz 1\n" - "ccY ch 1\n" - "xjE ij 1\n" - "pYt th 1\n" - "Vrq qu 1\n" - "kzK sz 1\n" - "zfC sz 1\n" - "Ybh th 1\n" - "dgS ng 1\n" - "xcV ch 1\n" - "xNm me 1\n" - "Xkw ka 1\n" - 
"Tpw pr 1\n" - "Bwd de 1\n" - "hwT th 1\n" - "gQl ng 1\n" - "cDs ch 1\n" - "zYr er 1\n" - "xTp pr 1\n" - "qWm qu 1\n" - "xjT ij 1\n" - "hjK th 1\n" - "uDc ch 1\n" - "xhS th 1\n" - "bWd de 1\n" - "vCw va 1\n" - "jyB ij 1\n" - "uWd qu 1\n" - "Nnq qu 1\n" - "Qvb va 1\n" - "jzV sz 1\n" - "zBx sz 1\n" - "wIj ij 1\n" - "qRt th 1\n" - "qrJ qu 1\n" - "zZj sz 1\n" - "kRr er 1\n" - "Nzv sz 1\n" - "Qfw wa 1\n" - "Njt th 1\n" - "bFy be 1\n" - "lhY th 1\n" - "eWj er 1\n" - "jbM ij 1\n" - "Xsg ng 1\n" - "Rsd de 1\n" - "flF le 1\n" - "Phz th 1\n" - "xWs st 1\n" - "bCw wa 1\n" - "gfJ ng 1\n" - "qVo qu 1\n" - "eQh th 1\n" - "vcP ch 1\n" - "mDj ij 1\n" - "qTs qu 1\n" - "Xgs ng 1\n" - "Vuq qu 1\n" - "ufN qu 1\n" - "xBs st 1\n" - "pTk ka 1\n" - "fSq qu 1\n" - "mbD me 1\n" - "Vwz sz 1\n" - "hhQ th 1\n" - "kfP ka 1\n" - "Pwq qu 1\n" - "dhG th 1\n" - "qZj qu 1\n" - "yRj ij 1\n" - "yCs st 1\n" - "fjN ij 1\n" - "Rqg ng 1\n" - "jJh th 1\n" - "dlR le 1\n" - "Xmb me 1\n" - "Jjt th 1\n" - "gqI ng 1\n" - "fqM qu 1\n" - "iVg ng 1\n" - "Hgu ng 1\n" - "iHw in 1\n" - "eQv er 1\n" - "mzE sz 1\n" - "fjZ ij 1\n" - "qNn an 1\n" - "wlE le 1\n" - "kGp ka 1\n" - "Iqv qu 1\n" - "kBn an 1\n" - "xZd de 1\n" - "Dkc ch 1\n" - "zlH le 1\n" - "txB th 1\n" - "tQr th 1\n" - "uOx qu 1\n" - "pJi in 1\n" - "zbL sz 1\n" - "xkD ka 1\n" - "scV ch 1\n" - "qXh th 1\n" - "kIq qu 1\n" - "xNn an 1\n" - "gJf ng 1\n" - "tmB th 1\n" - "tcK th 1\n" - "kwZ ka 1\n" - "uZj qu 1\n" - "snQ an 1\n" - "uKq qu 1\n" - "crX ch 1\n" - "hXy th 1\n" - "Zcc ch 1\n" - "Pfz sz 1\n" - "dwM de 1\n" - "qIy qu 1\n" - "xuP qu 1\n" - "wDw wa 1\n" - "Hjr er 1\n" - "dQf de 1\n" - "wvJ wa 1\n" - "tHm th 1\n" - "Ydw de 1\n" - "wxI wa 1\n" - "pOv va 1\n" - "Wmq qu 1\n" - "dhD th 1\n" - "qpw qu 1\n" - "bmC me 1\n" - "wcX ch 1\n" - "wjH ij 1\n" - "bWf be 1\n" - "Gdp de 1\n" - "Ldw de 1\n" - "Sbq qu 1\n" - "vZv va 1\n" - "Kwb wa 1\n" - "qhT th 1\n" - "yRf ny 1\n" - "hwC th 1\n" - "npJ an 1\n" - "jmV ij 1\n" - "vGg ng 1\n" - "xqF qu 1\n" - "Phm th 1\n" - "pWc 
ch 1\n" - "Vxk ka 1\n" - "sHz st 1\n" - "Wbx be 1\n" - "bfK be 1\n" - "Jgl ng 1\n" - "kTb ka 1\n" - "Kbf be 1\n" - "kzC sz 1\n" - "pKq qu 1\n" - "zwB sz 1\n" - "uZg ng 1\n" - "btI th 1\n" - "zXj sz 1\n" - "uzS qu 1\n" - "vWk ka 1\n" - "xrH er 1\n" - "oQc ch 1\n" - "zlT le 1\n" - "dfI de 1\n" - "Qmf me 1\n" - "sgE ng 1\n" - "Ysx st 1\n" - "Rzd de 1\n" - "xLd de 1\n" - "qsX qu 1\n" - "kqJ qu 1\n" - "kCm ka 1\n" - "bFm me 1\n" - "igQ ng 1\n" - "sRq qu 1\n" - "jGm ij 1\n" - "Szs st 1\n" - "Yvz sz 1\n" - "kXz sz 1\n" - "Gnz an 1\n" - "mWc ch 1\n" - "tDq th 1\n" - "gqz ng 1\n" - "nHb ng 1\n" - "tdM th 1\n" - "Ovx va 1\n" - "Znl an 1\n" - "wuE qu 1\n" - "zLt th 1\n" - "ofQ on 1\n" - "vYj ij 1\n" - "jyH ij 1\n" - "zqA qu 1\n" - "cJy ch 1\n" - "Wbf be 1\n" - "lTt th 1\n" - "klW le 1\n" - "Xxa an 1\n" - "fCz sz 1\n" - "lKf le 1\n" - "qwT qu 1\n" - "rHk er 1\n" - "dbN de 1\n" - "uUy qu 1\n" - "zgN ng 1\n" - "Pxg ng 1\n" - "pNc ch 1\n" - "cyJ ch 1\n" - "jpH ij 1\n" - "Vtf th 1\n" - "sjJ st 1\n" - "Qlh th 1\n" - "twV th 1\n" - "yGq qu 1\n" - "tVp th 1\n" - "ksQ st 1\n" - "xnT an 1\n" - "rpJ er 1\n" - "wzI sz 1\n" - "Zhp th 1\n" - "aDf an 1\n" - "Uxj ij 1\n" - "cPg ch 1\n" - "qSq qu 1\n" - "mKq qu 1\n" - "vBz sz 1\n" - "yPj ij 1\n" - "Vkz sz 1\n" - "qiB qu 1\n" - "tkJ th 1\n" - "Ouq qu 1\n" - "zoH on 1\n" - "qVt th 1\n" - "Gxs st 1\n" - "jzF sz 1\n" - "swH st 1\n" - "nBb an 1\n" - "zhQ th 1\n" - "yRn an 1\n" - "fnX an 1\n" - "qoQ qu 1\n" - "mxP me 1\n" - "bwR wa 1\n" - "gJj ng 1\n" - "qnk an 1\n" - "tMk th 1\n" - "dxO de 1\n" - "rzV er 1\n" - "vpP va 1\n" - "Nvz sz 1\n" - "Nfp pr 1\n" - "Cnz an 1\n" - "oTd on 1\n" - "dqG qu 1\n" - "Hmx me 1\n" - "psX st 1\n" - "swM st 1\n" - "dqC qu 1\n" - "Vwx wa 1\n" - "nXf an 1\n" - "wkY ka 1\n" - "wfC wa 1\n" - "qSr qu 1\n" - "qVc ch 1\n" - "kDn an 1\n" - "Yvb va 1\n" - "zqH qu 1\n" - "qxJ qu 1\n" - "zKj sz 1\n" - "jcN ch 1\n" - "tWk th 1\n" - "Rrz er 1\n" - "bmG me 1\n" - "srZ er 1\n" - "wWq qu 1\n" - "Cfh th 1\n" - "lNt th 1\n" - "hcV th 
1\n" - "Znf an 1\n" - "Jhv th 1\n" - "qIp qu 1\n" - "vSz sz 1\n" - "feU er 1\n" - "xIi in 1\n" - "Zmq qu 1\n" - "eGf er 1\n" - "bQk ka 1\n" - "Xcb ch 1\n" - "nlK an 1\n" - "tmJ th 1\n" - "jlL le 1\n" - "mwC me 1\n" - "qjr qu 1\n" - "zBb sz 1\n" - "fhU th 1\n" - "sPq qu 1\n" - "sBf st 1\n" - "uXy qu 1\n" - "Lkx ka 1\n" - "rGz er 1\n" - "hXz th 1\n" - "zuW qu 1\n" - "Rvx va 1\n" - "bcJ ch 1\n" - "Eoj on 1\n" - "iVt in 1\n" - "yhH th 1\n" - "xVv va 1\n" - "pMr er 1\n" - "vZd de 1\n" - "Vvn an 1\n" - "iCv in 1\n" - "vQp va 1\n" - "vlB le 1\n" - "wVt th 1\n" - "Ugk ng 1\n" - "ktQ th 1\n" - "jCr er 1\n" - "qvz qu 1\n" - "bVf be 1\n" - "rPv er 1\n" - "wfH wa 1\n" - "hbU th 1\n" - "pjF ij 1\n" - "oXg ng 1\n" - "zSr er 1\n" - "wRb wa 1\n" - "Hcu ch 1\n" - "yxJ ny 1\n" - "lTc ch 1\n" - "bYb be 1\n" - "Wxz sz 1\n" - "vrE er 1\n" - "zGy sz 1\n" - "Jqm qu 1\n" - "rzI er 1\n" - "xgV gi 1\n" - "Rvw va 1\n" - "Vnx an 1\n" - "uJg ng 1\n" - "hFq th 1\n" - "Tgz ng 1\n" - "aQc an 1\n" - "xzJ sz 1\n" - "tNc th 1\n" - "jfA ij 1\n" - "ycO ch 1\n" - "Wkj ij 1\n" - "yBp pr 1\n" - "hgD th 1\n" - "iSx in 1\n" - "xCm me 1\n" - "yjX ij 1\n" - "uIh th 1\n" - "qgq ng 1\n" - "Tzj sz 1\n" - "yjO ij 1\n" - "yrY er 1\n" - "bmZ me 1\n" - "zqT qu 1\n" - "mBd de 1\n" - "qvK qu 1\n" - "zcA ch 1\n" - "xrX er 1\n" - "mJm me 1\n" - "Xqf qu 1\n" - "Pxk ka 1\n" - "aDb an 1\n" - "qXg ng 1\n" - "eGw er 1\n" - "hjD th 1\n" - "tTx th 1\n" - "oMd on 1\n" - "fKg ng 1\n" - "Npn an 1\n" - "kqU qu 1\n" - "lbF le 1\n" - "Hvj ij 1\n" - "qZe qu 1\n" - "lQj le 1\n" - "dkY de 1\n" - "dZl le 1\n" - "zZh th 1\n" - "qyM qu 1\n" - "dmJ de 1\n" - "kfK ka 1\n" - "iPq qu 1\n" - "zwU sz 1\n" - "pvS va 1\n" - "ihJ th 1\n" - "ucW ch 1\n" - "Jjz sz 1\n" - "mMd de 1\n" - "vpw va 1\n" - "xCg ng 1\n" - "hKs th 1\n" - "vlI le 1\n" - "Nmc ch 1\n" - "xzV sz 1\n" - "gZs ng 1\n" - "rRp er 1\n" - "Ufd de 1\n" - "fpF pr 1\n" - "fwY wa 1\n" - "Gxr er 1\n" - "xLr er 1\n" - "vzE sz 1\n" - "jRf ij 1\n" - "brR er 1\n" - "gkZ ng 1\n" - "dUy de 1\n" 
- "Xji in 1\n" - "Kdb de 1\n" - "jpC ij 1\n" - "oUj on 1\n" - "qmh th 1\n" - "qjL qu 1\n" - "wRs sz 1\n" - "jhM th 1\n" - "Rhr th 1\n" - "btN th 1\n" - "Pjq ij 1\n" - "xwU wa 1\n" - "qyE qu 1\n" - "Jxd de 1\n" - "Pqr qu 1\n" - "lRd le 1\n" - "jqI qu 1\n" - "qFs qu 1\n" - "Mwk ka 1\n" - "jEb ij 1\n" - "Nxy ny 1\n" - "Pzm sz 1\n" - "tfL th 1\n" - "vFc ch 1\n" - "jQg ng 1\n" - "Bnx an 1\n" - "lMv le 1\n" - "tKq th 1\n" - "eVq qu 1\n" - "Tyq qu 1\n" - "drJ er 1\n" - "oHw on 1\n" - "lFk le 1\n" - "jpW ij 1\n" - "Qjw ij 1\n" - "cNx ch 1\n" - "Bhz th 1\n" - "bhB th 1\n" - "pDx pr 1\n" - "xpY pr 1\n" - "tnH th 1\n" - "dfL de 1\n" - "hzL th 1\n" - "zNk sz 1\n" - "lBm le 1\n" - "lXl le 1\n" - "yPv va 1\n" - "Zcl ch 1\n" - "hMq th 1\n" - "rJj ri 1\n" - "aXw an 1\n" - "zsQ sz 1\n" - "cQm ch 1\n" - "Sqc ch 1\n" - "tKm th 1\n" - "hvO th 1\n" - "hGd th 1\n" - "Wbn an 1\n" - "vCf va 1\n" - "lGg ng 1\n" - "vDh th 1\n" - "wDq qu 1\n" - "xRy ny 1\n" - "vXi in 1\n" - "qiQ qu 1\n" - "cFs ch 1\n" - "Lhp th 1\n" - "xEp pr 1\n" - "fQt th 1\n" - "cJv ch 1\n" - "lzO le 1\n" - "Fxk ka 1\n" - "tDd th 1\n" - "Xnx an 1\n" - "txC th 1\n" - "tGb th 1\n" - "zvG sz 1\n" - "gpC ng 1\n" - "pxD pr 1\n" - "Zfp pr 1\n" - "oWt th 1\n" - "vvV va 1\n" - "Gwf wa 1\n" - "Ycv ch 1\n" - "gcZ ch 1\n" - "mMw me 1\n" - "yQl le 1\n" - "uGp qu 1\n" - "lNj le 1\n" - "Ycm ch 1\n" - "vIx va 1\n" - "yLp pr 1\n" - "mRx me 1\n" - "nrK an 1\n" - "Zyh th 1\n" - "Nct th 1\n" - "Qml le 1\n" - "zPd de 1\n" - "dWq qu 1\n" - "Egx ng 1\n" - "vNs st 1\n" - "sNl le 1\n" - "pdW de 1\n" - "Snh th 1\n" - "yrP er 1\n" - "fJl le 1\n" - "tVg th 1\n" - "jvC ij 1\n" - "yhN th 1\n" - "qdC qu 1\n" - "pmT me 1\n" - "Lbg ng 1\n" - "xpJ pr 1\n" - "mYt th 1\n" - "bwV wa 1\n" - "wjD ij 1\n" - "fqC qu 1\n" - "xUf fo 1\n" - "dhU th 1\n" - "bZb be 1\n" - "twD th 1\n" - "bbM be 1\n" - "hgC th 1\n" - "dKb de 1\n" - "vJm va 1\n" - "wEq qu 1\n" - "Ofq qu 1\n" - "cXl ch 1\n" - "wpV pr 1\n" - "tqM th 1\n" - "pUf pr 1\n" - "Twx wa 1\n" - "Mgq ng 1\n" - 
"vQo on 1\n" - "yjT ij 1\n" - "aVd an 1\n" - "eHp er 1\n" - "vGv va 1\n" - "srG er 1\n" - "qVb qu 1\n" - "tlM th 1\n" - "nrT an 1\n" - "zRh th 1\n" - "cLr ch 1\n" - "lrH er 1\n" - "wTl le 1\n" - "cvI ch 1\n" - "kqN qu 1\n" - "Ixp pr 1\n" - "xeQ er 1\n" - "cNy ch 1\n" - "kRh th 1\n" - "ruY qu 1\n" - "Xcq ch 1\n" - "Kzb bi 1\n" - "Wxh th 1\n" - "pjM ij 1\n" - "jdO de 1\n" - "Jfy ny 1\n" - "bVz sz 1\n" - "dQo on 1\n" - "ncQ an 1\n" - "pVw pr 1\n" - "Sxj ij 1\n" - "Ubp pr 1\n" - "wvC va 1\n" - "khG th 1\n" - "cqF ch 1\n" - "Nxj ij 1\n" - "wDm me 1\n" - "yDd de 1\n" - "iyI in 1\n" - "eXq qu 1\n" - "hqP th 1\n" - "Kxr er 1\n" - "vsY st 1\n" - "Twb wa 1\n" - "fqw qu 1\n" - "wmC me 1\n" - "vFx va 1\n" - "vnC an 1\n" - "nWq an 1\n" - "hzB th 1\n" - "Kfk ka 1\n" - "tQe th 1\n" - "juW qu 1\n" - "qlX qu 1\n" - "hGw th 1\n" - "Oqd qu 1\n" - "Npw pr 1\n" - "hgW th 1\n" - "fxM fo 1\n" - "jSy ij 1\n" - "fJt th 1\n" - "mjG ij 1\n" - "tgV th 1\n" - "Ogx ng 1\n" - "Hbx be 1\n" - "Ljl le 1\n" - "ivZ in 1\n" - "bmY me 1\n" - "Qfp pr 1\n" - "wfQ wa 1\n" - "hCg th 1\n" - "vuU qu 1\n" - "ydZ de 1\n" - "vVk ka 1\n" - "mZf me 1\n" - "lOq qu 1\n" - "qIv qu 1\n" - "xZb be 1\n" - "xqk qu 1\n" - "Wmy me 1\n" - "Jqi qu 1\n" - "cxL ch 1\n" - "Ztq th 1\n" - "tdT th 1\n" - "uWt th 1\n" - "xGz sz 1\n" - "Wwk ka 1\n" - "pBk ka 1\n" - "yqg ng 1\n" - "cYl ch 1\n" - "ynW an 1\n" - "wyJ wa 1\n" - "qGy qu 1\n" - "fNp pr 1\n" - "hFs th 1\n" - "Yxu qu 1\n" - "kvJ ka 1\n" - "Fxz sz 1\n" - "twG th 1\n" - "qvG qu 1\n" - "vRp va 1\n" - "Qqi qu 1\n" - "gzE ng 1\n" - "pNl le 1\n" - "zpW sz 1\n" - "dcP ch 1\n" - "cPx ch 1\n" - "wcQ ch 1\n" - "pQc ch 1\n" - "qyF qu 1\n" - "zcX ch 1\n" - "wqk qu 1\n" - "kmY ka 1\n" - "qlG qu 1\n" - "xEz sz 1\n" - "pqV qu 1\n" - "Ohp th 1\n" - "xdM de 1\n" - "fLp pr 1\n" - "qAe qu 1\n" - "Xwv va 1\n" - "Lzi in 1\n" - "qOk qu 1\n" - "cXn an 1\n" - "Kds de 1\n" - "gvU ng 1\n" - "fPk ka 1\n" - "nZr an 1\n" - "Hxq qu 1\n" - "fCm me 1\n" - "qfD qu 1\n" - "Wfv va 1\n" - "qfb qu 1\n" - "jqC 
qu 1\n" - "fuX qu 1\n" - "qfA qu 1\n" - "Rlt th 1\n" - "xjD ij 1\n" - "wtF th 1\n" - "Xmz sz 1\n" - "pWp pr 1\n" - "Qxv va 1\n" - "zVf sz 1\n" - "gmZ ng 1\n" - "qdU qu 1\n" - "jqV qu 1\n" - "gXc ch 1\n" - "qmK qu 1\n" - "Gfj ij 1\n" - "cQr ch 1\n" - "Yhr th 1\n" - "vvS va 1\n" - "uDb qu 1\n" - "cdB ch 1\n" - "bvE va 1\n" - "xvS va 1\n" - "jRq qu 1\n" - "rvD er 1\n" - "Xyy ny 1\n" - "Jfi in 1\n" - "aBw an 1\n" - "nWc an 1\n" - "xBq qu 1\n" - "kgY ng 1\n" - "bGb bi 1\n" - "gjE ng 1\n" - "Rlw le 1\n" - "wrT er 1\n" - "bQr er 1\n" - "ljY le 1\n" - "qvU qu 1\n" - "fKm me 1\n" - "pTt th 1\n" - "zTw sz 1\n" - "qnV an 1\n" - "rWx er 1\n" - "nWd an 1\n" - "nKf an 1\n" - "kMf ka 1\n" - "fkG ka 1\n" - "bwX wa 1\n" - "cwV ch 1\n" - "uwK qu 1\n" - "rLv er 1\n" - "zMb sz 1\n" - "zpZ sz 1\n" - "rMq qu 1\n" - "Ttj th 1\n" - "gvO ng 1\n" - "Jcz ch 1\n" - "Cyx ny 1\n" - "njX an 1\n" - "aVx an 1\n" - "qXn an 1\n" - "Uqs qu 1\n" - "dVz de 1\n" - "Rcp ch 1\n" - "eKg ng 1\n" - "Xzn in 1\n" - "vyF va 1\n" - "Klc ch 1\n" - "xdI de 1\n" - "Hqb qu 1\n" - "xEe er 1\n" - "qpI qu 1\n" - "gDx ng 1\n" - "Jhf th 1\n" - "quK un 1\n" - "vgU ng 1\n" - "rWv er 1\n" - "Pnm an 1\n" - "nLm an 1\n" - "Bhj th 1\n" - "bPt th 1\n" - "jpI ij 1\n" - "tLz th 1\n" - "vpS va 1\n" - "Fxj ij 1\n" - "qDs qu 1\n" - "wzM sz 1\n" - "gwJ ng 1\n" - "zBw sz 1\n" - "qGv qu 1\n" - "rLh th 1\n" - "Bjl le 1\n" - "hfH th 1\n" - "clW ch 1\n" - "Rgk ng 1\n" - "Gsg ng 1\n" - "Uvx va 1\n" - "Qgv ng 1\n" - "gfX ng 1\n" - "rQv er 1\n" - "xvG va 1\n" - "kjx ij 1\n" - "dGf de 1\n" - "fcA ch 1\n" - "Ehq th 1\n" - "zBz sz 1\n" - "Gpk ka 1\n" - "tBv th 1\n" - "Xfg ng 1\n" - "yJm me 1\n" - "sqT qu 1\n" - "prY er 1\n" - "Dqo qu 1\n" - "Jzg ng 1\n" - "qMp qu 1\n" - "yfM ny 1\n" - "Gxf fo 1\n" - "wzP sz 1\n" - "zNm sz 1\n" - "wKg ng 1\n" - "Rrd er 1\n" - "Hvw va 1\n" - "gfD ng 1\n" - "Wmz sz 1\n" - "cJn an 1\n" - "nTf an 1\n" - "uvW qu 1\n" - "uPf qu 1\n" - "vwR va 1\n" - "bMf be 1\n" - "wIu qu 1\n" - "kxY ka 1\n" - "gZk ng 1\n" - "qFd qu 
1\n" - "bMl le 1\n" - "wHl le 1\n" - "wVg ng 1\n" - "wlX le 1\n" - "fsL st 1\n" - "pRf pr 1\n" - "zsX st 1\n" - "qBk qu 1\n" - "Xzp sz 1\n" - "jdR de 1\n" - "Zlz le 1\n" - "Wfc ch 1\n" - "Rjv ij 1\n" - "vFz sz 1\n" - "tkV th 1\n" - "Xbw wa 1\n" - "xQc ch 1\n" - "Kxy ny 1\n" - "xCv va 1\n" - "nqV an 1\n" - "Wwx wa 1\n" - "kdW de 1\n" - "pkI ka 1\n" - "ohS th 1\n" - "Zdc ch 1\n" - "mCg ng 1\n" - "sxL st 1\n" - "Qrx er 1\n" - "qXw qu 1\n" - "wqQ qu 1\n" - "ijK in 1\n" - "sFz st 1\n" - "Hlw le 1\n" - "Gqn an 1\n" - "xPk ka 1\n" - "wZq qu 1\n" - "jqm qu 1\n" - "Lzp sz 1\n" - "Bdz de 1\n" - "wQl le 1\n" - "wtJ th 1\n" - "Uyi in 1\n" - "Wcy ch 1\n" - "wqH qu 1\n" - "Bns an 1\n" - "cDt th 1\n" - "xJv va 1\n" - "Wfz sz 1\n" - "xhP th 1\n" - "cWp ch 1\n" - "rqZ qu 1\n" - "bkB ka 1\n" - "Wtl th 1\n" - "gzf ng 1\n" - "bMr er 1\n" - "pxN pr 1\n" - "vhV th 1\n" - "kqX qu 1\n" - "Kdq qu 1\n" - "vQl le 1\n" - "ykC ka 1\n" - "zMh th 1\n" - "Eqz qu 1\n" - "lXq qu 1\n" - "zmZ sz 1\n" - "qpB qu 1\n" - "vGj ij 1\n" - "Tjx zj 1\n" - "tvK th 1\n" - "gYc ch 1\n" - "lFc ch 1\n" - "iJt th 1\n" - "Pkx ka 1\n" - "cDv ch 1\n" - "Yyd de 1\n" - "Vcq ch 1\n" - "Xhq th 1\n" - "zNf sz 1\n" - "vcD ch 1\n" - "bnW an 1\n" - "uvQ qu 1\n" - "Zzj sz 1\n" - "gPj ng 1\n" - "jwD ij 1\n" - "jpO ij 1\n" - "bDx be 1\n" - "vEi in 1\n" - "Zct th 1\n" - "wrX er 1\n" - "dhS th 1\n" - "zjJ sz 1\n" - "dDk de 1\n" - "srJ er 1\n" - "aWg an 1\n" - "mvJ va 1\n" - "Ytc th 1\n" - "jiQ in 1\n" - "tFz th 1\n" - "sJl le 1\n" - "vZq qu 1\n" - "xUd de 1\n" - "oqB qu 1\n" - "xDh th 1\n" - "hfE th 1\n" - "mSb me 1\n" - "jmR ij 1\n" - "rFp er 1\n" - "Xjy ij 1\n" - "bPp pr 1\n" - "iqQ ti 1\n" - "mfq qu 1\n" - "txL th 1\n" - "jBd de 1\n" - "Xvq qu 1\n" - "dvY de 1\n" - "sdM de 1\n" - "xgY ng 1\n" - "rYh th 1\n" - "vlA le 1\n" - "pFb pr 1\n" - "yFz sz 1\n" - "gcK ch 1\n" - "xfZ fo 1\n" - "jDc ch 1\n" - "yNv va 1\n" - "tKt th 1\n" - "wtU th 1\n" - "bHk ka 1\n" - "qCw qu 1\n" - "Zca an 1\n" - "kDw ka 1\n" - "Ywc ch 1\n" - "pXs st 1\n" 
- "yMm me 1\n" - "Gwq qu 1\n" - "mYv va 1\n" - "wCx wa 1\n" - "jZx ij 1\n" - "oQd on 1\n" - "Fzk sz 1\n" - "lwF le 1\n" - "Xzk sz 1\n" - "Njx ij 1\n" - "yoI on 1\n" - "sJm st 1\n" - "wKk ka 1\n" - "Qth ch 1\n" - "Llz le 1\n" - "gVf gi 1\n" - "pPq qu 1\n" - "lGy le 1\n" - "gzR ng 1\n" - "rXg ng 1\n" - "Npf pr 1\n" - "wvR va 1\n" - "yXs st 1\n" - "mMl li 1\n" - "bYx be 1\n" - "fzZ sz 1\n" - "vrG er 1\n" - "Kdk de 1\n" - "yqw qu 1\n" - "Lkq qu 1\n" - "jKs st 1\n" - "Zqx qu 1\n" - "Pfm me 1\n" - "rlW er 1\n" - "hPv th 1\n" - "Ojx ij 1\n" - "Gtq th 1\n" - "vtJ th 1\n" - "Wly le 1\n" - "yHd de 1\n" - "kQb ka 1\n" - "Ldc de 1\n" - "sUx st 1\n" - "cJg ch 1\n" - "fLd de 1\n" - "Mjq qu 1\n" - "Cjm ij 1\n" - "awX an 1\n" - "Gtl th 1\n" - "wzN sz 1\n" - "bqx qu 1\n" - "fAq qu 1\n" - "ezX er 1\n" - "cBx ch 1\n" - "csX ch 1\n" - "cUf ch 1\n" - "qsJ qu 1\n" - "hsZ th 1\n" - "qzg ng 1\n" - "Qgk ng 1\n" - "Nxg ng 1\n" - "Hqa an 1\n" - "rXl er 1\n" - "nlP an 1\n" - "aVg an 1\n" - "yhG th 1\n" - "kfA ka 1\n" - "Vmk mG 1\n" - "jKm ij 1\n" - "hPd th 1\n" - "aPd an 1\n" - "bYy be 1\n" - "bnZ an 1\n" - "Gsj st 1\n" - "kxQ ka 1\n" - "vkF ka 1\n" - "jzS sz 1\n" - "fWm me 1\n" - "Qcu ch 1\n" - "rZf er 1\n" - "jbZ ij 1\n" - "aQj an 1\n" - "bzO sz 1\n" - "fZq qu 1\n" - "lrN er 1\n" - "fkL ka 1\n" - "Dqv qu 1\n" - "zkC sz 1\n" - "sLw st 1\n" - "Nvr er 1\n" - "Nby be 1\n" - "eMh th 1\n" - "wFc ch 1\n" - "Cxz sz 1\n" - "iZp in 1\n" - "dvZ de 1\n" - "vIh th 1\n" - "qCl qu 1\n" - "Pzo on 1\n" - "vNq qu 1\n" - "zqK qu 1\n" - "Lmx me 1\n" - "xVt th 1\n" - "glD ng 1\n" - "Gbf be 1\n" - "Jvq qu 1\n" - "zFw sz 1\n" - "tMq th 1\n" - "vkJ ka 1\n" - "Sxu qu 1\n" - "afU an 1\n" - "mHb me 1\n" - "jxU ij 1\n" - "cJl ch 1\n" - "uqE qu 1\n" - "Nqq qu 1\n" - "xGt th 1\n" - "czG ch 1\n" - "Kfg ng 1\n" - "zWh th 1\n" - "yXm me 1\n" - "fnD an 1\n" - "Jrd er 1\n" - "oxZ on 1\n" - "hXn th 1\n" - "fqI qu 1\n" - "wAo on 1\n" - "iGk in 1\n" - "xEw wa 1\n" - "fVq qu 1\n" - "ytU th 1\n" - "bhG th 1\n" - "oQz on 1\n" - 
"pgO ng 1\n" - "Yqm qu 1\n" - "bJi in 1\n" - "kcV ch 1\n" - "knM an 1\n" - "Cwr er 1\n" - "Wgd ng 1\n" - "bpT pr 1\n" - "Jdj de 1\n" - "Nbq qu 1\n" - "twJ th 1\n" - "Qep er 1\n" - "Kdc ch 1\n" - "kQq qu 1\n" - "rPq qu 1\n" - "lWp le 1\n" - "Fbq qu 1\n" - "bVk ka 1\n" - "zlI le 1\n" - "Bzp sz 1\n" - "jfK ij 1\n" - "Yvm va 1\n" - "Ftm th 1\n" - "aMj an 1\n" - "zzV sz 1\n" - "zOa an 1\n" - "mHc ch 1\n" - "xWn an 1\n" - "fFh th 1\n" - "sDv st 1\n" - "vmD va 1\n" - "xjL ij 1\n" - "iBq qu 1\n" - "jqT qu 1\n" - "hsR th 1\n" - "Qxo on 1\n" - "jsG st 1\n" - "cXb ch 1\n" - "Ybj ij 1\n" - "xeJ er 1\n" - "oPq qu 1\n" - "yXt th 1\n" - "xvL va 1\n" - "jcF ch 1\n" - "kFb ka 1\n" - "jXv ij 1\n" - "Aox on 1\n" - "zkQ sz 1\n" - "fPd de 1\n" - "Fvx va 1\n" - "fbX be 1\n" - "oCf on 1\n" - "Yjd de 1\n" - "Ppf pr 1\n" - "Njs st 1\n" - "cZh th 1\n" - "vnG an 1\n" - "cwJ cm 1\n" - "qJl qu 1\n" - "gNf ng 1\n" - "Tfv va 1\n" - "vwK va 1\n" - "Zcs ch 1\n" - "eBv er 1\n" - "qLf qu 1\n" - "Yqt th 1\n" - "crD ch 1\n" - "Icj ch 1\n" - "qBl qu 1\n" - "gzX ng 1\n" - "ujF qu 1\n" - "vxU va 1\n" - "kZt th 1\n" - "Ldh th 1\n" - "bfM be 1\n" - "mQm QO 1\n" - "zlQ le 1\n" - "jbU ij 1\n" - "Kvz sz 1\n" - "Uxw wa 1\n" - "pjS ij 1\n" - "Xvv va 1\n" - "kjI ij 1\n" - "cYi ch 1\n" - "nJn an 1\n" - "Qxz sz 1\n" - "aNw an 1\n" - "Jfp pr 1\n" - "bNz sz 1\n" - "xdQ de 1\n" - "Bzk sz 1\n" - "qZz qu 1\n" - "Ycp ch 1\n" - "pGs st 1\n" - "kCf ka 1\n" - "gwP ng 1\n" - "wbV wa 1\n" - "Eqt eq 1\n" - "Xhn th 1\n" - "oUf on 1\n" - "dKc ch 1\n" - "sxN st 1\n" - "Ofz sz 1\n" - "gCp ng 1\n" - "bhI th 1\n" - "hgU th 1\n" - "knU an 1\n" - "kjT ij 1\n" - "fsZ st 1\n" - "lGv le 1\n" - "wMd de 1\n" - "ukQ qu 1\n" - "Ghk th 1\n" - "kRw ka 1\n" - "zRc ch 1\n" - "gwK ng 1\n" - "vJp va 1\n" - "tVc th 1\n" - "pqT qu 1\n" - "iYl in 1\n" - "xLv va 1\n" - "Xdq qu 1\n" - "zcO ch 1\n" - "plM le 1\n" - "bDz sz 1\n" - "Nmx me 1\n" - "dKv de 1\n" - "hPk th 1\n" - "Tjy ij 1\n" - "wYs st 1\n" - "nfJ an 1\n" - "tfC th 1\n" - "zJt th 1\n" - "lKp 
le 1\n" - "Iyc ch 1\n" - "xuB qu 1\n" - "eKx er 1\n" - "sZf st 1\n" - "zpQ sz 1\n" - "sfL st 1\n" - "mjT ij 1\n" - "zXw sz 1\n" - "yKt th 1\n" - "rwV er 1\n" - "pjB ij 1\n" - "qYb qu 1\n" - "bYz sz 1\n" - "qqY eq 1\n" - "uIf qu 1\n" - "jTc ch 1\n" - "sqC qu 1\n" - "uJc ch 1\n" - "dGx de 1\n" - "swF st 1\n" - "Hfn an 1\n" - "Htb th 1\n" - "pfW hW 1\n" - "iyG in 1\n" - "zPc ch 1\n" - "yzV sz 1\n" - "pVz sz 1\n" - "sPg ng 1\n" - "fKj ij 1\n" - "eFb er 1\n" - "Qji jS 1\n" - "mtH th 1\n" - "wgZ ng 1\n" - "hHd th 1\n" - "fTt th 1\n" - "gxZ ng 1\n" - "Ktg th 1\n" - "hWd th 1\n" - "fWq qu 1\n" - "wSv va 1\n" - "Fzn an 1\n" - "ghH th 1\n" - "npW an 1\n" - "jvP ij 1\n" - "uYk qu 1\n" - "Uxn an 1\n" - "Sqg ng 1\n" - "zcJ ch 1\n" - "dMr er 1\n" - "Zgc ch 1\n" - "qGp qu 1\n" - "oVq qu 1\n" - "oUa an 1\n" - "oqV qu 1\n" - "jGs st 1\n" - "Ybq qu 1\n" - "qRf qu 1\n" - "brZ er 1\n" - "qTv qu 1\n" - "wZf wa 1\n" - "gOj ng 1\n" - "Jji in 1\n" - "Ppx pr 1\n" - "qwB qu 1\n" - "qcJ ch 1\n" - "fFz sz 1\n" - "wwY wa 1\n" - "kTc ch 1\n" - "uGn an 1\n" - "eQq qu 1\n" - "qGk qu 1\n" - "dpV de 1\n" - "vTm va 1\n" - "Ojq qu 1\n" - "dpX de 1\n" - "bYf be 1\n" - "tjV th 1\n" - "Lzn LG 1\n" - "Yjm ij 1\n" - "uYw qu 1\n" - "Zdg ng 1\n" - "hXs th 1\n" - "Iwp pr 1\n" - "hJw th 1\n" - "Tfd de 1\n" - "cxO ch 1\n" - "Qqy qu 1\n" - "lDv le 1\n" - "zsO st 1\n" - "mrG er 1\n" - "cjJ ch 1\n" - "dgD ng 1\n" - "cUw ch 1\n" - "zdB de 1\n" - "jlU le 1\n" - "bBf be 1\n" - "qbJ qu 1\n" - "qlR qu 1\n" - "cWc ch 1\n" - "Xgb ng 1\n" - "zrU er 1\n" - "bgI ng 1\n" - "wjJ ij 1\n" - "mvU va 1\n" - "rCp GC 1\n" - "nVx an 1\n" - "xbG be 1\n" - "tdN th 1\n" - "yjR ij 1\n" - "wQj ij 1\n" - "xzZ sz 1\n" - "qUk qu 1\n" - "xjY ij 1\n" - "Jxz sz 1\n" - "xZs st 1\n" - "vZx va 1\n" - "lRs le 1\n" - "vwp va 1\n" - "wpj ij 1\n" - "swS st 1\n" - "Eqx qu 1\n" - "vEw va 1\n" - "tkQ th 1\n" - "vgX ng 1\n" - "Rwb wa 1\n" - "sjW st 1\n" - "dXm de 1\n" - "fvY vK 1\n" - "lrO er 1\n" - "Ldx de 1\n" - "cxV ch 1\n" - "qFh th 1\n" - "qVw qu 
1\n" - "Pyf ny 1\n" - "Kxz sz 1\n" - "hwJ th 1\n" - "cpL ch 1\n" - "Hge ng 1\n" - "Wbh th 1\n" - "lQq qu 1\n" - "hDl th 1\n" - "Zph th 1\n" - "wZj ij 1\n" - "Zqt th 1\n" - "xmU me 1\n" - "tUf th 1\n" - "qWo qu 1\n" - "Lrd er 1\n" - "pQs st 1\n" - "rZv er 1\n" - "mjI ij 1\n" - "xQy ny 1\n" - "vGy va 1\n" - "jwY ij 1\n" - "cNn an 1\n" - "zpP sz 1\n" - "vKd de 1\n" - "wVk ka 1\n" - "tMh ch 1\n" - "Ktd th 1\n" - "tpG th 1\n" - "iDf in 1\n" - "qKl qu 1\n" - "jLc ch 1\n" - "Jjl le 1\n" - "hcQ th 1\n" - "Tqg qu 1\n" - "bGk ka 1\n" - "jxV ij 1\n" - "fcC ch 1\n" - "Fwx wa 1\n" - "qPy qu 1\n" - "jmE ij 1\n" - "xmT me 1\n" - "lxC GC 1\n" - "lRr er 1\n" - "Qkl le 1\n" - "ihF th 1\n" - "Llt th 1\n" - "Kqe qu 1\n" - "Hhf th 1\n" - "nPq an 1\n" - "zvQ QO 1\n" - "jGy ij 1\n" - "lMk le 1\n" - "uOj qu 1\n" - "fdT de 1\n" - "qvH qu 1\n" - "pcZ ch 1\n" - "qkc ch 1\n" - "cbJ ch 1\n" - "gfK ng 1\n" - "pMt th 1\n" - "vpF va 1\n" - "dgP ng 1\n" - "mxF me 1\n" - "rZp er 1\n" - "cGd ch 1\n" - "sPx st 1\n" - "rGd er 1\n" - "gbQ ng 1\n" - "Dfz sz 1\n" - "sjC st 1\n" - "zSx sz 1\n" - "qIo qu 1\n" - "dIw de 1\n" - "kpF ka 1\n" - "eUw er 1\n" - "Hxc ch 1\n" - "yvG va 1\n" - "vUf va 1\n" - "fjF ij 1\n" - "kLq qu 1\n" - "Zjt th 1\n" - "fLq qu 1\n" - "ydS de 1\n" - "zwK sz 1\n" - "hHy th 1\n" - "Ssw st 1\n" - "hjG th 1\n" - "Ddp de 1\n" - "bPs st 1\n" - "Wpq qu 1\n" - "crW ch 1\n" - "Xpj ij 1\n" - "oXr er 1\n" - "vjK ij 1\n" - "Vzf sz 1\n" - "lYd le 1\n" - "Odx de 1\n" - "hVt th 1\n" - "gRc ch 1\n" - "Ztf th 1\n" - "hVj th 1\n" - "Jjf ij 1\n" - "jFb ij 1\n" - "Lhf th 1\n" - "jlO le 1\n" - "jvB ij 1\n" - "gbN ng 1\n" - "vPm va 1\n" - "tQd th 1\n" - "Vvj ij 1\n" - "rqX qu 1\n" - "zEo on 1\n" - "jsB st 1\n" - "qmH qu 1\n" - "btE th 1\n" - "Wdd de 1\n" - "Dmj ij 1\n" - "ywI wa 1\n" - "jpQ ij 1\n" - "uXs qu 1\n" - "bYm me 1\n" - "oFz on 1\n" - "tBg th 1\n" - "cCn ch 1\n" - "dZg ng 1\n" - "wrL er 1\n" - "Jry er 1\n" - "iKd in 1\n" - "vcN ch 1\n" - "zNp sz 1\n" - "nRf an 1\n" - "dcH ch 1\n" - "qaO an 1\n" 
- "uaQ an 1\n" - "jxL ij 1\n" - "mUf me 1\n" - "vOk ka 1\n" - "Pxt th 1\n" - "fuQ qu 1\n" - "sfN st 1\n" - "Qlv le 1\n" - "bZy be 1\n" - "vEq vK 1\n" - "Xvg ng 1\n" - "Jxb be 1\n" - "zGz sz 1\n" - "Cqf qu 1\n" - "sPp st 1\n" - "vAq qu 1\n" - "kWd de 1\n" - "rcZ cm 1\n" - "lDs le 1\n" - "xDd de 1\n" - "pSj ij 1\n" - "vwS va 1\n" - "kgQ ng 1\n" - "crT ch 1\n" - "fKs st 1\n" - "qhc th 1\n" - "gMl ng 1\n" - "zKt th 1\n" - "jdF de 1\n" - "cfN ch 1\n" - "sdO st 1\n" - "kHh th 1\n" - "xvE va 1\n" - "bPf be 1\n" - "rzX er 1\n" - "vSj ij 1\n" - "dFf de 1\n" - "vXl le 1\n" - "bRv va 1\n" - "Zxw wa 1\n" - "Xzw sz 1\n" - "vrR er 1\n" - "xHb be 1\n" - "qeE qu 1\n" - "jrQ er 1\n" - "vkI ka 1\n" - "frY er 1\n" - "jqL qu 1\n" - "cZj ch 1\n" - "Tmg ng 1\n" - "mHw me 1\n" - "dqS qu 1\n" - "qlI qu 1\n" - "Zvb va 1\n" - "Klx le 1\n" - "gbS ng 1\n" - "sbQ st 1\n" - "quF un 1\n" - "qzT qu 1\n" - "qaI an 1\n" - "Vmd de 1\n" - "qaQ an 1\n" - "Qkb ka 1\n" - "Xjb ij 1\n" - "oCq GC 1\n" - "qQh QO 1\n" - "cwO ch 1\n" - "tMf th 1\n" - "zrK er 1\n" - "wKy wa 1\n" - "wKb wa 1\n" - "cqS ch 1\n" - "iGv in 1\n" - "xXw wa 1\n" - "fMx fo 1\n" - "Zmv va 1\n" - "Yqq qu 1\n" - "kDh th 1\n" - "Jxy ny 1\n" - "yyE ny 1\n" - "sUv st 1\n" - "cVr ch 1\n" - "bqH qu 1\n" - "Wgq qu 1\n" - "uqQ qu 1\n" - "bTg ng 1\n" - "iMv in 1\n" - "qWk qu 1\n" - "fdV de 1\n" - "oQq qu 1\n" - "nZp an 1\n" - "zoY on 1\n" - "jRk ij 1\n" - "qPj qu 1\n" - "uqL qu 1\n" - "cqX ch 1\n" - "lBq qu 1\n" - "fpX pr 1\n" - "bYw wa 1\n" - "Yeq qu 1\n" - "hjN th 1\n" - "tqW th 1\n" - "jhT th 1\n" - "cvF ch 1\n" - "Ycx ch 1\n" - "jFs st 1\n" - "Hdy de 1\n" - "lrZ er 1\n" - "fZv va 1\n" - "Tfw wa 1\n" - "zrI er 1\n" - "dDv de 1\n" - "xeH er 1\n" - "lzH le 1\n" - "sLr er 1\n" - "iKq qu 1\n" - "Fzc cm 1\n" - "xRd de 1\n" - "fSd de 1\n" - "qwF qu 1\n" - "wxY wa 1\n" - "Ykw ka 1\n" - "oVp on 1\n" - "cgB ch 1\n" - "bFh th 1\n" - "njT an 1\n" - "dZz de 1\n" - "bhS th 1\n" - "Fzu qu 1\n" - "fHm me 1\n" - "vNz sz 1\n" - "qlF qu 1\n" - "Lvf va 1\n" - 
"zpU sz 1\n" - "jtL th 1\n" - "cQq ch 1\n" - "mKm me 1\n" - "Rwc ch 1\n" - "jrO er 1\n" - "npB an 1\n" - "Qtx th 1\n" - "Mqj qu 1\n" - "Oqx qu 1\n" - "Dzp sz 1\n" - "hVg th 1\n" - "pTn an 1\n" - "gQj ng 1\n" - "mTn an 1\n" - "tQv th 1\n" - "lZh th 1\n" - "kJj ij 1\n" - "crP ch 1\n" - "mqC qu 1\n" - "Dwl le 1\n" - "vVj ij 1\n" - "hqT th 1\n" - "mJw me 1\n" - "txT th 1\n" - "wZm me 1\n" - "Xnq an 1\n" - "hfU th 1\n" - "kVr er 1\n" - "gVp ng 1\n" - "nBp an 1\n" - "xnZ an 1\n" - "jqA qu 1\n" - "Pzk sz 1\n" - "fJq qu 1\n" - "Gnf an 1\n" - "Kxp pr 1\n" - "dXl Xm 1\n" - "hwL th 1\n" - "Rrn an 1\n" - "klL le 1\n" - "fOg ng 1\n" - "Qwx wa 1\n" - "Cmx me 1\n" - "Fbf be 1\n" - "hWq th 1\n" - "bSw wa 1\n" - "Bxr er 1\n" - "zcB ch 1\n" - "lvX le 1\n" - "Kkx ka 1\n" - "qfI qu 1\n" - "uKg qu 1\n" - "Yku qu 1\n" - "jJz sz 1\n" - "uIp qu 1\n" - "qAd qu 1\n" - "pfH pr 1\n" - "Qwf wa 1\n" - "wbU wa 1\n" - "vDv va 1\n" - "gJn an 1\n" - "zlR le 1\n" - "mXr er 1\n" - "rHx er 1\n" - "oVz on 1\n" - "gtG th 1\n" - "lrK HK 1\n" - "Wxe er 1\n" - "pnJ an 1\n" - "Fqy qu 1\n" - "jVl le 1\n" - "cbP ch 1\n" - "Gjc jS 1\n" - "jQs st 1\n" - "tvV th 1\n" - "Hzk sz 1\n" - "jyW ij 1\n" - "Xbf be 1\n" - "qfS qu 1\n" - "Wvp va 1\n" - "wbL wa 1\n" - "mkO ka 1\n" - "eqB qu 1\n" - "dvS de 1\n" - "zGh th 1\n" - "vWu qu 1\n" - "flX le 1\n" - "xJq qu 1\n" - "qLk qu 1\n" - "vNl le 1\n" - "kzQ sz 1\n" - "Czv sz 1\n" - "knV an 1\n" - "Rjb ij 1\n" - "bNq qu 1\n" - "zPm sz 1\n" - "qxB qu 1\n" - "Lhh th 1\n" - "Uvt th 1\n" - "xfU fo 1\n" - "iNp in 1\n" - "yYg ng 1\n" - "oPb on 1\n" - "qiW qu 1\n" - "ycD ch 1\n" - "wVz sz 1\n" - "wGq qu 1\n" - "hRb th 1\n" - "xbB be 1\n" - "sZl le 1\n" - "gxO ng 1\n" - "wFk ka 1\n" - "Mxd de 1\n" - "dxP de 1\n" - "lRq qu 1\n" - "hbZ th 1\n" - "Eao an 1\n" - "zgA ng 1\n" - "qcW ch 1\n" - "vmQ va 1\n" - "Yqf qu 1\n" - "wiO in 1\n" - "xOe er 1\n" - "Hfy ny 1\n" - "bfS be 1\n" - "Qhn th 1\n" - "Cmk ka 1\n" - "lYs le 1\n" - "Nqt th 1\n" - "qeJ qu 1\n" - "ztJ th 1\n" - "pMv va 1\n" - "uhW 
th 1\n" - "jSb ij 1\n" - "dYh th 1\n" - "cfW ch 1\n" - "gSx ng 1\n" - "qSv qu 1\n" - "jCs st 1\n" - "pwC pr 1\n" - "Gxq qu 1\n" - "fMq qu 1\n" - "kkC ka 1\n" - "uqI qu 1\n" - "zBk sz 1\n" - "zsW st 1\n" - "fZb be 1\n" - "xjb ij 1\n" - "vHq qu 1\n" - "fwN wa 1\n" - "vMw va 1\n" - "Hhq th 1\n" - "csJ ch 1\n" - "brJ er 1\n" - "xvM va 1\n" - "mXn an 1\n" - "qWw wa 1\n" - "dxZ de 1\n" - "sVj st 1\n" - "xrF er 1\n" - "pbU pr 1\n" - "Tfz sz 1\n" - "wqT qu 1\n" - "vcF ch 1\n" - "nrS an 1\n" - "Whz th 1\n" - "kgX ng 1\n" - "yXk ka 1\n" - "kJb ka 1\n" - "rZk er 1\n" - "pBc ch 1\n" - "gUv ng 1\n" - "Hqe qu 1\n" - "Kqj qu 1\n" - "oFj on 1\n" - "xbN be 1\n" - "pnK an 1\n" - "Lbw wa 1\n" - "dMb de 1\n" - "qSp qu 1\n" - "Zsv st 1\n" - "wrV er 1\n" - "uKf qu 1\n" - "mlY le 1\n" - "gxF ng 1\n" - "tjL th 1\n" - "Xrc ch 1\n" - "rvF er 1\n" - "mLq qu 1\n" - "jrK er 1\n" - "Qlz le 1\n" - "zxD sz 1\n" - "fdY de 1\n" - "jvD ij 1\n" - "xQg ng 1\n" - "qFu un 1\n" - "sfJ st 1\n" - "pIf pr 1\n" - "hxJ th 1\n" - "cNc ch 1\n" - "Idq qu 1\n" - "yHf ny 1\n" - "qXm qu 1\n" - "ylD le 1\n" - "zFq qu 1\n" - "jWp ij 1\n" - "eKp er 1\n" - "xhf th 1\n" - "ybV be 1\n" - "xXs st 1\n" - "Yhk th 1\n" - "fwX wa 1\n" - "bqK qu 1\n" - "nvY an 1\n" - "xvk ka 1\n" - "rbP er 1\n" - "sXl le 1\n" - "Uwt th 1\n" - "wmW me 1\n" - "pxV pr 1\n" - "njZ an 1\n" - "Tqk qu 1\n" - "zmE sz 1\n" - "Rqu un 1\n" - "qqM qu 1\n" - "dhQ th 1\n" - "uJz qu 1\n" - "Vqd qu 1\n" - "yCk ka 1\n" - "pWu qu 1\n" - "Vdy de 1\n" - "iRx in 1\n" - "Vcm ch 1\n" - "wIg ng 1\n" - "Xbh th 1\n" - "vcG ch 1\n" - "jjX ij 1\n" - "nmO an 1\n" - "dQj de 1\n" - "dfV de 1\n" - "dbK de 1\n" - "gqk qu 1\n" - "nFd an 1\n" - "oWv on 1\n" - "nHp an 1\n" - "knK an 1\n" - "bxZ be 1\n" - "wmH me 1\n" - "fgX ng 1\n" - "gzH ng 1\n" - "Zbv va 1\n" - "vgM ng 1\n" - "dmK de 1\n" - "cvB ch 1\n" - "eQs er 1\n" - "cHm ch 1\n" - "sBt th 1\n" - "bHx be 1\n" - "vqd qu 1\n" - "Npy pr 1\n" - "xzL sz 1\n" - "gMx ng 1\n" - "vwU va 1\n" - "pfX pr 1\n" - "nFg an 1\n" - "sFs st 
1\n" - "Vqh th 1\n" - "Emq qu 1\n" - "tXy th 1\n" - "uVd qu 1\n" - "Yvj ij 1\n" - "qHo qu 1\n" - "pWm me 1\n" - "xcK ch 1\n" - "pUv va 1\n" - "pLn an 1\n" - "uVn an 1\n" - "Fsq qu 1\n" - "cGj ch 1\n" - "Xwy wa 1\n" - "gzT ng 1\n" - "dNq qu 1\n" - "jrU er 1\n" - "qtA th 1\n" - "gqT qu 1\n" - "pwM pr 1\n" - "lrP er 1\n" - "jmC ij 1\n" - "pmP me 1\n" - "yiY in 1\n" - "pTs st 1\n" - "Zwj ij 1\n" - "qpF qu 1\n" - "fhJ ch 1\n" - "fOv va 1\n" - "wcK ch 1\n" - "kqk qu 1\n" - "Ugz ng 1\n" - "xfF fo 1\n" - "cTv ch 1\n" - "gpX ng 1\n" - "Lfx fo 1\n" - "gwU ng 1\n" - "Dzx sz 1\n" - "kDc ch 1\n" - "Pvh th 1\n" - "kdY de 1\n" - "wWv va 1\n" - "sQq qu 1\n" - "mjY ij 1\n" - "yCb be 1\n" - "rSq qu 1\n" - "Sfv va 1\n" - "fZh th 1\n" - "dMd de 1\n" - "dNs st 1\n" - "jTv ij 1\n" - "tmW th 1\n" - "cxJ ch 1\n" - "uAo qu 1\n" - "mHx me 1\n" - "fgA ng 1\n" - "Rhx th 1\n" - "wWt th 1\n" - "pfU pr 1\n" - "oIj on 1\n" - "lhQ th 1\n" - "vDk ka 1\n" - "vJd de 1\n" - "sDp st 1\n" - "qiU qu 1\n" - "Yfs st 1\n" - "qxW qu 1\n" - "sFh th 1\n" - "vhP th 1\n" - "Vjj ij 1\n" - "tmQ th 1\n" - "wmM me 1\n" - "cVy ch 1\n" - "Kzw sz 1\n" - "tfA th 1\n" - "gjR ij 1\n" - "xyQ ny 1\n" - "mBv va 1\n" - "fQy ny 1\n" - "dZc ch 1\n" - "eVh th 1\n" - "Nvc ch 1\n" - "qFb qu 1\n" - "qhl th 1\n" - "Zcn ch 1\n" - "qwW qu 1\n" - "xZq qu 1\n" - "jhL th 1\n" - "lWf le 1\n" - "jJx ij 1\n" - "Yzt th 1\n" - "Eoq qu 1\n" - "Njm ij 1\n" - "Zgd ng 1\n" - "pGq qu 1\n" - "sgY ng 1\n" - "jyE ij 1\n" - "jzE sz 1\n" - "ujK qu 1\n" - "qbm qu 1\n" - "Wsf st 1\n" - "mQn an 1\n" - "sQs st 1\n" - "yXg ng 1\n" - "vYe er 1\n" - "ePv er 1\n" - "aCv an 1\n" - "pVm me 1\n" - "zxO sz 1\n" - "jjW ij 1\n" - "vgI ng 1\n" - "tZc th 1\n" - "Qtg th 1\n" - "vMt th 1\n" - "kTt th 1\n" - "Mxj ij 1\n" - "fbI be 1\n" - "qAu un 1\n" - "wfT wa 1\n" - "fcF ch 1\n" - "pfK pr 1\n" - "bOq qu 1\n" - "huX th 1\n" - "cJm ch 1\n" - "Xpg ng 1\n" - "tqJ th 1\n" - "Ovf va 1\n" - "Xlj le 1\n" - "Nrl er 1\n" - "fxW fo 1\n" - "Swq qu 1\n" - "qvE qu 1\n" - "qpY qu 1\n" 
- "oNw on 1\n" - "kYc ch 1\n" - "jXb ij 1\n" - "Qfk ka 1\n" - "eDp er 1\n" - "Vqb qu 1\n" - "sKz us 1\n" - "qjp qu 1\n" - "Uxl le 1\n" - "Lky ka 1\n" - "zFy sz 1\n" - "nMl an 1\n" - "yYi in 1\n" - "cQe ch 1\n" - "oYj on 1\n" - "tbB th 1\n" - "Ybg ng 1\n" - "nVk nd 1\n" - "bXc ch 1\n" - "Lqn an 1\n" - "mdK de 1\n" - "pdP de 1\n" - "tqS th 1\n" - "Zjf ij 1\n" - "kcC ch 1\n" - "qZq qu 1\n" - "aSd an 1\n" - "Cmh th 1\n" - "hzG th 1\n" - "wQm me 1\n" - "Gqg qu 1\n" - "yWp pr 1\n" - "Xrw er 1\n" - "yJy ny 1\n" - "sqD qu 1\n" - "dWb de 1\n" - "nbQ an 1\n" - "iwP in 1\n" - "lWs le 1\n" - "Tsg ng 1\n" - "dHz de 1\n" - "tcF th 1\n" - "Qkt th 1\n" - "Bdd de 1\n" - "Mxq qu 1\n" - "pjV ij 1\n" - "kQr er 1\n" - "dnI an 1\n" - "fyY ny 1\n" - "aFq an 1\n" - "Ylx le 1\n" - "Yym me 1\n" - "jbV ij 1\n" - "qcV ch 1\n" - "pzX sz 1\n" - "qRh th 1\n" - "djA de 1\n" - "bnI an 1\n" - "Llv le 1\n" - "tmZ th 1\n" - "hQo th 1\n" - "ztW th 1\n" - "Rxz sz 1\n" - "dxW de 1\n" - "qtW th 1\n" - "kqO qu 1\n" - "lHc ch 1\n" - "lRj le 1\n" - "hNf th 1\n" - "Giq qu 1\n" - "cYq ch 1\n" - "Ydp de 1\n" - "qWn an 1\n" - "xkB ka 1\n" - "kxC ka 1\n" - "ljA le 1\n" - "Qwp pr 1\n" - "mCp me 1\n" - "fJd de 1\n" - "vCt th 1\n" - "Vcz ch 1\n" - "vBf va 1\n" - "cYx ch 1\n" - "fHw wa 1\n" - "kvW ka 1\n" - "Jmz sz 1\n" - "hQj th 1\n" - "rbQ er 1\n" - "vxX va 1\n" - "wFh th 1\n" - "Tjz sz 1\n" - "hxR th 1\n" - "vdY de 1\n" - "pmF me 1\n" - "sDl le 1\n" - "rVh th 1\n" - "wDc ch 1\n" - "gBw ng 1\n" - "cHf ch 1\n" - "pzQ sz 1\n" - "lVp le 1\n" - "gfH ng 1\n" - "oGc ch 1\n" - "tvJ th 1\n" - "cMv ch 1\n" - "xnS an 1\n" - "vQx va 1\n" - "uoM qu 1\n" - "zkX sz 1\n" - "zHp sz 1\n" - "yuW qu 1\n" - "Qbv va 1\n" - "zwG sz 1\n" - "cpX ch 1\n" - "Rpv va 1\n" - "zKq qu 1\n" - "wUb wa 1\n" - "qnJ an 1\n" - "Rpy pr 1\n" - "bcS ch 1\n" - "qxK qu 1\n" - "qjD qu 1\n" - "lQg ng 1\n" - "krX er 1\n" - "Fcg ch 1\n" - "oVx on 1\n" - "vJf va 1\n" - "Bvk ka 1\n" - "dmX de 1\n" - "Wdj de 1\n" - "Yzp sz 1\n" - "Ycd ch 1\n" - "jKx ij 1\n" - 
"krH er 1\n" - "Lnm an 1\n" - "zCm sz 1\n" - "Uwj ij 1\n" - "Uvk ka 1\n" - "Mfj ij 1\n" - "yqJ qu 1\n" - "Lfq qu 1\n" - "yHz sz 1\n" - "kgJ ng 1\n" - "aGq an 1\n" - "tjH th 1\n" - "Zkc ch 1\n" - "wHv va 1\n" - "Nzp sz 1\n" - "cZx ch 1\n" - "jvK ij 1\n" - "clF ch 1\n" - "xmD me 1\n" - "Ypz sz 1\n" - "pFy pr 1\n" - "hvF th 1\n" - "mtW th 1\n" - "hqG th 1\n" - "kvN ka 1\n" - "tcZ th 1\n" - "tkR th 1\n" - "pdH de 1\n" - "qEs qu 1\n" - "Zcw ch 1\n" - "Vwu un 1\n" - "gXz ng 1\n" - "mWj ij 1\n" - "mWv va 1\n" - "Jqx qu 1\n" - "oSj on 1\n" - "lwY le 1\n" - "Tkf ka 1\n" - "pcC ch 1\n" - "ohG th 1\n" - "dzG de 1\n" - "fdN de 1\n" - "xrS er 1\n" - "hHk th 1\n" - "Fjz sz 1\n" - "vbZ va 1\n" - "Udx de 1\n" - "wzX sz 1\n" - "uNq qu 1\n" - "wfZ wa 1\n" - "swB st 1\n" - "dmQ de 1\n" - "dcA ch 1\n" - "qzP qu 1\n" - "jJj ij 1\n" - "qWq qu 1\n" - "tVk th 1\n" - "gwB ng 1\n" - "bIw wa 1\n" - "bpU pr 1\n" - "bwM wa 1\n" - "fkA ka 1\n" - "xUc ch 1\n" - "xTd de 1\n" - "fKl le 1\n" - "lxS le 1\n" - "xaS an 1\n" - "yvQ va 1\n" - "dhV th 1\n" - "mdW de 1\n" - "wfJ wa 1\n" - "Wqq qu 1\n" - "sZj st 1\n" - "Lxy ny 1\n" - "xXy ny 1\n" - "qDm qu 1\n" - "gKq qu 1\n" - "Qvj ij 1\n" - "kfH ka 1\n" - "aQp an 1\n" - "xFz sz 1\n" - "njW an 1\n" - "Rpn an 1\n" - "Mmn an 1\n" - "fhD th 1\n" - "jKk ij 1\n" - "zAq qu 1\n" - "qfL qu 1\n" - "ywN wa 1\n" - "qpz qu 1\n" - "hxP th 1\n" - "Gdq qu 1\n" - "tMx th 1\n" - "jwL ij 1\n" - "kBb ka 1\n" - "fAw wa 1\n" - "Sdx de 1\n" - "Jmv va 1\n" - "bgX ng 1\n" - "xWp pr 1\n" - "hHt th 1\n" - "Gww wa 1\n" - "Fbb be 1\n" - "zoT on 1\n" - "yjG ij 1\n" - "Rlg ng 1\n" - "vFn an 1\n" - "zcK ch 1\n" - "xdC de 1\n" - "wvO va 1\n" - "oQl le 1\n" - "nIw an 1\n" - "wzA sz 1\n" - "Rzj sz 1\n" - "Qzn an 1\n" - "Yjt th 1\n" - "xkQ ku 1\n" - "lrq qu 1\n" - "nwZ an 1\n" - "pGk ka 1\n" - "mnL an 1\n" - "Rlq qu 1\n" - "ccD ch 1\n" - "rRd er 1\n" - "Ofj ij 1\n" - "Fjh th 1\n" - "uuO qu 1\n" - "zZx sz 1\n" - "Nbj ij 1\n" - "znW an 1\n" - "jbH ij 1\n" - "rDx er 1\n" - "Qmc ch 1\n" - "dwV 
de 1\n" - "Oqv qu 1\n" - "Zqe qu 1\n" - "fwI wa 1\n" - "njP an 1\n" - "Oqq qu 1\n" - "pVv va 1\n" - "fqx qu 1\n" - "gfO ng 1\n" - "hqU th 1\n" - "gDj ng 1\n" - "Tmj ij 1\n" - "vcK ch 1\n" - "qmV qu 1\n" - "sVx st 1\n" - "Wfh th 1\n" - "mJk ka 1\n" - "fuK qu 1\n" - "bfN be 1\n" - "qfT qu 1\n" - "Fmj ij 1\n" - "tbN th 1\n" - "kjN ij 1\n" - "yhZ th 1\n" - "Nxk ka 1\n" - "wxU wa 1\n" - "zXb sz 1\n" - "Nzd de 1\n" - "ohL th 1\n" - "pVt th 1\n" - "Zsx st 1\n" - "Zqj qu 1\n" - "wUj ij 1\n" - "yjC ij 1\n" - "kTn an 1\n" - "vqV qu 1\n" - "Fyc ch 1\n" - "Icd ch 1\n" - "svN st 1\n" - "Jjv ij 1\n" - "bVp pr 1\n" - "fdI de 1\n" - "nbX an 1\n" - "cfU ch 1\n" - "lGm le 1\n" - "Ovg ng 1\n" - "zDc ch 1\n" - "jgq qu 1\n" - "lYr er 1\n" - "hjR th 1\n" - "qPm qu 1\n" - "iRq qu 1\n" - "Zrx er 1\n" - "wpT pr 1\n" - "xsB st 1\n" - "qxT qu 1\n" - "gFx ng 1\n" - "qoJ qu 1\n" - "smD st 1\n" - "lbM le 1\n" - "wCc ch 1\n" - "wFm me 1\n" - "Xlv le 1\n" - "zyU sz 1\n" - "vFk ka 1\n" - "tjR th 1\n" - "iYx in 1\n" - "uJk qu 1\n" - "Qeh th 1\n" - "Xrv er 1\n" - "Bqq qu 1\n" - "Vdb de 1\n" - "znR an 1\n" - "pmL me 1\n" - "tvH th 1\n" - "Tmd de 1\n" - "Dgb ng 1\n" - "ozO on 1\n" - "fQb be 1\n" - "Pqb qu 1\n" - "qYn an 1\n" - "xPm me 1\n" - "gWf ng 1\n" - "cCv ch 1\n" - "qeP qu 1\n" - "qZm qu 1\n" - "dgZ ng 1\n" - "mjO ij 1\n" - "gCw ng 1\n" - "svQ st 1\n" - "Rqq qu 1\n" - "Qbt th 1\n" - "Lkj ij 1\n" - "Fza an 1\n" - "jlB le 1\n" - "iWj in 1\n" - "Zxi in 1\n" - "Kxw wa 1\n" - "jcJ ij 1\n" - "uCf qu 1\n" - "cAx ch 1\n" - "Vjw ij 1\n" - "vUs st 1\n" - "Mnq an 1\n" - "jjM ij 1\n" - "vUx va 1\n" - "uZr qu 1\n" - "twU th 1\n" - "Ytv th 1\n" - "hRp th 1\n" - "kzV sz 1\n" - "mvY va 1\n" - "jFj ij 1\n" - "jBp ij 1\n" - "kGz sz 1\n" - "qUq qu 1\n" - "qgR qu 1\n" - "lWb le 1\n" - "wwP wa 1\n" - "wvE va 1\n" - "Fsx st 1\n" - "Izx sz 1\n" - "bwC wa 1\n" - "Fmq qu 1\n" - "cLd ch 1\n" - "bRl le 1\n" - "iXf in 1\n" - "yMq qu 1\n" - "cqP ch 1\n" - "jsL st 1\n" - "jIq qu 1\n" - "wuG qu 1\n" - "Lbv va 1\n" - "Eqf qu 
1\n" - "Ogf ng 1\n" - "kGv ka 1\n" - "pjK ij 1\n" - "vcQ ch 1\n" - "Xzh th 1\n" - "jUv ij 1\n" - "wGd de 1\n" - "hmX th 1\n" - "yqm qu 1\n" - "qkE qu 1\n" - "zgX ng 1\n" - "vwO va 1\n" - "wmS me 1\n" - "vhT th 1\n" - "syX st 1\n" - "nbC an 1\n" - "zgW ng 1\n" - "vqM qu 1\n" - "dWf de 1\n" - "cwF ch 1\n" - "dnF an 1\n" - "qDi qu 1\n" - "qSw qu 1\n" - "jQf ij 1\n" - "crZ ch 1\n" - "qGl qu 1\n" - "Wxu qu 1\n" - "grW ng 1\n" - "glX ng 1\n" - "vFd de 1\n" - "pbF pr 1\n" - "bNf be 1\n" - "Qcf ch 1\n" - "fVx fo 1\n" - "pPf pr 1\n" - "pVq qu 1\n" - "xlG le 1\n" - "Dwj ij 1\n" - "xQj ij 1\n" - "lkQ le 1\n" - "sqH qu 1\n" - "Yyx ny 1\n" - "vFm va 1\n" - "tQo th 1\n" - "zlU le 1\n" - "vlW le 1\n" - "glW ng 1\n" - "qmW qu 1\n" - "aWl an 1\n" - "zmV sz 1\n" - "gLm ng 1\n" - "glB ng 1\n" - "tqA th 1\n" - "hgJ th 1\n" - "cGb ch 1\n" - "qwE qu 1\n" - "Ffy ny 1\n" - "wmL me 1\n" - "xLh th 1\n" - "sbE st 1\n" - "bQl le 1\n" - "xkR ka 1\n" - "yFd de 1\n" - "Omq qu 1\n" - "Xfj ij 1\n" - "wJj ij 1\n" - "Lws st 1\n" - "wfU wa 1\n" - "zfk sz 1\n" - "lNv le 1\n" - "ykQ ka 1\n" - "xDt th 1\n" - "jDw ij 1\n" - "zbx sz 1\n" - "vQs st 1\n" - "vvM va 1\n" - "Xqq qu 1\n" - "jLq qu 1\n" - "zkZ sz 1\n" - "qAg qu 1\n" - "Xjw ij 1\n" - "cFw ch 1\n" - "rwQ er 1\n" - "mWk ka 1\n" - "Yrx er 1\n" - "eUo er 1\n" - "uDm qu 1\n" - "Mhw th 1\n" - "fGp pr 1\n" - "Rpz sz 1\n" - "sbF st 1\n" - "nfX an 1\n" - "Wfu qu 1\n" - "Mwq qu 1\n" - "qDj qu 1\n" - "Wpw pr 1\n" - "zFv sz 1\n" - "qXc ch 1\n" - "qsT qu 1\n" - "pZh th 1\n" - "lLc ch 1\n" - "pqB qu 1\n" - "Xjo on 1\n" - "kDk ka 1\n" - "Jxf fo 1\n" - "Vqz qu 1\n" - "Hvq qu 1\n" - "Zqw qu 1\n" - "kRc ch 1\n" - "tvR th 1\n" - "dNx de 1\n" - "jWq qu 1\n" - "nRw an 1\n" - "rGb er 1\n" - "vZz sz 1\n" - "Xtz th 1\n" - "kZn an 1\n" - "Vmj ij 1\n" - "dMp de 1\n" - "cPy ch 1\n" - "uzR qu 1\n" - "yjE ij 1\n" - "gzF ng 1\n" - "tCp th 1\n" - "qfC qu 1\n" - "vcq ch 1\n" - "Zfg ng 1\n" - "kwC ka 1\n" - "fkM ko 1\n" - "vJh th 1\n" - "eCq qu 1\n" - "wPp pr 1\n" - "qJy qu 1\n" 
- "dmY de 1\n" - "uMj qu 1\n" - "fKh th 1\n" - "sqU qu 1\n" - "vNp va 1\n" - "Crj er 1\n" - "hsH th 1\n" - "Vwn an 1\n" - "Sdy de 1\n" - "Fpw pr 1\n" - "Wcq ch 1\n" - "pjW ij 1\n" - "dwW de 1\n" - "gjX ng 1\n" - "yZk ka 1\n" - "cKg ch 1\n" - "xdR de 1\n" - "wqW qu 1\n" - "khD th 1\n" - "vgG ng 1\n" - "vMl le 1\n" - "qnQ an 1\n" - "hJt th 1\n" - "fvC va 1\n" - "cpR ch 1\n" - "Wtt th 1\n" - "uyX qu 1\n" - "cXf ch 1\n" - "uKv qu 1\n" - "gVv ng 1\n" - "xzg ng 1\n" - "cPq ch 1\n" - "fTn an 1\n" - "sFj st 1\n" - "mzX sz 1\n" - "gMq qu 1\n" - "rxI er 1\n" - "eYf er 1\n" - "kwB ka 1\n" - "eQk er 1\n" - "jBq qu 1\n" - "lbH le 1\n" - "qCt th 1\n" - "Wnv an 1\n" - "gYd ng 1\n" - "Zxe er 1\n" - "fZj ij 1\n" - "Hgj ng 1\n" - "bRj ij 1\n" - "fpR pr 1\n" - "cbR ch 1\n" - "lqT qu 1\n" - "cMt th 1\n" - "tQy to 1\n" - "vxG va 1\n" - "gpB ng 1\n" - "Gkw ka 1\n" - "zqX qu 1\n" - "tPw th 1\n" - "fnN an 1\n" - "Gkp ka 1\n" - "mvQ va 1\n" - "hHf th 1\n" - "wfS wa 1\n" - "qCx qu 1\n" - "mqH qu 1\n" - "hgR th 1\n" - "Mwg ng 1\n" - "bqQ qu 1\n" - "Fkz sz 1\n" - "oFv on 1\n" - "Ddq qu 1\n" - "uIo qu 1\n" - "Yfh th 1\n" - "ygQ ng 1\n" - "fxh th 1\n" - "Zqd qu 1\n" - "Htn th 1\n" - "Gvz sz 1\n" - "zRw sz 1\n" - "vCb va 1\n" - "rjT ro 1\n" - "rjD er 1\n" - "Qpm me 1\n" - "Xdb de 1\n" - "Lkf ka 1\n" - "Ajx ij 1\n" - "Ylz le 1\n" - "Qtb th 1\n" - "bHz sz 1\n" - "bDg ng 1\n" - "Lqx qu 1\n" - "yhW th 1\n" - "zLv sz 1\n" - "xgK ng 1\n" - "eWq qu 1\n" - "sjS st 1\n" - "qVe qu 1\n" - "Okq qu 1\n" - "Ewj ij 1\n" - "Dsv st 1\n" - "jhI th 1\n" - "xGf fo 1\n" - "Okx ka 1\n" - "Fqx qu 1\n" - "dPv de 1\n" - "zsK st 1\n" - "qLn an 1\n" - "fkB ka 1\n" - "cCb ch 1\n" - "gNp ng 1\n" - "Qwd de 1\n" - "zTf sz 1\n" - "Pqq qu 1\n" - "rFv ro 1\n" - "Rwt th 1\n" - "uKc ch 1\n" - "hqN th 1\n" - "kmK ka 1\n" - "wuC qu 1\n" - "pnZ an 1\n" - "tgM th 1\n" - "Qds st 1\n" - "Axq qu 1\n" - "xwO wa 1\n" - "eQg ng 1\n" - "mFj ij 1\n" - "Dpm me 1\n" - "pQm me 1\n" - "aFp an 1\n" - "mfB me 1\n" - "fpA pr 1\n" - "jgZ ng 1\n" - 
"lGk le 1\n" - "xcA ch 1\n" - "gWw ng 1\n" - "lzF le 1\n" - "xsQ st 1\n" - "bQx be 1\n" - "wjc ch 1\n" - "bDc ch 1\n" - "Wpz sz 1\n" - "rfV er 1\n" - "Zbs st 1\n" - "hKq th 1\n" - "qXa ar 1\n" - "wjA ij 1\n" - "vzS sz 1\n" - "cWy ch 1\n" - "gjK ng 1\n" - "yRb be 1\n" - "qgU qu 1\n" - "pqF qu 1\n" - "qnU an 1\n" - "Zqc ch 1\n" - "Xqg qu 1\n" - "zLq qu 1\n" - "gzV ng 1\n" - "Kqs qu 1\n" - "zgZ ng 1\n" - "jqG qu 1\n" - "pqJ qu 1\n" - "Ieq qu 1\n" - "hjH th 1\n" - "vmN va 1\n" - "iuF qu 1\n" - "wGy wa 1\n" - "Kdh th 1\n" - "hQb th 1\n" - "jWr er 1\n" - "Cxy ny 1\n" - "Kqz qu 1\n" - "wXr er 1\n" - "xoQ on 1\n" - "wBh th 1\n" - "qyI qu 1\n" - "qhC th 1\n" - "Vpy pr 1\n" - "nJb an 1\n" - "uGw qu 1\n" - "hhX th 1\n" - "mjS ij 1\n" - "Scv ch 1\n" - "hFw th 1\n" - "bKg ng 1\n" - "Xmn an 1\n" - "bdT de 1\n" - "sJq qu 1\n" - "xTm me 1\n" - "qjz qu 1\n" - "Mqp qu 1\n" - "dHp de 1\n" - "rRn ar 1\n" - "Xlf le 1\n" - "cNs ch 1\n" - "Xql qu 1\n" - "iFz in 1\n" - "Nlk le 1\n" - "sPw st 1\n" - "vWq qu 1\n" - "wXt th 1\n" - "Fnq an 1\n" - "ozJ on 1\n" - "zIg ng 1\n" - "lSf le 1\n" - "wRc ch 1\n" - "Bvp va 1\n" - "Wwr er 1\n" - "pWg pr 1\n" - "pLk ka 1\n" - "krJ er 1\n" - "Zfv va 1\n" - "yIx ny 1\n" - "oKx on 1\n" - "qLb qu 1\n" - "dHj de 1\n" - "oqK qu 1\n" - "cxC ch 1\n" - "wJh th 1\n" - "wZd de 1\n" - "cWz ch 1\n" - "yqS qu 1\n" - "kXq qu 1\n" - "fYd de 1\n" - "dGy de 1\n" - "dDt th 1\n" - "pKg ng 1\n" - "Xjd de 1\n" - "sjM st 1\n" - "sfC st 1\n" - "dMh th 1\n" - "dZp de 1\n" - "wcD ch 1\n" - "Qoj on 1\n" - "gxC ng 1\n" - "Zfn an 1\n" - "hYv th 1\n" - "xWq qu 1\n" - "gZw ng 1\n" - "pQi in 1\n" - "Xlb le 1\n" - "gQz ng 1\n" - "nbZ an 1\n" - "Ezx sz 1\n" - "wNg ng 1\n" - "Xrj er 1\n" - "cxX ch 1\n" - "dQp de 1\n" - "Ypn an 1\n" - "pNp pr 1\n" - "pbQ pr 1\n" - "gMv ng 1\n" - "qeF qu 1\n" - "uVv qu 1\n" - "dVk de 1\n" - "uMv qu 1\n" - "jQn an 1\n" - "mhP th 1\n" - "iTb in 1\n" - "Pvw va 1\n" - "zCw sz 1\n" - "wcR ch 1\n" - "svU st 1\n" - "nMz an 1\n" - "cjE ch 1\n" - "jmH ij 1\n" - "Qzc 
ch 1\n" - "mqc ch 1\n" - "qlU qu 1\n" - "Zvp va 1\n" - "xHl le 1\n" - "gqB qu 1\n" - "xsN st 1\n" - "kCj ij 1\n" - "Olx le 1\n" - "Gxw wa 1\n" - "xwV wa 1\n" - "fPb be 1\n" - "Rhv th 1\n" - "pgV ng 1\n" - "Qdp de 1\n" - "zFs st 1\n" - "klQ le 1\n" - "yJd de 1\n" - "rxE er 1\n" - "uHv qu 1\n" - "wKl le 1\n" - "wpJ pr 1\n" - "Cjr er 1\n" - "tYg th 1\n" - "Vpz sz 1\n" - "Zxh th 1\n" - "pQl le 1\n" - "Fxe er 1\n" - "Qok on 1\n" - "plK le 1\n" - "lpX le 1\n" - "jdP de 1\n" - "Zqy qu 1\n" - "yRz sz 1\n" - "nDg an 1\n" - "kqL qu 1\n" - "ugW qu 1\n" - "Mbf be 1\n" - "Kql qu 1\n" - "Nqw qu 1\n" - "Jzw sz 1\n" - "sGn an 1\n" - "wDv va 1\n" - "Jjk ij 1\n" - "ztQ th 1\n" - "hwP th 1\n" - "wDp pr 1\n" - "gfG ng 1\n" - "qhL th 1\n" - "cUv ch 1\n" - "Wbk ka 1\n" - "fkF ko 1\n" - "Pqv qu 1\n" - "nbK an 1\n" - "qSz qu 1\n" - "vwI va 1\n" - "cFc ch 1\n" - "qfG qu 1\n" - "rhF th 1\n" - "xzl le 1\n" - "dNc ch 1\n" - "zwR sz 1\n" - "wzK sz 1\n" - "bQa an 1\n" - "hLq th 1\n" - "fUv va 1\n" - "rHg ng 1\n" - "uJj qu 1\n" - "Fhz th 1\n" - "Nzm sz 1\n" - "gRz ng 1\n" - "qXf qu 1\n" - "Tzm sz 1\n" - "Zkx ka 1\n" - "hLx th 1\n" - "Ukd de 1\n" - "fMf fo 1\n" - "vGp va 1\n" - "jtI th 1\n" - "hxE th 1\n" - "jrH er 1\n" - "Fgh th 1\n" - "dlF le 1\n" - "jcO ja 1\n" - "sCw st 1\n" - "Bqh th 1\n" - "kZy ka 1\n" - "fOh th 1\n" - "rJb er 1\n" - "rjV er 1\n" - "Kwq qu 1\n" - "Hcw ch 1\n" - "mCw ma 1\n" - "hxM th 1\n" - "jTb ij 1\n" - "mmQ me 1\n" - "pjR ij 1\n" - "cdP ch 1\n" - "Zjs st 1\n" - "jqF qu 1\n" - "vMn an 1\n" - "Mqs qu 1\n" - "svX st 1\n" - "iXn an 1\n" - "nwR an 1\n" - "ytR th 1\n" - "Vjb ij 1\n" - "Cjl le 1\n" - "pXd de 1\n" - "Gwu qu 1\n" - "qIj qu 1\n" - "kQn an 1\n" - "fYm me 1\n" - "vtZ th 1\n" - "Usx st 1\n" - "nfP an 1\n" - "dQx de 1\n" - "oXf on 1\n" - "fEw wa 1\n" - "sgX ng 1\n" - "cPp ch 1\n" - "ybW be 1\n" - "kcW ch 1\n" - "kHf ka 1\n" - "vcU ch 1\n" - "tXo th 1\n" - "Kzh th 1\n" - "Cfq qu 1\n" - "Ujy ij 1\n" - "Fxa an 1\n" - "hxS th 1\n" - "tWx th 1\n" - "mlK le 1\n" - "nZj an 
1\n" - "qOv qu 1\n" - "Xkt th 1\n" - "Fzf sz 1\n" - "uTd qu 1\n" - "qrS qu 1\n" - "Ptw th 1\n" - "dDs st 1\n" - "rNm er 1\n" - "Ewf wa 1\n" - "hJk th 1\n" - "Hdq qu 1\n" - "Jtw th 1\n" - "kqc ch 1\n" - "nHq an 1\n" - "rhH th 1\n" - "oqH qu 1\n" - "vpZ va 1\n" - "Dgd ng 1\n" - "qxV qu 1\n" - "Cxv va 1\n" - "plV pr 1\n" - "kIi in 1\n" - "Khc th 1\n" - "jsY st 1\n" - "fLh th 1\n" - "Ykq qu 1\n" - "Qmx me 1\n" - "zvI sz 1\n" - "yhS th 1\n" - "qfg qu 1\n" - "wxZ wa 1\n" - "jVy ij 1\n" - "kQw ka 1\n" - "zXv sz 1\n" - "Lhs th 1\n" - "Mkq qu 1\n" - "jkU ij 1\n" - "Yhq th 1\n" - "zrH er 1\n" - "vhG va 1\n" - "drD er 1\n" - "Psj st 1\n" - "gDf ng 1\n" - "Xjj ij 1\n" - "pLm me 1\n" - "klC le 1\n" - "hTx th 1\n" - "zrJ er 1\n" - "Xgk ng 1\n" - "Wxf fo 1\n" - "fdD de 1\n" - "jHp ij 1\n" - "yDw wa 1\n" - "kPv ka 1\n" - "Rkm ka 1\n" - "mzg ng 1\n" - "lHz le 1\n" - "vpR va 1\n" - "wZt th 1\n" - "pBd de 1\n" - "qPf qu 1\n" - "hNw th 1\n" - "Nvj ij 1\n" - "pyU pr 1\n" - "Sjh th 1\n" - "Kzx sz 1\n" - "oQp on 1\n" - "xdL de 1\n" - "dnZ an 1\n" - "qfB qu 1\n" - "kJc ch 1\n" - "fWn an 1\n" - "Xmc ch 1\n" - "rGx er 1\n" - "sFf st 1\n" - "Vwv va 1\n" - "tKd th 1\n" - "sQx st 1\n" - "oNm on 1\n" - "uXj qu 1\n" - "Xsq qu 1\n" - "yWc ch 1\n" - "hfC th 1\n" - "Ijd de 1\n" - "dkW de 1\n" - "Nxn an 1\n" - "juC qu 1\n" - "bPy be 1\n" - "lKs le 1\n" - "aLq an 1\n" - "jPp ij 1\n" - "wpZ pr 1\n" - "fjE ij 1\n" - "zNt th 1\n" - "mhN th 1\n" - "bQn an 1\n" - "bxB be 1\n" - "fdX de 1\n" - "Jcv va 1\n" - "Fdp de 1\n" - "wVx wa 1\n" - "tmU th 1\n" - "njJ an 1\n" - "qzK qu 1\n" - "jtD th 1\n" - "bcX ch 1\n" - "Ghx th 1\n" - "xZj ij 1\n" - "vKw va 1\n" - "pvO va 1\n" - "gXs ng 1\n" - "wRv va 1\n" - "hgN th 1\n" - "gpO ng 1\n" - "hWc th 1\n" - "Upq qu 1\n" - "vwD va 1\n" - "mxE me 1\n" - "Zvm va 1\n" - "ozM on 1\n" - "fbJ be 1\n" - "tpQ th 1\n" - "yeV er 1\n" - "Znb an 1\n" - "wXv va 1\n" - "bcY ch 1\n" - "sgZ ng 1\n" - "qfM qu 1\n" - "fcL ch 1\n" - "mXl le 1\n" - "uBq qu 1\n" - "jxW ij 1\n" - "mtU th 1\n" 
- "qgJ qu 1\n" - "dAq qu 1\n" - "jBv ij 1\n" - "Gty th 1\n" - "Jfm me 1\n" - "xqQ qu 1\n" - "cBp ch 1\n" - "Xqd qu 1\n" - "fvM va 1\n" - "uWm qu 1\n" - "rSb er 1\n" - "Xqj qu 1\n" - "qTd qu 1\n" - "lLg ng 1\n" - "Jrp er 1\n" - "oJb on 1\n" - "pXy pr 1\n" - "zrQ er 1\n" - "cnT ch 1\n" - "qsE qu 1\n" - "pZc ch 1\n" - "bVy be 1\n" - "qIz qu 1\n" - "dgR ng 1\n" - "mLv va 1\n" - "hVl th 1\n" - "qRj qu 1\n" - "fhA th 1\n" - "zLc ch 1\n" - "Sgq qu 1\n" - "pLc ch 1\n" - "Txq qu 1\n" - "ypY pr 1\n" - "tXz th 1\n" - "dcC ch 1\n" - "iYf in 1\n" - "Wwm me 1\n" - "kZk ka 1\n" - "Ywr er 1\n" - "gFv ng 1\n" - "Fmz sz 1\n" - "uQq qu 1\n" - "xwR wa 1\n" - "Yfc ch 1\n" - "aIo an 1\n" - "sBq qu 1\n" - "Gzb sz 1\n" - "jwI ij 1\n" - "cFf ch 1\n" - "aWv an 1\n" - "Eaw an 1\n" - "vkW ka 1\n" - "Nfh th 1\n" - "flN le 1\n" - "Lpm me 1\n" - "ylK le 1\n" - "Znr an 1\n" - "mcQ ch 1\n" - "kfE ka 1\n" - "Iyf ny 1\n" - "qrV qu 1\n" - "fPx fo 1\n" - "fgJ ng 1\n" - "jIi in 1\n" - "bPw wa 1\n" - "Qyx ny 1\n" - "Qnb an 1\n" - "Wdm de 1\n" - "nJt th 1\n" - "qCd qu 1\n" - "gZl ng 1\n" - "Nlz le 1\n" - "Zwh th 1\n" - "iWl in 1\n" - "bUu qu 1\n" - "lbJ le 1\n" - "sNq qu 1\n" - "qjU qu 1\n" - "wbT wa 1\n" - "yNc ch 1\n" - "mxM me 1\n" - "pHk ka 1\n" - "Rdq qu 1\n" - "gkE ng 1\n" - "hbN th 1\n" - "Tgq qu 1\n" - "gjV ng 1\n" - "Gjw ij 1\n" - "gqX qu 1\n" - "qXx qu 1\n" - "vQq qu 1\n" - "pNb pr 1\n" - "fJy ny 1\n" - "yvZ va 1\n" - "zNl le 1\n" - "zDb sz 1\n" - "lUz le 1\n" - "Dxy ny 1\n" - "Wwn an 1\n" - "hPn th 1\n" - "kNb ko 1\n" - "Wdb de 1\n" - "zXt th 1\n" - "pjL ij 1\n" - "tJg th 1\n" - "jmM ij 1\n" - "bXg ng 1\n" - "hTv th 1\n" - "Ysf st 1\n" - "hmQ th 1\n" - "Vyq qu 1\n" - "Fpd de 1\n" - "yQw wa 1\n" - "Pbn an 1\n" - "xVj ij 1\n" - "whP th 1\n" - "fSg ng 1\n" - "Gxz ze 1\n" - "Dfw wa 1\n" - "rMx er 1\n" - "zMf sz 1\n" - "vJw va 1\n" - "xJl le 1\n" - "xfN fo 1\n" - "dQw de 1\n" - "fuD qu 1\n" - "xjB ij 1\n" - "lPj le 1\n" - "mqA qu 1\n" - "mfM me 1\n" - "kwG ka 1\n" - "eaY an 1\n" - "Vmm me 1\n" - 
"zfS sz 1\n" - "Fmy me 1\n" - "sqP qu 1\n" - "fKk ka 1\n" - "Qdv de 1\n" - "djZ de 1\n" - "qrR qu 1\n" - "txK th 1\n" - "bxH be 1\n" - "jRb ij 1\n" - "cjD ch 1\n" - "Sxw wa 1\n" - "Sxh th 1\n" - "vrZ er 1\n" - "xmH me 1\n" - "dfH de 1\n" - "fJw wa 1\n" - "mwZ me 1\n" - "vRm va 1\n" - "xwj ij 1\n" - "Xqr er 1\n" - "Gvj ij 1\n" - "hzF th 1\n" - "xnK an 1\n" - "xhU th 1\n" - "Nls le 1\n" - "zbV sz 1\n" - "fTq qu 1\n" - "Wxv va 1\n" - "upG qu 1\n" - "qAo qu 1\n" - "kKx ka 1\n" - "zlD le 1\n" - "hTl th 1\n" - "Gqr qu 1\n" - "Gxm me 1\n" - "zPj sz 1\n" - "bvZ va 1\n" - "jHc ch 1\n" - "iXg ng 1\n" - "Kgz ng 1\n" - "Jyi in 1\n" - "vFh th 1\n" - "ytW th 1\n" - "qBd qu 1\n" - "Xjq qu 1\n" - "dgO ng 1\n" - "mjN ij 1\n" - "Djg ng 1\n" - "zIj sz 1\n" - "uDx qu 1\n" - "qJf qu 1\n" - "fAx fo 1\n" - "Fsj st 1\n" - "yDf ny 1\n" - "xjV ij 1\n" - "hdB th 1\n" - "dwG de 1\n" - "slW le 1\n" - "zYb sz 1\n" - "vzO sz 1\n" - "vqO qu 1\n" - "Jzv sz 1\n" - "xmG me 1\n" - "Kdw de 1\n" - "xVq qu 1\n" - "jtE th 1\n" - "kJy ka 1\n" - "xjW ij 1\n" - "mwR me 1\n" - "zVx sz 1\n" - "tMj th 1\n" - "qqb qu 1\n" - "nlQ le 1\n" - "bxQ be 1\n" - "hJv th 1\n" - "jnY an 1\n" - "yfS ny 1\n" - "Mdw de 1\n" - "zZc ch 1\n" - "ysJ st 1\n" - "Qqv qu 1\n" - "zxl le 1\n" - "jAq qu 1\n" - "lJw le 1\n" - "kwJ ka 1\n" - "sxC st 1\n" - "hJr th 1\n" - "xGp pr 1\n" - "ccF ch 1\n" - "vGq qu 1\n" - "qSc ch 1\n" - "fqq qu 1\n" - "kkV ka 1\n" - "gVq qu 1\n" - "Wqg qu 1\n" - "kJp ka 1\n" - "Wlr er 1\n" - "Jwz sz 1\n" - "qEa an 1\n" - "krL er 1\n" - "tqE th 1\n" - "eJz er 1\n" - "Whx th 1\n" - "vWw va 1\n" - "Qzh th 1\n" - "pcF ch 1\n" - "Vmx me 1\n" - "dvC de 1\n" - "qjZ qu 1\n" - "pkF ka 1\n" - "cvO ch 1\n" - "Qyv va 1\n" - "hNs th 1\n" - "snJ an 1\n" - "yjU ij 1\n" - "Yfq qu 1\n" - "xLw wa 1\n" - "rVz er 1\n" - "gOw ng 1\n" - "fxL fo 1\n" - "snW an 1\n" - "yWk ka 1\n" - "wgK ng 1\n" - "aTf an 1\n" - "eVf er 1\n" - "vZp va 1\n" - "uVp qu 1\n" - "Vjh th 1\n" - "zwT sz 1\n" - "wSn an 1\n" - "nNp an 1\n" - "gfF ng 1\n" - "hcW 
th 1\n" - "gTf ng 1\n" - "qaJ an 1\n" - "kzY sz 1\n" - "ljX le 1\n" - "wMm me 1\n" - "btB st 1\n" - "zfE sz 1\n" - "bxO be 1\n" - "wPc ch 1\n" - "fgK ng 1\n" - "fzW sz 1\n" - "dcX ch 1\n" - "qqR qu 1\n" - "kjq qu 1\n" - "vMh th 1\n" - "gZj ng 1\n" - "qtw th 1\n" - "vkY ka 1\n" - "lCb le 1\n" - "dpO de 1\n" - "mXm me 1\n" - "vWc ch 1\n" - "fOq qu 1\n" - "Vgy ng 1\n" - "dkD de 1\n" - "fQh th 1\n" - "vIq qu 1\n" - "lZr er 1\n" - "zKn an 1\n" - "Vpt th 1\n" - "Dmw me 1\n" - "Nwf wa 1\n" - "kYl le 1\n" - "jpJ ij 1\n" - "qXi qu 1\n" - "Bnj an 1\n" - "xfK fo 1\n" - "fCc ch 1\n" - "vPd de 1\n" - "Qnp an 1\n" - "ypW pr 1\n" - "uwJ qu 1\n" - "Pvb va 1\n" - "cnC ch 1\n" - "hvA th 1\n" - "hGz th 1\n" - "nZx an 1\n" - "kbS ka 1\n" - "Swx wa 1\n" - "hvP th 1\n" - "kqG qu 1\n" - "bLq qu 1\n" - "qjP qu 1\n" - "sUo on 1\n" - "lDq qu 1\n" - "Zlp le 1\n" - "dwQ de 1\n" - "dlN le 1\n" - "fTl le 1\n" - "Npv va 1\n" - "bMn an 1\n" - "dNz sz 1\n" - "efV er 1\n" - "aCw an 1\n" - "aWf an 1\n" - "Lqo qu 1\n" - "fzT sz 1\n" - "Jjr er 1\n" - "zvK sz 1\n" - "nwT an 1\n" - "fXr er 1\n" - "cGm ch 1\n" - "lvS le 1\n" - "qDq qu 1\n" - "qRm qu 1\n" - "vYt th 1\n" - "iQv in 1\n" - "fkH ka 1\n" - "fcO ch 1\n" - "rNn an 1\n" - "qmS qu 1\n" - "kzR sz 1\n" - "Dfc ch 1\n" - "qUs qu 1\n" - "xqP qu 1\n" - "sXk st 1\n" - "Xyt th 1\n" - "pWt th 1\n" - "jbL ij 1\n" - "jYd ij 1\n" - "kqV qu 1\n" - "Fqm qu 1\n" - "xoX on 1\n" - "zuX qu 1\n" - "xUq qu 1\n" - "cgC ch 1\n" - "wBq qu 1\n" - "gQp ng 1\n" - "jnE an 1\n" - "yZs st 1\n" - "fkD ka 1\n" - "sVk st 1\n" - "qyX qu 1\n" - "cBf ch 1\n" - "Cjy ij 1\n" - "dPq qu 1\n" - "wDg ng 1\n" - "dxB de 1\n" - "Dkm ka 1\n" - "kPp ka 1\n" - "hWz th 1\n" - "Bjv ij 1\n" - "Izf sz 1\n" - "Hnk an 1\n" - "rQc ch 1\n" - "Jwu qu 1\n" - "fbP be 1\n" - "frQ er 1\n" - "Aov on 1\n" - "yqQ qu 1\n" - "jfY ij 1\n" - "xsH st 1\n" - "zxh th 1\n" - "Jbj ij 1\n" - "Mjz sz 1\n" - "gRp ng 1\n" - "Gvw va 1\n" - "mzF sz 1\n" - "oqF qu 1\n" - "ejU er 1\n" - "xmQ me 1\n" - "hOq th 1\n" - "pwX pr 
1\n" - "zgK ng 1\n" - "wLk ka 1\n" - "fqc ch 1\n" - "dPm de 1\n" - "tCg th 1\n" - "qrF qu 1\n" - "pWl le 1\n" - "rDf er 1\n" - "Ynw an 1\n" - "jnQ an 1\n" - "tFb th 1\n" - "rpU er 1\n" - "pPj ij 1\n" - "yjM ij 1\n" - "jmY ij 1\n" - "Cpz sz 1\n" - "uDn an 1\n" - "uqY qu 1\n" - "Pjx ij 1\n" - "qFv qu 1\n" - "Ktf th 1\n" - "Jcj ch 1\n" - "kpO pr 1\n" - "pgZ ng 1\n" - "kfO ka 1\n" - "tZv th 1\n" - "jHq qu 1\n" - "cRq ch 1\n" - "zDm sz 1\n" - "lPm le 1\n" - "svP st 1\n" - "qkx qu 1\n" - "bNp pr 1\n" - "Kjq qu 1\n" - "vqS qu 1\n" - "fQp pr 1\n" - "txR th 1\n" - "Hpf pr 1\n" - "iQg ng 1\n" - "vvP va 1\n" - "iGf in 1\n" - "tjI th 1\n" - "pWn an 1\n" - "Qqg qu 1\n" - "qiF ti 1\n" - "Zzr er 1\n" - "aYf an 1\n" - "zjA sz 1\n" - "kwR ka 1\n" - "gkM ng 1\n" - "Cjf ij 1\n" - "zgM ng 1\n" - "Rxk ka 1\n" - "bCd de 1\n" - "Ypv va 1\n" - "wyE wa 1\n" - "iyB in 1\n" - "hQp th 1\n" - "ipQ in 1\n" - "Ucj ch 1\n" - "qkW qu 1\n" - "krK er 1\n" - "Hpp pr 1\n" - "xnN an 1\n" - "jwB ij 1\n" - "Zdm de 1\n" - "mYj ij 1\n" - "tQx th 1\n" - "qwS qu 1\n" - "Hxo on 1\n" - "qDx qu 1\n" - "cXd ch 1\n" - "gdO ng 1\n" - "aEo an 1\n" - "Twd de 1\n" - "avQ an 1\n" - "lhZ th 1\n" - "lzV le 1\n" - "bHf be 1\n" - "bJn an 1\n" - "Uqz qu 1\n" - "uFy qu 1\n" - "jNl le 1\n" - "xBp pr 1\n" - "dRb de 1\n" - "nlT an 1\n" - "wrO er 1\n" - "lzW le 1\n" - "fYf fo 1\n" - "mRw me 1\n" - "rXy er 1\n" - "qyR qu 1\n" - "fGv va 1\n" - "Uwk ka 1\n" - "kXm ka 1\n" - "hJy th 1\n" - "Xgv ng 1\n" - "xYv va 1\n" - "yYd de 1\n" - "xzC sz 1\n" - "gjB ng 1\n" - "jzI sz 1\n" - "zrO er 1\n" - "tqF th 1\n" - "vwM va 1\n" - "zCq qu 1\n" - "ljL le 1\n" - "vnZ an 1\n" - "eDq qu 1\n" - "Qvq qu 1\n" - "pfL pr 1\n" - "iRb in 1\n" - "gdR ng 1\n" - "qAv qu 1\n" - "vnL an 1\n" - "mkT ka 1\n" - "pVk ka 1\n" - "xKh th 1\n" - "jNk ij 1\n" - "jLt th 1\n" - "cNp ch 1\n" - "tmP th 1\n" - "vVt th 1\n" - "qfP qu 1\n" - "Uqo qu 1\n" - "Dnp an 1\n" - "yGb be 1\n" - "sHd st 1\n" - "pwF pr 1\n" - "fPy ny 1\n" - "Drq qu 1\n" - "bJh th 1\n" - "sQp st 1\n" 
- "Iws st 1\n" - "uCw qu 1\n" - "Lwj ij 1\n" - "rFw er 1\n" - "sJp st 1\n" - "xiI in 1\n" - "Rqv qu 1\n" - "bkQ ka 1\n" - "qNp qu 1\n" - "dYl le 1\n" - "Vmf me 1\n" - "lYc ch 1\n" - "oPw on 1\n" - "kjO ij 1\n" - "mKb me 1\n" - "fDf fo 1\n" - "fFb be 1\n" - "Vhv th 1\n" - "Hjq qu 1\n" - "qfK qu 1\n" - "Kjp ij 1\n" - "vTg ng 1\n" - "pBq qu 1\n" - "Htd th 1\n" - "pNd de 1\n" - "bQv va 1\n" - "aSx an 1\n" - "jwx ij 1\n" - "Uyx ny 1\n" - "wVj ij 1\n" - "Ioq qu 1\n" - "Nhm th 1\n" - "Hqh th 1\n" - "rUq qu 1\n" - "bBx be 1\n" - "Gqb qu 1\n" - "Ccw ch 1\n" - "hZw th 1\n" - "Qbl le 1\n" - "xFv va 1\n" - "sZv st 1\n" - "qzY qu 1\n" - "pDb pr 1\n" - "cfR ch 1\n" - "rqk qu 1\n" - "fzP sz 1\n" - "hqO th 1\n" - "pzH sz 1\n" - "qSj qu 1\n" - "pxJ pr 1\n" - "xbq qu 1\n" - "sXf st 1\n" - "ybT be 1\n" - "sHn an 1\n" - "vTz sz 1\n" - "Pgf ng 1\n" - "hKw th 1\n" - "jPj ij 1\n" - "wTx wa 1\n" - "jSj ij 1\n" - "Fgz ng 1\n" - "bKk ka 1\n" - "eUj er 1\n" - "cDf ch 1\n" - "xFg ng 1\n" - "cnW an 1\n" - "tUy th 1\n" - "Jgx ng 1\n" - "yuF qu 1\n" - "vyQ va 1\n" - "xCz sz 1\n" - "jRh th 1\n" - "cXx ch 1\n" - "kGk ka 1\n" - "Xnh th 1\n" - "qPh th 1\n" - "lfZ le 1\n" - "qVa an 1\n" - "xws st 1\n" - "Dzt th 1\n" - "xfG fo 1\n" - "fXh th 1\n" - "jgV ng 1\n" - "vJj ij 1\n" - "bXj ij 1\n" - "cgG ch 1\n" - "vuW qu 1\n" - "txG th 1\n" - "Zxz sz 1\n" - "fNc ch 1\n" - "oBq qu 1\n" - "Wgv ng 1\n" - "Hwz sz 1\n" - "oaW an 1\n" - "vRg ng 1\n" - "uXz qu 1\n" - "fzQ sz 1\n" - "bcB ch 1\n" - "Bnw an 1\n" - "gvB ng 1\n" - "rQm er 1\n" - "cvU ch 1\n" - "xhR th 1\n" - "zxR sz 1\n" - "btZ th 1\n" - "Kkf ka 1\n" - "zJw sz 1\n" - "uwq qu 1\n" - "pSx pr 1\n" - "yRv va 1\n" - "nCq an 1\n" - "tGv th 1\n" - "wgT ng 1\n" - "kNz sz 1\n" - "oHk on 1\n" - "Wzw sz 1\n" - "hvU th 1\n" - "skX st 1\n" - "vYz sz 1\n" - "joZ on 1\n" - "nGq an 1\n" - "qmM qu 1\n" - "Bmr er 1\n" - "sVg ng 1\n" - "uCv qu 1\n" - "iXz in 1\n" - "vKp va 1\n" - "lEw le 1\n" - "hhF th 1\n" - "iwS in 1\n" - "qyU qu 1\n" - "jjY ij 1\n" - "Ygm ng 1\n" - 
"wJd de 1\n" - "eQp er 1\n" - "Yfb be 1\n" - "Wpg ng 1\n" - "jdS de 1\n" - "vmG va 1\n" - "mdT de 1\n" - "grZ ng 1\n" - "yqN qu 1\n" - "pBp po 1\n" - "fkZ ka 1\n" - "qeB qu 1\n" - "cGs ch 1\n" - "Eqg qu 1\n" - "cfO ch 1\n" - "uSx qu 1\n" - "Dhf th 1\n" - "Qjr er 1\n" - "xqZ qu 1\n" - "yQf ny 1\n" - "npY an 1\n" - "xDc ch 1\n" - "bmQ me 1\n" - "kMb ka 1\n" - "aqC an 1\n" - "jYl le 1\n" - "wkD ka 1\n" - "cWs ch 1\n" - "yyJ ny 1\n" - "wvV va 1\n" - "lYb le 1\n" - "qrW qu 1\n" - "bqz qu 1\n" - "wjC ij 1\n" - "vKy va 1\n" - "vjD ij 1\n" - "sDs st 1\n" - "fKf fo 1\n" - "zsT st 1\n" - "jYc ch 1\n" - "Ywt th 1\n" - "Hjw ij 1\n" - "wIy wa 1\n" - "ffU fo 1\n" - "Wnx an 1\n" - "eHq qu 1\n" - "fWy ny 1\n" - "Nwv va 1\n" - "ySj ij 1\n" - "jfC ij 1\n" - "xXq qu 1\n" - "grI ng 1\n" - "oVf on 1\n" - "Vfy ny 1\n" - "jgY ng 1\n" - "Hjp ij 1\n" - "zqC qu 1\n" - "qyH qu 1\n" - "kcQ ch 1\n" - "zsE st 1\n" - "pCx pr 1\n" - "kwP ka 1\n" - "jfQ ij 1\n" - "wZg ng 1\n" - "Vxm me 1\n" - "Jvb va 1\n" - "sEw sz 1\n" - "jLl le 1\n" - "dOx de 1\n" - "wpS pr 1\n" - "yIo on 1\n" - "tGt th 1\n" - "vHz sz 1\n" - "xGj ij 1\n" - "gvQ ng 1\n" - "pNr er 1\n" - "gqY qu 1\n" - "sfK st 1\n" - "dYd de 1\n" - "sMm st 1\n" - "oBx on 1\n" - "qsF qu 1\n" - "bmI me 1\n" - "tmC th 1\n" - "wlW le 1\n" - "Twg ng 1\n" - "srV er 1\n" - "rNz er 1\n" - "Uuc ch 1\n" - "Gjg ng 1\n" - "njY an 1\n" - "vOh th 1\n" - "Qmh th 1\n" - "Fnf an 1\n" - "yvY va 1\n" - "pGf pr 1\n" - "lHp al 1\n" - "qgZ qu 1\n" - "jbS ij 1\n" - "xQi in 1\n" - "tqG th 1\n" - "nwI an 1\n" - "qkY qu 1\n" - "Wxy ny 1\n" - "hDm th 1\n" - "qQe qu 1\n" - "iJp in 1\n" - "xrN er 1\n" - "dGg ng 1\n" - "kQx ka 1\n" - "Jqg qu 1\n" - "hMk th 1\n" - "ljT le 1\n" - "Xkn an 1\n" - "ztq th 1\n" - "qNd qu 1\n" - "suY qu 1\n" - "Uoa an 1\n" - "djR de 1\n" - "mFf me 1\n" - "jzq qu 1\n" - "zjR sz 1\n" - "Nnl an 1\n" - "tJp th 1\n" - "gZr ng 1\n" - "Bwx wa 1\n" - "dWz sz 1\n" - "lwM le 1\n" - "Iqk qu 1\n" - "twZ th 1\n" - "Mwt th 1\n" - "kjY ij 1\n" - "zBv sz 1\n" - "iwF 
in 1\n" - "rHz er 1\n" - "Sqh th 1\n" - "oKq qu 1\n" - "qjO qu 1\n" - "htQ th 1\n" - "cKx ch 1\n" - "bqW qu 1\n" - "kYh th 1\n" - "tBq th 1\n" - "gmJ ng 1\n" - "eYx er 1\n" - "hGv th 1\n" - "hQd th 1\n" - "pnX an 1\n" - "bvJ va 1\n" - "sxM st 1\n" - "qNt th 1\n" - "Wlj le 1\n" - "kqD qu 1\n" - "qdZ qu 1\n" - "mhY th 1\n" - "tlC th 1\n" - "pqI qu 1\n" - "ybD be 1\n" - "xAe er 1\n" - "pLt th 1\n" - "lHb le 1\n" - "xVc ch 1\n" - "dhN th 1\n" - "qxU qu 1\n" - "dVf de 1\n" - "Zkm ka 1\n" - "kpD ka 1\n" - "pjH ij 1\n" - "yGm me 1\n" - "iyP in 1\n" - "wmK me 1\n" - "mJz sz 1\n" - "fmL me 1\n" - "cBv ch 1\n" - "Vvf va 1\n" - "Eql qu 1\n" - "ohV th 1\n" - "lCx le 1\n" - "oWc ch 1\n" - "nzX an 1\n" - "fIj ij 1\n" - "kPt th 1\n" - "pYm me 1\n" - "zhG th 1\n" - "cqN ch 1\n" - "umQ qu 1\n" - "wXs st 1\n" - "lZj le 1\n" - "Sxs st 1\n" - "Kqd qu 1\n" - "tWc th 1\n" - "Kcc ch 1\n" - "pvB po 1\n" - "tgR th 1\n" - "yrN er 1\n" - "xQr er 1\n" - "Xvz sz 1\n" - "lJh th 1\n" - "Xfk ka 1\n" - "Fvr er 1\n" - "fUb be 1\n" - "lZb le 1\n" - "gdI ng 1\n" - "joI on 1\n" - "yKq qu 1\n" - "twz th 1\n" - "qJj qu 1\n" - "vxM va 1\n" - "Vzs st 1\n" - "fjR ij 1\n" - "Kmz sz 1\n" - "qIw qu 1\n" - "jyD ij 1\n" - "qbU qu 1\n" - "qkZ qu 1\n" - "jVg ng 1\n" - "Fhj th 1\n" - "qJq qu 1\n" - "wPq qu 1\n" - "Ueo er 1\n" - "zXd sz 1\n" - "gFb ng 1\n" - "jJy ij 1\n" - "Nsj st 1\n" - "lMb le 1\n" - "yQn an 1\n" - "dnM an 1\n" - "yRg ng 1\n" - "Fjc ch 1\n" - "dKg ng 1\n" - "gqV ng 1\n" - "gCk ng 1\n" - "sOz st 1\n" - "hlO th 1\n" - "qbN qu 1\n" - "sjN st 1\n" - "Ujz sz 1\n" - "rVm er 1\n" - "Wjs st 1\n" - "bmM me 1\n" - "Vzx sz 1\n" - "hZg th 1\n" - "zFt th 1\n" - "yhJ th 1\n" - "vNk ka 1\n" - "zbT sz 1\n" - "xmJ me 1\n" - "Fcs ch 1\n" - "yTc ch 1\n" - "cSg ch 1\n" - "qmP qu 1\n" - "mFz sz 1\n" - "bdI de 1\n" - "jlK le 1\n" - "bnB an 1\n" - "qyQ qu 1\n" - "Vjk ij 1\n" - "hzU th 1\n" - "qgp qu 1\n" - "lqW qu 1\n" - "fNn an 1\n" - "Tjp ij 1\n" - "vlV le 1\n" - "rVp er 1\n" - "bLd de 1\n" - "ydQ de 1\n" - "gYg ng 
1\n" - "qhE th 1\n" - "Gsq qu 1\n" - "gWz ng 1\n" - "Qtk th 1\n" - "Hzw sz 1\n" - "kIo ho 1\n" - "kfC ka 1\n" - "zBg ng 1\n" - "jJp ij 1\n" - "eIq qu 1\n" - "vuB qu 1\n" - "Wbg ng 1\n" - "Jjp ij 1\n" - "lXk le 1\n" - "Tfx fo 1\n" - "zLl le 1\n" - "dqT qu 1\n" - "oZq qu 1\n" - "Jfu qu 1\n" - "Qhh th 1\n" - "qkK qu 1\n" - "Ejc ch 1\n" - "zwN sz 1\n" - "yQq qu 1\n" - "dDp de 1\n" - "Pww wa 1\n" - "ztC th 1\n" - "jtH th 1\n" - "yrX er 1\n" - "vwT va 1\n" - "yRh th 1\n" - "wQt th 1\n" - "lXz le 1\n" - "cfL ch 1\n" - "Fwl le 1\n" - "rNw er 1\n" - "Bhx th 1\n" - "glZ ng 1\n" - "gcD ch 1\n" - "Sfs st 1\n" - "Uzf sz 1\n" - "Tdl le 1\n" - "dRn an 1\n" - "vYw va 1\n" - "xcD ch 1\n" - "xcC ch 1\n" - "lBx le 1\n" - "gHq qu 1\n" - "wJy wa 1\n" - "yrO er 1\n" - "vqF qu 1\n" - "tYb th 1\n" - "Zjw ij 1\n" - "jLk ij 1\n" - "Hvf va 1\n" - "pnS an 1\n" - "pcT ch 1\n" - "sFk st 1\n" - "dcO ch 1\n" - "zPw sz 1\n" - "vNf va 1\n" - "Gdx de 1\n" - "dlP le 1\n" - "jLx jo 1\n" - "jZj ij 1\n" - "wwT wa 1\n" - "tGx th 1\n" - "fhS th 1\n" - "Xtk th 1\n" - "xnW on 1\n" - "pkJ ka 1\n" - "xIo on 1\n" - "Zxb be 1\n" - "nOj an 1\n" - "wHj ij 1\n" - "fjS ij 1\n" - "wdL de 1\n" - "jbN ij 1\n" - "ykO ka 1\n" - "xqB qu 1\n" - "qzN qu 1\n" - "Qbq qu 1\n" - "Fqw qu 1\n" - "jWw ij 1\n" - "nxM an 1\n" - "tpX th 1\n" - "Ttz th 1\n" - "zsH st 1\n" - "fjz sz 1\n" - "xIg ng 1\n" - "xkY ka 1\n" - "Fqa an 1\n" - "oGk on 1\n" - "Hnc an 1\n" - "jPq qu 1\n" - "zlW le 1\n" - "uRx qu 1\n" - "uGx qu 1\n" - "jYv ij 1\n" - "Kpz sz 1\n" - "gQo ng 1\n" - "Kwx wa 1\n" - "jNw ij 1\n" - "tdD th 1\n" - "yGj ij 1\n" - "Lbq qu 1\n" - "Rrc ch 1\n" - "qvX qu 1\n" - "hhK th 1\n" - "kZx ka 1\n" - "xDf fo 1\n" - "Pjf ij 1\n" - "cgF ch 1\n" - "vCk ka 1\n" - "fWw ow 1\n" - "mJp me 1\n" - "fXe er 1\n" - "uYp qu 1\n" - "jHk ij 1\n" - "wdP de 1\n" - "qFk qu 1\n" - "jrG er 1\n" - "fgD ng 1\n" - "fsG st 1\n" - "Vgb ng 1\n" - "xAa an 1\n" - "gtZ th 1\n" - "tlq th 1\n" - "Tmw me 1\n" - "gyY ng 1\n" - "Qxt th 1\n" - "Uxz sz 1\n" - "iVr in 1\n" 
- "zqI qu 1\n" - "Nbw wa 1\n" - "Dhd th 1\n" - "mOq qu 1\n" - "iBd in 1\n" - "cqB ch 1\n" - "zQq qu 1\n" - "Wbv va 1\n" - "Qks ka 1\n" - "qPa an 1\n" - "tfI th 1\n" - "mZs st 1\n" - "pDs st 1\n" - "nJj an 1\n" - "zcp ch 1\n" - "tWj th 1\n" - "Zxp pr 1\n" - "vPy va 1\n" - "dxK de 1\n" - "oPv on 1\n" - "rjN er 1\n" - "oQh th 1\n" - "vwH va 1\n" - "Qhp th 1\n" - "xsU st 1\n" - "kGq qu 1\n" - "wjW ij 1\n" - "Pwx wa 1\n" - "Bbn an 1\n" - "xOq qu 1\n" - "qpN qu 1\n" - "nbq an 1\n" - "zpM sz 1\n" - "jmB ij 1\n" - "Nqj qu 1\n" - "zYd sz 1\n" - "Ybc ch 1\n" - "xcW ch 1\n" - "gPg ng 1\n" - "Qys st 1\n" - "Bhq th 1\n" - "yGx ny 1\n" - "qxL qu 1\n" - "Jfd de 1\n" - "mbV me 1\n" - "pkY ka 1\n" - "cWl ch 1\n" - "wBg ng 1\n" - "vOw va 1\n" - "Gpb pr 1\n" - "Ppq qu 1\n" - "fsX st 1\n" - "vtQ th 1\n" - "yCj ij 1\n" - "yoY on 1\n" - "pwQ pr 1\n" - "yGd de 1\n" - "qtJ th 1\n" - "nrZ an 1\n" - "eVx er 1\n" - "Nrq qu 1\n" - "wtA th 1\n" - "fHf fo 1\n" - "gsQ ng 1\n" - "hlC th 1\n" - "dLc ch 1\n" - "zjC sz 1\n" - "jvY ij 1\n" - "tIj th 1\n" - "pvL va 1\n" - "Hhg th 1\n" - "yMv va 1\n" - "xMn an 1\n" - "tYx th 1\n" - "vVp va 1\n" - "Ynb an 1\n" - "vmX va 1\n" - "qjQ qu 1\n" - "vQr er 1\n" - "hQz th 1\n" - "mNf me 1\n" - "zfY sz 1\n" - "xjS ij 1\n" - "jBm ij 1\n" - "jpq qu 1\n" - "nJq an 1\n" - "Knz an 1\n" - "gGf ng 1\n" - "pZx pr 1\n" - "Gql qu 1\n" - "Uqm qu 1\n" - "eWv er 1\n" - "fGg ng 1\n" - "qsA qu 1\n" - "uhY th 1\n" - "xhH th 1\n" - "yxS ny 1\n" - "rxK er 1\n" - "hNc th 1\n" - "Vwh th 1\n" - "aNv an 1\n" - "Qzv sz 1\n" - "fQn an 1\n" - "jzH sz 1\n" - "Rvh th 1\n" - "Qpt th 1\n" - "qXv qu 1\n" - "phQ th 1\n" - "Qlb le 1\n" - "bnQ an 1\n" - "njK an 1\n" - "Jjs st 1\n" - "tJx th 1\n" - "iwX in 1\n" - "nVd an 1\n" - "kzA sz 1\n" - "uwE qu 1\n" - "Tsq qu 1\n" - "hqM th 1\n" - "Rnq an 1\n" - "rDn an 1\n" - "yNb be 1\n" - "uqN qu 1\n" - "fKw wa 1\n" - "Iqn an 1\n" - "xHc ch 1\n" - "Wwq qu 1\n" - "gMw ng 1\n" - "yWf ny 1\n" - "vcO ch 1\n" - "Gkm ka 1\n" - "fRh th 1\n" - "dMc nd 1\n" - 
"Zhx th 1\n" - "qlH qu 1\n" - "qUl qu 1\n" - "zHf sz 1\n" - "wCk ka 1\n" - "Qfj ij 1\n" - "Qkw ka 1\n" - "mYh th 1\n" - "dcU ch 1\n" - "jTf ij 1\n" - "rjF er 1\n" - "hxQ th 1\n" - "wNf wa 1\n" - "Lgg ng 1\n" - "Fdu qu 1\n" - "tJw th 1\n" - "ycQ ch 1\n" - "xXf fo 1\n" - "wwQ wa 1\n" - "evQ er 1\n" - "Fcj ch 1\n" - "Cyq qu 1\n" - "tpF th 1\n" - "Axj ij 1\n" - "zGg ng 1\n" - "Qbb be 1\n" - "vfY va 1\n" - "oXd on 1\n" - "wAq qu 1\n" - "Xbk ka 1\n" - "wmR me 1\n" - "rzN er 1\n" - "fcB ch 1\n" - "Bwc ch 1\n" - "xgS ng 1\n" - "dQr er 1\n" - "kJw ka 1\n" - "bgx ng 1\n" - "pZs sz 1\n" - "wfA wa 1\n" - "jmX ij 1\n" - "dNp de 1\n" - "Vxr er 1\n" - "Rvb va 1\n" - "wZl le 1\n" - "wgA ng 1\n" - "Wrq qu 1\n" - "Jcq ch 1\n" - "ljW le 1\n" - "qPt th 1\n" - "gjY ng 1\n" - "jUo on 1\n" - "mIj ij 1\n" - "Hpy pr 1\n" - "Mpj ij 1\n" - "bkO ka 1\n" - "Avz sz 1\n" - "vKk ka 1\n" - "Bfz sz 1\n" - "yYj ij 1\n" - "Egq qu 1\n" - "wxH wa 1\n" - "zHh th 1\n" - "svA st 1\n" - "zcP ch 1\n" - "Bxo on 1\n" - "hSv th 1\n" - "Lxt th 1\n" - "hBz th 1\n" - "cWk ch 1\n" - "xBv va 1\n" - "hwN th 1\n" - "mkJ ka 1\n" - "oNj on 1\n" - "Ugq qu 1\n" - "jZq qu 1\n" - "xfP fo 1\n" - "bYv va 1\n" - "qxF qu 1\n" - "dcI ch 1\n" - "dhY th 1\n" - "cvP ch 1\n" - "qUy qu 1\n" - "mxC me 1\n" - "zPx sz 1\n" - "Nql qu 1\n" - "Yfw wa 1\n" - "Wgp ng 1\n" - "jgD ng 1\n" - "Qfq qu 1\n" - "lcW ch 1\n" - "qxy qu 1\n" - "Xpq qu 1\n" - "wrD er 1\n" - "bEo on 1\n" - "bzV sz 1\n" - "fwS wa 1\n" - "mLj ij 1\n" - "wMr er 1\n" - "vFb va 1\n" - "zfT sz 1\n" - "nRk an 1\n" - "kJh th 1\n" - "Rmw me 1\n" - "nqR an 1\n" - "qpO qu 1\n" - "bHb be 1\n" - "Tkq qu 1\n" - "sjG st 1\n" - "qaT an 1\n" - "Pql qu 1\n" - "hlQ th 1\n" - "kzW sz 1\n" - "yFc ch 1\n" - "uBv qu 1\n" - "vxO va 1\n" - "qvC qu 1\n" - "Yqx qu 1\n" - "jCb ij 1\n" - "Qjk ij 1\n" - "fBh th 1\n" - "vKq qu 1\n" - "rMg ng 1\n" - "hRw th 1\n" - "ykU ka 1\n" - "bUq qu 1\n" - "vYv va 1\n" - "Pdx de 1\n" - "oGv on 1\n" - "jLy ij 1\n" - "duY qu 1\n" - "Wcp ch 1\n" - "oGx on 1\n" - "vGl 
le 1\n" - "Jdz sz 1\n" - "ijH in 1\n" - "mlX le 1\n" - "jNr er 1\n" - "kCq qu 1\n" - "Ghh th 1\n" - "rMv er 1\n" - "Bgp ng 1\n" - "bFt th 1\n" - "uWl qu 1\n" - "dXg ng 1\n" - "Wcf ch 1\n" - "dbI de 1\n" - "bGx be 1\n" - "exQ er 1\n" - "jWj jo 1\n" - "pQb pr 1\n" - "jcH ch 1\n" - "qOl qu 1\n" - "mtL th 1\n" - "crC ch 1\n" - "pBh th 1\n" - "Wlz le 1\n" - "nHn an 1\n" - "Hfp pr 1\n" - "Xpc ch 1\n" - "Uxp pr 1\n" - "Ksq qu 1\n" - "xWk ka 1\n" - "nqZ an 1\n" - "Cxd de 1\n" - "zJx sz 1\n" - "rWq qu 1\n" - "Cbq qu 1\n" - "qqP qu 1\n" - "lhU th 1\n" - "Ufv va 1\n" - "Uxg ng 1\n" - "hJf th 1\n" - "nvQ an 1\n" - "dhF th 1\n" - "Cvb va 1\n" - "aPf an 1\n" - "Jxj ij 1\n" - "Dwp pr 1\n" - "Ixw wa 1\n" - "kfS ka 1\n" - "rZm er 1\n" - "fmE me 1\n" - "sLq qu 1\n" - "bmR me 1\n" - "uCs qu 1\n" - "kFm ka 1\n" - "Kqk qu 1\n" - "xQk ka 1\n" - "Sfn an 1\n" - "fgU ng 1\n" - "vvT va 1\n" - "mQe er 1\n" - "Gbt th 1\n" - "tbY th 1\n" - "lQk le 1\n" - "cIh th 1\n" - "Tjq qu 1\n" - "nQg an 1\n" - "yYp pr 1\n" - "qPw qu 1\n" - "xOa an 1\n" - "pNw pr 1\n" - "fJz sz 1\n" - "zHb sz 1\n" - "kBh th 1\n" - "fdE de 1\n" - "wPg ng 1\n" - "lVv le 1\n" - "mPw me 1\n" - "Rmg ng 1\n" - "xoE on 1\n" - "hnJ th 1\n" - "uvE qu 1\n" - "Woq qu 1\n" - "ucX ch 1\n" - "nmD an 1\n" - "pcX ch 1\n" - "hDw th 1\n" - "dgI ng 1\n" - "vVd de 1\n" - "tDh ch 1\n" - "jHn an 1\n" - "hkX th 1\n" - "pxT pr 1\n" - "xYz sz 1\n" - "rTp er 1\n" - "Ubz sz 1\n" - "Llm le 1\n" - "yjZ ij 1\n" - "Qss st 1\n" - "cfM ch 1\n" - "jbG be 1\n" - "Jfz sz 1\n" - "mWb me 1\n" - "jDp ij 1\n" - "lWz le 1\n" - "cXy ch 1\n" - "oQr er 1\n" - "ucZ ch 1\n" - "cvN ch 1\n" - "cvK ch 1\n" - "zDk sz 1\n" - "bLr er 1\n" - "dDl le 1\n" - "hhD th 1\n" - "vmK va 1\n" - "hLt th 1\n" - "mqW qu 1\n" - "Bfs st 1\n" - "Acj ch 1\n" - "dcG ch 1\n" - "yJc ch 1\n" - "mfS me 1\n" - "drL er 1\n" - "qyK qu 1\n" - "tQz th 1\n" - "jrL er 1\n" - "ccJ ch 1\n" - "wpX pr 1\n" - "Zzf sz 1\n" - "snU an 1\n" - "qEw qu 1\n" - "tQb th 1\n" - "mPd de 1\n" - "vJq qu 1\n" - "vpU va 
1\n" - "vzM sz 1\n" - "uZb qu 1\n" - "ywU wa 1\n" - "Rjs st 1\n" - "hKt th 1\n" - "Bfb be 1\n" - "wuQ qu 1\n" - "bvM va 1\n" - "yiW in 1\n" - "hqC th 1\n" - "iUq qu 1\n" - "lBd le 1\n" - "Zxj ij 1\n" - "wpW pr 1\n" - "rHm er 1\n" - "mhQ th 1\n" - "fMb be 1\n" - "vWf va 1\n" - "Fdq qu 1\n" - "jGb ij 1\n" - "Dhw th 1\n" - "cjR ch 1\n" - "kvD ka 1\n" - "qvD qu 1\n" - "Xmk ka 1\n" - "Cjj ij 1\n" - "kkX ka 1\n" - "qkF qu 1\n" - "vWg ng 1\n" - "Msq qu 1\n" - "nNv an 1\n" - "Hzu qu 1\n" - "zrY er 1\n" - "hgB th 1\n" - "pwB pr 1\n" - "Jxc ch 1\n" - "vcJ ch 1\n" - "sYw st 1\n" - "Tqx qu 1\n" - "eJf le 1\n" - "czJ ch 1\n" - "Qyh th 1\n" - "bvV va 1\n" - "Xyh th 1\n" - "fjq qu 1\n" - "dYc ch 1\n" - "pBx pr 1\n" - "jvR ij 1\n" - "gbH ng 1\n" - "ygH ng 1\n" - "hbV th 1\n" - "lwU le 1\n" - "tJk th 1\n" - "pIw pr 1\n" - "Vjl le 1\n" - "Dgm ng 1\n" - "nvR an 1\n" - "yRp pr 1\n" - "fOj ij 1\n" - "Ecf ch 1\n" - "Zrf er 1\n" - "mxD me 1\n" - "Iqf qu 1\n" - "zBj sz 1\n" - "tTs th 1\n" - "lqB qu 1\n" - "kCv ka 1\n" - "nVh th 1\n" - "jGq qu 1\n" - "cgQ ch 1\n" - "Ppd de 1\n" - "Jcd ch 1\n" - "hhP th 1\n" - "sLg ng 1\n" - "xYt th 1\n" - "Qps st 1\n" - "sfE st 1\n" - "wxR wa 1\n" - "pFp pr 1\n" - "Ymf me 1\n" - "Jgy ng 1\n" - "yvI va 1\n" - "Ncz ch 1\n" - "wBf wa 1\n" - "rVx er 1\n" - "jvX ij 1\n" - "nYp an 1\n" - "nNb an 1\n" - "cQi ch 1\n" - "Qwy wa 1\n" - "vPf va 1\n" - "qvd qu 1\n" - "hkD th 1\n" - "Wmr er 1\n" - "gdY ng 1\n" - "Kjj ij 1\n" - "qsN qu 1\n" - "vJg ng 1\n" - "mDc ch 1\n" - "kvF ka 1\n" - "kWx ka 1\n" - "xYu qu 1\n" - "eMq qu 1\n" - "mYy me 1\n" - "Hxt th 1\n" - "pbM pr 1\n" - "Hwd de 1\n" - "mWu qu 1\n" - "zNs st 1\n" - "Qjh th 1\n" - "aqD an 1\n" - "Gcd ch 1\n" - "btX th 1\n" - "Zql qu 1\n" - "Ujw ij 1\n" - "yvM va 1\n" - "Hhw th 1\n" - "zWd sz 1\n" - "pYj ij 1\n" - "xWt th 1\n" - "ylO le 1\n" - "cnX ch 1\n" - "cMf ch 1\n" - "pKb pr 1\n" - "woV on 1\n" - "fzG sz 1\n" - "Lqb qu 1\n" - "eOj er 1\n" - "Gtb th 1\n" - "clX ch 1\n" - "kdC de 1\n" - "cfq ch 1\n" - "hKk th 1\n" 
- "cJi ch 1\n" - "uSb qu 1\n" - "jgT ng 1\n" - "tcG th 1\n" - "qNv qu 1\n" - "fpB pr 1\n" - "vPw va 1\n" - "jmA ij 1\n" - "dxI de 1\n" - "jGg ng 1\n" - "Bvg ng 1\n" - "qrC qu 1\n" - "nPx an 1\n" - "Qmn an 1\n" - "cqC ch 1\n" - "kFh th 1\n" - "Jtf th 1\n" - "Cqz qu 1\n" - "rCd er 1\n" - "Zms st 1\n" - "dVq qu 1\n" - "Gwg ng 1\n" - "cwP ch 1\n" - "wVu qu 1\n" - "dNg ng 1\n" - "jXc ch 1\n" - "Mbz sz 1\n" - "wvG ve 1\n" - "Vpw pr 1\n" - "yXq qu 1\n" - "hlK th 1\n" - "pYv va 1\n" - "Fbd de 1\n" - "zcV ch 1\n" - "rQk er 1\n" - "wtN th 1\n" - "qeI qu 1\n" - "eGt th 1\n" - "kMq qu 1\n" - "kqS qu 1\n" - "cqd ch 1\n" - "pLf po 1\n" - "xvO va 1\n" - "rfH er 1\n" - "gIq qu 1\n" - "Pqk qu 1\n" - "xCn an 1\n" - "dVs st 1\n" - "iqY qu 1\n" - "bsJ st 1\n" - "Vww wa 1\n" - "Znm an 1\n" - "Yrz er 1\n" - "Rvz sz 1\n" - "dzK de 1\n" - "zbW sz 1\n" - "tkx th 1\n" - "xkP ka 1\n" - "kzS sz 1\n" - "gXq qu 1\n" - "Lxf fo 1\n" - "Fwr er 1\n" - "lHs le 1\n" - "zrB er 1\n" - "jNb ij 1\n" - "Hxy ny 1\n" - "Gfw wa 1\n" - "Egw ng 1\n" - "Jxw wa 1\n" - "tVm th 1\n" - "bwQ wa 1\n" - "gIx ng 1\n" - "Wqu un 1\n" - "jvI ij 1\n" - "cGc ch 1\n" - "kSb ka 1\n" - "hxG th 1\n" - "zHm sz 1\n" - "Jpk ka 1\n" - "fVb be 1\n" - "Ukf ka 1\n" - "rxF er 1\n" - "dVu qu 1\n" - "sdX st 1\n" - "mjM ij 1\n" - "xwq qu 1\n" - "Ogk ng 1\n" - "qhr th 1\n" - "vfA va 1\n" - "qbA qu 1\n" - "Lfu qu 1\n" - "hzY th 1\n" - "iHf in 1\n" - "jxb ij 1\n" - "vmP va 1\n" - "bvI va 1\n" - "fmH me 1\n" - "qtx th 1\n" - "bvQ va 1\n" - "qzX qu 1\n" - "bVn an 1\n" - "Xmt th 1\n" - "qXo qu 1\n" - "pfD pr 1\n" - "fCd de 1\n" - "vbx va 1\n" - "Zhz th 1\n" - "Kwg ng 1\n" - "rcJ ch 1\n" - "jlT le 1\n" - "jzM sz 1\n" - "rpP er 1\n" - "tmA th 1\n" - "aYw an 1\n" - "zBq qu 1\n" - "xhT th 1\n" - "yLq qu 1\n" - "cKf ch 1\n" - "qdP qu 1\n" - "Ybx be 1\n" - "dHs st 1\n" - "jhH th 1\n" - "Bsv st 1\n" - "rZt th 1\n" - "mhJ th 1\n" - "Zwq qu 1\n" - "kXf ka 1\n" - "zvT sz 1\n" - "yiC in 1\n" - "gkT ng 1\n" - "nJw an 1\n" - "zpV sz 1\n" - "tPq th 1\n" - 
"cVt th 1\n" - "dBg ng 1\n" - "cRf ch 1\n" - "vRq qu 1\n" - "jgA ng 1\n" - "bMz sz 1\n" - "hJh th 1\n" - "mHd de 1\n" - "Ckq qu 1\n" - "qcj ch 1\n" - "yIb be 1\n" - "wqE qu 1\n" - "pMh th 1\n" - "Hqj qu 1\n" - "jZu qu 1\n" - "iqO qu 1\n" - "tqC th 1\n" - "qoK qu 1\n" - "Knq an 1\n" - "bQm me 1\n" - "uuX qu 1\n" - "Wzc ch 1\n" - "Pxy ny 1\n" - "Qgf ng 1\n" - "sFw st 1\n" - "gHf ng 1\n" - "kgN ng 1\n" - "rCw er 1\n" - "Yjy ij 1\n" - "pnV an 1\n" - "fbS be 1\n" - "iHz in 1\n" - "kGx ka 1\n" - "kwS ka 1\n" - "sDm st 1\n" - "Vhk th 1\n" - "phN th 1\n" - "Jbf be 1\n" - "pWz sz 1\n" - "vvQ va 1\n" - "vNm va 1\n" - "lYw le 1\n" - "zHx sz 1\n" - "Zzc ch 1\n" - "bDt th 1\n" - "Fcv ch 1\n" - "dJg ng 1\n" - "Qwb wa 1\n" - "qFw qu 1\n" - "wmO me 1\n" - "Bvy va 1\n" - "qgY qu 1\n" - "vYs st 1\n" - "xwF wa 1\n" - "qwP qu 1\n" - "uEc ch 1\n" - "mWq qu 1\n" - "fzO sz 1\n" - "bPg ng 1\n" - "pnW an 1\n" - "hGx th 1\n" - "Vkk ka 1\n" - "Xrx er 1\n" - "gJd ng 1\n" - "Llq qu 1\n" - "Vqu un 1\n" - "fgH ng 1\n" - "Vcy ch 1\n" - "hVc th 1\n" - "rwZ er 1\n" - "Xlc ch 1\n" - "xJd de 1\n" - "Fnn an 1\n" - "Ypj ij 1\n" - "lhJ th 1\n" - "aUj an 1\n" - "lBp pr 1\n" - "dlW le 1\n" - "pvV va 1\n" - "Mwr er 1\n" - "Zwc ch 1\n" - "wcU ch 1\n" - "cVq ch 1\n" - "ycU ch 1\n" - "Lcq ch 1\n" - "rvQ er 1\n" - "eYm er 1\n" - "qCn an 1\n" - "dBx de 1\n" - "Iwq qu 1\n" - "gMt th 1\n" - "bhC th 1\n" - "bDs st 1\n" - "Vhz th 1\n" - "kJz sz 1\n" - "Ohz th 1\n" - "kDz sz 1\n" - "hTn th 1\n" - "eqG qu 1\n" - "gJr ng 1\n" - "Zpz sz 1\n" - "hwQ th 1\n" - "fgY ng 1\n" - "sdV st 1\n" - "ljV le 1\n" - "yGg ng 1\n" - "uWg qu 1\n" - "sbO st 1\n" - "qdD qu 1\n" - "yJj ij 1\n" - "nwq an 1\n" - "Apq qu 1\n" - "ccK ch 1\n" - "Qwl le 1\n" - "oyQ on 1\n" - "lPw le 1\n" - "cYt th 1\n" - "brG er 1\n" - "xkT ka 1\n" - "dUj de 1\n" - "rhR th 1\n" - "xPw wa 1\n" - "xoF on 1\n" - "hYj th 1\n" - "hYw th 1\n" - "lPn an 1\n" - "zCg ng 1\n" - "sJt th 1\n" - "wDs st 1\n" - "fVh th 1\n" - "zwW sz 1\n" - "yLj ij 1\n" - "aBx an 1\n" - "Dvv 
va 1\n" - "tKb th 1\n" - "jfG ij 1\n" - "xMm me 1\n" - "bLp pr 1\n" - "xwW wa 1\n" - "bzH sz 1\n" - "cIw ch 1\n" - "zdN sz 1\n" - "Ggv va 1\n" - "lwV le 1\n" - "qyV qu 1\n" - "vBv va 1\n" - "Owm me 1\n" - "Ltx th 1\n" - "mqE qu 1\n" - "Xjc ch 1\n" - "pzY sz 1\n" - "Jds st 1\n" - "kMl le 1\n" - "Ddj de 1\n" - "tfX th 1\n" - "cqT ch 1\n" - "buG qu 1\n" - "oHb po 1\n" - "vRx va 1\n" - "qyq qu 1\n" - "kpY ka 1\n" - "vqN qu 1\n" - "jNq qu 1\n" - "cWb ch 1\n" - "gbJ ng 1\n" - "oZw on 1\n" - "cBz ch 1\n" - "Pvv va 1\n" - "ljI le 1\n" - "hvQ th 1\n" - "kwY ka 1\n" - "hBg th 1\n" - "kdN de 1\n" - "yxH ny 1\n" - "fxH fo 1\n" - "tXj th 1\n" - "uBx qu 1\n" - "uJm qu 1\n" - "Gxh th 1\n" - "fjK ij 1\n" - "gqO qu 1\n" - "dMt th 1\n" - "lVx le 1\n" - "Rhp th 1\n" - "cDn ch 1\n" - "Xkv ka 1\n" - "zmB sz 1\n" - "qaY an 1\n" - "Ivq qu 1\n" - "wmP me 1\n" - "bjq qu 1\n" - "cmU ch 1\n" - "slC le 1\n" - "Krx er 1\n" - "iVv in 1\n" - "Zwz sz 1\n" - "yPd de 1\n" - "qUv qu 1\n" - "Pdz sz 1\n" - "Qzk sz 1\n" - "zoU on 1\n" - "xJf fo 1\n" - "Udq qu 1\n" - "Qwj ij 1\n" - "Kvd de 1\n" - "vQw va 1\n" - "Rdk de 1\n" - "sIj st 1\n" - "Ggt th 1\n" - "lNw le 1\n" - "qvr qu 1\n" - "yqD qu 1\n" - "fXl le 1\n" - "jqg qu 1\n" - "qmA qu 1\n" - "Tgd ng 1\n" - "zpO po 1\n" - "tEz th 1\n" - "Bqz qu 1\n" - "wfL wa 1\n" - "vYu qu 1\n" - "Dxw wa 1\n" - "qWl qu 1\n" - "Rzc ch 1\n" - "mQo on 1\n" - "Ttc th 1\n" - "tVv th 1\n" - "Rqn an 1\n" - "Wcn ch 1\n" - "Nwu qu 1\n" - "xoJ on 1\n" - "vDf va 1\n" - "phH th 1\n" - "fJs st 1\n" - "Pxm me 1\n" - "rFb er 1\n" - "hlM th 1\n" - "mkX ka 1\n" - "nnQ an 1\n" - "Xfn an 1\n" - "sbZ st 1\n" - "Yyf ny 1\n" - "Bjw ij 1\n" - "Ilx le 1\n" - "qpA qu 1\n" - "Mqc ch 1\n" - "gqZ qu 1\n" - "sNv st 1\n" - "Zvq qu 1\n" - "kSx ka 1\n" - "vBd de 1\n" - "wvZ va 1\n" - "Uoe er 1\n" - "Fjy ij 1\n" - "zKb sz 1\n" - "pvI va 1\n" - "Zll le 1\n" - "hdE th 1\n" - "Fpv va 1\n" - "lhV th 1\n" - "rqQ qu 1\n" - "wjG ij 1\n" - "pLq qu 1\n" - "bpJ pr 1\n" - "wzV sz 1\n" - "Hgq ng 1\n" - "zhW th 
1\n" - "Lvq qu 1\n" - "Xhr th 1\n" - "quY un 1\n" - "jqZ qu 1\n" - "vuH qu 1\n" - "Fzj sz 1\n" - "gzG ng 1\n" - "tFc th 1\n" - "vfE va 1\n" - "Igx ng 1\n" - "fqY qu 1\n" - "gYb ng 1\n" - "lJg ng 1\n" - "wcO ch 1\n" - "Qvk ka 1\n" - "Tqq qu 1\n" - "bdY de 1\n" - "wuT qu 1\n" - "lHw le 1\n" - "zRm sz 1\n" - "Hgw ng 1\n" - "tPk th 1\n" - "Jqv qu 1\n" - "tKx th 1\n" - "xpA pr 1\n" - "bkI ka 1\n" - "bSj ij 1\n" - "mxW me 1\n" - "mjR ij 1\n" - "Oip in 1\n" - "wyY wa 1\n" - "dFc ch 1\n" - "qDg qu 1\n" - "wXp pr 1\n" - "Vbp pr 1\n" - "jyN ij 1\n" - "yvP va 1\n" - "yVr er 1\n" - "aWm an 1\n" - "Gjk ij 1\n" - "Apw pr 1\n" - "Zsw st 1\n" - "jQv ij 1\n" - "jbT ij 1\n" - "bdB de 1\n" - "kcY ch 1\n" - "rqC qu 1\n" - "bxD be 1\n" - "vlx le 1\n" - "kjJ ij 1\n" - "xqW qu 1\n" - "zxE sz 1\n" - "sHf st 1\n" - "juF qu 1\n" - "kwX ka 1\n" - "oqW qu 1\n" - "qWt th 1\n" - "fHc ch 1\n" - "cHc ch 1\n" - "Jjm ij 1\n" - "xbA be 1\n" - "Rqj qu 1\n" - "Ijy ij 1\n" - "vSx va 1\n" - "pVj ij 1\n" - "rQx er 1\n" - "fmK me 1\n" - "fnA an 1\n" - "Phv th 1\n" - "bhN th 1\n" - "Hxp pr 1\n" - "Vjq qu 1\n" - "lqC qu 1\n" - "Whd th 1\n" - "zsF st 1\n" - "tYt th 1\n" - "Jzq qu 1\n" - "Nff fo 1\n" - "qXs qu 1\n" - "xJj ij 1\n" - "lXn an 1\n" - "Zpv va 1\n" - "qTh th 1\n" - "npH an 1\n" - "kYx ka 1\n" - "bBs st 1\n" - "vEa an 1\n" - "pjq qu 1\n" - "qIi qu 1\n" - "Fdk de 1\n" - "fNx fo 1\n" - "Ofh th 1\n" - "wXe er 1\n" - "mvZ va 1\n" - "Cjs st 1\n" - "Fmm me 1\n" - "pkR ka 1\n" - "zfZ sz 1\n" - "Zpm me 1\n" - "cbA ch 1\n" - "tvY th 1\n" - "Lmp me 1\n" - "gFd ng 1\n" - "bFx be 1\n" - "Fjm ij 1\n" - "wjF ij 1\n" - "bjv ij 1\n" - "dbT de 1\n" - "jmQ ij 1\n" - "xFw wa 1\n" - "cDk ch 1\n" - "hFz th 1\n" - "uGm qu 1\n" - "Yhx th 1\n" - "Vtl th 1\n" - "azV an 1\n" - "xJs st 1\n" - "Mxw wa 1\n" - "vgK ng 1\n" - "cwQ ch 1\n" - "Gnx an 1\n" - "lbP le 1\n" - "kdS de 1\n" - "kDt th 1\n" - "Pvq qu 1\n" - "yHs st 1\n" - "Lgq qu 1\n" - "Xmj ij 1\n" - "pvA va 1\n" - "vUu qu 1\n" - "Qju qu 1\n" - "qDf qu 1\n" - "Gxj ij 1\n" 
- "Gfz sz 1\n" - "gbY ng 1\n" - "Sjf ij 1\n" - "Ogw ng 1\n" - "hGt th 1\n" - "btT th 1\n" - "gwH ng 1\n" - "Mwj ij 1\n" - "fvU va 1\n" - "frG er 1\n" - "cMx ch 1\n" - "Ydv de 1\n" - "xkZ ka 1\n" - "fjL ij 1\n" - "yPx ny 1\n" - "drX er 1\n" - "jxR ij 1\n" - "hYq th 1\n" - "xHn an 1\n" - "jrP er 1\n" - "tcJ th 1\n" - "qJz qu 1\n" - "zUd sz 1\n" - "jXj ij 1\n" - "qDd qu 1\n" - "Bjh th 1\n" - "qFz sz 1\n" - "mxG me 1\n" - "xOd de 1\n" - "hgL th 1\n" - "cpD ch 1\n" - "jhS th 1\n" - "Zqp qu 1\n" - "yNq qu 1\n" - "pHq qu 1\n" - "rZq qu 1\n" - "Wjy ij 1\n" - "Tfb be 1\n" - "Nwb wa 1\n" - "zQk sz 1\n" - "Rkc ch 1\n" - "Qvw va 1\n" - "wlJ le 1\n" - "cFp ch 1\n" - "oDb on 1\n" - "lsY le 1\n" - "Zbn an 1\n" - "wCd de 1\n" - "zxN sz 1\n" - "bQf be 1\n" - "Kjy ij 1\n" - "Ovk ka 1\n" - "cxA ch 1\n" - "Hqw qu 1\n" - "hwY th 1\n" - "sGv st 1\n" - "Rwn an 1\n" - "zvH sz 1\n" - "yVw wa 1\n" - "zmX sz 1\n" - "qdM qu 1\n" - "dJv de 1\n" - "wDj ij 1\n" - "Vhm th 1\n" - "fLt th 1\n" - "bvC va 1\n" - "xVn an 1\n" - "Hfx fo 1\n" - "tQl th 1\n" - "lhW th 1\n" - "oqS qu 1\n" - "Qya an 1\n" - "gZf ng 1\n" - "bKy be 1\n" - "tjX th 1\n" - "Vkc ch 1\n" - "yjv ij 1\n" - "bgN ng 1\n" - "lNm le 1\n" - "Jzl le 1\n" - "Lwx wa 1\n" - "vcL ch 1\n" - "yXh th 1\n" - "ztZ th 1\n" - "yJx ny 1\n" - "npV an 1\n" - "swG st 1\n" - "sXn an 1\n" - "eJb er 1\n" - "dcR ch 1\n" - "Zrg ng 1\n" - "Pgv ng 1\n" - "xYr er 1\n" - "jlI le 1\n" - "Fmf me 1\n" - "Gqk qu 1\n" - "vlZ le 1\n" - "Csq qu 1\n" - "uQj qu 1\n" - "lLm le 1\n" - "hwK th 1\n" - "cQv ch 1\n" - "qfH qu 1\n" - "rRw er 1\n" - "aUo an 1\n" - "qpE qu 1\n" - "lPc ch 1\n" - "dHd de 1\n" - "gqL qu 1\n" - "zWp sz 1\n" - "bBq be 1\n" - "wWp pr 1\n" - "cfK ch 1\n" - "fWx fo 1\n" - "rvV er 1\n" - "zhR th 1\n" - "Klh th 1\n" - "cbQ ch 1\n" - "Jmg ng 1\n" - "fPg ng 1\n" - "Qnn an 1\n" - "sMq qu 1\n" - "aFz an 1\n" - "sJs st 1\n" - "Pwj ij 1\n" - "jcL ch 1\n" - "gmQ ng 1\n" - "Yqr qu 1\n" - "Cgz ng 1\n" - "wqz qu 1\n" - "fnI nt 1\n" - "qOt th 1\n" - "vyU va 1\n" - 
"wQz sz 1\n" - "vUa an 1\n" - "xBt th 1\n" - "dNm de 1\n" - "Ewx wa 1\n" - "ypD pr 1\n" - "wxL wa 1\n" - "qeN qu 1\n" - "vkB ka 1\n" - "jBj ij 1\n" - "gUj ng 1\n" - "kQk ka 1\n" - "fwO wa 1\n" - "qQt th 1\n" - "Qrl er 1\n" - "dTx de 1\n" - "fWd de 1\n" - "jxK ij 1\n" - "fHl le 1\n" - "jcY ch 1\n" - "oJs on 1\n" - "sRx st 1\n" - "uQg qu 1\n" - "hhY th 1\n" - "sdN st 1\n" - "mxR me 1\n" - "Xsv st 1\n" - "Pcq ch 1\n" - "pkZ ka 1\n" - "zDl le 1\n" - "rIh th 1\n" - "Hnv an 1\n" - "jpA ij 1\n" - "hZj th 1\n" - "Znd an 1\n" - "hZd th 1\n" - "qrO qu 1\n" - "Sbx be 1\n" - "tWp th 1\n" - "Hpd de 1\n" - "Hjz sz 1\n" - "zcS ch 1\n" - "kPz sz 1\n" - "Htq th 1\n" - "gcG ch 1\n" - "Xqx qu 1\n" - "mZc ch 1\n" - "Xzv sz 1\n" - "Kgw ng 1\n" - "aUf an 1\n" - "Ymq qu 1\n" - "wcY ch 1\n" - "oVh th 1\n" - "pdM de 1\n" - "vzK sz 1\n" - "lrX er 1\n" - "ydV de 1\n" - "uqP qu 1\n" - "fmN me 1\n" - "Ocg ch 1\n" - "fLk ka 1\n" - "cJs ch 1\n" - "uGf qu 1\n" - "cMk ch 1\n" - "gTx ng 1\n" - "xNc ch 1\n" - "bHl le 1\n" - "uWp qu 1\n" - "dxL de 1\n" - "zxG sz 1\n" - "dVn an 1\n" - "Nbh th 1\n" - "Cxs st 1\n" - "cvG ch 1\n" - "wCf wa 1\n" - "kjC ij 1\n" - "cfY ch 1\n" - "zcf ch 1\n" - "dpW de 1\n" - "Pqy qu 1\n" - "tlN th 1\n" - "sIi in 1\n" - "qxC qu 1\n" - "Kjm ij 1\n" - "zZk sz 1\n" - "Fks st 1\n" - "gWb ng 1\n" - "tqK th 1\n" - "Jlv le 1\n" - "kCk ka 1\n" - "whT th 1\n" - "Owv va 1\n" - "zKm sz 1\n" - "jql qu 1\n" - "tGz th 1\n" - "dCw de 1\n" - "ymQ me 1\n" - "xnF an 1\n" - "wuF qu 1\n" - "pFq qu 1\n" - "jyS ij 1\n" - "pjX ij 1\n" - "lOj le 1\n" - "Jmd de 1\n" - "Zvz sz 1\n" - "jqM qu 1\n" - "jTd de 1\n" - "qOi qu 1\n" - "oJg ng 1\n" - "Mjx ij 1\n" - "Tpb pr 1\n" - "Wtv th 1\n" - "jxO ij 1\n" - "dBs st 1\n" - "tNv th 1\n" - "qTb qu 1\n" - "vnU an 1\n" - "zDx sz 1\n" - "pSq qu 1\n" - "xRm me 1\n" - "qUf qu 1\n" - "mBb me 1\n" - "qjI qu 1\n" - "sIy st 1\n" - "dCg ng 1\n" - "qIx qu 1\n" - "pZp pr 1\n" - "qDt th 1\n" - "xrM er 1\n" - "uOe qu 1\n" - "xgO ng 1\n" - "grX ng 1\n" - "Pgg ng 1\n" - "yVq 
qu 1\n" - "qEu un 1\n" - "kBc ch 1\n" - "Sgz ng 1\n" - "hjX th 1\n" - "gOq qu 1\n" - "pmW me 1\n" - "Gnw an 1\n" - "xZl le 1\n" - "hTd th 1\n" - "Gfq qu 1\n" - "sLf st 1\n" - "Pgj ng 1\n" - "twF th 1\n" - "mDk ka 1\n" - "qdY qu 1\n" - "vsZ st 1\n" - "vcC ch 1\n" - "Dcj ch 1\n" - "wUh th 1\n" - "qId qu 1\n" - "qrZ qu 1\n" - "cbS ch 1\n" - "Xzc ch 1\n" - "vWj ij 1\n" - "pvC va 1\n" - "Jrw er 1\n" - "yxI ny 1\n" - "dqI qu 1\n" - "uCm qu 1\n" - "vXd de 1\n" - "Wdp de 1\n" - "Dzc ch 1\n" - "hdV th 1\n" - "qbO qu 1\n" - "Jwk ka 1\n" - "Wqm qu 1\n" - "iXw in 1\n" - "fYl le 1\n" - "quQ un 1\n" - "kjD ij 1\n" - "mIh th 1\n" - "xWw wa 1\n" - "oCw on 1\n" - "Zcv ch 1\n" - "jdN de 1\n" - "uYb qu 1\n" - "Srx er 1\n" - "pgU ng 1\n" - "rQg ng 1\n" - "mHf me 1\n" - "fBt th 1\n" - "jVx ij 1\n" - "vYc ch 1\n" - "Vgj ng 1\n" - "qaS an 1\n" - "pxW pr 1\n" - "mnJ an 1\n" - "Bww wa 1\n" - "Tqz qu 1\n" - "jFv ij 1\n" - "xwM wa 1\n" - "Dqw qu 1\n" - "mwI me 1\n" - "vhW th 1\n" - "sqX qu 1\n" - "tlR th 1\n" - "aBh th 1\n" - "qnZ an 1\n" - "gXg ng 1\n" - "sCj st 1\n" - "grN ng 1\n" - "tYv th 1\n" - "Wwg ng 1\n" - "fYi in 1\n" - "btF th 1\n" - "wQn an 1\n" - "Zlt th 1\n" - "cJz ch 1\n" - "Xbn an 1\n" - "tLm th 1\n" - "Zlx le 1\n" - "Nmj ij 1\n" - "hcG th 1\n" - "Wrk er 1\n" - "Nhc th 1\n" - "vqD qu 1\n" - "ujY qu 1\n" - "iJd in 1\n" - "dLf de 1\n" - "cQn ch 1\n" - "Wfx fo 1\n" - "hkZ th 1\n" - "mhC th 1\n" - "zMq qu 1\n" - "zLz sz 1\n" - "Xgt th 1\n" - "qKr qu 1\n" - "yjJ ij 1\n" - "rJm er 1\n" - "Vxc ch 1\n" - "Bxn an 1\n" - "cnQ ch 1\n" - "qkQ qu 1\n" - "Nlw le 1\n" - "hWv th 1\n" - "wdU de 1\n" - "qtB th 1\n" - "qIe qu 1\n" - "qeY qu 1\n" - "Zrp er 1\n" - "Nhd th 1\n" - "fDp po 1\n" - "Cnj an 1\n" - "kxU ka 1\n" - "Bqv qu 1\n" - "vXr er 1\n" - "kBx ka 1\n" - "fBn an 1\n" - "pMx pr 1\n" - "kxR ka 1\n" - "Lzg ng 1\n" - "jBh th 1\n" - "Fjn an 1\n" - "wpC pr 1\n" - "fKy ny 1\n" - "hwD th 1\n" - "fqf qu 1\n" - "qBy qu 1\n" - "Ycq ch 1\n" - "Nns an 1\n" - "jmZ ij 1\n" - "gKw ng 1\n" - "dqA qu 
1\n" - "Bjg ng 1\n" - "fGx fo 1\n" - "Lnp an 1\n" - "whU th 1\n" - "qPd qu 1\n" - "yMx ny 1\n" - "wEj ij 1\n" - "kmJ ka 1\n" - "Qsx st 1\n" - "lCw le 1\n" - "Qqb qu 1\n" - "hvJ th 1\n" - "xkN ka 1\n" - "uVg qu 1\n" - "sQm st 1\n" - "uJp qu 1\n" - "Yzn an 1\n" - "cXh th 1\n" - "srI er 1\n" - "tBz th 1\n" - "cRj ch 1\n" - "yIw wa 1\n" - "jHg ng 1\n" - "xFp pr 1\n" - "wJq qu 1\n" - "qdF qu 1\n" - "vKv va 1\n" - "sHc ch 1\n" - "hBf th 1\n" - "jDy ij 1\n" - "Gjx ij 1\n" - "Fkd de 1\n" - "Hhz th 1\n" - "xSg ng 1\n" - "jFf ij 1\n" - "qvM qu 1\n" - "oRw on 1\n" - "xgX ng 1\n" - "gjF ng 1\n" - "qDz qu 1\n" - "Ycf ch 1\n" - "Xcw ch 1\n" - "nfQ an 1\n" - "qGs qu 1\n" - "kGs st 1\n" - "fxV fo 1\n" - "iPj in 1\n" - "qgP qu 1\n" - "jIv ij 1\n" - "Vhu th 1\n" - "Bzj sz 1\n" - "Jvg ng 1\n" - "Vjf ij 1\n" - "wTq qu 1\n" - "pDw pr 1\n" - "Ysv st 1\n" - "ztV th 1\n" - "mtZ th 1\n" - "jFy ij 1\n" - "gqC qu 1\n" - "Vsg ng 1\n" - "gjS ng 1\n" - "vXz sz 1\n" - "bpK pr 1\n" - "nDq an 1\n" - "sKx st 1\n" - "xYg ng 1\n" - "fZd de 1\n" - "pxf pr 1\n" - "jqS qu 1\n" - "hTb th 1\n" - "Nkq qu 1\n" - "qpH qu 1\n" - "vEz sz 1\n" - "vqP qu 1\n" - "vHw va 1\n" - "Dkp ka 1\n" - "cqY ch 1\n" - "mqS qu 1\n" - "sVt th 1\n" - "Pxh th 1\n" - "hxN th 1\n" - "yTf ny 1\n" - "wCj ij 1\n" - "qQw qu 1\n" - "Vfv va 1\n" - "yQd de 1\n" - "gUc ch 1\n" - "wsQ st 1\n" - "fGw wa 1\n" - "wKf wa 1\n" - "wwB wa 1\n" - "vFt th 1\n" - "twQ th 1\n" - "nrB an 1\n" - "lpY le 1\n" - "xlR le 1\n" - "fdK de 1\n" - "eFz er 1\n" - "jyQ ij 1\n" - "lwT le 1\n" - "xCw wa 1\n" - "cgM ch 1\n" - "wtV th 1\n" - "aqJ an 1\n" - "bXu qu 1\n" - "qdQ qu 1\n" - "Yxd de 1\n" - "xcS ch 1\n" - "nmV an 1\n" - "rQd er 1\n" - "Glk le 1\n" - "qEm qu 1\n" - "uvO qu 1\n" - "svF st 1\n" - "sJx st 1\n" - "Qyg ng 1\n" - "mXh th 1\n" - "btD th 1\n" - "wGc ch 1\n" - "fZo on 1\n" - "Evx va 1\n" - "vzD sz 1\n" - "ufC qu 1\n" - "Pxq qu 1\n" - "qdt th 1\n" - "rKz er 1\n" - "Jhh th 1\n" - "Cxk ka 1\n" - "qxR qu 1\n" - "gTl ng 1\n" - "qGf qu 1\n" - "wYh th 1\n" 
- "cEh th 1\n" - "bzU sz 1\n" - "zWq qu 1\n" - "rWb er 1\n" - "Wrp er 1\n" - "sLc ch 1\n" - "Jpu qu 1\n" - "Jkf ka 1\n" - "vgE ng 1\n" - "Bqk qu 1\n" - "oQs on 1\n" - "kbZ ka 1\n" - "rVf er 1\n" - "qLw qu 1\n" - "Lrc ch 1\n" - "xsR st 1\n" - "hwB th 1\n" - "Qnk an 1\n" - "cPz ch 1\n" - "Ucq ch 1\n" - "egJ ng 1\n" - "Qyq qu 1\n" - "Xwr pr 1\n" - "xfD fo 1\n" - "wyH wa 1\n" - "lBw le 1\n" - "Mdx de 1\n" - "Qsy st 1\n" - "zqV qu 1\n" - "vpY va 1\n" - "slY le 1\n" - "wgL ng 1\n" - "snN an 1\n" - "hVd th 1\n" - "yKx ny 1\n" - "bdW de 1\n" - "lqL qu 1\n" - "yhD th 1\n" - "tNz th 1\n" - "zJg ng 1\n" - "kIx ka 1\n" - "fHp pr 1\n" - "yrJ er 1\n" - "lrR er 1\n" - "wzY sz 1\n" - "pgB pr 1\n" - "mfC me 1\n" - "qkL qu 1\n" - "jUu qu 1\n" - "qCh th 1\n" - "zlN le 1\n" - "Bgj ng 1\n" - "gcE ch 1\n" - "zRx sz 1\n" - "jhN th 1\n" - "eGz er 1\n" - "Fpq qu 1\n" - "Wvi in 1\n" - "mBf me 1\n" - "hhW th 1\n" - "oUq qu 1\n" - "dxQ de 1\n" - "Whq th 1\n" - "rMk er 1\n" - "lWd le 1\n" - "xWz sz 1\n" - "oQn an 1\n" - "mWx me 1\n" - "nuV an 1\n" - "wWz sz 1\n" - "hvR th 1\n" - "Zwd de 1\n" - "smJ st 1\n" - "Hlh th 1\n" - "sJh th 1\n" - "zmY sz 1\n" - "hZn th 1\n" - "Vjg ng 1\n" - "Jhz th 1\n" - "mqR qu 1\n" - "hcO th 1\n" - "dqL qu 1\n" - "Bfh th 1\n" - "pkV ka 1\n" - "tBx th 1\n" - "Hkc ch 1\n" - "Kqm qu 1\n" - "qWv qu 1\n" - "lXy le 1\n" - "yRd de 1\n" - "mjH ij 1\n" - "qzA qu 1\n" - "qxm qu 1\n" - "Qvm va 1\n" - "gcM ch 1\n" - "xqx qu 1\n" - "kKv ka 1\n" - "yoX po 1\n" - "xrT er 1\n" - "cWq ch 1\n" - "jqW qu 1\n" - "sWj st 1\n" - "Sdw de 1\n" - "dfR de 1\n" - "Kqn an 1\n" - "Gjd do 1\n" - "Qbd de 1\n" - "yyK ny 1\n" - "xmX me 1\n" - "xuF qu 1\n" - "yVg ng 1\n" - "qoO qu 1\n" - "Glq qu 1\n" - "Mkx ka 1\n" - "xLb be 1\n" - "gMr ng 1\n" - "sCp st 1\n" - "bGh th 1\n" - "cXo ch 1\n" - "zTz sz 1\n" - "qkC qu 1\n" - "hTp th 1\n" - "qNf qu 1\n" - "mXk ka 1\n" - "xcZ ch 1\n" - "jVm ij 1\n" - "bIi in 1\n" - "qnH an 1\n" - "nwC an 1\n" - "dSg ng 1\n" - "qoD qu 1\n" - "tDx th 1\n" - "jdU de 1\n" - 
"Xmw me 1\n" - "kNh th 1\n" - "jYr er 1\n" - "Ygp ng 1\n" - "blJ le 1\n" - "mFv va 1\n" - "Sxr er 1\n" - "Fzl le 1\n" - "jTq qu 1\n" - "cIp pr 1\n" - "ajY an 1\n" - "yYb be 1\n" - "rKb er 1\n" - "pzB sz 1\n" - "eIy er 1\n" - "wfK wa 1\n" - "Fmh th 1\n" - "ufL qu 1\n" - "Xlm le 1\n" - "Czg ng 1\n" - "lPq qu 1\n" - "tqV th 1\n" - "wFy wa 1\n" - "bQc ch 1\n" - "kVw ka 1\n" - "nMh th 1\n" - "cCj ch 1\n" - "oeE er 1\n" - "wHf wa 1\n" - "fNf fo 1\n" - "mXv va 1\n" - "Nkg ng 1\n" - "jWc ch 1\n" - "zFj sz 1\n" - "Kfx fo 1\n" - "bgY ng 1\n" - "lYz le 1\n" - "cgD ch 1\n" - "pgM ng 1\n" - "fhH th 1\n" - "jrD er 1\n" - "jwA ij 1\n" - "jyM ij 1\n" - "vzC sz 1\n" - "lQd le 1\n" - "zcH ch 1\n" - "lbX le 1\n" - "vzG sz 1\n" - "mSr er 1\n" - "xYf fo 1\n" - "qgB qu 1\n" - "jYk ij 1\n" - "dIq qu 1\n" - "wpG pr 1\n" - "hVk th 1\n" - "Tjb ij 1\n" - "zvP sz 1\n" - "bZg ng 1\n" - "bFg ng 1\n" - "kfU ka 1\n" - "Sxz sz 1\n" - "fwF wa 1\n" - "Qwg ng 1\n" - "fWb be 1\n" - "jqQ ij 1\n" - "Vfx fo 1\n" - "cJj ch 1\n" - "zwJ sz 1\n" - "xBg ng 1\n" - "Ddm de 1\n" - "bWv va 1\n" - "zpG sz 1\n" - "xrQ er 1\n" - "hcS th 1\n" - "wHn an 1\n" - "hIy th 1\n" - "Yxj ij 1\n" - "sdC st 1\n" - "yVu qu 1\n" - "qjf qu 1\n" - "Tzy sz 1\n" - "Ffn an 1\n" - "zzX sz 1\n" - "Hdx de 1\n" - "gLg ng 1\n" - "Yqg qu 1\n" - "fLb be 1\n" - "lQc ch 1\n" - "vjG ij 1\n" - "wpL pr 1\n" - "cJr ch 1\n" - "aJq an 1\n" - "Ynq an 1\n" - "Wvc ch 1\n" - "lKy le 1\n" - "eYq qu 1\n" - "kxL ka 1\n" - "gCb ng 1\n" - "sRd st 1\n" - "rMd er 1\n" - "Bvh th 1\n" - "kKg ng 1\n" - "wlK le 1\n" - "mDd de 1\n" - "zkJ sz 1\n" - "vRc ch 1\n" - "Xlh th 1\n" - "pRk ka 1\n" - "xvN va 1\n" - "nxI an 1\n" - "fCx fo 1\n" - "Ybt th 1\n" - "Ebq qu 1\n" - "bkN ka 1\n" - "bQy be 1\n" - "rDw er 1\n" - "djJ de 1\n" - "tmM th 1\n" - "nwH an 1\n" - "hJz th 1\n" - "lcM ch 1\n" - "ozV on 1\n" - "mLd de 1\n" - "bKc ch 1\n" - "eZf er 1\n" - "Fhg th 1\n" - "Zcj ch 1\n" - "pLr er 1\n" - "wqs qu 1\n" - "bXi in 1\n" - "tgD th 1\n" - "hQc th 1\n" - "zDp sz 1\n" - "oDg 
ng 1\n" - "sgM ng 1\n" - "bnD an 1\n" - "gHp ng 1\n" - "Wkf ka 1\n" - "qIs qu 1\n" - "wLd de 1\n" - "ztN th 1\n" - "gdQ ng 1\n" - "wCm ow 1\n" - "vVf va 1\n" - "Jmw me 1\n" - "hbC th 1\n" - "srW er 1\n" - "nxN an 1\n" - "pVs st 1\n" - "uWq qu 1\n" - "hgM th 1\n" - "lBc ch 1\n" - "wUo on 1\n" - "flH le 1\n" - "yWg ng 1\n" - "jjN ij 1\n" - "Uwn an 1\n" - "nYj an 1\n" - "mtN th 1\n" - "Pgp ng 1\n" - "zFc ch 1\n" - "oXz on 1\n" - "iCg ng 1\n" - "Lpc ch 1\n" - "Gqd qu 1\n" - "rYc ch 1\n" - "vqA qu 1\n" - "Vhc th 1\n" - "zmF sz 1\n" - "Bpc ch 1\n" - "Jfq qu 1\n" - "oXv on 1\n" - "lgX ng 1\n" - "Jfx fo 1\n" - "zpS sz 1\n" - "gcO ch 1\n" - "xwQ wa 1\n" - "pkQ ka 1\n" - "wOc ch 1\n" - "Wgm ng 1\n" - "cOj ch 1\n" - "Nft th 1\n" - "pqN qu 1\n" - "qsB qu 1\n" - "ydH de 1\n" - "qRs qu 1\n" - "ykX ka 1\n" - "cDq ch 1\n" - "mfU me 1\n" - "xzM sz 1\n" - "vGt th 1\n" - "fuW qu 1\n" - "lqG qu 1\n" - "Tqp qu 1\n" - "zvD sz 1\n" - "wWb wa 1\n" - "Fzi in 1\n" - "qpK qu 1\n" - "oyq qu 1\n" - "gQe ng 1\n" - "Zmw me 1\n" - "qYp qu 1\n" - "Wvf va 1\n" - "aQl an 1\n" - "oqO qu 1\n" - "eqJ qu 1\n" - "nvT an 1\n" - "fUk ka 1\n" - "ibH in 1\n" - "jvZ ij 1\n" - "Wwz sz 1\n" - "lgY ng 1\n" - "eFp er 1\n" - "Xgx ng 1\n" - "fYs st 1\n" - "kZs st 1\n" - "vpD va 1\n" - "qcZ ch 1\n" - "Bqo qu 1\n" - "jLb ij 1\n" - "rwX er 1\n" - "fyK ny 1\n" - "Sxv va 1\n" - "sxZ st 1\n" - "wkK ka 1\n" - "yJp pr 1\n" - "tjT th 1\n" - "qPv qu 1\n" - "yZj ij 1\n" - "Rrm er 1\n" - "nhJ th 1\n" - "vqJ qu 1\n" - "yxY ny 1\n" - "vsE st 1\n" - "fkK ka 1\n" - "fuY qu 1\n" - "zQo on 1\n" - "Xvr er 1\n" - "mMq qu 1\n" - "Oqm qu 1\n" - "Dxs st 1\n" - "Lqa an 1\n" - "Wnh th 1\n" - "jmG ij 1\n" - "Wqa an 1\n" - "mhT th 1\n" - "bgZ ng 1\n" - "vmO va 1\n" - "zFm sz 1\n" - "Khk th 1\n" - "yqB qu 1\n" - "nVv an 1\n" - "Rft th 1\n" - "zmL sz 1\n" - "hdD th 1\n" - "nWp an 1\n" - "vvO va 1\n" - "dYp de 1\n" - "ohX th 1\n" - "qoU qu 1\n" - "rjB er 1\n" - "Dwc ch 1\n" - "aWq an 1\n" - "clD ch 1\n" - "Vdk de 1\n" - "twM th 1\n" - "fZz sz 
1\n" - "wQp pr 1\n" - "dwD de 1\n" - "iYv in 1\n" - "Awv va 1\n" - "pgG ng 1\n" - "Xoq qu 1\n" - "krQ er 1\n" - "Vxg ng 1\n" - "lwB le 1\n" - "Pxw wa 1\n" - "Jwf wa 1\n" - "zLh th 1\n" - "btH th 1\n" - "pwY pr 1\n" - "Mjd de 1\n" - "Xrh th 1\n" - "qXu un 1\n" - "Eqy qu 1\n" - "Bpy pr 1\n" - "znY an 1\n" - "Rqd qu 1\n" - "nQf an 1\n" - "Zvw va 1\n" - "zjO sz 1\n" - "wNd de 1\n" - "lIq qu 1\n" - "vMq qu 1\n" - "Gqt th 1\n" - "lMf le 1\n" - "Jqn an 1\n" - "fVw wa 1\n" - "qvQ qu 1\n" - "eHk er 1\n" - "jbK ij 1\n" - "fWs st 1\n" - "qTk qu 1\n" - "znF an 1\n" - "yxO ny 1\n" - "Fqr qu 1\n" - "nFb an 1\n" - "oDp on 1\n" - "jUc ch 1\n" - "qHg qu 1\n" - "gGq qu 1\n" - "qPs qu 1\n" - "jHv ij 1\n" - "Iwj ij 1\n" - "vzV sz 1\n" - "yUq qu 1\n" - "jQt th 1\n" - "sFb st 1\n" - "Lvg ng 1\n" - "zTt th 1\n" - "bvK va 1\n" - "Ccx ch 1\n" - "jyA ij 1\n" - "yEj ij 1\n" - "zdG sz 1\n" - "tqT th 1\n" - "qbH qu 1\n" - "nHd an 1\n" - "Hhj th 1\n" - "jVb ij 1\n" - "uHw un 1\n" - "Zck ch 1\n" - "gPq qu 1\n" - "mxq qu 1\n" - "wHs st 1\n" - "fDy ny 1\n" - "tlV th 1\n" - "Lsv st 1\n" - "zvF va 1\n" - "mqx qu 1\n" - "nqF an 1\n" - "xgM ng 1\n" - "gyq qu 1\n" - "grJ ng 1\n" - "jSq qu 1\n" - "Mmw me 1\n" - "Cgx ng 1\n" - "Rlr er 1\n" - "mvG va 1\n" - "fuA qu 1\n" - "uVh th 1\n" - "sMz st 1\n" - "wWr er 1\n" - "qpD qu 1\n" - "hQw th 1\n" - "xBc ch 1\n" - "fcW ch 1\n" - "hxL th 1\n" - "rfK er 1\n" - "mFn an 1\n" - "Qnw an 1\n" - "tjB th 1\n" - "Rkx ka 1\n" - "srE er 1\n" - "drG er 1\n" - "Cfy ny 1\n" - "yZw wa 1\n" - "Wxw wa 1\n" - "zCp sz 1\n" - "jZt th 1\n" - "Nqf qu 1\n" - "jgO ng 1\n" - "fWc ch 1\n" - "qrN qu 1\n" - "Nzj sz 1\n" - "Hjy ij 1\n" - "Uxy ny 1\n" - "oIy on 1\n" - "rfX er 1\n" - "oBw on 1\n" - "yyV ny 1\n" - "Qiv in 1\n" - "dKh th 1\n" - "qDk qu 1\n" - "tgQ th 1\n" - "xNw wa 1\n" - "qdL qu 1\n" - "ovY on 1\n" - "fbZ be 1\n" - "qiI qu 1\n" - "bvT va 1\n" - "jYq qu 1\n" - "kbK ka 1\n" - "Mfn an 1\n" - "Rpd de 1\n" - "pHb pr 1\n" - "qqO qu 1\n" - "vkV ka 1\n" - "sWp st 1\n" - "kPf ka 1\n" 
- "qLy qu 1\n" - "qoE qu 1\n" - "wLh th 1\n" - "zhV th 1\n" - "bpL pr 1\n" - "Tqf qu 1\n" - "pzG sz 1\n" - "kcT ch 1\n" - "wjX ij 1\n" - "kPy ku 1\n" - "fdB de 1\n" - "Qxs st 1\n" - "gYf ng 1\n" - "Ypx pr 1\n" - "zSk sz 1\n" - "tDg th 1\n" - "xbJ be 1\n" - "yfO ny 1\n" - "uQf qu 1\n" - "bpQ pr 1\n" - "dXc ch 1\n" - "lwP le 1\n" - "vTs st 1\n" - "Jlq qu 1\n" - "Cqw qu 1\n" - "bWy be 1\n" - "cUq ch 1\n" - "Ybk ka 1\n" - "wyq qu 1\n" - "jhq th 1\n" - "xUy ny 1\n" - "Ncj ch 1\n" - "kMh th 1\n" - "vZy va 1\n" - "zcq ch 1\n" - "Qsr er 1\n" - "Lhx th 1\n" - "Gcj ch 1\n" - "uQt th 1\n" - "wYn an 1\n" - "dYm de 1\n" - "Qvx va 1\n" - "Rcg ch 1\n" - "qGz qu 1\n" - "bxJ be 1\n" - "jFg ng 1\n" - "xLp pr 1\n" - "lDn an 1\n" - "wqS qu 1\n" - "bIq qu 1\n" - "tBm th 1\n" - "bQs st 1\n" - "zJb sz 1\n" - "jfJ ij 1\n" - "qTc ch 1\n" - "kbX ka 1\n" - "Hlz le 1\n" - "puQ qu 1\n" - "hKb th 1\n" - "rBb er 1\n" - "vpW va 1\n" - "Yjk ij 1\n" - "Wnm an 1\n" - "pZr er 1\n" - "ldZ le 1\n" - "gMm ng 1\n" - "pZf pi 1\n" - "eYp er 1\n" - "vTp va 1\n" - "Gkc ch 1\n" - "Cgy ng 1\n" - "qDw qu 1\n" - "gxW ng 1\n" - "Cwz sz 1\n" - "jhY th 1\n" - "Fvk ka 1\n" - "nfH an 1\n" - "zcW ch 1\n" - "zgC ng 1\n" - "Dfk ka 1\n" - "vpJ va 1\n" - "Wpj ij 1\n" - "sCb st 1\n" - "fgF ng 1\n" - "tPx th 1\n" - "oCp on 1\n" - "Nrx er 1\n" - "Hwm me 1\n" - "fRp pr 1\n" - "aeX an 1\n" - "jdI de 1\n" - "sBv st 1\n" - "vOv va 1\n" - "gQt th 1\n" - "Wmk ka 1\n" - "Pqj qu 1\n" - "khV th 1\n" - "Hkj ij 1\n" - "hbB th 1\n" - "vzF sz 1\n" - "Ybz sz 1\n" - "sXb st 1\n" - "yQr er 1\n" - "hhV th 1\n" - "tgW th 1\n" - "bXo on 1\n" - "Nxp pr 1\n" - "aOx an 1\n" - "zfb sz 1\n" - "Qxp pr 1\n" - "qwQ qu 1\n" - "fjV ij 1\n" - "hjY ij 1\n" - "wtX th 1\n" - "jgU ng 1\n" - "nMq an 1\n" - "Nwx wa 1\n" - "vPg ng 1\n" - "Xfh th 1\n" - "yFf ny 1\n" - "fHz sz 1\n" - "nZf an 1\n" - "jPt th 1\n" - "Jgb ng 1\n" - "xBb bi 1\n" - "sjO st 1\n" - "wDx wa 1\n" - "njN an 1\n" - "ohF th 1\n" - "pqR qu 1\n" - "Fzw sz 1\n" - "qrU qu 1\n" - "cjG ch 1\n" - 
"kFv ka 1\n" - "zQd sz 1\n" - "vbE vi 1\n" - "Ujt th 1\n" - "qIb qu 1\n" - "cFt th 1\n" - "bvY va 1\n" - "Szq qu 1\n" - "wlH le 1\n" - "qcY ch 1\n" - "gEw ng 1\n" - "xhL th 1\n" - "kVg ng 1\n" - "bfH be 1\n" - "Nrz er 1\n" - "sJn an 1\n" - "bWn an 1\n" - "nvK an 1\n" - "qiH qu 1\n" - "qbS qu 1\n" - "vxB va 1\n" - "tvT th 1\n" - "Nrh th 1\n" - "lYx le 1\n" - "tkX th 1\n" - "Gzx sz 1\n" - "vCx vi 1\n" - "Zbj ij 1\n" - "mWp me 1\n" - "Dqx qu 1\n" - "pfE pr 1\n" - "hvW th 1\n" - "Eox on 1\n" - "dbZ de 1\n" - "lNb le 1\n" - "rTd er 1\n" - "ljQ le 1\n" - "Vvp va 1\n" - "gJw ng 1\n" - "uqW qu 1\n" - "Gjf ij 1\n" - "pDd de 1\n" - "sgQ ng 1\n" - "hkQ th 1\n" - "fJc ch 1\n" - "mdI de 1\n" - "Gcp ch 1\n" - "pXa an 1\n" - "pQj ij 1\n" - "bgE ng 1\n" - "Kzv sz 1\n" - "cPb ch 1\n" - "Hcz ch 1\n" - "djQ de 1\n" - "pGd de 1\n" - "fyE ny 1\n" - "dBb de 1\n" - "ePj er 1\n" - "fgO ng 1\n" - "xRq qu 1\n" - "xqK qu 1\n" - "pKp pr 1\n" - "xmY me 1\n" - "hgO th 1\n" - "wdG de 1\n" - "hvZ th 1\n" - "srF er 1\n" - "Bvf vi 1\n" - "yvD va 1\n" - "xVg ng 1\n" - "fYg ng 1\n" - "bqd qu 1\n" - "eFq qu 1\n" - "cwZ ch 1\n" - "cqG ch 1\n" - "sKp st 1\n" - "hJq th 1\n" - "vLd de 1\n" - "hdK th 1\n" - "pcN ch 1\n" - "tNf th 1\n" - "xlK le 1\n" - "rJx er 1\n" - "qaN an 1\n" - "zKf sz 1\n" - "sNf st 1\n" - "qPz qu 1\n" - "bzL sz 1\n" - "Jdw de 1\n" - "nRb an 1\n" - "jNs st 1\n" - "tnV th 1\n" - "ynI an 1\n" - "tZp th 1\n" - "fZp pr 1\n" - "wMq qu 1\n" - "Onq an 1\n" - "zIh th 1\n" - "bvH va 1\n" - "Uvc ch 1\n" - "zxJ sz 1\n" - "Vmq qu 1\n" - "uPm qu 1\n" - "mwD me 1\n" - "jQc ch 1\n" - "gPk ng 1\n" - "vfV va 1\n" - "Tql qu 1\n" - "bJl le 1\n" - "lwO le 1\n" - "wbG wa 1\n" - "fTd de 1\n" - "Xtq th 1\n" - "hzX th 1\n" - "Pzv sz 1\n" - "Pmx me 1\n" - "xZm me 1\n" - "jCp ij 1\n" - "bKm me 1\n" - "Tmq qu 1\n" - "Hnf an 1\n" - "kjX ij 1\n" - "vgH ng 1\n" - "fSm me 1\n" - "ylN le 1\n" - "gvq qu 1\n" - "jTz sz 1\n" - "tWw th 1\n" - "ywB wa 1\n" - "bCq qu 1\n" - "dNk de 1\n" - "yCq qu 1\n" - "Rxj ij 1\n" - "nTq 
an 1\n" - "gFs ng 1\n" - "Xwq qu 1\n" - "gJl ng 1\n" - "vcR ch 1\n" - "fbT be 1\n" - "Fcd ch 1\n" - "Wxm me 1\n" - "qwv qu 1\n" - "Sfh th 1\n" - "lcK ch 1\n" - "sbV st 1\n" - "fSf fo 1\n" - "lbB le 1\n" - "Ocw ch 1\n" - "jgM ng 1\n" - "nbI an 1\n" - "qsK qu 1\n" - "Xyf ny 1\n" - "pxv va 1\n" - "mRc ch 1\n" - "Ogq qu 1\n" - "zuY qu 1\n" - "fXu qu 1\n" - "Wbj ij 1\n" - "Tbw wa 1\n" - "zrR er 1\n" - "gmP ng 1\n" - "cCm ch 1\n" - "gtQ th 1\n" - "phG th 1\n" - "qjV qu 1\n" - "ygG ng 1\n" - "wFb wa 1\n" - "rqL qu 1\n" - "qSx qu 1\n" - "ybK be 1\n" - "mqJ qu 1\n" - "Qrq qu 1\n" - "qdI qu 1\n" - "bcG ch 1\n" - "iFb in 1\n" - "mcZ ch 1\n" - "vCz sz 1\n" - "xHz tz 1\n" - "hjM th 1\n" - "qtL th 1\n" - "tmH th 1\n" - "slD le 1\n" - "vRz sz 1\n" - "gCd ng 1\n" - "Xxc ch 1\n" - "qKc ch 1\n" - "sIw st 1\n" - "fsY st 1\n" - "xrJ er 1\n" - "tNs th 1\n" - "gbD ng 1\n" - "wLl le 1\n" - "hFf th 1\n" - "Nxi in 1\n" - "fRb be 1\n" - "Jrb er 1\n" - "jEq qu 1\n" - "hwM th 1\n" - "uVw qu 1\n" - "fgN ng 1\n" - "mAo on 1\n" - "Pjb ij 1\n" - "npP in 1\n" - "Jcy ch 1\n" - "yJb bi 1\n" - "jxI ij 1\n" - "Kkc ch 1\n" - "kwV ka 1\n" - "gRf ng 1\n" - "Wfm me 1\n" - "Tdp po 1\n" - "wEz sz 1\n" - "Lvk ka 1\n" - "Dqn an 1\n" - "tqL th 1\n" - "jJq qu 1\n" - "vdC de 1\n" - "hxU th 1\n" - "xUe er 1\n" - "tQc th 1\n" - "Lzk sz 1\n" - "dTj de 1\n" - "Tlz le 1\n" - "xQw wa 1\n" - "Fcq ch 1\n" - "wgE ng 1\n" - "Ckd de 1\n" - "yKs st 1\n" - "xwS wa 1\n" - "wRt th 1\n" - "gkK ng 1\n" - "hQv th 1\n" - "sLp st 1\n" - "jAi in 1\n" - "dmG de 1\n" - "jKn an 1\n" - "qUb qu 1\n" - "wXy wa 1\n" - "bzJ sz 1\n" - "gzJ ng 1\n" - "hNz th 1\n" - "ygY ng 1\n" - "qhU th 1\n" - "afX an 1\n" - "jZw ij 1\n" - "Xdx de 1\n" - "Tdx de 1\n" - "jNn an 1\n" - "vXf va 1\n" - "qcE ch 1\n" - "Mnw an 1\n" - "qDh th 1\n" - "Tdj de 1\n" - "dgJ ng 1\n" - "sdR st 1\n" - "qGn an 1\n" - "Mjj ij 1\n" - "sxH st 1\n" - "Ppz sz 1\n" - "gfV ng 1\n" - "fOy ny 1\n" - "Nvx vi 1\n" - "qaV an 1\n" - "xjl le 1\n" - "xgZ ng 1\n" - "cGv ch 1\n" - "Zxu qu 
1\n" - "Mfp pr 1\n" - "zFp sz 1\n" - "jgJ ng 1\n" - "bpG pr 1\n" - "vKz sz 1\n" - "hqI th 1\n" - "Qgw ng 1\n" - "Qyy ny 1\n" - "jmI ij 1\n" - "Vgd ng 1\n" - "xCt th 1\n" - "yVs st 1\n" - "uEq qu 1\n" - "dcN ch 1\n" - "Bzb sz 1\n" - "gVl ng 1\n" - "sXg ng 1\n" - "kQf ka 1\n" - "lrY er 1\n" - "Vtd th 1\n" - "nHs an 1\n" - "wjN ij 1\n" - "rzJ er 1\n" - "sYy st 1\n" - "wxQ wa 1\n" - "Ztb th 1\n" - "tWf th 1\n" - "tCx th 1\n" - "aFb an 1\n" - "lqf qu 1\n" - "feZ er 1\n" - "fPz sz 1\n" - "cjY ch 1\n" - "wKh th 1\n" - "Qhy th 1\n" - "dCj de 1\n" - "bkH ka 1\n" - "yjD ij 1\n" - "jTs st 1\n" - "hxI th 1\n" - "lvK vi 1\n" - "Lwz sz 1\n" - "swQ st 1\n" - "dTk di 1\n" - "fsO st 1\n" - "ljE le 1\n" - "wjM ij 1\n" - "uQk qu 1\n" - "xPg ng 1\n" - "vmC va 1\n" - "qsD qu 1\n" - "gDw ng 1\n" - "wJk ka 1\n" - "Zpq qu 1\n" - "Yhg th 1\n" - "kNc ch 1\n" - "bWl le 1\n" - "Fwh th 1\n" - "fHx fo 1\n" - "Fnv an 1\n" - "fdL de 1\n" - "oqD qu 1\n" - "aYx an 1\n" - "Vqx qu 1\n" - "vKf va 1\n" - "Cbw wa 1\n" - "vyq qu 1\n" - "cqZ ch 1\n" - "Rfh th 1\n" - "Swc ch 1\n" - "qNi qu 1\n" - "qoW qu 1\n" - "jhD th 1\n" - "kJq qu 1\n" - "gdF ng 1\n" - "pvF va 1\n" - "cpV ch 1\n" - "qtC th 1\n" - "gWm ng 1\n" - "gPc ch 1\n" - "jBs st 1\n" - "rlV er 1\n" - "gZc ch 1\n" - "kTk ka 1\n" - "hfJ th 1\n" - "Svv va 1\n" - "kmG ka 1\n" - "sDq qu 1\n" - "hGb th 1\n" - "Blq qu 1\n" - "Qry er 1\n" - "hHz th 1\n" - "yLx ny 1\n" - "lqF qu 1\n" - "wbB bi 1\n" - "iYr in 1\n" - "wDz tz 1\n" - "xsJ st 1\n" - "bzY sz 1\n" - "pMw pr 1\n" - "Uuj qu 1\n" - "hxK th 1\n" - "Xvf va 1\n" - "krZ er 1\n" - "fwV wa 1\n" - "gPw ng 1\n" - "qVn an 1\n" - "Qnq an 1\n" - "gDb ng 1\n" - "hVr th 1\n" - "zKh th 1\n" - "Fxy ny 1\n" - "oZj on 1\n" - "zAy sz 1\n" - "jMm ij 1\n" - "mvI va 1\n" - "Fwm me 1\n" - "zql qu 1\n" - "eVv er 1\n" - "yWq qu 1\n" - "Lwk ka 1\n" - "Lmw me 1\n" - "vXb va 1\n" - "Xhs th 1\n" - "hlR th 1\n" - "Qqw qu 1\n" - "zbK sz 1\n" - "Pxl le 1\n" - "nPm an 1\n" - "wQo on 1\n" - "Dcb ch 1\n" - "hjT th 1\n" - "rjJ er 1\n" 
- "bMc ch 1\n" - "iYb in 1\n" - "Fqj qu 1\n" - "Uoq qu 1\n" - "Xvp va 1\n" - "Lwb wa 1\n" - "Jpd de 1\n" - "qUg qu 1\n" - "lJx le 1\n" - "Xwd de 1\n" - "xKf fo 1\n" - "Znq an 1\n" - "qCb qu 1\n" - "Zbz sz 1\n" - "Qux qu 1\n" - "qNq qu 1\n" - "fvV va 1\n" - "Qqz qu 1\n" - "Hdf de 1\n" - "ySx ny 1\n" - "qSm qu 1\n" - "Lhb th 1\n" - "Mvf va 1\n" - "cDp ch 1\n" - "bHq qu 1\n" - "Wmg ng 1\n" - "ytG th 1\n" - "dbJ de 1\n" - "Ffg ng 1\n" - "hvM th 1\n" - "Wqy qu 1\n" - "gXd ng 1\n" - "uFg qu 1\n" - "jpR ij 1\n" - "Xcc ch 1\n" - "Tbp pr 1\n" - "Qwq qu 1\n" - "tPp th 1\n" - "fMh th 1\n" - "qiV qu 1\n" - "dcB ch 1\n" - "dFx de 1\n" - "Ymj ij 1\n" - "Ldq qu 1\n" - "lxV le 1\n" - "cCk ch 1\n" - "hVx th 1\n" - "dlT le 1\n" - "khP th 1\n" - "qVg qu 1\n" - "Ljj ij 1\n" - "zCv sz 1\n" - "ywV wa 1\n" - "ybZ be 1\n" - "vGh th 1\n" - "Bvj ij 1\n" - "Zqq qu 1\n" - "Gwk ka 1\n" - "qLq qu 1\n" - "fkX ka 1\n" - "Nbz sz 1\n" - "bXm me 1\n" - "dQh th 1\n" - "uYd qu 1\n" - "xYs st 1\n" - "zSs st 1\n" - "ycZ ch 1\n" - "lnU an 1\n" - "tCj th 1\n" - "xnY an 1\n" - "ptQ th 1\n" - "swO st 1\n" - "hXu th 1\n" - "mBw mb 1\n" - "wmF me 1\n" - "xJx xe 1\n" - "dXj de 1\n" - "eqg qu 1\n" - "nBf an 1\n" - "Xbd de 1\n" - "fcQ ch 1\n" - "xkS ka 1\n" - "tOq th 1\n" - "uQb qu 1\n" - "cvV ch 1\n" - "sBh th 1\n" - "dCk de 1\n" - "cKv ch 1\n" - "cVf ch 1\n" - "wZx wa 1\n" - "Bvm va 1\n" - "lqJ qu 1\n" - "fxR fo 1\n" - "vmF va 1\n" - "xnq an 1\n" - "bBg ng 1\n" - "tPd th 1\n" - "fNs st 1\n" - "Fkp ka 1\n" - "Yye er 1\n" - "Ubq qu 1\n" - "xzP sz 1\n" - "fmQ me 1\n" - "qcA ch 1\n" - "yKc ch 1\n" - "xvZ va 1\n" - "cbN ch 1\n" - "yYl le 1\n" - "Pmw me 1\n" - "wFx wa 1\n" - "hRh th 1\n" - "qpS qu 1\n" - "Vqf qu 1\n" - "Ghg th 1\n" - "Wvq qu 1\n" - "xkC ka 1\n" - "ytM th 1\n" - "Lnh th 1\n" - "dxD de 1\n" - "bMw wa 1\n" - "xvU va 1\n" - "Qzx sz 1\n" - "srM er 1\n" - "vLg ng 1\n" - "cGq ch 1\n" - "Vmy me 1\n" - "hcL th 1\n" - "pKx pr 1\n" - "Jxs st 1\n" - "blW le 1\n" - "pQo on 1\n" - "bEq qu 1\n" - "fWt th 1\n" - 
"sYm st 1\n" - "nKw an 1\n" - "dtF th 1\n" - "kTz sz 1\n" - "epX er 1\n" - "fCp pr 1\n" - "bFk ka 1\n" - "Rzb sz 1\n" - "vqI qu 1\n" - "Zhc th 1\n" - "Hvv va 1\n" - "mVt th 1\n" - "Iwx wa 1\n" - "phR th 1\n" - "wNb wa 1\n" - "fRc ch 1\n" - "ljq qu 1\n" - "lvY le 1\n" - "jcA ch 1\n" - "dGw de 1\n" - "Cqn an 1\n" - "mBx me 1\n" - "Mmx me 1\n" - "Vxa an 1\n" - "Xhw th 1\n" - "eqK qu 1\n" - "tCw th 1\n" - "zvU sz 1\n" - "lxQ le 1\n" - "vMv va 1\n" - "gqA qu 1\n" - "Jbn an 1\n" - "gCj ng 1\n" - "oTf on 1\n" - "kbW ka 1\n" - "qjY qu 1\n" - "Rqf qu 1\n" - "hYh th 1\n" - "yhE th 1\n" - "gYj ng 1\n" - "jcI ch 1\n" - "qvJ qu 1\n" - "qoC qu 1\n" - "qFc ch 1\n" - "qqH qu 1\n" - "Nxq qu 1\n" - "wVo on 1\n" - "zHv sz 1\n" - "ybS be 1\n" - "Hwc ch 1\n" - "Mxa an 1\n" - "xkL ka 1\n" - "qmO qu 1\n" - "qbR qu 1\n" - "Zfy ny 1\n" - "Rkf ka 1\n" - "vgV ng 1\n" - "hBw th 1\n" - "pXx pr 1\n" - "brQ er 1\n" - "fvO va 1\n" - "hDc th 1\n" - "xQa an 1\n" - "wfF wa 1\n" - "hZx th 1\n" - "Jgz ng 1\n" - "qnY an 1\n" - "qXl le 1\n" - "eNb er 1\n" - "fxS fo 1\n" - "sNk st 1\n" - "mFc ch 1\n" - "Uux qu 1\n" - "Ydg ng 1\n" - "ozW on 1\n" - "Xzd de 1\n" - "Jfe er 1\n" - "Ftx th 1\n" - "vzR sz 1\n" - "wZk ka 1\n" - "oHz on 1\n" - "qvT qu 1\n" - "qoA qu 1\n" - "Sdq qu 1\n" - "txW th 1\n" - "Egf ng 1\n" - "dMf de 1\n" - "Rhh th 1\n" - "vRn an 1\n" - "ujX qu 1\n" - "fRj ij 1\n" - "gjA ng 1\n" - "gDg ng 1\n" - "smZ st 1\n" - "jId de 1\n" - "qkM qu 1\n" - "bKz sz 1\n" - "sCg ng 1\n" - "uTp qu 1\n" - "lVs le 1\n" - "uQo qu 1\n" - "Jfs st 1\n" - "vKm va 1\n" - "jQh th 1\n" - "fUf fo 1\n" - "uTf qu 1\n" - "Bnv an 1\n" - "tdU th 1\n" - "dxY de 1\n" - "hgV th 1\n" - "Zdf de 1\n" - "hqS th 1\n" - "eJg ng 1\n" - "qGu un 1\n" - "vmE va 1\n" - "gKz ng 1\n" - "mUg ng 1\n" - "Vjy ij 1\n" - "uvJ qu 1\n" - "mHr er 1\n" - "Mhv th 1\n" - "zsZ st 1\n" - "Vzy sz 1\n" - "jKb ij 1\n" - "zPp sz 1\n" - "qgD qu 1\n" - "Xhf th 1\n" - "Ogp ng 1\n" - "jwX ij 1\n" - "lYy le 1\n" - "qzD qu 1\n" - "wXj jo 1\n" - "Kpx pr 1\n" - "ydY 
de 1\n" - "vBq qu 1\n" - "Zpp pr 1\n" - "bDd de 1\n" - "Fjk ij 1\n" - "kdA de 1\n" - "zWt th 1\n" - "wSd de 1\n" - "kFd de 1\n" - "Sxl le 1\n" - "Fvh th 1\n" - "pbR pr 1\n" - "qrD qu 1\n" - "vZs st 1\n" - "vUm va 1\n" - "wEy wa 1\n" - "jjH jo 1\n" - "sDg ng 1\n" - "Ujc ch 1\n" - "knI an 1\n" - "fOa an 1\n" - "Cjg ng 1\n" - "tbV th 1\n" - "gqd qu 1\n" - "ePx er 1\n" - "wRm me 1\n" - "pvG va 1\n" - "Qyl le 1\n" - "cwG ch 1\n" - "Dtq th 1\n" - "Pbz sz 1\n" - "Rgq qu 1\n" - "fjU ij 1\n" - "jJf ij 1\n" - "Rxq qu 1\n" - "Jtx th 1\n" - "qvZ qu 1\n" - "kKm ka 1\n" - "hFm th 1\n" - "kcX ch 1\n" - "fNm me 1\n" - "bpB pr 1\n" - "xqY qu 1\n" - "hYy th 1\n" - "gGp ng 1\n" - "Vfs st 1\n" - "wDt th 1\n" - "bTs st 1\n" - "hfV th 1\n" - "qzp qu 1\n" - "yUv va 1\n" - "qGc ch 1\n" - "Vdl le 1\n" - "Xjt th 1\n" - "kMj ij 1\n" - "hTg th 1\n" - "Hlc ch 1\n" - "tKz th 1\n" - "Wvt th 1\n" - "lMz le 1\n" - "Mwx wa 1\n" - "Wlv le 1\n" - "xzG sz 1\n" - "gmD ng 1\n" - "zOi in 1\n" - "bbI be 1\n" - "bpI pr 1\n" - "fQg ng 1\n" - "pQv va 1\n" - "vEb va 1\n" - "jFz sz 1\n" - "Whf th 1\n" - "jvQ ij 1\n" - "qYx qu 1\n" - "rxM er 1\n" - "vPp va 1\n" - "fjD ij 1\n" - "Vwy wa 1\n" - "Yqc ch 1\n" - "tcW th 1\n" - "jYg ng 1\n" - "gJb ng 1\n" - "Tkc ch 1\n" - "qhj th 1\n" - "jxF ij 1\n" - "Fpz sz 1\n" - "kXh th 1\n" - "lgZ ng 1\n" - "znI an 1\n" - "qyN qu 1\n" - "vBj ij 1\n" - "jSx ij 1\n" - "cqI ch 1\n" - "qYv qu 1\n" - "Zrr er 1\n" - "sHr er 1\n" - "vrK er 1\n" - "pbH pr 1\n" - "zVh th 1\n" - "dQb de 1\n" - "lxF le 1\n" - "sgW ng 1\n" - "Ghf th 1\n" - "xpq qu 1\n" - "qhN th 1\n" - "Fsf st 1\n" - "Qga an 1\n" - "Rdp de 1\n" - "fvK va 1\n" - "Ydz de 1\n" - "wvW va 1\n" - "cPm ch 1\n" - "cQy ch 1\n" - "ywF wa 1\n" - "Ypq qu 1\n" - "Rsj st 1\n" - "Ygw ng 1\n" - "xVp pr 1\n" - "yxL ny 1\n" - "Ywl le 1\n" - "jMc ch 1\n" - "zTl le 1\n" - "aIq an 1\n" - "qQi qu 1\n" - "tqI th 1\n" - "Hvp va 1\n" - "wQd de 1\n" - "hfG th 1\n" - "cTd ch 1\n" - "bfQ be 1\n" - "Kfd de 1\n" - "cXs ch 1\n" - "vYx va 1\n" - "Qoc ro 
1\n" - "vrL er 1\n" - "pZk ka 1\n" - "cdX ch 1\n" - "Ygn an 1\n" - "lnO an 1\n" - "mfY me 1\n" - "fnV an 1\n" - "mbZ me 1\n" - "gbE ng 1\n" - "xjZ ij 1\n" - "Fpy pr 1\n" - "npE an 1\n" - "Rxy ny 1\n" - "oWp on 1\n" - "hVh th 1\n" - "yJf ny 1\n" - "sQd st 1\n" - "Zvg ng 1\n" - "bDm me 1\n" - "pLv va 1\n" - "wwF wa 1\n" - "xBh th 1\n" - "qKm qu 1\n" - "wXx wa 1\n" - "Iux qu 1\n" - "dgB ng 1\n" - "gJp ng 1\n" - "qgx qu 1\n" - "fNh ho 1\n" - "cvE ch 1\n" - "cgH ch 1\n" - "lNs le 1\n" - "vDj ij 1\n" - "zcG ch 1\n" - "fZn on 1\n" - "uUx qu 1\n" - "clQ le 1\n" - "fdH de 1\n" - "eZj er 1\n" - "Vqc ch 1\n" - "Rcx ch 1\n" - "jGh th 1\n" - "qzM sz 1\n" - "Qpw pr 1\n" - "Spx pr 1\n" - "cGx ch 1\n" - "cqA ch 1\n" - "vbK va 1\n" - "xeW er 1\n" - "vkC ka 1\n" - "xzB sz 1\n" - "xuR qu 1\n" - "Oyq qu 1\n" - "Mqx qu 1\n" - "qqj qu 1\n" - "yqY qu 1\n" - "cwL ch 1\n" - "pPt th 1\n" - "dSx de 1\n" - "dPk de 1\n" - "uzH qu 1\n" - "fvH va 1\n" - "pcH ch 1\n" - "hlY le 1\n" - "qtX th 1\n" - "Nvs st 1\n" - "hvL th 1\n" - "zRk sz 1\n" - "tNj th 1\n" - "Dbv va 1\n" - "jKc ch 1\n" - "dKy de 1\n" - "yVz sz 1\n" - "iqJ qu 1\n" - "zgJ ng 1\n" - "eJs er 1\n" - "wOx wa 1\n" - "rXh th 1\n" - "Hqp qu 1\n" - "vWx va 1\n" - "bTt th 1\n" - "fCy ny 1\n" - "aOq an 1\n" - "oCg ng 1\n" - "pnE an 1\n" - "Fwc ch 1\n" - "zrT er 1\n" - "xHs st 1\n" - "ydX de 1\n" - "dkV de 1\n" - "Rqy qu 1\n" - "Zyq qu 1\n" - "kXl le 1\n" - "oJt th 1\n" - "sxI st 1\n" - "qZw qu 1\n" - "zqx qu 1\n" - "clZ ch 1\n" - "swX sz 1\n" - "aHw an 1\n" - "rWc ch 1\n" - "cQp ch 1\n" - "Jwj ij 1\n" - "qeV qu 1\n" - "sQj st 1\n" - "Rpb pr 1\n" - "mZq qu 1\n" - "rBx er 1\n" - "mxV me 1\n" - "Mvy ny 1\n" - "cRl ch 1\n" - "Fzv sz 1\n" - "pBs sz 1\n" - "jWs st 1\n" - "vqK qu 1\n" - "Ixl le 1\n" - "yhw th 1\n" - "wyQ wa 1\n" - "uCb qu 1\n" - "zrF sz 1\n" - "iyQ in 1\n" - "qsP qu 1\n" - "hLr er 1\n" - "cvX ch 1\n" - "Scq ch 1\n" - "zrL er 1\n" - "ecU ch 1\n" - "Vxz sz 1\n" - "fCq qu 1\n" - "ovX on 1\n" - "Uqn an 1\n" - "sVw st 1\n" - "spX st 1\n" 
- "Qkv ka 1\n" - "fyW ny 1\n" - "rBc ch 1\n" - "mdC de 1\n" - "Wjk ij 1\n" - "jYh th 1\n" - "hXq th 1\n" - "xkm ka 1\n" - "hhU th 1\n" - "Dvz sz 1\n" - "tcq th 1\n" - "wZy wa 1\n" - "jtC th 1\n" - "qnD an 1\n" - "vmB va 1\n" - "kjB ij 1\n" - "cdG ch 1\n" - "Vkt th 1\n" - "hNq th 1\n" - "Jft th 1\n" - "iWv in 1\n" - "Wtn th 1\n" - "lfE le 1\n" - "dZb de 1\n" - "eqQ qu 1\n" - "gUq qu 1\n" - "qwL qu 1\n" - "hUq th 1\n" - "hGc th 1\n" - "nwX an 1\n" - "Nbt th 1\n" - "jjP ij 1\n" - "sqJ qu 1\n" - "lQf le 1\n" - "jZz sz 1\n" - "wWn an 1\n" - "Mxu qu 1\n" - "qFi qu 1\n" - "mjX ij 1\n" - "vDx va 1\n" - "vDn an 1\n" - "wUc ch 1\n" - "zhU th 1\n" - "zHw sz 1\n" - "Tjl le 1\n" - "xuX qu 1\n" - "jZp ij 1\n" - "wVc ch 1\n" - "gFp ng 1\n" - "Gyq qu 1\n" - "Jlh th 1\n" - "Bkf ka 1\n" - "hhJ th 1\n" - "tvW th 1\n" - "bIy ny 1\n" - "Llg ng 1\n" - "zJz sz 1\n" - "qeQ qu 1\n" - "nlX an 1\n" - "tcQ th 1\n" - "qtU th 1\n" - "fkW ka 1\n" - "gJk ng 1\n" - "gQy ng 1\n" - "sPz st 1\n" - "bmO me 1\n" - "Ytx th 1\n" - "yqF qu 1\n" - "iBk in 1\n" - "uzV qu 1\n" - "xNp pr 1\n" - "zRz sz 1\n" - "qHq qu 1\n" - "yuY qu 1\n" - "jqh th 1\n" - "xBd de 1\n" - "vvA va 1\n" - "eVj er 1\n" - "zGp sz 1\n" - "vcB ch 1\n" - "kpH ka 1\n" - "mDw me 1\n" - "vuG qu 1\n" - "vVy ny 1\n" - "mzS sz 1\n" - "jvM ij 1\n" - "sfV st 1\n" - "hQq th 1\n" - "wTm me 1\n" - "Plq qu 1\n" - "fxJ fo 1\n" - "qQq qu 1\n" - "Fnw an 1\n" - "qJo qu 1\n" - "Nsg ng 1\n" - "Ljx ij 1\n" - "sRb st 1\n" - "pcY ch 1\n" - "vVm va 1\n" - "sQg ng 1\n" - "Ywz sz 1\n" - "hqJ th 1\n" - "sjK st 1\n" - "Zks st 1\n" - "Mjt th 1\n" - "Dwh th 1\n" - "wbN wa 1\n" - "mvK va 1\n" - "rLp er 1\n" - "Lbm me 1\n" - "wjO ij 1\n" - "lQz le 1\n" - "Kwf wa 1\n" - "qmB qu 1\n" - "Xbv va 1\n" - "cKq ch 1\n" - "hqR th 1\n" - "yVb be 1\n" - "xcF ch 1\n" - "Ewv va 1\n" - "Gpq qu 1\n" - "Gbh th 1\n" - "yHj ij 1\n" - "gXk ng 1\n" - "qOx qu 1\n" - "Kbw wa 1\n" - "qHx qu 1\n" - "wjP ij 1\n" - "jQl le 1\n" - "Ffq qu 1\n" - "oYb on 1\n" - "Fqo qu 1\n" - "wXz sz 1\n" - 
"fIp pr 1\n" - "pMf pr 1\n" - "nqP an 1\n" - "bbZ be 1\n" - "hsX th 1\n" - "Wjr er 1\n" - "Zqn an 1\n" - "Pxb be 1\n" - "Bzs st 1\n" - "pbI pr 1\n" - "Yvp va 1\n" - "jxM ij 1\n" - "jyZ ij 1\n" - "mzJ sz 1\n" - "vYg ng 1\n" - "qMm qu 1\n" - "fhL th 1\n" - "qOg qu 1\n" - "Mnp an 1\n" - "Ifv va 1\n" - "qYm qu 1\n" - "gxv ng 1\n" - "zfG sz 1\n" - "fqG qu 1\n" - "lLq qu 1\n" - "hkK th 1\n" - "oYk on 1\n" - "lRg le 1\n" - "lOx le 1\n" - "Vxv va 1\n" - "qAs qu 1\n" - "tKk th 1\n" - "lhF th 1\n" - "dCv de 1\n" - "wvY va 1\n" - "wiV in 1\n" - "crF ch 1\n" - "fEp pr 1\n" - "Rrl er 1\n" - "Zjy ij 1\n" - "qbY qu 1\n" - "kMw ka 1\n" - "vZi in 1\n" - "Fxi in 1\n" - "zkS sz 1\n" - "vKb va 1\n" - "zbI sz 1\n" - "uHg qu 1\n" - "qzG qu 1\n" - "jMk ij 1\n" - "Fkc ch 1\n" - "dKm de 1\n" - "nHh th 1\n" - "xGc ch 1\n" - "qpU qu 1\n" - "rcU ch 1\n" - "aWx an 1\n" - "xdS de 1\n" - "qhV th 1\n" - "aHc ch 1\n" - "vmI va 1\n" - "Wcc ch 1\n" - "zBn an 1\n" - "kQe er 1\n" - "awJ an 1\n" - "xdD de 1\n" - "yZx ny 1\n" - "Kkd de 1\n" - "wBz sz 1\n" - "lzA le 1\n" - "yyT ny 1\n" - "qeK qu 1\n" - "zpE sz 1\n" - "zFn an 1\n" - "yyG ny 1\n" - "lLw le 1\n" - "bvS va 1\n" - "mvX va 1\n" - "hlW th 1\n" - "pgX ng 1\n" - "lQt th 1\n" - "ymY me 1\n" - "mjJ ij 1\n" - "mVc ch 1\n" - "Xqs qu 1\n" - "bKr er 1\n" - "bHt th 1\n" - "jRv ij 1\n" - "Lpw pr 1\n" - "zPb sz 1\n" - "wkR ka 1\n" - "kxS ka 1\n" - "jWf ij 1\n" - "Nkx ka 1\n" - "Kcj ch 1\n" - "bJb be 1\n" - "xwZ wa 1\n" - "Rqc ch 1\n" - "Qzg ng 1\n" - "jwH ij 1\n" - "Dqd qu 1\n" - "vLf va 1\n" - "hXd th 1\n" - "cfD ch 1\n" - "sjX st 1\n" - "hzI th 1\n" - "qUd qu 1\n" - "tSx th 1\n" - "hxA th 1\n" - "gxK ng 1\n" - "hVm th 1\n" - "yzX sz 1\n" - "Ucs ch 1\n" - "qaH an 1\n" - "Yfy ny 1\n" - "sJg ng 1\n" - "iHp in 1\n" - "iyC in 1\n" - "Tjf ij 1\n" - "dJp de 1\n" - "Jgv ng 1\n" - "uJf qu 1\n" - "nNl an 1\n" - "zdA sz 1\n" - "xIq qu 1\n" - "qjK qu 1\n" - "vzY sz 1\n" - "wqv qu 1\n" - "Xvx va 1\n" - "fJr er 1\n" - "nqH an 1\n" - "qGd qu 1\n" - "vQg ng 1\n" - "iQz 
in 1\n" - "tLn th 1\n" - "lVj le 1\n" - "vqW qu 1\n" - "zrN er 1\n" - "xKz sz 1\n" - "waV an 1\n" - "Ydq qu 1\n" - "dkq qu 1\n" - "fCn an 1\n" - "Xcy ch 1\n" - "pIl le 1\n" - "hXl th 1\n" - "aFs an 1\n" - "iwM in 1\n" - "Gwx wa 1\n" - "Xlp le 1\n" - "Qfu qu 1\n" - "jqE qu 1\n" - "lqP qu 1\n" - "kVq qu 1\n" - "xqJ qu 1\n" - "Mzf sz 1\n" - "mNw me 1\n" - "Wsv st 1\n" - "fnM an 1\n" - "uSf qu 1\n" - "hCf th 1\n" - "zjH sz 1\n" - "mTs st 1\n" - "jWz sz 1\n" - "Dxk ka 1\n" - "Ztd th 1\n" - "Rvv va 1\n" - "gBx ng 1\n" - "Lzx sz 1\n" - "ezU er 1\n" - "jqH qu 1\n" - "Rjh th 1\n" - "Dcg ch 1\n" - "bBh th 1\n" - "fhO th 1\n" - "hpH th 1\n" - "Zqa an 1\n" - "kCx ka 1\n" - "rRv er 1\n" - "dkZ de 1\n" - "Ggx ng 1\n" - "pQh th 1\n" - "Gcv ch 1\n" - "Scg ch 1\n" - "vDb va 1\n" - "pbD pr 1\n" - "vEh th 1\n" - "vlE le 1\n" - "Rjl le 1\n" - "lFw le 1\n" - "zqN qu 1\n" - "aPq an 1\n" - "gjD ng 1\n" - "jcE ch 1\n" - "wSw wa 1\n" - "Dgj ng 1\n" - "huZ th 1\n" - "gPv ng 1\n" - "pJj ij 1\n" - "cQh th 1\n" - "mwq qu 1\n" - "vpA va 1\n" - "hGf th 1\n" - "cXz ch 1\n" - "Lcb ch 1\n" - "fJm me 1\n" - "Qzy sz 1\n" - "zQm sz 1\n" - "Hhn th 1\n" - "xdY de 1\n" - "uYl qu 1\n" - "Xkj ij 1\n" - "jvA ij 1\n" - "Jvp va 1\n" - "iwZ in 1\n" - "zkq qu 1\n" - "Nhb th 1\n" - "kmV ka 1\n" - "qKd qu 1\n" - "Bcq ch 1\n" - "pfY pr 1\n" - "qUj qu 1\n" - "gqR qu 1\n" - "gwO ng 1\n" - "gXm ng 1\n" - "jHh th 1\n" - "rBn an 1\n" - "uPw qu 1\n" - "pJk ka 1\n" - "Ipj ij 1\n" - "yqM qu 1\n" - "Yqn an 1\n" - "Kbz sz 1\n" - "vfL va 1\n" - "npZ an 1\n" - "oqY qu 1\n" - "Zqf qu 1\n" - "jzU sz 1\n" - "vNx va 1\n" - "hXf th 1\n" - "fCg ng 1\n" - "nzJ an 1\n" - "mKj ij 1\n" - "wmB me 1\n" - "Wjq qu 1\n" - "Dbq qu 1\n" - "zXy sz 1\n" - "xYw wa 1\n" - "fQf fo 1\n" - "dqP qu 1\n" - "Kxq qu 1\n" - "jdZ de 1\n" - "qrX qu 1\n" - "Lxb be 1\n" - "yfL ny 1\n" - "yYm me 1\n" - "sbH st 1\n" - "wlV le 1\n" - "uKp qu 1\n" - "hhN th 1\n" - "Xxq qu 1\n" - "jLg ng 1\n" - "nQh th 1\n" - "Wqp qu 1\n" - "Nqd qu 1\n" - "jfD ij 1\n" - "Jnq an 
1\n" - "Bzn an 1\n" - "mJr er 1\n" - "qaX an 1\n" - "pJw pr 1\n" - "jHz sz 1\n" - "yaX an 1\n" - "Whs th 1\n" - "hYr th 1\n" - "tmS th 1\n" - "Fhy th 1\n" - "Ggd ng 1\n" - "Xmy me 1\n" - "Rqh th 1\n" - "Fsn an 1\n" - "qhA th 1\n" - "fhX th 1\n" - "Hqx qu 1\n" - "wIo on 1\n" - "Ibx be 1\n" - "cFx ch 1\n" - "dRg ng 1\n" - "snV an 1\n" - "kqz qu 1\n" - "eqO er 1\n" - "Gkz sz 1\n" - "Nnz an 1\n" - "yqE qu 1\n" - "cJh th 1\n" - "xvA va 1\n" - "qMx qu 1\n" - "dwS de 1\n" - "yAj ij 1\n" - "xCq qu 1\n" - "gmE ng 1\n" - "bhP th 1\n" - "rwE er 1\n" - "Xnz an 1\n" - "Uhw th 1\n" - "xnR an 1\n" - "nfZ an 1\n" - "Qpx pr 1\n" - "qxO qu 1\n" - "lGt th 1\n" - "qRc ch 1\n" - "Rwx wa 1\n" - "tcM th 1\n" - "fBd de 1\n" - "Rjc ch 1\n" - "dfY de 1\n" - "hhR th 1\n" - "bCj ij 1\n" - "fqL qu 1\n" - "lzS le 1\n" - "Lrm er 1\n" - "eqE qu 1\n" - "vgL ng 1\n" - "wQr er 1\n" - "bwB wa 1\n" - "lGf le 1\n" - "Nwq qu 1\n" - "sdU st 1\n" - "Zxv va 1\n" - "yDm me 1\n" - "Lsw st 1\n" - "cNq ch 1\n" - "Dqc ch 1\n" - "vLz sz 1\n" - "dWv de 1\n" - "fkQ ka 1\n" - "zjD sz 1\n" - "yYv va 1\n" - "qeT qu 1\n" - "cvL ch 1\n" - "wkA ka 1\n" - "Nvb va 1\n" - "djM de 1\n" - "hgK th 1\n" - "pXb pr 1\n" - "Tlw le 1\n" - "Rhz ha 1\n" - "wkP ka 1\n" - "wDk ka 1\n" - "eFc ch 1\n" - "ehU th 1\n" - "Xly le 1\n" - "wxK wa 1\n" - "dPw de 1\n" - "sFd st 1\n" - "vcI ch 1\n" - "Fxd de 1\n" - "fvR va 1\n" - "jqs qu 1\n" - "rMj er 1\n" - "qbW qu 1\n" - "kpP ka 1\n" - "Bvw va 1\n" - "Tmk ka 1\n" - "hbP th 1\n" - "hMx th 1\n" - "jgL ng 1\n" - "efU er 1\n" - "cQb ch 1\n" - "mcA ch 1\n" - "Ewq qu 1\n" - "xmV me 1\n" - "Qcq ch 1\n" - "mzG sz 1\n" - "pKm me 1\n" - "Fwq qu 1\n" - "lRn an 1\n" - "jPk ij 1\n" - "jMb ij 1\n" - "mzO sz 1\n" - "oFw on 1\n" - "hJb th 1\n" - "sVq qu 1\n" - "iVz in 1\n" - "oqU qu 1\n" - "bhW th 1\n" - "Oxq qu 1\n" - "mQk ka 1\n" - "Xfb be 1\n" - "cNw ch 1\n" - "fgZ ng 1\n" - "Tvf va 1\n" - "sIx st 1\n" - "uZs qu 1\n" - "xzX sz 1\n" - "Ylq qu 1\n" - "oHf on 1\n" - "csU ch 1\n" - "Qzs st 1\n" - "Bfq qu 1\n" 
- "yJn an 1\n" - "pgQ ng 1\n" - "wxk ka 1\n" - "Tnw an 1\n" - "bKx be 1\n" - "bqX qu 1\n" - "Qjs st 1\n" - "pFh th 1\n" - "Xvl le 1\n" - "kfB ka 1\n" - "mZl le 1\n" - "Csg ng 1\n" - "vrJ er 1\n" - "Gfy ny 1\n" - "jbP ij 1\n" - "Yvl le 1\n" - "Hxb be 1\n" - "lrD er 1\n" - "qTl qu 1\n" - "aBc ch 1\n" - "fGb be 1\n" - "mhS th 1\n" - "zTp sz 1\n" - "kRd de 1\n" - "Wph th 1\n" - "Npj ij 1\n" - "lwS le 1\n" - "mGm me 1\n" - "nqT an 1\n" - "Ujn an 1\n" - "xjO ij 1\n" - "dMz sz 1\n" - "wKj ij 1\n" - "yZr er 1\n" - "Njb ij 1\n" - "Ylr er 1\n" - "mVf me 1\n" - "gZg ng 1\n" - "Hcb ch 1\n" - "xcB ch 1\n" - "kMm ka 1\n" - "lwC le 1\n" - "Dnf an 1\n" - "hjW th 1\n" - "rTk er 1\n" - "Vzj sz 1\n" - "Vxy ny 1\n" - "wlQ le 1\n" - "Nrv er 1\n" - "pjP ij 1\n" - "fwZ wa 1\n" - "tnW th 1\n" - "oJw on 1\n" - "kJx ka 1\n" - "Vpj ij 1\n" - "qAw qu 1\n" - "Qht th 1\n" - "bCn an 1\n" - "vrU er 1\n" - "hRc th 1\n" - "clC ch 1\n" - "rFd er 1\n" - "twH th 1\n" - "kCw ka 1\n" - "mSd de 1\n" - "Xnw an 1\n" - "fXm me 1\n" - "Twf wa 1\n" - "Fwj ij 1\n" - "bjJ ij 1\n" - "lbQ le 1\n" - "kvS ka 1\n" - "Smz sz 1\n" - "fBp pr 1\n" - "Nzz sz 1\n" - "bQp pr 1\n" - "vLx va 1\n" - "hVf th 1\n" - "yUj ij 1\n" - "cZd ch 1\n" - "gIy eg 1\n" - "hVq th 1\n" - "aQx an 1\n" - "Qfv va 1\n" - "lKb le 1\n" - "zhN th 1\n" - "Zbm me 1\n" - "Gcq ch 1\n" - "gbT ng 1\n" - "pYk ka 1\n" - "Xvd de 1\n" - "xMl le 1\n" - "uHb qu 1\n" - "bXf be 1\n" - "sNc ch 1\n" - "qVy qu 1\n" - "cpO ch 1\n" - "Fgb ng 1\n" - "eWl er 1\n" - "kKd de 1\n" - "Cbj ij 1\n" - "mfH me 1\n" - "qIa an 1\n" - "sfX st 1\n" - "snH an 1\n" - "Hjg ng 1\n" - "Lmf me 1\n" - "xgf ng 1\n" - "Evw va 1\n" - "wOk ka 1\n" - "Hjf ij 1\n" - "zuJ qu 1\n" - "fZm me 1\n" - "lNq qu 1\n" - "xUg ng 1\n" - "nLs an 1\n" - "jkS ij 1\n" - "Gvp va 1\n" - "jPd de 1\n" - "ywQ wa 1\n" - "qrG qu 1\n" - "bbH be 1\n" - "ghJ th 1\n" - "mMh th 1\n" - "Yvt th 1\n" - "xLq qu 1\n" - "Bdq qu 1\n" - "zJd sz 1\n" - "xRs st 1\n" - "vgP ng 1\n" - "Hhb th 1\n" - "npL an 1\n" - "vFp va 1\n" - 
"hSj th 1\n" - "bdC de 1\n" - "kGg ng 1\n" - "kVf ka 1\n" - "qvP qu 1\n" - "kwO ka 1\n" - "Jqt th 1\n" - "zWx sz 1\n" - "sQk st 1\n" - "hnV th 1\n" - "rrD er 1\n" - "jVh th 1\n" - "vvY va 1\n" - "bfI be 1\n" - "fSz sz 1\n" - "Czf sz 1\n" - "kWl le 1\n" - "jJc ch 1\n" - "Gwj ij 1\n" - "lFh th 1\n" - "Vpf fo 1\n" - "fkV ka 1\n" - "cYj ch 1\n" - "mrW er 1\n" - "hBb th 1\n" - "hJx th 1\n" - "wIq qu 1\n" - "cdA ch 1\n" - "wQy wa 1\n" - "wCq qu 1\n" - "wqZ qu 1\n" - "jfX ij 1\n" - "jtG th 1\n" - "xkJ ka 1\n" - "Qzf sz 1\n" - "gKs ng 1\n" - "Qzo on 1\n" - "bwI wa 1\n" - "Tsb st 1\n" - "vvX va 1\n" - "jlR le 1\n" - "qlQ qu 1\n" - "dbX de 1\n" - "Hfc ch 1\n" - "Bsj st 1\n" - "Yqk qu 1\n" - "Xnc ch 1\n" - "bzZ sz 1\n" - "dGt th 1\n" - "Xgg ng 1\n" - "jwE wa 1\n" - "Oyc ch 1\n" - "pQd de 1\n" - "jRy ij 1\n" - "pmX me 1\n" - "lZx le 1\n" - "gFq qu 1\n" - "mJd de 1\n" - "sKq qu 1\n" - "Ikj ij 1\n" - "zkG sz 1\n" - "wGf wa 1\n" - "qRp qu 1\n" - "xDn an 1\n" - "gvL ng 1\n" - "mGx me 1\n" - "iIj in 1\n" - "Gzd sz 1\n" - "bLx be 1\n" - "jUl le 1\n" - "Qvc ch 1\n" - "mVh th 1\n" - "uhF th 1\n" - "fVk ka 1\n" - "cnM ch 1\n" - "uFh th 1\n" - "mXf me 1\n" - "rCb er 1\n" - "nLw an 1\n" - "vfH fo 1\n" - "iqV qu 1\n" - "qhD th 1\n" - "sHx st 1\n" - "Ywy wa 1\n" - "mDx me 1\n" - "cBt th 1\n" - "Bmq qu 1\n" - "xRc ch 1\n" - "bSz sz 1\n" - "vCj ij 1\n" - "Tcv ch 1\n" - "aZq an 1\n" - "Jcx ch 1\n" - "nbF an 1\n" - "Qzb sz 1\n" - "vkQ ka 1\n" - "hzD th 1\n" - "xHp pr 1\n" - "hqX th 1\n" - "fEv va 1\n" - "yjF ij 1\n" - "Pjk ij 1\n" - "sfU st 1\n" - "bGc ch 1\n" - "mcX ch 1\n" - "pXc ch 1\n" - "yvS va 1\n" - "pMl le 1\n" - "wJs st 1\n" - "Vwq qu 1\n" - "yCw wa 1\n" - "qds qu 1\n" - "rRj er 1\n" - "Qhv th 1\n" - "ucG ch 1\n" - "oEh th 1\n" - "wQi in 1\n" - "lSg ng 1\n" - "Lqt th 1\n" - "nlH an 1\n" - "uqG qu 1\n" - "Oao an 1\n" - "hlX th 1\n" - "fPw wa 1\n" - "tIb th 1\n" - "zIq qu 1\n" - "qmG qu 1\n" - "xJm me 1\n" - "Vgw ng 1\n" - "Ukx ka 1\n" - "ztH th 1\n" - "lhP th 1\n" - "Jtk th 1\n" - "Hzd 
sz 1\n" - "yxQ ny 1\n" - "nrP an 1\n" - "fHh th 1\n" - "Yct th 1\n" - "Gqa an 1\n" - "Fgy ng 1\n" - "oBn an 1\n" - "vuC qu 1\n" - "Bnz an 1\n" - "vPu qu 1\n" - "xFf fo 1\n" - "jdJ de 1\n" - "fGf fo 1\n" - "Yjq qu 1\n" - "Qjp ij 1\n" - "xTj ij 1\n" - "vOq qu 1\n" - "vLw va 1\n" - "sMf st 1\n" - "oVl on 1\n" - "cwN ch 1\n" - "sgR ng 1\n" - "jjQ ij 1\n" - "wzR sz 1\n" - "zhY th 1\n" - "vbR va 1\n" - "wgW ng 1\n" - "qwX qu 1\n" - "Nxw wa 1\n" - "eQo er 1\n" - "mQp me 1\n" - "Kqh th 1\n" - "tvA th 1\n" - "dlJ le 1\n" - "yVx ny 1\n" - "sPf st 1\n" - "dQz sz 1\n" - "sZb st 1\n" - "zhS th 1\n" - "kWb ka 1\n" - "mqP qu 1\n" - "Ffk ka 1\n" - "xql qu 1\n" - "gqH qu 1\n" - "Tly le 1\n" - "kpL ka 1\n" - "qEg qu 1\n" - "bMg ng 1\n" - "xRj ij 1\n" - "xsC st 1\n" - "jlS le 1\n" - "lzM le 1\n" - "Pfb be 1\n" - "uJv qu 1\n" - "yVf ny 1\n" - "Zgq qu 1\n" - "xbS be 1\n" - "oFh th 1\n" - "xvb va 1\n" - "hcU th 1\n" - "wwU wa 1\n" - "yCg ng 1\n" - "mPz sz 1\n" - "sJd st 1\n" - "bmN me 1\n" - "uVc ch 1\n" - "qdS qu 1\n" - "Vwp pr 1\n" - "Vml le 1\n" - "Hqy qu 1\n" - "Lfz sz 1\n" - "Ayj ij 1\n" - "yxK ny 1\n" - "Hwv va 1\n" - "gIp ng 1\n" - "Zgt th 1\n" - "Xtw th 1\n" - "hLf th 1\n" - "Nkd de 1\n" - "jMs st 1\n" - "xFt th 1\n" - "xBw wa 1\n" - "wHd de 1\n" - "Qzz sz 1\n" - "gYt th 1\n" - "Pvk ka 1\n" - "pvY va 1\n" - "Jxt th 1\n" - "ugQ qu 1\n" - "Lqq qu 1\n" - "xlL le 1\n" - "wMb wa 1\n" - "Sbz sz 1\n" - "vEv va 1\n" - "qfz qu 1\n" - "gcS ch 1\n" - "tCq th 1\n" - "yHp pr 1\n" - "zkF sz 1\n" - "xuK qu 1\n" - "Tbf be 1\n" - "Ipg ng 1\n" - "Yzk sz 1\n" - "Qwz sz 1\n" - "pFj ij 1\n" - "jPm ij 1\n" - "Dpq qu 1\n" - "pJz sz 1\n" - "wpN pr 1\n" - "wzE sz 1\n" - "gqD qu 1\n" - "Xwm me 1\n" - "oQx on 1\n" - "lCp le 1\n" - "Mhk th 1\n" - "dTq qu 1\n" - "xUw wa 1\n" - "hgE th 1\n" - "gcB ch 1\n" - "hpJ th 1\n" - "mqK qu 1\n" - "gBn an 1\n" - "hIv th 1\n" - "lqD qu 1\n" - "wPx wa 1\n" - "sMt th 1\n" - "yXw wa 1\n" - "jKq qu 1\n" - "Lrz er 1\n" - "Hwj ij 1\n" - "yfW ny 1\n" - "Yyu qu 1\n" - "qYs qu 
1\n" - "yvR va 1\n" - "sRz st 1\n" - "Kyx ny 1\n" - "nxR an 1\n" - "cdJ ch 1\n" - "Nwc ch 1\n" - "tbE th 1\n" - "oeZ er 1\n" - "bcQ ch 1\n" - "Swb wa 1\n" - "Ikq qu 1\n" - "Bvz sz 1\n" - "zhF th 1\n" - "Xqy qu 1\n" - "kKb ka 1\n" - "Wdk de 1\n" - "wpP pr 1\n" - "kQy ka 1\n" - "Bqe qu 1\n" - "qfZ qu 1\n" - "pPw pr 1\n" - "Aoh th 1\n" - "plJ le 1\n" - "Ynv an 1\n" - "jMh th 1\n" - "bQg ng 1\n" - "afM an 1\n" - "jvO ij 1\n" - "eHf er 1\n" - "hQg th 1\n" - "kqY qu 1\n" - "zJq qu 1\n" - "pYh th 1\n" - "qeM qu 1\n" - "Kpk ka 1\n" - "kfW ka 1\n" - "Wds st 1\n" - "bNc ch 1\n" - "vBx va 1\n" - "suJ qu 1\n" - "qEx qu 1\n" - "rfZ er 1\n" - "oHg ng 1\n" - "eFw er 1\n" - "fPp pr 1\n" - "kDb ka 1\n" - "tZn th 1\n" - "dcK ch 1\n" - "yWv va 1\n" - "Uxv va 1\n" - "yQe er 1\n" - "Zjq qu 1\n" - "Wjv ij 1\n" - "ygO ng 1\n" - "ojQ on 1\n" - "Kwc ch 1\n" - "pFg ng 1\n" - "sMd st 1\n" - "Mfq qu 1\n" - "Mzy sz 1\n" - "Nwp pr 1\n" - "ywT wa 1\n" - "wLq qu 1\n" - "Hqm qu 1\n" - "qsC qu 1\n" - "bNn an 1\n" - "bUv va 1\n" - "nRc ch 1\n" - "Rlk le 1\n" - "Bqp qu 1\n" - "cfI ch 1\n" - "mVq qu 1\n" - "qGj qu 1\n" - "vlX le 1\n" - "kfG ka 1\n" - "wVd de 1\n" - "cdE ch 1\n" - "hzE th 1\n" - "Dhv th 1\n" - "bzj sz 1\n" - "vvL va 1\n" - "bzQ sz 1\n" - "wVb wa 1\n" - "Zxl le 1\n" - "zLw sz 1\n" - "hTq th 1\n" - "Vqp qu 1\n" - "hmW th 1\n" - "flD le 1\n" - "Kcd ch 1\n" - "pDq qu 1\n" - "kvY ka 1\n" - "cQl ch 1\n" - "Ixk ka 1\n" - "sGf st 1\n" - "gFh th 1\n" - "Rkd de 1\n" - "qHl qu 1\n" - "rCg ng 1\n" - "qBn an 1\n" - "sJw st 1\n" - "cWj ch 1\n" - "zXp sz 1\n" - "Hhl th 1\n" - "hjP th 1\n" - "qlZ qu 1\n" - "Hxr er 1\n" - "zrE er 1\n" - "gkH ng 1\n" - "uHk qu 1\n" - "Gzm sz 1\n" - "cBc ch 1\n" - "zff sz 1\n" - "zLs st 1\n" - "Uqy qu 1\n" - "vkD ka 1\n" - "fqX qu 1\n" - "hLj th 1\n" - "fYu qu 1\n" - "jKw ij 1\n" - "jIb ij 1\n" - "nrU an 1\n" - "fFp pr 1\n" - "sbC st 1\n" - "mGv va 1\n" - "fXp pr 1\n" - "Pkv ka 1\n" - "Cqe qu 1\n" - "cCx ch 1\n" - "rNq qu 1\n" - "Zwf wa 1\n" - "Jgc ch 1\n" - "xlQ le 1\n" 
- "gBz ng 1\n" - "cIx ch 1\n" - "odQ on 1\n" - "Qnz an 1\n" - "Uzx sz 1\n" - "Jpt th 1\n" - "gxX ng 1\n" - "Zkd de 1\n" - "Xkk ka 1\n" - "hRv th 1\n" - "ycV ch 1\n" - "zMm sz 1\n" - "eBq qu 1\n" - "gHd ng 1\n" - "bxU be 1\n" - "xdK de 1\n" - "mQc ch 1\n" - "tYj th 1\n" - "hlF th 1\n" - "cRz ch 1\n" - "lGz le 1\n" - "zFz ze 1\n" - "qOp qu 1\n" - "Ggc ch 1\n" - "oGm on 1\n" - "Xnp an 1\n" - "wYg ng 1\n" - "wuJ qu 1\n" - "sNs st 1\n" - "zqU qu 1\n" - "kCp ka 1\n" - "Whw th 1\n" - "nQx an 1\n" - "vwA va 1\n" - "Vcg ch 1\n" - "kWj ij 1\n" - "Hqd qu 1\n" - "Cpy pr 1\n" - "zcL ch 1\n" - "cfF ch 1\n" - "kXn an 1\n" - "aXj an 1\n" - "Swk ka 1\n" - "fhq th 1\n" - "Vxi in 1\n" - "Gqu un 1\n" - "Uxd de 1\n" - "zdK sz 1\n" - "hZq th 1\n" - "mwJ me 1\n" - "cvD ch 1\n" - "lbZ le 1\n" - "Pzl le 1\n" - "hdO th 1\n" - "hJn th 1\n" - "qWp qu 1\n" - "dXy de 1\n" - "fuU qu 1\n" - "fXy ny 1\n" - "xnL an 1\n" - "gMf ng 1\n" - "rNf er 1\n" - "xQh th 1\n" - "kqH qu 1\n" - "rFz er 1\n" - "vpT va 1\n" - "Nwy wa 1\n" - "yqA qu 1\n" - "vhO th 1\n" - "kVh th 1\n" - "nYb an 1\n" - "jvN ij 1\n" - "bIf be 1\n" - "qqS qu 1\n" - "jbF ij 1\n" - "gMk ng 1\n" - "bTd de 1\n" - "Rhd th 1\n" - "tWq th 1\n" - "gLz ng 1\n" - "fsD st 1\n" - "uMt th 1\n" - "yHq qu 1\n" - "Xgj ng 1\n" - "Lmm me 1\n" - "vkU ka 1\n" - "lAx le 1\n" - "Kzd sz 1\n" - "hKm th 1\n" - "kQd de 1\n" - "gFc ch 1\n" - "wyX wa 1\n" - "zfU sz 1\n" - "xpU pr 1\n" - "ywJ wa 1\n" - "Ayq qu 1\n" - "gIu qu 1\n" - "zuQ qu 1\n" - "Vfn an 1\n" - "vBn an 1\n" - "Hty th 1\n" - "gRv ng 1\n" - "pTb pr 1\n" - "Uqx qu 1\n" - "vTn an 1\n" - "vJc ch 1\n" - "Uiw in 1\n" - "Jlp le 1\n" - "zPq qu 1\n" - "rCx er 1\n" - "lqS qu 1\n" - "zlZ le 1\n" - "zOw sz 1\n" - "klK le 1\n" - "kfQ ka 1\n" - "uJx qu 1\n" - "pkP ka 1\n" - "Gqz qu 1\n" - "Jlc ch 1\n" - "yyD ny 1\n" - "jhX th 1\n" - "crV ch 1\n" - "Dww wa 1\n" - "yjw ij 1\n" - "qpX qu 1\n" - "Qmd de 1\n" - "yWz sz 1\n" - "wPd de 1\n" - "Uqk qu 1\n" - "nbR an 1\n" - "Ydc ch 1\n" - "qQl qu 1\n" - "pmD me 1\n" - 
"Jkj ka 1\n" - "jTk ka 1\n" - "wYf wa 1\n" - "Zzx sz 1\n" - "rkQ er 1\n" - "bDp pr 1\n" - "qSs qu 1\n" - "gXr ng 1\n" - "cZb ch 1\n" - "Ngp ng 1\n" - "hqQ th 1\n" - "Wvw va 1\n" - "Wbw wa 1\n" - "wvK va 1\n" - "cJf ch 1\n" - "Mwd de 1\n" - "ddJ de 1\n" - "iwE in 1\n" - "bxX be 1\n" - "jxT ij 1\n" - "Ycn ch 1\n" - "wMf wa 1\n" - "bqD qu 1\n" - "yqI qu 1\n" - "dRj de 1\n" - "wYy wa 1\n" - "Txz sz 1\n" - "vrN er 1\n" - "qVu un 1\n" - "mRj ij 1\n" - "Fjx ij 1\n" - "fyQ ny 1\n" - "xeI er 1\n" - "Wqf qu 1\n" - "Jly le 1\n" - "jDb ij 1\n" - "Yzu qu 1\n" - "Bxm me 1\n" - "wLj ij 1\n" - "bqc ch 1\n" - "sgK ng 1\n" - "kqW qu 1\n" - "Zsn an 1\n" - "Fqq qu 1\n" - "rXz er 1\n" - "lJq qu 1\n" - "jEh th 1\n" - "nCb an 1\n" - "Xrd er 1\n" - "Rzh th 1\n" - "gfW ng 1\n" - "Xtl th 1\n" - "mTx me 1\n" - "ufA qu 1\n" - "wjQ ij 1\n" - "xlW le 1\n" - "dqH qu 1\n" - "xhM th 1\n" - "Xwt th 1\n" - "dnW an 1\n" - "Rfz sz 1\n" - "fKp pr 1\n" - "kFw ka 1\n" - "Quv qu 1\n" - "mXw me 1\n" - "Vkw ka 1\n" - "tFh ch 1\n" - "hIu th 1\n" - "lTf le 1\n" - "Mwv va 1\n" - "wvT va 1\n" - "kKp ka 1\n" - "tRv th 1\n" - "wXo on 1\n" - "vzL sz 1\n" - "Jcf ch 1\n" - "Tbq qu 1\n" - "jdQ de 1\n" - "Rbx be 1\n" - "Jrm er 1\n" - "sRj st 1\n" - "zWz sz 1\n" - "qnE an 1\n" - "Kcf ch 1\n" - "Qqm qu 1\n" - "fpI pr 1\n" - "iNw in 1\n" - "ujE qu 1\n" - "qHv qu 1\n" - "Jvx va 1\n" - "hHc th 1\n" - "fvJ va 1\n" - "nqY an 1\n" - "wpE wa 1\n" - "Hws st 1\n" - "xzI sz 1\n" - "Cgg ng 1\n" - "cWd ch 1\n" - "quV un 1\n" - "bjN ij 1\n" - "xQp pr 1\n" - "bxE be 1\n" - "uVk qu 1\n" - "Wrl er 1\n" - "Lrx er 1\n" - "Iwl le 1\n" - "aqB an 1\n" - "Vcp ch 1\n" - "Wwt th 1\n" - "aGx an 1\n" - "fPn an 1\n" - "mFq qu 1\n" - "qgd qu 1\n" - "Zsd st 1\n" - "Vxs sz 1\n" - "Khq th 1\n" - "wSs st 1\n" - "oGq qu 1\n" - "Yzv sz 1\n" - "dqX qu 1\n" - "mpQ me 1\n" - "Kcp ch 1\n" - "swD st 1\n" - "rZg ng 1\n" - "jYm ij 1\n" - "uJl qu 1\n" - "vWv va 1\n" - "svO st 1\n" - "pFd de 1\n" - "Yjx ij 1\n" - "tpI th 1\n" - "dVt th 1\n" - "sNm st 1\n" - "lKt 
th 1\n" - "nvU an 1\n" - "Hxf fo 1\n" - "puW qu 1\n" - "wJg ng 1\n" - "gxR ng 1\n" - "fAg ng 1\n" - "Yqe qu 1\n" - "Pwz sz 1\n" - "hmC th 1\n" - "ylJ le 1\n" - "mqT qu 1\n" - "cCf ch 1\n" - "pZg ng 1\n" - "aFx an 1\n" - "oYq qu 1\n" - "fPj ij 1\n" - "dJt th 1\n" - "xwn an 1\n" - "Ccb ch 1\n" - "wFn an 1\n" - "wrY er 1\n" - "Cdh th 1\n" - "hLc th 1\n" - "Zxg ng 1\n" - "Mxc ch 1\n" - "hcY th 1\n" - "zVw sz 1\n" - "hkV th 1\n" - "txE th 1\n" - "yvT va 1\n" - "Mlw le 1\n" - "ztF th 1\n" - "fGd de 1\n" - "zjE sz 1\n" - "gjM ng 1\n" - "jwP ij 1\n" - "Kxt th 1\n" - "yFg ng 1\n" - "Wcg ch 1\n" - "thZ ch 1\n" - "hzQ th 1\n" - "Jtg th 1\n" - "yvK va 1\n" - "zVz sz 1\n" - "Pwb wa 1\n" - "xqD qu 1\n" - "uyQ qu 1\n" - "gCm ng 1\n" - "zjU sz 1\n" - "xGq qu 1\n" - "Mqy qu 1\n" - "Ocx ch 1\n" - "sqM qu 1\n" - "lRb le 1\n" - "tfU th 1\n" - "vZg ng 1\n" - "fZc ch 1\n" - "gpZ ng 1\n" - "Fpf pr 1\n" - "qtQ th 1\n" - "mhZ th 1\n" - "bqF qu 1\n" - "fgG ng 1\n" - "woT on 1\n" - "zSb sz 1\n" - "wxS wa 1\n" - "Wrf er 1\n" - "Oqk qu 1\n" - "xLc ch 1\n" - "Qzj sz 1\n" - "wXk ka 1\n" - "tdX th 1\n" - "Jqc ch 1\n" - "fXk ka 1\n" - "kBd de 1\n" - "iqW qu 1\n" - "Ocb ch 1\n" - "fUo on 1\n" - "jXk ij 1\n" - "hbI th 1\n" - "Zcg ch 1\n" - "zwS wa 1\n" - "cVm ch 1\n" - "vwj ij 1\n" - "gwG ng 1\n" - "zsM st 1\n" - "Pqo qu 1\n" - "hPj th 1\n" - "fwG wa 1\n" - "Xwh th 1\n" - "Wwh th 1\n" - "Vqw qu 1\n" - "vmY va 1\n" - "uvF qu 1\n" - "tfK th 1\n" - "Xbg ng 1\n" - "Nfn an 1\n" - "wpH pr 1\n" - "yJq qu 1\n" - "wqO qu 1\n" - "ncV ch 1\n" - "wgM ng 1\n" - "fQk ka 1\n" - "hvK th 1\n" - "qLr qu 1\n" - "Wce ch 1\n" - "kFn an 1\n" - "rBm er 1\n" - "mdV de 1\n" - "jFc ch 1\n" - "knX an 1\n" - "nMf an 1\n" - "sCc ch 1\n" - "pCq qu 1\n" - "uJt th 1\n" - "Cfk ka 1\n" - "Cxb be 1\n" - "fOw wa 1\n" - "aJz an 1\n" - "gLt th 1\n" - "bmX me 1\n" - "Yfo on 1\n" - "dJf de 1\n" - "Eay an 1\n" - "qSd qu 1\n" - "mjQ ij 1\n" - "pNk ka 1\n" - "Nvh th 1\n" - "xkX ka 1\n" - "Jwx wa 1\n" - "jvL ij 1\n" - "fpH pr 1\n" - "pxO pr 
1\n" - "vPx va 1\n" - "dWu qu 1\n" - "hbR th 1\n" - "woE on 1\n" - "gtX th 1\n" - "bfF be 1\n" - "mvW va 1\n" - "xsM st 1\n" - "wLv va 1\n" - "wHh th 1\n" - "sCn an 1\n" - "pLw pr 1\n" - "kXw ka 1\n" - "xVl le 1\n" - "hCc th 1\n" - "oUk on 1\n" - "zcF ch 1\n" - "sMv st 1\n" - "drZ er 1\n" - "wfO wa 1\n" - "yFv va 1\n" - "hXa th 1\n" - "qMu un 1\n" - "fCv va 1\n" - "fwC wa 1\n" - "oTg ng 1\n" - "Fkm ka 1\n" - "eQt th 1\n" - "Pxd de 1\n" - "kjG ij 1\n" - "tGs th 1\n" - "dqB qu 1\n" - "fmX me 1\n" - "xYi in 1\n" - "kIk ka 1\n" - "vDd de 1\n" - "kvC ka 1\n" - "qtZ th 1\n" - "fPc ch 1\n" - "dpN de 1\n" - "hNr th 1\n" - "Znj an 1\n" - "Hke er 1\n" - "Iqp qu 1\n" - "wfN wa 1\n" - "Vhx th 1\n" - "Dgk ng 1\n" - "mkQ ka 1\n" - "Wxd de 1\n" - "Icx ch 1\n" - "yYt th 1\n" - "tqx th 1\n" - "Zvf va 1\n" - "sxU st 1\n" - "Lqk qu 1\n" - "nfI an 1\n" - "jyq qu 1\n" - "Wvn an 1\n" - "Sdv de 1\n" - "uYc ch 1\n" - "Qgm ng 1\n" - "cXa ch 1\n" - "wBx wa 1\n" - "pYx pr 1\n" - "jWl le 1\n" - "Kfw wa 1\n" - "qjJ qu 1\n" - "Pjj ij 1\n" - "ajX an 1\n" - "sXd st 1\n" - "xHg ng 1\n" - "xhA th 1\n" - "rGm er 1\n" - "Qtm th 1\n" - "srY er 1\n" - "qPx qu 1\n" - "wRz sz 1\n" - "wOg wa 1\n" - "fLg ng 1\n" - "hQt th 1\n" - "jhW th 1\n" - "Cwk ka 1\n" - "zWl le 1\n" - "wJc ch 1\n" - "Pxv va 1\n" - "npI an 1\n" - "lnW an 1\n" - "kqy qu 1\n" - "ywg ng 1\n" - "sCd st 1\n" - "qfF qu 1\n" - "qpg qu 1\n" - "Mbx be 1\n" - "nwN an 1\n" - "wLs st 1\n" - "Wcv ch 1\n" - "Vvr er 1\n" - "Vkx ka 1\n" - "dmU de 1\n" - "fGs st 1\n" - "gJz ng 1\n" - "dFz sz 1\n" - "qCf qu 1\n" - "lvW le 1\n" - "Svb va 1\n" - "xJr er 1\n" - "uZf qu 1\n" - "Tjc ch 1\n" - "pIj ij 1\n" - "bVg ng 1\n" - "vdO de 1\n" - "lTq qu 1\n" - "bMh th 1\n" - "nDm an 1\n" - "Tzb sz 1\n" - "pCw pr 1\n" - "Qkg ng 1\n" - "fpY pr 1\n" - "yQj ij 1\n" - "qiC qu 1\n" - "mQi in 1\n" - "wUq qu 1\n" - "kVj ij 1\n" - "tjQ th 1\n" - "mXj ij 1\n" - "Xfd de 1\n" - "cgI ch 1\n" - "Pkj ij 1\n" - "jjF ij 1\n" - "jrJ er 1\n" - "qwZ qu 1\n" - "Rtz th 1\n" - "fHb be 1\n" 
- "Hgx ng 1\n" - "Dzf sz 1\n" - "cbE ch 1\n" - "Xfs st 1\n" - "Rjm ij 1\n" - "fmY me 1\n" - "wYj ij 1\n" - "uFp qu 1\n" - "vWm va 1\n" - "yVc ch 1\n" - "cgL ch 1\n" - "zmR sz 1\n" - "zfB sz 1\n" - "znH an 1\n" - "hgG th 1\n" - "xuE qu 1\n" - "Bsl le 1\n" - "oWx on 1\n" - "Pjl le 1\n" - "Jdf de 1\n" - "Xmp me 1\n" - "sgO ng 1\n" - "hCj th 1\n" - "wtR th 1\n" - "fDs st 1\n" - "bQb be 1\n" - "quM un 1\n" - "fLl le 1\n" - "Nhp th 1\n" - "znU an 1\n" - "sdS st 1\n" - "wWu qu 1\n" - "tFq th 1\n" - "cFq ch 1\n" - "Wwl le 1\n" - "Lqy qu 1\n" - "nqQ an 1\n" - "zmD sz 1\n" - "Gyx ny 1\n" - "bkR ka 1\n" - "lQw le 1\n" - "Pqm qu 1\n" - "Fwk ka 1\n" - "tHt th 1\n" - "jyL ij 1\n" - "qxA qu 1\n" - "mrC er 1\n" - "qzL qu 1\n" - "jJg ng 1\n" - "jfS ij 1\n" - "qMh th 1\n" - "mlV le 1\n" - "bkJ ka 1\n" - "knH an 1\n" - "Uqt th 1\n" - "cuF ch 1\n" - "iYq qu 1\n" - "fUe er 1\n" - "sBb st 1\n" - "Nhx th 1\n" - "rhP th 1\n" - "dWp de 1\n" - "Yvf va 1\n" - "Rxr er 1\n" - "kzG sz 1\n" - "xuZ qu 1\n" - "xvD va 1\n" - "fwq qu 1\n" - "hjJ th 1\n" - "kZr er 1\n" - "vJn an 1\n" - "xnO an 1\n" - "vcA ch 1\n" - "mfK me 1\n" - "vjS ij 1\n" - "Nvp va 1\n" - "dfB de 1\n" - "Qsb st 1\n" - "dXp pr 1\n" - "zRl le 1\n" - "Ejq qu 1\n" - "aGz an 1\n" - "nHg an 1\n" - "bvA va 1\n" - "Bfd de 1\n" - "zVg ng 1\n" - "zsY st 1\n" - "hVz th 1\n" - "Pjm ij 1\n" - "sXi in 1\n" - "iKj in 1\n" - "qaE an 1\n" - "Cfj ij 1\n" - "zMc ch 1\n" - "mgZ ng 1\n" - "vgA ng 1\n" - "iwJ in 1\n" - "vGx va 1\n" - "tfY th 1\n" - "ljH le 1\n" - "zGj sz 1\n" - "bmK me 1\n" - "nUq an 1\n" - "zRt th 1\n" - "tGj th 1\n" - "zVd sz 1\n" - "jSr er 1\n" - "fNq qu 1\n" - "xTg ng 1\n" - "nqE an 1\n" - "Wng an 1\n" - "zVv sz 1\n" - "gVs ng 1\n" - "fNd de 1\n" - "qNw qu 1\n" - "Znc ch 1\n" - "uJs qu 1\n" - "yvJ va 1\n" - "xlM le 1\n" - "Jzc ch 1\n" - "vRh th 1\n" - "fcK ch 1\n" - "wVn an 1\n" - "rWw er 1\n" - "cHk ch 1\n" - "vOx va 1\n" - "iUa an 1\n" - "nWn an 1\n" - "zqZ qu 1\n" - "xFj ij 1\n" - "nCg an 1\n" - "fYj ij 1\n" - "Vsx st 1\n" - 
"mtM th 1\n" - "mhG th 1\n" - "jtN th 1\n" - "hcC th 1\n" - "Nwk ka 1\n" - "dXu qu 1\n" - "mJq qu 1\n" - "xsO st 1\n" - "qRn an 1\n" - "Rnj an 1\n" - "kmP ka 1\n" - "Xtg th 1\n" - "Gvh th 1\n" - "jqv qu 1\n" - "cVl ch 1\n" - "cdI ch 1\n" - "zdE sz 1\n" - "hZk th 1\n" - "Bdx de 1\n" - "hHn th 1\n" - "hkG th 1\n" - "vxJ va 1\n" - "lrA er 1\n" - "lrT er 1\n" - "hjV th 1\n" - "qbI qu 1\n" - "mTg ng 1\n" - "fmV me 1\n" - "rDk er 1\n" - "dNd de 1\n" - "Gzj sz 1\n" - "aVj an 1\n" - "vNr er 1\n" - "kXa an 1\n" - "rGs er 1\n" - "xaX an 1\n" - "crG ch 1\n" - "qJa an 1\n" - "jDt th 1\n" - "Mfx fo 1\n" - "xEa an 1\n" - "Qvz sz 1\n" - "wRg ng 1\n" - "pFc ch 1\n" - "Cpv va 1\n" - "rJk er 1\n" - "fbQ be 1\n" - "Xzg ng 1\n" - "qFy qu 1\n" - "Zfj ij 1\n" - "twE th 1\n" - "Oaq an 1\n" - "ysY st 1\n" - "wdZ de 1\n" - "gmO ng 1\n" - "wGn an 1\n" - "wRk ka 1\n" - "gqS qu 1\n" - "Agq qu 1\n" - "Twv va 1\n" - "Qnv an 1\n" - "bVv va 1\n" - "cDw ch 1\n" - "tGq th 1\n" - "fbq qu 1\n" - "Tvw va 1\n" - "mNv va 1\n" - "dtE th 1\n" - "pzP sz 1\n" - "Vsw sz 1\n" - "qGq qu 1\n" - "qPc ch 1\n" - "qyC qu 1\n" - "nxF an 1\n" - "jDl le 1\n" - "jHt th 1\n" - "fxZ fo 1\n" - "sQc ch 1\n" - "nmH an 1\n" - "xrD er 1\n" - "hMh th 1\n" - "vHk ka 1\n" - "hmS th 1\n" - "Xdt th 1\n" - "Xwl le 1\n" - "uJr qu 1\n" - "sPk st 1\n" - "Xjp ij 1\n" - "Uqi qu 1\n" - "kgD ng 1\n" - "jgI ng 1\n" - "uFw qu 1\n" - "xNd de 1\n" - "dhI th 1\n" - "Lxo on 1\n" - "Sfq qu 1\n" - "zRp sz 1\n" - "xwK wa 1\n" - "fmB me 1\n" - "vrV er 1\n" - "qSf qu 1\n" - "jPn an 1\n" - "Hbp pr 1\n" - "bJt th 1\n" - "lqQ qu 1\n" - "xSd de 1\n" - "dMk de 1\n" - "vVz sz 1\n" - "vkK ka 1\n" - "Xds de 1\n" - "ybB be 1\n" - "gpE ng 1\n" - "qcC ch 1\n" - "pxL pr 1\n" - "gPm ng 1\n" - "Bpd de 1\n" - "dpB de 1\n" - "jlJ le 1\n" - "pkC ka 1\n" - "ypP pr 1\n" - "Nqm qu 1\n" - "tgZ th 1\n" - "Eqo qu 1\n" - "dRk de 1\n" - "Ubc ch 1\n" - "xhY th 1\n" - "lJd le 1\n" - "pvN va 1\n" - "Qfc ch 1\n" - "Dbw wa 1\n" - "sFc ch 1\n" - "wkX ka 1\n" - "xpR pr 1\n" - "pjJ 
ij 1\n" - "gkQ ng 1\n" - "rMf er 1\n" - "Jsn an 1\n" - "xOw wa 1\n" - "Dqu un 1\n" - "nbJ an 1\n" - "gvF ng 1\n" - "Fnp an 1\n" - "jpV ij 1\n" - "qtD th 1\n" - "uEj qu 1\n" - "yhY th 1\n" - "Ohq th 1\n" - "nXy an 1\n" - "pdU de 1\n" - "mDz sz 1\n" - "iVk in 1\n" - "Hqq qu 1\n" - "xpZ po 1\n" - "aeU an 1\n" - "sjZ st 1\n" - "sGp st 1\n" - "Wqn an 1\n" - "xqS qu 1\n" - "Jjc ch 1\n" - "qPp qu 1\n" - "sXz st 1\n" - "xvP va 1\n" - "Wbq qu 1\n" - "tjK th 1\n" - "lhH th 1\n" - "hqV th 1\n" - "dYf de 1\n" - "pFk ka 1\n" - "sFq qu 1\n" - "uHq qu 1\n" - "vhA th 1\n" - "jlE le 1\n" - "sqB qu 1\n" - "qnr an 1\n" - "Fxq qu 1\n" - "zHn an 1\n" - "pdB de 1\n" - "wHc ch 1\n" - "Pxj ij 1\n" - "gHx ng 1\n" - "nqJ an 1\n" - "oqX qu 1\n" - "Xby be 1\n" - "tbI th 1\n" - "kSf ka 1\n" - "vhD th 1\n" - "qHj qu 1\n" - "Npx pr 1\n" - "Qzp sz 1\n" - "xiU in 1\n" - "rjZ er 1\n" - "wjU ij 1\n" - "jtB th 1\n" - "Ygq qu 1\n" - "aQf an 1\n" - "xWu qu 1\n" - "aVf an 1\n" - "pQx pr 1\n" - "Lnw an 1\n" - "qWa an 1\n" - "uHp qu 1\n" - "Lvp va 1\n" - "Jxp pr 1\n" - "zHk sz 1\n" - "wvU va 1\n" - "Wqh th 1\n" - "hVs th 1\n" - "Xgy ng 1\n" - "dZj de 1\n" - "uCq qu 1\n" - "Gxl le 1\n" - "Hlg ng 1\n" - "Wqd qu 1\n" - "Dxz sz 1\n" - "hdN th 1\n" - "pvM va 1\n" - "Wxk ka 1\n" - "qWd qu 1\n" - "fiO in 1\n" - "fDw wa 1\n" - "bHj ij 1\n" - "iVh th 1\n" - "Pmg ng 1\n" - "fXc ch 1\n" - "xfL fo 1\n" - "yGc ch 1\n" - "yBn an 1\n" - "hCk th 1\n" - "Llk le 1\n" - "yMh th 1\n" - "qrY qu 1\n" - "gdX ng 1\n" - "qxG qu 1\n" - "Zmt th 1\n" - "Rzw sz 1\n" - "nBd an 1\n" - "mWl le 1\n" - "xuI qu 1\n" - "jyF ij 1\n" - "bVu qu 1\n" - "ygP ng 1\n" - "dFq qu 1\n" - "jFm ij 1\n" - "Rml le 1\n" - "klH le 1\n" - "Vff fo 1\n" - "Kzk sz 1\n" - "Lhv th 1\n" - "cSj ch 1\n" - "Qrh th 1\n" - "uBw qu 1\n" - "sCk ka 1\n" - "qyS qu 1\n" - "cXu ch 1\n" - "wfM wa 1\n" - "kdK de 1\n" - "cXj ch 1\n" - "ctZ th 1\n" - "fjI ij 1\n" - "cgS ch 1\n" - "mwL me 1\n" - "kzU sz 1\n" - "cZr ch 1\n" - "fqU qu 1\n" - "qJi qu 1\n" - "gDd ng 1\n" - "bKq qu 
1\n" - "aUw an 1\n" - "sxE st 1\n" - "mxU me 1\n" - "cwY ch 1\n" - "fpC pr 1\n" - "sRw st 1\n" - "Kkq qu 1\n" - "wxA wa 1\n" - "gQf ng 1\n" - "pPb pr 1\n" - "Hwu ku 1\n" - "suX qu 1\n" - "lqY qu 1\n" - "sxW st 1\n" - "aFh th 1\n" - "lWq qu 1\n" - "pbZ pr 1\n" - "bqm qu 1\n" - "kJk ka 1\n" - "qtT th 1\n" - "zMd sz 1\n" - "hGs th 1\n" - "xlH le 1\n" - "dmq qu 1\n" - "Xrk er 1\n" - "Ocf ch 1\n" - "mKc ch 1\n" - "zrA er 1\n" - "gxE ng 1\n" - "qWu un 1\n" - "xQf fo 1\n" - "Xoz on 1\n" - "fmP me 1\n" - "kdD de 1\n" - "bBz sz 1\n" - "wpA pr 1\n" - "nMb an 1\n" - "tHq th 1\n" - "jMt th 1\n" - "Svq qu 1\n" - "jMl le 1\n" - "wBc ch 1\n" - "ymX me 1\n" - "hcB th 1\n" - "brU er 1\n" - "paX an 1\n" - "hdG th 1\n" - "Fwp pr 1\n" - "sbY st 1\n" - "mhB th 1\n" - "pfZ pr 1\n" - "Vmh th 1\n" - "sCq qu 1\n" - "Zfw wa 1\n" - "Ljm ij 1\n" - "pqG qu 1\n" - "dpK de 1\n" - "tfG th 1\n" - "ijR in 1\n" - "iJy in 1\n" - "qfN qu 1\n" - "crS ch 1\n" - "cgT ch 1\n" - "wOt th 1\n" - "fnE an 1\n" - "hWp th 1\n" - "Zpw pr 1\n" - "wdO de 1\n" - "vYy va 1\n" - "qrI qu 1\n" - "dmF de 1\n" - "jhJ th 1\n" - "wHr er 1\n" - "Jzb sz 1\n" - "fEy ny 1\n" - "hhZ th 1\n" - "wpQ pr 1\n" - "qYg qu 1\n" - "qtY th 1\n" - "Kdx de 1\n" - "qfj qu 1\n" - "Rbv va 1\n" - "bbO be 1\n" - "Xcn ch 1\n" - "kCd de 1\n" - "Gcx ch 1\n" - "zmC sz 1\n" - "wJl le 1\n" - "qDc ch 1\n" - "Jzr er 1\n" - "Yrw er 1\n" - "Ksx st 1\n" - "uKx qu 1\n" - "jSc ch 1\n" - "Ljz sz 1\n" - "xdB de 1\n" - "zWb sz 1\n" - "vwY va 1\n" - "vMd de 1\n" - "dbH de 1\n" - "Qsu qu 1\n" - "wHq qu 1\n" - "gJh th 1\n" - "wZp pr 1\n" - "btO th 1\n" - "Xmv va 1\n" - "qpd qu 1\n" - "Jnw an 1\n" - "vlD le 1\n" - "xcX ch 1\n" - "Yvv va 1\n" - "Zft th 1\n" - "Hqz qu 1\n" - "xqM qu 1\n" - "Hth ch 1\n" - "ztL th 1\n" - "iOj in 1\n" - "cIz ch 1\n" - "hhC th 1\n" - "tvX th 1\n" - "Fgk ng 1\n" - "mjC ij 1\n" - "Ojp ij 1\n" - "kvI ka 1\n" - "zqb qu 1\n" - "qqW qu 1\n" - "iHg ng 1\n" - "jxJ ij 1\n" - "Gbz sz 1\n" - "nQc ch 1\n" - "pXq qu 1\n" - "jDd de 1\n" - "qQr qu 1\n" 
- "vJx va 1\n" - "zbY sz 1\n" - "fRm me 1\n" - "qEl qu 1\n" - "oaZ an 1\n" - "vjF ij 1\n" - "lqX qu 1\n" - "pSd de 1\n" - "bXq qu 1\n" - "jJv ij 1\n" - "Wrv er 1\n" - "Kpw pr 1\n" - "xaY an 1\n" - "jCv ij 1\n" - "fbR be 1\n" - "pTp pr 1\n" - "wdI de 1\n" - "qfQ qu 1\n" - "Rrq qu 1\n" - "dbF de 1\n" - "bzF sz 1\n" - "qwO qu 1\n" - "vrY er 1\n" - "twI th 1\n" - "zLf sz 1\n" - "bVc ch 1\n" - "Xnl an 1\n" - "Wgb ng 1\n" - "fuS qu 1\n" - "vIf va 1\n" - "Twt th 1\n" - "nKd an 1\n" - "Dkh th 1\n" - "uBd qu 1\n" - "kOz ka 1\n" - "zOj sz 1\n" - "nzE an 1\n" - "Zbh th 1\n" - "qMg qu 1\n" - "gfC ng 1\n" - "vgD ng 1\n" - "ytC th 1\n" - "mqM qu 1\n" - "Kjn an 1\n" - "xbX be 1\n" - "zfH sz 1\n" - "mwH me 1\n" - "zQb sz 1\n" - "Gzk sz 1\n" - "qsW qu 1\n" - "kNs st 1\n" - "Lqz qu 1\n" - "nmW an 1\n" - "qNx qu 1\n" - "zcQ ch 1\n" - "qMz qu 1\n" - "wGz sz 1\n" - "uCd qu 1\n" - "Bpv pr 1\n" - "qNe qu 1\n" - "bpP pr 1\n" - "lXf le 1\n" - "cLq ch 1\n" - "pdX de 1\n" - "qzU qu 1\n" - "Kxd de 1\n" - "jvF ij 1\n" - "rFn an 1\n" - "Etq th 1\n" - "zYh th 1\n" - "Ksv st 1\n" - "fJk ka 1\n" - "fkC ka 1\n" - "mxK me 1\n" - "fbz sz 1\n" - "vrW er 1\n" - "mPq qu 1\n" - "yBt th 1\n" - "iCf in 1\n" - "srH er 1\n" - "hjB th 1\n" - "fcG ch 1\n" - "Ftg th 1\n" - "uBp qu 1\n" - "yqT qu 1\n" - "djF de 1\n" - "tgU th 1\n" - "Wrj er 1\n" - "xFc ch 1\n" - "ycC ch 1\n" - "eqA qu 1\n" - "pbG pr 1\n" - "Cwh th 1\n" - "fDk ka 1\n" - "wTz sz 1\n" - "xrW er 1\n" - "kQs st 1\n" - "wMl le 1\n" - "yCn nd 1\n" - "eGp er 1\n" - "uPv qu 1\n" - "Wqe qu 1\n" - "yiI in 1\n" - "rqF qu 1\n" - "Kjs st 1\n" - "lwK le 1\n" - "fjQ ij 1\n" - "uIq qu 1\n" - "dxR de 1\n" - "Gqj qu 1\n" - "nLb an 1\n" - "gRd ng 1\n" - "qyv qu 1\n" - "wtZ th 1\n" - "cRk ch 1\n" - "iKf in 1\n" - "hbK th 1\n" - "rqT qu 1\n" - "xmF me 1\n" - "vHt th 1\n" - "tqN th 1\n" - "vLv va 1\n" - "xvJ va 1\n" - "bgJ ng 1\n" - "Qjq qu 1\n" - "Lvb va 1\n" - "Hxg ng 1\n" - "tVq th 1\n" - "rhZ th 1\n" - "slL le 1\n" - "kdH de 1\n" - "Kfb be 1\n" - "Dfh th 1\n" - 
"Cqq qu 1\n" - "nQk an 1\n" - "Wnz an 1\n" - "Njj ij 1\n" - "bJf be 1\n" - "wRh th 1\n" - "Dpb pr 1\n" - "sPj st 1\n" - "Zpn an 1\n" - "mPj ij 1\n" - "Qcl ch 1\n" - "zCd sz 1\n" - "yrC er 1\n" - "hCb th 1\n" - "aBv an 1\n" - "yuG qu 1\n" - "fcN ch 1\n" - "bZp pr 1\n" - "Gtf th 1\n" - "wbW wa 1\n" - "vPq qu 1\n" - "Vtj th 1\n" - "kWq qu 1\n" - "Jbm me 1\n" - "Wmb me 1\n" - "pxY pr 1\n" - "hQx th 1\n" - "tNn th 1\n" - "qdx qu 1\n" - "cYv ch 1\n" - "zlX le 1\n" - "rwF er 1\n" - "cZm ch 1\n" - "ybJ be 1\n" - "qaB an 1\n" - "tVj th 1\n" - "zUg ng 1\n" - "cfC ch 1\n" - "hxB th 1\n" - "Tbz sz 1\n" - "oFn an 1\n" - "bTp pr 1\n" - "hBk th 1\n" - "hQe th 1\n" - "qBe de 1\n" - "dpC de 1\n" - "kpW ka 1\n" - "Zkj ij 1\n" - "Nwn an 1\n" - "grC ng 1\n" - "uXq qu 1\n" - "Uoy on 1\n" - "Zfu qu 1\n" - "xKb be 1\n" - "hSb th 1\n" - "bPc ch 1\n" - "qcg ch 1\n" - "xIu qu 1\n" - "gBv ng 1\n" - "gZm me 1\n" - "qPu un 1\n" - "Bfp pr 1\n" - "rxC er 1\n" - "sLk st 1\n" - "hGj th 1\n" - "qvR qu 1\n" - "qpR qu 1\n" - "vNn an 1\n" - "Dft th 1\n" - "nRq an 1\n" - "khR th 1\n" - "pqP qu 1\n" - "tNp th 1\n" - "Vwt th 1\n" - "xwA wa 1\n" - "wMn an 1\n" - "Snq an 1\n" - "dfD de 1\n" - "vGw va 1\n" - "Xqb qu 1\n" - "Kww wa 1\n" - "Qhx th 1\n" - "Oyx ny 1\n" - "dvB de 1\n" - "sVh th 1\n" - "Hcn ch 1\n" - "sbU st 1\n" - "fFw wa 1\n" - "kfT ka 1\n" - "rvW er 1\n" - "Yxw wa 1\n" - "nFk an 1\n" - "Lqd qu 1\n" - "hoQ th 1\n" - "Nfj ij 1\n" - "grH ng 1\n" - "cJk ch 1\n" - "Pnv an 1\n" - "Nqx qu 1\n" - "yfE ny 1\n" - "kmI ka 1\n" - "Gmz sz 1\n" - "bxS be 1\n" - "quU un 1\n" - "qYf qu 1\n" - "zKw sz 1\n" - "whK th 1\n" - "ofY on 1\n" - "prH er 1\n" - "jXz sz 1\n" - "vQm va 1\n" - "iWx in 1\n" - "bzC sz 1\n" - "nYx an 1\n" - "qaK an 1\n" - "Ggb ng 1\n" - "zSf sz 1\n" - "rQz er 1\n" - "hkW th 1\n" - "Vnl an 1\n" - "Gtd th 1\n" - "rMw er 1\n" - "wvX va 1\n" - "jyU ij 1\n" - "Qqp qu 1\n" - "Hnq an 1\n" - "bFb be 1\n" - "qkH qu 1\n" - "Wck ch 1\n" - "fMw wa 1\n" - "zgE ng 1\n" - "oJz on 1\n" - "xvH va 1\n" - "hQy 
th 1\n" - "cYf ch 1\n" - "cxD ch 1\n" - "yDs st 1\n" - "qBh th 1\n" - "cJx ch 1\n" - "dPj de 1\n" - "wWd de 1\n" - "rHn an 1\n" - "iyM in 1\n" - "yxD ny 1\n" - "kPc ch 1\n" - "cXv ch 1\n" - "Nmg ng 1\n" - "vkN ka 1\n" - "lFj le 1\n" - "ymU me 1\n" - "pZv va 1\n" - "gZt th 1\n" - "Jqy qu 1\n" - "qAz qu 1\n" - "Bcy ch 1\n" - "pqj qu 1\n" - "cqE ch 1\n" - "Rwv va 1\n" - "crM ch 1\n" - "Axz sz 1\n" - "Zjp ij 1\n" - "yxF ny 1\n" - "vZh th 1\n" - "sPb st 1\n" - "vCs st 1\n" - "fQq qu 1\n" - "qYq qu 1\n" - "hBp th 1\n" - "Jbk ka 1\n" - "gqK qu 1\n" - "krq qu 1\n" - "Cfz sz 1\n" - "mbJ me 1\n" - "fRq qu 1\n" - "Iwv va 1\n" - "uFn an 1\n" - "cYz ch 1\n" - "qDb qu 1\n" - "xHd de 1\n" - "qmI qu 1\n" - "ycE ch 1\n" - "Mhf th 1\n" - "iuE qu 1\n" - "gXf ng 1\n" - "lPy le 1\n" - "bPv va 1\n" - "jXh th 1\n" - "gOx ng 1\n" - "Nmv va 1\n" - "xDg ng 1\n" - "Cwd de 1\n" - "ljP le 1\n" - "wqV qu 1\n" - "nrE an 1\n" - "Kmw me 1\n" - "gJt th 1\n" - "tgB th 1\n" - "xzR sz 1\n" - "vJr er 1\n" - "aUi an 1\n" - "ynY an 1\n" - "bZv va 1\n" - "fFq qu 1\n" - "Sxg ng 1\n" - "qAc ch 1\n" - "iZv in 1\n" - "jXu qu 1\n" - "gpR ng 1\n" - "wVl le 1\n" - "dNj de 1\n" - "fBw wa 1\n" - "Mjy ij 1\n" - "kjZ ij 1\n" - "tLs th 1\n" - "iYj in 1\n" - "wbO wa 1\n" - "qXb qu 1\n" - "uJq qu 1\n" - "qKt th 1\n" - "vjO ij 1\n" - "wuD qu 1\n" - "blQ le 1\n" - "yfB ny 1\n" - "Qsk st 1\n" - "Uwm me 1\n" - "Zqg qu 1\n" - "nmY an 1\n" - "pXw pr 1\n" - "yVj ij 1\n" - "gIw ng 1\n" - "Hxk ka 1\n" - "Pgy ng 1\n" - "lQv le 1\n" - "bnK an 1\n" - "xtZ th 1\n" - "Qce ch 1\n" - "Njq qu 1\n" - "mvq qu 1\n" - "Mwz sz 1\n" - "Gtn th 1\n" - "fJh th 1\n" - "vJz sz 1\n" - "gDk ng 1\n" - "dLw de 1\n" - "oeU er 1\n" - "cvY ch 1\n" - "Gbb be 1\n" - "Tqd qu 1\n" - "aTp an 1\n" - "Ywg ng 1\n" - "jdT de 1\n" - "Wkm ka 1\n" - "pxA pr 1\n" - "vDl le 1\n" - "sfD st 1\n" - "rqV qu 1\n" - "cHb ch 1\n" - "iVc ch 1\n" - "Mfh th 1\n" - "sVm st 1\n" - "nzR an 1\n" - "Qvs st 1\n" - "kZg ng 1\n" - "Wnw an 1\n" - "qZb qu 1\n" - "Gvq qu 1\n" - "vPk ka 
1\n" - "Sxq qu 1\n" - "vNg ng 1\n" - "qrH qu 1\n" - "fLc ch 1\n" - "wVs st 1\n" - "qEh th 1\n" - "uqC qu 1\n" - "tZx th 1\n" - "yhI th 1\n" - "wNh th 1\n" - "rFj er 1\n" - "xPq qu 1\n" - "pqW qu 1\n" - "Pjc ch 1\n" - "jYj ij 1\n" - "pFv va 1\n" - "vLr er 1\n" - "lqq qu 1\n" - "xJg ng 1\n" - "lVz le 1\n" - "cZc ch 1\n" - "hcF th 1\n" - "uhJ th 1\n" - "cLj ch 1\n" - "qyW qu 1\n" - "zhT th 1\n" - "mtK th 1\n" - "pRb pr 1\n" - "bCx be 1\n" - "nJf an 1\n" - "jwF ij 1\n" - "Pdj de 1\n" - "jxE ij 1\n" - "slZ le 1\n" - "Lxn an 1\n" - "znL an 1\n" - "mzV sz 1\n" - "lGq le 1\n" - "Qbw wa 1\n" - "jbY ij 1\n" - "zSm sz 1\n" - "Qqx qu 1\n" - "ypR pr 1\n" - "gCc ch 1\n" - "Yvx va 1\n" - "ihI th 1\n" - "Zfx fo 1\n" - "njI nd 1\n" - "Ypt th 1\n" - "lxT le 1\n" - "fVv va 1\n" - "Jzm sz 1\n" - "jxA ij 1\n" - "gDl ng 1\n" - "Eaq an 1\n" - "Qcn an 1\n" - "zGb sz 1\n" - "jLh th 1\n" - "qkX qu 1\n" - "wbK wa 1\n" - "nNx an 1\n" - "sqW qu 1\n" - "wRx wa 1\n" - "xrU er 1\n" - "fnQ an 1\n" - "kzB sz 1\n" - "Rcn ch 1\n" - "qbL qu 1\n" - "srD er 1\n" - "Vxu qu 1\n" - "qvF qu 1\n" - "wJr er 1\n" - "Yxg ng 1\n" - "qiY qu 1\n" - "fMc ch 1\n" - "hbY th 1\n" - "hgH th 1\n" - "dmS de 1\n" - "jTn an 1\n" - "Zjm ij 1\n" - "Njl le 1\n" - "dqV qu 1\n" - "Yjh th 1\n" - "rKw er 1\n" - "cxU ch 1\n" - "Ckj ij 1\n" - "zfJ sz 1\n" - "ytF th 1\n" - "xrP er 1\n" - "qEj qu 1\n" - "rxO er 1\n" - "rZn an 1\n" - "bZq qu 1\n" - "cXq ch 1\n" - "wvD va 1\n" - "hcX th 1\n" - "zkO sz 1\n" - "hNx th 1\n" - "wFg ng 1\n" - "kXu qu 1\n" - "Vkn an 1\n" - "Gjz sz 1\n" - "Qcd ch 1\n" - "yvF va 1\n" - "xFx xe 1\n" - "dSj de 1\n" - "xPb be 1\n" - "oFp on 1\n" - "qAk qu 1\n" - "rqU qu 1\n" - "pGv va 1\n" - "hzC th 1\n" - "qIk qu 1\n" - "Lhl th 1\n" - "Fwb wa 1\n" - "pgE ng 1\n" - "Awz sz 1\n" - "fBk ka 1\n" - "xKd de 1\n" - "Pfw wa 1\n" - "uqK qu 1\n" - "pJc ch 1\n" - "bTc ch 1\n" - "tWg th 1\n" - "gdN ng 1\n" - "jrN er 1\n" - "klS le 1\n" - "qEi qu 1\n" - "sFn an 1\n" - "tqR th 1\n" - "Fnm an 1\n" - "hXv th 1\n" - "fxN fo 1\n" 
- "bvL va 1\n" - "oGf on 1\n" - "hZm th 1\n" - "yfH ny 1\n" - "dcE ch 1\n" - "pgW ng 1\n" - "wrB er 1\n" - "kWm ka 1\n" - "Shx th 1\n" - "twP th 1\n" - "Qvd de 1\n" - "Qgu qu 1\n" - "pJt th 1\n" - "zNv sz 1\n" - "Hph th 1\n" - "klF le 1\n" - "vqz qu 1\n" - "sgG ng 1\n" - "kdZ de 1\n" - "ejX er 1\n" - "Pxu qu 1\n" - "pvT va 1\n" - "Kqx qu 1\n" - "Qmb me 1\n" - "xFk ka 1\n" - "wQb wa 1\n" - "Pgx ng 1\n" - "ypL pr 1\n" - "bwE wa 1\n" - "xHt th 1\n" - "kVz sz 1\n" - "jmF ij 1\n" - "Ixq qu 1\n" - "qyP qu 1\n" - "rVv er 1\n" - "Ytw th 1\n" - "qpZ qu 1\n" - "tpZ th 1\n" - "zjX sz 1\n" - "Khg th 1\n" - "qfV qu 1\n" - "Jzx sz 1\n" - "kTj ij 1\n" - "Bzq qu 1\n" - "njR an 1\n" - "cgW ch 1\n" - "cmI ch 1\n" - "kCb ka 1\n" - "pYp pr 1\n" - "vkZ ka 1\n" - "wvk ka 1\n" - "Vfq qu 1\n" - "nlZ an 1\n" - "qNj qu 1\n" - "rCq qu 1\n" - "kbV ka 1\n" - "Dqj qu 1\n" - "brD er 1\n" - "lbG le 1\n" - "xhF th 1\n" - "kxZ ka 1\n" - "Iuq qu 1\n" - "yFx ny 1\n" - "qVl qu 1\n" - "lcG ch 1\n" - "vWr er 1\n" - "aBq an 1\n" - "yJk ka 1\n" - "czL ch 1\n" - "jIu qu 1\n" - "vUl le 1\n" - "pZq qu 1\n" - "vtW th 1\n" - "Qxw wa 1\n" - "dYv de 1\n" - "iqH qu 1\n" - "Xws st 1\n" - "fDj ij 1\n" - "xVz sz 1\n" - "dKq qu 1\n" - "vfQ va 1\n" - "hvD th 1\n" - "wdY de 1\n" - "Hzz sz 1\n" - "cYs ch 1\n" - "Ftj th 1\n" - "dpU de 1\n" - "Lld le 1\n" - "Gqw qu 1\n" - "kdR de 1\n" - "vXg ng 1\n" - "qsY qu 1\n" - "jNf ij 1\n" - "Qjj ij 1\n" - "pVl le 1\n" - "Jmx me 1\n" - "pDj ij 1\n" - "iBc ch 1\n" - "kLj ij 1\n" - "xnG an 1\n" - "vTl le 1\n" - "Ndg ng 1\n" - "pqU qu 1\n" - "Uaw an 1\n" - "fzN sz 1\n" - "gNq qu 1\n" - "kjM ij 1\n" - "lnK an 1\n" - "zxb sz 1\n" - "kcS ch 1\n" - "njM an 1\n" - "Gdw de 1\n" - "lnZ an 1\n" - "Ygj ng 1\n" - "hKd th 1\n" - "gpT ng 1\n" - "yqP qu 1\n" - "ijX in 1\n" - "jGf ij 1\n" - "bxI be 1\n" - "vXx va 1\n" - "Vrw er 1\n" - "Cwx wa 1\n" - "nBh th 1\n" - "qvy qu 1\n" - "sxB st 1\n" - "mVk ka 1\n" - "Czx sz 1\n" - "fyV ny 1\n" - "cXw ch 1\n" - "Qnf an 1\n" - "Yqd qu 1\n" - "lqH qu 1\n" - 
"dbY de 1\n" - "Sqb qu 1\n" - "Kqw qu 1\n" - "zpJ sz 1\n" - "cbM ch 1\n" - "zFg ng 1\n" - "sKb st 1\n" - "qrK qu 1\n" - "zJc ch 1\n" - "nRn an 1\n" - "fqN qu 1\n" - "hfA th 1\n" - "qoG qu 1\n" - "Owz sz 1\n" - "nlG an 1\n" - "wIx wa 1\n" - "qrP qu 1\n" - "Nwg ng 1\n" - "qaW an 1\n" - "hcT th 1\n" - "wkB ka 1\n" - "Ndt th 1\n" - "Kzq qu 1\n" - "gxB ng 1\n" - "Bjz sz 1\n" - "vTf va 1\n" - "jFq qu 1\n" - "qMe qu 1\n" - "ufQ qu 1\n" - "npG an 1\n" - "uZk qu 1\n" - "qTw qu 1\n" - "Glw le 1\n" - "Kqq qu 1\n" - "Cxr er 1\n" - "jZs st 1\n" - "Sqv qu 1\n" - "yPm me 1\n" - "eQj er 1\n" - "aIh th 1\n" - "gDq qu 1\n" - "lIp le 1\n" - "jNj ij 1\n" - "qOd qu 1\n" - "vkM ka 1\n" - "vFy va 1\n" - "cfV ch 1\n" - "Kjh th 1\n" - "gkP ng 1\n" - "rJc ch 1\n" - "uPq qu 1\n" - "ozQ on 1\n" - "Dlk le 1\n" - "vXh th 1\n" - "ktY th 1\n" - "vWy va 1\n" - "gQv ng 1\n" - "Yww wa 1\n" - "Tpz sz 1\n" - "Qhc th 1\n" - "xuT qu 1\n" - "nbS an 1\n" - "zQg ng 1\n" - "vgZ ng 1\n" - "pUo on 1\n" - "uWb qu 1\n" - "mMf me 1\n" - "Zcd ch 1\n" - "iBp in 1\n" - "fwp pr 1\n" - "zYf sz 1\n" - "wCp pr 1\n" - "Cqy qu 1\n" - "cjF ch 1\n" - "Gfh th 1\n" - "mcW ch 1\n" - "cqV ch 1\n" - "uJd qu 1\n" - "iUj in 1\n" - "vkR ka 1\n" - "wgI ng 1\n" - "vUg ng 1\n" - "Wdn de 1\n" - "sjF st 1\n" - "tPv th 1\n" - "xRn an 1\n" - "klV le 1\n" - "sbM st 1\n" - "mfT me 1\n" - "dbV de 1\n" - "Fmn an 1\n" - "gfU ng 1\n" - "cbB ch 1\n" - "Yxz sz 1\n" - "Kxk ka 1\n" - "Dwq qu 1\n" - "wgX ng 1\n" - "sPv st 1\n" - "vHd de 1\n" - "nbH an 1\n" - "cFn an 1\n" - "qqX qu 1\n" - "jFe er 1\n" - "qEb qu 1\n" - "dFh th 1\n" - "uEo qu 1\n" - "lcI ch 1\n" - "bMm me 1\n" - "zZw sz 1\n" - "hjO th 1\n" - "hKx th 1\n" - "jgC ng 1\n" - "cnL an 1\n" - "Fdg ng 1\n" - "bGf be 1\n" - "Sjz sz 1\n" - "bMj ij 1\n" - "vXw va 1\n" - "Gff fo 1\n" - "Cww wa 1\n" - "jsQ st 1\n" - "Zgv ng 1\n" - "lPf le 1\n" - "nmQ an 1\n" - "Vdq qu 1\n" - "lcX ch 1\n" - "gjT ng 1\n" - "mwE me 1\n" - "qLm qu 1\n" - "cHq ch 1\n" - "Xtn th 1\n" - "Ntq th 1\n" - "gWk ng 1\n" - "Pqd 
qu 1\n" - "qpP qu 1\n" - "sRf st 1\n" - "qpL qu 1\n" - "cnD an 1\n" - "qpG qu 1\n" - "dzS sz 1\n" - "tZb th 1\n" - "ygM ng 1\n" - "bxC be 1\n" - "dfU de 1\n" - "bmB me 1\n" - "lBz le 1\n" - "gJx ng 1\n" - "Ykv ka 1\n" - "Zdk de 1\n" - "wnQ an 1\n" - "tZj th 1\n" - "Zzm sz 1\n" - "Vfh th 1\n" - "Mwc ch 1\n" - "rUo on 1\n" - "qwp qu 1\n" - "tcI th 1\n" - "tfD th 1\n" - "uoZ qu 1\n" - "fCw wa 1\n" - "iQq qu 1\n" - "qBg qu 1\n" - "sVb st 1\n" - "pjU ij 1\n" - "scQ ch 1\n" - "pqQ qu 1\n" - "svZ st 1\n" - "Zpj ij 1\n" - "piV in 1\n" - "kbP ka 1\n" - "wqM qu 1\n" - "rVb er 1\n" - "qZr qu 1\n" - "hxO th 1\n" - "wTn an 1\n" - "Jzf sz 1\n" - "Qjb ij 1\n" - "uYv qu 1\n" - "pwK pr 1\n" - "hvH th 1\n" - "Dqe qu 1\n" - "pfI pr 1\n" - "mhV th 1\n" - "jgE ng 1\n" - "rcQ ch 1\n" - "kmT ka 1\n" - "Wzj sz 1\n" - "xNs st 1\n" - "Pbj ij 1\n" - "zvB sz 1\n" - "xhJ th 1\n" - "svq qu 1\n" - "Nvn an 1\n" - "swZ st 1\n" - "jgF ng 1\n" - "mfL me 1\n" - "zkL sz 1\n" - "jVp ij 1\n" - "Dkj ij 1\n" - "xuY qu 1\n" - "hHq th 1\n" - "cSf ch 1\n" - "Jzd sz 1\n" - "lqU qu 1\n" - "qMd qu 1\n" - "Qgj ng 1\n" - "fxk ka 1\n" - "tRt th 1\n" - "zFk sz 1\n" - "qEo qu 1\n" - "voY on 1\n" - "Awj ij 1\n" - "Txj ij 1\n" - "cIg ch 1\n" - "xUu qu 1\n" - "sRr er 1\n" - "Jxn an 1\n" - "iPf in 1\n" - "ejY er 1\n" - "Xts th 1\n" - "pfT pr 1\n" - "Pqa an 1\n" - "zsV st 1\n" - "ypC pr 1\n" - "wMs st 1\n" - "qEc ch 1\n" - "vxY va 1\n" - "fUg ng 1\n" - "Dff fo 1\n" - "gqQ qu 1\n" - "zMv sz 1\n" - "vJi in 1\n" - "fPv va 1\n" - "dLz sz 1\n" - "cdM ch 1\n" - "gNx ng 1\n" - "aGv an 1\n" - "vvD va 1\n" - "dJh th 1\n" - "rxY er 1\n" - "rWj er 1\n" - "Pvx va 1\n" - "rhD th 1\n" - "zRd sz 1\n" - "Kgv ng 1\n" - "Xvy va 1\n" - "kZj ij 1\n" - "kpK ka 1\n" - "Pfn an 1\n" - "wUe er 1\n" - "wWx wa 1\n" - "jPw ij 1\n" - "gLq qu 1\n" - "iJq qu 1\n" - "gPx ng 1\n" - "jHd de 1\n" - "vJb va 1\n" - "xhB th 1\n" - "xQv va 1\n" - "Eoa an 1\n" - "pjO ij 1\n" - "yFj ij 1\n" - "sXo on 1\n" - "wbY wa 1\n" - "cjO ch 1\n" - "mlZ le 1\n" - "bNv va 
1\n" - "kjP ij 1\n" - "yXn an 1\n" - "qVj qu 1\n" - "fNv va 1\n" - "gjW ng 1\n" - "nXj an 1\n" - "dqJ qu 1\n" - "Hnh th 1\n" - "Qyk ka 1\n" - "kvB ka 1\n" - "qyB qu 1\n" - "mDt th 1\n" - "zgP ng 1\n" - "Zzk sz 1\n" - "fMk ka 1\n" - "xzY sz 1\n" - "qbT qu 1\n" - "xOt th 1\n" - "xsA st 1\n" - "gLj ng 1\n" - "zxH sz 1\n" - "cLm ch 1\n" - "Dnk an 1\n" - "zIu qu 1\n" - "kpJ ka 1\n" - "xrK er 1\n" - "eIb er 1\n" - "Jbp pr 1\n" - "Bqg qu 1\n" - "tXg th 1\n" - "Zjk ij 1\n" - "dRd de 1\n" - "tjZ th 1\n" - "hQl th 1\n" - "iyW in 1\n" - "Jwd de 1\n" - "qZt th 1\n" - "cJp ch 1\n" - "jBg ng 1\n" - "zrG er 1\n" - "hWf th 1\n" - "Zds st 1\n" - "qsZ qu 1\n" - "cQx ch 1\n" - "ccN ch 1\n" - "ywM wa 1\n" - "gbX ng 1\n" - "tfT th 1\n" - "vwt th 1\n" - "Qbp pr 1\n" - "yeY er 1\n" - "aUb an 1\n" - "qHw qu 1\n" - "Fhq th 1\n" - "Fng an 1\n" - "lvI le 1\n" - "jCf ij 1\n" - "hqH th 1\n" - "tTq th 1\n" - "sfI st 1\n" - "vsM st 1\n" - "lDp le 1\n" - "wJb wa 1\n" - "bhX th 1\n" - "rRq qu 1\n" - "qtS th 1\n" - "Zwp pr 1\n" - "Jbh th 1\n" - "hHb th 1\n" - "pDy pr 1\n" - "sjD st 1\n" - "Oyp pr 1\n" - "qwD qu 1\n" - "jbD ij 1\n" - "vpG va 1\n" - "Wjb ij 1\n" - "vpB va 1\n" - "aXq an 1\n" - "mWz sz 1\n" - "qHi qu 1\n" - "fyN ny 1\n" - "mbQ me 1\n" - "ywC wa 1\n" - "oVg ng 1\n" - "xmZ me 1\n" - "slO le 1\n" - "fXn an 1\n" - "kYs st 1\n" - "pVu qu 1\n" - "bkU ka 1\n" - "Brq qu 1\n" - "qCq qu 1\n" - "Xcx ch 1\n" - "zMt th 1\n" - "cRw ch 1\n" - "gzQ ng 1\n" - "Qbg ng 1\n" - "juU qu 1\n" - "xSz sz 1\n" - "Vgz ng 1\n" - "oMw on 1\n" - "fpE pr 1\n" - "xjX ij 1\n" - "qCg qu 1\n" - "zwM sz 1\n" - "uQl qu 1\n" - "qPk qu 1\n" - "pjD ij 1\n" - "Qzm sz 1\n" - "sIp st 1\n" - "uoG qu 1\n" - "rVl er 1\n" - "cbK ch 1\n" - "hXm th 1\n" - "Ksf st 1\n" - "kbF ka 1\n" - "wBm me 1\n" - "iYt th 1\n" - "sgH ng 1\n" - "Gzv sz 1\n" - "yvE va 1\n" - "xKq qu 1\n" - "sWf st 1\n" - "zBc ch 1\n" - "ykH ka 1\n" - "vjH ij 1\n" - "whI th 1\n" - "vPj ij 1\n" - "Zht th 1\n" - "iJx in 1\n" - "cZt th 1\n" - "dqU qu 1\n" - "hMd th 1\n" 
- "cUj ch 1\n" - "vMg ng 1\n" - "pcJ ch 1\n" - "Bcm ch 1\n" - "jXi in 1\n" - "xoI on 1\n" - "Zkq qu 1\n" - "Xzr er 1\n" - "yzM sz 1\n" - "qjX qu 1\n" - "mNq qu 1\n" - "hpX th 1\n" - "fBq qu 1\n" - "tXd th 1\n" - "Xki in 1\n" - "Hsq qu 1\n" - "bqU qu 1\n" - "sgF ng 1\n" - "dPc ch 1\n" - "Jxi in 1\n" - "Ugp ng 1\n" - "Rxi in 1\n" - "Kwm me 1\n" - "zkD sz 1\n" - "Rql qu 1\n" - "pJb pr 1\n" - "fcV ch 1\n" - "iVd in 1\n" - "bBp be 1\n" - "Ojw ij 1\n" - "vZl le 1\n" - "Iyj ij 1\n" - "fkU ka 1\n" - "Kcq ch 1\n" - "dBq qu 1\n" - "Mqq qu 1\n" - "iMg ng 1\n" - "Wws st 1\n" - "tqX th 1\n" - "xhD th 1\n" - "rNl er 1\n" - "pWd de 1\n" - "jrV er 1\n" - "Bmj ij 1\n" - "Hmq qu 1\n" - "vlH le 1\n" - "Mxb be 1\n" - "yyS ny 1\n" - "qvW qu 1\n" - "fvX va 1\n" - "Vfe er 1\n" - "Cdw de 1\n" - "Kge ng 1\n" - "Qej er 1\n" - "rvZ er 1\n" - "vzI sz 1\n" - "dDn an 1\n" - "nwS an 1\n" - "Qcb ch 1\n" - "wkV ka 1\n" - "uCx qu 1\n" - "Igk ng 1\n" - "Vpm me 1\n" - "hBm th 1\n" - "pdQ de 1\n" - "fgQ ng 1\n" - "yQm me 1\n" - "gxH ng 1\n" - "pqK qu 1\n" - "lRc ch 1\n" - "Xdv de 1\n" - "hDz th 1\n" - "dFw de 1\n" - "qQu un 1\n" - "xbD be 1\n" - "qmE qu 1\n" - "mWm me 1\n" - "jBb ij 1\n" - "jXt th 1\n" - "fxU fo 1\n" - "Xwc ch 1\n" - "Lqf qu 1\n" - "hcP th 1\n" - "pfB pr 1\n" - "vSg ng 1\n" - "xJw wa 1\n" - "mRf me 1\n" - "hqW th 1\n" - "nVb an 1\n" - "cEu ch 1\n" - "nfN an 1\n" - "nVj an 1\n" - "Rwk ka 1\n" - "nmG an 1\n" - "oDt th 1\n" - "kPb ka 1\n" - "gqW qu 1\n" - "Qhf th 1\n" - "qZl qu 1\n" - "zHq qu 1\n" - "iXl in 1\n" -#endif -}; - -const int ksizeofUniversalAmbigsFile = sizeof(kUniversalAmbigsFile); - -} // namespace tesseract diff --git a/src/ccutil/universalambigs.h b/src/ccutil/universalambigs.h index 8c38273cf..f7e165aa9 100644 --- a/src/ccutil/universalambigs.h +++ b/src/ccutil/universalambigs.h @@ -3,7 +3,6 @@ // Description: Data for a universal ambigs file that is useful for // any language. 
// Author: Ray Smith -// Created: Mon Mar 18 11:26:00 PDT 2013 // // (C) Copyright 2013, Google Inc. // Licensed under the Apache License, Version 2.0 (the "License"); @@ -23,8 +22,19019 @@ namespace tesseract { -extern const char kUniversalAmbigsFile[]; -extern const int ksizeofUniversalAmbigsFile; +inline const char kUniversalAmbigsFile[] = { + "v2\n" + "'' \" 1\n" + "`' \" 1\n" + "'` \" 1\n" + "‘' \" 1\n" + "'‘ \" 1\n" + "’' \" 1\n" + "'’ \" 1\n" + "`` \" 1\n" + "`‘ \" 1\n" + "‘` \" 1\n" + "`’ \" 1\n" + "’` \" 1\n" + "‘‘ “ 1\n" + "‘’ \" 1\n" + "’‘ \" 1\n" + "’’ ” 1\n" + ",, „ 1\n" + "m rn 0\n" + "rn m 0\n" + "m in 0\n" + "in m 0\n" + "d cl 0\n" + "cl d 0\n" + "nn rm 0\n" + "rm nn 0\n" + "n ri 0\n" + "ri n 0\n" + "li h 0\n" + "lr h 0\n" + "ii u 0\n" + "ii n 0\n" + "ni m 0\n" + "iii m 0\n" + "ll H 0\n" + "I-I H 0\n" + "vv w 0\n" + "VV W 0\n" + "t f 0\n" + "f t 0\n" + "a o 0\n" + "o a 0\n" + "e c 0\n" + "c e 0\n" + "rr n 0\n" + "E fi 0\n" + "l< k 0\n" + "ld ki 0\n" + "lx h 0\n" + "xn m 0\n" + "ux in 0\n" + "r t 0\n" + "d tl 0\n" + "di th 0\n" + "ur in 0\n" + "un im 0\n" + "u a 0\n" + "o ó 0\n" + "ó o 0\n" + "i í 0\n" + "í i 0\n" + "a á 0\n" + "á a 0\n" + "e é 0\n" + "é e 0\n" + "u ú 0\n" + "ú u 0\n" + "n ñ 0\n" + "ñ n 0\n" + "0 o 0\n" + "d tr 0\n" + "n tr 0\n" + "ñ fi 0\n" + "u ti 0\n" + "ñ ti 0\n" + "d ti 0\n" + "d tí 0\n" + "d rí 0\n" + "a à 0\n" + "e è 0\n" + "n ij 0\n" + "g ij 0\n" + "o ò 0\n" + "E É 0\n" + "E È 0\n" + "u ü 0\n" + "xnE an 1\n" + "mYx me 1\n" + "qtE nt 1\n" + "Tlb le 1\n" + "vxN va 1\n" + "gjQ ng 1\n" + "jpF ij 1\n" + "Yrl le 1\n" + "aqY an 1\n" + "zvJ va 1\n" + "fbL be 1\n" + "Nvk va 1\n" + "fJp pr 1\n" + "wxC wa 1\n" + "cuJ qu 1\n" + "Qzt ta 1\n" + "qKw wa 1\n" + "scJ st 1\n" + "pXp po 1\n" + "Vqi ti 1\n" + "Uxk ka 1\n" + "kJv ka 1\n" + "Ykd ka 1\n" + "vpX va 1\n" + "iBv ti 1\n" + "zRb sz 1\n" + "yTm mi 1\n" + "mKp pr 1\n" + "Vzq qu 1\n" + "Xtp ti 1\n" + "mvD va 1\n" + "mDq me 1\n" + "jxP ij 1\n" + "Bxv va 1\n" + "oIu qu 1\n" + "Rvc va 1\n" + 
"uCj qu 1\n" + "oAo vo 1\n" + "quB tu 1\n" + "btV ti 1\n" + "Lmc me 1\n" + "tVw ti 1\n" + "Yxv va 1\n" + "Hxm me 1\n" + "dVh th 1\n" + "xYc ch 1\n" + "uPj tu 1\n" + "fTf fo 1\n" + "Rjw ij 1\n" + "xdA di 1\n" + "jzN ij 1\n" + "mxL me 1\n" + "ygJ ng 1\n" + "Vvg va 1\n" + "rjK ij 1\n" + "yuV tu 1\n" + "sWk ku 1\n" + "Pgz sz 1\n" + "jHm me 1\n" + "zkU ku 1\n" + "gvG va 1\n" + "hdP th 1\n" + "mVb me 1\n" + "Qgd di 1\n" + "zcZ ch 1\n" + "zqj ij 1\n" + "zsJ sz 1\n" + "dfN di 1\n" + "dgW di 1\n" + "wNr ri 1\n" + "zvC va 1\n" + "qYw qu 1\n" + "uHy tu 1\n" + "tNq th 1\n" + "lxJ li 1\n" + "Hbk ku 1\n" + "xsG st 1\n" + "vSb va 1\n" + "xFb bu 1\n" + "Ntg th 1\n" + "oBj ij 1\n" + "qkv qu 1\n" + "bVj ij 1\n" + "zjT ij 1\n" + "bvX va 1\n" + "oZf to 1\n" + "kcU ko 1\n" + "fFm me 1\n" + "Xbj ij 1\n" + "Kqv va 1\n" + "Rwj ij 1\n" + "dvJ va 1\n" + "znJ sz 1\n" + "qqV qu 1\n" + "pxM po 1\n" + "eBj ij 1\n" + "mJx me 1\n" + "xnM ng 1\n" + "aCq va 1\n" + "pHj ij 1\n" + "tfQ th 1\n" + "wqn qu 1\n" + "mSs is 1\n" + "sBw st 1\n" + "Fhn th 1\n" + "zNb sz 1\n" + "Mvb va 1\n" + "bVt th 1\n" + "qHt th 1\n" + "qLv qu 1\n" + "kgF ng 1\n" + "vxW va 1\n" + "cdY ch 1\n" + "Xrz sz 1\n" + "Efh th 1\n" + "lqI qu 1\n" + "Lzq qu 1\n" + "zhX th 1\n" + "ghZ th 1\n" + "lFg ng 1\n" + "vVc va 1\n" + "lMr er 1\n" + "Tqj qu 1\n" + "jAx ij 1\n" + "iMt th 1\n" + "Nlv va 1\n" + "zbP sz 1\n" + "kVx ka 1\n" + "eQl te 1\n" + "sWb st 1\n" + "Bqy qu 1\n" + "dXk ka 1\n" + "vUc va 1\n" + "vOb va 1\n" + "uHf qu 1\n" + "qNr qu 1\n" + "uFz qu 1\n" + "Mlr er 1\n" + "kmZ ka 1\n" + "sRt th 1\n" + "Wqv qu 1\n" + "hfK th 1\n" + "vxQ va 1\n" + "lCq qu 1\n" + "fYw wa 1\n" + "tfS th 1\n" + "qdO qu 1\n" + "dQd de 1\n" + "xdX de 1\n" + "mNx me 1\n" + "kFz sz 1\n" + "wjS ij 1\n" + "yPp pr 1\n" + "wcW ch 1\n" + "Njz sz 1\n" + "dVp de 1\n" + "dqD qu 1\n" + "rJs sz 1\n" + "xpH po 1\n" + "xqR qu 1\n" + "gVr er 1\n" + "Btq th 1\n" + "nmB nt 1\n" + "zcM sz 1\n" + "cfG ch 1\n" + "mfO me 1\n" + "Yhc th 1\n" + "bZm me 1\n" + "mzB sz 1\n" + "vRw 
va 1\n" + "yDh th 1\n" + "Zgf ng 1\n" + "kqT qu 1\n" + "Iuz qu 1\n" + "rbW er 1\n" + "Jmq qu 1\n" + "Kvj va 1\n" + "zcD ch 1\n" + "xgC ng 1\n" + "jCx ij 1\n" + "bWg ng 1\n" + "ywW wa 1\n" + "Jkc ch 1\n" + "xGs sz 1\n" + "vbH va 1\n" + "lTz sz 1\n" + "eCb er 1\n" + "jVv va 1\n" + "jDq qu 1\n" + "joQ po 1\n" + "qtM th 1\n" + "Rqk qu 1\n" + "Hvg va 1\n" + "uAz qu 1\n" + "mfW me 1\n" + "tgS th 1\n" + "cqD qu 1\n" + "sfY sz 1\n" + "Yhv th 1\n" + "uqM qu 1\n" + "xpK pr 1\n" + "Jzh th 1\n" + "cQk ch 1\n" + "tjO th 1\n" + "qxZ qu 1\n" + "zPv sz 1\n" + "qNk qu 1\n" + "lvQ va 1\n" + "kGw ka 1\n" + "xuD qu 1\n" + "Jvy va 1\n" + "jYe te 1\n" + "fZu qu 1\n" + "qYo qu 1\n" + "vhI th 1\n" + "fxY fo 1\n" + "yPf fo 1\n" + "fGj ij 1\n" + "dmT me 1\n" + "vfX va 1\n" + "xQt th 1\n" + "cxS ch 1\n" + "vzA va 1\n" + "qaA qu 1\n" + "Jbx be 1\n" + "kVd ka 1\n" + "Xjv va 1\n" + "hkI th 1\n" + "vQu qu 1\n" + "vhK th 1\n" + "Dvj va 1\n" + "Vbm me 1\n" + "fpN pr 1\n" + "pkG ka 1\n" + "bLc ch 1\n" + "tJc th 1\n" + "wwJ wa 1\n" + "Zrw er 1\n" + "wdW de 1\n" + "Wgf ng 1\n" + "Pqz qu 1\n" + "wgN ng 1\n" + "zHt th 1\n" + "xTl le 1\n" + "Dvt th 1\n" + "wmU me 1\n" + "xhm th 1\n" + "hCx th 1\n" + "vwV va 1\n" + "zvL va 1\n" + "nGf nt 1\n" + "jjC ij 1\n" + "Ucg ch 1\n" + "pWf pr 1\n" + "jxG ij 1\n" + "Mqn qu 1\n" + "yvW va 1\n" + "lWk ka 1\n" + "mdO me 1\n" + "qNm qu 1\n" + "Rwg ng 1\n" + "xfv va 1\n" + "uOw qu 1\n" + "xhZ th 1\n" + "jLr er 1\n" + "fBy fo 1\n" + "nUj nt 1\n" + "lTg ng 1\n" + "jlP ij 1\n" + "wrR er 1\n" + "rXw er 1\n" + "eVw ve 1\n" + "zWn ng 1\n" + "mJs sz 1\n" + "Mgy ng 1\n" + "uZq qu 1\n" + "Tdg ng 1\n" + "mqI qu 1\n" + "Dhp th 1\n" + "pmK me 1\n" + "Ssf sz 1\n" + "sWl sz 1\n" + "iqK qu 1\n" + "gjG ng 1\n" + "djB ij 1\n" + "wKv va 1\n" + "wvI va 1\n" + "tcU th 1\n" + "tkG th 1\n" + "zUe te 1\n" + "lUh th 1\n" + "nBg nt 1\n" + "dHx de 1\n" + "Wbz sz 1\n" + "vuQ qu 1\n" + "Hpl le 1\n" + "oVj ij 1\n" + "vBb va 1\n" + "Tdz sz 1\n" + "pfV pr 1\n" + "qgN qu 1\n" + "pcU ch 1\n" + "gcN ch 
1\n" + "vkA va 1\n" + "cQf ch 1\n" + "Yzx sz 1\n" + "ypF pr 1\n" + "vBw va 1\n" + "pPd de 1\n" + "qmU qu 1\n" + "eWf ve 1\n" + "jZr er 1\n" + "Hwl le 1\n" + "yyI ny 1\n" + "Zfh th 1\n" + "Lgw ng 1\n" + "uqp qu 1\n" + "xOj ij 1\n" + "dkJ ko 1\n" + "dqM qu 1\n" + "sbW is 1\n" + "zMp sz 1\n" + "nJz ng 1\n" + "kMc ko 1\n" + "zqW qu 1\n" + "vQk va 1\n" + "eqD qu 1\n" + "hFn th 1\n" + "vcZ ch 1\n" + "xGk ka 1\n" + "kzf sz 1\n" + "xZx xe 1\n" + "qvN qu 1\n" + "ykY ka 1\n" + "brH er 1\n" + "Wrh th 1\n" + "wjE ij 1\n" + "kjQ ka 1\n" + "fLj ij 1\n" + "mgE ng 1\n" + "xwI wa 1\n" + "iDw ti 1\n" + "Btx th 1\n" + "vPz va 1\n" + "yqH qu 1\n" + "wFe er 1\n" + "lQy le 1\n" + "gBp ng 1\n" + "jdY de 1\n" + "tvQ th 1\n" + "ljO le 1\n" + "Nsq qu 1\n" + "xdO de 1\n" + "gzW ng 1\n" + "wtM th 1\n" + "qfR qu 1\n" + "jZh th 1\n" + "Wcb ch 1\n" + "dvQ va 1\n" + "jHb ij 1\n" + "xbM be 1\n" + "nWg nt 1\n" + "Ywj ij 1\n" + "Xwj ij 1\n" + "pxK pr 1\n" + "ybQ be 1\n" + "Wvm va 1\n" + "Lgz ng 1\n" + "btS th 1\n" + "jRl le 1\n" + "qqJ qu 1\n" + "Cnq qu 1\n" + "Fmw me 1\n" + "dvP va 1\n" + "vqB qu 1\n" + "djI de 1\n" + "jVq qu 1\n" + "fvZ va 1\n" + "Cwt th 1\n" + "Uyb be 1\n" + "Ffc ch 1\n" + "soX sz 1\n" + "qhR th 1\n" + "fWz sz 1\n" + "vrX va 1\n" + "eOq qu 1\n" + "bwZ be 1\n" + "dnV ng 1\n" + "Gbw be 1\n" + "xGd de 1\n" + "mnZ ng 1\n" + "bpN pr 1\n" + "dzX de 1\n" + "Bxq qu 1\n" + "zpx sz 1\n" + "dqZ qu 1\n" + "xTf fo 1\n" + "wPv va 1\n" + "cxq qu 1\n" + "hdT th 1\n" + "ywX wa 1\n" + "Uvv va 1\n" + "rKp er 1\n" + "sdF de 1\n" + "Jcg ch 1\n" + "xzO sz 1\n" + "xTt th 1\n" + "djP de 1\n" + "gTn ng 1\n" + "Gtp th 1\n" + "xgA ng 1\n" + "bdL de 1\n" + "wzO sz 1\n" + "fhI th 1\n" + "Wmp me 1\n" + "Qdt th 1\n" + "uYq qu 1\n" + "pbJ pr 1\n" + "jRd de 1\n" + "Xsx sz 1\n" + "zgI ng 1\n" + "qhY th 1\n" + "Ggj ng 1\n" + "Fjq qu 1\n" + "Qwk ka 1\n" + "zxW sz 1\n" + "vCc ch 1\n" + "ccL ch 1\n" + "Kxs sz 1\n" + "mYr er 1\n" + "rQt er 1\n" + "Zxs sz 1\n" + "hdQ th 1\n" + "dwH de 1\n" + "Yml le 1\n" + "qVz qu 1\n" 
+ "Rvl va 1\n" + "yHk ka 1\n" + "Wjt th 1\n" + "hMw th 1\n" + "pzU sz 1\n" + "gcL ch 1\n" + "qOa qu 1\n" + "eqI qu 1\n" + "iYp ti 1\n" + "vCq qu 1\n" + "uoV ro 1\n" + "fZx fo 1\n" + "qQd qu 1\n" + "qdE qu 1\n" + "qWx qu 1\n" + "Ykj ij 1\n" + "Fpj ij 1\n" + "zGv va 1\n" + "rwO er 1\n" + "Qzq qu 1\n" + "Kqb qu 1\n" + "zgT ng 1\n" + "jsZ sz 1\n" + "aHq qu 1\n" + "yjL ij 1\n" + "Ycw ch 1\n" + "bnP an 1\n" + "vWn an 1\n" + "zyY sz 1\n" + "zRs st 1\n" + "wuP qu 1\n" + "vjB va 1\n" + "jrT er 1\n" + "vwJ va 1\n" + "dVj de 1\n" + "zvW va 1\n" + "dZk de 1\n" + "nrG an 1\n" + "qsU qu 1\n" + "Pvs va 1\n" + "lLh th 1\n" + "qCz qu 1\n" + "dvV de 1\n" + "Pjw ij 1\n" + "Kmj ij 1\n" + "Jfh th 1\n" + "nwY an 1\n" + "gwC ng 1\n" + "vGb va 1\n" + "qWr qu 1\n" + "qpW qu 1\n" + "dKk de 1\n" + "yWb be 1\n" + "jmN ij 1\n" + "gpV ng 1\n" + "qzS qu 1\n" + "oZh th 1\n" + "Qmt th 1\n" + "mNk me 1\n" + "ypM pr 1\n" + "lwH le 1\n" + "zHs sz 1\n" + "jzC jo 1\n" + "oJh th 1\n" + "Lqh th 1\n" + "hXg th 1\n" + "xEf fo 1\n" + "uWx qu 1\n" + "kvT va 1\n" + "zsG sz 1\n" + "lSx le 1\n" + "qKb qu 1\n" + "Qye de 1\n" + "xHk ka 1\n" + "Cwp pr 1\n" + "zmJ sz 1\n" + "xuL qu 1\n" + "bdH de 1\n" + "Pbw wa 1\n" + "qdX qu 1\n" + "lVc ch 1\n" + "bqL qu 1\n" + "wNs sz 1\n" + "vzN va 1\n" + "qjA qu 1\n" + "Zhf th 1\n" + "ypJ pr 1\n" + "xMq qu 1\n" + "bTk ka 1\n" + "tLf th 1\n" + "xgR ng 1\n" + "kQz sz 1\n" + "Rjp ij 1\n" + "xhG th 1\n" + "bCc ch 1\n" + "hbF th 1\n" + "rxQ er 1\n" + "qVp qu 1\n" + "bkY ka 1\n" + "qPl qu 1\n" + "jQk ij 1\n" + "Ovq qu 1\n" + "sVv va 1\n" + "pmU me 1\n" + "uFv qu 1\n" + "xaZ va 1\n" + "gGn an 1\n" + "pgI ng 1\n" + "zTj sz 1\n" + "lvC va 1\n" + "wGv va 1\n" + "rNv va 1\n" + "Qtq th 1\n" + "vNh th 1\n" + "lPv va 1\n" + "Jdq qu 1\n" + "Xdj de 1\n" + "yqk qu 1\n" + "iwY ti 1\n" + "Nmq qu 1\n" + "fTp pr 1\n" + "qzQ qu 1\n" + "pjA ij 1\n" + "pvH va 1\n" + "xLj ij 1\n" + "qWh th 1\n" + "vVq qu 1\n" + "gQd de 1\n" + "svY va 1\n" + "fLf fo 1\n" + "qzB qu 1\n" + "Dxg ng 1\n" + "uzY qu 1\n" + 
"gVz sz 1\n" + "hZb th 1\n" + "Gpx pr 1\n" + "xqh th 1\n" + "gcX ch 1\n" + "Hxd de 1\n" + "tUq th 1\n" + "bKp pr 1\n" + "iGx ti 1\n" + "xvQ va 1\n" + "lxA le 1\n" + "sjH st 1\n" + "Gqo qu 1\n" + "dgQ de 1\n" + "yDk ka 1\n" + "Znv va 1\n" + "vfU va 1\n" + "vuD qu 1\n" + "oQj ij 1\n" + "bhD th 1\n" + "qLj qu 1\n" + "mdY de 1\n" + "rZb er 1\n" + "kDv va 1\n" + "fsK sz 1\n" + "Kqf qu 1\n" + "yWl le 1\n" + "mVw me 1\n" + "mcV ch 1\n" + "tDf th 1\n" + "lAo le 1\n" + "fzR sz 1\n" + "Xrq qu 1\n" + "jrZ er 1\n" + "qmN qu 1\n" + "Jnp an 1\n" + "jhC th 1\n" + "kqR qu 1\n" + "dWn de 1\n" + "Wmw me 1\n" + "Rgy ng 1\n" + "uvN qu 1\n" + "jiY ti 1\n" + "xWc ch 1\n" + "yJr er 1\n" + "oHq qu 1\n" + "yvw va 1\n" + "Ydn de 1\n" + "Nvq qu 1\n" + "Gmv va 1\n" + "xxZ xe 1\n" + "Xdf de 1\n" + "xYh th 1\n" + "Vnv an 1\n" + "jNz sz 1\n" + "Wnq qu 1\n" + "Xwk ka 1\n" + "qWz qu 1\n" + "mQs sz 1\n" + "Vxb be 1\n" + "xwG wa 1\n" + "wvp va 1\n" + "gmV ng 1\n" + "Rzq qu 1\n" + "Cpw pr 1\n" + "Gyy ny 1\n" + "xzA sz 1\n" + "wGx wa 1\n" + "bqS qu 1\n" + "whR th 1\n" + "jPc ch 1\n" + "iqG qu 1\n" + "djK de 1\n" + "cVk ch 1\n" + "rwT er 1\n" + "Vhn th 1\n" + "Hfw wa 1\n" + "bnJ an 1\n" + "Cpd de 1\n" + "Nmd de 1\n" + "dnO an 1\n" + "qWc qu 1\n" + "aVq qu 1\n" + "qOn qu 1\n" + "Qlr er 1\n" + "qnN qu 1\n" + "rLq qu 1\n" + "wtE th 1\n" + "jgR ng 1\n" + "Yqp qu 1\n" + "Hwg ng 1\n" + "nWk an 1\n" + "wqB qu 1\n" + "fAp pr 1\n" + "hZv th 1\n" + "Kzp sz 1\n" + "fNk ka 1\n" + "Tkd de 1\n" + "uYm qu 1\n" + "kcR ch 1\n" + "xNl le 1\n" + "kHk ka 1\n" + "bJk ka 1\n" + "jjD ij 1\n" + "Nlq qu 1\n" + "dhB th 1\n" + "jXl le 1\n" + "nwB an 1\n" + "Hzb sz 1\n" + "qQz qu 1\n" + "fKc ch 1\n" + "jVw ij 1\n" + "ylU le 1\n" + "Lzj sz 1\n" + "sXu qu 1\n" + "wBw wa 1\n" + "Iqg qu 1\n" + "wjV ij 1\n" + "wxt th 1\n" + "jzK sz 1\n" + "rDd de 1\n" + "uQy qu 1\n" + "qGw qu 1\n" + "tbU th 1\n" + "kUo ka 1\n" + "dVm de 1\n" + "Ddn an 1\n" + "vqC vo 1\n" + "jkZ ij 1\n" + "Lvz va 1\n" + "tPy th 1\n" + "Vfj ij 1\n" + "Qhb th 1\n" + "whB 
th 1\n" + "Fqf qu 1\n" + "hCv th 1\n" + "Fjf ij 1\n" + "Qfr er 1\n" + "zwF sz 1\n" + "Fwf wa 1\n" + "pvU va 1\n" + "whC th 1\n" + "hTk th 1\n" + "dlQ de 1\n" + "wzL sz 1\n" + "zqS qu 1\n" + "qtP th 1\n" + "yhC th 1\n" + "yjB ij 1\n" + "iTd de 1\n" + "kLx ka 1\n" + "Rqi qu 1\n" + "qjS qu 1\n" + "vjI va 1\n" + "pGz sz 1\n" + "wnV an 1\n" + "lQx le 1\n" + "uvS qu 1\n" + "Zge de 1\n" + "gJv ng 1\n" + "Ydb de 1\n" + "wDh th 1\n" + "zwV sz 1\n" + "hNm th 1\n" + "zwQ sz 1\n" + "fRr er 1\n" + "wVr er 1\n" + "nKg an 1\n" + "Tgg ng 1\n" + "bYp pr 1\n" + "lBn an 1\n" + "zjp sz 1\n" + "qAf qu 1\n" + "zmK me 1\n" + "wqK qu 1\n" + "vjT va 1\n" + "Lql qu 1\n" + "snC an 1\n" + "fzY sz 1\n" + "vqU qu 1\n" + "mGb me 1\n" + "fkP ka 1\n" + "wQg ng 1\n" + "Fqt th 1\n" + "bVm me 1\n" + "Wcx ch 1\n" + "wpY wa 1\n" + "lFv va 1\n" + "gwD ng 1\n" + "gWp ng 1\n" + "fjT ij 1\n" + "pFt th 1\n" + "iIp in 1\n" + "tbD th 1\n" + "Xqc qu 1\n" + "Qkc ch 1\n" + "qeZ qu 1\n" + "qPb qu 1\n" + "gwL ng 1\n" + "fHi in 1\n" + "xwP wa 1\n" + "xvB va 1\n" + "jSw ij 1\n" + "pzF sz 1\n" + "wYp wa 1\n" + "dDx de 1\n" + "nBx an 1\n" + "cNv ch 1\n" + "Ubm me 1\n" + "xXu qu 1\n" + "dRl de 1\n" + "dBz de 1\n" + "Xvh th 1\n" + "Xld de 1\n" + "mwY me 1\n" + "whQ th 1\n" + "Mzl le 1\n" + "Aqj qu 1\n" + "uDp qu 1\n" + "cjZ ch 1\n" + "Vkf ka 1\n" + "uGq qu 1\n" + "hBs th 1\n" + "qLh th 1\n" + "tfW th 1\n" + "cPn an 1\n" + "xoN on 1\n" + "Ydx de 1\n" + "Lxk ka 1\n" + "ccZ ch 1\n" + "uJh th 1\n" + "sVp sz 1\n" + "wrE er 1\n" + "xgP ng 1\n" + "hPp th 1\n" + "euU qu 1\n" + "sZh th 1\n" + "qnK qu 1\n" + "Bgh th 1\n" + "slQ le 1\n" + "gxA ng 1\n" + "jLd de 1\n" + "znD an 1\n" + "kXk ka 1\n" + "tfV th 1\n" + "Vwl le 1\n" + "xWd do 1\n" + "xnH an 1\n" + "cOq ch 1\n" + "Lkk ka 1\n" + "Nvy va 1\n" + "xIh th 1\n" + "xkK ka 1\n" + "rMr er 1\n" + "rmQ er 1\n" + "bPn an 1\n" + "fAa an 1\n" + "vQv va 1\n" + "fHr er 1\n" + "Pmv va 1\n" + "vzJ sz 1\n" + "wTg ng 1\n" + "bWc ch 1\n" + "Zwg ng 1\n" + "gKx ng 1\n" + "Gbq qu 1\n" + "wMk ka 
1\n" + "Nfx fo 1\n" + "fAo on 1\n" + "dHb de 1\n" + "lxH le 1\n" + "dqO qu 1\n" + "Tlq qu 1\n" + "Yjj ij 1\n" + "Iyh th 1\n" + "uoY qu 1\n" + "mhH th 1\n" + "lMj le 1\n" + "fzF sz 1\n" + "frR er 1\n" + "yNl le 1\n" + "aPv an 1\n" + "ywG wa 1\n" + "Cmw me 1\n" + "svK va 1\n" + "srO er 1\n" + "Uhz th 1\n" + "vPn an 1\n" + "zTq qu 1\n" + "kzH sz 1\n" + "Iox on 1\n" + "fQa an 1\n" + "wZr er 1\n" + "nqU an 1\n" + "wPb wa 1\n" + "Tzg ng 1\n" + "pnR an 1\n" + "vfJ va 1\n" + "vyX va 1\n" + "fLz sz 1\n" + "zjP sz 1\n" + "pmR me 1\n" + "ePq qu 1\n" + "jyT ij 1\n" + "mjP ij 1\n" + "fsH sz 1\n" + "vwB va 1\n" + "Ynr an 1\n" + "Tqh th 1\n" + "Lvv va 1\n" + "tCf th 1\n" + "wpB wa 1\n" + "wXh th 1\n" + "mhX th 1\n" + "kYd de 1\n" + "Dpg ng 1\n" + "ygR ng 1\n" + "Rfp pr 1\n" + "Jyq qu 1\n" + "yxq qu 1\n" + "pPc ch 1\n" + "aOj an 1\n" + "Zww wa 1\n" + "fFx fo 1\n" + "bDh th 1\n" + "qKx qu 1\n" + "wHx wa 1\n" + "hrX th 1\n" + "rFh th 1\n" + "lLx le 1\n" + "aYj an 1\n" + "kCs sz 1\n" + "lWt th 1\n" + "pdY de 1\n" + "swI sz 1\n" + "bLw wa 1\n" + "Mzx sz 1\n" + "cKk ch 1\n" + "hMz th 1\n" + "Jcu qu 1\n" + "wjB ij 1\n" + "Mqe qu 1\n" + "rxW er 1\n" + "gZv ng 1\n" + "Rfn an 1\n" + "pwD wa 1\n" + "lhX th 1\n" + "fVg ng 1\n" + "vfW va 1\n" + "lxP le 1\n" + "Yyj ij 1\n" + "hPg th 1\n" + "Uxq qu 1\n" + "bdO de 1\n" + "bRz sz 1\n" + "dXq qu 1\n" + "Rjq qu 1\n" + "fgV ng 1\n" + "xAf fo 1\n" + "wXn an 1\n" + "Kvv va 1\n" + "svL va 1\n" + "fWv va 1\n" + "drQ er 1\n" + "Lpv va 1\n" + "qKp qu 1\n" + "eCv er 1\n" + "xwH wa 1\n" + "cvC ch 1\n" + "kUf ka 1\n" + "oPx on 1\n" + "tjJ th 1\n" + "bBk ka 1\n" + "vpI va 1\n" + "gzY ng 1\n" + "oZs on 1\n" + "pKc ch 1\n" + "xKs sz 1\n" + "qcH qu 1\n" + "Vfm me 1\n" + "svM va 1\n" + "Vjx ij 1\n" + "lVw le 1\n" + "wWf wa 1\n" + "Xpx pr 1\n" + "lcA ch 1\n" + "tLc th 1\n" + "lDg ng 1\n" + "Xjh th 1\n" + "Xdh th 1\n" + "rKm er 1\n" + "fnW an 1\n" + "Tcb ch 1\n" + "qgX qu 1\n" + "qZo qu 1\n" + "eJv er 1\n" + "Yxy ny 1\n" + "kfM ka 1\n" + "qKe qu 1\n" + "vMf va 1\n" 
+ "dgY de 1\n" + "gGd ng 1\n" + "Vcj ch 1\n" + "Sfw wa 1\n" + "xDk ka 1\n" + "fTc ch 1\n" + "qRw qu 1\n" + "tOa th 1\n" + "guQ qu 1\n" + "mgJ ng 1\n" + "bRd de 1\n" + "kYq qu 1\n" + "xwD wa 1\n" + "vXs va 1\n" + "zlC le 1\n" + "kmH ka 1\n" + "jhZ th 1\n" + "Wxo on 1\n" + "vtX th 1\n" + "iWm in 1\n" + "qVx qu 1\n" + "Hjv va 1\n" + "Pxs sz 1\n" + "bYi in 1\n" + "wgG ng 1\n" + "Jvs va 1\n" + "gHh th 1\n" + "Kzy sz 1\n" + "xjI ij 1\n" + "uVb qu 1\n" + "Pzq qu 1\n" + "hxC th 1\n" + "wPy wa 1\n" + "bXh th 1\n" + "jzY sz 1\n" + "fqJ qu 1\n" + "qxX qu 1\n" + "vfB va 1\n" + "pPm me 1\n" + "bpC pr 1\n" + "hFv th 1\n" + "Cql qu 1\n" + "dwI de 1\n" + "Tcq ch 1\n" + "Zjx ij 1\n" + "wOz sz 1\n" + "Jfj ij 1\n" + "iZr in 1\n" + "Vxf fo 1\n" + "Lpx pr 1\n" + "fHt th 1\n" + "hFy th 1\n" + "lcD ch 1\n" + "vMc ch 1\n" + "xyU ny 1\n" + "mGq qu 1\n" + "wJv va 1\n" + "zKs sz 1\n" + "lMm le 1\n" + "mqU qu 1\n" + "vHg ng 1\n" + "lGc ch 1\n" + "eIj te 1\n" + "Vdh th 1\n" + "rCk er 1\n" + "wQh th 1\n" + "Ywf wa 1\n" + "zUf sz 1\n" + "qZs qu 1\n" + "vNt th 1\n" + "Dxj ij 1\n" + "cYr ch 1\n" + "dKt th 1\n" + "vDp va 1\n" + "qnF an 1\n" + "Lsj sz 1\n" + "xHv va 1\n" + "jCt th 1\n" + "bnX an 1\n" + "fBx fo 1\n" + "jVt th 1\n" + "qOy qu 1\n" + "uqD qu 1\n" + "Rfw wa 1\n" + "cjS ch 1\n" + "ufX qu 1\n" + "fvI va 1\n" + "Owx wa 1\n" + "gXw ng 1\n" + "oCv va 1\n" + "Mrx er 1\n" + "cIb ch 1\n" + "fJj ij 1\n" + "kqM qu 1\n" + "zqL qu 1\n" + "rPz er 1\n" + "iwW in 1\n" + "cMp ch 1\n" + "lVt th 1\n" + "vTb va 1\n" + "Iwf wa 1\n" + "xlZ le 1\n" + "vjQ va 1\n" + "iPb in 1\n" + "Whk th 1\n" + "Wvh th 1\n" + "mzD sz 1\n" + "Hqk qu 1\n" + "jqB qu 1\n" + "qhM th 1\n" + "prR er 1\n" + "nlV an 1\n" + "qYk qu 1\n" + "zVp sz 1\n" + "vpO va 1\n" + "Rvr er 1\n" + "scY ch 1\n" + "qdA qu 1\n" + "vLk va 1\n" + "svI va 1\n" + "mdE de 1\n" + "hBx th 1\n" + "Zrv er 1\n" + "jWt th 1\n" + "fTx fo 1\n" + "Ypc ch 1\n" + "mMk ka 1\n" + "fdq qu 1\n" + "hcK th 1\n" + "xCy ny 1\n" + "fVr er 1\n" + "aPx an 1\n" + "fpU pr 1\n" + 
"Vkb ka 1\n" + "tbM th 1\n" + "zQt th 1\n" + "gxV ng 1\n" + "Sfg ng 1\n" + "pYl le 1\n" + "gWt th 1\n" + "xEb be 1\n" + "mXy me 1\n" + "lnQ an 1\n" + "qmL qu 1\n" + "Vky ka 1\n" + "wwX wa 1\n" + "Uwx wa 1\n" + "cfB ch 1\n" + "Gxp pr 1\n" + "fpL pr 1\n" + "jTx ij 1\n" + "cZv ch 1\n" + "zlK le 1\n" + "hBc th 1\n" + "Wqi qu 1\n" + "lGs le 1\n" + "Dqz qu 1\n" + "Jgw ng 1\n" + "gCx ng 1\n" + "cNj ch 1\n" + "cqJ ch 1\n" + "blD le 1\n" + "qXr qu 1\n" + "kXr er 1\n" + "khK th 1\n" + "xZh th 1\n" + "jSs sz 1\n" + "yjx ij 1\n" + "Hwf wa 1\n" + "fXs sz 1\n" + "qgz qu 1\n" + "Xdw de 1\n" + "hcN th 1\n" + "jJd de 1\n" + "cmQ ch 1\n" + "mvV va 1\n" + "Nqe qu 1\n" + "zxS sz 1\n" + "kGt th 1\n" + "tFg th 1\n" + "fzM sz 1\n" + "Xrr er 1\n" + "dcJ ch 1\n" + "dQa an 1\n" + "qNy qu 1\n" + "hxT th 1\n" + "twB th 1\n" + "Bqj qu 1\n" + "prK er 1\n" + "zdC de 1\n" + "yAo on 1\n" + "dLt st 1\n" + "pgF ng 1\n" + "vgW ng 1\n" + "vpN va 1\n" + "Ivx va 1\n" + "vYl le 1\n" + "xRg ng 1\n" + "jPu qu 1\n" + "Oqr qu 1\n" + "vjg ng 1\n" + "dpH de 1\n" + "yDp pr 1\n" + "xfJ fo 1\n" + "fqV qu 1\n" + "eBf er 1\n" + "Zkw ka 1\n" + "qHp qu 1\n" + "Aqz qu 1\n" + "bNw wa 1\n" + "fjX ij 1\n" + "fqS qu 1\n" + "ljK le 1\n" + "Gkf ka 1\n" + "bSf be 1\n" + "Mxg ng 1\n" + "Dqm qu 1\n" + "hKp th 1\n" + "wFq qu 1\n" + "wmJ me 1\n" + "vzT va 1\n" + "rhJ th 1\n" + "nHf an 1\n" + "jJo on 1\n" + "qWy qu 1\n" + "Wvk va 1\n" + "gkB ng 1\n" + "mEw me 1\n" + "Ugx ng 1\n" + "Qmy me 1\n" + "Ljq qu 1\n" + "bGp pr 1\n" + "lHg ng 1\n" + "cGg ch 1\n" + "gFk ng 1\n" + "xnV an 1\n" + "eFy er 1\n" + "Nfm me 1\n" + "hSf th 1\n" + "gXj ng 1\n" + "xHf fo 1\n" + "uqj qu 1\n" + "wXa an 1\n" + "vcT ch 1\n" + "uJw qu 1\n" + "pWx pr 1\n" + "qpQ qu 1\n" + "hqE th 1\n" + "Yfn an 1\n" + "jrI er 1\n" + "cgK ch 1\n" + "yyP ny 1\n" + "Zmg ng 1\n" + "Lkc ch 1\n" + "eUq qu 1\n" + "jrY er 1\n" + "kFs sz 1\n" + "sUq qu 1\n" + "jlZ le 1\n" + "cnV ch 1\n" + "aPj an 1\n" + "mjE ij 1\n" + "pZl le 1\n" + "uFs qu 1\n" + "Knf an 1\n" + "Fpc ch 1\n" + "hfR 
th 1\n" + "qnC an 1\n" + "Dlq qu 1\n" + "frM er 1\n" + "sfB sz 1\n" + "Gxk ka 1\n" + "Fkj ij 1\n" + "vGk va 1\n" + "gRm ng 1\n" + "rWf er 1\n" + "rYv er 1\n" + "qEd qu 1\n" + "qHr qu 1\n" + "Smv va 1\n" + "lFp le 1\n" + "kDs sz 1\n" + "dSd de 1\n" + "rLw er 1\n" + "cnZ an 1\n" + "Wjp ij 1\n" + "pTq qu 1\n" + "Kcx ch 1\n" + "vKs va 1\n" + "bcK ch 1\n" + "vwy va 1\n" + "Ujx ij 1\n" + "Qvr er 1\n" + "dcV ch 1\n" + "xVf fo 1\n" + "uIk qu 1\n" + "jlN le 1\n" + "vwL va 1\n" + "fWp pr 1\n" + "Pxr er 1\n" + "rRb er 1\n" + "bfD be 1\n" + "yCx ny 1\n" + "nJs an 1\n" + "dCm de 1\n" + "cbG ch 1\n" + "gCf ng 1\n" + "tmV th 1\n" + "qeC qu 1\n" + "knS an 1\n" + "gwY ng 1\n" + "Wjl le 1\n" + "mIw me 1\n" + "qjW qu 1\n" + "gwv ng 1\n" + "qJw wa 1\n" + "cnA an 1\n" + "bBm me 1\n" + "gFw ng 1\n" + "wDn an 1\n" + "qgL qu 1\n" + "lUa an 1\n" + "hDn th 1\n" + "kHx ka 1\n" + "wXm me 1\n" + "qyY qu 1\n" + "pkD ka 1\n" + "sLz st 1\n" + "zxF sz 1\n" + "vMx va 1\n" + "plR le 1\n" + "pwZ pr 1\n" + "pYd de 1\n" + "zfL sz 1\n" + "ztK th 1\n" + "mTm me 1\n" + "dCp de 1\n" + "bwx wa 1\n" + "xCs sz 1\n" + "tfF th 1\n" + "Lnq an 1\n" + "dYi in 1\n" + "pWq qu 1\n" + "oIx on 1\n" + "ywE wa 1\n" + "wNk ka 1\n" + "jwO ij 1\n" + "xZz sz 1\n" + "wGm me 1\n" + "cVw ch 1\n" + "bjK ij 1\n" + "Gzg ng 1\n" + "kwz sz 1\n" + "pBn an 1\n" + "cTx ch 1\n" + "rHq qu 1\n" + "Wsg ng 1\n" + "xEh th 1\n" + "yrK er 1\n" + "mMb me 1\n" + "pHw pr 1\n" + "cjN ch 1\n" + "nXn an 1\n" + "bwO wa 1\n" + "flB le 1\n" + "Qqj qu 1\n" + "mKv va 1\n" + "fFn an 1\n" + "wfG wa 1\n" + "wfB wa 1\n" + "Jqk qu 1\n" + "bwK wa 1\n" + "hhI th 1\n" + "lUe er 1\n" + "wFd de 1\n" + "vkT va 1\n" + "xLg ng 1\n" + "fhB th 1\n" + "wmV me 1\n" + "tmF th 1\n" + "Rtc th 1\n" + "dyY de 1\n" + "jyw ij 1\n" + "kRf ka 1\n" + "fXz sz 1\n" + "Znz an 1\n" + "wqX qu 1\n" + "uMx qu 1\n" + "gwV ng 1\n" + "Pbh th 1\n" + "dcM ch 1\n" + "nPz an 1\n" + "cwU ch 1\n" + "vJt th 1\n" + "gyQ ng 1\n" + "fXi in 1\n" + "bsZ sz 1\n" + "Bqi qu 1\n" + "vGn an 1\n" + "knN an 
1\n" + "wYq qu 1\n" + "tTb th 1\n" + "bmP me 1\n" + "jpZ ij 1\n" + "Mqw qu 1\n" + "vjM va 1\n" + "qVh th 1\n" + "juY qu 1\n" + "rBk er 1\n" + "juI qu 1\n" + "zEq qu 1\n" + "zWg ng 1\n" + "fzH sz 1\n" + "tLx th 1\n" + "Ncf ch 1\n" + "kfN ka 1\n" + "uUo qu 1\n" + "fCs sz 1\n" + "tCv th 1\n" + "sUy sz 1\n" + "pBf pr 1\n" + "jBz sz 1\n" + "vDc ch 1\n" + "qmx qu 1\n" + "qtK th 1\n" + "qcS ch 1\n" + "vPt th 1\n" + "gQm ng 1\n" + "hzR th 1\n" + "dcL ch 1\n" + "xrI er 1\n" + "dvN va 1\n" + "Cwv va 1\n" + "xhQ th 1\n" + "Gzu qu 1\n" + "pdO de 1\n" + "Bqr qu 1\n" + "vLn an 1\n" + "lxf le 1\n" + "vYk va 1\n" + "wSq qu 1\n" + "pkS ka 1\n" + "zKg ng 1\n" + "tPm th 1\n" + "Pmj ij 1\n" + "lWu qu 1\n" + "Xuu qu 1\n" + "jcX ch 1\n" + "xzQ sz 1\n" + "Gzw sz 1\n" + "ePm er 1\n" + "fwW wa 1\n" + "qwA qu 1\n" + "vQt th 1\n" + "bxP be 1\n" + "dmD de 1\n" + "awQ an 1\n" + "fVf fo 1\n" + "bwY wa 1\n" + "Zxt th 1\n" + "Xhk th 1\n" + "gYk ng 1\n" + "zCf sz 1\n" + "yfQ ny 1\n" + "zGw sz 1\n" + "gvE ng 1\n" + "gCv ng 1\n" + "oPf on 1\n" + "zXi in 1\n" + "hvI th 1\n" + "hzS th 1\n" + "mfX me 1\n" + "dPd de 1\n" + "Lrf er 1\n" + "lrG er 1\n" + "mYf me 1\n" + "hNj th 1\n" + "qAj qu 1\n" + "sxQ st 1\n" + "kTl le 1\n" + "qOf qu 1\n" + "Jdx de 1\n" + "swK sz 1\n" + "jQb ij 1\n" + "Dqp qu 1\n" + "cWv ch 1\n" + "dxE de 1\n" + "sXj sz 1\n" + "nvB an 1\n" + "wXf wa 1\n" + "Cqi qu 1\n" + "bzW sz 1\n" + "rRf er 1\n" + "mZj ij 1\n" + "bnF an 1\n" + "qaG an 1\n" + "Bqs qu 1\n" + "lMn an 1\n" + "wHp pr 1\n" + "Ljc ch 1\n" + "Mwf wa 1\n" + "pzK sz 1\n" + "mPb me 1\n" + "qjE qu 1\n" + "wRr er 1\n" + "xZf fo 1\n" + "nqG an 1\n" + "vVb va 1\n" + "pjC ij 1\n" + "uHl qu 1\n" + "jDn an 1\n" + "pqX qu 1\n" + "pqk qu 1\n" + "xgU ng 1\n" + "wJx wa 1\n" + "znK an 1\n" + "rhB th 1\n" + "vDq qu 1\n" + "sJc ch 1\n" + "Xkh th 1\n" + "lnJ an 1\n" + "bRq qu 1\n" + "fzA sz 1\n" + "bQe er 1\n" + "Txw wa 1\n" + "bkG ka 1\n" + "ywZ wa 1\n" + "zWc ch 1\n" + "lhL th 1\n" + "gmF ng 1\n" + "sfQ sz 1\n" + "zmG sz 1\n" + "Ogz ng 1\n" 
+ "xuA qu 1\n" + "qAq qu 1\n" + "zDw sz 1\n" + "lVu qu 1\n" + "xRw wa 1\n" + "xmM me 1\n" + "pxB pr 1\n" + "ztT th 1\n" + "kzJ sz 1\n" + "nFz an 1\n" + "uVz qu 1\n" + "pnQ an 1\n" + "pGt th 1\n" + "Xdn an 1\n" + "fVz sz 1\n" + "Mhg th 1\n" + "Xqo qu 1\n" + "sHq qu 1\n" + "jwC ij 1\n" + "vkG va 1\n" + "Xkx ka 1\n" + "tRg th 1\n" + "nvV an 1\n" + "qwG qu 1\n" + "Vhh th 1\n" + "zwO sz 1\n" + "qQb qu 1\n" + "crR ch 1\n" + "Mrq qu 1\n" + "oQe er 1\n" + "mBt th 1\n" + "vUy va 1\n" + "twW th 1\n" + "Qgn an 1\n" + "Nxu qu 1\n" + "qhF th 1\n" + "xpX pr 1\n" + "fvD va 1\n" + "Cvy va 1\n" + "oHj on 1\n" + "Qqo qu 1\n" + "vYd de 1\n" + "xhV th 1\n" + "fZf fo 1\n" + "yKm me 1\n" + "xYq qu 1\n" + "fcU ch 1\n" + "qEp qu 1\n" + "jXd de 1\n" + "mlQ le 1\n" + "Ggz ng 1\n" + "cLp ch 1\n" + "yxU ny 1\n" + "gvJ ng 1\n" + "wqD qu 1\n" + "vsN sz 1\n" + "Ijf ij 1\n" + "jbJ ij 1\n" + "bMx be 1\n" + "kXs sz 1\n" + "grT ng 1\n" + "wOd de 1\n" + "pGw pr 1\n" + "Gkd de 1\n" + "qCj qu 1\n" + "hqY th 1\n" + "rDp er 1\n" + "nQt th 1\n" + "kdV de 1\n" + "bgS ng 1\n" + "Tqo qu 1\n" + "fEj ij 1\n" + "hZs th 1\n" + "jYn an 1\n" + "bPx be 1\n" + "hgY th 1\n" + "Pvy va 1\n" + "fxK fo 1\n" + "Hww wa 1\n" + "xRk ka 1\n" + "dmP de 1\n" + "mcY ch 1\n" + "bxR be 1\n" + "Lsl le 1\n" + "hRl th 1\n" + "iwQ in 1\n" + "Wqx qu 1\n" + "kfV ka 1\n" + "qwN qu 1\n" + "Qpv va 1\n" + "mrO er 1\n" + "iFc ti 1\n" + "wzD sz 1\n" + "qbF qu 1\n" + "xfS fo 1\n" + "Pqh th 1\n" + "xYb be 1\n" + "lDh th 1\n" + "vtG th 1\n" + "Xzu qu 1\n" + "xjK ij 1\n" + "jDx ij 1\n" + "nCj an 1\n" + "mCk ka 1\n" + "qxP qu 1\n" + "oMv on 1\n" + "cgY ch 1\n" + "Wqt th 1\n" + "kkQ ka 1\n" + "tqO th 1\n" + "jnC an 1\n" + "fGq qu 1\n" + "Bfv va 1\n" + "vYi in 1\n" + "pcL ch 1\n" + "Fgp ng 1\n" + "jtR th 1\n" + "vhF th 1\n" + "wUi in 1\n" + "nNj an 1\n" + "jTw ij 1\n" + "qsM qu 1\n" + "aJg an 1\n" + "jQe er 1\n" + "Gnj an 1\n" + "fmM me 1\n" + "zqM qu 1\n" + "gjZ ng 1\n" + "nxH an 1\n" + "cdO ch 1\n" + "aAx an 1\n" + "tUv th 1\n" + "hXk th 1\n" + 
"qBx qu 1\n" + "tgK th 1\n" + "fZy ny 1\n" + "Jkx ka 1\n" + "pvD va 1\n" + "bmT me 1\n" + "oYx on 1\n" + "hwV th 1\n" + "mjB ij 1\n" + "bYn an 1\n" + "iHx in 1\n" + "lYh th 1\n" + "qCi in 1\n" + "fhR th 1\n" + "nDf an 1\n" + "hCd th 1\n" + "lxB le 1\n" + "eXj er 1\n" + "fvW va 1\n" + "ccW ch 1\n" + "dTc ch 1\n" + "sqA qu 1\n" + "fNt th 1\n" + "zkM sz 1\n" + "lRv le 1\n" + "qnI an 1\n" + "xwC wa 1\n" + "zqY qu 1\n" + "yQb be 1\n" + "xrC er 1\n" + "xFm me 1\n" + "oeQ er 1\n" + "mLl le 1\n" + "jwT ij 1\n" + "fwD wa 1\n" + "vpE va 1\n" + "flY le 1\n" + "sRg ng 1\n" + "vSd de 1\n" + "wuR qu 1\n" + "wrI er 1\n" + "Ysn st 1\n" + "Vhj th 1\n" + "Cqh th 1\n" + "Ygb ng 1\n" + "hPq th 1\n" + "mkB ka 1\n" + "tRq th 1\n" + "ajQ an 1\n" + "hcR th 1\n" + "vDw va 1\n" + "pQn an 1\n" + "xeU er 1\n" + "vcM ch 1\n" + "zVc ch 1\n" + "bRh th 1\n" + "uFx qu 1\n" + "fbW be 1\n" + "uUv qu 1\n" + "Nhv th 1\n" + "Ykx ka 1\n" + "Wtp th 1\n" + "Mzj sz 1\n" + "npT in 1\n" + "Xqk qu 1\n" + "xwN wa 1\n" + "hXw th 1\n" + "zLb sz 1\n" + "Gxy ny 1\n" + "dDq qu 1\n" + "Bfy ny 1\n" + "fkx ka 1\n" + "jOq qu 1\n" + "Ddk de 1\n" + "Njp ij 1\n" + "xjJ ij 1\n" + "qhS th 1\n" + "Qwm me 1\n" + "yWj ij 1\n" + "nFv an 1\n" + "pLb pr 1\n" + "qbB qu 1\n" + "smX sz 1\n" + "tnZ th 1\n" + "zQh th 1\n" + "Fzb sz 1\n" + "cNb ch 1\n" + "hpV th 1\n" + "Bxz sz 1\n" + "xgG ng 1\n" + "Rlj le 1\n" + "iHq in 1\n" + "swN sz 1\n" + "Njv va 1\n" + "wPk ka 1\n" + "oRv on 1\n" + "pJs sz 1\n" + "kZw ka 1\n" + "vVs st 1\n" + "Vbw wa 1\n" + "Ffh th 1\n" + "mzQ sz 1\n" + "Gvl le 1\n" + "Pgq qu 1\n" + "lPp le 1\n" + "vCv va 1\n" + "kNf ka 1\n" + "bmD me 1\n" + "mWt th 1\n" + "slF le 1\n" + "qiX in 1\n" + "yRt th 1\n" + "lqx qu 1\n" + "qlj qu 1\n" + "sfZ sz 1\n" + "Wfy ny 1\n" + "vrO er 1\n" + "gxT ng 1\n" + "lwE le 1\n" + "qdJ qu 1\n" + "Ypk ka 1\n" + "Qpf pr 1\n" + "Znw an 1\n" + "bfJ be 1\n" + "qQy qu 1\n" + "qAy qu 1\n" + "aqW an 1\n" + "qqI qu 1\n" + "Lwg ng 1\n" + "Nnw an 1\n" + "cLv ch 1\n" + "Wtx th 1\n" + "qcq ch 1\n" + "sjR 
sz 1\n" + "lWn an 1\n" + "Zmx me 1\n" + "qZg qu 1\n" + "tYz th 1\n" + "gVx ng 1\n" + "mXt th 1\n" + "nwJ an 1\n" + "jwZ ij 1\n" + "lwL le 1\n" + "eGx er 1\n" + "Sqk qu 1\n" + "gBg ng 1\n" + "zsS sz 1\n" + "knQ an 1\n" + "Nnf an 1\n" + "qmT qu 1\n" + "Sqp qu 1\n" + "ffQ fo 1\n" + "Vcv ch 1\n" + "fmD me 1\n" + "zYg ng 1\n" + "bAx be 1\n" + "nbW an 1\n" + "gJm ng 1\n" + "Jwn an 1\n" + "mxJ me 1\n" + "xbC be 1\n" + "Rbq qu 1\n" + "xZc ch 1\n" + "bJy be 1\n" + "Xyk ka 1\n" + "zkV sz 1\n" + "uoF qu 1\n" + "bcU ch 1\n" + "cZq ch 1\n" + "rPm er 1\n" + "rGn an 1\n" + "lcL ch 1\n" + "rVt th 1\n" + "Cgw ng 1\n" + "Ctq th 1\n" + "eGv er 1\n" + "Rzs st 1\n" + "Qhz th 1\n" + "sLv va 1\n" + "Vqm qu 1\n" + "ydJ de 1\n" + "xVr er 1\n" + "tLk th 1\n" + "qfy qu 1\n" + "wxV wa 1\n" + "yRq qu 1\n" + "Vxq qu 1\n" + "qYz qu 1\n" + "zhM th 1\n" + "mLn an 1\n" + "Zvt th 1\n" + "Fvm va 1\n" + "hcM th 1\n" + "Mwp wa 1\n" + "cTg ch 1\n" + "lXr er 1\n" + "fQe er 1\n" + "Jbw wa 1\n" + "yfG ny 1\n" + "phK th 1\n" + "gjH ng 1\n" + "Wdg de 1\n" + "pPn an 1\n" + "Bwg ng 1\n" + "znB an 1\n" + "fwJ wa 1\n" + "utQ th 1\n" + "cjC ch 1\n" + "fVd de 1\n" + "cTm ch 1\n" + "wMv va 1\n" + "Kgk ng 1\n" + "nRd an 1\n" + "mMt th 1\n" + "xjQ ij 1\n" + "qYt th 1\n" + "sYj st 1\n" + "jNc ch 1\n" + "qXt th 1\n" + "wzB sz 1\n" + "Sjq qu 1\n" + "qtF th 1\n" + "wYi in 1\n" + "glT ng 1\n" + "Uug ng 1\n" + "uOp qu 1\n" + "iBx in 1\n" + "Rqt th 1\n" + "zWj sz 1\n" + "Hcx ch 1\n" + "jNd de 1\n" + "zQr er 1\n" + "iHd in 1\n" + "Wpx pr 1\n" + "nfY an 1\n" + "Rkz sz 1\n" + "Kqg qu 1\n" + "Gfv va 1\n" + "krC er 1\n" + "Whc th 1\n" + "ljM le 1\n" + "yxG ny 1\n" + "fpW pr 1\n" + "bcF ch 1\n" + "krx er 1\n" + "uDt th 1\n" + "Fzo on 1\n" + "wPn an 1\n" + "Lfj ij 1\n" + "Bkp ka 1\n" + "Xkq qu 1\n" + "jxH ij 1\n" + "vIj va 1\n" + "gTc ch 1\n" + "hEj th 1\n" + "fqB qu 1\n" + "jlD le 1\n" + "tFf th 1\n" + "Nfw wa 1\n" + "Fqe qu 1\n" + "Tzp sz 1\n" + "sJr er 1\n" + "qIt th 1\n" + "dFb de 1\n" + "qzE qu 1\n" + "mVv va 1\n" + "Vqa an 
1\n" + "bqM qu 1\n" + "mdJ de 1\n" + "dIp de 1\n" + "Znx an 1\n" + "jkK ij 1\n" + "rfQ er 1\n" + "xkI ku 1\n" + "fIo ro 1\n" + "lqV qu 1\n" + "Qpd de 1\n" + "pAx pr 1\n" + "rrQ er 1\n" + "bIu qu 1\n" + "xDw wa 1\n" + "oHx on 1\n" + "wJw wa 1\n" + "Cqv qu 1\n" + "yvB va 1\n" + "yqU qu 1\n" + "rLx er 1\n" + "Fzx sz 1\n" + "dZf de 1\n" + "Nqh th 1\n" + "Rnz an 1\n" + "hTc th 1\n" + "bVb be 1\n" + "Fdm de 1\n" + "vfv va 1\n" + "hwS th 1\n" + "zPt th 1\n" + "Gxv va 1\n" + "Fvt th 1\n" + "mZr er 1\n" + "zVr er 1\n" + "mBc ch 1\n" + "fXq qu 1\n" + "Plw le 1\n" + "Nlx le 1\n" + "jCd de 1\n" + "Kwv va 1\n" + "Jqa an 1\n" + "zGs st 1\n" + "fuV qu 1\n" + "pzL sz 1\n" + "iFx in 1\n" + "fTm me 1\n" + "yWd de 1\n" + "cHv ch 1\n" + "fFk ka 1\n" + "mqd qu 1\n" + "aQk an 1\n" + "uDf qu 1\n" + "Vbf be 1\n" + "pgJ ng 1\n" + "fkN ka 1\n" + "pBm me 1\n" + "Bdv de 1\n" + "jmW ij 1\n" + "Jvv va 1\n" + "Xpk ka 1\n" + "qQc ch 1\n" + "kdG de 1\n" + "qkP qu 1\n" + "cSd ch 1\n" + "Fdc ch 1\n" + "qgK qu 1\n" + "qdH qu 1\n" + "uNv qu 1\n" + "eVt th 1\n" + "dfA de 1\n" + "Hzy sz 1\n" + "lWc ch 1\n" + "vxH va 1\n" + "hxW th 1\n" + "Khp th 1\n" + "xQb be 1\n" + "pwT pr 1\n" + "Lwf wa 1\n" + "zDq qu 1\n" + "kxK ka 1\n" + "mtY th 1\n" + "bhT th 1\n" + "ywR wa 1\n" + "jIa an 1\n" + "Wze er 1\n" + "hqK th 1\n" + "flZ le 1\n" + "qMi in 1\n" + "wpR wa 1\n" + "qHh th 1\n" + "aOw an 1\n" + "dkU de 1\n" + "vRr er 1\n" + "vjX va 1\n" + "cuQ ch 1\n" + "qmJ qu 1\n" + "uuJ ou 1\n" + "yWx ny 1\n" + "hUf th 1\n" + "vzP va 1\n" + "rSx er 1\n" + "qgy qu 1\n" + "Rzf sz 1\n" + "zjB sz 1\n" + "Sjx ij 1\n" + "xfA fo 1\n" + "fHj ij 1\n" + "qkB qu 1\n" + "cdF ch 1\n" + "fWj ij 1\n" + "jbA ij 1\n" + "Bmb me 1\n" + "yjg ng 1\n" + "rxZ er 1\n" + "Vmr er 1\n" + "iIq in 1\n" + "Wgl ng 1\n" + "mRp me 1\n" + "wvS va 1\n" + "Uvy va 1\n" + "ypQ pr 1\n" + "vFw vo 1\n" + "fqE qu 1\n" + "swJ st 1\n" + "Jrx er 1\n" + "cxE ch 1\n" + "lZk le 1\n" + "fVn an 1\n" + "bhZ th 1\n" + "jhR th 1\n" + "vSq qu 1\n" + "yQz sz 1\n" + "fHv va 1\n" 
+ "vuN qu 1\n" + "jpG ij 1\n" + "Pkz sz 1\n" + "gQb ng 1\n" + "pFs st 1\n" + "Gjq qu 1\n" + "hsK th 1\n" + "twx th 1\n" + "yyQ ny 1\n" + "dqF qu 1\n" + "bHh th 1\n" + "qMq qu 1\n" + "qKv qu 1\n" + "zLg ng 1\n" + "jmO ij 1\n" + "wBk ka 1\n" + "pjQ ij 1\n" + "xZv va 1\n" + "qIu un 1\n" + "ycY ch 1\n" + "mDf me 1\n" + "yJs st 1\n" + "Isx st 1\n" + "Qqr qu 1\n" + "Fkw ka 1\n" + "Cpj ij 1\n" + "Yvq qu 1\n" + "zjG sz 1\n" + "gGc ch 1\n" + "Xdm de 1\n" + "hBv th 1\n" + "Wxj ij 1\n" + "Ywb ow 1\n" + "Vtq th 1\n" + "tjY th 1\n" + "jDj ij 1\n" + "uGd qu 1\n" + "wvF va 1\n" + "uqg qu 1\n" + "Rwp pr 1\n" + "Bgb ng 1\n" + "mnU an 1\n" + "dpI de 1\n" + "wKd de 1\n" + "yXz sz 1\n" + "kLd de 1\n" + "gYx ng 1\n" + "qxk qu 1\n" + "Hhy th 1\n" + "fpJ pr 1\n" + "cVc ch 1\n" + "kVv va 1\n" + "Jzs st 1\n" + "nDw an 1\n" + "tjF th 1\n" + "bZj ij 1\n" + "mqL qu 1\n" + "hFt th 1\n" + "nNw an 1\n" + "wFv va 1\n" + "gHc ch 1\n" + "qRx qu 1\n" + "Jxh th 1\n" + "Vpv va 1\n" + "nMk an 1\n" + "tjN th 1\n" + "fhQ th 1\n" + "bpD pr 1\n" + "Dfg ng 1\n" + "jyO ij 1\n" + "jhV th 1\n" + "kVk ka 1\n" + "nKc an 1\n" + "jkJ ij 1\n" + "cwS ch 1\n" + "oDf on 1\n" + "mkY ka 1\n" + "gdV ng 1\n" + "Xhb th 1\n" + "jUq qu 1\n" + "aJf an 1\n" + "Qxg ng 1\n" + "xzS sz 1\n" + "vUw va 1\n" + "hTj th 1\n" + "oVt th 1\n" + "zdq qu 1\n" + "fHs st 1\n" + "xKk ka 1\n" + "bFc ch 1\n" + "gWq qu 1\n" + "Yqa an 1\n" + "dmH de 1\n" + "Ttq th 1\n" + "iQc ch 1\n" + "jFh ij 1\n" + "fcY ch 1\n" + "fsR st 1\n" + "iWg in 1\n" + "Xyj ij 1\n" + "Xjs st 1\n" + "xpb pr 1\n" + "lzY le 1\n" + "pzg ng 1\n" + "dVw de 1\n" + "Ijc ch 1\n" + "fvq qu 1\n" + "Vnb an 1\n" + "zdH de 1\n" + "cDd ch 1\n" + "wqI qu 1\n" + "yfU ny 1\n" + "qoH qu 1\n" + "xkw ka 1\n" + "Kck ch 1\n" + "mUq qu 1\n" + "zWm sz 1\n" + "Bfj ij 1\n" + "rQj er 1\n" + "qeW qu 1\n" + "qpC qu 1\n" + "oqM qu 1\n" + "pzO sz 1\n" + "cjQ ch 1\n" + "zTx sz 1\n" + "gRw ng 1\n" + "kdQ de 1\n" + "wbQ wa 1\n" + "Qpj ij 1\n" + "zIc ch 1\n" + "yxN ny 1\n" + "nCk an 1\n" + "Jqz qu 1\n" + 
"dEq qu 1\n" + "gdE ng 1\n" + "wCg ng 1\n" + "pQt th 1\n" + "vKe er 1\n" + "Tjm ij 1\n" + "Zcy ch 1\n" + "kmR ka 1\n" + "cTp ch 1\n" + "bqE qu 1\n" + "vvZ va 1\n" + "cLw ch 1\n" + "oIw on 1\n" + "xjG ij 1\n" + "vtU th 1\n" + "hcH th 1\n" + "xgT ng 1\n" + "vqR qu 1\n" + "wuM qu 1\n" + "xsY st 1\n" + "jCu qu 1\n" + "Fbn an 1\n" + "cqH ch 1\n" + "Xjz ij 1\n" + "fgR ng 1\n" + "yiX in 1\n" + "qnO an 1\n" + "wmN me 1\n" + "wgH ng 1\n" + "tbZ th 1\n" + "Xks st 1\n" + "pzC po 1\n" + "lfX le 1\n" + "qBu un 1\n" + "mLw me 1\n" + "pmY me 1\n" + "xqE qu 1\n" + "rjY er 1\n" + "vrH er 1\n" + "Iuf qu 1\n" + "yfD ny 1\n" + "clG ch 1\n" + "cdZ ch 1\n" + "eTd er 1\n" + "lXv le 1\n" + "kpV ka 1\n" + "sZq qu 1\n" + "Wxc ch 1\n" + "vmJ va 1\n" + "hkE th 1\n" + "pUw pr 1\n" + "Cqd qu 1\n" + "wCn an 1\n" + "pxQ pr 1\n" + "Ywp pr 1\n" + "xwb wa 1\n" + "Wjm ij 1\n" + "zqQ qu 1\n" + "gTp ng 1\n" + "uZv qu 1\n" + "mdH de 1\n" + "juQ qu 1\n" + "gVm ng 1\n" + "zjY ij 1\n" + "fhN th 1\n" + "wfD wa 1\n" + "Zjc ch 1\n" + "iPv in 1\n" + "mzW sz 1\n" + "vXm va 1\n" + "fEq qu 1\n" + "Ozq qu 1\n" + "gEp ng 1\n" + "kDj ij 1\n" + "Zlw le 1\n" + "zbR sz 1\n" + "zCt th 1\n" + "woY on 1\n" + "pkT ka 1\n" + "kbI ka 1\n" + "hdW de 1\n" + "Hsx st 1\n" + "zpX sz 1\n" + "zfV sz 1\n" + "Dhk th 1\n" + "wMp pr 1\n" + "hzJ th 1\n" + "Lwp pr 1\n" + "zmN sz 1\n" + "xfq qu 1\n" + "sjQ sz 1\n" + "zkK sz 1\n" + "bBv va 1\n" + "bdE de 1\n" + "Qxn an 1\n" + "jqt th 1\n" + "jhG th 1\n" + "fYv va 1\n" + "xhE th 1\n" + "cbF ch 1\n" + "Jnb an 1\n" + "jxN ij 1\n" + "fYx fo 1\n" + "hJp th 1\n" + "cRt th 1\n" + "qnS an 1\n" + "vLp va 1\n" + "cBd ch 1\n" + "qqU qu 1\n" + "Sdd de 1\n" + "xeZ er 1\n" + "Jwo on 1\n" + "dPf de 1\n" + "fNl le 1\n" + "kIb ka 1\n" + "cbL ch 1\n" + "Qdr er 1\n" + "Mfb be 1\n" + "jJl le 1\n" + "mxY me 1\n" + "lFd le 1\n" + "twT th 1\n" + "kFk ka 1\n" + "crB ch 1\n" + "jRr er 1\n" + "Htz th 1\n" + "pYf pr 1\n" + "rVc er 1\n" + "vRf va 1\n" + "wVq qu 1\n" + "zpA sz 1\n" + "glY le 1\n" + "sNj ij 1\n" + "vKx 
va 1\n" + "tvB th 1\n" + "Yjf ij 1\n" + "mwP me 1\n" + "Jyb be 1\n" + "tBc th 1\n" + "gSb ng 1\n" + "cMl ch 1\n" + "gjJ ng 1\n" + "dYz de 1\n" + "zPg ng 1\n" + "kqB qu 1\n" + "sFv st 1\n" + "xkH ka 1\n" + "fZt th 1\n" + "yhR th 1\n" + "bwN wa 1\n" + "qjG qu 1\n" + "nQm an 1\n" + "qMr qu 1\n" + "jcW ch 1\n" + "qJv qu 1\n" + "gTm ng 1\n" + "kmQ ka 1\n" + "Wlc ch 1\n" + "kYf ka 1\n" + "eJp er 1\n" + "Tkb ka 1\n" + "hfM th 1\n" + "nxY an 1\n" + "pDl le 1\n" + "wcN ch 1\n" + "pQa an 1\n" + "ohZ th 1\n" + "xRz sz 1\n" + "lbV le 1\n" + "lKc ch 1\n" + "wxB wa 1\n" + "Lww wa 1\n" + "fqQ qu 1\n" + "kkZ ka 1\n" + "iwO in 1\n" + "dgU ng 1\n" + "dvO de 1\n" + "pDt th 1\n" + "kvK ka 1\n" + "jlV le 1\n" + "xXd de 1\n" + "ykF ku 1\n" + "iyT in 1\n" + "Ufx fo 1\n" + "nzU an 1\n" + "xbH bu 1\n" + "lSb le 1\n" + "Xpf pr 1\n" + "Uvf va 1\n" + "yyF ny 1\n" + "fxP fo 1\n" + "jYu qu 1\n" + "qjb qu 1\n" + "gxL ng 1\n" + "pwI pr 1\n" + "jUe er 1\n" + "rFc ch 1\n" + "fsF st 1\n" + "cdW ch 1\n" + "Xwp pr 1\n" + "xdH de 1\n" + "jYs ij 1\n" + "bFd de 1\n" + "qIh th 1\n" + "yIg ng 1\n" + "vTd de 1\n" + "wfE wa 1\n" + "qRb qu 1\n" + "yhK th 1\n" + "kMn an 1\n" + "cpB ch 1\n" + "txN th 1\n" + "kPd de 1\n" + "nbB an 1\n" + "skQ st 1\n" + "uKw qu 1\n" + "wQf wa 1\n" + "kWf ka 1\n" + "wqA qu 1\n" + "cwA ch 1\n" + "vJk ka 1\n" + "hcD th 1\n" + "nfK an 1\n" + "uXf qu 1\n" + "cgA ch 1\n" + "Pjd de 1\n" + "Lqs qu 1\n" + "zwC sz 1\n" + "ljN le 1\n" + "vkP ka 1\n" + "Rqp qu 1\n" + "zGx sz 1\n" + "jPg ng 1\n" + "kbT ka 1\n" + "kpQ ka 1\n" + "Mzq qu 1\n" + "Gjs st 1\n" + "kDl le 1\n" + "jwR ij 1\n" + "Wyq qu 1\n" + "qxS qu 1\n" + "qGt th 1\n" + "Wvr er 1\n" + "zNx sz 1\n" + "vCm va 1\n" + "hlD th 1\n" + "vBp va 1\n" + "mJc ch 1\n" + "hFb th 1\n" + "vDm va 1\n" + "pfC pr 1\n" + "Lpy pr 1\n" + "Fhd th 1\n" + "dxS de 1\n" + "wWg ng 1\n" + "Fgn an 1\n" + "nFf an 1\n" + "cxF ch 1\n" + "aVh th 1\n" + "Sqx qu 1\n" + "Vjz ij 1\n" + "znC an 1\n" + "qqv qu 1\n" + "zrZ er 1\n" + "bNl le 1\n" + "nvW an 1\n" + "Qyb be 
1\n" + "Fht th 1\n" + "jGv ij 1\n" + "gLp ng 1\n" + "gLb ng 1\n" + "qKj qu 1\n" + "hJd th 1\n" + "Zjg ng 1\n" + "nQq an 1\n" + "npX an 1\n" + "qiO in 1\n" + "vvG va 1\n" + "jOx ij 1\n" + "hhE th 1\n" + "vdN de 1\n" + "Czz sz 1\n" + "gjU ng 1\n" + "hVb th 1\n" + "Kcg ch 1\n" + "dvH de 1\n" + "wtD th 1\n" + "jIo on 1\n" + "jQa an 1\n" + "Fyj ij 1\n" + "cpU ch 1\n" + "hxY th 1\n" + "qbD qu 1\n" + "svJ st 1\n" + "vjW ij 1\n" + "gpY ng 1\n" + "qnR an 1\n" + "gQn an 1\n" + "Cvh th 1\n" + "ykB ka 1\n" + "xgB ng 1\n" + "zfD sz 1\n" + "yHw wa 1\n" + "qdG qu 1\n" + "qTn an 1\n" + "lTm le 1\n" + "jgB ng 1\n" + "gxS ng 1\n" + "qPe qu 1\n" + "ppQ pr 1\n" + "yxW ny 1\n" + "Hjk ij 1\n" + "kNk ka 1\n" + "cnJ an 1\n" + "uHd qu 1\n" + "jvH ij 1\n" + "Ggn ng 1\n" + "lbS le 1\n" + "Qcx ch 1\n" + "cqR ch 1\n" + "Jyc ch 1\n" + "wRp pr 1\n" + "nfA an 1\n" + "lXw le 1\n" + "cmJ ch 1\n" + "Ysw st 1\n" + "qQs qu 1\n" + "gsX ng 1\n" + "cIq ch 1\n" + "jjZ ij 1\n" + "Llb le 1\n" + "mMv va 1\n" + "lVh th 1\n" + "Fph th 1\n" + "Zmm me 1\n" + "xMd de 1\n" + "Gwb wa 1\n" + "Qjv ij 1\n" + "lqZ qu 1\n" + "zJh th 1\n" + "Wky ka 1\n" + "hDk th 1\n" + "yLg ng 1\n" + "dYw de 1\n" + "dCq qu 1\n" + "Gmj ij 1\n" + "xTq qu 1\n" + "wkF ka 1\n" + "hFp th 1\n" + "qnB an 1\n" + "xyJ ny 1\n" + "nIj an 1\n" + "xYd de 1\n" + "Wqr qu 1\n" + "xqV qu 1\n" + "wYk ka 1\n" + "Qdz de 1\n" + "fbN be 1\n" + "qwY qu 1\n" + "Ubx be 1\n" + "wtL th 1\n" + "nQw an 1\n" + "jJk ij 1\n" + "Nzs st 1\n" + "dCn an 1\n" + "Nfv va 1\n" + "Hgh th 1\n" + "Hcq ch 1\n" + "Xvb va 1\n" + "sxJ st 1\n" + "wMx wa 1\n" + "qFn an 1\n" + "Gzf sz 1\n" + "qfJ qu 1\n" + "zdQ de 1\n" + "Xgz ng 1\n" + "fkI ka 1\n" + "pvK va 1\n" + "Cqr qu 1\n" + "zFd de 1\n" + "oHm on 1\n" + "aJj an 1\n" + "Fzd de 1\n" + "dWk de 1\n" + "wmE me 1\n" + "sMl le 1\n" + "tBp th 1\n" + "vNw va 1\n" + "Qdh th 1\n" + "whG th 1\n" + "qAp qu 1\n" + "jrM er 1\n" + "rHw er 1\n" + "Lvc ch 1\n" + "gRn an 1\n" + "yjV ij 1\n" + "hRk th 1\n" + "bkV ka 1\n" + "jWm ij 1\n" + "yYz sz 1\n" 
+ "vTy va 1\n" + "dxV de 1\n" + "mKy me 1\n" + "Qlq qu 1\n" + "Upx pr 1\n" + "Qpq qu 1\n" + "Lwm me 1\n" + "yXr er 1\n" + "gTk ng 1\n" + "qnT an 1\n" + "Vlq qu 1\n" + "Qqd qu 1\n" + "Zdd de 1\n" + "Xqt th 1\n" + "Dfb be 1\n" + "oeO on 1\n" + "nCx an 1\n" + "lXd le 1\n" + "vHc ch 1\n" + "vAb va 1\n" + "Ybw wa 1\n" + "zDn an 1\n" + "dGk de 1\n" + "plH le 1\n" + "lxG le 1\n" + "Hgp ng 1\n" + "jRz ij 1\n" + "dTs de 1\n" + "mCj ij 1\n" + "lHf le 1\n" + "lLj le 1\n" + "tNb th 1\n" + "mKk ka 1\n" + "gGj ng 1\n" + "jlQ le 1\n" + "Yyg ng 1\n" + "fDv va 1\n" + "zXg ng 1\n" + "qzZ qu 1\n" + "fEg ng 1\n" + "lhS th 1\n" + "mzM sz 1\n" + "xqT qu 1\n" + "Ycj ch 1\n" + "fbF be 1\n" + "Xsj ij 1\n" + "Lnc an 1\n" + "Gqp qu 1\n" + "fjO ij 1\n" + "zhI th 1\n" + "zgH ng 1\n" + "gWc ch 1\n" + "yKf ny 1\n" + "uQd qu 1\n" + "Kwl le 1\n" + "dxG de 1\n" + "Yqw qu 1\n" + "tKc th 1\n" + "cWn an 1\n" + "hcI th 1\n" + "wfY wa 1\n" + "rBp er 1\n" + "cJd ch 1\n" + "sYf sz 1\n" + "Sqj qu 1\n" + "kQv ka 1\n" + "xpF pr 1\n" + "fcX ch 1\n" + "yfK ny 1\n" + "jQo on 1\n" + "gTg ng 1\n" + "Qwn an 1\n" + "Pnx an 1\n" + "yZt th 1\n" + "wPz sz 1\n" + "juX qu 1\n" + "Lxv va 1\n" + "iXr in 1\n" + "pcE ch 1\n" + "Nqy qu 1\n" + "hjI th 1\n" + "hzV th 1\n" + "nmF an 1\n" + "pvW va 1\n" + "eJw er 1\n" + "Iqd qu 1\n" + "gXy ng 1\n" + "wfW wa 1\n" + "Vdw de 1\n" + "qJx qu 1\n" + "Pdq qu 1\n" + "Bjb ij 1\n" + "qLl qu 1\n" + "zdW de 1\n" + "fQr er 1\n" + "xzW sz 1\n" + "vwQ va 1\n" + "rwU er 1\n" + "qPn an 1\n" + "bFw wa 1\n" + "vHl le 1\n" + "hWl th 1\n" + "wgO ng 1\n" + "hLk th 1\n" + "Jkb ka 1\n" + "zBh th 1\n" + "Dhx th 1\n" + "Fgv ng 1\n" + "bpA pr 1\n" + "zxC sz 1\n" + "gfS ng 1\n" + "Mvx va 1\n" + "uPk qu 1\n" + "Vqn an 1\n" + "yqC qu 1\n" + "vMk ka 1\n" + "wqL qu 1\n" + "wrJ er 1\n" + "cdN ch 1\n" + "pwR pr 1\n" + "hMf th 1\n" + "jPf ij 1\n" + "Vbv va 1\n" + "qzF qu 1\n" + "qNc ch 1\n" + "Jbq qu 1\n" + "fTk ka 1\n" + "Zff fo 1\n" + "Fzt th 1\n" + "Kcw ch 1\n" + "eKf er 1\n" + "pqZ qu 1\n" + "Wpb pr 1\n" + 
"jkF ij 1\n" + "Vxp pr 1\n" + "hGq th 1\n" + "qBc ch 1\n" + "fcT ch 1\n" + "jMq qu 1\n" + "kZv ka 1\n" + "qkG qu 1\n" + "Ifp pr 1\n" + "dRw de 1\n" + "Zlj le 1\n" + "Kwj ij 1\n" + "fNb be 1\n" + "dYy de 1\n" + "hZl th 1\n" + "wtP th 1\n" + "hPz th 1\n" + "Ykc ch 1\n" + "Jlw le 1\n" + "jNt th 1\n" + "yrW er 1\n" + "gWd ng 1\n" + "yXd de 1\n" + "fQl le 1\n" + "jfF ij 1\n" + "Ejx ij 1\n" + "fGk ka 1\n" + "Zjz ij 1\n" + "wdM de 1\n" + "jlF le 1\n" + "cxZ ch 1\n" + "Zgk ng 1\n" + "mcJ ch 1\n" + "slE le 1\n" + "nYq an 1\n" + "Wfg ng 1\n" + "zJk ka 1\n" + "bvF va 1\n" + "Hnz an 1\n" + "Wkv ka 1\n" + "Mvq qu 1\n" + "Dxh th 1\n" + "Bvt th 1\n" + "sMj ij 1\n" + "wRf wa 1\n" + "vLb va 1\n" + "zGq qu 1\n" + "mFp me 1\n" + "gNb ng 1\n" + "pCg ng 1\n" + "xFs sz 1\n" + "jKf ij 1\n" + "qJb qu 1\n" + "pzI sz 1\n" + "jgG ng 1\n" + "pKs sz 1\n" + "fqD qu 1\n" + "gxQ ng 1\n" + "fvG va 1\n" + "wgF ng 1\n" + "Xxz sz 1\n" + "Lwu qu 1\n" + "dlX le 1\n" + "lPz le 1\n" + "Wqk qu 1\n" + "Xzj ij 1\n" + "uHj qu 1\n" + "uFj qu 1\n" + "jvV ij 1\n" + "jXe le 1\n" + "Zfm me 1\n" + "qIm qu 1\n" + "zbB sz 1\n" + "yZf ny 1\n" + "sKk sz 1\n" + "zpL sz 1\n" + "qKg qu 1\n" + "Ibj ij 1\n" + "iQb in 1\n" + "Fxu qu 1\n" + "Fpb pr 1\n" + "Wva an 1\n" + "fzD sz 1\n" + "bkT ka 1\n" + "Ykt th 1\n" + "njG an 1\n" + "Uvh th 1\n" + "gfT ng 1\n" + "zcI ch 1\n" + "bDq qu 1\n" + "Jdh th 1\n" + "xMg ng 1\n" + "Jby be 1\n" + "lwJ le 1\n" + "sWw sz 1\n" + "Svw va 1\n" + "nrX an 1\n" + "uvV qu 1\n" + "jVr er 1\n" + "tqB th 1\n" + "bVr er 1\n" + "kQl le 1\n" + "fbG be 1\n" + "rqM qu 1\n" + "zHj ij 1\n" + "fhY th 1\n" + "Yzr er 1\n" + "vFf va 1\n" + "Qpg ng 1\n" + "uAq qu 1\n" + "zxP sz 1\n" + "jCn an 1\n" + "qaM an 1\n" + "xlY le 1\n" + "cTf ch 1\n" + "kBf ka 1\n" + "cQc ch 1\n" + "Rbj ij 1\n" + "kVs sz 1\n" + "bGv va 1\n" + "wdN de 1\n" + "gfN ng 1\n" + "bPj ij 1\n" + "gcI ch 1\n" + "gxj ng 1\n" + "rHb er 1\n" + "pVr er 1\n" + "rVj er 1\n" + "vgS ng 1\n" + "Fqz qu 1\n" + "xMk ka 1\n" + "qQm qu 1\n" + "jZc ch 1\n" + "jBc 
ch 1\n" + "uwY qu 1\n" + "rHf er 1\n" + "czX ch 1\n" + "zcT ch 1\n" + "bFj ij 1\n" + "qcB ch 1\n" + "hfT th 1\n" + "xqO qu 1\n" + "qfp qu 1\n" + "xjU ij 1\n" + "bhR th 1\n" + "tWv th 1\n" + "iqE in 1\n" + "gpU ng 1\n" + "iWb in 1\n" + "tlP th 1\n" + "tYq th 1\n" + "bCv va 1\n" + "oKc ch 1\n" + "Sgj ng 1\n" + "hvq th 1\n" + "kfY ka 1\n" + "zbM sz 1\n" + "zvA sz 1\n" + "cHp ch 1\n" + "vvK va 1\n" + "fpZ pr 1\n" + "dfX de 1\n" + "wrK er 1\n" + "xeE er 1\n" + "fkY ka 1\n" + "sbX sz 1\n" + "fcS ch 1\n" + "vKh th 1\n" + "Qlx le 1\n" + "Zqh th 1\n" + "qWg qu 1\n" + "cdL ch 1\n" + "jvG ij 1\n" + "Mgx ng 1\n" + "gwF ng 1\n" + "kdP de 1\n" + "uMr qu 1\n" + "tcD th 1\n" + "qrL qu 1\n" + "Mtm th 1\n" + "bQz sz 1\n" + "Hpx pr 1\n" + "zpI sz 1\n" + "jkR ij 1\n" + "khH th 1\n" + "mSq qu 1\n" + "pFz sz 1\n" + "juO qu 1\n" + "Xyq qu 1\n" + "jGd de 1\n" + "Yzd de 1\n" + "wbC wa 1\n" + "wSb wa 1\n" + "sZd de 1\n" + "Rzx sz 1\n" + "Flx le 1\n" + "bqC qu 1\n" + "lcH ch 1\n" + "wmG me 1\n" + "zCj ij 1\n" + "xaD an 1\n" + "iwH in 1\n" + "qDp qu 1\n" + "sGx sz 1\n" + "Xhy th 1\n" + "eVc ch 1\n" + "wkJ wa 1\n" + "Lcf ch 1\n" + "lgQ ng 1\n" + "Dhh th 1\n" + "zfO sz 1\n" + "kVc ch 1\n" + "hmL th 1\n" + "Owf wa 1\n" + "wZc ch 1\n" + "dnN an 1\n" + "Mzp sz 1\n" + "mYw me 1\n" + "yLh th 1\n" + "Xxr er 1\n" + "qwI qu 1\n" + "Txs sz 1\n" + "yKp pr 1\n" + "bjX ij 1\n" + "pbS pr 1\n" + "zrP er 1\n" + "hJm th 1\n" + "qgA qu 1\n" + "zwY sz 1\n" + "rXk er 1\n" + "nDx an 1\n" + "vGz sz 1\n" + "mQq qu 1\n" + "upY qu 1\n" + "rLn an 1\n" + "Vfk ka 1\n" + "wCv va 1\n" + "cgx ch 1\n" + "kZq qu 1\n" + "Wjw ij 1\n" + "Qax an 1\n" + "grG ng 1\n" + "bJd de 1\n" + "dJx de 1\n" + "cMd ch 1\n" + "Qcs ch 1\n" + "mkK ka 1\n" + "jNx ij 1\n" + "mrY er 1\n" + "Xwx wa 1\n" + "rZl er 1\n" + "gxU ng 1\n" + "Lnv an 1\n" + "ygC ng 1\n" + "Dqh th 1\n" + "lLn an 1\n" + "mnQ an 1\n" + "kjU ij 1\n" + "bvO va 1\n" + "oVm on 1\n" + "vWt th 1\n" + "rGq qu 1\n" + "tbJ th 1\n" + "fSv va 1\n" + "wJn an 1\n" + "fJv va 1\n" + "oQv on 
1\n" + "Vws sz 1\n" + "pnU an 1\n" + "Nmh th 1\n" + "cTq ch 1\n" + "Edx de 1\n" + "uqw qu 1\n" + "Yrh th 1\n" + "Qnx an 1\n" + "mJf me 1\n" + "kDq qu 1\n" + "Xhd th 1\n" + "nLx an 1\n" + "xkU ka 1\n" + "fqT qu 1\n" + "qYh th 1\n" + "bFv va 1\n" + "xbQ be 1\n" + "vcS ch 1\n" + "qqT qu 1\n" + "gkF ng 1\n" + "zFh th 1\n" + "kpE ka 1\n" + "Gxb be 1\n" + "Ztw th 1\n" + "qIl qu 1\n" + "Qkd de 1\n" + "wdV de 1\n" + "rwP er 1\n" + "aCg an 1\n" + "Zrs er 1\n" + "zmW sz 1\n" + "vfO va 1\n" + "hBj th 1\n" + "tbH th 1\n" + "Dxv va 1\n" + "zdD de 1\n" + "nBw an 1\n" + "lrV er 1\n" + "gQq ng 1\n" + "tlK th 1\n" + "ztP th 1\n" + "yqV qu 1\n" + "nRm an 1\n" + "jVz sz 1\n" + "Crq er 1\n" + "fFg ng 1\n" + "Xjg ng 1\n" + "Cml le 1\n" + "qWj qu 1\n" + "jzO ij 1\n" + "Mdq qu 1\n" + "mtQ th 1\n" + "rGv er 1\n" + "kGn an 1\n" + "mLg ng 1\n" + "uWj qu 1\n" + "Rcq ch 1\n" + "cVp ch 1\n" + "bWk ka 1\n" + "Xzx sz 1\n" + "Wkb ka 1\n" + "xzH sz 1\n" + "quP un 1\n" + "dHv de 1\n" + "Dmq qu 1\n" + "Dgv ng 1\n" + "tgY th 1\n" + "jtM th 1\n" + "tMz th 1\n" + "bHm me 1\n" + "Zfk ka 1\n" + "xZp pr 1\n" + "jkH ij 1\n" + "rNp er 1\n" + "xMv va 1\n" + "wpF pr 1\n" + "djD de 1\n" + "bxV be 1\n" + "hgS th 1\n" + "Pkh th 1\n" + "Dxq qu 1\n" + "mMx me 1\n" + "dGj de 1\n" + "kbH ka 1\n" + "Lhg th 1\n" + "Dvq qu 1\n" + "qrT qu 1\n" + "Ijw ij 1\n" + "wuI qu 1\n" + "Zwn an 1\n" + "dhJ th 1\n" + "qcR ch 1\n" + "whM th 1\n" + "pgP ng 1\n" + "qkR qu 1\n" + "sqR qu 1\n" + "lxY le 1\n" + "vVw va 1\n" + "lKd le 1\n" + "Nly le 1\n" + "yKz sz 1\n" + "qBb qu 1\n" + "wQx wa 1\n" + "kYw ka 1\n" + "fQd de 1\n" + "svW sz 1\n" + "yGp pr 1\n" + "ytB th 1\n" + "jvU ij 1\n" + "kjz ka 1\n" + "jVc ch 1\n" + "Qbz sz 1\n" + "pqM qu 1\n" + "vwu ku 1\n" + "Qww wa 1\n" + "dcZ ch 1\n" + "lhG th 1\n" + "gmS ng 1\n" + "Iqz qu 1\n" + "zZf sz 1\n" + "hLn th 1\n" + "eMf er 1\n" + "xNq qu 1\n" + "mPm um 1\n" + "pMg ng 1\n" + "wzW sz 1\n" + "kRl le 1\n" + "hzK th 1\n" + "fbO be 1\n" + "Xxt th 1\n" + "Fnx an 1\n" + "Bvn an 1\n" + "bjZ ij 1\n" 
+ "tcY th 1\n" + "dmB de 1\n" + "qFe qu 1\n" + "kxB ka 1\n" + "qBz qu 1\n" + "pVp pr 1\n" + "boQ on 1\n" + "xoH on 1\n" + "dWg de 1\n" + "Tdq qu 1\n" + "zNq qu 1\n" + "vYp va 1\n" + "pDf pr 1\n" + "lwG le 1\n" + "hDq th 1\n" + "Jdy de 1\n" + "snZ an 1\n" + "mzU sz 1\n" + "zKx sz 1\n" + "rvC er 1\n" + "wuS qu 1\n" + "dnQ an 1\n" + "vCy va 1\n" + "Udw wa 1\n" + "bTl le 1\n" + "qbC qu 1\n" + "tbT th 1\n" + "iDk ka 1\n" + "Whb th 1\n" + "tbX th 1\n" + "tfO th 1\n" + "Tfq qu 1\n" + "dbW de 1\n" + "Bdy de 1\n" + "vjR ij 1\n" + "cbC ch 1\n" + "wuW qu 1\n" + "wCw wa 1\n" + "Wdq qu 1\n" + "vRb va 1\n" + "bWm me 1\n" + "vZw va 1\n" + "dJj de 1\n" + "qZy qu 1\n" + "Jgq ng 1\n" + "zbH sz 1\n" + "hJl th 1\n" + "Xhg th 1\n" + "nVp an 1\n" + "dVc ch 1\n" + "qCc ch 1\n" + "oYg ng 1\n" + "kwH ka 1\n" + "vwN va 1\n" + "zfw sz 1\n" + "vlO le 1\n" + "ztX ti 1\n" + "dKx de 1\n" + "xQs sz 1\n" + "cDl ch 1\n" + "yVv va 1\n" + "zpN sz 1\n" + "xkG ka 1\n" + "eqW qu 1\n" + "jdD di 1\n" + "fQm me 1\n" + "Yhl th 1\n" + "tBf th 1\n" + "qEf qu 1\n" + "whX th 1\n" + "Vgv ng 1\n" + "Lsq qu 1\n" + "dfJ de 1\n" + "Zdp de 1\n" + "rZc ch 1\n" + "tZh ch 1\n" + "mtC th 1\n" + "zxQ sz 1\n" + "Vnj an 1\n" + "sHg ng 1\n" + "wYl le 1\n" + "Bqb qu 1\n" + "yrV er 1\n" + "Ycs ch 1\n" + "jRw ij 1\n" + "iWt th 1\n" + "hVw th 1\n" + "wZs sz 1\n" + "Cqo qu 1\n" + "Gfn an 1\n" + "rBv er 1\n" + "Ojz sz 1\n" + "zGf sz 1\n" + "bZc ch 1\n" + "Fvd de 1\n" + "Zgs ng 1\n" + "Rfg ng 1\n" + "Rww wa 1\n" + "Yrp er 1\n" + "iFp in 1\n" + "bVx be 1\n" + "zfM sz 1\n" + "qdV qu 1\n" + "bGm me 1\n" + "tnJ th 1\n" + "pdR de 1\n" + "gBc ch 1\n" + "gzC ng 1\n" + "Pwc ch 1\n" + "uAw qu 1\n" + "znX an 1\n" + "vgT ng 1\n" + "oAw ko 1\n" + "xBm me 1\n" + "dNf de 1\n" + "Pqs qu 1\n" + "Npd di 1\n" + "oUy ko 1\n" + "fpD pr 1\n" + "Rfx fo 1\n" + "lXm le 1\n" + "qWs qu 1\n" + "gWv vi 1\n" + "Fwv va 1\n" + "Lqj qu 1\n" + "fvQ va 1\n" + "zgB ng 1\n" + "kJl le 1\n" + "vWo on 1\n" + "Xvc ch 1\n" + "yDq qu 1\n" + "bdP de 1\n" + "jVf ij 1\n" + 
"wPw wa 1\n" + "dwA de 1\n" + "Oqp qu 1\n" + "qiZ in 1\n" + "xdV de 1\n" + "qFg ng 1\n" + "qzI qu 1\n" + "ywL wa 1\n" + "sWv sz 1\n" + "Tpy pr 1\n" + "wbf wa 1\n" + "uPg ng 1\n" + "Knw an 1\n" + "iuO in 1\n" + "Qdn an 1\n" + "Yfv va 1\n" + "wuK qu 1\n" + "xLn an 1\n" + "yJg ng 1\n" + "Nfk ka 1\n" + "Yql qu 1\n" + "qsH qu 1\n" + "Rzv sz 1\n" + "bIp pr 1\n" + "sQt th 1\n" + "tgC th 1\n" + "qSa an 1\n" + "fxQ fo 1\n" + "hcZ th 1\n" + "wbJ wa 1\n" + "qRl qu 1\n" + "Gcy ch 1\n" + "vZm va 1\n" + "Xzl le 1\n" + "wgR ng 1\n" + "dlO le 1\n" + "tCb th 1\n" + "qmY qu 1\n" + "qZx qu 1\n" + "Lbp pr 1\n" + "Dgq ng 1\n" + "Vkj ij 1\n" + "wqU qu 1\n" + "Mqk qu 1\n" + "wUv va 1\n" + "qgC ng 1\n" + "sbD sz 1\n" + "Sqy qu 1\n" + "bMq qu 1\n" + "Bzt th 1\n" + "sIq qu 1\n" + "cVj ch 1\n" + "wJt th 1\n" + "Xjm ij 1\n" + "Hmg ng 1\n" + "aQd an 1\n" + "iHt th 1\n" + "fMm me 1\n" + "wWc ch 1\n" + "fuE qu 1\n" + "mCf me 1\n" + "qnP an 1\n" + "zLn an 1\n" + "kRt th 1\n" + "Mvl le 1\n" + "mRd de 1\n" + "yfJ ny 1\n" + "xCb be 1\n" + "sQb sz 1\n" + "quC un 1\n" + "Ctc th 1\n" + "pPv va 1\n" + "zjI sz 1\n" + "xmC me 1\n" + "xdJ de 1\n" + "nXv an 1\n" + "vsO sz 1\n" + "pRd de 1\n" + "vbF va 1\n" + "wNl le 1\n" + "kHq qu 1\n" + "rwM er 1\n" + "gxD ng 1\n" + "Qhi th 1\n" + "mqB qu 1\n" + "pnL an 1\n" + "bKb be 1\n" + "iqN in 1\n" + "dkX de 1\n" + "bQd de 1\n" + "bNj ij 1\n" + "Tlk le 1\n" + "Nlg ng 1\n" + "Cxh th 1\n" + "Mqf qu 1\n" + "Pvj ij 1\n" + "zwZ sz 1\n" + "pGb pr 1\n" + "nrF an 1\n" + "bkS ka 1\n" + "dRv de 1\n" + "jJm ij 1\n" + "iqF in 1\n" + "fGc ch 1\n" + "nxW an 1\n" + "xsW sz 1\n" + "mfQ me 1\n" + "fgP ng 1\n" + "jlH le 1\n" + "nrI an 1\n" + "kXv ka 1\n" + "Vpq qu 1\n" + "zMk sz 1\n" + "pHf pr 1\n" + "jdM de 1\n" + "bqJ qu 1\n" + "Ckt th 1\n" + "zKv sz 1\n" + "jzG sz 1\n" + "uIx qu 1\n" + "yNm me 1\n" + "jYt th 1\n" + "fwL wa 1\n" + "dZx de 1\n" + "vgF ng 1\n" + "wXi in 1\n" + "vZt th 1\n" + "Ctf th 1\n" + "xqC qu 1\n" + "qOc ch 1\n" + "ygX ng 1\n" + "kWk ka 1\n" + "grF ng 1\n" + "qnX 
an 1\n" + "xUi in 1\n" + "pmC me 1\n" + "uzE qu 1\n" + "Ivw va 1\n" + "gvI ng 1\n" + "knZ an 1\n" + "lxZ le 1\n" + "Xwf wa 1\n" + "Dqb qu 1\n" + "yKg ng 1\n" + "Vwg ng 1\n" + "xSb be 1\n" + "Hwp pr 1\n" + "yNx ny 1\n" + "yoQ on 1\n" + "cSx ch 1\n" + "Evq qu 1\n" + "tIw th 1\n" + "dfZ de 1\n" + "hzP th 1\n" + "xBk ka 1\n" + "kqr qu 1\n" + "yBm me 1\n" + "lJj le 1\n" + "cjq ch 1\n" + "drW er 1\n" + "qaD an 1\n" + "wDf wa 1\n" + "Lxz sz 1\n" + "zQf fo 1\n" + "Jtq th 1\n" + "qRv qu 1\n" + "Gfc ch 1\n" + "Xbt th 1\n" + "wZb wa 1\n" + "srQ er 1\n" + "gJq ng 1\n" + "jFt th 1\n" + "gNc ch 1\n" + "Rkr er 1\n" + "pzJ sz 1\n" + "lbA le 1\n" + "cBq ch 1\n" + "Kyq qu 1\n" + "xcO ch 1\n" + "zXr er 1\n" + "cVs ch 1\n" + "rYm er 1\n" + "kVm ka 1\n" + "fcZ ch 1\n" + "fzC sz 1\n" + "tKp th 1\n" + "gPz ng 1\n" + "qcL ch 1\n" + "Yjr er 1\n" + "zxU sz 1\n" + "xbT be 1\n" + "nvX an 1\n" + "qmR qu 1\n" + "bxL be 1\n" + "Xww wa 1\n" + "jSf ij 1\n" + "lNf le 1\n" + "zTs sz 1\n" + "kFq qu 1\n" + "qLz qu 1\n" + "rrX er 1\n" + "wXg ng 1\n" + "zvE sz 1\n" + "Hwx wa 1\n" + "qFm qu 1\n" + "cgR ch 1\n" + "pDp pr 1\n" + "Oqb qu 1\n" + "sVc ch 1\n" + "Xtx th 1\n" + "Qwt th 1\n" + "Wfe er 1\n" + "Pcx ch 1\n" + "bpO pr 1\n" + "Cwg ng 1\n" + "wxO wa 1\n" + "bVs sz 1\n" + "jFw ij 1\n" + "fnF an 1\n" + "kxH ka 1\n" + "Yws sz 1\n" + "gdD ng 1\n" + "jWx ij 1\n" + "cTl ch 1\n" + "kmW ka 1\n" + "mhW th 1\n" + "bzT sz 1\n" + "rvJ er 1\n" + "xcJ ch 1\n" + "vkS ka 1\n" + "sXr er 1\n" + "sCv sz 1\n" + "Ntp th 1\n" + "oHh lo 1\n" + "Yvs sz 1\n" + "pVf pr 1\n" + "kEq qu 1\n" + "qfE qu 1\n" + "oWm on 1\n" + "tMw th 1\n" + "zYp sz 1\n" + "nFw an 1\n" + "yQc ch 1\n" + "zQj sz 1\n" + "wKq qu 1\n" + "mKf me 1\n" + "uLr qu 1\n" + "wIb wa 1\n" + "wrH er 1\n" + "pgL ng 1\n" + "Lbt th 1\n" + "zjF sz 1\n" + "qFp qu 1\n" + "zdX de 1\n" + "wTc ch 1\n" + "Jwl le 1\n" + "lxU le 1\n" + "hjA th 1\n" + "iPg in 1\n" + "Xns an 1\n" + "wkW ka 1\n" + "pfP pr 1\n" + "Dyq qu 1\n" + "jWu qu 1\n" + "qzR qu 1\n" + "Yjz sz 1\n" + "twX th 
1\n" + "Nwj ij 1\n" + "jbB ij 1\n" + "qwR qu 1\n" + "Ytf th 1\n" + "blX le 1\n" + "xZk ka 1\n" + "Ymw me 1\n" + "wfX wa 1\n" + "Vqy qu 1\n" + "Xqn an 1\n" + "yUw wa 1\n" + "jzT jo 1\n" + "kNt th 1\n" + "pmQ me 1\n" + "dXr er 1\n" + "ylq qu 1\n" + "tWz th 1\n" + "Kvr er 1\n" + "bhQ th 1\n" + "uJn an 1\n" + "pbT pr 1\n" + "aBf an 1\n" + "Rhj th 1\n" + "uAx qu 1\n" + "Bgx ng 1\n" + "jqN qu 1\n" + "jdC ij 1\n" + "fBs st 1\n" + "cXk ch 1\n" + "nmM an 1\n" + "xRr er 1\n" + "Hkz sz 1\n" + "dhZ th 1\n" + "Fyp pr 1\n" + "kGm ka 1\n" + "sGq qu 1\n" + "jKh th 1\n" + "vDz sz 1\n" + "vLq qu 1\n" + "lJs le 1\n" + "zNn an 1\n" + "Wgj ng 1\n" + "jmL ij 1\n" + "gVt th 1\n" + "wFz sz 1\n" + "zbD sz 1\n" + "kTd de 1\n" + "dwX de 1\n" + "xRl le 1\n" + "Azv sz 1\n" + "bQh th 1\n" + "qQf qu 1\n" + "yoZ on 1\n" + "jPs sz 1\n" + "jyG ij 1\n" + "kXj ka 1\n" + "yBv va 1\n" + "nwP an 1\n" + "xnA an 1\n" + "bKf be 1\n" + "qbP qu 1\n" + "vGs sz 1\n" + "jjG ij 1\n" + "Kqc ch 1\n" + "zVt th 1\n" + "wSg ng 1\n" + "sWm sz 1\n" + "fDg ng 1\n" + "pHz sz 1\n" + "fYp pr 1\n" + "zrW er 1\n" + "lDx le 1\n" + "hQh th 1\n" + "Bdp de 1\n" + "fqZ qu 1\n" + "oQm on 1\n" + "Qsq qu 1\n" + "xjq qu 1\n" + "Mfv va 1\n" + "zbQ sz 1\n" + "quR un 1\n" + "cMb ch 1\n" + "zqD qu 1\n" + "dXf de 1\n" + "rHh th 1\n" + "jhF th 1\n" + "nNf an 1\n" + "wHb wa 1\n" + "Tpq qu 1\n" + "bjY ij 1\n" + "cJq ch 1\n" + "lCk le 1\n" + "Pfp pr 1\n" + "Oqn an 1\n" + "fmR me 1\n" + "Qpu qu 1\n" + "Ncv ch 1\n" + "qYr qu 1\n" + "sfA sz 1\n" + "frS er 1\n" + "Gpf pr 1\n" + "jmD ij 1\n" + "hwI th 1\n" + "Rbz sz 1\n" + "jhB th 1\n" + "xXj ij 1\n" + "qYd qu 1\n" + "sVf sz 1\n" + "cCz ch 1\n" + "qMl qu 1\n" + "fpK pr 1\n" + "hVy th 1\n" + "lcJ ch 1\n" + "Okj ij 1\n" + "qJg ng 1\n" + "jLp ij 1\n" + "nYf an 1\n" + "npF on 1\n" + "rWk er 1\n" + "mcP ch 1\n" + "nZm an 1\n" + "fYb fo 1\n" + "zbC sz 1\n" + "nBq an 1\n" + "fjy ij 1\n" + "bIx be 1\n" + "twN th 1\n" + "Ggk ng 1\n" + "Czm sz 1\n" + "jtO th 1\n" + "nRl an 1\n" + "jyC ij 1\n" + "yEh th 1\n" 
+ "vmH va 1\n" + "wtQ th 1\n" + "wIf wa 1\n" + "jIf ij 1\n" + "qbM qu 1\n" + "Rwq qu 1\n" + "fqF qu 1\n" + "Wfj ij 1\n" + "jfW ij 1\n" + "wWm me 1\n" + "Wpp pr 1\n" + "Mgj ng 1\n" + "dSf de 1\n" + "wYv va 1\n" + "ccI ch 1\n" + "ylT le 1\n" + "Gqh th 1\n" + "Cmz sz 1\n" + "Hfk ka 1\n" + "qBt th 1\n" + "yCf ny 1\n" + "qzO qu 1\n" + "ydF de 1\n" + "Vdt th 1\n" + "pJd de 1\n" + "sfR sz 1\n" + "dlV le 1\n" + "jOd de 1\n" + "nfF an 1\n" + "wTt th 1\n" + "rGk er 1\n" + "xAw wa 1\n" + "vfF va 1\n" + "Dzg ng 1\n" + "kFp ka 1\n" + "jTm ij 1\n" + "nNq an 1\n" + "qcN ch 1\n" + "Jjx ij 1\n" + "tKf th 1\n" + "Zrq qu 1\n" + "hmK th 1\n" + "Mqz qu 1\n" + "xfR fo 1\n" + "wQq qu 1\n" + "mqG qu 1\n" + "xUr er 1\n" + "oiU in 1\n" + "qsS qu 1\n" + "qGg ng 1\n" + "qtO th 1\n" + "tPb th 1\n" + "Rqm qu 1\n" + "vkX ka 1\n" + "Wsb st 1\n" + "cxR ch 1\n" + "fZr er 1\n" + "yQg ng 1\n" + "ziU in 1\n" + "xvW va 1\n" + "aDx an 1\n" + "bQj ij 1\n" + "jxC ij 1\n" + "Twk ka 1\n" + "sQh th 1\n" + "Bfx fo 1\n" + "aGj an 1\n" + "Pgc ch 1\n" + "Hzh th 1\n" + "qgW ng 1\n" + "kdF de 1\n" + "kbY ka 1\n" + "Qjx ij 1\n" + "Hxj ij 1\n" + "tVx th 1\n" + "nxZ an 1\n" + "oVd on 1\n" + "Hlq qu 1\n" + "jKz sz 1\n" + "qAi in 1\n" + "dNl le 1\n" + "pqA qu 1\n" + "eIv er 1\n" + "xmW me 1\n" + "ycK ch 1\n" + "mQd de 1\n" + "hmU th 1\n" + "nlF an 1\n" + "Gkl le 1\n" + "qBq qu 1\n" + "rhQ th 1\n" + "Znk an 1\n" + "Vfp pr 1\n" + "nBn an 1\n" + "qvL qu 1\n" + "aqN an 1\n" + "kLf ka 1\n" + "zJr er 1\n" + "tQw th 1\n" + "sWq qu 1\n" + "bwW wa 1\n" + "vzB sz 1\n" + "yyR ny 1\n" + "qqN qu 1\n" + "wyI ny 1\n" + "jzJ sz 1\n" + "qgI qu 1\n" + "bgQ ng 1\n" + "yLt th 1\n" + "Vqq qu 1\n" + "Xnr an 1\n" + "wHg ng 1\n" + "aQg an 1\n" + "cFh th 1\n" + "zjQ sz 1\n" + "gpD ng 1\n" + "xzN sz 1\n" + "iIw in 1\n" + "dQg ng 1\n" + "pQy pr 1\n" + "Xyx ny 1\n" + "sWc ch 1\n" + "jFd de 1\n" + "bpF pr 1\n" + "Vsv st 1\n" + "Qql qu 1\n" + "wzT sz 1\n" + "sqQ qu 1\n" + "Kzm sz 1\n" + "oFq qu 1\n" + "gkJ ng 1\n" + "hkH th 1\n" + "qLg ng 1\n" + 
"bmU me 1\n" + "crJ ch 1\n" + "slX le 1\n" + "Tzx sz 1\n" + "qbx qu 1\n" + "kpI ka 1\n" + "xCf fo 1\n" + "Fml le 1\n" + "Qhj th 1\n" + "tQs th 1\n" + "vRd de 1\n" + "Ycb ch 1\n" + "cjP ch 1\n" + "yuE qu 1\n" + "gIi in 1\n" + "kWg ng 1\n" + "Jwh th 1\n" + "fVy ny 1\n" + "jqy qu 1\n" + "Wzp sz 1\n" + "Cwc ch 1\n" + "qEy qu 1\n" + "jrX er 1\n" + "Kqi in 1\n" + "lYv le 1\n" + "dGv de 1\n" + "Cwj ij 1\n" + "nDv an 1\n" + "Ojm ij 1\n" + "Dnx an 1\n" + "vrF er 1\n" + "Jmr er 1\n" + "zfI sz 1\n" + "bqT qu 1\n" + "Xvj ij 1\n" + "nPp an 1\n" + "aVw an 1\n" + "wBv va 1\n" + "kVb ka 1\n" + "gcH ch 1\n" + "Xbs sz 1\n" + "tRd th 1\n" + "mQz sz 1\n" + "Hxe er 1\n" + "Dnw an 1\n" + "xWg ng 1\n" + "pGc ch 1\n" + "hgI th 1\n" + "ywP wa 1\n" + "nrW an 1\n" + "iVq di 1\n" + "xzE sz 1\n" + "Vxd de 1\n" + "Lzc ch 1\n" + "Jwp pr 1\n" + "gCq ng 1\n" + "Otq th 1\n" + "wvP va 1\n" + "cNr ch 1\n" + "iXq in 1\n" + "Qnl in 1\n" + "tPz th 1\n" + "hIb th 1\n" + "aPg an 1\n" + "zvw sz 1\n" + "nqO an 1\n" + "sqO qu 1\n" + "bjQ ij 1\n" + "lwQ le 1\n" + "pEq qu 1\n" + "bWj ij 1\n" + "swT sz 1\n" + "gmY ng 1\n" + "gRk ng 1\n" + "dZr er 1\n" + "fMr er 1\n" + "lxO le 1\n" + "kbQ ka 1\n" + "yfN ny 1\n" + "ymq qu 1\n" + "jpK ij 1\n" + "Wjn an 1\n" + "fmW me 1\n" + "rKx er 1\n" + "dlH le 1\n" + "kcK ch 1\n" + "vbV va 1\n" + "qNl qu 1\n" + "pHt th 1\n" + "hlT th 1\n" + "lBv le 1\n" + "oaF an 1\n" + "xfM fo 1\n" + "rZd er 1\n" + "jgW ng 1\n" + "Hvh th 1\n" + "Fkf ka 1\n" + "cDc ch 1\n" + "hLh th 1\n" + "qQp qu 1\n" + "zhJ th 1\n" + "ivQ in 1\n" + "Ukq qu 1\n" + "bpV pr 1\n" + "bJq qu 1\n" + "aPw an 1\n" + "sdK de 1\n" + "cGf ch 1\n" + "Ljw ij 1\n" + "qhP th 1\n" + "mFw me 1\n" + "fIu qu 1\n" + "zhB th 1\n" + "fuH qu 1\n" + "bFq qu 1\n" + "Wgk ng 1\n" + "Fqh th 1\n" + "zmf sz 1\n" + "Zpf pr 1\n" + "nFh th 1\n" + "yBw wa 1\n" + "gIj ng 1\n" + "qBf fo 1\n" + "Uwl le 1\n" + "zrM er 1\n" + "yBd de 1\n" + "Rlf le 1\n" + "Pzh ch 1\n" + "rZx er 1\n" + "qVs qu 1\n" + "dxJ de 1\n" + "Lcz ch 1\n" + "gFn an 1\n" + "vIm 
va 1\n" + "qtG th 1\n" + "qbG qu 1\n" + "bHg ng 1\n" + "xrY er 1\n" + "tBd th 1\n" + "nKq an 1\n" + "Nkt th 1\n" + "jCq qu 1\n" + "byX be 1\n" + "oBp on 1\n" + "Wjz sz 1\n" + "zfP sz 1\n" + "aQz an 1\n" + "sjx ij 1\n" + "nfW an 1\n" + "nXw an 1\n" + "bJw wa 1\n" + "aSf an 1\n" + "iRf in 1\n" + "yMd de 1\n" + "fBc ch 1\n" + "vxR va 1\n" + "Llx le 1\n" + "yGs sz 1\n" + "Jsy sz 1\n" + "Lvx va 1\n" + "eFh th 1\n" + "wbM wa 1\n" + "uOq qu 1\n" + "wWl le 1\n" + "bvU va 1\n" + "fnO an 1\n" + "mzI sz 1\n" + "Vcf ch 1\n" + "mhE th 1\n" + "vgQ ng 1\n" + "jgP ng 1\n" + "qbj qu 1\n" + "bZf be 1\n" + "Xtj th 1\n" + "yYq qu 1\n" + "jdK de 1\n" + "jzB sz 1\n" + "Yys sz 1\n" + "wUg ng 1\n" + "yBb be 1\n" + "qjM qu 1\n" + "sXw sz 1\n" + "Xqw qu 1\n" + "cTb ch 1\n" + "jrE er 1\n" + "sNp sz 1\n" + "Zhm th 1\n" + "xVs sz 1\n" + "jGz sz 1\n" + "Jqh th 1\n" + "zTm sz 1\n" + "vhE th 1\n" + "dQi in 1\n" + "Tmv va 1\n" + "qxD qu 1\n" + "fzE sz 1\n" + "vMr er 1\n" + "Cqx qu 1\n" + "twY th 1\n" + "nVz an 1\n" + "lRk le 1\n" + "Owq qu 1\n" + "qYj qu 1\n" + "yQk ka 1\n" + "Nlf le 1\n" + "qDn an 1\n" + "bHw wa 1\n" + "cjA ch 1\n" + "sgU ng 1\n" + "kQi in 1\n" + "yNf ny 1\n" + "lwZ le 1\n" + "vGd de 1\n" + "Vmn an 1\n" + "tpB th 1\n" + "cFd ch 1\n" + "xHm me 1\n" + "bSg ng 1\n" + "hEq th 1\n" + "ewQ er 1\n" + "eWd er 1\n" + "jfR ij 1\n" + "zpY sz 1\n" + "cvQ ch 1\n" + "hXr th 1\n" + "cJw ch 1\n" + "wEp pr 1\n" + "Nxl le 1\n" + "qMf qu 1\n" + "vGc ch 1\n" + "pyQ pr 1\n" + "jpU ij 1\n" + "xoA on 1\n" + "gXn an 1\n" + "qqG qu 1\n" + "pXn an 1\n" + "vlP le 1\n" + "Lzv sz 1\n" + "jxB ij 1\n" + "cJc ch 1\n" + "jcT ch 1\n" + "Wtm th 1\n" + "cLg ch 1\n" + "kUx ka 1\n" + "nFp an 1\n" + "Jsw sz 1\n" + "sBg ng 1\n" + "jFn an 1\n" + "gvC ng 1\n" + "fFy ny 1\n" + "qnA an 1\n" + "Zbb be 1\n" + "Pzx sz 1\n" + "psJ sz 1\n" + "lZq qu 1\n" + "yfP ny 1\n" + "gYv ng 1\n" + "bfC be 1\n" + "dMx de 1\n" + "hlN th 1\n" + "wRl le 1\n" + "qjH qu 1\n" + "Wjc ch 1\n" + "uQp qu 1\n" + "zTb sz 1\n" + "qUr qu 1\n" + "zqp qu 
1\n" + "vlR le 1\n" + "jqX qu 1\n" + "swR sz 1\n" + "qMy ny 1\n" + "zkT sz 1\n" + "yqX qu 1\n" + "nlR an 1\n" + "Hqn an 1\n" + "aaJ an 1\n" + "lKw le 1\n" + "bzB sz 1\n" + "Vgk ng 1\n" + "aVm an 1\n" + "dnR an 1\n" + "txQ th 1\n" + "Qzi in 1\n" + "zxV sz 1\n" + "xgQ ng 1\n" + "tvZ th 1\n" + "jwN ij 1\n" + "Eqj qu 1\n" + "Bxj ij 1\n" + "hzH th 1\n" + "Qfy ny 1\n" + "Ppj ij 1\n" + "Aqp qu 1\n" + "zJn an 1\n" + "szF st 1\n" + "qfX qu 1\n" + "pzV sz 1\n" + "tgN th 1\n" + "xsS sz 1\n" + "nQz an 1\n" + "tkF th 1\n" + "Qhq th 1\n" + "gJc ch 1\n" + "uOa an 1\n" + "rqW qu 1\n" + "fYz sz 1\n" + "uFc ch 1\n" + "Ncx ch 1\n" + "lMw le 1\n" + "cjI ch 1\n" + "Jcw ch 1\n" + "vEo on 1\n" + "eQy er 1\n" + "Sxc ch 1\n" + "bUx mb 1\n" + "zdJ sz 1\n" + "lpN le 1\n" + "Rkq qu 1\n" + "vvI va 1\n" + "Qmq qu 1\n" + "tgJ th 1\n" + "gfE ng 1\n" + "qcX ch 1\n" + "klT le 1\n" + "bbV be 1\n" + "pmZ me 1\n" + "uqA qu 1\n" + "cYy ch 1\n" + "wmY me 1\n" + "zlB le 1\n" + "zNd sz 1\n" + "cvZ ch 1\n" + "dvL de 1\n" + "wLz sz 1\n" + "qcG ch 1\n" + "Qjl le 1\n" + "nqf an 1\n" + "gxY ng 1\n" + "aqI an 1\n" + "Kqa an 1\n" + "Xqp qu 1\n" + "Yvg ng 1\n" + "qqF qu 1\n" + "yHh th 1\n" + "nHc an 1\n" + "Uqq qu 1\n" + "zfN sz 1\n" + "mXq qu 1\n" + "Fgj ng 1\n" + "Dsx sz 1\n" + "xRv va 1\n" + "wbZ wa 1\n" + "Hnp an 1\n" + "fUx fo 1\n" + "cYd ch 1\n" + "qTg ng 1\n" + "Bgq ng 1\n" + "pCn an 1\n" + "Xmh th 1\n" + "vjJ ij 1\n" + "tdG th 1\n" + "Zhk th 1\n" + "xFn an 1\n" + "dkQ de 1\n" + "Lcg ch 1\n" + "mIu qu 1\n" + "Iwd de 1\n" + "wjw ij 1\n" + "zbX sz 1\n" + "Yhp th 1\n" + "cvH ch 1\n" + "Lcx ch 1\n" + "Wfn an 1\n" + "Nfq qu 1\n" + "qMv qu 1\n" + "Uvw va 1\n" + "Qnh th 1\n" + "nbG an 1\n" + "sFg ng 1\n" + "xlJ le 1\n" + "bPb be 1\n" + "xpI pr 1\n" + "mrV er 1\n" + "Fwu qu 1\n" + "wOy wa 1\n" + "Pmh th 1\n" + "Jhq th 1\n" + "Zbx be 1\n" + "pgY ng 1\n" + "Rbw wa 1\n" + "Awx wa 1\n" + "mcB ch 1\n" + "gkG ng 1\n" + "xkW ka 1\n" + "Pnw in 1\n" + "bNs sz 1\n" + "nXr an 1\n" + "Vmt th 1\n" + "eUv er 1\n" + "yQv va 1\n" 
+ "kxr er 1\n" + "Ksw sz 1\n" + "bpW pr 1\n" + "qeD qu 1\n" + "Qvh th 1\n" + "bRm me 1\n" + "qJm qu 1\n" + "csY ch 1\n" + "qwH qu 1\n" + "Cqc ch 1\n" + "lYq qu 1\n" + "dPp de 1\n" + "oAe er 1\n" + "dcS ch 1\n" + "uwU qu 1\n" + "zjL sz 1\n" + "oZx on 1\n" + "kjR ij 1\n" + "cDy ch 1\n" + "fSs sz 1\n" + "eQf le 1\n" + "qBm qu 1\n" + "mLb me 1\n" + "Zrj er 1\n" + "Gkx ka 1\n" + "pkX ka 1\n" + "vTk ka 1\n" + "Zgp ng 1\n" + "dhP th 1\n" + "nPv an 1\n" + "xnQ an 1\n" + "bHp pr 1\n" + "Xgf ng 1\n" + "Cwf wa 1\n" + "lbN le 1\n" + "jNm ij 1\n" + "xNt th 1\n" + "rJp er 1\n" + "oJd on 1\n" + "Ryq qu 1\n" + "lvL le 1\n" + "qvY qu 1\n" + "vwC va 1\n" + "kFj ij 1\n" + "qHd qu 1\n" + "wcB ch 1\n" + "xTs sz 1\n" + "fQz sz 1\n" + "Dlf le 1\n" + "wLt th 1\n" + "Fbh th 1\n" + "rqJ qu 1\n" + "hhO th 1\n" + "xOi in 1\n" + "mqz qu 1\n" + "qmQ me 1\n" + "qQj qu 1\n" + "ovQ on 1\n" + "gfR ng 1\n" + "Pmq qu 1\n" + "Tcj ch 1\n" + "mqQ qu 1\n" + "mwV me 1\n" + "bXw wa 1\n" + "jlA le 1\n" + "fjG ij 1\n" + "jxY ij 1\n" + "qwM qu 1\n" + "kvU ka 1\n" + "Bkq qu 1\n" + "gfA ng 1\n" + "Awc ch 1\n" + "Vmv va 1\n" + "Qhl th 1\n" + "Wmj ij 1\n" + "cMq ch 1\n" + "tHp th 1\n" + "lPb le 1\n" + "vlK le 1\n" + "Ygk ng 1\n" + "gJs ng 1\n" + "tWl th 1\n" + "xVw wa 1\n" + "srN er 1\n" + "Uhb th 1\n" + "vfR va 1\n" + "kFf ka 1\n" + "Jlz le 1\n" + "fKq qu 1\n" + "mRq qu 1\n" + "kWw ka 1\n" + "zvO sz 1\n" + "Xqz qu 1\n" + "dIj de 1\n" + "wJm me 1\n" + "Fqv qu 1\n" + "wNt th 1\n" + "lxL le 1\n" + "xLm me 1\n" + "dqN qu 1\n" + "wRj ij 1\n" + "Ljt th 1\n" + "wRw wa 1\n" + "cxB ch 1\n" + "cjH ch 1\n" + "Vqj qu 1\n" + "qJs qu 1\n" + "cFk ch 1\n" + "xqd qu 1\n" + "Eqh th 1\n" + "qRd qu 1\n" + "vfT va 1\n" + "Zqb qu 1\n" + "mGc ch 1\n" + "Sbd de 1\n" + "iwV in 1\n" + "jfI ij 1\n" + "nWz an 1\n" + "Ljg ng 1\n" + "rjG er 1\n" + "cFb ch 1\n" + "uqZ qu 1\n" + "mVm me 1\n" + "jgK ng 1\n" + "dZh th 1\n" + "Bqx qu 1\n" + "quG un 1\n" + "lCv le 1\n" + "lxW le 1\n" + "gGb ng 1\n" + "gvY ng 1\n" + "mjF ij 1\n" + "ptX th 1\n" + 
"pYy pr 1\n" + "Yrf er 1\n" + "mVd de 1\n" + "zpR sz 1\n" + "xKw wa 1\n" + "wpM pr 1\n" + "cLk ch 1\n" + "Sqz qu 1\n" + "gWn an 1\n" + "sWz st 1\n" + "srS er 1\n" + "cVx ch 1\n" + "xNb be 1\n" + "hPb th 1\n" + "bGq qu 1\n" + "tdH th 1\n" + "yJl le 1\n" + "vUk ka 1\n" + "dJz sz 1\n" + "qhI th 1\n" + "mtP th 1\n" + "lGb le 1\n" + "hDx th 1\n" + "zfW sz 1\n" + "Nml le 1\n" + "Hsw st 1\n" + "pfG pr 1\n" + "dMj de 1\n" + "kKq qu 1\n" + "rjS er 1\n" + "Qlg ng 1\n" + "Nfy ny 1\n" + "cqM ch 1\n" + "hWm th 1\n" + "fuO qu 1\n" + "zfF sz 1\n" + "qgH ng 1\n" + "bpZ pr 1\n" + "btY th 1\n" + "uqB qu 1\n" + "qyA qu 1\n" + "Xrp er 1\n" + "ytX th 1\n" + "dHm de 1\n" + "vBg ng 1\n" + "yyN ny 1\n" + "Qrj er 1\n" + "gKd ng 1\n" + "bfU be 1\n" + "Qft th 1\n" + "bqP qu 1\n" + "qOz qu 1\n" + "Xhc th 1\n" + "dqY qu 1\n" + "hjQ th 1\n" + "Yfu qu 1\n" + "aXk an 1\n" + "pbV pr 1\n" + "vjP ij 1\n" + "Ybp pr 1\n" + "Jmb me 1\n" + "qFq qu 1\n" + "yPq qu 1\n" + "yWw wa 1\n" + "vhX th 1\n" + "iwT in 1\n" + "qZf qu 1\n" + "uqU qu 1\n" + "uFk qu 1\n" + "cpW ch 1\n" + "Lpq qu 1\n" + "kfL ka 1\n" + "pQe er 1\n" + "gwz ng 1\n" + "jpM ij 1\n" + "Qkm ka 1\n" + "jgH ng 1\n" + "xjP ij 1\n" + "xgL ng 1\n" + "jLm ij 1\n" + "dxN de 1\n" + "vWs st 1\n" + "Jjh th 1\n" + "hhG th 1\n" + "Yvc ch 1\n" + "xrE er 1\n" + "bZw wa 1\n" + "Lvw va 1\n" + "eNw er 1\n" + "fjB ij 1\n" + "dcQ ch 1\n" + "lZt th 1\n" + "Jwq qu 1\n" + "qPg ng 1\n" + "xMb be 1\n" + "hfD th 1\n" + "jzQ sz 1\n" + "Uuf qu 1\n" + "zGk sz 1\n" + "zCc ch 1\n" + "npC an 1\n" + "tWd th 1\n" + "hjF th 1\n" + "Pzs st 1\n" + "wuA qu 1\n" + "Qhg th 1\n" + "Mqm qu 1\n" + "fsI st 1\n" + "fdU de 1\n" + "Xrm er 1\n" + "qQg ng 1\n" + "bkW ka 1\n" + "dHg ng 1\n" + "rcB ch 1\n" + "hWu th 1\n" + "nIq an 1\n" + "rYq qu 1\n" + "xXv va 1\n" + "wqP qu 1\n" + "xmN me 1\n" + "sJf st 1\n" + "yMf ny 1\n" + "Sfk ka 1\n" + "qzW qu 1\n" + "cvT ch 1\n" + "kmX ka 1\n" + "xqU qu 1\n" + "cnG an 1\n" + "Jpi in 1\n" + "frX er 1\n" + "yLf ny 1\n" + "uyU qu 1\n" + "Ddw de 1\n" + "Tgj 
ng 1\n" + "qeH qu 1\n" + "fEz sz 1\n" + "pCk ka 1\n" + "qmf qu 1\n" + "rjH er 1\n" + "xMp pr 1\n" + "Ywo on 1\n" + "zgD ng 1\n" + "Pqx qu 1\n" + "nqM on 1\n" + "wdX de 1\n" + "Bpz sz 1\n" + "lhM th 1\n" + "Epb pr 1\n" + "bhJ th 1\n" + "kvQ ka 1\n" + "Rsq qu 1\n" + "xbP be 1\n" + "nMm an 1\n" + "xuC qu 1\n" + "wjs sz 1\n" + "fxX fo 1\n" + "hvT th 1\n" + "uPx qu 1\n" + "Jmy me 1\n" + "Qzd de 1\n" + "Nsz st 1\n" + "vWd de 1\n" + "hfX th 1\n" + "jCg ng 1\n" + "yQx ny 1\n" + "whJ th 1\n" + "wrq qu 1\n" + "xgW ng 1\n" + "Jhj th 1\n" + "lhC th 1\n" + "Pwf ow 1\n" + "ljC le 1\n" + "vvB va 1\n" + "mcN ch 1\n" + "yHx ny 1\n" + "bBj ij 1\n" + "qRz qu 1\n" + "glH ng 1\n" + "cZp ch 1\n" + "qJh th 1\n" + "tSg th 1\n" + "xVm me 1\n" + "uWs qu 1\n" + "Vxo on 1\n" + "fjM ij 1\n" + "zhK th 1\n" + "Cjh th 1\n" + "vZr er 1\n" + "bCs sz 1\n" + "rwY er 1\n" + "xEi in 1\n" + "dUv de 1\n" + "fRg ng 1\n" + "Gcu ch 1\n" + "jDf ij 1\n" + "djH de 1\n" + "vlU le 1\n" + "qyG qu 1\n" + "kfq qu 1\n" + "lXg ng 1\n" + "lbC le 1\n" + "Pwg ng 1\n" + "Oae an 1\n" + "pbC pr 1\n" + "dWt th 1\n" + "lzU le 1\n" + "wJz sz 1\n" + "dYj de 1\n" + "cBj ch 1\n" + "fRv va 1\n" + "djG de 1\n" + "mYg ng 1\n" + "Qbc ch 1\n" + "gnX an 1\n" + "wPm me 1\n" + "wvN va 1\n" + "qGm qu 1\n" + "qNh th 1\n" + "mRg ng 1\n" + "Uqv qu 1\n" + "Qxm me 1\n" + "fzX sz 1\n" + "zjM sz 1\n" + "xqA qu 1\n" + "bMs sz 1\n" + "vmL me 1\n" + "Eyx ny 1\n" + "hHj th 1\n" + "jGp ij 1\n" + "mfD me 1\n" + "Jfw wa 1\n" + "Wjh th 1\n" + "bZs sz 1\n" + "Iyk ka 1\n" + "zRn an 1\n" + "cdU ch 1\n" + "mJh th 1\n" + "Qjy ij 1\n" + "Qao an 1\n" + "bXv va 1\n" + "hSg th 1\n" + "rAo er 1\n" + "hLs th 1\n" + "lCs le 1\n" + "qkJ qu 1\n" + "Rxu qu 1\n" + "xdN de 1\n" + "yYx ny 1\n" + "dkN de 1\n" + "Rgw ng 1\n" + "zgL sz 1\n" + "Rcj ch 1\n" + "iWz in 1\n" + "dLk de 1\n" + "mpX me 1\n" + "Gbd de 1\n" + "bnH an 1\n" + "kdM de 1\n" + "wqG qu 1\n" + "vMz sz 1\n" + "zwH sz 1\n" + "wgx ng 1\n" + "Ljk ij 1\n" + "tlG th 1\n" + "tgE th 1\n" + "Wcw ch 1\n" + "Vby be 
1\n" + "mVz sz 1\n" + "Hgc ch 1\n" + "gqP ng 1\n" + "hhB th 1\n" + "nFx an 1\n" + "yBf ny 1\n" + "Wmx me 1\n" + "vNb va 1\n" + "Mnv an 1\n" + "Zmc ch 1\n" + "bzS sz 1\n" + "yfC ny 1\n" + "Epx pr 1\n" + "ljG le 1\n" + "wUa an 1\n" + "Qgo ng 1\n" + "pqb qu 1\n" + "Jkm ka 1\n" + "Wvy va 1\n" + "Bjp ij 1\n" + "vfZ va 1\n" + "wxT wa 1\n" + "Vxw wa 1\n" + "dRt th 1\n" + "nVq an 1\n" + "iWf in 1\n" + "Smq qu 1\n" + "jwG ij 1\n" + "vcW ch 1\n" + "Qgz ng 1\n" + "Wkq qu 1\n" + "xrL er 1\n" + "tVh ch 1\n" + "Zlr er 1\n" + "zDt th 1\n" + "yxP ny 1\n" + "Yyw wa 1\n" + "zPk sz 1\n" + "Bgg ng 1\n" + "xOk ka 1\n" + "oXq qu 1\n" + "tQf th 1\n" + "fxF fo 1\n" + "dOq qu 1\n" + "Vtp th 1\n" + "jhP th 1\n" + "vhZ th 1\n" + "Gqq qu 1\n" + "dFg ng 1\n" + "eCg ng 1\n" + "kjH ij 1\n" + "vqQ qu 1\n" + "jpL ij 1\n" + "hgZ th 1\n" + "xFd de 1\n" + "Qjd de 1\n" + "xKm me 1\n" + "zQc ch 1\n" + "Nhw th 1\n" + "Kqo qu 1\n" + "hwO th 1\n" + "oYn an 1\n" + "Wnf an 1\n" + "vSc ch 1\n" + "Afq qu 1\n" + "jqJ qu 1\n" + "jEg ng 1\n" + "dKp de 1\n" + "nmK an 1\n" + "wXw wa 1\n" + "vjC ij 1\n" + "dXb de 1\n" + "tQn th 1\n" + "qoR qu 1\n" + "bRf be 1\n" + "yyL ny 1\n" + "kSj ij 1\n" + "Xyu qu 1\n" + "vmA va 1\n" + "Zgm ng 1\n" + "Lbx be 1\n" + "bIv va 1\n" + "Zdq qu 1\n" + "gHn an 1\n" + "bYq qu 1\n" + "Mqd qu 1\n" + "qMk qu 1\n" + "Qsv st 1\n" + "zXx sz 1\n" + "hQf th 1\n" + "wcV ch 1\n" + "Xfz sz 1\n" + "Mhc th 1\n" + "kBz sz 1\n" + "bWp pr 1\n" + "Wzu qu 1\n" + "hWw th 1\n" + "yNp pr 1\n" + "xbZ be 1\n" + "mTb me 1\n" + "Kdf de 1\n" + "pfQ pr 1\n" + "vCd de 1\n" + "Pqf qu 1\n" + "ofZ on 1\n" + "wYd de 1\n" + "Tfc ch 1\n" + "Gnb an 1\n" + "Zdx de 1\n" + "zVj sz 1\n" + "Tqw qu 1\n" + "fzV sz 1\n" + "Igq ng 1\n" + "Qvv vi 1\n" + "Pmf me 1\n" + "qHe qu 1\n" + "ybR be 1\n" + "cFg ch 1\n" + "Kvf va 1\n" + "Zxm me 1\n" + "oVc ch 1\n" + "Yhb th 1\n" + "bwP wa 1\n" + "Vvz sz 1\n" + "sdW de 1\n" + "gFz ng 1\n" + "mRl le 1\n" + "bqN qu 1\n" + "bhU th 1\n" + "tBw th 1\n" + "Hbb be 1\n" + "Jzp sz 1\n" + "zrS er 1\n" 
+ "mkZ me 1\n" + "bKw wa 1\n" + "jPx ij 1\n" + "Xqa an 1\n" + "fGz sz 1\n" + "xLk ka 1\n" + "nrV an 1\n" + "Tmx me 1\n" + "zvZ sz 1\n" + "gWl ng 1\n" + "Yxb be 1\n" + "yWt th 1\n" + "lqN qu 1\n" + "tWu th 1\n" + "xZt th 1\n" + "iqI in 1\n" + "cpQ ch 1\n" + "zPf sz 1\n" + "bqG qu 1\n" + "gmI ng 1\n" + "Wkc ch 1\n" + "Zvs sz 1\n" + "qdN qu 1\n" + "hYf th 1\n" + "sBn an 1\n" + "Dwb ow 1\n" + "Wzq qu 1\n" + "Qdw de 1\n" + "svR sz 1\n" + "Nvv va 1\n" + "jRc ch 1\n" + "qDv qu 1\n" + "qGe qu 1\n" + "cwT ch 1\n" + "fTy ny 1\n" + "Cvv va 1\n" + "flQ le 1\n" + "mWg ng 1\n" + "twS th 1\n" + "npM an 1\n" + "Ufq qu 1\n" + "fuG qu 1\n" + "oCj on 1\n" + "txF th 1\n" + "Yft th 1\n" + "qwy qu 1\n" + "Vdz de 1\n" + "Vgq ng 1\n" + "Rkg ng 1\n" + "Pxz sz 1\n" + "mCn an 1\n" + "whZ th 1\n" + "fgB ng 1\n" + "jvW ij 1\n" + "kdL de 1\n" + "Lxi in 1\n" + "svB sz 1\n" + "xuH qu 1\n" + "gFy ng 1\n" + "oVv on 1\n" + "Zhq th 1\n" + "oqG qu 1\n" + "oJp on 1\n" + "gIf ng 1\n" + "bwF wa 1\n" + "vLh th 1\n" + "jgX ng 1\n" + "qKi in 1\n" + "xRh th 1\n" + "qwV qu 1\n" + "mNl le 1\n" + "Gvv va 1\n" + "pQf pr 1\n" + "xbV be 1\n" + "dpZ de 1\n" + "fHq qu 1\n" + "bBd de 1\n" + "vUh th 1\n" + "hzA th 1\n" + "Mnz an 1\n" + "pBt th 1\n" + "oaE an 1\n" + "slK le 1\n" + "Wlg ng 1\n" + "jhK th 1\n" + "xvX va 1\n" + "Ffx fo 1\n" + "gXh th 1\n" + "cWf ch 1\n" + "Gpy pr 1\n" + "xmS me 1\n" + "gZn an 1\n" + "djX de 1\n" + "bkX ka 1\n" + "xlP le 1\n" + "hCt th 1\n" + "Yhj th 1\n" + "gwQ ng 1\n" + "klD le 1\n" + "Rhq th 1\n" + "aEj an 1\n" + "jpY ij 1\n" + "pVn an 1\n" + "nJx an 1\n" + "zdV de 1\n" + "Rvf va 1\n" + "Oqy qu 1\n" + "zpT sz 1\n" + "Pzc ch 1\n" + "qTm qu 1\n" + "jfq ij 1\n" + "ztY th 1\n" + "Zqv qu 1\n" + "nZb an 1\n" + "pHl le 1\n" + "Qcr ch 1\n" + "zVm sz 1\n" + "pNm me 1\n" + "Xhj th 1\n" + "oYy on 1\n" + "Flq qu 1\n" + "lwj le 1\n" + "rwH er 1\n" + "oWq qu 1\n" + "Bwm me 1\n" + "jXs sz 1\n" + "Lkt th 1\n" + "lVn an 1\n" + "jXa an 1\n" + "hkB th 1\n" + "qrQ qu 1\n" + "dqK qu 1\n" + "Zxn an 1\n" + 
"ygZ ng 1\n" + "Fgt th 1\n" + "nwM an 1\n" + "Wzx sz 1\n" + "qgb ng 1\n" + "Ygv ng 1\n" + "Xdd de 1\n" + "xjM ij 1\n" + "qHb qu 1\n" + "zKz sz 1\n" + "dvM de 1\n" + "Zpx pr 1\n" + "wPt th 1\n" + "qiA in 1\n" + "jyV ij 1\n" + "jyR ij 1\n" + "Uox on 1\n" + "Qkz ka 1\n" + "Lxq qu 1\n" + "fpq qu 1\n" + "Xmf me 1\n" + "kRx ka 1\n" + "jFk ij 1\n" + "nZc an 1\n" + "hCp th 1\n" + "Hbw wa 1\n" + "zlF le 1\n" + "kqI qu 1\n" + "wWj ij 1\n" + "qKk qu 1\n" + "Jpf pr 1\n" + "lbR le 1\n" + "rbJ er 1\n" + "zfK sz 1\n" + "gVk ng 1\n" + "bZx be 1\n" + "znQ an 1\n" + "gZb ga 1\n" + "wtI th 1\n" + "bvW va 1\n" + "qhG th 1\n" + "xrV er 1\n" + "pYc ch 1\n" + "bQq qu 1\n" + "qpV qu 1\n" + "pFm me 1\n" + "zdO de 1\n" + "Jvj ij 1\n" + "mQl le 1\n" + "xWm me 1\n" + "Dtz th 1\n" + "lKz le 1\n" + "dkI de 1\n" + "fSx fo 1\n" + "yCp pr 1\n" + "whF th 1\n" + "lVm le 1\n" + "yHv va 1\n" + "Plm le 1\n" + "Jpm me 1\n" + "hEw ha 1\n" + "zHz sz 1\n" + "uIj qu 1\n" + "gzB ng 1\n" + "qsV qu 1\n" + "pbX pr 1\n" + "jyY ij 1\n" + "mjq qu 1\n" + "zDd de 1\n" + "Tqc ch 1\n" + "fTg ng 1\n" + "qbh th 1\n" + "Cjq qu 1\n" + "pcW ch 1\n" + "Xhp th 1\n" + "fwR wa 1\n" + "dQm de 1\n" + "xCk ka 1\n" + "yhM th 1\n" + "glQ ng 1\n" + "gVb ng 1\n" + "Pdy de 1\n" + "yOj ij 1\n" + "jZg ng 1\n" + "oqZ qu 1\n" + "bqI qu 1\n" + "jkX ij 1\n" + "Kfh th 1\n" + "xpQ pr 1\n" + "rhX th 1\n" + "wjI ij 1\n" + "Bqf qu 1\n" + "aCp an 1\n" + "ccX ch 1\n" + "vGm ma 1\n" + "paU an 1\n" + "xUh th 1\n" + "gLd ng 1\n" + "tfJ th 1\n" + "fwH wa 1\n" + "Pnq an 1\n" + "kxV ka 1\n" + "Nbk ka 1\n" + "sqE qu 1\n" + "Cjp ij 1\n" + "kcZ ka 1\n" + "Wqj ij 1\n" + "tzY th 1\n" + "nqX an 1\n" + "Yyc ch 1\n" + "Lzd de 1\n" + "xZy ny 1\n" + "sdY de 1\n" + "jXn an 1\n" + "Nbm me 1\n" + "wLr er 1\n" + "Nqr qu 1\n" + "Zwx wa 1\n" + "yvH va 1\n" + "ylC le 1\n" + "qyh th 1\n" + "Jnz an 1\n" + "hHv th 1\n" + "zUq qu 1\n" + "xgI ng 1\n" + "Ztp th 1\n" + "Vvb va 1\n" + "tGn th 1\n" + "Ujq qu 1\n" + "jHs sz 1\n" + "bWq qu 1\n" + "bXr er 1\n" + "hFg th 1\n" + "gdT 
ng 1\n" + "qHc ch 1\n" + "lCj le 1\n" + "mVg ng 1\n" + "pQq qu 1\n" + "vWl le 1\n" + "yFq qu 1\n" + "djY de 1\n" + "btQ th 1\n" + "vlM le 1\n" + "Iwt th 1\n" + "Pdb de 1\n" + "jtQ th 1\n" + "xjR ij 1\n" + "dhW th 1\n" + "zXs sz 1\n" + "fbE be 1\n" + "Hqr qu 1\n" + "vLt th 1\n" + "kbD ka 1\n" + "vUd de 1\n" + "yZc ch 1\n" + "Qke le 1\n" + "fhG th 1\n" + "eHt th 1\n" + "vHj ij 1\n" + "Tfg ng 1\n" + "uoA qu 1\n" + "zCx sz 1\n" + "zLk sz 1\n" + "jdW de 1\n" + "Cgn an 1\n" + "Lrq qu 1\n" + "yOi in 1\n" + "qOw qu 1\n" + "fqs qu 1\n" + "ltQ th 1\n" + "nwU an 1\n" + "zYq qu 1\n" + "Gzs st 1\n" + "nWv an 1\n" + "lNx le 1\n" + "Wql qu 1\n" + "dcD ch 1\n" + "vfD va 1\n" + "qVd qu 1\n" + "Wzz sz 1\n" + "jfH ij 1\n" + "Rrt th 1\n" + "qDr qu 1\n" + "lOh th 1\n" + "wwZ wa 1\n" + "mQw me 1\n" + "nqK an 1\n" + "Uvl le 1\n" + "kRq qu 1\n" + "Vhg th 1\n" + "xsD st 1\n" + "Ldd de 1\n" + "sQv st 1\n" + "qMj qu 1\n" + "hbQ th 1\n" + "cjX ch 1\n" + "nbT an 1\n" + "xNf fo 1\n" + "wCt th 1\n" + "jnX an 1\n" + "tZf th 1\n" + "qCk qu 1\n" + "dHk de 1\n" + "Ccq ch 1\n" + "uMf qu 1\n" + "bvG va 1\n" + "zPz sz 1\n" + "yIy ny 1\n" + "lHx le 1\n" + "fnB an 1\n" + "Ebx be 1\n" + "rGc ch 1\n" + "mgD ng 1\n" + "hJg th 1\n" + "jcG ch 1\n" + "Ybd de 1\n" + "oDq qu 1\n" + "jRx ij 1\n" + "kJf ka 1\n" + "tFv th 1\n" + "Gdv de 1\n" + "fHn an 1\n" + "Uqp qu 1\n" + "cYh th 1\n" + "kHp ka 1\n" + "qhZ th 1\n" + "wZh th 1\n" + "kQt th 1\n" + "hwH th 1\n" + "xzU sz 1\n" + "tQg th 1\n" + "Qbj ij 1\n" + "zVl le 1\n" + "qJd qu 1\n" + "Xrf er 1\n" + "fMv va 1\n" + "qJc ch 1\n" + "Dqy qu 1\n" + "qMs qu 1\n" + "fzl le 1\n" + "Wdx de 1\n" + "Tdw wa 1\n" + "mcT ch 1\n" + "fOd de 1\n" + "Kgj ng 1\n" + "yrT er 1\n" + "bqA qu 1\n" + "snq an 1\n" + "Lzt th 1\n" + "gLw ng 1\n" + "dLq qu 1\n" + "Qzr er 1\n" + "Qrn an 1\n" + "eFn an 1\n" + "Nmw wa 1\n" + "pxE pr 1\n" + "Cqk qu 1\n" + "Wcd ch 1\n" + "fXw wa 1\n" + "fbU be 1\n" + "aeO an 1\n" + "svV st 1\n" + "yVt th 1\n" + "sRp st 1\n" + "rxU er 1\n" + "qhK th 1\n" + "uQw qu 
1\n" + "oXw on 1\n" + "Jvw va 1\n" + "kvH ka 1\n" + "zVy sz 1\n" + "rOq qu 1\n" + "cWx ch 1\n" + "iXv in 1\n" + "cBk ch 1\n" + "xkM ka 1\n" + "vHb va 1\n" + "jbW ij 1\n" + "mYq qu 1\n" + "fnH an 1\n" + "zRj sz 1\n" + "hvN th 1\n" + "oMh th 1\n" + "yqO qu 1\n" + "fBf fo 1\n" + "oPj on 1\n" + "fFc ch 1\n" + "lVq qu 1\n" + "ptJ th 1\n" + "Ntj th 1\n" + "rwL er 1\n" + "cFz ch 1\n" + "jVd de 1\n" + "Gbv va 1\n" + "oJn an 1\n" + "wkL ka 1\n" + "qoT qu 1\n" + "Qxk ka 1\n" + "rZj ij 1\n" + "Cgd ng 1\n" + "gvW ng 1\n" + "kYv ka 1\n" + "qjR qu 1\n" + "Vnq an 1\n" + "yJt th 1\n" + "xWy ny 1\n" + "bXl le 1\n" + "xVk ka 1\n" + "xuG qu 1\n" + "Hzs st 1\n" + "uDq qu 1\n" + "Ywk ka 1\n" + "Jkh th 1\n" + "Gdm de 1\n" + "qcO ch 1\n" + "hlH th 1\n" + "Jfv va 1\n" + "cLn an 1\n" + "wzG sz 1\n" + "yhF th 1\n" + "kfD ka 1\n" + "kbJ ka 1\n" + "Nqp qu 1\n" + "gYq ng 1\n" + "ztM th 1\n" + "jcD ch 1\n" + "wgY ng 1\n" + "qdT da 1\n" + "vTw va 1\n" + "cNz ch 1\n" + "Jbc ch 1\n" + "Xcj ch 1\n" + "rUw er 1\n" + "gXv ng 1\n" + "dRf de 1\n" + "bJz sz 1\n" + "aqA an 1\n" + "uOz qu 1\n" + "wPj ij 1\n" + "uDw qu 1\n" + "mqF qu 1\n" + "cXr ch 1\n" + "yrL er 1\n" + "nJk an 1\n" + "hsY th 1\n" + "Zqs qu 1\n" + "qeS qu 1\n" + "bLv va 1\n" + "jEo on 1\n" + "pmE me 1\n" + "jIt th 1\n" + "vzZ sz 1\n" + "Qhd th 1\n" + "cnN an 1\n" + "bPq qu 1\n" + "pZw pr 1\n" + "iwR in 1\n" + "oJv ko 1\n" + "ufI qu 1\n" + "wKm me 1\n" + "uWv qu 1\n" + "fCf fo 1\n" + "wBn an 1\n" + "Uyf ny 1\n" + "uVx qu 1\n" + "kKf ka 1\n" + "mrZ er 1\n" + "lXb le 1\n" + "zJm sz 1\n" + "wYr er 1\n" + "Hkw ka 1\n" + "Ewz sz 1\n" + "xJy ny 1\n" + "Emx me 1\n" + "cqL ch 1\n" + "zVk sz 1\n" + "yPb be 1\n" + "zcC ch 1\n" + "Ndq qu 1\n" + "uWf qu 1\n" + "kcM ch 1\n" + "tkB th 1\n" + "yhq th 1\n" + "qaP an 1\n" + "rVs er 1\n" + "dLd de 1\n" + "Sgm ng 1\n" + "Xhx th 1\n" + "xqH qu 1\n" + "Kqy qu 1\n" + "yRw wa 1\n" + "Wdw de 1\n" + "qcQ ch 1\n" + "zbp sz 1\n" + "dtY th 1\n" + "cwB ch 1\n" + "nfV an 1\n" + "cgP ch 1\n" + "pwW pr 1\n" + "pqf qu 1\n" 
+ "Xkp ka 1\n" + "izJ in 1\n" + "cYw ch 1\n" + "iQl in 1\n" + "Qvy va 1\n" + "ylR le 1\n" + "sFp st 1\n" + "Lqg ng 1\n" + "xnP an 1\n" + "gYl ng 1\n" + "wIr er 1\n" + "fqR qu 1\n" + "Qpk ka 1\n" + "qXz qu 1\n" + "Lrr er 1\n" + "sjI st 1\n" + "iyX in 1\n" + "Zfq qu 1\n" + "vtH th 1\n" + "cZf ch 1\n" + "hXp th 1\n" + "rJw er 1\n" + "gbP ng 1\n" + "Qug ng 1\n" + "jRt th 1\n" + "lXh th 1\n" + "pVc ch 1\n" + "kGc ch 1\n" + "Nxr er 1\n" + "yKk ka 1\n" + "xAo on 1\n" + "oUx on 1\n" + "nWx an 1\n" + "fwU wa 1\n" + "mKg ng 1\n" + "qhO th 1\n" + "sGg ng 1\n" + "Wwu qu 1\n" + "cnE an 1\n" + "tjS th 1\n" + "Qyd de 1\n" + "yWm me 1\n" + "Qdj de 1\n" + "jSd de 1\n" + "Ioy on 1\n" + "Xpp pr 1\n" + "xJb be 1\n" + "xvT va 1\n" + "cdT ch 1\n" + "khX th 1\n" + "hVp th 1\n" + "cjT ch 1\n" + "Hqf qu 1\n" + "nbP an 1\n" + "Uwb wa 1\n" + "Kcb ch 1\n" + "qsQ qu 1\n" + "tkZ th 1\n" + "zrX er 1\n" + "zbN sz 1\n" + "mYi in 1\n" + "gLx ng 1\n" + "sGc ch 1\n" + "Pbv va 1\n" + "gcV ch 1\n" + "Qjf ij 1\n" + "wvB va 1\n" + "gKp ng 1\n" + "jZy ij 1\n" + "qhW th 1\n" + "vCg ng 1\n" + "Lrk er 1\n" + "fRw wa 1\n" + "cMj ch 1\n" + "ohK th 1\n" + "frK er 1\n" + "dQq qu 1\n" + "Hdj de 1\n" + "Bkx ka 1\n" + "yXv va 1\n" + "fdO de 1\n" + "sWg ng 1\n" + "Xtf th 1\n" + "rUx ar 1\n" + "qHm qu 1\n" + "kQh th 1\n" + "wzU sz 1\n" + "vTt th 1\n" + "zkN sz 1\n" + "Fqp qu 1\n" + "xJc ch 1\n" + "wkQ ka 1\n" + "wxF wa 1\n" + "vRj ij 1\n" + "jzD sz 1\n" + "Zqu un 1\n" + "zWw sz 1\n" + "zgU ng 1\n" + "ugX ng 1\n" + "pmB me 1\n" + "gzA ng 1\n" + "Zjj ij 1\n" + "xIj ij 1\n" + "xoK on 1\n" + "Gqx qu 1\n" + "uLq qu 1\n" + "lGw le 1\n" + "tZq th 1\n" + "zcN ch 1\n" + "yPz sz 1\n" + "rqN qu 1\n" + "pwG pr 1\n" + "vfP va 1\n" + "vIy va 1\n" + "vEj ij 1\n" + "jqD qu 1\n" + "Hxu qu 1\n" + "qLs qu 1\n" + "Jpy pr 1\n" + "pRw pr 1\n" + "fZs st 1\n" + "Vvx va 1\n" + "zkB sz 1\n" + "yGk ka 1\n" + "kvZ ka 1\n" + "cqW ch 1\n" + "wLg ng 1\n" + "Ypg ng 1\n" + "jrR er 1\n" + "vwZ va 1\n" + "gVd ng 1\n" + "iCw ij 1\n" + "Fxw wa 1\n" + 
"qyZ qu 1\n" + "qgT qu 1\n" + "xLs st 1\n" + "pXg ng 1\n" + "gNv ng 1\n" + "Hgz ng 1\n" + "zJv sz 1\n" + "Hvm va 1\n" + "uXb qu 1\n" + "lLz le 1\n" + "dwP de 1\n" + "gvN ng 1\n" + "cpF ch 1\n" + "vZj ij 1\n" + "Pfv va 1\n" + "xcI ch 1\n" + "yVp pr 1\n" + "fdC de 1\n" + "pbE pr 1\n" + "jQm ij 1\n" + "Tqt th 1\n" + "wMh th 1\n" + "Gkq qu 1\n" + "tdV th 1\n" + "xIk ka 1\n" + "hHp th 1\n" + "Lsb st 1\n" + "Wvs st 1\n" + "Qcw ch 1\n" + "gfQ ng 1\n" + "Fjt th 1\n" + "xBz sz 1\n" + "fLx fo 1\n" + "zkR sz 1\n" + "kjA ij 1\n" + "Fcw ch 1\n" + "fhT th 1\n" + "qiK qu 1\n" + "wQv va 1\n" + "pXl le 1\n" + "hLg th 1\n" + "jJw ij 1\n" + "sOj st 1\n" + "vWb va 1\n" + "Ajq qu 1\n" + "vKc ch 1\n" + "iIy in 1\n" + "pJy pr 1\n" + "Lqc ch 1\n" + "wBd de 1\n" + "kRb ka 1\n" + "Lcp ch 1\n" + "gfB ng 1\n" + "zVn an 1\n" + "qWf qu 1\n" + "Qyf ny 1\n" + "puF qu 1\n" + "fIe er 1\n" + "wGb wa 1\n" + "jjL ij 1\n" + "hcE th 1\n" + "qhp th 1\n" + "gxN ng 1\n" + "tMd th 1\n" + "Rzt th 1\n" + "cgO ch 1\n" + "vmT va 1\n" + "Dcq ch 1\n" + "qoI qu 1\n" + "Nqz qu 1\n" + "vhM th 1\n" + "gBq ng 1\n" + "jWv ij 1\n" + "xmE me 1\n" + "qcd ch 1\n" + "lYj le 1\n" + "dDc ch 1\n" + "xUa an 1\n" + "kVl le 1\n" + "wqN qu 1\n" + "uuI qu 1\n" + "Wzf sz 1\n" + "yvX va 1\n" + "Pyq qu 1\n" + "wuU qu 1\n" + "hLp th 1\n" + "qqL qu 1\n" + "cVh th 1\n" + "Fgs ng 1\n" + "xjF ij 1\n" + "wkG ka 1\n" + "qJr qu 1\n" + "Gzq qu 1\n" + "Ixv va 1\n" + "hMv th 1\n" + "dfQ de 1\n" + "eOx er 1\n" + "mHq qu 1\n" + "Zkn an 1\n" + "nqW an 1\n" + "nJd an 1\n" + "pEh th 1\n" + "gVg ng 1\n" + "Zyf ny 1\n" + "nmT an 1\n" + "csQ ch 1\n" + "Pkq qu 1\n" + "tdP th 1\n" + "fkz sz 1\n" + "Qnc an 1\n" + "pBj ij 1\n" + "Mjv ij 1\n" + "ymJ me 1\n" + "Mxs st 1\n" + "hbL th 1\n" + "vQh th 1\n" + "xDy ny 1\n" + "djC de 1\n" + "cdQ ch 1\n" + "bnL an 1\n" + "Yjl le 1\n" + "qUc ch 1\n" + "mjW ij 1\n" + "zWs st 1\n" + "xvF va 1\n" + "Gqi qu 1\n" + "fGm me 1\n" + "Xuw qu 1\n" + "qCs qu 1\n" + "Kxm me 1\n" + "lNn an 1\n" + "sdL de 1\n" + "Vtn th 1\n" + "sJj 
st 1\n" + "kQj ij 1\n" + "xfX fo 1\n" + "Nqk qu 1\n" + "cBs ch 1\n" + "yzP sz 1\n" + "xUv va 1\n" + "lbT le 1\n" + "wyV wa 1\n" + "Xkm ka 1\n" + "Wdv de 1\n" + "qQn an 1\n" + "sqZ qu 1\n" + "sfW st 1\n" + "gfM ng 1\n" + "Vlp le 1\n" + "Xjx ij 1\n" + "hIj th 1\n" + "Jws st 1\n" + "xZr er 1\n" + "iKw in 1\n" + "Tbd de 1\n" + "zQv sz 1\n" + "nmZ an 1\n" + "bpE pr 1\n" + "zSv sz 1\n" + "Fgi ng 1\n" + "uIw qu 1\n" + "Zvx va 1\n" + "rqR qu 1\n" + "vjZ ij 1\n" + "Njr er 1\n" + "kwF ka 1\n" + "Ovw va 1\n" + "hwZ th 1\n" + "Mvk ka 1\n" + "Dvf va 1\n" + "xsP st 1\n" + "gZq ng 1\n" + "vXv va 1\n" + "wGt th 1\n" + "qlO qu 1\n" + "fNz sz 1\n" + "Nvw va 1\n" + "zdZ de 1\n" + "vxV va 1\n" + "Nhz th 1\n" + "tZm th 1\n" + "iyS in 1\n" + "qZa an 1\n" + "xrZ er 1\n" + "qly qu 1\n" + "cjM ch 1\n" + "kYj ij 1\n" + "iyF in 1\n" + "Cdq qu 1\n" + "xwE wa 1\n" + "xfV fo 1\n" + "wbF wa 1\n" + "wuO qu 1\n" + "Rlh th 1\n" + "fCj ij 1\n" + "bcZ ch 1\n" + "Gjv ij 1\n" + "gLl ng 1\n" + "wLc ch 1\n" + "zmP sz 1\n" + "cYo ch 1\n" + "Rhk th 1\n" + "grM ng 1\n" + "fDh th 1\n" + "Yyb be 1\n" + "uyW un 1\n" + "kGb ka 1\n" + "iwK in 1\n" + "qkN qu 1\n" + "qXd qu 1\n" + "zCb sz 1\n" + "rQf er 1\n" + "xrO er 1\n" + "Fzh th 1\n" + "wSj ij 1\n" + "yPw wa 1\n" + "Bqw qu 1\n" + "kWc ch 1\n" + "qhX th 1\n" + "kBw ka 1\n" + "yvL va 1\n" + "xcT ch 1\n" + "Fbz sz 1\n" + "cEb ch 1\n" + "vEk ka 1\n" + "uQh th 1\n" + "sHw us 1\n" + "Fvf va 1\n" + "wkO ka 1\n" + "wiY in 1\n" + "sPm st 1\n" + "dFn an 1\n" + "qQx qu 1\n" + "Rsg ng 1\n" + "fUj ij 1\n" + "tLw th 1\n" + "sRk st 1\n" + "zkP sz 1\n" + "mvF va 1\n" + "jYb ij 1\n" + "swY is 1\n" + "rRc ch 1\n" + "rHd er 1\n" + "bDk ka 1\n" + "lWv le 1\n" + "vqv qu 1\n" + "qoN qu 1\n" + "zMl le 1\n" + "pfJ pr 1\n" + "Dmz sz 1\n" + "obQ on 1\n" + "Vfz sz 1\n" + "bVd de 1\n" + "Cjv ij 1\n" + "mKz sz 1\n" + "jjE ij 1\n" + "Aqc ch 1\n" + "Cxn an 1\n" + "vpH va 1\n" + "Lxa an 1\n" + "zpH sz 1\n" + "qoF qu 1\n" + "hRz th 1\n" + "yYw wa 1\n" + "dUx de 1\n" + "Kxl le 1\n" + "xUo on 
1\n" + "hDp th 1\n" + "zDf sz 1\n" + "Wsq qu 1\n" + "jzZ sz 1\n" + "mGf me 1\n" + "jjV ij 1\n" + "pfR pr 1\n" + "bPd de 1\n" + "wjq qu 1\n" + "Rjx ij 1\n" + "Lwq qu 1\n" + "fqH qu 1\n" + "jRs sz 1\n" + "sfT sz 1\n" + "Grw er 1\n" + "zGn an 1\n" + "ycW ch 1\n" + "lUq qu 1\n" + "pRq qu 1\n" + "nZq an 1\n" + "Svx va 1\n" + "Phf th 1\n" + "Fvj ij 1\n" + "Qlm le 1\n" + "jgS ng 1\n" + "Mmv va 1\n" + "xPd de 1\n" + "qqw qu 1\n" + "rWp er 1\n" + "qIr qu 1\n" + "Cxf fo 1\n" + "wtG th 1\n" + "cKb ch 1\n" + "btL th 1\n" + "pRx pr 1\n" + "zsB sz 1\n" + "nbD an 1\n" + "jKg ng 1\n" + "bhL th 1\n" + "Yhw th 1\n" + "yYr er 1\n" + "jCm ij 1\n" + "xzK sz 1\n" + "pJl le 1\n" + "Qrr er 1\n" + "uvG qu 1\n" + "cfJ ch 1\n" + "iqX in 1\n" + "vNd de 1\n" + "qcM ch 1\n" + "Wvj ij 1\n" + "vmS va 1\n" + "vWp va 1\n" + "aIj an 1\n" + "jmS ij 1\n" + "Fmk ka 1\n" + "iyN in 1\n" + "bZu qu 1\n" + "Kzj sz 1\n" + "Vwd de 1\n" + "Ulx le 1\n" + "rCv er 1\n" + "wvq qu 1\n" + "Qkr ri 1\n" + "fjC ij 1\n" + "tRr th 1\n" + "pCy pr 1\n" + "fbC be 1\n" + "fQc ch 1\n" + "Xkf ka 1\n" + "Dqr qu 1\n" + "fgE ng 1\n" + "vMm va 1\n" + "dPb de 1\n" + "vjL ij 1\n" + "wKc ch 1\n" + "Pyw wa 1\n" + "eXv er 1\n" + "nVw an 1\n" + "Jww wa 1\n" + "Dfq qu 1\n" + "tCc th 1\n" + "qtH th 1\n" + "Xqm qu 1\n" + "Bhc th 1\n" + "tcX th 1\n" + "xKp pr 1\n" + "tfN th 1\n" + "ibZ in 1\n" + "Nzb sz 1\n" + "Wnj an 1\n" + "vXy va 1\n" + "iVf in 1\n" + "dxT de 1\n" + "jxQ ij 1\n" + "Ddv de 1\n" + "mXd de 1\n" + "fUq qu 1\n" + "wgQ ng 1\n" + "Lgj ng 1\n" + "mgY ng 1\n" + "qMw qu 1\n" + "gpJ ng 1\n" + "sZx st 1\n" + "nXz an 1\n" + "Wve er 1\n" + "lVk le 1\n" + "wCb wa 1\n" + "xvI va 1\n" + "mfJ me 1\n" + "tQq th 1\n" + "dTt th 1\n" + "fqk qu 1\n" + "nVt th 1\n" + "wIh th 1\n" + "Qvp va 1\n" + "vfN va 1\n" + "gQs ng 1\n" + "iVp in 1\n" + "jGl le 1\n" + "xMf fo 1\n" + "xvw wi 1\n" + "zIl le 1\n" + "zfR sz 1\n" + "zWv sz 1\n" + "ehV th 1\n" + "dZq qu 1\n" + "tmK th 1\n" + "cLt th 1\n" + "pZb pr 1\n" + "vnJ an 1\n" + "fvk ka 1\n" + "Xhv th 1\n" 
+ "Vjn an 1\n" + "tgI th 1\n" + "xaJ an 1\n" + "mSf me 1\n" + "Xzm sz 1\n" + "dTz de 1\n" + "xXm me 1\n" + "pQz sz 1\n" + "Cqg ng 1\n" + "bSs st 1\n" + "prW er 1\n" + "hDb th 1\n" + "sXt th 1\n" + "kcD ch 1\n" + "kgZ ng 1\n" + "Tzt th 1\n" + "zcR ch 1\n" + "Xwu qu 1\n" + "kXg ng 1\n" + "Ywv wi 1\n" + "rpK er 1\n" + "wPs is 1\n" + "Kjz sz 1\n" + "fDb be 1\n" + "jrF er 1\n" + "bbQ be 1\n" + "Qdb de 1\n" + "rKt th 1\n" + "vYf va 1\n" + "vxA va 1\n" + "fhM th 1\n" + "jsU st 1\n" + "zXk sz 1\n" + "uwO qu 1\n" + "jsR st 1\n" + "kHn an 1\n" + "xWv va 1\n" + "vfS va 1\n" + "pIv va 1\n" + "bcW ch 1\n" + "zdM sz 1\n" + "gCz ng 1\n" + "hzN th 1\n" + "bQw wa 1\n" + "ojX on 1\n" + "Vqv qu 1\n" + "qWb qu 1\n" + "Ykb ka 1\n" + "xnJ an 1\n" + "sJz st 1\n" + "hRr th 1\n" + "tXs th 1\n" + "Qeb er 1\n" + "Uwd de 1\n" + "nYg an 1\n" + "Yfx fo 1\n" + "xrG er 1\n" + "eZr le 1\n" + "ufV us 1\n" + "rXm er 1\n" + "qZv qu 1\n" + "vQz sz 1\n" + "Tnq an 1\n" + "Rmj ij 1\n" + "jlM le 1\n" + "cqO ch 1\n" + "xWf fo 1\n" + "jcZ ch 1\n" + "jfV ij 1\n" + "Zmj ij 1\n" + "bxM be 1\n" + "fFd de 1\n" + "gjP ng 1\n" + "hMs th 1\n" + "Ysq qu 1\n" + "qkV qu 1\n" + "Kmc ch 1\n" + "xYy ny 1\n" + "dvX de 1\n" + "rwC er 1\n" + "gwW wa 1\n" + "Qpy pr 1\n" + "jXy ij 1\n" + "qOj qu 1\n" + "Qmz sz 1\n" + "Eqq qu 1\n" + "zJs st 1\n" + "fHy ny 1\n" + "hDt th 1\n" + "sDh th 1\n" + "Vkq qu 1\n" + "yLc ch 1\n" + "vHm va 1\n" + "vnX an 1\n" + "jxS ij 1\n" + "Jtj th 1\n" + "qgE ng 1\n" + "bpH pr 1\n" + "Iqy qu 1\n" + "qMn an 1\n" + "dmE de 1\n" + "Hfq qu 1\n" + "pSb pr 1\n" + "xhI th 1\n" + "Qjt th 1\n" + "yfX ny 1\n" + "vuF qu 1\n" + "wFw wa 1\n" + "znS an 1\n" + "zlV le 1\n" + "lkK le 1\n" + "Fvz sz 1\n" + "qjT qu 1\n" + "zoQ on 1\n" + "Wvx va 1\n" + "hMn th 1\n" + "dMw de 1\n" + "gcF ch 1\n" + "dbB de 1\n" + "Cqj qu 1\n" + "mCv va 1\n" + "pJx pr 1\n" + "Dfv va 1\n" + "sjL st 1\n" + "qiG in 1\n" + "Zls le 1\n" + "Vsf st 1\n" + "Fgd ng 1\n" + "wmD me 1\n" + "Dxo on 1\n" + "qrk qu 1\n" + "pJr er 1\n" + "cLx ch 1\n" + 
"jdB de 1\n" + "ybM be 1\n" + "mvM va 1\n" + "jtX th 1\n" + "cnB an 1\n" + "wtW th 1\n" + "Ksd st 1\n" + "wql wa 1\n" + "mhU th 1\n" + "oJy on 1\n" + "Ghp th 1\n" + "qoX qu 1\n" + "xsI st 1\n" + "vFs st 1\n" + "fYe er 1\n" + "lnV an 1\n" + "uXn an 1\n" + "Eoh th 1\n" + "wcM wa 1\n" + "jwK ij 1\n" + "Gke er 1\n" + "uFq qu 1\n" + "Ycg ch 1\n" + "xqy qu 1\n" + "btM th 1\n" + "jHw ij 1\n" + "qeU qu 1\n" + "Qjz sz 1\n" + "nuQ an 1\n" + "Fcx ch 1\n" + "Kqt th 1\n" + "Lqv qu 1\n" + "mwU me 1\n" + "fQs st 1\n" + "kSd de 1\n" + "nYv an 1\n" + "wGj ij 1\n" + "gvZ ng 1\n" + "mqN qu 1\n" + "Fhp th 1\n" + "pMq qu 1\n" + "dBh ch 1\n" + "bXk ka 1\n" + "fqK qu 1\n" + "Yyq qu 1\n" + "Krq qu 1\n" + "Rnv an 1\n" + "uuE qu 1\n" + "Xsz st 1\n" + "fKb be 1\n" + "yIh th 1\n" + "Ncd ch 1\n" + "mLr er 1\n" + "cSs ch 1\n" + "lbE le 1\n" + "xaW an 1\n" + "Rtd th 1\n" + "rbF er 1\n" + "vgR ng 1\n" + "scZ ch 1\n" + "rHp er 1\n" + "eYw er 1\n" + "Lxj ij 1\n" + "qRg ng 1\n" + "jpN ij 1\n" + "rjW er 1\n" + "lgK ng 1\n" + "mCc ch 1\n" + "fGu qu 1\n" + "xzT sz 1\n" + "wQw wa 1\n" + "klJ li 1\n" + "cqk ch 1\n" + "lMh th 1\n" + "pYs st 1\n" + "hQk th 1\n" + "Hxz sz 1\n" + "feY er 1\n" + "fhF th 1\n" + "fBm me 1\n" + "fVt th 1\n" + "zfh th 1\n" + "sbT st 1\n" + "dQy de 1\n" + "Fmc ch 1\n" + "vhL th 1\n" + "Jtb th 1\n" + "Vrx er 1\n" + "yqZ qu 1\n" + "jDm ij 1\n" + "mfV me 1\n" + "oSx on 1\n" + "Jxg ng 1\n" + "wOq qu 1\n" + "dJq qu 1\n" + "Vvc ch 1\n" + "Eqe qu 1\n" + "jqO qu 1\n" + "zxI sz 1\n" + "qKf qu 1\n" + "fdW de 1\n" + "ccM ch 1\n" + "gcW ch 1\n" + "lFn an 1\n" + "Rvq qu 1\n" + "znN an 1\n" + "zbU sz 1\n" + "tNw th 1\n" + "wjK ij 1\n" + "Jbd de 1\n" + "Bfc ch 1\n" + "qeX le 1\n" + "tXk th 1\n" + "slJ le 1\n" + "cKd ch 1\n" + "nCf an 1\n" + "qgV ng 1\n" + "Mhx th 1\n" + "sKf st 1\n" + "hqZ th 1\n" + "Fdt th 1\n" + "qzJ qu 1\n" + "sNn an 1\n" + "tjW th 1\n" + "xcN ch 1\n" + "fcJ ch 1\n" + "djU de 1\n" + "Ygh th 1\n" + "woI on 1\n" + "Yyz sz 1\n" + "kQc ch 1\n" + "hfQ th 1\n" + "nrL an 1\n" + "lQs 
le 1\n" + "mtF th 1\n" + "wbX wa 1\n" + "gmR ng 1\n" + "Zsq qu 1\n" + "ytQ th 1\n" + "mbF me 1\n" + "fgT ng 1\n" + "cWu ch 1\n" + "gxG ng 1\n" + "hNv th 1\n" + "dfW de 1\n" + "zrC er 1\n" + "woX on 1\n" + "wjT ij 1\n" + "Pqw qu 1\n" + "vkf ka 1\n" + "nLz an 1\n" + "cjV ch 1\n" + "fcP ch 1\n" + "vlQ le 1\n" + "Fgq ng 1\n" + "hgP th 1\n" + "Gqy qu 1\n" + "tKs th 1\n" + "Xfv va 1\n" + "yZq qu 1\n" + "yiZ in 1\n" + "rXv er 1\n" + "Ycy ch 1\n" + "fvA va 1\n" + "Tqs qu 1\n" + "hZy th 1\n" + "xwc ch 1\n" + "qVf qu 1\n" + "Mhq th 1\n" + "zSj sz 1\n" + "vhQ th 1\n" + "tzX th 1\n" + "Gvm va 1\n" + "cqU ch 1\n" + "Hhp th 1\n" + "gQk ng 1\n" + "pwL pr 1\n" + "sNw st 1\n" + "qEt th 1\n" + "Nzq qu 1\n" + "zsD st 1\n" + "mDg ng 1\n" + "Rtq th 1\n" + "jLf ij 1\n" + "wTp pr 1\n" + "xJh th 1\n" + "Vqo qu 1\n" + "Zqk qu 1\n" + "qqQ qu 1\n" + "hrY th 1\n" + "Wqo qu 1\n" + "mIy me 1\n" + "Ipk ka 1\n" + "xjC ij 1\n" + "lLp le 1\n" + "hqF th 1\n" + "cWg ch 1\n" + "qYc qu 1\n" + "cjU ch 1\n" + "qXk qu 1\n" + "hqL th 1\n" + "zxT sz 1\n" + "dnX an 1\n" + "zBt th 1\n" + "Qls le 1\n" + "khC th 1\n" + "uqX qu 1\n" + "Zbf be 1\n" + "iDx li 1\n" + "Znp an 1\n" + "Jxq qu 1\n" + "jqY qu 1\n" + "vbU va 1\n" + "qRr qu 1\n" + "qpj qu 1\n" + "wlG le 1\n" + "Wgx ng 1\n" + "Vxj ij 1\n" + "zSw sz 1\n" + "ihW th 1\n" + "kzT sz 1\n" + "aeZ an 1\n" + "hKj th 1\n" + "tWs th 1\n" + "gLc ch 1\n" + "gpK ng 1\n" + "yJz sz 1\n" + "Gvt th 1\n" + "fEo on 1\n" + "sKd st 1\n" + "xhN th 1\n" + "aMq an 1\n" + "ehX th 1\n" + "kfZ ku 1\n" + "Wwc ch 1\n" + "Ymz sz 1\n" + "Vkd de 1\n" + "bzD sz 1\n" + "Xkg ng 1\n" + "Vzz sz 1\n" + "xvV va 1\n" + "pHh th 1\n" + "rKq qu 1\n" + "vmM va 1\n" + "Qxj ij 1\n" + "zNr er 1\n" + "bqB qu 1\n" + "Jqw qu 1\n" + "zqB qu 1\n" + "Xvm va 1\n" + "lBf le 1\n" + "qqB qu 1\n" + "gCs ng 1\n" + "rRg ng 1\n" + "Rnm an 1\n" + "Lzw sz 1\n" + "iwN in 1\n" + "pfN pr 1\n" + "hCw wa 1\n" + "uHz qu 1\n" + "cLc ch 1\n" + "lwD le 1\n" + "qjB qu 1\n" + "Ojy ij 1\n" + "dmV di 1\n" + "cCw ch 1\n" + "lXs le 
1\n" + "smR st 1\n" + "mxO me 1\n" + "Jrt th 1\n" + "zjN sz 1\n" + "bBn an 1\n" + "cxQ ch 1\n" + "Kdp de 1\n" + "Dlb le 1\n" + "pqD qu 1\n" + "qqC qu 1\n" + "Spz sz 1\n" + "tCd th 1\n" + "gfP ng 1\n" + "uGj qu 1\n" + "xbE be 1\n" + "Xpv va 1\n" + "Xzt th 1\n" + "gqG qu 1\n" + "kqq qu 1\n" + "Kvq qu 1\n" + "qWi qu 1\n" + "mxZ me 1\n" + "qoY qu 1\n" + "Sgf ng 1\n" + "cRv ch 1\n" + "Wgi ng 1\n" + "eDx er 1\n" + "cWw ch 1\n" + "vFq qu 1\n" + "Kxv va 1\n" + "iWp in 1\n" + "fRx fo 1\n" + "wtB th 1\n" + "swW st 1\n" + "grK ng 1\n" + "Hfe er 1\n" + "gfZ ng 1\n" + "xqX qu 1\n" + "oKj on 1\n" + "vfq qu 1\n" + "pWw pr 1\n" + "uWc ch 1\n" + "lCg ng 1\n" + "qkg qu 1\n" + "cDh th 1\n" + "Sfz sz 1\n" + "uYx qu 1\n" + "xvR va 1\n" + "eAo er 1\n" + "pYg ng 1\n" + "dRx de 1\n" + "iWd in 1\n" + "gGx ng 1\n" + "bXz sz 1\n" + "kcP ch 1\n" + "hcJ th 1\n" + "lCf le 1\n" + "gmW ng 1\n" + "Hkf ka 1\n" + "rhL th 1\n" + "jqP qu 1\n" + "rQp er 1\n" + "vCn an 1\n" + "dWj de 1\n" + "Hrx er 1\n" + "sTz st 1\n" + "aVt th 1\n" + "qwK qu 1\n" + "vvE va 1\n" + "wKp pr 1\n" + "xcY ch 1\n" + "vpM va 1\n" + "jlC le 1\n" + "dlG le 1\n" + "oTq qu 1\n" + "iLp in 1\n" + "xsL st 1\n" + "lFz le 1\n" + "vhC th 1\n" + "ylX le 1\n" + "pmO me 1\n" + "Ycc ch 1\n" + "Ynp an 1\n" + "Ybm me 1\n" + "Qln an 1\n" + "bxA be 1\n" + "tFs th 1\n" + "Lqw qu 1\n" + "zcU ch 1\n" + "vfK va 1\n" + "vpQ va 1\n" + "Dtf th 1\n" + "bTj ij 1\n" + "Vvw va 1\n" + "Qbx be 1\n" + "zWk sz 1\n" + "bSx be 1\n" + "zpK sz 1\n" + "wTb wa 1\n" + "mkC ka 1\n" + "cRh th 1\n" + "nBk an 1\n" + "xGv va 1\n" + "hnQ th 1\n" + "aqQ an 1\n" + "zhZ th 1\n" + "zwP sz 1\n" + "vqL qu 1\n" + "scU ch 1\n" + "glS ng 1\n" + "pjE ij 1\n" + "qqD qu 1\n" + "lRx le 1\n" + "qVr qu 1\n" + "Xuh th 1\n" + "brB er 1\n" + "Qyc ch 1\n" + "Sgx ng 1\n" + "dqk qu 1\n" + "bYj ij 1\n" + "mPx me 1\n" + "Fdv de 1\n" + "Xmd de 1\n" + "cPj ch 1\n" + "Pqg qu 1\n" + "vYh th 1\n" + "bJx be 1\n" + "dQt th 1\n" + "fxj ij 1\n" + "Hwq qu 1\n" + "vgC ng 1\n" + "kjK ij 1\n" + "nrC an 1\n" 
+ "vqX qu 1\n" + "Bgk ng 1\n" + "Cbv va 1\n" + "Uww wa 1\n" + "wcJ ch 1\n" + "gBf ng 1\n" + "zTv va 1\n" + "zwX sz 1\n" + "lWg le 1\n" + "qOs qu 1\n" + "fbB be 1\n" + "xqG qu 1\n" + "jQj ij 1\n" + "voQ on 1\n" + "yjW ij 1\n" + "qvO qu 1\n" + "xbF be 1\n" + "nWu an 1\n" + "yjQ ij 1\n" + "cjK ch 1\n" + "Sxn an 1\n" + "ybX be 1\n" + "eYg ng 1\n" + "Bmn an 1\n" + "fDt th 1\n" + "jXm ij 1\n" + "nMt th 1\n" + "Sxb be 1\n" + "lHm le 1\n" + "gfY ng 1\n" + "nwG an 1\n" + "gHl ng 1\n" + "Wpm me 1\n" + "wFj ij 1\n" + "hGm th 1\n" + "wwC wa 1\n" + "Mlf le 1\n" + "cJb ch 1\n" + "bnC an 1\n" + "Fvp va 1\n" + "tGc th 1\n" + "fhZ th 1\n" + "Vkh th 1\n" + "jwg ng 1\n" + "xbK be 1\n" + "zVq qu 1\n" + "qTz qu 1\n" + "vrD er 1\n" + "fRt th 1\n" + "fFs st 1\n" + "hWg th 1\n" + "lzE le 1\n" + "lwX le 1\n" + "jHy ij 1\n" + "Qqt th 1\n" + "Dqi in 1\n" + "Tvj ij 1\n" + "gPb ng 1\n" + "dPz sz 1\n" + "zdT sz 1\n" + "mvA va 1\n" + "Zvh th 1\n" + "qaU an 1\n" + "fwQ wa 1\n" + "Rsw st 1\n" + "klB le 1\n" + "vlN le 1\n" + "Gvx va 1\n" + "pdJ de 1\n" + "lcB ch 1\n" + "vTq qu 1\n" + "yhV th 1\n" + "jLv ij 1\n" + "pzR sz 1\n" + "Xyw wa 1\n" + "Xlq qu 1\n" + "Rqw wa 1\n" + "zhP th 1\n" + "sgT ng 1\n" + "gpG ng 1\n" + "tkY th 1\n" + "dqE qu 1\n" + "Qcg ch 1\n" + "bfB be 1\n" + "Wpv va 1\n" + "Wxl le 1\n" + "Xbq qu 1\n" + "yFh th 1\n" + "Rfq qu 1\n" + "hhL th 1\n" + "jxz sz 1\n" + "bKh th 1\n" + "ptU th 1\n" + "cXe ch 1\n" + "zXm sz 1\n" + "Ghw th 1\n" + "dzY sz 1\n" + "dXn an 1\n" + "kxW ka 1\n" + "vVr er 1\n" + "Jxu un 1\n" + "bbX be 1\n" + "rPb er 1\n" + "qCm qu 1\n" + "qiJ qu 1\n" + "Xgw ng 1\n" + "Nhq th 1\n" + "cGp po 1\n" + "hPw th 1\n" + "bTz sz 1\n" + "qIg ng 1\n" + "pJh th 1\n" + "wcE ch 1\n" + "mCb me 1\n" + "bJc ch 1\n" + "nzQ an 1\n" + "yqR qu 1\n" + "xHw wa 1\n" + "bwH wa 1\n" + "qCr qu 1\n" + "Uqe qu 1\n" + "qxM qu 1\n" + "fpO pr 1\n" + "kcN ch 1\n" + "ykV ka 1\n" + "mQb me 1\n" + "Yqs qu 1\n" + "yVk ka 1\n" + "vbX va 1\n" + "mTd de 1\n" + "jXo on 1\n" + "wqJ qu 1\n" + "kKt th 1\n" + 
"fkS ka 1\n" + "Wvz sz 1\n" + "Iyv va 1\n" + "hGk th 1\n" + "Fze er 1\n" + "bhM th 1\n" + "qvI qu 1\n" + "nXq an 1\n" + "nXc an 1\n" + "kJt th 1\n" + "Nqc ch 1\n" + "Yjc ch 1\n" + "Fhb th 1\n" + "jyK ij 1\n" + "Jzj sz 1\n" + "yqc ch 1\n" + "wmZ me 1\n" + "zbF sz 1\n" + "spq qu 1\n" + "gPn an 1\n" + "jSg ng 1\n" + "gMh th 1\n" + "fXt th 1\n" + "Fyw wa 1\n" + "Fwg ng 1\n" + "hmN th 1\n" + "hNl th 1\n" + "tqY th 1\n" + "pGm me 1\n" + "mXz sz 1\n" + "qYy qu 1\n" + "Rmq qu 1\n" + "Dqa an 1\n" + "Wkx ka 1\n" + "dpT de 1\n" + "jyJ ij 1\n" + "Jqj qu 1\n" + "wjZ ij 1\n" + "xNr er 1\n" + "qAm qu 1\n" + "hBn th 1\n" + "qpJ qu 1\n" + "ygW ng 1\n" + "jXf ij 1\n" + "rMl er 1\n" + "zgV ng 1\n" + "nLp an 1\n" + "pFx pr 1\n" + "tvG th 1\n" + "zQl le 1\n" + "fdF de 1\n" + "bxK be 1\n" + "Bcx ch 1\n" + "rpY er 1\n" + "sJb st 1\n" + "Kvh th 1\n" + "kNq qu 1\n" + "zHd sz 1\n" + "dzF sz 1\n" + "tJq th 1\n" + "Hfv va 1\n" + "vQd de 1\n" + "pKj ij 1\n" + "fhV th 1\n" + "qZi qu 1\n" + "ohY th 1\n" + "vqq qu 1\n" + "tnQ th 1\n" + "Vqk qu 1\n" + "zJf sz 1\n" + "Jkz sz 1\n" + "Rwf wa 1\n" + "zvM va 1\n" + "bxY be 1\n" + "pXh th 1\n" + "fUy ny 1\n" + "pvE va 1\n" + "Lpk ka 1\n" + "dzV sz 1\n" + "xIf fo 1\n" + "wZw wa 1\n" + "npQ an 1\n" + "pWk ka 1\n" + "jgQ ng 1\n" + "Jqr qu 1\n" + "gmX ng 1\n" + "jfM ij 1\n" + "lWj le 1\n" + "pbN pr 1\n" + "fvF va 1\n" + "sDd st 1\n" + "qdB qu 1\n" + "frL er 1\n" + "uHn an 1\n" + "gwN ng 1\n" + "yBh th 1\n" + "Zzq qu 1\n" + "vDg ng 1\n" + "Qcz ch 1\n" + "qzf qu 1\n" + "wEc ch 1\n" + "pxH pr 1\n" + "fqO qu 1\n" + "Vqe qu 1\n" + "gkD ng 1\n" + "Xfq qu 1\n" + "uXg qu 1\n" + "jCw ij 1\n" + "Pzu qu 1\n" + "gRh th 1\n" + "vqH qu 1\n" + "vvW va 1\n" + "Rfb be 1\n" + "gqJ qu 1\n" + "tgO th 1\n" + "wUy wa 1\n" + "Jkw ka 1\n" + "hSs th 1\n" + "gkW ng 1\n" + "Qgy ng 1\n" + "dJb de 1\n" + "prF er 1\n" + "buX qu 1\n" + "cVg ch 1\n" + "jtU th 1\n" + "fDc ch 1\n" + "Ygc ch 1\n" + "Kqr qu 1\n" + "Uyp pr 1\n" + "lJk le 1\n" + "sxY st 1\n" + "xfY fo 1\n" + "Xkz sz 1\n" + "cgZ 
ch 1\n" + "cyX ch 1\n" + "gbF ng 1\n" + "zTk sz 1\n" + "hsU th 1\n" + "tlW th 1\n" + "Zzv sz 1\n" + "kqE qu 1\n" + "lpQ po 1\n" + "qJu un 1\n" + "hYi th 1\n" + "zlM le 1\n" + "vDt th 1\n" + "Hvn an 1\n" + "Nsf st 1\n" + "bJg ng 1\n" + "fNg ng 1\n" + "kQo on 1\n" + "Kqp qu 1\n" + "bKs st 1\n" + "mHp me 1\n" + "Uyj ij 1\n" + "cxY ch 1\n" + "yIe er 1\n" + "qTj qu 1\n" + "wfP wa 1\n" + "fxI fo 1\n" + "vQa an 1\n" + "fvN va 1\n" + "pwN pr 1\n" + "vaQ an 1\n" + "mxQ me 1\n" + "bdV de 1\n" + "Cgj ng 1\n" + "xjz sz 1\n" + "Wqw qu 1\n" + "wpO pr 1\n" + "woQ on 1\n" + "xYj ij 1\n" + "fpT pr 1\n" + "lNp le 1\n" + "pvX va 1\n" + "pLp pr 1\n" + "Ksg ng 1\n" + "rWg ng 1\n" + "iUy in 1\n" + "bfX be 1\n" + "xsV st 1\n" + "Xnj an 1\n" + "dmW de 1\n" + "oQw on 1\n" + "Zxy ny 1\n" + "Oay an 1\n" + "pjG ij 1\n" + "Zbt th 1\n" + "Hql qu 1\n" + "Zxq qu 1\n" + "jWd de 1\n" + "qUp qu 1\n" + "qxN qu 1\n" + "qCo qu 1\n" + "Yfd de 1\n" + "vvU va 1\n" + "vIk ka 1\n" + "Dfj ij 1\n" + "Zmh th 1\n" + "Cqt th 1\n" + "vQf va 1\n" + "Nbn an 1\n" + "tJs th 1\n" + "Fhx th 1\n" + "dzQ sz 1\n" + "zYj ij 1\n" + "qBw qu 1\n" + "vcV ch 1\n" + "gGt th 1\n" + "iVw in 1\n" + "Fzp sz 1\n" + "bjH ij 1\n" + "cuY ch 1\n" + "jwS ij 1\n" + "Cqp qu 1\n" + "yJv va 1\n" + "kdJ de 1\n" + "kdT de 1\n" + "nqB an 1\n" + "hWs th 1\n" + "qsj qu 1\n" + "hLw th 1\n" + "hdX th 1\n" + "cgV ch 1\n" + "tYc th 1\n" + "eZx er 1\n" + "hfN th 1\n" + "gvw ng 1\n" + "aVp an 1\n" + "gMs ng 1\n" + "Pbf be 1\n" + "mQf me 1\n" + "yUi in 1\n" + "vGf va 1\n" + "xgF ng 1\n" + "zvY sz 1\n" + "wrA er 1\n" + "yrM er 1\n" + "vMj ij 1\n" + "Uyv va 1\n" + "dLp de 1\n" + "Gjj ij 1\n" + "zEi in 1\n" + "Xdg ng 1\n" + "jHf ij 1\n" + "oPz on 1\n" + "xIz sz 1\n" + "bCb be 1\n" + "Dzq qu 1\n" + "Yjn an 1\n" + "gGz ng 1\n" + "mjU ij 1\n" + "Cjx ij 1\n" + "xKc ch 1\n" + "mvO va 1\n" + "Pzb sz 1\n" + "crK ch 1\n" + "xhO th 1\n" + "ylB le 1\n" + "lDk le 1\n" + "zlO le 1\n" + "pgH ng 1\n" + "vQb va 1\n" + "sdZ st 1\n" + "kQm ka 1\n" + "lRh th 1\n" + "oQy on 
1\n" + "twC th 1\n" + "Bdj ij 1\n" + "Qjg ng 1\n" + "dnP an 1\n" + "Nnp an 1\n" + "qiP qu 1\n" + "Ccj ch 1\n" + "uHt th 1\n" + "qLx qu 1\n" + "Qsf st 1\n" + "fKx fo 1\n" + "fkE ka 1\n" + "jlX le 1\n" + "jZb ij 1\n" + "Vwj ij 1\n" + "zbA sz 1\n" + "Hhd th 1\n" + "cbY ch 1\n" + "Ikf ka 1\n" + "Grx er 1\n" + "jpP ij 1\n" + "Qfh th 1\n" + "xhW th 1\n" + "wmX me 1\n" + "aJb an 1\n" + "sfO st 1\n" + "qXq qu 1\n" + "mXg ng 1\n" + "bnV an 1\n" + "Ypw pr 1\n" + "zCy sz 1\n" + "lhN th 1\n" + "rXn an 1\n" + "fGh th 1\n" + "Wxq qu 1\n" + "cxT ch 1\n" + "Zsg ng 1\n" + "uGv qu 1\n" + "bzM sz 1\n" + "zjS sz 1\n" + "dfS de 1\n" + "gpH ng 1\n" + "qgO ng 1\n" + "kqF qu 1\n" + "qfU qu 1\n" + "qTp qu 1\n" + "vZb va 1\n" + "Ejw ij 1\n" + "zQn an 1\n" + "gYz ng 1\n" + "kjV ij 1\n" + "fWl le 1\n" + "fRk ka 1\n" + "uSj qu 1\n" + "Cxg ng 1\n" + "Lcv ch 1\n" + "bzK sz 1\n" + "wqF qu 1\n" + "qJp qu 1\n" + "rCj er 1\n" + "qvs qu 1\n" + "lwN le 1\n" + "xmR me 1\n" + "btC th 1\n" + "kTx ka 1\n" + "qkU qu 1\n" + "Lhj th 1\n" + "dIx de 1\n" + "vsQ st 1\n" + "gSd ng 1\n" + "wDl le 1\n" + "Vjm ij 1\n" + "pmI me 1\n" + "vWh th 1\n" + "fKv va 1\n" + "xPt th 1\n" + "uoQ qu 1\n" + "Kgh th 1\n" + "gwX ng 1\n" + "sgJ ng 1\n" + "pWj ij 1\n" + "Qff fo 1\n" + "hkJ th 1\n" + "Hqo qu 1\n" + "jwW ij 1\n" + "sQz st 1\n" + "wUw wa 1\n" + "mKx me 1\n" + "oQf on 1\n" + "jVk ij 1\n" + "xwT wa 1\n" + "sTq qu 1\n" + "uqV qu 1\n" + "Qlp le 1\n" + "pMb pr 1\n" + "xKj ij 1\n" + "bpX pr 1\n" + "vQe er 1\n" + "Jjq qu 1\n" + "qKh th 1\n" + "fkJ ka 1\n" + "jbQ ij 1\n" + "mZw me 1\n" + "Xgc ch 1\n" + "vzU sz 1\n" + "pTm me 1\n" + "pNq qu 1\n" + "rwD er 1\n" + "Qdg ng 1\n" + "wqC qu 1\n" + "Yrn an 1\n" + "qww qu 1\n" + "qwU qu 1\n" + "xzF sz 1\n" + "flW le 1\n" + "jzP sz 1\n" + "Wxp pr 1\n" + "rDq qu 1\n" + "dGp de 1\n" + "Ztj th 1\n" + "Uvp va 1\n" + "eGc ch 1\n" + "zZb sz 1\n" + "gQh th 1\n" + "tFd th 1\n" + "Mqg ng 1\n" + "dnD an 1\n" + "hvY th 1\n" + "Iyb be 1\n" + "fDz sz 1\n" + "Kbj ij 1\n" + "vYm va 1\n" + "Wxr er 1\n" 
+ "Kwz sz 1\n" + "hrQ th 1\n" + "yCt th 1\n" + "Hxw wa 1\n" + "hEf th 1\n" + "bdU de 1\n" + "sGj st 1\n" + "Gwt th 1\n" + "bYh th 1\n" + "zmU sz 1\n" + "pDm po 1\n" + "qmC qu 1\n" + "dTd de 1\n" + "Qxq qu 1\n" + "uVf qu 1\n" + "qAl qu 1\n" + "jEa an 1\n" + "Kpy pr 1\n" + "Hqv qu 1\n" + "fCk ka 1\n" + "aqZ an 1\n" + "lUo on 1\n" + "Pvo on 1\n" + "Dqf qu 1\n" + "gdM ng 1\n" + "fzL sz 1\n" + "Bhh th 1\n" + "dGd de 1\n" + "wtY th 1\n" + "qTy qu 1\n" + "Uxr er 1\n" + "Vvm va 1\n" + "vHh th 1\n" + "qZc ch 1\n" + "fhC th 1\n" + "xdZ de 1\n" + "hZp th 1\n" + "Pmz sz 1\n" + "cfT ch 1\n" + "pjI ij 1\n" + "mdZ de 1\n" + "jkQ ij 1\n" + "Sdj de 1\n" + "hDf th 1\n" + "eJj er 1\n" + "wjY ij 1\n" + "zLm sz 1\n" + "eFs er 1\n" + "wgj ng 1\n" + "Zmk ka 1\n" + "lvJ le 1\n" + "xYm me 1\n" + "Nzf sz 1\n" + "wJi in 1\n" + "yQs st 1\n" + "pfM pr 1\n" + "dhR th 1\n" + "cmK ch 1\n" + "dhM th 1\n" + "qGb qu 1\n" + "wvQ va 1\n" + "Cgq ng 1\n" + "Jfc ch 1\n" + "bkD ka 1\n" + "fdS de 1\n" + "Ivp va 1\n" + "Gkj ij 1\n" + "zIv sz 1\n" + "Bzl le 1\n" + "gBb ng 1\n" + "Tpj ij 1\n" + "vyY va 1\n" + "Uxs st 1\n" + "kwW ka 1\n" + "gPf ng 1\n" + "pqC qu 1\n" + "cTj ch 1\n" + "yzI sz 1\n" + "Yph th 1\n" + "bvD va 1\n" + "xCc ch 1\n" + "pcQ ch 1\n" + "fZw wa 1\n" + "Zxf fo 1\n" + "wbA wa 1\n" + "bTf be 1\n" + "rxR er 1\n" + "qqE qu 1\n" + "yFp pr 1\n" + "pNf pr 1\n" + "kMv ka 1\n" + "vUq qu 1\n" + "wOh th 1\n" + "hxH th 1\n" + "Xqh th 1\n" + "uIu qu 1\n" + "Fzq qu 1\n" + "Ysd st 1\n" + "ojY on 1\n" + "cEo ch 1\n" + "lwR le 1\n" + "qjF qu 1\n" + "jTp ij 1\n" + "yzT sz 1\n" + "jfO ij 1\n" + "qSg ng 1\n" + "Nck ch 1\n" + "hwF th 1\n" + "Gmq qu 1\n" + "Iiq qu 1\n" + "zwE sz 1\n" + "qQv qu 1\n" + "xVd de 1\n" + "Ywq qu 1\n" + "sFx st 1\n" + "fvB va 1\n" + "qYe le 1\n" + "gwT ng 1\n" + "Wjx ij 1\n" + "bHn an 1\n" + "fMn an 1\n" + "gJg ng 1\n" + "Vkg ng 1\n" + "Fxv va 1\n" + "lHv le 1\n" + "Wpk ka 1\n" + "xAq qu 1\n" + "rxB pr 1\n" + "xuQ qu 1\n" + "pIb pr 1\n" + "bfE be 1\n" + "gRx ng 1\n" + "Bpb pr 1\n" + 
"bxN be 1\n" + "kgU ng 1\n" + "Pxc ch 1\n" + "cCq ch 1\n" + "Npb pr 1\n" + "lxE le 1\n" + "lCy le 1\n" + "dgX ng 1\n" + "xLf fo 1\n" + "bQt th 1\n" + "qgF ng 1\n" + "pxZ pr 1\n" + "pPx pr 1\n" + "iYz in 1\n" + "vJl le 1\n" + "kTf ka 1\n" + "qVm qu 1\n" + "gwS ng 1\n" + "zTd sz 1\n" + "pQk ka 1\n" + "xEg ng 1\n" + "fpP pr 1\n" + "qjw qu 1\n" + "Oyw wa 1\n" + "mcO ch 1\n" + "Vjd de 1\n" + "qdg ng 1\n" + "Lfp pr 1\n" + "vZc ch 1\n" + "nOq an 1\n" + "qjn an 1\n" + "sKc ch 1\n" + "wgU ng 1\n" + "hgX th 1\n" + "dMv de 1\n" + "Xcp ch 1\n" + "Fwz sz 1\n" + "pwA pr 1\n" + "Lpj ij 1\n" + "bkP ka 1\n" + "vHn an 1\n" + "Jjy ij 1\n" + "mCq qu 1\n" + "wvM va 1\n" + "Icb ch 1\n" + "kfJ ka 1\n" + "hsQ th 1\n" + "dWd de 1\n" + "fUs st 1\n" + "fLn an 1\n" + "pjN ij 1\n" + "zgQ ng 1\n" + "jLj ij 1\n" + "zqE qu 1\n" + "Qmv va 1\n" + "Zjr er 1\n" + "Zkp ka 1\n" + "iyH in 1\n" + "wuY qu 1\n" + "mzT sz 1\n" + "cwK ch 1\n" + "bCm me 1\n" + "ydG de 1\n" + "xdU de 1\n" + "wTf wa 1\n" + "lHh th 1\n" + "qyD qu 1\n" + "xlV le 1\n" + "qyT qu 1\n" + "tWn th 1\n" + "rMz er 1\n" + "pXv va 1\n" + "Xbz sz 1\n" + "kHm ka 1\n" + "cVd ch 1\n" + "qzH qu 1\n" + "ydN de 1\n" + "qMb qu 1\n" + "yjS ij 1\n" + "gmC ng 1\n" + "zIi in 1\n" + "fpM pr 1\n" + "lcZ ch 1\n" + "qHn an 1\n" + "Jjd de 1\n" + "jlG le 1\n" + "qcK ch 1\n" + "xQm me 1\n" + "vIi in 1\n" + "wBp pr 1\n" + "wcI ch 1\n" + "dJd de 1\n" + "Qbn an 1\n" + "Bjf ij 1\n" + "dpY de 1\n" + "dcF ch 1\n" + "xSj ij 1\n" + "iXj in 1\n" + "Qgb ng 1\n" + "gDt th 1\n" + "xxq qu 1\n" + "xcQ ch 1\n" + "Sqs qu 1\n" + "Qmg ng 1\n" + "gcU ch 1\n" + "Bvv va 1\n" + "pzE sz 1\n" + "wtT th 1\n" + "vbL va 1\n" + "bCt th 1\n" + "Qpo on 1\n" + "mXs me 1\n" + "Zqr qu 1\n" + "Gky ka 1\n" + "Xmr er 1\n" + "Lnz an 1\n" + "vYq qu 1\n" + "yRl le 1\n" + "gmK ng 1\n" + "vwP va 1\n" + "eFg ng 1\n" + "Njd de 1\n" + "klG le 1\n" + "hbE th 1\n" + "kWz sz 1\n" + "qpM qu 1\n" + "oZc ch 1\n" + "jRm ij 1\n" + "wXl le 1\n" +#ifndef _MSC_VER // TODO: Hack to avoid unsupported long string 
for MS VC. + "iyD in 1\n" + "fvL va 1\n" + "rPw er 1\n" + "fdR de 1\n" + "iSg ng 1\n" + "dbQ de 1\n" + "xxQ xe 1\n" + "Djc ch 1\n" + "ygK ng 1\n" + "Rhb th 1\n" + "zgG ng 1\n" + "Yky ka 1\n" + "Cxj ij 1\n" + "wWk ka 1\n" + "lmY le 1\n" + "qrB qu 1\n" + "ywK wa 1\n" + "xqI qu 1\n" + "Twj ij 1\n" + "Xgq ng 1\n" + "dwZ de 1\n" + "nQl an 1\n" + "Ghc th 1\n" + "pnH an 1\n" + "vmU va 1\n" + "qqK qu 1\n" + "cjB ch 1\n" + "gzS ng 1\n" + "Rwz sz 1\n" + "gYr ng 1\n" + "Fgx ng 1\n" + "wdK de 1\n" + "hxZ th 1\n" + "xUx xe 1\n" + "wmT me 1\n" + "yYk ka 1\n" + "fcD ch 1\n" + "hVv th 1\n" + "Sgv ng 1\n" + "zPn an 1\n" + "vYb va 1\n" + "bzE sz 1\n" + "whV th 1\n" + "qNz qu 1\n" + "wtS th 1\n" + "vhY th 1\n" + "nLf an 1\n" + "Lfw wa 1\n" + "gVc ch 1\n" + "gkS ng 1\n" + "Jqb qu 1\n" + "hWx th 1\n" + "zgO ng 1\n" + "tgX th 1\n" + "jPb ij 1\n" + "Wxb be 1\n" + "gqw ng 1\n" + "Cfw wa 1\n" + "woU on 1\n" + "ycJ ch 1\n" + "kwD ka 1\n" + "Sbp pr 1\n" + "qcw ch 1\n" + "Hwr er 1\n" + "bmL me 1\n" + "gwZ ng 1\n" + "yKj ij 1\n" + "fXv va 1\n" + "iKx in 1\n" + "lRz le 1\n" + "cHj ch 1\n" + "fFt th 1\n" + "sJv sz 1\n" + "xmI me 1\n" + "cCd ch 1\n" + "iYd in 1\n" + "yfY ny 1\n" + "xbY be 1\n" + "bmE me 1\n" + "fBv va 1\n" + "dHw de 1\n" + "ycR ch 1\n" + "wvL va 1\n" + "rjL er 1\n" + "sYv sz 1\n" + "Wpn an 1\n" + "zxB sz 1\n" + "yBq qu 1\n" + "gdJ ng 1\n" + "Yjo on 1\n" + "fpQ pr 1\n" + "qOq qu 1\n" + "Wjf ij 1\n" + "qcT ch 1\n" + "Lfh th 1\n" + "cFj ch 1\n" + "lMq qu 1\n" + "wSf wa 1\n" + "wQc ch 1\n" + "zDy sz 1\n" + "qrl qu 1\n" + "pYw pr 1\n" + "Vnf an 1\n" + "Hcj ch 1\n" + "zdU sz 1\n" + "bvP va 1\n" + "Yfj ij 1\n" + "Qkn an 1\n" + "wHm me 1\n" + "qVv qu 1\n" + "gkV ng 1\n" + "vpq qu 1\n" + "hFk th 1\n" + "fWf fo 1\n" + "pYq qu 1\n" + "dNv de 1\n" + "Wwj ij 1\n" + "Fmx me 1\n" + "mDl le 1\n" + "jMg ng 1\n" + "fZk ka 1\n" + "jNp ij 1\n" + "qhf th 1\n" + "Vbg ng 1\n" + "lKx le 1\n" + "iZx in 1\n" + "sjT sz 1\n" + "ijY in 1\n" + "qtV th 1\n" + "yTk ka 1\n" + "Hpz sz 1\n" + "iGq qu 1\n" + "yqW 
qu 1\n" + "hgF th 1\n" + "mFk ka 1\n" + "Oqw qu 1\n" + "dXa an 1\n" + "Zbq qu 1\n" + "lKm le 1\n" + "Svz sz 1\n" + "zKc ch 1\n" + "Vmz sz 1\n" + "mIx me 1\n" + "gKj ng 1\n" + "gTt th 1\n" + "vfC fo 1\n" + "hKg th 1\n" + "hSx th 1\n" + "oKg ng 1\n" + "nQs an 1\n" + "yiG in 1\n" + "qgM ng 1\n" + "kQg ng 1\n" + "Cjd de 1\n" + "jPy ij 1\n" + "Xqe qu 1\n" + "Pzy sz 1\n" + "Ftq th 1\n" + "fcE ch 1\n" + "mkL ka 1\n" + "Hzj sz 1\n" + "bTn an 1\n" + "qXy qu 1\n" + "dmM de 1\n" + "dVx de 1\n" + "Tqn an 1\n" + "xWj ij 1\n" + "qxQ qu 1\n" + "fQx fo 1\n" + "vLl le 1\n" + "Pgk ng 1\n" + "gHk ng 1\n" + "hxV th 1\n" + "tJz th 1\n" + "fMz sz 1\n" + "Ixb be 1\n" + "Cyy ny 1\n" + "pXf pr 1\n" + "pLl le 1\n" + "Twq qu 1\n" + "Dtw th 1\n" + "wRn an 1\n" + "uXl qu 1\n" + "zhq th 1\n" + "wIv va 1\n" + "cjL ch 1\n" + "qxH qu 1\n" + "lDm le 1\n" + "tXv th 1\n" + "gjC ng 1\n" + "Zzd sz 1\n" + "tgT th 1\n" + "hnP th 1\n" + "Kjc ch 1\n" + "gVw ng 1\n" + "xbI be 1\n" + "Zpc ch 1\n" + "bfO be 1\n" + "mSx me 1\n" + "qaF an 1\n" + "aQh th 1\n" + "Hjd de 1\n" + "qXj qu 1\n" + "fqA qu 1\n" + "bvR va 1\n" + "qSn an 1\n" + "cdV ch 1\n" + "pTf pr 1\n" + "Kzc ch 1\n" + "qtI th 1\n" + "egY ng 1\n" + "Rxt th 1\n" + "bhY th 1\n" + "pGh th 1\n" + "jDg ng 1\n" + "foY on 1\n" + "dKs sz 1\n" + "qJt th 1\n" + "Xwz sz 1\n" + "Ixg ng 1\n" + "rMt th 1\n" + "zXu qu 1\n" + "sQy sz 1\n" + "Npz sz 1\n" + "Qfz sz 1\n" + "rLm er 1\n" + "zGm sz 1\n" + "wHz sz 1\n" + "vcY ch 1\n" + "kqZ qu 1\n" + "jDh th 1\n" + "qgG ng 1\n" + "Dqq qu 1\n" + "fmO me 1\n" + "qdW qu 1\n" + "dNw de 1\n" + "rXj er 1\n" + "Jwc ch 1\n" + "mDb me 1\n" + "wMw wa 1\n" + "Yjg ng 1\n" + "fjY ij 1\n" + "iJb in 1\n" + "cdC ch 1\n" + "Yxq qu 1\n" + "Vbk ka 1\n" + "Fpx pr 1\n" + "zhD th 1\n" + "hCs th 1\n" + "dXw de 1\n" + "kDd de 1\n" + "uqT un 1\n" + "Bxw wa 1\n" + "Bjq qu 1\n" + "jGx ij 1\n" + "fXb be 1\n" + "ybF be 1\n" + "dtA th 1\n" + "cVv ch 1\n" + "Cbd de 1\n" + "wtH th 1\n" + "Kdj de 1\n" + "kPs sz 1\n" + "Zvk ka 1\n" + "xPv va 1\n" + "woH on 
1\n" + "Xpz sz 1\n" + "qXe qu 1\n" + "pTj ij 1\n" + "kwQ ka 1\n" + "kZf ka 1\n" + "Uqj qu 1\n" + "yJh th 1\n" + "hCq th 1\n" + "jMj ij 1\n" + "phY th 1\n" + "kbB ka 1\n" + "Gpz sz 1\n" + "sGz st 1\n" + "fwE wa 1\n" + "Ttf th 1\n" + "Gqm qu 1\n" + "bzN sz 1\n" + "fkO ka 1\n" + "uzW qu 1\n" + "oxQ on 1\n" + "Vgm ng 1\n" + "qmD qu 1\n" + "xqn an 1\n" + "vRl le 1\n" + "Tnr an 1\n" + "zjW sz 1\n" + "vwq qu 1\n" + "jtW th 1\n" + "qnL an 1\n" + "yDx ny 1\n" + "xfQ fo 1\n" + "wxJ wa 1\n" + "nxE an 1\n" + "vQn in 1\n" + "Wkh th 1\n" + "ywD wa 1\n" + "pFf pr 1\n" + "lbK le 1\n" + "vHy va 1\n" + "gVj ng 1\n" + "Oqh th 1\n" + "bcN ch 1\n" + "tWm th 1\n" + "wMc ch 1\n" + "nwQ an 1\n" + "qnM an 1\n" + "Ztx th 1\n" + "nQj an 1\n" + "Vxt th 1\n" + "Uxc ch 1\n" + "pWv va 1\n" + "yRx ny 1\n" + "qKu un 1\n" + "jXg ng 1\n" + "jpX ij 1\n" + "dkG de 1\n" + "Bnf an 1\n" + "Ykf ka 1\n" + "gbW ng 1\n" + "klX le 1\n" + "vkH ka 1\n" + "dKd de 1\n" + "Kpq qu 1\n" + "gqM ng 1\n" + "yBz sz 1\n" + "rPj er 1\n" + "Hzv sz 1\n" + "wYz sz 1\n" + "qGa an 1\n" + "jIs sz 1\n" + "bUj ij 1\n" + "rTt th 1\n" + "nqI an 1\n" + "jfP ij 1\n" + "hRt th 1\n" + "yRr er 1\n" + "jjK ij 1\n" + "tfE th 1\n" + "Qsw st 1\n" + "Fcm ch 1\n" + "bJm me 1\n" + "tXq th 1\n" + "fRl le 1\n" + "gqE ng 1\n" + "wGg ng 1\n" + "gKc ch 1\n" + "yXc ch 1\n" + "zBy sz 1\n" + "lTd le 1\n" + "Wqc ch 1\n" + "Ftf th 1\n" + "wdB de 1\n" + "xnX an 1\n" + "Bqc ch 1\n" + "zqO qu 1\n" + "Qdl le 1\n" + "ojJ on 1\n" + "qZn an 1\n" + "hzW th 1\n" + "ylQ le 1\n" + "Zbw wa 1\n" + "mvL va 1\n" + "Ljb ij 1\n" + "Gqe qu 1\n" + "mfE me 1\n" + "xQq qu 1\n" + "fLv va 1\n" + "xLt th 1\n" + "wBj ij 1\n" + "jUm ij 1\n" + "pdL de 1\n" + "mJv va 1\n" + "dxU de 1\n" + "xqN qu 1\n" + "fpG pr 1\n" + "tlO th 1\n" + "whL th 1\n" + "kDx ka 1\n" + "Rqb qu 1\n" + "uvX qu 1\n" + "vjY ij 1\n" + "crQ ch 1\n" + "xyY ny 1\n" + "yhQ th 1\n" + "yYc ch 1\n" + "Lmg ng 1\n" + "Jsq qu 1\n" + "Gbj ij 1\n" + "aPb an 1\n" + "dwJ de 1\n" + "Xyv va 1\n" + "ucJ ch 1\n" + "dTf de 1\n" 
+ "lBb le 1\n" + "hKz th 1\n" + "jcR ch 1\n" + "eQc ch 1\n" + "qYi in 1\n" + "Vtb th 1\n" + "Ccg ch 1\n" + "zAe er 1\n" + "gxJ ng 1\n" + "uvC qu 1\n" + "Bhm ma 1\n" + "Zgx ng 1\n" + "yzJ sz 1\n" + "cvJ ch 1\n" + "xTk ka 1\n" + "qdK qu 1\n" + "vwG va 1\n" + "Ymx me 1\n" + "oYw on 1\n" + "jXx ij 1\n" + "ywf wa 1\n" + "vVx vi 1\n" + "Rwm me 1\n" + "Dvk ka 1\n" + "xKt th 1\n" + "qLp qu 1\n" + "Yyv vi 1\n" + "Cqa an 1\n" + "xRf fo 1\n" + "Qqk qu 1\n" + "Jqe qu 1\n" + "yZg ng 1\n" + "vqG qu 1\n" + "hbO th 1\n" + "uVq qu 1\n" + "Rlm le 1\n" + "uZc ch 1\n" + "Ppv va 1\n" + "pVd de 1\n" + "yVd de 1\n" + "zJl le 1\n" + "Yzg ng 1\n" + "Cvq qu 1\n" + "pwS pr 1\n" + "Kkw ka 1\n" + "Wvv va 1\n" + "Fdy de 1\n" + "ppX pr 1\n" + "hvC th 1\n" + "iwG in 1\n" + "rBg ng 1\n" + "hBq th 1\n" + "nYs an 1\n" + "kcO ch 1\n" + "qEe qu 1\n" + "Ybv va 1\n" + "Qsn an 1\n" + "svC st 1\n" + "qkD qu 1\n" + "Qiw in 1\n" + "Gtj th 1\n" + "qAh th 1\n" + "wVy wa 1\n" + "bxT be 1\n" + "Qhs th 1\n" + "tlX th 1\n" + "hbA th 1\n" + "Qfb be 1\n" + "xWl le 1\n" + "xeV er 1\n" + "rqG qu 1\n" + "vqZ qu 1\n" + "jKv ij 1\n" + "iTf in 1\n" + "kwU ka 1\n" + "iFq in 1\n" + "mjZ ij 1\n" + "xgJ ng 1\n" + "zLp sz 1\n" + "qsR qu 1\n" + "zDj sz 1\n" + "pdF de 1\n" + "wxN wa 1\n" + "wGk ka 1\n" + "dUq qu 1\n" + "dJw de 1\n" + "fCb be 1\n" + "Dhz th 1\n" + "yIq qu 1\n" + "aQm an 1\n" + "Yzs st 1\n" + "vHf va 1\n" + "bjV ij 1\n" + "zSq qu 1\n" + "Wqs qu 1\n" + "jrW er 1\n" + "Hzq qu 1\n" + "wWs st 1\n" + "Mkg ng 1\n" + "zgF ng 1\n" + "Cnk an 1\n" + "rDg ng 1\n" + "fzB sz 1\n" + "fOm me 1\n" + "uVt th 1\n" + "Qfi in 1\n" + "Mhj th 1\n" + "uYj qu 1\n" + "Rqx qu 1\n" + "hkY th 1\n" + "wYb wa 1\n" + "tqP th 1\n" + "Jpb pr 1\n" + "bGw wa 1\n" + "xFh th 1\n" + "Xwb wa 1\n" + "Kgt th 1\n" + "Iqc ch 1\n" + "pJm me 1\n" + "Qkq qu 1\n" + "bVh th 1\n" + "yTq qu 1\n" + "zZg ng 1\n" + "cDz ch 1\n" + "qfm qu 1\n" + "afQ an 1\n" + "Qwc ch 1\n" + "bdJ de 1\n" + "qTu un 1\n" + "Ucx ch 1\n" + "Hnx an 1\n" + "Hbh th 1\n" + "gyH ng 1\n" + 
"tTz th 1\n" + "txV th 1\n" + "bdS de 1\n" + "Wgg ng 1\n" + "oqP qu 1\n" + "Rrf er 1\n" + "gYy ng 1\n" + "fMs st 1\n" + "fKd de 1\n" + "Hyx ny 1\n" + "Mxz sz 1\n" + "qHk qu 1\n" + "tfM th 1\n" + "hgQ th 1\n" + "zmO sz 1\n" + "wzS sz 1\n" + "jwQ ij 1\n" + "Fhc ic 1\n" + "xIy ny 1\n" + "fHg ng 1\n" + "wqY qu 1\n" + "bFp pr 1\n" + "Qdq qu 1\n" + "bhV th 1\n" + "bCg ng 1\n" + "Hgr ng 1\n" + "xqL qu 1\n" + "qgS ng 1\n" + "Nqg ng 1\n" + "fQv va 1\n" + "Qzw sz 1\n" + "Ixd de 1\n" + "Cxm me 1\n" + "mxN me 1\n" + "vQi in 1\n" + "cAq ch 1\n" + "eCx er 1\n" + "mqX qu 1\n" + "rqY qu 1\n" + "fVp pr 1\n" + "qoP qu 1\n" + "Gxc ch 1\n" + "vzX sz 1\n" + "fXf fo 1\n" + "Qtc th 1\n" + "ohQ th 1\n" + "Ygy ng 1\n" + "Xnb an 1\n" + "cWm ch 1\n" + "jXw ij 1\n" + "gWj ng 1\n" + "Kmg ng 1\n" + "vvH va 1\n" + "Uew er 1\n" + "qJk qu 1\n" + "Hkd de 1\n" + "xmP me 1\n" + "slR is 1\n" + "Uaq an 1\n" + "zbG sz 1\n" + "vNv va 1\n" + "cVb ch 1\n" + "bGg ng 1\n" + "iwU in 1\n" + "Cnw an 1\n" + "rXd er 1\n" + "vWz sz 1\n" + "tGf th 1\n" + "fbY be 1\n" + "hzp th 1\n" + "uWz qu 1\n" + "bMb be 1\n" + "jzW sz 1\n" + "gLh th 1\n" + "kZc ch 1\n" + "kHg ng 1\n" + "Vwf wa 1\n" + "vtY th 1\n" + "qeA qu 1\n" + "cxG ch 1\n" + "uQz qu 1\n" + "jGc ch 1\n" + "cvA ch 1\n" + "oTm on 1\n" + "pjY ij 1\n" + "bUo on 1\n" + "jwU ij 1\n" + "Jgm ng 1\n" + "tfZ th 1\n" + "xeO er 1\n" + "qBp qu 1\n" + "pBz sz 1\n" + "qSb qu 1\n" + "jyP ij 1\n" + "Fkq qu 1\n" + "njS an 1\n" + "jtA th 1\n" + "Zmf me 1\n" + "Ytm th 1\n" + "Pqc ch 1\n" + "bwJ wa 1\n" + "oWf on 1\n" + "kxJ ka 1\n" + "jHx ij 1\n" + "gcP ch 1\n" + "gBs ng 1\n" + "bkK ka 1\n" + "vdQ de 1\n" + "pjZ ij 1\n" + "Vgf ng 1\n" + "svG st 1\n" + "kGj ij 1\n" + "Wjg ng 1\n" + "Qmk ka 1\n" + "Glv le 1\n" + "tmY th 1\n" + "klY le 1\n" + "Pcj ch 1\n" + "fQw wi 1\n" + "xaO an 1\n" + "jfN ij 1\n" + "qGx qu 1\n" + "qvB qu 1\n" + "hwA th 1\n" + "Xmq qu 1\n" + "Xvt th 1\n" + "Bpq qu 1\n" + "oJq qu 1\n" + "vmZ va 1\n" + "nJp an 1\n" + "zqJ qu 1\n" + "qHf qu 1\n" + "mQg ng 1\n" + "yGz 
sz 1\n" + "hQm th 1\n" + "mBp me 1\n" + "tpJ th 1\n" + "Qkj ij 1\n" + "uUg ng 1\n" + "tdJ th 1\n" + "Jfn an 1\n" + "Lvj ij 1\n" + "iXc ch 1\n" + "pOq qu 1\n" + "bhK th 1\n" + "bMk ka 1\n" + "Fsw st 1\n" + "qAt th 1\n" + "xwJ wa 1\n" + "fPm me 1\n" + "Dfy ny 1\n" + "Zbp pr 1\n" + "Bgw ng 1\n" + "pQp pr 1\n" + "kQp ka 1\n" + "qoV qu 1\n" + "Uqd qu 1\n" + "jYo on 1\n" + "sDf st 1\n" + "xuJ qu 1\n" + "vRk ka 1\n" + "Qsg ng 1\n" + "yTd de 1\n" + "Qxr er 1\n" + "Hvc ch 1\n" + "hZt th 1\n" + "qDu un 1\n" + "fxA fo 1\n" + "xPf fo 1\n" + "wXc ch 1\n" + "jJb ij 1\n" + "pdK de 1\n" + "gpW ng 1\n" + "Qgx ng 1\n" + "kxG ka 1\n" + "dLx de 1\n" + "Bwz sz 1\n" + "Vdx de 1\n" + "yQh th 1\n" + "Wsx st 1\n" + "fSb be 1\n" + "Ukg ng 1\n" + "Pjz sz 1\n" + "rFg ng 1\n" + "fjP ij 1\n" + "kWv ka 1\n" + "Khf th 1\n" + "yGv va 1\n" + "pnD an 1\n" + "jYf ij 1\n" + "mgR ng 1\n" + "rjC er 1\n" + "Xjl le 1\n" + "kzE sz 1\n" + "Qgq ng 1\n" + "zgb ng 1\n" + "mhD th 1\n" + "vkO ka 1\n" + "uwV qu 1\n" + "rPp er 1\n" + "wXd de 1\n" + "gAo ng 1\n" + "kvG ka 1\n" + "vcX ch 1\n" + "xOz sz 1\n" + "Xzq qu 1\n" + "Fmu qu 1\n" + "xGg ng 1\n" + "jjR ij 1\n" + "qkI ku 1\n" + "pqH qu 1\n" + "cnH an 1\n" + "dhT th 1\n" + "mdR de 1\n" + "dDf de 1\n" + "qIq qu 1\n" + "xCj ij 1\n" + "qRk qu 1\n" + "kKc ch 1\n" + "Iuu qu 1\n" + "jqR qu 1\n" + "qEk qu 1\n" + "hfO th 1\n" + "quJ un 1\n" + "nRp an 1\n" + "txI th 1\n" + "yfZ ny 1\n" + "oqT ho 1\n" + "cgX ch 1\n" + "pbL pr 1\n" + "Xmx me 1\n" + "Vjr er 1\n" + "ylY le 1\n" + "dfK de 1\n" + "xgD ng 1\n" + "uwL qu 1\n" + "bPm me 1\n" + "qCy qu 1\n" + "Rpq qu 1\n" + "yqh th 1\n" + "xJt th 1\n" + "lzQ le 1\n" + "fgM ng 1\n" + "Ylc ch 1\n" + "fTz sz 1\n" + "Rjf ij 1\n" + "Rgj jo 1\n" + "Gkt th 1\n" + "fxG fo 1\n" + "mtG th 1\n" + "lgJ ng 1\n" + "tdR th 1\n" + "iHk in 1\n" + "Gqv qu 1\n" + "lDj le 1\n" + "wzZ sz 1\n" + "dFp de 1\n" + "qTt th 1\n" + "Wtg th 1\n" + "cbT ch 1\n" + "dvK de 1\n" + "Ctw th 1\n" + "mdG de 1\n" + "vKj ij 1\n" + "Clf le 1\n" + "wrU er 1\n" + "gmT ng 
1\n" + "bXx be 1\n" + "zOx sz 1\n" + "Xnf an 1\n" + "rzQ er 1\n" + "vQj ij 1\n" + "kpT ka 1\n" + "fYh th 1\n" + "zLr er 1\n" + "Xgd ng 1\n" + "cZl ch 1\n" + "lFy le 1\n" + "Zng an 1\n" + "aXg an 1\n" + "qbE qu 1\n" + "zcY ch 1\n" + "sqK qu 1\n" + "Blx le 1\n" + "oqJ qu 1\n" + "jPv ij 1\n" + "qZd qu 1\n" + "fdZ de 1\n" + "Bqm qu 1\n" + "cpG ch 1\n" + "xdP de 1\n" + "fuF qu 1\n" + "vbq qu 1\n" + "dhH th 1\n" + "Jwm me 1\n" + "qkO ko 1\n" + "gsY ng 1\n" + "qGh th 1\n" + "Jkv ka 1\n" + "zpg ng 1\n" + "rwK er 1\n" + "Lhq th 1\n" + "zuV qu 1\n" + "bqV qu 1\n" + "Qcv ch 1\n" + "mWd de 1\n" + "cnF an 1\n" + "lWw le 1\n" + "txS th 1\n" + "znE an 1\n" + "fTj ij 1\n" + "lFq qu 1\n" + "wdJ de 1\n" + "eVk er 1\n" + "zjZ sz 1\n" + "fPq qu 1\n" + "cqQ ch 1\n" + "Pcg ch 1\n" + "Ydk de 1\n" + "svE st 1\n" + "Wqb qu 1\n" + "bcV ch 1\n" + "nHx on 1\n" + "wAx wa 1\n" + "hfB th 1\n" + "aMv an 1\n" + "pwO pr 1\n" + "Ywx wa 1\n" + "cbH ch 1\n" + "ojZ on 1\n" + "suU qu 1\n" + "jcU ch 1\n" + "sqY qu 1\n" + "jMr er 1\n" + "pxG pr 1\n" + "rBq qu 1\n" + "vlY le 1\n" + "hyY th 1\n" + "Cvw va 1\n" + "Tqe qu 1\n" + "fSj ij 1\n" + "fVs st 1\n" + "Eqc ch 1\n" + "xnD an 1\n" + "Owp pr 1\n" + "xTb be 1\n" + "wjL ij 1\n" + "Rxv va 1\n" + "nWf an 1\n" + "vHp va 1\n" + "vBk ka 1\n" + "Nqv qu 1\n" + "Lzf sz 1\n" + "bwS wa 1\n" + "Cby be 1\n" + "zRr er 1\n" + "qwJ qu 1\n" + "xnB an 1\n" + "qIc ch 1\n" + "cGk ch 1\n" + "Yji in 1\n" + "gVh th 1\n" + "lDc ch 1\n" + "Qyr er 1\n" + "fcH ch 1\n" + "nxB an 1\n" + "dvw de 1\n" + "gQc ch 1\n" + "mrR er 1\n" + "fnK an 1\n" + "Hlr le 1\n" + "Dnq an 1\n" + "bnU an 1\n" + "qCe qu 1\n" + "Tjv ij 1\n" + "Epq qu 1\n" + "wLf wa 1\n" + "pZj ij 1\n" + "gvR ng 1\n" + "kqK qu 1\n" + "vlG le 1\n" + "vvN va 1\n" + "gbM ng 1\n" + "bNk ka 1\n" + "jzL sz 1\n" + "Wlq qu 1\n" + "aYq an 1\n" + "zdY de 1\n" + "sfG st 1\n" + "qfW qu 1\n" + "kBv ka 1\n" + "btG th 1\n" + "Mqb qu 1\n" + "lrC er 1\n" + "vuE qu 1\n" + "fyJ ny 1\n" + "qmZ qu 1\n" + "Jkq qu 1\n" + "Cmj ij 1\n" + "bXy be 1\n" 
+ "Ymy me 1\n" + "qxY qu 1\n" + "cNl ch 1\n" + "fzU fo 1\n" + "Rvt th 1\n" + "ylI le 1\n" + "xMs st 1\n" + "Qhm th 1\n" + "dHq qu 1\n" + "dwL de 1\n" + "vYr er 1\n" + "Qxu qu 1\n" + "dNh th 1\n" + "zNc ch 1\n" + "jmP ij 1\n" + "Pbq qu 1\n" + "fqj qu 1\n" + "fUw wa 1\n" + "Hyq qu 1\n" + "Qdx de 1\n" + "zSl le 1\n" + "cWt th 1\n" + "Fke er 1\n" + "Ztz th 1\n" + "uUq qu 1\n" + "nBm an 1\n" + "zJy sz 1\n" + "pdI de 1\n" + "nTd an 1\n" + "Yjb ij 1\n" + "Qjn an 1\n" + "yXj ij 1\n" + "xwB ow 1\n" + "klq qu 1\n" + "hfY th 1\n" + "pDg ng 1\n" + "zZd de 1\n" + "mqO qu 1\n" + "hZr th 1\n" + "cmY ch 1\n" + "gLk ng 1\n" + "Qcj ch 1\n" + "uKj qu 1\n" + "nqD an 1\n" + "yKw wa 1\n" + "bfR be 1\n" + "Rqz qu 1\n" + "jhQ th 1\n" + "vNj ij 1\n" + "Tcf ch 1\n" + "Hbn an 1\n" + "Lwv va 1\n" + "wcZ ch 1\n" + "cdK ch 1\n" + "bpR pr 1\n" + "lWm le 1\n" + "wNq qu 1\n" + "pAj ij 1\n" + "grV ng 1\n" + "qmk qu 1\n" + "cLf ch 1\n" + "iwB in 1\n" + "eqV qu 1\n" + "Wqz qu 1\n" + "Qnj an 1\n" + "uoJ qu 1\n" + "fVj ij 1\n" + "cbU ch 1\n" + "qpT qu 1\n" + "pdZ de 1\n" + "dzW de 1\n" + "Wfw wa 1\n" + "Zqm qu 1\n" + "kJd de 1\n" + "zWf sz 1\n" + "bYg ng 1\n" + "rjQ er 1\n" + "dwB de 1\n" + "Vlx le 1\n" + "zKd de 1\n" + "Lxw wa 1\n" + "Hpw pr 1\n" + "mvR va 1\n" + "qMt th 1\n" + "pWb pr 1\n" + "dcW ch 1\n" + "zEh th 1\n" + "Xrs er 1\n" + "Ftz th 1\n" + "qyL qu 1\n" + "jSn an 1\n" + "Wzh th 1\n" + "Pzf sz 1\n" + "zkW sz 1\n" + "ywY wa 1\n" + "oGb on 1\n" + "jBw ij 1\n" + "Qpz sz 1\n" + "rWm er 1\n" + "smQ st 1\n" + "uGk qu 1\n" + "xkV ka 1\n" + "wJf wa 1\n" + "cjW ch 1\n" + "wNx wa 1\n" + "wjR ij 1\n" + "wDd wa 1\n" + "lrB er 1\n" + "qhJ th 1\n" + "jKp ij 1\n" + "kNn an 1\n" + "tqU th 1\n" + "Jmj ij 1\n" + "bJv va 1\n" + "frN er 1\n" + "uBj qu 1\n" + "Uuv qu 1\n" + "Mzv sz 1\n" + "Djq qu 1\n" + "Qgl le 1\n" + "hdC th 1\n" + "mFh th 1\n" + "vjU ij 1\n" + "prX er 1\n" + "Kvc ch 1\n" + "ryY er 1\n" + "vzQ sz 1\n" + "Ojh th 1\n" + "Qfn an 1\n" + "Vqg ng 1\n" + "aQv an 1\n" + "hHx th 1\n" + "uIg ng 1\n" + 
"Kpv va 1\n" + "dQk ko 1\n" + "Ghq th 1\n" + "cZs ch 1\n" + "nvH an 1\n" + "jwJ ij 1\n" + "dMm de 1\n" + "gjI ng 1\n" + "lPg ng 1\n" + "qBs qu 1\n" + "Vhq th 1\n" + "qLt th 1\n" + "hBd th 1\n" + "Vcu ch 1\n" + "cQd ch 1\n" + "ypX pr 1\n" + "mQv va 1\n" + "vmR va 1\n" + "xfH fo 1\n" + "pqY qu 1\n" + "Xtb th 1\n" + "Vcx ch 1\n" + "tWb th 1\n" + "Pxa an 1\n" + "Qmr er 1\n" + "mdX de 1\n" + "Bxt th 1\n" + "jZv ij 1\n" + "hNp th 1\n" + "ybN be 1\n" + "bkZ ka 1\n" + "nVf an 1\n" + "lKq qu 1\n" + "oJj on 1\n" + "pBv va 1\n" + "hgA th 1\n" + "qxE qu 1\n" + "nvJ an 1\n" + "Xcf ch 1\n" + "Fdb de 1\n" + "zAo on 1\n" + "wQk ka 1\n" + "tmX th 1\n" + "pvZ va 1\n" + "fNw wa 1\n" + "zKk sz 1\n" + "hRx th 1\n" + "Tlj le 1\n" + "iQj in 1\n" + "jmU ij 1\n" + "tbW th 1\n" + "wVh th 1\n" + "Tvh th 1\n" + "nVg an 1\n" + "Lxp pr 1\n" + "vgO ng 1\n" + "dfE de 1\n" + "nVm an 1\n" + "qKy qu 1\n" + "eqZ qu 1\n" + "Tcc ch 1\n" + "cTk ch 1\n" + "fKz sz 1\n" + "Wkz sz 1\n" + "lvZ le 1\n" + "rGp er 1\n" + "kKz sz 1\n" + "Cbf be 1\n" + "jQd de 1\n" + "Zfc ch 1\n" + "hvX th 1\n" + "xgN ng 1\n" + "Kpe er 1\n" + "hzM th 1\n" + "jxZ ij 1\n" + "yqL qu 1\n" + "pgC ng 1\n" + "Fqd qu 1\n" + "tMb th 1\n" + "njQ an 1\n" + "tfB th 1\n" + "gjN ng 1\n" + "wNc ch 1\n" + "Pzj sz 1\n" + "mhO th 1\n" + "qUm qu 1\n" + "Fhh th 1\n" + "Sjd de 1\n" + "hWj th 1\n" + "yhL th 1\n" + "lGp le 1\n" + "dtX th 1\n" + "hwX th 1\n" + "srK er 1\n" + "vqE qu 1\n" + "bcO ch 1\n" + "xQl le 1\n" + "Qqf qu 1\n" + "kJg ng 1\n" + "pXz sz 1\n" + "yuJ qu 1\n" + "Gnp an 1\n" + "Dlc ch 1\n" + "Mxf fo 1\n" + "yNr er 1\n" + "bmV me 1\n" + "fXo on 1\n" + "mwW me 1\n" + "lIj le 1\n" + "Fvq qu 1\n" + "Utq th 1\n" + "jGk ij 1\n" + "wYw wa 1\n" + "wVm me 1\n" + "bTq qu 1\n" + "Ijp ij 1\n" + "znM an 1\n" + "xmO me 1\n" + "gQx ng 1\n" + "dKw de 1\n" + "dUf de 1\n" + "cSb ch 1\n" + "zVb sz 1\n" + "ccY ch 1\n" + "xjE ij 1\n" + "pYt th 1\n" + "Vrq qu 1\n" + "kzK sz 1\n" + "zfC sz 1\n" + "Ybh th 1\n" + "dgS ng 1\n" + "xcV ch 1\n" + "xNm me 1\n" + "Xkw 
ka 1\n" + "Tpw pr 1\n" + "Bwd de 1\n" + "hwT th 1\n" + "gQl ng 1\n" + "cDs ch 1\n" + "zYr er 1\n" + "xTp pr 1\n" + "qWm qu 1\n" + "xjT ij 1\n" + "hjK th 1\n" + "uDc ch 1\n" + "xhS th 1\n" + "bWd de 1\n" + "vCw va 1\n" + "jyB ij 1\n" + "uWd qu 1\n" + "Nnq qu 1\n" + "Qvb va 1\n" + "jzV sz 1\n" + "zBx sz 1\n" + "wIj ij 1\n" + "qRt th 1\n" + "qrJ qu 1\n" + "zZj sz 1\n" + "kRr er 1\n" + "Nzv sz 1\n" + "Qfw wa 1\n" + "Njt th 1\n" + "bFy be 1\n" + "lhY th 1\n" + "eWj er 1\n" + "jbM ij 1\n" + "Xsg ng 1\n" + "Rsd de 1\n" + "flF le 1\n" + "Phz th 1\n" + "xWs st 1\n" + "bCw wa 1\n" + "gfJ ng 1\n" + "qVo qu 1\n" + "eQh th 1\n" + "vcP ch 1\n" + "mDj ij 1\n" + "qTs qu 1\n" + "Xgs ng 1\n" + "Vuq qu 1\n" + "ufN qu 1\n" + "xBs st 1\n" + "pTk ka 1\n" + "fSq qu 1\n" + "mbD me 1\n" + "Vwz sz 1\n" + "hhQ th 1\n" + "kfP ka 1\n" + "Pwq qu 1\n" + "dhG th 1\n" + "qZj qu 1\n" + "yRj ij 1\n" + "yCs st 1\n" + "fjN ij 1\n" + "Rqg ng 1\n" + "jJh th 1\n" + "dlR le 1\n" + "Xmb me 1\n" + "Jjt th 1\n" + "gqI ng 1\n" + "fqM qu 1\n" + "iVg ng 1\n" + "Hgu ng 1\n" + "iHw in 1\n" + "eQv er 1\n" + "mzE sz 1\n" + "fjZ ij 1\n" + "qNn an 1\n" + "wlE le 1\n" + "kGp ka 1\n" + "Iqv qu 1\n" + "kBn an 1\n" + "xZd de 1\n" + "Dkc ch 1\n" + "zlH le 1\n" + "txB th 1\n" + "tQr th 1\n" + "uOx qu 1\n" + "pJi in 1\n" + "zbL sz 1\n" + "xkD ka 1\n" + "scV ch 1\n" + "qXh th 1\n" + "kIq qu 1\n" + "xNn an 1\n" + "gJf ng 1\n" + "tmB th 1\n" + "tcK th 1\n" + "kwZ ka 1\n" + "uZj qu 1\n" + "snQ an 1\n" + "uKq qu 1\n" + "crX ch 1\n" + "hXy th 1\n" + "Zcc ch 1\n" + "Pfz sz 1\n" + "dwM de 1\n" + "qIy qu 1\n" + "xuP qu 1\n" + "wDw wa 1\n" + "Hjr er 1\n" + "dQf de 1\n" + "wvJ wa 1\n" + "tHm th 1\n" + "Ydw de 1\n" + "wxI wa 1\n" + "pOv va 1\n" + "Wmq qu 1\n" + "dhD th 1\n" + "qpw qu 1\n" + "bmC me 1\n" + "wcX ch 1\n" + "wjH ij 1\n" + "bWf be 1\n" + "Gdp de 1\n" + "Ldw de 1\n" + "Sbq qu 1\n" + "vZv va 1\n" + "Kwb wa 1\n" + "qhT th 1\n" + "yRf ny 1\n" + "hwC th 1\n" + "npJ an 1\n" + "jmV ij 1\n" + "vGg ng 1\n" + "xqF qu 1\n" + "Phm th 
1\n" + "pWc ch 1\n" + "Vxk ka 1\n" + "sHz st 1\n" + "Wbx be 1\n" + "bfK be 1\n" + "Jgl ng 1\n" + "kTb ka 1\n" + "Kbf be 1\n" + "kzC sz 1\n" + "pKq qu 1\n" + "zwB sz 1\n" + "uZg ng 1\n" + "btI th 1\n" + "zXj sz 1\n" + "uzS qu 1\n" + "vWk ka 1\n" + "xrH er 1\n" + "oQc ch 1\n" + "zlT le 1\n" + "dfI de 1\n" + "Qmf me 1\n" + "sgE ng 1\n" + "Ysx st 1\n" + "Rzd de 1\n" + "xLd de 1\n" + "qsX qu 1\n" + "kqJ qu 1\n" + "kCm ka 1\n" + "bFm me 1\n" + "igQ ng 1\n" + "sRq qu 1\n" + "jGm ij 1\n" + "Szs st 1\n" + "Yvz sz 1\n" + "kXz sz 1\n" + "Gnz an 1\n" + "mWc ch 1\n" + "tDq th 1\n" + "gqz ng 1\n" + "nHb ng 1\n" + "tdM th 1\n" + "Ovx va 1\n" + "Znl an 1\n" + "wuE qu 1\n" + "zLt th 1\n" + "ofQ on 1\n" + "vYj ij 1\n" + "jyH ij 1\n" + "zqA qu 1\n" + "cJy ch 1\n" + "Wbf be 1\n" + "lTt th 1\n" + "klW le 1\n" + "Xxa an 1\n" + "fCz sz 1\n" + "lKf le 1\n" + "qwT qu 1\n" + "rHk er 1\n" + "dbN de 1\n" + "uUy qu 1\n" + "zgN ng 1\n" + "Pxg ng 1\n" + "pNc ch 1\n" + "cyJ ch 1\n" + "jpH ij 1\n" + "Vtf th 1\n" + "sjJ st 1\n" + "Qlh th 1\n" + "twV th 1\n" + "yGq qu 1\n" + "tVp th 1\n" + "ksQ st 1\n" + "xnT an 1\n" + "rpJ er 1\n" + "wzI sz 1\n" + "Zhp th 1\n" + "aDf an 1\n" + "Uxj ij 1\n" + "cPg ch 1\n" + "qSq qu 1\n" + "mKq qu 1\n" + "vBz sz 1\n" + "yPj ij 1\n" + "Vkz sz 1\n" + "qiB qu 1\n" + "tkJ th 1\n" + "Ouq qu 1\n" + "zoH on 1\n" + "qVt th 1\n" + "Gxs st 1\n" + "jzF sz 1\n" + "swH st 1\n" + "nBb an 1\n" + "zhQ th 1\n" + "yRn an 1\n" + "fnX an 1\n" + "qoQ qu 1\n" + "mxP me 1\n" + "bwR wa 1\n" + "gJj ng 1\n" + "qnk an 1\n" + "tMk th 1\n" + "dxO de 1\n" + "rzV er 1\n" + "vpP va 1\n" + "Nvz sz 1\n" + "Nfp pr 1\n" + "Cnz an 1\n" + "oTd on 1\n" + "dqG qu 1\n" + "Hmx me 1\n" + "psX st 1\n" + "swM st 1\n" + "dqC qu 1\n" + "Vwx wa 1\n" + "nXf an 1\n" + "wkY ka 1\n" + "wfC wa 1\n" + "qSr qu 1\n" + "qVc ch 1\n" + "kDn an 1\n" + "Yvb va 1\n" + "zqH qu 1\n" + "qxJ qu 1\n" + "zKj sz 1\n" + "jcN ch 1\n" + "tWk th 1\n" + "Rrz er 1\n" + "bmG me 1\n" + "srZ er 1\n" + "wWq qu 1\n" + "Cfh th 1\n" + "lNt th 1\n" 
+ "hcV th 1\n" + "Znf an 1\n" + "Jhv th 1\n" + "qIp qu 1\n" + "vSz sz 1\n" + "feU er 1\n" + "xIi in 1\n" + "Zmq qu 1\n" + "eGf er 1\n" + "bQk ka 1\n" + "Xcb ch 1\n" + "nlK an 1\n" + "tmJ th 1\n" + "jlL le 1\n" + "mwC me 1\n" + "qjr qu 1\n" + "zBb sz 1\n" + "fhU th 1\n" + "sPq qu 1\n" + "sBf st 1\n" + "uXy qu 1\n" + "Lkx ka 1\n" + "rGz er 1\n" + "hXz th 1\n" + "zuW qu 1\n" + "Rvx va 1\n" + "bcJ ch 1\n" + "Eoj on 1\n" + "iVt in 1\n" + "yhH th 1\n" + "xVv va 1\n" + "pMr er 1\n" + "vZd de 1\n" + "Vvn an 1\n" + "iCv in 1\n" + "vQp va 1\n" + "vlB le 1\n" + "wVt th 1\n" + "Ugk ng 1\n" + "ktQ th 1\n" + "jCr er 1\n" + "qvz qu 1\n" + "bVf be 1\n" + "rPv er 1\n" + "wfH wa 1\n" + "hbU th 1\n" + "pjF ij 1\n" + "oXg ng 1\n" + "zSr er 1\n" + "wRb wa 1\n" + "Hcu ch 1\n" + "yxJ ny 1\n" + "lTc ch 1\n" + "bYb be 1\n" + "Wxz sz 1\n" + "vrE er 1\n" + "zGy sz 1\n" + "Jqm qu 1\n" + "rzI er 1\n" + "xgV gi 1\n" + "Rvw va 1\n" + "Vnx an 1\n" + "uJg ng 1\n" + "hFq th 1\n" + "Tgz ng 1\n" + "aQc an 1\n" + "xzJ sz 1\n" + "tNc th 1\n" + "jfA ij 1\n" + "ycO ch 1\n" + "Wkj ij 1\n" + "yBp pr 1\n" + "hgD th 1\n" + "iSx in 1\n" + "xCm me 1\n" + "yjX ij 1\n" + "uIh th 1\n" + "qgq ng 1\n" + "Tzj sz 1\n" + "yjO ij 1\n" + "yrY er 1\n" + "bmZ me 1\n" + "zqT qu 1\n" + "mBd de 1\n" + "qvK qu 1\n" + "zcA ch 1\n" + "xrX er 1\n" + "mJm me 1\n" + "Xqf qu 1\n" + "Pxk ka 1\n" + "aDb an 1\n" + "qXg ng 1\n" + "eGw er 1\n" + "hjD th 1\n" + "tTx th 1\n" + "oMd on 1\n" + "fKg ng 1\n" + "Npn an 1\n" + "kqU qu 1\n" + "lbF le 1\n" + "Hvj ij 1\n" + "qZe qu 1\n" + "lQj le 1\n" + "dkY de 1\n" + "dZl le 1\n" + "zZh th 1\n" + "qyM qu 1\n" + "dmJ de 1\n" + "kfK ka 1\n" + "iPq qu 1\n" + "zwU sz 1\n" + "pvS va 1\n" + "ihJ th 1\n" + "ucW ch 1\n" + "Jjz sz 1\n" + "mMd de 1\n" + "vpw va 1\n" + "xCg ng 1\n" + "hKs th 1\n" + "vlI le 1\n" + "Nmc ch 1\n" + "xzV sz 1\n" + "gZs ng 1\n" + "rRp er 1\n" + "Ufd de 1\n" + "fpF pr 1\n" + "fwY wa 1\n" + "Gxr er 1\n" + "xLr er 1\n" + "vzE sz 1\n" + "jRf ij 1\n" + "brR er 1\n" + "gkZ ng 1\n" + 
"dUy de 1\n" + "Xji in 1\n" + "Kdb de 1\n" + "jpC ij 1\n" + "oUj on 1\n" + "qmh th 1\n" + "qjL qu 1\n" + "wRs sz 1\n" + "jhM th 1\n" + "Rhr th 1\n" + "btN th 1\n" + "Pjq ij 1\n" + "xwU wa 1\n" + "qyE qu 1\n" + "Jxd de 1\n" + "Pqr qu 1\n" + "lRd le 1\n" + "jqI qu 1\n" + "qFs qu 1\n" + "Mwk ka 1\n" + "jEb ij 1\n" + "Nxy ny 1\n" + "Pzm sz 1\n" + "tfL th 1\n" + "vFc ch 1\n" + "jQg ng 1\n" + "Bnx an 1\n" + "lMv le 1\n" + "tKq th 1\n" + "eVq qu 1\n" + "Tyq qu 1\n" + "drJ er 1\n" + "oHw on 1\n" + "lFk le 1\n" + "jpW ij 1\n" + "Qjw ij 1\n" + "cNx ch 1\n" + "Bhz th 1\n" + "bhB th 1\n" + "pDx pr 1\n" + "xpY pr 1\n" + "tnH th 1\n" + "dfL de 1\n" + "hzL th 1\n" + "zNk sz 1\n" + "lBm le 1\n" + "lXl le 1\n" + "yPv va 1\n" + "Zcl ch 1\n" + "hMq th 1\n" + "rJj ri 1\n" + "aXw an 1\n" + "zsQ sz 1\n" + "cQm ch 1\n" + "Sqc ch 1\n" + "tKm th 1\n" + "hvO th 1\n" + "hGd th 1\n" + "Wbn an 1\n" + "vCf va 1\n" + "lGg ng 1\n" + "vDh th 1\n" + "wDq qu 1\n" + "xRy ny 1\n" + "vXi in 1\n" + "qiQ qu 1\n" + "cFs ch 1\n" + "Lhp th 1\n" + "xEp pr 1\n" + "fQt th 1\n" + "cJv ch 1\n" + "lzO le 1\n" + "Fxk ka 1\n" + "tDd th 1\n" + "Xnx an 1\n" + "txC th 1\n" + "tGb th 1\n" + "zvG sz 1\n" + "gpC ng 1\n" + "pxD pr 1\n" + "Zfp pr 1\n" + "oWt th 1\n" + "vvV va 1\n" + "Gwf wa 1\n" + "Ycv ch 1\n" + "gcZ ch 1\n" + "mMw me 1\n" + "yQl le 1\n" + "uGp qu 1\n" + "lNj le 1\n" + "Ycm ch 1\n" + "vIx va 1\n" + "yLp pr 1\n" + "mRx me 1\n" + "nrK an 1\n" + "Zyh th 1\n" + "Nct th 1\n" + "Qml le 1\n" + "zPd de 1\n" + "dWq qu 1\n" + "Egx ng 1\n" + "vNs st 1\n" + "sNl le 1\n" + "pdW de 1\n" + "Snh th 1\n" + "yrP er 1\n" + "fJl le 1\n" + "tVg th 1\n" + "jvC ij 1\n" + "yhN th 1\n" + "qdC qu 1\n" + "pmT me 1\n" + "Lbg ng 1\n" + "xpJ pr 1\n" + "mYt th 1\n" + "bwV wa 1\n" + "wjD ij 1\n" + "fqC qu 1\n" + "xUf fo 1\n" + "dhU th 1\n" + "bZb be 1\n" + "twD th 1\n" + "bbM be 1\n" + "hgC th 1\n" + "dKb de 1\n" + "vJm va 1\n" + "wEq qu 1\n" + "Ofq qu 1\n" + "cXl ch 1\n" + "wpV pr 1\n" + "tqM th 1\n" + "pUf pr 1\n" + "Twx wa 1\n" + "Mgq 
ng 1\n" + "vQo on 1\n" + "yjT ij 1\n" + "aVd an 1\n" + "eHp er 1\n" + "vGv va 1\n" + "srG er 1\n" + "qVb qu 1\n" + "tlM th 1\n" + "nrT an 1\n" + "zRh th 1\n" + "cLr ch 1\n" + "lrH er 1\n" + "wTl le 1\n" + "cvI ch 1\n" + "kqN qu 1\n" + "Ixp pr 1\n" + "xeQ er 1\n" + "cNy ch 1\n" + "kRh th 1\n" + "ruY qu 1\n" + "Xcq ch 1\n" + "Kzb bi 1\n" + "Wxh th 1\n" + "pjM ij 1\n" + "jdO de 1\n" + "Jfy ny 1\n" + "bVz sz 1\n" + "dQo on 1\n" + "ncQ an 1\n" + "pVw pr 1\n" + "Sxj ij 1\n" + "Ubp pr 1\n" + "wvC va 1\n" + "khG th 1\n" + "cqF ch 1\n" + "Nxj ij 1\n" + "wDm me 1\n" + "yDd de 1\n" + "iyI in 1\n" + "eXq qu 1\n" + "hqP th 1\n" + "Kxr er 1\n" + "vsY st 1\n" + "Twb wa 1\n" + "fqw qu 1\n" + "wmC me 1\n" + "vFx va 1\n" + "vnC an 1\n" + "nWq an 1\n" + "hzB th 1\n" + "Kfk ka 1\n" + "tQe th 1\n" + "juW qu 1\n" + "qlX qu 1\n" + "hGw th 1\n" + "Oqd qu 1\n" + "Npw pr 1\n" + "hgW th 1\n" + "fxM fo 1\n" + "jSy ij 1\n" + "fJt th 1\n" + "mjG ij 1\n" + "tgV th 1\n" + "Ogx ng 1\n" + "Hbx be 1\n" + "Ljl le 1\n" + "ivZ in 1\n" + "bmY me 1\n" + "Qfp pr 1\n" + "wfQ wa 1\n" + "hCg th 1\n" + "vuU qu 1\n" + "ydZ de 1\n" + "vVk ka 1\n" + "mZf me 1\n" + "lOq qu 1\n" + "qIv qu 1\n" + "xZb be 1\n" + "xqk qu 1\n" + "Wmy me 1\n" + "Jqi qu 1\n" + "cxL ch 1\n" + "Ztq th 1\n" + "tdT th 1\n" + "uWt th 1\n" + "xGz sz 1\n" + "Wwk ka 1\n" + "pBk ka 1\n" + "yqg ng 1\n" + "cYl ch 1\n" + "ynW an 1\n" + "wyJ wa 1\n" + "qGy qu 1\n" + "fNp pr 1\n" + "hFs th 1\n" + "Yxu qu 1\n" + "kvJ ka 1\n" + "Fxz sz 1\n" + "twG th 1\n" + "qvG qu 1\n" + "vRp va 1\n" + "Qqi qu 1\n" + "gzE ng 1\n" + "pNl le 1\n" + "zpW sz 1\n" + "dcP ch 1\n" + "cPx ch 1\n" + "wcQ ch 1\n" + "pQc ch 1\n" + "qyF qu 1\n" + "zcX ch 1\n" + "wqk qu 1\n" + "kmY ka 1\n" + "qlG qu 1\n" + "xEz sz 1\n" + "pqV qu 1\n" + "Ohp th 1\n" + "xdM de 1\n" + "fLp pr 1\n" + "qAe qu 1\n" + "Xwv va 1\n" + "Lzi in 1\n" + "qOk qu 1\n" + "cXn an 1\n" + "Kds de 1\n" + "gvU ng 1\n" + "fPk ka 1\n" + "nZr an 1\n" + "Hxq qu 1\n" + "fCm me 1\n" + "qfD qu 1\n" + "Wfv va 1\n" + "qfb qu 
1\n" + "jqC qu 1\n" + "fuX qu 1\n" + "qfA qu 1\n" + "Rlt th 1\n" + "xjD ij 1\n" + "wtF th 1\n" + "Xmz sz 1\n" + "pWp pr 1\n" + "Qxv va 1\n" + "zVf sz 1\n" + "gmZ ng 1\n" + "qdU qu 1\n" + "jqV qu 1\n" + "gXc ch 1\n" + "qmK qu 1\n" + "Gfj ij 1\n" + "cQr ch 1\n" + "Yhr th 1\n" + "vvS va 1\n" + "uDb qu 1\n" + "cdB ch 1\n" + "bvE va 1\n" + "xvS va 1\n" + "jRq qu 1\n" + "rvD er 1\n" + "Xyy ny 1\n" + "Jfi in 1\n" + "aBw an 1\n" + "nWc an 1\n" + "xBq qu 1\n" + "kgY ng 1\n" + "bGb bi 1\n" + "gjE ng 1\n" + "Rlw le 1\n" + "wrT er 1\n" + "bQr er 1\n" + "ljY le 1\n" + "qvU qu 1\n" + "fKm me 1\n" + "pTt th 1\n" + "zTw sz 1\n" + "qnV an 1\n" + "rWx er 1\n" + "nWd an 1\n" + "nKf an 1\n" + "kMf ka 1\n" + "fkG ka 1\n" + "bwX wa 1\n" + "cwV ch 1\n" + "uwK qu 1\n" + "rLv er 1\n" + "zMb sz 1\n" + "zpZ sz 1\n" + "rMq qu 1\n" + "Ttj th 1\n" + "gvO ng 1\n" + "Jcz ch 1\n" + "Cyx ny 1\n" + "njX an 1\n" + "aVx an 1\n" + "qXn an 1\n" + "Uqs qu 1\n" + "dVz de 1\n" + "Rcp ch 1\n" + "eKg ng 1\n" + "Xzn in 1\n" + "vyF va 1\n" + "Klc ch 1\n" + "xdI de 1\n" + "Hqb qu 1\n" + "xEe er 1\n" + "qpI qu 1\n" + "gDx ng 1\n" + "Jhf th 1\n" + "quK un 1\n" + "vgU ng 1\n" + "rWv er 1\n" + "Pnm an 1\n" + "nLm an 1\n" + "Bhj th 1\n" + "bPt th 1\n" + "jpI ij 1\n" + "tLz th 1\n" + "vpS va 1\n" + "Fxj ij 1\n" + "qDs qu 1\n" + "wzM sz 1\n" + "gwJ ng 1\n" + "zBw sz 1\n" + "qGv qu 1\n" + "rLh th 1\n" + "Bjl le 1\n" + "hfH th 1\n" + "clW ch 1\n" + "Rgk ng 1\n" + "Gsg ng 1\n" + "Uvx va 1\n" + "Qgv ng 1\n" + "gfX ng 1\n" + "rQv er 1\n" + "xvG va 1\n" + "kjx ij 1\n" + "dGf de 1\n" + "fcA ch 1\n" + "Ehq th 1\n" + "zBz sz 1\n" + "Gpk ka 1\n" + "tBv th 1\n" + "Xfg ng 1\n" + "yJm me 1\n" + "sqT qu 1\n" + "prY er 1\n" + "Dqo qu 1\n" + "Jzg ng 1\n" + "qMp qu 1\n" + "yfM ny 1\n" + "Gxf fo 1\n" + "wzP sz 1\n" + "zNm sz 1\n" + "wKg ng 1\n" + "Rrd er 1\n" + "Hvw va 1\n" + "gfD ng 1\n" + "Wmz sz 1\n" + "cJn an 1\n" + "nTf an 1\n" + "uvW qu 1\n" + "uPf qu 1\n" + "vwR va 1\n" + "bMf be 1\n" + "wIu qu 1\n" + "kxY ka 1\n" + "gZk ng 1\n" 
+ "qFd qu 1\n" + "bMl le 1\n" + "wHl le 1\n" + "wVg ng 1\n" + "wlX le 1\n" + "fsL st 1\n" + "pRf pr 1\n" + "zsX st 1\n" + "qBk qu 1\n" + "Xzp sz 1\n" + "jdR de 1\n" + "Zlz le 1\n" + "Wfc ch 1\n" + "Rjv ij 1\n" + "vFz sz 1\n" + "tkV th 1\n" + "Xbw wa 1\n" + "xQc ch 1\n" + "Kxy ny 1\n" + "xCv va 1\n" + "nqV an 1\n" + "Wwx wa 1\n" + "kdW de 1\n" + "pkI ka 1\n" + "ohS th 1\n" + "Zdc ch 1\n" + "mCg ng 1\n" + "sxL st 1\n" + "Qrx er 1\n" + "qXw qu 1\n" + "wqQ qu 1\n" + "ijK in 1\n" + "sFz st 1\n" + "Hlw le 1\n" + "Gqn an 1\n" + "xPk ka 1\n" + "wZq qu 1\n" + "jqm qu 1\n" + "Lzp sz 1\n" + "Bdz de 1\n" + "wQl le 1\n" + "wtJ th 1\n" + "Uyi in 1\n" + "Wcy ch 1\n" + "wqH qu 1\n" + "Bns an 1\n" + "cDt th 1\n" + "xJv va 1\n" + "Wfz sz 1\n" + "xhP th 1\n" + "cWp ch 1\n" + "rqZ qu 1\n" + "bkB ka 1\n" + "Wtl th 1\n" + "gzf ng 1\n" + "bMr er 1\n" + "pxN pr 1\n" + "vhV th 1\n" + "kqX qu 1\n" + "Kdq qu 1\n" + "vQl le 1\n" + "ykC ka 1\n" + "zMh th 1\n" + "Eqz qu 1\n" + "lXq qu 1\n" + "zmZ sz 1\n" + "qpB qu 1\n" + "vGj ij 1\n" + "Tjx zj 1\n" + "tvK th 1\n" + "gYc ch 1\n" + "lFc ch 1\n" + "iJt th 1\n" + "Pkx ka 1\n" + "cDv ch 1\n" + "Yyd de 1\n" + "Vcq ch 1\n" + "Xhq th 1\n" + "zNf sz 1\n" + "vcD ch 1\n" + "bnW an 1\n" + "uvQ qu 1\n" + "Zzj sz 1\n" + "gPj ng 1\n" + "jwD ij 1\n" + "jpO ij 1\n" + "bDx be 1\n" + "vEi in 1\n" + "Zct th 1\n" + "wrX er 1\n" + "dhS th 1\n" + "zjJ sz 1\n" + "dDk de 1\n" + "srJ er 1\n" + "aWg an 1\n" + "mvJ va 1\n" + "Ytc th 1\n" + "jiQ in 1\n" + "tFz th 1\n" + "sJl le 1\n" + "vZq qu 1\n" + "xUd de 1\n" + "oqB qu 1\n" + "xDh th 1\n" + "hfE th 1\n" + "mSb me 1\n" + "jmR ij 1\n" + "rFp er 1\n" + "Xjy ij 1\n" + "bPp pr 1\n" + "iqQ ti 1\n" + "mfq qu 1\n" + "txL th 1\n" + "jBd de 1\n" + "Xvq qu 1\n" + "dvY de 1\n" + "sdM de 1\n" + "xgY ng 1\n" + "rYh th 1\n" + "vlA le 1\n" + "pFb pr 1\n" + "yFz sz 1\n" + "gcK ch 1\n" + "xfZ fo 1\n" + "jDc ch 1\n" + "yNv va 1\n" + "tKt th 1\n" + "wtU th 1\n" + "bHk ka 1\n" + "qCw qu 1\n" + "Zca an 1\n" + "kDw ka 1\n" + "Ywc ch 1\n" + 
"pXs st 1\n" + "yMm me 1\n" + "Gwq qu 1\n" + "mYv va 1\n" + "wCx wa 1\n" + "jZx ij 1\n" + "oQd on 1\n" + "Fzk sz 1\n" + "lwF le 1\n" + "Xzk sz 1\n" + "Njx ij 1\n" + "yoI on 1\n" + "sJm st 1\n" + "wKk ka 1\n" + "Qth ch 1\n" + "Llz le 1\n" + "gVf gi 1\n" + "pPq qu 1\n" + "lGy le 1\n" + "gzR ng 1\n" + "rXg ng 1\n" + "Npf pr 1\n" + "wvR va 1\n" + "yXs st 1\n" + "mMl li 1\n" + "bYx be 1\n" + "fzZ sz 1\n" + "vrG er 1\n" + "Kdk de 1\n" + "yqw qu 1\n" + "Lkq qu 1\n" + "jKs st 1\n" + "Zqx qu 1\n" + "Pfm me 1\n" + "rlW er 1\n" + "hPv th 1\n" + "Ojx ij 1\n" + "Gtq th 1\n" + "vtJ th 1\n" + "Wly le 1\n" + "yHd de 1\n" + "kQb ka 1\n" + "Ldc de 1\n" + "sUx st 1\n" + "cJg ch 1\n" + "fLd de 1\n" + "Mjq qu 1\n" + "Cjm ij 1\n" + "awX an 1\n" + "Gtl th 1\n" + "wzN sz 1\n" + "bqx qu 1\n" + "fAq qu 1\n" + "ezX er 1\n" + "cBx ch 1\n" + "csX ch 1\n" + "cUf ch 1\n" + "qsJ qu 1\n" + "hsZ th 1\n" + "qzg ng 1\n" + "Qgk ng 1\n" + "Nxg ng 1\n" + "Hqa an 1\n" + "rXl er 1\n" + "nlP an 1\n" + "aVg an 1\n" + "yhG th 1\n" + "kfA ka 1\n" + "Vmk mG 1\n" + "jKm ij 1\n" + "hPd th 1\n" + "aPd an 1\n" + "bYy be 1\n" + "bnZ an 1\n" + "Gsj st 1\n" + "kxQ ka 1\n" + "vkF ka 1\n" + "jzS sz 1\n" + "fWm me 1\n" + "Qcu ch 1\n" + "rZf er 1\n" + "jbZ ij 1\n" + "aQj an 1\n" + "bzO sz 1\n" + "fZq qu 1\n" + "lrN er 1\n" + "fkL ka 1\n" + "Dqv qu 1\n" + "zkC sz 1\n" + "sLw st 1\n" + "Nvr er 1\n" + "Nby be 1\n" + "eMh th 1\n" + "wFc ch 1\n" + "Cxz sz 1\n" + "iZp in 1\n" + "dvZ de 1\n" + "vIh th 1\n" + "qCl qu 1\n" + "Pzo on 1\n" + "vNq qu 1\n" + "zqK qu 1\n" + "Lmx me 1\n" + "xVt th 1\n" + "glD ng 1\n" + "Gbf be 1\n" + "Jvq qu 1\n" + "zFw sz 1\n" + "tMq th 1\n" + "vkJ ka 1\n" + "Sxu qu 1\n" + "afU an 1\n" + "mHb me 1\n" + "jxU ij 1\n" + "cJl ch 1\n" + "uqE qu 1\n" + "Nqq qu 1\n" + "xGt th 1\n" + "czG ch 1\n" + "Kfg ng 1\n" + "zWh th 1\n" + "yXm me 1\n" + "fnD an 1\n" + "Jrd er 1\n" + "oxZ on 1\n" + "hXn th 1\n" + "fqI qu 1\n" + "wAo on 1\n" + "iGk in 1\n" + "xEw wa 1\n" + "fVq qu 1\n" + "ytU th 1\n" + "bhG th 1\n" + "oQz 
on 1\n" + "pgO ng 1\n" + "Yqm qu 1\n" + "bJi in 1\n" + "kcV ch 1\n" + "knM an 1\n" + "Cwr er 1\n" + "Wgd ng 1\n" + "bpT pr 1\n" + "Jdj de 1\n" + "Nbq qu 1\n" + "twJ th 1\n" + "Qep er 1\n" + "Kdc ch 1\n" + "kQq qu 1\n" + "rPq qu 1\n" + "lWp le 1\n" + "Fbq qu 1\n" + "bVk ka 1\n" + "zlI le 1\n" + "Bzp sz 1\n" + "jfK ij 1\n" + "Yvm va 1\n" + "Ftm th 1\n" + "aMj an 1\n" + "zzV sz 1\n" + "zOa an 1\n" + "mHc ch 1\n" + "xWn an 1\n" + "fFh th 1\n" + "sDv st 1\n" + "vmD va 1\n" + "xjL ij 1\n" + "iBq qu 1\n" + "jqT qu 1\n" + "hsR th 1\n" + "Qxo on 1\n" + "jsG st 1\n" + "cXb ch 1\n" + "Ybj ij 1\n" + "xeJ er 1\n" + "oPq qu 1\n" + "yXt th 1\n" + "xvL va 1\n" + "jcF ch 1\n" + "kFb ka 1\n" + "jXv ij 1\n" + "Aox on 1\n" + "zkQ sz 1\n" + "fPd de 1\n" + "Fvx va 1\n" + "fbX be 1\n" + "oCf on 1\n" + "Yjd de 1\n" + "Ppf pr 1\n" + "Njs st 1\n" + "cZh th 1\n" + "vnG an 1\n" + "cwJ cm 1\n" + "qJl qu 1\n" + "gNf ng 1\n" + "Tfv va 1\n" + "vwK va 1\n" + "Zcs ch 1\n" + "eBv er 1\n" + "qLf qu 1\n" + "Yqt th 1\n" + "crD ch 1\n" + "Icj ch 1\n" + "qBl qu 1\n" + "gzX ng 1\n" + "ujF qu 1\n" + "vxU va 1\n" + "kZt th 1\n" + "Ldh th 1\n" + "bfM be 1\n" + "mQm QO 1\n" + "zlQ le 1\n" + "jbU ij 1\n" + "Kvz sz 1\n" + "Uxw wa 1\n" + "pjS ij 1\n" + "Xvv va 1\n" + "kjI ij 1\n" + "cYi ch 1\n" + "nJn an 1\n" + "Qxz sz 1\n" + "aNw an 1\n" + "Jfp pr 1\n" + "bNz sz 1\n" + "xdQ de 1\n" + "Bzk sz 1\n" + "qZz qu 1\n" + "Ycp ch 1\n" + "pGs st 1\n" + "kCf ka 1\n" + "gwP ng 1\n" + "wbV wa 1\n" + "Eqt eq 1\n" + "Xhn th 1\n" + "oUf on 1\n" + "dKc ch 1\n" + "sxN st 1\n" + "Ofz sz 1\n" + "gCp ng 1\n" + "bhI th 1\n" + "hgU th 1\n" + "knU an 1\n" + "kjT ij 1\n" + "fsZ st 1\n" + "lGv le 1\n" + "wMd de 1\n" + "ukQ qu 1\n" + "Ghk th 1\n" + "kRw ka 1\n" + "zRc ch 1\n" + "gwK ng 1\n" + "vJp va 1\n" + "tVc th 1\n" + "pqT qu 1\n" + "iYl in 1\n" + "xLv va 1\n" + "Xdq qu 1\n" + "zcO ch 1\n" + "plM le 1\n" + "bDz sz 1\n" + "Nmx me 1\n" + "dKv de 1\n" + "hPk th 1\n" + "Tjy ij 1\n" + "wYs st 1\n" + "nfJ an 1\n" + "tfC th 1\n" + "zJt th 
1\n" + "lKp le 1\n" + "Iyc ch 1\n" + "xuB qu 1\n" + "eKx er 1\n" + "sZf st 1\n" + "zpQ sz 1\n" + "sfL st 1\n" + "mjT ij 1\n" + "zXw sz 1\n" + "yKt th 1\n" + "rwV er 1\n" + "pjB ij 1\n" + "qYb qu 1\n" + "bYz sz 1\n" + "qqY eq 1\n" + "uIf qu 1\n" + "jTc ch 1\n" + "sqC qu 1\n" + "uJc ch 1\n" + "dGx de 1\n" + "swF st 1\n" + "Hfn an 1\n" + "Htb th 1\n" + "pfW hW 1\n" + "iyG in 1\n" + "zPc ch 1\n" + "yzV sz 1\n" + "pVz sz 1\n" + "sPg ng 1\n" + "fKj ij 1\n" + "eFb er 1\n" + "Qji jS 1\n" + "mtH th 1\n" + "wgZ ng 1\n" + "hHd th 1\n" + "fTt th 1\n" + "gxZ ng 1\n" + "Ktg th 1\n" + "hWd th 1\n" + "fWq qu 1\n" + "wSv va 1\n" + "Fzn an 1\n" + "ghH th 1\n" + "npW an 1\n" + "jvP ij 1\n" + "uYk qu 1\n" + "Uxn an 1\n" + "Sqg ng 1\n" + "zcJ ch 1\n" + "dMr er 1\n" + "Zgc ch 1\n" + "qGp qu 1\n" + "oVq qu 1\n" + "oUa an 1\n" + "oqV qu 1\n" + "jGs st 1\n" + "Ybq qu 1\n" + "qRf qu 1\n" + "brZ er 1\n" + "qTv qu 1\n" + "wZf wa 1\n" + "gOj ng 1\n" + "Jji in 1\n" + "Ppx pr 1\n" + "qwB qu 1\n" + "qcJ ch 1\n" + "fFz sz 1\n" + "wwY wa 1\n" + "kTc ch 1\n" + "uGn an 1\n" + "eQq qu 1\n" + "qGk qu 1\n" + "dpV de 1\n" + "vTm va 1\n" + "Ojq qu 1\n" + "dpX de 1\n" + "bYf be 1\n" + "tjV th 1\n" + "Lzn LG 1\n" + "Yjm ij 1\n" + "uYw qu 1\n" + "Zdg ng 1\n" + "hXs th 1\n" + "Iwp pr 1\n" + "hJw th 1\n" + "Tfd de 1\n" + "cxO ch 1\n" + "Qqy qu 1\n" + "lDv le 1\n" + "zsO st 1\n" + "mrG er 1\n" + "cjJ ch 1\n" + "dgD ng 1\n" + "cUw ch 1\n" + "zdB de 1\n" + "jlU le 1\n" + "bBf be 1\n" + "qbJ qu 1\n" + "qlR qu 1\n" + "cWc ch 1\n" + "Xgb ng 1\n" + "zrU er 1\n" + "bgI ng 1\n" + "wjJ ij 1\n" + "mvU va 1\n" + "rCp GC 1\n" + "nVx an 1\n" + "xbG be 1\n" + "tdN th 1\n" + "yjR ij 1\n" + "wQj ij 1\n" + "xzZ sz 1\n" + "qUk qu 1\n" + "xjY ij 1\n" + "Jxz sz 1\n" + "xZs st 1\n" + "vZx va 1\n" + "lRs le 1\n" + "vwp va 1\n" + "wpj ij 1\n" + "swS st 1\n" + "Eqx qu 1\n" + "vEw va 1\n" + "tkQ th 1\n" + "vgX ng 1\n" + "Rwb wa 1\n" + "sjW st 1\n" + "dXm de 1\n" + "fvY vK 1\n" + "lrO er 1\n" + "Ldx de 1\n" + "cxV ch 1\n" + "qFh th 1\n" 
+ "qVw qu 1\n" + "Pyf ny 1\n" + "Kxz sz 1\n" + "hwJ th 1\n" + "cpL ch 1\n" + "Hge ng 1\n" + "Wbh th 1\n" + "lQq qu 1\n" + "hDl th 1\n" + "Zph th 1\n" + "wZj ij 1\n" + "Zqt th 1\n" + "xmU me 1\n" + "tUf th 1\n" + "qWo qu 1\n" + "Lrd er 1\n" + "pQs st 1\n" + "rZv er 1\n" + "mjI ij 1\n" + "xQy ny 1\n" + "vGy va 1\n" + "jwY ij 1\n" + "cNn an 1\n" + "zpP sz 1\n" + "vKd de 1\n" + "wVk ka 1\n" + "tMh ch 1\n" + "Ktd th 1\n" + "tpG th 1\n" + "iDf in 1\n" + "qKl qu 1\n" + "jLc ch 1\n" + "Jjl le 1\n" + "hcQ th 1\n" + "Tqg qu 1\n" + "bGk ka 1\n" + "jxV ij 1\n" + "fcC ch 1\n" + "Fwx wa 1\n" + "qPy qu 1\n" + "jmE ij 1\n" + "xmT me 1\n" + "lxC GC 1\n" + "lRr er 1\n" + "Qkl le 1\n" + "ihF th 1\n" + "Llt th 1\n" + "Kqe qu 1\n" + "Hhf th 1\n" + "nPq an 1\n" + "zvQ QO 1\n" + "jGy ij 1\n" + "lMk le 1\n" + "uOj qu 1\n" + "fdT de 1\n" + "qvH qu 1\n" + "pcZ ch 1\n" + "qkc ch 1\n" + "cbJ ch 1\n" + "gfK ng 1\n" + "pMt th 1\n" + "vpF va 1\n" + "dgP ng 1\n" + "mxF me 1\n" + "rZp er 1\n" + "cGd ch 1\n" + "sPx st 1\n" + "rGd er 1\n" + "gbQ ng 1\n" + "Dfz sz 1\n" + "sjC st 1\n" + "zSx sz 1\n" + "qIo qu 1\n" + "dIw de 1\n" + "kpF ka 1\n" + "eUw er 1\n" + "Hxc ch 1\n" + "yvG va 1\n" + "vUf va 1\n" + "fjF ij 1\n" + "kLq qu 1\n" + "Zjt th 1\n" + "fLq qu 1\n" + "ydS de 1\n" + "zwK sz 1\n" + "hHy th 1\n" + "Ssw st 1\n" + "hjG th 1\n" + "Ddp de 1\n" + "bPs st 1\n" + "Wpq qu 1\n" + "crW ch 1\n" + "Xpj ij 1\n" + "oXr er 1\n" + "vjK ij 1\n" + "Vzf sz 1\n" + "lYd le 1\n" + "Odx de 1\n" + "hVt th 1\n" + "gRc ch 1\n" + "Ztf th 1\n" + "hVj th 1\n" + "Jjf ij 1\n" + "jFb ij 1\n" + "Lhf th 1\n" + "jlO le 1\n" + "jvB ij 1\n" + "gbN ng 1\n" + "vPm va 1\n" + "tQd th 1\n" + "Vvj ij 1\n" + "rqX qu 1\n" + "zEo on 1\n" + "jsB st 1\n" + "qmH qu 1\n" + "btE th 1\n" + "Wdd de 1\n" + "Dmj ij 1\n" + "ywI wa 1\n" + "jpQ ij 1\n" + "uXs qu 1\n" + "bYm me 1\n" + "oFz on 1\n" + "tBg th 1\n" + "cCn ch 1\n" + "dZg ng 1\n" + "wrL er 1\n" + "Jry er 1\n" + "iKd in 1\n" + "vcN ch 1\n" + "zNp sz 1\n" + "nRf an 1\n" + "dcH ch 1\n" + 
"qaO an 1\n" + "uaQ an 1\n" + "jxL ij 1\n" + "mUf me 1\n" + "vOk ka 1\n" + "Pxt th 1\n" + "fuQ qu 1\n" + "sfN st 1\n" + "Qlv le 1\n" + "bZy be 1\n" + "vEq vK 1\n" + "Xvg ng 1\n" + "Jxb be 1\n" + "zGz sz 1\n" + "Cqf qu 1\n" + "sPp st 1\n" + "vAq qu 1\n" + "kWd de 1\n" + "rcZ cm 1\n" + "lDs le 1\n" + "xDd de 1\n" + "pSj ij 1\n" + "vwS va 1\n" + "kgQ ng 1\n" + "crT ch 1\n" + "fKs st 1\n" + "qhc th 1\n" + "gMl ng 1\n" + "zKt th 1\n" + "jdF de 1\n" + "cfN ch 1\n" + "sdO st 1\n" + "kHh th 1\n" + "xvE va 1\n" + "bPf be 1\n" + "rzX er 1\n" + "vSj ij 1\n" + "dFf de 1\n" + "vXl le 1\n" + "bRv va 1\n" + "Zxw wa 1\n" + "Xzw sz 1\n" + "vrR er 1\n" + "xHb be 1\n" + "qeE qu 1\n" + "jrQ er 1\n" + "vkI ka 1\n" + "frY er 1\n" + "jqL qu 1\n" + "cZj ch 1\n" + "Tmg ng 1\n" + "mHw me 1\n" + "dqS qu 1\n" + "qlI qu 1\n" + "Zvb va 1\n" + "Klx le 1\n" + "gbS ng 1\n" + "sbQ st 1\n" + "quF un 1\n" + "qzT qu 1\n" + "qaI an 1\n" + "Vmd de 1\n" + "qaQ an 1\n" + "Qkb ka 1\n" + "Xjb ij 1\n" + "oCq GC 1\n" + "qQh QO 1\n" + "cwO ch 1\n" + "tMf th 1\n" + "zrK er 1\n" + "wKy wa 1\n" + "wKb wa 1\n" + "cqS ch 1\n" + "iGv in 1\n" + "xXw wa 1\n" + "fMx fo 1\n" + "Zmv va 1\n" + "Yqq qu 1\n" + "kDh th 1\n" + "Jxy ny 1\n" + "yyE ny 1\n" + "sUv st 1\n" + "cVr ch 1\n" + "bqH qu 1\n" + "Wgq qu 1\n" + "uqQ qu 1\n" + "bTg ng 1\n" + "iMv in 1\n" + "qWk qu 1\n" + "fdV de 1\n" + "oQq qu 1\n" + "nZp an 1\n" + "zoY on 1\n" + "jRk ij 1\n" + "qPj qu 1\n" + "uqL qu 1\n" + "cqX ch 1\n" + "lBq qu 1\n" + "fpX pr 1\n" + "bYw wa 1\n" + "Yeq qu 1\n" + "hjN th 1\n" + "tqW th 1\n" + "jhT th 1\n" + "cvF ch 1\n" + "Ycx ch 1\n" + "jFs st 1\n" + "Hdy de 1\n" + "lrZ er 1\n" + "fZv va 1\n" + "Tfw wa 1\n" + "zrI er 1\n" + "dDv de 1\n" + "xeH er 1\n" + "lzH le 1\n" + "sLr er 1\n" + "iKq qu 1\n" + "Fzc cm 1\n" + "xRd de 1\n" + "fSd de 1\n" + "qwF qu 1\n" + "wxY wa 1\n" + "Ykw ka 1\n" + "oVp on 1\n" + "cgB ch 1\n" + "bFh th 1\n" + "njT an 1\n" + "dZz de 1\n" + "bhS th 1\n" + "Fzu qu 1\n" + "fHm me 1\n" + "vNz sz 1\n" + "qlF qu 1\n" + "Lvf 
va 1\n" + "zpU sz 1\n" + "jtL th 1\n" + "cQq ch 1\n" + "mKm me 1\n" + "Rwc ch 1\n" + "jrO er 1\n" + "npB an 1\n" + "Qtx th 1\n" + "Mqj qu 1\n" + "Oqx qu 1\n" + "Dzp sz 1\n" + "hVg th 1\n" + "pTn an 1\n" + "gQj ng 1\n" + "mTn an 1\n" + "tQv th 1\n" + "lZh th 1\n" + "kJj ij 1\n" + "crP ch 1\n" + "mqC qu 1\n" + "Dwl le 1\n" + "vVj ij 1\n" + "hqT th 1\n" + "mJw me 1\n" + "txT th 1\n" + "wZm me 1\n" + "Xnq an 1\n" + "hfU th 1\n" + "kVr er 1\n" + "gVp ng 1\n" + "nBp an 1\n" + "xnZ an 1\n" + "jqA qu 1\n" + "Pzk sz 1\n" + "fJq qu 1\n" + "Gnf an 1\n" + "Kxp pr 1\n" + "dXl Xm 1\n" + "hwL th 1\n" + "Rrn an 1\n" + "klL le 1\n" + "fOg ng 1\n" + "Qwx wa 1\n" + "Cmx me 1\n" + "Fbf be 1\n" + "hWq th 1\n" + "bSw wa 1\n" + "Bxr er 1\n" + "zcB ch 1\n" + "lvX le 1\n" + "Kkx ka 1\n" + "qfI qu 1\n" + "uKg qu 1\n" + "Yku qu 1\n" + "jJz sz 1\n" + "uIp qu 1\n" + "qAd qu 1\n" + "pfH pr 1\n" + "Qwf wa 1\n" + "wbU wa 1\n" + "vDv va 1\n" + "gJn an 1\n" + "zlR le 1\n" + "mXr er 1\n" + "rHx er 1\n" + "oVz on 1\n" + "gtG th 1\n" + "lrK HK 1\n" + "Wxe er 1\n" + "pnJ an 1\n" + "Fqy qu 1\n" + "jVl le 1\n" + "cbP ch 1\n" + "Gjc jS 1\n" + "jQs st 1\n" + "tvV th 1\n" + "Hzk sz 1\n" + "jyW ij 1\n" + "Xbf be 1\n" + "qfS qu 1\n" + "Wvp va 1\n" + "wbL wa 1\n" + "mkO ka 1\n" + "eqB qu 1\n" + "dvS de 1\n" + "zGh th 1\n" + "vWu qu 1\n" + "flX le 1\n" + "xJq qu 1\n" + "qLk qu 1\n" + "vNl le 1\n" + "kzQ sz 1\n" + "Czv sz 1\n" + "knV an 1\n" + "Rjb ij 1\n" + "bNq qu 1\n" + "zPm sz 1\n" + "qxB qu 1\n" + "Lhh th 1\n" + "Uvt th 1\n" + "xfU fo 1\n" + "iNp in 1\n" + "yYg ng 1\n" + "oPb on 1\n" + "qiW qu 1\n" + "ycD ch 1\n" + "wVz sz 1\n" + "wGq qu 1\n" + "hRb th 1\n" + "xbB be 1\n" + "sZl le 1\n" + "gxO ng 1\n" + "wFk ka 1\n" + "Mxd de 1\n" + "dxP de 1\n" + "lRq qu 1\n" + "hbZ th 1\n" + "Eao an 1\n" + "zgA ng 1\n" + "qcW ch 1\n" + "vmQ va 1\n" + "Yqf qu 1\n" + "wiO in 1\n" + "xOe er 1\n" + "Hfy ny 1\n" + "bfS be 1\n" + "Qhn th 1\n" + "Cmk ka 1\n" + "lYs le 1\n" + "Nqt th 1\n" + "qeJ qu 1\n" + "ztJ th 1\n" + "pMv va 
1\n" + "uhW th 1\n" + "jSb ij 1\n" + "dYh th 1\n" + "cfW ch 1\n" + "gSx ng 1\n" + "qSv qu 1\n" + "jCs st 1\n" + "pwC pr 1\n" + "Gxq qu 1\n" + "fMq qu 1\n" + "kkC ka 1\n" + "uqI qu 1\n" + "zBk sz 1\n" + "zsW st 1\n" + "fZb be 1\n" + "xjb ij 1\n" + "vHq qu 1\n" + "fwN wa 1\n" + "vMw va 1\n" + "Hhq th 1\n" + "csJ ch 1\n" + "brJ er 1\n" + "xvM va 1\n" + "mXn an 1\n" + "qWw wa 1\n" + "dxZ de 1\n" + "sVj st 1\n" + "xrF er 1\n" + "pbU pr 1\n" + "Tfz sz 1\n" + "wqT qu 1\n" + "vcF ch 1\n" + "nrS an 1\n" + "Whz th 1\n" + "kgX ng 1\n" + "yXk ka 1\n" + "kJb ka 1\n" + "rZk er 1\n" + "pBc ch 1\n" + "gUv ng 1\n" + "Hqe qu 1\n" + "Kqj qu 1\n" + "oFj on 1\n" + "xbN be 1\n" + "pnK an 1\n" + "Lbw wa 1\n" + "dMb de 1\n" + "qSp qu 1\n" + "Zsv st 1\n" + "wrV er 1\n" + "uKf qu 1\n" + "mlY le 1\n" + "gxF ng 1\n" + "tjL th 1\n" + "Xrc ch 1\n" + "rvF er 1\n" + "mLq qu 1\n" + "jrK er 1\n" + "Qlz le 1\n" + "zxD sz 1\n" + "fdY de 1\n" + "jvD ij 1\n" + "xQg ng 1\n" + "qFu un 1\n" + "sfJ st 1\n" + "pIf pr 1\n" + "hxJ th 1\n" + "cNc ch 1\n" + "Idq qu 1\n" + "yHf ny 1\n" + "qXm qu 1\n" + "ylD le 1\n" + "zFq qu 1\n" + "jWp ij 1\n" + "eKp er 1\n" + "xhf th 1\n" + "ybV be 1\n" + "xXs st 1\n" + "Yhk th 1\n" + "fwX wa 1\n" + "bqK qu 1\n" + "nvY an 1\n" + "xvk ka 1\n" + "rbP er 1\n" + "sXl le 1\n" + "Uwt th 1\n" + "wmW me 1\n" + "pxV pr 1\n" + "njZ an 1\n" + "Tqk qu 1\n" + "zmE sz 1\n" + "Rqu un 1\n" + "qqM qu 1\n" + "dhQ th 1\n" + "uJz qu 1\n" + "Vqd qu 1\n" + "yCk ka 1\n" + "pWu qu 1\n" + "Vdy de 1\n" + "iRx in 1\n" + "Vcm ch 1\n" + "wIg ng 1\n" + "Xbh th 1\n" + "vcG ch 1\n" + "jjX ij 1\n" + "nmO an 1\n" + "dQj de 1\n" + "dfV de 1\n" + "dbK de 1\n" + "gqk qu 1\n" + "nFd an 1\n" + "oWv on 1\n" + "nHp an 1\n" + "knK an 1\n" + "bxZ be 1\n" + "wmH me 1\n" + "fgX ng 1\n" + "gzH ng 1\n" + "Zbv va 1\n" + "vgM ng 1\n" + "dmK de 1\n" + "cvB ch 1\n" + "eQs er 1\n" + "cHm ch 1\n" + "sBt th 1\n" + "bHx be 1\n" + "vqd qu 1\n" + "Npy pr 1\n" + "xzL sz 1\n" + "gMx ng 1\n" + "vwU va 1\n" + "pfX pr 1\n" + "nFg an 1\n" 
+ "sFs st 1\n" + "Vqh th 1\n" + "Emq qu 1\n" + "tXy th 1\n" + "uVd qu 1\n" + "Yvj ij 1\n" + "qHo qu 1\n" + "pWm me 1\n" + "xcK ch 1\n" + "pUv va 1\n" + "pLn an 1\n" + "uVn an 1\n" + "Fsq qu 1\n" + "cGj ch 1\n" + "Xwy wa 1\n" + "gzT ng 1\n" + "dNq qu 1\n" + "jrU er 1\n" + "qtA th 1\n" + "gqT qu 1\n" + "pwM pr 1\n" + "lrP er 1\n" + "jmC ij 1\n" + "pmP me 1\n" + "yiY in 1\n" + "pTs st 1\n" + "Zwj ij 1\n" + "qpF qu 1\n" + "fhJ ch 1\n" + "fOv va 1\n" + "wcK ch 1\n" + "kqk qu 1\n" + "Ugz ng 1\n" + "xfF fo 1\n" + "cTv ch 1\n" + "gpX ng 1\n" + "Lfx fo 1\n" + "gwU ng 1\n" + "Dzx sz 1\n" + "kDc ch 1\n" + "Pvh th 1\n" + "kdY de 1\n" + "wWv va 1\n" + "sQq qu 1\n" + "mjY ij 1\n" + "yCb be 1\n" + "rSq qu 1\n" + "Sfv va 1\n" + "fZh th 1\n" + "dMd de 1\n" + "dNs st 1\n" + "jTv ij 1\n" + "tmW th 1\n" + "cxJ ch 1\n" + "uAo qu 1\n" + "mHx me 1\n" + "fgA ng 1\n" + "Rhx th 1\n" + "wWt th 1\n" + "pfU pr 1\n" + "oIj on 1\n" + "lhQ th 1\n" + "vDk ka 1\n" + "vJd de 1\n" + "sDp st 1\n" + "qiU qu 1\n" + "Yfs st 1\n" + "qxW qu 1\n" + "sFh th 1\n" + "vhP th 1\n" + "Vjj ij 1\n" + "tmQ th 1\n" + "wmM me 1\n" + "cVy ch 1\n" + "Kzw sz 1\n" + "tfA th 1\n" + "gjR ij 1\n" + "xyQ ny 1\n" + "mBv va 1\n" + "fQy ny 1\n" + "dZc ch 1\n" + "eVh th 1\n" + "Nvc ch 1\n" + "qFb qu 1\n" + "qhl th 1\n" + "Zcn ch 1\n" + "qwW qu 1\n" + "xZq qu 1\n" + "jhL th 1\n" + "lWf le 1\n" + "jJx ij 1\n" + "Yzt th 1\n" + "Eoq qu 1\n" + "Njm ij 1\n" + "Zgd ng 1\n" + "pGq qu 1\n" + "sgY ng 1\n" + "jyE ij 1\n" + "jzE sz 1\n" + "ujK qu 1\n" + "qbm qu 1\n" + "Wsf st 1\n" + "mQn an 1\n" + "sQs st 1\n" + "yXg ng 1\n" + "vYe er 1\n" + "ePv er 1\n" + "aCv an 1\n" + "pVm me 1\n" + "zxO sz 1\n" + "jjW ij 1\n" + "vgI ng 1\n" + "tZc th 1\n" + "Qtg th 1\n" + "vMt th 1\n" + "kTt th 1\n" + "Mxj ij 1\n" + "fbI be 1\n" + "qAu un 1\n" + "wfT wa 1\n" + "fcF ch 1\n" + "pfK pr 1\n" + "bOq qu 1\n" + "huX th 1\n" + "cJm ch 1\n" + "Xpg ng 1\n" + "tqJ th 1\n" + "Ovf va 1\n" + "Xlj le 1\n" + "Nrl er 1\n" + "fxW fo 1\n" + "Swq qu 1\n" + "qvE qu 1\n" + 
"qpY qu 1\n" + "oNw on 1\n" + "kYc ch 1\n" + "jXb ij 1\n" + "Qfk ka 1\n" + "eDp er 1\n" + "Vqb qu 1\n" + "sKz us 1\n" + "qjp qu 1\n" + "Uxl le 1\n" + "Lky ka 1\n" + "zFy sz 1\n" + "nMl an 1\n" + "yYi in 1\n" + "cQe ch 1\n" + "oYj on 1\n" + "tbB th 1\n" + "Ybg ng 1\n" + "nVk nd 1\n" + "bXc ch 1\n" + "Lqn an 1\n" + "mdK de 1\n" + "pdP de 1\n" + "tqS th 1\n" + "Zjf ij 1\n" + "kcC ch 1\n" + "qZq qu 1\n" + "aSd an 1\n" + "Cmh th 1\n" + "hzG th 1\n" + "wQm me 1\n" + "Gqg qu 1\n" + "yWp pr 1\n" + "Xrw er 1\n" + "yJy ny 1\n" + "sqD qu 1\n" + "dWb de 1\n" + "nbQ an 1\n" + "iwP in 1\n" + "lWs le 1\n" + "Tsg ng 1\n" + "dHz de 1\n" + "tcF th 1\n" + "Qkt th 1\n" + "Bdd de 1\n" + "Mxq qu 1\n" + "pjV ij 1\n" + "kQr er 1\n" + "dnI an 1\n" + "fyY ny 1\n" + "aFq an 1\n" + "Ylx le 1\n" + "Yym me 1\n" + "jbV ij 1\n" + "qcV ch 1\n" + "pzX sz 1\n" + "qRh th 1\n" + "djA de 1\n" + "bnI an 1\n" + "Llv le 1\n" + "tmZ th 1\n" + "hQo th 1\n" + "ztW th 1\n" + "Rxz sz 1\n" + "dxW de 1\n" + "qtW th 1\n" + "kqO qu 1\n" + "lHc ch 1\n" + "lRj le 1\n" + "hNf th 1\n" + "Giq qu 1\n" + "cYq ch 1\n" + "Ydp de 1\n" + "qWn an 1\n" + "xkB ka 1\n" + "kxC ka 1\n" + "ljA le 1\n" + "Qwp pr 1\n" + "mCp me 1\n" + "fJd de 1\n" + "vCt th 1\n" + "Vcz ch 1\n" + "vBf va 1\n" + "cYx ch 1\n" + "fHw wa 1\n" + "kvW ka 1\n" + "Jmz sz 1\n" + "hQj th 1\n" + "rbQ er 1\n" + "vxX va 1\n" + "wFh th 1\n" + "Tjz sz 1\n" + "hxR th 1\n" + "vdY de 1\n" + "pmF me 1\n" + "sDl le 1\n" + "rVh th 1\n" + "wDc ch 1\n" + "gBw ng 1\n" + "cHf ch 1\n" + "pzQ sz 1\n" + "lVp le 1\n" + "gfH ng 1\n" + "oGc ch 1\n" + "tvJ th 1\n" + "cMv ch 1\n" + "xnS an 1\n" + "vQx va 1\n" + "uoM qu 1\n" + "zkX sz 1\n" + "zHp sz 1\n" + "yuW qu 1\n" + "Qbv va 1\n" + "zwG sz 1\n" + "cpX ch 1\n" + "Rpv va 1\n" + "zKq qu 1\n" + "wUb wa 1\n" + "qnJ an 1\n" + "Rpy pr 1\n" + "bcS ch 1\n" + "qxK qu 1\n" + "qjD qu 1\n" + "lQg ng 1\n" + "krX er 1\n" + "Fcg ch 1\n" + "oVx on 1\n" + "vJf va 1\n" + "Bvk ka 1\n" + "dmX de 1\n" + "Wdj de 1\n" + "Yzp sz 1\n" + "Ycd ch 1\n" + "jKx 
ij 1\n" + "krH er 1\n" + "Lnm an 1\n" + "zCm sz 1\n" + "Uwj ij 1\n" + "Uvk ka 1\n" + "Mfj ij 1\n" + "yqJ qu 1\n" + "Lfq qu 1\n" + "yHz sz 1\n" + "kgJ ng 1\n" + "aGq an 1\n" + "tjH th 1\n" + "Zkc ch 1\n" + "wHv va 1\n" + "Nzp sz 1\n" + "cZx ch 1\n" + "jvK ij 1\n" + "clF ch 1\n" + "xmD me 1\n" + "Ypz sz 1\n" + "pFy pr 1\n" + "hvF th 1\n" + "mtW th 1\n" + "hqG th 1\n" + "kvN ka 1\n" + "tcZ th 1\n" + "tkR th 1\n" + "pdH de 1\n" + "qEs qu 1\n" + "Zcw ch 1\n" + "Vwu un 1\n" + "gXz ng 1\n" + "mWj ij 1\n" + "mWv va 1\n" + "Jqx qu 1\n" + "oSj on 1\n" + "lwY le 1\n" + "Tkf ka 1\n" + "pcC ch 1\n" + "ohG th 1\n" + "dzG de 1\n" + "fdN de 1\n" + "xrS er 1\n" + "hHk th 1\n" + "Fjz sz 1\n" + "vbZ va 1\n" + "Udx de 1\n" + "wzX sz 1\n" + "uNq qu 1\n" + "wfZ wa 1\n" + "swB st 1\n" + "dmQ de 1\n" + "dcA ch 1\n" + "qzP qu 1\n" + "jJj ij 1\n" + "qWq qu 1\n" + "tVk th 1\n" + "gwB ng 1\n" + "bIw wa 1\n" + "bpU pr 1\n" + "bwM wa 1\n" + "fkA ka 1\n" + "xUc ch 1\n" + "xTd de 1\n" + "fKl le 1\n" + "lxS le 1\n" + "xaS an 1\n" + "yvQ va 1\n" + "dhV th 1\n" + "mdW de 1\n" + "wfJ wa 1\n" + "Wqq qu 1\n" + "sZj st 1\n" + "Lxy ny 1\n" + "xXy ny 1\n" + "qDm qu 1\n" + "gKq qu 1\n" + "Qvj ij 1\n" + "kfH ka 1\n" + "aQp an 1\n" + "xFz sz 1\n" + "njW an 1\n" + "Rpn an 1\n" + "Mmn an 1\n" + "fhD th 1\n" + "jKk ij 1\n" + "zAq qu 1\n" + "qfL qu 1\n" + "ywN wa 1\n" + "qpz qu 1\n" + "hxP th 1\n" + "Gdq qu 1\n" + "tMx th 1\n" + "jwL ij 1\n" + "kBb ka 1\n" + "fAw wa 1\n" + "Sdx de 1\n" + "Jmv va 1\n" + "bgX ng 1\n" + "xWp pr 1\n" + "hHt th 1\n" + "Gww wa 1\n" + "Fbb be 1\n" + "zoT on 1\n" + "yjG ij 1\n" + "Rlg ng 1\n" + "vFn an 1\n" + "zcK ch 1\n" + "xdC de 1\n" + "wvO va 1\n" + "oQl le 1\n" + "nIw an 1\n" + "wzA sz 1\n" + "Rzj sz 1\n" + "Qzn an 1\n" + "Yjt th 1\n" + "xkQ ku 1\n" + "lrq qu 1\n" + "nwZ an 1\n" + "pGk ka 1\n" + "mnL an 1\n" + "Rlq qu 1\n" + "ccD ch 1\n" + "rRd er 1\n" + "Ofj ij 1\n" + "Fjh th 1\n" + "uuO qu 1\n" + "zZx sz 1\n" + "Nbj ij 1\n" + "znW an 1\n" + "jbH ij 1\n" + "rDx er 1\n" + "Qmc ch 
1\n" + "dwV de 1\n" + "Oqv qu 1\n" + "Zqe qu 1\n" + "fwI wa 1\n" + "njP an 1\n" + "Oqq qu 1\n" + "pVv va 1\n" + "fqx qu 1\n" + "gfO ng 1\n" + "hqU th 1\n" + "gDj ng 1\n" + "Tmj ij 1\n" + "vcK ch 1\n" + "qmV qu 1\n" + "sVx st 1\n" + "Wfh th 1\n" + "mJk ka 1\n" + "fuK qu 1\n" + "bfN be 1\n" + "qfT qu 1\n" + "Fmj ij 1\n" + "tbN th 1\n" + "kjN ij 1\n" + "yhZ th 1\n" + "Nxk ka 1\n" + "wxU wa 1\n" + "zXb sz 1\n" + "Nzd de 1\n" + "ohL th 1\n" + "pVt th 1\n" + "Zsx st 1\n" + "Zqj qu 1\n" + "wUj ij 1\n" + "yjC ij 1\n" + "kTn an 1\n" + "vqV qu 1\n" + "Fyc ch 1\n" + "Icd ch 1\n" + "svN st 1\n" + "Jjv ij 1\n" + "bVp pr 1\n" + "fdI de 1\n" + "nbX an 1\n" + "cfU ch 1\n" + "lGm le 1\n" + "Ovg ng 1\n" + "zDc ch 1\n" + "jgq qu 1\n" + "lYr er 1\n" + "hjR th 1\n" + "qPm qu 1\n" + "iRq qu 1\n" + "Zrx er 1\n" + "wpT pr 1\n" + "xsB st 1\n" + "qxT qu 1\n" + "gFx ng 1\n" + "qoJ qu 1\n" + "smD st 1\n" + "lbM le 1\n" + "wCc ch 1\n" + "wFm me 1\n" + "Xlv le 1\n" + "zyU sz 1\n" + "vFk ka 1\n" + "tjR th 1\n" + "iYx in 1\n" + "uJk qu 1\n" + "Qeh th 1\n" + "Xrv er 1\n" + "Bqq qu 1\n" + "Vdb de 1\n" + "znR an 1\n" + "pmL me 1\n" + "tvH th 1\n" + "Tmd de 1\n" + "Dgb ng 1\n" + "ozO on 1\n" + "fQb be 1\n" + "Pqb qu 1\n" + "qYn an 1\n" + "xPm me 1\n" + "gWf ng 1\n" + "cCv ch 1\n" + "qeP qu 1\n" + "qZm qu 1\n" + "dgZ ng 1\n" + "mjO ij 1\n" + "gCw ng 1\n" + "svQ st 1\n" + "Rqq qu 1\n" + "Qbt th 1\n" + "Lkj ij 1\n" + "Fza an 1\n" + "jlB le 1\n" + "iWj in 1\n" + "Zxi in 1\n" + "Kxw wa 1\n" + "jcJ ij 1\n" + "uCf qu 1\n" + "cAx ch 1\n" + "Vjw ij 1\n" + "vUs st 1\n" + "Mnq an 1\n" + "jjM ij 1\n" + "vUx va 1\n" + "uZr qu 1\n" + "twU th 1\n" + "Ytv th 1\n" + "hRp th 1\n" + "kzV sz 1\n" + "mvY va 1\n" + "jFj ij 1\n" + "jBp ij 1\n" + "kGz sz 1\n" + "qUq qu 1\n" + "qgR qu 1\n" + "lWb le 1\n" + "wwP wa 1\n" + "wvE va 1\n" + "Fsx st 1\n" + "Izx sz 1\n" + "bwC wa 1\n" + "Fmq qu 1\n" + "cLd ch 1\n" + "bRl le 1\n" + "iXf in 1\n" + "yMq qu 1\n" + "cqP ch 1\n" + "jsL st 1\n" + "jIq qu 1\n" + "wuG qu 1\n" + "Lbv va 1\n" 
+ "Eqf qu 1\n" + "Ogf ng 1\n" + "kGv ka 1\n" + "pjK ij 1\n" + "vcQ ch 1\n" + "Xzh th 1\n" + "jUv ij 1\n" + "wGd de 1\n" + "hmX th 1\n" + "yqm qu 1\n" + "qkE qu 1\n" + "zgX ng 1\n" + "vwO va 1\n" + "wmS me 1\n" + "vhT th 1\n" + "syX st 1\n" + "nbC an 1\n" + "zgW ng 1\n" + "vqM qu 1\n" + "dWf de 1\n" + "cwF ch 1\n" + "dnF an 1\n" + "qDi qu 1\n" + "qSw qu 1\n" + "jQf ij 1\n" + "crZ ch 1\n" + "qGl qu 1\n" + "Wxu qu 1\n" + "grW ng 1\n" + "glX ng 1\n" + "vFd de 1\n" + "pbF pr 1\n" + "bNf be 1\n" + "Qcf ch 1\n" + "fVx fo 1\n" + "pPf pr 1\n" + "pVq qu 1\n" + "xlG le 1\n" + "Dwj ij 1\n" + "xQj ij 1\n" + "lkQ le 1\n" + "sqH qu 1\n" + "Yyx ny 1\n" + "vFm va 1\n" + "tQo th 1\n" + "zlU le 1\n" + "vlW le 1\n" + "glW ng 1\n" + "qmW qu 1\n" + "aWl an 1\n" + "zmV sz 1\n" + "gLm ng 1\n" + "glB ng 1\n" + "tqA th 1\n" + "hgJ th 1\n" + "cGb ch 1\n" + "qwE qu 1\n" + "Ffy ny 1\n" + "wmL me 1\n" + "xLh th 1\n" + "sbE st 1\n" + "bQl le 1\n" + "xkR ka 1\n" + "yFd de 1\n" + "Omq qu 1\n" + "Xfj ij 1\n" + "wJj ij 1\n" + "Lws st 1\n" + "wfU wa 1\n" + "zfk sz 1\n" + "lNv le 1\n" + "ykQ ka 1\n" + "xDt th 1\n" + "jDw ij 1\n" + "zbx sz 1\n" + "vQs st 1\n" + "vvM va 1\n" + "Xqq qu 1\n" + "jLq qu 1\n" + "zkZ sz 1\n" + "qAg qu 1\n" + "Xjw ij 1\n" + "cFw ch 1\n" + "rwQ er 1\n" + "mWk ka 1\n" + "Yrx er 1\n" + "eUo er 1\n" + "uDm qu 1\n" + "Mhw th 1\n" + "fGp pr 1\n" + "Rpz sz 1\n" + "sbF st 1\n" + "nfX an 1\n" + "Wfu qu 1\n" + "Mwq qu 1\n" + "qDj qu 1\n" + "Wpw pr 1\n" + "zFv sz 1\n" + "qXc ch 1\n" + "qsT qu 1\n" + "pZh th 1\n" + "lLc ch 1\n" + "pqB qu 1\n" + "Xjo on 1\n" + "kDk ka 1\n" + "Jxf fo 1\n" + "Vqz qu 1\n" + "Hvq qu 1\n" + "Zqw qu 1\n" + "kRc ch 1\n" + "tvR th 1\n" + "dNx de 1\n" + "jWq qu 1\n" + "nRw an 1\n" + "rGb er 1\n" + "vZz sz 1\n" + "Xtz th 1\n" + "kZn an 1\n" + "Vmj ij 1\n" + "dMp de 1\n" + "cPy ch 1\n" + "uzR qu 1\n" + "yjE ij 1\n" + "gzF ng 1\n" + "tCp th 1\n" + "qfC qu 1\n" + "vcq ch 1\n" + "Zfg ng 1\n" + "kwC ka 1\n" + "fkM ko 1\n" + "vJh th 1\n" + "eCq qu 1\n" + "wPp pr 1\n" + 
"qJy qu 1\n" + "dmY de 1\n" + "uMj qu 1\n" + "fKh th 1\n" + "sqU qu 1\n" + "vNp va 1\n" + "Crj er 1\n" + "hsH th 1\n" + "Vwn an 1\n" + "Sdy de 1\n" + "Fpw pr 1\n" + "Wcq ch 1\n" + "pjW ij 1\n" + "dwW de 1\n" + "gjX ng 1\n" + "yZk ka 1\n" + "cKg ch 1\n" + "xdR de 1\n" + "wqW qu 1\n" + "khD th 1\n" + "vgG ng 1\n" + "vMl le 1\n" + "qnQ an 1\n" + "hJt th 1\n" + "fvC va 1\n" + "cpR ch 1\n" + "Wtt th 1\n" + "uyX qu 1\n" + "cXf ch 1\n" + "uKv qu 1\n" + "gVv ng 1\n" + "xzg ng 1\n" + "cPq ch 1\n" + "fTn an 1\n" + "sFj st 1\n" + "mzX sz 1\n" + "gMq qu 1\n" + "rxI er 1\n" + "eYf er 1\n" + "kwB ka 1\n" + "eQk er 1\n" + "jBq qu 1\n" + "lbH le 1\n" + "qCt th 1\n" + "Wnv an 1\n" + "gYd ng 1\n" + "Zxe er 1\n" + "fZj ij 1\n" + "Hgj ng 1\n" + "bRj ij 1\n" + "fpR pr 1\n" + "cbR ch 1\n" + "lqT qu 1\n" + "cMt th 1\n" + "tQy to 1\n" + "vxG va 1\n" + "gpB ng 1\n" + "Gkw ka 1\n" + "zqX qu 1\n" + "tPw th 1\n" + "fnN an 1\n" + "Gkp ka 1\n" + "mvQ va 1\n" + "hHf th 1\n" + "wfS wa 1\n" + "qCx qu 1\n" + "mqH qu 1\n" + "hgR th 1\n" + "Mwg ng 1\n" + "bqQ qu 1\n" + "Fkz sz 1\n" + "oFv on 1\n" + "Ddq qu 1\n" + "uIo qu 1\n" + "Yfh th 1\n" + "ygQ ng 1\n" + "fxh th 1\n" + "Zqd qu 1\n" + "Htn th 1\n" + "Gvz sz 1\n" + "zRw sz 1\n" + "vCb va 1\n" + "rjT ro 1\n" + "rjD er 1\n" + "Qpm me 1\n" + "Xdb de 1\n" + "Lkf ka 1\n" + "Ajx ij 1\n" + "Ylz le 1\n" + "Qtb th 1\n" + "bHz sz 1\n" + "bDg ng 1\n" + "Lqx qu 1\n" + "yhW th 1\n" + "zLv sz 1\n" + "xgK ng 1\n" + "eWq qu 1\n" + "sjS st 1\n" + "qVe qu 1\n" + "Okq qu 1\n" + "Ewj ij 1\n" + "Dsv st 1\n" + "jhI th 1\n" + "xGf fo 1\n" + "Okx ka 1\n" + "Fqx qu 1\n" + "dPv de 1\n" + "zsK st 1\n" + "qLn an 1\n" + "fkB ka 1\n" + "cCb ch 1\n" + "gNp ng 1\n" + "Qwd de 1\n" + "zTf sz 1\n" + "Pqq qu 1\n" + "rFv ro 1\n" + "Rwt th 1\n" + "uKc ch 1\n" + "hqN th 1\n" + "kmK ka 1\n" + "wuC qu 1\n" + "pnZ an 1\n" + "tgM th 1\n" + "Qds st 1\n" + "Axq qu 1\n" + "xwO wa 1\n" + "eQg ng 1\n" + "mFj ij 1\n" + "Dpm me 1\n" + "pQm me 1\n" + "aFp an 1\n" + "mfB me 1\n" + "fpA pr 1\n" + "jgZ 
ng 1\n" + "lGk le 1\n" + "xcA ch 1\n" + "gWw ng 1\n" + "lzF le 1\n" + "xsQ st 1\n" + "bQx be 1\n" + "wjc ch 1\n" + "bDc ch 1\n" + "Wpz sz 1\n" + "rfV er 1\n" + "Zbs st 1\n" + "hKq th 1\n" + "qXa ar 1\n" + "wjA ij 1\n" + "vzS sz 1\n" + "cWy ch 1\n" + "gjK ng 1\n" + "yRb be 1\n" + "qgU qu 1\n" + "pqF qu 1\n" + "qnU an 1\n" + "Zqc ch 1\n" + "Xqg qu 1\n" + "zLq qu 1\n" + "gzV ng 1\n" + "Kqs qu 1\n" + "zgZ ng 1\n" + "jqG qu 1\n" + "pqJ qu 1\n" + "Ieq qu 1\n" + "hjH th 1\n" + "vmN va 1\n" + "iuF qu 1\n" + "wGy wa 1\n" + "Kdh th 1\n" + "hQb th 1\n" + "jWr er 1\n" + "Cxy ny 1\n" + "Kqz qu 1\n" + "wXr er 1\n" + "xoQ on 1\n" + "wBh th 1\n" + "qyI qu 1\n" + "qhC th 1\n" + "Vpy pr 1\n" + "nJb an 1\n" + "uGw qu 1\n" + "hhX th 1\n" + "mjS ij 1\n" + "Scv ch 1\n" + "hFw th 1\n" + "bKg ng 1\n" + "Xmn an 1\n" + "bdT de 1\n" + "sJq qu 1\n" + "xTm me 1\n" + "qjz qu 1\n" + "Mqp qu 1\n" + "dHp de 1\n" + "rRn ar 1\n" + "Xlf le 1\n" + "cNs ch 1\n" + "Xql qu 1\n" + "iFz in 1\n" + "Nlk le 1\n" + "sPw st 1\n" + "vWq qu 1\n" + "wXt th 1\n" + "Fnq an 1\n" + "ozJ on 1\n" + "zIg ng 1\n" + "lSf le 1\n" + "wRc ch 1\n" + "Bvp va 1\n" + "Wwr er 1\n" + "pWg pr 1\n" + "pLk ka 1\n" + "krJ er 1\n" + "Zfv va 1\n" + "yIx ny 1\n" + "oKx on 1\n" + "qLb qu 1\n" + "dHj de 1\n" + "oqK qu 1\n" + "cxC ch 1\n" + "wJh th 1\n" + "wZd de 1\n" + "cWz ch 1\n" + "yqS qu 1\n" + "kXq qu 1\n" + "fYd de 1\n" + "dGy de 1\n" + "dDt th 1\n" + "pKg ng 1\n" + "Xjd de 1\n" + "sjM st 1\n" + "sfC st 1\n" + "dMh th 1\n" + "dZp de 1\n" + "wcD ch 1\n" + "Qoj on 1\n" + "gxC ng 1\n" + "Zfn an 1\n" + "hYv th 1\n" + "xWq qu 1\n" + "gZw ng 1\n" + "pQi in 1\n" + "Xlb le 1\n" + "gQz ng 1\n" + "nbZ an 1\n" + "Ezx sz 1\n" + "wNg ng 1\n" + "Xrj er 1\n" + "cxX ch 1\n" + "dQp de 1\n" + "Ypn an 1\n" + "pNp pr 1\n" + "pbQ pr 1\n" + "gMv ng 1\n" + "qeF qu 1\n" + "uVv qu 1\n" + "dVk de 1\n" + "uMv qu 1\n" + "jQn an 1\n" + "mhP th 1\n" + "iTb in 1\n" + "Pvw va 1\n" + "zCw sz 1\n" + "wcR ch 1\n" + "svU st 1\n" + "nMz an 1\n" + "cjE ch 1\n" + "jmH ij 
1\n" + "Qzc ch 1\n" + "mqc ch 1\n" + "qlU qu 1\n" + "Zvp va 1\n" + "xHl le 1\n" + "gqB qu 1\n" + "xsN st 1\n" + "kCj ij 1\n" + "Olx le 1\n" + "Gxw wa 1\n" + "xwV wa 1\n" + "fPb be 1\n" + "Rhv th 1\n" + "pgV ng 1\n" + "Qdp de 1\n" + "zFs st 1\n" + "klQ le 1\n" + "yJd de 1\n" + "rxE er 1\n" + "uHv qu 1\n" + "wKl le 1\n" + "wpJ pr 1\n" + "Cjr er 1\n" + "tYg th 1\n" + "Vpz sz 1\n" + "Zxh th 1\n" + "pQl le 1\n" + "Fxe er 1\n" + "Qok on 1\n" + "plK le 1\n" + "lpX le 1\n" + "jdP de 1\n" + "Zqy qu 1\n" + "yRz sz 1\n" + "nDg an 1\n" + "kqL qu 1\n" + "ugW qu 1\n" + "Mbf be 1\n" + "Kql qu 1\n" + "Nqw qu 1\n" + "Jzw sz 1\n" + "sGn an 1\n" + "wDv va 1\n" + "Jjk ij 1\n" + "ztQ th 1\n" + "hwP th 1\n" + "wDp pr 1\n" + "gfG ng 1\n" + "qhL th 1\n" + "cUv ch 1\n" + "Wbk ka 1\n" + "fkF ko 1\n" + "Pqv qu 1\n" + "nbK an 1\n" + "qSz qu 1\n" + "vwI va 1\n" + "cFc ch 1\n" + "qfG qu 1\n" + "rhF th 1\n" + "xzl le 1\n" + "dNc ch 1\n" + "zwR sz 1\n" + "wzK sz 1\n" + "bQa an 1\n" + "hLq th 1\n" + "fUv va 1\n" + "rHg ng 1\n" + "uJj qu 1\n" + "Fhz th 1\n" + "Nzm sz 1\n" + "gRz ng 1\n" + "qXf qu 1\n" + "Tzm sz 1\n" + "Zkx ka 1\n" + "hLx th 1\n" + "Ukd de 1\n" + "fMf fo 1\n" + "vGp va 1\n" + "jtI th 1\n" + "hxE th 1\n" + "jrH er 1\n" + "Fgh th 1\n" + "dlF le 1\n" + "jcO ja 1\n" + "sCw st 1\n" + "Bqh th 1\n" + "kZy ka 1\n" + "fOh th 1\n" + "rJb er 1\n" + "rjV er 1\n" + "Kwq qu 1\n" + "Hcw ch 1\n" + "mCw ma 1\n" + "hxM th 1\n" + "jTb ij 1\n" + "mmQ me 1\n" + "pjR ij 1\n" + "cdP ch 1\n" + "Zjs st 1\n" + "jqF qu 1\n" + "vMn an 1\n" + "Mqs qu 1\n" + "svX st 1\n" + "iXn an 1\n" + "nwR an 1\n" + "ytR th 1\n" + "Vjb ij 1\n" + "Cjl le 1\n" + "pXd de 1\n" + "Gwu qu 1\n" + "qIj qu 1\n" + "kQn an 1\n" + "fYm me 1\n" + "vtZ th 1\n" + "Usx st 1\n" + "nfP an 1\n" + "dQx de 1\n" + "oXf on 1\n" + "fEw wa 1\n" + "sgX ng 1\n" + "cPp ch 1\n" + "ybW be 1\n" + "kcW ch 1\n" + "kHf ka 1\n" + "vcU ch 1\n" + "tXo th 1\n" + "Kzh th 1\n" + "Cfq qu 1\n" + "Ujy ij 1\n" + "Fxa an 1\n" + "hxS th 1\n" + "tWx th 1\n" + "mlK le 1\n" 
+ "nZj an 1\n" + "qOv qu 1\n" + "Xkt th 1\n" + "Fzf sz 1\n" + "uTd qu 1\n" + "qrS qu 1\n" + "Ptw th 1\n" + "dDs st 1\n" + "rNm er 1\n" + "Ewf wa 1\n" + "hJk th 1\n" + "Hdq qu 1\n" + "Jtw th 1\n" + "kqc ch 1\n" + "nHq an 1\n" + "rhH th 1\n" + "oqH qu 1\n" + "vpZ va 1\n" + "Dgd ng 1\n" + "qxV qu 1\n" + "Cxv va 1\n" + "plV pr 1\n" + "kIi in 1\n" + "Khc th 1\n" + "jsY st 1\n" + "fLh th 1\n" + "Ykq qu 1\n" + "Qmx me 1\n" + "zvI sz 1\n" + "yhS th 1\n" + "qfg qu 1\n" + "wxZ wa 1\n" + "jVy ij 1\n" + "kQw ka 1\n" + "zXv sz 1\n" + "Lhs th 1\n" + "Mkq qu 1\n" + "jkU ij 1\n" + "Yhq th 1\n" + "zrH er 1\n" + "vhG va 1\n" + "drD er 1\n" + "Psj st 1\n" + "gDf ng 1\n" + "Xjj ij 1\n" + "pLm me 1\n" + "klC le 1\n" + "hTx th 1\n" + "zrJ er 1\n" + "Xgk ng 1\n" + "Wxf fo 1\n" + "fdD de 1\n" + "jHp ij 1\n" + "yDw wa 1\n" + "kPv ka 1\n" + "Rkm ka 1\n" + "mzg ng 1\n" + "lHz le 1\n" + "vpR va 1\n" + "wZt th 1\n" + "pBd de 1\n" + "qPf qu 1\n" + "hNw th 1\n" + "Nvj ij 1\n" + "pyU pr 1\n" + "Sjh th 1\n" + "Kzx sz 1\n" + "oQp on 1\n" + "xdL de 1\n" + "dnZ an 1\n" + "qfB qu 1\n" + "kJc ch 1\n" + "fWn an 1\n" + "Xmc ch 1\n" + "rGx er 1\n" + "sFf st 1\n" + "Vwv va 1\n" + "tKd th 1\n" + "sQx st 1\n" + "oNm on 1\n" + "uXj qu 1\n" + "Xsq qu 1\n" + "yWc ch 1\n" + "hfC th 1\n" + "Ijd de 1\n" + "dkW de 1\n" + "Nxn an 1\n" + "juC qu 1\n" + "bPy be 1\n" + "lKs le 1\n" + "aLq an 1\n" + "jPp ij 1\n" + "wpZ pr 1\n" + "fjE ij 1\n" + "zNt th 1\n" + "mhN th 1\n" + "bQn an 1\n" + "bxB be 1\n" + "fdX de 1\n" + "Jcv va 1\n" + "Fdp de 1\n" + "wVx wa 1\n" + "tmU th 1\n" + "njJ an 1\n" + "qzK qu 1\n" + "jtD th 1\n" + "bcX ch 1\n" + "Ghx th 1\n" + "xZj ij 1\n" + "vKw va 1\n" + "pvO va 1\n" + "gXs ng 1\n" + "wRv va 1\n" + "hgN th 1\n" + "gpO ng 1\n" + "hWc th 1\n" + "Upq qu 1\n" + "vwD va 1\n" + "mxE me 1\n" + "Zvm va 1\n" + "ozM on 1\n" + "fbJ be 1\n" + "tpQ th 1\n" + "yeV er 1\n" + "Znb an 1\n" + "wXv va 1\n" + "bcY ch 1\n" + "sgZ ng 1\n" + "qfM qu 1\n" + "fcL ch 1\n" + "mXl le 1\n" + "uBq qu 1\n" + "jxW ij 1\n" + 
"mtU th 1\n" + "qgJ qu 1\n" + "dAq qu 1\n" + "jBv ij 1\n" + "Gty th 1\n" + "Jfm me 1\n" + "xqQ qu 1\n" + "cBp ch 1\n" + "Xqd qu 1\n" + "fvM va 1\n" + "uWm qu 1\n" + "rSb er 1\n" + "Xqj qu 1\n" + "qTd qu 1\n" + "lLg ng 1\n" + "Jrp er 1\n" + "oJb on 1\n" + "pXy pr 1\n" + "zrQ er 1\n" + "cnT ch 1\n" + "qsE qu 1\n" + "pZc ch 1\n" + "bVy be 1\n" + "qIz qu 1\n" + "dgR ng 1\n" + "mLv va 1\n" + "hVl th 1\n" + "qRj qu 1\n" + "fhA th 1\n" + "zLc ch 1\n" + "Sgq qu 1\n" + "pLc ch 1\n" + "Txq qu 1\n" + "ypY pr 1\n" + "tXz th 1\n" + "dcC ch 1\n" + "iYf in 1\n" + "Wwm me 1\n" + "kZk ka 1\n" + "Ywr er 1\n" + "gFv ng 1\n" + "Fmz sz 1\n" + "uQq qu 1\n" + "xwR wa 1\n" + "Yfc ch 1\n" + "aIo an 1\n" + "sBq qu 1\n" + "Gzb sz 1\n" + "jwI ij 1\n" + "cFf ch 1\n" + "aWv an 1\n" + "Eaw an 1\n" + "vkW ka 1\n" + "Nfh th 1\n" + "flN le 1\n" + "Lpm me 1\n" + "ylK le 1\n" + "Znr an 1\n" + "mcQ ch 1\n" + "kfE ka 1\n" + "Iyf ny 1\n" + "qrV qu 1\n" + "fPx fo 1\n" + "fgJ ng 1\n" + "jIi in 1\n" + "bPw wa 1\n" + "Qyx ny 1\n" + "Qnb an 1\n" + "Wdm de 1\n" + "nJt th 1\n" + "qCd qu 1\n" + "gZl ng 1\n" + "Nlz le 1\n" + "Zwh th 1\n" + "iWl in 1\n" + "bUu qu 1\n" + "lbJ le 1\n" + "sNq qu 1\n" + "qjU qu 1\n" + "wbT wa 1\n" + "yNc ch 1\n" + "mxM me 1\n" + "pHk ka 1\n" + "Rdq qu 1\n" + "gkE ng 1\n" + "hbN th 1\n" + "Tgq qu 1\n" + "gjV ng 1\n" + "Gjw ij 1\n" + "gqX qu 1\n" + "qXx qu 1\n" + "vQq qu 1\n" + "pNb pr 1\n" + "fJy ny 1\n" + "yvZ va 1\n" + "zNl le 1\n" + "zDb sz 1\n" + "lUz le 1\n" + "Dxy ny 1\n" + "Wwn an 1\n" + "hPn th 1\n" + "kNb ko 1\n" + "Wdb de 1\n" + "zXt th 1\n" + "pjL ij 1\n" + "tJg th 1\n" + "jmM ij 1\n" + "bXg ng 1\n" + "hTv th 1\n" + "Ysf st 1\n" + "hmQ th 1\n" + "Vyq qu 1\n" + "Fpd de 1\n" + "yQw wa 1\n" + "Pbn an 1\n" + "xVj ij 1\n" + "whP th 1\n" + "fSg ng 1\n" + "Gxz ze 1\n" + "Dfw wa 1\n" + "rMx er 1\n" + "zMf sz 1\n" + "vJw va 1\n" + "xJl le 1\n" + "xfN fo 1\n" + "dQw de 1\n" + "fuD qu 1\n" + "xjB ij 1\n" + "lPj le 1\n" + "mqA qu 1\n" + "mfM me 1\n" + "kwG ka 1\n" + "eaY an 1\n" + "Vmm 
me 1\n" + "zfS sz 1\n" + "Fmy me 1\n" + "sqP qu 1\n" + "fKk ka 1\n" + "Qdv de 1\n" + "djZ de 1\n" + "qrR qu 1\n" + "txK th 1\n" + "bxH be 1\n" + "jRb ij 1\n" + "cjD ch 1\n" + "Sxw wa 1\n" + "Sxh th 1\n" + "vrZ er 1\n" + "xmH me 1\n" + "dfH de 1\n" + "fJw wa 1\n" + "mwZ me 1\n" + "vRm va 1\n" + "xwj ij 1\n" + "Xqr er 1\n" + "Gvj ij 1\n" + "hzF th 1\n" + "xnK an 1\n" + "xhU th 1\n" + "Nls le 1\n" + "zbV sz 1\n" + "fTq qu 1\n" + "Wxv va 1\n" + "upG qu 1\n" + "qAo qu 1\n" + "kKx ka 1\n" + "zlD le 1\n" + "hTl th 1\n" + "Gqr qu 1\n" + "Gxm me 1\n" + "zPj sz 1\n" + "bvZ va 1\n" + "jHc ch 1\n" + "iXg ng 1\n" + "Kgz ng 1\n" + "Jyi in 1\n" + "vFh th 1\n" + "ytW th 1\n" + "qBd qu 1\n" + "Xjq qu 1\n" + "dgO ng 1\n" + "mjN ij 1\n" + "Djg ng 1\n" + "zIj sz 1\n" + "uDx qu 1\n" + "qJf qu 1\n" + "fAx fo 1\n" + "Fsj st 1\n" + "yDf ny 1\n" + "xjV ij 1\n" + "hdB th 1\n" + "dwG de 1\n" + "slW le 1\n" + "zYb sz 1\n" + "vzO sz 1\n" + "vqO qu 1\n" + "Jzv sz 1\n" + "xmG me 1\n" + "Kdw de 1\n" + "xVq qu 1\n" + "jtE th 1\n" + "kJy ka 1\n" + "xjW ij 1\n" + "mwR me 1\n" + "zVx sz 1\n" + "tMj th 1\n" + "qqb qu 1\n" + "nlQ le 1\n" + "bxQ be 1\n" + "hJv th 1\n" + "jnY an 1\n" + "yfS ny 1\n" + "Mdw de 1\n" + "zZc ch 1\n" + "ysJ st 1\n" + "Qqv qu 1\n" + "zxl le 1\n" + "jAq qu 1\n" + "lJw le 1\n" + "kwJ ka 1\n" + "sxC st 1\n" + "hJr th 1\n" + "xGp pr 1\n" + "ccF ch 1\n" + "vGq qu 1\n" + "qSc ch 1\n" + "fqq qu 1\n" + "kkV ka 1\n" + "gVq qu 1\n" + "Wqg qu 1\n" + "kJp ka 1\n" + "Wlr er 1\n" + "Jwz sz 1\n" + "qEa an 1\n" + "krL er 1\n" + "tqE th 1\n" + "eJz er 1\n" + "Whx th 1\n" + "vWw va 1\n" + "Qzh th 1\n" + "pcF ch 1\n" + "Vmx me 1\n" + "dvC de 1\n" + "qjZ qu 1\n" + "pkF ka 1\n" + "cvO ch 1\n" + "Qyv va 1\n" + "hNs th 1\n" + "snJ an 1\n" + "yjU ij 1\n" + "Yfq qu 1\n" + "xLw wa 1\n" + "rVz er 1\n" + "gOw ng 1\n" + "fxL fo 1\n" + "snW an 1\n" + "yWk ka 1\n" + "wgK ng 1\n" + "aTf an 1\n" + "eVf er 1\n" + "vZp va 1\n" + "uVp qu 1\n" + "Vjh th 1\n" + "zwT sz 1\n" + "wSn an 1\n" + "nNp an 1\n" + "gfF ng 
1\n" + "hcW th 1\n" + "gTf ng 1\n" + "qaJ an 1\n" + "kzY sz 1\n" + "ljX le 1\n" + "wMm me 1\n" + "btB st 1\n" + "zfE sz 1\n" + "bxO be 1\n" + "wPc ch 1\n" + "fgK ng 1\n" + "fzW sz 1\n" + "dcX ch 1\n" + "qqR qu 1\n" + "kjq qu 1\n" + "vMh th 1\n" + "gZj ng 1\n" + "qtw th 1\n" + "vkY ka 1\n" + "lCb le 1\n" + "dpO de 1\n" + "mXm me 1\n" + "vWc ch 1\n" + "fOq qu 1\n" + "Vgy ng 1\n" + "dkD de 1\n" + "fQh th 1\n" + "vIq qu 1\n" + "lZr er 1\n" + "zKn an 1\n" + "Vpt th 1\n" + "Dmw me 1\n" + "Nwf wa 1\n" + "kYl le 1\n" + "jpJ ij 1\n" + "qXi qu 1\n" + "Bnj an 1\n" + "xfK fo 1\n" + "fCc ch 1\n" + "vPd de 1\n" + "Qnp an 1\n" + "ypW pr 1\n" + "uwJ qu 1\n" + "Pvb va 1\n" + "cnC ch 1\n" + "hvA th 1\n" + "hGz th 1\n" + "nZx an 1\n" + "kbS ka 1\n" + "Swx wa 1\n" + "hvP th 1\n" + "kqG qu 1\n" + "bLq qu 1\n" + "qjP qu 1\n" + "sUo on 1\n" + "lDq qu 1\n" + "Zlp le 1\n" + "dwQ de 1\n" + "dlN le 1\n" + "fTl le 1\n" + "Npv va 1\n" + "bMn an 1\n" + "dNz sz 1\n" + "efV er 1\n" + "aCw an 1\n" + "aWf an 1\n" + "Lqo qu 1\n" + "fzT sz 1\n" + "Jjr er 1\n" + "zvK sz 1\n" + "nwT an 1\n" + "fXr er 1\n" + "cGm ch 1\n" + "lvS le 1\n" + "qDq qu 1\n" + "qRm qu 1\n" + "vYt th 1\n" + "iQv in 1\n" + "fkH ka 1\n" + "fcO ch 1\n" + "rNn an 1\n" + "qmS qu 1\n" + "kzR sz 1\n" + "Dfc ch 1\n" + "qUs qu 1\n" + "xqP qu 1\n" + "sXk st 1\n" + "Xyt th 1\n" + "pWt th 1\n" + "jbL ij 1\n" + "jYd ij 1\n" + "kqV qu 1\n" + "Fqm qu 1\n" + "xoX on 1\n" + "zuX qu 1\n" + "xUq qu 1\n" + "cgC ch 1\n" + "wBq qu 1\n" + "gQp ng 1\n" + "jnE an 1\n" + "yZs st 1\n" + "fkD ka 1\n" + "sVk st 1\n" + "qyX qu 1\n" + "cBf ch 1\n" + "Cjy ij 1\n" + "dPq qu 1\n" + "wDg ng 1\n" + "dxB de 1\n" + "Dkm ka 1\n" + "kPp ka 1\n" + "hWz th 1\n" + "Bjv ij 1\n" + "Izf sz 1\n" + "Hnk an 1\n" + "rQc ch 1\n" + "Jwu qu 1\n" + "fbP be 1\n" + "frQ er 1\n" + "Aov on 1\n" + "yqQ qu 1\n" + "jfY ij 1\n" + "xsH st 1\n" + "zxh th 1\n" + "Jbj ij 1\n" + "Mjz sz 1\n" + "gRp ng 1\n" + "Gvw va 1\n" + "mzF sz 1\n" + "oqF qu 1\n" + "ejU er 1\n" + "xmQ me 1\n" + "hOq th 1\n" 
+ "pwX pr 1\n" + "zgK ng 1\n" + "wLk ka 1\n" + "fqc ch 1\n" + "dPm de 1\n" + "tCg th 1\n" + "qrF qu 1\n" + "pWl le 1\n" + "rDf er 1\n" + "Ynw an 1\n" + "jnQ an 1\n" + "tFb th 1\n" + "rpU er 1\n" + "pPj ij 1\n" + "yjM ij 1\n" + "jmY ij 1\n" + "Cpz sz 1\n" + "uDn an 1\n" + "uqY qu 1\n" + "Pjx ij 1\n" + "qFv qu 1\n" + "Ktf th 1\n" + "Jcj ch 1\n" + "kpO pr 1\n" + "pgZ ng 1\n" + "kfO ka 1\n" + "tZv th 1\n" + "jHq qu 1\n" + "cRq ch 1\n" + "zDm sz 1\n" + "lPm le 1\n" + "svP st 1\n" + "qkx qu 1\n" + "bNp pr 1\n" + "Kjq qu 1\n" + "vqS qu 1\n" + "fQp pr 1\n" + "txR th 1\n" + "Hpf pr 1\n" + "iQg ng 1\n" + "vvP va 1\n" + "iGf in 1\n" + "tjI th 1\n" + "pWn an 1\n" + "Qqg qu 1\n" + "qiF ti 1\n" + "Zzr er 1\n" + "aYf an 1\n" + "zjA sz 1\n" + "kwR ka 1\n" + "gkM ng 1\n" + "Cjf ij 1\n" + "zgM ng 1\n" + "Rxk ka 1\n" + "bCd de 1\n" + "Ypv va 1\n" + "wyE wa 1\n" + "iyB in 1\n" + "hQp th 1\n" + "ipQ in 1\n" + "Ucj ch 1\n" + "qkW qu 1\n" + "krK er 1\n" + "Hpp pr 1\n" + "xnN an 1\n" + "jwB ij 1\n" + "Zdm de 1\n" + "mYj ij 1\n" + "tQx th 1\n" + "qwS qu 1\n" + "Hxo on 1\n" + "qDx qu 1\n" + "cXd ch 1\n" + "gdO ng 1\n" + "aEo an 1\n" + "Twd de 1\n" + "avQ an 1\n" + "lhZ th 1\n" + "lzV le 1\n" + "bHf be 1\n" + "bJn an 1\n" + "Uqz qu 1\n" + "uFy qu 1\n" + "jNl le 1\n" + "xBp pr 1\n" + "dRb de 1\n" + "nlT an 1\n" + "wrO er 1\n" + "lzW le 1\n" + "fYf fo 1\n" + "mRw me 1\n" + "rXy er 1\n" + "qyR qu 1\n" + "fGv va 1\n" + "Uwk ka 1\n" + "kXm ka 1\n" + "hJy th 1\n" + "Xgv ng 1\n" + "xYv va 1\n" + "yYd de 1\n" + "xzC sz 1\n" + "gjB ng 1\n" + "jzI sz 1\n" + "zrO er 1\n" + "tqF th 1\n" + "vwM va 1\n" + "zCq qu 1\n" + "ljL le 1\n" + "vnZ an 1\n" + "eDq qu 1\n" + "Qvq qu 1\n" + "pfL pr 1\n" + "iRb in 1\n" + "gdR ng 1\n" + "qAv qu 1\n" + "vnL an 1\n" + "mkT ka 1\n" + "pVk ka 1\n" + "xKh th 1\n" + "jNk ij 1\n" + "jLt th 1\n" + "cNp ch 1\n" + "tmP th 1\n" + "vVt th 1\n" + "qfP qu 1\n" + "Uqo qu 1\n" + "Dnp an 1\n" + "yGb be 1\n" + "sHd st 1\n" + "pwF pr 1\n" + "fPy ny 1\n" + "Drq qu 1\n" + "bJh th 1\n" + 
"sQp st 1\n" + "Iws st 1\n" + "uCw qu 1\n" + "Lwj ij 1\n" + "rFw er 1\n" + "sJp st 1\n" + "xiI in 1\n" + "Rqv qu 1\n" + "bkQ ka 1\n" + "qNp qu 1\n" + "dYl le 1\n" + "Vmf me 1\n" + "lYc ch 1\n" + "oPw on 1\n" + "kjO ij 1\n" + "mKb me 1\n" + "fDf fo 1\n" + "fFb be 1\n" + "Vhv th 1\n" + "Hjq qu 1\n" + "qfK qu 1\n" + "Kjp ij 1\n" + "vTg ng 1\n" + "pBq qu 1\n" + "Htd th 1\n" + "pNd de 1\n" + "bQv va 1\n" + "aSx an 1\n" + "jwx ij 1\n" + "Uyx ny 1\n" + "wVj ij 1\n" + "Ioq qu 1\n" + "Nhm th 1\n" + "Hqh th 1\n" + "rUq qu 1\n" + "bBx be 1\n" + "Gqb qu 1\n" + "Ccw ch 1\n" + "hZw th 1\n" + "Qbl le 1\n" + "xFv va 1\n" + "sZv st 1\n" + "qzY qu 1\n" + "pDb pr 1\n" + "cfR ch 1\n" + "rqk qu 1\n" + "fzP sz 1\n" + "hqO th 1\n" + "pzH sz 1\n" + "qSj qu 1\n" + "pxJ pr 1\n" + "xbq qu 1\n" + "sXf st 1\n" + "ybT be 1\n" + "sHn an 1\n" + "vTz sz 1\n" + "Pgf ng 1\n" + "hKw th 1\n" + "jPj ij 1\n" + "wTx wa 1\n" + "jSj ij 1\n" + "Fgz ng 1\n" + "bKk ka 1\n" + "eUj er 1\n" + "cDf ch 1\n" + "xFg ng 1\n" + "cnW an 1\n" + "tUy th 1\n" + "Jgx ng 1\n" + "yuF qu 1\n" + "vyQ va 1\n" + "xCz sz 1\n" + "jRh th 1\n" + "cXx ch 1\n" + "kGk ka 1\n" + "Xnh th 1\n" + "qPh th 1\n" + "lfZ le 1\n" + "qVa an 1\n" + "xws st 1\n" + "Dzt th 1\n" + "xfG fo 1\n" + "fXh th 1\n" + "jgV ng 1\n" + "vJj ij 1\n" + "bXj ij 1\n" + "cgG ch 1\n" + "vuW qu 1\n" + "txG th 1\n" + "Zxz sz 1\n" + "fNc ch 1\n" + "oBq qu 1\n" + "Wgv ng 1\n" + "Hwz sz 1\n" + "oaW an 1\n" + "vRg ng 1\n" + "uXz qu 1\n" + "fzQ sz 1\n" + "bcB ch 1\n" + "Bnw an 1\n" + "gvB ng 1\n" + "rQm er 1\n" + "cvU ch 1\n" + "xhR th 1\n" + "zxR sz 1\n" + "btZ th 1\n" + "Kkf ka 1\n" + "zJw sz 1\n" + "uwq qu 1\n" + "pSx pr 1\n" + "yRv va 1\n" + "nCq an 1\n" + "tGv th 1\n" + "wgT ng 1\n" + "kNz sz 1\n" + "oHk on 1\n" + "Wzw sz 1\n" + "hvU th 1\n" + "skX st 1\n" + "vYz sz 1\n" + "joZ on 1\n" + "nGq an 1\n" + "qmM qu 1\n" + "Bmr er 1\n" + "sVg ng 1\n" + "uCv qu 1\n" + "iXz in 1\n" + "vKp va 1\n" + "lEw le 1\n" + "hhF th 1\n" + "iwS in 1\n" + "qyU qu 1\n" + "jjY ij 1\n" + "Ygm 
ng 1\n" + "wJd de 1\n" + "eQp er 1\n" + "Yfb be 1\n" + "Wpg ng 1\n" + "jdS de 1\n" + "vmG va 1\n" + "mdT de 1\n" + "grZ ng 1\n" + "yqN qu 1\n" + "pBp po 1\n" + "fkZ ka 1\n" + "qeB qu 1\n" + "cGs ch 1\n" + "Eqg qu 1\n" + "cfO ch 1\n" + "uSx qu 1\n" + "Dhf th 1\n" + "Qjr er 1\n" + "xqZ qu 1\n" + "yQf ny 1\n" + "npY an 1\n" + "xDc ch 1\n" + "bmQ me 1\n" + "kMb ka 1\n" + "aqC an 1\n" + "jYl le 1\n" + "wkD ka 1\n" + "cWs ch 1\n" + "yyJ ny 1\n" + "wvV va 1\n" + "lYb le 1\n" + "qrW qu 1\n" + "bqz qu 1\n" + "wjC ij 1\n" + "vKy va 1\n" + "vjD ij 1\n" + "sDs st 1\n" + "fKf fo 1\n" + "zsT st 1\n" + "jYc ch 1\n" + "Ywt th 1\n" + "Hjw ij 1\n" + "wIy wa 1\n" + "ffU fo 1\n" + "Wnx an 1\n" + "eHq qu 1\n" + "fWy ny 1\n" + "Nwv va 1\n" + "ySj ij 1\n" + "jfC ij 1\n" + "xXq qu 1\n" + "grI ng 1\n" + "oVf on 1\n" + "Vfy ny 1\n" + "jgY ng 1\n" + "Hjp ij 1\n" + "zqC qu 1\n" + "qyH qu 1\n" + "kcQ ch 1\n" + "zsE st 1\n" + "pCx pr 1\n" + "kwP ka 1\n" + "jfQ ij 1\n" + "wZg ng 1\n" + "Vxm me 1\n" + "Jvb va 1\n" + "sEw sz 1\n" + "jLl le 1\n" + "dOx de 1\n" + "wpS pr 1\n" + "yIo on 1\n" + "tGt th 1\n" + "vHz sz 1\n" + "xGj ij 1\n" + "gvQ ng 1\n" + "pNr er 1\n" + "gqY qu 1\n" + "sfK st 1\n" + "dYd de 1\n" + "sMm st 1\n" + "oBx on 1\n" + "qsF qu 1\n" + "bmI me 1\n" + "tmC th 1\n" + "wlW le 1\n" + "Twg ng 1\n" + "srV er 1\n" + "rNz er 1\n" + "Uuc ch 1\n" + "Gjg ng 1\n" + "njY an 1\n" + "vOh th 1\n" + "Qmh th 1\n" + "Fnf an 1\n" + "yvY va 1\n" + "pGf pr 1\n" + "lHp al 1\n" + "qgZ qu 1\n" + "jbS ij 1\n" + "xQi in 1\n" + "tqG th 1\n" + "nwI an 1\n" + "qkY qu 1\n" + "Wxy ny 1\n" + "hDm th 1\n" + "qQe qu 1\n" + "iJp in 1\n" + "xrN er 1\n" + "dGg ng 1\n" + "kQx ka 1\n" + "Jqg qu 1\n" + "hMk th 1\n" + "ljT le 1\n" + "Xkn an 1\n" + "ztq th 1\n" + "qNd qu 1\n" + "suY qu 1\n" + "Uoa an 1\n" + "djR de 1\n" + "mFf me 1\n" + "jzq qu 1\n" + "zjR sz 1\n" + "Nnl an 1\n" + "tJp th 1\n" + "gZr ng 1\n" + "Bwx wa 1\n" + "dWz sz 1\n" + "lwM le 1\n" + "Iqk qu 1\n" + "twZ th 1\n" + "Mwt th 1\n" + "kjY ij 1\n" + "zBv sz 
1\n" + "iwF in 1\n" + "rHz er 1\n" + "Sqh th 1\n" + "oKq qu 1\n" + "qjO qu 1\n" + "htQ th 1\n" + "cKx ch 1\n" + "bqW qu 1\n" + "kYh th 1\n" + "tBq th 1\n" + "gmJ ng 1\n" + "eYx er 1\n" + "hGv th 1\n" + "hQd th 1\n" + "pnX an 1\n" + "bvJ va 1\n" + "sxM st 1\n" + "qNt th 1\n" + "Wlj le 1\n" + "kqD qu 1\n" + "qdZ qu 1\n" + "mhY th 1\n" + "tlC th 1\n" + "pqI qu 1\n" + "ybD be 1\n" + "xAe er 1\n" + "pLt th 1\n" + "lHb le 1\n" + "xVc ch 1\n" + "dhN th 1\n" + "qxU qu 1\n" + "dVf de 1\n" + "Zkm ka 1\n" + "kpD ka 1\n" + "pjH ij 1\n" + "yGm me 1\n" + "iyP in 1\n" + "wmK me 1\n" + "mJz sz 1\n" + "fmL me 1\n" + "cBv ch 1\n" + "Vvf va 1\n" + "Eql qu 1\n" + "ohV th 1\n" + "lCx le 1\n" + "oWc ch 1\n" + "nzX an 1\n" + "fIj ij 1\n" + "kPt th 1\n" + "pYm me 1\n" + "zhG th 1\n" + "cqN ch 1\n" + "umQ qu 1\n" + "wXs st 1\n" + "lZj le 1\n" + "Sxs st 1\n" + "Kqd qu 1\n" + "tWc th 1\n" + "Kcc ch 1\n" + "pvB po 1\n" + "tgR th 1\n" + "yrN er 1\n" + "xQr er 1\n" + "Xvz sz 1\n" + "lJh th 1\n" + "Xfk ka 1\n" + "Fvr er 1\n" + "fUb be 1\n" + "lZb le 1\n" + "gdI ng 1\n" + "joI on 1\n" + "yKq qu 1\n" + "twz th 1\n" + "qJj qu 1\n" + "vxM va 1\n" + "Vzs st 1\n" + "fjR ij 1\n" + "Kmz sz 1\n" + "qIw qu 1\n" + "jyD ij 1\n" + "qbU qu 1\n" + "qkZ qu 1\n" + "jVg ng 1\n" + "Fhj th 1\n" + "qJq qu 1\n" + "wPq qu 1\n" + "Ueo er 1\n" + "zXd sz 1\n" + "gFb ng 1\n" + "jJy ij 1\n" + "Nsj st 1\n" + "lMb le 1\n" + "yQn an 1\n" + "dnM an 1\n" + "yRg ng 1\n" + "Fjc ch 1\n" + "dKg ng 1\n" + "gqV ng 1\n" + "gCk ng 1\n" + "sOz st 1\n" + "hlO th 1\n" + "qbN qu 1\n" + "sjN st 1\n" + "Ujz sz 1\n" + "rVm er 1\n" + "Wjs st 1\n" + "bmM me 1\n" + "Vzx sz 1\n" + "hZg th 1\n" + "zFt th 1\n" + "yhJ th 1\n" + "vNk ka 1\n" + "zbT sz 1\n" + "xmJ me 1\n" + "Fcs ch 1\n" + "yTc ch 1\n" + "cSg ch 1\n" + "qmP qu 1\n" + "mFz sz 1\n" + "bdI de 1\n" + "jlK le 1\n" + "bnB an 1\n" + "qyQ qu 1\n" + "Vjk ij 1\n" + "hzU th 1\n" + "qgp qu 1\n" + "lqW qu 1\n" + "fNn an 1\n" + "Tjp ij 1\n" + "vlV le 1\n" + "rVp er 1\n" + "bLd de 1\n" + "ydQ de 1\n" 
+ "gYg ng 1\n" + "qhE th 1\n" + "Gsq qu 1\n" + "gWz ng 1\n" + "Qtk th 1\n" + "Hzw sz 1\n" + "kIo ho 1\n" + "kfC ka 1\n" + "zBg ng 1\n" + "jJp ij 1\n" + "eIq qu 1\n" + "vuB qu 1\n" + "Wbg ng 1\n" + "Jjp ij 1\n" + "lXk le 1\n" + "Tfx fo 1\n" + "zLl le 1\n" + "dqT qu 1\n" + "oZq qu 1\n" + "Jfu qu 1\n" + "Qhh th 1\n" + "qkK qu 1\n" + "Ejc ch 1\n" + "zwN sz 1\n" + "yQq qu 1\n" + "dDp de 1\n" + "Pww wa 1\n" + "ztC th 1\n" + "jtH th 1\n" + "yrX er 1\n" + "vwT va 1\n" + "yRh th 1\n" + "wQt th 1\n" + "lXz le 1\n" + "cfL ch 1\n" + "Fwl le 1\n" + "rNw er 1\n" + "Bhx th 1\n" + "glZ ng 1\n" + "gcD ch 1\n" + "Sfs st 1\n" + "Uzf sz 1\n" + "Tdl le 1\n" + "dRn an 1\n" + "vYw va 1\n" + "xcD ch 1\n" + "xcC ch 1\n" + "lBx le 1\n" + "gHq qu 1\n" + "wJy wa 1\n" + "yrO er 1\n" + "vqF qu 1\n" + "tYb th 1\n" + "Zjw ij 1\n" + "jLk ij 1\n" + "Hvf va 1\n" + "pnS an 1\n" + "pcT ch 1\n" + "sFk st 1\n" + "dcO ch 1\n" + "zPw sz 1\n" + "vNf va 1\n" + "Gdx de 1\n" + "dlP le 1\n" + "jLx jo 1\n" + "jZj ij 1\n" + "wwT wa 1\n" + "tGx th 1\n" + "fhS th 1\n" + "Xtk th 1\n" + "xnW on 1\n" + "pkJ ka 1\n" + "xIo on 1\n" + "Zxb be 1\n" + "nOj an 1\n" + "wHj ij 1\n" + "fjS ij 1\n" + "wdL de 1\n" + "jbN ij 1\n" + "ykO ka 1\n" + "xqB qu 1\n" + "qzN qu 1\n" + "Qbq qu 1\n" + "Fqw qu 1\n" + "jWw ij 1\n" + "nxM an 1\n" + "tpX th 1\n" + "Ttz th 1\n" + "zsH st 1\n" + "fjz sz 1\n" + "xIg ng 1\n" + "xkY ka 1\n" + "Fqa an 1\n" + "oGk on 1\n" + "Hnc an 1\n" + "jPq qu 1\n" + "zlW le 1\n" + "uRx qu 1\n" + "uGx qu 1\n" + "jYv ij 1\n" + "Kpz sz 1\n" + "gQo ng 1\n" + "Kwx wa 1\n" + "jNw ij 1\n" + "tdD th 1\n" + "yGj ij 1\n" + "Lbq qu 1\n" + "Rrc ch 1\n" + "qvX qu 1\n" + "hhK th 1\n" + "kZx ka 1\n" + "xDf fo 1\n" + "Pjf ij 1\n" + "cgF ch 1\n" + "vCk ka 1\n" + "fWw ow 1\n" + "mJp me 1\n" + "fXe er 1\n" + "uYp qu 1\n" + "jHk ij 1\n" + "wdP de 1\n" + "qFk qu 1\n" + "jrG er 1\n" + "fgD ng 1\n" + "fsG st 1\n" + "Vgb ng 1\n" + "xAa an 1\n" + "gtZ th 1\n" + "tlq th 1\n" + "Tmw me 1\n" + "gyY ng 1\n" + "Qxt th 1\n" + "Uxz sz 1\n" + 
"iVr in 1\n" + "zqI qu 1\n" + "Nbw wa 1\n" + "Dhd th 1\n" + "mOq qu 1\n" + "iBd in 1\n" + "cqB ch 1\n" + "zQq qu 1\n" + "Wbv va 1\n" + "Qks ka 1\n" + "qPa an 1\n" + "tfI th 1\n" + "mZs st 1\n" + "pDs st 1\n" + "nJj an 1\n" + "zcp ch 1\n" + "tWj th 1\n" + "Zxp pr 1\n" + "vPy va 1\n" + "dxK de 1\n" + "oPv on 1\n" + "rjN er 1\n" + "oQh th 1\n" + "vwH va 1\n" + "Qhp th 1\n" + "xsU st 1\n" + "kGq qu 1\n" + "wjW ij 1\n" + "Pwx wa 1\n" + "Bbn an 1\n" + "xOq qu 1\n" + "qpN qu 1\n" + "nbq an 1\n" + "zpM sz 1\n" + "jmB ij 1\n" + "Nqj qu 1\n" + "zYd sz 1\n" + "Ybc ch 1\n" + "xcW ch 1\n" + "gPg ng 1\n" + "Qys st 1\n" + "Bhq th 1\n" + "yGx ny 1\n" + "qxL qu 1\n" + "Jfd de 1\n" + "mbV me 1\n" + "pkY ka 1\n" + "cWl ch 1\n" + "wBg ng 1\n" + "vOw va 1\n" + "Gpb pr 1\n" + "Ppq qu 1\n" + "fsX st 1\n" + "vtQ th 1\n" + "yCj ij 1\n" + "yoY on 1\n" + "pwQ pr 1\n" + "yGd de 1\n" + "qtJ th 1\n" + "nrZ an 1\n" + "eVx er 1\n" + "Nrq qu 1\n" + "wtA th 1\n" + "fHf fo 1\n" + "gsQ ng 1\n" + "hlC th 1\n" + "dLc ch 1\n" + "zjC sz 1\n" + "jvY ij 1\n" + "tIj th 1\n" + "pvL va 1\n" + "Hhg th 1\n" + "yMv va 1\n" + "xMn an 1\n" + "tYx th 1\n" + "vVp va 1\n" + "Ynb an 1\n" + "vmX va 1\n" + "qjQ qu 1\n" + "vQr er 1\n" + "hQz th 1\n" + "mNf me 1\n" + "zfY sz 1\n" + "xjS ij 1\n" + "jBm ij 1\n" + "jpq qu 1\n" + "nJq an 1\n" + "Knz an 1\n" + "gGf ng 1\n" + "pZx pr 1\n" + "Gql qu 1\n" + "Uqm qu 1\n" + "eWv er 1\n" + "fGg ng 1\n" + "qsA qu 1\n" + "uhY th 1\n" + "xhH th 1\n" + "yxS ny 1\n" + "rxK er 1\n" + "hNc th 1\n" + "Vwh th 1\n" + "aNv an 1\n" + "Qzv sz 1\n" + "fQn an 1\n" + "jzH sz 1\n" + "Rvh th 1\n" + "Qpt th 1\n" + "qXv qu 1\n" + "phQ th 1\n" + "Qlb le 1\n" + "bnQ an 1\n" + "njK an 1\n" + "Jjs st 1\n" + "tJx th 1\n" + "iwX in 1\n" + "nVd an 1\n" + "kzA sz 1\n" + "uwE qu 1\n" + "Tsq qu 1\n" + "hqM th 1\n" + "Rnq an 1\n" + "rDn an 1\n" + "yNb be 1\n" + "uqN qu 1\n" + "fKw wa 1\n" + "Iqn an 1\n" + "xHc ch 1\n" + "Wwq qu 1\n" + "gMw ng 1\n" + "yWf ny 1\n" + "vcO ch 1\n" + "Gkm ka 1\n" + "fRh th 1\n" + "dMc 
nd 1\n" + "Zhx th 1\n" + "qlH qu 1\n" + "qUl qu 1\n" + "zHf sz 1\n" + "wCk ka 1\n" + "Qfj ij 1\n" + "Qkw ka 1\n" + "mYh th 1\n" + "dcU ch 1\n" + "jTf ij 1\n" + "rjF er 1\n" + "hxQ th 1\n" + "wNf wa 1\n" + "Lgg ng 1\n" + "Fdu qu 1\n" + "tJw th 1\n" + "ycQ ch 1\n" + "xXf fo 1\n" + "wwQ wa 1\n" + "evQ er 1\n" + "Fcj ch 1\n" + "Cyq qu 1\n" + "tpF th 1\n" + "Axj ij 1\n" + "zGg ng 1\n" + "Qbb be 1\n" + "vfY va 1\n" + "oXd on 1\n" + "wAq qu 1\n" + "Xbk ka 1\n" + "wmR me 1\n" + "rzN er 1\n" + "fcB ch 1\n" + "Bwc ch 1\n" + "xgS ng 1\n" + "dQr er 1\n" + "kJw ka 1\n" + "bgx ng 1\n" + "pZs sz 1\n" + "wfA wa 1\n" + "jmX ij 1\n" + "dNp de 1\n" + "Vxr er 1\n" + "Rvb va 1\n" + "wZl le 1\n" + "wgA ng 1\n" + "Wrq qu 1\n" + "Jcq ch 1\n" + "ljW le 1\n" + "qPt th 1\n" + "gjY ng 1\n" + "jUo on 1\n" + "mIj ij 1\n" + "Hpy pr 1\n" + "Mpj ij 1\n" + "bkO ka 1\n" + "Avz sz 1\n" + "vKk ka 1\n" + "Bfz sz 1\n" + "yYj ij 1\n" + "Egq qu 1\n" + "wxH wa 1\n" + "zHh th 1\n" + "svA st 1\n" + "zcP ch 1\n" + "Bxo on 1\n" + "hSv th 1\n" + "Lxt th 1\n" + "hBz th 1\n" + "cWk ch 1\n" + "xBv va 1\n" + "hwN th 1\n" + "mkJ ka 1\n" + "oNj on 1\n" + "Ugq qu 1\n" + "jZq qu 1\n" + "xfP fo 1\n" + "bYv va 1\n" + "qxF qu 1\n" + "dcI ch 1\n" + "dhY th 1\n" + "cvP ch 1\n" + "qUy qu 1\n" + "mxC me 1\n" + "zPx sz 1\n" + "Nql qu 1\n" + "Yfw wa 1\n" + "Wgp ng 1\n" + "jgD ng 1\n" + "Qfq qu 1\n" + "lcW ch 1\n" + "qxy qu 1\n" + "Xpq qu 1\n" + "wrD er 1\n" + "bEo on 1\n" + "bzV sz 1\n" + "fwS wa 1\n" + "mLj ij 1\n" + "wMr er 1\n" + "vFb va 1\n" + "zfT sz 1\n" + "nRk an 1\n" + "kJh th 1\n" + "Rmw me 1\n" + "nqR an 1\n" + "qpO qu 1\n" + "bHb be 1\n" + "Tkq qu 1\n" + "sjG st 1\n" + "qaT an 1\n" + "Pql qu 1\n" + "hlQ th 1\n" + "kzW sz 1\n" + "yFc ch 1\n" + "uBv qu 1\n" + "vxO va 1\n" + "qvC qu 1\n" + "Yqx qu 1\n" + "jCb ij 1\n" + "Qjk ij 1\n" + "fBh th 1\n" + "vKq qu 1\n" + "rMg ng 1\n" + "hRw th 1\n" + "ykU ka 1\n" + "bUq qu 1\n" + "vYv va 1\n" + "Pdx de 1\n" + "oGv on 1\n" + "jLy ij 1\n" + "duY qu 1\n" + "Wcp ch 1\n" + "oGx on 
1\n" + "vGl le 1\n" + "Jdz sz 1\n" + "ijH in 1\n" + "mlX le 1\n" + "jNr er 1\n" + "kCq qu 1\n" + "Ghh th 1\n" + "rMv er 1\n" + "Bgp ng 1\n" + "bFt th 1\n" + "uWl qu 1\n" + "dXg ng 1\n" + "Wcf ch 1\n" + "dbI de 1\n" + "bGx be 1\n" + "exQ er 1\n" + "jWj jo 1\n" + "pQb pr 1\n" + "jcH ch 1\n" + "qOl qu 1\n" + "mtL th 1\n" + "crC ch 1\n" + "pBh th 1\n" + "Wlz le 1\n" + "nHn an 1\n" + "Hfp pr 1\n" + "Xpc ch 1\n" + "Uxp pr 1\n" + "Ksq qu 1\n" + "xWk ka 1\n" + "nqZ an 1\n" + "Cxd de 1\n" + "zJx sz 1\n" + "rWq qu 1\n" + "Cbq qu 1\n" + "qqP qu 1\n" + "lhU th 1\n" + "Ufv va 1\n" + "Uxg ng 1\n" + "hJf th 1\n" + "nvQ an 1\n" + "dhF th 1\n" + "Cvb va 1\n" + "aPf an 1\n" + "Jxj ij 1\n" + "Dwp pr 1\n" + "Ixw wa 1\n" + "kfS ka 1\n" + "rZm er 1\n" + "fmE me 1\n" + "sLq qu 1\n" + "bmR me 1\n" + "uCs qu 1\n" + "kFm ka 1\n" + "Kqk qu 1\n" + "xQk ka 1\n" + "Sfn an 1\n" + "fgU ng 1\n" + "vvT va 1\n" + "mQe er 1\n" + "Gbt th 1\n" + "tbY th 1\n" + "lQk le 1\n" + "cIh th 1\n" + "Tjq qu 1\n" + "nQg an 1\n" + "yYp pr 1\n" + "qPw qu 1\n" + "xOa an 1\n" + "pNw pr 1\n" + "fJz sz 1\n" + "zHb sz 1\n" + "kBh th 1\n" + "fdE de 1\n" + "wPg ng 1\n" + "lVv le 1\n" + "mPw me 1\n" + "Rmg ng 1\n" + "xoE on 1\n" + "hnJ th 1\n" + "uvE qu 1\n" + "Woq qu 1\n" + "ucX ch 1\n" + "nmD an 1\n" + "pcX ch 1\n" + "hDw th 1\n" + "dgI ng 1\n" + "vVd de 1\n" + "tDh ch 1\n" + "jHn an 1\n" + "hkX th 1\n" + "pxT pr 1\n" + "xYz sz 1\n" + "rTp er 1\n" + "Ubz sz 1\n" + "Llm le 1\n" + "yjZ ij 1\n" + "Qss st 1\n" + "cfM ch 1\n" + "jbG be 1\n" + "Jfz sz 1\n" + "mWb me 1\n" + "jDp ij 1\n" + "lWz le 1\n" + "cXy ch 1\n" + "oQr er 1\n" + "ucZ ch 1\n" + "cvN ch 1\n" + "cvK ch 1\n" + "zDk sz 1\n" + "bLr er 1\n" + "dDl le 1\n" + "hhD th 1\n" + "vmK va 1\n" + "hLt th 1\n" + "mqW qu 1\n" + "Bfs st 1\n" + "Acj ch 1\n" + "dcG ch 1\n" + "yJc ch 1\n" + "mfS me 1\n" + "drL er 1\n" + "qyK qu 1\n" + "tQz th 1\n" + "jrL er 1\n" + "ccJ ch 1\n" + "wpX pr 1\n" + "Zzf sz 1\n" + "snU an 1\n" + "qEw qu 1\n" + "tQb th 1\n" + "mPd de 1\n" + "vJq qu 1\n" 
+ "vpU va 1\n" + "vzM sz 1\n" + "uZb qu 1\n" + "ywU wa 1\n" + "Rjs st 1\n" + "hKt th 1\n" + "Bfb be 1\n" + "wuQ qu 1\n" + "bvM va 1\n" + "yiW in 1\n" + "hqC th 1\n" + "iUq qu 1\n" + "lBd le 1\n" + "Zxj ij 1\n" + "wpW pr 1\n" + "rHm er 1\n" + "mhQ th 1\n" + "fMb be 1\n" + "vWf va 1\n" + "Fdq qu 1\n" + "jGb ij 1\n" + "Dhw th 1\n" + "cjR ch 1\n" + "kvD ka 1\n" + "qvD qu 1\n" + "Xmk ka 1\n" + "Cjj ij 1\n" + "kkX ka 1\n" + "qkF qu 1\n" + "vWg ng 1\n" + "Msq qu 1\n" + "nNv an 1\n" + "Hzu qu 1\n" + "zrY er 1\n" + "hgB th 1\n" + "pwB pr 1\n" + "Jxc ch 1\n" + "vcJ ch 1\n" + "sYw st 1\n" + "Tqx qu 1\n" + "eJf le 1\n" + "czJ ch 1\n" + "Qyh th 1\n" + "bvV va 1\n" + "Xyh th 1\n" + "fjq qu 1\n" + "dYc ch 1\n" + "pBx pr 1\n" + "jvR ij 1\n" + "gbH ng 1\n" + "ygH ng 1\n" + "hbV th 1\n" + "lwU le 1\n" + "tJk th 1\n" + "pIw pr 1\n" + "Vjl le 1\n" + "Dgm ng 1\n" + "nvR an 1\n" + "yRp pr 1\n" + "fOj ij 1\n" + "Ecf ch 1\n" + "Zrf er 1\n" + "mxD me 1\n" + "Iqf qu 1\n" + "zBj sz 1\n" + "tTs th 1\n" + "lqB qu 1\n" + "kCv ka 1\n" + "nVh th 1\n" + "jGq qu 1\n" + "cgQ ch 1\n" + "Ppd de 1\n" + "Jcd ch 1\n" + "hhP th 1\n" + "sLg ng 1\n" + "xYt th 1\n" + "Qps st 1\n" + "sfE st 1\n" + "wxR wa 1\n" + "pFp pr 1\n" + "Ymf me 1\n" + "Jgy ng 1\n" + "yvI va 1\n" + "Ncz ch 1\n" + "wBf wa 1\n" + "rVx er 1\n" + "jvX ij 1\n" + "nYp an 1\n" + "nNb an 1\n" + "cQi ch 1\n" + "Qwy wa 1\n" + "vPf va 1\n" + "qvd qu 1\n" + "hkD th 1\n" + "Wmr er 1\n" + "gdY ng 1\n" + "Kjj ij 1\n" + "qsN qu 1\n" + "vJg ng 1\n" + "mDc ch 1\n" + "kvF ka 1\n" + "kWx ka 1\n" + "xYu qu 1\n" + "eMq qu 1\n" + "mYy me 1\n" + "Hxt th 1\n" + "pbM pr 1\n" + "Hwd de 1\n" + "mWu qu 1\n" + "zNs st 1\n" + "Qjh th 1\n" + "aqD an 1\n" + "Gcd ch 1\n" + "btX th 1\n" + "Zql qu 1\n" + "Ujw ij 1\n" + "yvM va 1\n" + "Hhw th 1\n" + "zWd sz 1\n" + "pYj ij 1\n" + "xWt th 1\n" + "ylO le 1\n" + "cnX ch 1\n" + "cMf ch 1\n" + "pKb pr 1\n" + "woV on 1\n" + "fzG sz 1\n" + "Lqb qu 1\n" + "eOj er 1\n" + "Gtb th 1\n" + "clX ch 1\n" + "kdC de 1\n" + "cfq ch 1\n" + 
"hKk th 1\n" + "cJi ch 1\n" + "uSb qu 1\n" + "jgT ng 1\n" + "tcG th 1\n" + "qNv qu 1\n" + "fpB pr 1\n" + "vPw va 1\n" + "jmA ij 1\n" + "dxI de 1\n" + "jGg ng 1\n" + "Bvg ng 1\n" + "qrC qu 1\n" + "nPx an 1\n" + "Qmn an 1\n" + "cqC ch 1\n" + "kFh th 1\n" + "Jtf th 1\n" + "Cqz qu 1\n" + "rCd er 1\n" + "Zms st 1\n" + "dVq qu 1\n" + "Gwg ng 1\n" + "cwP ch 1\n" + "wVu qu 1\n" + "dNg ng 1\n" + "jXc ch 1\n" + "Mbz sz 1\n" + "wvG ve 1\n" + "Vpw pr 1\n" + "yXq qu 1\n" + "hlK th 1\n" + "pYv va 1\n" + "Fbd de 1\n" + "zcV ch 1\n" + "rQk er 1\n" + "wtN th 1\n" + "qeI qu 1\n" + "eGt th 1\n" + "kMq qu 1\n" + "kqS qu 1\n" + "cqd ch 1\n" + "pLf po 1\n" + "xvO va 1\n" + "rfH er 1\n" + "gIq qu 1\n" + "Pqk qu 1\n" + "xCn an 1\n" + "dVs st 1\n" + "iqY qu 1\n" + "bsJ st 1\n" + "Vww wa 1\n" + "Znm an 1\n" + "Yrz er 1\n" + "Rvz sz 1\n" + "dzK de 1\n" + "zbW sz 1\n" + "tkx th 1\n" + "xkP ka 1\n" + "kzS sz 1\n" + "gXq qu 1\n" + "Lxf fo 1\n" + "Fwr er 1\n" + "lHs le 1\n" + "zrB er 1\n" + "jNb ij 1\n" + "Hxy ny 1\n" + "Gfw wa 1\n" + "Egw ng 1\n" + "Jxw wa 1\n" + "tVm th 1\n" + "bwQ wa 1\n" + "gIx ng 1\n" + "Wqu un 1\n" + "jvI ij 1\n" + "cGc ch 1\n" + "kSb ka 1\n" + "hxG th 1\n" + "zHm sz 1\n" + "Jpk ka 1\n" + "fVb be 1\n" + "Ukf ka 1\n" + "rxF er 1\n" + "dVu qu 1\n" + "sdX st 1\n" + "mjM ij 1\n" + "xwq qu 1\n" + "Ogk ng 1\n" + "qhr th 1\n" + "vfA va 1\n" + "qbA qu 1\n" + "Lfu qu 1\n" + "hzY th 1\n" + "iHf in 1\n" + "jxb ij 1\n" + "vmP va 1\n" + "bvI va 1\n" + "fmH me 1\n" + "qtx th 1\n" + "bvQ va 1\n" + "qzX qu 1\n" + "bVn an 1\n" + "Xmt th 1\n" + "qXo qu 1\n" + "pfD pr 1\n" + "fCd de 1\n" + "vbx va 1\n" + "Zhz th 1\n" + "Kwg ng 1\n" + "rcJ ch 1\n" + "jlT le 1\n" + "jzM sz 1\n" + "rpP er 1\n" + "tmA th 1\n" + "aYw an 1\n" + "zBq qu 1\n" + "xhT th 1\n" + "yLq qu 1\n" + "cKf ch 1\n" + "qdP qu 1\n" + "Ybx be 1\n" + "dHs st 1\n" + "jhH th 1\n" + "Bsv st 1\n" + "rZt th 1\n" + "mhJ th 1\n" + "Zwq qu 1\n" + "kXf ka 1\n" + "zvT sz 1\n" + "yiC in 1\n" + "gkT ng 1\n" + "nJw an 1\n" + "zpV sz 1\n" + "tPq 
th 1\n" + "cVt th 1\n" + "dBg ng 1\n" + "cRf ch 1\n" + "vRq qu 1\n" + "jgA ng 1\n" + "bMz sz 1\n" + "hJh th 1\n" + "mHd de 1\n" + "Ckq qu 1\n" + "qcj ch 1\n" + "yIb be 1\n" + "wqE qu 1\n" + "pMh th 1\n" + "Hqj qu 1\n" + "jZu qu 1\n" + "iqO qu 1\n" + "tqC th 1\n" + "qoK qu 1\n" + "Knq an 1\n" + "bQm me 1\n" + "uuX qu 1\n" + "Wzc ch 1\n" + "Pxy ny 1\n" + "Qgf ng 1\n" + "sFw st 1\n" + "gHf ng 1\n" + "kgN ng 1\n" + "rCw er 1\n" + "Yjy ij 1\n" + "pnV an 1\n" + "fbS be 1\n" + "iHz in 1\n" + "kGx ka 1\n" + "kwS ka 1\n" + "sDm st 1\n" + "Vhk th 1\n" + "phN th 1\n" + "Jbf be 1\n" + "pWz sz 1\n" + "vvQ va 1\n" + "vNm va 1\n" + "lYw le 1\n" + "zHx sz 1\n" + "Zzc ch 1\n" + "bDt th 1\n" + "Fcv ch 1\n" + "dJg ng 1\n" + "Qwb wa 1\n" + "qFw qu 1\n" + "wmO me 1\n" + "Bvy va 1\n" + "qgY qu 1\n" + "vYs st 1\n" + "xwF wa 1\n" + "qwP qu 1\n" + "uEc ch 1\n" + "mWq qu 1\n" + "fzO sz 1\n" + "bPg ng 1\n" + "pnW an 1\n" + "hGx th 1\n" + "Vkk ka 1\n" + "Xrx er 1\n" + "gJd ng 1\n" + "Llq qu 1\n" + "Vqu un 1\n" + "fgH ng 1\n" + "Vcy ch 1\n" + "hVc th 1\n" + "rwZ er 1\n" + "Xlc ch 1\n" + "xJd de 1\n" + "Fnn an 1\n" + "Ypj ij 1\n" + "lhJ th 1\n" + "aUj an 1\n" + "lBp pr 1\n" + "dlW le 1\n" + "pvV va 1\n" + "Mwr er 1\n" + "Zwc ch 1\n" + "wcU ch 1\n" + "cVq ch 1\n" + "ycU ch 1\n" + "Lcq ch 1\n" + "rvQ er 1\n" + "eYm er 1\n" + "qCn an 1\n" + "dBx de 1\n" + "Iwq qu 1\n" + "gMt th 1\n" + "bhC th 1\n" + "bDs st 1\n" + "Vhz th 1\n" + "kJz sz 1\n" + "Ohz th 1\n" + "kDz sz 1\n" + "hTn th 1\n" + "eqG qu 1\n" + "gJr ng 1\n" + "Zpz sz 1\n" + "hwQ th 1\n" + "fgY ng 1\n" + "sdV st 1\n" + "ljV le 1\n" + "yGg ng 1\n" + "uWg qu 1\n" + "sbO st 1\n" + "qdD qu 1\n" + "yJj ij 1\n" + "nwq an 1\n" + "Apq qu 1\n" + "ccK ch 1\n" + "Qwl le 1\n" + "oyQ on 1\n" + "lPw le 1\n" + "cYt th 1\n" + "brG er 1\n" + "xkT ka 1\n" + "dUj de 1\n" + "rhR th 1\n" + "xPw wa 1\n" + "xoF on 1\n" + "hYj th 1\n" + "hYw th 1\n" + "lPn an 1\n" + "zCg ng 1\n" + "sJt th 1\n" + "wDs st 1\n" + "fVh th 1\n" + "zwW sz 1\n" + "yLj ij 1\n" + "aBx an 
1\n" + "Dvv va 1\n" + "tKb th 1\n" + "jfG ij 1\n" + "xMm me 1\n" + "bLp pr 1\n" + "xwW wa 1\n" + "bzH sz 1\n" + "cIw ch 1\n" + "zdN sz 1\n" + "Ggv va 1\n" + "lwV le 1\n" + "qyV qu 1\n" + "vBv va 1\n" + "Owm me 1\n" + "Ltx th 1\n" + "mqE qu 1\n" + "Xjc ch 1\n" + "pzY sz 1\n" + "Jds st 1\n" + "kMl le 1\n" + "Ddj de 1\n" + "tfX th 1\n" + "cqT ch 1\n" + "buG qu 1\n" + "oHb po 1\n" + "vRx va 1\n" + "qyq qu 1\n" + "kpY ka 1\n" + "vqN qu 1\n" + "jNq qu 1\n" + "cWb ch 1\n" + "gbJ ng 1\n" + "oZw on 1\n" + "cBz ch 1\n" + "Pvv va 1\n" + "ljI le 1\n" + "hvQ th 1\n" + "kwY ka 1\n" + "hBg th 1\n" + "kdN de 1\n" + "yxH ny 1\n" + "fxH fo 1\n" + "tXj th 1\n" + "uBx qu 1\n" + "uJm qu 1\n" + "Gxh th 1\n" + "fjK ij 1\n" + "gqO qu 1\n" + "dMt th 1\n" + "lVx le 1\n" + "Rhp th 1\n" + "cDn ch 1\n" + "Xkv ka 1\n" + "zmB sz 1\n" + "qaY an 1\n" + "Ivq qu 1\n" + "wmP me 1\n" + "bjq qu 1\n" + "cmU ch 1\n" + "slC le 1\n" + "Krx er 1\n" + "iVv in 1\n" + "Zwz sz 1\n" + "yPd de 1\n" + "qUv qu 1\n" + "Pdz sz 1\n" + "Qzk sz 1\n" + "zoU on 1\n" + "xJf fo 1\n" + "Udq qu 1\n" + "Qwj ij 1\n" + "Kvd de 1\n" + "vQw va 1\n" + "Rdk de 1\n" + "sIj st 1\n" + "Ggt th 1\n" + "lNw le 1\n" + "qvr qu 1\n" + "yqD qu 1\n" + "fXl le 1\n" + "jqg qu 1\n" + "qmA qu 1\n" + "Tgd ng 1\n" + "zpO po 1\n" + "tEz th 1\n" + "Bqz qu 1\n" + "wfL wa 1\n" + "vYu qu 1\n" + "Dxw wa 1\n" + "qWl qu 1\n" + "Rzc ch 1\n" + "mQo on 1\n" + "Ttc th 1\n" + "tVv th 1\n" + "Rqn an 1\n" + "Wcn ch 1\n" + "Nwu qu 1\n" + "xoJ on 1\n" + "vDf va 1\n" + "phH th 1\n" + "fJs st 1\n" + "Pxm me 1\n" + "rFb er 1\n" + "hlM th 1\n" + "mkX ka 1\n" + "nnQ an 1\n" + "Xfn an 1\n" + "sbZ st 1\n" + "Yyf ny 1\n" + "Bjw ij 1\n" + "Ilx le 1\n" + "qpA qu 1\n" + "Mqc ch 1\n" + "gqZ qu 1\n" + "sNv st 1\n" + "Zvq qu 1\n" + "kSx ka 1\n" + "vBd de 1\n" + "wvZ va 1\n" + "Uoe er 1\n" + "Fjy ij 1\n" + "zKb sz 1\n" + "pvI va 1\n" + "Zll le 1\n" + "hdE th 1\n" + "Fpv va 1\n" + "lhV th 1\n" + "rqQ qu 1\n" + "wjG ij 1\n" + "pLq qu 1\n" + "bpJ pr 1\n" + "wzV sz 1\n" + "Hgq ng 1\n" 
+ "zhW th 1\n" + "Lvq qu 1\n" + "Xhr th 1\n" + "quY un 1\n" + "jqZ qu 1\n" + "vuH qu 1\n" + "Fzj sz 1\n" + "gzG ng 1\n" + "tFc th 1\n" + "vfE va 1\n" + "Igx ng 1\n" + "fqY qu 1\n" + "gYb ng 1\n" + "lJg ng 1\n" + "wcO ch 1\n" + "Qvk ka 1\n" + "Tqq qu 1\n" + "bdY de 1\n" + "wuT qu 1\n" + "lHw le 1\n" + "zRm sz 1\n" + "Hgw ng 1\n" + "tPk th 1\n" + "Jqv qu 1\n" + "tKx th 1\n" + "xpA pr 1\n" + "bkI ka 1\n" + "bSj ij 1\n" + "mxW me 1\n" + "mjR ij 1\n" + "Oip in 1\n" + "wyY wa 1\n" + "dFc ch 1\n" + "qDg qu 1\n" + "wXp pr 1\n" + "Vbp pr 1\n" + "jyN ij 1\n" + "yvP va 1\n" + "yVr er 1\n" + "aWm an 1\n" + "Gjk ij 1\n" + "Apw pr 1\n" + "Zsw st 1\n" + "jQv ij 1\n" + "jbT ij 1\n" + "bdB de 1\n" + "kcY ch 1\n" + "rqC qu 1\n" + "bxD be 1\n" + "vlx le 1\n" + "kjJ ij 1\n" + "xqW qu 1\n" + "zxE sz 1\n" + "sHf st 1\n" + "juF qu 1\n" + "kwX ka 1\n" + "oqW qu 1\n" + "qWt th 1\n" + "fHc ch 1\n" + "cHc ch 1\n" + "Jjm ij 1\n" + "xbA be 1\n" + "Rqj qu 1\n" + "Ijy ij 1\n" + "vSx va 1\n" + "pVj ij 1\n" + "rQx er 1\n" + "fmK me 1\n" + "fnA an 1\n" + "Phv th 1\n" + "bhN th 1\n" + "Hxp pr 1\n" + "Vjq qu 1\n" + "lqC qu 1\n" + "Whd th 1\n" + "zsF st 1\n" + "tYt th 1\n" + "Jzq qu 1\n" + "Nff fo 1\n" + "qXs qu 1\n" + "xJj ij 1\n" + "lXn an 1\n" + "Zpv va 1\n" + "qTh th 1\n" + "npH an 1\n" + "kYx ka 1\n" + "bBs st 1\n" + "vEa an 1\n" + "pjq qu 1\n" + "qIi qu 1\n" + "Fdk de 1\n" + "fNx fo 1\n" + "Ofh th 1\n" + "wXe er 1\n" + "mvZ va 1\n" + "Cjs st 1\n" + "Fmm me 1\n" + "pkR ka 1\n" + "zfZ sz 1\n" + "Zpm me 1\n" + "cbA ch 1\n" + "tvY th 1\n" + "Lmp me 1\n" + "gFd ng 1\n" + "bFx be 1\n" + "Fjm ij 1\n" + "wjF ij 1\n" + "bjv ij 1\n" + "dbT de 1\n" + "jmQ ij 1\n" + "xFw wa 1\n" + "cDk ch 1\n" + "hFz th 1\n" + "uGm qu 1\n" + "Yhx th 1\n" + "Vtl th 1\n" + "azV an 1\n" + "xJs st 1\n" + "Mxw wa 1\n" + "vgK ng 1\n" + "cwQ ch 1\n" + "Gnx an 1\n" + "lbP le 1\n" + "kdS de 1\n" + "kDt th 1\n" + "Pvq qu 1\n" + "yHs st 1\n" + "Lgq qu 1\n" + "Xmj ij 1\n" + "pvA va 1\n" + "vUu qu 1\n" + "Qju qu 1\n" + "qDf qu 1\n" + 
"Gxj ij 1\n" + "Gfz sz 1\n" + "gbY ng 1\n" + "Sjf ij 1\n" + "Ogw ng 1\n" + "hGt th 1\n" + "btT th 1\n" + "gwH ng 1\n" + "Mwj ij 1\n" + "fvU va 1\n" + "frG er 1\n" + "cMx ch 1\n" + "Ydv de 1\n" + "xkZ ka 1\n" + "fjL ij 1\n" + "yPx ny 1\n" + "drX er 1\n" + "jxR ij 1\n" + "hYq th 1\n" + "xHn an 1\n" + "jrP er 1\n" + "tcJ th 1\n" + "qJz qu 1\n" + "zUd sz 1\n" + "jXj ij 1\n" + "qDd qu 1\n" + "Bjh th 1\n" + "qFz sz 1\n" + "mxG me 1\n" + "xOd de 1\n" + "hgL th 1\n" + "cpD ch 1\n" + "jhS th 1\n" + "Zqp qu 1\n" + "yNq qu 1\n" + "pHq qu 1\n" + "rZq qu 1\n" + "Wjy ij 1\n" + "Tfb be 1\n" + "Nwb wa 1\n" + "zQk sz 1\n" + "Rkc ch 1\n" + "Qvw va 1\n" + "wlJ le 1\n" + "cFp ch 1\n" + "oDb on 1\n" + "lsY le 1\n" + "Zbn an 1\n" + "wCd de 1\n" + "zxN sz 1\n" + "bQf be 1\n" + "Kjy ij 1\n" + "Ovk ka 1\n" + "cxA ch 1\n" + "Hqw qu 1\n" + "hwY th 1\n" + "sGv st 1\n" + "Rwn an 1\n" + "zvH sz 1\n" + "yVw wa 1\n" + "zmX sz 1\n" + "qdM qu 1\n" + "dJv de 1\n" + "wDj ij 1\n" + "Vhm th 1\n" + "fLt th 1\n" + "bvC va 1\n" + "xVn an 1\n" + "Hfx fo 1\n" + "tQl th 1\n" + "lhW th 1\n" + "oqS qu 1\n" + "Qya an 1\n" + "gZf ng 1\n" + "bKy be 1\n" + "tjX th 1\n" + "Vkc ch 1\n" + "yjv ij 1\n" + "bgN ng 1\n" + "lNm le 1\n" + "Jzl le 1\n" + "Lwx wa 1\n" + "vcL ch 1\n" + "yXh th 1\n" + "ztZ th 1\n" + "yJx ny 1\n" + "npV an 1\n" + "swG st 1\n" + "sXn an 1\n" + "eJb er 1\n" + "dcR ch 1\n" + "Zrg ng 1\n" + "Pgv ng 1\n" + "xYr er 1\n" + "jlI le 1\n" + "Fmf me 1\n" + "Gqk qu 1\n" + "vlZ le 1\n" + "Csq qu 1\n" + "uQj qu 1\n" + "lLm le 1\n" + "hwK th 1\n" + "cQv ch 1\n" + "qfH qu 1\n" + "rRw er 1\n" + "aUo an 1\n" + "qpE qu 1\n" + "lPc ch 1\n" + "dHd de 1\n" + "gqL qu 1\n" + "zWp sz 1\n" + "bBq be 1\n" + "wWp pr 1\n" + "cfK ch 1\n" + "fWx fo 1\n" + "rvV er 1\n" + "zhR th 1\n" + "Klh th 1\n" + "cbQ ch 1\n" + "Jmg ng 1\n" + "fPg ng 1\n" + "Qnn an 1\n" + "sMq qu 1\n" + "aFz an 1\n" + "sJs st 1\n" + "Pwj ij 1\n" + "jcL ch 1\n" + "gmQ ng 1\n" + "Yqr qu 1\n" + "Cgz ng 1\n" + "wqz qu 1\n" + "fnI nt 1\n" + "qOt th 1\n" + "vyU 
va 1\n" + "wQz sz 1\n" + "vUa an 1\n" + "xBt th 1\n" + "dNm de 1\n" + "Ewx wa 1\n" + "ypD pr 1\n" + "wxL wa 1\n" + "qeN qu 1\n" + "vkB ka 1\n" + "jBj ij 1\n" + "gUj ng 1\n" + "kQk ka 1\n" + "fwO wa 1\n" + "qQt th 1\n" + "Qrl er 1\n" + "dTx de 1\n" + "fWd de 1\n" + "jxK ij 1\n" + "fHl le 1\n" + "jcY ch 1\n" + "oJs on 1\n" + "sRx st 1\n" + "uQg qu 1\n" + "hhY th 1\n" + "sdN st 1\n" + "mxR me 1\n" + "Xsv st 1\n" + "Pcq ch 1\n" + "pkZ ka 1\n" + "zDl le 1\n" + "rIh th 1\n" + "Hnv an 1\n" + "jpA ij 1\n" + "hZj th 1\n" + "Znd an 1\n" + "hZd th 1\n" + "qrO qu 1\n" + "Sbx be 1\n" + "tWp th 1\n" + "Hpd de 1\n" + "Hjz sz 1\n" + "zcS ch 1\n" + "kPz sz 1\n" + "Htq th 1\n" + "gcG ch 1\n" + "Xqx qu 1\n" + "mZc ch 1\n" + "Xzv sz 1\n" + "Kgw ng 1\n" + "aUf an 1\n" + "Ymq qu 1\n" + "wcY ch 1\n" + "oVh th 1\n" + "pdM de 1\n" + "vzK sz 1\n" + "lrX er 1\n" + "ydV de 1\n" + "uqP qu 1\n" + "fmN me 1\n" + "Ocg ch 1\n" + "fLk ka 1\n" + "cJs ch 1\n" + "uGf qu 1\n" + "cMk ch 1\n" + "gTx ng 1\n" + "xNc ch 1\n" + "bHl le 1\n" + "uWp qu 1\n" + "dxL de 1\n" + "zxG sz 1\n" + "dVn an 1\n" + "Nbh th 1\n" + "Cxs st 1\n" + "cvG ch 1\n" + "wCf wa 1\n" + "kjC ij 1\n" + "cfY ch 1\n" + "zcf ch 1\n" + "dpW de 1\n" + "Pqy qu 1\n" + "tlN th 1\n" + "sIi in 1\n" + "qxC qu 1\n" + "Kjm ij 1\n" + "zZk sz 1\n" + "Fks st 1\n" + "gWb ng 1\n" + "tqK th 1\n" + "Jlv le 1\n" + "kCk ka 1\n" + "whT th 1\n" + "Owv va 1\n" + "zKm sz 1\n" + "jql qu 1\n" + "tGz th 1\n" + "dCw de 1\n" + "ymQ me 1\n" + "xnF an 1\n" + "wuF qu 1\n" + "pFq qu 1\n" + "jyS ij 1\n" + "pjX ij 1\n" + "lOj le 1\n" + "Jmd de 1\n" + "Zvz sz 1\n" + "jqM qu 1\n" + "jTd de 1\n" + "qOi qu 1\n" + "oJg ng 1\n" + "Mjx ij 1\n" + "Tpb pr 1\n" + "Wtv th 1\n" + "jxO ij 1\n" + "dBs st 1\n" + "tNv th 1\n" + "qTb qu 1\n" + "vnU an 1\n" + "zDx sz 1\n" + "pSq qu 1\n" + "xRm me 1\n" + "qUf qu 1\n" + "mBb me 1\n" + "qjI qu 1\n" + "sIy st 1\n" + "dCg ng 1\n" + "qIx qu 1\n" + "pZp pr 1\n" + "qDt th 1\n" + "xrM er 1\n" + "uOe qu 1\n" + "xgO ng 1\n" + "grX ng 1\n" + "Pgg ng 
1\n" + "yVq qu 1\n" + "qEu un 1\n" + "kBc ch 1\n" + "Sgz ng 1\n" + "hjX th 1\n" + "gOq qu 1\n" + "pmW me 1\n" + "Gnw an 1\n" + "xZl le 1\n" + "hTd th 1\n" + "Gfq qu 1\n" + "sLf st 1\n" + "Pgj ng 1\n" + "twF th 1\n" + "mDk ka 1\n" + "qdY qu 1\n" + "vsZ st 1\n" + "vcC ch 1\n" + "Dcj ch 1\n" + "wUh th 1\n" + "qId qu 1\n" + "qrZ qu 1\n" + "cbS ch 1\n" + "Xzc ch 1\n" + "vWj ij 1\n" + "pvC va 1\n" + "Jrw er 1\n" + "yxI ny 1\n" + "dqI qu 1\n" + "uCm qu 1\n" + "vXd de 1\n" + "Wdp de 1\n" + "Dzc ch 1\n" + "hdV th 1\n" + "qbO qu 1\n" + "Jwk ka 1\n" + "Wqm qu 1\n" + "iXw in 1\n" + "fYl le 1\n" + "quQ un 1\n" + "kjD ij 1\n" + "mIh th 1\n" + "xWw wa 1\n" + "oCw on 1\n" + "Zcv ch 1\n" + "jdN de 1\n" + "uYb qu 1\n" + "Srx er 1\n" + "pgU ng 1\n" + "rQg ng 1\n" + "mHf me 1\n" + "fBt th 1\n" + "jVx ij 1\n" + "vYc ch 1\n" + "Vgj ng 1\n" + "qaS an 1\n" + "pxW pr 1\n" + "mnJ an 1\n" + "Bww wa 1\n" + "Tqz qu 1\n" + "jFv ij 1\n" + "xwM wa 1\n" + "Dqw qu 1\n" + "mwI me 1\n" + "vhW th 1\n" + "sqX qu 1\n" + "tlR th 1\n" + "aBh th 1\n" + "qnZ an 1\n" + "gXg ng 1\n" + "sCj st 1\n" + "grN ng 1\n" + "tYv th 1\n" + "Wwg ng 1\n" + "fYi in 1\n" + "btF th 1\n" + "wQn an 1\n" + "Zlt th 1\n" + "cJz ch 1\n" + "Xbn an 1\n" + "tLm th 1\n" + "Zlx le 1\n" + "Nmj ij 1\n" + "hcG th 1\n" + "Wrk er 1\n" + "Nhc th 1\n" + "vqD qu 1\n" + "ujY qu 1\n" + "iJd in 1\n" + "dLf de 1\n" + "cQn ch 1\n" + "Wfx fo 1\n" + "hkZ th 1\n" + "mhC th 1\n" + "zMq qu 1\n" + "zLz sz 1\n" + "Xgt th 1\n" + "qKr qu 1\n" + "yjJ ij 1\n" + "rJm er 1\n" + "Vxc ch 1\n" + "Bxn an 1\n" + "cnQ ch 1\n" + "qkQ qu 1\n" + "Nlw le 1\n" + "hWv th 1\n" + "wdU de 1\n" + "qtB th 1\n" + "qIe qu 1\n" + "qeY qu 1\n" + "Zrp er 1\n" + "Nhd th 1\n" + "fDp po 1\n" + "Cnj an 1\n" + "kxU ka 1\n" + "Bqv qu 1\n" + "vXr er 1\n" + "kBx ka 1\n" + "fBn an 1\n" + "pMx pr 1\n" + "kxR ka 1\n" + "Lzg ng 1\n" + "jBh th 1\n" + "Fjn an 1\n" + "wpC pr 1\n" + "fKy ny 1\n" + "hwD th 1\n" + "fqf qu 1\n" + "qBy qu 1\n" + "Ycq ch 1\n" + "Nns an 1\n" + "jmZ ij 1\n" + "gKw ng 1\n" 
+ "dqA qu 1\n" + "Bjg ng 1\n" + "fGx fo 1\n" + "Lnp an 1\n" + "whU th 1\n" + "qPd qu 1\n" + "yMx ny 1\n" + "wEj ij 1\n" + "kmJ ka 1\n" + "Qsx st 1\n" + "lCw le 1\n" + "Qqb qu 1\n" + "hvJ th 1\n" + "xkN ka 1\n" + "uVg qu 1\n" + "sQm st 1\n" + "uJp qu 1\n" + "Yzn an 1\n" + "cXh th 1\n" + "srI er 1\n" + "tBz th 1\n" + "cRj ch 1\n" + "yIw wa 1\n" + "jHg ng 1\n" + "xFp pr 1\n" + "wJq qu 1\n" + "qdF qu 1\n" + "vKv va 1\n" + "sHc ch 1\n" + "hBf th 1\n" + "jDy ij 1\n" + "Gjx ij 1\n" + "Fkd de 1\n" + "Hhz th 1\n" + "xSg ng 1\n" + "jFf ij 1\n" + "qvM qu 1\n" + "oRw on 1\n" + "xgX ng 1\n" + "gjF ng 1\n" + "qDz qu 1\n" + "Ycf ch 1\n" + "Xcw ch 1\n" + "nfQ an 1\n" + "qGs qu 1\n" + "kGs st 1\n" + "fxV fo 1\n" + "iPj in 1\n" + "qgP qu 1\n" + "jIv ij 1\n" + "Vhu th 1\n" + "Bzj sz 1\n" + "Jvg ng 1\n" + "Vjf ij 1\n" + "wTq qu 1\n" + "pDw pr 1\n" + "Ysv st 1\n" + "ztV th 1\n" + "mtZ th 1\n" + "jFy ij 1\n" + "gqC qu 1\n" + "Vsg ng 1\n" + "gjS ng 1\n" + "vXz sz 1\n" + "bpK pr 1\n" + "nDq an 1\n" + "sKx st 1\n" + "xYg ng 1\n" + "fZd de 1\n" + "pxf pr 1\n" + "jqS qu 1\n" + "hTb th 1\n" + "Nkq qu 1\n" + "qpH qu 1\n" + "vEz sz 1\n" + "vqP qu 1\n" + "vHw va 1\n" + "Dkp ka 1\n" + "cqY ch 1\n" + "mqS qu 1\n" + "sVt th 1\n" + "Pxh th 1\n" + "hxN th 1\n" + "yTf ny 1\n" + "wCj ij 1\n" + "qQw qu 1\n" + "Vfv va 1\n" + "yQd de 1\n" + "gUc ch 1\n" + "wsQ st 1\n" + "fGw wa 1\n" + "wKf wa 1\n" + "wwB wa 1\n" + "vFt th 1\n" + "twQ th 1\n" + "nrB an 1\n" + "lpY le 1\n" + "xlR le 1\n" + "fdK de 1\n" + "eFz er 1\n" + "jyQ ij 1\n" + "lwT le 1\n" + "xCw wa 1\n" + "cgM ch 1\n" + "wtV th 1\n" + "aqJ an 1\n" + "bXu qu 1\n" + "qdQ qu 1\n" + "Yxd de 1\n" + "xcS ch 1\n" + "nmV an 1\n" + "rQd er 1\n" + "Glk le 1\n" + "qEm qu 1\n" + "uvO qu 1\n" + "svF st 1\n" + "sJx st 1\n" + "Qyg ng 1\n" + "mXh th 1\n" + "btD th 1\n" + "wGc ch 1\n" + "fZo on 1\n" + "Evx va 1\n" + "vzD sz 1\n" + "ufC qu 1\n" + "Pxq qu 1\n" + "qdt th 1\n" + "rKz er 1\n" + "Jhh th 1\n" + "Cxk ka 1\n" + "qxR qu 1\n" + "gTl ng 1\n" + "qGf qu 1\n" + 
"wYh th 1\n" + "cEh th 1\n" + "bzU sz 1\n" + "zWq qu 1\n" + "rWb er 1\n" + "Wrp er 1\n" + "sLc ch 1\n" + "Jpu qu 1\n" + "Jkf ka 1\n" + "vgE ng 1\n" + "Bqk qu 1\n" + "oQs on 1\n" + "kbZ ka 1\n" + "rVf er 1\n" + "qLw qu 1\n" + "Lrc ch 1\n" + "xsR st 1\n" + "hwB th 1\n" + "Qnk an 1\n" + "cPz ch 1\n" + "Ucq ch 1\n" + "egJ ng 1\n" + "Qyq qu 1\n" + "Xwr pr 1\n" + "xfD fo 1\n" + "wyH wa 1\n" + "lBw le 1\n" + "Mdx de 1\n" + "Qsy st 1\n" + "zqV qu 1\n" + "vpY va 1\n" + "slY le 1\n" + "wgL ng 1\n" + "snN an 1\n" + "hVd th 1\n" + "yKx ny 1\n" + "bdW de 1\n" + "lqL qu 1\n" + "yhD th 1\n" + "tNz th 1\n" + "zJg ng 1\n" + "kIx ka 1\n" + "fHp pr 1\n" + "yrJ er 1\n" + "lrR er 1\n" + "wzY sz 1\n" + "pgB pr 1\n" + "mfC me 1\n" + "qkL qu 1\n" + "jUu qu 1\n" + "qCh th 1\n" + "zlN le 1\n" + "Bgj ng 1\n" + "gcE ch 1\n" + "zRx sz 1\n" + "jhN th 1\n" + "eGz er 1\n" + "Fpq qu 1\n" + "Wvi in 1\n" + "mBf me 1\n" + "hhW th 1\n" + "oUq qu 1\n" + "dxQ de 1\n" + "Whq th 1\n" + "rMk er 1\n" + "lWd le 1\n" + "xWz sz 1\n" + "oQn an 1\n" + "mWx me 1\n" + "nuV an 1\n" + "wWz sz 1\n" + "hvR th 1\n" + "Zwd de 1\n" + "smJ st 1\n" + "Hlh th 1\n" + "sJh th 1\n" + "zmY sz 1\n" + "hZn th 1\n" + "Vjg ng 1\n" + "Jhz th 1\n" + "mqR qu 1\n" + "hcO th 1\n" + "dqL qu 1\n" + "Bfh th 1\n" + "pkV ka 1\n" + "tBx th 1\n" + "Hkc ch 1\n" + "Kqm qu 1\n" + "qWv qu 1\n" + "lXy le 1\n" + "yRd de 1\n" + "mjH ij 1\n" + "qzA qu 1\n" + "qxm qu 1\n" + "Qvm va 1\n" + "gcM ch 1\n" + "xqx qu 1\n" + "kKv ka 1\n" + "yoX po 1\n" + "xrT er 1\n" + "cWq ch 1\n" + "jqW qu 1\n" + "sWj st 1\n" + "Sdw de 1\n" + "dfR de 1\n" + "Kqn an 1\n" + "Gjd do 1\n" + "Qbd de 1\n" + "yyK ny 1\n" + "xmX me 1\n" + "xuF qu 1\n" + "yVg ng 1\n" + "qoO qu 1\n" + "Glq qu 1\n" + "Mkx ka 1\n" + "xLb be 1\n" + "gMr ng 1\n" + "sCp st 1\n" + "bGh th 1\n" + "cXo ch 1\n" + "zTz sz 1\n" + "qkC qu 1\n" + "hTp th 1\n" + "qNf qu 1\n" + "mXk ka 1\n" + "xcZ ch 1\n" + "jVm ij 1\n" + "bIi in 1\n" + "qnH an 1\n" + "nwC an 1\n" + "dSg ng 1\n" + "qoD qu 1\n" + "tDx th 1\n" + "jdU 
de 1\n" + "Xmw me 1\n" + "kNh th 1\n" + "jYr er 1\n" + "Ygp ng 1\n" + "blJ le 1\n" + "mFv va 1\n" + "Sxr er 1\n" + "Fzl le 1\n" + "jTq qu 1\n" + "cIp pr 1\n" + "ajY an 1\n" + "yYb be 1\n" + "rKb er 1\n" + "pzB sz 1\n" + "eIy er 1\n" + "wfK wa 1\n" + "Fmh th 1\n" + "ufL qu 1\n" + "Xlm le 1\n" + "Czg ng 1\n" + "lPq qu 1\n" + "tqV th 1\n" + "wFy wa 1\n" + "bQc ch 1\n" + "kVw ka 1\n" + "nMh th 1\n" + "cCj ch 1\n" + "oeE er 1\n" + "wHf wa 1\n" + "fNf fo 1\n" + "mXv va 1\n" + "Nkg ng 1\n" + "jWc ch 1\n" + "zFj sz 1\n" + "Kfx fo 1\n" + "bgY ng 1\n" + "lYz le 1\n" + "cgD ch 1\n" + "pgM ng 1\n" + "fhH th 1\n" + "jrD er 1\n" + "jwA ij 1\n" + "jyM ij 1\n" + "vzC sz 1\n" + "lQd le 1\n" + "zcH ch 1\n" + "lbX le 1\n" + "vzG sz 1\n" + "mSr er 1\n" + "xYf fo 1\n" + "qgB qu 1\n" + "jYk ij 1\n" + "dIq qu 1\n" + "wpG pr 1\n" + "hVk th 1\n" + "Tjb ij 1\n" + "zvP sz 1\n" + "bZg ng 1\n" + "bFg ng 1\n" + "kfU ka 1\n" + "Sxz sz 1\n" + "fwF wa 1\n" + "Qwg ng 1\n" + "fWb be 1\n" + "jqQ ij 1\n" + "Vfx fo 1\n" + "cJj ch 1\n" + "zwJ sz 1\n" + "xBg ng 1\n" + "Ddm de 1\n" + "bWv va 1\n" + "zpG sz 1\n" + "xrQ er 1\n" + "hcS th 1\n" + "wHn an 1\n" + "hIy th 1\n" + "Yxj ij 1\n" + "sdC st 1\n" + "yVu qu 1\n" + "qjf qu 1\n" + "Tzy sz 1\n" + "Ffn an 1\n" + "zzX sz 1\n" + "Hdx de 1\n" + "gLg ng 1\n" + "Yqg qu 1\n" + "fLb be 1\n" + "lQc ch 1\n" + "vjG ij 1\n" + "wpL pr 1\n" + "cJr ch 1\n" + "aJq an 1\n" + "Ynq an 1\n" + "Wvc ch 1\n" + "lKy le 1\n" + "eYq qu 1\n" + "kxL ka 1\n" + "gCb ng 1\n" + "sRd st 1\n" + "rMd er 1\n" + "Bvh th 1\n" + "kKg ng 1\n" + "wlK le 1\n" + "mDd de 1\n" + "zkJ sz 1\n" + "vRc ch 1\n" + "Xlh th 1\n" + "pRk ka 1\n" + "xvN va 1\n" + "nxI an 1\n" + "fCx fo 1\n" + "Ybt th 1\n" + "Ebq qu 1\n" + "bkN ka 1\n" + "bQy be 1\n" + "rDw er 1\n" + "djJ de 1\n" + "tmM th 1\n" + "nwH an 1\n" + "hJz th 1\n" + "lcM ch 1\n" + "ozV on 1\n" + "mLd de 1\n" + "bKc ch 1\n" + "eZf er 1\n" + "Fhg th 1\n" + "Zcj ch 1\n" + "pLr er 1\n" + "wqs qu 1\n" + "bXi in 1\n" + "tgD th 1\n" + "hQc th 1\n" + "zDp sz 
1\n" + "oDg ng 1\n" + "sgM ng 1\n" + "bnD an 1\n" + "gHp ng 1\n" + "Wkf ka 1\n" + "qIs qu 1\n" + "wLd de 1\n" + "ztN th 1\n" + "gdQ ng 1\n" + "wCm ow 1\n" + "vVf va 1\n" + "Jmw me 1\n" + "hbC th 1\n" + "srW er 1\n" + "nxN an 1\n" + "pVs st 1\n" + "uWq qu 1\n" + "hgM th 1\n" + "lBc ch 1\n" + "wUo on 1\n" + "flH le 1\n" + "yWg ng 1\n" + "jjN ij 1\n" + "Uwn an 1\n" + "nYj an 1\n" + "mtN th 1\n" + "Pgp ng 1\n" + "zFc ch 1\n" + "oXz on 1\n" + "iCg ng 1\n" + "Lpc ch 1\n" + "Gqd qu 1\n" + "rYc ch 1\n" + "vqA qu 1\n" + "Vhc th 1\n" + "zmF sz 1\n" + "Bpc ch 1\n" + "Jfq qu 1\n" + "oXv on 1\n" + "lgX ng 1\n" + "Jfx fo 1\n" + "zpS sz 1\n" + "gcO ch 1\n" + "xwQ wa 1\n" + "pkQ ka 1\n" + "wOc ch 1\n" + "Wgm ng 1\n" + "cOj ch 1\n" + "Nft th 1\n" + "pqN qu 1\n" + "qsB qu 1\n" + "ydH de 1\n" + "qRs qu 1\n" + "ykX ka 1\n" + "cDq ch 1\n" + "mfU me 1\n" + "xzM sz 1\n" + "vGt th 1\n" + "fuW qu 1\n" + "lqG qu 1\n" + "Tqp qu 1\n" + "zvD sz 1\n" + "wWb wa 1\n" + "Fzi in 1\n" + "qpK qu 1\n" + "oyq qu 1\n" + "gQe ng 1\n" + "Zmw me 1\n" + "qYp qu 1\n" + "Wvf va 1\n" + "aQl an 1\n" + "oqO qu 1\n" + "eqJ qu 1\n" + "nvT an 1\n" + "fUk ka 1\n" + "ibH in 1\n" + "jvZ ij 1\n" + "Wwz sz 1\n" + "lgY ng 1\n" + "eFp er 1\n" + "Xgx ng 1\n" + "fYs st 1\n" + "kZs st 1\n" + "vpD va 1\n" + "qcZ ch 1\n" + "Bqo qu 1\n" + "jLb ij 1\n" + "rwX er 1\n" + "fyK ny 1\n" + "Sxv va 1\n" + "sxZ st 1\n" + "wkK ka 1\n" + "yJp pr 1\n" + "tjT th 1\n" + "qPv qu 1\n" + "yZj ij 1\n" + "Rrm er 1\n" + "nhJ th 1\n" + "vqJ qu 1\n" + "yxY ny 1\n" + "vsE st 1\n" + "fkK ka 1\n" + "fuY qu 1\n" + "zQo on 1\n" + "Xvr er 1\n" + "mMq qu 1\n" + "Oqm qu 1\n" + "Dxs st 1\n" + "Lqa an 1\n" + "Wnh th 1\n" + "jmG ij 1\n" + "Wqa an 1\n" + "mhT th 1\n" + "bgZ ng 1\n" + "vmO va 1\n" + "zFm sz 1\n" + "Khk th 1\n" + "yqB qu 1\n" + "nVv an 1\n" + "Rft th 1\n" + "zmL sz 1\n" + "hdD th 1\n" + "nWp an 1\n" + "vvO va 1\n" + "dYp de 1\n" + "ohX th 1\n" + "qoU qu 1\n" + "rjB er 1\n" + "Dwc ch 1\n" + "aWq an 1\n" + "clD ch 1\n" + "Vdk de 1\n" + "twM th 1\n" 
+ "fZz sz 1\n" + "wQp pr 1\n" + "dwD de 1\n" + "iYv in 1\n" + "Awv va 1\n" + "pgG ng 1\n" + "Xoq qu 1\n" + "krQ er 1\n" + "Vxg ng 1\n" + "lwB le 1\n" + "Pxw wa 1\n" + "Jwf wa 1\n" + "zLh th 1\n" + "btH th 1\n" + "pwY pr 1\n" + "Mjd de 1\n" + "Xrh th 1\n" + "qXu un 1\n" + "Eqy qu 1\n" + "Bpy pr 1\n" + "znY an 1\n" + "Rqd qu 1\n" + "nQf an 1\n" + "Zvw va 1\n" + "zjO sz 1\n" + "wNd de 1\n" + "lIq qu 1\n" + "vMq qu 1\n" + "Gqt th 1\n" + "lMf le 1\n" + "Jqn an 1\n" + "fVw wa 1\n" + "qvQ qu 1\n" + "eHk er 1\n" + "jbK ij 1\n" + "fWs st 1\n" + "qTk qu 1\n" + "znF an 1\n" + "yxO ny 1\n" + "Fqr qu 1\n" + "nFb an 1\n" + "oDp on 1\n" + "jUc ch 1\n" + "qHg qu 1\n" + "gGq qu 1\n" + "qPs qu 1\n" + "jHv ij 1\n" + "Iwj ij 1\n" + "vzV sz 1\n" + "yUq qu 1\n" + "jQt th 1\n" + "sFb st 1\n" + "Lvg ng 1\n" + "zTt th 1\n" + "bvK va 1\n" + "Ccx ch 1\n" + "jyA ij 1\n" + "yEj ij 1\n" + "zdG sz 1\n" + "tqT th 1\n" + "qbH qu 1\n" + "nHd an 1\n" + "Hhj th 1\n" + "jVb ij 1\n" + "uHw un 1\n" + "Zck ch 1\n" + "gPq qu 1\n" + "mxq qu 1\n" + "wHs st 1\n" + "fDy ny 1\n" + "tlV th 1\n" + "Lsv st 1\n" + "zvF va 1\n" + "mqx qu 1\n" + "nqF an 1\n" + "xgM ng 1\n" + "gyq qu 1\n" + "grJ ng 1\n" + "jSq qu 1\n" + "Mmw me 1\n" + "Cgx ng 1\n" + "Rlr er 1\n" + "mvG va 1\n" + "fuA qu 1\n" + "uVh th 1\n" + "sMz st 1\n" + "wWr er 1\n" + "qpD qu 1\n" + "hQw th 1\n" + "xBc ch 1\n" + "fcW ch 1\n" + "hxL th 1\n" + "rfK er 1\n" + "mFn an 1\n" + "Qnw an 1\n" + "tjB th 1\n" + "Rkx ka 1\n" + "srE er 1\n" + "drG er 1\n" + "Cfy ny 1\n" + "yZw wa 1\n" + "Wxw wa 1\n" + "zCp sz 1\n" + "jZt th 1\n" + "Nqf qu 1\n" + "jgO ng 1\n" + "fWc ch 1\n" + "qrN qu 1\n" + "Nzj sz 1\n" + "Hjy ij 1\n" + "Uxy ny 1\n" + "oIy on 1\n" + "rfX er 1\n" + "oBw on 1\n" + "yyV ny 1\n" + "Qiv in 1\n" + "dKh th 1\n" + "qDk qu 1\n" + "tgQ th 1\n" + "xNw wa 1\n" + "qdL qu 1\n" + "ovY on 1\n" + "fbZ be 1\n" + "qiI qu 1\n" + "bvT va 1\n" + "jYq qu 1\n" + "kbK ka 1\n" + "Mfn an 1\n" + "Rpd de 1\n" + "pHb pr 1\n" + "qqO qu 1\n" + "vkV ka 1\n" + "sWp st 1\n" + 
"kPf ka 1\n" + "qLy qu 1\n" + "qoE qu 1\n" + "wLh th 1\n" + "zhV th 1\n" + "bpL pr 1\n" + "Tqf qu 1\n" + "pzG sz 1\n" + "kcT ch 1\n" + "wjX ij 1\n" + "kPy ku 1\n" + "fdB de 1\n" + "Qxs st 1\n" + "gYf ng 1\n" + "Ypx pr 1\n" + "zSk sz 1\n" + "tDg th 1\n" + "xbJ be 1\n" + "yfO ny 1\n" + "uQf qu 1\n" + "bpQ pr 1\n" + "dXc ch 1\n" + "lwP le 1\n" + "vTs st 1\n" + "Jlq qu 1\n" + "Cqw qu 1\n" + "bWy be 1\n" + "cUq ch 1\n" + "Ybk ka 1\n" + "wyq qu 1\n" + "jhq th 1\n" + "xUy ny 1\n" + "Ncj ch 1\n" + "kMh th 1\n" + "vZy va 1\n" + "zcq ch 1\n" + "Qsr er 1\n" + "Lhx th 1\n" + "Gcj ch 1\n" + "uQt th 1\n" + "wYn an 1\n" + "dYm de 1\n" + "Qvx va 1\n" + "Rcg ch 1\n" + "qGz qu 1\n" + "bxJ be 1\n" + "jFg ng 1\n" + "xLp pr 1\n" + "lDn an 1\n" + "wqS qu 1\n" + "bIq qu 1\n" + "tBm th 1\n" + "bQs st 1\n" + "zJb sz 1\n" + "jfJ ij 1\n" + "qTc ch 1\n" + "kbX ka 1\n" + "Hlz le 1\n" + "puQ qu 1\n" + "hKb th 1\n" + "rBb er 1\n" + "vpW va 1\n" + "Yjk ij 1\n" + "Wnm an 1\n" + "pZr er 1\n" + "ldZ le 1\n" + "gMm ng 1\n" + "pZf pi 1\n" + "eYp er 1\n" + "vTp va 1\n" + "Gkc ch 1\n" + "Cgy ng 1\n" + "qDw qu 1\n" + "gxW ng 1\n" + "Cwz sz 1\n" + "jhY th 1\n" + "Fvk ka 1\n" + "nfH an 1\n" + "zcW ch 1\n" + "zgC ng 1\n" + "Dfk ka 1\n" + "vpJ va 1\n" + "Wpj ij 1\n" + "sCb st 1\n" + "fgF ng 1\n" + "tPx th 1\n" + "oCp on 1\n" + "Nrx er 1\n" + "Hwm me 1\n" + "fRp pr 1\n" + "aeX an 1\n" + "jdI de 1\n" + "sBv st 1\n" + "vOv va 1\n" + "gQt th 1\n" + "Wmk ka 1\n" + "Pqj qu 1\n" + "khV th 1\n" + "Hkj ij 1\n" + "hbB th 1\n" + "vzF sz 1\n" + "Ybz sz 1\n" + "sXb st 1\n" + "yQr er 1\n" + "hhV th 1\n" + "tgW th 1\n" + "bXo on 1\n" + "Nxp pr 1\n" + "aOx an 1\n" + "zfb sz 1\n" + "Qxp pr 1\n" + "qwQ qu 1\n" + "fjV ij 1\n" + "hjY ij 1\n" + "wtX th 1\n" + "jgU ng 1\n" + "nMq an 1\n" + "Nwx wa 1\n" + "vPg ng 1\n" + "Xfh th 1\n" + "yFf ny 1\n" + "fHz sz 1\n" + "nZf an 1\n" + "jPt th 1\n" + "Jgb ng 1\n" + "xBb bi 1\n" + "sjO st 1\n" + "wDx wa 1\n" + "njN an 1\n" + "ohF th 1\n" + "pqR qu 1\n" + "Fzw sz 1\n" + "qrU qu 1\n" + "cjG 
ch 1\n" + "kFv ka 1\n" + "zQd sz 1\n" + "vbE vi 1\n" + "Ujt th 1\n" + "qIb qu 1\n" + "cFt th 1\n" + "bvY va 1\n" + "Szq qu 1\n" + "wlH le 1\n" + "qcY ch 1\n" + "gEw ng 1\n" + "xhL th 1\n" + "kVg ng 1\n" + "bfH be 1\n" + "Nrz er 1\n" + "sJn an 1\n" + "bWn an 1\n" + "nvK an 1\n" + "qiH qu 1\n" + "qbS qu 1\n" + "vxB va 1\n" + "tvT th 1\n" + "Nrh th 1\n" + "lYx le 1\n" + "tkX th 1\n" + "Gzx sz 1\n" + "vCx vi 1\n" + "Zbj ij 1\n" + "mWp me 1\n" + "Dqx qu 1\n" + "pfE pr 1\n" + "hvW th 1\n" + "Eox on 1\n" + "dbZ de 1\n" + "lNb le 1\n" + "rTd er 1\n" + "ljQ le 1\n" + "Vvp va 1\n" + "gJw ng 1\n" + "uqW qu 1\n" + "Gjf ij 1\n" + "pDd de 1\n" + "sgQ ng 1\n" + "hkQ th 1\n" + "fJc ch 1\n" + "mdI de 1\n" + "Gcp ch 1\n" + "pXa an 1\n" + "pQj ij 1\n" + "bgE ng 1\n" + "Kzv sz 1\n" + "cPb ch 1\n" + "Hcz ch 1\n" + "djQ de 1\n" + "pGd de 1\n" + "fyE ny 1\n" + "dBb de 1\n" + "ePj er 1\n" + "fgO ng 1\n" + "xRq qu 1\n" + "xqK qu 1\n" + "pKp pr 1\n" + "xmY me 1\n" + "hgO th 1\n" + "wdG de 1\n" + "hvZ th 1\n" + "srF er 1\n" + "Bvf vi 1\n" + "yvD va 1\n" + "xVg ng 1\n" + "fYg ng 1\n" + "bqd qu 1\n" + "eFq qu 1\n" + "cwZ ch 1\n" + "cqG ch 1\n" + "sKp st 1\n" + "hJq th 1\n" + "vLd de 1\n" + "hdK th 1\n" + "pcN ch 1\n" + "tNf th 1\n" + "xlK le 1\n" + "rJx er 1\n" + "qaN an 1\n" + "zKf sz 1\n" + "sNf st 1\n" + "qPz qu 1\n" + "bzL sz 1\n" + "Jdw de 1\n" + "nRb an 1\n" + "jNs st 1\n" + "tnV th 1\n" + "ynI an 1\n" + "tZp th 1\n" + "fZp pr 1\n" + "wMq qu 1\n" + "Onq an 1\n" + "zIh th 1\n" + "bvH va 1\n" + "Uvc ch 1\n" + "zxJ sz 1\n" + "Vmq qu 1\n" + "uPm qu 1\n" + "mwD me 1\n" + "jQc ch 1\n" + "gPk ng 1\n" + "vfV va 1\n" + "Tql qu 1\n" + "bJl le 1\n" + "lwO le 1\n" + "wbG wa 1\n" + "fTd de 1\n" + "Xtq th 1\n" + "hzX th 1\n" + "Pzv sz 1\n" + "Pmx me 1\n" + "xZm me 1\n" + "jCp ij 1\n" + "bKm me 1\n" + "Tmq qu 1\n" + "Hnf an 1\n" + "kjX ij 1\n" + "vgH ng 1\n" + "fSm me 1\n" + "ylN le 1\n" + "gvq qu 1\n" + "jTz sz 1\n" + "tWw th 1\n" + "ywB wa 1\n" + "bCq qu 1\n" + "dNk de 1\n" + "yCq qu 1\n" + "Rxj ij 
1\n" + "nTq an 1\n" + "gFs ng 1\n" + "Xwq qu 1\n" + "gJl ng 1\n" + "vcR ch 1\n" + "fbT be 1\n" + "Fcd ch 1\n" + "Wxm me 1\n" + "qwv qu 1\n" + "Sfh th 1\n" + "lcK ch 1\n" + "sbV st 1\n" + "fSf fo 1\n" + "lbB le 1\n" + "Ocw ch 1\n" + "jgM ng 1\n" + "nbI an 1\n" + "qsK qu 1\n" + "Xyf ny 1\n" + "pxv va 1\n" + "mRc ch 1\n" + "Ogq qu 1\n" + "zuY qu 1\n" + "fXu qu 1\n" + "Wbj ij 1\n" + "Tbw wa 1\n" + "zrR er 1\n" + "gmP ng 1\n" + "cCm ch 1\n" + "gtQ th 1\n" + "phG th 1\n" + "qjV qu 1\n" + "ygG ng 1\n" + "wFb wa 1\n" + "rqL qu 1\n" + "qSx qu 1\n" + "ybK be 1\n" + "mqJ qu 1\n" + "Qrq qu 1\n" + "qdI qu 1\n" + "bcG ch 1\n" + "iFb in 1\n" + "mcZ ch 1\n" + "vCz sz 1\n" + "xHz tz 1\n" + "hjM th 1\n" + "qtL th 1\n" + "tmH th 1\n" + "slD le 1\n" + "vRz sz 1\n" + "gCd ng 1\n" + "Xxc ch 1\n" + "qKc ch 1\n" + "sIw st 1\n" + "fsY st 1\n" + "xrJ er 1\n" + "tNs th 1\n" + "gbD ng 1\n" + "wLl le 1\n" + "hFf th 1\n" + "Nxi in 1\n" + "fRb be 1\n" + "Jrb er 1\n" + "jEq qu 1\n" + "hwM th 1\n" + "uVw qu 1\n" + "fgN ng 1\n" + "mAo on 1\n" + "Pjb ij 1\n" + "npP in 1\n" + "Jcy ch 1\n" + "yJb bi 1\n" + "jxI ij 1\n" + "Kkc ch 1\n" + "kwV ka 1\n" + "gRf ng 1\n" + "Wfm me 1\n" + "Tdp po 1\n" + "wEz sz 1\n" + "Lvk ka 1\n" + "Dqn an 1\n" + "tqL th 1\n" + "jJq qu 1\n" + "vdC de 1\n" + "hxU th 1\n" + "xUe er 1\n" + "tQc th 1\n" + "Lzk sz 1\n" + "dTj de 1\n" + "Tlz le 1\n" + "xQw wa 1\n" + "Fcq ch 1\n" + "wgE ng 1\n" + "Ckd de 1\n" + "yKs st 1\n" + "xwS wa 1\n" + "wRt th 1\n" + "gkK ng 1\n" + "hQv th 1\n" + "sLp st 1\n" + "jAi in 1\n" + "dmG de 1\n" + "jKn an 1\n" + "qUb qu 1\n" + "wXy wa 1\n" + "bzJ sz 1\n" + "gzJ ng 1\n" + "hNz th 1\n" + "ygY ng 1\n" + "qhU th 1\n" + "afX an 1\n" + "jZw ij 1\n" + "Xdx de 1\n" + "Tdx de 1\n" + "jNn an 1\n" + "vXf va 1\n" + "qcE ch 1\n" + "Mnw an 1\n" + "qDh th 1\n" + "Tdj de 1\n" + "dgJ ng 1\n" + "sdR st 1\n" + "qGn an 1\n" + "Mjj ij 1\n" + "sxH st 1\n" + "Ppz sz 1\n" + "gfV ng 1\n" + "fOy ny 1\n" + "Nvx vi 1\n" + "qaV an 1\n" + "xjl le 1\n" + "xgZ ng 1\n" + "cGv ch 1\n" 
+ "Zxu qu 1\n" + "Mfp pr 1\n" + "zFp sz 1\n" + "jgJ ng 1\n" + "bpG pr 1\n" + "vKz sz 1\n" + "hqI th 1\n" + "Qgw ng 1\n" + "Qyy ny 1\n" + "jmI ij 1\n" + "Vgd ng 1\n" + "xCt th 1\n" + "yVs st 1\n" + "uEq qu 1\n" + "dcN ch 1\n" + "Bzb sz 1\n" + "gVl ng 1\n" + "sXg ng 1\n" + "kQf ka 1\n" + "lrY er 1\n" + "Vtd th 1\n" + "nHs an 1\n" + "wjN ij 1\n" + "rzJ er 1\n" + "sYy st 1\n" + "wxQ wa 1\n" + "Ztb th 1\n" + "tWf th 1\n" + "tCx th 1\n" + "aFb an 1\n" + "lqf qu 1\n" + "feZ er 1\n" + "fPz sz 1\n" + "cjY ch 1\n" + "wKh th 1\n" + "Qhy th 1\n" + "dCj de 1\n" + "bkH ka 1\n" + "yjD ij 1\n" + "jTs st 1\n" + "hxI th 1\n" + "lvK vi 1\n" + "Lwz sz 1\n" + "swQ st 1\n" + "dTk di 1\n" + "fsO st 1\n" + "ljE le 1\n" + "wjM ij 1\n" + "uQk qu 1\n" + "xPg ng 1\n" + "vmC va 1\n" + "qsD qu 1\n" + "gDw ng 1\n" + "wJk ka 1\n" + "Zpq qu 1\n" + "Yhg th 1\n" + "kNc ch 1\n" + "bWl le 1\n" + "Fwh th 1\n" + "fHx fo 1\n" + "Fnv an 1\n" + "fdL de 1\n" + "oqD qu 1\n" + "aYx an 1\n" + "Vqx qu 1\n" + "vKf va 1\n" + "Cbw wa 1\n" + "vyq qu 1\n" + "cqZ ch 1\n" + "Rfh th 1\n" + "Swc ch 1\n" + "qNi qu 1\n" + "qoW qu 1\n" + "jhD th 1\n" + "kJq qu 1\n" + "gdF ng 1\n" + "pvF va 1\n" + "cpV ch 1\n" + "qtC th 1\n" + "gWm ng 1\n" + "gPc ch 1\n" + "jBs st 1\n" + "rlV er 1\n" + "gZc ch 1\n" + "kTk ka 1\n" + "hfJ th 1\n" + "Svv va 1\n" + "kmG ka 1\n" + "sDq qu 1\n" + "hGb th 1\n" + "Blq qu 1\n" + "Qry er 1\n" + "hHz th 1\n" + "yLx ny 1\n" + "lqF qu 1\n" + "wbB bi 1\n" + "iYr in 1\n" + "wDz tz 1\n" + "xsJ st 1\n" + "bzY sz 1\n" + "pMw pr 1\n" + "Uuj qu 1\n" + "hxK th 1\n" + "Xvf va 1\n" + "krZ er 1\n" + "fwV wa 1\n" + "gPw ng 1\n" + "qVn an 1\n" + "Qnq an 1\n" + "gDb ng 1\n" + "hVr th 1\n" + "zKh th 1\n" + "Fxy ny 1\n" + "oZj on 1\n" + "zAy sz 1\n" + "jMm ij 1\n" + "mvI va 1\n" + "Fwm me 1\n" + "zql qu 1\n" + "eVv er 1\n" + "yWq qu 1\n" + "Lwk ka 1\n" + "Lmw me 1\n" + "vXb va 1\n" + "Xhs th 1\n" + "hlR th 1\n" + "Qqw qu 1\n" + "zbK sz 1\n" + "Pxl le 1\n" + "nPm an 1\n" + "wQo on 1\n" + "Dcb ch 1\n" + "hjT th 1\n" + 
"rjJ er 1\n" + "bMc ch 1\n" + "iYb in 1\n" + "Fqj qu 1\n" + "Uoq qu 1\n" + "Xvp va 1\n" + "Lwb wa 1\n" + "Jpd de 1\n" + "qUg qu 1\n" + "lJx le 1\n" + "Xwd de 1\n" + "xKf fo 1\n" + "Znq an 1\n" + "qCb qu 1\n" + "Zbz sz 1\n" + "Qux qu 1\n" + "qNq qu 1\n" + "fvV va 1\n" + "Qqz qu 1\n" + "Hdf de 1\n" + "ySx ny 1\n" + "qSm qu 1\n" + "Lhb th 1\n" + "Mvf va 1\n" + "cDp ch 1\n" + "bHq qu 1\n" + "Wmg ng 1\n" + "ytG th 1\n" + "dbJ de 1\n" + "Ffg ng 1\n" + "hvM th 1\n" + "Wqy qu 1\n" + "gXd ng 1\n" + "uFg qu 1\n" + "jpR ij 1\n" + "Xcc ch 1\n" + "Tbp pr 1\n" + "Qwq qu 1\n" + "tPp th 1\n" + "fMh th 1\n" + "qiV qu 1\n" + "dcB ch 1\n" + "dFx de 1\n" + "Ymj ij 1\n" + "Ldq qu 1\n" + "lxV le 1\n" + "cCk ch 1\n" + "hVx th 1\n" + "dlT le 1\n" + "khP th 1\n" + "qVg qu 1\n" + "Ljj ij 1\n" + "zCv sz 1\n" + "ywV wa 1\n" + "ybZ be 1\n" + "vGh th 1\n" + "Bvj ij 1\n" + "Zqq qu 1\n" + "Gwk ka 1\n" + "qLq qu 1\n" + "fkX ka 1\n" + "Nbz sz 1\n" + "bXm me 1\n" + "dQh th 1\n" + "uYd qu 1\n" + "xYs st 1\n" + "zSs st 1\n" + "ycZ ch 1\n" + "lnU an 1\n" + "tCj th 1\n" + "xnY an 1\n" + "ptQ th 1\n" + "swO st 1\n" + "hXu th 1\n" + "mBw mb 1\n" + "wmF me 1\n" + "xJx xe 1\n" + "dXj de 1\n" + "eqg qu 1\n" + "nBf an 1\n" + "Xbd de 1\n" + "fcQ ch 1\n" + "xkS ka 1\n" + "tOq th 1\n" + "uQb qu 1\n" + "cvV ch 1\n" + "sBh th 1\n" + "dCk de 1\n" + "cKv ch 1\n" + "cVf ch 1\n" + "wZx wa 1\n" + "Bvm va 1\n" + "lqJ qu 1\n" + "fxR fo 1\n" + "vmF va 1\n" + "xnq an 1\n" + "bBg ng 1\n" + "tPd th 1\n" + "fNs st 1\n" + "Fkp ka 1\n" + "Yye er 1\n" + "Ubq qu 1\n" + "xzP sz 1\n" + "fmQ me 1\n" + "qcA ch 1\n" + "yKc ch 1\n" + "xvZ va 1\n" + "cbN ch 1\n" + "yYl le 1\n" + "Pmw me 1\n" + "wFx wa 1\n" + "hRh th 1\n" + "qpS qu 1\n" + "Vqf qu 1\n" + "Ghg th 1\n" + "Wvq qu 1\n" + "xkC ka 1\n" + "ytM th 1\n" + "Lnh th 1\n" + "dxD de 1\n" + "bMw wa 1\n" + "xvU va 1\n" + "Qzx sz 1\n" + "srM er 1\n" + "vLg ng 1\n" + "cGq ch 1\n" + "Vmy me 1\n" + "hcL th 1\n" + "pKx pr 1\n" + "Jxs st 1\n" + "blW le 1\n" + "pQo on 1\n" + "bEq qu 1\n" + "fWt 
th 1\n" + "sYm st 1\n" + "nKw an 1\n" + "dtF th 1\n" + "kTz sz 1\n" + "epX er 1\n" + "fCp pr 1\n" + "bFk ka 1\n" + "Rzb sz 1\n" + "vqI qu 1\n" + "Zhc th 1\n" + "Hvv va 1\n" + "mVt th 1\n" + "Iwx wa 1\n" + "phR th 1\n" + "wNb wa 1\n" + "fRc ch 1\n" + "ljq qu 1\n" + "lvY le 1\n" + "jcA ch 1\n" + "dGw de 1\n" + "Cqn an 1\n" + "mBx me 1\n" + "Mmx me 1\n" + "Vxa an 1\n" + "Xhw th 1\n" + "eqK qu 1\n" + "tCw th 1\n" + "zvU sz 1\n" + "lxQ le 1\n" + "vMv va 1\n" + "gqA qu 1\n" + "Jbn an 1\n" + "gCj ng 1\n" + "oTf on 1\n" + "kbW ka 1\n" + "qjY qu 1\n" + "Rqf qu 1\n" + "hYh th 1\n" + "yhE th 1\n" + "gYj ng 1\n" + "jcI ch 1\n" + "qvJ qu 1\n" + "qoC qu 1\n" + "qFc ch 1\n" + "qqH qu 1\n" + "Nxq qu 1\n" + "wVo on 1\n" + "zHv sz 1\n" + "ybS be 1\n" + "Hwc ch 1\n" + "Mxa an 1\n" + "xkL ka 1\n" + "qmO qu 1\n" + "qbR qu 1\n" + "Zfy ny 1\n" + "Rkf ka 1\n" + "vgV ng 1\n" + "hBw th 1\n" + "pXx pr 1\n" + "brQ er 1\n" + "fvO va 1\n" + "hDc th 1\n" + "xQa an 1\n" + "wfF wa 1\n" + "hZx th 1\n" + "Jgz ng 1\n" + "qnY an 1\n" + "qXl le 1\n" + "eNb er 1\n" + "fxS fo 1\n" + "sNk st 1\n" + "mFc ch 1\n" + "Uux qu 1\n" + "Ydg ng 1\n" + "ozW on 1\n" + "Xzd de 1\n" + "Jfe er 1\n" + "Ftx th 1\n" + "vzR sz 1\n" + "wZk ka 1\n" + "oHz on 1\n" + "qvT qu 1\n" + "qoA qu 1\n" + "Sdq qu 1\n" + "txW th 1\n" + "Egf ng 1\n" + "dMf de 1\n" + "Rhh th 1\n" + "vRn an 1\n" + "ujX qu 1\n" + "fRj ij 1\n" + "gjA ng 1\n" + "gDg ng 1\n" + "smZ st 1\n" + "jId de 1\n" + "qkM qu 1\n" + "bKz sz 1\n" + "sCg ng 1\n" + "uTp qu 1\n" + "lVs le 1\n" + "uQo qu 1\n" + "Jfs st 1\n" + "vKm va 1\n" + "jQh th 1\n" + "fUf fo 1\n" + "uTf qu 1\n" + "Bnv an 1\n" + "tdU th 1\n" + "dxY de 1\n" + "hgV th 1\n" + "Zdf de 1\n" + "hqS th 1\n" + "eJg ng 1\n" + "qGu un 1\n" + "vmE va 1\n" + "gKz ng 1\n" + "mUg ng 1\n" + "Vjy ij 1\n" + "uvJ qu 1\n" + "mHr er 1\n" + "Mhv th 1\n" + "zsZ st 1\n" + "Vzy sz 1\n" + "jKb ij 1\n" + "zPp sz 1\n" + "qgD qu 1\n" + "Xhf th 1\n" + "Ogp ng 1\n" + "jwX ij 1\n" + "lYy le 1\n" + "qzD qu 1\n" + "wXj jo 1\n" + "Kpx pr 
1\n" + "ydY de 1\n" + "vBq qu 1\n" + "Zpp pr 1\n" + "bDd de 1\n" + "Fjk ij 1\n" + "kdA de 1\n" + "zWt th 1\n" + "wSd de 1\n" + "kFd de 1\n" + "Sxl le 1\n" + "Fvh th 1\n" + "pbR pr 1\n" + "qrD qu 1\n" + "vZs st 1\n" + "vUm va 1\n" + "wEy wa 1\n" + "jjH jo 1\n" + "sDg ng 1\n" + "Ujc ch 1\n" + "knI an 1\n" + "fOa an 1\n" + "Cjg ng 1\n" + "tbV th 1\n" + "gqd qu 1\n" + "ePx er 1\n" + "wRm me 1\n" + "pvG va 1\n" + "Qyl le 1\n" + "cwG ch 1\n" + "Dtq th 1\n" + "Pbz sz 1\n" + "Rgq qu 1\n" + "fjU ij 1\n" + "jJf ij 1\n" + "Rxq qu 1\n" + "Jtx th 1\n" + "qvZ qu 1\n" + "kKm ka 1\n" + "hFm th 1\n" + "kcX ch 1\n" + "fNm me 1\n" + "bpB pr 1\n" + "xqY qu 1\n" + "hYy th 1\n" + "gGp ng 1\n" + "Vfs st 1\n" + "wDt th 1\n" + "bTs st 1\n" + "hfV th 1\n" + "qzp qu 1\n" + "yUv va 1\n" + "qGc ch 1\n" + "Vdl le 1\n" + "Xjt th 1\n" + "kMj ij 1\n" + "hTg th 1\n" + "Hlc ch 1\n" + "tKz th 1\n" + "Wvt th 1\n" + "lMz le 1\n" + "Mwx wa 1\n" + "Wlv le 1\n" + "xzG sz 1\n" + "gmD ng 1\n" + "zOi in 1\n" + "bbI be 1\n" + "bpI pr 1\n" + "fQg ng 1\n" + "pQv va 1\n" + "vEb va 1\n" + "jFz sz 1\n" + "Whf th 1\n" + "jvQ ij 1\n" + "qYx qu 1\n" + "rxM er 1\n" + "vPp va 1\n" + "fjD ij 1\n" + "Vwy wa 1\n" + "Yqc ch 1\n" + "tcW th 1\n" + "jYg ng 1\n" + "gJb ng 1\n" + "Tkc ch 1\n" + "qhj th 1\n" + "jxF ij 1\n" + "Fpz sz 1\n" + "kXh th 1\n" + "lgZ ng 1\n" + "znI an 1\n" + "qyN qu 1\n" + "vBj ij 1\n" + "jSx ij 1\n" + "cqI ch 1\n" + "qYv qu 1\n" + "Zrr er 1\n" + "sHr er 1\n" + "vrK er 1\n" + "pbH pr 1\n" + "zVh th 1\n" + "dQb de 1\n" + "lxF le 1\n" + "sgW ng 1\n" + "Ghf th 1\n" + "xpq qu 1\n" + "qhN th 1\n" + "Fsf st 1\n" + "Qga an 1\n" + "Rdp de 1\n" + "fvK va 1\n" + "Ydz de 1\n" + "wvW va 1\n" + "cPm ch 1\n" + "cQy ch 1\n" + "ywF wa 1\n" + "Ypq qu 1\n" + "Rsj st 1\n" + "Ygw ng 1\n" + "xVp pr 1\n" + "yxL ny 1\n" + "Ywl le 1\n" + "jMc ch 1\n" + "zTl le 1\n" + "aIq an 1\n" + "qQi qu 1\n" + "tqI th 1\n" + "Hvp va 1\n" + "wQd de 1\n" + "hfG th 1\n" + "cTd ch 1\n" + "bfQ be 1\n" + "Kfd de 1\n" + "cXs ch 1\n" + "vYx va 1\n" 
+ "Qoc ro 1\n" + "vrL er 1\n" + "pZk ka 1\n" + "cdX ch 1\n" + "Ygn an 1\n" + "lnO an 1\n" + "mfY me 1\n" + "fnV an 1\n" + "mbZ me 1\n" + "gbE ng 1\n" + "xjZ ij 1\n" + "Fpy pr 1\n" + "npE an 1\n" + "Rxy ny 1\n" + "oWp on 1\n" + "hVh th 1\n" + "yJf ny 1\n" + "sQd st 1\n" + "Zvg ng 1\n" + "bDm me 1\n" + "pLv va 1\n" + "wwF wa 1\n" + "xBh th 1\n" + "qKm qu 1\n" + "wXx wa 1\n" + "Iux qu 1\n" + "dgB ng 1\n" + "gJp ng 1\n" + "qgx qu 1\n" + "fNh ho 1\n" + "cvE ch 1\n" + "cgH ch 1\n" + "lNs le 1\n" + "vDj ij 1\n" + "zcG ch 1\n" + "fZn on 1\n" + "uUx qu 1\n" + "clQ le 1\n" + "fdH de 1\n" + "eZj er 1\n" + "Vqc ch 1\n" + "Rcx ch 1\n" + "jGh th 1\n" + "qzM sz 1\n" + "Qpw pr 1\n" + "Spx pr 1\n" + "cGx ch 1\n" + "cqA ch 1\n" + "vbK va 1\n" + "xeW er 1\n" + "vkC ka 1\n" + "xzB sz 1\n" + "xuR qu 1\n" + "Oyq qu 1\n" + "Mqx qu 1\n" + "qqj qu 1\n" + "yqY qu 1\n" + "cwL ch 1\n" + "pPt th 1\n" + "dSx de 1\n" + "dPk de 1\n" + "uzH qu 1\n" + "fvH va 1\n" + "pcH ch 1\n" + "hlY le 1\n" + "qtX th 1\n" + "Nvs st 1\n" + "hvL th 1\n" + "zRk sz 1\n" + "tNj th 1\n" + "Dbv va 1\n" + "jKc ch 1\n" + "dKy de 1\n" + "yVz sz 1\n" + "iqJ qu 1\n" + "zgJ ng 1\n" + "eJs er 1\n" + "wOx wa 1\n" + "rXh th 1\n" + "Hqp qu 1\n" + "vWx va 1\n" + "bTt th 1\n" + "fCy ny 1\n" + "aOq an 1\n" + "oCg ng 1\n" + "pnE an 1\n" + "Fwc ch 1\n" + "zrT er 1\n" + "xHs st 1\n" + "ydX de 1\n" + "dkV de 1\n" + "Rqy qu 1\n" + "Zyq qu 1\n" + "kXl le 1\n" + "oJt th 1\n" + "sxI st 1\n" + "qZw qu 1\n" + "zqx qu 1\n" + "clZ ch 1\n" + "swX sz 1\n" + "aHw an 1\n" + "rWc ch 1\n" + "cQp ch 1\n" + "Jwj ij 1\n" + "qeV qu 1\n" + "sQj st 1\n" + "Rpb pr 1\n" + "mZq qu 1\n" + "rBx er 1\n" + "mxV me 1\n" + "Mvy ny 1\n" + "cRl ch 1\n" + "Fzv sz 1\n" + "pBs sz 1\n" + "jWs st 1\n" + "vqK qu 1\n" + "Ixl le 1\n" + "yhw th 1\n" + "wyQ wa 1\n" + "uCb qu 1\n" + "zrF sz 1\n" + "iyQ in 1\n" + "qsP qu 1\n" + "hLr er 1\n" + "cvX ch 1\n" + "Scq ch 1\n" + "zrL er 1\n" + "ecU ch 1\n" + "Vxz sz 1\n" + "fCq qu 1\n" + "ovX on 1\n" + "Uqn an 1\n" + "sVw st 1\n" + 
"spX st 1\n" + "Qkv ka 1\n" + "fyW ny 1\n" + "rBc ch 1\n" + "mdC de 1\n" + "Wjk ij 1\n" + "jYh th 1\n" + "hXq th 1\n" + "xkm ka 1\n" + "hhU th 1\n" + "Dvz sz 1\n" + "tcq th 1\n" + "wZy wa 1\n" + "jtC th 1\n" + "qnD an 1\n" + "vmB va 1\n" + "kjB ij 1\n" + "cdG ch 1\n" + "Vkt th 1\n" + "hNq th 1\n" + "Jft th 1\n" + "iWv in 1\n" + "Wtn th 1\n" + "lfE le 1\n" + "dZb de 1\n" + "eqQ qu 1\n" + "gUq qu 1\n" + "qwL qu 1\n" + "hUq th 1\n" + "hGc th 1\n" + "nwX an 1\n" + "Nbt th 1\n" + "jjP ij 1\n" + "sqJ qu 1\n" + "lQf le 1\n" + "jZz sz 1\n" + "wWn an 1\n" + "Mxu qu 1\n" + "qFi qu 1\n" + "mjX ij 1\n" + "vDx va 1\n" + "vDn an 1\n" + "wUc ch 1\n" + "zhU th 1\n" + "zHw sz 1\n" + "Tjl le 1\n" + "xuX qu 1\n" + "jZp ij 1\n" + "wVc ch 1\n" + "gFp ng 1\n" + "Gyq qu 1\n" + "Jlh th 1\n" + "Bkf ka 1\n" + "hhJ th 1\n" + "tvW th 1\n" + "bIy ny 1\n" + "Llg ng 1\n" + "zJz sz 1\n" + "qeQ qu 1\n" + "nlX an 1\n" + "tcQ th 1\n" + "qtU th 1\n" + "fkW ka 1\n" + "gJk ng 1\n" + "gQy ng 1\n" + "sPz st 1\n" + "bmO me 1\n" + "Ytx th 1\n" + "yqF qu 1\n" + "iBk in 1\n" + "uzV qu 1\n" + "xNp pr 1\n" + "zRz sz 1\n" + "qHq qu 1\n" + "yuY qu 1\n" + "jqh th 1\n" + "xBd de 1\n" + "vvA va 1\n" + "eVj er 1\n" + "zGp sz 1\n" + "vcB ch 1\n" + "kpH ka 1\n" + "mDw me 1\n" + "vuG qu 1\n" + "vVy ny 1\n" + "mzS sz 1\n" + "jvM ij 1\n" + "sfV st 1\n" + "hQq th 1\n" + "wTm me 1\n" + "Plq qu 1\n" + "fxJ fo 1\n" + "qQq qu 1\n" + "Fnw an 1\n" + "qJo qu 1\n" + "Nsg ng 1\n" + "Ljx ij 1\n" + "sRb st 1\n" + "pcY ch 1\n" + "vVm va 1\n" + "sQg ng 1\n" + "Ywz sz 1\n" + "hqJ th 1\n" + "sjK st 1\n" + "Zks st 1\n" + "Mjt th 1\n" + "Dwh th 1\n" + "wbN wa 1\n" + "mvK va 1\n" + "rLp er 1\n" + "Lbm me 1\n" + "wjO ij 1\n" + "lQz le 1\n" + "Kwf wa 1\n" + "qmB qu 1\n" + "Xbv va 1\n" + "cKq ch 1\n" + "hqR th 1\n" + "yVb be 1\n" + "xcF ch 1\n" + "Ewv va 1\n" + "Gpq qu 1\n" + "Gbh th 1\n" + "yHj ij 1\n" + "gXk ng 1\n" + "qOx qu 1\n" + "Kbw wa 1\n" + "qHx qu 1\n" + "wjP ij 1\n" + "jQl le 1\n" + "Ffq qu 1\n" + "oYb on 1\n" + "Fqo qu 1\n" + "wXz 
sz 1\n" + "fIp pr 1\n" + "pMf pr 1\n" + "nqP an 1\n" + "bbZ be 1\n" + "hsX th 1\n" + "Wjr er 1\n" + "Zqn an 1\n" + "Pxb be 1\n" + "Bzs st 1\n" + "pbI pr 1\n" + "Yvp va 1\n" + "jxM ij 1\n" + "jyZ ij 1\n" + "mzJ sz 1\n" + "vYg ng 1\n" + "qMm qu 1\n" + "fhL th 1\n" + "qOg qu 1\n" + "Mnp an 1\n" + "Ifv va 1\n" + "qYm qu 1\n" + "gxv ng 1\n" + "zfG sz 1\n" + "fqG qu 1\n" + "lLq qu 1\n" + "hkK th 1\n" + "oYk on 1\n" + "lRg le 1\n" + "lOx le 1\n" + "Vxv va 1\n" + "qAs qu 1\n" + "tKk th 1\n" + "lhF th 1\n" + "dCv de 1\n" + "wvY va 1\n" + "wiV in 1\n" + "crF ch 1\n" + "fEp pr 1\n" + "Rrl er 1\n" + "Zjy ij 1\n" + "qbY qu 1\n" + "kMw ka 1\n" + "vZi in 1\n" + "Fxi in 1\n" + "zkS sz 1\n" + "vKb va 1\n" + "zbI sz 1\n" + "uHg qu 1\n" + "qzG qu 1\n" + "jMk ij 1\n" + "Fkc ch 1\n" + "dKm de 1\n" + "nHh th 1\n" + "xGc ch 1\n" + "qpU qu 1\n" + "rcU ch 1\n" + "aWx an 1\n" + "xdS de 1\n" + "qhV th 1\n" + "aHc ch 1\n" + "vmI va 1\n" + "Wcc ch 1\n" + "zBn an 1\n" + "kQe er 1\n" + "awJ an 1\n" + "xdD de 1\n" + "yZx ny 1\n" + "Kkd de 1\n" + "wBz sz 1\n" + "lzA le 1\n" + "yyT ny 1\n" + "qeK qu 1\n" + "zpE sz 1\n" + "zFn an 1\n" + "yyG ny 1\n" + "lLw le 1\n" + "bvS va 1\n" + "mvX va 1\n" + "hlW th 1\n" + "pgX ng 1\n" + "lQt th 1\n" + "ymY me 1\n" + "mjJ ij 1\n" + "mVc ch 1\n" + "Xqs qu 1\n" + "bKr er 1\n" + "bHt th 1\n" + "jRv ij 1\n" + "Lpw pr 1\n" + "zPb sz 1\n" + "wkR ka 1\n" + "kxS ka 1\n" + "jWf ij 1\n" + "Nkx ka 1\n" + "Kcj ch 1\n" + "bJb be 1\n" + "xwZ wa 1\n" + "Rqc ch 1\n" + "Qzg ng 1\n" + "jwH ij 1\n" + "Dqd qu 1\n" + "vLf va 1\n" + "hXd th 1\n" + "cfD ch 1\n" + "sjX st 1\n" + "hzI th 1\n" + "qUd qu 1\n" + "tSx th 1\n" + "hxA th 1\n" + "gxK ng 1\n" + "hVm th 1\n" + "yzX sz 1\n" + "Ucs ch 1\n" + "qaH an 1\n" + "Yfy ny 1\n" + "sJg ng 1\n" + "iHp in 1\n" + "iyC in 1\n" + "Tjf ij 1\n" + "dJp de 1\n" + "Jgv ng 1\n" + "uJf qu 1\n" + "nNl an 1\n" + "zdA sz 1\n" + "xIq qu 1\n" + "qjK qu 1\n" + "vzY sz 1\n" + "wqv qu 1\n" + "Xvx va 1\n" + "fJr er 1\n" + "nqH an 1\n" + "qGd qu 1\n" + "vQg ng 
1\n" + "iQz in 1\n" + "tLn th 1\n" + "lVj le 1\n" + "vqW qu 1\n" + "zrN er 1\n" + "xKz sz 1\n" + "waV an 1\n" + "Ydq qu 1\n" + "dkq qu 1\n" + "fCn an 1\n" + "Xcy ch 1\n" + "pIl le 1\n" + "hXl th 1\n" + "aFs an 1\n" + "iwM in 1\n" + "Gwx wa 1\n" + "Xlp le 1\n" + "Qfu qu 1\n" + "jqE qu 1\n" + "lqP qu 1\n" + "kVq qu 1\n" + "xqJ qu 1\n" + "Mzf sz 1\n" + "mNw me 1\n" + "Wsv st 1\n" + "fnM an 1\n" + "uSf qu 1\n" + "hCf th 1\n" + "zjH sz 1\n" + "mTs st 1\n" + "jWz sz 1\n" + "Dxk ka 1\n" + "Ztd th 1\n" + "Rvv va 1\n" + "gBx ng 1\n" + "Lzx sz 1\n" + "ezU er 1\n" + "jqH qu 1\n" + "Rjh th 1\n" + "Dcg ch 1\n" + "bBh th 1\n" + "fhO th 1\n" + "hpH th 1\n" + "Zqa an 1\n" + "kCx ka 1\n" + "rRv er 1\n" + "dkZ de 1\n" + "Ggx ng 1\n" + "pQh th 1\n" + "Gcv ch 1\n" + "Scg ch 1\n" + "vDb va 1\n" + "pbD pr 1\n" + "vEh th 1\n" + "vlE le 1\n" + "Rjl le 1\n" + "lFw le 1\n" + "zqN qu 1\n" + "aPq an 1\n" + "gjD ng 1\n" + "jcE ch 1\n" + "wSw wa 1\n" + "Dgj ng 1\n" + "huZ th 1\n" + "gPv ng 1\n" + "pJj ij 1\n" + "cQh th 1\n" + "mwq qu 1\n" + "vpA va 1\n" + "hGf th 1\n" + "cXz ch 1\n" + "Lcb ch 1\n" + "fJm me 1\n" + "Qzy sz 1\n" + "zQm sz 1\n" + "Hhn th 1\n" + "xdY de 1\n" + "uYl qu 1\n" + "Xkj ij 1\n" + "jvA ij 1\n" + "Jvp va 1\n" + "iwZ in 1\n" + "zkq qu 1\n" + "Nhb th 1\n" + "kmV ka 1\n" + "qKd qu 1\n" + "Bcq ch 1\n" + "pfY pr 1\n" + "qUj qu 1\n" + "gqR qu 1\n" + "gwO ng 1\n" + "gXm ng 1\n" + "jHh th 1\n" + "rBn an 1\n" + "uPw qu 1\n" + "pJk ka 1\n" + "Ipj ij 1\n" + "yqM qu 1\n" + "Yqn an 1\n" + "Kbz sz 1\n" + "vfL va 1\n" + "npZ an 1\n" + "oqY qu 1\n" + "Zqf qu 1\n" + "jzU sz 1\n" + "vNx va 1\n" + "hXf th 1\n" + "fCg ng 1\n" + "nzJ an 1\n" + "mKj ij 1\n" + "wmB me 1\n" + "Wjq qu 1\n" + "Dbq qu 1\n" + "zXy sz 1\n" + "xYw wa 1\n" + "fQf fo 1\n" + "dqP qu 1\n" + "Kxq qu 1\n" + "jdZ de 1\n" + "qrX qu 1\n" + "Lxb be 1\n" + "yfL ny 1\n" + "yYm me 1\n" + "sbH st 1\n" + "wlV le 1\n" + "uKp qu 1\n" + "hhN th 1\n" + "Xxq qu 1\n" + "jLg ng 1\n" + "nQh th 1\n" + "Wqp qu 1\n" + "Nqd qu 1\n" + "jfD ij 1\n" 
+ "Jnq an 1\n" + "Bzn an 1\n" + "mJr er 1\n" + "qaX an 1\n" + "pJw pr 1\n" + "jHz sz 1\n" + "yaX an 1\n" + "Whs th 1\n" + "hYr th 1\n" + "tmS th 1\n" + "Fhy th 1\n" + "Ggd ng 1\n" + "Xmy me 1\n" + "Rqh th 1\n" + "Fsn an 1\n" + "qhA th 1\n" + "fhX th 1\n" + "Hqx qu 1\n" + "wIo on 1\n" + "Ibx be 1\n" + "cFx ch 1\n" + "dRg ng 1\n" + "snV an 1\n" + "kqz qu 1\n" + "eqO er 1\n" + "Gkz sz 1\n" + "Nnz an 1\n" + "yqE qu 1\n" + "cJh th 1\n" + "xvA va 1\n" + "qMx qu 1\n" + "dwS de 1\n" + "yAj ij 1\n" + "xCq qu 1\n" + "gmE ng 1\n" + "bhP th 1\n" + "rwE er 1\n" + "Xnz an 1\n" + "Uhw th 1\n" + "xnR an 1\n" + "nfZ an 1\n" + "Qpx pr 1\n" + "qxO qu 1\n" + "lGt th 1\n" + "qRc ch 1\n" + "Rwx wa 1\n" + "tcM th 1\n" + "fBd de 1\n" + "Rjc ch 1\n" + "dfY de 1\n" + "hhR th 1\n" + "bCj ij 1\n" + "fqL qu 1\n" + "lzS le 1\n" + "Lrm er 1\n" + "eqE qu 1\n" + "vgL ng 1\n" + "wQr er 1\n" + "bwB wa 1\n" + "lGf le 1\n" + "Nwq qu 1\n" + "sdU st 1\n" + "Zxv va 1\n" + "yDm me 1\n" + "Lsw st 1\n" + "cNq ch 1\n" + "Dqc ch 1\n" + "vLz sz 1\n" + "dWv de 1\n" + "fkQ ka 1\n" + "zjD sz 1\n" + "yYv va 1\n" + "qeT qu 1\n" + "cvL ch 1\n" + "wkA ka 1\n" + "Nvb va 1\n" + "djM de 1\n" + "hgK th 1\n" + "pXb pr 1\n" + "Tlw le 1\n" + "Rhz ha 1\n" + "wkP ka 1\n" + "wDk ka 1\n" + "eFc ch 1\n" + "ehU th 1\n" + "Xly le 1\n" + "wxK wa 1\n" + "dPw de 1\n" + "sFd st 1\n" + "vcI ch 1\n" + "Fxd de 1\n" + "fvR va 1\n" + "jqs qu 1\n" + "rMj er 1\n" + "qbW qu 1\n" + "kpP ka 1\n" + "Bvw va 1\n" + "Tmk ka 1\n" + "hbP th 1\n" + "hMx th 1\n" + "jgL ng 1\n" + "efU er 1\n" + "cQb ch 1\n" + "mcA ch 1\n" + "Ewq qu 1\n" + "xmV me 1\n" + "Qcq ch 1\n" + "mzG sz 1\n" + "pKm me 1\n" + "Fwq qu 1\n" + "lRn an 1\n" + "jPk ij 1\n" + "jMb ij 1\n" + "mzO sz 1\n" + "oFw on 1\n" + "hJb th 1\n" + "sVq qu 1\n" + "iVz in 1\n" + "oqU qu 1\n" + "bhW th 1\n" + "Oxq qu 1\n" + "mQk ka 1\n" + "Xfb be 1\n" + "cNw ch 1\n" + "fgZ ng 1\n" + "Tvf va 1\n" + "sIx st 1\n" + "uZs qu 1\n" + "xzX sz 1\n" + "Ylq qu 1\n" + "oHf on 1\n" + "csU ch 1\n" + "Qzs st 1\n" + 
"Bfq qu 1\n" + "yJn an 1\n" + "pgQ ng 1\n" + "wxk ka 1\n" + "Tnw an 1\n" + "bKx be 1\n" + "bqX qu 1\n" + "Qjs st 1\n" + "pFh th 1\n" + "Xvl le 1\n" + "kfB ka 1\n" + "mZl le 1\n" + "Csg ng 1\n" + "vrJ er 1\n" + "Gfy ny 1\n" + "jbP ij 1\n" + "Yvl le 1\n" + "Hxb be 1\n" + "lrD er 1\n" + "qTl qu 1\n" + "aBc ch 1\n" + "fGb be 1\n" + "mhS th 1\n" + "zTp sz 1\n" + "kRd de 1\n" + "Wph th 1\n" + "Npj ij 1\n" + "lwS le 1\n" + "mGm me 1\n" + "nqT an 1\n" + "Ujn an 1\n" + "xjO ij 1\n" + "dMz sz 1\n" + "wKj ij 1\n" + "yZr er 1\n" + "Njb ij 1\n" + "Ylr er 1\n" + "mVf me 1\n" + "gZg ng 1\n" + "Hcb ch 1\n" + "xcB ch 1\n" + "kMm ka 1\n" + "lwC le 1\n" + "Dnf an 1\n" + "hjW th 1\n" + "rTk er 1\n" + "Vzj sz 1\n" + "Vxy ny 1\n" + "wlQ le 1\n" + "Nrv er 1\n" + "pjP ij 1\n" + "fwZ wa 1\n" + "tnW th 1\n" + "oJw on 1\n" + "kJx ka 1\n" + "Vpj ij 1\n" + "qAw qu 1\n" + "Qht th 1\n" + "bCn an 1\n" + "vrU er 1\n" + "hRc th 1\n" + "clC ch 1\n" + "rFd er 1\n" + "twH th 1\n" + "kCw ka 1\n" + "mSd de 1\n" + "Xnw an 1\n" + "fXm me 1\n" + "Twf wa 1\n" + "Fwj ij 1\n" + "bjJ ij 1\n" + "lbQ le 1\n" + "kvS ka 1\n" + "Smz sz 1\n" + "fBp pr 1\n" + "Nzz sz 1\n" + "bQp pr 1\n" + "vLx va 1\n" + "hVf th 1\n" + "yUj ij 1\n" + "cZd ch 1\n" + "gIy eg 1\n" + "hVq th 1\n" + "aQx an 1\n" + "Qfv va 1\n" + "lKb le 1\n" + "zhN th 1\n" + "Zbm me 1\n" + "Gcq ch 1\n" + "gbT ng 1\n" + "pYk ka 1\n" + "Xvd de 1\n" + "xMl le 1\n" + "uHb qu 1\n" + "bXf be 1\n" + "sNc ch 1\n" + "qVy qu 1\n" + "cpO ch 1\n" + "Fgb ng 1\n" + "eWl er 1\n" + "kKd de 1\n" + "Cbj ij 1\n" + "mfH me 1\n" + "qIa an 1\n" + "sfX st 1\n" + "snH an 1\n" + "Hjg ng 1\n" + "Lmf me 1\n" + "xgf ng 1\n" + "Evw va 1\n" + "wOk ka 1\n" + "Hjf ij 1\n" + "zuJ qu 1\n" + "fZm me 1\n" + "lNq qu 1\n" + "xUg ng 1\n" + "nLs an 1\n" + "jkS ij 1\n" + "Gvp va 1\n" + "jPd de 1\n" + "ywQ wa 1\n" + "qrG qu 1\n" + "bbH be 1\n" + "ghJ th 1\n" + "mMh th 1\n" + "Yvt th 1\n" + "xLq qu 1\n" + "Bdq qu 1\n" + "zJd sz 1\n" + "xRs st 1\n" + "vgP ng 1\n" + "Hhb th 1\n" + "npL an 1\n" + "vFp 
va 1\n" + "hSj th 1\n" + "bdC de 1\n" + "kGg ng 1\n" + "kVf ka 1\n" + "qvP qu 1\n" + "kwO ka 1\n" + "Jqt th 1\n" + "zWx sz 1\n" + "sQk st 1\n" + "hnV th 1\n" + "rrD er 1\n" + "jVh th 1\n" + "vvY va 1\n" + "bfI be 1\n" + "fSz sz 1\n" + "Czf sz 1\n" + "kWl le 1\n" + "jJc ch 1\n" + "Gwj ij 1\n" + "lFh th 1\n" + "Vpf fo 1\n" + "fkV ka 1\n" + "cYj ch 1\n" + "mrW er 1\n" + "hBb th 1\n" + "hJx th 1\n" + "wIq qu 1\n" + "cdA ch 1\n" + "wQy wa 1\n" + "wCq qu 1\n" + "wqZ qu 1\n" + "jfX ij 1\n" + "jtG th 1\n" + "xkJ ka 1\n" + "Qzf sz 1\n" + "gKs ng 1\n" + "Qzo on 1\n" + "bwI wa 1\n" + "Tsb st 1\n" + "vvX va 1\n" + "jlR le 1\n" + "qlQ qu 1\n" + "dbX de 1\n" + "Hfc ch 1\n" + "Bsj st 1\n" + "Yqk qu 1\n" + "Xnc ch 1\n" + "bzZ sz 1\n" + "dGt th 1\n" + "Xgg ng 1\n" + "jwE wa 1\n" + "Oyc ch 1\n" + "pQd de 1\n" + "jRy ij 1\n" + "pmX me 1\n" + "lZx le 1\n" + "gFq qu 1\n" + "mJd de 1\n" + "sKq qu 1\n" + "Ikj ij 1\n" + "zkG sz 1\n" + "wGf wa 1\n" + "qRp qu 1\n" + "xDn an 1\n" + "gvL ng 1\n" + "mGx me 1\n" + "iIj in 1\n" + "Gzd sz 1\n" + "bLx be 1\n" + "jUl le 1\n" + "Qvc ch 1\n" + "mVh th 1\n" + "uhF th 1\n" + "fVk ka 1\n" + "cnM ch 1\n" + "uFh th 1\n" + "mXf me 1\n" + "rCb er 1\n" + "nLw an 1\n" + "vfH fo 1\n" + "iqV qu 1\n" + "qhD th 1\n" + "sHx st 1\n" + "Ywy wa 1\n" + "mDx me 1\n" + "cBt th 1\n" + "Bmq qu 1\n" + "xRc ch 1\n" + "bSz sz 1\n" + "vCj ij 1\n" + "Tcv ch 1\n" + "aZq an 1\n" + "Jcx ch 1\n" + "nbF an 1\n" + "Qzb sz 1\n" + "vkQ ka 1\n" + "hzD th 1\n" + "xHp pr 1\n" + "hqX th 1\n" + "fEv va 1\n" + "yjF ij 1\n" + "Pjk ij 1\n" + "sfU st 1\n" + "bGc ch 1\n" + "mcX ch 1\n" + "pXc ch 1\n" + "yvS va 1\n" + "pMl le 1\n" + "wJs st 1\n" + "Vwq qu 1\n" + "yCw wa 1\n" + "qds qu 1\n" + "rRj er 1\n" + "Qhv th 1\n" + "ucG ch 1\n" + "oEh th 1\n" + "wQi in 1\n" + "lSg ng 1\n" + "Lqt th 1\n" + "nlH an 1\n" + "uqG qu 1\n" + "Oao an 1\n" + "hlX th 1\n" + "fPw wa 1\n" + "tIb th 1\n" + "zIq qu 1\n" + "qmG qu 1\n" + "xJm me 1\n" + "Vgw ng 1\n" + "Ukx ka 1\n" + "ztH th 1\n" + "lhP th 1\n" + "Jtk th 
1\n" + "Hzd sz 1\n" + "yxQ ny 1\n" + "nrP an 1\n" + "fHh th 1\n" + "Yct th 1\n" + "Gqa an 1\n" + "Fgy ng 1\n" + "oBn an 1\n" + "vuC qu 1\n" + "Bnz an 1\n" + "vPu qu 1\n" + "xFf fo 1\n" + "jdJ de 1\n" + "fGf fo 1\n" + "Yjq qu 1\n" + "Qjp ij 1\n" + "xTj ij 1\n" + "vOq qu 1\n" + "vLw va 1\n" + "sMf st 1\n" + "oVl on 1\n" + "cwN ch 1\n" + "sgR ng 1\n" + "jjQ ij 1\n" + "wzR sz 1\n" + "zhY th 1\n" + "vbR va 1\n" + "wgW ng 1\n" + "qwX qu 1\n" + "Nxw wa 1\n" + "eQo er 1\n" + "mQp me 1\n" + "Kqh th 1\n" + "tvA th 1\n" + "dlJ le 1\n" + "yVx ny 1\n" + "sPf st 1\n" + "dQz sz 1\n" + "sZb st 1\n" + "zhS th 1\n" + "kWb ka 1\n" + "mqP qu 1\n" + "Ffk ka 1\n" + "xql qu 1\n" + "gqH qu 1\n" + "Tly le 1\n" + "kpL ka 1\n" + "qEg qu 1\n" + "bMg ng 1\n" + "xRj ij 1\n" + "xsC st 1\n" + "jlS le 1\n" + "lzM le 1\n" + "Pfb be 1\n" + "uJv qu 1\n" + "yVf ny 1\n" + "Zgq qu 1\n" + "xbS be 1\n" + "oFh th 1\n" + "xvb va 1\n" + "hcU th 1\n" + "wwU wa 1\n" + "yCg ng 1\n" + "mPz sz 1\n" + "sJd st 1\n" + "bmN me 1\n" + "uVc ch 1\n" + "qdS qu 1\n" + "Vwp pr 1\n" + "Vml le 1\n" + "Hqy qu 1\n" + "Lfz sz 1\n" + "Ayj ij 1\n" + "yxK ny 1\n" + "Hwv va 1\n" + "gIp ng 1\n" + "Zgt th 1\n" + "Xtw th 1\n" + "hLf th 1\n" + "Nkd de 1\n" + "jMs st 1\n" + "xFt th 1\n" + "xBw wa 1\n" + "wHd de 1\n" + "Qzz sz 1\n" + "gYt th 1\n" + "Pvk ka 1\n" + "pvY va 1\n" + "Jxt th 1\n" + "ugQ qu 1\n" + "Lqq qu 1\n" + "xlL le 1\n" + "wMb wa 1\n" + "Sbz sz 1\n" + "vEv va 1\n" + "qfz qu 1\n" + "gcS ch 1\n" + "tCq th 1\n" + "yHp pr 1\n" + "zkF sz 1\n" + "xuK qu 1\n" + "Tbf be 1\n" + "Ipg ng 1\n" + "Yzk sz 1\n" + "Qwz sz 1\n" + "pFj ij 1\n" + "jPm ij 1\n" + "Dpq qu 1\n" + "pJz sz 1\n" + "wpN pr 1\n" + "wzE sz 1\n" + "gqD qu 1\n" + "Xwm me 1\n" + "oQx on 1\n" + "lCp le 1\n" + "Mhk th 1\n" + "dTq qu 1\n" + "xUw wa 1\n" + "hgE th 1\n" + "gcB ch 1\n" + "hpJ th 1\n" + "mqK qu 1\n" + "gBn an 1\n" + "hIv th 1\n" + "lqD qu 1\n" + "wPx wa 1\n" + "sMt th 1\n" + "yXw wa 1\n" + "jKq qu 1\n" + "Lrz er 1\n" + "Hwj ij 1\n" + "yfW ny 1\n" + "Yyu qu 1\n" 
+ "qYs qu 1\n" + "yvR va 1\n" + "sRz st 1\n" + "Kyx ny 1\n" + "nxR an 1\n" + "cdJ ch 1\n" + "Nwc ch 1\n" + "tbE th 1\n" + "oeZ er 1\n" + "bcQ ch 1\n" + "Swb wa 1\n" + "Ikq qu 1\n" + "Bvz sz 1\n" + "zhF th 1\n" + "Xqy qu 1\n" + "kKb ka 1\n" + "Wdk de 1\n" + "wpP pr 1\n" + "kQy ka 1\n" + "Bqe qu 1\n" + "qfZ qu 1\n" + "pPw pr 1\n" + "Aoh th 1\n" + "plJ le 1\n" + "Ynv an 1\n" + "jMh th 1\n" + "bQg ng 1\n" + "afM an 1\n" + "jvO ij 1\n" + "eHf er 1\n" + "hQg th 1\n" + "kqY qu 1\n" + "zJq qu 1\n" + "pYh th 1\n" + "qeM qu 1\n" + "Kpk ka 1\n" + "kfW ka 1\n" + "Wds st 1\n" + "bNc ch 1\n" + "vBx va 1\n" + "suJ qu 1\n" + "qEx qu 1\n" + "rfZ er 1\n" + "oHg ng 1\n" + "eFw er 1\n" + "fPp pr 1\n" + "kDb ka 1\n" + "tZn th 1\n" + "dcK ch 1\n" + "yWv va 1\n" + "Uxv va 1\n" + "yQe er 1\n" + "Zjq qu 1\n" + "Wjv ij 1\n" + "ygO ng 1\n" + "ojQ on 1\n" + "Kwc ch 1\n" + "pFg ng 1\n" + "sMd st 1\n" + "Mfq qu 1\n" + "Mzy sz 1\n" + "Nwp pr 1\n" + "ywT wa 1\n" + "wLq qu 1\n" + "Hqm qu 1\n" + "qsC qu 1\n" + "bNn an 1\n" + "bUv va 1\n" + "nRc ch 1\n" + "Rlk le 1\n" + "Bqp qu 1\n" + "cfI ch 1\n" + "mVq qu 1\n" + "qGj qu 1\n" + "vlX le 1\n" + "kfG ka 1\n" + "wVd de 1\n" + "cdE ch 1\n" + "hzE th 1\n" + "Dhv th 1\n" + "bzj sz 1\n" + "vvL va 1\n" + "bzQ sz 1\n" + "wVb wa 1\n" + "Zxl le 1\n" + "zLw sz 1\n" + "hTq th 1\n" + "Vqp qu 1\n" + "hmW th 1\n" + "flD le 1\n" + "Kcd ch 1\n" + "pDq qu 1\n" + "kvY ka 1\n" + "cQl ch 1\n" + "Ixk ka 1\n" + "sGf st 1\n" + "gFh th 1\n" + "Rkd de 1\n" + "qHl qu 1\n" + "rCg ng 1\n" + "qBn an 1\n" + "sJw st 1\n" + "cWj ch 1\n" + "zXp sz 1\n" + "Hhl th 1\n" + "hjP th 1\n" + "qlZ qu 1\n" + "Hxr er 1\n" + "zrE er 1\n" + "gkH ng 1\n" + "uHk qu 1\n" + "Gzm sz 1\n" + "cBc ch 1\n" + "zff sz 1\n" + "zLs st 1\n" + "Uqy qu 1\n" + "vkD ka 1\n" + "fqX qu 1\n" + "hLj th 1\n" + "fYu qu 1\n" + "jKw ij 1\n" + "jIb ij 1\n" + "nrU an 1\n" + "fFp pr 1\n" + "sbC st 1\n" + "mGv va 1\n" + "fXp pr 1\n" + "Pkv ka 1\n" + "Cqe qu 1\n" + "cCx ch 1\n" + "rNq qu 1\n" + "Zwf wa 1\n" + "Jgc ch 1\n" + 
"xlQ le 1\n" + "gBz ng 1\n" + "cIx ch 1\n" + "odQ on 1\n" + "Qnz an 1\n" + "Uzx sz 1\n" + "Jpt th 1\n" + "gxX ng 1\n" + "Zkd de 1\n" + "Xkk ka 1\n" + "hRv th 1\n" + "ycV ch 1\n" + "zMm sz 1\n" + "eBq qu 1\n" + "gHd ng 1\n" + "bxU be 1\n" + "xdK de 1\n" + "mQc ch 1\n" + "tYj th 1\n" + "hlF th 1\n" + "cRz ch 1\n" + "lGz le 1\n" + "zFz ze 1\n" + "qOp qu 1\n" + "Ggc ch 1\n" + "oGm on 1\n" + "Xnp an 1\n" + "wYg ng 1\n" + "wuJ qu 1\n" + "sNs st 1\n" + "zqU qu 1\n" + "kCp ka 1\n" + "Whw th 1\n" + "nQx an 1\n" + "vwA va 1\n" + "Vcg ch 1\n" + "kWj ij 1\n" + "Hqd qu 1\n" + "Cpy pr 1\n" + "zcL ch 1\n" + "cfF ch 1\n" + "kXn an 1\n" + "aXj an 1\n" + "Swk ka 1\n" + "fhq th 1\n" + "Vxi in 1\n" + "Gqu un 1\n" + "Uxd de 1\n" + "zdK sz 1\n" + "hZq th 1\n" + "mwJ me 1\n" + "cvD ch 1\n" + "lbZ le 1\n" + "Pzl le 1\n" + "hdO th 1\n" + "hJn th 1\n" + "qWp qu 1\n" + "dXy de 1\n" + "fuU qu 1\n" + "fXy ny 1\n" + "xnL an 1\n" + "gMf ng 1\n" + "rNf er 1\n" + "xQh th 1\n" + "kqH qu 1\n" + "rFz er 1\n" + "vpT va 1\n" + "Nwy wa 1\n" + "yqA qu 1\n" + "vhO th 1\n" + "kVh th 1\n" + "nYb an 1\n" + "jvN ij 1\n" + "bIf be 1\n" + "qqS qu 1\n" + "jbF ij 1\n" + "gMk ng 1\n" + "bTd de 1\n" + "Rhd th 1\n" + "tWq th 1\n" + "gLz ng 1\n" + "fsD st 1\n" + "uMt th 1\n" + "yHq qu 1\n" + "Xgj ng 1\n" + "Lmm me 1\n" + "vkU ka 1\n" + "lAx le 1\n" + "Kzd sz 1\n" + "hKm th 1\n" + "kQd de 1\n" + "gFc ch 1\n" + "wyX wa 1\n" + "zfU sz 1\n" + "xpU pr 1\n" + "ywJ wa 1\n" + "Ayq qu 1\n" + "gIu qu 1\n" + "zuQ qu 1\n" + "Vfn an 1\n" + "vBn an 1\n" + "Hty th 1\n" + "gRv ng 1\n" + "pTb pr 1\n" + "Uqx qu 1\n" + "vTn an 1\n" + "vJc ch 1\n" + "Uiw in 1\n" + "Jlp le 1\n" + "zPq qu 1\n" + "rCx er 1\n" + "lqS qu 1\n" + "zlZ le 1\n" + "zOw sz 1\n" + "klK le 1\n" + "kfQ ka 1\n" + "uJx qu 1\n" + "pkP ka 1\n" + "Gqz qu 1\n" + "Jlc ch 1\n" + "yyD ny 1\n" + "jhX th 1\n" + "crV ch 1\n" + "Dww wa 1\n" + "yjw ij 1\n" + "qpX qu 1\n" + "Qmd de 1\n" + "yWz sz 1\n" + "wPd de 1\n" + "Uqk qu 1\n" + "nbR an 1\n" + "Ydc ch 1\n" + "qQl qu 1\n" + "pmD 
me 1\n" + "Jkj ka 1\n" + "jTk ka 1\n" + "wYf wa 1\n" + "Zzx sz 1\n" + "rkQ er 1\n" + "bDp pr 1\n" + "qSs qu 1\n" + "gXr ng 1\n" + "cZb ch 1\n" + "Ngp ng 1\n" + "hqQ th 1\n" + "Wvw va 1\n" + "Wbw wa 1\n" + "wvK va 1\n" + "cJf ch 1\n" + "Mwd de 1\n" + "ddJ de 1\n" + "iwE in 1\n" + "bxX be 1\n" + "jxT ij 1\n" + "Ycn ch 1\n" + "wMf wa 1\n" + "bqD qu 1\n" + "yqI qu 1\n" + "dRj de 1\n" + "wYy wa 1\n" + "Txz sz 1\n" + "vrN er 1\n" + "qVu un 1\n" + "mRj ij 1\n" + "Fjx ij 1\n" + "fyQ ny 1\n" + "xeI er 1\n" + "Wqf qu 1\n" + "Jly le 1\n" + "jDb ij 1\n" + "Yzu qu 1\n" + "Bxm me 1\n" + "wLj ij 1\n" + "bqc ch 1\n" + "sgK ng 1\n" + "kqW qu 1\n" + "Zsn an 1\n" + "Fqq qu 1\n" + "rXz er 1\n" + "lJq qu 1\n" + "jEh th 1\n" + "nCb an 1\n" + "Xrd er 1\n" + "Rzh th 1\n" + "gfW ng 1\n" + "Xtl th 1\n" + "mTx me 1\n" + "ufA qu 1\n" + "wjQ ij 1\n" + "xlW le 1\n" + "dqH qu 1\n" + "xhM th 1\n" + "Xwt th 1\n" + "dnW an 1\n" + "Rfz sz 1\n" + "fKp pr 1\n" + "kFw ka 1\n" + "Quv qu 1\n" + "mXw me 1\n" + "Vkw ka 1\n" + "tFh ch 1\n" + "hIu th 1\n" + "lTf le 1\n" + "Mwv va 1\n" + "wvT va 1\n" + "kKp ka 1\n" + "tRv th 1\n" + "wXo on 1\n" + "vzL sz 1\n" + "Jcf ch 1\n" + "Tbq qu 1\n" + "jdQ de 1\n" + "Rbx be 1\n" + "Jrm er 1\n" + "sRj st 1\n" + "zWz sz 1\n" + "qnE an 1\n" + "Kcf ch 1\n" + "Qqm qu 1\n" + "fpI pr 1\n" + "iNw in 1\n" + "ujE qu 1\n" + "qHv qu 1\n" + "Jvx va 1\n" + "hHc th 1\n" + "fvJ va 1\n" + "nqY an 1\n" + "wpE wa 1\n" + "Hws st 1\n" + "xzI sz 1\n" + "Cgg ng 1\n" + "cWd ch 1\n" + "quV un 1\n" + "bjN ij 1\n" + "xQp pr 1\n" + "bxE be 1\n" + "uVk qu 1\n" + "Wrl er 1\n" + "Lrx er 1\n" + "Iwl le 1\n" + "aqB an 1\n" + "Vcp ch 1\n" + "Wwt th 1\n" + "aGx an 1\n" + "fPn an 1\n" + "mFq qu 1\n" + "qgd qu 1\n" + "Zsd st 1\n" + "Vxs sz 1\n" + "Khq th 1\n" + "wSs st 1\n" + "oGq qu 1\n" + "Yzv sz 1\n" + "dqX qu 1\n" + "mpQ me 1\n" + "Kcp ch 1\n" + "swD st 1\n" + "rZg ng 1\n" + "jYm ij 1\n" + "uJl qu 1\n" + "vWv va 1\n" + "svO st 1\n" + "pFd de 1\n" + "Yjx ij 1\n" + "tpI th 1\n" + "dVt th 1\n" + "sNm st 
1\n" + "lKt th 1\n" + "nvU an 1\n" + "Hxf fo 1\n" + "puW qu 1\n" + "wJg ng 1\n" + "gxR ng 1\n" + "fAg ng 1\n" + "Yqe qu 1\n" + "Pwz sz 1\n" + "hmC th 1\n" + "ylJ le 1\n" + "mqT qu 1\n" + "cCf ch 1\n" + "pZg ng 1\n" + "aFx an 1\n" + "oYq qu 1\n" + "fPj ij 1\n" + "dJt th 1\n" + "xwn an 1\n" + "Ccb ch 1\n" + "wFn an 1\n" + "wrY er 1\n" + "Cdh th 1\n" + "hLc th 1\n" + "Zxg ng 1\n" + "Mxc ch 1\n" + "hcY th 1\n" + "zVw sz 1\n" + "hkV th 1\n" + "txE th 1\n" + "yvT va 1\n" + "Mlw le 1\n" + "ztF th 1\n" + "fGd de 1\n" + "zjE sz 1\n" + "gjM ng 1\n" + "jwP ij 1\n" + "Kxt th 1\n" + "yFg ng 1\n" + "Wcg ch 1\n" + "thZ ch 1\n" + "hzQ th 1\n" + "Jtg th 1\n" + "yvK va 1\n" + "zVz sz 1\n" + "Pwb wa 1\n" + "xqD qu 1\n" + "uyQ qu 1\n" + "gCm ng 1\n" + "zjU sz 1\n" + "xGq qu 1\n" + "Mqy qu 1\n" + "Ocx ch 1\n" + "sqM qu 1\n" + "lRb le 1\n" + "tfU th 1\n" + "vZg ng 1\n" + "fZc ch 1\n" + "gpZ ng 1\n" + "Fpf pr 1\n" + "qtQ th 1\n" + "mhZ th 1\n" + "bqF qu 1\n" + "fgG ng 1\n" + "woT on 1\n" + "zSb sz 1\n" + "wxS wa 1\n" + "Wrf er 1\n" + "Oqk qu 1\n" + "xLc ch 1\n" + "Qzj sz 1\n" + "wXk ka 1\n" + "tdX th 1\n" + "Jqc ch 1\n" + "fXk ka 1\n" + "kBd de 1\n" + "iqW qu 1\n" + "Ocb ch 1\n" + "fUo on 1\n" + "jXk ij 1\n" + "hbI th 1\n" + "Zcg ch 1\n" + "zwS wa 1\n" + "cVm ch 1\n" + "vwj ij 1\n" + "gwG ng 1\n" + "zsM st 1\n" + "Pqo qu 1\n" + "hPj th 1\n" + "fwG wa 1\n" + "Xwh th 1\n" + "Wwh th 1\n" + "Vqw qu 1\n" + "vmY va 1\n" + "uvF qu 1\n" + "tfK th 1\n" + "Xbg ng 1\n" + "Nfn an 1\n" + "wpH pr 1\n" + "yJq qu 1\n" + "wqO qu 1\n" + "ncV ch 1\n" + "wgM ng 1\n" + "fQk ka 1\n" + "hvK th 1\n" + "qLr qu 1\n" + "Wce ch 1\n" + "kFn an 1\n" + "rBm er 1\n" + "mdV de 1\n" + "jFc ch 1\n" + "knX an 1\n" + "nMf an 1\n" + "sCc ch 1\n" + "pCq qu 1\n" + "uJt th 1\n" + "Cfk ka 1\n" + "Cxb be 1\n" + "fOw wa 1\n" + "aJz an 1\n" + "gLt th 1\n" + "bmX me 1\n" + "Yfo on 1\n" + "dJf de 1\n" + "Eay an 1\n" + "qSd qu 1\n" + "mjQ ij 1\n" + "pNk ka 1\n" + "Nvh th 1\n" + "xkX ka 1\n" + "Jwx wa 1\n" + "jvL ij 1\n" + "fpH pr 1\n" 
+ "pxO pr 1\n" + "vPx va 1\n" + "dWu qu 1\n" + "hbR th 1\n" + "woE on 1\n" + "gtX th 1\n" + "bfF be 1\n" + "mvW va 1\n" + "xsM st 1\n" + "wLv va 1\n" + "wHh th 1\n" + "sCn an 1\n" + "pLw pr 1\n" + "kXw ka 1\n" + "xVl le 1\n" + "hCc th 1\n" + "oUk on 1\n" + "zcF ch 1\n" + "sMv st 1\n" + "drZ er 1\n" + "wfO wa 1\n" + "yFv va 1\n" + "hXa th 1\n" + "qMu un 1\n" + "fCv va 1\n" + "fwC wa 1\n" + "oTg ng 1\n" + "Fkm ka 1\n" + "eQt th 1\n" + "Pxd de 1\n" + "kjG ij 1\n" + "tGs th 1\n" + "dqB qu 1\n" + "fmX me 1\n" + "xYi in 1\n" + "kIk ka 1\n" + "vDd de 1\n" + "kvC ka 1\n" + "qtZ th 1\n" + "fPc ch 1\n" + "dpN de 1\n" + "hNr th 1\n" + "Znj an 1\n" + "Hke er 1\n" + "Iqp qu 1\n" + "wfN wa 1\n" + "Vhx th 1\n" + "Dgk ng 1\n" + "mkQ ka 1\n" + "Wxd de 1\n" + "Icx ch 1\n" + "yYt th 1\n" + "tqx th 1\n" + "Zvf va 1\n" + "sxU st 1\n" + "Lqk qu 1\n" + "nfI an 1\n" + "jyq qu 1\n" + "Wvn an 1\n" + "Sdv de 1\n" + "uYc ch 1\n" + "Qgm ng 1\n" + "cXa ch 1\n" + "wBx wa 1\n" + "pYx pr 1\n" + "jWl le 1\n" + "Kfw wa 1\n" + "qjJ qu 1\n" + "Pjj ij 1\n" + "ajX an 1\n" + "sXd st 1\n" + "xHg ng 1\n" + "xhA th 1\n" + "rGm er 1\n" + "Qtm th 1\n" + "srY er 1\n" + "qPx qu 1\n" + "wRz sz 1\n" + "wOg wa 1\n" + "fLg ng 1\n" + "hQt th 1\n" + "jhW th 1\n" + "Cwk ka 1\n" + "zWl le 1\n" + "wJc ch 1\n" + "Pxv va 1\n" + "npI an 1\n" + "lnW an 1\n" + "kqy qu 1\n" + "ywg ng 1\n" + "sCd st 1\n" + "qfF qu 1\n" + "qpg qu 1\n" + "Mbx be 1\n" + "nwN an 1\n" + "wLs st 1\n" + "Wcv ch 1\n" + "Vvr er 1\n" + "Vkx ka 1\n" + "dmU de 1\n" + "fGs st 1\n" + "gJz ng 1\n" + "dFz sz 1\n" + "qCf qu 1\n" + "lvW le 1\n" + "Svb va 1\n" + "xJr er 1\n" + "uZf qu 1\n" + "Tjc ch 1\n" + "pIj ij 1\n" + "bVg ng 1\n" + "vdO de 1\n" + "lTq qu 1\n" + "bMh th 1\n" + "nDm an 1\n" + "Tzb sz 1\n" + "pCw pr 1\n" + "Qkg ng 1\n" + "fpY pr 1\n" + "yQj ij 1\n" + "qiC qu 1\n" + "mQi in 1\n" + "wUq qu 1\n" + "kVj ij 1\n" + "tjQ th 1\n" + "mXj ij 1\n" + "Xfd de 1\n" + "cgI ch 1\n" + "Pkj ij 1\n" + "jjF ij 1\n" + "jrJ er 1\n" + "qwZ qu 1\n" + "Rtz th 1\n" + 
"fHb be 1\n" + "Hgx ng 1\n" + "Dzf sz 1\n" + "cbE ch 1\n" + "Xfs st 1\n" + "Rjm ij 1\n" + "fmY me 1\n" + "wYj ij 1\n" + "uFp qu 1\n" + "vWm va 1\n" + "yVc ch 1\n" + "cgL ch 1\n" + "zmR sz 1\n" + "zfB sz 1\n" + "znH an 1\n" + "hgG th 1\n" + "xuE qu 1\n" + "Bsl le 1\n" + "oWx on 1\n" + "Pjl le 1\n" + "Jdf de 1\n" + "Xmp me 1\n" + "sgO ng 1\n" + "hCj th 1\n" + "wtR th 1\n" + "fDs st 1\n" + "bQb be 1\n" + "quM un 1\n" + "fLl le 1\n" + "Nhp th 1\n" + "znU an 1\n" + "sdS st 1\n" + "wWu qu 1\n" + "tFq th 1\n" + "cFq ch 1\n" + "Wwl le 1\n" + "Lqy qu 1\n" + "nqQ an 1\n" + "zmD sz 1\n" + "Gyx ny 1\n" + "bkR ka 1\n" + "lQw le 1\n" + "Pqm qu 1\n" + "Fwk ka 1\n" + "tHt th 1\n" + "jyL ij 1\n" + "qxA qu 1\n" + "mrC er 1\n" + "qzL qu 1\n" + "jJg ng 1\n" + "jfS ij 1\n" + "qMh th 1\n" + "mlV le 1\n" + "bkJ ka 1\n" + "knH an 1\n" + "Uqt th 1\n" + "cuF ch 1\n" + "iYq qu 1\n" + "fUe er 1\n" + "sBb st 1\n" + "Nhx th 1\n" + "rhP th 1\n" + "dWp de 1\n" + "Yvf va 1\n" + "Rxr er 1\n" + "kzG sz 1\n" + "xuZ qu 1\n" + "xvD va 1\n" + "fwq qu 1\n" + "hjJ th 1\n" + "kZr er 1\n" + "vJn an 1\n" + "xnO an 1\n" + "vcA ch 1\n" + "mfK me 1\n" + "vjS ij 1\n" + "Nvp va 1\n" + "dfB de 1\n" + "Qsb st 1\n" + "dXp pr 1\n" + "zRl le 1\n" + "Ejq qu 1\n" + "aGz an 1\n" + "nHg an 1\n" + "bvA va 1\n" + "Bfd de 1\n" + "zVg ng 1\n" + "zsY st 1\n" + "hVz th 1\n" + "Pjm ij 1\n" + "sXi in 1\n" + "iKj in 1\n" + "qaE an 1\n" + "Cfj ij 1\n" + "zMc ch 1\n" + "mgZ ng 1\n" + "vgA ng 1\n" + "iwJ in 1\n" + "vGx va 1\n" + "tfY th 1\n" + "ljH le 1\n" + "zGj sz 1\n" + "bmK me 1\n" + "nUq an 1\n" + "zRt th 1\n" + "tGj th 1\n" + "zVd sz 1\n" + "jSr er 1\n" + "fNq qu 1\n" + "xTg ng 1\n" + "nqE an 1\n" + "Wng an 1\n" + "zVv sz 1\n" + "gVs ng 1\n" + "fNd de 1\n" + "qNw qu 1\n" + "Znc ch 1\n" + "uJs qu 1\n" + "yvJ va 1\n" + "xlM le 1\n" + "Jzc ch 1\n" + "vRh th 1\n" + "fcK ch 1\n" + "wVn an 1\n" + "rWw er 1\n" + "cHk ch 1\n" + "vOx va 1\n" + "iUa an 1\n" + "nWn an 1\n" + "zqZ qu 1\n" + "xFj ij 1\n" + "nCg an 1\n" + "fYj ij 1\n" + "Vsx 
st 1\n" + "mtM th 1\n" + "mhG th 1\n" + "jtN th 1\n" + "hcC th 1\n" + "Nwk ka 1\n" + "dXu qu 1\n" + "mJq qu 1\n" + "xsO st 1\n" + "qRn an 1\n" + "Rnj an 1\n" + "kmP ka 1\n" + "Xtg th 1\n" + "Gvh th 1\n" + "jqv qu 1\n" + "cVl ch 1\n" + "cdI ch 1\n" + "zdE sz 1\n" + "hZk th 1\n" + "Bdx de 1\n" + "hHn th 1\n" + "hkG th 1\n" + "vxJ va 1\n" + "lrA er 1\n" + "lrT er 1\n" + "hjV th 1\n" + "qbI qu 1\n" + "mTg ng 1\n" + "fmV me 1\n" + "rDk er 1\n" + "dNd de 1\n" + "Gzj sz 1\n" + "aVj an 1\n" + "vNr er 1\n" + "kXa an 1\n" + "rGs er 1\n" + "xaX an 1\n" + "crG ch 1\n" + "qJa an 1\n" + "jDt th 1\n" + "Mfx fo 1\n" + "xEa an 1\n" + "Qvz sz 1\n" + "wRg ng 1\n" + "pFc ch 1\n" + "Cpv va 1\n" + "rJk er 1\n" + "fbQ be 1\n" + "Xzg ng 1\n" + "qFy qu 1\n" + "Zfj ij 1\n" + "twE th 1\n" + "Oaq an 1\n" + "ysY st 1\n" + "wdZ de 1\n" + "gmO ng 1\n" + "wGn an 1\n" + "wRk ka 1\n" + "gqS qu 1\n" + "Agq qu 1\n" + "Twv va 1\n" + "Qnv an 1\n" + "bVv va 1\n" + "cDw ch 1\n" + "tGq th 1\n" + "fbq qu 1\n" + "Tvw va 1\n" + "mNv va 1\n" + "dtE th 1\n" + "pzP sz 1\n" + "Vsw sz 1\n" + "qGq qu 1\n" + "qPc ch 1\n" + "qyC qu 1\n" + "nxF an 1\n" + "jDl le 1\n" + "jHt th 1\n" + "fxZ fo 1\n" + "sQc ch 1\n" + "nmH an 1\n" + "xrD er 1\n" + "hMh th 1\n" + "vHk ka 1\n" + "hmS th 1\n" + "Xdt th 1\n" + "Xwl le 1\n" + "uJr qu 1\n" + "sPk st 1\n" + "Xjp ij 1\n" + "Uqi qu 1\n" + "kgD ng 1\n" + "jgI ng 1\n" + "uFw qu 1\n" + "xNd de 1\n" + "dhI th 1\n" + "Lxo on 1\n" + "Sfq qu 1\n" + "zRp sz 1\n" + "xwK wa 1\n" + "fmB me 1\n" + "vrV er 1\n" + "qSf qu 1\n" + "jPn an 1\n" + "Hbp pr 1\n" + "bJt th 1\n" + "lqQ qu 1\n" + "xSd de 1\n" + "dMk de 1\n" + "vVz sz 1\n" + "vkK ka 1\n" + "Xds de 1\n" + "ybB be 1\n" + "gpE ng 1\n" + "qcC ch 1\n" + "pxL pr 1\n" + "gPm ng 1\n" + "Bpd de 1\n" + "dpB de 1\n" + "jlJ le 1\n" + "pkC ka 1\n" + "ypP pr 1\n" + "Nqm qu 1\n" + "tgZ th 1\n" + "Eqo qu 1\n" + "dRk de 1\n" + "Ubc ch 1\n" + "xhY th 1\n" + "lJd le 1\n" + "pvN va 1\n" + "Qfc ch 1\n" + "Dbw wa 1\n" + "sFc ch 1\n" + "wkX ka 1\n" + "xpR pr 
1\n" + "pjJ ij 1\n" + "gkQ ng 1\n" + "rMf er 1\n" + "Jsn an 1\n" + "xOw wa 1\n" + "Dqu un 1\n" + "nbJ an 1\n" + "gvF ng 1\n" + "Fnp an 1\n" + "jpV ij 1\n" + "qtD th 1\n" + "uEj qu 1\n" + "yhY th 1\n" + "Ohq th 1\n" + "nXy an 1\n" + "pdU de 1\n" + "mDz sz 1\n" + "iVk in 1\n" + "Hqq qu 1\n" + "xpZ po 1\n" + "aeU an 1\n" + "sjZ st 1\n" + "sGp st 1\n" + "Wqn an 1\n" + "xqS qu 1\n" + "Jjc ch 1\n" + "qPp qu 1\n" + "sXz st 1\n" + "xvP va 1\n" + "Wbq qu 1\n" + "tjK th 1\n" + "lhH th 1\n" + "hqV th 1\n" + "dYf de 1\n" + "pFk ka 1\n" + "sFq qu 1\n" + "uHq qu 1\n" + "vhA th 1\n" + "jlE le 1\n" + "sqB qu 1\n" + "qnr an 1\n" + "Fxq qu 1\n" + "zHn an 1\n" + "pdB de 1\n" + "wHc ch 1\n" + "Pxj ij 1\n" + "gHx ng 1\n" + "nqJ an 1\n" + "oqX qu 1\n" + "Xby be 1\n" + "tbI th 1\n" + "kSf ka 1\n" + "vhD th 1\n" + "qHj qu 1\n" + "Npx pr 1\n" + "Qzp sz 1\n" + "xiU in 1\n" + "rjZ er 1\n" + "wjU ij 1\n" + "jtB th 1\n" + "Ygq qu 1\n" + "aQf an 1\n" + "xWu qu 1\n" + "aVf an 1\n" + "pQx pr 1\n" + "Lnw an 1\n" + "qWa an 1\n" + "uHp qu 1\n" + "Lvp va 1\n" + "Jxp pr 1\n" + "zHk sz 1\n" + "wvU va 1\n" + "Wqh th 1\n" + "hVs th 1\n" + "Xgy ng 1\n" + "dZj de 1\n" + "uCq qu 1\n" + "Gxl le 1\n" + "Hlg ng 1\n" + "Wqd qu 1\n" + "Dxz sz 1\n" + "hdN th 1\n" + "pvM va 1\n" + "Wxk ka 1\n" + "qWd qu 1\n" + "fiO in 1\n" + "fDw wa 1\n" + "bHj ij 1\n" + "iVh th 1\n" + "Pmg ng 1\n" + "fXc ch 1\n" + "xfL fo 1\n" + "yGc ch 1\n" + "yBn an 1\n" + "hCk th 1\n" + "Llk le 1\n" + "yMh th 1\n" + "qrY qu 1\n" + "gdX ng 1\n" + "qxG qu 1\n" + "Zmt th 1\n" + "Rzw sz 1\n" + "nBd an 1\n" + "mWl le 1\n" + "xuI qu 1\n" + "jyF ij 1\n" + "bVu qu 1\n" + "ygP ng 1\n" + "dFq qu 1\n" + "jFm ij 1\n" + "Rml le 1\n" + "klH le 1\n" + "Vff fo 1\n" + "Kzk sz 1\n" + "Lhv th 1\n" + "cSj ch 1\n" + "Qrh th 1\n" + "uBw qu 1\n" + "sCk ka 1\n" + "qyS qu 1\n" + "cXu ch 1\n" + "wfM wa 1\n" + "kdK de 1\n" + "cXj ch 1\n" + "ctZ th 1\n" + "fjI ij 1\n" + "cgS ch 1\n" + "mwL me 1\n" + "kzU sz 1\n" + "cZr ch 1\n" + "fqU qu 1\n" + "qJi qu 1\n" + "gDd ng 1\n" 
+ "bKq qu 1\n" + "aUw an 1\n" + "sxE st 1\n" + "mxU me 1\n" + "cwY ch 1\n" + "fpC pr 1\n" + "sRw st 1\n" + "Kkq qu 1\n" + "wxA wa 1\n" + "gQf ng 1\n" + "pPb pr 1\n" + "Hwu ku 1\n" + "suX qu 1\n" + "lqY qu 1\n" + "sxW st 1\n" + "aFh th 1\n" + "lWq qu 1\n" + "pbZ pr 1\n" + "bqm qu 1\n" + "kJk ka 1\n" + "qtT th 1\n" + "zMd sz 1\n" + "hGs th 1\n" + "xlH le 1\n" + "dmq qu 1\n" + "Xrk er 1\n" + "Ocf ch 1\n" + "mKc ch 1\n" + "zrA er 1\n" + "gxE ng 1\n" + "qWu un 1\n" + "xQf fo 1\n" + "Xoz on 1\n" + "fmP me 1\n" + "kdD de 1\n" + "bBz sz 1\n" + "wpA pr 1\n" + "nMb an 1\n" + "tHq th 1\n" + "jMt th 1\n" + "Svq qu 1\n" + "jMl le 1\n" + "wBc ch 1\n" + "ymX me 1\n" + "hcB th 1\n" + "brU er 1\n" + "paX an 1\n" + "hdG th 1\n" + "Fwp pr 1\n" + "sbY st 1\n" + "mhB th 1\n" + "pfZ pr 1\n" + "Vmh th 1\n" + "sCq qu 1\n" + "Zfw wa 1\n" + "Ljm ij 1\n" + "pqG qu 1\n" + "dpK de 1\n" + "tfG th 1\n" + "ijR in 1\n" + "iJy in 1\n" + "qfN qu 1\n" + "crS ch 1\n" + "cgT ch 1\n" + "wOt th 1\n" + "fnE an 1\n" + "hWp th 1\n" + "Zpw pr 1\n" + "wdO de 1\n" + "vYy va 1\n" + "qrI qu 1\n" + "dmF de 1\n" + "jhJ th 1\n" + "wHr er 1\n" + "Jzb sz 1\n" + "fEy ny 1\n" + "hhZ th 1\n" + "wpQ pr 1\n" + "qYg qu 1\n" + "qtY th 1\n" + "Kdx de 1\n" + "qfj qu 1\n" + "Rbv va 1\n" + "bbO be 1\n" + "Xcn ch 1\n" + "kCd de 1\n" + "Gcx ch 1\n" + "zmC sz 1\n" + "wJl le 1\n" + "qDc ch 1\n" + "Jzr er 1\n" + "Yrw er 1\n" + "Ksx st 1\n" + "uKx qu 1\n" + "jSc ch 1\n" + "Ljz sz 1\n" + "xdB de 1\n" + "zWb sz 1\n" + "vwY va 1\n" + "vMd de 1\n" + "dbH de 1\n" + "Qsu qu 1\n" + "wHq qu 1\n" + "gJh th 1\n" + "wZp pr 1\n" + "btO th 1\n" + "Xmv va 1\n" + "qpd qu 1\n" + "Jnw an 1\n" + "vlD le 1\n" + "xcX ch 1\n" + "Yvv va 1\n" + "Zft th 1\n" + "Hqz qu 1\n" + "xqM qu 1\n" + "Hth ch 1\n" + "ztL th 1\n" + "iOj in 1\n" + "cIz ch 1\n" + "hhC th 1\n" + "tvX th 1\n" + "Fgk ng 1\n" + "mjC ij 1\n" + "Ojp ij 1\n" + "kvI ka 1\n" + "zqb qu 1\n" + "qqW qu 1\n" + "iHg ng 1\n" + "jxJ ij 1\n" + "Gbz sz 1\n" + "nQc ch 1\n" + "pXq qu 1\n" + "jDd de 1\n" + 
"qQr qu 1\n" + "vJx va 1\n" + "zbY sz 1\n" + "fRm me 1\n" + "qEl qu 1\n" + "oaZ an 1\n" + "vjF ij 1\n" + "lqX qu 1\n" + "pSd de 1\n" + "bXq qu 1\n" + "jJv ij 1\n" + "Wrv er 1\n" + "Kpw pr 1\n" + "xaY an 1\n" + "jCv ij 1\n" + "fbR be 1\n" + "pTp pr 1\n" + "wdI de 1\n" + "qfQ qu 1\n" + "Rrq qu 1\n" + "dbF de 1\n" + "bzF sz 1\n" + "qwO qu 1\n" + "vrY er 1\n" + "twI th 1\n" + "zLf sz 1\n" + "bVc ch 1\n" + "Xnl an 1\n" + "Wgb ng 1\n" + "fuS qu 1\n" + "vIf va 1\n" + "Twt th 1\n" + "nKd an 1\n" + "Dkh th 1\n" + "uBd qu 1\n" + "kOz ka 1\n" + "zOj sz 1\n" + "nzE an 1\n" + "Zbh th 1\n" + "qMg qu 1\n" + "gfC ng 1\n" + "vgD ng 1\n" + "ytC th 1\n" + "mqM qu 1\n" + "Kjn an 1\n" + "xbX be 1\n" + "zfH sz 1\n" + "mwH me 1\n" + "zQb sz 1\n" + "Gzk sz 1\n" + "qsW qu 1\n" + "kNs st 1\n" + "Lqz qu 1\n" + "nmW an 1\n" + "qNx qu 1\n" + "zcQ ch 1\n" + "qMz qu 1\n" + "wGz sz 1\n" + "uCd qu 1\n" + "Bpv pr 1\n" + "qNe qu 1\n" + "bpP pr 1\n" + "lXf le 1\n" + "cLq ch 1\n" + "pdX de 1\n" + "qzU qu 1\n" + "Kxd de 1\n" + "jvF ij 1\n" + "rFn an 1\n" + "Etq th 1\n" + "zYh th 1\n" + "Ksv st 1\n" + "fJk ka 1\n" + "fkC ka 1\n" + "mxK me 1\n" + "fbz sz 1\n" + "vrW er 1\n" + "mPq qu 1\n" + "yBt th 1\n" + "iCf in 1\n" + "srH er 1\n" + "hjB th 1\n" + "fcG ch 1\n" + "Ftg th 1\n" + "uBp qu 1\n" + "yqT qu 1\n" + "djF de 1\n" + "tgU th 1\n" + "Wrj er 1\n" + "xFc ch 1\n" + "ycC ch 1\n" + "eqA qu 1\n" + "pbG pr 1\n" + "Cwh th 1\n" + "fDk ka 1\n" + "wTz sz 1\n" + "xrW er 1\n" + "kQs st 1\n" + "wMl le 1\n" + "yCn nd 1\n" + "eGp er 1\n" + "uPv qu 1\n" + "Wqe qu 1\n" + "yiI in 1\n" + "rqF qu 1\n" + "Kjs st 1\n" + "lwK le 1\n" + "fjQ ij 1\n" + "uIq qu 1\n" + "dxR de 1\n" + "Gqj qu 1\n" + "nLb an 1\n" + "gRd ng 1\n" + "qyv qu 1\n" + "wtZ th 1\n" + "cRk ch 1\n" + "iKf in 1\n" + "hbK th 1\n" + "rqT qu 1\n" + "xmF me 1\n" + "vHt th 1\n" + "tqN th 1\n" + "vLv va 1\n" + "xvJ va 1\n" + "bgJ ng 1\n" + "Qjq qu 1\n" + "Lvb va 1\n" + "Hxg ng 1\n" + "tVq th 1\n" + "rhZ th 1\n" + "slL le 1\n" + "kdH de 1\n" + "Kfb be 1\n" + "Dfh 
th 1\n" + "Cqq qu 1\n" + "nQk an 1\n" + "Wnz an 1\n" + "Njj ij 1\n" + "bJf be 1\n" + "wRh th 1\n" + "Dpb pr 1\n" + "sPj st 1\n" + "Zpn an 1\n" + "mPj ij 1\n" + "Qcl ch 1\n" + "zCd sz 1\n" + "yrC er 1\n" + "hCb th 1\n" + "aBv an 1\n" + "yuG qu 1\n" + "fcN ch 1\n" + "bZp pr 1\n" + "Gtf th 1\n" + "wbW wa 1\n" + "vPq qu 1\n" + "Vtj th 1\n" + "kWq qu 1\n" + "Jbm me 1\n" + "Wmb me 1\n" + "pxY pr 1\n" + "hQx th 1\n" + "tNn th 1\n" + "qdx qu 1\n" + "cYv ch 1\n" + "zlX le 1\n" + "rwF er 1\n" + "cZm ch 1\n" + "ybJ be 1\n" + "qaB an 1\n" + "tVj th 1\n" + "zUg ng 1\n" + "cfC ch 1\n" + "hxB th 1\n" + "Tbz sz 1\n" + "oFn an 1\n" + "bTp pr 1\n" + "hBk th 1\n" + "hQe th 1\n" + "qBe de 1\n" + "dpC de 1\n" + "kpW ka 1\n" + "Zkj ij 1\n" + "Nwn an 1\n" + "grC ng 1\n" + "uXq qu 1\n" + "Uoy on 1\n" + "Zfu qu 1\n" + "xKb be 1\n" + "hSb th 1\n" + "bPc ch 1\n" + "qcg ch 1\n" + "xIu qu 1\n" + "gBv ng 1\n" + "gZm me 1\n" + "qPu un 1\n" + "Bfp pr 1\n" + "rxC er 1\n" + "sLk st 1\n" + "hGj th 1\n" + "qvR qu 1\n" + "qpR qu 1\n" + "vNn an 1\n" + "Dft th 1\n" + "nRq an 1\n" + "khR th 1\n" + "pqP qu 1\n" + "tNp th 1\n" + "Vwt th 1\n" + "xwA wa 1\n" + "wMn an 1\n" + "Snq an 1\n" + "dfD de 1\n" + "vGw va 1\n" + "Xqb qu 1\n" + "Kww wa 1\n" + "Qhx th 1\n" + "Oyx ny 1\n" + "dvB de 1\n" + "sVh th 1\n" + "Hcn ch 1\n" + "sbU st 1\n" + "fFw wa 1\n" + "kfT ka 1\n" + "rvW er 1\n" + "Yxw wa 1\n" + "nFk an 1\n" + "Lqd qu 1\n" + "hoQ th 1\n" + "Nfj ij 1\n" + "grH ng 1\n" + "cJk ch 1\n" + "Pnv an 1\n" + "Nqx qu 1\n" + "yfE ny 1\n" + "kmI ka 1\n" + "Gmz sz 1\n" + "bxS be 1\n" + "quU un 1\n" + "qYf qu 1\n" + "zKw sz 1\n" + "whK th 1\n" + "ofY on 1\n" + "prH er 1\n" + "jXz sz 1\n" + "vQm va 1\n" + "iWx in 1\n" + "bzC sz 1\n" + "nYx an 1\n" + "qaK an 1\n" + "Ggb ng 1\n" + "zSf sz 1\n" + "rQz er 1\n" + "hkW th 1\n" + "Vnl an 1\n" + "Gtd th 1\n" + "rMw er 1\n" + "wvX va 1\n" + "jyU ij 1\n" + "Qqp qu 1\n" + "Hnq an 1\n" + "bFb be 1\n" + "qkH qu 1\n" + "Wck ch 1\n" + "fMw wa 1\n" + "zgE ng 1\n" + "oJz on 1\n" + "xvH va 
1\n" + "hQy th 1\n" + "cYf ch 1\n" + "cxD ch 1\n" + "yDs st 1\n" + "qBh th 1\n" + "cJx ch 1\n" + "dPj de 1\n" + "wWd de 1\n" + "rHn an 1\n" + "iyM in 1\n" + "yxD ny 1\n" + "kPc ch 1\n" + "cXv ch 1\n" + "Nmg ng 1\n" + "vkN ka 1\n" + "lFj le 1\n" + "ymU me 1\n" + "pZv va 1\n" + "gZt th 1\n" + "Jqy qu 1\n" + "qAz qu 1\n" + "Bcy ch 1\n" + "pqj qu 1\n" + "cqE ch 1\n" + "Rwv va 1\n" + "crM ch 1\n" + "Axz sz 1\n" + "Zjp ij 1\n" + "yxF ny 1\n" + "vZh th 1\n" + "sPb st 1\n" + "vCs st 1\n" + "fQq qu 1\n" + "qYq qu 1\n" + "hBp th 1\n" + "Jbk ka 1\n" + "gqK qu 1\n" + "krq qu 1\n" + "Cfz sz 1\n" + "mbJ me 1\n" + "fRq qu 1\n" + "Iwv va 1\n" + "uFn an 1\n" + "cYz ch 1\n" + "qDb qu 1\n" + "xHd de 1\n" + "qmI qu 1\n" + "ycE ch 1\n" + "Mhf th 1\n" + "iuE qu 1\n" + "gXf ng 1\n" + "lPy le 1\n" + "bPv va 1\n" + "jXh th 1\n" + "gOx ng 1\n" + "Nmv va 1\n" + "xDg ng 1\n" + "Cwd de 1\n" + "ljP le 1\n" + "wqV qu 1\n" + "nrE an 1\n" + "Kmw me 1\n" + "gJt th 1\n" + "tgB th 1\n" + "xzR sz 1\n" + "vJr er 1\n" + "aUi an 1\n" + "ynY an 1\n" + "bZv va 1\n" + "fFq qu 1\n" + "Sxg ng 1\n" + "qAc ch 1\n" + "iZv in 1\n" + "jXu qu 1\n" + "gpR ng 1\n" + "wVl le 1\n" + "dNj de 1\n" + "fBw wa 1\n" + "Mjy ij 1\n" + "kjZ ij 1\n" + "tLs th 1\n" + "iYj in 1\n" + "wbO wa 1\n" + "qXb qu 1\n" + "uJq qu 1\n" + "qKt th 1\n" + "vjO ij 1\n" + "wuD qu 1\n" + "blQ le 1\n" + "yfB ny 1\n" + "Qsk st 1\n" + "Uwm me 1\n" + "Zqg qu 1\n" + "nmY an 1\n" + "pXw pr 1\n" + "yVj ij 1\n" + "gIw ng 1\n" + "Hxk ka 1\n" + "Pgy ng 1\n" + "lQv le 1\n" + "bnK an 1\n" + "xtZ th 1\n" + "Qce ch 1\n" + "Njq qu 1\n" + "mvq qu 1\n" + "Mwz sz 1\n" + "Gtn th 1\n" + "fJh th 1\n" + "vJz sz 1\n" + "gDk ng 1\n" + "dLw de 1\n" + "oeU er 1\n" + "cvY ch 1\n" + "Gbb be 1\n" + "Tqd qu 1\n" + "aTp an 1\n" + "Ywg ng 1\n" + "jdT de 1\n" + "Wkm ka 1\n" + "pxA pr 1\n" + "vDl le 1\n" + "sfD st 1\n" + "rqV qu 1\n" + "cHb ch 1\n" + "iVc ch 1\n" + "Mfh th 1\n" + "sVm st 1\n" + "nzR an 1\n" + "Qvs st 1\n" + "kZg ng 1\n" + "Wnw an 1\n" + "qZb qu 1\n" + "Gvq qu 1\n" 
+ "vPk ka 1\n" + "Sxq qu 1\n" + "vNg ng 1\n" + "qrH qu 1\n" + "fLc ch 1\n" + "wVs st 1\n" + "qEh th 1\n" + "uqC qu 1\n" + "tZx th 1\n" + "yhI th 1\n" + "wNh th 1\n" + "rFj er 1\n" + "xPq qu 1\n" + "pqW qu 1\n" + "Pjc ch 1\n" + "jYj ij 1\n" + "pFv va 1\n" + "vLr er 1\n" + "lqq qu 1\n" + "xJg ng 1\n" + "lVz le 1\n" + "cZc ch 1\n" + "hcF th 1\n" + "uhJ th 1\n" + "cLj ch 1\n" + "qyW qu 1\n" + "zhT th 1\n" + "mtK th 1\n" + "pRb pr 1\n" + "bCx be 1\n" + "nJf an 1\n" + "jwF ij 1\n" + "Pdj de 1\n" + "jxE ij 1\n" + "slZ le 1\n" + "Lxn an 1\n" + "znL an 1\n" + "mzV sz 1\n" + "lGq le 1\n" + "Qbw wa 1\n" + "jbY ij 1\n" + "zSm sz 1\n" + "Qqx qu 1\n" + "ypR pr 1\n" + "gCc ch 1\n" + "Yvx va 1\n" + "ihI th 1\n" + "Zfx fo 1\n" + "njI nd 1\n" + "Ypt th 1\n" + "lxT le 1\n" + "fVv va 1\n" + "Jzm sz 1\n" + "jxA ij 1\n" + "gDl ng 1\n" + "Eaq an 1\n" + "Qcn an 1\n" + "zGb sz 1\n" + "jLh th 1\n" + "qkX qu 1\n" + "wbK wa 1\n" + "nNx an 1\n" + "sqW qu 1\n" + "wRx wa 1\n" + "xrU er 1\n" + "fnQ an 1\n" + "kzB sz 1\n" + "Rcn ch 1\n" + "qbL qu 1\n" + "srD er 1\n" + "Vxu qu 1\n" + "qvF qu 1\n" + "wJr er 1\n" + "Yxg ng 1\n" + "qiY qu 1\n" + "fMc ch 1\n" + "hbY th 1\n" + "hgH th 1\n" + "dmS de 1\n" + "jTn an 1\n" + "Zjm ij 1\n" + "Njl le 1\n" + "dqV qu 1\n" + "Yjh th 1\n" + "rKw er 1\n" + "cxU ch 1\n" + "Ckj ij 1\n" + "zfJ sz 1\n" + "ytF th 1\n" + "xrP er 1\n" + "qEj qu 1\n" + "rxO er 1\n" + "rZn an 1\n" + "bZq qu 1\n" + "cXq ch 1\n" + "wvD va 1\n" + "hcX th 1\n" + "zkO sz 1\n" + "hNx th 1\n" + "wFg ng 1\n" + "kXu qu 1\n" + "Vkn an 1\n" + "Gjz sz 1\n" + "Qcd ch 1\n" + "yvF va 1\n" + "xFx xe 1\n" + "dSj de 1\n" + "xPb be 1\n" + "oFp on 1\n" + "qAk qu 1\n" + "rqU qu 1\n" + "pGv va 1\n" + "hzC th 1\n" + "qIk qu 1\n" + "Lhl th 1\n" + "Fwb wa 1\n" + "pgE ng 1\n" + "Awz sz 1\n" + "fBk ka 1\n" + "xKd de 1\n" + "Pfw wa 1\n" + "uqK qu 1\n" + "pJc ch 1\n" + "bTc ch 1\n" + "tWg th 1\n" + "gdN ng 1\n" + "jrN er 1\n" + "klS le 1\n" + "qEi qu 1\n" + "sFn an 1\n" + "tqR th 1\n" + "Fnm an 1\n" + "hXv th 1\n" + 
"fxN fo 1\n" + "bvL va 1\n" + "oGf on 1\n" + "hZm th 1\n" + "yfH ny 1\n" + "dcE ch 1\n" + "pgW ng 1\n" + "wrB er 1\n" + "kWm ka 1\n" + "Shx th 1\n" + "twP th 1\n" + "Qvd de 1\n" + "Qgu qu 1\n" + "pJt th 1\n" + "zNv sz 1\n" + "Hph th 1\n" + "klF le 1\n" + "vqz qu 1\n" + "sgG ng 1\n" + "kdZ de 1\n" + "ejX er 1\n" + "Pxu qu 1\n" + "pvT va 1\n" + "Kqx qu 1\n" + "Qmb me 1\n" + "xFk ka 1\n" + "wQb wa 1\n" + "Pgx ng 1\n" + "ypL pr 1\n" + "bwE wa 1\n" + "xHt th 1\n" + "kVz sz 1\n" + "jmF ij 1\n" + "Ixq qu 1\n" + "qyP qu 1\n" + "rVv er 1\n" + "Ytw th 1\n" + "qpZ qu 1\n" + "tpZ th 1\n" + "zjX sz 1\n" + "Khg th 1\n" + "qfV qu 1\n" + "Jzx sz 1\n" + "kTj ij 1\n" + "Bzq qu 1\n" + "njR an 1\n" + "cgW ch 1\n" + "cmI ch 1\n" + "kCb ka 1\n" + "pYp pr 1\n" + "vkZ ka 1\n" + "wvk ka 1\n" + "Vfq qu 1\n" + "nlZ an 1\n" + "qNj qu 1\n" + "rCq qu 1\n" + "kbV ka 1\n" + "Dqj qu 1\n" + "brD er 1\n" + "lbG le 1\n" + "xhF th 1\n" + "kxZ ka 1\n" + "Iuq qu 1\n" + "yFx ny 1\n" + "qVl qu 1\n" + "lcG ch 1\n" + "vWr er 1\n" + "aBq an 1\n" + "yJk ka 1\n" + "czL ch 1\n" + "jIu qu 1\n" + "vUl le 1\n" + "pZq qu 1\n" + "vtW th 1\n" + "Qxw wa 1\n" + "dYv de 1\n" + "iqH qu 1\n" + "Xws st 1\n" + "fDj ij 1\n" + "xVz sz 1\n" + "dKq qu 1\n" + "vfQ va 1\n" + "hvD th 1\n" + "wdY de 1\n" + "Hzz sz 1\n" + "cYs ch 1\n" + "Ftj th 1\n" + "dpU de 1\n" + "Lld le 1\n" + "Gqw qu 1\n" + "kdR de 1\n" + "vXg ng 1\n" + "qsY qu 1\n" + "jNf ij 1\n" + "Qjj ij 1\n" + "pVl le 1\n" + "Jmx me 1\n" + "pDj ij 1\n" + "iBc ch 1\n" + "kLj ij 1\n" + "xnG an 1\n" + "vTl le 1\n" + "Ndg ng 1\n" + "pqU qu 1\n" + "Uaw an 1\n" + "fzN sz 1\n" + "gNq qu 1\n" + "kjM ij 1\n" + "lnK an 1\n" + "zxb sz 1\n" + "kcS ch 1\n" + "njM an 1\n" + "Gdw de 1\n" + "lnZ an 1\n" + "Ygj ng 1\n" + "hKd th 1\n" + "gpT ng 1\n" + "yqP qu 1\n" + "ijX in 1\n" + "jGf ij 1\n" + "bxI be 1\n" + "vXx va 1\n" + "Vrw er 1\n" + "Cwx wa 1\n" + "nBh th 1\n" + "qvy qu 1\n" + "sxB st 1\n" + "mVk ka 1\n" + "Czx sz 1\n" + "fyV ny 1\n" + "cXw ch 1\n" + "Qnf an 1\n" + "Yqd qu 1\n" + "lqH 
qu 1\n" + "dbY de 1\n" + "Sqb qu 1\n" + "Kqw qu 1\n" + "zpJ sz 1\n" + "cbM ch 1\n" + "zFg ng 1\n" + "sKb st 1\n" + "qrK qu 1\n" + "zJc ch 1\n" + "nRn an 1\n" + "fqN qu 1\n" + "hfA th 1\n" + "qoG qu 1\n" + "Owz sz 1\n" + "nlG an 1\n" + "wIx wa 1\n" + "qrP qu 1\n" + "Nwg ng 1\n" + "qaW an 1\n" + "hcT th 1\n" + "wkB ka 1\n" + "Ndt th 1\n" + "Kzq qu 1\n" + "gxB ng 1\n" + "Bjz sz 1\n" + "vTf va 1\n" + "jFq qu 1\n" + "qMe qu 1\n" + "ufQ qu 1\n" + "npG an 1\n" + "uZk qu 1\n" + "qTw qu 1\n" + "Glw le 1\n" + "Kqq qu 1\n" + "Cxr er 1\n" + "jZs st 1\n" + "Sqv qu 1\n" + "yPm me 1\n" + "eQj er 1\n" + "aIh th 1\n" + "gDq qu 1\n" + "lIp le 1\n" + "jNj ij 1\n" + "qOd qu 1\n" + "vkM ka 1\n" + "vFy va 1\n" + "cfV ch 1\n" + "Kjh th 1\n" + "gkP ng 1\n" + "rJc ch 1\n" + "uPq qu 1\n" + "ozQ on 1\n" + "Dlk le 1\n" + "vXh th 1\n" + "ktY th 1\n" + "vWy va 1\n" + "gQv ng 1\n" + "Yww wa 1\n" + "Tpz sz 1\n" + "Qhc th 1\n" + "xuT qu 1\n" + "nbS an 1\n" + "zQg ng 1\n" + "vgZ ng 1\n" + "pUo on 1\n" + "uWb qu 1\n" + "mMf me 1\n" + "Zcd ch 1\n" + "iBp in 1\n" + "fwp pr 1\n" + "zYf sz 1\n" + "wCp pr 1\n" + "Cqy qu 1\n" + "cjF ch 1\n" + "Gfh th 1\n" + "mcW ch 1\n" + "cqV ch 1\n" + "uJd qu 1\n" + "iUj in 1\n" + "vkR ka 1\n" + "wgI ng 1\n" + "vUg ng 1\n" + "Wdn de 1\n" + "sjF st 1\n" + "tPv th 1\n" + "xRn an 1\n" + "klV le 1\n" + "sbM st 1\n" + "mfT me 1\n" + "dbV de 1\n" + "Fmn an 1\n" + "gfU ng 1\n" + "cbB ch 1\n" + "Yxz sz 1\n" + "Kxk ka 1\n" + "Dwq qu 1\n" + "wgX ng 1\n" + "sPv st 1\n" + "vHd de 1\n" + "nbH an 1\n" + "cFn an 1\n" + "qqX qu 1\n" + "jFe er 1\n" + "qEb qu 1\n" + "dFh th 1\n" + "uEo qu 1\n" + "lcI ch 1\n" + "bMm me 1\n" + "zZw sz 1\n" + "hjO th 1\n" + "hKx th 1\n" + "jgC ng 1\n" + "cnL an 1\n" + "Fdg ng 1\n" + "bGf be 1\n" + "Sjz sz 1\n" + "bMj ij 1\n" + "vXw va 1\n" + "Gff fo 1\n" + "Cww wa 1\n" + "jsQ st 1\n" + "Zgv ng 1\n" + "lPf le 1\n" + "nmQ an 1\n" + "Vdq qu 1\n" + "lcX ch 1\n" + "gjT ng 1\n" + "mwE me 1\n" + "qLm qu 1\n" + "cHq ch 1\n" + "Xtn th 1\n" + "Ntq th 1\n" + "gWk ng 
1\n" + "Pqd qu 1\n" + "qpP qu 1\n" + "sRf st 1\n" + "qpL qu 1\n" + "cnD an 1\n" + "qpG qu 1\n" + "dzS sz 1\n" + "tZb th 1\n" + "ygM ng 1\n" + "bxC be 1\n" + "dfU de 1\n" + "bmB me 1\n" + "lBz le 1\n" + "gJx ng 1\n" + "Ykv ka 1\n" + "Zdk de 1\n" + "wnQ an 1\n" + "tZj th 1\n" + "Zzm sz 1\n" + "Vfh th 1\n" + "Mwc ch 1\n" + "rUo on 1\n" + "qwp qu 1\n" + "tcI th 1\n" + "tfD th 1\n" + "uoZ qu 1\n" + "fCw wa 1\n" + "iQq qu 1\n" + "qBg qu 1\n" + "sVb st 1\n" + "pjU ij 1\n" + "scQ ch 1\n" + "pqQ qu 1\n" + "svZ st 1\n" + "Zpj ij 1\n" + "piV in 1\n" + "kbP ka 1\n" + "wqM qu 1\n" + "rVb er 1\n" + "qZr qu 1\n" + "hxO th 1\n" + "wTn an 1\n" + "Jzf sz 1\n" + "Qjb ij 1\n" + "uYv qu 1\n" + "pwK pr 1\n" + "hvH th 1\n" + "Dqe qu 1\n" + "pfI pr 1\n" + "mhV th 1\n" + "jgE ng 1\n" + "rcQ ch 1\n" + "kmT ka 1\n" + "Wzj sz 1\n" + "xNs st 1\n" + "Pbj ij 1\n" + "zvB sz 1\n" + "xhJ th 1\n" + "svq qu 1\n" + "Nvn an 1\n" + "swZ st 1\n" + "jgF ng 1\n" + "mfL me 1\n" + "zkL sz 1\n" + "jVp ij 1\n" + "Dkj ij 1\n" + "xuY qu 1\n" + "hHq th 1\n" + "cSf ch 1\n" + "Jzd sz 1\n" + "lqU qu 1\n" + "qMd qu 1\n" + "Qgj ng 1\n" + "fxk ka 1\n" + "tRt th 1\n" + "zFk sz 1\n" + "qEo qu 1\n" + "voY on 1\n" + "Awj ij 1\n" + "Txj ij 1\n" + "cIg ch 1\n" + "xUu qu 1\n" + "sRr er 1\n" + "Jxn an 1\n" + "iPf in 1\n" + "ejY er 1\n" + "Xts th 1\n" + "pfT pr 1\n" + "Pqa an 1\n" + "zsV st 1\n" + "ypC pr 1\n" + "wMs st 1\n" + "qEc ch 1\n" + "vxY va 1\n" + "fUg ng 1\n" + "Dff fo 1\n" + "gqQ qu 1\n" + "zMv sz 1\n" + "vJi in 1\n" + "fPv va 1\n" + "dLz sz 1\n" + "cdM ch 1\n" + "gNx ng 1\n" + "aGv an 1\n" + "vvD va 1\n" + "dJh th 1\n" + "rxY er 1\n" + "rWj er 1\n" + "Pvx va 1\n" + "rhD th 1\n" + "zRd sz 1\n" + "Kgv ng 1\n" + "Xvy va 1\n" + "kZj ij 1\n" + "kpK ka 1\n" + "Pfn an 1\n" + "wUe er 1\n" + "wWx wa 1\n" + "jPw ij 1\n" + "gLq qu 1\n" + "iJq qu 1\n" + "gPx ng 1\n" + "jHd de 1\n" + "vJb va 1\n" + "xhB th 1\n" + "xQv va 1\n" + "Eoa an 1\n" + "pjO ij 1\n" + "yFj ij 1\n" + "sXo on 1\n" + "wbY wa 1\n" + "cjO ch 1\n" + "mlZ le 1\n" 
+ "bNv va 1\n" + "kjP ij 1\n" + "yXn an 1\n" + "qVj qu 1\n" + "fNv va 1\n" + "gjW ng 1\n" + "nXj an 1\n" + "dqJ qu 1\n" + "Hnh th 1\n" + "Qyk ka 1\n" + "kvB ka 1\n" + "qyB qu 1\n" + "mDt th 1\n" + "zgP ng 1\n" + "Zzk sz 1\n" + "fMk ka 1\n" + "xzY sz 1\n" + "qbT qu 1\n" + "xOt th 1\n" + "xsA st 1\n" + "gLj ng 1\n" + "zxH sz 1\n" + "cLm ch 1\n" + "Dnk an 1\n" + "zIu qu 1\n" + "kpJ ka 1\n" + "xrK er 1\n" + "eIb er 1\n" + "Jbp pr 1\n" + "Bqg qu 1\n" + "tXg th 1\n" + "Zjk ij 1\n" + "dRd de 1\n" + "tjZ th 1\n" + "hQl th 1\n" + "iyW in 1\n" + "Jwd de 1\n" + "qZt th 1\n" + "cJp ch 1\n" + "jBg ng 1\n" + "zrG er 1\n" + "hWf th 1\n" + "Zds st 1\n" + "qsZ qu 1\n" + "cQx ch 1\n" + "ccN ch 1\n" + "ywM wa 1\n" + "gbX ng 1\n" + "tfT th 1\n" + "vwt th 1\n" + "Qbp pr 1\n" + "yeY er 1\n" + "aUb an 1\n" + "qHw qu 1\n" + "Fhq th 1\n" + "Fng an 1\n" + "lvI le 1\n" + "jCf ij 1\n" + "hqH th 1\n" + "tTq th 1\n" + "sfI st 1\n" + "vsM st 1\n" + "lDp le 1\n" + "wJb wa 1\n" + "bhX th 1\n" + "rRq qu 1\n" + "qtS th 1\n" + "Zwp pr 1\n" + "Jbh th 1\n" + "hHb th 1\n" + "pDy pr 1\n" + "sjD st 1\n" + "Oyp pr 1\n" + "qwD qu 1\n" + "jbD ij 1\n" + "vpG va 1\n" + "Wjb ij 1\n" + "vpB va 1\n" + "aXq an 1\n" + "mWz sz 1\n" + "qHi qu 1\n" + "fyN ny 1\n" + "mbQ me 1\n" + "ywC wa 1\n" + "oVg ng 1\n" + "xmZ me 1\n" + "slO le 1\n" + "fXn an 1\n" + "kYs st 1\n" + "pVu qu 1\n" + "bkU ka 1\n" + "Brq qu 1\n" + "qCq qu 1\n" + "Xcx ch 1\n" + "zMt th 1\n" + "cRw ch 1\n" + "gzQ ng 1\n" + "Qbg ng 1\n" + "juU qu 1\n" + "xSz sz 1\n" + "Vgz ng 1\n" + "oMw on 1\n" + "fpE pr 1\n" + "xjX ij 1\n" + "qCg qu 1\n" + "zwM sz 1\n" + "uQl qu 1\n" + "qPk qu 1\n" + "pjD ij 1\n" + "Qzm sz 1\n" + "sIp st 1\n" + "uoG qu 1\n" + "rVl er 1\n" + "cbK ch 1\n" + "hXm th 1\n" + "Ksf st 1\n" + "kbF ka 1\n" + "wBm me 1\n" + "iYt th 1\n" + "sgH ng 1\n" + "Gzv sz 1\n" + "yvE va 1\n" + "xKq qu 1\n" + "sWf st 1\n" + "zBc ch 1\n" + "ykH ka 1\n" + "vjH ij 1\n" + "whI th 1\n" + "vPj ij 1\n" + "Zht th 1\n" + "iJx in 1\n" + "cZt th 1\n" + "dqU qu 1\n" + 
"hMd th 1\n" + "cUj ch 1\n" + "vMg ng 1\n" + "pcJ ch 1\n" + "Bcm ch 1\n" + "jXi in 1\n" + "xoI on 1\n" + "Zkq qu 1\n" + "Xzr er 1\n" + "yzM sz 1\n" + "qjX qu 1\n" + "mNq qu 1\n" + "hpX th 1\n" + "fBq qu 1\n" + "tXd th 1\n" + "Xki in 1\n" + "Hsq qu 1\n" + "bqU qu 1\n" + "sgF ng 1\n" + "dPc ch 1\n" + "Jxi in 1\n" + "Ugp ng 1\n" + "Rxi in 1\n" + "Kwm me 1\n" + "zkD sz 1\n" + "Rql qu 1\n" + "pJb pr 1\n" + "fcV ch 1\n" + "iVd in 1\n" + "bBp be 1\n" + "Ojw ij 1\n" + "vZl le 1\n" + "Iyj ij 1\n" + "fkU ka 1\n" + "Kcq ch 1\n" + "dBq qu 1\n" + "Mqq qu 1\n" + "iMg ng 1\n" + "Wws st 1\n" + "tqX th 1\n" + "xhD th 1\n" + "rNl er 1\n" + "pWd de 1\n" + "jrV er 1\n" + "Bmj ij 1\n" + "Hmq qu 1\n" + "vlH le 1\n" + "Mxb be 1\n" + "yyS ny 1\n" + "qvW qu 1\n" + "fvX va 1\n" + "Vfe er 1\n" + "Cdw de 1\n" + "Kge ng 1\n" + "Qej er 1\n" + "rvZ er 1\n" + "vzI sz 1\n" + "dDn an 1\n" + "nwS an 1\n" + "Qcb ch 1\n" + "wkV ka 1\n" + "uCx qu 1\n" + "Igk ng 1\n" + "Vpm me 1\n" + "hBm th 1\n" + "pdQ de 1\n" + "fgQ ng 1\n" + "yQm me 1\n" + "gxH ng 1\n" + "pqK qu 1\n" + "lRc ch 1\n" + "Xdv de 1\n" + "hDz th 1\n" + "dFw de 1\n" + "qQu un 1\n" + "xbD be 1\n" + "qmE qu 1\n" + "mWm me 1\n" + "jBb ij 1\n" + "jXt th 1\n" + "fxU fo 1\n" + "Xwc ch 1\n" + "Lqf qu 1\n" + "hcP th 1\n" + "pfB pr 1\n" + "vSg ng 1\n" + "xJw wa 1\n" + "mRf me 1\n" + "hqW th 1\n" + "nVb an 1\n" + "cEu ch 1\n" + "nfN an 1\n" + "nVj an 1\n" + "Rwk ka 1\n" + "nmG an 1\n" + "oDt th 1\n" + "kPb ka 1\n" + "gqW qu 1\n" + "Qhf th 1\n" + "qZl qu 1\n" + "zHq qu 1\n" + "iXl in 1\n" +#endif +}; + +inline const int ksizeofUniversalAmbigsFile = sizeof(kUniversalAmbigsFile); } // namespace tesseract diff --git a/src/classify/adaptive.cpp b/src/classify/adaptive.cpp index b111a4de1..3139cce9b 100644 --- a/src/classify/adaptive.cpp +++ b/src/classify/adaptive.cpp @@ -99,7 +99,7 @@ ADAPT_TEMPLATES_STRUCT::ADAPT_TEMPLATES_STRUCT(UNICHARSET &unicharset) { NumNonEmptyClasses = 0; /* Insert an empty class for each unichar id in unicharset */ - for (int i 
= 0; i < MAX_NUM_CLASSES; i++) { + for (unsigned i = 0; i < MAX_NUM_CLASSES; i++) { Class[i] = nullptr; if (i < unicharset.size()) { AddAdaptedClass(this, new ADAPT_CLASS_STRUCT, i); @@ -108,7 +108,7 @@ ADAPT_TEMPLATES_STRUCT::ADAPT_TEMPLATES_STRUCT(UNICHARSET &unicharset) { } ADAPT_TEMPLATES_STRUCT::~ADAPT_TEMPLATES_STRUCT() { - for (int i = 0; i < (Templates)->NumClasses; i++) { + for (unsigned i = 0; i < (Templates)->NumClasses; i++) { delete Class[i]; } delete Templates; @@ -160,11 +160,11 @@ void Classify::PrintAdaptedTemplates(FILE *File, ADAPT_TEMPLATES_STRUCT *Templat fprintf(File, " Id NC NPC NP NPP\n"); fprintf(File, "------------------------\n"); - for (int i = 0; i < (Templates->Templates)->NumClasses; i++) { + for (unsigned i = 0; i < (Templates->Templates)->NumClasses; i++) { IClass = Templates->Templates->Class[i]; AClass = Templates->Class[i]; if (!IsEmptyAdaptedClass(AClass)) { - fprintf(File, "%5d %s %3d %3d %3d %3zd\n", i, unicharset.id_to_unichar(i), IClass->NumConfigs, + fprintf(File, "%5u %s %3d %3d %3d %3zd\n", i, unicharset.id_to_unichar(i), IClass->NumConfigs, AClass->NumPermConfigs, IClass->NumProtos, IClass->NumProtos - AClass->TempProtos->size()); } @@ -242,7 +242,7 @@ ADAPT_TEMPLATES_STRUCT *Classify::ReadAdaptedTemplates(TFile *fp) { Templates->Templates = ReadIntTemplates(fp); /* then read in the adaptive info for each class */ - for (int i = 0; i < (Templates->Templates)->NumClasses; i++) { + for (unsigned i = 0; i < (Templates->Templates)->NumClasses; i++) { Templates->Class[i] = ReadAdaptedClass(fp); } return (Templates); @@ -343,8 +343,6 @@ void WriteAdaptedClass(FILE *File, ADAPT_CLASS_STRUCT *Class, int NumConfigs) { * @note Globals: none */ void Classify::WriteAdaptedTemplates(FILE *File, ADAPT_TEMPLATES_STRUCT *Templates) { - int i; - /* first write the high level adaptive template struct */ fwrite(Templates, sizeof(ADAPT_TEMPLATES_STRUCT), 1, File); @@ -352,7 +350,7 @@ void Classify::WriteAdaptedTemplates(FILE *File, 
ADAPT_TEMPLATES_STRUCT *Templat WriteIntTemplates(File, Templates->Templates, unicharset); /* then write out the adaptive info for each class */ - for (i = 0; i < (Templates->Templates)->NumClasses; i++) { + for (unsigned i = 0; i < (Templates->Templates)->NumClasses; i++) { WriteAdaptedClass(File, Templates->Class[i], Templates->Templates->Class[i]->NumConfigs); } } /* WriteAdaptedTemplates */ diff --git a/src/classify/adaptmatch.cpp b/src/classify/adaptmatch.cpp index 080092d06..271aac5e0 100644 --- a/src/classify/adaptmatch.cpp +++ b/src/classify/adaptmatch.cpp @@ -72,6 +72,11 @@ namespace tesseract { +// TODO: The parameter classify_enable_adaptive_matcher can cause +// a segmentation fault if it is set to false (issue #256), +// so override it here. +#define classify_enable_adaptive_matcher true + #define ADAPT_TEMPLATE_SUFFIX ".a" #define MAX_MATCHES 10 @@ -143,7 +148,7 @@ inline bool MarginalMatch(float confidence, float matcher_great_threshold) { -----------------------------------------------------------------------------*/ // Returns the index of the given id in results, if present, or the size of the // vector (index it will go at) if not present. -static int FindScoredUnichar(UNICHAR_ID id, const ADAPT_RESULTS &results) { +static unsigned FindScoredUnichar(UNICHAR_ID id, const ADAPT_RESULTS &results) { for (unsigned i = 0; i < results.match.size(); i++) { if (results.match[i].unichar_id == id) { return i; @@ -155,7 +160,7 @@ static int FindScoredUnichar(UNICHAR_ID id, const ADAPT_RESULTS &results) { // Returns the current rating for a unichar id if we have rated it, defaulting // to WORST_POSSIBLE_RATING. 
static float ScoredUnichar(UNICHAR_ID id, const ADAPT_RESULTS &results) { - int index = FindScoredUnichar(id, results); + unsigned index = FindScoredUnichar(id, results); if (index >= results.match.size()) { return WORST_POSSIBLE_RATING; } @@ -271,7 +276,7 @@ void Classify::LearnWord(const char *fontname, WERD_RES *word) { tprintf("\n\nAdapting to word = %s\n", word->best_choice->debug_string().c_str()); } thresholds = new float[word_len]; - word->ComputeAdaptionThresholds(certainty_scale, matcher_perfect_threshold, + word->ComputeAdaptionThresholds(getDict().certainty_scale, matcher_perfect_threshold, matcher_good_threshold, matcher_rating_margin, thresholds); } int start_blob = 0; @@ -323,7 +328,7 @@ void Classify::LearnWord(const char *fontname, WERD_RES *word) { pieces_all_natural); std::string full_string; - for (int i = 0; i < tokens.size(); i++) { + for (unsigned i = 0; i < tokens.size(); i++) { full_string += tokens[i]; if (i != tokens.size() - 1) { full_string += ' '; @@ -421,7 +426,7 @@ void Classify::LearnPieces(const char *fontname, int start, int length, float th LearnBlob(fontname, rotated_blob, cn_denorm, fx_info, correct_text); } else if (unicharset.contains_unichar(correct_text)) { UNICHAR_ID class_id = unicharset.unichar_to_id(correct_text); - int font_id = word->fontinfo != nullptr ? fontinfo_table_.get_id(*word->fontinfo) : 0; + int font_id = word->fontinfo != nullptr ? 
fontinfo_table_.get_index(*word->fontinfo) : 0; if (classify_learning_debug_level >= 1) { tprintf("Adapting to char = %s, thr= %g font_id= %d\n", unicharset.id_to_unichar(class_id), threshold, font_id); @@ -578,7 +583,7 @@ void Classify::InitAdaptiveClassifier(TessdataManager *mgr) { tprintf("\n"); PrintAdaptedTemplates(stdout, AdaptedTemplates); - for (int i = 0; i < AdaptedTemplates->Templates->NumClasses; i++) { + for (unsigned i = 0; i < AdaptedTemplates->Templates->NumClasses; i++) { BaselineCutoffs[i] = CharNormCutoffs[i]; } } @@ -807,7 +812,7 @@ bool Classify::AdaptableWord(WERD_RES *word) { if (word->best_choice == nullptr) { return false; } - int BestChoiceLength = word->best_choice->length(); + auto BestChoiceLength = word->best_choice->length(); float adaptable_score = getDict().segment_penalty_dict_case_ok + ADAPTABLE_WERD_ADJUSTMENT; return // rules that apply in general - simplest to compute first BestChoiceLength > 0 && BestChoiceLength == word->rebuild_word->NumBlobs() && @@ -979,7 +984,7 @@ void Classify::DisplayAdaptedChar(TBLOB *blob, INT_CLASS_STRUCT *int_class) { * @param[out] results results to add new result to */ void Classify::AddNewResult(const UnicharRating &new_result, ADAPT_RESULTS *results) { - int old_match = FindScoredUnichar(new_result.unichar_id, *results); + auto old_match = FindScoredUnichar(new_result.unichar_id, *results); if (new_result.rating + matcher_bad_match_pad < results->best_rating || (old_match < results->match.size() && @@ -1120,8 +1125,8 @@ void Classify::ExpandShapesAndApplyCorrections(ADAPT_CLASS_STRUCT **classes, boo // by int_result. In this case, build a vector of UnicharRating to // gather together different font-ids for each unichar. Also covers case1. 
std::vector mapped_results; - for (int f = 0; f < int_result->fonts.size(); ++f) { - int shape_id = int_result->fonts[f].fontinfo_id; + for (auto &f : int_result->fonts) { + int shape_id = f.fontinfo_id; const Shape &shape = shape_table_->GetShape(shape_id); for (int c = 0; c < shape.size(); ++c) { int unichar_id = shape[c].unichar_id; @@ -1139,7 +1144,7 @@ void Classify::ExpandShapesAndApplyCorrections(ADAPT_CLASS_STRUCT **classes, boo mapped_results[r].fonts.clear(); } for (int font_id : shape[c].font_ids) { - mapped_results[r].fonts.emplace_back(font_id, int_result->fonts[f].score); + mapped_results[r].fonts.emplace_back(font_id, f.score); } } } @@ -1283,7 +1288,7 @@ int Classify::CharNormClassifier(TBLOB *blob, const TrainingSample &sample, int Classify::CharNormTrainingSample(bool pruner_only, int keep_this, const TrainingSample &sample, std::vector *results) { results->clear(); - auto *adapt_results = new ADAPT_RESULTS(); + std::unique_ptr adapt_results(new ADAPT_RESULTS()); adapt_results->Initialize(); // Compute the bounding box of the features. uint32_t num_features = sample.num_features(); @@ -1293,16 +1298,15 @@ int Classify::CharNormTrainingSample(bool pruner_only, int keep_this, const Trai sample.geo_feature(GeoTop), sample.geo_feature(GeoTop)); // Compute the char_norm_array from the saved cn_feature. 
FEATURE norm_feature = sample.GetCNFeature(); - auto *char_norm_array = new uint8_t[unicharset.size()]; - int num_pruner_classes = std::max(unicharset.size(), PreTrainedTemplates->NumClasses); - auto *pruner_norm_array = new uint8_t[num_pruner_classes]; - adapt_results->BlobLength = static_cast(ActualOutlineLength(norm_feature) * 20 + 0.5); - ComputeCharNormArrays(norm_feature, PreTrainedTemplates, char_norm_array, pruner_norm_array); + std::vector char_norm_array(unicharset.size()); + auto num_pruner_classes = std::max(static_cast(unicharset.size()), PreTrainedTemplates->NumClasses); + std::vector pruner_norm_array(num_pruner_classes); + adapt_results->BlobLength = static_cast(ActualOutlineLength(norm_feature) * 20 + 0.5f); + ComputeCharNormArrays(norm_feature, PreTrainedTemplates, &char_norm_array[0], &pruner_norm_array[0]); - PruneClasses(PreTrainedTemplates, num_features, keep_this, sample.features(), pruner_norm_array, + PruneClasses(PreTrainedTemplates, num_features, keep_this, sample.features(), &pruner_norm_array[0], shape_table_ != nullptr ? &shapetable_cutoffs_[0] : CharNormCutoffs, &adapt_results->CPResults); - delete[] pruner_norm_array; if (keep_this >= 0) { adapt_results->CPResults[0].Class = keep_this; adapt_results->CPResults.resize(1); @@ -1314,9 +1318,9 @@ int Classify::CharNormTrainingSample(bool pruner_only, int keep_this, const Trai results->push_back(UnicharRating(class_id, 1.0f - it.Rating)); } } else { - MasterMatcher(PreTrainedTemplates, num_features, sample.features(), char_norm_array, nullptr, + MasterMatcher(PreTrainedTemplates, num_features, sample.features(), &char_norm_array[0], nullptr, matcher_debug_flags, classify_integer_matcher_multiplier, blob_box, - adapt_results->CPResults, adapt_results); + adapt_results->CPResults, adapt_results.get()); // Convert master matcher results to output format. 
for (auto &i : adapt_results->match) { results->push_back(i); @@ -1325,8 +1329,6 @@ int Classify::CharNormTrainingSample(bool pruner_only, int keep_this, const Trai std::sort(results->begin(), results->end(), SortDescendingRating); } } - delete[] char_norm_array; - delete adapt_results; return num_features; } /* CharNormTrainingSample */ @@ -1345,7 +1347,7 @@ int Classify::CharNormTrainingSample(bool pruner_only, int keep_this, const Trai void Classify::ClassifyAsNoise(ADAPT_RESULTS *results) { float rating = results->BlobLength / matcher_avg_noise_size; rating *= rating; - rating /= 1.0 + rating; + rating /= 1 + rating; AddNewResult(UnicharRating(UNICHAR_SPACE, 1.0f - rating), results); } /* ClassifyAsNoise */ @@ -1627,18 +1629,18 @@ int Classify::GetCharNormFeature(const INT_FX_RESULT_STRUCT &fx_info, INT_TEMPLA void Classify::ComputeCharNormArrays(FEATURE_STRUCT *norm_feature, INT_TEMPLATES_STRUCT *templates, uint8_t *char_norm_array, uint8_t *pruner_array) { ComputeIntCharNormArray(*norm_feature, char_norm_array); - if (pruner_array != nullptr) { + //if (pruner_array != nullptr) { if (shape_table_ == nullptr) { ComputeIntCharNormArray(*norm_feature, pruner_array); } else { - memset(pruner_array, UINT8_MAX, templates->NumClasses * sizeof(pruner_array[0])); + memset(&pruner_array[0], UINT8_MAX, templates->NumClasses * sizeof(pruner_array[0])); // Each entry in the pruner norm array is the MIN of all the entries of // the corresponding unichars in the CharNormArray. 
- for (int id = 0; id < templates->NumClasses; ++id) { + for (unsigned id = 0; id < templates->NumClasses; ++id) { int font_set_id = templates->Class[id]->font_set_id; const FontSet &fs = fontset_table_.at(font_set_id); - for (int config = 0; config < fs.size(); ++config) { - const Shape &shape = shape_table_->GetShape(fs[config]); + for (auto f : fs) { + const Shape &shape = shape_table_->GetShape(f); for (int c = 0; c < shape.size(); ++c) { if (char_norm_array[shape[c].unichar_id] < pruner_array[id]) { pruner_array[id] = char_norm_array[shape[c].unichar_id]; @@ -1647,7 +1649,7 @@ void Classify::ComputeCharNormArrays(FEATURE_STRUCT *norm_feature, INT_TEMPLATES } } } - } + //} delete norm_feature; } @@ -1780,13 +1782,13 @@ PROTO_ID Classify::MakeNewTempProtos(FEATURE_SET Features, int NumBadFeat, FEATU Y2 = F2->Params[PicoFeatY]; A2 = F2->Params[PicoFeatDir]; - AngleDelta = fabs(A1 - A2); - if (AngleDelta > 0.5) { - AngleDelta = 1.0 - AngleDelta; + AngleDelta = std::fabs(A1 - A2); + if (AngleDelta > 0.5f) { + AngleDelta = 1 - AngleDelta; } - if (AngleDelta > matcher_clustering_max_angle_delta || fabs(X1 - X2) > SegmentLength || - fabs(Y1 - Y2) > SegmentLength) { + if (AngleDelta > matcher_clustering_max_angle_delta || std::fabs(X1 - X2) > SegmentLength || + std::fabs(Y1 - Y2) > SegmentLength) { break; } } @@ -1809,8 +1811,8 @@ PROTO_ID Classify::MakeNewTempProtos(FEATURE_SET Features, int NumBadFeat, FEATU instead of the -0.25 to 0.75 used in baseline normalization */ Proto->Length = SegmentLength; Proto->Angle = A1; - Proto->X = (X1 + X2) / 2.0; - Proto->Y = (Y1 + Y2) / 2.0 - Y_DIM_OFFSET; + Proto->X = (X1 + X2) / 2; + Proto->Y = (Y1 + Y2) / 2 - Y_DIM_OFFSET; FillABC(Proto); TempProto->ProtoId = Pid; @@ -2043,7 +2045,7 @@ void Classify::RemoveExtraPuncs(ADAPT_RESULTS *Results) { * - matcher_good_threshold default good match rating */ void Classify::SetAdaptiveThreshold(float Threshold) { - Threshold = (Threshold == matcher_good_threshold) ? 
0.9 : (1.0 - Threshold); + Threshold = (Threshold == matcher_good_threshold) ? 0.9f : (1 - Threshold); classify_adapt_proto_threshold.set_value(ClipToRange(255 * Threshold, 0, 255)); classify_adapt_feature_threshold.set_value(ClipToRange(255 * Threshold, 0, 255)); } /* SetAdaptiveThreshold */ @@ -2117,12 +2119,12 @@ int Classify::ClassAndConfigIDToFontOrShapeID(int class_id, int int_result_confi // Converts a shape_table_ index to a classifier class_id index (not a // unichar-id!). Uses a search, so not fast. int Classify::ShapeIDToClassID(int shape_id) const { - for (int id = 0; id < PreTrainedTemplates->NumClasses; ++id) { + for (unsigned id = 0; id < PreTrainedTemplates->NumClasses; ++id) { int font_set_id = PreTrainedTemplates->Class[id]->font_set_id; ASSERT_HOST(font_set_id >= 0); const FontSet &fs = fontset_table_.at(font_set_id); - for (int config = 0; config < fs.size(); ++config) { - if (fs[config] == shape_id) { + for (auto f : fs) { + if (f == shape_id) { return id; } } diff --git a/src/classify/classify.cpp b/src/classify/classify.cpp index 77b48e4e5..c11a0912d 100644 --- a/src/classify/classify.cpp +++ b/src/classify/classify.cpp @@ -101,7 +101,6 @@ Classify::Classify() "its expected textline position", this->params()) , double_MEMBER(rating_scale, 1.5, "Rating scaling factor", this->params()) - , double_MEMBER(certainty_scale, 20.0, "Certainty scaling factor", this->params()) , double_MEMBER(tessedit_class_miss_scale, 0.00390625, "Scale factor for features not used", this->params()) , double_MEMBER(classify_adapted_pruning_factor, 2.5, diff --git a/src/classify/classify.h b/src/classify/classify.h index b399f053b..2225e5fea 100644 --- a/src/classify/classify.h +++ b/src/classify/classify.h @@ -41,14 +41,10 @@ public: // Member variables. 
- INT_VAR_H(classify_debug_level, 0, "Classify debug level"); - - BOOL_VAR_H(classify_bln_numeric_mode, 0, "Assume the input is numbers [0-9]."); - - double_VAR_H(classify_max_rating_ratio, 1.5, "Veto ratio between classifier ratings"); - - double_VAR_H(classify_max_certainty_margin, 5.5, - "Veto difference between classifier certainties"); + INT_VAR_H(classify_debug_level); + BOOL_VAR_H(classify_bln_numeric_mode); + double_VAR_H(classify_max_rating_ratio); + double_VAR_H(classify_max_certainty_margin); private: Dict dict_; @@ -360,80 +356,64 @@ public: // Parameters. // Set during training (in lang.config) to indicate whether the divisible // blobs chopper should be used (true for latin script.) - BOOL_VAR_H(allow_blob_division, true, "Use divisible blobs chopping"); + BOOL_VAR_H(allow_blob_division); // Set during training (in lang.config) to indicate whether the divisible // blobs chopper should be used in preference to chopping. Set to true for // southern Indic scripts. - BOOL_VAR_H(prioritize_division, false, "Prioritize blob division over chopping"); - BOOL_VAR_H(classify_enable_learning, true, "Enable adaptive classifier"); - INT_VAR_H(classify_debug_level, 0, "Classify debug level"); + BOOL_VAR_H(prioritize_division); + BOOL_VAR_H(classify_enable_learning); + INT_VAR_H(classify_debug_level); /* mfoutline.cpp ***********************************************************/ /* control knobs used to control normalization of outlines */ - INT_VAR_H(classify_norm_method, character, "Normalization Method ..."); - double_VAR_H(classify_char_norm_range, 0.2, "Character Normalization Range ..."); - double_VAR_H(classify_max_rating_ratio, 1.5, "Veto ratio between classifier ratings"); - double_VAR_H(classify_max_certainty_margin, 5.5, - "Veto difference between classifier certainties"); + INT_VAR_H(classify_norm_method); + double_VAR_H(classify_char_norm_range); + double_VAR_H(classify_max_rating_ratio); + double_VAR_H(classify_max_certainty_margin); /* adaptmatch.cpp 
***********************************************************/ - BOOL_VAR_H(tess_cn_matching, 0, "Character Normalized Matching"); - BOOL_VAR_H(tess_bn_matching, 0, "Baseline Normalized Matching"); - BOOL_VAR_H(classify_enable_adaptive_matcher, 1, "Enable adaptive classifier"); - BOOL_VAR_H(classify_use_pre_adapted_templates, 0, "Use pre-adapted classifier templates"); - BOOL_VAR_H(classify_save_adapted_templates, 0, "Save adapted templates to a file"); - BOOL_VAR_H(classify_enable_adaptive_debugger, 0, "Enable match debugger"); - BOOL_VAR_H(classify_nonlinear_norm, 0, "Non-linear stroke-density normalization"); - INT_VAR_H(matcher_debug_level, 0, "Matcher Debug Level"); - INT_VAR_H(matcher_debug_flags, 0, "Matcher Debug Flags"); - INT_VAR_H(classify_learning_debug_level, 0, "Learning Debug Level: "); - double_VAR_H(matcher_good_threshold, 0.125, "Good Match (0-1)"); - double_VAR_H(matcher_reliable_adaptive_result, 0.0, "Great Match (0-1)"); - double_VAR_H(matcher_perfect_threshold, 0.02, "Perfect Match (0-1)"); - double_VAR_H(matcher_bad_match_pad, 0.15, "Bad Match Pad (0-1)"); - double_VAR_H(matcher_rating_margin, 0.1, "New template margin (0-1)"); - double_VAR_H(matcher_avg_noise_size, 12.0, "Avg. 
noise blob length: "); - INT_VAR_H(matcher_permanent_classes_min, 1, "Min # of permanent classes"); - INT_VAR_H(matcher_min_examples_for_prototyping, 3, "Reliable Config Threshold"); - INT_VAR_H(matcher_sufficient_examples_for_prototyping, 5, - "Enable adaption even if the ambiguities have not been seen"); - double_VAR_H(matcher_clustering_max_angle_delta, 0.015, - "Maximum angle delta for prototype clustering"); - double_VAR_H(classify_misfit_junk_penalty, 0.0, - "Penalty to apply when a non-alnum is vertically out of " - "its expected textline position"); - double_VAR_H(rating_scale, 1.5, "Rating scaling factor"); - double_VAR_H(certainty_scale, 20.0, "Certainty scaling factor"); - double_VAR_H(tessedit_class_miss_scale, 0.00390625, "Scale factor for features not used"); - double_VAR_H(classify_adapted_pruning_factor, 2.5, - "Prune poor adapted results this much worse than best result"); - double_VAR_H(classify_adapted_pruning_threshold, -1.0, - "Threshold at which classify_adapted_pruning_factor starts"); - INT_VAR_H(classify_adapt_proto_threshold, 230, "Threshold for good protos during adaptive 0-255"); - INT_VAR_H(classify_adapt_feature_threshold, 230, - "Threshold for good features during adaptive 0-255"); - BOOL_VAR_H(disable_character_fragments, true, - "Do not include character fragments in the" - " results of the classifier"); - double_VAR_H(classify_character_fragments_garbage_certainty_threshold, -3.0, - "Exclude fragments that do not match any whole character" - " with at least this certainty"); - BOOL_VAR_H(classify_debug_character_fragments, false, - "Bring up graphical debugging windows for fragments training"); - BOOL_VAR_H(matcher_debug_separate_windows, false, - "Use two different windows for debugging the matching: " - "One for the protos and one for the features."); - STRING_VAR_H(classify_learn_debug_str, "", "Class str to debug learning"); + BOOL_VAR_H(tess_cn_matching); + BOOL_VAR_H(tess_bn_matching); + 
BOOL_VAR_H(classify_enable_adaptive_matcher); + BOOL_VAR_H(classify_use_pre_adapted_templates); + BOOL_VAR_H(classify_save_adapted_templates); + BOOL_VAR_H(classify_enable_adaptive_debugger); + BOOL_VAR_H(classify_nonlinear_norm); + INT_VAR_H(matcher_debug_level); + INT_VAR_H(matcher_debug_flags); + INT_VAR_H(classify_learning_debug_level); + double_VAR_H(matcher_good_threshold); + double_VAR_H(matcher_reliable_adaptive_result); + double_VAR_H(matcher_perfect_threshold); + double_VAR_H(matcher_bad_match_pad); + double_VAR_H(matcher_rating_margin); + double_VAR_H(matcher_avg_noise_size); + INT_VAR_H(matcher_permanent_classes_min); + INT_VAR_H(matcher_min_examples_for_prototyping); + INT_VAR_H(matcher_sufficient_examples_for_prototyping); + double_VAR_H(matcher_clustering_max_angle_delta); + double_VAR_H(classify_misfit_junk_penalty); + double_VAR_H(rating_scale); + double_VAR_H(tessedit_class_miss_scale); + double_VAR_H(classify_adapted_pruning_factor); + double_VAR_H(classify_adapted_pruning_threshold); + INT_VAR_H(classify_adapt_proto_threshold); + INT_VAR_H(classify_adapt_feature_threshold); + BOOL_VAR_H(disable_character_fragments); + double_VAR_H(classify_character_fragments_garbage_certainty_threshold); + BOOL_VAR_H(classify_debug_character_fragments); + BOOL_VAR_H(matcher_debug_separate_windows); + STRING_VAR_H(classify_learn_debug_str); /* intmatcher.cpp **********************************************************/ - INT_VAR_H(classify_class_pruner_threshold, 229, "Class Pruner Threshold 0-255"); - INT_VAR_H(classify_class_pruner_multiplier, 15, "Class Pruner Multiplier 0-255: "); - INT_VAR_H(classify_cp_cutoff_strength, 7, "Class Pruner CutoffStrength: "); - INT_VAR_H(classify_integer_matcher_multiplier, 10, "Integer Matcher Multiplier 0-255: "); + INT_VAR_H(classify_class_pruner_threshold); + INT_VAR_H(classify_class_pruner_multiplier); + INT_VAR_H(classify_cp_cutoff_strength); + INT_VAR_H(classify_integer_matcher_multiplier); - 
BOOL_VAR_H(classify_bln_numeric_mode, 0, "Assume the input is numbers [0-9]."); - double_VAR_H(speckle_large_max_size, 0.30, "Max large speckle size"); - double_VAR_H(speckle_rating_penalty, 10.0, "Penalty to add to worst rating for noise"); + BOOL_VAR_H(classify_bln_numeric_mode); + double_VAR_H(speckle_large_max_size); + double_VAR_H(speckle_rating_penalty); // Use class variables to hold onto built-in templates and adapted templates. INT_TEMPLATES_STRUCT *PreTrainedTemplates = nullptr; diff --git a/src/classify/cluster.cpp b/src/classify/cluster.cpp index ea04af394..046c79f14 100644 --- a/src/classify/cluster.cpp +++ b/src/classify/cluster.cpp @@ -28,7 +28,6 @@ #include // for FLT_MAX #include // for M_PI -#include // for std::array #include // for std::vector namespace tesseract { @@ -1489,7 +1488,7 @@ CLUSTERER *MakeClusterer(int16_t SampleSize, const PARAM_DESC ParamDesc[]) { * * @return Pointer to the new sample data structure */ -SAMPLE *MakeSample(CLUSTERER *Clusterer, const float *Feature, int32_t CharID) { +SAMPLE *MakeSample(CLUSTERER *Clusterer, const float *Feature, uint32_t CharID) { int i; // see if the samples have already been clustered - if so trap an error @@ -1674,13 +1673,13 @@ float Mean(PROTOTYPE *Proto, uint16_t Dimension) { float StandardDeviation(PROTOTYPE *Proto, uint16_t Dimension) { switch (Proto->Style) { case spherical: - return sqrt(Proto->Variance.Spherical); + return std::sqrt(Proto->Variance.Spherical); case elliptical: - return sqrt(Proto->Variance.Elliptical[Dimension]); + return std::sqrt(Proto->Variance.Elliptical[Dimension]); case mixed: switch (Proto->Distrib[Dimension]) { case normal: - return sqrt(Proto->Variance.Elliptical[Dimension]); + return std::sqrt(Proto->Variance.Elliptical[Dimension]); case uniform: case D_random: return Proto->Variance.Elliptical[Dimension]; @@ -2268,7 +2267,7 @@ static PROTOTYPE *MakeMixedProto(CLUSTERER *Clusterer, CLUSTER *Cluster, STATIST } FillBuckets(NormalBuckets, Cluster, i, 
&(Clusterer->ParamDesc[i]), Proto->Mean[i], - sqrt(Proto->Variance.Elliptical[i])); + std::sqrt(Proto->Variance.Elliptical[i])); if (DistributionOK(NormalBuckets)) { continue; } @@ -2576,7 +2575,7 @@ static bool Independent(PARAM_DESC *ParamDesc, int16_t N, float *CoVariance, flo if ((*VARii == 0.0) || (*VARjj == 0.0)) { CorrelationCoeff = 0.0; } else { - CorrelationCoeff = sqrt(sqrt(*CoVariance * *CoVariance / (*VARii * *VARjj))); + CorrelationCoeff = sqrt(std::sqrt(*CoVariance * *CoVariance / (*VARii * *VARjj))); } if (CorrelationCoeff > Independence) { return false; diff --git a/src/classify/cluster.h b/src/classify/cluster.h index 5e8bf0d06..c8580bf65 100644 --- a/src/classify/cluster.h +++ b/src/classify/cluster.h @@ -95,7 +95,7 @@ struct CLUSTERER { KDTREE *KDTree; // for optimal nearest neighbor searching CLUSTER *Root; // ptr to root cluster of cluster tree LIST ProtoList; // list of prototypes - int32_t NumChar; // # of characters represented by samples + uint32_t NumChar; // # of characters represented by samples // cache of reusable histograms by distribution type and number of buckets. 
BUCKETS *bucket_cache[DISTRIBUTION_COUNT][MAXBUCKETS + 1 - MINBUCKETS]; }; @@ -116,7 +116,7 @@ TESS_API CLUSTERER *MakeClusterer(int16_t SampleSize, const PARAM_DESC ParamDesc[]); TESS_API -SAMPLE *MakeSample(CLUSTERER *Clusterer, const float *Feature, int32_t CharID); +SAMPLE *MakeSample(CLUSTERER *Clusterer, const float *Feature, uint32_t CharID); TESS_API LIST ClusterSamples(CLUSTERER *Clusterer, CLUSTERCONFIG *Config); diff --git a/src/classify/clusttool.cpp b/src/classify/clusttool.cpp index 4e920538d..543378d6b 100644 --- a/src/classify/clusttool.cpp +++ b/src/classify/clusttool.cpp @@ -207,7 +207,7 @@ PROTOTYPE *ReadPrototype(TFile *fp, uint16_t N) { case spherical: ReadNFloats(fp, 1, &(Proto->Variance.Spherical)); Proto->Magnitude.Spherical = 1.0 / sqrt(2.0 * M_PI * Proto->Variance.Spherical); - Proto->TotalMagnitude = pow(Proto->Magnitude.Spherical, static_cast(N)); + Proto->TotalMagnitude = std::pow(Proto->Magnitude.Spherical, static_cast(N)); Proto->LogMagnitude = log(static_cast(Proto->TotalMagnitude)); Proto->Weight.Spherical = 1.0 / Proto->Variance.Spherical; Proto->Distrib.clear(); diff --git a/src/classify/float2int.cpp b/src/classify/float2int.cpp index 6da55221f..cb47e6485 100644 --- a/src/classify/float2int.cpp +++ b/src/classify/float2int.cpp @@ -57,7 +57,7 @@ void Classify::ClearCharNormArray(uint8_t *char_norm_array) { */ void Classify::ComputeIntCharNormArray(const FEATURE_STRUCT &norm_feature, uint8_t *char_norm_array) { - for (int i = 0; i < unicharset.size(); i++) { + for (unsigned i = 0; i < unicharset.size(); i++) { if (i < PreTrainedTemplates->NumClasses) { int norm_adjust = static_cast(INT_CHAR_NORM_RANGE * ComputeNormMatch(i, norm_feature, false)); diff --git a/src/classify/intmatcher.cpp b/src/classify/intmatcher.cpp index e25b9a095..98162cdd2 100644 --- a/src/classify/intmatcher.cpp +++ b/src/classify/intmatcher.cpp @@ -165,7 +165,7 @@ public: void ComputeScores(const INT_TEMPLATES_STRUCT *int_templates, int num_features, const 
INT_FEATURE_STRUCT *features) { num_features_ = num_features; - int num_pruners = int_templates->NumClassPruners; + auto num_pruners = int_templates->NumClassPruners; for (int f = 0; f < num_features; ++f) { const INT_FEATURE_STRUCT *feature = &features[f]; // Quantize the feature to NUM_CP_BUCKETS*NUM_CP_BUCKETS*NUM_CP_BUCKETS. @@ -175,7 +175,7 @@ public: int class_id = 0; // Each CLASS_PRUNER_STRUCT only covers CLASSES_PER_CP(32) classes, so // we need a collection of them, indexed by pruner_set. - for (int pruner_set = 0; pruner_set < num_pruners; ++pruner_set) { + for (unsigned pruner_set = 0; pruner_set < num_pruners; ++pruner_set) { // Look up quantized feature in a 3-D array, an array of weights for // each class. const uint32_t *pruner_word_ptr = int_templates->ClassPruners[pruner_set]->p[x][y][theta]; @@ -372,6 +372,7 @@ for (int bit = 0; bit < BITS_PER_WERD/NUM_BITS_PER_CLASS; bit++) { /// Copies the pruned, sorted classes into the output results and returns /// the number of classes. int SetupResults(std::vector *results) const { + results->clear(); results->resize(num_classes_); for (int c = 0; c < num_classes_; ++c) { (*results)[c].Class = sort_index_[num_classes_ - c]; @@ -675,7 +676,7 @@ IntegerMatcher::IntegerMatcher(tesseract::IntParam *classify_debug_level) if (kSEExponentialMultiplier > 0.0) { double scale = - 1.0 - exp(-kSEExponentialMultiplier) * + 1.0 - std::exp(-kSEExponentialMultiplier) * exp(kSEExponentialMultiplier * (static_cast(i) / SE_TABLE_SIZE)); evidence *= ClipToRange(scale, 0.0, 1.0); } diff --git a/src/classify/intmatcher.h b/src/classify/intmatcher.h index b4c49eb2c..d46100cb7 100644 --- a/src/classify/intmatcher.h +++ b/src/classify/intmatcher.h @@ -26,11 +26,9 @@ namespace tesseract { // but turned on/off on the language-by-language basis or depending // on particular properties of the corpus (e.g. when we expect the // images to have low exposure). 
-extern BOOL_VAR_H(disable_character_fragments, false, - "Do not include character fragments in the" - " results of the classifier"); +extern BOOL_VAR_H(disable_character_fragments); -extern INT_VAR_H(classify_integer_matcher_multiplier, 10, "Integer Matcher Multiplier 0-255: "); +extern INT_VAR_H(classify_integer_matcher_multiplier); struct UnicharRating; diff --git a/src/classify/intproto.cpp b/src/classify/intproto.cpp index 33e539628..373a96725 100644 --- a/src/classify/intproto.cpp +++ b/src/classify/intproto.cpp @@ -221,7 +221,7 @@ void AddIntClass(INT_TEMPLATES_STRUCT *Templates, CLASS_ID ClassId, INT_CLASS_ST int Pruner; assert(LegalClassId(ClassId)); - if (ClassId != Templates->NumClasses) { + if (static_cast(ClassId) != Templates->NumClasses) { fprintf(stderr, "Please make sure that classes are added to templates" " in increasing order of ClassIds\n"); @@ -365,14 +365,14 @@ void AddProtoToProtoPruner(PROTO_STRUCT *Proto, int ProtoId, INT_CLASS_STRUCT *C Length = Proto->Length; X = Proto->X + X_SHIFT; - Pad = std::max(fabs(cos(Angle)) * (Length / 2.0 + classify_pp_end_pad * GetPicoFeatureLength()), - fabs(sin(Angle)) * (classify_pp_side_pad * GetPicoFeatureLength())); + Pad = std::max(fabs(std::cos(Angle)) * (Length / 2.0 + classify_pp_end_pad * GetPicoFeatureLength()), + fabs(std::sin(Angle)) * (classify_pp_side_pad * GetPicoFeatureLength())); FillPPLinearBits(ProtoSet->ProtoPruner[PRUNER_X], Index, X, Pad, debug); Y = Proto->Y + Y_SHIFT; - Pad = std::max(fabs(sin(Angle)) * (Length / 2.0 + classify_pp_end_pad * GetPicoFeatureLength()), - fabs(cos(Angle)) * (classify_pp_side_pad * GetPicoFeatureLength())); + Pad = std::max(fabs(std::sin(Angle)) * (Length / 2.0 + classify_pp_end_pad * GetPicoFeatureLength()), + fabs(std::cos(Angle)) * (classify_pp_side_pad * GetPicoFeatureLength())); FillPPLinearBits(ProtoSet->ProtoPruner[PRUNER_Y], Index, Y, Pad, debug); } /* AddProtoToProtoPruner */ @@ -491,13 +491,12 @@ INT_TEMPLATES_STRUCT 
*Classify::CreateIntTemplates(CLASSES FloatProtos, const UNICHARSET &target_unicharset) { CLASS_TYPE FClass; INT_CLASS_STRUCT *IClass; - int ClassId; int ProtoId; int ConfigId; auto IntTemplates = new INT_TEMPLATES_STRUCT; - for (ClassId = 0; ClassId < target_unicharset.size(); ClassId++) { + for (unsigned ClassId = 0; ClassId < target_unicharset.size(); ClassId++) { FClass = &(FloatProtos[ClassId]); if (FClass->NumProtos == 0 && FClass->NumConfigs == 0 && strcmp(target_unicharset.id_to_unichar(ClassId), " ") != 0) { @@ -507,14 +506,10 @@ INT_TEMPLATES_STRUCT *Classify::CreateIntTemplates(CLASSES FloatProtos, assert(UnusedClassIdIn(IntTemplates, ClassId)); IClass = new INT_CLASS_STRUCT(FClass->NumProtos, FClass->NumConfigs); FontSet fs{FClass->font_set.size()}; - for (int i = 0; i < fs.size(); ++i) { + for (unsigned i = 0; i < fs.size(); ++i) { fs[i] = FClass->font_set.at(i); } - if (this->fontset_table_.contains(fs)) { - IClass->font_set_id = this->fontset_table_.get_id(fs); - } else { - IClass->font_set_id = this->fontset_table_.push_back(fs); - } + IClass->font_set_id = this->fontset_table_.push_back(fs); AddIntClass(IntTemplates, ClassId, IClass); for (ProtoId = 0; ProtoId < FClass->NumProtos; ProtoId++) { @@ -613,10 +608,10 @@ INT_TEMPLATES_STRUCT::INT_TEMPLATES_STRUCT() { } INT_TEMPLATES_STRUCT::~INT_TEMPLATES_STRUCT() { - for (int i = 0; i < NumClasses; i++) { + for (unsigned i = 0; i < NumClasses; i++) { delete Class[i]; } - for (int i = 0; i < NumClassPruners; i++) { + for (unsigned i = 0; i < NumClassPruners; i++) { delete ClassPruners[i]; } } @@ -630,9 +625,7 @@ INT_TEMPLATES_STRUCT::~INT_TEMPLATES_STRUCT() { * @note Globals: none */ INT_TEMPLATES_STRUCT *Classify::ReadIntTemplates(TFile *fp) { - int i, j, w, x, y, z; - int unicharset_size; - int version_id = 0; + int j, w, x, y, z; INT_TEMPLATES_STRUCT *Templates; CLASS_PRUNER_STRUCT *Pruner; INT_CLASS_STRUCT *Class; @@ -645,25 +638,29 @@ INT_TEMPLATES_STRUCT *Classify::ReadIntTemplates(TFile *fp) { 
uint32_t SetBitsForMask = // word with NUM_BITS_PER_CLASS (1 << NUM_BITS_PER_CLASS) - 1; // set starting at bit 0 uint32_t Mask, NewMask, ClassBits; - int MaxNumConfigs = MAX_NUM_CONFIGS; - int WerdsPerConfigVec = WERDS_PER_CONFIG_VEC; + unsigned MaxNumConfigs = MAX_NUM_CONFIGS; + unsigned WerdsPerConfigVec = WERDS_PER_CONFIG_VEC; /* first read the high level template struct */ Templates = new INT_TEMPLATES_STRUCT; // Read Templates in parts for 64 bit compatibility. + uint32_t unicharset_size; if (fp->FReadEndian(&unicharset_size, sizeof(unicharset_size), 1) != 1) { tprintf("Bad read of inttemp!\n"); } - if (fp->FReadEndian(&Templates->NumClasses, sizeof(Templates->NumClasses), 1) != 1 || + int32_t version_id = 0; + if (fp->FReadEndian(&version_id, sizeof(version_id), 1) != 1 || fp->FReadEndian(&Templates->NumClassPruners, sizeof(Templates->NumClassPruners), 1) != 1) { tprintf("Bad read of inttemp!\n"); } - if (Templates->NumClasses < 0) { + if (version_id < 0) { // This file has a version id! 
- version_id = -Templates->NumClasses; + version_id = -version_id; if (fp->FReadEndian(&Templates->NumClasses, sizeof(Templates->NumClasses), 1) != 1) { tprintf("Bad read of inttemp!\n"); } + } else { + Templates->NumClasses = version_id; } if (version_id < 3) { @@ -683,8 +680,8 @@ INT_TEMPLATES_STRUCT *Classify::ReadIntTemplates(TFile *fp) { } /* then read in the class pruners */ - const int kNumBuckets = NUM_CP_BUCKETS * NUM_CP_BUCKETS * NUM_CP_BUCKETS * WERDS_PER_CP_VECTOR; - for (i = 0; i < Templates->NumClassPruners; i++) { + const unsigned kNumBuckets = NUM_CP_BUCKETS * NUM_CP_BUCKETS * NUM_CP_BUCKETS * WERDS_PER_CP_VECTOR; + for (unsigned i = 0; i < Templates->NumClassPruners; i++) { Pruner = new CLASS_PRUNER_STRUCT; if (fp->FReadEndian(Pruner, sizeof(Pruner->p[0][0][0][0]), kNumBuckets) != kNumBuckets) { tprintf("Bad read of inttemp!\n"); @@ -700,19 +697,19 @@ INT_TEMPLATES_STRUCT *Classify::ReadIntTemplates(TFile *fp) { if (version_id < 2) { // Allocate enough class pruners to cover all the class ids. max_class_id = 0; - for (i = 0; i < Templates->NumClasses; i++) { + for (unsigned i = 0; i < Templates->NumClasses; i++) { if (ClassIdFor[i] > max_class_id) { max_class_id = ClassIdFor[i]; } } - for (i = 0; i <= CPrunerIdFor(max_class_id); i++) { + for (int i = 0; i <= CPrunerIdFor(max_class_id); i++) { Templates->ClassPruners[i] = new CLASS_PRUNER_STRUCT; memset(Templates->ClassPruners[i], 0, sizeof(CLASS_PRUNER_STRUCT)); } // Convert class pruners from the old format (indexed by class index) // to the new format (indexed by class id). 
last_cp_bit_number = NUM_BITS_PER_CLASS * Templates->NumClasses - 1; - for (i = 0; i < Templates->NumClassPruners; i++) { + for (unsigned i = 0; i < Templates->NumClassPruners; i++) { for (x = 0; x < NUM_CP_BUCKETS; x++) { for (y = 0; y < NUM_CP_BUCKETS; y++) { for (z = 0; z < NUM_CP_BUCKETS; z++) { @@ -750,13 +747,13 @@ INT_TEMPLATES_STRUCT *Classify::ReadIntTemplates(TFile *fp) { } } } - for (i = 0; i < Templates->NumClassPruners; i++) { + for (unsigned i = 0; i < Templates->NumClassPruners; i++) { delete TempClassPruner[i]; } } /* then read in each class */ - for (i = 0; i < Templates->NumClasses; i++) { + for (unsigned i = 0; i < Templates->NumClasses; i++) { /* first read in the high level struct for the class */ Class = new INT_CLASS_STRUCT; if (fp->FReadEndian(&Class->NumProtos, sizeof(Class->NumProtos), 1) != 1 || @@ -773,7 +770,7 @@ INT_TEMPLATES_STRUCT *Classify::ReadIntTemplates(TFile *fp) { } } } - int num_configs = version_id < 4 ? MaxNumConfigs : Class->NumConfigs; + unsigned num_configs = version_id < 4 ? 
MaxNumConfigs : Class->NumConfigs; ASSERT_HOST(num_configs <= MaxNumConfigs); if (fp->FReadEndian(Class->ConfigLengths, sizeof(uint16_t), num_configs) != num_configs) { tprintf("Bad read of inttemp!\n"); @@ -797,7 +794,7 @@ INT_TEMPLATES_STRUCT *Classify::ReadIntTemplates(TFile *fp) { /* then read in the proto sets */ for (j = 0; j < Class->NumProtoSets; j++) { auto ProtoSet = new PROTO_SET_STRUCT; - int num_buckets = NUM_PP_PARAMS * NUM_PP_BUCKETS * WERDS_PER_PP_VECTOR; + unsigned num_buckets = NUM_PP_PARAMS * NUM_PP_BUCKETS * WERDS_PER_PP_VECTOR; if (fp->FReadEndian(&ProtoSet->ProtoPruner, sizeof(ProtoSet->ProtoPruner[0][0][0]), num_buckets) != num_buckets) { tprintf("Bad read of inttemp!\n"); @@ -830,7 +827,7 @@ INT_TEMPLATES_STRUCT *Classify::ReadIntTemplates(TFile *fp) { ClassForClassId(Templates, 0)->font_set_id = -1; Templates->NumClasses++; /* make sure the classes are contiguous */ - for (i = 0; i < MAX_NUM_CLASSES; i++) { + for (unsigned i = 0; i < MAX_NUM_CLASSES; i++) { if (i < Templates->NumClasses) { if (ClassForClassId(Templates, i) == nullptr) { fprintf(stderr, "Non-contiguous class ids in inttemp\n"); @@ -838,7 +835,7 @@ INT_TEMPLATES_STRUCT *Classify::ReadIntTemplates(TFile *fp) { } } else { if (ClassForClassId(Templates, i) != nullptr) { - fprintf(stderr, "Class id %d exceeds NumClassesIn (Templates) %d\n", i, + fprintf(stderr, "Class id %u exceeds NumClassesIn (Templates) %u\n", i, Templates->NumClasses); exit(1); } @@ -919,15 +916,14 @@ void ClearFeatureSpaceWindow(NORM_METHOD norm_method, ScrollView *window) { */ void Classify::WriteIntTemplates(FILE *File, INT_TEMPLATES_STRUCT *Templates, const UNICHARSET &target_unicharset) { - int i, j; INT_CLASS_STRUCT *Class; - int unicharset_size = target_unicharset.size(); + auto unicharset_size = target_unicharset.size(); int version_id = -5; // When negated by the reader -1 becomes +1 etc. 
if (Templates->NumClasses != unicharset_size) { tprintf( "Warning: executing WriteIntTemplates() with %d classes in" - " Templates, while target_unicharset size is %d\n", + " Templates, while target_unicharset size is %zu\n", Templates->NumClasses, unicharset_size); } @@ -938,12 +934,12 @@ void Classify::WriteIntTemplates(FILE *File, INT_TEMPLATES_STRUCT *Templates, fwrite(&Templates->NumClasses, sizeof(Templates->NumClasses), 1, File); /* then write out the class pruners */ - for (i = 0; i < Templates->NumClassPruners; i++) { + for (unsigned i = 0; i < Templates->NumClassPruners; i++) { fwrite(Templates->ClassPruners[i], sizeof(CLASS_PRUNER_STRUCT), 1, File); } /* then write out each class */ - for (i = 0; i < Templates->NumClasses; i++) { + for (unsigned i = 0; i < Templates->NumClasses; i++) { Class = Templates->Class[i]; /* first write out the high level struct for the class */ @@ -951,7 +947,7 @@ void Classify::WriteIntTemplates(FILE *File, INT_TEMPLATES_STRUCT *Templates, fwrite(&Class->NumProtoSets, sizeof(Class->NumProtoSets), 1, File); ASSERT_HOST(Class->NumConfigs == this->fontset_table_.at(Class->font_set_id).size()); fwrite(&Class->NumConfigs, sizeof(Class->NumConfigs), 1, File); - for (j = 0; j < Class->NumConfigs; ++j) { + for (int j = 0; j < Class->NumConfigs; ++j) { fwrite(&Class->ConfigLengths[j], sizeof(uint16_t), 1, File); } @@ -961,7 +957,7 @@ void Classify::WriteIntTemplates(FILE *File, INT_TEMPLATES_STRUCT *Templates, } /* then write out the proto sets */ - for (j = 0; j < Class->NumProtoSets; j++) { + for (int j = 0; j < Class->NumProtoSets; j++) { fwrite(Class->ProtoSets[j], sizeof(PROTO_SET_STRUCT), 1, File); } @@ -991,7 +987,7 @@ void Classify::WriteIntTemplates(FILE *File, INT_TEMPLATES_STRUCT *Templates, * @note Globals: none */ float BucketStart(int Bucket, float Offset, int NumBuckets) { - return ((static_cast(Bucket) / NumBuckets) - Offset); + return static_cast(Bucket) / NumBuckets - Offset; } /* BucketStart */ @@ -1007,7 +1003,7 @@ 
float BucketStart(int Bucket, float Offset, int NumBuckets) { * @note Globals: none */ float BucketEnd(int Bucket, float Offset, int NumBuckets) { - return ((static_cast(Bucket + 1) / NumBuckets) - Offset); + return static_cast(Bucket + 1) / NumBuckets - Offset; } /* BucketEnd */ /** @@ -1180,7 +1176,7 @@ CLASS_ID Classify::GetClassToDebug(const char *Prompt, bool *adaptive_on, bool * *shape_id = atoi(ev->parameter); *adaptive_on = false; *pretrained_on = true; - if (*shape_id >= 0 && *shape_id < shape_table_->NumShapes()) { + if (*shape_id >= 0 && static_cast(*shape_id) < shape_table_->NumShapes()) { int font_id; shape_table_->GetFirstUnicharAndFont(*shape_id, &unichar_id, &font_id); tprintf("Shape %d, first unichar=%d, font=%d\n", *shape_id, unichar_id, font_id); @@ -1208,7 +1204,7 @@ CLASS_ID Classify::GetClassToDebug(const char *Prompt, bool *adaptive_on, bool * *shape_id = -1; return unichar_id; } - for (int s = 0; s < shape_table_->NumShapes(); ++s) { + for (unsigned s = 0; s < shape_table_->NumShapes(); ++s) { if (shape_table_->GetShape(s).ContainsUnichar(unichar_id)) { tprintf("%s\n", shape_table_->DebugStr(s).c_str()); } @@ -1388,8 +1384,8 @@ void InitTableFiller(float EndPad, float SidePad, float AnglePad, PROTO_STRUCT * if ((Angle > 0.0 && Angle < 0.25) || (Angle > 0.5 && Angle < 0.75)) { /* rising diagonal proto */ Angle *= 2.0 * M_PI; - Cos = fabs(cos(Angle)); - Sin = fabs(sin(Angle)); + Cos = fabs(std::cos(Angle)); + Sin = fabs(std::sin(Angle)); /* compute the positions of the corners of the acceptance region */ Start.x = X - (HalfLength + EndPad) * Cos - SidePad * Sin; @@ -1438,8 +1434,8 @@ void InitTableFiller(float EndPad, float SidePad, float AnglePad, PROTO_STRUCT * } else { /* falling diagonal proto */ Angle *= 2.0 * M_PI; - Cos = fabs(cos(Angle)); - Sin = fabs(sin(Angle)); + Cos = fabs(std::cos(Angle)); + Sin = fabs(std::sin(Angle)); /* compute the positions of the corners of the acceptance region */ Start.x = X - (HalfLength + EndPad) * Cos - 
SidePad * Sin; diff --git a/src/classify/intproto.h b/src/classify/intproto.h index cd0bde997..b16aab45d 100644 --- a/src/classify/intproto.h +++ b/src/classify/intproto.h @@ -106,8 +106,8 @@ struct INT_CLASS_STRUCT { struct TESS_API INT_TEMPLATES_STRUCT { INT_TEMPLATES_STRUCT(); ~INT_TEMPLATES_STRUCT(); - int NumClasses; - int NumClassPruners; + unsigned NumClasses; + unsigned NumClassPruners; INT_CLASS_STRUCT *Class[MAX_NUM_CLASSES]; CLASS_PRUNER_STRUCT *ClassPruners[MAX_NUM_CLASS_PRUNERS]; }; diff --git a/src/classify/kdtree.cpp b/src/classify/kdtree.cpp index 4d419a23c..df706d77c 100644 --- a/src/classify/kdtree.cpp +++ b/src/classify/kdtree.cpp @@ -33,7 +33,7 @@ namespace tesseract { /*----------------------------------------------------------------------------- Global Data Definitions and Declarations -----------------------------------------------------------------------------*/ -#define MINSEARCH -FLT_MAX +#define MINSEARCH (-FLT_MAX) #define MAXSEARCH FLT_MAX // Helper function to find the next essential dimension in a cycle. 
@@ -398,7 +398,7 @@ float DistanceSquared(int k, PARAM_DESC *dim, float p1[], float p2[]) { } float ComputeDistance(int k, PARAM_DESC *dim, float p1[], float p2[]) { - return sqrt(DistanceSquared(k, dim, p1, p2)); + return std::sqrt(DistanceSquared(k, dim, p1, p2)); } /*---------------------------------------------------------------------------*/ diff --git a/src/classify/mf.cpp b/src/classify/mf.cpp index f66ffe4e1..000c78cc2 100644 --- a/src/classify/mf.cpp +++ b/src/classify/mf.cpp @@ -44,7 +44,7 @@ FEATURE_SET ExtractMicros(TBLOB *Blob, const DENORM &cn_denorm) { return nullptr; } int n = 0; - for (auto &f : features) { + for ([[maybe_unused]] auto &f: features) { ++n; } auto FeatureSet = new FEATURE_SET_STRUCT(n); diff --git a/src/classify/mfx.h b/src/classify/mfx.h index e89e13e3c..854528ba0 100644 --- a/src/classify/mfx.h +++ b/src/classify/mfx.h @@ -31,9 +31,8 @@ struct TBLOB; ----------------------------------------------------------------------------**/ /* old numbers corresponded to 10.0 degrees and 80.0 degrees */ -extern double_VAR_H(classify_min_slope, 0.414213562, - "Slope below which lines are called horizontal"); -extern double_VAR_H(classify_max_slope, 2.414213562, "Slope above which lines are called vertical"); +extern double_VAR_H(classify_min_slope); +extern double_VAR_H(classify_max_slope); /*---------------------------------------------------------------------------- Public Function Prototypes diff --git a/src/classify/normmatch.h b/src/classify/normmatch.h index d49db3cf3..797f9d6e2 100644 --- a/src/classify/normmatch.h +++ b/src/classify/normmatch.h @@ -25,8 +25,8 @@ namespace tesseract { /* control knobs used to control the normalization adjustment process */ -extern double_VAR_H(classify_norm_adj_midpoint, 32.0, "Norm adjust midpoint ..."); -extern double_VAR_H(classify_norm_adj_curl, 2.0, "Norm adjust curl ..."); +extern double_VAR_H(classify_norm_adj_midpoint); +extern double_VAR_H(classify_norm_adj_curl); } // namespace tesseract diff 
--git a/src/classify/picofeat.h b/src/classify/picofeat.h index 663373ed4..391aaa2f8 100644 --- a/src/classify/picofeat.h +++ b/src/classify/picofeat.h @@ -48,7 +48,7 @@ typedef enum { PicoFeatY, PicoFeatDir, PicoFeatX } PICO_FEAT_PARAM_NAME; Variables ----------------------------------------------------------------------------*/ -extern double_VAR_H(classify_pico_feature_length, 0.05, "Pico Feature Length"); +extern double_VAR_H(classify_pico_feature_length); /**---------------------------------------------------------------------------- Public Function Prototypes diff --git a/src/classify/shapeclassifier.cpp b/src/classify/shapeclassifier.cpp index eb247de3f..da0ddb3bc 100644 --- a/src/classify/shapeclassifier.cpp +++ b/src/classify/shapeclassifier.cpp @@ -45,8 +45,7 @@ int ShapeClassifier::UnicharClassifySample(const TrainingSample &sample, Image p std::vector shape_results; int num_shape_results = ClassifySample(sample, page_pix, debug, keep_this, &shape_results); const ShapeTable *shapes = GetShapeTable(); - std::vector unichar_map; - unichar_map.resize(shapes->unicharset().size(), -1); + std::vector unichar_map(shapes->unicharset().size(), -1); for (int r = 0; r < num_shape_results; ++r) { shapes->AddShapeToResults(shape_results[r], &unichar_map, results); } @@ -205,13 +204,13 @@ void ShapeClassifier::FilterDuplicateUnichars(std::vector *results) std::vector filtered_results; // Copy results to filtered results and knock out duplicate unichars. 
const ShapeTable *shapes = GetShapeTable(); - for (int r = 0; r < results->size(); ++r) { + for (unsigned r = 0; r < results->size(); ++r) { if (r > 0) { const Shape &shape_r = shapes->GetShape((*results)[r].shape_id); int c; for (c = 0; c < shape_r.size(); ++c) { int unichar_id = shape_r[c].unichar_id; - int s; + unsigned s; for (s = 0; s < r; ++s) { const Shape &shape_s = shapes->GetShape((*results)[s].shape_id); if (shape_s.ContainsUnichar(unichar_id)) { diff --git a/src/classify/shapetable.cpp b/src/classify/shapetable.cpp index f4b4e3508..abfb3f0fc 100644 --- a/src/classify/shapetable.cpp +++ b/src/classify/shapetable.cpp @@ -37,8 +37,8 @@ namespace tesseract { // Returns -1 if the unichar_id is not found int ShapeRating::FirstResultWithUnichar(const std::vector &results, const ShapeTable &shape_table, UNICHAR_ID unichar_id) { - for (int r = 0; r < results.size(); ++r) { - const int shape_id = results[r].shape_id; + for (unsigned r = 0; r < results.size(); ++r) { + const auto shape_id = results[r].shape_id; const Shape &shape = shape_table.GetShape(shape_id); if (shape.ContainsUnichar(unichar_id)) { return r; @@ -53,7 +53,7 @@ int ShapeRating::FirstResultWithUnichar(const std::vector &results, // Returns -1 if the unichar_id is not found int UnicharRating::FirstResultWithUnichar(const std::vector &results, UNICHAR_ID unichar_id) { - for (int r = 0; r < results.size(); ++r) { + for (unsigned r = 0; r < results.size(); ++r) { if (results[r].unichar_id == unichar_id) { return r; } @@ -122,7 +122,7 @@ void Shape::AddToShape(int unichar_id, int font_id) { // Adds everything in other to this. 
void Shape::AddShape(const Shape &other) { for (const auto &unichar : other.unichars_) { - for (int f = 0; f < unichar.font_ids.size(); ++f) { + for (unsigned f = 0; f < unichar.font_ids.size(); ++f) { AddToShape(unichar.unichar_id, unichar.font_ids[f]); } } @@ -229,7 +229,7 @@ bool Shape::IsEqualUnichars(Shape *other) { if (!other->unichars_sorted_) { other->SortUnichars(); } - for (int c = 0; c < unichars_.size(); ++c) { + for (unsigned c = 0; c < unichars_.size(); ++c) { if (unichars_[c].unichar_id != other->unichars_[c].unichar_id) { return false; } @@ -289,8 +289,8 @@ void ShapeTable::ReMapClassIds(const std::vector &unicharset_map) { } // Returns a string listing the classes/fonts in a shape. -std::string ShapeTable::DebugStr(int shape_id) const { - if (shape_id < 0 || shape_id >= shape_table_.size()) { +std::string ShapeTable::DebugStr(unsigned shape_id) const { + if (shape_id >= shape_table_.size()) { return "INVALID_UNICHAR_ID"; } const Shape &shape = GetShape(shape_id); @@ -326,7 +326,7 @@ std::string ShapeTable::SummaryStr() const { int max_unichars = 0; int num_multi_shapes = 0; int num_master_shapes = 0; - for (int s = 0; s < shape_table_.size(); ++s) { + for (unsigned s = 0; s < shape_table_.size(); ++s) { if (MasterDestinationIndex(s) != s) { continue; } @@ -348,8 +348,8 @@ std::string ShapeTable::SummaryStr() const { // Adds a new shape starting with the given unichar_id and font_id. // Returns the assigned index. -int ShapeTable::AddShape(int unichar_id, int font_id) { - int index = shape_table_.size(); +unsigned ShapeTable::AddShape(int unichar_id, int font_id) { + auto index = shape_table_.size(); auto *shape = new Shape; shape->AddToShape(unichar_id, font_id); shape_table_.push_back(shape); @@ -359,8 +359,8 @@ int ShapeTable::AddShape(int unichar_id, int font_id) { // Adds a copy of the given shape unless it is already present. // Returns the assigned index or index of existing shape if already present. 
-int ShapeTable::AddShape(const Shape &other) { - int index; +unsigned ShapeTable::AddShape(const Shape &other) { + unsigned index; for (index = 0; index < shape_table_.size() && !(other == *shape_table_[index]); ++index) { continue; } @@ -373,21 +373,21 @@ int ShapeTable::AddShape(const Shape &other) { } // Removes the shape given by the shape index. -void ShapeTable::DeleteShape(int shape_id) { +void ShapeTable::DeleteShape(unsigned shape_id) { delete shape_table_[shape_id]; shape_table_.erase(shape_table_.begin() + shape_id); } // Adds a font_id to the given existing shape index for the given // unichar_id. If the unichar_id is not in the shape, it is added. -void ShapeTable::AddToShape(int shape_id, int unichar_id, int font_id) { +void ShapeTable::AddToShape(unsigned shape_id, int unichar_id, int font_id) { Shape &shape = *shape_table_[shape_id]; shape.AddToShape(unichar_id, font_id); num_fonts_ = std::max(num_fonts_, font_id + 1); } // Adds the given shape to the existing shape with the given index. -void ShapeTable::AddShapeToShape(int shape_id, const Shape &other) { +void ShapeTable::AddShapeToShape(unsigned shape_id, const Shape &other) { Shape &shape = *shape_table_[shape_id]; shape.AddShape(other); num_fonts_ = 0; @@ -398,7 +398,7 @@ void ShapeTable::AddShapeToShape(int shape_id, const Shape &other) { // If font_id < 0, the font_id is ignored and the first shape that matches // the unichar_id is returned. int ShapeTable::FindShape(int unichar_id, int font_id) const { - for (int s = 0; s < shape_table_.size(); ++s) { + for (unsigned s = 0; s < shape_table_.size(); ++s) { const Shape &shape = GetShape(s); for (int c = 0; c < shape.size(); ++c) { if (shape[c].unichar_id == unichar_id) { @@ -417,7 +417,7 @@ int ShapeTable::FindShape(int unichar_id, int font_id) const { } // Returns the first unichar_id and font_id in the given shape. 
-void ShapeTable::GetFirstUnicharAndFont(int shape_id, int *unichar_id, int *font_id) const { +void ShapeTable::GetFirstUnicharAndFont(unsigned shape_id, int *unichar_id, int *font_id) const { const UnicharAndFonts &unichar_and_fonts = (*shape_table_[shape_id])[0]; *unichar_id = unichar_and_fonts.unichar_id; *font_id = unichar_and_fonts.font_ids[0]; @@ -428,7 +428,7 @@ void ShapeTable::GetFirstUnicharAndFont(int shape_id, int *unichar_id, int *font int ShapeTable::BuildFromShape(const Shape &shape, const ShapeTable &master_shapes) { BitVector shape_map(master_shapes.NumShapes()); for (int u_ind = 0; u_ind < shape.size(); ++u_ind) { - for (int f_ind = 0; f_ind < shape[u_ind].font_ids.size(); ++f_ind) { + for (unsigned f_ind = 0; f_ind < shape[u_ind].font_ids.size(); ++f_ind) { int c = shape[u_ind].unichar_id; int f = shape[u_ind].font_ids[f_ind]; int master_id = master_shapes.FindShape(c, f); @@ -440,7 +440,7 @@ int ShapeTable::BuildFromShape(const Shape &shape, const ShapeTable &master_shap } } int num_masters = 0; - for (int s = 0; s < master_shapes.NumShapes(); ++s) { + for (unsigned s = 0; s < master_shapes.NumShapes(); ++s) { if (shape_map[s]) { AddShape(master_shapes.GetShape(s)); ++num_masters; @@ -450,14 +450,14 @@ int ShapeTable::BuildFromShape(const Shape &shape, const ShapeTable &master_shap } // Returns true if the shapes are already merged. -bool ShapeTable::AlreadyMerged(int shape_id1, int shape_id2) const { +bool ShapeTable::AlreadyMerged(unsigned shape_id1, unsigned shape_id2) const { return MasterDestinationIndex(shape_id1) == MasterDestinationIndex(shape_id2); } // Returns true if any shape contains multiple unichars. 
bool ShapeTable::AnyMultipleUnichars() const { - int num_shapes = NumShapes(); - for (int s1 = 0; s1 < num_shapes; ++s1) { + auto num_shapes = NumShapes(); + for (unsigned s1 = 0; s1 < num_shapes; ++s1) { if (MasterDestinationIndex(s1) != s1) { continue; } @@ -482,11 +482,11 @@ int ShapeTable::MaxNumUnichars() const { // Merges shapes with a common unichar over the [start, end) interval. // Assumes single unichar per shape. -void ShapeTable::ForceFontMerges(int start, int end) { - for (int s1 = start; s1 < end; ++s1) { +void ShapeTable::ForceFontMerges(unsigned start, unsigned end) { + for (unsigned s1 = start; s1 < end; ++s1) { if (MasterDestinationIndex(s1) == s1 && GetShape(s1).size() == 1) { int unichar_id = GetShape(s1)[0].unichar_id; - for (int s2 = s1 + 1; s2 < end; ++s2) { + for (auto s2 = s1 + 1; s2 < end; ++s2) { if (MasterDestinationIndex(s2) == s2 && GetShape(s2).size() == 1 && unichar_id == GetShape(s2)[0].unichar_id) { MergeShapes(s1, s2); @@ -500,13 +500,13 @@ void ShapeTable::ForceFontMerges(int start, int end) { } // Returns the number of unichars in the master shape. -int ShapeTable::MasterUnicharCount(int shape_id) const { +unsigned ShapeTable::MasterUnicharCount(unsigned shape_id) const { int master_id = MasterDestinationIndex(shape_id); return GetShape(master_id).size(); } // Returns the sum of the font counts in the master shape. -int ShapeTable::MasterFontCount(int shape_id) const { +int ShapeTable::MasterFontCount(unsigned shape_id) const { int master_id = MasterDestinationIndex(shape_id); const Shape &shape = GetShape(master_id); int font_count = 0; @@ -517,7 +517,7 @@ int ShapeTable::MasterFontCount(int shape_id) const { } // Returns the number of unichars that would result from merging the shapes. -int ShapeTable::MergedUnicharCount(int shape_id1, int shape_id2) const { +int ShapeTable::MergedUnicharCount(unsigned shape_id1, unsigned shape_id2) const { // Do it the easy way for now. 
int master_id1 = MasterDestinationIndex(shape_id1); int master_id2 = MasterDestinationIndex(shape_id2); @@ -527,9 +527,9 @@ int ShapeTable::MergedUnicharCount(int shape_id1, int shape_id2) const { } // Merges two shape_ids, leaving shape_id2 marked as merged. -void ShapeTable::MergeShapes(int shape_id1, int shape_id2) { - int master_id1 = MasterDestinationIndex(shape_id1); - int master_id2 = MasterDestinationIndex(shape_id2); +void ShapeTable::MergeShapes(unsigned shape_id1, unsigned shape_id2) { + auto master_id1 = MasterDestinationIndex(shape_id1); + auto master_id2 = MasterDestinationIndex(shape_id2); // Point master_id2 (and all merged shapes) to master_id1. shape_table_[master_id2]->set_destination_index(master_id1); // Add all the shapes of master_id2 to master_id1. @@ -537,7 +537,7 @@ void ShapeTable::MergeShapes(int shape_id1, int shape_id2) { } // Swaps two shape_ids. -void ShapeTable::SwapShapes(int shape_id1, int shape_id2) { +void ShapeTable::SwapShapes(unsigned shape_id1, unsigned shape_id2) { Shape *tmp = shape_table_[shape_id1]; shape_table_[shape_id1] = shape_table_[shape_id2]; shape_table_[shape_id2] = tmp; @@ -545,12 +545,12 @@ void ShapeTable::SwapShapes(int shape_id1, int shape_id2) { // Returns the destination of this shape, (if merged), taking into account // the fact that the destination may itself have been merged. -int ShapeTable::MasterDestinationIndex(int shape_id) const { - int dest_id = shape_table_[shape_id]->destination_index(); - if (dest_id == shape_id || dest_id < 0) { +unsigned ShapeTable::MasterDestinationIndex(unsigned shape_id) const { + auto dest_id = shape_table_[shape_id]->destination_index(); + if (static_cast(dest_id) == shape_id || dest_id < 0) { return shape_id; // Is master already. } - int master_id = shape_table_[dest_id]->destination_index(); + auto master_id = shape_table_[dest_id]->destination_index(); if (master_id == dest_id || master_id < 0) { return dest_id; // Dest is the master and shape_id points to it. 
} @@ -559,7 +559,7 @@ int ShapeTable::MasterDestinationIndex(int shape_id) const { } // Returns false if the unichars in neither shape is a subset of the other. -bool ShapeTable::SubsetUnichar(int shape_id1, int shape_id2) const { +bool ShapeTable::SubsetUnichar(unsigned shape_id1, unsigned shape_id2) const { const Shape &shape1 = GetShape(shape_id1); const Shape &shape2 = GetShape(shape_id2); int c1, c2; @@ -579,7 +579,7 @@ bool ShapeTable::SubsetUnichar(int shape_id1, int shape_id2) const { } // Returns false if the unichars in neither shape is a subset of the other. -bool ShapeTable::MergeSubsetUnichar(int merge_id1, int merge_id2, int shape_id) const { +bool ShapeTable::MergeSubsetUnichar(int merge_id1, int merge_id2, unsigned shape_id) const { const Shape &merge1 = GetShape(merge_id1); const Shape &merge2 = GetShape(merge_id2); const Shape &shape = GetShape(shape_id); @@ -606,7 +606,7 @@ bool ShapeTable::MergeSubsetUnichar(int merge_id1, int merge_id2, int shape_id) } // Returns true if the unichar sets are equal between the shapes. -bool ShapeTable::EqualUnichars(int shape_id1, int shape_id2) const { +bool ShapeTable::EqualUnichars(unsigned shape_id1, unsigned shape_id2) const { const Shape &shape1 = GetShape(shape_id1); const Shape &shape2 = GetShape(shape_id2); for (int c1 = 0; c1 < shape1.size(); ++c1) { @@ -625,7 +625,7 @@ bool ShapeTable::EqualUnichars(int shape_id1, int shape_id2) const { } // Returns true if the unichar sets are equal between the shapes. -bool ShapeTable::MergeEqualUnichars(int merge_id1, int merge_id2, int shape_id) const { +bool ShapeTable::MergeEqualUnichars(int merge_id1, int merge_id2, unsigned shape_id) const { const Shape &merge1 = GetShape(merge_id1); const Shape &merge2 = GetShape(merge_id2); const Shape &shape = GetShape(shape_id); @@ -651,7 +651,7 @@ bool ShapeTable::MergeEqualUnichars(int merge_id1, int merge_id2, int shape_id) } // Returns true if there is a common unichar between the shapes. 
-bool ShapeTable::CommonUnichars(int shape_id1, int shape_id2) const { +bool ShapeTable::CommonUnichars(unsigned shape_id1, unsigned shape_id2) const { const Shape &shape1 = GetShape(shape_id1); const Shape &shape2 = GetShape(shape_id2); for (int c1 = 0; c1 < shape1.size(); ++c1) { @@ -664,7 +664,7 @@ bool ShapeTable::CommonUnichars(int shape_id1, int shape_id2) const { } // Returns true if there is a common font id between the shapes. -bool ShapeTable::CommonFont(int shape_id1, int shape_id2) const { +bool ShapeTable::CommonFont(unsigned shape_id1, unsigned shape_id2) const { const Shape &shape1 = GetShape(shape_id1); const Shape &shape2 = GetShape(shape_id2); for (int c1 = 0; c1 < shape1.size(); ++c1) { @@ -682,9 +682,10 @@ bool ShapeTable::CommonFont(int shape_id1, int shape_id2) const { // If not nullptr, shape_map is set to map other shape_ids to this's shape_ids. void ShapeTable::AppendMasterShapes(const ShapeTable &other, std::vector *shape_map) { if (shape_map != nullptr) { + shape_map->clear(); shape_map->resize(other.NumShapes(), -1); } - for (int s = 0; s < other.shape_table_.size(); ++s) { + for (unsigned s = 0; s < other.shape_table_.size(); ++s) { if (other.shape_table_[s]->destination_index() < 0) { int index = AddShape(*other.shape_table_[s]); if (shape_map != nullptr) { diff --git a/src/classify/shapetable.h b/src/classify/shapetable.h index d6ff126cd..ddf5f9574 100644 --- a/src/classify/shapetable.h +++ b/src/classify/shapetable.h @@ -44,7 +44,7 @@ struct UnicharRating { tprintf( "Unichar-id=%d, rating=%g, adapted=%d, config=%d, misses=%u," " %zu fonts\n", - unichar_id, rating, adapted, config, feature_misses, fonts.size()); + unichar_id, static_cast(rating), adapted, config, feature_misses, fonts.size()); } // Helper function to get the index of the first result with the required @@ -245,7 +245,7 @@ public: bool DeSerialize(TFile *fp); // Accessors. 
- int NumShapes() const { + unsigned NumShapes() const { return shape_table_.size(); } const UNICHARSET &unicharset() const { @@ -263,36 +263,36 @@ public: // Useful in conjunction with set_unicharset. void ReMapClassIds(const std::vector &unicharset_map); // Returns a string listing the classes/fonts in a shape. - std::string DebugStr(int shape_id) const; + std::string DebugStr(unsigned shape_id) const; // Returns a debug string summarizing the table. std::string SummaryStr() const; // Adds a new shape starting with the given unichar_id and font_id. // Returns the assigned index. - int AddShape(int unichar_id, int font_id); + unsigned AddShape(int unichar_id, int font_id); // Adds a copy of the given shape unless it is already present. // Returns the assigned index or index of existing shape if already present. - int AddShape(const Shape &other); + unsigned AddShape(const Shape &other); // Removes the shape given by the shape index. All indices above are changed! - void DeleteShape(int shape_id); + void DeleteShape(unsigned shape_id); // Adds a font_id to the given existing shape index for the given // unichar_id. If the unichar_id is not in the shape, it is added. - void AddToShape(int shape_id, int unichar_id, int font_id); + void AddToShape(unsigned shape_id, int unichar_id, int font_id); // Adds the given shape to the existing shape with the given index. - void AddShapeToShape(int shape_id, const Shape &other); + void AddShapeToShape(unsigned shape_id, const Shape &other); // Returns the id of the shape that contains the given unichar and font. // If not found, returns -1. // If font_id < 0, the font_id is ignored and the first shape that matches // the unichar_id is returned. int FindShape(int unichar_id, int font_id) const; // Returns the first unichar_id and font_id in the given shape. 
- void GetFirstUnicharAndFont(int shape_id, int *unichar_id, int *font_id) const; + void GetFirstUnicharAndFont(unsigned shape_id, int *unichar_id, int *font_id) const; // Accessors for the Shape with the given shape_id. - const Shape &GetShape(int shape_id) const { + const Shape &GetShape(unsigned shape_id) const { return *shape_table_[shape_id]; } - Shape *MutableShape(int shape_id) { + Shape *MutableShape(unsigned shape_id) { return shape_table_[shape_id]; } @@ -301,24 +301,24 @@ public: int BuildFromShape(const Shape &shape, const ShapeTable &master_shapes); // Returns true if the shapes are already merged. - bool AlreadyMerged(int shape_id1, int shape_id2) const; + bool AlreadyMerged(unsigned shape_id1, unsigned shape_id2) const; // Returns true if any shape contains multiple unichars. bool AnyMultipleUnichars() const; // Returns the maximum number of unichars over all shapes. int MaxNumUnichars() const; // Merges shapes with a common unichar over the [start, end) interval. // Assumes single unichar per shape. - void ForceFontMerges(int start, int end); + void ForceFontMerges(unsigned start, unsigned end); // Returns the number of unichars in the master shape. - int MasterUnicharCount(int shape_id) const; + unsigned MasterUnicharCount(unsigned shape_id) const; // Returns the sum of the font counts in the master shape. - int MasterFontCount(int shape_id) const; + int MasterFontCount(unsigned shape_id) const; // Returns the number of unichars that would result from merging the shapes. - int MergedUnicharCount(int shape_id1, int shape_id2) const; + int MergedUnicharCount(unsigned shape_id1, unsigned shape_id2) const; // Merges two shape_ids, leaving shape_id2 marked as merged. - void MergeShapes(int shape_id1, int shape_id2); + void MergeShapes(unsigned shape_id1, unsigned shape_id2); // Swaps two shape_ids. - void SwapShapes(int shape_id1, int shape_id2); + void SwapShapes(unsigned shape_id1, unsigned shape_id2); // Appends the master shapes from other to this. 
// Used to create a clean ShapeTable from a merged one, or to create a // copy of a ShapeTable. @@ -330,19 +330,19 @@ public: // Returns the destination of this shape, (if merged), taking into account // the fact that the destination may itself have been merged. // For a non-merged shape, returns the input shape_id. - int MasterDestinationIndex(int shape_id) const; + unsigned MasterDestinationIndex(unsigned shape_id) const; // Returns false if the unichars in neither shape is a subset of the other.. - bool SubsetUnichar(int shape_id1, int shape_id2) const; + bool SubsetUnichar(unsigned shape_id1, unsigned shape_id2) const; // Returns false if the unichars in neither shape is a subset of the other.. - bool MergeSubsetUnichar(int merge_id1, int merge_id2, int shape_id) const; + bool MergeSubsetUnichar(int merge_id1, int merge_id2, unsigned shape_id) const; // Returns true if the unichar sets are equal between the shapes. - bool EqualUnichars(int shape_id1, int shape_id2) const; - bool MergeEqualUnichars(int merge_id1, int merge_id2, int shape_id) const; + bool EqualUnichars(unsigned shape_id1, unsigned shape_id2) const; + bool MergeEqualUnichars(int merge_id1, int merge_id2, unsigned shape_id) const; // Returns true if there is a common unichar between the shapes. - bool CommonUnichars(int shape_id1, int shape_id2) const; + bool CommonUnichars(unsigned shape_id1, unsigned shape_id2) const; // Returns true if there is a common font id between the shapes. - bool CommonFont(int shape_id1, int shape_id2) const; + bool CommonFont(unsigned shape_id1, unsigned shape_id2) const; // Adds the unichars of the given shape_id to the vector of results. 
Any // unichar_id that is already present just has the fonts added to the diff --git a/src/dict/context.cpp b/src/dict/context.cpp index 567b3e40d..d3f07fe04 100644 --- a/src/dict/context.cpp +++ b/src/dict/context.cpp @@ -44,9 +44,8 @@ const int case_state_table[6][4] = { int Dict::case_ok(const WERD_CHOICE &word) const { int state = 0; - int x; const UNICHARSET *unicharset = word.unicharset(); - for (x = 0; x < word.length(); ++x) { + for (unsigned x = 0; x < word.length(); ++x) { UNICHAR_ID ch_id = word.unichar_id(x); if (unicharset->get_isupper(ch_id)) { state = case_state_table[state][1]; @@ -69,7 +68,7 @@ bool Dict::absolute_garbage(const WERD_CHOICE &word, const UNICHARSET &unicharse return false; } int num_alphanum = 0; - for (int x = 0; x < word.length(); ++x) { + for (unsigned x = 0; x < word.length(); ++x) { num_alphanum += (unicharset.get_isalpha(word.unichar_id(x)) || unicharset.get_isdigit(word.unichar_id(x))); } diff --git a/src/dict/dawg.cpp b/src/dict/dawg.cpp index 21995dda8..af45176f2 100644 --- a/src/dict/dawg.cpp +++ b/src/dict/dawg.cpp @@ -38,7 +38,8 @@ namespace tesseract { // instead of weak vtables in every compilation unit. Dawg::~Dawg() = default; -bool Dawg::prefix_in_dawg(const WERD_CHOICE &word, bool requires_complete) const { +bool Dawg::prefix_in_dawg(const WERD_CHOICE &word, + bool requires_complete) const { if (word.empty()) { return !requires_complete; } @@ -56,7 +57,8 @@ bool Dawg::prefix_in_dawg(const WERD_CHOICE &word, bool requires_complete) const } } // Now check the last character. 
- return edge_char_of(node, word.unichar_id(end_index), requires_complete) != NO_EDGE; + return edge_char_of(node, word.unichar_id(end_index), requires_complete) != + NO_EDGE; } bool Dawg::word_in_dawg(const WERD_CHOICE &word) const { @@ -84,7 +86,8 @@ int Dawg::check_for_words(const char *filename, const UNICHARSET &unicharset, chomp_string(string); // remove newline WERD_CHOICE word(string, unicharset); if (word.length() > 0 && !word.contains_unichar_id(INVALID_UNICHAR_ID)) { - if (!match_words(&word, 0, 0, enable_wildcard ? wildcard : INVALID_UNICHAR_ID)) { + if (!match_words(&word, 0, 0, + enable_wildcard ? wildcard : INVALID_UNICHAR_ID)) { tprintf("Missing word: %s\n", string); ++misses; } @@ -106,21 +109,25 @@ void Dawg::iterate_words(const UNICHARSET &unicharset, iterate_words_rec(word, 0, cb); } -static void CallWithUTF8(std::function cb, const WERD_CHOICE *wc) { +static void CallWithUTF8(const std::function &cb, + const WERD_CHOICE *wc) { std::string s; wc->string_and_lengths(&s, nullptr); cb(s.c_str()); } -void Dawg::iterate_words(const UNICHARSET &unicharset, std::function cb) const { +void Dawg::iterate_words(const UNICHARSET &unicharset, + const std::function &cb) const { using namespace std::placeholders; // for _1 - std::function shim(std::bind(CallWithUTF8, cb, _1)); + std::function shim( + std::bind(CallWithUTF8, cb, _1)); WERD_CHOICE word(&unicharset); iterate_words_rec(word, 0, shim); } -void Dawg::iterate_words_rec(const WERD_CHOICE &word_so_far, NODE_REF to_explore, - std::function cb) const { +void Dawg::iterate_words_rec( + const WERD_CHOICE &word_so_far, NODE_REF to_explore, + const std::function &cb) const { NodeChildVector children; this->unichar_ids_of(to_explore, &children, false); for (auto &i : children) { @@ -136,10 +143,8 @@ void Dawg::iterate_words_rec(const WERD_CHOICE &word_so_far, NODE_REF to_explore } } -bool Dawg::match_words(WERD_CHOICE *word, int32_t index, NODE_REF node, UNICHAR_ID wildcard) const { - EDGE_REF edge; - 
int32_t word_end; - +bool Dawg::match_words(WERD_CHOICE *word, uint32_t index, NODE_REF node, + UNICHAR_ID wildcard) const { if (wildcard != INVALID_UNICHAR_ID && word->unichar_id(index) == wildcard) { bool any_matched = false; NodeChildVector vec; @@ -153,8 +158,8 @@ bool Dawg::match_words(WERD_CHOICE *word, int32_t index, NODE_REF node, UNICHAR_ word->set_unichar_id(wildcard, index); return any_matched; } else { - word_end = index == word->length() - 1; - edge = edge_char_of(node, word->unichar_id(index), word_end); + auto word_end = index == word->length() - 1; + auto edge = edge_char_of(node, word->unichar_id(index), word_end); if (edge != NO_EDGE) { // normal edge in DAWG node = next_node(edge); if (word_end) { @@ -190,7 +195,8 @@ SquishedDawg::~SquishedDawg() { delete[] edges_; } -EDGE_REF SquishedDawg::edge_char_of(NODE_REF node, UNICHAR_ID unichar_id, bool word_end) const { +EDGE_REF SquishedDawg::edge_char_of(NODE_REF node, UNICHAR_ID unichar_id, + bool word_end) const { EDGE_REF edge = node; if (node == 0) { // binary search EDGE_REF start = 0; @@ -198,7 +204,8 @@ EDGE_REF SquishedDawg::edge_char_of(NODE_REF node, UNICHAR_ID unichar_id, bool w int compare; while (start <= end) { edge = (start + end) >> 1; // (start + end) / 2 - compare = given_greater_than_edge_rec(NO_EDGE, word_end, unichar_id, edges_[edge]); + compare = given_greater_than_edge_rec(NO_EDGE, word_end, unichar_id, + edges_[edge]); if (compare == 0) { // given == vec[k] return edge; } else if (compare == 1) { // given > vec[k] @@ -261,8 +268,8 @@ void SquishedDawg::print_node(NODE_REF node, int max_num_edges) const { eow = end_of_word(edge) ? 
eow_string : not_eow_string; unichar_id = edge_letter(edge); - tprintf(REFFORMAT " : next = " REFFORMAT ", unichar_id = %d, %s %s %s\n", edge, - next_node(edge), unichar_id, direction, is_last, eow); + tprintf(REFFORMAT " : next = " REFFORMAT ", unichar_id = %d, %s %s %s\n", + edge, next_node(edge), unichar_id, direction, is_last, eow); if (edge - node > max_num_edges) { return; @@ -276,8 +283,9 @@ void SquishedDawg::print_node(NODE_REF node, int max_num_edges) const { eow = end_of_word(edge) ? eow_string : not_eow_string; unichar_id = edge_letter(edge); - tprintf(REFFORMAT " : next = " REFFORMAT ", unichar_id = %d, %s %s %s\n", edge, - next_node(edge), unichar_id, direction, is_last, eow); + tprintf(REFFORMAT " : next = " REFFORMAT + ", unichar_id = %d, %s %s %s\n", + edge, next_node(edge), unichar_id, direction, is_last, eow); if (edge - node > MAX_NODE_EDGES_DISPLAY) { return; @@ -294,9 +302,11 @@ void SquishedDawg::print_edge(EDGE_REF edge) const { if (edge == NO_EDGE) { tprintf("NO_EDGE\n"); } else { - tprintf(REFFORMAT " : next = " REFFORMAT ", unichar_id = '%d', %s %s %s\n", edge, - next_node(edge), edge_letter(edge), (forward_edge(edge) ? "FORWARD" : " "), - (last_edge(edge) ? "LAST" : " "), (end_of_word(edge) ? "EOW" : "")); + tprintf(REFFORMAT " : next = " REFFORMAT ", unichar_id = '%d', %s %s %s\n", + edge, next_node(edge), edge_letter(edge), + (forward_edge(edge) ? "FORWARD" : " "), + (last_edge(edge) ? "LAST" : " "), + (end_of_word(edge) ? 
"EOW" : "")); } } @@ -331,8 +341,8 @@ bool SquishedDawg::read_squished_dawg(TFile *file) { return false; } if (debug_level_ > 2) { - tprintf("type: %d lang: %s perm: %d unicharset_size: %d num_edges: %d\n", type_, lang_.c_str(), - perm_, unicharset_size_, num_edges_); + tprintf("type: %d lang: %s perm: %d unicharset_size: %d num_edges: %d\n", + type_, lang_.c_str(), perm_, unicharset_size_, num_edges_); for (EDGE_REF edge = 0; edge < num_edges_; ++edge) { print_edge(edge); } @@ -340,7 +350,8 @@ bool SquishedDawg::read_squished_dawg(TFile *file) { return true; } -std::unique_ptr SquishedDawg::build_node_map(int32_t *num_nodes) const { +std::unique_ptr SquishedDawg::build_node_map( + int32_t *num_nodes) const { EDGE_REF edge; std::unique_ptr node_map(new EDGE_REF[num_edges_]); int32_t node_counter; diff --git a/src/dict/dawg.h b/src/dict/dawg.h index a6da96e6a..05e7c5c59 100644 --- a/src/dict/dawg.h +++ b/src/dict/dawg.h @@ -147,16 +147,19 @@ public: // For each word in the Dawg, call the given (permanent) callback with the // text (UTF-8) version of the word. - void iterate_words(const UNICHARSET &unicharset, std::function cb) const; + void iterate_words(const UNICHARSET &unicharset, + const std::function &cb) const; // Pure virtual function that should be implemented by the derived classes. /// Returns the edge that corresponds to the letter out of this node. - virtual EDGE_REF edge_char_of(NODE_REF node, UNICHAR_ID unichar_id, bool word_end) const = 0; + virtual EDGE_REF edge_char_of(NODE_REF node, UNICHAR_ID unichar_id, + bool word_end) const = 0; /// Fills the given NodeChildVector with all the unichar ids (and the /// corresponding EDGE_REFs) for which there is an edge out of this node. 
- virtual void unichar_ids_of(NODE_REF node, NodeChildVector *vec, bool word_end) const = 0; + virtual void unichar_ids_of(NODE_REF node, NodeChildVector *vec, + bool word_end) const = 0; /// Returns the next node visited by following the edge /// indicated by the given EDGE_REF. @@ -175,7 +178,8 @@ public: /// Fills vec with unichar ids that represent the character classes /// of the given unichar_id. - virtual void unichar_id_to_patterns(UNICHAR_ID unichar_id, const UNICHARSET &unicharset, + virtual void unichar_id_to_patterns(UNICHAR_ID unichar_id, + const UNICHARSET &unicharset, std::vector *vec) const { (void)unichar_id; (void)unicharset; @@ -194,8 +198,13 @@ public: } protected: - Dawg(DawgType type, const std::string &lang, PermuterType perm, int debug_level) - : lang_(lang), type_(type), perm_(perm), unicharset_size_(0), debug_level_(debug_level) {} + Dawg(DawgType type, const std::string &lang, PermuterType perm, + int debug_level) + : lang_(lang), + type_(type), + perm_(perm), + unicharset_size_(0), + debug_level_(debug_level) {} /// Returns the next node visited by following this edge. inline NODE_REF next_node_from_edge_rec(const EDGE_RECORD &edge_rec) const { @@ -207,14 +216,16 @@ protected: } /// Returns the direction flag of this edge. inline int direction_from_edge_rec(const EDGE_RECORD &edge_rec) const { - return ((edge_rec & (DIRECTION_FLAG << flag_start_bit_))) ? BACKWARD_EDGE : FORWARD_EDGE; + return ((edge_rec & (DIRECTION_FLAG << flag_start_bit_))) ? BACKWARD_EDGE + : FORWARD_EDGE; } /// Returns true if this edge marks the end of a word. inline bool end_of_word_from_edge_rec(const EDGE_RECORD &edge_rec) const { return (edge_rec & (WERD_END_FLAG << flag_start_bit_)) != 0; } /// Returns UNICHAR_ID recorded in this edge. 
- inline UNICHAR_ID unichar_id_from_edge_rec(const EDGE_RECORD &edge_rec) const { + inline UNICHAR_ID unichar_id_from_edge_rec( + const EDGE_RECORD &edge_rec) const { return ((edge_rec & letter_mask_) >> LETTER_START_BIT); } /// Sets the next node link for this edge in the Dawg. @@ -233,13 +244,14 @@ protected: /// checked are the same) /// 0 if edge_rec_match() returns true /// -1 otherwise - inline int given_greater_than_edge_rec(NODE_REF next_node, bool word_end, UNICHAR_ID unichar_id, + inline int given_greater_than_edge_rec(NODE_REF next_node, bool word_end, + UNICHAR_ID unichar_id, const EDGE_RECORD &edge_rec) const { UNICHAR_ID curr_unichar_id = unichar_id_from_edge_rec(edge_rec); NODE_REF curr_next_node = next_node_from_edge_rec(edge_rec); bool curr_word_end = end_of_word_from_edge_rec(edge_rec); - if (edge_rec_match(next_node, word_end, unichar_id, curr_next_node, curr_word_end, - curr_unichar_id)) { + if (edge_rec_match(next_node, word_end, unichar_id, curr_next_node, + curr_word_end, curr_unichar_id)) { return 0; } if (unichar_id > curr_unichar_id) { @@ -260,8 +272,9 @@ protected: /// Returns true if all the values are equal (any value matches /// next_node if next_node == NO_EDGE, any value matches word_end /// if word_end is false). - inline bool edge_rec_match(NODE_REF next_node, bool word_end, UNICHAR_ID unichar_id, - NODE_REF other_next_node, bool other_word_end, + inline bool edge_rec_match(NODE_REF next_node, bool word_end, + UNICHAR_ID unichar_id, NODE_REF other_next_node, + bool other_word_end, UNICHAR_ID other_unichar_id) const { return ((unichar_id == other_unichar_id) && (next_node == NO_EDGE || next_node == other_next_node) && @@ -277,11 +290,13 @@ protected: /// the *'s in this string are interpreted as wildcards. /// WERD_CHOICE param is not passed by const so that wildcard searches /// can modify it and work without having to copy WERD_CHOICEs. 
- bool match_words(WERD_CHOICE *word, int32_t index, NODE_REF node, UNICHAR_ID wildcard) const; + bool match_words(WERD_CHOICE *word, uint32_t index, NODE_REF node, + UNICHAR_ID wildcard) const; // Recursively iterate over all words in a dawg (see public iterate_words). - void iterate_words_rec(const WERD_CHOICE &word_so_far, NODE_REF to_explore, - std::function cb) const; + void iterate_words_rec( + const WERD_CHOICE &word_so_far, NODE_REF to_explore, + const std::function &cb) const; // Member Variables. std::string lang_; @@ -339,12 +354,13 @@ protected: // We're back in the punctuation dawg. Continuing there is the only option. struct DawgPosition { DawgPosition() = default; - DawgPosition(int dawg_idx, EDGE_REF dawgref, int punc_idx, EDGE_REF puncref, bool backtopunc) - : dawg_ref(dawgref) - , punc_ref(puncref) - , dawg_index(dawg_idx) - , punc_index(punc_idx) - , back_to_punc(backtopunc) {} + DawgPosition(int dawg_idx, EDGE_REF dawgref, int punc_idx, EDGE_REF puncref, + bool backtopunc) + : dawg_ref(dawgref), + punc_ref(puncref), + dawg_index(dawg_idx), + punc_index(punc_idx), + back_to_punc(backtopunc) {} bool operator==(const DawgPosition &other) { return dawg_index == other.dawg_index && dawg_ref == other.dawg_ref && punc_index == other.punc_index && punc_ref == other.punc_ref && @@ -364,7 +380,8 @@ public: /// Adds an entry for the given dawg_index with the given node to the vec. /// Returns false if the same entry already exists in the vector, /// true otherwise. - inline bool add_unique(const DawgPosition &new_pos, bool debug, const char *debug_msg) { + inline bool add_unique(const DawgPosition &new_pos, bool debug, + const char *debug_msg) { for (auto position : *this) { if (position == new_pos) { return false; @@ -372,8 +389,9 @@ public: } push_back(new_pos); if (debug) { - tprintf("%s[%d, " REFFORMAT "] [punc: " REFFORMAT "%s]\n", debug_msg, new_pos.dawg_index, - new_pos.dawg_ref, new_pos.punc_ref, new_pos.back_to_punc ? 
" returned" : ""); + tprintf("%s[%d, " REFFORMAT "] [punc: " REFFORMAT "%s]\n", debug_msg, + new_pos.dawg_index, new_pos.dawg_ref, new_pos.punc_ref, + new_pos.back_to_punc ? " returned" : ""); } return true; } @@ -389,19 +407,23 @@ public: // class TESS_API SquishedDawg : public Dawg { public: - SquishedDawg(DawgType type, const std::string &lang, PermuterType perm, int debug_level) - : Dawg(type, lang, perm, debug_level) {} - SquishedDawg(const char *filename, DawgType type, const std::string &lang, PermuterType perm, + SquishedDawg(DawgType type, const std::string &lang, PermuterType perm, int debug_level) + : Dawg(type, lang, perm, debug_level) {} + SquishedDawg(const char *filename, DawgType type, const std::string &lang, + PermuterType perm, int debug_level) : Dawg(type, lang, perm, debug_level) { TFile file; ASSERT_HOST(file.Open(filename, nullptr)); ASSERT_HOST(read_squished_dawg(&file)); num_forward_edges_in_node0 = num_forward_edges(0); } - SquishedDawg(EDGE_ARRAY edges, int num_edges, DawgType type, const std::string &lang, - PermuterType perm, int unicharset_size, int debug_level) - : Dawg(type, lang, perm, debug_level), edges_(edges), num_edges_(num_edges) { + SquishedDawg(EDGE_ARRAY edges, int num_edges, DawgType type, + const std::string &lang, PermuterType perm, int unicharset_size, + int debug_level) + : Dawg(type, lang, perm, debug_level), + edges_(edges), + num_edges_(num_edges) { init(unicharset_size); num_forward_edges_in_node0 = num_forward_edges(0); if (debug_level > 3) { @@ -424,11 +446,13 @@ public: } /// Returns the edge that corresponds to the letter out of this node. - EDGE_REF edge_char_of(NODE_REF node, UNICHAR_ID unichar_id, bool word_end) const override; + EDGE_REF edge_char_of(NODE_REF node, UNICHAR_ID unichar_id, + bool word_end) const override; /// Fills the given NodeChildVector with all the unichar ids (and the /// corresponding EDGE_REFs) for which there is an edge out of this node. 
- void unichar_ids_of(NODE_REF node, NodeChildVector *vec, bool word_end) const override { + void unichar_ids_of(NODE_REF node, NodeChildVector *vec, + bool word_end) const override { EDGE_REF edge = node; if (!edge_occupied(edge) || edge == NO_EDGE) { return; @@ -502,7 +526,8 @@ private: } /// Returns true if this edge is in the forward direction. inline bool forward_edge(EDGE_REF edge_ref) const { - return (edge_occupied(edge_ref) && (FORWARD_EDGE == direction_from_edge_rec(edges_[edge_ref]))); + return (edge_occupied(edge_ref) && + (FORWARD_EDGE == direction_from_edge_rec(edges_[edge_ref]))); } /// Returns true if this edge is in the backward direction. inline bool backward_edge(EDGE_REF edge_ref) const { diff --git a/src/dict/dict.cpp b/src/dict/dict.cpp index aaffad4c6..dbb7e0b6a 100644 --- a/src/dict/dict.cpp +++ b/src/dict/dict.cpp @@ -364,7 +364,7 @@ bool Dict::FinishLoad() { successors_.reserve(dawgs_.size()); for (auto dawg : dawgs_) { auto *lst = new SuccessorList(); - for (int j = 0; j < dawgs_.size(); ++j) { + for (unsigned j = 0; j < dawgs_.size(); ++j) { const Dawg *other = dawgs_[j]; if (dawg != nullptr && other != nullptr && (dawg->lang() == other->lang()) && kDawgSuccessors[dawg->type()][other->type()]) { @@ -432,7 +432,7 @@ int Dict::def_letter_is_okay(void *void_dawg_args, const UNICHARSET &unicharset, // Go over the active_dawgs vector and insert DawgPosition records // with the updated ref (an edge with the corresponding unichar id) into // dawg_args->updated_pos. - for (int a = 0; a < dawg_args->active_dawgs->size(); ++a) { + for (unsigned a = 0; a < dawg_args->active_dawgs->size(); ++a) { const DawgPosition &pos = (*dawg_args->active_dawgs)[a]; const Dawg *punc_dawg = pos.punc_index >= 0 ? dawgs_[pos.punc_index] : nullptr; const Dawg *dawg = pos.dawg_index >= 0 ? dawgs_[pos.dawg_index] : nullptr; @@ -608,11 +608,10 @@ void Dict::ProcessPatternEdges(const Dawg *dawg, const DawgPosition &pos, UNICHA // beginning of the word. 
If hyphenated() returns true, copy the entries // from hyphen_active_dawgs_ instead. void Dict::init_active_dawgs(DawgPositionVector *active_dawgs, bool ambigs_mode) const { - int i; if (hyphenated()) { *active_dawgs = hyphen_active_dawgs_; if (dawg_debug_level >= 3) { - for (i = 0; i < hyphen_active_dawgs_.size(); ++i) { + for (unsigned i = 0; i < hyphen_active_dawgs_.size(); ++i) { tprintf("Adding hyphen beginning dawg [%d, " REFFORMAT "]\n", hyphen_active_dawgs_[i].dawg_index, hyphen_active_dawgs_[i].dawg_ref); } @@ -626,7 +625,7 @@ void Dict::default_dawgs(DawgPositionVector *dawg_pos_vec, bool suppress_pattern bool punc_dawg_available = (punc_dawg_ != nullptr) && punc_dawg_->edge_char_of(0, Dawg::kPatternUnicharID, true) != NO_EDGE; - for (int i = 0; i < dawgs_.size(); i++) { + for (unsigned i = 0; i < dawgs_.size(); i++) { if (dawgs_[i] != nullptr && !(suppress_patterns && (dawgs_[i])->type() == DAWG_TYPE_PATTERN)) { int dawg_ty = dawgs_[i]->type(); bool subsumed_by_punc = kDawgSuccessors[DAWG_TYPE_PUNCTUATION][dawg_ty]; @@ -666,7 +665,7 @@ void Dict::add_document_word(const WERD_CHOICE &best_choice) { if (best_choice.length() >= kDocDictMaxRepChars) { int num_rep_chars = 1; UNICHAR_ID uch_id = best_choice.unichar_id(0); - for (int i = 1; i < best_choice.length(); ++i) { + for (unsigned i = 1; i < best_choice.length(); ++i) { if (best_choice.unichar_id(i) != uch_id) { num_rep_chars = 1; uch_id = best_choice.unichar_id(i); @@ -841,7 +840,7 @@ bool Dict::valid_bigram(const WERD_CHOICE &word1, const WERD_CHOICE &word2) cons // Extract the core word from the middle of each word with any digits // replaced with question marks. 
- int w1start, w1end, w2start, w2end; + unsigned w1start, w1end, w2start, w2end; word1.punct_stripped(&w1start, &w1end); word2.punct_stripped(&w2start, &w2end); @@ -857,7 +856,7 @@ bool Dict::valid_bigram(const WERD_CHOICE &word1, const WERD_CHOICE &word2) cons const UNICHARSET &uchset = getUnicharset(); std::vector bigram_string; bigram_string.reserve(w1end + w2end + 1); - for (int i = w1start; i < w1end; i++) { + for (auto i = w1start; i < w1end; i++) { const auto &normed_ids = getUnicharset().normed_ids(word1.unichar_id(i)); if (normed_ids.size() == 1 && uchset.get_isdigit(normed_ids[0])) { bigram_string.push_back(question_unichar_id_); @@ -866,7 +865,7 @@ bool Dict::valid_bigram(const WERD_CHOICE &word1, const WERD_CHOICE &word2) cons } } bigram_string.push_back(UNICHAR_SPACE); - for (int i = w2start; i < w2end; i++) { + for (auto i = w2start; i < w2end; i++) { const auto &normed_ids = getUnicharset().normed_ids(word2.unichar_id(i)); if (normed_ids.size() == 1 && uchset.get_isdigit(normed_ids[0])) { bigram_string.push_back(question_unichar_id_); @@ -885,11 +884,10 @@ bool Dict::valid_punctuation(const WERD_CHOICE &word) { if (word.empty()) { return NO_PERM; } - int i; WERD_CHOICE new_word(word.unicharset()); - int last_index = word.length() - 1; + auto last_index = word.length() - 1; int new_len = 0; - for (i = 0; i <= last_index; ++i) { + for (unsigned i = 0; i <= last_index; ++i) { UNICHAR_ID unichar_id = (word.unichar_id(i)); if (getUnicharset().get_ispunctuation(unichar_id)) { new_word.append_unichar_id(unichar_id, 1, 0.0, 0.0); @@ -901,7 +899,7 @@ bool Dict::valid_punctuation(const WERD_CHOICE &word) { new_word.append_unichar_id(Dawg::kPatternUnicharID, 1, 0.0, 0.0); } } - for (i = 0; i < dawgs_.size(); ++i) { + for (unsigned i = 0; i < dawgs_.size(); ++i) { if (dawgs_[i] != nullptr && dawgs_[i]->type() == DAWG_TYPE_PUNCTUATION && dawgs_[i]->word_in_dawg(new_word)) { return true; diff --git a/src/dict/dict.h b/src/dict/dict.h index ff221b0c9..136308153 
100644 --- a/src/dict/dict.h +++ b/src/dict/dict.h @@ -534,81 +534,42 @@ public: /// Variable members. /// These have to be declared and initialized after image_ptr_, which contains /// the pointer to the params vector - the member of its base CCUtil class. - STRING_VAR_H(user_words_file, "", "A filename of user-provided words."); - STRING_VAR_H(user_words_suffix, "", "A suffix of user-provided words located in tessdata."); - STRING_VAR_H(user_patterns_file, "", "A filename of user-provided patterns."); - STRING_VAR_H(user_patterns_suffix, "", "A suffix of user-provided patterns located in tessdata."); - BOOL_VAR_H(load_system_dawg, true, "Load system word dawg."); - BOOL_VAR_H(load_freq_dawg, true, "Load frequent word dawg."); - BOOL_VAR_H(load_unambig_dawg, true, "Load unambiguous word dawg."); - BOOL_VAR_H(load_punc_dawg, true, "Load dawg with punctuation patterns."); - BOOL_VAR_H(load_number_dawg, true, "Load dawg with number patterns."); - BOOL_VAR_H(load_bigram_dawg, true, "Load dawg with special word bigrams."); - double_VAR_H(xheight_penalty_subscripts, 0.125, - "Score penalty (0.1 = 10%) added if there are subscripts " - "or superscripts in a word, but it is otherwise OK."); - double_VAR_H(xheight_penalty_inconsistent, 0.25, - "Score penalty (0.1 = 10%) added if an xheight is " - "inconsistent."); - double_VAR_H(segment_penalty_dict_frequent_word, 1.0, - "Score multiplier for word matches which have good case and" - "are frequent in the given language (lower is better)."); - - double_VAR_H(segment_penalty_dict_case_ok, 1.1, - "Score multiplier for word matches that have good case " - "(lower is better)."); - - double_VAR_H(segment_penalty_dict_case_bad, 1.3125, - "Default score multiplier for word matches, which may have " - "case issues (lower is better)."); - - double_VAR_H(segment_penalty_dict_nonword, 1.25, - "Score multiplier for glyph fragment segmentations which " - "do not match a dictionary word (lower is better)."); - - 
double_VAR_H(segment_penalty_garbage, 1.50, - "Score multiplier for poorly cased strings that are not in" - " the dictionary and generally look like garbage (lower is" - " better)."); - STRING_VAR_H(output_ambig_words_file, "", "Output file for ambiguities found in the dictionary"); - INT_VAR_H(dawg_debug_level, 0, - "Set to 1 for general debug info" - ", to 2 for more details, to 3 to see all the debug messages"); - INT_VAR_H(hyphen_debug_level, 0, "Debug level for hyphenated words."); - BOOL_VAR_H(use_only_first_uft8_step, false, - "Use only the first UTF8 step of the given string" - " when computing log probabilities."); - double_VAR_H(certainty_scale, 20.0, "Certainty scaling factor"); - double_VAR_H(stopper_nondict_certainty_base, -2.50, "Certainty threshold for non-dict words"); - double_VAR_H(stopper_phase2_certainty_rejection_offset, 1.0, "Reject certainty offset"); - INT_VAR_H(stopper_smallword_size, 2, "Size of dict word to be treated as non-dict word"); - double_VAR_H(stopper_certainty_per_char, -0.50, - "Certainty to add for each dict char above small word size."); - double_VAR_H(stopper_allowable_character_badness, 3.0, - "Max certaintly variation allowed in a word (in sigma)"); - INT_VAR_H(stopper_debug_level, 0, "Stopper debug level"); - BOOL_VAR_H(stopper_no_acceptable_choices, false, - "Make AcceptableChoice() always return false. Useful" - " when there is a need to explore all segmentations"); - INT_VAR_H(tessedit_truncate_wordchoice_log, 10, "Max words to keep in list"); - STRING_VAR_H(word_to_debug, "", - "Word for which stopper debug information" - " should be printed to stdout"); - BOOL_VAR_H(segment_nonalphabetic_script, false, - "Don't use any alphabetic-specific tricks." 
- "Set to true in the traineddata config file for" - " scripts that are cursive or inherently fixed-pitch"); - BOOL_VAR_H(save_doc_words, 0, "Save Document Words"); - double_VAR_H(doc_dict_pending_threshold, 0.0, "Worst certainty for using pending dictionary"); - double_VAR_H(doc_dict_certainty_threshold, -2.25, - "Worst certainty" - " for words that can be inserted into the document dictionary"); - INT_VAR_H(max_permuter_attempts, 10000, - "Maximum number of different" - " character choices to consider during permutation." - " This limit is especially useful when user patterns" - " are specified, since overly generic patterns can result in" - " dawg search exploring an overly large number of options."); + STRING_VAR_H(user_words_file); + STRING_VAR_H(user_words_suffix); + STRING_VAR_H(user_patterns_file); + STRING_VAR_H(user_patterns_suffix); + BOOL_VAR_H(load_system_dawg); + BOOL_VAR_H(load_freq_dawg); + BOOL_VAR_H(load_unambig_dawg); + BOOL_VAR_H(load_punc_dawg); + BOOL_VAR_H(load_number_dawg); + BOOL_VAR_H(load_bigram_dawg); + double_VAR_H(xheight_penalty_subscripts); + double_VAR_H(xheight_penalty_inconsistent); + double_VAR_H(segment_penalty_dict_frequent_word); + double_VAR_H(segment_penalty_dict_case_ok); + double_VAR_H(segment_penalty_dict_case_bad); + double_VAR_H(segment_penalty_dict_nonword); + double_VAR_H(segment_penalty_garbage); + STRING_VAR_H(output_ambig_words_file); + INT_VAR_H(dawg_debug_level); + INT_VAR_H(hyphen_debug_level); + BOOL_VAR_H(use_only_first_uft8_step); + double_VAR_H(certainty_scale); + double_VAR_H(stopper_nondict_certainty_base); + double_VAR_H(stopper_phase2_certainty_rejection_offset); + INT_VAR_H(stopper_smallword_size); + double_VAR_H(stopper_certainty_per_char); + double_VAR_H(stopper_allowable_character_badness); + INT_VAR_H(stopper_debug_level); + BOOL_VAR_H(stopper_no_acceptable_choices); + INT_VAR_H(tessedit_truncate_wordchoice_log); + STRING_VAR_H(word_to_debug); + BOOL_VAR_H(segment_nonalphabetic_script); + 
BOOL_VAR_H(save_doc_words); + double_VAR_H(doc_dict_pending_threshold); + double_VAR_H(doc_dict_certainty_threshold); + INT_VAR_H(max_permuter_attempts); }; } // namespace tesseract diff --git a/src/dict/permdawg.cpp b/src/dict/permdawg.cpp index e9abdb172..07f3a59ba 100644 --- a/src/dict/permdawg.cpp +++ b/src/dict/permdawg.cpp @@ -46,7 +46,7 @@ void Dict::go_deeper_dawg_fxn(const char *debug, const BLOB_CHOICE_LIST_VECTOR & float *limit, WERD_CHOICE *best_choice, int *attempts_left, void *void_more_args) { auto *more_args = static_cast(void_more_args); - word_ending = (char_choice_index == char_choices.size() - 1); + word_ending = (static_cast(char_choice_index) == char_choices.size() - 1); int word_index = word->length() - 1; if (best_choice->rating() < *limit) { return; @@ -73,7 +73,7 @@ void Dict::go_deeper_dawg_fxn(const char *debug, const BLOB_CHOICE_LIST_VECTOR & DawgPositionVector unigram_updated_dawgs; DawgArgs unigram_dawg_args(&unigram_active_dawgs, &unigram_updated_dawgs, more_args->permuter); // Check unigrams in the ngram with letter_is_okay(). 
- for (int i = 0; unigrams_ok && i < encoding.size(); ++i) { + for (size_t i = 0; unigrams_ok && i < encoding.size(); ++i) { UNICHAR_ID uch_id = encoding[i]; ASSERT_HOST(uch_id != INVALID_UNICHAR_ID); ++num_unigrams; @@ -195,7 +195,7 @@ void Dict::permute_choices(const char *debug, const BLOB_CHOICE_LIST_VECTOR &cha debug, char_choice_index, *limit, word->rating(), word->certainty(), word->debug_string().c_str()); } - if (char_choice_index < char_choices.size()) { + if (static_cast(char_choice_index) < char_choices.size()) { BLOB_CHOICE_IT blob_choice_it; blob_choice_it.set_to_list(char_choices.at(char_choice_index)); for (blob_choice_it.mark_cycle_pt(); !blob_choice_it.cycled_list(); blob_choice_it.forward()) { @@ -226,7 +226,7 @@ void Dict::append_choices(const char *debug, const BLOB_CHOICE_LIST_VECTOR &char const CHAR_FRAGMENT_INFO *prev_char_frag_info, WERD_CHOICE *word, float certainties[], float *limit, WERD_CHOICE *best_choice, int *attempts_left, void *more_args) { - int word_ending = (char_choice_index == char_choices.size() - 1); + auto word_ending = (static_cast(char_choice_index) == char_choices.size() - 1); // Deal with fragments. CHAR_FRAGMENT_INFO char_frag_info; diff --git a/src/dict/stopper.cpp b/src/dict/stopper.cpp index 5c4461679..4c33fa89c 100644 --- a/src/dict/stopper.cpp +++ b/src/dict/stopper.cpp @@ -164,7 +164,6 @@ bool Dict::NoDangerousAmbig(WERD_CHOICE *best_choice, DANGERR *fixpt, bool fix_r // Construct BLOB_CHOICE_LIST_VECTOR with ambiguities // for each unichar id in BestChoice. BLOB_CHOICE_LIST_VECTOR ambig_blob_choices; - int i; bool ambigs_found = false; // For each position in best_choice: // -- choose AMBIG_SPEC_LIST that corresponds to unichar_id at best_choice[i] @@ -190,7 +189,7 @@ bool Dict::NoDangerousAmbig(WERD_CHOICE *best_choice, DANGERR *fixpt, bool fix_r // unichar id for the corresponding position in best_choice. // best_choice consisting from only the original letters will // have a rating of 0.0. 
- for (i = 0; i < best_choice->length(); ++i) { + for (unsigned i = 0; i < best_choice->length(); ++i) { auto *lst = new BLOB_CHOICE_LIST(); BLOB_CHOICE_IT lst_it(lst); // TODO(rays/antonova) Put real xheights and y shifts here. @@ -201,10 +200,9 @@ bool Dict::NoDangerousAmbig(WERD_CHOICE *best_choice, DANGERR *fixpt, bool fix_r } UNICHAR_ID wrong_ngram[MAX_AMBIG_SIZE + 1]; int wrong_ngram_index; - int next_index; int blob_index = 0; - for (i = 0; i < best_choice->length(); blob_index += best_choice->state(i), ++i) { - UNICHAR_ID curr_unichar_id = best_choice->unichar_id(i); + for (unsigned i = 0; i < best_choice->length(); blob_index += best_choice->state(i), ++i) { + auto curr_unichar_id = best_choice->unichar_id(i); if (stopper_debug_level > 2) { tprintf("Looking for %s ngrams starting with %s:\n", replace ? "replaceable" : "ambiguous", getUnicharset().debug_str(curr_unichar_id).c_str()); @@ -212,7 +210,7 @@ bool Dict::NoDangerousAmbig(WERD_CHOICE *best_choice, DANGERR *fixpt, bool fix_r int num_wrong_blobs = best_choice->state(i); wrong_ngram_index = 0; wrong_ngram[wrong_ngram_index] = curr_unichar_id; - if (curr_unichar_id == INVALID_UNICHAR_ID || curr_unichar_id >= table.size() || + if (curr_unichar_id == INVALID_UNICHAR_ID || static_cast(curr_unichar_id) >= table.size() || table[curr_unichar_id] == nullptr) { continue; // there is no ambig spec for this unichar id } @@ -272,6 +270,7 @@ bool Dict::NoDangerousAmbig(WERD_CHOICE *best_choice, DANGERR *fixpt, bool fix_r } spec_it.forward(); } else if (compare == -1) { + unsigned next_index; if (wrong_ngram_index + 1 < ambig_spec->wrong_ngram_size && ((next_index = wrong_ngram_index + 1 + i) < best_choice->length())) { // Add the next unichar id to wrong_ngram and keep looking for @@ -293,7 +292,7 @@ bool Dict::NoDangerousAmbig(WERD_CHOICE *best_choice, DANGERR *fixpt, bool fix_r if (ambigs_found) { if (stopper_debug_level > 2) { tprintf("\nResulting ambig_blob_choices:\n"); - for (i = 0; i < 
ambig_blob_choices.size(); ++i) { + for (unsigned i = 0; i < ambig_blob_choices.size(); ++i) { print_ratings_list("", ambig_blob_choices.at(i), getUnicharset()); tprintf("\n"); } @@ -310,7 +309,7 @@ bool Dict::NoDangerousAmbig(WERD_CHOICE *best_choice, DANGERR *fixpt, bool fix_r // the capability to produce classifications combined from character // fragments is added to other functions. int orig_i = 0; - for (i = 0; i < alt_word->length(); ++i) { + for (unsigned i = 0; i < alt_word->length(); ++i) { const UNICHARSET &uchset = getUnicharset(); bool replacement_is_ngram = uchset.get_isngram(alt_word->unichar_id(i)); UNICHAR_ID leftmost_id = alt_word->unichar_id(i); @@ -444,7 +443,7 @@ void Dict::ReplaceAmbig(int wrong_ngram_begin_index, int wrong_ngram_size, int Dict::LengthOfShortestAlphaRun(const WERD_CHOICE &WordChoice) const { int shortest = INT32_MAX; int curr_len = 0; - for (int w = 0; w < WordChoice.length(); ++w) { + for (unsigned w = 0; w < WordChoice.length(); ++w) { if (WordChoice.unicharset()->get_isalpha(WordChoice.unichar_id(w))) { curr_len++; } else if (curr_len > 0) { diff --git a/src/dict/trie.cpp b/src/dict/trie.cpp index 9a501d9da..3ba415e3a 100644 --- a/src/dict/trie.cpp +++ b/src/dict/trie.cpp @@ -71,7 +71,7 @@ bool Trie::edge_char_of(NODE_REF node_ref, NODE_REF next_node, int direction, bo if (node_ref == NO_EDGE) { return false; } - assert(node_ref < nodes_.size()); + assert(static_cast(node_ref) < nodes_.size()); EDGE_VECTOR &vec = (direction == FORWARD_EDGE) ? nodes_[node_ref]->forward_edges : nodes_[node_ref]->backward_edges; int vec_size = vec.size(); @@ -111,7 +111,7 @@ bool Trie::add_edge_linkage(NODE_REF node1, NODE_REF node2, bool marker_flag, in bool word_end, UNICHAR_ID unichar_id) { EDGE_VECTOR *vec = (direction == FORWARD_EDGE) ? 
&(nodes_[node1]->forward_edges) : &(nodes_[node1]->backward_edges); - int search_index; + unsigned search_index; if (node1 == 0 && direction == FORWARD_EDGE) { search_index = 0; // find the index to make the add sorted while (search_index < vec->size() && @@ -164,7 +164,7 @@ bool Trie::add_word_to_dawg(const WERD_CHOICE &word, const std::vector *re ASSERT_HOST(repetitions->size() == word.length()); } // Make sure the word does not contain invalid unchar ids. - for (int i = 0; i < word.length(); ++i) { + for (unsigned i = 0; i < word.length(); ++i) { if (word.unichar_id(i) < 0 || word.unichar_id(i) >= unicharset_size_) { return false; } @@ -175,7 +175,6 @@ bool Trie::add_word_to_dawg(const WERD_CHOICE &word, const std::vector *re NODE_REF the_next_node; bool marker_flag = false; EDGE_INDEX edge_index; - int i; int32_t still_finding_chars = true; int32_t word_end = false; bool add_failed = false; @@ -186,6 +185,7 @@ bool Trie::add_word_to_dawg(const WERD_CHOICE &word, const std::vector *re } UNICHAR_ID unichar_id; + unsigned i; for (i = 0; i < word.length() - 1; ++i) { unichar_id = word.unichar_id(i); marker_flag = (repetitions != nullptr) ? (*repetitions)[i] : false; @@ -417,6 +417,7 @@ bool Trie::read_pattern_list(const char *filename, const UNICHARSET &unicharset) if (*str_ptr == '\\') { // regular '\' unichar that was escaped curr_unichar_id = unicharset.unichar_to_id(str_ptr, step); } else { +#if 0 // TODO: This code should be enabled if kSaneNumConcreteChars != 0. if (word.length() < kSaneNumConcreteChars) { tprintf( "Please provide at least %d concrete characters at the" @@ -425,6 +426,7 @@ bool Trie::read_pattern_list(const char *filename, const UNICHARSET &unicharset) failed = true; break; } +#endif // Parse character class from expression. 
curr_unichar_id = character_class_to_pattern(*str_ptr); } @@ -508,21 +510,16 @@ SquishedDawg *Trie::trie_to_dawg() { if (debug_level_ > 2) { print_all("Before reduction:", MAX_NODE_EDGES_DISPLAY); } - auto reduced_nodes = new bool[nodes_.size()]; - for (int i = 0; i < nodes_.size(); i++) { - reduced_nodes[i] = false; - } + std::vector reduced_nodes(nodes_.size()); this->reduce_node_input(0, reduced_nodes); - delete[] reduced_nodes; if (debug_level_ > 2) { print_all("After reduction:", MAX_NODE_EDGES_DISPLAY); } // Build a translation map from node indices in nodes_ vector to // their target indices in EDGE_ARRAY. - auto *node_ref_map = new NODE_REF[nodes_.size() + 1]; - int i, j; - node_ref_map[0] = 0; + std::vector node_ref_map(nodes_.size() + 1); + unsigned i; for (i = 0; i < nodes_.size(); ++i) { node_ref_map[i + 1] = node_ref_map[i] + nodes_[i]->forward_edges.size(); } @@ -535,10 +532,10 @@ SquishedDawg *Trie::trie_to_dawg() { for (i = 0; i < nodes_.size(); ++i) { TRIE_NODE_RECORD *node_ptr = nodes_[i]; int end = node_ptr->forward_edges.size(); - for (j = 0; j < end; ++j) { + for (int j = 0; j < end; ++j) { EDGE_RECORD &edge_rec = node_ptr->forward_edges[j]; NODE_REF node_ref = next_node_from_edge_rec(edge_rec); - ASSERT_HOST(node_ref < nodes_.size()); + ASSERT_HOST(static_cast(node_ref) < nodes_.size()); UNICHAR_ID unichar_id = unichar_id_from_edge_rec(edge_rec); link_edge(edge_array_ptr, node_ref_map[node_ref], false, FORWARD_EDGE, end_of_word_from_edge_rec(edge_rec), unichar_id); @@ -548,7 +545,6 @@ SquishedDawg *Trie::trie_to_dawg() { ++edge_array_ptr; } } - delete[] node_ref_map; return new SquishedDawg(edge_array, num_forward_edges, type_, lang_, perm_, unicharset_size_, debug_level_); @@ -571,10 +567,9 @@ bool Trie::eliminate_redundant_edges(NODE_REF node, const EDGE_RECORD &edge1, // Translate all edges going to/from next_node2 to go to/from next_node1. 
EDGE_RECORD *edge_ptr = nullptr; EDGE_INDEX edge_index; - int i; // The backward link in node to next_node2 will be zeroed out by the caller. // Copy all the backward links in next_node2 to node next_node1 - for (i = 0; i < next_node2_ptr->backward_edges.size(); ++i) { + for (unsigned i = 0; i < next_node2_ptr->backward_edges.size(); ++i) { const EDGE_RECORD &bkw_edge = next_node2_ptr->backward_edges[i]; NODE_REF curr_next_node = next_node_from_edge_rec(bkw_edge); UNICHAR_ID curr_unichar_id = unichar_id_from_edge_rec(bkw_edge); @@ -599,13 +594,13 @@ bool Trie::eliminate_redundant_edges(NODE_REF node, const EDGE_RECORD &edge1, } bool Trie::reduce_lettered_edges(EDGE_INDEX edge_index, UNICHAR_ID unichar_id, NODE_REF node, - EDGE_VECTOR *backward_edges, NODE_MARKER reduced_nodes) { + EDGE_VECTOR *backward_edges, std::vector &reduced_nodes) { if (debug_level_ > 1) { tprintf("reduce_lettered_edges(edge=" REFFORMAT ")\n", edge_index); } // Compare each of the edge pairs with the given unichar_id. bool did_something = false; - for (int i = edge_index; i < backward_edges->size() - 1; ++i) { + for (unsigned i = edge_index; i < backward_edges->size() - 1; ++i) { // Find the first edge that can be eliminated. UNICHAR_ID curr_unichar_id = INVALID_UNICHAR_ID; while (i < backward_edges->size()) { @@ -625,7 +620,7 @@ bool Trie::reduce_lettered_edges(EDGE_INDEX edge_index, UNICHAR_ID unichar_id, N } const EDGE_RECORD &edge_rec = (*backward_edges)[i]; // Compare it to the rest of the edges with the given unichar_id. 
- for (int j = i + 1; j < backward_edges->size(); ++j) { + for (auto j = i + 1; j < backward_edges->size(); ++j) { const EDGE_RECORD &next_edge_rec = (*backward_edges)[j]; if (DeadEdge(next_edge_rec)) { continue; @@ -662,7 +657,7 @@ void Trie::sort_edges(EDGE_VECTOR *edges) { } } -void Trie::reduce_node_input(NODE_REF node, NODE_MARKER reduced_nodes) { +void Trie::reduce_node_input(NODE_REF node, std::vector &reduced_nodes) { EDGE_VECTOR &backward_edges = nodes_[node]->backward_edges; sort_edges(&backward_edges); if (debug_level_ > 1) { @@ -671,7 +666,7 @@ void Trie::reduce_node_input(NODE_REF node, NODE_MARKER reduced_nodes) { } EDGE_INDEX edge_index = 0; - while (edge_index < backward_edges.size()) { + while (static_cast(edge_index) < backward_edges.size()) { if (DeadEdge(backward_edges[edge_index])) { continue; } @@ -679,7 +674,7 @@ void Trie::reduce_node_input(NODE_REF node, NODE_MARKER reduced_nodes) { while (reduce_lettered_edges(edge_index, unichar_id, node, &backward_edges, reduced_nodes)) { ; } - while (++edge_index < backward_edges.size()) { + while (static_cast(++edge_index) < backward_edges.size()) { UNICHAR_ID id = unichar_id_from_edge_rec(backward_edges[edge_index]); if (!DeadEdge(backward_edges[edge_index]) && id != unichar_id) { break; diff --git a/src/dict/trie.h b/src/dict/trie.h index 1a82b39eb..3734f49ac 100644 --- a/src/dict/trie.h +++ b/src/dict/trie.h @@ -36,7 +36,6 @@ class UNICHARSET; // typedefs to int and restrict the casts to extracting these values from // the 64 bit EDGE_RECORD. using EDGE_INDEX = int64_t; // index of an edge in a given node -using NODE_MARKER = bool *; using EDGE_VECTOR = std::vector; struct TRIE_NODE_RECORD { @@ -383,7 +382,7 @@ protected: // caller when all edges with this letter have been reduced. // Returns true if further reduction is possible with this same letter. 
bool reduce_lettered_edges(EDGE_INDEX edge_index, UNICHAR_ID unichar_id, NODE_REF node, - EDGE_VECTOR *backward_edges, NODE_MARKER reduced_nodes); + EDGE_VECTOR *backward_edges, std::vector &reduced_nodes); /** * Order num_edges of consecutive EDGE_RECORDS in the given EDGE_VECTOR in @@ -394,7 +393,7 @@ protected: void sort_edges(EDGE_VECTOR *edges); /** Eliminates any redundant edges from this node in the Trie. */ - void reduce_node_input(NODE_REF node, NODE_MARKER reduced_nodes); + void reduce_node_input(NODE_REF node, std::vector &reduced_nodes); // Returns the pattern unichar id for the given character class code. UNICHAR_ID character_class_to_pattern(char ch); diff --git a/src/lstm/convolve.h b/src/lstm/convolve.h index 6c2477c5b..e91fde9b3 100644 --- a/src/lstm/convolve.h +++ b/src/lstm/convolve.h @@ -37,7 +37,7 @@ public: ~Convolve() override = default; std::string spec() const override { - return "C" + std::to_string(half_x_ * 2 + 1) + "," + std::to_string(half_y_ * 2 + 1); + return "C" + std::to_string(half_y_ * 2 + 1) + "," + std::to_string(half_x_ * 2 + 1); } // Writes to the given file. Returns false in case of error. diff --git a/src/lstm/fullyconnected.cpp b/src/lstm/fullyconnected.cpp index 80f7f2a5e..85989f407 100644 --- a/src/lstm/fullyconnected.cpp +++ b/src/lstm/fullyconnected.cpp @@ -156,7 +156,7 @@ void FullyConnected::Forward(bool debug, const NetworkIO &input, // Thread-local pointer to temporary storage. 
int thread_id = 0; #endif - double *temp_line = temp_lines[thread_id]; + TFloat *temp_line = temp_lines[thread_id]; if (input.int_mode()) { ForwardTimeStep(input.i(t), t, temp_line); } else { @@ -200,7 +200,7 @@ void FullyConnected::SetupForward(const NetworkIO &input, const TransposedArray } } -void FullyConnected::ForwardTimeStep(int t, double *output_line) { +void FullyConnected::ForwardTimeStep(int t, TFloat *output_line) { if (type_ == NT_TANH) { FuncInplace(no_, output_line); } else if (type_ == NT_LOGISTIC) { @@ -218,7 +218,7 @@ void FullyConnected::ForwardTimeStep(int t, double *output_line) { } } -void FullyConnected::ForwardTimeStep(const double *d_input, int t, double *output_line) { +void FullyConnected::ForwardTimeStep(const TFloat *d_input, int t, TFloat *output_line) { // input is copied to source_ line-by-line for cache coherency. if (IsTraining() && external_source_ == nullptr) { source_t_.WriteStrided(t, d_input); @@ -227,7 +227,7 @@ void FullyConnected::ForwardTimeStep(const double *d_input, int t, double *outpu ForwardTimeStep(t, output_line); } -void FullyConnected::ForwardTimeStep(const int8_t *i_input, int t, double *output_line) { +void FullyConnected::ForwardTimeStep(const int8_t *i_input, int t, TFloat *output_line) { // input is copied to source_ line-by-line for cache coherency. 
weights_.MatrixDotVector(i_input, output_line); ForwardTimeStep(t, output_line); @@ -265,11 +265,11 @@ bool FullyConnected::Backward(bool debug, const NetworkIO &fwd_deltas, NetworkSc for (int t = 0; t < width; ++t) { int thread_id = 0; #endif - double *backprop = nullptr; + TFloat *backprop = nullptr; if (needs_to_backprop_) { backprop = temp_backprops[thread_id]; } - double *curr_errors = errors[thread_id]; + TFloat *curr_errors = errors[thread_id]; BackwardTimeStep(fwd_deltas, t, curr_errors, errors_t.get(), backprop); if (backprop != nullptr) { back_deltas->WriteTimeStep(t, backprop); @@ -287,8 +287,8 @@ bool FullyConnected::Backward(bool debug, const NetworkIO &fwd_deltas, NetworkSc return false; // No point going further back. } -void FullyConnected::BackwardTimeStep(const NetworkIO &fwd_deltas, int t, double *curr_errors, - TransposedArray *errors_t, double *backprop) { +void FullyConnected::BackwardTimeStep(const NetworkIO &fwd_deltas, int t, TFloat *curr_errors, + TransposedArray *errors_t, TFloat *backprop) { if (type_ == NT_TANH) { acts_.FuncMultiply(fwd_deltas, t, curr_errors); } else if (type_ == NT_LOGISTIC) { @@ -328,7 +328,7 @@ void FullyConnected::Update(float learning_rate, float momentum, float adam_beta // Sums the products of weight updates in *this and other, splitting into // positive (same direction) in *same and negative (different direction) in // *changed. 
-void FullyConnected::CountAlternators(const Network &other, double *same, double *changed) const { +void FullyConnected::CountAlternators(const Network &other, TFloat *same, TFloat *changed) const { ASSERT_HOST(other.type() == type_); const auto *fc = static_cast(&other); weights_.CountAlternators(fc->weights_, same, changed); diff --git a/src/lstm/fullyconnected.h b/src/lstm/fullyconnected.h index 95f27f1ab..0dbd46057 100644 --- a/src/lstm/fullyconnected.h +++ b/src/lstm/fullyconnected.h @@ -20,6 +20,7 @@ #include "network.h" #include "networkscratch.h" +#include "tesstypes.h" namespace tesseract { @@ -90,17 +91,17 @@ public: NetworkScratch *scratch, NetworkIO *output) override; // Components of Forward so FullyConnected can be reused inside LSTM. void SetupForward(const NetworkIO &input, const TransposedArray *input_transpose); - void ForwardTimeStep(int t, double *output_line); - void ForwardTimeStep(const double *d_input, int t, double *output_line); - void ForwardTimeStep(const int8_t *i_input, int t, double *output_line); + void ForwardTimeStep(int t, TFloat *output_line); + void ForwardTimeStep(const TFloat *d_input, int t, TFloat *output_line); + void ForwardTimeStep(const int8_t *i_input, int t, TFloat *output_line); // Runs backward propagation of errors on the deltas line. // See Network for a detailed discussion of the arguments. bool Backward(bool debug, const NetworkIO &fwd_deltas, NetworkScratch *scratch, NetworkIO *back_deltas) override; // Components of Backward so FullyConnected can be reused inside LSTM. - void BackwardTimeStep(const NetworkIO &fwd_deltas, int t, double *curr_errors, - TransposedArray *errors_t, double *backprop); + void BackwardTimeStep(const NetworkIO &fwd_deltas, int t, TFloat *curr_errors, + TransposedArray *errors_t, TFloat *backprop); void FinishBackward(const TransposedArray &errors_t); // Updates the weights using the given learning rate, momentum and adam_beta. 
@@ -109,7 +110,7 @@ public: // Sums the products of weight updates in *this and other, splitting into // positive (same direction) in *same and negative (different direction) in // *changed. - void CountAlternators(const Network &other, double *same, double *changed) const override; + void CountAlternators(const Network &other, TFloat *same, TFloat *changed) const override; protected: // Weight arrays of size [no, ni + 1]. diff --git a/src/lstm/functions.cpp b/src/lstm/functions.cpp index 46e1392c1..4640d1049 100644 --- a/src/lstm/functions.cpp +++ b/src/lstm/functions.cpp @@ -1,7 +1,7 @@ -// Generated code with lookup tables +// Generated code with lookup tables (see generate_lut.py) #include "functions.h" namespace tesseract { -const double TanhTable[] = { +const TFloat TanhTable[] = { 0.0, 0.00390623013190634, 0.007812341058161014, @@ -4099,7 +4099,7 @@ const double TanhTable[] = { 0.9999999999999742, 0.9999999999999745, }; -const double LogisticTable[] = { +const TFloat LogisticTable[] = { 0.5, 0.5009765612582384, 0.5019531150659532, diff --git a/src/lstm/functions.h b/src/lstm/functions.h index 65b4d3345..1e71b2f27 100644 --- a/src/lstm/functions.h +++ b/src/lstm/functions.h @@ -19,6 +19,7 @@ #define TESSERACT_LSTM_FUNCTIONS_H_ #include "helpers.h" +#include "tesstypes.h" // Setting this to 1 or more causes massive dumps of debug data: weights, // updates, internal calculations etc, and reduces the number of test iterations @@ -33,134 +34,134 @@ namespace tesseract { // Size of static tables. constexpr int kTableSize = 4096; // Scale factor for float arg to int index. -constexpr double kScaleFactor = 256.0; +constexpr TFloat kScaleFactor = 256.0; // Generated lookup tables. -extern const double TanhTable[]; -extern const double LogisticTable[]; +extern const TFloat TanhTable[]; +extern const TFloat LogisticTable[]; // Non-linearity (sigmoid) functions with cache tables and clipping. 
-inline double Tanh(double x) { - if (x < 0.0) { +inline TFloat Tanh(TFloat x) { + if (x < 0) { return -Tanh(-x); } x *= kScaleFactor; auto index = static_cast(x); if (index >= (kTableSize - 1)) { - return 1.0; + return 1; } - double tanh_i0 = TanhTable[index]; - double tanh_i1 = TanhTable[index + 1]; + TFloat tanh_i0 = TanhTable[index]; + TFloat tanh_i1 = TanhTable[index + 1]; // Linear interpolation. return tanh_i0 + (tanh_i1 - tanh_i0) * (x - index); } -inline double Logistic(double x) { - if (x < 0.0) { - return 1.0 - Logistic(-x); +inline TFloat Logistic(TFloat x) { + if (x < 0) { + return 1 - Logistic(-x); } x *= kScaleFactor; auto index = static_cast(x); if (index >= (kTableSize - 1)) { - return 1.0; + return 1; } - double l0 = LogisticTable[index]; - double l1 = LogisticTable[index + 1]; + TFloat l0 = LogisticTable[index]; + TFloat l1 = LogisticTable[index + 1]; // Linear interpolation. return l0 + (l1 - l0) * (x - index); } // Non-linearity (sigmoid) functions and their derivatives. struct FFunc { - inline double operator()(double x) const { + inline TFloat operator()(TFloat x) const { return Logistic(x); } }; struct FPrime { - inline double operator()(double y) const { - return y * (1.0 - y); + inline TFloat operator()(TFloat y) const { + return y * (1 - y); } }; struct ClipFFunc { - inline double operator()(double x) const { - if (x <= 0.0) { - return 0.0; + inline TFloat operator()(TFloat x) const { + if (x <= 0) { + return 0; } - if (x >= 1.0) { - return 1.0; + if (x >= 1) { + return 1; } return x; } }; struct ClipFPrime { - inline double operator()(double y) const { - return 0.0 < y && y < 1.0 ? 1.0 : 0.0; + inline TFloat operator()(TFloat y) const { + return 0 < y && y < 1 ? 1 : 0; } }; struct Relu { - inline double operator()(double x) const { - if (x <= 0.0) { - return 0.0; + inline TFloat operator()(TFloat x) const { + if (x <= 0) { + return 0; } return x; } }; struct ReluPrime { - inline double operator()(double y) const { - return 0.0 < y ? 
1.0 : 0.0; + inline TFloat operator()(TFloat y) const { + return 0 < y ? 1 : 0; } }; struct GFunc { - inline double operator()(double x) const { + inline TFloat operator()(TFloat x) const { return Tanh(x); } }; struct GPrime { - inline double operator()(double y) const { - return 1.0 - y * y; + inline TFloat operator()(TFloat y) const { + return 1 - y * y; } }; struct ClipGFunc { - inline double operator()(double x) const { - if (x <= -1.0) { - return -1.0; + inline TFloat operator()(TFloat x) const { + if (x <= -1) { + return -1; } - if (x >= 1.0) { - return 1.0; + if (x >= 1) { + return 1; } return x; } }; struct ClipGPrime { - inline double operator()(double y) const { - return -1.0 < y && y < 1.0 ? 1.0 : 0.0; + inline TFloat operator()(TFloat y) const { + return -1 < y && y < 1 ? 1 : 0; } }; struct HFunc { - inline double operator()(double x) const { + inline TFloat operator()(TFloat x) const { return Tanh(x); } }; struct HPrime { - inline double operator()(double y) const { - double u = Tanh(y); - return 1.0 - u * u; + inline TFloat operator()(TFloat y) const { + TFloat u = Tanh(y); + return 1 - u * u; } }; struct UnityFunc { - inline double operator()(double /*x*/) const { + inline TFloat operator()(TFloat /*x*/) const { return 1.0; } }; struct IdentityFunc { - inline double operator()(double x) const { + inline TFloat operator()(TFloat x) const { return x; } }; // Applies Func in-place to inout, of size n. template -inline void FuncInplace(int n, double *inout) { +inline void FuncInplace(int n, TFloat *inout) { Func f; for (int i = 0; i < n; ++i) { inout[i] = f(inout[i]); @@ -169,7 +170,7 @@ inline void FuncInplace(int n, double *inout) { // Applies Func to u and multiplies the result by v component-wise, // putting the product in out, all of size n. 
template -inline void FuncMultiply(const double *u, const double *v, int n, double *out) { +inline void FuncMultiply(const TFloat *u, const TFloat *v, int n, TFloat *out) { Func f; for (int i = 0; i < n; ++i) { out[i] = f(u[i]) * v[i]; @@ -182,7 +183,7 @@ inline void SoftmaxInPlace(int n, T *inout) { return; } // A limit on the negative range input to exp to guarantee non-zero output. - const T kMaxSoftmaxActivation = 86.0f; + const T kMaxSoftmaxActivation = 86; T max_output = inout[0]; for (int i = 1; i < n; i++) { @@ -191,14 +192,14 @@ inline void SoftmaxInPlace(int n, T *inout) { max_output = output; } } - T prob_total = 0.0; + T prob_total = 0; for (int i = 0; i < n; i++) { T prob = inout[i] - max_output; - prob = exp(ClipToRange(prob, -kMaxSoftmaxActivation, static_cast(0))); + prob = std::exp(ClipToRange(prob, -kMaxSoftmaxActivation, static_cast(0))); prob_total += prob; inout[i] = prob; } - if (prob_total > 0.0) { + if (prob_total > 0) { for (int i = 0; i < n; i++) { inout[i] /= prob_total; } @@ -206,34 +207,34 @@ inline void SoftmaxInPlace(int n, T *inout) { } // Copies n values of the given src vector to dest. -inline void CopyVector(int n, const double *src, double *dest) { +inline void CopyVector(unsigned n, const TFloat *src, TFloat *dest) { memcpy(dest, src, n * sizeof(dest[0])); } // Adds n values of the given src vector to dest. -inline void AccumulateVector(int n, const double *src, double *dest) { +inline void AccumulateVector(int n, const TFloat *src, TFloat *dest) { for (int i = 0; i < n; ++i) { dest[i] += src[i]; } } // Multiplies n values of inout in-place element-wise by the given src vector. -inline void MultiplyVectorsInPlace(int n, const double *src, double *inout) { +inline void MultiplyVectorsInPlace(int n, const TFloat *src, TFloat *inout) { for (int i = 0; i < n; ++i) { inout[i] *= src[i]; } } // Multiplies n values of u by v, element-wise, accumulating to out. 
-inline void MultiplyAccumulate(int n, const double *u, const double *v, double *out) { +inline void MultiplyAccumulate(int n, const TFloat *u, const TFloat *v, TFloat *out) { for (int i = 0; i < n; i++) { out[i] += u[i] * v[i]; } } // Sums the given 5 n-vectors putting the result into sum. -inline void SumVectors(int n, const double *v1, const double *v2, const double *v3, - const double *v4, const double *v5, double *sum) { +inline void SumVectors(int n, const TFloat *v1, const TFloat *v2, const TFloat *v3, + const TFloat *v4, const TFloat *v5, TFloat *sum) { for (int i = 0; i < n; ++i) { sum[i] = v1[i] + v2[i] + v3[i] + v4[i] + v5[i]; } @@ -241,7 +242,7 @@ inline void SumVectors(int n, const double *v1, const double *v2, const double * // Sets the given n-vector vec to 0. template -inline void ZeroVector(int n, T *vec) { +inline void ZeroVector(unsigned n, T *vec) { memset(vec, 0, n * sizeof(*vec)); } @@ -255,12 +256,12 @@ inline void ClipVector(int n, T lower, T upper, T *vec) { // Converts the given n-vector to a binary encoding of the maximum value, // encoded as vector of nf binary values. -inline void CodeInBinary(int n, int nf, double *vec) { +inline void CodeInBinary(int n, int nf, TFloat *vec) { if (nf <= 0 || n < nf) { return; } int index = 0; - double best_score = vec[0]; + TFloat best_score = vec[0]; for (int i = 1; i < n; ++i) { if (vec[i] > best_score) { best_score = vec[i]; diff --git a/src/lstm/generate_lut.py b/src/lstm/generate_lut.py index cf2e844a3..c2283be7e 100755 --- a/src/lstm/generate_lut.py +++ b/src/lstm/generate_lut.py @@ -4,22 +4,24 @@ import math +# kTableSize and kScaleFactor must match the values in functions.h. + # Size of static tables. kTableSize = 4096 # Scale factor for float arg to int index. 
kScaleFactor = 256.0 -print("// Generated code with lookup tables") +print("// Generated code with lookup tables (see generate_lut.py)") print('#include "functions.h"') print("namespace tesseract {") -print("const double TanhTable[] = {") +print("const TFloat TanhTable[] = {") for i in range(kTableSize): - print(" %a," % math.tanh(i / kScaleFactor)) + print(" %a," % math.tanh(i / kScaleFactor)) print("};") -print("const double LogisticTable[] = {") +print("const TFloat LogisticTable[] = {") for i in range(kTableSize): - print(" %a," % (1 / (1 + math.exp(-i / kScaleFactor)))) + print(" %a," % (1 / (1 + math.exp(-i / kScaleFactor)))) print("};") -print("} // namespace tesseract.") +print("} // namespace tesseract.") diff --git a/src/lstm/input.h b/src/lstm/input.h index fb6131fb8..4ec870d81 100644 --- a/src/lstm/input.h +++ b/src/lstm/input.h @@ -33,8 +33,10 @@ public: ~Input() override = default; std::string spec() const override { - return std::to_string(shape_.batch()) + "," + std::to_string(shape_.height()) + "," + - std::to_string(shape_.width()) + "," + std::to_string(shape_.depth()); + return std::to_string(shape_.batch()) + "," + + std::to_string(shape_.height()) + "," + + std::to_string(shape_.width()) + "," + + std::to_string(shape_.depth()); } // Returns the required shape input to the network. @@ -43,7 +45,8 @@ public: } // Returns the shape output from the network given an input shape (which may // be partially unknown ie zero). - StaticShape OutputShape(const StaticShape &input_shape) const override { + StaticShape OutputShape( + [[maybe_unused]] const StaticShape &input_shape) const override { return shape_; } // Writes to the given file. Returns false in case of error. @@ -66,27 +69,29 @@ public: // Runs forward propagation of activations on the input line. // See Network for a detailed discussion of the arguments. 
- void Forward(bool debug, const NetworkIO &input, const TransposedArray *input_transpose, - NetworkScratch *scratch, NetworkIO *output) override; + void Forward(bool debug, const NetworkIO &input, + const TransposedArray *input_transpose, NetworkScratch *scratch, + NetworkIO *output) override; // Runs backward propagation of errors on the deltas line. // See Network for a detailed discussion of the arguments. - bool Backward(bool debug, const NetworkIO &fwd_deltas, NetworkScratch *scratch, - NetworkIO *back_deltas) override; + bool Backward(bool debug, const NetworkIO &fwd_deltas, + NetworkScratch *scratch, NetworkIO *back_deltas) override; // Creates and returns a Pix of appropriate size for the network from the // image_data. If non-null, *image_scale returns the image scale factor used. // Returns nullptr on error. /* static */ - static Image PrepareLSTMInputs(const ImageData &image_data, const Network *network, int min_width, - TRand *randomizer, float *image_scale); + static Image PrepareLSTMInputs(const ImageData &image_data, + const Network *network, int min_width, + TRand *randomizer, float *image_scale); // Converts the given pix to a NetworkIO of height and depth appropriate to // the given StaticShape: // If depth == 3, convert to 24 bit color, otherwise normalized grey. // Scale to target height, if the shape's height is > 1, or its depth if the // height == 1. If height == 0 then no scaling. // NOTE: It isn't safe for multiple threads to call this on the same pix. 
- static void PreparePixInput(const StaticShape &shape, const Image pix, TRand *randomizer, - NetworkIO *input); + static void PreparePixInput(const StaticShape &shape, const Image pix, + TRand *randomizer, NetworkIO *input); private: void DebugWeights() override { diff --git a/src/lstm/lstm.cpp b/src/lstm/lstm.cpp index 9a8ab2cfe..11722d795 100644 --- a/src/lstm/lstm.cpp +++ b/src/lstm/lstm.cpp @@ -28,7 +28,7 @@ #include #include // for std::ostringstream -#if !defined(__GNUC__) && defined(_MSC_VER) +#if defined(_MSC_VER) && !defined(__clang__) # include // _BitScanReverse #endif @@ -68,9 +68,9 @@ namespace tesseract { // Max absolute value of state_. It is reasonably high to enable the state // to count things. -const double kStateClip = 100.0; +const TFloat kStateClip = 100.0; // Max absolute value of gate_errors (the gradients). -const double kErrClip = 1.0f; +const TFloat kErrClip = 1.0f; // Calculate ceil(log2(n)). static inline uint32_t ceil_log2(uint32_t n) { @@ -312,9 +312,9 @@ void LSTM::Forward(bool debug, const NetworkIO &input, const TransposedArray *in // Single timestep buffers for the current/recurrent output and state. NetworkScratch::FloatVec curr_state, curr_output; curr_state.Init(ns_, scratch); - ZeroVector(ns_, curr_state); + ZeroVector(ns_, curr_state); curr_output.Init(ns_, scratch); - ZeroVector(ns_, curr_output); + ZeroVector(ns_, curr_output); // Rotating buffers of width buf_width allow storage of the state and output // for the other dimension, used only when working in true 2D mode. The width // is enough to hold an entire strip of the major direction. @@ -325,9 +325,9 @@ void LSTM::Forward(bool debug, const NetworkIO &input, const TransposedArray *in outputs.resize(buf_width); for (int i = 0; i < buf_width; ++i) { states[i].Init(ns_, scratch); - ZeroVector(ns_, states[i]); + ZeroVector(ns_, states[i]); outputs[i].Init(ns_, scratch); - ZeroVector(ns_, outputs[i]); + ZeroVector(ns_, outputs[i]); } } // Used only if a softmax LSTM. 
@@ -335,7 +335,7 @@ void LSTM::Forward(bool debug, const NetworkIO &input, const TransposedArray *in NetworkScratch::IO int_output; if (softmax_ != nullptr) { softmax_output.Init(no_, scratch); - ZeroVector(no_, softmax_output); + ZeroVector(no_, softmax_output); int rounded_softmax_inputs = gate_weights_[CI].RoundInputs(ns_); if (input.int_mode()) { int_output.Resize2d(true, 1, rounded_softmax_inputs, scratch); @@ -429,7 +429,7 @@ void LSTM::Forward(bool debug, const NetworkIO &input, const TransposedArray *in int8_t *which_fg_col = which_fg_[t]; memset(which_fg_col, 1, ns_ * sizeof(which_fg_col[0])); if (valid_2d) { - const double *stepped_state = states[mod_t]; + const TFloat *stepped_state = states[mod_t]; for (int i = 0; i < ns_; ++i) { if (temp_lines[GF1][i] < temp_lines[GFS][i]) { curr_state[i] = temp_lines[GFS][i] * stepped_state[i]; @@ -440,7 +440,7 @@ void LSTM::Forward(bool debug, const NetworkIO &input, const TransposedArray *in } MultiplyAccumulate(ns_, temp_lines[CI], temp_lines[GI], curr_state); // Clip curr_state to a sane range. - ClipVector(ns_, -kStateClip, kStateClip, curr_state); + ClipVector(ns_, -kStateClip, kStateClip, curr_state); if (IsTraining()) { // Save the gate node values. node_values_[CI].WriteTimeStep(t, temp_lines[CI]); @@ -483,8 +483,8 @@ void LSTM::Forward(bool debug, const NetworkIO &input, const TransposedArray *in // Always zero the states at the end of every row, but only for the major // direction. The 2-D state remains intact. 
if (src_index.IsLast(FD_WIDTH)) { - ZeroVector(ns_, curr_state); - ZeroVector(ns_, curr_output); + ZeroVector(ns_, curr_state); + ZeroVector(ns_, curr_output); } } while (src_index.Increment()); #if DEBUG_DETAIL > 0 @@ -520,8 +520,8 @@ bool LSTM::Backward(bool debug, const NetworkIO &fwd_deltas, NetworkScratch *scr NetworkScratch::FloatVec curr_stateerr, curr_sourceerr; curr_stateerr.Init(ns_, scratch); curr_sourceerr.Init(na_, scratch); - ZeroVector(ns_, curr_stateerr); - ZeroVector(na_, curr_sourceerr); + ZeroVector(ns_, curr_stateerr); + ZeroVector(na_, curr_sourceerr); // Errors in the gates. NetworkScratch::FloatVec gate_errors[WT_COUNT]; for (auto &gate_error : gate_errors) { @@ -537,8 +537,8 @@ bool LSTM::Backward(bool debug, const NetworkIO &fwd_deltas, NetworkScratch *scr for (int t = 0; t < buf_width; ++t) { stateerr[t].Init(ns_, scratch); sourceerr[t].Init(na_, scratch); - ZeroVector(ns_, stateerr[t]); - ZeroVector(na_, sourceerr[t]); + ZeroVector(ns_, stateerr[t]); + ZeroVector(na_, sourceerr[t]); } } // Parallel-generated sourceerr from each of the gates. @@ -559,7 +559,7 @@ bool LSTM::Backward(bool debug, const NetworkIO &fwd_deltas, NetworkScratch *scr softmax_errors.Init(no_, scratch); softmax_errors_t.Init(no_, width, scratch); } - double state_clip = Is2D() ? 9.0 : 4.0; + TFloat state_clip = Is2D() ? 9.0 : 4.0; #if DEBUG_DETAIL > 1 tprintf("fwd_deltas:%s\n", name_.c_str()); fwd_deltas.Print(10); @@ -594,8 +594,8 @@ bool LSTM::Backward(bool debug, const NetworkIO &fwd_deltas, NetworkScratch *scr int mod_t = Modulo(t, buf_width); // Current timestep. // Zero the state in the major direction only at the end of every row. if (at_last_x) { - ZeroVector(na_, curr_sourceerr); - ZeroVector(ns_, curr_stateerr); + ZeroVector(na_, curr_sourceerr); + ZeroVector(ns_, curr_stateerr); } // Setup the outputerr. 
if (type_ == NT_LSTM_SUMMARY) { @@ -603,7 +603,7 @@ bool LSTM::Backward(bool debug, const NetworkIO &fwd_deltas, NetworkScratch *scr fwd_deltas.ReadTimeStep(src_index.t(), outputerr); src_index.Decrement(); } else { - ZeroVector(ns_, outputerr); + ZeroVector(ns_, outputerr); } } else if (softmax_ == nullptr) { fwd_deltas.ReadTimeStep(t, outputerr); @@ -631,7 +631,7 @@ bool LSTM::Backward(bool debug, const NetworkIO &fwd_deltas, NetworkScratch *scr } if (down_pos >= 0) { const float *right_node_gfs = node_values_[GFS].f(down_pos); - const double *right_stateerr = stateerr[mod_t]; + const TFloat *right_stateerr = stateerr[mod_t]; for (int i = 0; i < ns_; ++i) { if (which_fg_[down_pos][i] == 2) { curr_stateerr[i] += right_stateerr[i] * right_node_gfs[i]; @@ -641,7 +641,7 @@ bool LSTM::Backward(bool debug, const NetworkIO &fwd_deltas, NetworkScratch *scr } state_.FuncMultiply3Add(node_values_[GO], t, outputerr, curr_stateerr); // Clip stateerr_ to a sane range. - ClipVector(ns_, -state_clip, state_clip, curr_stateerr); + ClipVector(ns_, -state_clip, state_clip, curr_stateerr); #if DEBUG_DETAIL > 1 if (t + 10 > width) { tprintf("t=%d, stateerr=", t); @@ -758,7 +758,7 @@ void LSTM::Update(float learning_rate, float momentum, float adam_beta, int num_ // Sums the products of weight updates in *this and other, splitting into // positive (same direction) in *same and negative (different direction) in // *changed. -void LSTM::CountAlternators(const Network &other, double *same, double *changed) const { +void LSTM::CountAlternators(const Network &other, TFloat *same, TFloat *changed) const { ASSERT_HOST(other.type() == type_); const LSTM *lstm = static_cast(&other); for (int w = 0; w < WT_COUNT; ++w) { @@ -772,6 +772,8 @@ void LSTM::CountAlternators(const Network &other, double *same, double *changed) } } +#if DEBUG_DETAIL > 3 + // Prints the weights for debug purposes. 
void LSTM::PrintW() { tprintf("Weight state:%s\n", name_.c_str()); @@ -834,6 +836,8 @@ void LSTM::PrintDW() { } } +#endif + // Resizes forward data to cope with an input image of the given width. void LSTM::ResizeForward(const NetworkIO &input) { int rounded_inputs = gate_weights_[CI].RoundInputs(na_); diff --git a/src/lstm/lstm.h b/src/lstm/lstm.h index 4d399b1db..7f7cace1b 100644 --- a/src/lstm/lstm.h +++ b/src/lstm/lstm.h @@ -109,7 +109,7 @@ public: // Sums the products of weight updates in *this and other, splitting into // positive (same direction) in *same and negative (different direction) in // *changed. - void CountAlternators(const Network &other, double *same, double *changed) const override; + void CountAlternators(const Network &other, TFloat *same, TFloat *changed) const override; // Prints the weights for debug purposes. void PrintW(); // Prints the weight deltas for debug purposes. diff --git a/src/lstm/lstmrecognizer.cpp b/src/lstm/lstmrecognizer.cpp index 14ea1d6bc..52f8a9f65 100644 --- a/src/lstm/lstmrecognizer.cpp +++ b/src/lstm/lstmrecognizer.cpp @@ -234,7 +234,9 @@ bool LSTMRecognizer::LoadDictionary(const ParamsVectors *params, const std::stri if (dict_->FinishLoad()) { return true; // Success. 
} - tprintf("Failed to load any lstm-specific dictionaries for lang %s!!\n", lang.c_str()); + if (log_level <= 0) { + tprintf("Failed to load any lstm-specific dictionaries for lang %s!!\n", lang.c_str()); + } delete dict_; dict_ = nullptr; return false; @@ -269,7 +271,7 @@ void LSTMRecognizer::RecognizeLine(const ImageData &image_data, bool invert, boo } search_->segmentTimestepsByCharacters(); unsigned char_it = 0; - for (int i = 0; i < words->size(); ++i) { + for (size_t i = 0; i < words->size(); ++i) { for (int j = 0; j < words->at(i)->end; ++j) { if (char_it < search_->ctc_choices.size()) { words->at(i)->CTC_symbol_choices.push_back(search_->ctc_choices[char_it]); diff --git a/src/lstm/lstmrecognizer.h b/src/lstm/lstmrecognizer.h index 2892c1702..c1659502c 100644 --- a/src/lstm/lstmrecognizer.h +++ b/src/lstm/lstmrecognizer.h @@ -157,6 +157,26 @@ public: series->ScaleLayerLearningRate(&id[1], factor); } + // Set the all the learning rate(s) to the given value. + void SetLearningRate(float learning_rate) + { + ASSERT_HOST(network_ != nullptr && network_->type() == NT_SERIES); + learning_rate_ = learning_rate; + if (network_->TestFlag(NF_LAYER_SPECIFIC_LR)) { + for (auto &id : EnumerateLayers()) { + SetLayerLearningRate(id, learning_rate); + } + } + } + // Set the learning rate of the layer with id, by the given value. + void SetLayerLearningRate(const std::string &id, float learning_rate) + { + ASSERT_HOST(network_ != nullptr && network_->type() == NT_SERIES); + ASSERT_HOST(id.length() > 1 && id[0] == ':'); + auto *series = static_cast(network_); + series->SetLayerLearningRate(&id[1], learning_rate); + } + // Converts the network to int if not already. 
void ConvertToInt() { if ((training_flags_ & TF_INT_MODE) == 0) { diff --git a/src/lstm/network.cpp b/src/lstm/network.cpp index f00a4274b..23a6c3541 100644 --- a/src/lstm/network.cpp +++ b/src/lstm/network.cpp @@ -134,7 +134,7 @@ void Network::SetNetworkFlags(uint32_t flags) { // Sets up the network for training. Initializes weights using weights of // scale `range` picked according to the random number generator `randomizer`. -int Network::InitWeights(float range, TRand *randomizer) { +int Network::InitWeights([[maybe_unused]] float range, TRand *randomizer) { randomizer_ = randomizer; return 0; } @@ -321,7 +321,7 @@ Network *Network::CreateFromFile(TFile *fp) { } // Returns a random number in [-range, range]. -double Network::Random(double range) { +TFloat Network::Random(TFloat range) { ASSERT_HOST(randomizer_ != nullptr); return randomizer_->SignedRand(range); } diff --git a/src/lstm/network.h b/src/lstm/network.h index 4faac88de..0a62baf29 100644 --- a/src/lstm/network.h +++ b/src/lstm/network.h @@ -187,7 +187,8 @@ public: // output from code_map[output] where non-negative, and uses the mean (over // all outputs) of the existing weights for all outputs with negative code_map // entries. Returns the new number of weights. - virtual int RemapOutputs(int old_no, const std::vector &code_map) { + virtual int RemapOutputs([[maybe_unused]] int old_no, + [[maybe_unused]] const std::vector &code_map) { return 0; } @@ -216,7 +217,7 @@ public: // Provides the (minimum) x scale factor to the network (of interest only to // input units) so they can determine how to scale bounding boxes. - virtual void CacheXScaleFactor(int factor) {} + virtual void CacheXScaleFactor([[maybe_unused]] int factor) {} // Provides debug output on the weights. virtual void DebugWeights() = 0; @@ -231,11 +232,16 @@ public: public: // Updates the weights using the given learning rate, momentum and adam_beta. // num_samples is used in the adam computation iff use_adam_ is true. 
- virtual void Update(float learning_rate, float momentum, float adam_beta, int num_samples) {} + virtual void Update([[maybe_unused]] float learning_rate, + [[maybe_unused]] float momentum, + [[maybe_unused]] float adam_beta, + [[maybe_unused]] int num_samples) {} // Sums the products of weight updates in *this and other, splitting into // positive (same direction) in *same and negative (different direction) in // *changed. - virtual void CountAlternators(const Network &other, double *same, double *changed) const {} + virtual void CountAlternators([[maybe_unused]] const Network &other, + [[maybe_unused]] TFloat *same, + [[maybe_unused]] TFloat *changed) const {} // Reads from the given file. Returns nullptr in case of error. // Determines the type of the serialized class and calls its DeSerialize @@ -260,7 +266,8 @@ public: // reference it on a call to backward. This is a bit ugly, but it makes it // possible for a replicating parallel to calculate the input transpose once // instead of all the replicated networks having to do it. - virtual void Forward(bool debug, const NetworkIO &input, const TransposedArray *input_transpose, + virtual void Forward(bool debug, const NetworkIO &input, + const TransposedArray *input_transpose, NetworkScratch *scratch, NetworkIO *output) = 0; // Runs backward propagation of errors on fwdX_deltas. @@ -268,8 +275,8 @@ public: // Returns false if back_deltas was not set, due to there being no point in // propagating further backwards. Thus most complete networks will always // return false from Backward! - virtual bool Backward(bool debug, const NetworkIO &fwd_deltas, NetworkScratch *scratch, - NetworkIO *back_deltas) = 0; + virtual bool Backward(bool debug, const NetworkIO &fwd_deltas, + NetworkScratch *scratch, NetworkIO *back_deltas) = 0; // === Debug image display methods. === // Displays the image of the matrix to the forward window. 
@@ -278,8 +285,8 @@ public: void DisplayBackward(const NetworkIO &matrix); // Creates the window if needed, otherwise clears it. - static void ClearWindow(bool tess_coords, const char *window_name, int width, int height, - ScrollView **window); + static void ClearWindow(bool tess_coords, const char *window_name, int width, + int height, ScrollView **window); // Displays the pix in the given window. and returns the height of the pix. // The pix is pixDestroyed. @@ -287,7 +294,7 @@ public: protected: // Returns a random number in [-range, range]. - double Random(double range); + TFloat Random(TFloat range); protected: NetworkType type_; // Type of the derived network class. diff --git a/src/lstm/networkio.cpp b/src/lstm/networkio.cpp index 72f33efcb..080fed0fb 100644 --- a/src/lstm/networkio.cpp +++ b/src/lstm/networkio.cpp @@ -17,6 +17,7 @@ #include "networkio.h" #include // for FLT_MAX +#include #include #include "functions.h" @@ -28,7 +29,7 @@ namespace tesseract { // Minimum value to output for certainty. const float kMinCertainty = -20.0f; // Probability corresponding to kMinCertainty. -const float kMinProb = exp(kMinCertainty); +const float kMinProb = std::exp(kMinCertainty); // Resizes to a specific size as a 2-d temp buffer. No batches, no y-dim. void NetworkIO::Resize2d(bool int_mode, int width, int num_features) { @@ -356,7 +357,7 @@ Image NetworkIO::ToPix() const { } else if (num_features > 3) { // More than 3 features use false yellow/blue color, assuming a signed // input in the range [-1,1]. - red = ClipToRange(IntCastRounded(fabs(pixel) * 255), 0, 255); + red = ClipToRange(IntCastRounded(std::fabs(pixel) * 255), 0, 255); if (pixel >= 0) { green = red; blue = 0; @@ -411,15 +412,6 @@ void NetworkIO::CopyTimeStepGeneral(int dest_t, int dest_offset, int num_feature } } -// Zeroes a single time step. 
-void NetworkIO::ZeroTimeStepGeneral(int t, int offset, int num_features) { - if (int_mode_) { - ZeroVector(num_features, i_[t] + offset); - } else { - ZeroVector(num_features, f_[t] + offset); - } -} - // Sets the given range to random values. void NetworkIO::Randomize(int t, int offset, int num_features, TRand *randomizer) { if (int_mode_) { @@ -529,9 +521,9 @@ int NetworkIO::PositionOfBestMatch(const std::vector &labels, int start, in int length = labels.size(); int last_start = end - length; int best_start = -1; - double best_score = 0.0; + TFloat best_score = 0; for (int s = start; s <= last_start; ++s) { - double score = ScoreOfLabels(labels, s); + TFloat score = ScoreOfLabels(labels, s); if (score > best_score || best_start < 0) { best_score = score; best_start = s; @@ -542,9 +534,9 @@ int NetworkIO::PositionOfBestMatch(const std::vector &labels, int start, in // Returns the cumulative score of the given labels starting at start, and // using one label per time-step. -double NetworkIO::ScoreOfLabels(const std::vector &labels, int start) const { +TFloat NetworkIO::ScoreOfLabels(const std::vector &labels, int start) const { int length = labels.size(); - double score = 0.0; + TFloat score = 0; for (int i = 0; i < length; ++i) { score += f_(start + i, labels[i]); } @@ -586,7 +578,7 @@ void NetworkIO::EnsureBestLabel(int t, int label) { // Helper function converts prob to certainty taking the minimum into account. /* static */ float NetworkIO::ProbToCertainty(float prob) { - return prob > kMinProb ? log(prob) : kMinCertainty; + return prob > kMinProb ? std::log(prob) : kMinCertainty; } // Returns true if there is any bad value that is suspiciously like a GT @@ -615,27 +607,27 @@ bool NetworkIO::AnySuspiciousTruth(float confidence_thr) const { } // Reads a single timestep to floats in the range [-1, 1]. 
-void NetworkIO::ReadTimeStep(int t, double *output) const { +void NetworkIO::ReadTimeStep(int t, TFloat *output) const { if (int_mode_) { const int8_t *line = i_[t]; for (int i = 0; i < i_.dim2(); ++i) { - output[i] = static_cast(line[i]) / INT8_MAX; + output[i] = static_cast(line[i]) / INT8_MAX; } } else { const float *line = f_[t]; for (int i = 0; i < f_.dim2(); ++i) { - output[i] = static_cast(line[i]); + output[i] = static_cast(line[i]); } } } // Adds a single timestep to floats. -void NetworkIO::AddTimeStep(int t, double *inout) const { +void NetworkIO::AddTimeStep(int t, TFloat *inout) const { int num_features = NumFeatures(); if (int_mode_) { const int8_t *line = i_[t]; for (int i = 0; i < num_features; ++i) { - inout[i] += static_cast(line[i]) / INT8_MAX; + inout[i] += static_cast(line[i]) / INT8_MAX; } } else { const float *line = f_[t]; @@ -661,13 +653,13 @@ void NetworkIO::AddTimeStepPart(int t, int offset, int num_features, float *inou } // Writes a single timestep from floats in the range [-1, 1]. -void NetworkIO::WriteTimeStep(int t, const double *input) { +void NetworkIO::WriteTimeStep(int t, const TFloat *input) { WriteTimeStepPart(t, 0, NumFeatures(), input); } // Writes a single timestep from floats in the range [-1, 1] writing only // num_features elements of input to (*this)[t], starting at offset. -void NetworkIO::WriteTimeStepPart(int t, int offset, int num_features, const double *input) { +void NetworkIO::WriteTimeStepPart(int t, int offset, int num_features, const TFloat *input) { if (int_mode_) { int8_t *line = i_[t] + offset; for (int i = 0; i < num_features; ++i) { @@ -807,7 +799,7 @@ void NetworkIO::ComputeCombinerDeltas(const NetworkIO &fwd_deltas, const Network // Reconstruct the target from the delta. 
float comb_target = delta_line[i] + output; comb_line[i] = comb_target - comb_line[i]; - float base_delta = fabs(comb_target - base_line[i]); + float base_delta = std::fabs(comb_target - base_line[i]); if (base_delta > max_base_delta) { max_base_delta = base_delta; } diff --git a/src/lstm/networkio.h b/src/lstm/networkio.h index e170bc853..a5e7a80be 100644 --- a/src/lstm/networkio.h +++ b/src/lstm/networkio.h @@ -2,7 +2,6 @@ // File: networkio.h // Description: Network input/output data, allowing float/int implementations. // Author: Ray Smith -// Created: Tue Jun 17 08:43:11 PST 2014 // // (C) Copyright 2014, Google Inc. // Licensed under the Apache License, Version 2.0 (the "License"); @@ -146,9 +145,12 @@ public: int src_t, int src_offset); // Zeroes a single time step. void ZeroTimeStep(int t) { - ZeroTimeStepGeneral(t, 0, NumFeatures()); + if (int_mode_) { + memset(i_[t], 0, sizeof(*i_[t]) * NumFeatures()); + } else { + memset(f_[t], 0, sizeof(*f_[t]) * NumFeatures()); + } } - void ZeroTimeStepGeneral(int t, int offset, int num_features); // Sets the given range to random values. void Randomize(int t, int offset, int num_features, TRand *randomizer); @@ -172,7 +174,7 @@ public: int PositionOfBestMatch(const std::vector &labels, int start, int end) const; // Returns the cumulative score of the given labels starting at start, and // using one label per time-step. - double ScoreOfLabels(const std::vector &labels, int start) const; + TFloat ScoreOfLabels(const std::vector &labels, int start) const; // Helper function sets all the outputs for a single timestep, such that // label has value ok_score, and the other labels share 1 - ok_score. // Assumes float mode. @@ -193,16 +195,16 @@ public: bool AnySuspiciousTruth(float confidence_thr) const; // Reads a single timestep to floats in the range [-1, 1]. - void ReadTimeStep(int t, double *output) const; + void ReadTimeStep(int t, TFloat *output) const; // Adds a single timestep to floats. 
- void AddTimeStep(int t, double *inout) const; + void AddTimeStep(int t, TFloat *inout) const; // Adds part of a single timestep to floats. void AddTimeStepPart(int t, int offset, int num_features, float *inout) const; // Writes a single timestep from floats in the range [-1, 1]. - void WriteTimeStep(int t, const double *input); + void WriteTimeStep(int t, const TFloat *input); // Writes a single timestep from floats in the range [-1, 1] writing only // num_features elements of input to (*this)[t], starting at offset. - void WriteTimeStepPart(int t, int offset, int num_features, const double *input); + void WriteTimeStepPart(int t, int offset, int num_features, const TFloat *input); // Maxpools a single time step from src. void MaxpoolTimeStep(int dest_t, const NetworkIO &src, int src_t, int *max_line); // Runs maxpool backward, using maxes to index timesteps in *this. @@ -253,9 +255,9 @@ public: // Applies Func to timestep t of *this (u) and multiplies the result by v // component-wise, putting the product in *product. - // *this and v may be int or float, but must match. The outputs are double. + // *this and v may be int or float, but must match. The outputs are TFloat. template - void FuncMultiply(const NetworkIO &v_io, int t, double *product) { + void FuncMultiply(const NetworkIO &v_io, int t, TFloat *product) { Func f; ASSERT_HOST(!int_mode_); ASSERT_HOST(!v_io.int_mode_); @@ -264,7 +266,7 @@ public: const int8_t *u = i_[t]; const int8_t *v = v_io.i_[t]; for (int i = 0; i < dim; ++i) { - product[i] = f(u[i] / static_cast(INT8_MAX)) * v[i] / static_cast(INT8_MAX); + product[i] = f(u[i] / static_cast(INT8_MAX)) * v[i] / INT8_MAX; } } else { const float *u = f_[t]; @@ -278,8 +280,8 @@ public: // component-wise, putting the product in *product. // All NetworkIOs are assumed to be float. 
template - void FuncMultiply3(int u_t, const NetworkIO &v_io, int v_t, const double *w, - double *product) const { + void FuncMultiply3(int u_t, const NetworkIO &v_io, int v_t, const TFloat *w, + TFloat *product) const { ASSERT_HOST(!int_mode_); ASSERT_HOST(!v_io.int_mode_); Func f; @@ -294,7 +296,7 @@ public: // component-wise, adding the product to *product. // All NetworkIOs are assumed to be float. template - void FuncMultiply3Add(const NetworkIO &v_io, int t, const double *w, double *product) const { + void FuncMultiply3Add(const NetworkIO &v_io, int t, const TFloat *w, TFloat *product) const { ASSERT_HOST(!int_mode_); ASSERT_HOST(!v_io.int_mode_); Func f; @@ -309,7 +311,7 @@ public: // component-wise, putting the product in product, all at timestep t, except // w, which is a simple array. All NetworkIOs are assumed to be float. template - void Func2Multiply3(const NetworkIO &v_io, int t, const double *w, double *product) const { + void Func2Multiply3(const NetworkIO &v_io, int t, const TFloat *w, TFloat *product) const { ASSERT_HOST(!int_mode_); ASSERT_HOST(!v_io.int_mode_); Func1 f; diff --git a/src/lstm/networkscratch.h b/src/lstm/networkscratch.h index f36737470..869560e1b 100644 --- a/src/lstm/networkscratch.h +++ b/src/lstm/networkscratch.h @@ -140,14 +140,14 @@ public: } } - void Init(int size, int reserve, NetworkScratch *scratch) { + void Init(int /*size*/, int reserve, NetworkScratch *scratch) { if (scratch_space_ != nullptr && vec_ != nullptr) { scratch_space_->vec_stack_.Return(vec_); } scratch_space_ = scratch; vec_ = scratch_space_->vec_stack_.Borrow(); - vec_->reserve(reserve); - vec_->resize(size); + // TODO: optimize. + vec_->resize(reserve); data_ = &(*vec_)[0]; } @@ -156,25 +156,25 @@ public: } // Use the cast operator instead of operator[] so the FloatVec can be used - // as a double* argument to a function call. - operator double *() const { + // as a TFloat* argument to a function call. 
+ operator TFloat *() const { return data_; } - double *get() { + TFloat *get() { return data_; } private: // Vector borrowed from the scratch space. Use Return to free it. - std::vector *vec_; + std::vector *vec_; // Short-cut pointer to the underlying array. - double *data_; + TFloat *data_; // The source scratch_space_. Borrowed pointer, used to free the // vector. Don't delete! NetworkScratch *scratch_space_; }; // class FloatVec - // Class that acts like a 2-D array of double, yet actually uses space + // Class that acts like a 2-D array of TFloat, yet actually uses space // from the source NetworkScratch, and knows how to unstack the borrowed // array on destruction. class GradientStore { @@ -270,7 +270,7 @@ private: // deleted until the NetworkScratch is deleted. Stack int_stack_; Stack float_stack_; - Stack> vec_stack_; + Stack> vec_stack_; Stack array_stack_; }; diff --git a/src/lstm/parallel.cpp b/src/lstm/parallel.cpp index 343b47996..2713314cc 100644 --- a/src/lstm/parallel.cpp +++ b/src/lstm/parallel.cpp @@ -120,14 +120,14 @@ bool Parallel::Backward(bool debug, const NetworkIO &fwd_deltas, NetworkScratch #endif debug = false; } - int stack_size = stack_.size(); + auto stack_size = stack_.size(); if (type_ == NT_PAR_2D_LSTM) { // Special case, run parallel in parallel. std::vector in_deltas(stack_size); std::vector out_deltas(stack_size); // Split the forward deltas for each stack element. 
int feature_offset = 0; - for (int i = 0; i < stack_.size(); ++i) { + for (unsigned i = 0; i < stack_.size(); ++i) { int num_features = stack_[i]->NumOutputs(); in_deltas[i].Resize(fwd_deltas, num_features, scratch); out_deltas[i].Resize(fwd_deltas, stack_[i]->NumInputs(), scratch); @@ -137,11 +137,11 @@ bool Parallel::Backward(bool debug, const NetworkIO &fwd_deltas, NetworkScratch #ifdef _OPENMP # pragma omp parallel for num_threads(stack_size) #endif - for (int i = 0; i < stack_size; ++i) { + for (unsigned i = 0; i < stack_size; ++i) { stack_[i]->Backward(debug, *in_deltas[i], scratch, i == 0 ? back_deltas : out_deltas[i]); } if (needs_to_backprop_) { - for (int i = 1; i < stack_size; ++i) { + for (unsigned i = 1; i < stack_size; ++i) { back_deltas->AddAllToFloat(*out_deltas[i]); } } @@ -152,7 +152,7 @@ bool Parallel::Backward(bool debug, const NetworkIO &fwd_deltas, NetworkScratch // back_deltas. NetworkScratch::IO out_deltas; int feature_offset = 0; - for (int i = 0; i < stack_.size(); ++i) { + for (unsigned i = 0; i < stack_.size(); ++i) { int num_features = stack_[i]->NumOutputs(); in_deltas->CopyUnpacking(fwd_deltas, feature_offset, num_features); feature_offset += num_features; diff --git a/src/lstm/plumbing.cpp b/src/lstm/plumbing.cpp index 98ec64ef2..ebb6612ea 100644 --- a/src/lstm/plumbing.cpp +++ b/src/lstm/plumbing.cpp @@ -142,7 +142,7 @@ void Plumbing::DebugWeights() { // Returns a set of strings representing the layer-ids of all layers below. 
void Plumbing::EnumerateLayers(const std::string *prefix, std::vector &layers) const { - for (int i = 0; i < stack_.size(); ++i) { + for (size_t i = 0; i < stack_.size(); ++i) { std::string layer_name; if (prefix) { layer_name = *prefix; @@ -161,7 +161,7 @@ void Plumbing::EnumerateLayers(const std::string *prefix, std::vector= stack_.size()) { + if (index < 0 || static_cast(index) >= stack_.size()) { return nullptr; } if (stack_[index]->IsPlumbingType()) { @@ -176,7 +176,7 @@ Network *Plumbing::GetLayer(const char *id) const { float *Plumbing::LayerLearningRatePtr(const char *id) { char *next_id; int index = strtol(id, &next_id, 10); - if (index < 0 || index >= stack_.size()) { + if (index < 0 || static_cast(index) >= stack_.size()) { return nullptr; } if (stack_[index]->IsPlumbingType()) { @@ -184,7 +184,7 @@ float *Plumbing::LayerLearningRatePtr(const char *id) { ASSERT_HOST(*next_id == ':'); return plumbing->LayerLearningRatePtr(next_id + 1); } - if (index >= learning_rates_.size()) { + if (static_cast(index) >= learning_rates_.size()) { return nullptr; } return &learning_rates_[index]; @@ -238,7 +238,7 @@ bool Plumbing::DeSerialize(TFile *fp) { // Updates the weights using the given learning rate, momentum and adam_beta. // num_samples is used in the adam computation iff use_adam_ is true. void Plumbing::Update(float learning_rate, float momentum, float adam_beta, int num_samples) { - for (int i = 0; i < stack_.size(); ++i) { + for (size_t i = 0; i < stack_.size(); ++i) { if (network_flags_ & NF_LAYER_SPECIFIC_LR) { if (i < learning_rates_.size()) { learning_rate = learning_rates_[i]; @@ -255,11 +255,11 @@ void Plumbing::Update(float learning_rate, float momentum, float adam_beta, int // Sums the products of weight updates in *this and other, splitting into // positive (same direction) in *same and negative (different direction) in // *changed. 
-void Plumbing::CountAlternators(const Network &other, double *same, double *changed) const { +void Plumbing::CountAlternators(const Network &other, TFloat *same, TFloat *changed) const { ASSERT_HOST(other.type() == type_); const auto *plumbing = static_cast(&other); ASSERT_HOST(plumbing->stack_.size() == stack_.size()); - for (int i = 0; i < stack_.size(); ++i) { + for (size_t i = 0; i < stack_.size(); ++i) { stack_[i]->CountAlternators(*plumbing->stack_[i], same, changed); } } diff --git a/src/lstm/plumbing.h b/src/lstm/plumbing.h index 1c65fe9f6..c1ecc2f23 100644 --- a/src/lstm/plumbing.h +++ b/src/lstm/plumbing.h @@ -120,6 +120,14 @@ public: ASSERT_HOST(lr_ptr != nullptr); *lr_ptr *= factor; } + + // Set the learning rate for a specific layer of the stack to the given value. + void SetLayerLearningRate(const char *id, float learning_rate) { + float *lr_ptr = LayerLearningRatePtr(id); + ASSERT_HOST(lr_ptr != nullptr); + *lr_ptr = learning_rate; + } + // Returns a pointer to the learning rate for the given layer id. TESS_API float *LayerLearningRatePtr(const char *id); @@ -135,7 +143,7 @@ public: // Sums the products of weight updates in *this and other, splitting into // positive (same direction) in *same and negative (different direction) in // *changed. - void CountAlternators(const Network &other, double *same, double *changed) const override; + void CountAlternators(const Network &other, TFloat *same, TFloat *changed) const override; protected: // The networks. diff --git a/src/lstm/recodebeam.cpp b/src/lstm/recodebeam.cpp index 4413181a8..c3a80f40b 100644 --- a/src/lstm/recodebeam.cpp +++ b/src/lstm/recodebeam.cpp @@ -46,11 +46,13 @@ static const char *kNodeContNames[] = {"Anything", "OnlyDup", "NoDup"}; static const float kMinDiplopiaKey = 0.25f; // Prints debug details of the node. 
-void RecodeNode::Print(int null_char, const UNICHARSET &unicharset, int depth) const { +void RecodeNode::Print(int null_char, const UNICHARSET &unicharset, + int depth) const { if (code == null_char) { tprintf("null_char"); } else { - tprintf("label=%d, uid=%d=%s", code, unichar_id, unicharset.debug_str(unichar_id).c_str()); + tprintf("label=%d, uid=%d=%s", code, unichar_id, + unicharset.debug_str(unichar_id).c_str()); } tprintf(" score=%g, c=%g,%s%s%s perm=%d, hash=%" PRIx64, score, certainty, start_of_dawg ? " DawgStart" : "", start_of_word ? " Start" : "", @@ -64,16 +66,16 @@ void RecodeNode::Print(int null_char, const UNICHARSET &unicharset, int depth) c } // Borrows the pointer, which is expected to survive until *this is deleted. -RecodeBeamSearch::RecodeBeamSearch(const UnicharCompress &recoder, int null_char, bool simple_text, - Dict *dict) - : recoder_(recoder) - , beam_size_(0) - , top_code_(-1) - , second_code_(-1) - , dict_(dict) - , space_delimited_(true) - , is_simple_text_(simple_text) - , null_char_(null_char) { +RecodeBeamSearch::RecodeBeamSearch(const UnicharCompress &recoder, + int null_char, bool simple_text, Dict *dict) + : recoder_(recoder), + beam_size_(0), + top_code_(-1), + second_code_(-1), + dict_(dict), + space_delimited_(true), + is_simple_text_(simple_text), + null_char_(null_char) { if (dict_ != nullptr && !dict_->IsSpaceDelimitedLang()) { space_delimited_ = false; } @@ -89,9 +91,9 @@ RecodeBeamSearch::~RecodeBeamSearch() { } // Decodes the set of network outputs, storing the lattice internally. 
-void RecodeBeamSearch::Decode(const NetworkIO &output, double dict_ratio, double cert_offset, - double worst_dict_cert, const UNICHARSET *charset, - int lstm_choice_mode) { +void RecodeBeamSearch::Decode(const NetworkIO &output, double dict_ratio, + double cert_offset, double worst_dict_cert, + const UNICHARSET *charset, int lstm_choice_mode) { beam_size_ = 0; int width = output.Width(); if (lstm_choice_mode) { @@ -99,14 +101,16 @@ void RecodeBeamSearch::Decode(const NetworkIO &output, double dict_ratio, double } for (int t = 0; t < width; ++t) { ComputeTopN(output.f(t), output.NumFeatures(), kBeamWidths[0]); - DecodeStep(output.f(t), t, dict_ratio, cert_offset, worst_dict_cert, charset); + DecodeStep(output.f(t), t, dict_ratio, cert_offset, worst_dict_cert, + charset); if (lstm_choice_mode) { SaveMostCertainChoices(output.f(t), output.NumFeatures(), charset, t); } } } -void RecodeBeamSearch::Decode(const GENERIC_2D_ARRAY &output, double dict_ratio, - double cert_offset, double worst_dict_cert, +void RecodeBeamSearch::Decode(const GENERIC_2D_ARRAY &output, + double dict_ratio, double cert_offset, + double worst_dict_cert, const UNICHARSET *charset) { beam_size_ = 0; int width = output.dim1(); @@ -116,9 +120,9 @@ void RecodeBeamSearch::Decode(const GENERIC_2D_ARRAY &output, double dict } } -void RecodeBeamSearch::DecodeSecondaryBeams(const NetworkIO &output, double dict_ratio, - double cert_offset, double worst_dict_cert, - const UNICHARSET *charset, int lstm_choice_mode) { +void RecodeBeamSearch::DecodeSecondaryBeams( + const NetworkIO &output, double dict_ratio, double cert_offset, + double worst_dict_cert, const UNICHARSET *charset, int lstm_choice_mode) { for (auto data : secondary_beam_) { delete data; } @@ -127,20 +131,23 @@ void RecodeBeamSearch::DecodeSecondaryBeams(const NetworkIO &output, double dict return; } int width = output.Width(); - int bucketNumber = 0; + unsigned bucketNumber = 0; for (int t = 0; t < width; ++t) { while ((bucketNumber + 1) < 
character_boundaries_.size() && t >= character_boundaries_[bucketNumber + 1]) { ++bucketNumber; } - ComputeSecTopN(&(excludedUnichars)[bucketNumber], output.f(t), output.NumFeatures(), - kBeamWidths[0]); - DecodeSecondaryStep(output.f(t), t, dict_ratio, cert_offset, worst_dict_cert, charset); + ComputeSecTopN(&(excludedUnichars)[bucketNumber], output.f(t), + output.NumFeatures(), kBeamWidths[0]); + DecodeSecondaryStep(output.f(t), t, dict_ratio, cert_offset, + worst_dict_cert, charset); } } -void RecodeBeamSearch::SaveMostCertainChoices(const float *outputs, int num_outputs, - const UNICHARSET *charset, int xCoord) { +void RecodeBeamSearch::SaveMostCertainChoices(const float *outputs, + int num_outputs, + const UNICHARSET *charset, + int xCoord) { std::vector> choices; for (int i = 0; i < num_outputs; ++i) { if (outputs[i] >= 0.01f) { @@ -158,16 +165,18 @@ void RecodeBeamSearch::SaveMostCertainChoices(const float *outputs, int num_outp while (choices.size() > pos && choices[pos].second > outputs[i]) { pos++; } - choices.insert(choices.begin() + pos, std::pair(character, outputs[i])); + choices.insert(choices.begin() + pos, + std::pair(character, outputs[i])); } } timesteps.push_back(choices); } void RecodeBeamSearch::segmentTimestepsByCharacters() { - for (int i = 1; i < character_boundaries_.size(); ++i) { + for (unsigned i = 1; i < character_boundaries_.size(); ++i) { std::vector>> segment; - for (int j = character_boundaries_[i - 1]; j < character_boundaries_[i]; ++j) { + for (int j = character_boundaries_[i - 1]; j < character_boundaries_[i]; + ++j) { segment.push_back(timesteps[j]); } segmentedTimesteps.push_back(segment); @@ -175,7 +184,8 @@ void RecodeBeamSearch::segmentTimestepsByCharacters() { } std::vector>> RecodeBeamSearch::combineSegmentedTimesteps( - std::vector>>> *segmentedTimesteps) { + std::vector>>> + *segmentedTimesteps) { std::vector>> combined_timesteps; for (auto &segmentedTimestep : *segmentedTimesteps) { for (auto &j : segmentedTimestep) { 
@@ -185,10 +195,12 @@ RecodeBeamSearch::combineSegmentedTimesteps( return combined_timesteps; } -void RecodeBeamSearch::calculateCharBoundaries(std::vector *starts, std::vector *ends, - std::vector *char_bounds_, int maxWidth) { +void RecodeBeamSearch::calculateCharBoundaries(std::vector *starts, + std::vector *ends, + std::vector *char_bounds_, + int maxWidth) { char_bounds_->push_back((*starts)[0]); - for (int i = 0; i < ends->size(); ++i) { + for (unsigned i = 0; i < ends->size(); ++i) { int middle = ((*starts)[i + 1] - (*ends)[i]) / 2; char_bounds_->push_back((*ends)[i] + middle); } @@ -197,8 +209,8 @@ void RecodeBeamSearch::calculateCharBoundaries(std::vector *starts, std::ve } // Returns the best path as labels/scores/xcoords similar to simple CTC. -void RecodeBeamSearch::ExtractBestPathAsLabels(std::vector *labels, - std::vector *xcoords) const { +void RecodeBeamSearch::ExtractBestPathAsLabels( + std::vector *labels, std::vector *xcoords) const { labels->clear(); xcoords->clear(); std::vector best_nodes; @@ -220,22 +232,23 @@ void RecodeBeamSearch::ExtractBestPathAsLabels(std::vector *labels, // Returns the best path as unichar-ids/certs/ratings/xcoords skipping // duplicates, nulls and intermediate parts. 
-void RecodeBeamSearch::ExtractBestPathAsUnicharIds(bool debug, const UNICHARSET *unicharset, - std::vector *unichar_ids, - std::vector *certs, - std::vector *ratings, - std::vector *xcoords) const { +void RecodeBeamSearch::ExtractBestPathAsUnicharIds( + bool debug, const UNICHARSET *unicharset, std::vector *unichar_ids, + std::vector *certs, std::vector *ratings, + std::vector *xcoords) const { std::vector best_nodes; ExtractBestPaths(&best_nodes, nullptr); ExtractPathAsUnicharIds(best_nodes, unichar_ids, certs, ratings, xcoords); if (debug) { DebugPath(unicharset, best_nodes); - DebugUnicharPath(unicharset, best_nodes, *unichar_ids, *certs, *ratings, *xcoords); + DebugUnicharPath(unicharset, best_nodes, *unichar_ids, *certs, *ratings, + *xcoords); } } // Returns the best path as a set of WERD_RES. -void RecodeBeamSearch::ExtractBestPathAsWords(const TBOX &line_box, float scale_factor, bool debug, +void RecodeBeamSearch::ExtractBestPathAsWords(const TBOX &line_box, + float scale_factor, bool debug, const UNICHARSET *unicharset, PointerVector *words, int lstm_choice_mode) { @@ -250,9 +263,11 @@ void RecodeBeamSearch::ExtractBestPathAsWords(const TBOX &line_box, float scale_ ExtractBestPaths(&best_nodes, &second_nodes); if (debug) { DebugPath(unicharset, best_nodes); - ExtractPathAsUnicharIds(second_nodes, &unichar_ids, &certs, &ratings, &xcoords); + ExtractPathAsUnicharIds(second_nodes, &unichar_ids, &certs, &ratings, + &xcoords); tprintf("\nSecond choice path:\n"); - DebugUnicharPath(unicharset, second_nodes, unichar_ids, certs, ratings, xcoords); + DebugUnicharPath(unicharset, second_nodes, unichar_ids, certs, ratings, + xcoords); } // If lstm choice mode is required in granularity level 2, it stores the x // Coordinates of every chosen character, to match the alternative choices to @@ -261,7 +276,8 @@ void RecodeBeamSearch::ExtractBestPathAsWords(const TBOX &line_box, float scale_ &character_boundaries_); int num_ids = unichar_ids.size(); if (debug) { - 
DebugUnicharPath(unicharset, best_nodes, unichar_ids, certs, ratings, xcoords); + DebugUnicharPath(unicharset, best_nodes, unichar_ids, certs, ratings, + xcoords); } // Convert labels to unichar-ids. int word_end = 0; @@ -288,16 +304,19 @@ void RecodeBeamSearch::ExtractBestPathAsWords(const TBOX &line_box, float scale_ if (word_end < num_ids && unichar_ids[word_end] == UNICHAR_SPACE) { space_cert = certs[word_end]; } - bool leading_space = word_start > 0 && unichar_ids[word_start - 1] == UNICHAR_SPACE; + bool leading_space = + word_start > 0 && unichar_ids[word_start - 1] == UNICHAR_SPACE; // Create a WERD_RES for the output word. WERD_RES *word_res = InitializeWord(leading_space, line_box, word_start, word_end, - std::min(space_cert, prev_space_cert), unicharset, xcoords, scale_factor); + std::min(space_cert, prev_space_cert), unicharset, + xcoords, scale_factor); for (int i = word_start; i < word_end; ++i) { auto *choices = new BLOB_CHOICE_LIST; BLOB_CHOICE_IT bc_it(choices); - auto *choice = new BLOB_CHOICE(unichar_ids[i], ratings[i], certs[i], -1, 1.0f, - static_cast(INT16_MAX), 0.0f, BCC_STATIC_CLASSIFIER); + auto *choice = new BLOB_CHOICE(unichar_ids[i], ratings[i], certs[i], -1, + 1.0f, static_cast(INT16_MAX), 0.0f, + BCC_STATIC_CLASSIFIER); int col = i - word_start; choice->set_matrix_cell(col, col); bc_it.add_after_then_move(choice); @@ -319,7 +338,8 @@ struct greater_than { } }; -void RecodeBeamSearch::PrintBeam2(bool uids, int num_outputs, const UNICHARSET *charset, +void RecodeBeamSearch::PrintBeam2(bool uids, int num_outputs, + const UNICHARSET *charset, bool secondary) const { std::vector> topology; std::unordered_set visited; @@ -344,8 +364,8 @@ void RecodeBeamSearch::PrintBeam2(bool uids, int num_outputs, const UNICHARSET * } } int ct = 0; - int cb = 1; - for (std::vector layer : topology) { + unsigned cb = 1; + for (const std::vector &layer : topology) { if (cb >= character_boundaries_.size()) { break; } @@ -385,7 +405,8 @@ void 
RecodeBeamSearch::PrintBeam2(bool uids, int num_outputs, const UNICHARSET * prevCode = " "; } if (uids) { - tprintf("%x(|)%f(>)%x(|)%f\n", intPrevCode, prevScore, intCode, node->score); + tprintf("%x(|)%f(>)%x(|)%f\n", intPrevCode, prevScore, intCode, + node->score); } else { tprintf("%s(|)%f(>)%s(|)%f\n", prevCode, prevScore, code, node->score); } @@ -402,16 +423,17 @@ void RecodeBeamSearch::extractSymbolChoices(const UNICHARSET *unicharset) { } // For the first iteration the original beam is analyzed. After that a // new beam is calculated based on the results from the original beam. - std::vector ¤tBeam = secondary_beam_.empty() ? beam_ : secondary_beam_; + std::vector ¤tBeam = + secondary_beam_.empty() ? beam_ : secondary_beam_; character_boundaries_[0] = 0; - for (int j = 1; j < character_boundaries_.size(); ++j) { + for (unsigned j = 1; j < character_boundaries_.size(); ++j) { std::vector unichar_ids; std::vector certs; std::vector ratings; std::vector xcoords; int backpath = character_boundaries_[j] - character_boundaries_[j - 1]; std::vector &heaps = - currentBeam.at(character_boundaries_[j] - 1)->beams_->heap(); + currentBeam.at(character_boundaries_[j] - 1)->beams_->heap(); std::vector best_nodes; std::vector best; // Scan the segmented node chain for valid unichar ids. 
@@ -420,7 +442,8 @@ void RecodeBeamSearch::extractSymbolChoices(const UNICHARSET *unicharset) { int backcounter = 0; const RecodeNode *node = &entry.data(); while (node != nullptr && backcounter < backpath) { - if (node->code != null_char_ && node->unichar_id != INVALID_UNICHAR_ID) { + if (node->code != null_char_ && + node->unichar_id != INVALID_UNICHAR_ID) { validChar = true; break; } @@ -435,22 +458,24 @@ void RecodeBeamSearch::extractSymbolChoices(const UNICHARSET *unicharset) { if (!best.empty()) { std::sort(best.begin(), best.end(), greater_than()); ExtractPath(best[0], &best_nodes, backpath); - ExtractPathAsUnicharIds(best_nodes, &unichar_ids, &certs, &ratings, &xcoords); + ExtractPathAsUnicharIds(best_nodes, &unichar_ids, &certs, &ratings, + &xcoords); } if (!unichar_ids.empty()) { int bestPos = 0; - for (int i = 1; i < unichar_ids.size(); ++i) { + for (unsigned i = 1; i < unichar_ids.size(); ++i) { if (ratings[i] < ratings[bestPos]) { bestPos = i; } } - // TODO: bestCode is currently unused (see commit 2dd5d0d60). +#if 0 // TODO: bestCode is currently unused (see commit 2dd5d0d60). int bestCode = -10; for (auto &node : best_nodes) { if (node->unichar_id == unichar_ids[bestPos]) { bestCode = node->code; } } +#endif // Exclude the best choice for the followup decoding. std::unordered_set excludeCodeList; for (auto &best_node : best_nodes) { @@ -470,7 +495,8 @@ void RecodeBeamSearch::extractSymbolChoices(const UNICHARSET *unicharset) { int id = unichar_ids[bestPos]; const char *result = unicharset->id_to_unichar_ext(id); float rating = ratings[bestPos]; - ctc_choices[j - 1].push_back(std::pair(result, rating)); + ctc_choices[j - 1].push_back( + std::pair(result, rating)); } else { std::vector> choice; int id = unichar_ids[bestPos]; @@ -508,7 +534,8 @@ void RecodeBeamSearch::DebugBeams(const UNICHARSET &unicharset) const { continue; } // Print all the best scoring nodes for each unichar found. - tprintf("Position %d: %s+%s beam\n", p, d ? 
"Dict" : "Non-Dict", kNodeContNames[c]); + tprintf("Position %d: %s+%s beam\n", p, d ? "Dict" : "Non-Dict", + kNodeContNames[c]); DebugBeamPos(unicharset, beam_[p]->beams_[index]); } } @@ -516,7 +543,8 @@ void RecodeBeamSearch::DebugBeams(const UNICHARSET &unicharset) const { } // Generates debug output of the content of a single beam position. -void RecodeBeamSearch::DebugBeamPos(const UNICHARSET &unicharset, const RecodeHeap &heap) const { +void RecodeBeamSearch::DebugBeamPos(const UNICHARSET &unicharset, + const RecodeHeap &heap) const { std::vector unichar_bests(unicharset.size()); const RecodeNode *null_best = nullptr; int heap_size = heap.size(); @@ -547,12 +575,11 @@ void RecodeBeamSearch::DebugBeamPos(const UNICHARSET &unicharset, const RecodeHe // Returns the given best_nodes as unichar-ids/certs/ratings/xcoords skipping // duplicates, nulls and intermediate parts. /* static */ -void RecodeBeamSearch::ExtractPathAsUnicharIds(const std::vector &best_nodes, - std::vector *unichar_ids, - std::vector *certs, - std::vector *ratings, - std::vector *xcoords, - std::vector *character_boundaries) { +void RecodeBeamSearch::ExtractPathAsUnicharIds( + const std::vector &best_nodes, + std::vector *unichar_ids, std::vector *certs, + std::vector *ratings, std::vector *xcoords, + std::vector *character_boundaries) { unichar_ids->clear(); certs->clear(); ratings->clear(); @@ -575,7 +602,8 @@ void RecodeBeamSearch::ExtractPathAsUnicharIds(const std::vectorunichar_id; - if (unichar_id == UNICHAR_SPACE && !certs->empty() && best_nodes[t]->permuter != NO_PERM) { + if (unichar_id == UNICHAR_SPACE && !certs->empty() && + best_nodes[t]->permuter != NO_PERM) { // All the rating and certainty go on the previous character except // for the space itself. if (certainty < certs->back()) { @@ -592,8 +620,8 @@ void RecodeBeamSearch::ExtractPathAsUnicharIds(const std::vectorcertainty; // Special-case NO-PERM space to forget the certainty of the previous // nulls. 
See long comment in ContinueContext. - if (cert < certainty || - (unichar_id == UNICHAR_SPACE && best_nodes[t - 1]->permuter == NO_PERM)) { + if (cert < certainty || (unichar_id == UNICHAR_SPACE && + best_nodes[t - 1]->permuter == NO_PERM)) { certainty = cert; } rating -= cert; @@ -618,19 +646,23 @@ void RecodeBeamSearch::ExtractPathAsUnicharIds(const std::vector &xcoords, float scale_factor) { + const std::vector &xcoords, + float scale_factor) { // Make a fake blob for each non-zero label. C_BLOB_LIST blobs; C_BLOB_IT b_it(&blobs); for (int i = word_start; i < word_end; ++i) { - if (character_boundaries_.size() > (i + 1)) { - TBOX box(static_cast(std::floor(character_boundaries_[i] * scale_factor)) + + if (static_cast(i + 1) < character_boundaries_.size()) { + TBOX box(static_cast( + std::floor(character_boundaries_[i] * scale_factor)) + line_box.left(), line_box.bottom(), - static_cast(std::ceil(character_boundaries_[i + 1] * scale_factor)) + + static_cast( + std::ceil(character_boundaries_[i + 1] * scale_factor)) + line_box.left(), line_box.top()); b_it.add_after_then_move(C_BLOB::FakeBlob(box)); @@ -650,7 +682,9 @@ WERD_RES *RecodeBeamSearch::InitializeWord(bool leading_space, const TBOX &line_ // Fills top_n_flags_ with bools that are true iff the corresponding output // is one of the top_n. 
-void RecodeBeamSearch::ComputeTopN(const float *outputs, int num_outputs, int top_n) { +void RecodeBeamSearch::ComputeTopN(const float *outputs, int num_outputs, + int top_n) { + top_n_flags_.clear(); top_n_flags_.resize(num_outputs, TN_ALSO_RAN); top_code_ = -1; second_code_ = -1; @@ -709,14 +743,17 @@ void RecodeBeamSearch::ComputeTopN(const float *outputs, int num_outputs, int to top_n_flags_[null_char_] = TN_TOP2; } -void RecodeBeamSearch::ComputeSecTopN(std::unordered_set *exList, const float *outputs, - int num_outputs, int top_n) { +void RecodeBeamSearch::ComputeSecTopN(std::unordered_set *exList, + const float *outputs, int num_outputs, + int top_n) { + top_n_flags_.clear(); top_n_flags_.resize(num_outputs, TN_ALSO_RAN); top_code_ = -1; second_code_ = -1; top_heap_.clear(); for (int i = 0; i < num_outputs; ++i) { - if ((top_heap_.size() < top_n || outputs[i] > top_heap_.PeekTop().key()) && !exList->count(i)) { + if ((top_heap_.size() < top_n || outputs[i] > top_heap_.PeekTop().key()) && + !exList->count(i)) { TopPair entry(outputs[i], i); top_heap_.Push(&entry); if (top_heap_.size() > top_n) { @@ -744,10 +781,11 @@ void RecodeBeamSearch::ComputeSecTopN(std::unordered_set *exList, const flo // Adds the computation for the current time-step to the beam. Call at each // time-step in sequence from left to right. outputs is the activation vector // for the current timestep. 
-void RecodeBeamSearch::DecodeStep(const float *outputs, int t, double dict_ratio, - double cert_offset, double worst_dict_cert, +void RecodeBeamSearch::DecodeStep(const float *outputs, int t, + double dict_ratio, double cert_offset, + double worst_dict_cert, const UNICHARSET *charset, bool debug) { - if (t == beam_.size()) { + if (t == static_cast(beam_.size())) { beam_.push_back(new RecodeBeam); } RecodeBeam *step = beam_[t]; @@ -755,11 +793,12 @@ void RecodeBeamSearch::DecodeStep(const float *outputs, int t, double dict_ratio step->Clear(); if (t == 0) { // The first step can only use singles and initials. - ContinueContext(nullptr, BeamIndex(false, NC_ANYTHING, 0), outputs, TN_TOP2, charset, - dict_ratio, cert_offset, worst_dict_cert, step); + ContinueContext(nullptr, BeamIndex(false, NC_ANYTHING, 0), outputs, TN_TOP2, + charset, dict_ratio, cert_offset, worst_dict_cert, step); if (dict_ != nullptr) { - ContinueContext(nullptr, BeamIndex(true, NC_ANYTHING, 0), outputs, TN_TOP2, charset, - dict_ratio, cert_offset, worst_dict_cert, step); + ContinueContext(nullptr, BeamIndex(true, NC_ANYTHING, 0), outputs, + TN_TOP2, charset, dict_ratio, cert_offset, + worst_dict_cert, step); } } else { RecodeBeam *prev = beam_[t - 1]; @@ -791,8 +830,9 @@ void RecodeBeamSearch::DecodeStep(const float *outputs, int t, double dict_ratio // best first, but it comes before a lot of the worst, so it is slightly // more efficient than going forwards. 
for (int i = prev->beams_[index].size() - 1; i >= 0; --i) { - ContinueContext(&prev->beams_[index].get(i).data(), index, outputs, top_n, charset, - dict_ratio, cert_offset, worst_dict_cert, step); + ContinueContext(&prev->beams_[index].get(i).data(), index, outputs, + top_n, charset, dict_ratio, cert_offset, + worst_dict_cert, step); } } for (int index = 0; index < kNumBeams; ++index) { @@ -807,27 +847,29 @@ void RecodeBeamSearch::DecodeStep(const float *outputs, int t, double dict_ratio if (step->best_initial_dawgs_[c].code >= 0) { int index = BeamIndex(true, static_cast(c), 0); RecodeHeap *dawg_heap = &step->beams_[index]; - PushHeapIfBetter(kBeamWidths[0], &step->best_initial_dawgs_[c], dawg_heap); + PushHeapIfBetter(kBeamWidths[0], &step->best_initial_dawgs_[c], + dawg_heap); } } } } -void RecodeBeamSearch::DecodeSecondaryStep(const float *outputs, int t, double dict_ratio, - double cert_offset, double worst_dict_cert, - const UNICHARSET *charset, bool debug) { - if (t == secondary_beam_.size()) { +void RecodeBeamSearch::DecodeSecondaryStep( + const float *outputs, int t, double dict_ratio, double cert_offset, + double worst_dict_cert, const UNICHARSET *charset, bool debug) { + if (t == static_cast(secondary_beam_.size())) { secondary_beam_.push_back(new RecodeBeam); } RecodeBeam *step = secondary_beam_[t]; step->Clear(); if (t == 0) { // The first step can only use singles and initials. 
- ContinueContext(nullptr, BeamIndex(false, NC_ANYTHING, 0), outputs, TN_TOP2, charset, - dict_ratio, cert_offset, worst_dict_cert, step); + ContinueContext(nullptr, BeamIndex(false, NC_ANYTHING, 0), outputs, TN_TOP2, + charset, dict_ratio, cert_offset, worst_dict_cert, step); if (dict_ != nullptr) { - ContinueContext(nullptr, BeamIndex(true, NC_ANYTHING, 0), outputs, TN_TOP2, charset, - dict_ratio, cert_offset, worst_dict_cert, step); + ContinueContext(nullptr, BeamIndex(true, NC_ANYTHING, 0), outputs, + TN_TOP2, charset, dict_ratio, cert_offset, + worst_dict_cert, step); } } else { RecodeBeam *prev = secondary_beam_[t - 1]; @@ -859,8 +901,9 @@ void RecodeBeamSearch::DecodeSecondaryStep(const float *outputs, int t, double d // best first, but it comes before a lot of the worst, so it is slightly // more efficient than going forwards. for (int i = prev->beams_[index].size() - 1; i >= 0; --i) { - ContinueContext(&prev->beams_[index].get(i).data(), index, outputs, top_n, charset, - dict_ratio, cert_offset, worst_dict_cert, step); + ContinueContext(&prev->beams_[index].get(i).data(), index, outputs, + top_n, charset, dict_ratio, cert_offset, + worst_dict_cert, step); } } for (int index = 0; index < kNumBeams; ++index) { @@ -875,7 +918,8 @@ void RecodeBeamSearch::DecodeSecondaryStep(const float *outputs, int t, double d if (step->best_initial_dawgs_[c].code >= 0) { int index = BeamIndex(true, static_cast(c), 0); RecodeHeap *dawg_heap = &step->beams_[index]; - PushHeapIfBetter(kBeamWidths[0], &step->best_initial_dawgs_[c], dawg_heap); + PushHeapIfBetter(kBeamWidths[0], &step->best_initial_dawgs_[c], + dawg_heap); } } } @@ -885,10 +929,10 @@ void RecodeBeamSearch::DecodeSecondaryStep(const float *outputs, int t, double d // continuations of context prev, which is of the given length, using the // given network outputs to provide scores to the choices. Uses only those // choices for which top_n_flags[index] == top_n_flag. 
-void RecodeBeamSearch::ContinueContext(const RecodeNode *prev, int index, const float *outputs, - TopNState top_n_flag, const UNICHARSET *charset, - double dict_ratio, double cert_offset, - double worst_dict_cert, RecodeBeam *step) { +void RecodeBeamSearch::ContinueContext( + const RecodeNode *prev, int index, const float *outputs, + TopNState top_n_flag, const UNICHARSET *charset, double dict_ratio, + double cert_offset, double worst_dict_cert, RecodeBeam *step) { RecodedCharID prefix; RecodedCharID full_code; const RecodeNode *previous = prev; @@ -896,7 +940,8 @@ void RecodeBeamSearch::ContinueContext(const RecodeNode *prev, int index, const bool use_dawgs = IsDawgFromBeamsIndex(index); NodeContinuation prev_cont = ContinuationFromBeamsIndex(index); for (int p = length - 1; p >= 0; --p, previous = previous->prev) { - while (previous != nullptr && (previous->duplicate || previous->code == null_char_)) { + while (previous != nullptr && + (previous->duplicate || previous->code == null_char_)) { previous = previous->prev; } if (previous != nullptr) { @@ -907,26 +952,34 @@ void RecodeBeamSearch::ContinueContext(const RecodeNode *prev, int index, const if (prev != nullptr && !is_simple_text_) { if (top_n_flags_[prev->code] == top_n_flag) { if (prev_cont != NC_NO_DUP) { - float cert = NetworkIO::ProbToCertainty(outputs[prev->code]) + cert_offset; - PushDupOrNoDawgIfBetter(length, true, prev->code, prev->unichar_id, cert, worst_dict_cert, - dict_ratio, use_dawgs, NC_ANYTHING, prev, step); - } - if (prev_cont == NC_ANYTHING && top_n_flag == TN_TOP2 && prev->code != null_char_) { float cert = - NetworkIO::ProbToCertainty(outputs[prev->code] + outputs[null_char_]) + cert_offset; - PushDupOrNoDawgIfBetter(length, true, prev->code, prev->unichar_id, cert, worst_dict_cert, - dict_ratio, use_dawgs, NC_NO_DUP, prev, step); + NetworkIO::ProbToCertainty(outputs[prev->code]) + cert_offset; + PushDupOrNoDawgIfBetter(length, true, prev->code, prev->unichar_id, + cert, 
worst_dict_cert, dict_ratio, use_dawgs, + NC_ANYTHING, prev, step); + } + if (prev_cont == NC_ANYTHING && top_n_flag == TN_TOP2 && + prev->code != null_char_) { + float cert = NetworkIO::ProbToCertainty(outputs[prev->code] + + outputs[null_char_]) + + cert_offset; + PushDupOrNoDawgIfBetter(length, true, prev->code, prev->unichar_id, + cert, worst_dict_cert, dict_ratio, use_dawgs, + NC_NO_DUP, prev, step); } } if (prev_cont == NC_ONLY_DUP) { return; } - if (prev->code != null_char_ && length > 0 && top_n_flags_[null_char_] == top_n_flag) { + if (prev->code != null_char_ && length > 0 && + top_n_flags_[null_char_] == top_n_flag) { // Allow nulls within multi code sequences, as the nulls within are not // explicitly included in the code sequence. - float cert = NetworkIO::ProbToCertainty(outputs[null_char_]) + cert_offset; - PushDupOrNoDawgIfBetter(length, false, null_char_, INVALID_UNICHAR_ID, cert, worst_dict_cert, - dict_ratio, use_dawgs, NC_ANYTHING, prev, step); + float cert = + NetworkIO::ProbToCertainty(outputs[null_char_]) + cert_offset; + PushDupOrNoDawgIfBetter(length, false, null_char_, INVALID_UNICHAR_ID, + cert, worst_dict_cert, dict_ratio, use_dawgs, + NC_ANYTHING, prev, step); } } const std::vector *final_codes = recoder_.GetFinalCodes(prefix); @@ -952,18 +1005,19 @@ void RecodeBeamSearch::ContinueContext(const RecodeNode *prev, int index, const !charset->get_enabled(unichar_id)) { continue; // disabled by whitelist/blacklist } - ContinueUnichar(code, unichar_id, cert, worst_dict_cert, dict_ratio, use_dawgs, NC_ANYTHING, - prev, step); + ContinueUnichar(code, unichar_id, cert, worst_dict_cert, dict_ratio, + use_dawgs, NC_ANYTHING, prev, step); if (top_n_flag == TN_TOP2 && code != null_char_) { float prob = outputs[code] + outputs[null_char_]; - if (prev != nullptr && prev_cont == NC_ANYTHING && prev->code != null_char_ && + if (prev != nullptr && prev_cont == NC_ANYTHING && + prev->code != null_char_ && ((prev->code == top_code_ && code == second_code_) 
|| (code == top_code_ && prev->code == second_code_))) { prob += outputs[prev->code]; } - float cert = NetworkIO::ProbToCertainty(prob) + cert_offset; - ContinueUnichar(code, unichar_id, cert, worst_dict_cert, dict_ratio, use_dawgs, NC_ONLY_DUP, - prev, step); + cert = NetworkIO::ProbToCertainty(prob) + cert_offset; + ContinueUnichar(code, unichar_id, cert, worst_dict_cert, dict_ratio, + use_dawgs, NC_ONLY_DUP, prev, step); } } } @@ -977,37 +1031,44 @@ void RecodeBeamSearch::ContinueContext(const RecodeNode *prev, int index, const continue; } float cert = NetworkIO::ProbToCertainty(outputs[code]) + cert_offset; - PushDupOrNoDawgIfBetter(length + 1, false, code, INVALID_UNICHAR_ID, cert, worst_dict_cert, - dict_ratio, use_dawgs, NC_ANYTHING, prev, step); + PushDupOrNoDawgIfBetter(length + 1, false, code, INVALID_UNICHAR_ID, cert, + worst_dict_cert, dict_ratio, use_dawgs, + NC_ANYTHING, prev, step); if (top_n_flag == TN_TOP2 && code != null_char_) { float prob = outputs[code] + outputs[null_char_]; - if (prev != nullptr && prev_cont == NC_ANYTHING && prev->code != null_char_ && + if (prev != nullptr && prev_cont == NC_ANYTHING && + prev->code != null_char_ && ((prev->code == top_code_ && code == second_code_) || (code == top_code_ && prev->code == second_code_))) { prob += outputs[prev->code]; } - float cert = NetworkIO::ProbToCertainty(prob) + cert_offset; - PushDupOrNoDawgIfBetter(length + 1, false, code, INVALID_UNICHAR_ID, cert, worst_dict_cert, - dict_ratio, use_dawgs, NC_ONLY_DUP, prev, step); + cert = NetworkIO::ProbToCertainty(prob) + cert_offset; + PushDupOrNoDawgIfBetter(length + 1, false, code, INVALID_UNICHAR_ID, + cert, worst_dict_cert, dict_ratio, use_dawgs, + NC_ONLY_DUP, prev, step); } } } } // Continues for a new unichar, using dawg or non-dawg as per flag. 
-void RecodeBeamSearch::ContinueUnichar(int code, int unichar_id, float cert, float worst_dict_cert, - float dict_ratio, bool use_dawgs, NodeContinuation cont, - const RecodeNode *prev, RecodeBeam *step) { +void RecodeBeamSearch::ContinueUnichar(int code, int unichar_id, float cert, + float worst_dict_cert, float dict_ratio, + bool use_dawgs, NodeContinuation cont, + const RecodeNode *prev, + RecodeBeam *step) { if (use_dawgs) { if (cert > worst_dict_cert) { ContinueDawg(code, unichar_id, cert, cont, prev, step); } } else { RecodeHeap *nodawg_heap = &step->beams_[BeamIndex(false, cont, 0)]; - PushHeapIfBetter(kBeamWidths[0], code, unichar_id, TOP_CHOICE_PERM, false, false, false, false, - cert * dict_ratio, prev, nullptr, nodawg_heap); - if (dict_ != nullptr && ((unichar_id == UNICHAR_SPACE && cert > worst_dict_cert) || - !dict_->getUnicharset().IsSpaceDelimited(unichar_id))) { + PushHeapIfBetter(kBeamWidths[0], code, unichar_id, TOP_CHOICE_PERM, false, + false, false, false, cert * dict_ratio, prev, nullptr, + nodawg_heap); + if (dict_ != nullptr && + ((unichar_id == UNICHAR_SPACE && cert > worst_dict_cert) || + !dict_->getUnicharset().IsSpaceDelimited(unichar_id))) { // Any top choice position that can start a new word, ie a space or // any non-space-delimited character, should also be considered // by the dawg search, so push initial dawg to the dawg heap. 
@@ -1027,8 +1088,8 @@ void RecodeBeamSearch::ContinueUnichar(int code, int unichar_id, float cert, flo } else { dawg_cert *= dict_ratio; } - PushInitialDawgIfBetter(code, unichar_id, permuter, false, false, dawg_cert, cont, prev, - step); + PushInitialDawgIfBetter(code, unichar_id, permuter, false, false, + dawg_cert, cont, prev, step); } } } @@ -1036,13 +1097,14 @@ void RecodeBeamSearch::ContinueUnichar(int code, int unichar_id, float cert, flo // Adds a RecodeNode composed of the tuple (code, unichar_id, cert, prev, // appropriate-dawg-args, cert) to the given heap (dawg_beam_) if unichar_id // is a valid continuation of whatever is in prev. -void RecodeBeamSearch::ContinueDawg(int code, int unichar_id, float cert, NodeContinuation cont, +void RecodeBeamSearch::ContinueDawg(int code, int unichar_id, float cert, + NodeContinuation cont, const RecodeNode *prev, RecodeBeam *step) { RecodeHeap *dawg_heap = &step->beams_[BeamIndex(true, cont, 0)]; RecodeHeap *nodawg_heap = &step->beams_[BeamIndex(false, cont, 0)]; if (unichar_id == INVALID_UNICHAR_ID) { - PushHeapIfBetter(kBeamWidths[0], code, unichar_id, NO_PERM, false, false, false, false, cert, - prev, nullptr, dawg_heap); + PushHeapIfBetter(kBeamWidths[0], code, unichar_id, NO_PERM, false, false, + false, false, cert, prev, nullptr, dawg_heap); return; } // Avoid dictionary probe if score a total loss. 
@@ -1050,8 +1112,10 @@ void RecodeBeamSearch::ContinueDawg(int code, int unichar_id, float cert, NodeCo if (prev != nullptr) { score += prev->score; } - if (dawg_heap->size() >= kBeamWidths[0] && score <= dawg_heap->PeekTop().data().score && - nodawg_heap->size() >= kBeamWidths[0] && score <= nodawg_heap->PeekTop().data().score) { + if (dawg_heap->size() >= kBeamWidths[0] && + score <= dawg_heap->PeekTop().data().score && + nodawg_heap->size() >= kBeamWidths[0] && + score <= nodawg_heap->PeekTop().data().score) { return; } const RecodeNode *uni_prev = prev; @@ -1065,10 +1129,11 @@ void RecodeBeamSearch::ContinueDawg(int code, int unichar_id, float cert, NodeCo if (uni_prev != nullptr && uni_prev->end_of_word) { // Space is good. Push initial state, to the dawg beam and a regular // space to the top choice beam. - PushInitialDawgIfBetter(code, unichar_id, uni_prev->permuter, false, false, cert, cont, prev, - step); - PushHeapIfBetter(kBeamWidths[0], code, unichar_id, uni_prev->permuter, false, false, false, - false, cert, prev, nullptr, nodawg_heap); + PushInitialDawgIfBetter(code, unichar_id, uni_prev->permuter, false, + false, cert, cont, prev, step); + PushHeapIfBetter(kBeamWidths[0], code, unichar_id, uni_prev->permuter, + false, false, false, false, cert, prev, nullptr, + nodawg_heap); } return; } else if (uni_prev != nullptr && uni_prev->start_of_dawg && @@ -1092,18 +1157,21 @@ void RecodeBeamSearch::ContinueDawg(int code, int unichar_id, float cert, NodeCo } else { return; // Can't continue if not a dict word. 
} - auto permuter = static_cast( - dict_->def_letter_is_okay(&dawg_args, dict_->getUnicharset(), unichar_id, false)); + auto permuter = static_cast(dict_->def_letter_is_okay( + &dawg_args, dict_->getUnicharset(), unichar_id, false)); if (permuter != NO_PERM) { - PushHeapIfBetter(kBeamWidths[0], code, unichar_id, permuter, false, word_start, - dawg_args.valid_end, false, cert, prev, dawg_args.updated_dawgs, dawg_heap); + PushHeapIfBetter(kBeamWidths[0], code, unichar_id, permuter, false, + word_start, dawg_args.valid_end, false, cert, prev, + dawg_args.updated_dawgs, dawg_heap); if (dawg_args.valid_end && !space_delimited_) { // We can start another word right away, so push initial state as well, // to the dawg beam, and the regular character to the top choice beam, // since non-dict words can start here too. - PushInitialDawgIfBetter(code, unichar_id, permuter, word_start, true, cert, cont, prev, step); - PushHeapIfBetter(kBeamWidths[0], code, unichar_id, permuter, false, word_start, true, false, - cert, prev, nullptr, nodawg_heap); + PushInitialDawgIfBetter(code, unichar_id, permuter, word_start, true, + cert, cont, prev, step); + PushHeapIfBetter(kBeamWidths[0], code, unichar_id, permuter, false, + word_start, true, false, cert, prev, nullptr, + nodawg_heap); } } else { delete updated_dawgs; @@ -1113,9 +1181,11 @@ void RecodeBeamSearch::ContinueDawg(int code, int unichar_id, float cert, NodeCo // Adds a RecodeNode composed of the tuple (code, unichar_id, // initial-dawg-state, prev, cert) to the given heap if/ there is room or if // better than the current worst element if already full. 
-void RecodeBeamSearch::PushInitialDawgIfBetter(int code, int unichar_id, PermuterType permuter, +void RecodeBeamSearch::PushInitialDawgIfBetter(int code, int unichar_id, + PermuterType permuter, bool start, bool end, float cert, - NodeContinuation cont, const RecodeNode *prev, + NodeContinuation cont, + const RecodeNode *prev, RecodeBeam *step) { RecodeNode *best_initial_dawg = &step->best_initial_dawgs_[cont]; float score = cert; @@ -1125,8 +1195,9 @@ void RecodeBeamSearch::PushInitialDawgIfBetter(int code, int unichar_id, Permute if (best_initial_dawg->code < 0 || score > best_initial_dawg->score) { auto *initial_dawgs = new DawgPositionVector; dict_->default_dawgs(initial_dawgs, false); - RecodeNode node(code, unichar_id, permuter, true, start, end, false, cert, score, prev, - initial_dawgs, ComputeCodeHash(code, false, prev)); + RecodeNode node(code, unichar_id, permuter, true, start, end, false, cert, + score, prev, initial_dawgs, + ComputeCodeHash(code, false, prev)); *best_initial_dawg = node; } } @@ -1135,22 +1206,23 @@ void RecodeBeamSearch::PushInitialDawgIfBetter(int code, int unichar_id, Permute // false, false, false, false, cert, prev, nullptr) to heap if there is room // or if better than the current worst element if already full. /* static */ -void RecodeBeamSearch::PushDupOrNoDawgIfBetter(int length, bool dup, int code, int unichar_id, - float cert, float worst_dict_cert, float dict_ratio, - bool use_dawgs, NodeContinuation cont, - const RecodeNode *prev, RecodeBeam *step) { +void RecodeBeamSearch::PushDupOrNoDawgIfBetter( + int length, bool dup, int code, int unichar_id, float cert, + float worst_dict_cert, float dict_ratio, bool use_dawgs, + NodeContinuation cont, const RecodeNode *prev, RecodeBeam *step) { int index = BeamIndex(use_dawgs, cont, length); if (use_dawgs) { if (cert > worst_dict_cert) { - PushHeapIfBetter(kBeamWidths[length], code, unichar_id, prev ? 
prev->permuter : NO_PERM, - false, false, false, dup, cert, prev, nullptr, &step->beams_[index]); + PushHeapIfBetter(kBeamWidths[length], code, unichar_id, + prev ? prev->permuter : NO_PERM, false, false, false, + dup, cert, prev, nullptr, &step->beams_[index]); } } else { cert *= dict_ratio; if (cert >= kMinCertainty || code == null_char_) { PushHeapIfBetter(kBeamWidths[length], code, unichar_id, - prev ? prev->permuter : TOP_CHOICE_PERM, false, false, false, dup, cert, - prev, nullptr, &step->beams_[index]); + prev ? prev->permuter : TOP_CHOICE_PERM, false, false, + false, dup, cert, prev, nullptr, &step->beams_[index]); } } } @@ -1159,17 +1231,19 @@ void RecodeBeamSearch::PushDupOrNoDawgIfBetter(int length, bool dup, int code, i // dawg_start, word_start, end, dup, cert, prev, d) to heap if there is room // or if better than the current worst element if already full. void RecodeBeamSearch::PushHeapIfBetter(int max_size, int code, int unichar_id, - PermuterType permuter, bool dawg_start, bool word_start, - bool end, bool dup, float cert, const RecodeNode *prev, - DawgPositionVector *d, RecodeHeap *heap) { + PermuterType permuter, bool dawg_start, + bool word_start, bool end, bool dup, + float cert, const RecodeNode *prev, + DawgPositionVector *d, + RecodeHeap *heap) { float score = cert; if (prev != nullptr) { score += prev->score; } if (heap->size() < max_size || score > heap->PeekTop().data().score) { uint64_t hash = ComputeCodeHash(code, dup, prev); - RecodeNode node(code, unichar_id, permuter, dawg_start, word_start, end, dup, cert, score, prev, - d, hash); + RecodeNode node(code, unichar_id, permuter, dawg_start, word_start, end, + dup, cert, score, prev, d, hash); if (UpdateHeapIfMatched(&node, heap)) { return; } @@ -1190,7 +1264,8 @@ void RecodeBeamSearch::PushHeapIfBetter(int max_size, int code, int unichar_id, // Adds a RecodeNode to heap if there is room // or if better than the current worst element if already full. 
-void RecodeBeamSearch::PushHeapIfBetter(int max_size, RecodeNode *node, RecodeHeap *heap) { +void RecodeBeamSearch::PushHeapIfBetter(int max_size, RecodeNode *node, + RecodeHeap *heap) { if (heap->size() < max_size || node->score > heap->PeekTop().data().score) { if (UpdateHeapIfMatched(node, heap)) { return; @@ -1206,7 +1281,8 @@ void RecodeBeamSearch::PushHeapIfBetter(int max_size, RecodeNode *node, RecodeHe // Searches the heap for a matching entry, and updates the score with // reshuffle if needed. Returns true if there was a match. -bool RecodeBeamSearch::UpdateHeapIfMatched(RecodeNode *new_node, RecodeHeap *heap) { +bool RecodeBeamSearch::UpdateHeapIfMatched(RecodeNode *new_node, + RecodeHeap *heap) { // TODO(rays) consider hash map instead of linear search. // It might not be faster because the hash map would have to be updated // every time a heap reshuffle happens, and that would be a lot of overhead. @@ -1214,7 +1290,8 @@ bool RecodeBeamSearch::UpdateHeapIfMatched(RecodeNode *new_node, RecodeHeap *hea for (auto &i : nodes) { RecodeNode &node = i.data(); if (node.code == new_node->code && node.code_hash == new_node->code_hash && - node.permuter == new_node->permuter && node.start_of_dawg == new_node->start_of_dawg) { + node.permuter == new_node->permuter && + node.start_of_dawg == new_node->start_of_dawg) { if (new_node->score > node.score) { // The new one is better. Update the entire node in the heap and // reshuffle. @@ -1244,7 +1321,8 @@ bool RecodeBeamSearch::AddToHeapIsAllowed(RecodeNode *new_node) { } // Computes and returns the code-hash for the given code and prev. -uint64_t RecodeBeamSearch::ComputeCodeHash(int code, bool dup, const RecodeNode *prev) const { +uint64_t RecodeBeamSearch::ComputeCodeHash(int code, bool dup, + const RecodeNode *prev) const { uint64_t hash = prev == nullptr ? 
0 : prev->code_hash; if (!dup && code != null_char_) { int num_classes = recoder_.code_range(); @@ -1260,8 +1338,9 @@ uint64_t RecodeBeamSearch::ComputeCodeHash(int code, bool dup, const RecodeNode // during Decode. On return the best_nodes vector essentially contains the set // of code, score pairs that make the optimal path with the constraint that // the recoder can decode the code sequence back to a sequence of unichar-ids. -void RecodeBeamSearch::ExtractBestPaths(std::vector *best_nodes, - std::vector *second_nodes) const { +void RecodeBeamSearch::ExtractBestPaths( + std::vector *best_nodes, + std::vector *second_nodes) const { // Scan both beams to extract the best and second best paths. const RecodeNode *best_node = nullptr; const RecodeNode *second_best_node = nullptr; @@ -1281,11 +1360,13 @@ void RecodeBeamSearch::ExtractBestPaths(std::vector *best_no // last valid unichar_id. const RecodeNode *dawg_node = node; while (dawg_node != nullptr && - (dawg_node->unichar_id == INVALID_UNICHAR_ID || dawg_node->duplicate)) { + (dawg_node->unichar_id == INVALID_UNICHAR_ID || + dawg_node->duplicate)) { dawg_node = dawg_node->prev; } if (dawg_node == nullptr || - (!dawg_node->end_of_word && dawg_node->unichar_id != UNICHAR_SPACE)) { + (!dawg_node->end_of_word && + dawg_node->unichar_id != UNICHAR_SPACE)) { // Dawg node is not valid. continue; } @@ -1293,7 +1374,8 @@ void RecodeBeamSearch::ExtractBestPaths(std::vector *best_no if (best_node == nullptr || node->score > best_node->score) { second_best_node = best_node; best_node = node; - } else if (second_best_node == nullptr || node->score > second_best_node->score) { + } else if (second_best_node == nullptr || + node->score > second_best_node->score) { second_best_node = node; } } @@ -1307,8 +1389,8 @@ void RecodeBeamSearch::ExtractBestPaths(std::vector *best_no // Helper backtracks through the lattice from the given node, storing the // path and reversing it. 
-void RecodeBeamSearch::ExtractPath(const RecodeNode *node, - std::vector *path) const { +void RecodeBeamSearch::ExtractPath( + const RecodeNode *node, std::vector *path) const { path->clear(); while (node != nullptr) { path->push_back(node); @@ -1317,7 +1399,8 @@ void RecodeBeamSearch::ExtractPath(const RecodeNode *node, std::reverse(path->begin(), path->end()); } -void RecodeBeamSearch::ExtractPath(const RecodeNode *node, std::vector *path, +void RecodeBeamSearch::ExtractPath(const RecodeNode *node, + std::vector *path, int limiter) const { int pathcounter = 0; path->clear(); @@ -1330,29 +1413,29 @@ void RecodeBeamSearch::ExtractPath(const RecodeNode *node, std::vector &path) const { - for (int c = 0; c < path.size(); ++c) { +void RecodeBeamSearch::DebugPath( + const UNICHARSET *unicharset, + const std::vector &path) const { + for (unsigned c = 0; c < path.size(); ++c) { const RecodeNode &node = *path[c]; - tprintf("%d ", c); + tprintf("%u ", c); node.Print(null_char_, *unicharset, 1); } } // Helper prints debug information on the given unichar path. 
-void RecodeBeamSearch::DebugUnicharPath(const UNICHARSET *unicharset, - const std::vector &path, - const std::vector &unichar_ids, - const std::vector &certs, - const std::vector &ratings, - const std::vector &xcoords) const { - int num_ids = unichar_ids.size(); +void RecodeBeamSearch::DebugUnicharPath( + const UNICHARSET *unicharset, const std::vector &path, + const std::vector &unichar_ids, const std::vector &certs, + const std::vector &ratings, const std::vector &xcoords) const { + auto num_ids = unichar_ids.size(); double total_rating = 0.0; - for (int c = 0; c < num_ids; ++c) { + for (unsigned c = 0; c < num_ids; ++c) { int coord = xcoords[c]; tprintf("%d %d=%s r=%g, c=%g, s=%d, e=%d, perm=%d\n", coord, unichar_ids[c], unicharset->debug_str(unichar_ids[c]).c_str(), ratings[c], certs[c], - path[coord]->start_of_word, path[coord]->end_of_word, path[coord]->permuter); + path[coord]->start_of_word, path[coord]->end_of_word, + path[coord]->permuter); total_rating += ratings[c]; } tprintf("Path total rating = %g\n", total_rating); diff --git a/src/lstm/series.cpp b/src/lstm/series.cpp index 715e1fa4d..3a1ed0982 100644 --- a/src/lstm/series.cpp +++ b/src/lstm/series.cpp @@ -160,16 +160,16 @@ bool Series::Backward(bool debug, const NetworkIO &fwd_deltas, NetworkScratch *s // Splits the series after the given index, returning the two parts and // deletes itself. The first part, up to network with index last_start, goes // into start, and the rest goes into end. 
-void Series::SplitAt(int last_start, Series **start, Series **end) { +void Series::SplitAt(unsigned last_start, Series **start, Series **end) { *start = nullptr; *end = nullptr; - if (last_start < 0 || last_start >= stack_.size()) { - tprintf("Invalid split index %d must be in range [0,%zu]!\n", last_start, stack_.size() - 1); + if (last_start >= stack_.size()) { + tprintf("Invalid split index %u must be in range [0,%zu]!\n", last_start, stack_.size() - 1); return; } auto *master_series = new Series("MasterSeries"); auto *boosted_series = new Series("BoostedSeries"); - for (int s = 0; s <= last_start; ++s) { + for (unsigned s = 0; s <= last_start; ++s) { if (s + 1 == stack_.size() && stack_[s]->type() == NT_SOFTMAX) { // Change the softmax to a tanh. auto *fc = static_cast(stack_[s]); @@ -178,7 +178,7 @@ void Series::SplitAt(int last_start, Series **start, Series **end) { master_series->AddToStack(stack_[s]); stack_[s] = nullptr; } - for (int s = last_start + 1; s < stack_.size(); ++s) { + for (unsigned s = last_start + 1; s < stack_.size(); ++s) { boosted_series->AddToStack(stack_[s]); stack_[s] = nullptr; } diff --git a/src/lstm/series.h b/src/lstm/series.h index 5075e5cdf..6d9965f4f 100644 --- a/src/lstm/series.h +++ b/src/lstm/series.h @@ -82,7 +82,7 @@ public: // deletes itself. The first part, up to network with index last_start, goes // into start, and the rest goes into end. TESS_API - void SplitAt(int last_start, Series **start, Series **end); + void SplitAt(unsigned last_start, Series **start, Series **end); // Appends the elements of the src series to this, removing from src and // deleting it. 
diff --git a/src/lstm/weightmatrix.cpp b/src/lstm/weightmatrix.cpp index e24f95f08..57a07dcb9 100644 --- a/src/lstm/weightmatrix.cpp +++ b/src/lstm/weightmatrix.cpp @@ -21,12 +21,12 @@ #include "intsimdmatrix.h" #include "simddetect.h" // for DotProduct #include "statistc.h" -#include "tprintf.h" +#include "tprintf.h" // forTFloat namespace tesseract { #if defined(ANDROID) -static inline double log2(double n) { +static inline TFloat log2(TFloat n) { return log(n) / log(2.0); } #endif // ANDROID @@ -34,7 +34,59 @@ static inline double log2(double n) { // Number of iterations after which the correction effectively becomes unity. const int kAdamCorrectionIterations = 200000; // Epsilon in Adam to prevent division by zero. -const double kAdamEpsilon = 1e-8; +const TFloat kAdamEpsilon = 1e-8; + +// Utility functions convert between double and float arrays. +#ifdef FAST_FLOAT +static void DoubleToFloat(const GENERIC_2D_ARRAY &src, GENERIC_2D_ARRAY &dst) { + const auto dim1 = src.dim1(); + const auto dim2 = src.dim2(); + dst.ResizeNoInit(dim1, dim2); + for (int i = 0; i < dim1; ++i) { + const auto *src_i = src[i]; + auto *dst_i = dst[i]; + for (int j = 0; j < dim2; ++j) { + dst_i[j] = static_cast(src_i[j]); + } + } +} +#endif + +static void FloatToDouble(const GENERIC_2D_ARRAY &src, GENERIC_2D_ARRAY &dst) { + const auto dim1 = src.dim1(); + const auto dim2 = src.dim2(); + dst.ResizeNoInit(dim1, dim2); + for (int i = 0; i < dim1; ++i) { + const auto *src_i = src[i]; + auto *dst_i = dst[i]; + for (int j = 0; j < dim2; ++j) { + dst_i[j] = static_cast(src_i[j]); + } + } +} + +static bool DeSerialize(TFile *fp, GENERIC_2D_ARRAY &tfloat_array) { +#ifdef FAST_FLOAT + GENERIC_2D_ARRAY double_array; + if (!double_array.DeSerialize(fp)) { + return false; + } + DoubleToFloat(double_array, tfloat_array); + return true; +#else + return tfloat_array.DeSerialize(fp); +#endif +} + +static bool Serialize(TFile *fp, const GENERIC_2D_ARRAY &tfloat_array) { +#ifdef FAST_FLOAT + 
GENERIC_2D_ARRAY double_array; + FloatToDouble(tfloat_array, double_array); + return double_array.Serialize(fp); +#else + return tfloat_array.Serialize(fp); +#endif +} // Computes matrix.vector v = Wu. // u is of size W.dim2() - add_bias_fwd and the output v is of size @@ -44,13 +96,13 @@ const double kAdamEpsilon = 1e-8; // If skip_bias_back, we are actually performing the backwards product on a // transposed matrix, so we need to drop the v output corresponding to the last // element in dim1. -static inline void MatrixDotVectorInternal(const GENERIC_2D_ARRAY &w, bool add_bias_fwd, - bool skip_bias_back, const double *u, double *v) { +static inline void MatrixDotVectorInternal(const GENERIC_2D_ARRAY &w, bool add_bias_fwd, + bool skip_bias_back, const TFloat *u, TFloat *v) { int num_results = w.dim1() - skip_bias_back; int extent = w.dim2() - add_bias_fwd; for (int i = 0; i < num_results; ++i) { - const double *wi = w[i]; - double total = DotProduct(wi, u, extent); + const TFloat *wi = w[i]; + TFloat total = DotProduct(wi, u, extent); if (add_bias_fwd) { total += wi[extent]; // The bias value. } @@ -58,8 +110,8 @@ static inline void MatrixDotVectorInternal(const GENERIC_2D_ARRAY &w, bo } } -// Copies the whole input transposed, converted to double, into *this. -void TransposedArray::Transpose(const GENERIC_2D_ARRAY &input) { +// Copies the whole input transposed, converted to TFloat, into *this. +void TransposedArray::Transpose(const GENERIC_2D_ARRAY &input) { int width = input.dim1(); int num_features = input.dim2(); ResizeNoInit(num_features, width); @@ -97,25 +149,25 @@ int WeightMatrix::InitWeightsFloat(int no, int ni, bool use_adam, float weight_r // for all outputs with negative code_map entries. Returns the new number of // weights. 
int WeightMatrix::RemapOutputs(const std::vector &code_map) { - GENERIC_2D_ARRAY old_wf(wf_); + GENERIC_2D_ARRAY old_wf(wf_); int old_no = wf_.dim1(); int new_no = code_map.size(); int ni = wf_.dim2(); - std::vector means(ni, 0.0); + std::vector means(ni, 0.0); for (int c = 0; c < old_no; ++c) { - const double *weights = wf_[c]; + const TFloat *weights = wf_[c]; for (int i = 0; i < ni; ++i) { means[i] += weights[i]; } } - for (double &mean : means) { + for (auto &mean : means) { mean /= old_no; } wf_.Resize(new_no, ni, 0.0); InitBackward(); for (int dest = 0; dest < new_no; ++dest) { int src = code_map[dest]; - const double *src_data = src >= 0 ? old_wf[src] : means.data(); + const TFloat *src_data = src >= 0 ? old_wf[src] : means.data(); memcpy(wf_[dest], src_data, ni * sizeof(*src_data)); } return ni * new_no; @@ -126,23 +178,23 @@ int WeightMatrix::RemapOutputs(const std::vector &code_map) { // Compute the max absolute value of the weight set. // Scale so the max absolute value becomes INT8_MAX. // Round to integer. -// Store a multiplicative scale factor (as a double) that will reproduce +// Store a multiplicative scale factor (as a TFloat) that will reproduce // the original value, subject to rounding errors. 
void WeightMatrix::ConvertToInt() { wi_.ResizeNoInit(wf_.dim1(), wf_.dim2()); scales_.reserve(wi_.dim1()); int dim2 = wi_.dim2(); for (int t = 0; t < wi_.dim1(); ++t) { - double *f_line = wf_[t]; + TFloat *f_line = wf_[t]; int8_t *i_line = wi_[t]; - double max_abs = 0.0; + TFloat max_abs = 0; for (int f = 0; f < dim2; ++f) { - double abs_val = fabs(f_line[f]); + TFloat abs_val = fabs(f_line[f]); if (abs_val > max_abs) { max_abs = abs_val; } } - double scale = max_abs / INT8_MAX; + TFloat scale = max_abs / INT8_MAX; scales_.push_back(scale / INT8_MAX); if (scale == 0.0) { scale = 1.0; @@ -194,29 +246,30 @@ bool WeightMatrix::Serialize(bool training, TFile *fp) const { if (!wi_.Serialize(fp)) { return false; } - // The scales stored in memory have an extra factor applied to them - // to allow faster operation. We have to remove that factor here - // before writing to disc. - auto scales = scales_; - for (auto &scale : scales) { - scale *= INT8_MAX; - } - uint32_t size = scales.size(); + uint32_t size = scales_.size(); if (!fp->Serialize(&size)) { return false; } - if (!fp->Serialize(&scales[0], size)) { - return false; + for (auto scale : scales_) { + // The scales stored in memory have an extra factor applied to them + // to allow faster operation. We have to remove that factor here + // before writing to disc. 
+ double value = scale * INT8_MAX; + if (!fp->Serialize(&value)) { + return false; + } } } else { - if (!wf_.Serialize(fp)) { + if (!tesseract::Serialize(fp, wf_)) { return false; } - if (training && !updates_.Serialize(fp)) { - return false; - } - if (training && use_adam_ && !dw_sq_sum_.Serialize(fp)) { - return false; + if (training) { + if (!tesseract::Serialize(fp, updates_)) { + return false; + } + if (use_adam_ && !tesseract::Serialize(fp, dw_sq_sum_)) { + return false; + } } } return true; @@ -242,6 +295,16 @@ bool WeightMatrix::DeSerialize(bool training, TFile *fp) { if (!fp->DeSerialize(&size)) { return false; } +#ifdef FAST_FLOAT + scales_.reserve(size); + for (auto n = size; n > 0; n--) { + double val; + if (!fp->DeSerialize(&val)) { + return false; + } + scales_.push_back(val / INT8_MAX); + } +#else scales_.resize(size); if (!fp->DeSerialize(&scales_[0], size)) { return false; @@ -249,22 +312,25 @@ bool WeightMatrix::DeSerialize(bool training, TFile *fp) { for (auto &scale : scales_) { scale /= INT8_MAX; } +#endif if (IntSimdMatrix::intSimdMatrix) { int32_t rounded_num_out; IntSimdMatrix::intSimdMatrix->Init(wi_, shaped_w_, rounded_num_out); scales_.resize(rounded_num_out); } } else { - if (!wf_.DeSerialize(fp)) { + if (!tesseract::DeSerialize(fp, wf_)) { return false; } if (training) { InitBackward(); - if (!updates_.DeSerialize(fp)) { + if (!tesseract::DeSerialize(fp, updates_)) { return false; } - if (use_adam_ && !dw_sq_sum_.DeSerialize(fp)) { - return false; + if (use_adam_) { + if (!tesseract::DeSerialize(fp, dw_sq_sum_)) { + return false; + } } } } @@ -274,7 +340,11 @@ bool WeightMatrix::DeSerialize(bool training, TFile *fp) { // As DeSerialize, but reads an old (float) format WeightMatrix for // backward compatibility. bool WeightMatrix::DeSerializeOld(bool training, TFile *fp) { - GENERIC_2D_ARRAY float_array; +#ifdef FAST_FLOAT + // Not implemented. 
+ ASSERT_HOST(!"not implemented"); + return false; +#else if (int_mode_) { if (!wi_.DeSerialize(fp)) { return false; @@ -288,23 +358,26 @@ bool WeightMatrix::DeSerializeOld(bool training, TFile *fp) { scales_.push_back(old_scale); } } else { + GENERIC_2D_ARRAY float_array; if (!float_array.DeSerialize(fp)) { return false; } - FloatToDouble(float_array, &wf_); + FloatToDouble(float_array, wf_); } if (training) { InitBackward(); + GENERIC_2D_ARRAY float_array; if (!float_array.DeSerialize(fp)) { return false; } - FloatToDouble(float_array, &updates_); + FloatToDouble(float_array, updates_); // Errs was only used in int training, which is now dead. if (!float_array.DeSerialize(fp)) { return false; } } return true; +#endif } // Computes matrix.vector v = Wu. @@ -312,12 +385,12 @@ bool WeightMatrix::DeSerializeOld(bool training, TFile *fp) { // u is imagined to have an extra element at the end with value 1, to // implement the bias, but it doesn't actually have it. // Asserts that the call matches what we have. -void WeightMatrix::MatrixDotVector(const double *u, double *v) const { +void WeightMatrix::MatrixDotVector(const TFloat *u, TFloat *v) const { assert(!int_mode_); MatrixDotVectorInternal(wf_, true, false, u, v); } -void WeightMatrix::MatrixDotVector(const int8_t *u, double *v) const { +void WeightMatrix::MatrixDotVector(const int8_t *u, TFloat *v) const { assert(int_mode_); if (IntSimdMatrix::intSimdMatrix) { IntSimdMatrix::intSimdMatrix->matrixDotVectorFunction(wi_.dim1(), wi_.dim2(), &shaped_w_[0], @@ -329,11 +402,11 @@ void WeightMatrix::MatrixDotVector(const int8_t *u, double *v) const { // MatrixDotVector for peep weights, MultiplyAccumulate adds the // component-wise products of *this[0] and v to inout. 
-void WeightMatrix::MultiplyAccumulate(const double *v, double *inout) { +void WeightMatrix::MultiplyAccumulate(const TFloat *v, TFloat *inout) { assert(!int_mode_); assert(wf_.dim1() == 1); int n = wf_.dim2(); - const double *u = wf_[0]; + const TFloat *u = wf_[0]; for (int i = 0; i < n; ++i) { inout[i] += u[i] * v[i]; } @@ -343,7 +416,7 @@ void WeightMatrix::MultiplyAccumulate(const double *v, double *inout) { // u is of size W.dim1() and the output v is of size W.dim2() - 1. // The last result is discarded, as v is assumed to have an imaginary // last value of 1, as with MatrixDotVector. -void WeightMatrix::VectorDotMatrix(const double *u, double *v) const { +void WeightMatrix::VectorDotMatrix(const TFloat *u, TFloat *v) const { assert(!int_mode_); MatrixDotVectorInternal(wf_t_, false, true, u, v); } @@ -367,13 +440,13 @@ void WeightMatrix::SumOuterTransposed(const TransposedArray &u, const Transposed # pragma omp parallel for num_threads(4) if (in_parallel) #endif for (int i = 0; i < num_outputs; ++i) { - double *dwi = dw_[i]; - const double *ui = u[i]; + TFloat *dwi = dw_[i]; + const TFloat *ui = u[i]; for (int j = 0; j < num_inputs; ++j) { dwi[j] = DotProduct(ui, v[j], num_samples); } // The last element of v is missing, presumed 1.0f. - double total = 0.0; + TFloat total = 0; for (int k = 0; k < num_samples; ++k) { total += ui[k]; } @@ -419,17 +492,17 @@ void WeightMatrix::AddDeltas(const WeightMatrix &other) { // Sums the products of weight updates in *this and other, splitting into // positive (same direction) in *same and negative (different direction) in // *changed. 
-void WeightMatrix::CountAlternators(const WeightMatrix &other, double *same, - double *changed) const { +void WeightMatrix::CountAlternators(const WeightMatrix &other, TFloat *same, + TFloat *changed) const { int num_outputs = updates_.dim1(); int num_inputs = updates_.dim2(); assert(num_outputs == other.updates_.dim1()); assert(num_inputs == other.updates_.dim2()); for (int i = 0; i < num_outputs; ++i) { - const double *this_i = updates_[i]; - const double *other_i = other.updates_[i]; + const TFloat *this_i = updates_[i]; + const TFloat *other_i = other.updates_[i]; for (int j = 0; j < num_inputs; ++j) { - double product = this_i[j] * other_i[j]; + TFloat product = this_i[j] * other_i[j]; if (product < 0.0) { *changed -= product; } else { @@ -442,10 +515,10 @@ void WeightMatrix::CountAlternators(const WeightMatrix &other, double *same, // Helper computes an integer histogram bucket for a weight and adds it // to the histogram. const int kHistogramBuckets = 16; -static void HistogramWeight(double weight, STATS *histogram) { +static void HistogramWeight(TFloat weight, STATS *histogram) { int bucket = kHistogramBuckets - 1; if (weight != 0.0) { - double logval = -log2(fabs(weight)); + TFloat logval = -log2(fabs(weight)); bucket = ClipToRange(IntCastRounded(logval), 0, kHistogramBuckets - 1); } histogram->add(bucket, 1); @@ -470,20 +543,4 @@ void WeightMatrix::Debug2D(const char *msg) { histogram.print(); } -// Utility function converts an array of float to the corresponding array -// of double. -/* static */ -void WeightMatrix::FloatToDouble(const GENERIC_2D_ARRAY &wf, GENERIC_2D_ARRAY *wd) { - int dim1 = wf.dim1(); - int dim2 = wf.dim2(); - wd->ResizeNoInit(dim1, dim2); - for (int i = 0; i < dim1; ++i) { - const float *wfi = wf[i]; - double *wdi = (*wd)[i]; - for (int j = 0; j < dim2; ++j) { - wdi[j] = static_cast(wfi[j]); - } - } -} - } // namespace tesseract. 
diff --git a/src/lstm/weightmatrix.h b/src/lstm/weightmatrix.h index bdcdc948a..a2cdaa527 100644 --- a/src/lstm/weightmatrix.h +++ b/src/lstm/weightmatrix.h @@ -22,17 +22,18 @@ #include #include "intsimdmatrix.h" #include "matrix.h" +#include "tesstypes.h" #include "tprintf.h" namespace tesseract { -// Convenience instantiation of GENERIC_2D_ARRAY with additional +// Convenience instantiation of GENERIC_2D_ARRAY with additional // operations to write a strided vector, so the transposed form of the input // is memory-contiguous. -class TransposedArray : public GENERIC_2D_ARRAY { +class TransposedArray : public GENERIC_2D_ARRAY { public: - // Copies the whole input transposed, converted to double, into *this. - void Transpose(const GENERIC_2D_ARRAY &input); + // Copies the whole input transposed, converted to TFloat, into *this. + void Transpose(const GENERIC_2D_ARRAY &input); // Writes a vector of data representing a timestep (gradients or sources). // The data is assumed to be of size1 in size (the strided dimension). ~TransposedArray() override; @@ -55,7 +56,7 @@ public: for (int y = 0; y < num_features; ++y) { for (int t = 0; t < width; ++t) { if (num == 0 || t < num || t + num >= width) { - tprintf(" %g", (*this)(y, t)); + tprintf(" %g", static_cast((*this)(y, t))); } } tprintf("\n"); @@ -107,11 +108,11 @@ public: return int_mode_ ? wi_.dim1() : wf_.dim1(); } // Provides one set of weights. Only used by peep weight maxpool. - const double *GetWeights(int index) const { + const TFloat *GetWeights(int index) const { return wf_[index]; } // Provides access to the deltas (dw_). - double GetDW(int i, int j) const { + TFloat GetDW(int i, int j) const { return dw_(i, j); } @@ -132,16 +133,16 @@ public: // u is imagined to have an extra element at the end with value 1, to // implement the bias, but it doesn't actually have it. // Asserts that the call matches what we have. 
- void MatrixDotVector(const double *u, double *v) const; - void MatrixDotVector(const int8_t *u, double *v) const; + void MatrixDotVector(const TFloat *u, TFloat *v) const; + void MatrixDotVector(const int8_t *u, TFloat *v) const; // MatrixDotVector for peep weights, MultiplyAccumulate adds the // component-wise products of *this[0] and v to inout. - void MultiplyAccumulate(const double *v, double *inout); + void MultiplyAccumulate(const TFloat *v, TFloat *inout); // Computes vector.matrix v = uW. // u is of size W.dim1() and the output v is of size W.dim2() - 1. // The last result is discarded, as v is assumed to have an imaginary // last value of 1, as with MatrixDotVector. - void VectorDotMatrix(const double *u, double *v) const; + void VectorDotMatrix(const TFloat *u, TFloat *v) const; // Fills dw_[i][j] with the dot product u[i][] . v[j][], using elements // from u and v, starting with u[i][offset] and v[j][offset]. // Note that (matching MatrixDotVector) v[last][] is missing, presumed 1.0. @@ -155,17 +156,13 @@ public: // Sums the products of weight updates in *this and other, splitting into // positive (same direction) in *same and negative (different direction) in // *changed. - void CountAlternators(const WeightMatrix &other, double *same, double *changed) const; + void CountAlternators(const WeightMatrix &other, TFloat *same, TFloat *changed) const; void Debug2D(const char *msg); - // Utility function converts an array of float to the corresponding array - // of double. - static void FloatToDouble(const GENERIC_2D_ARRAY &wf, GENERIC_2D_ARRAY *wd); - private: // Choice between float and 8 bit int implementations. - GENERIC_2D_ARRAY wf_; + GENERIC_2D_ARRAY wf_; GENERIC_2D_ARRAY wi_; // Transposed copy of wf_, used only for Backward, and set with each Update. TransposedArray wf_t_; @@ -175,14 +172,14 @@ private: bool use_adam_; // If we are using wi_, then scales_ is a factor to restore the row product // with a vector to the correct range. 
- std::vector scales_; + std::vector scales_; // Weight deltas. dw_ is the new delta, and updates_ the momentum-decaying // amount to be added to wf_/wi_. - GENERIC_2D_ARRAY dw_; - GENERIC_2D_ARRAY updates_; + GENERIC_2D_ARRAY dw_; + GENERIC_2D_ARRAY updates_; // Iff use_adam_, the sum of squares of dw_. The number of samples is // given to Update(). Serialized iff use_adam_. - GENERIC_2D_ARRAY dw_sq_sum_; + GENERIC_2D_ARRAY dw_sq_sum_; // The weights matrix reorganized in whatever way suits this instance. std::vector shaped_w_; }; diff --git a/src/opencl/openclwrapper.cpp b/src/opencl/openclwrapper.cpp index 578f8f505..587a8c972 100644 --- a/src/opencl/openclwrapper.cpp +++ b/src/opencl/openclwrapper.cpp @@ -47,10 +47,14 @@ # endif # include +# include # include // for memset, strcpy, ... # include # include "errcode.h" // for ASSERT_HOST +# include "image.h" // for Image + +namespace tesseract { GPUEnv OpenclDevice::gpuEnv; @@ -2193,7 +2197,7 @@ static double getLineMasksMorphMicroBench(GPUEnv *env, TessScoreEvaluationInputD # endif OpenclDevice::gpuEnv = *env; OpenclDevice::initMorphCLAllocations(wpl, input.height, input.pix); - Image pix_vline = nullptr, *pix_hline = nullptr, *pix_closed = nullptr; + Image pix_vline = nullptr, pix_hline = nullptr, pix_closed = nullptr; OpenclDevice::pixGetLinesCL(nullptr, input.pix, &pix_vline, &pix_hline, &pix_closed, true, closing_brick, closing_brick, max_line_width, max_line_width, min_line_length, min_line_length); @@ -2252,8 +2256,6 @@ static double getLineMasksMorphMicroBench(GPUEnv *env, TessScoreEvaluationInputD * Device Selection *****************************************************************************/ -# include - // encode score object as byte string static ds_status serializeScore(ds_device *device, uint8_t **serializedScore, unsigned int *serializedScoreSize) { @@ -2453,4 +2455,6 @@ bool OpenclDevice::selectedDeviceIsOpenCL() { return (device.type == DS_DEVICE_OPENCL_DEVICE); } +} // namespace + #endif diff 
--git a/src/opencl/openclwrapper.h b/src/opencl/openclwrapper.h index 9360b0d59..fcf535ad0 100644 --- a/src/opencl/openclwrapper.h +++ b/src/opencl/openclwrapper.h @@ -30,6 +30,9 @@ # include # endif +namespace tesseract { + +class Image; struct TessDeviceScore; // device type @@ -170,5 +173,7 @@ public: static bool selectedDeviceIsOpenCL(); }; +} + #endif // USE_OPENCL #endif // TESSERACT_OPENCL_OPENCLWRAPPER_H_ diff --git a/src/api/tesseractmain.cpp b/src/tesseract.cpp similarity index 94% rename from src/api/tesseractmain.cpp rename to src/tesseract.cpp index 71b237f82..9154db80a 100644 --- a/src/api/tesseractmain.cpp +++ b/src/tesseract.cpp @@ -1,5 +1,5 @@ /********************************************************************** - * File: tesseractmain.cpp + * File: tesseract.cpp * Description: Main program for merge of tess and editor. * Author: Ray Smith * @@ -25,8 +25,10 @@ #if defined(__USE_GNU) # include // for feenableexcept #endif +#include // for INT_MIN, INT_MAX #include // for std::getenv #include +#include // for std::map #include // std::unique_ptr #include @@ -238,6 +240,8 @@ static void PrintHelpExtra(const char *program) { " --user-words PATH Specify the location of user words file.\n" " --user-patterns PATH Specify the location of user patterns file.\n" " --dpi VALUE Specify DPI for input image.\n" + " --loglevel LEVEL Specify logging level. 
LEVEL can be\n" + " ALL, TRACE, DEBUG, INFO, WARN, ERROR, FATAL or OFF.\n" " -l LANG[+LANG] Specify language(s) used for OCR.\n" " -c VAR=VALUE Set value for config variables.\n" " Multiple -c arguments are allowed.\n" @@ -325,18 +329,14 @@ static bool SetVariablesFromCLArgs(tesseract::TessBaseAPI &api, int argc, char * static void PrintLangsList(tesseract::TessBaseAPI &api) { std::vector languages; api.GetAvailableLanguagesAsVector(&languages); - printf("List of available languages (%zu):\n", languages.size()); + printf("List of available languages in \"%s\" (%zu):\n", + api.GetDatapath(), languages.size()); for (const auto &language : languages) { printf("%s\n", language.c_str()); } api.End(); } -static void PrintBanner() { - tprintf("Tesseract Open Source OCR Engine v%s with Leptonica\n", - tesseract::TessBaseAPI::Version()); -} - /** * We have 2 possible sources of pagesegmode: a config file and * the command line. For backwards compatibility reasons, the @@ -403,6 +403,27 @@ static bool ParseArgs(int argc, char **argv, const char **lang, const char **ima } else if (strcmp(argv[i], "--dpi") == 0 && i + 1 < argc) { *dpi = atoi(argv[i + 1]); ++i; + } else if (strcmp(argv[i], "--loglevel") == 0 && i + 1 < argc) { + // Allow the log levels which are used by log4cxx. + const std::string loglevel_string = argv[++i]; + static const std::map loglevels { + {"ALL", INT_MIN}, + {"TRACE", 5000}, + {"DEBUG", 10000}, + {"INFO", 20000}, + {"WARN", 30000}, + {"ERROR", 40000}, + {"FATAL", 50000}, + {"OFF", INT_MAX}, + }; + try { + auto loglevel = loglevels.at(loglevel_string); + log_level = loglevel; + } catch(const std::out_of_range& e) { + // TODO: Allow numeric argument? 
+ tprintf("Error, unsupported --loglevel %s\n", loglevel_string.c_str()); + return false; + } } else if (strcmp(argv[i], "--user-words") == 0 && i + 1 < argc) { vars_vec->push_back("user_words_file"); vars_values->push_back(argv[i + 1]); @@ -608,7 +629,7 @@ static void PreloadRenderers(tesseract::TessBaseAPI &api, **********************************************************************/ int main(int argc, char **argv) { -#if defined(__USE_GNU) +#if defined(__USE_GNU) && defined(HAVE_FEENABLEEXCEPT) // Raise SIGFPE. # if defined(__clang__) // clang creates code which causes some FP exceptions, so don't enable those. @@ -654,12 +675,14 @@ int main(int argc, char **argv) { return EXIT_FAILURE; } - if (lang == nullptr) { - // Set default language if none was given. + bool in_recognition_mode = !list_langs && !print_parameters && !print_fonts_table; + + if (lang == nullptr && in_recognition_mode) { + // Set default language model if none was given and a model file is needed. lang = "eng"; } - if (image == nullptr && !list_langs && !print_parameters && !print_fonts_table) { + if (image == nullptr && in_recognition_mode) { return EXIT_SUCCESS; } @@ -668,7 +691,7 @@ int main(int argc, char **argv) { // first TessBaseAPI must be destructed, DawgCache must be the last object. 
tesseract::Dict::GlobalDawgCache(); - tesseract::TessBaseAPI api; + TessBaseAPI api; api.SetOutputName(outputbase); @@ -791,15 +814,7 @@ int main(int argc, char **argv) { PreloadRenderers(api, renderers, pagesegmode, outputbase); } - bool banner = false; - if (outputbase != nullptr && strcmp(outputbase, "-") && strcmp(outputbase, "stdout")) { - banner = true; - } - if (!renderers.empty()) { - if (banner) { - PrintBanner(); - } #ifdef DISABLED_LEGACY_ENGINE if (!osd_warning.empty()) { fprintf(stderr, "%s", osd_warning.c_str()); diff --git a/src/textord/alignedblob.h b/src/textord/alignedblob.h index ee9637068..32ed07896 100644 --- a/src/textord/alignedblob.h +++ b/src/textord/alignedblob.h @@ -26,9 +26,9 @@ namespace tesseract { -extern INT_VAR_H(textord_debug_bugs, 0, "Turn on output related to bugs in tab finding"); -extern INT_VAR_H(textord_debug_tabfind, 2, "Debug tab finding"); -extern BOOL_VAR_H(textord_debug_printable, false, "Make debug windows printable"); +extern INT_VAR_H(textord_debug_bugs); +extern INT_VAR_H(textord_debug_tabfind); +extern BOOL_VAR_H(textord_debug_printable); // Simple structure to hold the search parameters for AlignedBlob. // The members are mostly derived from constants, which are diff --git a/src/textord/baselinedetect.cpp b/src/textord/baselinedetect.cpp index 97bbb4e26..309ee51bf 100644 --- a/src/textord/baselinedetect.cpp +++ b/src/textord/baselinedetect.cpp @@ -63,11 +63,11 @@ const double kMinFittingLinespacings = 0.25; namespace tesseract { BaselineRow::BaselineRow(double line_spacing, TO_ROW *to_row) - : blobs_(to_row->blob_list()) - , baseline_pt1_(0.0f, 0.0f) - , baseline_pt2_(0.0f, 0.0f) - , baseline_error_(0.0) - , good_baseline_(false) { + : blobs_(to_row->blob_list()), + baseline_pt1_(0.0f, 0.0f), + baseline_pt2_(0.0f, 0.0f), + baseline_error_(0.0), + good_baseline_(false) { ComputeBoundingBox(); // Compute a scale factor for rounding to ints. 
disp_quant_factor_ = kOffsetQuantizationFactor * line_spacing; @@ -87,11 +87,11 @@ void BaselineRow::SetupOldLineParameters(TO_ROW *row) const { // Outputs diagnostic information. void BaselineRow::Print() const { - tprintf("Baseline (%g,%g)->(%g,%g), angle=%g, intercept=%g\n", baseline_pt1_.x(), - baseline_pt1_.y(), baseline_pt2_.x(), baseline_pt2_.y(), BaselineAngle(), - StraightYAtX(0.0)); - tprintf("Quant factor=%g, error=%g, good=%d, box:", disp_quant_factor_, baseline_error_, - good_baseline_); + tprintf("Baseline (%g,%g)->(%g,%g), angle=%g, intercept=%g\n", + baseline_pt1_.x(), baseline_pt1_.y(), baseline_pt2_.x(), + baseline_pt2_.y(), BaselineAngle(), StraightYAtX(0.0)); + tprintf("Quant factor=%g, error=%g, good=%d, box:", disp_quant_factor_, + baseline_error_, good_baseline_); bounding_box_.print(); } @@ -133,8 +133,9 @@ double BaselineRow::StraightYAtX(double x) const { if (denominator == 0.0) { return (baseline_pt1_.y() + baseline_pt2_.y()) / 2.0; } - return baseline_pt1_.y() + - (x - baseline_pt1_.x()) * (baseline_pt2_.y() - baseline_pt1_.y()) / denominator; + return baseline_pt1_.y() + (x - baseline_pt1_.x()) * + (baseline_pt2_.y() - baseline_pt1_.y()) / + denominator; } // Fits a straight baseline to the points. Returns true if it had enough @@ -170,7 +171,8 @@ bool BaselineRow::FitBaseline(bool use_box_bottoms) { baseline_error_ = fitter_.Fit(&pt1, &pt2); baseline_pt1_ = pt1; baseline_pt2_ = pt2; - if (baseline_error_ > max_baseline_error_ && fitter_.SufficientPointsForIndependentFit()) { + if (baseline_error_ > max_baseline_error_ && + fitter_.SufficientPointsForIndependentFit()) { // The fit was bad but there were plenty of points, so try skipping // the first and last few, and use the new line if it dramatically improves // the error of fit. @@ -184,7 +186,10 @@ bool BaselineRow::FitBaseline(bool use_box_bottoms) { int debug = 0; #ifdef kDebugYCoord Print(); - debug = bounding_box_.bottom() < kDebugYCoord && bounding_box_.top() > kDebugYCoord ? 
3 : 2; + debug = bounding_box_.bottom() < kDebugYCoord && + bounding_box_.top() > kDebugYCoord + ? 3 + : 2; #endif // Now we obtained a direction from that fit, see if we can improve the // fit using the same direction and some other start point. @@ -218,7 +223,8 @@ void BaselineRow::AdjustBaselineToParallel(int debug, const FCOORD &direction) { return; } #ifdef kDebugYCoord - if (bounding_box_.bottom() < kDebugYCoord && bounding_box_.top() > kDebugYCoord && debug < 3) + if (bounding_box_.bottom() < kDebugYCoord && + bounding_box_.top() > kDebugYCoord && debug < 3) debug = 3; #endif FitConstrainedIfBetter(debug, direction, 0.0, displacement_modes_[0]); @@ -226,7 +232,8 @@ void BaselineRow::AdjustBaselineToParallel(int debug, const FCOORD &direction) { // Modifies the baseline to snap to the textline grid if the existing // result is not good enough. -double BaselineRow::AdjustBaselineToGrid(int debug, const FCOORD &direction, double line_spacing, +double BaselineRow::AdjustBaselineToGrid(int debug, const FCOORD &direction, + double line_spacing, double line_offset) { if (blobs_->empty()) { if (debug > 1) { @@ -238,9 +245,10 @@ double BaselineRow::AdjustBaselineToGrid(int debug, const FCOORD &direction, dou // Find the displacement_modes_ entry nearest to the grid. 
double best_error = 0.0; int best_index = -1; - for (int i = 0; i < displacement_modes_.size(); ++i) { + for (unsigned i = 0; i < displacement_modes_.size(); ++i) { double blob_y = displacement_modes_[i]; - double error = BaselineBlock::SpacingModelError(blob_y, line_spacing, line_offset); + double error = + BaselineBlock::SpacingModelError(blob_y, line_spacing, line_offset); if (debug > 1) { tprintf("Mode at %g has error %g from model \n", blob_y, error); } @@ -263,9 +271,11 @@ double BaselineRow::AdjustBaselineToGrid(int debug, const FCOORD &direction, dou displacement_modes_[best_index]); bounding_box_.print(); } - FitConstrainedIfBetter(debug, direction, model_margin, displacement_modes_[best_index]); + FitConstrainedIfBetter(debug, direction, model_margin, + displacement_modes_[best_index]); } else if (debug > 1) { - tprintf("Linespacing model only moves current line by %g for row at:", shift); + tprintf("Linespacing model only moves current line by %g for row at:", + shift); bounding_box_.print(); } } else if (debug > 1) { @@ -296,7 +306,8 @@ void BaselineRow::SetupBlobDisplacements(const FCOORD &direction) { if (box.bottom() < kDebugYCoord && box.top() > kDebugYCoord) debug = true; #endif - FCOORD blob_pos((box.left() + box.right()) / 2.0f, blob->baseline_position()); + FCOORD blob_pos((box.left() + box.right()) / 2.0f, + blob->baseline_position()); double offset = direction * blob_pos; perp_blob_dists.push_back(offset); #ifdef kDebugYCoord @@ -338,24 +349,28 @@ void BaselineRow::SetupBlobDisplacements(const FCOORD &direction) { // Otherwise the new fit will only replace the old if it is really better, // or the old fit is marked bad and the new fit has sufficient points, as // well as being within the max_baseline_error_. 
-void BaselineRow::FitConstrainedIfBetter(int debug, const FCOORD &direction, double cheat_allowance, +void BaselineRow::FitConstrainedIfBetter(int debug, const FCOORD &direction, + double cheat_allowance, double target_offset) { double halfrange = fit_halfrange_ * direction.length(); double min_dist = target_offset - halfrange; double max_dist = target_offset + halfrange; ICOORD line_pt; - double new_error = fitter_.ConstrainedFit(direction, min_dist, max_dist, debug > 2, &line_pt); + double new_error = fitter_.ConstrainedFit(direction, min_dist, max_dist, + debug > 2, &line_pt); // Allow cheat_allowance off the new error new_error -= cheat_allowance; double old_angle = BaselineAngle(); double new_angle = direction.angle(); if (debug > 1) { - tprintf("Constrained error = %g, original = %g", new_error, baseline_error_); - tprintf(" angles = %g, %g, delta=%g vs threshold %g\n", old_angle, new_angle, - new_angle - old_angle, kMaxSkewDeviation); + tprintf("Constrained error = %g, original = %g", new_error, + baseline_error_); + tprintf(" angles = %g, %g, delta=%g vs threshold %g\n", old_angle, + new_angle, new_angle - old_angle, kMaxSkewDeviation); } - bool new_good_baseline = new_error <= max_baseline_error_ && - (cheat_allowance > 0.0 || fitter_.SufficientPointsForIndependentFit()); + bool new_good_baseline = + new_error <= max_baseline_error_ && + (cheat_allowance > 0.0 || fitter_.SufficientPointsForIndependentFit()); // The new will replace the old if any are true: // 1. the new error is better // 2. 
the old is NOT good, but the new is @@ -368,7 +383,8 @@ void BaselineRow::FitConstrainedIfBetter(int debug, const FCOORD &direction, dou baseline_pt2_ = baseline_pt1_ + direction; good_baseline_ = new_good_baseline; if (debug > 1) { - tprintf("Replacing with constrained baseline, good = %d\n", good_baseline_); + tprintf("Replacing with constrained baseline, good = %d\n", + good_baseline_); } } else if (debug > 1) { tprintf("Keeping old baseline\n"); @@ -400,14 +416,14 @@ void BaselineRow::ComputeBoundingBox() { } BaselineBlock::BaselineBlock(int debug_level, bool non_text, TO_BLOCK *block) - : block_(block) - , debug_level_(debug_level) - , non_text_block_(non_text) - , good_skew_angle_(false) - , skew_angle_(0.0) - , line_spacing_(block->line_spacing) - , line_offset_(0.0) - , model_error_(0.0) { + : block_(block), + debug_level_(debug_level), + non_text_block_(non_text), + good_skew_angle_(false), + skew_angle_(0.0), + line_spacing_(block->line_spacing), + line_offset_(0.0), + model_error_(0.0) { TO_ROW_IT row_it(block_->get_rows()); for (row_it.mark_cycle_pt(); !row_it.cycled_list(); row_it.forward()) { // Sort the blobs on the rows. @@ -418,7 +434,8 @@ BaselineBlock::BaselineBlock(int debug_level, bool non_text, TO_BLOCK *block) // Computes and returns the absolute error of the given perp_disp from the // given linespacing model. -double BaselineBlock::SpacingModelError(double perp_disp, double line_spacing, double line_offset) { +double BaselineBlock::SpacingModelError(double perp_disp, double line_spacing, + double line_offset) { // Round to the nearest multiple of line_spacing + line offset. 
int multiple = IntCastRounded((perp_disp - line_offset) / line_spacing); double model_y = line_spacing * multiple + line_offset; @@ -452,7 +469,8 @@ bool BaselineBlock::FitBaselinesAndFindSkew(bool use_box_bottoms) { good_skew_angle_ = false; } if (debug_level_ > 0) { - tprintf("Initial block skew angle = %g, good = %d\n", skew_angle_, good_skew_angle_); + tprintf("Initial block skew angle = %g, good = %d\n", skew_angle_, + good_skew_angle_); } return good_skew_angle_; } @@ -482,10 +500,12 @@ void BaselineBlock::ParallelizeBaselines(double default_block_skew) { // Enforce the line spacing model on all lines that don't yet have a good // baseline. // Start by finding the row that is best fitted to the model. - int best_row = 0; - double best_error = SpacingModelError(rows_[0]->PerpDisp(direction), line_spacing_, line_offset_); - for (int r = 1; r < rows_.size(); ++r) { - double error = SpacingModelError(rows_[r]->PerpDisp(direction), line_spacing_, line_offset_); + unsigned best_row = 0; + double best_error = SpacingModelError(rows_[0]->PerpDisp(direction), + line_spacing_, line_offset_); + for (unsigned r = 1; r < rows_.size(); ++r) { + double error = SpacingModelError(rows_[r]->PerpDisp(direction), + line_spacing_, line_offset_); if (error < best_error) { best_error = error; best_row = r; @@ -493,12 +513,14 @@ void BaselineBlock::ParallelizeBaselines(double default_block_skew) { } // Starting at the best fitting row, work outwards, syncing the offset. 
double offset = line_offset_; - for (int r = best_row + 1; r < rows_.size(); ++r) { - offset = rows_[r]->AdjustBaselineToGrid(debug_level_, direction, line_spacing_, offset); + for (auto r = best_row + 1; r < rows_.size(); ++r) { + offset = rows_[r]->AdjustBaselineToGrid(debug_level_, direction, + line_spacing_, offset); } offset = line_offset_; for (int r = best_row - 1; r >= 0; --r) { - offset = rows_[r]->AdjustBaselineToGrid(debug_level_, direction, line_spacing_, offset); + offset = rows_[r]->AdjustBaselineToGrid(debug_level_, direction, + line_spacing_, offset); } } @@ -506,7 +528,8 @@ void BaselineBlock::ParallelizeBaselines(double default_block_skew) { void BaselineBlock::SetupBlockParameters() const { if (line_spacing_ > 0.0) { // Where was block_line_spacing set before? - float min_spacing = std::min(block_->line_spacing, static_cast(line_spacing_)); + float min_spacing = + std::min(block_->line_spacing, static_cast(line_spacing_)); if (min_spacing < block_->line_size) { block_->line_size = min_spacing; } @@ -516,7 +539,7 @@ void BaselineBlock::SetupBlockParameters() const { } // Setup the parameters on all the rows. TO_ROW_IT row_it(block_->get_rows()); - for (int r = 0; r < rows_.size(); ++r, row_it.forward()) { + for (unsigned r = 0; r < rows_.size(); ++r, row_it.forward()) { BaselineRow *row = rows_[r]; TO_ROW *to_row = row_it.data(); row->SetupOldLineParameters(to_row); @@ -549,8 +572,8 @@ void BaselineBlock::PrepareForSplineFitting(ICOORD page_tr, bool remove_noise) { // As a side-effect, computes the xheights of the rows and the block. // Although x-height estimation is conceptually separate, it is part of // detecting perspective distortion and therefore baseline fitting. 
-void BaselineBlock::FitBaselineSplines(bool enable_splines, bool show_final_rows, - Textord *textord) { +void BaselineBlock::FitBaselineSplines(bool enable_splines, + bool show_final_rows, Textord *textord) { double gradient = tan(skew_angle_); FCOORD rotation(1.0f, 0.0f); @@ -565,8 +588,8 @@ void BaselineBlock::FitBaselineSplines(bool enable_splines, bool show_final_rows int32_t xstarts[2] = {block_box.left(), block_box.right()}; double coeffs[3] = {0.0, row->line_m(), row->line_c()}; row->baseline = QSPLINE(1, xstarts, coeffs); - textord->compute_row_xheight(row, block_->block->classify_rotation(), row->line_m(), - block_->line_size); + textord->compute_row_xheight(row, block_->block->classify_rotation(), + row->line_m(), block_->line_size); } } textord->compute_block_xheight(block_, gradient); @@ -599,7 +622,8 @@ void BaselineBlock::DrawFinalRows(const ICOORD &page_tr) { } plot_blob_list(win, &block_->blobs, ScrollView::MAGENTA, ScrollView::WHITE); // Show discarded blobs. - plot_blob_list(win, &block_->underlines, ScrollView::YELLOW, ScrollView::CORAL); + plot_blob_list(win, &block_->underlines, ScrollView::YELLOW, + ScrollView::CORAL); if (block_->blobs.length() > 0) { tprintf("%d blobs discarded as noise\n", block_->blobs.length()); } @@ -637,7 +661,7 @@ bool BaselineBlock::ComputeLineSpacing() { double max_baseline_error = kMaxBaselineError * line_spacing_; int non_trivial_gaps = 0; int fitting_gaps = 0; - for (int i = 1; i < row_positions.size(); ++i) { + for (unsigned i = 1; i < row_positions.size(); ++i) { double row_gap = fabs(row_positions[i - 1] - row_positions[i]); if (row_gap > max_baseline_error) { ++non_trivial_gaps; @@ -647,8 +671,9 @@ bool BaselineBlock::ComputeLineSpacing() { } } if (debug_level_ > 0) { - tprintf("Spacing %g, in %zu rows, %d gaps fitted out of %d non-trivial\n", line_spacing_, - row_positions.size(), fitting_gaps, non_trivial_gaps); + tprintf("Spacing %g, in %zu rows, %d gaps fitted out of %d non-trivial\n", + line_spacing_, 
row_positions.size(), fitting_gaps, + non_trivial_gaps); } return fitting_gaps > non_trivial_gaps * kMinFittingLinespacings; } @@ -677,7 +702,7 @@ void BaselineBlock::ComputeBaselinePositions(const FCOORD &direction, // of the spacings between adjacent overlapping textlines. void BaselineBlock::EstimateLineSpacing() { std::vector spacings; - for (int r = 0; r < rows_.size(); ++r) { + for (unsigned r = 0; r < rows_.size(); ++r) { BaselineRow *row = rows_[r]; // Exclude silly lines. if (fabs(row->BaselineAngle()) > M_PI * 0.25) { @@ -685,8 +710,9 @@ void BaselineBlock::EstimateLineSpacing() { } // Find the first row after row that overlaps it significantly. const TBOX &row_box = row->bounding_box(); - int r2; - for (r2 = r + 1; r2 < rows_.size() && !row_box.major_x_overlap(rows_[r2]->bounding_box()); + unsigned r2; + for (r2 = r + 1; r2 < rows_.size() && + !row_box.major_x_overlap(rows_[r2]->bounding_box()); ++r2) { ; } @@ -703,7 +729,8 @@ void BaselineBlock::EstimateLineSpacing() { // If we have at least one value, use it, otherwise leave the previous // value unchanged. if (!spacings.empty()) { - std::nth_element(spacings.begin(), spacings.begin() + spacings.size() / 2, spacings.end()); + std::nth_element(spacings.begin(), spacings.begin() + spacings.size() / 2, + spacings.end()); line_spacing_ = spacings[spacings.size() / 2]; if (debug_level_ > 1) { tprintf("Estimate of linespacing = %g\n", line_spacing_); @@ -718,14 +745,16 @@ void BaselineBlock::EstimateLineSpacing() { void BaselineBlock::RefineLineSpacing(const std::vector &positions) { double spacings[3], offsets[3], errors[3]; int index_range; - errors[0] = - FitLineSpacingModel(positions, line_spacing_, &spacings[0], &offsets[0], &index_range); + errors[0] = FitLineSpacingModel(positions, line_spacing_, &spacings[0], + &offsets[0], &index_range); if (index_range > 1) { double spacing_plus = line_spacing_ / (1.0 + 1.0 / index_range); // Try the hypotheses that there might be index_range +/- 1 line spaces. 
- errors[1] = FitLineSpacingModel(positions, spacing_plus, &spacings[1], &offsets[1], nullptr); + errors[1] = FitLineSpacingModel(positions, spacing_plus, &spacings[1], + &offsets[1], nullptr); double spacing_minus = line_spacing_ / (1.0 - 1.0 / index_range); - errors[2] = FitLineSpacingModel(positions, spacing_minus, &spacings[2], &offsets[2], nullptr); + errors[2] = FitLineSpacingModel(positions, spacing_minus, &spacings[2], + &offsets[2], nullptr); for (int i = 1; i <= 2; ++i) { if (errors[i] < errors[0]) { spacings[0] = spacings[i]; @@ -739,8 +768,8 @@ void BaselineBlock::RefineLineSpacing(const std::vector &positions) { line_offset_ = offsets[0]; model_error_ = errors[0]; if (debug_level_ > 0) { - tprintf("Final linespacing model = %g + offset %g, error %g\n", line_spacing_, line_offset_, - model_error_); + tprintf("Final linespacing model = %g + offset %g, error %g\n", + line_spacing_, line_offset_, model_error_); } } } @@ -750,8 +779,9 @@ void BaselineBlock::RefineLineSpacing(const std::vector &positions) { // and the corresponding intercept in c_out, and the number of spacings seen // in index_delta. Returns the error of fit to the line spacing model. // Uses a simple linear regression, but optimized the offset using the median. -double BaselineBlock::FitLineSpacingModel(const std::vector &positions, double m_in, - double *m_out, double *c_out, int *index_delta) { +double BaselineBlock::FitLineSpacingModel(const std::vector &positions, + double m_in, double *m_out, + double *c_out, int *index_delta) { if (m_in == 0.0f || positions.size() < 2) { *m_out = m_in; *c_out = 0.0; @@ -762,6 +792,7 @@ double BaselineBlock::FitLineSpacingModel(const std::vector &positions, } std::vector offsets; // Get the offset (remainder) linespacing for each line and choose the median. 
+ offsets.reserve(positions.size()); for (double position : positions) { offsets.push_back(fmod(position, m_in)); } @@ -786,8 +817,8 @@ double BaselineBlock::FitLineSpacingModel(const std::vector &positions, } // Get the median offset. if (debug_level_ > 2) { - for (int i = 0; i < offsets.size(); ++i) { - tprintf("%d: %g\n", i, offsets[i]); + for (unsigned i = 0; i < offsets.size(); ++i) { + tprintf("%u: %g\n", i, offsets[i]); } } *c_out = MedianOfCircularValues(*m_out, offsets); @@ -795,7 +826,8 @@ double BaselineBlock::FitLineSpacingModel(const std::vector &positions, *c_out = 0.0; } if (debug_level_ > 1) { - tprintf("Median offset = %g, compared to mean of %g.\n", *c_out, llsq.c(*m_out)); + tprintf("Median offset = %g, compared to mean of %g.\n", *c_out, + llsq.c(*m_out)); } // Index_delta is the number of hypothesized line gaps present. if (index_delta != nullptr) { @@ -805,13 +837,14 @@ double BaselineBlock::FitLineSpacingModel(const std::vector &positions, // a full line-spacing in disagreement with the median. double rms_error = llsq.rms(*m_out, llsq.c(*m_out)); if (debug_level_ > 1) { - tprintf("Linespacing of y=%g x + %g improved to %g x + %g, rms=%g\n", m_in, median_offset, - *m_out, *c_out, rms_error); + tprintf("Linespacing of y=%g x + %g improved to %g x + %g, rms=%g\n", m_in, + median_offset, *m_out, *c_out, rms_error); } return rms_error; } -BaselineDetect::BaselineDetect(int debug_level, const FCOORD &page_skew, TO_BLOCK_LIST *blocks) +BaselineDetect::BaselineDetect(int debug_level, const FCOORD &page_skew, + TO_BLOCK_LIST *blocks) : page_skew_(page_skew), debug_level_(debug_level) { TO_BLOCK_IT it(blocks); for (it.mark_cycle_pt(); !it.cycled_list(); it.forward()) { @@ -863,8 +896,10 @@ void BaselineDetect::ComputeStraightBaselines(bool use_box_bottoms) { // x-heights and displaying debug information. // NOTE that ComputeStraightBaselines must have been called first as this // sets up data in the TO_ROWs upon which this function depends. 
-void BaselineDetect::ComputeBaselineSplinesAndXheights(const ICOORD &page_tr, bool enable_splines, - bool remove_noise, bool show_final_rows, +void BaselineDetect::ComputeBaselineSplinesAndXheights(const ICOORD &page_tr, + bool enable_splines, + bool remove_noise, + bool show_final_rows, Textord *textord) { for (auto bl_block : blocks_) { if (enable_splines) { diff --git a/src/textord/blkocc.cpp b/src/textord/blkocc.cpp index 9ad8d8342..26fe308c9 100644 --- a/src/textord/blkocc.cpp +++ b/src/textord/blkocc.cpp @@ -50,16 +50,11 @@ bool test_underline( // look for underlines int16_t baseline, ///< coords of baseline int16_t xheight ///< height of line ) { - int16_t occ; - int16_t blob_width; // width of blob - TBOX blob_box; // bounding box - int32_t desc_occ; - int32_t x_occ; - int32_t asc_occ; + TDimension occ; STATS projection; - blob_box = blob->bounding_box(); - blob_width = blob->bounding_box().width(); + auto blob_box = blob->bounding_box(); + auto blob_width = blob->bounding_box().width(); projection.set_range(blob_box.bottom(), blob_box.top() + 1); if (testing_on) { // blob->plot(to_win,GOLDENROD,GOLDENROD); @@ -73,21 +68,21 @@ bool test_underline( // look for underlines blob->bounding_box().right(), blob->bounding_box().top(), baseline); } horizontal_cblob_projection(blob, &projection); - desc_occ = 0; + int32_t desc_occ = 0; for (occ = blob_box.bottom(); occ < baseline; occ++) { if (occ <= blob_box.top() && projection.pile_count(occ) > desc_occ) { // max in region desc_occ = projection.pile_count(occ); } } - x_occ = 0; + int32_t x_occ = 0; for (occ = baseline; occ <= baseline + xheight; occ++) { if (occ >= blob_box.bottom() && occ <= blob_box.top() && projection.pile_count(occ) > x_occ) { // max in region x_occ = projection.pile_count(occ); } } - asc_occ = 0; + int32_t asc_occ = 0; for (occ = baseline + xheight + 1; occ <= blob_box.top(); occ++) { if (occ >= blob_box.bottom() && projection.pile_count(occ) > asc_occ) { asc_occ = projection.pile_count(occ); 
diff --git a/src/textord/blkocc.h b/src/textord/blkocc.h index d63a0ddb7..449cddbb2 100644 --- a/src/textord/blkocc.h +++ b/src/textord/blkocc.h @@ -234,11 +234,7 @@ public: #define END_OF_WERD_CODE 255 -extern BOOL_VAR_H(blockocc_show_result, false, "Show intermediate results"); -extern INT_VAR_H(blockocc_desc_height, 0, "Descender height after normalisation"); -extern INT_VAR_H(blockocc_asc_height, 255, "Ascender height after normalisation"); -extern INT_VAR_H(blockocc_band_count, 4, "Number of bands used"); -extern double_VAR_H(textord_underline_threshold, 0.9, "Fraction of width occupied"); +extern double_VAR_H(textord_underline_threshold); bool test_underline( // look for underlines bool testing_on, // drawing blob diff --git a/src/textord/cjkpitch.cpp b/src/textord/cjkpitch.cpp index 3726f2857..42bd488a6 100644 --- a/src/textord/cjkpitch.cpp +++ b/src/textord/cjkpitch.cpp @@ -22,6 +22,7 @@ #include "tovars.h" #include // for std::sort +#include #include // for std::vector namespace tesseract { @@ -137,13 +138,13 @@ public: float EstimateYFor(float x, float r) { ASSERT_HOST(finalized_); - int start = 0, end = values_.size(); + unsigned start = 0, end = values_.size(); // Because the number of samples (used_) is assumed to be small, // just use linear search to find values within the range. - while (start < values_.size() && values_[start].x < x * (1.0 - r)) { + while (start < values_.size() && values_[start].x < x * (1 - r)) { start++; } - while (end - 1 >= 0 && values_[end - 1].x > x * (1.0 + r)) { + while (end > 0 && values_[end - 1].x > x * (1 + r)) { end--; } @@ -157,7 +158,7 @@ public: // Compute weighted average of the values. 
float rc = 0; int vote = 0; - for (int i = start; i < end; i++) { + for (auto i = start; i < end; i++) { rc += values_[i].vote * x * values_[i].y / values_[i].x; vote += values_[i].vote; } @@ -437,7 +438,7 @@ private: } const float real_pitch = box_pitch(box1, box2); - if (fabs(real_pitch - pitch) < pitch * kFPTolerance) { + if (std::fabs(real_pitch - pitch) < pitch * kFPTolerance) { return true; } @@ -457,8 +458,8 @@ private: // Cleanup chars that are already merged to others. void DeleteChars() { - int index = 0; - for (int i = 0; i < characters_.size(); ++i) { + unsigned index = 0; + for (unsigned i = 0; i < characters_.size(); ++i) { if (!characters_[i].delete_flag()) { if (index != i) { characters_[index] = characters_[i]; @@ -645,7 +646,7 @@ void FPRow::EstimatePitch(bool pass1) { // So we collect only pitch values between two good // characters. and within tolerance in pass2. if (pass1 || - (prev_was_good && fabs(estimated_pitch_ - pitch) < kFPTolerance * estimated_pitch_)) { + (prev_was_good && std::fabs(estimated_pitch_ - pitch) < kFPTolerance * estimated_pitch_)) { good_pitches_.Add(pitch); if (!is_box_modified(i - 1) && !is_box_modified(i)) { good_gaps_.Add(gap); diff --git a/src/textord/colfind.cpp b/src/textord/colfind.cpp index 44f9e967b..1d4f5d614 100644 --- a/src/textord/colfind.cpp +++ b/src/textord/colfind.cpp @@ -38,7 +38,6 @@ #include "strokewidth.h" #include "tablefind.h" #include "workingpartset.h" -#include "tabletransfer.h" #include @@ -466,7 +465,9 @@ int ColumnFinder::FindBlocks(PageSegMode pageseg_mode, Image scaled_color, int s } #ifndef GRAPHICS_DISABLED - DisplayBlocks(blocks); + if (textord_tabfind_show_blocks) { + DisplayBlocks(blocks); + } #endif RotateAndReskewBlocks(input_is_rtl, to_blocks); int result = 0; @@ -513,22 +514,20 @@ void ColumnFinder::SetEquationDetect(EquationDetectBase *detect) { // Displays the blob and block bounding boxes in a window called Blocks. 
void ColumnFinder::DisplayBlocks(BLOCK_LIST *blocks) { - if (textord_tabfind_show_blocks) { - if (blocks_win_ == nullptr) { - blocks_win_ = MakeWindow(700, 300, "Blocks"); - } else { - blocks_win_->Clear(); - } - DisplayBoxes(blocks_win_); - BLOCK_IT block_it(blocks); - int serial = 1; - for (block_it.mark_cycle_pt(); !block_it.cycled_list(); block_it.forward()) { - BLOCK *block = block_it.data(); - block->pdblk.plot(blocks_win_, serial++, - textord_debug_printable ? ScrollView::BLUE : ScrollView::GREEN); - } - blocks_win_->Update(); + if (blocks_win_ == nullptr) { + blocks_win_ = MakeWindow(700, 300, "Blocks"); + } else { + blocks_win_->Clear(); } + DisplayBoxes(blocks_win_); + BLOCK_IT block_it(blocks); + int serial = 1; + for (block_it.mark_cycle_pt(); !block_it.cycled_list(); block_it.forward()) { + BLOCK *block = block_it.data(); + block->pdblk.plot(blocks_win_, serial++, + textord_debug_printable ? ScrollView::BLUE : ScrollView::GREEN); + } + blocks_win_->Update(); } // Displays the column edges at each grid y coordinate defined by @@ -709,7 +708,8 @@ bool ColumnFinder::AssignColumns(const PartSetVector &part_sets) { } else { column_set_costs[part_i][col_i] = INT32_MAX; if (debug) { - tprintf("Set id %d did not match at y=%d, lineset =%p\n", col_i, part_i, line_set); + tprintf("Set id %d did not match at y=%d, lineset =%p\n", + col_i, part_i, static_cast(line_set)); } } } @@ -1591,11 +1591,6 @@ void ColumnFinder::RotateAndReskewBlocks(bool input_is_rtl, TO_BLOCK_LIST *block tprintf("Block median size = (%d, %d)\n", block->median_size().x(), block->median_size().y()); } } - - auto &tables = uniqueInstance>(); - for (TessTable &mt : tables) { - mt.box.rotate_large(reskew_); - } } // Computes the rotations for the block (to make textlines horizontal) and diff --git a/src/textord/colpartition.cpp b/src/textord/colpartition.cpp index d57703ffa..7ddb88edb 100644 --- a/src/textord/colpartition.cpp +++ b/src/textord/colpartition.cpp @@ -25,9 +25,9 @@ #include 
"colpartitiongrid.h" #include "colpartitionset.h" #include "detlinefit.h" -#include "helpers.h" // for UpdateRange #include "dppoint.h" -#include "host.h" // for NearlyEqual +#include "helpers.h" // for UpdateRange +#include "host.h" // for NearlyEqual #include "imagefind.h" #include "workingpartset.h" @@ -89,14 +89,14 @@ const int kMaxColorDistance = 900; // blob_type is the blob_region_type_ of the blobs in this partition. // Vertical is the direction of logical vertical on the possibly skewed image. ColPartition::ColPartition(BlobRegionType blob_type, const ICOORD &vertical) - : left_margin_(-INT32_MAX) - , right_margin_(INT32_MAX) - , median_bottom_(INT32_MAX) - , median_top_(-INT32_MAX) - , median_left_(INT32_MAX) - , median_right_(-INT32_MAX) - , blob_type_(blob_type) - , vertical_(vertical) { + : left_margin_(-INT32_MAX), + right_margin_(INT32_MAX), + median_bottom_(INT32_MAX), + median_top_(-INT32_MAX), + median_left_(INT32_MAX), + median_right_(-INT32_MAX), + blob_type_(blob_type), + vertical_(vertical) { memset(special_blobs_densities_, 0, sizeof(special_blobs_densities_)); } @@ -105,8 +105,10 @@ ColPartition::ColPartition(BlobRegionType blob_type, const ICOORD &vertical) // WARNING: Despite being on C_LISTs, the BLOBNBOX owns the C_BLOB and // the ColPartition owns the BLOBNBOX!!! // Call DeleteBoxes before deleting the ColPartition. -ColPartition *ColPartition::FakePartition(const TBOX &box, PolyBlockType block_type, - BlobRegionType blob_type, BlobTextFlowType flow) { +ColPartition *ColPartition::FakePartition(const TBOX &box, + PolyBlockType block_type, + BlobRegionType blob_type, + BlobTextFlowType flow) { auto *part = new ColPartition(blob_type, ICOORD(0, 1)); part->set_type(block_type); part->set_flow(flow); @@ -124,7 +126,8 @@ ColPartition *ColPartition::FakePartition(const TBOX &box, PolyBlockType block_t // than the surrounding text that may be a dropcap, two or more vertically // touching characters, or some graphic element. 
// If the given list is not nullptr, the partition is also added to the list. -ColPartition *ColPartition::MakeBigPartition(BLOBNBOX *box, ColPartition_LIST *big_part_list) { +ColPartition *ColPartition::MakeBigPartition(BLOBNBOX *box, + ColPartition_LIST *big_part_list) { box->set_owner(nullptr); auto *single = new ColPartition(BRT_UNKNOWN, ICOORD(0, 1)); single->set_flow(BTFT_NONE); @@ -155,8 +158,9 @@ ColPartition::~ColPartition() { // Constructs a fake ColPartition with no BLOBNBOXes to represent a // horizontal or vertical line, given a type and a bounding box. -ColPartition *ColPartition::MakeLinePartition(BlobRegionType blob_type, const ICOORD &vertical, - int left, int bottom, int right, int top) { +ColPartition *ColPartition::MakeLinePartition(BlobRegionType blob_type, + const ICOORD &vertical, int left, + int bottom, int right, int top) { auto *part = new ColPartition(blob_type, vertical); part->bounding_box_ = TBOX(left, bottom, right, top); part->median_bottom_ = bottom; @@ -202,8 +206,9 @@ void ColPartition::AddBox(BLOBNBOX *bbox) { right_key_ = BoxRightKey(); } if (TabFind::WithinTestRegion(2, box.left(), box.bottom())) { - tprintf("Added box (%d,%d)->(%d,%d) left_blob_x_=%d, right_blob_x_ = %d\n", box.left(), - box.bottom(), box.right(), box.top(), bounding_box_.left(), bounding_box_.right()); + tprintf("Added box (%d,%d)->(%d,%d) left_blob_x_=%d, right_blob_x_ = %d\n", + box.left(), box.bottom(), box.right(), box.top(), + bounding_box_.left(), bounding_box_.right()); } } @@ -227,11 +232,13 @@ BLOBNBOX *ColPartition::BiggestBox() { for (bb_it.mark_cycle_pt(); !bb_it.cycled_list(); bb_it.forward()) { BLOBNBOX *bbox = bb_it.data(); if (IsVerticalType()) { - if (biggest == nullptr || bbox->bounding_box().width() > biggest->bounding_box().width()) { + if (biggest == nullptr || + bbox->bounding_box().width() > biggest->bounding_box().width()) { biggest = bbox; } } else { - if (biggest == nullptr || bbox->bounding_box().height() > 
biggest->bounding_box().height()) { + if (biggest == nullptr || + bbox->bounding_box().height() > biggest->bounding_box().height()) { biggest = bbox; } } @@ -362,7 +369,8 @@ bool ColPartition::IsLegal() { } return false; // Bounding box invalid. } - if (left_margin_ > bounding_box_.left() || right_margin_ < bounding_box_.right()) { + if (left_margin_ > bounding_box_.left() || + right_margin_ < bounding_box_.right()) { if (textord_debug_bugs) { tprintf("Margins invalid\n"); Print(); @@ -371,8 +379,8 @@ bool ColPartition::IsLegal() { } if (left_key_ > BoxLeftKey() || right_key_ < BoxRightKey()) { if (textord_debug_bugs) { - tprintf("Key inside box: %d v %d or %d v %d\n", left_key_, BoxLeftKey(), right_key_, - BoxRightKey()); + tprintf("Key inside box: %d v %d or %d v %d\n", left_key_, BoxLeftKey(), + right_key_, BoxRightKey()); Print(); } return false; // Keys inside the box. @@ -383,10 +391,12 @@ bool ColPartition::IsLegal() { // Returns true if the left and right edges are approximately equal. bool ColPartition::MatchingColumns(const ColPartition &other) const { int y = (MidY() + other.MidY()) / 2; - if (!NearlyEqual(other.LeftAtY(y) / kColumnWidthFactor, LeftAtY(y) / kColumnWidthFactor, 1)) { + if (!NearlyEqual(other.LeftAtY(y) / kColumnWidthFactor, + LeftAtY(y) / kColumnWidthFactor, 1)) { return false; } - if (!NearlyEqual(other.RightAtY(y) / kColumnWidthFactor, RightAtY(y) / kColumnWidthFactor, 1)) { + if (!NearlyEqual(other.RightAtY(y) / kColumnWidthFactor, + RightAtY(y) / kColumnWidthFactor, 1)) { return false; } return true; @@ -400,10 +410,14 @@ bool ColPartition::MatchingTextColor(const ColPartition &other) const { } // Colors must match for other to count. 
- double d_this1_o = ImageFind::ColorDistanceFromLine(other.color1_, other.color2_, color1_); - double d_this2_o = ImageFind::ColorDistanceFromLine(other.color1_, other.color2_, color2_); - double d_o1_this = ImageFind::ColorDistanceFromLine(color1_, color2_, other.color1_); - double d_o2_this = ImageFind::ColorDistanceFromLine(color1_, color2_, other.color2_); + double d_this1_o = + ImageFind::ColorDistanceFromLine(other.color1_, other.color2_, color1_); + double d_this2_o = + ImageFind::ColorDistanceFromLine(other.color1_, other.color2_, color2_); + double d_o1_this = + ImageFind::ColorDistanceFromLine(color1_, color2_, other.color1_); + double d_o2_this = + ImageFind::ColorDistanceFromLine(color1_, color2_, other.color2_); // All 4 distances must be small enough. return d_this1_o < kMaxColorDistance && d_this2_o < kMaxColorDistance && d_o1_this < kMaxColorDistance && d_o2_this < kMaxColorDistance; @@ -441,7 +455,8 @@ bool ColPartition::ConfirmNoTabViolation(const ColPartition &other) const { } // Returns true if other has a similar stroke width to this. 
-bool ColPartition::MatchingStrokeWidth(const ColPartition &other, double fractional_tolerance, +bool ColPartition::MatchingStrokeWidth(const ColPartition &other, + double fractional_tolerance, double constant_tolerance) const { int match_count = 0; int nonmatch_count = 0; @@ -450,8 +465,8 @@ bool ColPartition::MatchingStrokeWidth(const ColPartition &other, double fractio box_it.mark_cycle_pt(); other_it.mark_cycle_pt(); while (!box_it.cycled_list() && !other_it.cycled_list()) { - if (box_it.data()->MatchingStrokeWidth(*other_it.data(), fractional_tolerance, - constant_tolerance)) { + if (box_it.data()->MatchingStrokeWidth( + *other_it.data(), fractional_tolerance, constant_tolerance)) { ++match_count; } else { ++nonmatch_count; @@ -468,7 +483,8 @@ bool ColPartition::MatchingStrokeWidth(const ColPartition &other, double fractio // (1) this is a ColPartition containing only diacritics, and // (2) the base characters indicated on the diacritics all believably lie // within the text line of the candidate ColPartition. -bool ColPartition::OKDiacriticMerge(const ColPartition &candidate, bool debug) const { +bool ColPartition::OKDiacriticMerge(const ColPartition &candidate, + bool debug) const { BLOBNBOX_C_IT it(const_cast(&boxes_)); int min_top = INT32_MAX; int max_bottom = -INT32_MAX; @@ -490,13 +506,14 @@ bool ColPartition::OKDiacriticMerge(const ColPartition &candidate, bool debug) c } // If the intersection of all vertical ranges of all base characters // overlaps the median range of this, then it is OK. 
- bool result = min_top > candidate.median_bottom_ && max_bottom < candidate.median_top_; + bool result = + min_top > candidate.median_bottom_ && max_bottom < candidate.median_top_; if (debug) { if (result) { tprintf("OKDiacritic!\n"); } else { - tprintf("y ranges don\'t overlap: %d-%d / %d-%d\n", max_bottom, min_top, median_bottom_, - median_top_); + tprintf("y ranges don\'t overlap: %d-%d / %d-%d\n", max_bottom, min_top, + median_bottom_, median_top_); } } return result; @@ -591,7 +608,8 @@ int ColPartition::SpecialBlobsCount(const BlobSpecialTextType type) { return count; } -void ColPartition::SetSpecialBlobsDensity(const BlobSpecialTextType type, const float density) { +void ColPartition::SetSpecialBlobsDensity(const BlobSpecialTextType type, + const float density) { ASSERT_HOST(type < BSTT_COUNT); special_blobs_densities_[type] = density; } @@ -619,10 +637,12 @@ void ColPartition::ComputeSpecialBlobsDensity() { // Partnerships are added symmetrically to partner and this. void ColPartition::AddPartner(bool upper, ColPartition *partner) { if (upper) { - partner->lower_partners_.add_sorted(SortByBoxLeft, true, this); + partner->lower_partners_.add_sorted(SortByBoxLeft, true, + this); upper_partners_.add_sorted(SortByBoxLeft, true, partner); } else { - partner->upper_partners_.add_sorted(SortByBoxLeft, true, this); + partner->upper_partners_.add_sorted(SortByBoxLeft, true, + this); lower_partners_.add_sorted(SortByBoxLeft, true, partner); } } @@ -651,14 +671,16 @@ ColPartition *ColPartition::SingletonPartner(bool upper) { } // Merge with the other partition and delete it. -void ColPartition::Absorb(ColPartition *other, WidthCallback cb) { +void ColPartition::Absorb(ColPartition *other, const WidthCallback &cb) { // The result has to either own all of the blobs or none of them. // Verify the flag is consistent. ASSERT_HOST(owns_blobs() == other->owns_blobs()); // TODO(nbeato): check owns_blobs better. 
Right now owns_blobs // should always be true when this is called. So there is no issues. - if (TabFind::WithinTestRegion(2, bounding_box_.left(), bounding_box_.bottom()) || - TabFind::WithinTestRegion(2, other->bounding_box_.left(), other->bounding_box_.bottom())) { + if (TabFind::WithinTestRegion(2, bounding_box_.left(), + bounding_box_.bottom()) || + TabFind::WithinTestRegion(2, other->bounding_box_.left(), + other->bounding_box_.bottom())) { tprintf("Merging:"); Print(); other->Print(); @@ -669,8 +691,8 @@ void ColPartition::Absorb(ColPartition *other, WidthCallback cb) { for (int type = 0; type < BSTT_COUNT; ++type) { unsigned w1 = boxes_.length(); unsigned w2 = other->boxes_.length(); - float new_val = - special_blobs_densities_[type] * w1 + other->special_blobs_densities_[type] * w2; + float new_val = special_blobs_densities_[type] * w1 + + other->special_blobs_densities_[type] * w2; if (!w1 || !w2) { ASSERT_HOST((w1 + w2) > 0); special_blobs_densities_[type] = new_val / (w1 + w2); @@ -723,7 +745,8 @@ void ColPartition::Absorb(ColPartition *other, WidthCallback cb) { for (int upper = 0; upper < 2; ++upper) { ColPartition_CLIST partners; ColPartition_C_IT part_it(&partners); - part_it.add_list_after(upper ? &other->upper_partners_ : &other->lower_partners_); + part_it.add_list_after(upper ? &other->upper_partners_ + : &other->lower_partners_); for (part_it.move_to_first(); !part_it.empty(); part_it.forward()) { ColPartition *partner = part_it.extract(); partner->RemovePartner(!upper, other); @@ -747,7 +770,8 @@ void ColPartition::Absorb(ColPartition *other, WidthCallback cb) { // the text involved, and is usually a fraction of the median size of merge1 // and/or merge2, or this. // TODO(rays) Determine whether vertical text needs to be considered. 
-bool ColPartition::OKMergeOverlap(const ColPartition &merge1, const ColPartition &merge2, +bool ColPartition::OKMergeOverlap(const ColPartition &merge1, + const ColPartition &merge2, int ok_box_overlap, bool debug) { // Vertical partitions are not allowed to be involved. if (IsVerticalType() || merge1.IsVerticalType() || merge2.IsVerticalType()) { @@ -916,7 +940,8 @@ void ColPartition::ComputeLimits() { if (it.empty()) { return; } - if (IsImageType() || blob_type() == BRT_RECTIMAGE || blob_type() == BRT_POLYIMAGE) { + if (IsImageType() || blob_type() == BRT_RECTIMAGE || + blob_type() == BRT_POLYIMAGE) { median_top_ = bounding_box_.top(); median_bottom_ = bounding_box_.bottom(); median_height_ = bounding_box_.height(); @@ -957,7 +982,8 @@ void ColPartition::ComputeLimits() { Print(); } if (left_margin_ > bounding_box_.left() && textord_debug_bugs) { - tprintf("Made partition with bad left coords, %d > %d\n", left_margin_, bounding_box_.left()); + tprintf("Made partition with bad left coords, %d > %d\n", left_margin_, + bounding_box_.left()); Print(); } // Fix partner lists. 
The bounding box has changed and partners are stored @@ -973,8 +999,9 @@ void ColPartition::ComputeLimits() { partner->AddPartner(!upper, this); } } - if (TabFind::WithinTestRegion(2, bounding_box_.left(), bounding_box_.bottom())) { - tprintf("Recomputed box for partition %p\n", this); + if (TabFind::WithinTestRegion(2, bounding_box_.left(), + bounding_box_.bottom())) { + tprintf("Recomputed box for partition %p\n", static_cast(this)); Print(); } } @@ -998,10 +1025,12 @@ void ColPartition::SetPartitionType(int resolution, ColPartitionSet *columns) { int first_spanned_col = -1; ColumnSpanningType span_type = columns->SpanningType( resolution, bounding_box_.left(), bounding_box_.right(), - std::min(bounding_box_.height(), bounding_box_.width()), MidY(), left_margin_, right_margin_, - &first_column_, &last_column_, &first_spanned_col); + std::min(bounding_box_.height(), bounding_box_.width()), MidY(), + left_margin_, right_margin_, &first_column_, &last_column_, + &first_spanned_col); column_set_ = columns; - if (first_column_ < last_column_ && span_type == CST_PULLOUT && !IsLineType()) { + if (first_column_ < last_column_ && span_type == CST_PULLOUT && + !IsLineType()) { // Unequal columns may indicate that the pullout spans one of the columns // it lies in, so force it to be allocated to just that column. if (first_spanned_col >= 0) { @@ -1026,8 +1055,8 @@ void ColPartition::SetPartitionType(int resolution, ColPartitionSet *columns) { // in the columns. 
PolyBlockType ColPartition::PartitionType(ColumnSpanningType flow) const { if (flow == CST_NOISE) { - if (blob_type_ != BRT_HLINE && blob_type_ != BRT_VLINE && blob_type_ != BRT_RECTIMAGE && - blob_type_ != BRT_VERT_TEXT) { + if (blob_type_ != BRT_HLINE && blob_type_ != BRT_VLINE && + blob_type_ != BRT_RECTIMAGE && blob_type_ != BRT_VERT_TEXT) { return PT_NOISE; } flow = CST_FLOWING; @@ -1075,18 +1104,18 @@ PolyBlockType ColPartition::PartitionType(ColumnSpanningType flow) const { // Returns the first and last column touched by this partition. // resolution refers to the ppi resolution of the image. -void ColPartition::ColumnRange(int resolution, ColPartitionSet *columns, int *first_col, - int *last_col) { +void ColPartition::ColumnRange(int resolution, ColPartitionSet *columns, + int *first_col, int *last_col) { int first_spanned_col = -1; - ColumnSpanningType span_type = - columns->SpanningType(resolution, bounding_box_.left(), bounding_box_.right(), - std::min(bounding_box_.height(), bounding_box_.width()), MidY(), - left_margin_, right_margin_, first_col, last_col, &first_spanned_col); + ColumnSpanningType span_type = columns->SpanningType( + resolution, bounding_box_.left(), bounding_box_.right(), + std::min(bounding_box_.height(), bounding_box_.width()), MidY(), + left_margin_, right_margin_, first_col, last_col, &first_spanned_col); type_ = PartitionType(span_type); } // Sets the internal flags good_width_ and good_column_. 
-void ColPartition::SetColumnGoodness(WidthCallback cb) { +void ColPartition::SetColumnGoodness(const WidthCallback &cb) { int y = MidY(); int width = RightAtY(y) - LeftAtY(y); good_width_ = cb(width); @@ -1127,10 +1156,12 @@ bool ColPartition::MarkAsLeaderIfMonospaced() { double gap_iqr = gap_stats.ile(0.75f) - gap_stats.ile(0.25f); if (textord_debug_tabfind >= 4) { tprintf("gap iqr = %g, blob_count=%d, limits=%g,%g\n", gap_iqr, blob_count, - max_width * kMaxLeaderGapFractionOfMax, min_width * kMaxLeaderGapFractionOfMin); + max_width * kMaxLeaderGapFractionOfMax, + min_width * kMaxLeaderGapFractionOfMin); } if (gap_iqr < max_width * kMaxLeaderGapFractionOfMax && - gap_iqr < min_width * kMaxLeaderGapFractionOfMin && blob_count >= kMinLeaderCount) { + gap_iqr < min_width * kMaxLeaderGapFractionOfMin && + blob_count >= kMinLeaderCount) { // This is stable enough to be called a leader, so check the widths. // Since leader dashes can join, run a dp cutting algorithm and go // on the cost. @@ -1151,8 +1182,9 @@ bool ColPartition::MarkAsLeaderIfMonospaced() { projection[left - part_left].AddLocalCost(height); } } - DPPoint *best_end = DPPoint::Solve(min_step, max_step, false, &DPPoint::CostWithVariance, - part_width, projection); + DPPoint *best_end = + DPPoint::Solve(min_step, max_step, false, &DPPoint::CostWithVariance, + part_width, projection); if (best_end != nullptr && best_end->total_cost() < blob_count) { // Good enough. Call it a leader. result = true; @@ -1161,7 +1193,8 @@ bool ColPartition::MarkAsLeaderIfMonospaced() { BLOBNBOX *blob = it.data(); // If the first or last blob is spaced too much, don't mark it. 
if (it.at_first()) { - int gap = it.data_relative(1)->bounding_box().left() - blob->bounding_box().right(); + int gap = it.data_relative(1)->bounding_box().left() - + blob->bounding_box().right(); if (blob->bounding_box().width() + gap > max_step) { it.extract(); modified_blob_list = true; @@ -1169,7 +1202,8 @@ bool ColPartition::MarkAsLeaderIfMonospaced() { } } if (it.at_last()) { - int gap = blob->bounding_box().left() - it.data_relative(-1)->bounding_box().right(); + int gap = blob->bounding_box().left() - + it.data_relative(-1)->bounding_box().right(); if (blob->bounding_box().width() + gap > max_step) { it.extract(); modified_blob_list = true; @@ -1188,7 +1222,8 @@ bool ColPartition::MarkAsLeaderIfMonospaced() { if (best_end == nullptr) { tprintf("No path\n"); } else { - tprintf("Total cost = %d vs allowed %d\n", best_end->total_cost(), blob_count); + tprintf("Total cost = %d vs allowed %d\n", best_end->total_cost(), + blob_count); } } delete[] projection; @@ -1275,10 +1310,12 @@ void ColPartition::SetRegionAndFlowTypesFromProjectionValue(int value) { blob_type_ = BRT_NOISE; } } - if (TabFind::WithinTestRegion(2, bounding_box_.left(), bounding_box_.bottom())) { - tprintf("RegionFlowTypesFromProjectionValue count=%d, noisy=%d, score=%d,", blob_count, - noisy_count, good_blob_score_); - tprintf(" Projection value=%d, flow=%d, blob_type=%d\n", value, flow_, blob_type_); + if (TabFind::WithinTestRegion(2, bounding_box_.left(), + bounding_box_.bottom())) { + tprintf("RegionFlowTypesFromProjectionValue count=%d, noisy=%d, score=%d,", + blob_count, noisy_count, good_blob_score_); + tprintf(" Projection value=%d, flow=%d, blob_type=%d\n", value, flow_, + blob_type_); Print(); } SetBlobTypes(); @@ -1371,7 +1408,8 @@ bool ColPartition::HasGoodBaseline() { // Adds this ColPartition to a matching WorkingPartSet if one can be found, // otherwise starts a new one in the appropriate column, ending the previous. 
-void ColPartition::AddToWorkingSet(const ICOORD &bleft, const ICOORD &tright, int resolution, +void ColPartition::AddToWorkingSet(const ICOORD &bleft, const ICOORD &tright, + int resolution, ColPartition_LIST *used_parts, WorkingPartSet_LIST *working_sets) { if (block_owned_) { @@ -1414,10 +1452,11 @@ void ColPartition::AddToWorkingSet(const ICOORD &bleft, const ICOORD &tright, in // Find the column that the right edge falls in. BLOCK_LIST completed_blocks; TO_BLOCK_LIST to_blocks; - for (; !it.cycled_list() && col_index <= last_column_; it.forward(), ++col_index) { + for (; !it.cycled_list() && col_index <= last_column_; + it.forward(), ++col_index) { WorkingPartSet *end_set = it.data(); - end_set->ExtractCompletedBlocks(bleft, tright, resolution, used_parts, &completed_blocks, - &to_blocks); + end_set->ExtractCompletedBlocks(bleft, tright, resolution, used_parts, + &completed_blocks, &to_blocks); } work_set->InsertCompletedBlocks(&completed_blocks, &to_blocks); } @@ -1431,9 +1470,12 @@ void ColPartition::AddToWorkingSet(const ICOORD &bleft, const ICOORD &tright, in // The used partitions are put onto used_parts, as they may still be referred // to in the partition grid. bleft, tright and resolution are the bounds // and resolution of the original image. -void ColPartition::LineSpacingBlocks(const ICOORD &bleft, const ICOORD &tright, int resolution, - ColPartition_LIST *block_parts, ColPartition_LIST *used_parts, - BLOCK_LIST *completed_blocks, TO_BLOCK_LIST *to_blocks) { +void ColPartition::LineSpacingBlocks(const ICOORD &bleft, const ICOORD &tright, + int resolution, + ColPartition_LIST *block_parts, + ColPartition_LIST *used_parts, + BLOCK_LIST *completed_blocks, + TO_BLOCK_LIST *to_blocks) { int page_height = tright.y() - bleft.y(); // Compute the initial spacing stats. 
ColPartition_IT it(block_parts); @@ -1466,7 +1508,8 @@ void ColPartition::LineSpacingBlocks(const ICOORD &bleft, const ICOORD &tright, part->set_side_step(static_cast(side_steps.median() + 0.5)); if (!it.at_last()) { ColPartition *next_part = it.data_relative(1); - part->set_bottom_spacing(part->median_bottom() - next_part->median_bottom()); + part->set_bottom_spacing(part->median_bottom() - + next_part->median_bottom()); part->set_top_spacing(part->median_top() - next_part->median_top()); } else { part->set_bottom_spacing(page_height); @@ -1474,8 +1517,8 @@ void ColPartition::LineSpacingBlocks(const ICOORD &bleft, const ICOORD &tright, } if (textord_debug_tabfind) { part->Print(); - tprintf("side step = %.2f, top spacing = %d, bottom spacing=%d\n", side_steps.median(), - part->top_spacing(), part->bottom_spacing()); + tprintf("side step = %.2f, top spacing = %d, bottom spacing=%d\n", + side_steps.median(), part->top_spacing(), part->bottom_spacing()); } ++part_count; } @@ -1508,21 +1551,25 @@ void ColPartition::LineSpacingBlocks(const ICOORD &bleft, const ICOORD &tright, tprintf( "Spacings unequal: upper:%d/%d, lower:%d/%d," " sizes %d %d %d\n", - part->top_spacing(), part->bottom_spacing(), next_part->top_spacing(), - next_part->bottom_spacing(), part->median_height(), next_part->median_height(), + part->top_spacing(), part->bottom_spacing(), + next_part->top_spacing(), next_part->bottom_spacing(), + part->median_height(), next_part->median_height(), third_part != nullptr ? third_part->median_height() : 0); } // We can only consider adding the next line to the block if the sizes // match and the lines are close enough for their size. 
if (part->SizesSimilar(*next_part) && - next_part->median_height() * kMaxSameBlockLineSpacing > part->bottom_spacing() && - part->median_height() * kMaxSameBlockLineSpacing > part->top_spacing()) { + next_part->median_height() * kMaxSameBlockLineSpacing > + part->bottom_spacing() && + part->median_height() * kMaxSameBlockLineSpacing > + part->top_spacing()) { // Even now, we can only add it as long as the third line doesn't // match in the same way and have a smaller bottom spacing. if (third_part == nullptr || !next_part->SizesSimilar(*third_part) || third_part->median_height() * kMaxSameBlockLineSpacing <= next_part->bottom_spacing() || - next_part->median_height() * kMaxSameBlockLineSpacing <= next_part->top_spacing() || + next_part->median_height() * kMaxSameBlockLineSpacing <= + next_part->top_spacing() || next_part->bottom_spacing() > part->bottom_spacing()) { // Add to the current block. sp_block_it.add_to_end(it.extract()); @@ -1542,8 +1589,9 @@ void ColPartition::LineSpacingBlocks(const ICOORD &bleft, const ICOORD &tright, } else { if (textord_debug_tabfind && !it.empty()) { ColPartition *next_part = it.data(); - tprintf("Spacings equal: upper:%d/%d, lower:%d/%d, median:%d/%d\n", part->top_spacing(), - part->bottom_spacing(), next_part->top_spacing(), next_part->bottom_spacing(), + tprintf("Spacings equal: upper:%d/%d, lower:%d/%d, median:%d/%d\n", + part->top_spacing(), part->bottom_spacing(), + next_part->top_spacing(), next_part->bottom_spacing(), part->median_height(), next_part->median_height()); } } @@ -1570,8 +1618,9 @@ static void ClipCoord(const ICOORD &bleft, const ICOORD &tright, ICOORD *pos) { // itself. Sets up the block for (old) textline formation correctly for // vertical and horizontal text. The partitions are moved to used_parts // afterwards, as they cannot be deleted yet. 
-static TO_BLOCK *MoveBlobsToBlock(bool vertical_text, int line_spacing, BLOCK *block, - ColPartition_LIST *block_parts, ColPartition_LIST *used_parts) { +static TO_BLOCK *MoveBlobsToBlock(bool vertical_text, int line_spacing, + BLOCK *block, ColPartition_LIST *block_parts, + ColPartition_LIST *used_parts) { // Make a matching TO_BLOCK and put all the BLOBNBOXes from the parts in it. // Move all the parts to a done list as they are no longer needed, except // that have have to continue to exist until the part grid is deleted. @@ -1646,7 +1695,8 @@ static TO_BLOCK *MoveBlobsToBlock(bool vertical_text, int line_spacing, BLOCK *b // Constructs a block from the given list of partitions. // Arguments are as LineSpacingBlocks above. TO_BLOCK *ColPartition::MakeBlock(const ICOORD &bleft, const ICOORD &tright, - ColPartition_LIST *block_parts, ColPartition_LIST *used_parts) { + ColPartition_LIST *block_parts, + ColPartition_LIST *used_parts) { if (block_parts->empty()) { return nullptr; // Nothing to do. } @@ -1704,7 +1754,8 @@ TO_BLOCK *ColPartition::MakeBlock(const ICOORD &bleft, const ICOORD &tright, // Constructs a block from the given list of vertical text partitions. // Currently only creates rectangular blocks. 
-TO_BLOCK *ColPartition::MakeVerticalTextBlock(const ICOORD &bleft, const ICOORD &tright, +TO_BLOCK *ColPartition::MakeVerticalTextBlock(const ICOORD &bleft, + const ICOORD &tright, ColPartition_LIST *block_parts, ColPartition_LIST *used_parts) { if (block_parts->empty()) { @@ -1722,8 +1773,8 @@ TO_BLOCK *ColPartition::MakeVerticalTextBlock(const ICOORD &bleft, const ICOORD tprintf("Making block at:"); block_box.print(); } - auto *block = new BLOCK("", true, 0, 0, block_box.left(), block_box.bottom(), block_box.right(), - block_box.top()); + auto *block = new BLOCK("", true, 0, 0, block_box.left(), block_box.bottom(), + block_box.right(), block_box.top()); block->pdblk.set_poly_block(new POLY_BLOCK(block_box, type)); return MoveBlobsToBlock(true, line_spacing, block, block_parts, used_parts); } @@ -1741,8 +1792,9 @@ TO_ROW *ColPartition::MakeToRow() { int top = blob->bounding_box().top(); int bottom = blob->bounding_box().bottom(); if (row == nullptr) { - row = new TO_ROW(blob, static_cast(top), static_cast(bottom), - static_cast(line_size)); + row = + new TO_ROW(blob, static_cast(top), static_cast(bottom), + static_cast(line_size)); } else { row->add_blob(blob, static_cast(top), static_cast(bottom), static_cast(line_size)); @@ -1785,7 +1837,8 @@ ColPartition *ColPartition::CopyButDontOwnBlobs() { copy->set_owns_blobs(false); BLOBNBOX_C_IT inserter(copy->boxes()); BLOBNBOX_C_IT traverser(boxes()); - for (traverser.mark_cycle_pt(); !traverser.cycled_list(); traverser.forward()) { + for (traverser.mark_cycle_pt(); !traverser.cycled_list(); + traverser.forward()) { inserter.add_after_then_move(traverser.data()); } return copy; @@ -1812,19 +1865,21 @@ void ColPartition::Print() const { "ColPart:%c(M%d-%c%d-B%d/%d,%d/%d)->(%dB-%d%c-%dM/%d,%d/%d)" " w-ok=%d, v-ok=%d, type=%d%c%d, fc=%d, lc=%d, boxes=%d" " ts=%d bs=%d ls=%d rs=%d\n", - boxes_.empty() ? 'E' : ' ', left_margin_, left_key_tab_ ? 
'T' : 'B', LeftAtY(y), - bounding_box_.left(), median_left_, bounding_box_.bottom(), median_bottom_, - bounding_box_.right(), RightAtY(y), right_key_tab_ ? 'T' : 'B', right_margin_, median_right_, - bounding_box_.top(), median_top_, good_width_, good_column_, type_, kBlobTypes[blob_type_], - flow_, first_column_, last_column_, boxes_.length(), space_above_, space_below_, - space_to_left_, space_to_right_); + boxes_.empty() ? 'E' : ' ', left_margin_, left_key_tab_ ? 'T' : 'B', + LeftAtY(y), bounding_box_.left(), median_left_, bounding_box_.bottom(), + median_bottom_, bounding_box_.right(), RightAtY(y), + right_key_tab_ ? 'T' : 'B', right_margin_, median_right_, + bounding_box_.top(), median_top_, good_width_, good_column_, type_, + kBlobTypes[blob_type_], flow_, first_column_, last_column_, + boxes_.length(), space_above_, space_below_, space_to_left_, + space_to_right_); } // Prints debug information on the colors. void ColPartition::PrintColors() { - tprintf("Colors:(%d, %d, %d)%d -> (%d, %d, %d)\n", color1_[COLOR_RED], color1_[COLOR_GREEN], - color1_[COLOR_BLUE], color1_[L_ALPHA_CHANNEL], color2_[COLOR_RED], color2_[COLOR_GREEN], - color2_[COLOR_BLUE]); + tprintf("Colors:(%d, %d, %d)%d -> (%d, %d, %d)\n", color1_[COLOR_RED], + color1_[COLOR_GREEN], color1_[COLOR_BLUE], color1_[L_ALPHA_CHANNEL], + color2_[COLOR_RED], color2_[COLOR_GREEN], color2_[COLOR_BLUE]); } // Sets the types of all partitions in the run to be the max of the types. @@ -1898,7 +1953,8 @@ void ColPartition::SmoothPartnerRun(int working_set_count) { // one partner. This makes block creation simpler. // If get_desperate is true, goes to more desperate merge methods // to merge flowing text before breaking partnerships. 
-void ColPartition::RefinePartners(PolyBlockType type, bool get_desperate, ColPartitionGrid *grid) { +void ColPartition::RefinePartners(PolyBlockType type, bool get_desperate, + ColPartitionGrid *grid) { if (TypesSimilar(type_, type)) { RefinePartnersInternal(true, get_desperate, grid); RefinePartnersInternal(false, get_desperate, grid); @@ -1924,7 +1980,8 @@ void ColPartition::RefinePartners(PolyBlockType type, bool get_desperate, ColPar // Cleans up the partners above if upper is true, else below. // If get_desperate is true, goes to more desperate merge methods // to merge flowing text before breaking partnerships. -void ColPartition::RefinePartnersInternal(bool upper, bool get_desperate, ColPartitionGrid *grid) { +void ColPartition::RefinePartnersInternal(bool upper, bool get_desperate, + ColPartitionGrid *grid) { ColPartition_CLIST *partners = upper ? &upper_partners_ : &lower_partners_; if (!partners->empty() && !partners->singleton()) { RefinePartnersByType(upper, partners); @@ -1952,8 +2009,10 @@ void ColPartition::RefinePartnersInternal(bool upper, bool get_desperate, ColPar // Cleans up the partners above if upper is true, else below. // Restricts the partners to only desirable types. For text and BRT_HLINE this // means the same type_ , and for image types it means any image type. -void ColPartition::RefinePartnersByType(bool upper, ColPartition_CLIST *partners) { - bool debug = TabFind::WithinTestRegion(2, bounding_box_.left(), bounding_box_.bottom()); +void ColPartition::RefinePartnersByType(bool upper, + ColPartition_CLIST *partners) { + bool debug = TabFind::WithinTestRegion(2, bounding_box_.left(), + bounding_box_.bottom()); if (debug) { tprintf("Refining %d %s partners by type for:\n", partners->length(), upper ? "Upper" : "Lower"); @@ -1983,7 +2042,8 @@ void ColPartition::RefinePartnersByType(bool upper, ColPartition_CLIST *partners // Only polyimages are allowed to have partners of any kind! 
for (it.mark_cycle_pt(); !it.cycled_list(); it.forward()) { ColPartition *partner = it.data(); - if (partner->blob_type() != BRT_POLYIMAGE || blob_type() != BRT_POLYIMAGE) { + if (partner->blob_type() != BRT_POLYIMAGE || + blob_type() != BRT_POLYIMAGE) { if (debug) { tprintf("Removing partner:"); partner->Print(); @@ -2003,7 +2063,8 @@ void ColPartition::RefinePartnersByType(bool upper, ColPartition_CLIST *partners // Gets rid of this<->b, leaving a clean chain. // Also if we have this<->a and a<->this, then gets rid of this<->a, as // this has multiple partners. -void ColPartition::RefinePartnerShortcuts(bool upper, ColPartition_CLIST *partners) { +void ColPartition::RefinePartnerShortcuts(bool upper, + ColPartition_CLIST *partners) { bool done_any = false; do { done_any = false; @@ -2054,8 +2115,10 @@ void ColPartition::RefinePartnerShortcuts(bool upper, ColPartition_CLIST *partne // by aggressive line fitting/splitting, as there are probably vertically // joined blobs that cross textlines. void ColPartition::RefineTextPartnersByMerge(bool upper, bool desperate, - ColPartition_CLIST *partners, ColPartitionGrid *grid) { - bool debug = TabFind::WithinTestRegion(2, bounding_box_.left(), bounding_box_.bottom()); + ColPartition_CLIST *partners, + ColPartitionGrid *grid) { + bool debug = TabFind::WithinTestRegion(2, bounding_box_.left(), + bounding_box_.bottom()); if (debug) { tprintf("Refining %d %s partners by merge for:\n", partners->length(), upper ? 
"Upper" : "Lower"); @@ -2078,12 +2141,13 @@ void ColPartition::RefineTextPartnersByMerge(bool upper, bool desperate, } } int overlap_increase; - ColPartition *candidate = - grid->BestMergeCandidate(part, &candidates, debug, nullptr, &overlap_increase); + ColPartition *candidate = grid->BestMergeCandidate( + part, &candidates, debug, nullptr, &overlap_increase); if (candidate != nullptr && (overlap_increase <= 0 || desperate)) { if (debug) { - tprintf("Merging:hoverlap=%d, voverlap=%d, OLI=%d\n", part->HCoreOverlap(*candidate), - part->VCoreOverlap(*candidate), overlap_increase); + tprintf("Merging:hoverlap=%d, voverlap=%d, OLI=%d\n", + part->HCoreOverlap(*candidate), part->VCoreOverlap(*candidate), + overlap_increase); } // Remove before merge and re-insert to keep the integrity of the grid. grid->RemoveBBox(candidate); @@ -2102,8 +2166,10 @@ void ColPartition::RefineTextPartnersByMerge(bool upper, bool desperate, // Cleans up the partners above if upper is true, else below. // Keep the partner with the biggest overlap. -void ColPartition::RefinePartnersByOverlap(bool upper, ColPartition_CLIST *partners) { - bool debug = TabFind::WithinTestRegion(2, bounding_box_.left(), bounding_box_.bottom()); +void ColPartition::RefinePartnersByOverlap(bool upper, + ColPartition_CLIST *partners) { + bool debug = TabFind::WithinTestRegion(2, bounding_box_.left(), + bounding_box_.bottom()); if (debug) { tprintf("Refining %d %s partners by overlap for:\n", partners->length(), upper ? 
"Upper" : "Lower"); @@ -2115,8 +2181,9 @@ void ColPartition::RefinePartnersByOverlap(bool upper, ColPartition_CLIST *partn int best_overlap = 0; for (it.mark_cycle_pt(); !it.cycled_list(); it.forward()) { ColPartition *partner = it.data(); - int overlap = std::min(bounding_box_.right(), partner->bounding_box_.right()) - - std::max(bounding_box_.left(), partner->bounding_box_.left()); + int overlap = + std::min(bounding_box_.right(), partner->bounding_box_.right()) - + std::max(bounding_box_.left(), partner->bounding_box_.left()); if (overlap > best_overlap) { best_overlap = overlap; best_partner = partner; @@ -2137,7 +2204,8 @@ void ColPartition::RefinePartnersByOverlap(bool upper, ColPartition_CLIST *partn } // Return true if bbox belongs better in this than other. -bool ColPartition::ThisPartitionBetter(BLOBNBOX *bbox, const ColPartition &other) { +bool ColPartition::ThisPartitionBetter(BLOBNBOX *bbox, + const ColPartition &other) { const TBOX &box = bbox->bounding_box(); // Margins take priority. 
int left = box.left(); @@ -2150,14 +2218,17 @@ bool ColPartition::ThisPartitionBetter(BLOBNBOX *bbox, const ColPartition &other } int top = box.top(); int bottom = box.bottom(); - int this_overlap = std::min(top, median_top_) - std::max(bottom, median_bottom_); - int other_overlap = std::min(top, other.median_top_) - std::max(bottom, other.median_bottom_); + int this_overlap = + std::min(top, median_top_) - std::max(bottom, median_bottom_); + int other_overlap = + std::min(top, other.median_top_) - std::max(bottom, other.median_bottom_); int this_miss = median_top_ - median_bottom_ - this_overlap; int other_miss = other.median_top_ - other.median_bottom_ - other_overlap; if (TabFind::WithinTestRegion(3, box.left(), box.bottom())) { - tprintf("Unique on (%d,%d)->(%d,%d) overlap %d/%d, miss %d/%d, mt=%d/%d\n", box.left(), - box.bottom(), box.right(), box.top(), this_overlap, other_overlap, this_miss, - other_miss, median_top_, other.median_top_); + tprintf("Unique on (%d,%d)->(%d,%d) overlap %d/%d, miss %d/%d, mt=%d/%d\n", + box.left(), box.bottom(), box.right(), box.top(), this_overlap, + other_overlap, this_miss, other_miss, median_top_, + other.median_top_); } if (this_miss < other_miss) { return true; @@ -2200,13 +2271,15 @@ bool ColPartition::IsInSameColumnAs(const ColPartition &part) const { // Overlap does not occur when last < part.first or first > part.last. // In other words, one is completely to the side of the other. // This is just DeMorgan's law applied to that so the function returns true. - return (last_column_ >= part.first_column_) && (first_column_ <= part.last_column_); + return (last_column_ >= part.first_column_) && + (first_column_ <= part.last_column_); } // Smoothes the spacings in the list into groups of equal linespacing. // resolution is the resolution of the original image, used as a basis // for thresholds in change of spacing. page_height is in pixels. 
-void ColPartition::SmoothSpacings(int resolution, int page_height, ColPartition_LIST *parts) { +void ColPartition::SmoothSpacings(int resolution, int page_height, + ColPartition_LIST *parts) { // The task would be trivial if we didn't have to allow for blips - // occasional offsets in spacing caused by anomalous text, such as all // caps, groups of descenders, joined words, Arabic etc. @@ -2258,13 +2331,17 @@ void ColPartition::SmoothSpacings(int resolution, int page_height, ColPartition_ // The last time, everything is shifted up 1, so we present OKSpacingBlip // with neighbourhood-1 and check that PN_LOWER matches the median. if (neighbourhood[PN_LOWER] == nullptr || - (!neighbourhood[PN_UPPER]->SpacingsEqual(*neighbourhood[PN_LOWER], resolution) && - (neighbourhood[PN_UPPER] == nullptr || neighbourhood[PN_LOWER] == nullptr || + (!neighbourhood[PN_UPPER]->SpacingsEqual(*neighbourhood[PN_LOWER], + resolution) && + (neighbourhood[PN_UPPER] == nullptr || + neighbourhood[PN_LOWER] == nullptr || !OKSpacingBlip(resolution, median_space, neighbourhood, 0)) && - (neighbourhood[PN_UPPER - 1] == nullptr || neighbourhood[PN_LOWER - 1] == nullptr || + (neighbourhood[PN_UPPER - 1] == nullptr || + neighbourhood[PN_LOWER - 1] == nullptr || !OKSpacingBlip(resolution, median_space, neighbourhood, -1) || !neighbourhood[PN_LOWER]->SpacingEqual(median_space, resolution)) && - (neighbourhood[PN_UPPER + 1] == nullptr || neighbourhood[PN_LOWER + 1] == nullptr || + (neighbourhood[PN_UPPER + 1] == nullptr || + neighbourhood[PN_LOWER + 1] == nullptr || !OKSpacingBlip(resolution, median_space, neighbourhood, 1) || !neighbourhood[PN_UPPER]->SpacingEqual(median_space, resolution)))) { // The group has ended. PN_UPPER is the last member. 
@@ -2297,7 +2374,8 @@ void ColPartition::SmoothSpacings(int resolution, int page_height, ColPartition_ if (neighbourhood[i] == nullptr) { tprintf("NULL"); if (i > 0 && neighbourhood[i - 1] != nullptr) { - if (neighbourhood[i - 1]->SingletonPartner(false) != nullptr) { + if (neighbourhood[i - 1]->SingletonPartner(false) != + nullptr) { tprintf(" Lower partner:"); neighbourhood[i - 1]->SingletonPartner(false)->Print(); } else { @@ -2307,7 +2385,8 @@ void ColPartition::SmoothSpacings(int resolution, int page_height, ColPartition_ tprintf("\n"); } } else { - tprintf("Top = %d, bottom = %d\n", neighbourhood[i]->top_spacing(), + tprintf("Top = %d, bottom = %d\n", + neighbourhood[i]->top_spacing(), neighbourhood[i]->bottom_spacing()); } } @@ -2350,12 +2429,13 @@ void ColPartition::SmoothSpacings(int resolution, int page_height, ColPartition_ // Returns true if the parts array of pointers to partitions matches the // condition for a spacing blip. See SmoothSpacings for what this means // and how it is used. -bool ColPartition::OKSpacingBlip(int resolution, int median_spacing, ColPartition **parts, - int offset) { +bool ColPartition::OKSpacingBlip(int resolution, int median_spacing, + ColPartition **parts, int offset) { // The blip is OK if upper and lower sum to an OK value and at least // one of above1 and below1 is equal to the median. parts += offset; - return parts[PN_UPPER]->SummedSpacingOK(*parts[PN_LOWER], median_spacing, resolution) && + return parts[PN_UPPER]->SummedSpacingOK(*parts[PN_LOWER], median_spacing, + resolution) && ((parts[PN_ABOVE1] != nullptr && parts[PN_ABOVE1]->SpacingEqual(median_spacing, resolution)) || (parts[PN_BELOW1] != nullptr && @@ -2373,22 +2453,27 @@ bool ColPartition::SpacingEqual(int spacing, int resolution) const { // Returns true if both the top and bottom spacings of this and other // match to within suitable margins dictated by the image resolution. 
-bool ColPartition::SpacingsEqual(const ColPartition &other, int resolution) const { - int bottom_error = - std::max(BottomSpacingMargin(resolution), other.BottomSpacingMargin(resolution)); - int top_error = std::max(TopSpacingMargin(resolution), other.TopSpacingMargin(resolution)); +bool ColPartition::SpacingsEqual(const ColPartition &other, + int resolution) const { + int bottom_error = std::max(BottomSpacingMargin(resolution), + other.BottomSpacingMargin(resolution)); + int top_error = std::max(TopSpacingMargin(resolution), + other.TopSpacingMargin(resolution)); return NearlyEqual(bottom_spacing_, other.bottom_spacing_, bottom_error) && (NearlyEqual(top_spacing_, other.top_spacing_, top_error) || - NearlyEqual(top_spacing_ + other.top_spacing_, bottom_spacing_ * 2, bottom_error)); + NearlyEqual(top_spacing_ + other.top_spacing_, bottom_spacing_ * 2, + bottom_error)); } // Returns true if the sum spacing of this and other match the given // spacing (or twice the given spacing) to within a suitable margin dictated // by the image resolution. 
-bool ColPartition::SummedSpacingOK(const ColPartition &other, int spacing, int resolution) const { - int bottom_error = - std::max(BottomSpacingMargin(resolution), other.BottomSpacingMargin(resolution)); - int top_error = std::max(TopSpacingMargin(resolution), other.TopSpacingMargin(resolution)); +bool ColPartition::SummedSpacingOK(const ColPartition &other, int spacing, + int resolution) const { + int bottom_error = std::max(BottomSpacingMargin(resolution), + other.BottomSpacingMargin(resolution)); + int top_error = std::max(TopSpacingMargin(resolution), + other.TopSpacingMargin(resolution)); int bottom_total = bottom_spacing_ + other.bottom_spacing_; int top_total = top_spacing_ + other.top_spacing_; return (NearlyEqual(spacing, bottom_total, bottom_error) && @@ -2420,7 +2505,8 @@ bool ColPartition::SizesSimilar(const ColPartition &other) const { // Helper updates margin_left and margin_right, being the bounds of the left // margin of part of a block. Returns false and does not update the bounds if // this partition has a disjoint margin with the established margin. -static bool UpdateLeftMargin(const ColPartition &part, int *margin_left, int *margin_right) { +static bool UpdateLeftMargin(const ColPartition &part, int *margin_left, + int *margin_right) { const TBOX &part_box = part.bounding_box(); int top = part_box.top(); int bottom = part_box.bottom(); @@ -2444,7 +2530,8 @@ static bool UpdateLeftMargin(const ColPartition &part, int *margin_left, int *ma // condition that the intersection of the left margins is non-empty, ie the // rightmost left margin is to the left of the leftmost left bounding box edge. // On return the iterator is set to the start of the next run. 
-void ColPartition::LeftEdgeRun(ColPartition_IT *part_it, ICOORD *start, ICOORD *end) { +void ColPartition::LeftEdgeRun(ColPartition_IT *part_it, ICOORD *start, + ICOORD *end) { ColPartition *part = part_it->data(); ColPartition *start_part = part; int start_y = part->bounding_box_.top(); @@ -2463,7 +2550,8 @@ void ColPartition::LeftEdgeRun(ColPartition_IT *part_it, ICOORD *start, ICOORD * do { part_it->forward(); part = part_it->data(); - } while (!part_it->at_first() && UpdateLeftMargin(*part, &margin_left, &margin_right)); + } while (!part_it->at_first() && + UpdateLeftMargin(*part, &margin_left, &margin_right)); // The run ended. If we were pushed inwards, compute the next run and // extend it backwards into the run we just calculated to find the end of // this run that provides a tight box. @@ -2475,13 +2563,15 @@ void ColPartition::LeftEdgeRun(ColPartition_IT *part_it, ICOORD *start, ICOORD * do { next_it.forward(); part = next_it.data(); - } while (!next_it.at_first() && UpdateLeftMargin(*part, &next_margin_left, &next_margin_right)); + } while (!next_it.at_first() && + UpdateLeftMargin(*part, &next_margin_left, &next_margin_right)); // Now extend the next run backwards into the original run to get the // tightest fit. do { part_it->backward(); part = part_it->data(); - } while (part != start_part && UpdateLeftMargin(*part, &next_margin_left, &next_margin_right)); + } while (part != start_part && + UpdateLeftMargin(*part, &next_margin_left, &next_margin_right)); part_it->forward(); } // Now calculate the end_y. 
@@ -2495,16 +2585,17 @@ void ColPartition::LeftEdgeRun(ColPartition_IT *part_it, ICOORD *start, ICOORD * end->set_y(end_y); end->set_x(part->XAtY(margin_right, end_y)); if (textord_debug_tabfind && !part_it->at_first()) { - tprintf("Left run from y=%d to %d terminated with sum %d-%d, new %d-%d\n", start_y, end_y, - part->XAtY(margin_left, end_y), end->x(), part->left_margin_, - part->bounding_box_.left()); + tprintf("Left run from y=%d to %d terminated with sum %d-%d, new %d-%d\n", + start_y, end_y, part->XAtY(margin_left, end_y), end->x(), + part->left_margin_, part->bounding_box_.left()); } } // Helper updates margin_left and margin_right, being the bounds of the right // margin of part of a block. Returns false and does not update the bounds if // this partition has a disjoint margin with the established margin. -static bool UpdateRightMargin(const ColPartition &part, int *margin_left, int *margin_right) { +static bool UpdateRightMargin(const ColPartition &part, int *margin_left, + int *margin_right) { const TBOX &part_box = part.bounding_box(); int top = part_box.top(); int bottom = part_box.bottom(); @@ -2529,7 +2620,8 @@ static bool UpdateRightMargin(const ColPartition &part, int *margin_left, int *m // leftmost right margin is to the right of the rightmost right bounding box // edge. // On return the iterator is set to the start of the next run. 
-void ColPartition::RightEdgeRun(ColPartition_IT *part_it, ICOORD *start, ICOORD *end) { +void ColPartition::RightEdgeRun(ColPartition_IT *part_it, ICOORD *start, + ICOORD *end) { ColPartition *part = part_it->data(); ColPartition *start_part = part; int start_y = part->bounding_box_.bottom(); @@ -2548,7 +2640,8 @@ void ColPartition::RightEdgeRun(ColPartition_IT *part_it, ICOORD *start, ICOORD do { part_it->backward(); part = part_it->data(); - } while (!part_it->at_last() && UpdateRightMargin(*part, &margin_left, &margin_right)); + } while (!part_it->at_last() && + UpdateRightMargin(*part, &margin_left, &margin_right)); // The run ended. If we were pushed inwards, compute the next run and // extend it backwards to find the end of this run for a tight box. int next_margin_right = INT32_MAX; @@ -2559,13 +2652,15 @@ void ColPartition::RightEdgeRun(ColPartition_IT *part_it, ICOORD *start, ICOORD do { next_it.backward(); part = next_it.data(); - } while (!next_it.at_last() && UpdateRightMargin(*part, &next_margin_left, &next_margin_right)); + } while (!next_it.at_last() && + UpdateRightMargin(*part, &next_margin_left, &next_margin_right)); // Now extend the next run forwards into the original run to get the // tightest fit. do { part_it->forward(); part = part_it->data(); - } while (part != start_part && UpdateRightMargin(*part, &next_margin_left, &next_margin_right)); + } while (part != start_part && + UpdateRightMargin(*part, &next_margin_left, &next_margin_right)); part_it->backward(); } // Now calculate the end_y. 
@@ -2579,9 +2674,9 @@ void ColPartition::RightEdgeRun(ColPartition_IT *part_it, ICOORD *start, ICOORD end->set_y(end_y); end->set_x(part->XAtY(margin_left, end_y)); if (textord_debug_tabfind && !part_it->at_last()) { - tprintf("Right run from y=%d to %d terminated with sum %d-%d, new %d-%d\n", start_y, end_y, - end->x(), part->XAtY(margin_right, end_y), part->bounding_box_.right(), - part->right_margin_); + tprintf("Right run from y=%d to %d terminated with sum %d-%d, new %d-%d\n", + start_y, end_y, end->x(), part->XAtY(margin_right, end_y), + part->bounding_box_.right(), part->right_margin_); } } diff --git a/src/textord/colpartition.h b/src/textord/colpartition.h index 33596c214..8b44ad056 100644 --- a/src/textord/colpartition.h +++ b/src/textord/colpartition.h @@ -81,7 +81,8 @@ public: * Constructs a fake ColPartition with no BLOBNBOXes to represent a * horizontal or vertical line, given a type and a bounding box. */ - static ColPartition *MakeLinePartition(BlobRegionType blob_type, const ICOORD &vertical, int left, + static ColPartition *MakeLinePartition(BlobRegionType blob_type, + const ICOORD &vertical, int left, int bottom, int right, int top); // Constructs and returns a fake ColPartition with a single fake BLOBNBOX, @@ -90,14 +91,16 @@ public: // the ColPartition owns the BLOBNBOX!!! // Call DeleteBoxes before deleting the ColPartition. static ColPartition *FakePartition(const TBOX &box, PolyBlockType block_type, - BlobRegionType blob_type, BlobTextFlowType flow); + BlobRegionType blob_type, + BlobTextFlowType flow); // Constructs and returns a ColPartition with the given real BLOBNBOX, // and sets it up to be a "big" partition (single-blob partition bigger // than the surrounding text that may be a dropcap, two or more vertically // touching characters, or some graphic element. // If the given list is not nullptr, the partition is also added to the list. 
- static ColPartition *MakeBigPartition(BLOBNBOX *box, ColPartition_LIST *big_part_list); + static ColPartition *MakeBigPartition(BLOBNBOX *box, + ColPartition_LIST *big_part_list); ~ColPartition(); @@ -389,7 +392,8 @@ public: return false; } int overlap = VCoreOverlap(other); - int height = std::min(median_top_ - median_bottom_, other.median_top_ - other.median_bottom_); + int height = std::min(median_top_ - median_bottom_, + other.median_top_ - other.median_bottom_); return overlap * 3 > height; } // Returns true if this and other can be combined without putting a @@ -412,7 +416,8 @@ public: // Returns true if the types are similar to each other. static bool TypesSimilar(PolyBlockType type1, PolyBlockType type2) { - return (type1 == type2 || (type1 == PT_FLOWING_TEXT && type2 == PT_INLINE_EQUATION) || + return (type1 == type2 || + (type1 == PT_FLOWING_TEXT && type2 == PT_INLINE_EQUATION) || (type2 == PT_FLOWING_TEXT && type1 == PT_INLINE_EQUATION)); } @@ -519,7 +524,8 @@ public: bool ConfirmNoTabViolation(const ColPartition &other) const; // Returns true if other has a similar stroke width to this. - bool MatchingStrokeWidth(const ColPartition &other, double fractional_tolerance, + bool MatchingStrokeWidth(const ColPartition &other, + double fractional_tolerance, double constant_tolerance) const; // Returns true if candidate is an acceptable diacritic base char merge // with this as the diacritic. @@ -548,7 +554,8 @@ public: // Set the density value for a particular BlobSpecialTextType, should ONLY be // used for debugging or testing. In production code, use // ComputeSpecialBlobsDensity instead. - void SetSpecialBlobsDensity(const BlobSpecialTextType type, const float density); + void SetSpecialBlobsDensity(const BlobSpecialTextType type, + const float density); // Compute the SpecialTextType density of blobs, where we assume // that the SpecialTextType in the boxes_ has been set. 
void ComputeSpecialBlobsDensity(); @@ -565,14 +572,14 @@ public: ColPartition *SingletonPartner(bool upper); // Merge with the other partition and delete it. - void Absorb(ColPartition *other, WidthCallback cb); + void Absorb(ColPartition *other, const WidthCallback &cb); // Returns true if the overlap between this and the merged pair of // merge candidates is sufficiently trivial to be allowed. // The merged box can graze the edge of this by the ok_box_overlap // if that exceeds the margin to the median top and bottom. - bool OKMergeOverlap(const ColPartition &merge1, const ColPartition &merge2, int ok_box_overlap, - bool debug); + bool OKMergeOverlap(const ColPartition &merge1, const ColPartition &merge2, + int ok_box_overlap, bool debug); // Find the blob at which to split this to minimize the overlap with the // given box. Returns the first blob to go in the second partition. @@ -606,10 +613,11 @@ public: // Returns the first and last column touched by this partition. // resolution refers to the ppi resolution of the image. - void ColumnRange(int resolution, ColPartitionSet *columns, int *first_col, int *last_col); + void ColumnRange(int resolution, ColPartitionSet *columns, int *first_col, + int *last_col); // Sets the internal flags good_width_ and good_column_. - void SetColumnGoodness(WidthCallback cb); + void SetColumnGoodness(const WidthCallback &cb); // Determines whether the blobs in this partition mostly represent // a leader (fixed pitch sequence) and sets the member blobs accordingly. @@ -634,8 +642,9 @@ public: // Adds this ColPartition to a matching WorkingPartSet if one can be found, // otherwise starts a new one in the appropriate column, ending the previous. 
- void AddToWorkingSet(const ICOORD &bleft, const ICOORD &tright, int resolution, - ColPartition_LIST *used_parts, WorkingPartSet_LIST *working_set); + void AddToWorkingSet(const ICOORD &bleft, const ICOORD &tright, + int resolution, ColPartition_LIST *used_parts, + WorkingPartSet_LIST *working_set); // From the given block_parts list, builds one or more BLOCKs and // corresponding TO_BLOCKs, such that the line spacing is uniform in each. @@ -643,17 +652,21 @@ public: // The used partitions are put onto used_parts, as they may still be referred // to in the partition grid. bleft, tright and resolution are the bounds // and resolution of the original image. - static void LineSpacingBlocks(const ICOORD &bleft, const ICOORD &tright, int resolution, - ColPartition_LIST *block_parts, ColPartition_LIST *used_parts, - BLOCK_LIST *completed_blocks, TO_BLOCK_LIST *to_blocks); + static void LineSpacingBlocks(const ICOORD &bleft, const ICOORD &tright, + int resolution, ColPartition_LIST *block_parts, + ColPartition_LIST *used_parts, + BLOCK_LIST *completed_blocks, + TO_BLOCK_LIST *to_blocks); // Constructs a block from the given list of partitions. // Arguments are as LineSpacingBlocks above. static TO_BLOCK *MakeBlock(const ICOORD &bleft, const ICOORD &tright, - ColPartition_LIST *block_parts, ColPartition_LIST *used_parts); + ColPartition_LIST *block_parts, + ColPartition_LIST *used_parts); // Constructs a block from the given list of vertical text partitions. // Currently only creates rectangular blocks. - static TO_BLOCK *MakeVerticalTextBlock(const ICOORD &bleft, const ICOORD &tright, + static TO_BLOCK *MakeVerticalTextBlock(const ICOORD &bleft, + const ICOORD &tright, ColPartition_LIST *block_parts, ColPartition_LIST *used_parts); @@ -686,7 +699,8 @@ public: // one partner. This makes block creation simpler. // If get_desperate is true, goes to more desperate merge methods // to merge flowing text before breaking partnerships. 
- void RefinePartners(PolyBlockType type, bool get_desperate, ColPartitionGrid *grid); + void RefinePartners(PolyBlockType type, bool get_desperate, + ColPartitionGrid *grid); // Returns true if this column partition is in the same column as // part. This function will only work after the SetPartitionType function @@ -700,8 +714,10 @@ public: const ColPartition *part2 = *static_cast(p2); int mid_y1 = part1->bounding_box_.y_middle(); int mid_y2 = part2->bounding_box_.y_middle(); - if ((part2->bounding_box_.bottom() <= mid_y1 && mid_y1 <= part2->bounding_box_.top()) || - (part1->bounding_box_.bottom() <= mid_y2 && mid_y2 <= part1->bounding_box_.top())) { + if ((part2->bounding_box_.bottom() <= mid_y1 && + mid_y1 <= part2->bounding_box_.top()) || + (part1->bounding_box_.bottom() <= mid_y2 && + mid_y2 <= part1->bounding_box_.top())) { // Sort by increasing x. return part1->bounding_box_.x_middle() - part2->bounding_box_.x_middle(); } @@ -721,7 +737,8 @@ private: // Cleans up the partners above if upper is true, else below. // If get_desperate is true, goes to more desperate merge methods // to merge flowing text before breaking partnerships. - void RefinePartnersInternal(bool upper, bool get_desperate, ColPartitionGrid *grid); + void RefinePartnersInternal(bool upper, bool get_desperate, + ColPartitionGrid *grid); // Restricts the partners to only desirable types. For text and BRT_HLINE this // means the same type_ , and for image types it means any image type. void RefinePartnersByType(bool upper, ColPartition_CLIST *partners); @@ -736,7 +753,8 @@ private: // is set, indicating that the textlines probably need to be regenerated // by aggressive line fitting/splitting, as there are probably vertically // joined blobs that cross textlines. 
- void RefineTextPartnersByMerge(bool upper, bool desperate, ColPartition_CLIST *partners, + void RefineTextPartnersByMerge(bool upper, bool desperate, + ColPartition_CLIST *partners, ColPartitionGrid *grid); // Keep the partner with the biggest overlap. void RefinePartnersByOverlap(bool upper, ColPartition_CLIST *partners); @@ -747,12 +765,14 @@ private: // Smoothes the spacings in the list into groups of equal linespacing. // resolution is the resolution of the original image, used as a basis // for thresholds in change of spacing. page_height is in pixels. - static void SmoothSpacings(int resolution, int page_height, ColPartition_LIST *parts); + static void SmoothSpacings(int resolution, int page_height, + ColPartition_LIST *parts); // Returns true if the parts array of pointers to partitions matches the // condition for a spacing blip. See SmoothSpacings for what this means // and how it is used. - static bool OKSpacingBlip(int resolution, int median_spacing, ColPartition **parts, int offset); + static bool OKSpacingBlip(int resolution, int median_spacing, + ColPartition **parts, int offset); // Returns true if both the top and bottom spacings of this match the given // spacing to within suitable margins dictated by the image resolution. @@ -765,7 +785,8 @@ private: // Returns true if the sum spacing of this and other match the given // spacing (or twice the given spacing) to within a suitable margin dictated // by the image resolution. - bool SummedSpacingOK(const ColPartition &other, int spacing, int resolution) const; + bool SummedSpacingOK(const ColPartition &other, int spacing, + int resolution) const; // Returns a suitable spacing margin that can be applied to bottoms of // text lines, based on the resolution and the stored side_step_. @@ -792,7 +813,8 @@ private: // rightmost right bounding box edge. // TODO(rays) Not good enough. Needs improving to tightly wrap text in both // directions, and to loosely wrap images. 
- static void RightEdgeRun(ColPartition_IT *part_it, ICOORD *start, ICOORD *end); + static void RightEdgeRun(ColPartition_IT *part_it, ICOORD *start, + ICOORD *end); // The margins are determined by the position of the nearest vertically // overlapping neighbour to the side. They indicate the maximum extent @@ -893,7 +915,8 @@ private: }; // Typedef it now in case it becomes a class later. -using ColPartitionGridSearch = GridSearch; +using ColPartitionGridSearch = + GridSearch; } // namespace tesseract. diff --git a/src/textord/colpartitiongrid.cpp b/src/textord/colpartitiongrid.cpp index a2b622396..f2a842bb4 100644 --- a/src/textord/colpartitiongrid.cpp +++ b/src/textord/colpartitiongrid.cpp @@ -25,6 +25,7 @@ #include "imagefind.h" #include +#include namespace tesseract { @@ -63,12 +64,15 @@ const double kMaxPartitionSpacing = 1.75; // decision in GridSmoothNeighbour. const int kSmoothDecisionMargin = 4; -ColPartitionGrid::ColPartitionGrid(int gridsize, const ICOORD &bleft, const ICOORD &tright) - : BBGrid(gridsize, bleft, tright) {} +ColPartitionGrid::ColPartitionGrid(int gridsize, const ICOORD &bleft, + const ICOORD &tright) + : BBGrid( + gridsize, bleft, tright) {} // Handles a click event in a display window. void ColPartitionGrid::HandleClick(int x, int y) { - BBGrid::HandleClick(x, y); + BBGrid::HandleClick(x, + y); // Run a radial search for partitions that overlap. ColPartitionGridSearch radsearch(this); radsearch.SetUniqueMode(true); @@ -93,8 +97,9 @@ void ColPartitionGrid::HandleClick(int x, int y) { // true, then the partitions are merged. // Both callbacks are deleted before returning. void ColPartitionGrid::Merges( - std::function box_cb, - std::function confirm_cb) { + const std::function &box_cb, + const std::function + &confirm_cb) { // Iterate the ColPartitions in the grid. ColPartitionGridSearch gsearch(this); gsearch.StartFullSearch(); @@ -112,8 +117,9 @@ void ColPartitionGrid::Merges( // true, then the partitions are merged. 
// Returns true if the partition is consumed by one or more merges. bool ColPartitionGrid::MergePart( - std::function box_cb, - std::function confirm_cb, + const std::function &box_cb, + const std::function + &confirm_cb, ColPartition *part) { if (part->IsUnMergeableType()) { return false; @@ -138,12 +144,13 @@ bool ColPartitionGrid::MergePart( FindMergeCandidates(part, box, debug, &merge_candidates); // Find the best merge candidate based on minimal overlap increase. int overlap_increase; - ColPartition *neighbour = - BestMergeCandidate(part, &merge_candidates, debug, confirm_cb, &overlap_increase); + ColPartition *neighbour = BestMergeCandidate(part, &merge_candidates, debug, + confirm_cb, &overlap_increase); if (neighbour != nullptr && overlap_increase <= 0) { if (debug) { - tprintf("Merging:hoverlap=%d, voverlap=%d, OLI=%d\n", part->HCoreOverlap(*neighbour), - part->VCoreOverlap(*neighbour), overlap_increase); + tprintf("Merging:hoverlap=%d, voverlap=%d, OLI=%d\n", + part->HCoreOverlap(*neighbour), part->VCoreOverlap(*neighbour), + overlap_increase); } // Looks like a good candidate so merge it. RemoveBBox(neighbour); @@ -171,7 +178,8 @@ bool ColPartitionGrid::MergePart( // In general we only want to merge partitions that look like they // are on the same text line, ie their median limits overlap, but we have // to make exceptions for diacritics and stray punctuation. -static bool OKMergeCandidate(const ColPartition *part, const ColPartition *candidate, bool debug) { +static bool OKMergeCandidate(const ColPartition *part, + const ColPartition *candidate, bool debug) { const TBOX &part_box = part->bounding_box(); if (candidate == part) { return false; // Ignore itself. @@ -205,7 +213,8 @@ static bool OKMergeCandidate(const ColPartition *part, const ColPartition *candi } // Candidates must either overlap in median y, // or part or candidate must be an acceptable diacritic. 
- if (!part->VSignificantCoreOverlap(*candidate) && !part->OKDiacriticMerge(*candidate, debug) && + if (!part->VSignificantCoreOverlap(*candidate) && + !part->OKDiacriticMerge(*candidate, debug) && !candidate->OKDiacriticMerge(*part, debug)) { if (debug) { tprintf("Candidate fails overlap and diacritic tests!\n"); @@ -221,7 +230,8 @@ static bool OKMergeCandidate(const ColPartition *part, const ColPartition *candi // the overlap with them uncombined. // An overlap is not counted if passes the OKMergeOverlap test with ok_overlap // as the pixel overlap limit. merge1 and merge2 must both be non-nullptr. -static int IncreaseInOverlap(const ColPartition *merge1, const ColPartition *merge2, int ok_overlap, +static int IncreaseInOverlap(const ColPartition *merge1, + const ColPartition *merge2, int ok_overlap, ColPartition_CLIST *parts) { ASSERT_HOST(merge1 != nullptr && merge2 != nullptr); int total_area = 0; @@ -236,7 +246,8 @@ static int IncreaseInOverlap(const ColPartition *merge1, const ColPartition *mer TBOX part_box = part->bounding_box(); // Compute the overlap of the merged box with part. int overlap_area = part_box.intersection(merged_box).area(); - if (overlap_area > 0 && !part->OKMergeOverlap(*merge1, *merge2, ok_overlap, false)) { + if (overlap_area > 0 && + !part->OKMergeOverlap(*merge1, *merge2, ok_overlap, false)) { total_area += overlap_area; // Subtract the overlap of merge1 and merge2 individually. 
overlap_area = part_box.intersection(merge1->bounding_box()).area(); @@ -289,7 +300,8 @@ static bool TestCompatibleCandidates(const ColPartition &part, bool debug, ColPartition_C_IT it2(it); for (it2.mark_cycle_pt(); !it2.cycled_list(); it2.forward()) { ColPartition *candidate2 = it2.data(); - if (candidate2 != candidate && !OKMergeCandidate(candidate, candidate2, false)) { + if (candidate2 != candidate && + !OKMergeCandidate(candidate, candidate2, false)) { if (debug) { tprintf("NC overlap failed:Candidate:"); candidate2->bounding_box().print(); @@ -341,7 +353,8 @@ int ColPartitionGrid::ComputeTotalOverlap(ColPartitionGrid **overlap_grid) { // Finds all the ColPartitions in the grid that overlap with the given // box and returns them SortByBoxLeft(ed) and uniqued in the given list. // Any partition equal to not_this (may be nullptr) is excluded. -void ColPartitionGrid::FindOverlappingPartitions(const TBOX &box, const ColPartition *not_this, +void ColPartitionGrid::FindOverlappingPartitions(const TBOX &box, + const ColPartition *not_this, ColPartition_CLIST *parts) { ColPartitionGridSearch rsearch(this); rsearch.StartRectSearch(box); @@ -396,7 +409,8 @@ void ColPartitionGrid::FindOverlappingPartitions(const TBOX &box, const ColParti // in overlap, or tightly spaced text would end up in bits. ColPartition *ColPartitionGrid::BestMergeCandidate( const ColPartition *part, ColPartition_CLIST *candidates, bool debug, - std::function confirm_cb, + const std::function + &confirm_cb, int *overlap_increase) { if (overlap_increase != nullptr) { *overlap_increase = 0; @@ -404,7 +418,8 @@ ColPartition *ColPartitionGrid::BestMergeCandidate( if (candidates->empty()) { return nullptr; } - int ok_overlap = static_cast(kTinyEnoughTextlineOverlapFraction * gridsize() + 0.5); + int ok_overlap = + static_cast(kTinyEnoughTextlineOverlapFraction * gridsize() + 0.5); // The best neighbour to merge with is the one that causes least // total pairwise overlap among all the neighbours. 
// If more than one offers the same total overlap, choose the one @@ -424,8 +439,8 @@ ColPartition *ColPartitionGrid::BestMergeCandidate( // we need anything that might be overlapped by the merged box. FindOverlappingPartitions(full_box, part, &neighbours); if (debug) { - tprintf("Finding best merge candidate from %d, %d neighbours for box:", candidates->length(), - neighbours.length()); + tprintf("Finding best merge candidate from %d, %d neighbours for box:", + candidates->length(), neighbours.length()); part_box.print(); } // If the best increase in overlap is positive, then we also check the @@ -434,7 +449,8 @@ ColPartition *ColPartitionGrid::BestMergeCandidate( // non-candidate overlap is better than the best overlap, then return // the worst non-candidate overlap instead. ColPartition_CLIST non_candidate_neighbours; - non_candidate_neighbours.set_subtract(SortByBoxLeft, true, &neighbours, candidates); + non_candidate_neighbours.set_subtract(SortByBoxLeft, true, + &neighbours, candidates); int worst_nc_increase = 0; int best_increase = INT32_MAX; int best_area = 0; @@ -454,8 +470,8 @@ ColPartition *ColPartitionGrid::BestMergeCandidate( best_increase = increase; best_area = cand_box.bounding_union(part_box).area() - cand_box.area(); if (debug) { - tprintf("New best merge candidate has increase %d, area %d, over box:", increase, - best_area); + tprintf("New best merge candidate has increase %d, area %d, over box:", + increase, best_area); full_box.print(); candidate->Print(); } @@ -466,7 +482,8 @@ ColPartition *ColPartitionGrid::BestMergeCandidate( best_candidate = candidate; } } - increase = IncreaseInOverlap(part, candidate, ok_overlap, &non_candidate_neighbours); + increase = IncreaseInOverlap(part, candidate, ok_overlap, + &non_candidate_neighbours); if (increase > worst_nc_increase) { worst_nc_increase = increase; } @@ -478,7 +495,8 @@ ColPartition *ColPartitionGrid::BestMergeCandidate( // but only if each candidate is either a good diacritic merge with 
part, // or an ok merge candidate with all the others. // See TestCompatibleCandidates for more explanation and a picture. - if (worst_nc_increase < best_increase && TestCompatibleCandidates(*part, debug, candidates)) { + if (worst_nc_increase < best_increase && + TestCompatibleCandidates(*part, debug, candidates)) { best_increase = worst_nc_increase; } } @@ -490,7 +508,8 @@ ColPartition *ColPartitionGrid::BestMergeCandidate( // Helper to remove the given box from the given partition, put it in its // own partition, and add to the partition list. -static void RemoveBadBox(BLOBNBOX *box, ColPartition *part, ColPartition_LIST *part_list) { +static void RemoveBadBox(BLOBNBOX *box, ColPartition *part, + ColPartition_LIST *part_list) { part->RemoveBox(box); ColPartition::MakeBigPartition(box, part_list); } @@ -501,8 +520,10 @@ static void RemoveBadBox(BLOBNBOX *box, ColPartition *part, ColPartition_LIST *p // Blobs that cause overlaps get removed, put in individual partitions // and added to the big_parts list. They are most likely characters on // 2 textlines that touch, or something big like a dropcap. -void ColPartitionGrid::SplitOverlappingPartitions(ColPartition_LIST *big_parts) { - int ok_overlap = static_cast(kTinyEnoughTextlineOverlapFraction * gridsize() + 0.5); +void ColPartitionGrid::SplitOverlappingPartitions( + ColPartition_LIST *big_parts) { + int ok_overlap = + static_cast(kTinyEnoughTextlineOverlapFraction * gridsize() + 0.5); // Iterate the ColPartitions in the grid. 
ColPartitionGridSearch gsearch(this); gsearch.StartFullSearch(); @@ -534,7 +555,8 @@ void ColPartitionGrid::SplitOverlappingPartitions(ColPartition_LIST *big_parts) BLOBNBOX *excluded = part->BiggestBox(); TBOX shrunken = part->BoundsWithoutBox(excluded); if (!shrunken.overlap(neighbour_box) && - excluded->bounding_box().height() > kBigPartSizeRatio * shrunken.height()) { + excluded->bounding_box().height() > + kBigPartSizeRatio * shrunken.height()) { // Removing the biggest box fixes the overlap, so do it! gsearch.RemoveBBox(); RemoveBadBox(excluded, part, big_parts); @@ -550,7 +572,8 @@ void ColPartitionGrid::SplitOverlappingPartitions(ColPartition_LIST *big_parts) BLOBNBOX *excluded = neighbour->BiggestBox(); TBOX shrunken = neighbour->BoundsWithoutBox(excluded); if (!shrunken.overlap(box) && - excluded->bounding_box().height() > kBigPartSizeRatio * shrunken.height()) { + excluded->bounding_box().height() > + kBigPartSizeRatio * shrunken.height()) { // Removing the biggest box fixes the overlap, so do it! rsearch.RemoveBBox(); RemoveBadBox(excluded, neighbour, big_parts); @@ -562,7 +585,8 @@ void ColPartitionGrid::SplitOverlappingPartitions(ColPartition_LIST *big_parts) int part_overlap_count = part->CountOverlappingBoxes(neighbour_box); int neighbour_overlap_count = neighbour->CountOverlappingBoxes(box); ColPartition *right_part = nullptr; - if (neighbour_overlap_count <= part_overlap_count || part->IsSingleton()) { + if (neighbour_overlap_count <= part_overlap_count || + part->IsSingleton()) { // Try to split the neighbour to reduce overlap. BLOBNBOX *split_blob = neighbour->OverlapSplitBlob(box); if (split_blob != nullptr) { @@ -608,15 +632,18 @@ void ColPartitionGrid::SplitOverlappingPartitions(ColPartition_LIST *big_parts) // nontext_map, which is used to prevent the spread of text neighbourhoods // into images. // Returns true if anything was changed. 
-bool ColPartitionGrid::GridSmoothNeighbours(BlobTextFlowType source_type, Image nontext_map, - const TBOX &im_box, const FCOORD &rotation) { +bool ColPartitionGrid::GridSmoothNeighbours(BlobTextFlowType source_type, + Image nontext_map, + const TBOX &im_box, + const FCOORD &rotation) { // Iterate the ColPartitions in the grid. ColPartitionGridSearch gsearch(this); gsearch.StartFullSearch(); ColPartition *part; bool any_changed = false; while ((part = gsearch.NextFullSearch()) != nullptr) { - if (part->flow() != source_type || BLOBNBOX::IsLineType(part->blob_type())) { + if (part->flow() != source_type || + BLOBNBOX::IsLineType(part->blob_type())) { continue; } const TBOX &box = part->bounding_box(); @@ -658,7 +685,8 @@ void ColPartitionGrid::ReflectInYAxis() { // it into proper blocks or columns. // TODO(rays) some kind of sort function would be useful and probably better // than the default here, which is to sort by order of the grid search. -void ColPartitionGrid::ExtractPartitionsAsBlocks(BLOCK_LIST *blocks, TO_BLOCK_LIST *to_blocks) { +void ColPartitionGrid::ExtractPartitionsAsBlocks(BLOCK_LIST *blocks, + TO_BLOCK_LIST *to_blocks) { TO_BLOCK_IT to_block_it(to_blocks); BLOCK_IT block_it(blocks); // All partitions will be put on this list and deleted on return. @@ -672,8 +700,10 @@ void ColPartitionGrid::ExtractPartitionsAsBlocks(BLOCK_LIST *blocks, TO_BLOCK_LI part_it.add_after_then_move(part); // The partition has to be at least vaguely like text. BlobRegionType blob_type = part->blob_type(); - if (BLOBNBOX::IsTextType(blob_type) || (blob_type == BRT_UNKNOWN && part->boxes_count() > 1)) { - PolyBlockType type = blob_type == BRT_VERT_TEXT ? PT_VERTICAL_TEXT : PT_FLOWING_TEXT; + if (BLOBNBOX::IsTextType(blob_type) || + (blob_type == BRT_UNKNOWN && part->boxes_count() > 1)) { + PolyBlockType type = + blob_type == BRT_VERT_TEXT ? PT_VERTICAL_TEXT : PT_FLOWING_TEXT; // Get metrics from the row that will be used for the block. 
TBOX box = part->bounding_box(); int median_width = part->median_width(); @@ -685,7 +715,8 @@ void ColPartitionGrid::ExtractPartitionsAsBlocks(BLOCK_LIST *blocks, TO_BLOCK_LI part->DeleteBoxes(); continue; } - auto *block = new BLOCK("", true, 0, 0, box.left(), box.bottom(), box.right(), box.top()); + auto *block = new BLOCK("", true, 0, 0, box.left(), box.bottom(), + box.right(), box.top()); block->pdblk.set_poly_block(new POLY_BLOCK(box, type)); auto *to_block = new TO_BLOCK(block); TO_ROW_IT row_it(to_block->get_rows()); @@ -780,7 +811,8 @@ bool ColPartitionGrid::MakeColPartSets(PartSetVector *part_sets) { bool any_parts_found = false; while ((part = gsearch.NextFullSearch()) != nullptr) { BlobRegionType blob_type = part->blob_type(); - if (blob_type != BRT_NOISE && (blob_type != BRT_UNKNOWN || !part->boxes()->singleton())) { + if (blob_type != BRT_NOISE && + (blob_type != BRT_UNKNOWN || !part->boxes()->singleton())) { int grid_x, grid_y; const TBOX &part_box = part->bounding_box(); GridCoords(part_box.left(), part_box.bottom(), &grid_x, &grid_y); @@ -815,11 +847,13 @@ ColPartitionSet *ColPartitionGrid::MakeSingleColumnSet(WidthCallback cb) { ColPartition *part; while ((part = gsearch.NextFullSearch()) != nullptr) { BlobRegionType blob_type = part->blob_type(); - if (blob_type != BRT_NOISE && (blob_type != BRT_UNKNOWN || !part->boxes()->singleton())) { + if (blob_type != BRT_NOISE && + (blob_type != BRT_UNKNOWN || !part->boxes()->singleton())) { // Consider for single column. 
BlobTextFlowType flow = part->flow(); - if ((blob_type == BRT_TEXT && (flow == BTFT_STRONG_CHAIN || flow == BTFT_CHAIN || - flow == BTFT_LEADER || flow == BTFT_TEXT_ON_IMAGE)) || + if ((blob_type == BRT_TEXT && + (flow == BTFT_STRONG_CHAIN || flow == BTFT_CHAIN || + flow == BTFT_LEADER || flow == BTFT_TEXT_ON_IMAGE)) || blob_type == BRT_RECTIMAGE || blob_type == BRT_POLYIMAGE) { if (single_column_part == nullptr) { single_column_part = part->ShallowCopy(); @@ -923,7 +957,8 @@ void ColPartitionGrid::ReTypeBlobs(BLOBNBOX_LIST *im_blobs) { // The boxes within the partitions have changed (by deskew) so recompute // the bounds of all the partitions and reinsert them into the grid. -void ColPartitionGrid::RecomputeBounds(int gridsize, const ICOORD &bleft, const ICOORD &tright, +void ColPartitionGrid::RecomputeBounds(int gridsize, const ICOORD &bleft, + const ICOORD &tright, const ICOORD &vertical) { ColPartition_LIST saved_parts; ColPartition_IT part_it(&saved_parts); @@ -957,7 +992,8 @@ void ColPartitionGrid::GridFindMargins(ColPartitionSet **best_columns) { ColPartition *part; while ((part = gsearch.NextFullSearch()) != nullptr) { // Set up a rectangle search x-bounded by the column and y by the part. - ColPartitionSet *columns = best_columns != nullptr ? best_columns[gsearch.GridY()] : nullptr; + ColPartitionSet *columns = + best_columns != nullptr ? best_columns[gsearch.GridY()] : nullptr; FindPartitionMargins(columns, part); const TBOX &box = part->bounding_box(); if (AlignedBlob::WithinTestRegion(2, box.left(), box.bottom())) { @@ -972,7 +1008,8 @@ void ColPartitionGrid::GridFindMargins(ColPartitionSet **best_columns) { // best_columns, which may be nullptr, is an array of pointers indicating the // column set at each y-coordinate in the grid. // best_columns is usually the best_columns_ member of ColumnFinder. 
-void ColPartitionGrid::ListFindMargins(ColPartitionSet **best_columns, ColPartition_LIST *parts) { +void ColPartitionGrid::ListFindMargins(ColPartitionSet **best_columns, + ColPartition_LIST *parts) { ColPartition_IT part_it(parts); for (part_it.mark_cycle_pt(); !part_it.cycled_list(); part_it.forward()) { ColPartition *part = part_it.data(); @@ -1050,15 +1087,18 @@ void ColPartitionGrid::FindFigureCaptions() { while ((part = gsearch.NextFullSearch()) != nullptr) { if (part->IsImageType()) { const TBOX &part_box = part->bounding_box(); - bool debug = AlignedBlob::WithinTestRegion(2, part_box.left(), part_box.bottom()); + bool debug = + AlignedBlob::WithinTestRegion(2, part_box.left(), part_box.bottom()); ColPartition *best_caption = nullptr; int best_dist = 0; // Distance to best_caption. int best_upper = 0; // Direction of best_caption. // Handle both lower and upper directions. for (int upper = 0; upper < 2; ++upper) { - ColPartition_C_IT partner_it(upper ? part->upper_partners() : part->lower_partners()); + ColPartition_C_IT partner_it(upper ? part->upper_partners() + : part->lower_partners()); // If there are no image partners, then this direction is ok. - for (partner_it.mark_cycle_pt(); !partner_it.cycled_list(); partner_it.forward()) { + for (partner_it.mark_cycle_pt(); !partner_it.cycled_list(); + partner_it.forward()) { ColPartition *partner = partner_it.data(); if (partner->IsImageType()) { break; @@ -1068,7 +1108,8 @@ void ColPartitionGrid::FindFigureCaptions() { continue; } // Find the nearest totally overlapping text partner. 
- for (partner_it.mark_cycle_pt(); !partner_it.cycled_list(); partner_it.forward()) { + for (partner_it.mark_cycle_pt(); !partner_it.cycled_list(); + partner_it.forward()) { ColPartition *partner = partner_it.data(); if (!partner->IsTextType() || partner->type() == PT_TABLE) { continue; @@ -1080,7 +1121,8 @@ void ColPartitionGrid::FindFigureCaptions() { tprintf("Considering partner:"); partner_box.print(); } - if (partner_box.left() >= part_box.left() && partner_box.right() <= part_box.right()) { + if (partner_box.left() >= part_box.left() && + partner_box.right() <= part_box.right()) { int dist = partner_box.y_gap(part_box); if (best_caption == nullptr || dist < best_dist) { best_dist = dist; @@ -1106,7 +1148,8 @@ void ColPartitionGrid::FindFigureCaptions() { ColPartition *end_partner = nullptr; ColPartition *next_partner = nullptr; for (ColPartition *partner = best_caption; - partner != nullptr && line_count <= kMaxCaptionLines; partner = next_partner) { + partner != nullptr && line_count <= kMaxCaptionLines; + partner = next_partner) { if (!partner->IsTextType()) { end_partner = partner; break; @@ -1115,7 +1158,8 @@ void ColPartitionGrid::FindFigureCaptions() { total_height += partner->bounding_box().height(); next_partner = partner->SingletonPartner(best_upper); if (next_partner != nullptr) { - int gap = partner->bounding_box().y_gap(next_partner->bounding_box()); + int gap = + partner->bounding_box().y_gap(next_partner->bounding_box()); if (gap > biggest_gap) { biggest_gap = gap; end_partner = next_partner; @@ -1132,8 +1176,8 @@ void ColPartitionGrid::FindFigureCaptions() { } } if (debug) { - tprintf("Line count=%d, biggest gap %d, smallest%d, mean height %d\n", line_count, - biggest_gap, smallest_gap, mean_height); + tprintf("Line count=%d, biggest gap %d, smallest%d, mean height %d\n", + line_count, biggest_gap, smallest_gap, mean_height); if (end_partner != nullptr) { tprintf("End partner:"); end_partner->bounding_box().print(); @@ -1144,7 +1188,8 @@ void 
ColPartitionGrid::FindFigureCaptions() { } if (line_count <= kMaxCaptionLines) { // This is a qualified caption. Mark the text as caption. - for (ColPartition *partner = best_caption; partner != nullptr && partner != end_partner; + for (ColPartition *partner = best_caption; + partner != nullptr && partner != end_partner; partner = next_partner) { partner->set_type(PT_CAPTION_TEXT); partner->SetBlobTypes(); @@ -1232,7 +1277,8 @@ void ColPartitionGrid::FindPartitionPartners(bool upper, ColPartition *part) { // Finds the best partner in the given direction for the given partition. // Stores the result with AddPartner. -void ColPartitionGrid::FindVPartitionPartners(bool to_the_left, ColPartition *part) { +void ColPartitionGrid::FindVPartitionPartners(bool to_the_left, + ColPartition *part) { if (part->type() == PT_NOISE) { return; // Noise is not allowed to partner anything. } @@ -1292,7 +1338,8 @@ void ColPartitionGrid::RefinePartitionPartners(bool get_desperate) { gsearch.StartFullSearch(); ColPartition *part; while ((part = gsearch.NextFullSearch()) != nullptr) { - part->RefinePartners(static_cast(type), get_desperate, this); + part->RefinePartners(static_cast(type), get_desperate, + this); // Iterator may have been messed up by a merge. gsearch.RepositionIterator(); } @@ -1304,9 +1351,11 @@ void ColPartitionGrid::RefinePartitionPartners(bool get_desperate) { // Finds and returns a list of candidate ColPartitions to merge with part. // The candidates must overlap search_box, and when merged must not // overlap any other partitions that are not overlapped by each individually. 
-void ColPartitionGrid::FindMergeCandidates(const ColPartition *part, const TBOX &search_box, - bool debug, ColPartition_CLIST *candidates) { - int ok_overlap = static_cast(kTinyEnoughTextlineOverlapFraction * gridsize() + 0.5); +void ColPartitionGrid::FindMergeCandidates(const ColPartition *part, + const TBOX &search_box, bool debug, + ColPartition_CLIST *candidates) { + int ok_overlap = + static_cast(kTinyEnoughTextlineOverlapFraction * gridsize() + 0.5); const TBOX &part_box = part->bounding_box(); // Now run the rect search. ColPartitionGridSearch rsearch(this); @@ -1393,7 +1442,8 @@ void ColPartitionGrid::FindMergeCandidates(const ColPartition *part, const TBOX // into images. // Returns true if the partition was changed. bool ColPartitionGrid::SmoothRegionType(Image nontext_map, const TBOX &im_box, - const FCOORD &rerotation, bool debug, ColPartition *part) { + const FCOORD &rerotation, bool debug, + ColPartition *part) { const TBOX &part_box = part->bounding_box(); if (debug) { tprintf("Smooothing part at:"); @@ -1409,8 +1459,8 @@ bool ColPartitionGrid::SmoothRegionType(Image nontext_map, const TBOX &im_box, for (int d = 0; d < BND_COUNT; ++d) { int dist; auto dir = static_cast(d); - BlobRegionType type = - SmoothInOneDirection(dir, nontext_map, im_box, rerotation, debug, *part, &dist); + BlobRegionType type = SmoothInOneDirection(dir, nontext_map, im_box, + rerotation, debug, *part, &dist); if (debug) { tprintf("Result in dir %d = %d at dist %d\n", dir, type, dist); } @@ -1459,8 +1509,9 @@ bool ColPartitionGrid::SmoothRegionType(Image nontext_map, const TBOX &im_box, // Sets up a search box based on the part_box, padded in all directions // except direction. Also setup dist_scaling to weight x,y distances according // to the given direction. 
-static void ComputeSearchBoxAndScaling(BlobNeighbourDir direction, const TBOX &part_box, - int min_padding, TBOX *search_box, ICOORD *dist_scaling) { +static void ComputeSearchBoxAndScaling(BlobNeighbourDir direction, + const TBOX &part_box, int min_padding, + TBOX *search_box, ICOORD *dist_scaling) { *search_box = part_box; // Generate a pad value based on the min dimension of part_box, but at least // min_padding and then scaled by kMaxPadFactor. @@ -1511,25 +1562,26 @@ enum NeighbourPartitionType { // partitions that makes a decisive result (if any) and returns the type // and the distance of the collection. If there are any pixels in the // nontext_map, then the decision is biased towards image. -BlobRegionType ColPartitionGrid::SmoothInOneDirection(BlobNeighbourDir direction, Image nontext_map, - const TBOX &im_box, const FCOORD &rerotation, - bool debug, const ColPartition &part, - int *best_distance) { +BlobRegionType ColPartitionGrid::SmoothInOneDirection( + BlobNeighbourDir direction, Image nontext_map, const TBOX &im_box, + const FCOORD &rerotation, bool debug, const ColPartition &part, + int *best_distance) { // Set up a rectangle search bounded by the part. 
const TBOX &part_box = part.bounding_box(); TBOX search_box; ICOORD dist_scaling; - ComputeSearchBoxAndScaling(direction, part_box, gridsize(), &search_box, &dist_scaling); - bool image_region = - ImageFind::CountPixelsInRotatedBox(search_box, im_box, rerotation, nontext_map) > 0; + ComputeSearchBoxAndScaling(direction, part_box, gridsize(), &search_box, + &dist_scaling); + bool image_region = ImageFind::CountPixelsInRotatedBox( + search_box, im_box, rerotation, nontext_map) > 0; std::vector dists[NPT_COUNT]; - AccumulatePartDistances(part, dist_scaling, search_box, nontext_map, im_box, rerotation, debug, - dists); + AccumulatePartDistances(part, dist_scaling, search_box, nontext_map, im_box, + rerotation, debug, dists); // By iteratively including the next smallest distance across the vectors, // (as in a merge sort) we can use the vector indices as counts of each type // and find the nearest set of objects that give us a definite decision. - int counts[NPT_COUNT]; - memset(counts, 0, sizeof(counts[0]) * NPT_COUNT); + unsigned counts[NPT_COUNT]; + memset(counts, 0, sizeof(counts)); // If there is image in the search box, tip the balance in image's favor. int image_bias = image_region ? kSmoothDecisionMargin / 2 : 0; BlobRegionType text_dir = part.blob_type(); @@ -1551,33 +1603,35 @@ BlobRegionType ColPartitionGrid::SmoothInOneDirection(BlobNeighbourDir direction } *best_distance = min_dist; if (debug) { - tprintf("Totals: htext=%d+%d, vtext=%d+%d, image=%d+%d, at dist=%d\n", counts[NPT_HTEXT], - counts[NPT_WEAK_HTEXT], counts[NPT_VTEXT], counts[NPT_WEAK_VTEXT], counts[NPT_IMAGE], - image_bias, min_dist); + tprintf("Totals: htext=%u+%u, vtext=%u+%u, image=%u+%u, at dist=%d\n", + counts[NPT_HTEXT], counts[NPT_WEAK_HTEXT], counts[NPT_VTEXT], + counts[NPT_WEAK_VTEXT], counts[NPT_IMAGE], image_bias, min_dist); } // See if we have a decision yet. 
- int image_count = counts[NPT_IMAGE]; - int htext_score = - counts[NPT_HTEXT] + counts[NPT_WEAK_HTEXT] - (image_count + counts[NPT_WEAK_VTEXT]); - int vtext_score = - counts[NPT_VTEXT] + counts[NPT_WEAK_VTEXT] - (image_count + counts[NPT_WEAK_HTEXT]); + auto image_count = counts[NPT_IMAGE]; + auto htext_score = counts[NPT_HTEXT] + counts[NPT_WEAK_HTEXT] - + (image_count + counts[NPT_WEAK_VTEXT]); + auto vtext_score = counts[NPT_VTEXT] + counts[NPT_WEAK_VTEXT] - + (image_count + counts[NPT_WEAK_HTEXT]); if (image_count > 0 && image_bias - htext_score >= kSmoothDecisionMargin && image_bias - vtext_score >= kSmoothDecisionMargin) { *best_distance = dists[NPT_IMAGE][0]; - if (!dists[NPT_WEAK_VTEXT].empty() && *best_distance > dists[NPT_WEAK_VTEXT][0]) { + if (!dists[NPT_WEAK_VTEXT].empty() && + *best_distance > dists[NPT_WEAK_VTEXT][0]) { *best_distance = dists[NPT_WEAK_VTEXT][0]; } - if (!dists[NPT_WEAK_HTEXT].empty() && *best_distance > dists[NPT_WEAK_HTEXT][0]) { + if (!dists[NPT_WEAK_HTEXT].empty() && + *best_distance > dists[NPT_WEAK_HTEXT][0]) { *best_distance = dists[NPT_WEAK_HTEXT][0]; } return BRT_POLYIMAGE; } - if ((text_dir != BRT_VERT_TEXT || flow_type != BTFT_CHAIN) && counts[NPT_HTEXT] > 0 && - htext_score >= kSmoothDecisionMargin) { + if ((text_dir != BRT_VERT_TEXT || flow_type != BTFT_CHAIN) && + counts[NPT_HTEXT] > 0 && htext_score >= kSmoothDecisionMargin) { *best_distance = dists[NPT_HTEXT][0]; return BRT_TEXT; - } else if ((text_dir != BRT_TEXT || flow_type != BTFT_CHAIN) && counts[NPT_VTEXT] > 0 && - vtext_score >= kSmoothDecisionMargin) { + } else if ((text_dir != BRT_TEXT || flow_type != BTFT_CHAIN) && + counts[NPT_VTEXT] > 0 && vtext_score >= kSmoothDecisionMargin) { *best_distance = dists[NPT_VTEXT][0]; return BRT_VERT_TEXT; } @@ -1592,11 +1646,10 @@ BlobRegionType ColPartitionGrid::SmoothInOneDirection(BlobNeighbourDir direction // The nontext_map (+im_box, rerotation) is used to make text invisible if // there is non-text in between. 
// dists must be an array of vectors of size NPT_COUNT. -void ColPartitionGrid::AccumulatePartDistances(const ColPartition &base_part, - const ICOORD &dist_scaling, const TBOX &search_box, - Image nontext_map, const TBOX &im_box, - const FCOORD &rerotation, bool debug, - std::vector *dists) { +void ColPartitionGrid::AccumulatePartDistances( + const ColPartition &base_part, const ICOORD &dist_scaling, + const TBOX &search_box, Image nontext_map, const TBOX &im_box, + const FCOORD &rerotation, bool debug, std::vector *dists) { const TBOX &part_box = base_part.bounding_box(); ColPartitionGridSearch rsearch(this); rsearch.SetUniqueMode(true); @@ -1605,14 +1658,16 @@ void ColPartitionGrid::AccumulatePartDistances(const ColPartition &base_part, // Search for compatible neighbours with a similar strokewidth, but not // on the other side of a tab vector. while ((neighbour = rsearch.NextRectSearch()) != nullptr) { - if (neighbour->IsUnMergeableType() || !base_part.ConfirmNoTabViolation(*neighbour) || + if (neighbour->IsUnMergeableType() || + !base_part.ConfirmNoTabViolation(*neighbour) || neighbour == &base_part) { continue; } TBOX nbox = neighbour->bounding_box(); BlobRegionType n_type = neighbour->blob_type(); if ((n_type == BRT_TEXT || n_type == BRT_VERT_TEXT) && - !ImageFind::BlankImageInBetween(part_box, nbox, im_box, rerotation, nontext_map)) { + !ImageFind::BlankImageInBetween(part_box, nbox, im_box, rerotation, + nontext_map)) { continue; // Text not visible the other side of image. } if (BLOBNBOX::IsLineType(n_type)) { @@ -1673,7 +1728,8 @@ void ColPartitionGrid::AccumulatePartDistances(const ColPartition &base_part, // neighbours that vertically overlap significantly. // columns may be nullptr, and indicates the assigned column structure this // is applicable to part. 
-void ColPartitionGrid::FindPartitionMargins(ColPartitionSet *columns, ColPartition *part) { +void ColPartitionGrid::FindPartitionMargins(ColPartitionSet *columns, + ColPartition *part) { // Set up a rectangle search x-bounded by the column and y by the part. TBOX box = part->bounding_box(); int y = part->MidY(); @@ -1693,19 +1749,20 @@ void ColPartitionGrid::FindPartitionMargins(ColPartitionSet *columns, ColPartiti left_margin -= kColumnWidthFactor; right_margin += kColumnWidthFactor; // Search for ColPartitions that reduce the margin. - left_margin = - FindMargin(box.left() + box.height(), true, left_margin, box.bottom(), box.top(), part); + left_margin = FindMargin(box.left() + box.height(), true, left_margin, + box.bottom(), box.top(), part); part->set_left_margin(left_margin); // Search for ColPartitions that reduce the margin. - right_margin = - FindMargin(box.right() - box.height(), false, right_margin, box.bottom(), box.top(), part); + right_margin = FindMargin(box.right() - box.height(), false, right_margin, + box.bottom(), box.top(), part); part->set_right_margin(right_margin); } // Starting at x, and going in the specified direction, up to x_limit, finds // the margin for the given y range by searching sideways, // and ignoring not_this. -int ColPartitionGrid::FindMargin(int x, bool right_to_left, int x_limit, int y_bottom, int y_top, +int ColPartitionGrid::FindMargin(int x, bool right_to_left, int x_limit, + int y_bottom, int y_top, const ColPartition *not_this) { int height = y_top - y_bottom; // Iterate the ColPartitions in the grid. diff --git a/src/textord/colpartitiongrid.h b/src/textord/colpartitiongrid.h index e2b71c7bf..939fe1f7e 100644 --- a/src/textord/colpartitiongrid.h +++ b/src/textord/colpartitiongrid.h @@ -47,16 +47,18 @@ public: // calls the confirm_cb to check any more rules. If the confirm_cb returns // true, then the partitions are merged. // Both callbacks are deleted before returning. 
- void Merges(std::function box_cb, - std::function confirm_cb); + void Merges(const std::function &box_cb, + const std::function &confirm_cb); // For the given partition, calls the box_cb permanent callback // to compute the search box, searches the box, and if a candidate is found, // calls the confirm_cb to check any more rules. If the confirm_cb returns // true, then the partitions are merged. // Returns true if the partition is consumed by one or more merges. - bool MergePart(std::function box_cb, - std::function confirm_cb, + bool MergePart(const std::function &box_cb, + const std::function &confirm_cb, ColPartition *part); // Computes and returns the total overlap of all partitions in the grid. @@ -78,7 +80,8 @@ public: // See colpartitiongrid.cpp for a diagram. ColPartition *BestMergeCandidate( const ColPartition *part, ColPartition_CLIST *candidates, bool debug, - std::function confirm_cb, + const std::function + &confirm_cb, int *overlap_increase); // Split partitions where it reduces overlap between their bounding boxes. @@ -98,8 +101,8 @@ public: // nontext_map, which is used to prevent the spread of text neighbourhoods // into images. // Returns true if anything was changed. - bool GridSmoothNeighbours(BlobTextFlowType source_type, Image nontext_map, const TBOX &im_box, - const FCOORD &rerotation); + bool GridSmoothNeighbours(BlobTextFlowType source_type, Image nontext_map, + const TBOX &im_box, const FCOORD &rerotation); // Reflects the grid and its colpartitions in the y-axis, assuming that // all blob boxes have already been done. @@ -150,7 +153,8 @@ public: // Improves the margins of the ColPartitions in the list by calling // FindPartitionMargins on each. - void ListFindMargins(ColPartitionSet **best_columns, ColPartition_LIST *parts); + void ListFindMargins(ColPartitionSet **best_columns, + ColPartition_LIST *parts); // Deletes all the partitions in the grid after disowning all the blobs. 
void DeleteParts(); @@ -185,8 +189,8 @@ private: // Finds and returns a list of candidate ColPartitions to merge with part. // The candidates must overlap search_box, and when merged must not // overlap any other partitions that are not overlapped by each individually. - void FindMergeCandidates(const ColPartition *part, const TBOX &search_box, bool debug, - ColPartition_CLIST *candidates); + void FindMergeCandidates(const ColPartition *part, const TBOX &search_box, + bool debug, ColPartition_CLIST *candidates); // Smoothes the region type/flow type of the given part by looking at local // neighbours and the given image mask. Searches a padded rectangle with the @@ -199,7 +203,8 @@ private: // nontext_map, which is used to prevent the spread of text neighbourhoods // into images. // Returns true if the partition was changed. - bool SmoothRegionType(Image nontext_map, const TBOX &im_box, const FCOORD &rerotation, bool debug, + bool SmoothRegionType(Image nontext_map, const TBOX &im_box, + const FCOORD &rerotation, bool debug, ColPartition *part); // Executes the search for SmoothRegionType in a single direction. // Creates a bounding box that is padded in all directions except direction, @@ -207,17 +212,21 @@ private: // partitions that makes a decisive result (if any) and returns the type // and the distance of the collection. If there are any pixels in the // nontext_map, then the decision is biased towards image. 
- BlobRegionType SmoothInOneDirection(BlobNeighbourDir direction, Image nontext_map, - const TBOX &im_box, const FCOORD &rerotation, bool debug, - const ColPartition &part, int *best_distance); + BlobRegionType SmoothInOneDirection(BlobNeighbourDir direction, + Image nontext_map, const TBOX &im_box, + const FCOORD &rerotation, bool debug, + const ColPartition &part, + int *best_distance); // Counts the partitions in the given search_box by appending the gap // distance (scaled by dist_scaling) of the part from the base_part to the // vector of the appropriate type for the partition. Prior to return, the // vectors in the dists array are sorted in increasing order. // dists must be an array of vectors of size NPT_COUNT. - void AccumulatePartDistances(const ColPartition &base_part, const ICOORD &dist_scaling, - const TBOX &search_box, Image nontext_map, const TBOX &im_box, - const FCOORD &rerotation, bool debug, std::vector *dists); + void AccumulatePartDistances(const ColPartition &base_part, + const ICOORD &dist_scaling, + const TBOX &search_box, Image nontext_map, + const TBOX &im_box, const FCOORD &rerotation, + bool debug, std::vector *dists); // Improves the margins of the ColPartition by searching for // neighbours that vertically overlap significantly. @@ -226,8 +235,8 @@ private: // Starting at x, and going in the specified direction, up to x_limit, finds // the margin for the given y range by searching sideways, // and ignoring not_this. - int FindMargin(int x, bool right_to_left, int x_limit, int y_bottom, int y_top, - const ColPartition *not_this); + int FindMargin(int x, bool right_to_left, int x_limit, int y_bottom, + int y_top, const ColPartition *not_this); }; } // namespace tesseract. 
diff --git a/src/textord/colpartitionset.cpp b/src/textord/colpartitionset.cpp index 1252c2aed..a68275dfb 100644 --- a/src/textord/colpartitionset.cpp +++ b/src/textord/colpartitionset.cpp @@ -90,7 +90,8 @@ void ColPartitionSet::RelinquishParts() { } // Attempt to improve this by adding partitions or expanding partitions. -void ColPartitionSet::ImproveColumnCandidate(WidthCallback cb, PartSetVector *src_sets) { +void ColPartitionSet::ImproveColumnCandidate(const WidthCallback &cb, + PartSetVector *src_sets) { int set_size = src_sets->size(); // Iterate over the provided column sets, as each one may have something // to improve this. @@ -140,7 +141,8 @@ void ColPartitionSet::ImproveColumnCandidate(WidthCallback cb, PartSetVector *sr // it was before, so use the tab. part->CopyLeftTab(*col_part, false); part->SetColumnGoodness(cb); - } else if (col_box_left < part_left && (box_width_ok || !part_width_ok)) { + } else if (col_box_left < part_left && + (box_width_ok || !part_width_ok)) { // The box is leaving the good column metric at least as good as // it was before, so use the box. part->CopyLeftTab(*col_part, true); @@ -149,7 +151,8 @@ void ColPartitionSet::ImproveColumnCandidate(WidthCallback cb, PartSetVector *sr part_left = part->left_key(); } if (col_right > part_right && - (part_it.at_last() || part_it.data_relative(1)->left_key() > col_right)) { + (part_it.at_last() || + part_it.data_relative(1)->left_key() > col_right)) { // The right edge is better, so we can possibly expand it. int col_box_right = col_part->BoxRightKey(); bool tab_width_ok = cb(part->KeyWidth(part_left, col_right)); @@ -159,7 +162,8 @@ void ColPartitionSet::ImproveColumnCandidate(WidthCallback cb, PartSetVector *sr // it was before, so use the tab. 
part->CopyRightTab(*col_part, false); part->SetColumnGoodness(cb); - } else if (col_box_right > part_right && (box_width_ok || !part_width_ok)) { + } else if (col_box_right > part_right && + (box_width_ok || !part_width_ok)) { // The box is leaving the good column metric at least as good as // it was before, so use the box. part->CopyRightTab(*col_part, true); @@ -173,8 +177,10 @@ void ColPartitionSet::ImproveColumnCandidate(WidthCallback cb, PartSetVector *sr // If this set is good enough to represent a new partitioning into columns, // add it to the vector of sets, otherwise delete it. -void ColPartitionSet::AddToColumnSetsIfUnique(PartSetVector *column_sets, WidthCallback cb) { - bool debug = TabFind::WithinTestRegion(2, bounding_box_.left(), bounding_box_.bottom()); +void ColPartitionSet::AddToColumnSetsIfUnique(PartSetVector *column_sets, + const WidthCallback &cb) { + bool debug = TabFind::WithinTestRegion(2, bounding_box_.left(), + bounding_box_.bottom()); if (debug) { tprintf("Considering new column candidate:\n"); Print(); @@ -187,7 +193,7 @@ void ColPartitionSet::AddToColumnSetsIfUnique(PartSetVector *column_sets, WidthC delete this; return; } - for (int i = 0; i < column_sets->size(); ++i) { + for (unsigned i = 0; i < column_sets->size(); ++i) { ColPartitionSet *columns = column_sets->at(i); // In ordering the column set candidates, good_coverage_ is king, // followed by good_column_count_ and then bad_coverage_. @@ -222,7 +228,8 @@ void ColPartitionSet::AddToColumnSetsIfUnique(PartSetVector *column_sets, WidthC // Return true if the partitions in other are all compatible with the columns // in this. 
-bool ColPartitionSet::CompatibleColumns(bool debug, ColPartitionSet *other, WidthCallback cb) { +bool ColPartitionSet::CompatibleColumns(bool debug, ColPartitionSet *other, + const WidthCallback &cb) { if (debug) { tprintf("CompatibleColumns testing compatibility\n"); Print(); @@ -288,7 +295,8 @@ bool ColPartitionSet::CompatibleColumns(bool debug, ColPartitionSet *other, Widt if (debug) { int next_right = next_part->bounding_box().right(); tprintf("CompatibleColumns false due to 2 parts of good width\n"); - tprintf("part1 %d-%d, part2 %d-%d\n", left, right, next_left, next_right); + tprintf("part1 %d-%d, part2 %d-%d\n", left, right, next_left, + next_right); right_col->Print(); } return false; @@ -375,7 +383,8 @@ ColPartitionSet *ColPartitionSet::Copy(bool good_only) { } // Return the bounding boxes of columns at the given y-range -void ColPartitionSet::GetColumnBoxes(int y_bottom, int y_top, ColSegment_LIST *segments) { +void ColPartitionSet::GetColumnBoxes(int y_bottom, int y_top, + ColSegment_LIST *segments) { ColPartition_IT it(&parts_); ColSegment_IT col_it(segments); col_it.move_to_last(); @@ -392,7 +401,8 @@ void ColPartitionSet::GetColumnBoxes(int y_bottom, int y_top, ColSegment_LIST *s #ifndef GRAPHICS_DISABLED // Display the edges of the columns at the given y coords. -void ColPartitionSet::DisplayColumnEdges(int y_bottom, int y_top, ScrollView *win) { +void ColPartitionSet::DisplayColumnEdges(int y_bottom, int y_top, + ScrollView *win) { ColPartition_IT it(&parts_); for (it.mark_cycle_pt(); !it.cycled_list(); it.forward()) { ColPartition *part = it.data(); @@ -410,10 +420,9 @@ void ColPartitionSet::DisplayColumnEdges(int y_bottom, int y_top, ScrollView *wi // Column indices are 2n + 1 for real columns (0 based) and even values // represent the gaps in between columns, with 0 being left of the leftmost. // resolution refers to the ppi resolution of the image. 
-ColumnSpanningType ColPartitionSet::SpanningType(int resolution, int left, int right, int height, - int y, int left_margin, int right_margin, - int *first_col, int *last_col, - int *first_spanned_col) { +ColumnSpanningType ColPartitionSet::SpanningType( + int resolution, int left, int right, int height, int y, int left_margin, + int right_margin, int *first_col, int *last_col, int *first_spanned_col) { *first_col = -1; *last_col = -1; *first_spanned_col = -1; @@ -505,7 +514,8 @@ ColumnSpanningType ColPartitionSet::SpanningType(int resolution, int left, int r // columns that do not match and start new ones for the new columns in this. // As ColPartitions are turned into BLOCKs, the used ones are put in // used_parts, as they still need to be referenced in the grid. -void ColPartitionSet::ChangeWorkColumns(const ICOORD &bleft, const ICOORD &tright, int resolution, +void ColPartitionSet::ChangeWorkColumns(const ICOORD &bleft, + const ICOORD &tright, int resolution, ColPartition_LIST *used_parts, WorkingPartSet_LIST *working_set_list) { // Move the input list to a temporary location so we can delete its elements @@ -525,11 +535,12 @@ void ColPartitionSet::ChangeWorkColumns(const ICOORD &bleft, const ICOORD &trigh for (col_it.mark_cycle_pt(); !col_it.cycled_list(); col_it.forward()) { ColPartition *column = col_it.data(); // Any existing column to the left of column is completed. 
- while (!src_it.empty() && ((working_set = src_it.data())->column() == nullptr || - working_set->column()->right_key() <= column->left_key())) { + while (!src_it.empty() && + ((working_set = src_it.data())->column() == nullptr || + working_set->column()->right_key() <= column->left_key())) { src_it.extract(); - working_set->ExtractCompletedBlocks(bleft, tright, resolution, used_parts, &completed_blocks, - &to_blocks); + working_set->ExtractCompletedBlocks(bleft, tright, resolution, used_parts, + &completed_blocks, &to_blocks); delete working_set; src_it.forward(); } @@ -542,7 +553,8 @@ void ColPartitionSet::ChangeWorkColumns(const ICOORD &bleft, const ICOORD &trigh // A matching column gets to stay, and first_new_set gets all the // completed_sets. working_set = src_it.empty() ? nullptr : src_it.data(); - if (working_set != nullptr && working_set->column()->MatchingColumns(*column)) { + if (working_set != nullptr && + working_set->column()->MatchingColumns(*column)) { working_set->set_column(column); dest_it.add_after_then_move(src_it.extract()); src_it.forward(); @@ -557,8 +569,8 @@ void ColPartitionSet::ChangeWorkColumns(const ICOORD &bleft, const ICOORD &trigh // Complete any remaining src working sets. while (!src_it.empty()) { working_set = src_it.extract(); - working_set->ExtractCompletedBlocks(bleft, tright, resolution, used_parts, &completed_blocks, - &to_blocks); + working_set->ExtractCompletedBlocks(bleft, tright, resolution, used_parts, + &completed_blocks, &to_blocks); delete working_set; src_it.forward(); } @@ -573,8 +585,10 @@ void ColPartitionSet::ChangeWorkColumns(const ICOORD &bleft, const ICOORD &trigh } // Accumulate the widths and gaps into the given variables. 
-void ColPartitionSet::AccumulateColumnWidthsAndGaps(int *total_width, int *width_samples, - int *total_gap, int *gap_samples) { +void ColPartitionSet::AccumulateColumnWidthsAndGaps(int *total_width, + int *width_samples, + int *total_gap, + int *gap_samples) { ColPartition_IT it(&parts_); for (it.mark_cycle_pt(); !it.cycled_list(); it.forward()) { ColPartition *part = it.data(); @@ -597,8 +611,9 @@ void ColPartitionSet::Print() { tprintf( "Partition set of %d parts, %d good, coverage=%d+%d" " (%d,%d)->(%d,%d)\n", - it.length(), good_column_count_, good_coverage_, bad_coverage_, bounding_box_.left(), - bounding_box_.bottom(), bounding_box_.right(), bounding_box_.top()); + it.length(), good_column_count_, good_coverage_, bad_coverage_, + bounding_box_.left(), bounding_box_.bottom(), bounding_box_.right(), + bounding_box_.top()); for (it.mark_cycle_pt(); !it.cycled_list(); it.forward()) { ColPartition *part = it.data(); part->Print(); @@ -608,7 +623,8 @@ void ColPartitionSet::Print() { // PRIVATE CODE. // Add the given partition to the list in the appropriate place. -void ColPartitionSet::AddPartition(ColPartition *new_part, ColPartition_IT *it) { +void ColPartitionSet::AddPartition(ColPartition *new_part, + ColPartition_IT *it) { AddPartitionCoverageAndBox(*new_part); int new_right = new_part->right_key(); if (it->data()->left_key() >= new_right) { diff --git a/src/textord/colpartitionset.h b/src/textord/colpartitionset.h index 7c3841831..6a0c0daef 100644 --- a/src/textord/colpartitionset.h +++ b/src/textord/colpartitionset.h @@ -20,9 +20,9 @@ #ifndef TESSERACT_TEXTORD_COLPARTITIONSET_H_ #define TESSERACT_TEXTORD_COLPARTITIONSET_H_ -#include "colpartition.h" // For ColPartition_LIST. -#include "rect.h" // For TBOX. -#include "tabvector.h" // For BLOBNBOX_CLIST. +#include "colpartition.h" // For ColPartition_LIST. +#include "rect.h" // For TBOX. +#include "tabvector.h" // For BLOBNBOX_CLIST. 
namespace tesseract { @@ -71,15 +71,17 @@ public: void RelinquishParts(); // Attempt to improve this by adding partitions or expanding partitions. - void ImproveColumnCandidate(WidthCallback cb, PartSetVector *src_sets); + void ImproveColumnCandidate(const WidthCallback &cb, PartSetVector *src_sets); // If this set is good enough to represent a new partitioning into columns, // add it to the vector of sets, otherwise delete it. - void AddToColumnSetsIfUnique(PartSetVector *column_sets, WidthCallback cb); + void AddToColumnSetsIfUnique(PartSetVector *column_sets, + const WidthCallback &cb); // Return true if the partitions in other are all compatible with the columns // in this. - bool CompatibleColumns(bool debug, ColPartitionSet *other, WidthCallback cb); + bool CompatibleColumns(bool debug, ColPartitionSet *other, + const WidthCallback &cb); // Returns the total width of all blobs in the part_set that do not lie // within an approved column. Used as a cost measure for using this @@ -104,20 +106,22 @@ public: // represent the gaps in between columns, with 0 being left of the leftmost. // resolution refers to the ppi resolution of the image. It may be 0 if only // the first_col and last_col are required. - ColumnSpanningType SpanningType(int resolution, int left, int right, int height, int y, - int left_margin, int right_margin, int *first_col, int *last_col, - int *first_spanned_col); + ColumnSpanningType SpanningType(int resolution, int left, int right, + int height, int y, int left_margin, + int right_margin, int *first_col, + int *last_col, int *first_spanned_col); // The column_set has changed. Close down all in-progress WorkingPartSets in // columns that do not match and start new ones for the new columns in this. // As ColPartitions are turned into BLOCKs, the used ones are put in // used_parts, as they still need to be referenced in the grid. 
- void ChangeWorkColumns(const ICOORD &bleft, const ICOORD &tright, int resolution, - ColPartition_LIST *used_parts, WorkingPartSet_LIST *working_set); + void ChangeWorkColumns(const ICOORD &bleft, const ICOORD &tright, + int resolution, ColPartition_LIST *used_parts, + WorkingPartSet_LIST *working_set); // Accumulate the widths and gaps into the given variables. - void AccumulateColumnWidthsAndGaps(int *total_width, int *width_samples, int *total_gap, - int *gap_samples); + void AccumulateColumnWidthsAndGaps(int *total_width, int *width_samples, + int *total_gap, int *gap_samples); // Provide debug output for this ColPartitionSet and all the ColPartitions. void Print(); diff --git a/src/textord/devanagari_processing.h b/src/textord/devanagari_processing.h index bb4248269..506f69b6d 100644 --- a/src/textord/devanagari_processing.h +++ b/src/textord/devanagari_processing.h @@ -23,10 +23,9 @@ struct Boxa; namespace tesseract { -extern INT_VAR_H(devanagari_split_debuglevel, 0, "Debug level for split shiro-rekha process."); +extern INT_VAR_H(devanagari_split_debuglevel); -extern BOOL_VAR_H(devanagari_split_debugimage, 0, - "Whether to create a debug image for split shiro-rekha process."); +extern BOOL_VAR_H(devanagari_split_debugimage); class TBOX; class DebugPixa; diff --git a/src/textord/drawtord.h b/src/textord/drawtord.h index 48510f54a..d1ad47686 100644 --- a/src/textord/drawtord.h +++ b/src/textord/drawtord.h @@ -28,9 +28,7 @@ namespace tesseract { #define NO_SMD "none" -extern BOOL_VAR_H(textord_show_fixed_cuts, false, "Draw fixed pitch cell boundaries"); -extern STRING_VAR_H(to_debugfile, DEBUG_WIN_NAME, "Name of debugfile"); -extern STRING_VAR_H(to_smdfile, NO_SMD, "Name of SMD file"); +extern BOOL_VAR_H(textord_show_fixed_cuts); extern ScrollView *to_win; extern FILE *to_debug; // Creates a static display window for textord, and returns a pointer to it. 
diff --git a/src/textord/edgblob.cpp b/src/textord/edgblob.cpp index 26b6d2251..ee3e1560f 100644 --- a/src/textord/edgblob.cpp +++ b/src/textord/edgblob.cpp @@ -77,8 +77,8 @@ OL_BUCKETS::OL_BUCKETS(ICOORD bleft, // corners */ C_OUTLINE_LIST *OL_BUCKETS::operator()( // array access - int16_t x, // image coords - int16_t y) { + TDimension x, // image coords + TDimension y) { return &buckets[(y - bl.y()) / BUCKETSIZE * bxdim + (x - bl.x()) / BUCKETSIZE]; } @@ -122,9 +122,8 @@ int32_t OL_BUCKETS::outline_complexity(C_OUTLINE *outline, // parent outline int32_t max_count, // max output int16_t depth // recurion depth ) { - int16_t xmin, xmax; // coord limits - int16_t ymin, ymax; - int16_t xindex, yindex; // current bucket + TDimension xmin, xmax; // coord limits + TDimension ymin, ymax; C_OUTLINE *child; // current child int32_t child_count; // no of children int32_t grandchild_count; // no of grandchildren @@ -141,8 +140,8 @@ int32_t OL_BUCKETS::outline_complexity(C_OUTLINE *outline, // parent outline return max_count + depth; } - for (yindex = ymin; yindex <= ymax; yindex++) { - for (xindex = xmin; xindex <= xmax; xindex++) { + for (auto yindex = ymin; yindex <= ymax; yindex++) { + for (auto xindex = xmin; xindex <= xmax; xindex++) { child_it.set_to_list(&buckets[yindex * bxdim + xindex]); if (child_it.empty()) { continue; @@ -198,9 +197,8 @@ int32_t OL_BUCKETS::count_children( // recursive count int32_t max_count // max output ) { bool parent_box; // could it be boxy - int16_t xmin, xmax; // coord limits - int16_t ymin, ymax; - int16_t xindex, yindex; // current bucket + TDimension xmin, xmax; // coord limits + TDimension ymin, ymax; C_OUTLINE *child; // current child int32_t child_count; // no of children int32_t grandchild_count; // no of grandchildren @@ -221,8 +219,8 @@ int32_t OL_BUCKETS::count_children( // recursive count parent_area = 0; max_parent_area = 0; parent_box = true; - for (yindex = ymin; yindex <= ymax; yindex++) { - for (xindex = xmin; xindex <= 
xmax; xindex++) { + for (auto yindex = ymin; yindex <= ymax; yindex++) { + for (auto xindex = xmin; xindex <= xmax; xindex++) { child_it.set_to_list(&buckets[yindex * bxdim + xindex]); if (child_it.empty()) { continue; @@ -321,9 +319,8 @@ void OL_BUCKETS::extract_children( // recursive count C_OUTLINE *outline, // parent outline C_OUTLINE_IT *it // destination iterator ) { - int16_t xmin, xmax; // coord limits - int16_t ymin, ymax; - int16_t xindex, yindex; // current bucket + TDimension xmin, xmax; // coord limits + TDimension ymin, ymax; TBOX olbox; C_OUTLINE_IT child_it; // search iterator @@ -332,8 +329,8 @@ void OL_BUCKETS::extract_children( // recursive count xmax = (olbox.right() - bl.x()) / BUCKETSIZE; ymin = (olbox.bottom() - bl.y()) / BUCKETSIZE; ymax = (olbox.top() - bl.y()) / BUCKETSIZE; - for (yindex = ymin; yindex <= ymax; yindex++) { - for (xindex = xmin; xindex <= xmax; xindex++) { + for (auto yindex = ymin; yindex <= ymax; yindex++) { + for (auto xindex = xmin; xindex <= xmax; xindex++) { child_it.set_to_list(&buckets[yindex * bxdim + xindex]); for (child_it.mark_cycle_pt(); !child_it.cycled_list(); child_it.forward()) { diff --git a/src/textord/edgblob.h b/src/textord/edgblob.h index c3db12be0..949e12e86 100644 --- a/src/textord/edgblob.h +++ b/src/textord/edgblob.h @@ -34,8 +34,8 @@ public: ICOORD tright); C_OUTLINE_LIST *operator()( // array access - int16_t x, // image coords - int16_t y); + TDimension x, // image coords + TDimension y); // first non-empty bucket C_OUTLINE_LIST *start_scan(); // next non-empty bucket diff --git a/src/textord/fpchop.cpp b/src/textord/fpchop.cpp index dfe3f95c4..c5db68c0e 100644 --- a/src/textord/fpchop.cpp +++ b/src/textord/fpchop.cpp @@ -32,7 +32,6 @@ namespace tesseract { INT_VAR(textord_fp_chop_error, 2, "Max allowed bending of chop cells"); -double_VAR(textord_fp_chop_snap, 0.5, "Max distance of chop pt from vertex"); static WERD *add_repeated_word(WERD_IT *rep_it, int16_t &rep_left, int16_t 
&prev_chop_coord, uint8_t &blanks, float pitch, WERD_IT *word_it); diff --git a/src/textord/fpchop.h b/src/textord/fpchop.h index d66f1a5d2..13f4c10aa 100644 --- a/src/textord/fpchop.h +++ b/src/textord/fpchop.h @@ -58,8 +58,7 @@ private: ELISTIZEH(C_OUTLINE_FRAG) -extern INT_VAR_H(textord_fp_chop_error, 2, "Max allowed bending of chop cells"); -extern double_VAR_H(textord_fp_chop_snap, 0.5, "Max distance of chop pt from vertex"); +extern INT_VAR_H(textord_fp_chop_error); ROW *fixed_pitch_words( // find lines TO_ROW *row, // row to do diff --git a/src/textord/gap_map.h b/src/textord/gap_map.h index e71dc8943..df6763ec9 100644 --- a/src/textord/gap_map.h +++ b/src/textord/gap_map.h @@ -40,10 +40,10 @@ private: /*-----------------------------*/ -extern BOOL_VAR_H(gapmap_debug, false, "Say which blocks have tables"); -extern BOOL_VAR_H(gapmap_use_ends, false, "Use large space at start and end of rows"); -extern BOOL_VAR_H(gapmap_no_isolated_quanta, false, "Ensure gaps not less than 2quanta wide"); -extern double_VAR_H(gapmap_big_gaps, 1.75, "xht multiplier"); +extern BOOL_VAR_H(gapmap_debug); +extern BOOL_VAR_H(gapmap_use_ends); +extern BOOL_VAR_H(gapmap_no_isolated_quanta); +extern double_VAR_H(gapmap_big_gaps); } // namespace tesseract diff --git a/src/textord/linefind.cpp b/src/textord/linefind.cpp index 45ead939e..d8a0a7961 100644 --- a/src/textord/linefind.cpp +++ b/src/textord/linefind.cpp @@ -206,8 +206,7 @@ static int FilterFalsePositives(int resolution, Image nonline_pix, Image interse // Too thick for the length. bad_line = true; } - if (!bad_line && - (intersection_pix == nullptr || NumTouchingIntersections(box, intersection_pix) < 2)) { + if (!bad_line && (NumTouchingIntersections(box, intersection_pix) < 2)) { // Test non-line density near the line. 
int nonline_count = CountPixelsAdjacentToLine(max_width, box, nonline_pix); if (nonline_count > box_height * box_width * kMaxNonLineDensity) { @@ -256,12 +255,11 @@ void LineFinder::FindAndRemoveLines(int resolution, bool debug, Image pix, int * // Find lines, convert to TabVector_LIST and remove those that are used. FindAndRemoveVLines(resolution, pix_intersections, vertical_x, vertical_y, &pix_vline, pix_non_vline, pix, v_lines); + pix_intersections.destroy(); if (pix_hline != nullptr) { // Recompute intersections and re-filter false positive h-lines. if (pix_vline != nullptr) { pix_intersections = pix_vline & pix_hline; - } else { - pix_intersections.destroy(); } if (!FilterFalsePositives(resolution, pix_non_hline, pix_intersections, pix_hline)) { pix_hline.destroy(); @@ -275,6 +273,7 @@ void LineFinder::FindAndRemoveLines(int resolution, bool debug, Image pix, int * if (pixa_display != nullptr && pix_hline != nullptr) { pixaAddPix(pixa_display, pix_hline, L_CLONE); } + pix_intersections.destroy(); if (pix_vline != nullptr && pix_hline != nullptr) { // Remove joins (intersections) where lines cross, and the residue. // Recalculate the intersections, since some lines have been deleted. 
diff --git a/src/textord/makerow.cpp b/src/textord/makerow.cpp index 1d2024083..61d8fde90 100644 --- a/src/textord/makerow.cpp +++ b/src/textord/makerow.cpp @@ -38,6 +38,7 @@ #include "underlin.h" #include +#include #include // for std::vector namespace tesseract { @@ -68,7 +69,6 @@ INT_VAR(textord_spline_medianwin, 6, "Size of window for spline segmentation"); static INT_VAR(textord_max_blob_overlaps, 4, "Max number of blobs a big blob can overlap"); INT_VAR(textord_min_xheight, 10, "Min credible pixel xheight"); double_VAR(textord_spline_shift_fraction, 0.02, "Fraction of line spacing for quad"); -double_VAR(textord_spline_outlier_fraction, 0.1, "Fraction of line spacing for outlier"); double_VAR(textord_skew_ile, 0.5, "Ile of gradients for page skew"); double_VAR(textord_skew_lag, 0.02, "Lag for skew on row accumulation"); double_VAR(textord_linespace_iqrlimit, 0.2, "Max iqr/median for linespace"); @@ -358,7 +358,7 @@ void compute_page_skew( // get average gradient for (row_it.mark_cycle_pt(); !row_it.cycled_list(); row_it.forward()) { row = row_it.data(); blob_count = row->blob_list()->length(); - row_err = static_cast(ceil(row->line_error())); + row_err = static_cast(std::ceil(row->line_error())); if (row_err <= 0) { row_err = 1; } @@ -637,7 +637,7 @@ void delete_non_dropout_rows( // find lines min_y = block_box.bottom() - 1; max_y = block_box.top() + 1; for (row_it.mark_cycle_pt(); !row_it.cycled_list(); row_it.forward()) { - line_index = static_cast(floor(row_it.data()->intercept())); + line_index = static_cast(std::floor(row_it.data()->intercept())); if (line_index <= min_y) { min_y = line_index - 1; } @@ -669,7 +669,7 @@ void delete_non_dropout_rows( // find lines compute_dropout_distances(&occupation[0], &deltas[0], line_count); for (row_it.mark_cycle_pt(); !row_it.cycled_list(); row_it.forward()) { row = row_it.data(); - line_index = static_cast(floor(row->intercept())); + line_index = static_cast(std::floor(row->intercept())); distance = 
deltas[line_index - min_y]; if (find_best_dropout_row(row, distance, block->line_spacing / 2, line_index, &row_it, testing_on)) { @@ -727,7 +727,7 @@ bool find_best_dropout_row( // find neighbours row_offset = row_inc; do { next_row = row_it->data_relative(row_offset); - next_index = static_cast(floor(next_row->intercept())); + next_index = static_cast(std::floor(next_row->intercept())); if ((distance < 0 && next_index < line_index && next_index > line_index + distance + distance) || (distance >= 0 && next_index > line_index && @@ -775,7 +775,7 @@ TBOX deskew_block_coords( // block box BLOBNBOX *blob; // current blob BLOBNBOX_IT blob_it; // iterator - length = sqrt(gradient * gradient + 1); + length = std::sqrt(gradient * gradient + 1); rotation = FCOORD(1 / length, -gradient / length); for (row_it.mark_cycle_pt(); !row_it.cycled_list(); row_it.forward()) { row = row_it.data(); @@ -816,7 +816,7 @@ void compute_line_occupation( // project blobs FCOORD rotation; // inverse of skew line_count = max_y - min_y + 1; - length = sqrt(gradient * gradient + 1); + length = std::sqrt(gradient * gradient + 1); rotation = FCOORD(1 / length, -gradient / length); for (line_index = 0; line_index < line_count; line_index++) { deltas[line_index] = 0; @@ -1181,6 +1181,11 @@ void compute_row_stats( // find lines if (prev_row != nullptr) { rows[rowcount++] = prev_row; prev_row->spacing = row->intercept() - prev_row->intercept(); + if (prev_row->spacing < 0.1 && prev_row->spacing > -0.1) { + // Avoid small spacing values which give a small disp_quant_factor_. + // That can cause large memory allocations with out-of-memory. 
+ prev_row->spacing = 0; + } if (testing_on) { tprintf("Row at %g yields spacing of %g\n", row->intercept(), prev_row->spacing); } @@ -1189,7 +1194,7 @@ void compute_row_stats( // find lines row_it.backward(); } while (!row_it.at_last()); block->key_row = prev_row; - block->baseline_offset = fmod(prev_row->parallel_c(), block->line_spacing); + block->baseline_offset = std::fmod(prev_row->parallel_c(), block->line_spacing); if (testing_on) { tprintf("Blob based spacing=(%g,%g), offset=%g", block->line_size, block->line_spacing, block->baseline_offset); @@ -1233,7 +1238,7 @@ void compute_row_stats( // find lines block->line_spacing = rows[row_index]->spacing; block->max_blob_size = block->line_spacing * textord_excess_blobsize; } - block->baseline_offset = fmod(rows[row_index]->intercept(), block->line_spacing); + block->baseline_offset = std::fmod(rows[row_index]->intercept(), block->line_spacing); } if (testing_on) { tprintf("\nEstimate line size=%g, spacing=%g, offset=%g\n", block->line_size, @@ -1296,7 +1301,7 @@ void Textord::compute_block_xheight(TO_BLOCK *block, float gradient) { for (row_it.mark_cycle_pt(); !row_it.cycled_list(); row_it.forward()) { row = row_it.data(); // Compute the xheight of this row if it has not been computed before. 
- if (row->xheight <= 0.0) { + if (row->xheight <= 0) { compute_row_xheight(row, block->block->classify_rotation(), gradient, block->line_size); } ROW_CATEGORY row_category = get_row_category(row); @@ -1350,10 +1355,10 @@ void Textord::compute_block_xheight(TO_BLOCK *block, float gradient) { xheight = static_cast(textord_min_xheight); corrected_xheight = true; } - if (corrected_xheight || ascrise <= 0.0) { + if (corrected_xheight || ascrise <= 0) { ascrise = xheight * asc_frac_xheight; } - if (corrected_xheight || descdrop >= 0.0) { + if (corrected_xheight || descdrop >= 0) { descdrop = -(xheight * desc_frac_xheight); } block->xheight = xheight; @@ -1398,7 +1403,7 @@ void Textord::compute_row_xheight(TO_ROW *row, // row to do &heights, &floating_heights, textord_single_height_mode && rotation.y() == 0.0, min_height, max_height, &(row->xheight), &(row->ascrise)); row->descdrop = 0.0f; - if (row->xheight > 0.0) { + if (row->xheight > 0) { row->descdrop = static_cast(compute_row_descdrop(row, gradient, row->xheight_evidence, &heights)); } @@ -1700,7 +1705,7 @@ void correct_row_xheight(TO_ROW *row, float xheight, float ascrise, float descdr // -- the row does not have ascenders or descenders, but its xheight // is close to the average block xheight (e.g. 
row with "www.mmm.com") if (row_category == ROW_ASCENDERS_FOUND) { - if (row->descdrop >= 0.0) { + if (row->descdrop >= 0) { row->descdrop = row->xheight * (descdrop / xheight); } } else if (row_category == ROW_INVALID || @@ -1792,7 +1797,7 @@ void separate_underlines(TO_BLOCK *block, // block to do int min_blob_height = static_cast(textord_min_blob_height_fraction * block->line_size + 0.5); // length of vector - length = sqrt(1 + gradient * gradient); + length = std::sqrt(1 + gradient * gradient); g_vec = FCOORD(1 / length, -gradient / length); blob_rotation = FCOORD(rotation.x(), -rotation.y()); blob_rotation.rotate(g_vec); // undoing everything @@ -2291,7 +2296,7 @@ void assign_blobs_to_rows( // find lines (block->block->pdblk.bounding_box().bottom() + block->block->pdblk.bounding_box().top()) / 2.0f; if (gradient != nullptr) { - g_length = sqrt(1 + *gradient * *gradient); + g_length = std::sqrt(1 + *gradient * *gradient); } #ifndef GRAPHICS_DISABLED if (drawing_skew) { @@ -2331,7 +2336,6 @@ void assign_blobs_to_rows( // find lines if (!row_it.empty()) { for (row_it.move_to_first(); !row_it.at_last() && row_it.data()->min_y() > top; row_it.forward()) { - ; } row = row_it.data(); if (row->min_y() <= top && row->max_y() >= bottom) { diff --git a/src/textord/makerow.h b/src/textord/makerow.h index 0f91d8a18..91668dfab 100644 --- a/src/textord/makerow.h +++ b/src/textord/makerow.h @@ -40,52 +40,48 @@ enum ROW_CATEGORY { ROW_INVALID, }; -extern BOOL_VAR_H(textord_heavy_nr, false, "Vigorously remove noise"); -extern BOOL_VAR_H(textord_show_initial_rows, false, "Display row accumulation"); -extern BOOL_VAR_H(textord_show_parallel_rows, false, "Display page correlated rows"); -extern BOOL_VAR_H(textord_show_expanded_rows, false, "Display rows after expanding"); -extern BOOL_VAR_H(textord_show_final_rows, false, "Display rows after final fitting"); -extern BOOL_VAR_H(textord_show_final_blobs, false, "Display blob bounds after pre-ass"); -extern 
BOOL_VAR_H(textord_test_landscape, false, "Tests refer to land/port"); -extern BOOL_VAR_H(textord_parallel_baselines, true, "Force parallel baselines"); -extern BOOL_VAR_H(textord_straight_baselines, false, "Force straight baselines"); -extern BOOL_VAR_H(textord_quadratic_baselines, false, "Use quadratic splines"); -extern BOOL_VAR_H(textord_old_baselines, true, "Use old baseline algorithm"); -extern BOOL_VAR_H(textord_old_xheight, true, "Use old xheight algorithm"); -extern BOOL_VAR_H(textord_fix_xheight_bug, true, "Use spline baseline"); -extern BOOL_VAR_H(textord_fix_makerow_bug, true, "Prevent multiple baselines"); -extern BOOL_VAR_H(textord_cblob_blockocc, true, "Use new projection for underlines"); -extern BOOL_VAR_H(textord_debug_xheights, false, "Test xheight algorithms"); -extern INT_VAR_H(textord_test_x, -INT32_MAX, "coord of test pt"); -extern INT_VAR_H(textord_test_y, -INT32_MAX, "coord of test pt"); -extern INT_VAR_H(textord_min_blobs_in_row, 4, "Min blobs before gradient counted"); -extern INT_VAR_H(textord_spline_minblobs, 8, "Min blobs in each spline segment"); -extern INT_VAR_H(textord_spline_medianwin, 6, "Size of window for spline segmentation"); -extern INT_VAR_H(textord_min_xheight, 10, "Min credible pixel xheight"); -extern double_VAR_H(textord_spline_shift_fraction, 0.02, "Fraction of line spacing for quad"); -extern double_VAR_H(textord_spline_outlier_fraction, 0.1, "Fraction of line spacing for outlier"); -extern double_VAR_H(textord_skew_ile, 0.5, "Ile of gradients for page skew"); -extern double_VAR_H(textord_skew_lag, 0.75, "Lag for skew on row accumulation"); -extern double_VAR_H(textord_linespace_iqrlimit, 0.2, "Max iqr/median for linespace"); -extern double_VAR_H(textord_width_limit, 8, "Max width of blobs to make rows"); -extern double_VAR_H(textord_chop_width, 1.5, "Max width before chopping"); -extern double_VAR_H(textord_minxh, 0.25, "fraction of linesize for min xheight"); -extern double_VAR_H(textord_min_linesize, 1.25, "* blob 
height for initial linesize"); -extern double_VAR_H(textord_excess_blobsize, 1.3, "New row made if blob makes row this big"); -extern double_VAR_H(textord_occupancy_threshold, 0.4, "Fraction of neighbourhood"); -extern double_VAR_H(textord_underline_width, 2.0, "Multiple of line_size for underline"); -extern double_VAR_H(textord_min_blob_height_fraction, 0.75, - "Min blob height/top to include blob top into xheight stats"); -extern double_VAR_H(textord_xheight_mode_fraction, 0.4, "Min pile height to make xheight"); -extern double_VAR_H(textord_ascheight_mode_fraction, 0.15, "Min pile height to make ascheight"); -extern double_VAR_H(textord_ascx_ratio_min, 1.2, "Min cap/xheight"); -extern double_VAR_H(textord_ascx_ratio_max, 1.7, "Max cap/xheight"); -extern double_VAR_H(textord_descx_ratio_min, 0.15, "Min desc/xheight"); -extern double_VAR_H(textord_descx_ratio_max, 0.6, "Max desc/xheight"); -extern double_VAR_H(textord_xheight_error_margin, 0.1, "Accepted variation"); -extern INT_VAR_H(textord_lms_line_trials, 12, "Number of linew fits to do"); -extern BOOL_VAR_H(textord_new_initial_xheight, true, "Use test xheight mechanism"); -extern BOOL_VAR_H(textord_debug_blob, false, "Print test blob information"); +extern BOOL_VAR_H(textord_heavy_nr); +extern BOOL_VAR_H(textord_show_initial_rows); +extern BOOL_VAR_H(textord_show_parallel_rows); +extern BOOL_VAR_H(textord_show_expanded_rows); +extern BOOL_VAR_H(textord_show_final_rows); +extern BOOL_VAR_H(textord_show_final_blobs); +extern BOOL_VAR_H(textord_test_landscape); +extern BOOL_VAR_H(textord_parallel_baselines); +extern BOOL_VAR_H(textord_straight_baselines); +extern BOOL_VAR_H(textord_old_baselines); +extern BOOL_VAR_H(textord_old_xheight); +extern BOOL_VAR_H(textord_fix_xheight_bug); +extern BOOL_VAR_H(textord_fix_makerow_bug); +extern BOOL_VAR_H(textord_debug_xheights); +extern INT_VAR_H(textord_test_x); +extern INT_VAR_H(textord_test_y); +extern INT_VAR_H(textord_min_blobs_in_row); +extern 
INT_VAR_H(textord_spline_minblobs); +extern INT_VAR_H(textord_spline_medianwin); +extern INT_VAR_H(textord_min_xheight); +extern double_VAR_H(textord_spline_shift_fraction); +extern double_VAR_H(textord_skew_ile); +extern double_VAR_H(textord_skew_lag); +extern double_VAR_H(textord_linespace_iqrlimit); +extern double_VAR_H(textord_width_limit); +extern double_VAR_H(textord_chop_width); +extern double_VAR_H(textord_minxh); +extern double_VAR_H(textord_min_linesize); +extern double_VAR_H(textord_excess_blobsize); +extern double_VAR_H(textord_occupancy_threshold); +extern double_VAR_H(textord_underline_width); +extern double_VAR_H(textord_min_blob_height_fraction); +extern double_VAR_H(textord_xheight_mode_fraction); +extern double_VAR_H(textord_ascheight_mode_fraction); +extern double_VAR_H(textord_ascx_ratio_min); +extern double_VAR_H(textord_ascx_ratio_max); +extern double_VAR_H(textord_descx_ratio_min); +extern double_VAR_H(textord_descx_ratio_max); +extern double_VAR_H(textord_xheight_error_margin); +extern INT_VAR_H(textord_lms_line_trials); +extern BOOL_VAR_H(textord_new_initial_xheight); +extern BOOL_VAR_H(textord_debug_blob); inline void get_min_max_xheight(int block_linesize, int *min_height, int *max_height) { *min_height = static_cast(floor(block_linesize * textord_minxh)); diff --git a/src/textord/oldbasel.cpp b/src/textord/oldbasel.cpp index 3c0d34596..d5291fd4d 100644 --- a/src/textord/oldbasel.cpp +++ b/src/textord/oldbasel.cpp @@ -32,6 +32,7 @@ #include "textord.h" #include "tprintf.h" +#include #include // for std::vector #include @@ -173,14 +174,12 @@ void Textord::correlate_neighbours(TO_BLOCK *block, // block rows are in. 
otherrow >= 0 && (rows[otherrow]->xheight < 0.0 || !row->baseline.overlap(&rows[otherrow]->baseline, MAXOVERLAP)); otherrow--) { - ; } upperrow = otherrow; /*decent row above */ for (otherrow = rowindex + 1; otherrow < rowcount && (rows[otherrow]->xheight < 0.0 || !row->baseline.overlap(&rows[otherrow]->baseline, MAXOVERLAP)); otherrow++) { - ; } lowerrow = otherrow; /*decent row below */ if (upperrow >= 0) { @@ -1106,13 +1105,11 @@ int segment_spline( // make xstarts /*find rising y centre */ for (ptindex = turnpoints[segment - 1] + 1; ptindex < turnpoints[segment] && ycoords[ptindex + 1] <= lastmax; ptindex++) { - ; } } else { /*find falling y centre */ for (ptindex = turnpoints[segment - 1] + 1; ptindex < turnpoints[segment] && ycoords[ptindex + 1] >= lastmax; ptindex++) { - ; } } @@ -1450,7 +1447,7 @@ void make_first_xheight( // find xheight for (blobindex = 0; blobindex < blobcount; blobindex++) { int xcenter = (blobcoords[blobindex].left() + blobcoords[blobindex].right()) / 2; float base = baseline->y(xcenter); - float bottomdiff = fabs(base - blobcoords[blobindex].bottom()); + float bottomdiff = std::fabs(base - blobcoords[blobindex].bottom()); int strength = textord_ocropus_mode && bottomdiff <= kBaselineTouch ? 
kGoodStrength : 1; int height = static_cast(blobcoords[blobindex].top() - base + 0.5); if (blobcoords[blobindex].height() > init_lineheight * kMinHeight) { diff --git a/src/textord/oldbasel.h b/src/textord/oldbasel.h index e48e985d6..7b1d8d097 100644 --- a/src/textord/oldbasel.h +++ b/src/textord/oldbasel.h @@ -24,7 +24,7 @@ namespace tesseract { -extern BOOL_VAR_H(textord_oldbl_debug, false, "Debug old baseline generation"); +extern BOOL_VAR_H(textord_oldbl_debug); int get_blob_coords( // get boxes TO_ROW *row, // row to use diff --git a/src/textord/pithsync.cpp b/src/textord/pithsync.cpp index 36e80a7d1..3b56f7383 100644 --- a/src/textord/pithsync.cpp +++ b/src/textord/pithsync.cpp @@ -669,7 +669,6 @@ double check_pitch_sync3( // find segmentation do { for (x = best_end->position() - pitch + pitch_error; x < best_end->position() - pitch_error && projection->pile_count(x) == 0; x++) { - ; } if (x < best_end->position() - pitch_error) { occupation_count++; diff --git a/src/textord/pitsync1.cpp b/src/textord/pitsync1.cpp index 415d29ca3..5de3b768e 100644 --- a/src/textord/pitsync1.cpp +++ b/src/textord/pitsync1.cpp @@ -26,7 +26,6 @@ namespace tesseract { INT_VAR(pitsync_linear_version, 6, "Use new fast algorithm"); double_VAR(pitsync_joined_edge, 0.75, "Dist inside big blob for chopping"); double_VAR(pitsync_offset_freecut_fraction, 0.25, "Fraction of cut for free cuts"); -INT_VAR(pitsync_fake_depth, 1, "Max advance fake generation"); /********************************************************************** * FPSEGPT::FPSEGPT diff --git a/src/textord/pitsync1.h b/src/textord/pitsync1.h index cf1e5aabb..5df7ec488 100644 --- a/src/textord/pitsync1.h +++ b/src/textord/pitsync1.h @@ -80,10 +80,9 @@ private: ELISTIZEH(FPSEGPT) CLISTIZEH(FPSEGPT_LIST) -extern INT_VAR_H(pitsync_linear_version, 0, "Use new fast algorithm"); -extern double_VAR_H(pitsync_joined_edge, 0.75, "Dist inside big blob for chopping"); -extern double_VAR_H(pitsync_offset_freecut_fraction, 0.25, 
"Fraction of cut for free cuts"); -extern INT_VAR_H(pitsync_fake_depth, 1, "Max advance fake generation"); +extern INT_VAR_H(pitsync_linear_version); +extern double_VAR_H(pitsync_joined_edge); +extern double_VAR_H(pitsync_offset_freecut_fraction); double check_pitch_sync( // find segmentation BLOBNBOX_IT *blob_it, // blobs to do int16_t blob_count, // no of blobs diff --git a/src/textord/scanedg.cpp b/src/textord/scanedg.cpp index 6c18bdd76..d416695c1 100644 --- a/src/textord/scanedg.cpp +++ b/src/textord/scanedg.cpp @@ -44,11 +44,11 @@ static void free_crackedges(CRACKEDGE *start); static void join_edges(CRACKEDGE *edge1, CRACKEDGE *edge2, CRACKEDGE **free_cracks, C_OUTLINE_IT *outline_it); -static void line_edges(int16_t x, int16_t y, int16_t xext, uint8_t uppercolour, uint8_t *bwpos, +static void line_edges(TDimension x, TDimension y, TDimension xext, uint8_t uppercolour, uint8_t *bwpos, CRACKEDGE **prevline, CRACKEDGE **free_cracks, C_OUTLINE_IT *outline_it); static void make_margins(PDBLK *block, BLOCK_LINE_IT *line_it, uint8_t *pixels, uint8_t margin, - int16_t left, int16_t right, int16_t y); + TDimension left, TDimension right, TDimension y); static CRACKEDGE *h_edge(int sign, CRACKEDGE *join, CrackPos *pos); static CRACKEDGE *v_edge(int sign, CRACKEDGE *join, CrackPos *pos); @@ -114,14 +114,11 @@ static void make_margins( // get a line BLOCK_LINE_IT *line_it, // for old style uint8_t *pixels, // pixels to strip uint8_t margin, // white-out pixel - int16_t left, // block edges - int16_t right, - int16_t y // line coord + TDimension left, // block edges + TDimension right, + TDimension y // line coord ) { ICOORDELT_IT seg_it; - int32_t start; // of segment - int16_t xext; // of segment - int xindex; // index to pixel if (block->poly_block() != nullptr) { std::unique_ptr lines(new PB_LINE_IT(block->poly_block())); @@ -129,9 +126,9 @@ static void make_margins( // get a line if (!segments->empty()) { seg_it.set_to_list(segments.get()); seg_it.mark_cycle_pt(); - 
start = seg_it.data()->x(); - xext = seg_it.data()->y(); - for (xindex = left; xindex < right; xindex++) { + auto start = seg_it.data()->x(); + auto xext = seg_it.data()->y(); + for (auto xindex = left; xindex < right; xindex++) { if (xindex >= start && !seg_it.cycled_list()) { xindex = start + xext - 1; seg_it.forward(); @@ -142,16 +139,17 @@ static void make_margins( // get a line } } } else { - for (xindex = left; xindex < right; xindex++) { + for (auto xindex = left; xindex < right; xindex++) { pixels[xindex - left] = margin; } } } else { - start = line_it->get_line(y, xext); - for (xindex = left; xindex < start; xindex++) { + TDimension xext; // of segment + auto start = line_it->get_line(y, xext); + for (auto xindex = left; xindex < start; xindex++) { pixels[xindex - left] = margin; } - for (xindex = start + xext; xindex < right; xindex++) { + for (auto xindex = start + xext; xindex < right; xindex++) { pixels[xindex - left] = margin; } } @@ -164,9 +162,9 @@ static void make_margins( // get a line * When edges close into loops, send them for approximation. **********************************************************************/ -static void line_edges(int16_t x, // coord of line start - int16_t y, // coord of line - int16_t xext, // width of line +static void line_edges(TDimension x, // coord of line start + TDimension y, // coord of line + TDimension xext, // width of line uint8_t uppercolour, // start of prev line uint8_t *bwpos, // thresholded line CRACKEDGE **prevline, // edges in progress diff --git a/src/textord/strokewidth.cpp b/src/textord/strokewidth.cpp index 2829756d9..c1fa3c696 100644 --- a/src/textord/strokewidth.cpp +++ b/src/textord/strokewidth.cpp @@ -161,7 +161,6 @@ void StrokeWidth::FindTextlineDirectionAndFixBrokenCJK(PageSegMode pageseg_mode, InsertBlobs(input_block); // Repair broken CJK characters if needed. while (cjk_merge && FixBrokenCJK(input_block)) { - ; } // Grade blobs by inspection of neighbours. 
FindTextlineFlowDirection(pageseg_mode, false); diff --git a/src/textord/tablefind.cpp b/src/textord/tablefind.cpp index fe825f131..c0eed61d9 100644 --- a/src/textord/tablefind.cpp +++ b/src/textord/tablefind.cpp @@ -22,13 +22,13 @@ #include #include +#include #include "tablefind.h" #include #include "colpartitionset.h" #include "tablerecog.h" -#include "tabletransfer.h" namespace tesseract { @@ -158,11 +158,11 @@ void DeleteObject(T *object) { } TableFinder::TableFinder() - : resolution_(0) - , global_median_xheight_(0) - , global_median_blob_width_(0) - , global_median_ledding_(0) - , left_to_right_language_(true) {} + : resolution_(0), + global_median_xheight_(0), + global_median_blob_width_(0), + global_median_ledding_(0), + left_to_right_language_(true) {} TableFinder::~TableFinder() { // ColPartitions and ColSegments created by this class for storage in grids @@ -178,7 +178,8 @@ void TableFinder::set_left_to_right_language(bool order) { left_to_right_language_ = order; } -void TableFinder::Init(int grid_size, const ICOORD &bottom_left, const ICOORD &top_right) { +void TableFinder::Init(int grid_size, const ICOORD &bottom_left, + const ICOORD &top_right) { // Initialize clean partitions list and grid clean_part_grid_.Init(grid_size, bottom_left, top_right); leader_and_ruling_grid_.Init(grid_size, bottom_left, top_right); @@ -189,7 +190,8 @@ void TableFinder::Init(int grid_size, const ICOORD &bottom_left, const ICOORD &t // Copy cleaned partitions from part_grid_ to clean_part_grid_ and // insert leaders and rulers into the leader_and_ruling_grid_ -void TableFinder::InsertCleanPartitions(ColPartitionGrid *grid, TO_BLOCK *block) { +void TableFinder::InsertCleanPartitions(ColPartitionGrid *grid, + TO_BLOCK *block) { // Calculate stats. This lets us filter partitions in AllowTextPartition() // and filter blobs in AllowBlob(). 
SetGlobalSpacings(grid); @@ -256,20 +258,22 @@ void TableFinder::InsertCleanPartitions(ColPartitionGrid *grid, TO_BLOCK *block) } // High level function to perform table detection -void TableFinder::LocateTables(ColPartitionGrid *grid, ColPartitionSet **all_columns, +void TableFinder::LocateTables(ColPartitionGrid *grid, + ColPartitionSet **all_columns, WidthCallback width_cb, const FCOORD &reskew) { // initialize spacing, neighbors, and columns InitializePartitions(all_columns); #ifndef GRAPHICS_DISABLED if (textord_show_tables) { - ScrollView *table_win = MakeWindow(0, 300, - "Step 1: Column Partitions & Neighbors"); + ScrollView *table_win = MakeWindow(0, 300, "Column Partitions & Neighbors"); DisplayColPartitions(table_win, &clean_part_grid_, ScrollView::BLUE); - DisplayColPartitions(table_win, &leader_and_ruling_grid_, ScrollView::AQUAMARINE); - DisplayColPartitionConnections(table_win, &clean_part_grid_, ScrollView::ORANGE); + DisplayColPartitions(table_win, &leader_and_ruling_grid_, + ScrollView::AQUAMARINE); + DisplayColPartitionConnections(table_win, &clean_part_grid_, + ScrollView::ORANGE); - table_win = MakeWindow(100, 300, "Step 2: Fragmented Text"); + table_win = MakeWindow(100, 300, "Fragmented Text"); DisplayColPartitions(table_win, &fragmented_text_grid_, ScrollView::BLUE); } #endif // !GRAPHICS_DISABLED @@ -304,8 +308,7 @@ void TableFinder::LocateTables(ColPartitionGrid *grid, ColPartitionSet **all_col #ifndef GRAPHICS_DISABLED if (textord_tablefind_show_mark) { - ScrollView *table_win = MakeWindow(1200, 300, - "Step 7: Table Columns and Regions"); + ScrollView *table_win = MakeWindow(1200, 300, "Table Columns and Regions"); DisplayColSegments(table_win, &table_columns, ScrollView::DARK_TURQUOISE); DisplayColSegments(table_win, &table_regions, ScrollView::YELLOW); } @@ -327,8 +330,7 @@ void TableFinder::LocateTables(ColPartitionGrid *grid, ColPartitionSet **all_col #ifndef GRAPHICS_DISABLED if (textord_show_tables) { - ScrollView *table_win = 
MakeWindow(1200, 300, - "Step 8: Detected Table Locations"); + ScrollView *table_win = MakeWindow(1200, 300, "Detected Table Locations"); DisplayColPartitions(table_win, &clean_part_grid_, ScrollView::BLUE); DisplayColSegments(table_win, &table_columns, ScrollView::KHAKI); table_grid_.DisplayBoxes(table_win); @@ -342,10 +344,9 @@ void TableFinder::LocateTables(ColPartitionGrid *grid, ColPartitionSet **all_col #ifndef GRAPHICS_DISABLED if (textord_show_tables) { - ScrollView *table_win = MakeWindow(1400, 600, - "Step 10: Recognized Tables"); - DisplayColPartitions(table_win, &clean_part_grid_, - ScrollView::BLUE, ScrollView::BLUE); + ScrollView *table_win = MakeWindow(1400, 600, "Recognized Tables"); + DisplayColPartitions(table_win, &clean_part_grid_, ScrollView::BLUE, + ScrollView::BLUE); table_grid_.DisplayBoxes(table_win); } #endif // !GRAPHICS_DISABLED @@ -358,9 +359,9 @@ void TableFinder::LocateTables(ColPartitionGrid *grid, ColPartitionSet **all_col #ifndef GRAPHICS_DISABLED if (textord_show_tables) { - ScrollView *table_win = MakeWindow(1500, 300, "Step 11: Detected Tables"); - DisplayColPartitions(table_win, &clean_part_grid_, - ScrollView::BLUE, ScrollView::BLUE); + ScrollView *table_win = MakeWindow(1500, 300, "Detected Tables"); + DisplayColPartitions(table_win, &clean_part_grid_, ScrollView::BLUE, + ScrollView::BLUE); table_grid_.DisplayBoxes(table_win); } #endif // !GRAPHICS_DISABLED @@ -459,7 +460,8 @@ void TableFinder::SplitAndInsertFragmentedTextPartition(ColPartition *part) { // Look for the next split in the partition. for (box_it.mark_cycle_pt(); !box_it.cycled_list(); box_it.forward()) { const TBOX &box = box_it.data()->bounding_box(); - if (previous_right != INT32_MIN && box.left() - previous_right > kThreshold) { + if (previous_right != INT32_MIN && + box.left() - previous_right > kThreshold) { // We have a split position. Split the partition in two pieces. // Insert the left piece in the grid and keep processing the right. 
int mid_x = (box.left() + previous_right) / 2; @@ -491,7 +493,8 @@ bool TableFinder::AllowTextPartition(const ColPartition &part) const { const int median_area = global_median_xheight_ * global_median_blob_width_; const double kAreaPerBlobRequired = median_area * kAllowTextArea; // Keep comparisons strictly greater to disallow 0! - return part.median_height() > kHeightRequired && part.median_width() > kWidthRequired && + return part.median_height() > kHeightRequired && + part.median_width() > kWidthRequired && part.bounding_box().area() > kAreaPerBlobRequired * part.boxes_count(); } @@ -519,13 +522,15 @@ ScrollView *TableFinder::MakeWindow(int x, int y, const char *window_name) { #endif // Make single-column blocks from good_columns_ partitions. -void TableFinder::GetColumnBlocks(ColPartitionSet **all_columns, ColSegment_LIST *column_blocks) { +void TableFinder::GetColumnBlocks(ColPartitionSet **all_columns, + ColSegment_LIST *column_blocks) { for (int i = 0; i < gridheight(); ++i) { ColPartitionSet *columns = all_columns[i]; if (columns != nullptr) { ColSegment_LIST new_blocks; // Get boxes from the current vertical position on the grid - columns->GetColumnBoxes(i * gridsize(), (i + 1) * gridsize(), &new_blocks); + columns->GetColumnBoxes(i * gridsize(), (i + 1) * gridsize(), + &new_blocks); // Merge the new_blocks boxes into column_blocks if they are well-aligned GroupColumnBlocks(&new_blocks, column_blocks); } @@ -533,7 +538,8 @@ void TableFinder::GetColumnBlocks(ColPartitionSet **all_columns, ColSegment_LIST } // Merge column segments into the current list if they are well aligned. 
-void TableFinder::GroupColumnBlocks(ColSegment_LIST *new_blocks, ColSegment_LIST *column_blocks) { +void TableFinder::GroupColumnBlocks(ColSegment_LIST *new_blocks, + ColSegment_LIST *column_blocks) { ColSegment_IT src_it(new_blocks); ColSegment_IT dest_it(column_blocks); // iterate through the source list @@ -565,8 +571,10 @@ void TableFinder::GroupColumnBlocks(ColSegment_LIST *new_blocks, ColSegment_LIST bool TableFinder::ConsecutiveBoxes(const TBOX &b1, const TBOX &b2) { int x_margin = 20; int y_margin = 5; - return (abs(b1.left() - b2.left()) < x_margin) && (abs(b1.right() - b2.right()) < x_margin) && - (abs(b1.top() - b2.bottom()) < y_margin || abs(b2.top() - b1.bottom()) < y_margin); + return (abs(b1.left() - b2.left()) < x_margin) && + (abs(b1.right() - b2.right()) < x_margin) && + (abs(b1.top() - b2.bottom()) < y_margin || + abs(b2.top() - b1.bottom()) < y_margin); } // Set up info for clean_part_grid_ partitions to be valid during detection @@ -578,7 +586,8 @@ void TableFinder::InitializePartitions(ColPartitionSet **all_columns) { } // Set left, right and top, bottom spacings of each colpartition. -void TableFinder::SetPartitionSpacings(ColPartitionGrid *grid, ColPartitionSet **all_columns) { +void TableFinder::SetPartitionSpacings(ColPartitionGrid *grid, + ColPartitionSet **all_columns) { // Iterate the ColPartitions in the grid. 
ColPartitionGridSearch gsearch(grid); gsearch.StartFullSearch(); @@ -606,7 +615,8 @@ void TableFinder::SetPartitionSpacings(ColPartitionGrid *grid, ColPartitionSet * hsearch.StartSideSearch(box.left(), box.bottom(), box.top()); ColPartition *neighbor = nullptr; while ((neighbor = hsearch.NextSideSearch(true)) != nullptr) { - if (neighbor->type() == PT_PULLOUT_IMAGE || neighbor->type() == PT_FLOWING_IMAGE || + if (neighbor->type() == PT_PULLOUT_IMAGE || + neighbor->type() == PT_FLOWING_IMAGE || neighbor->type() == PT_HEADING_IMAGE) { int right = neighbor->bounding_box().right(); if (right < box.left()) { @@ -618,7 +628,8 @@ void TableFinder::SetPartitionSpacings(ColPartitionGrid *grid, ColPartitionSet * hsearch.StartSideSearch(box.left(), box.bottom(), box.top()); neighbor = nullptr; while ((neighbor = hsearch.NextSideSearch(false)) != nullptr) { - if (neighbor->type() == PT_PULLOUT_IMAGE || neighbor->type() == PT_FLOWING_IMAGE || + if (neighbor->type() == PT_PULLOUT_IMAGE || + neighbor->type() == PT_FLOWING_IMAGE || neighbor->type() == PT_HEADING_IMAGE) { int left = neighbor->bounding_box().left(); if (left > box.right()) { @@ -630,8 +641,9 @@ void TableFinder::SetPartitionSpacings(ColPartitionGrid *grid, ColPartitionSet * ColPartition *upper_part = part->SingletonPartner(true); if (upper_part) { - int space = std::max( - 0, static_cast(upper_part->bounding_box().bottom() - part->bounding_box().bottom())); + int space = + std::max(0, static_cast(upper_part->bounding_box().bottom() - + part->bounding_box().bottom())); part->set_space_above(space); } else { // TODO(nbeato): What constitutes a good value? 
@@ -642,8 +654,9 @@ void TableFinder::SetPartitionSpacings(ColPartitionGrid *grid, ColPartitionSet * ColPartition *lower_part = part->SingletonPartner(false); if (lower_part) { - int space = std::max( - 0, static_cast(part->bounding_box().bottom() - lower_part->bounding_box().bottom())); + int space = + std::max(0, static_cast(part->bounding_box().bottom() - + lower_part->bounding_box().bottom())); part->set_space_below(space); } else { // TODO(nbeato): What constitutes a good value? @@ -657,14 +670,17 @@ void TableFinder::SetPartitionSpacings(ColPartitionGrid *grid, ColPartitionSet * // Set spacing and closest neighbors above and below a given colpartition. void TableFinder::SetVerticalSpacing(ColPartition *part) { TBOX box = part->bounding_box(); - int top_range = std::min(box.top() + kMaxVerticalSpacing, static_cast(tright().y())); - int bottom_range = std::max(box.bottom() - kMaxVerticalSpacing, static_cast(bleft().y())); + int top_range = + std::min(box.top() + kMaxVerticalSpacing, static_cast(tright().y())); + int bottom_range = std::max(box.bottom() - kMaxVerticalSpacing, + static_cast(bleft().y())); box.set_top(top_range); box.set_bottom(bottom_range); TBOX part_box = part->bounding_box(); // Start a rect search - GridSearch rectsearch(&clean_part_grid_); + GridSearch rectsearch( + &clean_part_grid_); rectsearch.StartRectSearch(box); ColPartition *neighbor; int min_space_above = kMaxVerticalSpacing; @@ -683,7 +699,8 @@ void TableFinder::SetVerticalSpacing(ColPartition *part) { min_space_below = gap; below_neighbor = neighbor; } // If neighbor is above current partition - else if (part_box.top() < neighbor_box.bottom() && gap < min_space_above) { + else if (part_box.top() < neighbor_box.bottom() && + gap < min_space_above) { min_space_above = gap; above_neighbor = neighbor; } @@ -782,37 +799,37 @@ void TableFinder::MarkTablePartitions() { MarkPartitionsUsingLocalInformation(); #ifndef GRAPHICS_DISABLED if (textord_tablefind_show_mark) { - ScrollView 
*table_win = MakeWindow(300, 300, - "Step 3: Initial Table Partitions"); + ScrollView *table_win = MakeWindow(300, 300, "Initial Table Partitions"); DisplayColPartitions(table_win, &clean_part_grid_, ScrollView::BLUE); - DisplayColPartitions(table_win, &leader_and_ruling_grid_, ScrollView::AQUAMARINE); + DisplayColPartitions(table_win, &leader_and_ruling_grid_, + ScrollView::AQUAMARINE); } #endif FilterFalseAlarms(); #ifndef GRAPHICS_DISABLED if (textord_tablefind_show_mark) { - ScrollView *table_win = MakeWindow(600, 300, - "Step 4: Filtered Table Partitions"); + ScrollView *table_win = MakeWindow(600, 300, "Filtered Table Partitions"); DisplayColPartitions(table_win, &clean_part_grid_, ScrollView::BLUE); - DisplayColPartitions(table_win, &leader_and_ruling_grid_, ScrollView::AQUAMARINE); + DisplayColPartitions(table_win, &leader_and_ruling_grid_, + ScrollView::AQUAMARINE); } #endif SmoothTablePartitionRuns(); #ifndef GRAPHICS_DISABLED if (textord_tablefind_show_mark) { - ScrollView *table_win = MakeWindow(900, 300, - "Step 5: Smoothed Table Partitions"); + ScrollView *table_win = MakeWindow(900, 300, "Smoothed Table Partitions"); DisplayColPartitions(table_win, &clean_part_grid_, ScrollView::BLUE); - DisplayColPartitions(table_win, &leader_and_ruling_grid_, ScrollView::AQUAMARINE); + DisplayColPartitions(table_win, &leader_and_ruling_grid_, + ScrollView::AQUAMARINE); } #endif FilterFalseAlarms(); #ifndef GRAPHICS_DISABLED if (textord_tablefind_show_mark || textord_show_tables) { - ScrollView *table_win = MakeWindow(900, 300, - "Step 6: Final Table Partitions"); + ScrollView *table_win = MakeWindow(900, 300, "Final Table Partitions"); DisplayColPartitions(table_win, &clean_part_grid_, ScrollView::BLUE); - DisplayColPartitions(table_win, &leader_and_ruling_grid_, ScrollView::AQUAMARINE); + DisplayColPartitions(table_win, &leader_and_ruling_grid_, + ScrollView::AQUAMARINE); } #endif } @@ -826,7 +843,8 @@ void TableFinder::MarkTablePartitions() { // 4- Partitions 
with leaders before/after them. void TableFinder::MarkPartitionsUsingLocalInformation() { // Iterate the ColPartitions in the grid. - GridSearch gsearch(&clean_part_grid_); + GridSearch gsearch( + &clean_part_grid_); gsearch.StartFullSearch(); ColPartition *part = nullptr; while ((part = gsearch.NextFullSearch()) != nullptr) { @@ -861,7 +879,8 @@ bool TableFinder::HasWideOrNoInterWordGap(ColPartition *part) const { BLOBNBOX_CLIST *part_boxes = part->boxes(); BLOBNBOX_C_IT it(part_boxes); // Check if this is a relatively small partition (such as a single word) - if (part->bounding_box().width() < kMinBoxesInTextPartition * part->median_height() && + if (part->bounding_box().width() < + kMinBoxesInTextPartition * part->median_height() && part_boxes->length() < kMinBoxesInTextPartition) { return true; } @@ -918,7 +937,8 @@ bool TableFinder::HasWideOrNoInterWordGap(ColPartition *part) const { } // Since no large gap was found, return false if the partition is too // long to be a data cell - if (part->bounding_box().width() > kMaxBoxesInDataPartition * part->median_height() || + if (part->bounding_box().width() > + kMaxBoxesInDataPartition * part->median_height() || part_boxes->length() > kMaxBoxesInDataPartition) { return false; } @@ -1027,19 +1047,23 @@ void TableFinder::FilterParagraphEndings() { // To account for that, check if the partition center is to // the left of the one above it. int mid = (part->bounding_box().left() + part->bounding_box().right()) / 2; - int upper_mid = (upper_part->bounding_box().left() + upper_part->bounding_box().right()) / 2; + int upper_mid = (upper_part->bounding_box().left() + + upper_part->bounding_box().right()) / + 2; int current_spacing = 0; // spacing of the current line to margin int upper_spacing = 0; // spacing of the previous line to the margin if (left_to_right_language_) { // Left to right languages, use mid - left to figure out the distance // the middle is from the left margin. 
- int left = std::min(part->bounding_box().left(), upper_part->bounding_box().left()); + int left = std::min(part->bounding_box().left(), + upper_part->bounding_box().left()); current_spacing = mid - left; upper_spacing = upper_mid - left; } else { // Right to left languages, use right - mid to figure out the distance // the middle is from the right margin. - int right = std::max(part->bounding_box().right(), upper_part->bounding_box().right()); + int right = std::max(part->bounding_box().right(), + upper_part->bounding_box().right()); current_spacing = right - mid; upper_spacing = right - upper_mid; } @@ -1057,7 +1081,8 @@ void TableFinder::FilterParagraphEndings() { // The last line of a paragraph should be left aligned. // TODO(nbeato): This would be untrue if the text was right aligned. // How often is that? - if (part->space_to_left() > kMaxParagraphEndingLeftSpaceMultiple * part->median_height()) { + if (part->space_to_left() > + kMaxParagraphEndingLeftSpaceMultiple * part->median_height()) { continue; } // The line above it should be right aligned (assuming justified format). @@ -1066,7 +1091,8 @@ void TableFinder::FilterParagraphEndings() { // line could have fit on the previous line). So compare // whitespace to text. 
if (upper_part->bounding_box().width() < - kMinParagraphEndingTextToWhitespaceRatio * upper_part->space_to_right()) { + kMinParagraphEndingTextToWhitespaceRatio * + upper_part->space_to_right()) { continue; } @@ -1164,7 +1190,8 @@ void TableFinder::SetColumnsType(ColSegment_LIST *column_blocks) { TBOX box = seg->bounding_box(); int num_table_cells = 0; int num_text_cells = 0; - GridSearch rsearch(&clean_part_grid_); + GridSearch rsearch( + &clean_part_grid_); rsearch.SetUniqueMode(true); rsearch.StartRectSearch(box); ColPartition *part = nullptr; @@ -1189,7 +1216,8 @@ void TableFinder::SetColumnsType(ColSegment_LIST *column_blocks) { } // Move column blocks to grid -void TableFinder::MoveColSegmentsToGrid(ColSegment_LIST *segments, ColSegmentGrid *col_seg_grid) { +void TableFinder::MoveColSegmentsToGrid(ColSegment_LIST *segments, + ColSegmentGrid *col_seg_grid) { ColSegment_IT it(segments); for (it.mark_cycle_pt(); !it.cycled_list(); it.forward()) { ColSegment *seg = it.extract(); @@ -1211,7 +1239,8 @@ void TableFinder::GridMergeColumnBlocks() { int margin = gridsize(); // Iterate the Column Blocks in the grid. 
- GridSearch gsearch(&col_seg_grid_); + GridSearch gsearch( + &col_seg_grid_); gsearch.StartFullSearch(); ColSegment *seg; while ((seg = gsearch.NextFullSearch()) != nullptr) { @@ -1225,12 +1254,15 @@ void TableFinder::GridMergeColumnBlocks() { do { TBOX box = seg->bounding_box(); // slightly expand the search region vertically - int top_range = std::min(box.top() + margin, static_cast(tright().y())); - int bottom_range = std::max(box.bottom() - margin, static_cast(bleft().y())); + int top_range = + std::min(box.top() + margin, static_cast(tright().y())); + int bottom_range = + std::max(box.bottom() - margin, static_cast(bleft().y())); box.set_top(top_range); box.set_bottom(bottom_range); neighbor_found = false; - GridSearch rectsearch(&col_seg_grid_); + GridSearch rectsearch( + &col_seg_grid_); rectsearch.StartRectSearch(box); ColSegment *neighbor = nullptr; while ((neighbor = rectsearch.NextRectSearch()) != nullptr) { @@ -1288,7 +1320,8 @@ void TableFinder::GridMergeColumnBlocks() { void TableFinder::GetTableColumns(ColSegment_LIST *table_columns) { ColSegment_IT it(table_columns); // Iterate the ColPartitions in the grid. - GridSearch gsearch(&clean_part_grid_); + GridSearch gsearch( + &clean_part_grid_); gsearch.StartFullSearch(); ColPartition *part; while ((part = gsearch.NextFullSearch()) != nullptr) { @@ -1302,7 +1335,8 @@ void TableFinder::GetTableColumns(ColSegment_LIST *table_columns) { // Start a search below the current cell to find bottom neighbours // Note: a full search will always process things above it first, so // this should be starting at the highest cell and working its way down. 
- GridSearch vsearch(&clean_part_grid_); + GridSearch vsearch( + &clean_part_grid_); vsearch.StartVerticalSearch(box.left(), box.right(), box.bottom()); ColPartition *neighbor = nullptr; bool found_neighbours = false; @@ -1337,11 +1371,13 @@ void TableFinder::GetTableColumns(ColSegment_LIST *table_columns) { // Mark regions in a column that are x-bounded by the column boundaries and // y-bounded by the table columns' projection on the y-axis as table regions -void TableFinder::GetTableRegions(ColSegment_LIST *table_columns, ColSegment_LIST *table_regions) { +void TableFinder::GetTableRegions(ColSegment_LIST *table_columns, + ColSegment_LIST *table_regions) { ColSegment_IT cit(table_columns); ColSegment_IT rit(table_regions); // Iterate through column blocks - GridSearch gsearch(&col_seg_grid_); + GridSearch gsearch( + &col_seg_grid_); gsearch.StartFullSearch(); ColSegment *part; int page_height = tright().y() - bleft().y(); @@ -1400,7 +1436,8 @@ void TableFinder::GetTableRegions(ColSegment_LIST *table_columns, ColSegment_LIS // single line and hence the tables get merged together void TableFinder::GridMergeTableRegions() { // Iterate the table regions in the grid. 
- GridSearch gsearch(&table_grid_); + GridSearch gsearch( + &table_grid_); gsearch.StartFullSearch(); ColSegment *seg = nullptr; while ((seg = gsearch.NextFullSearch()) != nullptr) { @@ -1413,7 +1450,8 @@ void TableFinder::GridMergeTableRegions() { search_region.set_left(bleft().x()); search_region.set_right(tright().x()); neighbor_found = false; - GridSearch rectsearch(&table_grid_); + GridSearch rectsearch( + &table_grid_); rectsearch.StartRectSearch(search_region); ColSegment *neighbor = nullptr; while ((neighbor = rectsearch.NextRectSearch()) != nullptr) { @@ -1465,13 +1503,15 @@ bool TableFinder::BelongToOneTable(const TBOX &box1, const TBOX &box2) { // Check for ColPartitions spanning both table regions TBOX bbox = box1.bounding_union(box2); // Start a rect search on bbox - GridSearch rectsearch(&clean_part_grid_); + GridSearch rectsearch( + &clean_part_grid_); rectsearch.StartRectSearch(bbox); ColPartition *part = nullptr; while ((part = rectsearch.NextRectSearch()) != nullptr) { const TBOX &part_box = part->bounding_box(); // return true if a colpartition spanning both table regions is found - if (part_box.overlap(box1) && part_box.overlap(box2) && !part->IsImageType()) { + if (part_box.overlap(box1) && part_box.overlap(box2) && + !part->IsImageType()) { return true; } } @@ -1553,12 +1593,14 @@ void TableFinder::GrowTableBox(const TBOX &table_box, TBOX *result_box) { // Grow a table by increasing the size of the box to include // partitions with significant overlap with the table. -void TableFinder::GrowTableToIncludePartials(const TBOX &table_box, const TBOX &search_range, +void TableFinder::GrowTableToIncludePartials(const TBOX &table_box, + const TBOX &search_range, TBOX *result_box) { // Rulings are in a different grid, so search 2 grids for rulings, text, // and table partitions that are not entirely within the new box. for (int i = 0; i < 2; ++i) { - ColPartitionGrid *grid = (i == 0) ? 
&fragmented_text_grid_ : &leader_and_ruling_grid_; + ColPartitionGrid *grid = + (i == 0) ? &fragmented_text_grid_ : &leader_and_ruling_grid_; ColPartitionGridSearch rectsearch(grid); rectsearch.StartRectSearch(search_range); ColPartition *part = nullptr; @@ -1580,7 +1622,8 @@ void TableFinder::GrowTableToIncludePartials(const TBOX &table_box, const TBOX & // Grow a table by expanding to the extents of significantly // overlapping lines. -void TableFinder::GrowTableToIncludeLines(const TBOX &table_box, const TBOX &search_range, +void TableFinder::GrowTableToIncludeLines(const TBOX &table_box, + const TBOX &search_range, TBOX *result_box) { ColPartitionGridSearch rsearch(&leader_and_ruling_grid_); rsearch.SetUniqueMode(true); @@ -1612,7 +1655,8 @@ void TableFinder::GrowTableToIncludeLines(const TBOX &table_box, const TBOX &sea // Checks whether the horizontal line belong to the table by looking at the // side spacing of extra ColParitions that will be included in the table // due to expansion -bool TableFinder::HLineBelongsToTable(const ColPartition &part, const TBOX &table_box) { +bool TableFinder::HLineBelongsToTable(const ColPartition &part, + const TBOX &table_box) { if (!part.IsHorizontalLine()) { return false; } @@ -1638,7 +1682,8 @@ bool TableFinder::HLineBelongsToTable(const ColPartition &part, const TBOX &tabl // Rulings are in a different grid, so search 2 grids for rulings, text, // and table partitions that are introduced by the new box. for (int i = 0; i < 2; ++i) { - ColPartitionGrid *grid = (i == 0) ? &clean_part_grid_ : &leader_and_ruling_grid_; + ColPartitionGrid *grid = + (i == 0) ? 
&clean_part_grid_ : &leader_and_ruling_grid_; // Start a rect search on bbox ColPartitionGridSearch rectsearch(grid); rectsearch.SetUniqueMode(true); @@ -1683,12 +1728,14 @@ bool TableFinder::HLineBelongsToTable(const ColPartition &part, const TBOX &tabl void TableFinder::IncludeLeftOutColumnHeaders(TBOX *table_box) { // Start a search above the current table to look for column headers ColPartitionGridSearch vsearch(&clean_part_grid_); - vsearch.StartVerticalSearch(table_box->left(), table_box->right(), table_box->top()); + vsearch.StartVerticalSearch(table_box->left(), table_box->right(), + table_box->top()); ColPartition *neighbor = nullptr; ColPartition *previous_neighbor = nullptr; while ((neighbor = vsearch.NextVerticalSearch(false)) != nullptr) { // Max distance to find a table heading. - const int max_distance = kMaxColumnHeaderDistance * neighbor->median_height(); + const int max_distance = + kMaxColumnHeaderDistance * neighbor->median_height(); int table_top = table_box->top(); const TBOX &box = neighbor->bounding_box(); // Do not continue if the next box is way above @@ -1725,7 +1772,8 @@ void TableFinder::DeleteSingleColumnTables() { // create an integer array to hold projection on x-axis int *table_xprojection = new int[page_width]; // Iterate through all tables in the table grid - GridSearch table_search(&table_grid_); + GridSearch table_search( + &table_grid_); table_search.StartFullSearch(); ColSegment *table; while ((table = table_search.NextFullSearch()) != nullptr) { @@ -1735,7 +1783,8 @@ void TableFinder::DeleteSingleColumnTables() { table_xprojection[i] = 0; } // Start a rect search on table_box - GridSearch rectsearch(&clean_part_grid_); + GridSearch rectsearch( + &clean_part_grid_); rectsearch.SetUniqueMode(true); rectsearch.StartRectSearch(table_box); ColPartition *part; @@ -1842,7 +1891,7 @@ void TableFinder::RecognizeTables() { #ifndef GRAPHICS_DISABLED ScrollView *table_win = nullptr; if (textord_show_tables) { - table_win = MakeWindow(0, 
0, "Step 9: Table Structure"); + table_win = MakeWindow(0, 0, "Table Structure"); DisplayColPartitions(table_win, &fragmented_text_grid_, ScrollView::BLUE, ScrollView::LIGHT_BLUE); // table_grid_.DisplayBoxes(table_win); @@ -1949,7 +1998,8 @@ void TableFinder::DisplayColPartitions(ScrollView *win, ColPartitionGrid *grid, DisplayColPartitions(win, grid, default_color, ScrollView::YELLOW); } -void TableFinder::DisplayColPartitionConnections(ScrollView *win, ColPartitionGrid *grid, +void TableFinder::DisplayColPartitionConnections(ScrollView *win, + ColPartitionGrid *grid, ScrollView::Color color) { // Iterate the ColPartitions in the grid. GridSearch gsearch(grid); @@ -1993,27 +2043,9 @@ void TableFinder::DisplayColPartitionConnections(ScrollView *win, ColPartitionGr // Merge all colpartitions in table regions to make them a single // colpartition and revert types of isolated table cells not // assigned to any table to their original types. -void TableFinder::MakeTableBlocks(ColPartitionGrid *grid, ColPartitionSet **all_columns, - WidthCallback width_cb) { -#ifndef GRAPHICS_DISABLED - ScrollView* table_win = nullptr; - if (textord_show_tables) { - table_win = MakeWindow(0, 0, "Step 12: Final tables"); - DisplayColPartitions(table_win, &fragmented_text_grid_, - ScrollView::BLUE, ScrollView::LIGHT_BLUE); - } -#endif // GRAPHICS_DISABLED - - // initializing recognizer in order to extract table row and columnd info - TableRecognizer recognizer; - { - recognizer.Init(); - recognizer.set_line_grid(&leader_and_ruling_grid_); - recognizer.set_text_grid(&fragmented_text_grid_); - recognizer.set_max_text_height(global_median_xheight_ * 2.0); - recognizer.set_min_height(1.5 * gridheight()); - } - +void TableFinder::MakeTableBlocks(ColPartitionGrid *grid, + ColPartitionSet **all_columns, + const WidthCallback &width_cb) { // Since we have table blocks already, remove table tags from all // colpartitions GridSearch gsearch(grid); @@ -2027,13 +2059,15 @@ void 
TableFinder::MakeTableBlocks(ColPartitionGrid *grid, ColPartitionSet **all_ } // Now make a single colpartition out of each table block and remove // all colpartitions contained within a table - GridSearch table_search(&table_grid_); + GridSearch table_search( + &table_grid_); table_search.StartFullSearch(); ColSegment *table; while ((table = table_search.NextFullSearch()) != nullptr) { const TBOX &table_box = table->bounding_box(); // Start a rect search on table_box - GridSearch rectsearch(grid); + GridSearch rectsearch( + grid); rectsearch.StartRectSearch(table_box); ColPartition *part; ColPartition *table_partition = nullptr; @@ -2068,36 +2102,17 @@ void TableFinder::MakeTableBlocks(ColPartitionGrid *grid, ColPartitionSet **all_ table_partition->set_flow(BTFT_CHAIN); table_partition->SetBlobTypes(); grid->InsertBBox(true, true, table_partition); - - // Insert table columns and rows into an api accessible object - StructuredTable* table_structure = recognizer.RecognizeTable(table_box); - if (table_structure != nullptr) { -#ifndef GRAPHICS_DISABLED - if (textord_show_tables) { - table_structure->Display(table_win, ScrollView::LIME_GREEN); - } -#endif // GRAPHICS_DISABLED - - auto &tables = uniqueInstance>(); - tables.push_back( - TessTable{table_box, table_structure->getRows(), table_structure->getCols()}); - - delete table_structure; - } } } - -#ifndef GRAPHICS_DISABLED - if (textord_show_tables) { - table_grid_.DisplayBoxes(table_win); - } -#endif // GRAPHICS_DISABLED } //////// ColSegment code //////// ColSegment::ColSegment() - : ELIST_LINK(), num_table_cells_(0), num_text_cells_(0), type_(COL_UNKNOWN) {} + : ELIST_LINK(), + num_table_cells_(0), + num_text_cells_(0), + type_(COL_UNKNOWN) {} // Provides a color for BBGrid to draw the rectangle. 
ScrollView::Color ColSegment::BoxColor() const { diff --git a/src/textord/tablefind.h b/src/textord/tablefind.h index 4b758b40f..e26404178 100644 --- a/src/textord/tablefind.h +++ b/src/textord/tablefind.h @@ -107,7 +107,8 @@ private: // Typedef BBGrid of ColSegments using ColSegmentGrid = BBGrid; -using ColSegmentGridSearch = GridSearch; +using ColSegmentGridSearch = + GridSearch; // TableFinder is a utility class to find a set of tables given a set of // ColPartitions and Columns. The TableFinder will mark candidate ColPartitions @@ -143,8 +144,8 @@ public: // tables. The columns and width callbacks are used to merge tables. // The reskew argument is only used to write the tables to the out.png // if that feature is enabled. - void LocateTables(ColPartitionGrid *grid, ColPartitionSet **columns, WidthCallback width_cb, - const FCOORD &reskew); + void LocateTables(ColPartitionGrid *grid, ColPartitionSet **columns, + WidthCallback width_cb, const FCOORD &reskew); protected: // Access for the grid dimensions. @@ -179,7 +180,8 @@ protected: // Utility function to move segments to col_seg_grid // Note: Move includes ownership, // so segments will be be owned by col_seg_grid - void MoveColSegmentsToGrid(ColSegment_LIST *segments, ColSegmentGrid *col_seg_grid); + void MoveColSegmentsToGrid(ColSegment_LIST *segments, + ColSegmentGrid *col_seg_grid); //////// Set up code to run during table detection to correctly //////// initialize variables on column partitions that are used later. @@ -191,7 +193,8 @@ protected: // Set left, right and top, bottom spacings of each colpartition. // Left/right spacings are w.r.t the column boundaries // Top/bottom spacings are w.r.t. previous and next colpartitions - static void SetPartitionSpacings(ColPartitionGrid *grid, ColPartitionSet **all_columns); + static void SetPartitionSpacings(ColPartitionGrid *grid, + ColPartitionSet **all_columns); // Set spacing and closest neighbors above and below a given colpartition. 
void SetVerticalSpacing(ColPartition *part); @@ -263,10 +266,12 @@ protected: //////// // Get Column segments from best_columns_ - void GetColumnBlocks(ColPartitionSet **columns, ColSegment_LIST *col_segments); + void GetColumnBlocks(ColPartitionSet **columns, + ColSegment_LIST *col_segments); // Group Column segments into consecutive single column regions. - void GroupColumnBlocks(ColSegment_LIST *current_segments, ColSegment_LIST *col_segments); + void GroupColumnBlocks(ColSegment_LIST *current_segments, + ColSegment_LIST *col_segments); // Check if two boxes are consecutive within the same column bool ConsecutiveBoxes(const TBOX &b1, const TBOX &b2); @@ -295,7 +300,8 @@ protected: // earlier functions) in the x direction and the min/max extent of // overlapping table columns in the y direction. // Section 4.2 of paper. - void GetTableRegions(ColSegment_LIST *table_columns, ColSegment_LIST *table_regions); + void GetTableRegions(ColSegment_LIST *table_columns, + ColSegment_LIST *table_regions); //////// Functions to "patch up" found tables //////// @@ -316,11 +322,12 @@ protected: void GrowTableBox(const TBOX &table_box, TBOX *result_box); // Grow a table by increasing the size of the box to include // partitions with significant overlap with the table. - void GrowTableToIncludePartials(const TBOX &table_box, const TBOX &search_range, - TBOX *result_box); + void GrowTableToIncludePartials(const TBOX &table_box, + const TBOX &search_range, TBOX *result_box); // Grow a table by expanding to the extents of significantly // overlapping lines. 
- void GrowTableToIncludeLines(const TBOX &table_box, const TBOX &search_range, TBOX *result_box); + void GrowTableToIncludeLines(const TBOX &table_box, const TBOX &search_range, + TBOX *result_box); // Checks whether the horizontal line belong to the table by looking at the // side spacing of extra ColParitions that will be included in the table // due to expansion @@ -351,12 +358,14 @@ protected: // Displays Colpartitions marked as table row. Overlays them on top of // part_grid_. - void DisplayColSegments(ScrollView *win, ColSegment_LIST *cols, ScrollView::Color color); + void DisplayColSegments(ScrollView *win, ColSegment_LIST *cols, + ScrollView::Color color); // Displays the colpartitions using a new coloring on an existing window. // Note: This method is only for debug purpose during development and // would not be part of checked in code - void DisplayColPartitions(ScrollView *win, ColPartitionGrid *grid, ScrollView::Color text_color, + void DisplayColPartitions(ScrollView *win, ColPartitionGrid *grid, + ScrollView::Color text_color, ScrollView::Color table_color); void DisplayColPartitions(ScrollView *win, ColPartitionGrid *grid, ScrollView::Color default_color); @@ -366,7 +375,8 @@ protected: // Merge all colpartitions in table regions to make them a single // colpartition and revert types of isolated table cells not // assigned to any table to their original types. - void MakeTableBlocks(ColPartitionGrid *grid, ColPartitionSet **columns, WidthCallback width_cb); + void MakeTableBlocks(ColPartitionGrid *grid, ColPartitionSet **columns, + const WidthCallback &width_cb); ///////////////////////////////////////////////// // Useful objects used during table find process. 
diff --git a/src/textord/tablerecog.cpp b/src/textord/tablerecog.cpp index c73d23998..2bcb4c07d 100644 --- a/src/textord/tablerecog.cpp +++ b/src/textord/tablerecog.cpp @@ -89,13 +89,13 @@ void StructuredTable::set_max_text_height(int height) { bool StructuredTable::is_lined() const { return is_lined_; } -int StructuredTable::row_count() const { +unsigned StructuredTable::row_count() const { return cell_y_.empty() ? 0 : cell_y_.size() - 1; } -int StructuredTable::column_count() const { +unsigned StructuredTable::column_count() const { return cell_x_.empty() ? 0 : cell_x_.size() - 1; } -int StructuredTable::cell_count() const { +unsigned StructuredTable::cell_count() const { return row_count() * column_count(); } void StructuredTable::set_bounding_box(const TBOX &box) { @@ -110,12 +110,12 @@ int StructuredTable::median_cell_height() { int StructuredTable::median_cell_width() { return median_cell_width_; } -int StructuredTable::row_height(int row) const { - ASSERT_HOST(0 <= row && row < row_count()); +int StructuredTable::row_height(unsigned row) const { + ASSERT_HOST(row < row_count()); return cell_y_[row + 1] - cell_y_[row]; } -int StructuredTable::column_width(int column) const { - ASSERT_HOST(0 <= column && column < column_count()); +int StructuredTable::column_width(unsigned column) const { + ASSERT_HOST(column < column_count()); return cell_x_[column + 1] - cell_x_[column]; } int StructuredTable::space_above() const { @@ -234,16 +234,16 @@ int StructuredTable::CountFilledCellsInRow(int row) { int StructuredTable::CountFilledCellsInColumn(int column) { return CountFilledCells(0, row_count() - 1, column, column); } -int StructuredTable::CountFilledCells(int row_start, int row_end, int column_start, - int column_end) { - ASSERT_HOST(0 <= row_start && row_start <= row_end && row_end < row_count()); - ASSERT_HOST(0 <= column_start && column_start <= column_end && column_end < column_count()); +int StructuredTable::CountFilledCells(unsigned row_start, unsigned 
row_end, unsigned column_start, + unsigned column_end) { + ASSERT_HOST(row_start <= row_end && row_end < row_count()); + ASSERT_HOST(column_start <= column_end && column_end < column_count()); int cell_count = 0; TBOX cell_box; - for (int row = row_start; row <= row_end; ++row) { + for (unsigned row = row_start; row <= row_end; ++row) { cell_box.set_bottom(cell_y_[row]); cell_box.set_top(cell_y_[row + 1]); - for (int col = column_start; col <= column_end; ++col) { + for (unsigned col = column_start; col <= column_end; ++col) { cell_box.set_left(cell_x_[col]); cell_box.set_right(cell_x_[col + 1]); if (CountPartitions(cell_box) > 0) { @@ -258,8 +258,8 @@ int StructuredTable::CountFilledCells(int row_start, int row_end, int column_sta // This can filter out large whitespace caused by growing tables too far // and page numbers. bool StructuredTable::VerifyRowFilled(int row) { - for (int i = 0; i < column_count(); ++i) { - double area_filled = CalculateCellFilledPercentage(row, i); + for (unsigned i = 0; i < column_count(); ++i) { + auto area_filled = CalculateCellFilledPercentage(row, i); if (area_filled >= kMinFilledArea) { return true; } @@ -269,9 +269,9 @@ bool StructuredTable::VerifyRowFilled(int row) { // Finds the filled area in a cell. // Assume ColPartitions do not overlap for simplicity (even though they do). 
-double StructuredTable::CalculateCellFilledPercentage(int row, int column) { - ASSERT_HOST(0 <= row && row <= row_count()); - ASSERT_HOST(0 <= column && column <= column_count()); +double StructuredTable::CalculateCellFilledPercentage(unsigned row, unsigned column) { + ASSERT_HOST(row <= row_count()); + ASSERT_HOST(column <= column_count()); const TBOX kCellBox(cell_x_[column], cell_y_[row], cell_x_[column + 1], cell_y_[row + 1]); ASSERT_HOST(!kCellBox.null_box()); @@ -310,39 +310,6 @@ void StructuredTable::Display(ScrollView *window, ScrollView::Color color) { #endif -std::vector StructuredTable::getRows() -{ - if (cell_y_.size() < 2) { - return std::vector(); - } - - std::vector rows(cell_y_.size() - 1); - unsigned ct = cell_y_.size() - 2; - for(unsigned i = 0; i + 1 < cell_y_.size(); i++) { - const ICOORD left(bounding_box_.left(), cell_y_[i]); - const ICOORD right(bounding_box_.right(), cell_y_[i + 1]); - rows[ct - i] = TBOX(left, right); - } - - return rows; -} - -std::vector StructuredTable::getCols() -{ - if (cell_x_.size() < 2) { - return std::vector(); - } - - std::vector cols(cell_x_.size() - 1); - for(unsigned i = 0; i + 1 < cell_x_.size(); i++) { - const ICOORD top(cell_x_[i], bounding_box_.top()); - const ICOORD bot(cell_x_[i+1], bounding_box_.bottom()); - cols[i] = TBOX(top, bot); - } - - return cols; -} - // Clear structure information. 
void StructuredTable::ClearStructure() { cell_x_.clear(); @@ -565,10 +532,10 @@ void StructuredTable::CalculateStats() { STATS height_stats(0, kMaxCellHeight + 1); STATS width_stats(0, kMaxCellWidth + 1); - for (int i = 0; i < row_count(); ++i) { + for (unsigned i = 0; i < row_count(); ++i) { height_stats.add(row_height(i), column_count()); } - for (int i = 0; i < column_count(); ++i) { + for (unsigned i = 0; i < column_count(); ++i) { width_stats.add(column_width(i), row_count()); } @@ -650,8 +617,8 @@ void StructuredTable::FindCellSplitLocations(const std::vector &min_list, ASSERT_HOST(min_list.at(min_list.size() - 1) < max_list.at(max_list.size() - 1)); locations->push_back(min_list.at(0)); - int min_index = 0; - int max_index = 0; + unsigned min_index = 0; + unsigned max_index = 0; int stacked_partitions = 0; int last_cross_position = INT32_MAX; // max_index will expire after min_index. @@ -753,15 +720,6 @@ int StructuredTable::CountPartitions(const TBOX &box) { //////// TableRecognizer Class //////// -TableRecognizer::TableRecognizer() - : text_grid_(nullptr) - , line_grid_(nullptr) - , min_height_(0) - , min_width_(0) - , max_text_height_(INT32_MAX) {} - -TableRecognizer::~TableRecognizer() = default; - void TableRecognizer::Init() {} void TableRecognizer::set_text_grid(ColPartitionGrid *text_grid) { @@ -937,7 +895,7 @@ bool TableRecognizer::RecognizeWhitespacedTable(const TBOX &guess_box, Structure const int kMidGuessY = (guess_box.bottom() + guess_box.top()) / 2; // Keeps track of the most columns in an accepted table. The resulting table // may be less than the max, but we don't want to stray too far. - int best_cols = 0; + unsigned best_cols = 0; // Make sure we find a good border. 
bool found_good_border = false; diff --git a/src/textord/tablerecog.h b/src/textord/tablerecog.h index a77125ec4..080cfaec3 100644 --- a/src/textord/tablerecog.h +++ b/src/textord/tablerecog.h @@ -2,7 +2,6 @@ // File: tablerecog.h // Description: Functions to detect structure of tables. // Author: Nicholas Beato -// Created: Aug 17, 2010 // // (C) Copyright 2010, Google Inc. // Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,7 +20,6 @@ #define TABLERECOG_H_ #include "colpartitiongrid.h" -#include namespace tesseract { @@ -88,15 +86,15 @@ public: // Basic accessors. Some are treated as attributes despite having indirect // representation. bool is_lined() const; - int row_count() const; - int column_count() const; - int cell_count() const; + unsigned row_count() const; + unsigned column_count() const; + unsigned cell_count() const; void set_bounding_box(const TBOX &box); const TBOX &bounding_box() const; int median_cell_height(); int median_cell_width(); - int row_height(int row) const; - int column_width(int column) const; + int row_height(unsigned row) const; + int column_width(unsigned column) const; int space_above() const; int space_below() const; @@ -122,7 +120,7 @@ public: int CountFilledCells(); int CountFilledCellsInRow(int row); int CountFilledCellsInColumn(int column); - int CountFilledCells(int row_start, int row_end, int column_start, int column_end); + int CountFilledCells(unsigned row_start, unsigned row_end, unsigned column_start, unsigned column_end); // Makes sure that at least one cell in a row has substantial area filled. // This can filter out large whitespace caused by growing tables too far @@ -130,16 +128,11 @@ public: // (currently bugged for some reason). bool VerifyRowFilled(int row); // Finds the filled area in a cell. - double CalculateCellFilledPercentage(int row, int column); + double CalculateCellFilledPercentage(unsigned row, unsigned column); // Debug display, draws the table in the given color. 
If the table is not // valid, the table and "best" grid lines are still drawn in the given color. void Display(ScrollView *window, ScrollView::Color color); - - /// Calculate bounding boxes of the rows and return them. - std::vector getRows(); - /// Calculate bounding boxes of the columns and return them. - std::vector getCols(); protected: // Clear the structure information. @@ -257,8 +250,8 @@ protected: class TESS_API TableRecognizer { public: - TableRecognizer(); - ~TableRecognizer(); + TableRecognizer() = default; + ~TableRecognizer() = default; // Initialization code. Must be called after the constructor. void Init(); @@ -365,13 +358,13 @@ protected: static bool IsWeakTableRow(StructuredTable *table, int row); // Input data, used as read only data to make decisions. - ColPartitionGrid *text_grid_; // Text ColPartitions - ColPartitionGrid *line_grid_; // Line ColPartitions + ColPartitionGrid *text_grid_ = nullptr; // Text ColPartitions + ColPartitionGrid *line_grid_ = nullptr; // Line ColPartitions // Table constraints, a "good" table must satisfy these. - int min_height_; - int min_width_; + int min_height_ = 0; + int min_width_ = 0; // Filters, used to prevent awkward partitions from destroying structure. - int max_text_height_; // Horizontal lines may intersect taller text. + int max_text_height_ = INT32_MAX; // Horizontal lines may intersect taller text. 
}; } // namespace tesseract diff --git a/src/textord/tabvector.h b/src/textord/tabvector.h index 99c1b0200..2c48d7219 100644 --- a/src/textord/tabvector.h +++ b/src/textord/tabvector.h @@ -33,11 +33,8 @@ class ScrollView; namespace tesseract { -extern double_VAR_H(textord_tabvector_vertical_gap_fraction, 0.5, - "Max fraction of mean blob width allowed for vertical gaps " - "in vertical text"); -extern double_VAR_H(textord_tabvector_vertical_box_ratio, 0.5, - "Fraction of box matches required to declare a line vertical"); +extern double_VAR_H(textord_tabvector_vertical_gap_fraction); +extern double_VAR_H(textord_tabvector_vertical_box_ratio); // The alignment type that a tab vector represents. // Keep this enum synced with kAlignmentNames in tabvector.cpp. diff --git a/src/textord/textord.h b/src/textord/textord.h index 88aca1df1..df6750a74 100644 --- a/src/textord/textord.h +++ b/src/textord/textord.h @@ -227,99 +227,91 @@ private: public: // makerow.cpp /////////////////////////////////////////// - BOOL_VAR_H(textord_single_height_mode, false, - "Script has no xheight, so use a single mode for horizontal text"); + BOOL_VAR_H(textord_single_height_mode); // tospace.cpp /////////////////////////////////////////// - BOOL_VAR_H(tosp_old_to_method, false, "Space stats use prechopping?"); - BOOL_VAR_H(tosp_old_to_constrain_sp_kn, false, - "Constrain relative values of inter and intra-word gaps for " - "old_to_method."); - BOOL_VAR_H(tosp_only_use_prop_rows, true, "Block stats to use fixed pitch rows?"); - BOOL_VAR_H(tosp_force_wordbreak_on_punct, false, - "Force word breaks on punct to break long lines in non-space " - "delimited langs"); - BOOL_VAR_H(tosp_use_pre_chopping, false, "Space stats use prechopping?"); - BOOL_VAR_H(tosp_old_to_bug_fix, false, "Fix suspected bug in old code"); - BOOL_VAR_H(tosp_block_use_cert_spaces, true, "Only stat OBVIOUS spaces"); - BOOL_VAR_H(tosp_row_use_cert_spaces, true, "Only stat OBVIOUS spaces"); - 
BOOL_VAR_H(tosp_narrow_blobs_not_cert, true, "Only stat OBVIOUS spaces"); - BOOL_VAR_H(tosp_row_use_cert_spaces1, true, "Only stat OBVIOUS spaces"); - BOOL_VAR_H(tosp_recovery_isolated_row_stats, true, "Use row alone when inadequate cert spaces"); - BOOL_VAR_H(tosp_only_small_gaps_for_kern, false, "Better guess"); - BOOL_VAR_H(tosp_all_flips_fuzzy, false, "Pass ANY flip to context?"); - BOOL_VAR_H(tosp_fuzzy_limit_all, true, "Don't restrict kn->sp fuzzy limit to tables"); - BOOL_VAR_H(tosp_stats_use_xht_gaps, true, "Use within xht gap for wd breaks"); - BOOL_VAR_H(tosp_use_xht_gaps, true, "Use within xht gap for wd breaks"); - BOOL_VAR_H(tosp_only_use_xht_gaps, false, "Only use within xht gap for wd breaks"); - BOOL_VAR_H(tosp_rule_9_test_punct, false, "Don't chng kn to space next to punct"); - BOOL_VAR_H(tosp_flip_fuzz_kn_to_sp, true, "Default flip"); - BOOL_VAR_H(tosp_flip_fuzz_sp_to_kn, true, "Default flip"); - BOOL_VAR_H(tosp_improve_thresh, false, "Enable improvement heuristic"); - INT_VAR_H(tosp_debug_level, 0, "Debug data"); - INT_VAR_H(tosp_enough_space_samples_for_median, 3, "or should we use mean"); - INT_VAR_H(tosp_redo_kern_limit, 10, "No.samples reqd to reestimate for row"); - INT_VAR_H(tosp_few_samples, 40, "No.gaps reqd with 1 large gap to treat as a table"); - INT_VAR_H(tosp_short_row, 20, "No.gaps reqd with few cert spaces to use certs"); - INT_VAR_H(tosp_sanity_method, 1, "How to avoid being silly"); - double_VAR_H(tosp_old_sp_kn_th_factor, 2.0, - "Factor for defining space threshold in terms of space and " - "kern sizes"); - double_VAR_H(tosp_threshold_bias1, 0, "how far between kern and space?"); - double_VAR_H(tosp_threshold_bias2, 0, "how far between kern and space?"); - double_VAR_H(tosp_narrow_fraction, 0.3, "Fract of xheight for narrow"); - double_VAR_H(tosp_narrow_aspect_ratio, 0.48, "narrow if w/h less than this"); - double_VAR_H(tosp_wide_fraction, 0.52, "Fract of xheight for wide"); - double_VAR_H(tosp_wide_aspect_ratio, 0.0, "wide if 
w/h less than this"); - double_VAR_H(tosp_fuzzy_space_factor, 0.6, "Fract of xheight for fuzz sp"); - double_VAR_H(tosp_fuzzy_space_factor1, 0.5, "Fract of xheight for fuzz sp"); - double_VAR_H(tosp_fuzzy_space_factor2, 0.72, "Fract of xheight for fuzz sp"); - double_VAR_H(tosp_gap_factor, 0.83, "gap ratio to flip sp->kern"); - double_VAR_H(tosp_kern_gap_factor1, 2.0, "gap ratio to flip kern->sp"); - double_VAR_H(tosp_kern_gap_factor2, 1.3, "gap ratio to flip kern->sp"); - double_VAR_H(tosp_kern_gap_factor3, 2.5, "gap ratio to flip kern->sp"); - double_VAR_H(tosp_ignore_big_gaps, -1, "xht multiplier"); - double_VAR_H(tosp_ignore_very_big_gaps, 3.5, "xht multiplier"); - double_VAR_H(tosp_rep_space, 1.6, "rep gap multiplier for space"); - double_VAR_H(tosp_enough_small_gaps, 0.65, "Fract of kerns reqd for isolated row stats"); - double_VAR_H(tosp_table_kn_sp_ratio, 2.25, "Min difference of kn & sp in table"); - double_VAR_H(tosp_table_xht_sp_ratio, 0.33, "Expect spaces bigger than this"); - double_VAR_H(tosp_table_fuzzy_kn_sp_ratio, 3.0, "Fuzzy if less than this"); - double_VAR_H(tosp_fuzzy_kn_fraction, 0.5, "New fuzzy kn alg"); - double_VAR_H(tosp_fuzzy_sp_fraction, 0.5, "New fuzzy sp alg"); - double_VAR_H(tosp_min_sane_kn_sp, 1.5, "Don't trust spaces less than this time kn"); - double_VAR_H(tosp_init_guess_kn_mult, 2.2, "Thresh guess - mult kn by this"); - double_VAR_H(tosp_init_guess_xht_mult, 0.28, "Thresh guess - mult xht by this"); - double_VAR_H(tosp_max_sane_kn_thresh, 5.0, "Multiplier on kn to limit thresh"); - double_VAR_H(tosp_flip_caution, 0.0, "Don't autoflip kn to sp when large separation"); - double_VAR_H(tosp_large_kerning, 0.19, "Limit use of xht gap with large kns"); - double_VAR_H(tosp_dont_fool_with_small_kerns, -1, "Limit use of xht gap with odd small kns"); - double_VAR_H(tosp_near_lh_edge, 0, "Don't reduce box if the top left is non blank"); - double_VAR_H(tosp_silly_kn_sp_gap, 0.2, "Don't let sp minus kn get too small"); - 
double_VAR_H(tosp_pass_wide_fuzz_sp_to_context, 0.75, "How wide fuzzies need context"); + BOOL_VAR_H(tosp_old_to_method); + BOOL_VAR_H(tosp_old_to_constrain_sp_kn); + BOOL_VAR_H(tosp_only_use_prop_rows); + BOOL_VAR_H(tosp_force_wordbreak_on_punct); + BOOL_VAR_H(tosp_use_pre_chopping); + BOOL_VAR_H(tosp_old_to_bug_fix); + BOOL_VAR_H(tosp_block_use_cert_spaces); + BOOL_VAR_H(tosp_row_use_cert_spaces); + BOOL_VAR_H(tosp_narrow_blobs_not_cert); + BOOL_VAR_H(tosp_row_use_cert_spaces1); + BOOL_VAR_H(tosp_recovery_isolated_row_stats); + BOOL_VAR_H(tosp_only_small_gaps_for_kern); + BOOL_VAR_H(tosp_all_flips_fuzzy); + BOOL_VAR_H(tosp_fuzzy_limit_all); + BOOL_VAR_H(tosp_stats_use_xht_gaps); + BOOL_VAR_H(tosp_use_xht_gaps); + BOOL_VAR_H(tosp_only_use_xht_gaps); + BOOL_VAR_H(tosp_rule_9_test_punct); + BOOL_VAR_H(tosp_flip_fuzz_kn_to_sp); + BOOL_VAR_H(tosp_flip_fuzz_sp_to_kn); + BOOL_VAR_H(tosp_improve_thresh); + INT_VAR_H(tosp_debug_level); + INT_VAR_H(tosp_enough_space_samples_for_median); + INT_VAR_H(tosp_redo_kern_limit); + INT_VAR_H(tosp_few_samples); + INT_VAR_H(tosp_short_row); + INT_VAR_H(tosp_sanity_method); + double_VAR_H(tosp_old_sp_kn_th_factor); + double_VAR_H(tosp_threshold_bias1); + double_VAR_H(tosp_threshold_bias2); + double_VAR_H(tosp_narrow_fraction); + double_VAR_H(tosp_narrow_aspect_ratio); + double_VAR_H(tosp_wide_fraction); + double_VAR_H(tosp_wide_aspect_ratio); + double_VAR_H(tosp_fuzzy_space_factor); + double_VAR_H(tosp_fuzzy_space_factor1); + double_VAR_H(tosp_fuzzy_space_factor2); + double_VAR_H(tosp_gap_factor); + double_VAR_H(tosp_kern_gap_factor1); + double_VAR_H(tosp_kern_gap_factor2); + double_VAR_H(tosp_kern_gap_factor3); + double_VAR_H(tosp_ignore_big_gaps); + double_VAR_H(tosp_ignore_very_big_gaps); + double_VAR_H(tosp_rep_space); + double_VAR_H(tosp_enough_small_gaps); + double_VAR_H(tosp_table_kn_sp_ratio); + double_VAR_H(tosp_table_xht_sp_ratio); + double_VAR_H(tosp_table_fuzzy_kn_sp_ratio); + double_VAR_H(tosp_fuzzy_kn_fraction); + 
double_VAR_H(tosp_fuzzy_sp_fraction); + double_VAR_H(tosp_min_sane_kn_sp); + double_VAR_H(tosp_init_guess_kn_mult); + double_VAR_H(tosp_init_guess_xht_mult); + double_VAR_H(tosp_max_sane_kn_thresh); + double_VAR_H(tosp_flip_caution); + double_VAR_H(tosp_large_kerning); + double_VAR_H(tosp_dont_fool_with_small_kerns); + double_VAR_H(tosp_near_lh_edge); + double_VAR_H(tosp_silly_kn_sp_gap); + double_VAR_H(tosp_pass_wide_fuzz_sp_to_context); // tordmain.cpp /////////////////////////////////////////// - BOOL_VAR_H(textord_no_rejects, false, "Don't remove noise blobs"); - BOOL_VAR_H(textord_show_blobs, false, "Display unsorted blobs"); - BOOL_VAR_H(textord_show_boxes, false, "Display boxes"); - INT_VAR_H(textord_max_noise_size, 7, "Pixel size of noise"); - INT_VAR_H(textord_baseline_debug, 0, "Baseline debug level"); - double_VAR_H(textord_noise_area_ratio, 0.7, "Fraction of bounding box for noise"); - double_VAR_H(textord_initialx_ile, 0.75, "Ile of sizes for xheight guess"); - double_VAR_H(textord_initialasc_ile, 0.90, "Ile of sizes for xheight guess"); - INT_VAR_H(textord_noise_sizefraction, 10, "Fraction of size for maxima"); - double_VAR_H(textord_noise_sizelimit, 0.5, "Fraction of x for big t count"); - INT_VAR_H(textord_noise_translimit, 16, "Transitions for normal blob"); - double_VAR_H(textord_noise_normratio, 2.0, "Dot to norm ratio for deletion"); - BOOL_VAR_H(textord_noise_rejwords, true, "Reject noise-like words"); - BOOL_VAR_H(textord_noise_rejrows, true, "Reject noise-like rows"); - double_VAR_H(textord_noise_syfract, 0.2, "xh fract error for norm blobs"); - double_VAR_H(textord_noise_sxfract, 0.4, "xh fract width error for norm blobs"); - double_VAR_H(textord_noise_hfract, 1.0 / 64, - "Height fraction to discard outlines as speckle noise"); - INT_VAR_H(textord_noise_sncount, 1, "super norm blobs to save row"); - double_VAR_H(textord_noise_rowratio, 6.0, "Dot to norm ratio for deletion"); - BOOL_VAR_H(textord_noise_debug, false, "Debug row garbage 
detector"); - double_VAR_H(textord_blshift_maxshift, 0.00, "Max baseline shift"); - double_VAR_H(textord_blshift_xfraction, 9.99, "Min size of baseline shift"); + BOOL_VAR_H(textord_no_rejects); + BOOL_VAR_H(textord_show_blobs); + BOOL_VAR_H(textord_show_boxes); + INT_VAR_H(textord_max_noise_size); + INT_VAR_H(textord_baseline_debug); + double_VAR_H(textord_noise_area_ratio); + double_VAR_H(textord_initialx_ile); + double_VAR_H(textord_initialasc_ile); + INT_VAR_H(textord_noise_sizefraction); + double_VAR_H(textord_noise_sizelimit); + INT_VAR_H(textord_noise_translimit); + double_VAR_H(textord_noise_normratio); + BOOL_VAR_H(textord_noise_rejwords); + BOOL_VAR_H(textord_noise_rejrows); + double_VAR_H(textord_noise_syfract); + double_VAR_H(textord_noise_sxfract); + double_VAR_H(textord_noise_hfract); + INT_VAR_H(textord_noise_sncount); + double_VAR_H(textord_noise_rowratio); + BOOL_VAR_H(textord_noise_debug); + double_VAR_H(textord_blshift_maxshift); + double_VAR_H(textord_blshift_xfraction); }; } // namespace tesseract diff --git a/src/textord/topitch.cpp b/src/textord/topitch.cpp index fdad4ac0b..53f9322dd 100644 --- a/src/textord/topitch.cpp +++ b/src/textord/topitch.cpp @@ -45,7 +45,6 @@ BOOL_VAR(textord_fast_pitch_test, false, "Do even faster pitch algorithm"); BOOL_VAR(textord_debug_pitch_metric, false, "Write full metric stuff"); BOOL_VAR(textord_show_row_cuts, false, "Draw row-level cuts"); BOOL_VAR(textord_show_page_cuts, false, "Draw page-level cuts"); -BOOL_VAR(textord_pitch_cheat, false, "Use correct answer for fixed/prop"); BOOL_VAR(textord_blockndoc_fixed, false, "Attempt whole doc/block fixed pitch"); double_VAR(textord_projection_scale, 0.200, "Ding rate for mid-cuts"); double_VAR(textord_balance_factor, 1.0, "Ding rate for unbalanced char cells"); diff --git a/src/textord/topitch.h b/src/textord/topitch.h index a6a693ae0..60def054a 100644 --- a/src/textord/topitch.h +++ b/src/textord/topitch.h @@ -25,15 +25,14 @@ namespace tesseract { class 
Tesseract; -extern BOOL_VAR_H(textord_debug_pitch_test, false, "Debug on fixed pitch test"); -extern BOOL_VAR_H(textord_debug_pitch_metric, false, "Write full metric stuff"); -extern BOOL_VAR_H(textord_show_row_cuts, false, "Draw row-level cuts"); -extern BOOL_VAR_H(textord_show_page_cuts, false, "Draw page-level cuts"); -extern BOOL_VAR_H(textord_pitch_cheat, false, "Use correct answer for fixed/prop"); -extern BOOL_VAR_H(textord_blockndoc_fixed, true, "Attempt whole doc/block fixed pitch"); -extern BOOL_VAR_H(textord_fast_pitch_test, false, "Do even faster pitch algorithm"); -extern double_VAR_H(textord_projection_scale, 0.125, "Ding rate for mid-cuts"); -extern double_VAR_H(textord_balance_factor, 2.0, "Ding rate for unbalanced char cells"); +extern BOOL_VAR_H(textord_debug_pitch_test); +extern BOOL_VAR_H(textord_debug_pitch_metric); +extern BOOL_VAR_H(textord_show_row_cuts); +extern BOOL_VAR_H(textord_show_page_cuts); +extern BOOL_VAR_H(textord_blockndoc_fixed); +extern BOOL_VAR_H(textord_fast_pitch_test); +extern double_VAR_H(textord_projection_scale); +extern double_VAR_H(textord_balance_factor); void compute_fixed_pitch(ICOORD page_tr, // top right TO_BLOCK_LIST *port_blocks, // input list diff --git a/src/textord/tordmain.cpp b/src/textord/tordmain.cpp index 3e44fead2..3305a4e44 100644 --- a/src/textord/tordmain.cpp +++ b/src/textord/tordmain.cpp @@ -325,7 +325,7 @@ float Textord::filter_noise_blobs(BLOBNBOX_LIST *src_list, // original list (tesseract::CCStruct::kDescenderFraction + tesseract::CCStruct::kXHeightFraction + 2 * tesseract::CCStruct::kAscenderFraction) / tesseract::CCStruct::kXHeightFraction); - min_y = floor(initial_x / 2); + min_y = std::floor(initial_x / 2); max_x = ceil(initial_x * textord_width_limit); small_it.move_to_first(); for (small_it.mark_cycle_pt(); !small_it.cycled_list(); small_it.forward()) { @@ -729,7 +729,7 @@ void Textord::TransferDiacriticsToBlockGroups(BLOBNBOX_LIST *diacritic_blobs, BL int best_g = 0; float 
best_angle_diff = FLT_MAX; for (const auto &group : groups) { - double angle_diff = fabs(block_angle - group->angle); + double angle_diff = std::fabs(block_angle - group->angle); if (angle_diff > M_PI) { angle_diff = fabs(angle_diff - 2.0 * M_PI); } diff --git a/src/textord/tospace.cpp b/src/textord/tospace.cpp index e6ee85502..75b3aed02 100644 --- a/src/textord/tospace.cpp +++ b/src/textord/tospace.cpp @@ -36,6 +36,7 @@ #endif #include +#include #include #define MAXSPACING 128 /*max expected spacing in pix */ @@ -64,13 +65,13 @@ void Textord::to_spacing(ICOORD page_tr, // topright of page block_non_space_gap_width); // Make sure relative values of block-level space and non-space gap // widths are reasonable. The ratio of 1:3 is also used in - // block_spacing_stats, to correct the block_space_gap_width + // block_spacing_stats, to correct the block_space_gap_width. // Useful for arabic and hindi, when the non-space gap width is // often over-estimated and should not be trusted. A similar ratio // is found in block_spacing_stats. 
if (tosp_old_to_method && tosp_old_to_constrain_sp_kn && - static_cast(block_space_gap_width) / block_non_space_gap_width < 3.0) { - block_non_space_gap_width = static_cast(floor(block_space_gap_width / 3.0)); + block_non_space_gap_width > block_space_gap_width / 3) { + block_non_space_gap_width = block_space_gap_width / 3; } // row iterator TO_ROW_IT row_it(block->get_rows()); @@ -295,7 +296,7 @@ void Textord::row_spacing_stats(TO_ROW *row, GAPMAP *gapmap, int16_t block_idx, /* Collect first pass stats for row */ if (!good_block_space_estimate) { - block_space_gap_width = int16_t(floor(row->xheight / 2)); + block_space_gap_width = int16_t(std::floor(row->xheight / 2)); } if (!row->blob_list()->empty()) { if (tosp_threshold_bias1 > 0) { @@ -435,7 +436,7 @@ are ignoring big gaps*/ if (suspected_table) { sane_space = std::max(tosp_table_kn_sp_ratio * row->kern_size, tosp_table_xht_sp_ratio * row->xheight); - sane_threshold = int32_t(floor((sane_space + row->kern_size) / 2)); + sane_threshold = int32_t(std::floor((sane_space + row->kern_size) / 2)); if ((row->space_size < sane_space) || (row->space_threshold < sane_threshold)) { if (tosp_debug_level > 5) { @@ -606,7 +607,7 @@ It comes to the same thing. (Though there is a difference in that old textor has integer space_size and kern_size.) 
*/ - row->space_threshold = int32_t(floor((row->space_size + row->kern_size) / 2)); + row->space_threshold = int32_t(std::floor((row->space_size + row->kern_size) / 2)); } // Apply the same logic and ratios as in row_spacing_stats to @@ -648,7 +649,7 @@ bool Textord::isolated_row_stats(TO_ROW *row, GAPMAP *gapmap, STATS *all_gap_sta crude_threshold_estimate = std::max(tosp_init_guess_kn_mult * kern_estimate, tosp_init_guess_xht_mult * row->xheight); small_gaps_count = - stats_count_under(all_gap_stats, static_cast(ceil(crude_threshold_estimate))); + stats_count_under(all_gap_stats, static_cast(std::ceil(crude_threshold_estimate))); total = all_gap_stats->get_total(); if ((total <= tosp_redo_kern_limit) || @@ -718,7 +719,7 @@ bool Textord::isolated_row_stats(TO_ROW *row, GAPMAP *gapmap, STATS *all_gap_sta } else { row->kern_size = all_gap_stats->median(); } - row->space_threshold = int32_t(floor((row->space_size + row->kern_size) / 2)); + row->space_threshold = int32_t(std::floor((row->space_size + row->kern_size) / 2)); /* Sanity check */ if ((row->kern_size >= row->space_threshold) || (row->space_threshold >= row->space_size) || (row->space_threshold <= 0)) { @@ -793,7 +794,7 @@ threshold is not within it, move the threshold so that is is just inside it. reqd_zero_width = 3; } - for (index = int16_t(ceil(kn)); index < int16_t(floor(sp)); index++) { + for (index = int16_t(std::ceil(kn)); index < int16_t(std::floor(sp)); index++) { if (all_gap_stats->pile_count(index) == 0) { if (zero_width == 0) { zero_start = index; @@ -909,7 +910,7 @@ the gap between the word being built and the next one. 
*/ current_gap = box_it.data()->bounding_box().left() - next_rep_char_word_right; current_within_xht_gap = current_gap; if (current_gap > tosp_rep_space * repetition_spacing) { - prev_blanks = static_cast(floor(current_gap / row->space_size)); + prev_blanks = static_cast(std::floor(current_gap / row->space_size)); if (prev_blanks < 1) { prev_blanks = 1; } @@ -1002,7 +1003,7 @@ the gap between the word being built and the next one. */ current_gap = word->bounding_box().left() - prev_x; current_within_xht_gap = current_gap; if (current_gap > tosp_rep_space * repetition_spacing) { - blanks = static_cast(floor(current_gap / row->space_size)); + blanks = static_cast(std::floor(current_gap / row->space_size)); if (blanks < 1) { blanks = 1; } @@ -1066,7 +1067,7 @@ the gap between the word being built and the next one. */ repetition_spacing = find_mean_blob_spacing(word); current_gap = word->bounding_box().left() - prev_x; if (current_gap > tosp_rep_space * repetition_spacing) { - blanks = static_cast(floor(current_gap / row->space_size)); + blanks = static_cast(std::floor(current_gap / row->space_size)); if (blanks < 1) { blanks = 1; } @@ -1124,8 +1125,6 @@ ROW *Textord::make_blob_words(TO_ROW *row, // row to make C_BLOB_IT cblob_it = &cblobs; WERD_LIST words; WERD *word; // new word - BLOBNBOX *bblob; // current blob - TBOX blob_box; // bounding box BLOBNBOX_IT box_it; // iterator int16_t word_count = 0; @@ -1136,18 +1135,21 @@ ROW *Textord::make_blob_words(TO_ROW *row, // row to make bol = true; if (!box_it.empty()) { do { - bblob = box_it.data(); - blob_box = bblob->bounding_box(); + auto bblob = box_it.data(); + auto blob_box = bblob->bounding_box(); if (bblob->joined_to_prev()) { - if (bblob->cblob() != nullptr) { + auto cblob = bblob->remove_cblob(); + if (cblob != nullptr) { cout_it.set_to_list(cblob_it.data()->out_list()); cout_it.move_to_last(); - cout_it.add_list_after(bblob->cblob()->out_list()); - delete bblob->cblob(); + 
cout_it.add_list_after(cblob->out_list()); + delete cblob; } } else { - if (bblob->cblob() != nullptr) { - cblob_it.add_after_then_move(bblob->cblob()); + auto cblob = bblob->cblob(); + if (cblob != nullptr) { + bblob->set_owns_cblob(false); + cblob_it.add_after_then_move(cblob); } } box_it.forward(); // next one @@ -1232,9 +1234,14 @@ OR The real gap is less than the kerning estimate fuzzy_non = true; } } else { - blanks = static_cast(current_gap / row->space_size); - if (blanks < 1) { + if (row->space_size == 0.0f) { + // Avoid FP division by 0. blanks = 1; + } else { + blanks = static_cast(current_gap / row->space_size); + if (blanks < 1) { + blanks = 1; + } } fuzzy_sp = false; fuzzy_non = false; @@ -1732,7 +1739,7 @@ caps ht chars which should NOT have their box reduced: T, Y, V, W etc if (left_limit > junk) { *left_above_xht = INT16_MAX; // No area above xht } else { - *left_above_xht = static_cast(floor(left_limit)); + *left_above_xht = static_cast(std::floor(left_limit)); } /* Find reduced LH limit of blob - the left extent of the region ABOVE the @@ -1756,7 +1763,7 @@ Find reduced RH limit of blob - the right extent of the region BELOW the xht. 
return TBOX(); // no area within xht so return empty box } - return TBOX(ICOORD(static_cast(floor(left_limit)), blob_box.bottom()), - ICOORD(static_cast(ceil(right_limit)), blob_box.top())); + return TBOX(ICOORD(static_cast(std::floor(left_limit)), blob_box.bottom()), + ICOORD(static_cast(std::ceil(right_limit)), blob_box.top())); } } // namespace tesseract diff --git a/src/textord/tovars.cpp b/src/textord/tovars.cpp index 93bb342ac..7b5ad2f0c 100644 --- a/src/textord/tovars.cpp +++ b/src/textord/tovars.cpp @@ -23,18 +23,12 @@ namespace tesseract { BOOL_VAR(textord_show_initial_words, false, "Display separate words"); -BOOL_VAR(textord_show_new_words, false, "Display separate words"); -BOOL_VAR(textord_show_fixed_words, false, "Display forced fixed pitch words"); BOOL_VAR(textord_blocksall_fixed, false, "Moan about prop blocks"); BOOL_VAR(textord_blocksall_prop, false, "Moan about fixed pitch blocks"); -BOOL_VAR(textord_blocksall_testing, false, "Dump stats when moaning"); -BOOL_VAR(textord_test_mode, false, "Do current test"); INT_VAR(textord_dotmatrix_gap, 3, "Max pixel gap for broken pixed pitch"); INT_VAR(textord_debug_block, 0, "Block to do debug on"); INT_VAR(textord_pitch_range, 2, "Max range test on pitch"); double_VAR(textord_wordstats_smooth_factor, 0.05, "Smoothing gap stats"); -double_VAR(textord_width_smooth_factor, 0.10, "Smoothing width stats"); -double_VAR(textord_words_width_ile, 0.4, "Ile of blob widths for space est"); double_VAR(textord_words_maxspace, 4.0, "Multiple of xheight"); double_VAR(textord_words_default_maxspace, 3.5, "Max believable third space"); double_VAR(textord_words_default_minspace, 0.6, "Fraction of xheight"); @@ -55,10 +49,8 @@ double_VAR(words_default_prop_nonspace, 0.25, "Fraction of xheight"); double_VAR(words_default_fixed_space, 0.75, "Fraction of xheight"); double_VAR(words_default_fixed_limit, 0.6, "Allowed size variance"); double_VAR(textord_words_definite_spread, 0.30, "Non-fuzzy spacing region"); 
-double_VAR(textord_spacesize_ratiofp, 2.8, "Min ratio space/nonspace"); double_VAR(textord_spacesize_ratioprop, 2.0, "Min ratio space/nonspace"); double_VAR(textord_fpiqr_ratio, 1.5, "Pitch IQR/Gap IQR threshold"); double_VAR(textord_max_pitch_iqr, 0.20, "Xh fraction noise in pitch"); -double_VAR(textord_fp_min_width, 0.5, "Min width of decent blobs"); } // namespace tesseract diff --git a/src/textord/tovars.h b/src/textord/tovars.h index 94e45af63..cf4486348 100644 --- a/src/textord/tovars.h +++ b/src/textord/tovars.h @@ -24,44 +24,36 @@ namespace tesseract { -extern BOOL_VAR_H(textord_show_initial_words, false, "Display separate words"); -extern BOOL_VAR_H(textord_show_new_words, false, "Display separate words"); -extern BOOL_VAR_H(textord_show_fixed_words, false, "Display forced fixed pitch words"); -extern BOOL_VAR_H(textord_blocksall_fixed, false, "Moan about prop blocks"); -extern BOOL_VAR_H(textord_blocksall_prop, false, "Moan about fixed pitch blocks"); -extern BOOL_VAR_H(textord_blocksall_testing, false, "Dump stats when moaning"); -extern BOOL_VAR_H(textord_test_mode, false, "Do current test"); -extern INT_VAR_H(textord_dotmatrix_gap, 3, "Max pixel gap for broken pixed pitch"); -extern INT_VAR_H(textord_debug_block, 0, "Block to do debug on"); -extern INT_VAR_H(textord_pitch_range, 2, "Max range test on pitch"); -extern double_VAR_H(textord_wordstats_smooth_factor, 0.05, "Smoothing gap stats"); -extern double_VAR_H(textord_width_smooth_factor, 0.10, "Smoothing width stats"); -extern double_VAR_H(textord_words_width_ile, 0.4, "Ile of blob widths for space est"); -extern double_VAR_H(textord_words_maxspace, 4.0, "Multiple of xheight"); -extern double_VAR_H(textord_words_default_maxspace, 3.5, "Max believable third space"); -extern double_VAR_H(textord_words_default_minspace, 0.6, "Fraction of xheight"); -extern double_VAR_H(textord_words_min_minspace, 0.3, "Fraction of xheight"); -extern double_VAR_H(textord_words_default_nonspace, 0.2, "Fraction of 
xheight"); -extern double_VAR_H(textord_words_initial_lower, 0.25, "Max initial cluster size"); -extern double_VAR_H(textord_words_initial_upper, 0.15, "Min initial cluster spacing"); -extern double_VAR_H(textord_words_minlarge, 0.75, "Fraction of valid gaps needed"); -extern double_VAR_H(textord_words_pitchsd_threshold, 0.025, "Pitch sync threshold"); -extern double_VAR_H(textord_words_def_fixed, 0.01, "Threshold for definite fixed"); -extern double_VAR_H(textord_words_def_prop, 0.06, "Threshold for definite prop"); -extern INT_VAR_H(textord_words_veto_power, 5, "Rows required to outvote a veto"); -extern double_VAR_H(textord_pitch_rowsimilarity, 0.08, "Fraction of xheight for sameness"); -extern BOOL_VAR_H(textord_pitch_scalebigwords, false, "Scale scores on big words"); -extern double_VAR_H(words_initial_lower, 0.5, "Max initial cluster size"); -extern double_VAR_H(words_initial_upper, 0.15, "Min initial cluster spacing"); -extern double_VAR_H(words_default_prop_nonspace, 0.25, "Fraction of xheight"); -extern double_VAR_H(words_default_fixed_space, 0.75, "Fraction of xheight"); -extern double_VAR_H(words_default_fixed_limit, 0.6, "Allowed size variance"); -extern double_VAR_H(textord_words_definite_spread, 0.30, "Non-fuzzy spacing region"); -extern double_VAR_H(textord_spacesize_ratiofp, 2.8, "Min ratio space/nonspace"); -extern double_VAR_H(textord_spacesize_ratioprop, 2.0, "Min ratio space/nonspace"); -extern double_VAR_H(textord_fpiqr_ratio, 1.5, "Pitch IQR/Gap IQR threshold"); -extern double_VAR_H(textord_max_pitch_iqr, 0.20, "Xh fraction noise in pitch"); -extern double_VAR_H(textord_fp_min_width, 0.5, "Min width of decent blobs"); +extern BOOL_VAR_H(textord_show_initial_words); +extern BOOL_VAR_H(textord_blocksall_fixed); +extern BOOL_VAR_H(textord_blocksall_prop); +extern INT_VAR_H(textord_dotmatrix_gap); +extern INT_VAR_H(textord_debug_block); +extern INT_VAR_H(textord_pitch_range); +extern double_VAR_H(textord_wordstats_smooth_factor); +extern 
double_VAR_H(textord_words_maxspace); +extern double_VAR_H(textord_words_default_maxspace); +extern double_VAR_H(textord_words_default_minspace); +extern double_VAR_H(textord_words_min_minspace); +extern double_VAR_H(textord_words_default_nonspace); +extern double_VAR_H(textord_words_initial_lower); +extern double_VAR_H(textord_words_initial_upper); +extern double_VAR_H(textord_words_minlarge); +extern double_VAR_H(textord_words_pitchsd_threshold); +extern double_VAR_H(textord_words_def_fixed); +extern double_VAR_H(textord_words_def_prop); +extern INT_VAR_H(textord_words_veto_power); +extern double_VAR_H(textord_pitch_rowsimilarity); +extern BOOL_VAR_H(textord_pitch_scalebigwords); +extern double_VAR_H(words_initial_lower); +extern double_VAR_H(words_initial_upper); +extern double_VAR_H(words_default_prop_nonspace); +extern double_VAR_H(words_default_fixed_space); +extern double_VAR_H(words_default_fixed_limit); +extern double_VAR_H(textord_words_definite_spread); +extern double_VAR_H(textord_spacesize_ratioprop); +extern double_VAR_H(textord_fpiqr_ratio); +extern double_VAR_H(textord_max_pitch_iqr); } // namespace tesseract diff --git a/src/textord/underlin.cpp b/src/textord/underlin.cpp index 8af534cf7..8d5abe249 100644 --- a/src/textord/underlin.cpp +++ b/src/textord/underlin.cpp @@ -162,7 +162,6 @@ void find_underlined_blobs( // get chop points float baseline_offset, // amount to shrinke it ICOORDELT_LIST *chop_cells // places to chop ) { - int16_t x, y; // sides of blob ICOORD blob_chop; // sides of blob TBOX blob_box = u_line->bounding_box(); // cell iterator @@ -180,9 +179,10 @@ void find_underlined_blobs( // get chop points &middle_proj, &upper_proj); } - for (x = blob_box.left(); x < blob_box.right(); x++) { + for (auto x = blob_box.left(); x < blob_box.right(); x++) { if (middle_proj.pile_count(x) > 0) { - for (y = x + 1; y < blob_box.right() && middle_proj.pile_count(y) > 0; y++) { + auto y = x + 1; + for (; y < blob_box.right() && 
middle_proj.pile_count(y) > 0; y++) { ; } blob_chop = ICOORD(x, y); diff --git a/src/textord/underlin.h b/src/textord/underlin.h index 9faf6efac..e6fcad277 100644 --- a/src/textord/underlin.h +++ b/src/textord/underlin.h @@ -23,8 +23,8 @@ namespace tesseract { -extern double_VAR_H(textord_underline_offset, 0.1, "Fraction of x to ignore"); -extern BOOL_VAR_H(textord_restore_underlines, false, "Chop underlines & put back"); +extern double_VAR_H(textord_underline_offset); +extern BOOL_VAR_H(textord_restore_underlines); void restore_underlined_blobs( // get chop points TO_BLOCK *block // block to do ); diff --git a/src/textord/wordseg.cpp b/src/textord/wordseg.cpp index be392e45c..3dd741a8f 100644 --- a/src/textord/wordseg.cpp +++ b/src/textord/wordseg.cpp @@ -23,6 +23,8 @@ #include "wordseg.h" +#include + #include "blobbox.h" #include "cjkpitch.h" #include "drawtord.h" @@ -36,7 +38,6 @@ namespace tesseract { -BOOL_VAR(textord_fp_chopping, true, "Do fixed pitch chopping"); BOOL_VAR(textord_force_make_prop_words, false, "Force proportional word segmentation on all rows"); BOOL_VAR(textord_chopper_test, false, "Chopper is being tested."); @@ -223,7 +224,7 @@ int32_t row_words( // compute space size lower = row->xheight * textord_words_initial_lower; upper = row->xheight * textord_words_initial_upper; cluster_count = gap_stats.cluster(lower, upper, textord_spacesize_ratioprop, 3, cluster_stats); - while (cluster_count < 2 && ceil(lower) < floor(upper)) { + while (cluster_count < 2 && std::ceil(lower) < std::floor(upper)) { // shrink gap upper = (upper * 3 + lower) / 4; lower = (lower * 3 + upper) / 4; diff --git a/src/textord/wordseg.h b/src/textord/wordseg.h index 064a60611..be437b748 100644 --- a/src/textord/wordseg.h +++ b/src/textord/wordseg.h @@ -26,10 +26,8 @@ namespace tesseract { class Tesseract; -extern BOOL_VAR_H(textord_fp_chopping, true, "Do fixed pitch chopping"); -extern BOOL_VAR_H(textord_force_make_prop_words, false, - "Force proportional word segmentation 
on all rows"); -extern BOOL_VAR_H(textord_chopper_test, false, "Chopper is being tested."); +extern BOOL_VAR_H(textord_force_make_prop_words); +extern BOOL_VAR_H(textord_chopper_test); void make_single_word(bool one_blob, TO_ROW_LIST *rows, ROW_LIST *real_rows); void make_words(tesseract::Textord *textord, diff --git a/src/training/CMakeLists.txt b/src/training/CMakeLists.txt index faf7b0499..f94dec7c2 100644 --- a/src/training/CMakeLists.txt +++ b/src/training/CMakeLists.txt @@ -1,74 +1,86 @@ # # tesseract # - -if (UNIX AND NOT ANDROID) - set(LIB_pthread pthread) +if(NOT ${CMAKE_VERSION} VERSION_LESS "3.12.0") + cmake_policy(SET CMP0074 NEW) endif() -if (SW_BUILD) - set(ICU_FOUND 1) +if(UNIX AND NOT ANDROID) + set(LIB_pthread pthread) +endif() + +if(SW_BUILD) + set(ICU_FOUND 1) +else() # NOT SW_BUILD + find_package(PkgConfig) endif() # experimental -if (MSVC AND NOT SW_BUILD AND NOT USE_SYSTEM_ICU) - include(CheckTypeSize) - check_type_size("void *" SIZEOF_VOID_P) +# If PkgConfig is not present training tools will not be build, +# so it does not make sense to set ICU. 
+if(MSVC + AND PKG_CONFIG_FOUND + AND NOT SW_BUILD + AND NOT USE_SYSTEM_ICU) + include(CheckTypeSize) + check_type_size("void *" SIZEOF_VOID_P) - if (SIZEOF_VOID_P EQUAL 8) - set(X64 1) - set(ARCH_DIR_NAME 64) - elseif (SIZEOF_VOID_P EQUAL 4) - set(X86 1) - set(ARCH_DIR_NAME 32) - else() - message(FATAL_ERROR "Cannot determine target architecture") - endif() + if(SIZEOF_VOID_P EQUAL 8) + set(X64 1) + set(ARCH_NAME 64) + elseif(SIZEOF_VOID_P EQUAL 4) + set(X86 1) + set(ARCH_NAME 32) + else() + message(FATAL_ERROR "Cannot determine target architecture") + endif() - set(icu_dir "${CMAKE_CURRENT_BINARY_DIR}/icu") - set(icu_archive "${icu_dir}/icu${ARCH_DIR_NAME}.zip") + set(ICU_DIR "${CMAKE_CURRENT_BINARY_DIR}/icu") + set(ICU_ARCHIVE "${ICU_DIR}/icu${ARCH_NAME}.zip") - if (X86) - set(icu_hash 45167a240b60e36b59a87eda23490ce4) - else() - set(icu_hash 480c72491576c048de1218c3c5519399) - endif() + if(X86) + set(ICU_HASH 45167a240b60e36b59a87eda23490ce4) + else() + set(ICU_HASH 480c72491576c048de1218c3c5519399) + endif() - message(STATUS "Downloading latest ICU binaries") + message(STATUS "Downloading latest ICU binaries") + set(COMPILER "msvc10") + set(ICU_URL "https://github.com/unicode-org/icu/releases/download") + set(ICU_R "56-1") + set(ICU_V "56_1") + file( + DOWNLOAD + "${ICU_URL}/release-${ICU_R}/icu4c-${ICU_V}-Win${ARCH_NAME}-${COMPILER}.zip" + "${ICU_ARCHIVE}" + SHOW_PROGRESS + INACTIVITY_TIMEOUT 300 # seconds + EXPECTED_HASH MD5=${ICU_HASH}) + execute_process( + COMMAND ${CMAKE_COMMAND} -E tar xz "${ICU_ARCHIVE}" + WORKING_DIRECTORY "${ICU_DIR}" + RESULT_VARIABLE __result) + if(NOT __result EQUAL 0) + message(FATAL_ERROR "error ${__result}") + endif() - file(DOWNLOAD - "https://github.com/unicode-org/icu/releases/download/release-56-1/icu4c-56_1-Win${ARCH_DIR_NAME}-msvc10.zip" - "${icu_archive}" - SHOW_PROGRESS - INACTIVITY_TIMEOUT 300 # seconds - EXPECTED_HASH MD5=${icu_hash} - ) - execute_process(COMMAND ${CMAKE_COMMAND} -E tar xz "${icu_archive}" - 
WORKING_DIRECTORY "${icu_dir}" - RESULT_VARIABLE __result - ) - if(NOT __result EQUAL 0) - message(FATAL_ERROR "error ${__result}") - endif() - - set(ICU_ROOT ${icu_dir}/icu) + set(ICU_ROOT ${ICU_DIR}/icu) endif() # experimental -if (NOT SW_BUILD) - if (PKG_CONFIG_FOUND) - pkg_check_modules(ICU REQUIRED icu-uc icu-i18n) - else() - find_package(ICU 52.1 COMPONENTS uc i18n) - endif() +if(NOT SW_BUILD) + if(PKG_CONFIG_FOUND) + pkg_check_modules(ICU REQUIRED IMPORTED_TARGET icu-uc icu-i18n) + else() + find_package(ICU 52.1 COMPONENTS uc i18n) + endif() endif() - -######################################## +# ############################################################################## # LIBRARY common_training -######################################## +# ############################################################################## -set(common_training_src +set(COMMON_TRAINING_SRC common/commandlineflags.cpp common/commandlineflags.h common/commontraining.cpp @@ -76,11 +88,12 @@ set(common_training_src common/ctc.cpp common/ctc.h common/networkbuilder.cpp - common/networkbuilder.h -) + common/networkbuilder.h) -if (NOT DISABLED_LEGACY_ENGINE) -list(APPEND common_training_src +if(NOT DISABLED_LEGACY_ENGINE) + list( + APPEND + COMMON_TRAINING_SRC common/errorcounter.cpp common/errorcounter.h common/intfeaturedist.cpp @@ -92,254 +105,301 @@ list(APPEND common_training_src common/sampleiterator.cpp common/sampleiterator.h common/trainingsampleset.cpp - common/trainingsampleset.h -) + common/trainingsampleset.h) endif() -add_library (common_training ${common_training_src}) -target_include_directories (common_training PUBLIC common ${CMAKE_CURRENT_BINARY_DIR}) -target_link_libraries (common_training PUBLIC libtesseract) -generate_export_header (common_training EXPORT_MACRO_NAME TESS_COMMON_TRAINING_API) -project_group (common_training "Training Tools") +add_library(common_training ${COMMON_TRAINING_SRC}) +target_include_directories(common_training PUBLIC common + 
${CMAKE_CURRENT_BINARY_DIR}) +target_link_libraries(common_training PUBLIC libtesseract) +generate_export_header(common_training EXPORT_MACRO_NAME + TESS_COMMON_TRAINING_API) +project_group(common_training "Training Tools") -######################################## +# ############################################################################## # EXECUTABLE ambiguous_words -######################################## +# ############################################################################## -if (NOT DISABLED_LEGACY_ENGINE) -add_executable (ambiguous_words ambiguous_words.cpp) -target_link_libraries (ambiguous_words common_training) -project_group (ambiguous_words "Training Tools") -install (TARGETS ambiguous_words RUNTIME DESTINATION bin LIBRARY DESTINATION lib ARCHIVE DESTINATION lib) +if(NOT DISABLED_LEGACY_ENGINE) + add_executable(ambiguous_words ambiguous_words.cpp) + target_link_libraries(ambiguous_words common_training) + project_group(ambiguous_words "Training Tools") + install( + TARGETS ambiguous_words + RUNTIME DESTINATION bin + LIBRARY DESTINATION lib + ARCHIVE DESTINATION lib) endif() - -######################################## +# ############################################################################## # EXECUTABLE classifier_tester -######################################## +# ############################################################################## -if (NOT DISABLED_LEGACY_ENGINE) -add_executable (classifier_tester classifier_tester.cpp) -target_link_libraries (classifier_tester common_training) -project_group (classifier_tester "Training Tools") -install (TARGETS classifier_tester RUNTIME DESTINATION bin LIBRARY DESTINATION lib ARCHIVE DESTINATION lib) +if(NOT DISABLED_LEGACY_ENGINE) + add_executable(classifier_tester classifier_tester.cpp) + target_link_libraries(classifier_tester common_training) + project_group(classifier_tester "Training Tools") + install( + TARGETS classifier_tester + RUNTIME DESTINATION bin + LIBRARY DESTINATION 
lib + ARCHIVE DESTINATION lib) endif() - -######################################## +# ############################################################################## # EXECUTABLE combine_tessdata -######################################## +# ############################################################################## -add_executable (combine_tessdata combine_tessdata.cpp) -target_link_libraries (combine_tessdata common_training) -project_group (combine_tessdata "Training Tools") -install (TARGETS combine_tessdata RUNTIME DESTINATION bin LIBRARY DESTINATION lib ARCHIVE DESTINATION lib) +add_executable(combine_tessdata combine_tessdata.cpp) +target_link_libraries(combine_tessdata common_training) +project_group(combine_tessdata "Training Tools") +install( + TARGETS combine_tessdata + RUNTIME DESTINATION bin + LIBRARY DESTINATION lib + ARCHIVE DESTINATION lib) - -######################################## +# ############################################################################## # EXECUTABLE cntraining -######################################## +# ############################################################################## -if (NOT DISABLED_LEGACY_ENGINE) -add_executable (cntraining cntraining.cpp) -target_link_libraries (cntraining common_training) -project_group (cntraining "Training Tools") -install (TARGETS cntraining RUNTIME DESTINATION bin LIBRARY DESTINATION lib ARCHIVE DESTINATION lib) +if(NOT DISABLED_LEGACY_ENGINE) + add_executable(cntraining cntraining.cpp) + target_link_libraries(cntraining common_training) + project_group(cntraining "Training Tools") + install( + TARGETS cntraining + RUNTIME DESTINATION bin + LIBRARY DESTINATION lib + ARCHIVE DESTINATION lib) endif() - -######################################## +# ############################################################################## # EXECUTABLE dawg2wordlist -######################################## +# ############################################################################## 
-add_executable (dawg2wordlist dawg2wordlist.cpp) -target_link_libraries (dawg2wordlist common_training) -project_group (dawg2wordlist "Training Tools") -install (TARGETS dawg2wordlist RUNTIME DESTINATION bin LIBRARY DESTINATION lib ARCHIVE DESTINATION lib) +add_executable(dawg2wordlist dawg2wordlist.cpp) +target_link_libraries(dawg2wordlist common_training) +project_group(dawg2wordlist "Training Tools") +install( + TARGETS dawg2wordlist + RUNTIME DESTINATION bin + LIBRARY DESTINATION lib + ARCHIVE DESTINATION lib) - -######################################## +# ############################################################################## # EXECUTABLE mftraining -######################################## +# ############################################################################## -if (NOT DISABLED_LEGACY_ENGINE) -add_executable (mftraining mftraining.cpp mergenf.cpp mergenf.h) -target_link_libraries (mftraining common_training) -project_group (mftraining "Training Tools") -install (TARGETS mftraining RUNTIME DESTINATION bin LIBRARY DESTINATION lib ARCHIVE DESTINATION lib) +if(NOT DISABLED_LEGACY_ENGINE) + add_executable(mftraining mftraining.cpp mergenf.cpp mergenf.h) + target_link_libraries(mftraining common_training) + project_group(mftraining "Training Tools") + install( + TARGETS mftraining + RUNTIME DESTINATION bin + LIBRARY DESTINATION lib + ARCHIVE DESTINATION lib) endif() - -######################################## +# ############################################################################## # EXECUTABLE shapeclustering -######################################## +# ############################################################################## -if (NOT DISABLED_LEGACY_ENGINE) -add_executable (shapeclustering shapeclustering.cpp) -target_link_libraries (shapeclustering common_training) -project_group (shapeclustering "Training Tools") -install (TARGETS shapeclustering RUNTIME DESTINATION bin LIBRARY DESTINATION lib ARCHIVE DESTINATION lib) +if(NOT 
DISABLED_LEGACY_ENGINE) + add_executable(shapeclustering shapeclustering.cpp) + target_link_libraries(shapeclustering common_training) + project_group(shapeclustering "Training Tools") + install( + TARGETS shapeclustering + RUNTIME DESTINATION bin + LIBRARY DESTINATION lib + ARCHIVE DESTINATION lib) endif() - -######################################## +# ############################################################################## # EXECUTABLE wordlist2dawg -######################################## +# ############################################################################## -add_executable (wordlist2dawg wordlist2dawg.cpp) -target_link_libraries (wordlist2dawg common_training) -project_group (wordlist2dawg "Training Tools") -install (TARGETS wordlist2dawg RUNTIME DESTINATION bin LIBRARY DESTINATION lib ARCHIVE DESTINATION lib) +add_executable(wordlist2dawg wordlist2dawg.cpp) +target_link_libraries(wordlist2dawg common_training) +project_group(wordlist2dawg "Training Tools") +install( + TARGETS wordlist2dawg + RUNTIME DESTINATION bin + LIBRARY DESTINATION lib + ARCHIVE DESTINATION lib) +if(ICU_FOUND) -if (ICU_FOUND) - -if (NOT SW_BUILD) + if(NOT SW_BUILD) include_directories(${ICU_INCLUDE_DIRS}) -endif() + endif() -######################################## -# LIBRARY unicharset_training -######################################## + # ############################################################################ + # LIBRARY unicharset_training + # ############################################################################ -file(GLOB unicharset_training_src - unicharset/* -) + file(GLOB unicharset_training_src unicharset/*) -add_library (unicharset_training ${unicharset_training_src}) -if (SW_BUILD) -target_link_libraries (unicharset_training PUBLIC common_training org.sw.demo.unicode.icu.i18n) -else() -target_link_libraries (unicharset_training PUBLIC common_training ${ICU_LINK_LIBRARIES}) -endif() -target_include_directories (unicharset_training PUBLIC unicharset 
${CMAKE_CURRENT_BINARY_DIR}) -generate_export_header (unicharset_training EXPORT_MACRO_NAME TESS_UNICHARSET_TRAINING_API) -project_group (unicharset_training "Training Tools") + add_library(unicharset_training ${unicharset_training_src}) + if(SW_BUILD) + target_link_libraries(unicharset_training + PUBLIC common_training org.sw.demo.unicode.icu.i18n) + else() + if(${CMAKE_VERSION} VERSION_LESS "3.12.0") + target_link_libraries(unicharset_training PUBLIC common_training + PkgConfig::ICU) + else() + target_link_libraries(unicharset_training PUBLIC common_training + ${ICU_LINK_LIBRARIES}) + endif() + endif() + target_include_directories(unicharset_training + PUBLIC unicharset ${CMAKE_CURRENT_BINARY_DIR}) + generate_export_header(unicharset_training EXPORT_MACRO_NAME + TESS_UNICHARSET_TRAINING_API) + project_group(unicharset_training "Training Tools") + # ############################################################################ + # EXECUTABLE combine_lang_model + # ############################################################################ -######################################## -# EXECUTABLE combine_lang_model -######################################## + add_executable(combine_lang_model combine_lang_model.cpp) + target_link_libraries(combine_lang_model unicharset_training) + project_group(combine_lang_model "Training Tools") + install( + TARGETS combine_lang_model + RUNTIME DESTINATION bin + LIBRARY DESTINATION lib + ARCHIVE DESTINATION lib) -add_executable (combine_lang_model combine_lang_model.cpp) -target_link_libraries (combine_lang_model unicharset_training) -project_group (combine_lang_model "Training Tools") -install (TARGETS combine_lang_model RUNTIME DESTINATION bin LIBRARY DESTINATION lib ARCHIVE DESTINATION lib) + # ############################################################################ + # EXECUTABLE lstmeval + # ############################################################################ + add_executable(lstmeval lstmeval.cpp) + 
target_link_libraries(lstmeval unicharset_training ${LIB_pthread}) + project_group(lstmeval "Training Tools") + install( + TARGETS lstmeval + RUNTIME DESTINATION bin + LIBRARY DESTINATION lib + ARCHIVE DESTINATION lib) -######################################## -# EXECUTABLE lstmeval -######################################## + # ############################################################################ + # EXECUTABLE lstmtraining + # ############################################################################ -add_executable (lstmeval lstmeval.cpp) -target_link_libraries (lstmeval unicharset_training ${LIB_pthread}) -project_group (lstmeval "Training Tools") -install (TARGETS lstmeval RUNTIME DESTINATION bin LIBRARY DESTINATION lib ARCHIVE DESTINATION lib) + add_executable(lstmtraining lstmtraining.cpp) + target_link_libraries(lstmtraining unicharset_training ${LIB_pthread}) + project_group(lstmtraining "Training Tools") + install( + TARGETS lstmtraining + RUNTIME DESTINATION bin + LIBRARY DESTINATION lib + ARCHIVE DESTINATION lib) + # ############################################################################ + # EXECUTABLE merge_unicharsets + # ############################################################################ -######################################## -# EXECUTABLE lstmtraining -######################################## + add_executable(merge_unicharsets merge_unicharsets.cpp) + target_link_libraries(merge_unicharsets common_training) + project_group(merge_unicharsets "Training Tools") + install( + TARGETS merge_unicharsets + RUNTIME DESTINATION bin + LIBRARY DESTINATION lib + ARCHIVE DESTINATION lib) -add_executable (lstmtraining lstmtraining.cpp) -target_link_libraries (lstmtraining unicharset_training ${LIB_pthread}) -project_group (lstmtraining "Training Tools") -install (TARGETS lstmtraining RUNTIME DESTINATION bin LIBRARY DESTINATION lib ARCHIVE DESTINATION lib) + # ############################################################################ + # 
EXECUTABLE set_unicharset_properties + # ############################################################################ + add_executable(set_unicharset_properties set_unicharset_properties.cpp) + target_link_libraries(set_unicharset_properties unicharset_training) + project_group(set_unicharset_properties "Training Tools") + install( + TARGETS set_unicharset_properties + RUNTIME DESTINATION bin + LIBRARY DESTINATION lib + ARCHIVE DESTINATION lib) -######################################## -# EXECUTABLE merge_unicharsets -######################################## + # ############################################################################ + # EXECUTABLE unicharset_extractor + # ############################################################################ -add_executable (merge_unicharsets merge_unicharsets.cpp) -target_link_libraries (merge_unicharsets common_training) -project_group (merge_unicharsets "Training Tools") -install (TARGETS merge_unicharsets RUNTIME DESTINATION bin LIBRARY DESTINATION lib ARCHIVE DESTINATION lib) + add_executable(unicharset_extractor unicharset_extractor.cpp) + target_link_libraries(unicharset_extractor unicharset_training) + project_group(unicharset_extractor "Training Tools") + install( + TARGETS unicharset_extractor + RUNTIME DESTINATION bin + LIBRARY DESTINATION lib + ARCHIVE DESTINATION lib) + # ############################################################################ -######################################## -# EXECUTABLE set_unicharset_properties -######################################## + if(PKG_CONFIG_FOUND OR SW_BUILD) -add_executable (set_unicharset_properties set_unicharset_properties.cpp) -target_link_libraries (set_unicharset_properties unicharset_training) -project_group (set_unicharset_properties "Training Tools") -install (TARGETS set_unicharset_properties RUNTIME DESTINATION bin LIBRARY DESTINATION lib ARCHIVE DESTINATION lib) + if(PKG_CONFIG_FOUND) + pkg_check_modules( + PANGO + REQUIRED + IMPORTED_TARGET + 
pango>=1.38.0 + cairo + pangoft2 + pangocairo + fontconfig) + endif() + # ########################################################################## + # LIBRARY pango_training + # ########################################################################## -######################################## -# EXECUTABLE unicharset_extractor -######################################## + file(GLOB pango_training_src pango/*) -add_executable (unicharset_extractor unicharset_extractor.cpp) -target_link_libraries (unicharset_extractor unicharset_training) -project_group (unicharset_extractor "Training Tools") -install (TARGETS unicharset_extractor RUNTIME DESTINATION bin LIBRARY DESTINATION lib ARCHIVE DESTINATION lib) + add_library(pango_training ${pango_training_src}) + target_link_libraries(pango_training PUBLIC unicharset_training) + if(SW_BUILD) + target_link_libraries(pango_training + PUBLIC org.sw.demo.gnome.pango.pangocairo) + else() + if(PKG_CONFIG_FOUND) + target_include_directories(pango_training BEFORE + PUBLIC ${PANGO_INCLUDE_DIRS}) + target_compile_definitions(pango_training PUBLIC -DPANGO_ENABLE_ENGINE) + if(${CMAKE_VERSION} VERSION_LESS "3.12.0") + target_link_libraries(pango_training PUBLIC PkgConfig::PANGO) + else() + target_link_libraries(pango_training PUBLIC ${PANGO_LINK_LIBRARIES}) + endif() + endif() + endif() + target_include_directories(pango_training + PUBLIC pango ${CMAKE_CURRENT_BINARY_DIR}) + generate_export_header(pango_training EXPORT_MACRO_NAME + TESS_PANGO_TRAINING_API) + project_group(pango_training "Training Tools") + # ########################################################################## + # EXECUTABLE text2image + # ########################################################################## -######################################## + set(TEXT2IMAGE_SRC text2image.cpp degradeimage.cpp degradeimage.h) -if (NOT SW_BUILD) - find_package(PkgConfig) -endif() + add_executable(text2image ${TEXT2IMAGE_SRC}) + target_link_libraries(text2image 
pango_training) + project_group(text2image "Training Tools") + install( + TARGETS text2image + RUNTIME DESTINATION bin + LIBRARY DESTINATION lib + ARCHIVE DESTINATION lib) -if (PKG_CONFIG_FOUND OR SW_BUILD) - -if (PKG_CONFIG_FOUND) -pkg_check_modules(Pango REQUIRED pango>=1.38.0) -pkg_check_modules(Cairo REQUIRED cairo) -pkg_check_modules(PangoFt2 REQUIRED pangoft2) -pkg_check_modules(PangoCairo REQUIRED pangocairo) -pkg_check_modules(FontConfig REQUIRED fontconfig) -endif() - - -######################################## -# LIBRARY pango_training -######################################## - -file(GLOB pango_training_src - pango/* -) - -add_library (pango_training ${pango_training_src}) -target_link_libraries (pango_training PUBLIC unicharset_training) -if (SW_BUILD) -target_link_libraries (pango_training PUBLIC org.sw.demo.gnome.pango.pangocairo) -else() -if (PKG_CONFIG_FOUND) -target_include_directories (pango_training BEFORE PUBLIC ${Cairo_INCLUDE_DIRS} ${Pango_INCLUDE_DIRS}) -target_compile_definitions (pango_training PUBLIC -DPANGO_ENABLE_ENGINE) -target_link_libraries (pango_training PUBLIC - ${Pango_LINK_LIBRARIES} - ${Cairo_LINK_LIBRARIES} - ${PangoCairo_LINK_LIBRARIES} - ${PangoFt2_LINK_LIBRARIES} - ${FontConfig_LINK_LIBRARIES} -) -endif() -endif() -target_include_directories (pango_training PUBLIC pango ${CMAKE_CURRENT_BINARY_DIR}) -generate_export_header (pango_training EXPORT_MACRO_NAME TESS_PANGO_TRAINING_API) -project_group (pango_training "Training Tools") - - -######################################## -# EXECUTABLE text2image -######################################## - -set(text2image_src - text2image.cpp - degradeimage.cpp - degradeimage.h -) - -add_executable (text2image ${text2image_src}) -target_link_libraries (text2image pango_training) -project_group (text2image "Training Tools") -install (TARGETS text2image RUNTIME DESTINATION bin LIBRARY DESTINATION lib ARCHIVE DESTINATION lib) - -endif() + endif() endif(ICU_FOUND) 
-############################################################################### +# ############################################################################## diff --git a/src/training/classifier_tester.cpp b/src/training/classifier_tester.cpp index e1a6e32fe..3233e4301 100644 --- a/src/training/classifier_tester.cpp +++ b/src/training/classifier_tester.cpp @@ -102,7 +102,7 @@ int main(int argc, char **argv) { tesseract::CheckSharedLibraryVersion(); ParseArguments(&argc, &argv); std::string file_prefix; - auto trainer = tesseract::LoadTrainingData(argc, argv, false, nullptr, file_prefix); + auto trainer = tesseract::LoadTrainingData(argv + 1, false, nullptr, file_prefix); tesseract::TessBaseAPI *api; // Decode the classifier string. tesseract::ShapeClassifier *shape_classifier = diff --git a/src/training/cntraining.cpp b/src/training/cntraining.cpp index a020ac16e..2079e5298 100644 --- a/src/training/cntraining.cpp +++ b/src/training/cntraining.cpp @@ -106,7 +106,6 @@ int main(int argc, char *argv[]) { // Set the global Config parameters before parsing the command line. 
Config = CNConfig; - const char *PageName; LIST CharList = NIL_LIST; CLUSTERER *Clusterer = nullptr; LIST ProtoList = NIL_LIST; @@ -118,8 +117,7 @@ int main(int argc, char *argv[]) { ParseArguments(&argc, &argv); int num_fonts = 0; - int tessoptind = 1; - while ((PageName = GetNextFilename(argc, argv, tessoptind)) != nullptr) { + for (const char *PageName = *++argv; PageName != nullptr; PageName = *++argv) { printf("Reading %s ...\n", PageName); FILE *TrainingPage = fopen(PageName, "rb"); ASSERT_HOST(TrainingPage); diff --git a/src/training/combine_lang_model.cpp b/src/training/combine_lang_model.cpp index 2c58e642a..a819cbe83 100644 --- a/src/training/combine_lang_model.cpp +++ b/src/training/combine_lang_model.cpp @@ -60,7 +60,7 @@ int main(int argc, char **argv) { tprintf("Failed to load unicharset from %s\n", FLAGS_input_unicharset.c_str()); return 1; } - tprintf("Loaded unicharset of size %d from file %s\n", unicharset.size(), + tprintf("Loaded unicharset of size %zu from file %s\n", unicharset.size(), FLAGS_input_unicharset.c_str()); // Set unichar properties diff --git a/src/training/combine_tessdata.cpp b/src/training/combine_tessdata.cpp index 807185bb5..30edc8e92 100644 --- a/src/training/combine_tessdata.cpp +++ b/src/training/combine_tessdata.cpp @@ -26,6 +26,51 @@ using namespace tesseract; +static int list_components(TessdataManager &tm, const char *filename) { + // Initialize TessdataManager with the data in the given traineddata file. 
+ if (filename != nullptr && !tm.Init(filename)) { + tprintf("Failed to read %s\n", filename); + return EXIT_FAILURE; + } + tm.Directory(); + return EXIT_SUCCESS; +} + +static int list_network(TessdataManager &tm, const char *filename) { + if (filename != nullptr && !tm.Init(filename)) { + tprintf("Failed to read %s\n", filename); + return EXIT_FAILURE; + } + tesseract::TFile fp; + if (tm.GetComponent(tesseract::TESSDATA_LSTM, &fp)) { + tesseract::LSTMRecognizer recognizer; + if (!recognizer.DeSerialize(&tm, &fp)) { + tprintf("Failed to deserialize LSTM in %s!\n", filename); + return EXIT_FAILURE; + } + std::cout << "LSTM: network=" << recognizer.GetNetwork() + << ", int_mode=" << recognizer.IsIntMode() + << ", recoding=" << recognizer.IsRecoding() + << ", iteration=" << recognizer.training_iteration() + << ", sample_iteration=" << recognizer.sample_iteration() + << ", null_char=" << recognizer.null_char() + << ", learning_rate=" << recognizer.learning_rate() + << ", momentum=" << recognizer.GetMomentum() + << ", adam_beta=" << recognizer.GetAdamBeta() << '\n'; + + std::cout << "Layer Learning Rates: "; + auto layers = recognizer.EnumerateLayers(); + for (const auto &id : layers) { + auto layer = recognizer.GetLayer(id); + std::cout << id << "(" << layer->name() << ")" + << "=" << recognizer.GetLayerLearningRate(id) + << (layers[layers.size() - 1] != id ? ", " : ""); + } + std::cout << "\n"; + } + return EXIT_SUCCESS; +} + // Main program to combine/extract/overwrite tessdata components // in [lang].traineddata files. // @@ -91,7 +136,8 @@ int main(int argc, char **argv) { } else { printf("Output %s created successfully.\n", output_file.c_str()); } - } else if (argc >= 4 && (strcmp(argv[1], "-e") == 0 || strcmp(argv[1], "-u") == 0)) { + } else if (argc >= 4 && + (strcmp(argv[1], "-e") == 0 || strcmp(argv[1], "-u") == 0)) { // Initialize TessdataManager with the data in the given traineddata file. 
if (!tm.Init(argv[2])) { tprintf("Failed to read %s\n", argv[2]); @@ -126,7 +172,8 @@ int main(int argc, char **argv) { if (tm.ExtractToFile(filename.c_str())) { printf("Wrote %s\n", filename.c_str()); } else if (errno != 0) { - printf("Error, could not extract %s: %s\n", filename.c_str(), strerror(errno)); + printf("Error, could not extract %s: %s\n", filename.c_str(), + strerror(errno)); return EXIT_FAILURE; } } @@ -137,7 +184,8 @@ int main(int argc, char **argv) { std::string traineddata_filename = new_traineddata_filename; traineddata_filename += ".__tmp__"; if (rename(new_traineddata_filename, traineddata_filename.c_str()) != 0) { - tprintf("Failed to create a temporary file %s\n", traineddata_filename.c_str()); + tprintf("Failed to create a temporary file %s\n", + traineddata_filename.c_str()); return EXIT_FAILURE; } @@ -165,37 +213,28 @@ int main(int argc, char **argv) { std::vector lstm_data; fp.OpenWrite(&lstm_data); ASSERT_HOST(recognizer.Serialize(&tm, &fp)); - tm.OverwriteEntry(tesseract::TESSDATA_LSTM, &lstm_data[0], lstm_data.size()); + tm.OverwriteEntry(tesseract::TESSDATA_LSTM, &lstm_data[0], + lstm_data.size()); if (!tm.SaveFile(argv[2], nullptr)) { tprintf("Failed to write modified traineddata:%s!\n", argv[2]); return EXIT_FAILURE; } } else if (argc == 3 && strcmp(argv[1], "-d") == 0) { - // Initialize TessdataManager with the data in the given traineddata file. 
- tm.Init(argv[2]); + return list_components(tm, argv[2]); } else if (argc == 3 && strcmp(argv[1], "-l") == 0) { - if (!tm.Init(argv[2])) { - tprintf("Failed to read %s\n", argv[2]); - return EXIT_FAILURE; + return list_network(tm, argv[2]); + } else if (argc == 3 && strcmp(argv[1], "-dl") == 0) { + int result = list_components(tm, argv[2]); + if (result == EXIT_SUCCESS) { + result = list_network(tm, nullptr); } - tesseract::TFile fp; - if (tm.GetComponent(tesseract::TESSDATA_LSTM, &fp)) { - tesseract::LSTMRecognizer recognizer; - if (!recognizer.DeSerialize(&tm, &fp)) { - tprintf("Failed to deserialize LSTM in %s!\n", argv[2]); - return EXIT_FAILURE; - } - std::cout << "LSTM: network=" << recognizer.GetNetwork() - << ", int_mode=" << recognizer.IsIntMode() - << ", recoding=" << recognizer.IsRecoding() - << ", iteration=" << recognizer.training_iteration() - << ", sample_iteration=" << recognizer.sample_iteration() - << ", null_char=" << recognizer.null_char() - << ", learning_rate=" << recognizer.learning_rate() - << ", momentum=" << recognizer.GetMomentum() - << ", adam_beta=" << recognizer.GetAdamBeta() << '\n'; + return result; + } else if (argc == 3 && strcmp(argv[1], "-ld") == 0) { + int result = list_network(tm, argv[2]); + if (result == EXIT_SUCCESS) { + result = list_components(tm, nullptr); } - return EXIT_SUCCESS; + return result; } else { printf( "Usage for combining tessdata components:\n" diff --git a/src/training/common/commandlineflags.h b/src/training/common/commandlineflags.h index 7c241dc63..6287eeaed 100644 --- a/src/training/common/commandlineflags.h +++ b/src/training/common/commandlineflags.h @@ -24,13 +24,13 @@ #include #define INT_PARAM_FLAG(name, val, comment) INT_VAR(FLAGS_##name, val, comment) -#define DECLARE_INT_PARAM_FLAG(name) extern INT_VAR_H(FLAGS_##name, 0, "") +#define DECLARE_INT_PARAM_FLAG(name) extern INT_VAR_H(FLAGS_##name) #define DOUBLE_PARAM_FLAG(name, val, comment) double_VAR(FLAGS_##name, val, comment) -#define 
DECLARE_DOUBLE_PARAM_FLAG(name) extern double_VAR_H(FLAGS_##name, "", "") +#define DECLARE_DOUBLE_PARAM_FLAG(name) extern double_VAR_H(FLAGS_##name) #define BOOL_PARAM_FLAG(name, val, comment) BOOL_VAR(FLAGS_##name, val, comment) -#define DECLARE_BOOL_PARAM_FLAG(name) extern BOOL_VAR_H(FLAGS_##name, 0, "") +#define DECLARE_BOOL_PARAM_FLAG(name) extern BOOL_VAR_H(FLAGS_##name) #define STRING_PARAM_FLAG(name, val, comment) STRING_VAR(FLAGS_##name, val, comment) -#define DECLARE_STRING_PARAM_FLAG(name) extern STRING_VAR_H(FLAGS_##name, "", "") +#define DECLARE_STRING_PARAM_FLAG(name) extern STRING_VAR_H(FLAGS_##name) namespace tesseract { diff --git a/src/training/common/commontraining.cpp b/src/training/common/commontraining.cpp index f225f7892..473cb70b4 100644 --- a/src/training/common/commontraining.cpp +++ b/src/training/common/commontraining.cpp @@ -197,7 +197,7 @@ void WriteShapeTable(const std::string &file_prefix, const ShapeTable &shape_tab * If shape_table is not nullptr, but failed to load, make a fake flat one, * as shape clustering was not run. */ -std::unique_ptr LoadTrainingData(int argc, const char *const *argv, bool replication, +std::unique_ptr LoadTrainingData(const char *const *filelist, bool replication, ShapeTable **shape_table, std::string &file_prefix) { InitFeatureDefs(&feature_defs); InitIntegerFX(); @@ -236,10 +236,8 @@ std::unique_ptr LoadTrainingData(int argc, const char *const *arg } } trainer->SetFeatureSpace(fs); - const char *page_name; - // Load training data from .tr files on the command line. - int tessoptind = 0; - while ((page_name = GetNextFilename(argc, argv, tessoptind)) != nullptr) { + // Load training data from .tr files in filelist (terminated by nullptr). 
+ for (const char *page_name = *filelist++; page_name != nullptr; page_name = *filelist++) { tprintf("Reading %s ...\n", page_name); trainer->ReadTrainingSamples(page_name, feature_defs, false); @@ -291,25 +289,6 @@ std::unique_ptr LoadTrainingData(int argc, const char *const *arg return trainer; } -/*---------------------------------------------------------------------------*/ -/** - * This routine returns the next command line argument. If - * there are no remaining command line arguments, it returns - * nullptr. This routine should only be called after all option - * arguments have been parsed and removed with ParseArguments. - * - * Globals: - * - tessoptind defined by tessopt sys call - * @return Next command line argument or nullptr. - */ -const char *GetNextFilename(int argc, const char *const *argv, int &tessoptind) { - if (tessoptind < argc) { - return argv[tessoptind++]; - } else { - return nullptr; - } -} /* GetNextFilename */ - /*---------------------------------------------------------------------------*/ /** * This routine searches through a list of labeled lists to find @@ -455,7 +434,6 @@ CLUSTERER *SetUpForClustering(const FEATURE_DEFS_STRUCT &FeatureDefs, LABELEDLIS const char *program_feature_type) { uint16_t N; CLUSTERER *Clusterer; - int32_t CharID; LIST FeatureList = nullptr; FEATURE_SET FeatureSet = nullptr; @@ -464,7 +442,7 @@ CLUSTERER *SetUpForClustering(const FEATURE_DEFS_STRUCT &FeatureDefs, LABELEDLIS Clusterer = MakeClusterer(N, FeatureDefs.FeatureDesc[desc_index]->ParamDesc); FeatureList = char_sample->List; - CharID = 0; + uint32_t CharID = 0; std::vector Sample; iterate(FeatureList) { FeatureSet = reinterpret_cast(FeatureList->first_node()); @@ -512,8 +490,10 @@ void MergeInsignificantProtos(LIST ProtoList, const char *label, CLUSTERER *Clus } if (best_match != nullptr && !best_match->Significant) { if (debug) { - tprintf("Merging red clusters (%d+%d) at %g,%g and %g,%g\n", best_match->NumSamples, - Prototype->NumSamples, 
best_match->Mean[0], best_match->Mean[1], Prototype->Mean[0], + auto bestMatchNumSamples = best_match->NumSamples; + auto prototypeNumSamples = Prototype->NumSamples; + tprintf("Merging red clusters (%d+%d) at %g,%g and %g,%g\n", bestMatchNumSamples, + prototypeNumSamples, best_match->Mean[0], best_match->Mean[1], Prototype->Mean[0], Prototype->Mean[1]); } best_match->NumSamples = diff --git a/src/training/common/commontraining.h b/src/training/common/commontraining.h index 98b6e3437..f7c1bfe0b 100644 --- a/src/training/common/commontraining.h +++ b/src/training/common/commontraining.h @@ -121,12 +121,9 @@ void WriteShapeTable(const std::string &file_prefix, const ShapeTable &shape_tab // If shape_table is not nullptr, but failed to load, make a fake flat one, // as shape clustering was not run. TESS_COMMON_TRAINING_API -std::unique_ptr LoadTrainingData(int argc, const char *const *argv, bool replication, +std::unique_ptr LoadTrainingData(const char *const *filelist, bool replication, ShapeTable **shape_table, std::string &file_prefix); -TESS_COMMON_TRAINING_API -const char *GetNextFilename(int argc, const char *const *argv, int &tessoptind); - LABELEDLIST FindList(tesseract::LIST List, const std::string &Label); TESS_COMMON_TRAINING_API diff --git a/src/training/common/ctc.cpp b/src/training/common/ctc.cpp index 41f37b258..1f7613e8f 100644 --- a/src/training/common/ctc.cpp +++ b/src/training/common/ctc.cpp @@ -24,6 +24,7 @@ #include #include // for FLT_MAX +#include #include namespace tesseract { @@ -87,7 +88,9 @@ CTC::CTC(const std::vector &labels, int null_char, const GENERIC_2D_ARRAY *log_probs) const { const float *outputs_tp1 = outputs_[t + 1]; for (int u = min_labels_[t]; u <= max_labels_[t]; ++u) { // Continuing the same label. - double log_sum = log_probs->get(t + 1, u) + log(outputs_tp1[labels_[u]]); + double log_sum = log_probs->get(t + 1, u) + std::log(outputs_tp1[labels_[u]]); // Change from previous label. 
if (u + 1 < num_labels_) { double prev_prob = outputs_tp1[labels_[u + 1]]; diff --git a/src/training/common/errorcounter.cpp b/src/training/common/errorcounter.cpp index d43fe5856..0adf5d806 100644 --- a/src/training/common/errorcounter.cpp +++ b/src/training/common/errorcounter.cpp @@ -160,7 +160,9 @@ ErrorCounter::ErrorCounter(const UNICHARSET &unicharset, int fontsize) , bad_score_hist_(0, 101) , unicharset_(unicharset) { Counts empty_counts; + font_counts_.clear(); font_counts_.resize(fontsize, empty_counts); + multi_unichar_counts_.clear(); multi_unichar_counts_.resize(unicharset.size(), 0); } diff --git a/src/training/common/mastertrainer.cpp b/src/training/common/mastertrainer.cpp index 3ee28db7c..7e5ad4900 100644 --- a/src/training/common/mastertrainer.cpp +++ b/src/training/common/mastertrainer.cpp @@ -33,7 +33,7 @@ #include "shapeclassifier.h" #include "shapetable.h" #ifndef GRAPHICS_DISABLED -#include "svmnode.h" +# include "svmnode.h" #endif #include "scanutils.h" @@ -51,16 +51,16 @@ const float kFontMergeDistance = 0.025; MasterTrainer::MasterTrainer(NormalizationMode norm_mode, bool shape_analysis, bool replicate_samples, int debug_level) - : norm_mode_(norm_mode) - , samples_(fontinfo_table_) - , junk_samples_(fontinfo_table_) - , verify_samples_(fontinfo_table_) - , charsetsize_(0) - , enable_shape_analysis_(shape_analysis) - , enable_replication_(replicate_samples) - , fragments_(nullptr) - , prev_unichar_id_(-1) - , debug_level_(debug_level) {} + : norm_mode_(norm_mode), + samples_(fontinfo_table_), + junk_samples_(fontinfo_table_), + verify_samples_(fontinfo_table_), + charsetsize_(0), + enable_shape_analysis_(shape_analysis), + enable_replication_(replicate_samples), + fragments_(nullptr), + prev_unichar_id_(-1), + debug_level_(debug_level) {} MasterTrainer::~MasterTrainer() { delete[] fragments_; @@ -137,10 +137,14 @@ void MasterTrainer::ReadTrainingSamples(const char *page_name, const FEATURE_DEFS_STRUCT &feature_defs, bool verification) { 
char buffer[2048]; - const int int_feature_type = ShortNameToFeatureType(feature_defs, kIntFeatureType); - const int micro_feature_type = ShortNameToFeatureType(feature_defs, kMicroFeatureType); - const int cn_feature_type = ShortNameToFeatureType(feature_defs, kCNFeatureType); - const int geo_feature_type = ShortNameToFeatureType(feature_defs, kGeoFeatureType); + const int int_feature_type = + ShortNameToFeatureType(feature_defs, kIntFeatureType); + const int micro_feature_type = + ShortNameToFeatureType(feature_defs, kMicroFeatureType); + const int cn_feature_type = + ShortNameToFeatureType(feature_defs, kCNFeatureType); + const int geo_feature_type = + ShortNameToFeatureType(feature_defs, kGeoFeatureType); FILE *fp = fopen(page_name, "rb"); if (fp == nullptr) { @@ -175,8 +179,8 @@ void MasterTrainer::ReadTrainingSamples(const char *page_name, sample->set_font_id(font_id); sample->set_page_num(page_number + page_images_.size()); sample->set_bounding_box(bounding_box); - sample->ExtractCharDesc(int_feature_type, micro_feature_type, cn_feature_type, geo_feature_type, - char_desc); + sample->ExtractCharDesc(int_feature_type, micro_feature_type, + cn_feature_type, geo_feature_type, char_desc); AddSample(verification, unichar.c_str(), sample); delete char_desc; } @@ -186,7 +190,8 @@ void MasterTrainer::ReadTrainingSamples(const char *page_name, // Adds the given single sample to the trainer, setting the classid // appropriately from the given unichar_str. 
-void MasterTrainer::AddSample(bool verification, const char *unichar, TrainingSample *sample) { +void MasterTrainer::AddSample(bool verification, const char *unichar, + TrainingSample *sample) { if (verification) { verify_samples_.AddSample(unichar, sample); prev_unichar_id_ = -1; @@ -314,7 +319,8 @@ void MasterTrainer::SetupMasterShapes() { ClusterShapes(kMinClusteredShapes, kMaxUnicharsPerCluster, kFontMergeDistance, &char_shapes_end_fragment); char_shapes.AppendMasterShapes(char_shapes_end_fragment, nullptr); - ClusterShapes(kMinClusteredShapes, kMaxUnicharsPerCluster, kFontMergeDistance, &char_shapes); + ClusterShapes(kMinClusteredShapes, kMaxUnicharsPerCluster, kFontMergeDistance, + &char_shapes); master_shapes_.AppendMasterShapes(char_shapes, nullptr); tprintf("Master shape_table:%s\n", master_shapes_.SummaryStr().c_str()); } @@ -383,14 +389,15 @@ bool MasterTrainer::LoadFontInfo(const char *filename) { fontinfo.name = font_name; fontinfo.properties = 0; fontinfo.universal_id = 0; - if (tfscanf(fp, "%1024s %i %i %i %i %i\n", font_name, &italic, &bold, &fixed, &serif, - &fraktur) != 6) { + if (tfscanf(fp, "%1024s %i %i %i %i %i\n", font_name, &italic, &bold, + &fixed, &serif, &fraktur) != 6) { delete[] font_name; continue; } - fontinfo.properties = - (italic << 0) + (bold << 1) + (fixed << 2) + (serif << 3) + (fraktur << 4); - if (!fontinfo_table_.contains(fontinfo)) { + fontinfo.properties = (italic << 0) + (bold << 1) + (fixed << 2) + + (serif << 3) + (fraktur << 4); + if (fontinfo_table_.get_index(fontinfo) < 0) { + // fontinfo not in table. fontinfo_table_.push_back(fontinfo); } else { delete[] font_name; @@ -404,6 +411,7 @@ bool MasterTrainer::LoadFontInfo(const char *filename) { // Returns false on failure. 
bool MasterTrainer::LoadXHeights(const char *filename) { tprintf("fontinfo table is of size %d\n", fontinfo_table_.size()); + xheights_.clear(); xheights_.resize(fontinfo_table_.size(), -1); if (filename == nullptr) { return true; @@ -427,10 +435,11 @@ bool MasterTrainer::LoadXHeights(const char *filename) { } buffer[1023] = '\0'; fontinfo.name = buffer; - if (!fontinfo_table_.contains(fontinfo)) { + auto fontinfo_id = fontinfo_table_.get_index(fontinfo); + if (fontinfo_id < 0) { + // fontinfo not in table. continue; } - int fontinfo_id = fontinfo_table_.get_index(fontinfo); xheights_[fontinfo_id] = xht; total_xheight += xht; ++xheight_count; @@ -476,7 +485,8 @@ bool MasterTrainer::AddSpacingInfo(const char *filename) { fi->init_spacing(unicharset_.size()); FontSpacingInfo *spacing = nullptr; for (int l = 0; l < num_unichars; ++l) { - if (tfscanf(fontinfo_file, "%s %d %d %d", uch, &x_gap_before, &x_gap_after, &num_kerned) != 4) { + if (tfscanf(fontinfo_file, "%s %d %d %d", uch, &x_gap_before, &x_gap_after, + &num_kerned) != 4) { tprintf("Bad format of font spacing file %s\n", filename); fclose(fontinfo_file); return false; @@ -497,7 +507,8 @@ bool MasterTrainer::AddSpacingInfo(const char *filename) { if (!valid || !unicharset_.contains_unichar(kerned_uch)) { continue; } - spacing->kerned_unichar_ids.push_back(unicharset_.unichar_to_id(kerned_uch)); + spacing->kerned_unichar_ids.push_back( + unicharset_.unichar_to_id(kerned_uch)); spacing->kerned_x_gaps.push_back(static_cast(x_gap * scale)); } if (valid) { @@ -571,13 +582,14 @@ void MasterTrainer::SetupFlatShapeTable(ShapeTable *shape_table) { // Sets up a Clusterer for mftraining on a single shape_id. // Call FreeClusterer on the return value after use. 
-CLUSTERER *MasterTrainer::SetupForClustering(const ShapeTable &shape_table, - const FEATURE_DEFS_STRUCT &feature_defs, int shape_id, - int *num_samples) { +CLUSTERER *MasterTrainer::SetupForClustering( + const ShapeTable &shape_table, const FEATURE_DEFS_STRUCT &feature_defs, + int shape_id, int *num_samples) { int desc_index = ShortNameToFeatureType(feature_defs, kMicroFeatureType); int num_params = feature_defs.FeatureDesc[desc_index]->NumParams; ASSERT_HOST(num_params == (int)MicroFeatureParameter::MFCount); - CLUSTERER *clusterer = MakeClusterer(num_params, feature_defs.FeatureDesc[desc_index]->ParamDesc); + CLUSTERER *clusterer = MakeClusterer( + num_params, feature_defs.FeatureDesc[desc_index]->ParamDesc); // We want to iterate over the samples of just the one shape. IndexMapBiDi shape_map; @@ -591,7 +603,7 @@ CLUSTERER *MasterTrainer::SetupForClustering(const ShapeTable &shape_table, for (it.Begin(); !it.AtEnd(); it.Next()) { sample_ptrs.push_back(&it.GetSample()); } - int sample_id = 0; + uint32_t sample_id = 0; for (int i = sample_ptrs.size() - 1; i >= 0; --i) { const TrainingSample *sample = sample_ptrs[i]; uint32_t num_features = sample->num_micro_features(); @@ -611,12 +623,14 @@ CLUSTERER *MasterTrainer::SetupForClustering(const ShapeTable &shape_table, void MasterTrainer::WriteInttempAndPFFMTable(const UNICHARSET &unicharset, const UNICHARSET &shape_set, const ShapeTable &shape_table, - CLASS_STRUCT *float_classes, const char *inttemp_file, + CLASS_STRUCT *float_classes, + const char *inttemp_file, const char *pffmtable_file) { auto *classify = new tesseract::Classify(); // Move the fontinfo table to classify. 
fontinfo_table_.MoveTo(&classify->get_fontinfo_table()); - INT_TEMPLATES_STRUCT *int_templates = classify->CreateIntTemplates(float_classes, shape_set); + INT_TEMPLATES_STRUCT *int_templates = + classify->CreateIntTemplates(float_classes, shape_set); FILE *fp = fopen(inttemp_file, "wb"); if (fp == nullptr) { tprintf("Error, failed to open file \"%s\"\n", inttemp_file); @@ -630,10 +644,7 @@ void MasterTrainer::WriteInttempAndPFFMTable(const UNICHARSET &unicharset, // We put the shapetable_cutoffs in a vector, and compute the // unicharset cutoffs along the way. std::vector shapetable_cutoffs; - std::vector unichar_cutoffs; - for (int c = 0; c < unicharset.size(); ++c) { - unichar_cutoffs.push_back(0); - } + std::vector unichar_cutoffs(unicharset.size()); /* then write out each class */ for (int i = 0; i < int_templates->NumClasses; ++i) { INT_CLASS_STRUCT *Class = ClassForClassId(int_templates, i); @@ -678,7 +689,8 @@ void MasterTrainer::WriteInttempAndPFFMTable(const UNICHARSET &unicharset, // Generate debug output relating to the canonical distance between the // two given UTF8 grapheme strings. 
-void MasterTrainer::DebugCanonical(const char *unichar_str1, const char *unichar_str2) { +void MasterTrainer::DebugCanonical(const char *unichar_str1, + const char *unichar_str2) { int class_id1 = unicharset_.unichar_to_id(unichar_str1); int class_id2 = unicharset_.unichar_to_id(unichar_str2); if (class_id2 == INVALID_UNICHAR_ID) { @@ -688,8 +700,8 @@ void MasterTrainer::DebugCanonical(const char *unichar_str1, const char *unichar tprintf("No unicharset entry found for %s\n", unichar_str1); return; } else { - tprintf("Font ambiguities for unichar %d = %s and %d = %s\n", class_id1, unichar_str1, - class_id2, unichar_str2); + tprintf("Font ambiguities for unichar %d = %s and %d = %s\n", class_id1, + unichar_str1, class_id2, unichar_str2); } int num_fonts = samples_.NumFonts(); const IntFeatureMap &feature_map = feature_map_; @@ -713,7 +725,8 @@ void MasterTrainer::DebugCanonical(const char *unichar_str1, const char *unichar if (samples_.NumClassSamples(f2, class_id2, false) == 0) { continue; } - float dist = samples_.ClusterDistance(f1, class_id1, f2, class_id2, feature_map); + float dist = + samples_.ClusterDistance(f1, class_id1, f2, class_id2, feature_map); tprintf(" %5.3f", dist); } tprintf("\n"); @@ -724,7 +737,8 @@ void MasterTrainer::DebugCanonical(const char *unichar_str1, const char *unichar if (samples_.NumClassSamples(f, class_id1, true) > 0) { shapes.AddShape(class_id1, f); } - if (class_id1 != class_id2 && samples_.NumClassSamples(f, class_id2, true) > 0) { + if (class_id1 != class_id2 && + samples_.NumClassSamples(f, class_id2, true) > 0) { shapes.AddShape(class_id2, f); } } @@ -742,14 +756,17 @@ void MasterTrainer::DebugCanonical(const char *unichar_str1, const char *unichar // Until the features window is destroyed, each click in the features window // will display the samples that have that feature in a separate window. 
void MasterTrainer::DisplaySamples(const char *unichar_str1, int cloud_font, - const char *unichar_str2, int canonical_font) { + const char *unichar_str2, + int canonical_font) { const IntFeatureMap &feature_map = feature_map_; const IntFeatureSpace &feature_space = feature_map.feature_space(); ScrollView *f_window = CreateFeatureSpaceWindow("Features", 100, 500); - ClearFeatureSpaceWindow(norm_mode_ == NM_BASELINE ? baseline : character, f_window); + ClearFeatureSpaceWindow(norm_mode_ == NM_BASELINE ? baseline : character, + f_window); int class_id2 = samples_.unicharset().unichar_to_id(unichar_str2); if (class_id2 != INVALID_UNICHAR_ID && canonical_font >= 0) { - const TrainingSample *sample = samples_.GetCanonicalSample(canonical_font, class_id2); + const TrainingSample *sample = + samples_.GetCanonicalSample(canonical_font, class_id2); for (uint32_t f = 0; f < sample->num_features(); ++f) { RenderIntFeature(f_window, &sample->features()[f], ScrollView::RED); } @@ -779,8 +796,8 @@ void MasterTrainer::DisplaySamples(const char *unichar_str1, int cloud_font, Shape shape; shape.AddToShape(class_id1, cloud_font); s_window->Clear(); - samples_.DisplaySamplesWithFeature(feature_index, shape, feature_space, ScrollView::GREEN, - s_window); + samples_.DisplaySamplesWithFeature(feature_index, shape, feature_space, + ScrollView::GREEN, s_window); s_window->Update(); } } @@ -789,22 +806,25 @@ void MasterTrainer::DisplaySamples(const char *unichar_str1, int cloud_font, } #endif // !GRAPHICS_DISABLED -void MasterTrainer::TestClassifierVOld(bool replicate_samples, ShapeClassifier *test_classifier, +void MasterTrainer::TestClassifierVOld(bool replicate_samples, + ShapeClassifier *test_classifier, ShapeClassifier *old_classifier) { SampleIterator sample_it; sample_it.Init(nullptr, nullptr, replicate_samples, &samples_); - ErrorCounter::DebugNewErrors(test_classifier, old_classifier, CT_UNICHAR_TOPN_ERR, - fontinfo_table_, page_images_, &sample_it); + 
ErrorCounter::DebugNewErrors(test_classifier, old_classifier, + CT_UNICHAR_TOPN_ERR, fontinfo_table_, + page_images_, &sample_it); } // Tests the given test_classifier on the internal samples. // See TestClassifier for details. -void MasterTrainer::TestClassifierOnSamples(CountTypes error_mode, int report_level, +void MasterTrainer::TestClassifierOnSamples(CountTypes error_mode, + int report_level, bool replicate_samples, ShapeClassifier *test_classifier, std::string *report_string) { - TestClassifier(error_mode, report_level, replicate_samples, &samples_, test_classifier, - report_string); + TestClassifier(error_mode, report_level, replicate_samples, &samples_, + test_classifier, report_string); } // Tests the given test_classifier on the given samples. @@ -821,8 +841,10 @@ void MasterTrainer::TestClassifierOnSamples(CountTypes error_mode, int report_le // If report_string is non-nullptr, a summary of the results for each font // is appended to the report_string. double MasterTrainer::TestClassifier(CountTypes error_mode, int report_level, - bool replicate_samples, TrainingSampleSet *samples, - ShapeClassifier *test_classifier, std::string *report_string) { + bool replicate_samples, + TrainingSampleSet *samples, + ShapeClassifier *test_classifier, + std::string *report_string) { SampleIterator sample_it; sample_it.Init(nullptr, nullptr, replicate_samples, samples); if (report_level > 0) { @@ -836,8 +858,9 @@ double MasterTrainer::TestClassifier(CountTypes error_mode, int report_level, tprintf("Testing %sREPLICATED:\n", replicate_samples ? 
"" : "NON-"); } double unichar_error = 0.0; - ErrorCounter::ComputeErrorRate(test_classifier, report_level, error_mode, fontinfo_table_, - page_images_, &sample_it, &unichar_error, nullptr, report_string); + ErrorCounter::ComputeErrorRate(test_classifier, report_level, error_mode, + fontinfo_table_, page_images_, &sample_it, + &unichar_error, nullptr, report_string); return unichar_error; } @@ -856,14 +879,16 @@ float MasterTrainer::ShapeDistance(const ShapeTable &shapes, int s1, int s2) { // distances between characters of matching font where possible. for (int c1 = 0; c1 < num_chars1; ++c1) { for (int c2 = 0; c2 < num_chars2; ++c2) { - dist_sum += samples_.UnicharDistance(shape1[c1], shape2[c2], true, feature_map); + dist_sum += + samples_.UnicharDistance(shape1[c1], shape2[c2], true, feature_map); ++dist_count; } } } else { // In the single unichar case, there is little alternative, but to compute // the squared-order distance between pairs of fonts. - dist_sum = samples_.UnicharDistance(shape1[0], shape2[0], false, feature_map); + dist_sum = + samples_.UnicharDistance(shape1[0], shape2[0], false, feature_map); ++dist_count; } return dist_sum / dist_count; @@ -941,8 +966,8 @@ void MasterTrainer::ReplaceFragmentedSamples() { // * No shape shall have more than max_shape_unichars in it, // * Don't merge shapes where the distance between them exceeds max_dist. const float kInfiniteDist = 999.0f; -void MasterTrainer::ClusterShapes(int min_shapes, int max_shape_unichars, float max_dist, - ShapeTable *shapes) { +void MasterTrainer::ClusterShapes(int min_shapes, int max_shape_unichars, + float max_dist, ShapeTable *shapes) { int num_shapes = shapes->NumShapes(); int max_merges = num_shapes - min_shapes; // TODO: avoid new / delete. 
@@ -970,8 +995,8 @@ void MasterTrainer::ClusterShapes(int min_shapes, int max_shape_unichars, float int num_unichars = shapes->MergedUnicharCount(min_s1, min_s2); shape_dists[min_s1][min_s2 - min_s1 - 1].distance = kInfiniteDist; if (num_unichars > max_shape_unichars) { - tprintf("Merge of %d and %d with %d would exceed max of %d unichars\n", min_s1, min_s2, - num_unichars, max_shape_unichars); + tprintf("Merge of %d and %d with %d would exceed max of %d unichars\n", + min_s1, min_s2, num_unichars, max_shape_unichars); } else { shapes->MergeShapes(min_s1, min_s2); shape_dists[min_s2].clear(); @@ -979,13 +1004,15 @@ void MasterTrainer::ClusterShapes(int min_shapes, int max_shape_unichars, float for (int s = 0; s < min_s1; ++s) { if (!shape_dists[s].empty()) { - shape_dists[s][min_s1 - s - 1].distance = ShapeDistance(*shapes, s, min_s1); + shape_dists[s][min_s1 - s - 1].distance = + ShapeDistance(*shapes, s, min_s1); shape_dists[s][min_s2 - s - 1].distance = kInfiniteDist; } } for (int s2 = min_s1 + 1; s2 < num_shapes; ++s2) { if (shape_dists[min_s1][s2 - min_s1 - 1].distance < kInfiniteDist) { - shape_dists[min_s1][s2 - min_s1 - 1].distance = ShapeDistance(*shapes, min_s1, s2); + shape_dists[min_s1][s2 - min_s1 - 1].distance = + ShapeDistance(*shapes, min_s1, s2); } } for (int s = min_s1 + 1; s < min_s2; ++s) { diff --git a/src/training/common/trainingsampleset.cpp b/src/training/common/trainingsampleset.cpp index 43cf7827a..1b397c066 100644 --- a/src/training/common/trainingsampleset.cpp +++ b/src/training/common/trainingsampleset.cpp @@ -380,7 +380,7 @@ float TrainingSampleSet::ClusterDistance(int font_id1, int class_id1, int font_i } // Both font and class are different. Linear search for class_id2/font_id2 // in what is a hopefully short list of distances. 
- int cache_index = 0; + size_t cache_index = 0; while (cache_index < fc_info.distance_cache.size() && (fc_info.distance_cache[cache_index].unichar_id != class_id2 || fc_info.distance_cache[cache_index].font_id != font_id2)) { @@ -470,14 +470,14 @@ int TrainingSampleSet::ReliablySeparable(int font_id1, int class_id1, int font_i std::vector good_features; AddNearFeatures(feature_map, feature, 1, &good_features); // Check that none of the good_features are in the cloud. - int i; - for (i = 0; i < good_features.size(); ++i) { - int good_f = good_features[i]; + bool found = false; + for (auto good_f : good_features) { if (cloud1[good_f]) { + found = true; break; } } - if (i < good_features.size()) { + if (found) { continue; // Found one in the cloud. } ++result; @@ -570,11 +570,11 @@ void TrainingSampleSet::OrganizeByFontAndClass() { FontClassInfo empty; font_class_array_ = new GENERIC_2D_ARRAY(compact_font_size, unicharset_size_, empty); - for (int s = 0; s < samples_.size(); ++s) { + for (size_t s = 0; s < samples_.size(); ++s) { int font_id = samples_[s]->font_id(); int class_id = samples_[s]->class_id(); if (font_id < 0 || font_id >= font_id_map_.SparseSize()) { - tprintf("Font id = %d/%d, class id = %d/%d on sample %d\n", font_id, + tprintf("Font id = %d/%d, class id = %d/%d on sample %zu\n", font_id, font_id_map_.SparseSize(), class_id, unicharset_size_, s); } ASSERT_HOST(font_id >= 0 && font_id < font_id_map_.SparseSize()); @@ -607,7 +607,7 @@ void TrainingSampleSet::SetupFontIdMap() { ++font_counts[font_id]; } font_id_map_.Init(font_counts.size(), false); - for (int f = 0; f < font_counts.size(); ++f) { + for (size_t f = 0; f < font_counts.size(); ++f) { font_id_map_.SetMap(f, font_counts[f] > 0); } font_id_map_.Setup(); @@ -651,8 +651,7 @@ void TrainingSampleSet::ComputeCanonicalSamples(const IntFeatureMap &map, bool d int max_s2 = 0; fcinfo.canonical_sample = fcinfo.samples[0]; fcinfo.canonical_dist = 0.0f; - for (int i = 0; i < fcinfo.samples.size(); ++i) { 
- int s1 = fcinfo.samples[i]; + for (auto s1 : fcinfo.samples) { const std::vector &features1 = samples_[s1]->indexed_features(); f_table.Set(features1, features1.size(), true); double max_dist = 0.0; diff --git a/src/training/lstmtraining.cpp b/src/training/lstmtraining.cpp index 297870d7d..a1068bdb5 100644 --- a/src/training/lstmtraining.cpp +++ b/src/training/lstmtraining.cpp @@ -36,6 +36,8 @@ static INT_PARAM_FLAG(perfect_sample_delay, 0, "How many imperfect samples betwe static DOUBLE_PARAM_FLAG(target_error_rate, 0.01, "Final error rate in percent."); static DOUBLE_PARAM_FLAG(weight_range, 0.1, "Range of initial random weights."); static DOUBLE_PARAM_FLAG(learning_rate, 10.0e-4, "Weight factor for new deltas."); +static BOOL_PARAM_FLAG(reset_learning_rate, false, + "Resets all stored learning rates to the value specified by --learning_rate."); static DOUBLE_PARAM_FLAG(momentum, 0.5, "Decay factor for repeating deltas."); static DOUBLE_PARAM_FLAG(adam_beta, 0.999, "Decay factor for repeating deltas."); static INT_PARAM_FLAG(max_image_MB, 6000, "Max memory to use for images."); @@ -110,7 +112,10 @@ int main(int argc, char **argv) { tesseract::LSTMTrainer trainer(FLAGS_model_output.c_str(), checkpoint_file.c_str(), FLAGS_debug_interval, static_cast(FLAGS_max_image_MB) * 1048576); - trainer.InitCharSet(FLAGS_traineddata.c_str()); + if (!trainer.InitCharSet(FLAGS_traineddata.c_str())) { + tprintf("Error, failed to read %s\n", FLAGS_traineddata.c_str()); + return EXIT_FAILURE; + } // Reading something from an existing model doesn't require many flags, // so do it now and exit. 
@@ -157,6 +162,10 @@ int main(int argc, char **argv) { return EXIT_FAILURE; } tprintf("Continuing from %s\n", FLAGS_continue_from.c_str()); + if (FLAGS_reset_learning_rate) { + trainer.SetLearningRate(FLAGS_learning_rate); + tprintf("Set learning rate to %f\n", static_cast(FLAGS_learning_rate)); + } trainer.InitIterations(); } if (FLAGS_continue_from.empty() || FLAGS_append_index >= 0) { @@ -218,6 +227,7 @@ int main(int argc, char **argv) { tprintf("%s\n", log_str.c_str()); } while (trainer.best_error_rate() > FLAGS_target_error_rate && (trainer.training_iteration() < max_iterations)); - tprintf("Finished! Error rate = %g\n", trainer.best_error_rate()); + tprintf("Finished! Selected model with minimal training error rate (BCER) = %g\n", + trainer.best_error_rate()); return EXIT_SUCCESS; } /* main */ diff --git a/src/training/merge_unicharsets.cpp b/src/training/merge_unicharsets.cpp index 7ac26c09a..974c61eed 100644 --- a/src/training/merge_unicharsets.cpp +++ b/src/training/merge_unicharsets.cpp @@ -38,7 +38,7 @@ int main(int argc, char **argv) { for (int arg = 1; arg < argc - 1; ++arg) { // Load the input unicharset if (input_unicharset.load_from_file(argv[arg])) { - printf("Loaded unicharset of size %d from file %s\n", input_unicharset.size(), argv[arg]); + printf("Loaded unicharset of size %zu from file %s\n", input_unicharset.size(), argv[arg]); result_unicharset.AppendOtherUnicharset(input_unicharset); } else { printf("Failed to load unicharset from file %s!!\n", argv[arg]); diff --git a/src/training/mergenf.cpp b/src/training/mergenf.cpp index fd1aa2d9b..7e027ab96 100644 --- a/src/training/mergenf.cpp +++ b/src/training/mergenf.cpp @@ -69,7 +69,7 @@ float CompareProtos(PROTO_STRUCT *p1, PROTO_STRUCT *p2) { float Angle, Length; /* if p1 and p2 are not close in length, don't let them match */ - Length = fabs(p1->Length - p2->Length); + Length = std::fabs(p1->Length - p2->Length); if (Length > MAX_LENGTH_MISMATCH) { return (0.0); } @@ -88,8 +88,8 @@ float 
CompareProtos(PROTO_STRUCT *p1, PROTO_STRUCT *p2) { } /* set the dummy pico-feature at one end of p1 and match it to p2 */ - Feature->Params[PicoFeatX] = p1->X + cos(Angle) * Length; - Feature->Params[PicoFeatY] = p1->Y + sin(Angle) * Length; + Feature->Params[PicoFeatX] = p1->X + std::cos(Angle) * Length; + Feature->Params[PicoFeatY] = p1->Y + std::sin(Angle) * Length; if (DummyFastMatch(Feature, p2)) { Evidence = SubfeatureEvidence(Feature, p2); if (Evidence < WorstEvidence) { @@ -101,8 +101,8 @@ float CompareProtos(PROTO_STRUCT *p1, PROTO_STRUCT *p2) { } /* set the dummy pico-feature at the other end of p1 and match it to p2 */ - Feature->Params[PicoFeatX] = p1->X - cos(Angle) * Length; - Feature->Params[PicoFeatY] = p1->Y - sin(Angle) * Length; + Feature->Params[PicoFeatX] = p1->X - std::cos(Angle) * Length; + Feature->Params[PicoFeatY] = p1->Y - std::sin(Angle) * Length; if (DummyFastMatch(Feature, p2)) { Evidence = SubfeatureEvidence(Feature, p2); if (Evidence < WorstEvidence) { @@ -266,7 +266,7 @@ bool DummyFastMatch(FEATURE Feature, PROTO_STRUCT *Proto) { float AngleError; MaxAngleError = training_angle_pad / 360.0; - AngleError = fabs(Proto->Angle - Feature->Params[PicoFeatDir]); + AngleError = std::fabs(Proto->Angle - Feature->Params[PicoFeatDir]); if (AngleError > 0.5) { AngleError = 1.0 - AngleError; } @@ -296,8 +296,8 @@ void ComputePaddedBoundingBox(PROTO_STRUCT *Proto, float TangentPad, float Ortho FRECT *BoundingBox) { float Length = Proto->Length / 2.0 + TangentPad; float Angle = Proto->Angle * 2.0 * M_PI; - float CosOfAngle = fabs(cos(Angle)); - float SinOfAngle = fabs(sin(Angle)); + float CosOfAngle = fabs(std::cos(Angle)); + float SinOfAngle = fabs(std::sin(Angle)); float Pad = std::max(CosOfAngle * Length, SinOfAngle * OrthogonalPad); BoundingBox->MinX = Proto->X - Pad; diff --git a/src/training/mftraining.cpp b/src/training/mftraining.cpp index d0975c370..dc75f3396 100644 --- a/src/training/mftraining.cpp +++ b/src/training/mftraining.cpp @@ 
-75,10 +75,11 @@ static void DisplayProtoList(const char *ch, LIST protolist) { auto dy = static_cast(LengthOf(prototype->Mean) * sin(angle) / 2); window->SetCursor((x - dx) * 256, (y - dy) * 256); window->DrawTo((x + dx) * 256, (y + dy) * 256); + auto prototypeNumSamples = prototype->NumSamples; if (prototype->Significant) { - tprintf("Green proto at (%g,%g)+(%g,%g) %d samples\n", x, y, dx, dy, prototype->NumSamples); + tprintf("Green proto at (%g,%g)+(%g,%g) %d samples\n", x, y, dx, dy, prototypeNumSamples); } else if (prototype->NumSamples > 0 && !prototype->Merged) { - tprintf("Red proto at (%g,%g)+(%g,%g) %d samples\n", x, y, dx, dy, prototype->NumSamples); + tprintf("Red proto at (%g,%g)+(%g,%g) %d samples\n", x, y, dx, dy, prototypeNumSamples); } } window->Update(); @@ -198,7 +199,7 @@ int main(int argc, char **argv) { ShapeTable *shape_table = nullptr; std::string file_prefix; // Load the training data. - auto trainer = tesseract::LoadTrainingData(argc, argv, false, &shape_table, file_prefix); + auto trainer = tesseract::LoadTrainingData(argv + 1, false, &shape_table, file_prefix); if (trainer == nullptr) { return 1; // Failed. } @@ -254,7 +255,7 @@ int main(int argc, char **argv) { // Now write the inttemp and pffmtable. trainer->WriteInttempAndPFFMTable(trainer->unicharset(), *unicharset, *shape_table, float_classes, inttemp_file.c_str(), pffmtable_file.c_str()); - for (int c = 0; c < unicharset->size(); ++c) { + for (size_t c = 0; c < unicharset->size(); ++c) { FreeClassFields(&float_classes[c]); } delete[] float_classes; diff --git a/src/training/pango/ligature_table.cpp b/src/training/pango/ligature_table.cpp index 925abeb20..18bfbed94 100644 --- a/src/training/pango/ligature_table.cpp +++ b/src/training/pango/ligature_table.cpp @@ -4,7 +4,6 @@ * conditional on codepoint support by a specified font * (if specified). * Author: Ranjith Unnikrishnan - * Created: Mon Nov 18 2013 * * (C) Copyright 2013, Google Inc. 
* Licensed under the Apache License, Version 2.0 (the "License"); @@ -63,20 +62,15 @@ LigatureTable::LigatureTable() void LigatureTable::Init() { if (norm_to_lig_table_.empty()) { for (char32 lig = kMinLigature; lig <= kMaxLigature; ++lig) { - // For each char in the range, convert to utf8, nfkc normalize, and if + // For each char in the range, convert to utf8, nfc normalize, and if // the strings are different put the both mappings in the hash_maps. std::string lig8 = EncodeAsUTF8(lig); icu::UnicodeString unicode_lig8(static_cast(lig)); icu::UnicodeString normed8_result; icu::ErrorCode status; - icu::Normalizer::normalize(unicode_lig8, UNORM_NFKC, 0, normed8_result, status); + icu::Normalizer::normalize(unicode_lig8, UNORM_NFC, 0, normed8_result, status); std::string normed8; normed8_result.toUTF8String(normed8); - // The icu::Normalizer maps the "LONG S T" ligature to "st". Correct that - // here manually so that AddLigatures() will work as desired. - if (lig8 == "\uFB05") { - normed8 = "ſt"; - } int lig_length = lig8.length(); int norm_length = normed8.size(); if (normed8 != lig8 && lig_length > 1 && norm_length > 1) { diff --git a/src/training/pango/pango_font_info.cpp b/src/training/pango/pango_font_info.cpp index c4c2a3089..076f0badc 100644 --- a/src/training/pango/pango_font_info.cpp +++ b/src/training/pango/pango_font_info.cpp @@ -21,7 +21,7 @@ # include "config_auto.h" #endif -#if (defined __MINGW32__) || (defined __CYGWIN__) +#if (defined __CYGWIN__) // workaround for stdlib.h and putenv # undef __STRICT_ANSI__ #endif diff --git a/src/training/shapeclustering.cpp b/src/training/shapeclustering.cpp index 242688bb1..eb95b1237 100644 --- a/src/training/shapeclustering.cpp +++ b/src/training/shapeclustering.cpp @@ -47,7 +47,7 @@ int main(int argc, char **argv) { ParseArguments(&argc, &argv); std::string file_prefix; - auto trainer = tesseract::LoadTrainingData(argc, argv, false, nullptr, file_prefix); + auto trainer = tesseract::LoadTrainingData(argv + 1, 
false, nullptr, file_prefix); if (!trainer) { return 1; diff --git a/src/training/unicharset/lstmtester.cpp b/src/training/unicharset/lstmtester.cpp index 08a08b8af..bd0f222a5 100644 --- a/src/training/unicharset/lstmtester.cpp +++ b/src/training/unicharset/lstmtester.cpp @@ -106,7 +106,7 @@ std::string LSTMTester::RunEvalSync(int iteration, const double *training_errors std::string ocr_text = trainer.DecodeLabels(ocr_labels); tprintf("OCR :%s\n", ocr_text.c_str()); if (verbosity > 2 || (verbosity > 1 && result != PERFECT)) { - tprintf("Line Char error rate=%f, Word error rate=%f\n\n", + tprintf("Line BCER=%f, BWER=%f\n\n", trainer.NewSingleError(tesseract::ET_CHAR_ERROR), trainer.NewSingleError(tesseract::ET_WORD_RECERR)); } @@ -116,10 +116,12 @@ std::string LSTMTester::RunEvalSync(int iteration, const double *training_errors char_error *= 100.0 / total_pages_; word_error *= 100.0 / total_pages_; std::string result; - result += "At iteration " + std::to_string(iteration); - result += ", stage " + std::to_string(training_stage); - result += ", Eval Char error rate=" + std::to_string(char_error); - result += ", Word error rate=" + std::to_string(word_error); + if (iteration != 0 || training_stage != 0) { + result += "At iteration " + std::to_string(iteration); + result += ", stage " + std::to_string(training_stage) + ", "; + } + result += "BCER eval=" + std::to_string(char_error); + result += ", BWER eval=" + std::to_string(word_error); return result; } diff --git a/src/training/unicharset/lstmtrainer.cpp b/src/training/unicharset/lstmtrainer.cpp index ccfb55309..e002e653b 100644 --- a/src/training/unicharset/lstmtrainer.cpp +++ b/src/training/unicharset/lstmtrainer.cpp @@ -22,6 +22,7 @@ # include "config_auto.h" #endif +#include #include #include "lstmtrainer.h" @@ -71,14 +72,17 @@ const int kTargetXScale = 5; const int kTargetYScale = 100; #endif // !GRAPHICS_DISABLED -LSTMTrainer::LSTMTrainer() : randomly_rotate_(false), training_data_(0), sub_trainer_(nullptr) { 
+LSTMTrainer::LSTMTrainer() + : randomly_rotate_(false), training_data_(0), sub_trainer_(nullptr) { EmptyConstructor(); debug_interval_ = 0; } -LSTMTrainer::LSTMTrainer(const char *model_base, const char *checkpoint_name, int debug_interval, - int64_t max_memory) - : randomly_rotate_(false), training_data_(max_memory), sub_trainer_(nullptr) { +LSTMTrainer::LSTMTrainer(const char *model_base, const char *checkpoint_name, + int debug_interval, int64_t max_memory) + : randomly_rotate_(false), + training_data_(max_memory), + sub_trainer_(nullptr) { EmptyConstructor(); debug_interval_ = debug_interval; model_base_ = model_base; @@ -96,7 +100,8 @@ LSTMTrainer::~LSTMTrainer() { // Tries to deserialize a trainer from the given file and silently returns // false in case of failure. -bool LSTMTrainer::TryLoadingCheckpoint(const char *filename, const char *old_traineddata) { +bool LSTMTrainer::TryLoadingCheckpoint(const char *filename, + const char *old_traineddata) { std::vector data; if (!LoadDataFromFile(filename, &data)) { return false; @@ -105,12 +110,18 @@ bool LSTMTrainer::TryLoadingCheckpoint(const char *filename, const char *old_tra if (!ReadTrainingDump(data, *this)) { return false; } + if (IsIntMode()) { + tprintf("Error, %s is an integer (fast) model, cannot continue training\n", + filename); + return false; + } if (((old_traineddata == nullptr || *old_traineddata == '\0') && network_->NumOutputs() == recoder_.code_range()) || filename == old_traineddata) { return true; // Normal checkpoint load complete. } - tprintf("Code range changed from %d to %d!\n", network_->NumOutputs(), recoder_.code_range()); + tprintf("Code range changed from %d to %d!\n", network_->NumOutputs(), + recoder_.code_range()); if (old_traineddata == nullptr || *old_traineddata == '\0') { tprintf("Must supply the old traineddata for code conversion!\n"); return false; @@ -148,21 +159,23 @@ bool LSTMTrainer::TryLoadingCheckpoint(const char *filename, const char *old_tra // are implemented. 
// For other args see NetworkBuilder::InitNetwork. // Note: Be sure to call InitCharSet before InitNetwork! -bool LSTMTrainer::InitNetwork(const char *network_spec, int append_index, int net_flags, - float weight_range, float learning_rate, float momentum, +bool LSTMTrainer::InitNetwork(const char *network_spec, int append_index, + int net_flags, float weight_range, + float learning_rate, float momentum, float adam_beta) { mgr_.SetVersionString(mgr_.VersionString() + ":" + network_spec); adam_beta_ = adam_beta; learning_rate_ = learning_rate; momentum_ = momentum; SetNullChar(); - if (!NetworkBuilder::InitNetwork(recoder_.code_range(), network_spec, append_index, net_flags, - weight_range, &randomizer_, &network_)) { + if (!NetworkBuilder::InitNetwork(recoder_.code_range(), network_spec, + append_index, net_flags, weight_range, + &randomizer_, &network_)) { return false; } network_str_ += network_spec; - tprintf("Built network:%s from request %s\n", - network_->spec().c_str(), network_spec); + tprintf("Built network:%s from request %s\n", network_->spec().c_str(), + network_spec); tprintf( "Training parameters:\n Debug interval = %d," " weights = %g, learning rate = %g, momentum=%g\n", @@ -200,13 +213,16 @@ void LSTMTrainer::InitIterations() { worst_error_rate_ = 0.0; worst_iteration_ = 0; stall_iteration_ = kMinStallIterations; + best_error_history_.clear(); + best_error_iterations_.clear(); improvement_steps_ = kMinStallIterations; perfect_delay_ = 0; last_perfect_training_iteration_ = 0; for (int i = 0; i < ET_COUNT; ++i) { best_error_rates_[i] = 100.0; worst_error_rates_[i] = 0.0; - error_buffers_[i].resize(kRollingBufferSize_, 0.0); + error_buffers_[i].clear(); + error_buffers_[i].resize(kRollingBufferSize_); error_rates_[i] = 100.0; } error_rate_of_last_saved_best_ = kMinStartedErrorRate; @@ -215,14 +231,14 @@ void LSTMTrainer::InitIterations() { // If the training sample is usable, grid searches for the optimal // dict_ratio/cert_offset, and returns the 
results in a string of space- // separated triplets of ratio,offset=worderr. -Trainability LSTMTrainer::GridSearchDictParams(const ImageData *trainingdata, int iteration, - double min_dict_ratio, double dict_ratio_step, - double max_dict_ratio, double min_cert_offset, - double cert_offset_step, double max_cert_offset, - std::string &results) { +Trainability LSTMTrainer::GridSearchDictParams( + const ImageData *trainingdata, int iteration, double min_dict_ratio, + double dict_ratio_step, double max_dict_ratio, double min_cert_offset, + double cert_offset_step, double max_cert_offset, std::string &results) { sample_iteration_ = iteration; NetworkIO fwd_outputs, targets; - Trainability result = PrepareForBackward(trainingdata, &fwd_outputs, &targets); + Trainability result = + PrepareForBackward(trainingdata, &fwd_outputs, &targets); if (result == UNENCODABLE || result == HI_PRECISION_ERR || dict_ == nullptr) { return result; } @@ -231,8 +247,10 @@ Trainability LSTMTrainer::GridSearchDictParams(const ImageData *trainingdata, in std::vector truth_labels, ocr_labels, xcoords; ASSERT_HOST(EncodeString(trainingdata->transcription(), &truth_labels)); // NO-dict error. 
- RecodeBeamSearch base_search(recoder_, null_char_, SimpleTextOutput(), nullptr); - base_search.Decode(fwd_outputs, 1.0, 0.0, RecodeBeamSearch::kMinCertainty, nullptr); + RecodeBeamSearch base_search(recoder_, null_char_, SimpleTextOutput(), + nullptr); + base_search.Decode(fwd_outputs, 1.0, 0.0, RecodeBeamSearch::kMinCertainty, + nullptr); base_search.ExtractBestPathAsLabels(&ocr_labels, &xcoords); std::string truth_text = DecodeLabels(truth_labels); std::string ocr_text = DecodeLabels(ocr_labels); @@ -241,18 +259,21 @@ Trainability LSTMTrainer::GridSearchDictParams(const ImageData *trainingdata, in RecodeBeamSearch search(recoder_, null_char_, SimpleTextOutput(), dict_); for (double r = min_dict_ratio; r < max_dict_ratio; r += dict_ratio_step) { - for (double c = min_cert_offset; c < max_cert_offset; c += cert_offset_step) { - search.Decode(fwd_outputs, r, c, RecodeBeamSearch::kMinCertainty, nullptr); + for (double c = min_cert_offset; c < max_cert_offset; + c += cert_offset_step) { + search.Decode(fwd_outputs, r, c, RecodeBeamSearch::kMinCertainty, + nullptr); search.ExtractBestPathAsLabels(&ocr_labels, &xcoords); truth_text = DecodeLabels(truth_labels); ocr_text = DecodeLabels(ocr_labels); // This is destructive on both strings. 
double word_error = ComputeWordError(&truth_text, &ocr_text); - if ((r == min_dict_ratio && c == min_cert_offset) || !std::isfinite(word_error)) { + if ((r == min_dict_ratio && c == min_cert_offset) || + !std::isfinite(word_error)) { std::string t = DecodeLabels(truth_labels); - std::string o = DecodeLabels(ocr_labels); - tprintf("r=%g, c=%g, truth=%s, ocr=%s, wderr=%g, truth[0]=%d\n", r, c, t.c_str(), o.c_str(), - word_error, truth_labels[0]); + std::string o = DecodeLabels(ocr_labels); + tprintf("r=%g, c=%g, truth=%s, ocr=%s, wderr=%g, truth[0]=%d\n", r, c, + t.c_str(), o.c_str(), word_error, truth_labels[0]); } results += " " + std::to_string(r); results += "," + std::to_string(c); @@ -271,17 +292,20 @@ void LSTMTrainer::DebugNetwork() { // tesseract into memory ready for training. Returns false if nothing was // loaded. bool LSTMTrainer::LoadAllTrainingData(const std::vector &filenames, - CachingStrategy cache_strategy, bool randomly_rotate) { + CachingStrategy cache_strategy, + bool randomly_rotate) { randomly_rotate_ = randomly_rotate; training_data_.Clear(); - return training_data_.LoadDocuments(filenames, cache_strategy, LoadDataFromFile); + return training_data_.LoadDocuments(filenames, cache_strategy, + LoadDataFromFile); } // Keeps track of best and locally worst char error_rate and launches tests // using tester, when a new min or max is reached. // Writes checkpoints at appropriate times and builds and returns a log message // to indicate progress. Returns false if nothing interesting happened. 
-bool LSTMTrainer::MaintainCheckpoints(TestCallback tester, std::string &log_msg) { +bool LSTMTrainer::MaintainCheckpoints(const TestCallback &tester, + std::string &log_msg) { PrepareLogMsg(log_msg); double error_rate = CharError(); int iteration = learning_iteration(); @@ -306,14 +330,15 @@ bool LSTMTrainer::MaintainCheckpoints(TestCallback tester, std::string &log_msg) std::vector rec_model_data; if (error_rate < best_error_rate_) { SaveRecognitionDump(&rec_model_data); - log_msg += " New best char error = " + std::to_string(error_rate); + log_msg += " New best BCER = " + std::to_string(error_rate); log_msg += UpdateErrorGraph(iteration, error_rate, rec_model_data, tester); // If sub_trainer_ is not nullptr, either *this beat it to a new best, or it // just overwrote *this. In either case, we have finished with it. sub_trainer_.reset(); stall_iteration_ = learning_iteration() + kMinStallIterations; if (TransitionTrainingStage(kStageTransitionThreshold)) { - log_msg += " Transitioned to stage " + std::to_string(CurrentTrainingStage()); + log_msg += + " Transitioned to stage " + std::to_string(CurrentTrainingStage()); } SaveTrainingDump(NO_BEST_TRAINER, *this, &best_trainer_); if (error_rate < error_rate_of_last_saved_best_ * kBestCheckpointFraction) { @@ -328,7 +353,7 @@ bool LSTMTrainer::MaintainCheckpoints(TestCallback tester, std::string &log_msg) } } else if (error_rate > worst_error_rate_) { SaveRecognitionDump(&rec_model_data); - log_msg += " New worst char error = " + std::to_string(error_rate); + log_msg += " New worst BCER = " + std::to_string(error_rate); log_msg += UpdateErrorGraph(iteration, error_rate, rec_model_data, tester); if (worst_error_rate_ > best_error_rate_ + kMinDivergenceRate && best_error_rate_ < kMinStartedErrorRate && !best_trainer_.empty()) { @@ -371,15 +396,16 @@ void LSTMTrainer::PrepareLogMsg(std::string &log_msg) const { LogIterations("At", log_msg); log_msg += ", Mean rms=" + std::to_string(error_rates_[ET_RMS]); log_msg += "%, 
delta=" + std::to_string(error_rates_[ET_DELTA]); - log_msg += "%, char train=" + std::to_string(error_rates_[ET_CHAR_ERROR]); - log_msg += "%, word train=" + std::to_string(error_rates_[ET_WORD_RECERR]); + log_msg += "%, BCER train=" + std::to_string(error_rates_[ET_CHAR_ERROR]); + log_msg += "%, BWER train=" + std::to_string(error_rates_[ET_WORD_RECERR]); log_msg += "%, skip ratio=" + std::to_string(error_rates_[ET_SKIP_RATIO]); log_msg += "%, "; } // Appends iteration learning_iteration()/training_iteration()/ // sample_iteration() to the log_msg. -void LSTMTrainer::LogIterations(const char *intro_str, std::string &log_msg) const { +void LSTMTrainer::LogIterations(const char *intro_str, + std::string &log_msg) const { log_msg += intro_str; log_msg += " iteration " + std::to_string(learning_iteration()); log_msg += "/" + std::to_string(training_iteration()); @@ -389,7 +415,8 @@ void LSTMTrainer::LogIterations(const char *intro_str, std::string &log_msg) con // Returns true and increments the training_stage_ if the error rate has just // passed through the given threshold for the first time. bool LSTMTrainer::TransitionTrainingStage(float error_threshold) { - if (best_error_rate_ < error_threshold && training_stage_ + 1 < num_training_stages_) { + if (best_error_rate_ < error_threshold && + training_stage_ + 1 < num_training_stages_) { ++training_stage_; return true; } @@ -397,8 +424,8 @@ bool LSTMTrainer::TransitionTrainingStage(float error_threshold) { } // Writes to the given file. Returns false in case of error. 
-bool LSTMTrainer::Serialize(SerializeAmount serialize_amount, const TessdataManager *mgr, - TFile *fp) const { +bool LSTMTrainer::Serialize(SerializeAmount serialize_amount, + const TessdataManager *mgr, TFile *fp) const { if (!LSTMRecognizer::Serialize(mgr, fp)) { return false; } @@ -463,7 +490,8 @@ bool LSTMTrainer::Serialize(SerializeAmount serialize_amount, const TessdataMana return false; } std::vector sub_data; - if (sub_trainer_ != nullptr && !SaveTrainingDump(LIGHT, *sub_trainer_, &sub_data)) { + if (sub_trainer_ != nullptr && + !SaveTrainingDump(LIGHT, *sub_trainer_, &sub_data)) { return false; } if (!fp->Serialize(sub_data)) { @@ -580,11 +608,13 @@ void LSTMTrainer::StartSubtrainer(std::string &log_msg) { log_msg += " Failed to revert to previous best for trial!"; sub_trainer_.reset(); } else { - log_msg += " Trial sub_trainer_ from iteration " + std::to_string(sub_trainer_->training_iteration()); + log_msg += " Trial sub_trainer_ from iteration " + + std::to_string(sub_trainer_->training_iteration()); // Reduce learning rate so it doesn't diverge this time. sub_trainer_->ReduceLearningRates(this, log_msg); // If it fails again, we will wait twice as long before reverting again. - int stall_offset = learning_iteration() - sub_trainer_->learning_iteration(); + int stall_offset = + learning_iteration() - sub_trainer_->learning_iteration(); stall_iteration_ = learning_iteration() + 2 * stall_offset; sub_trainer_->stall_iteration_ = stall_iteration_; // Re-save the best trainer with the new learning rates and stall iteration. 
@@ -612,7 +642,8 @@ SubTrainerResult LSTMTrainer::UpdateSubtrainer(std::string &log_msg) { int end_iteration = training_iteration(); while (sub_trainer_->training_iteration() < end_iteration && sub_margin >= kSubTrainerMarginFraction) { - int target_iteration = sub_trainer_->training_iteration() + kNumPagesPerBatch; + int target_iteration = + sub_trainer_->training_iteration() + kNumPagesPerBatch; while (sub_trainer_->training_iteration() < target_iteration) { sub_trainer_->TrainOnLine(this, false); } @@ -624,12 +655,14 @@ SubTrainerResult LSTMTrainer::UpdateSubtrainer(std::string &log_msg) { sub_error = sub_trainer_->CharError(); sub_margin = (training_error - sub_error) / sub_error; } - if (sub_error < best_error_rate_ && sub_margin >= kSubTrainerMarginFraction) { + if (sub_error < best_error_rate_ && + sub_margin >= kSubTrainerMarginFraction) { // The sub_trainer_ has won the race to a new best. Switch to it. std::vector updated_trainer; SaveTrainingDump(LIGHT, *sub_trainer_, &updated_trainer); ReadTrainingDump(updated_trainer, *this); - log_msg += " Sub trainer wins at iteration " + std::to_string(training_iteration()); + log_msg += " Sub trainer wins at iteration " + + std::to_string(training_iteration()); log_msg += "\n"; return STR_REPLACED; } @@ -640,11 +673,13 @@ SubTrainerResult LSTMTrainer::UpdateSubtrainer(std::string &log_msg) { // Reduces network learning rates, either for everything, or for layers // independently, according to NF_LAYER_SPECIFIC_LR. 
-void LSTMTrainer::ReduceLearningRates(LSTMTrainer *samples_trainer, std::string &log_msg) { +void LSTMTrainer::ReduceLearningRates(LSTMTrainer *samples_trainer, + std::string &log_msg) { if (network_->TestFlag(NF_LAYER_SPECIFIC_LR)) { - int num_reduced = - ReduceLayerLearningRates(kLearningRateDecay, kNumAdjustmentIterations, samples_trainer); - log_msg += "\nReduced learning rate on layers: " + std::to_string(num_reduced); + int num_reduced = ReduceLayerLearningRates( + kLearningRateDecay, kNumAdjustmentIterations, samples_trainer); + log_msg += + "\nReduced learning rate on layers: " + std::to_string(num_reduced); } else { ScaleLearningRate(kLearningRateDecay); log_msg += "\nReduced learning rate to :" + std::to_string(learning_rate_); @@ -658,7 +693,7 @@ void LSTMTrainer::ReduceLearningRates(LSTMTrainer *samples_trainer, std::string // Even if it looks like all weights should remain the same, an adjustment // will be made to guarantee a different result when reverting to an old best. // Returns the number of layer learning rates that were reduced. -int LSTMTrainer::ReduceLayerLearningRates(double factor, int num_samples, +int LSTMTrainer::ReduceLayerLearningRates(TFloat factor, int num_samples, LSTMTrainer *samples_trainer) { enum WhichWay { LR_DOWN, // Learning rate will go down by factor. 
@@ -667,15 +702,14 @@ int LSTMTrainer::ReduceLayerLearningRates(double factor, int num_samples, }; std::vector layers = EnumerateLayers(); int num_layers = layers.size(); - std::vector num_weights; - num_weights.resize(num_layers, 0); - std::vector bad_sums[LR_COUNT]; - std::vector ok_sums[LR_COUNT]; + std::vector num_weights(num_layers); + std::vector bad_sums[LR_COUNT]; + std::vector ok_sums[LR_COUNT]; for (int i = 0; i < LR_COUNT; ++i) { bad_sums[i].resize(num_layers, 0.0); ok_sums[i].resize(num_layers, 0.0); } - double momentum_factor = 1.0 / (1.0 - momentum_); + auto momentum_factor = 1 / (1 - momentum_); std::vector orig_trainer; samples_trainer->SaveTrainingDump(LIGHT, *this, &orig_trainer); for (int i = 0; i < num_layers; ++i) { @@ -687,7 +721,7 @@ int LSTMTrainer::ReduceLayerLearningRates(double factor, int num_samples, // Which way will we modify the learning rate? for (int ww = 0; ww < LR_COUNT; ++ww) { // Transfer momentum to learning rate and adjust by the ww factor. - float ww_factor = momentum_factor; + auto ww_factor = momentum_factor; if (ww == LR_DOWN) { ww_factor *= factor; } @@ -706,7 +740,8 @@ int LSTMTrainer::ReduceLayerLearningRates(double factor, int num_samples, copy_trainer.SetIteration(iteration); // Train on the sample, but keep the update in updates_ instead of // applying to the weights. - const ImageData *trainingdata = copy_trainer.TrainOnLine(samples_trainer, true); + const ImageData *trainingdata = + copy_trainer.TrainOnLine(samples_trainer, true); if (trainingdata == nullptr) { continue; } @@ -721,7 +756,8 @@ int LSTMTrainer::ReduceLayerLearningRates(double factor, int num_samples, samples_trainer->ReadTrainingDump(updated_trainer, layer_trainer); Network *layer = layer_trainer.GetLayer(layers[i]); // Update the weights in just the layer, using Adam if enabled. 
- layer->Update(0.0, momentum_, adam_beta_, layer_trainer.training_iteration_ + 1); + layer->Update(0.0, momentum_, adam_beta_, + layer_trainer.training_iteration_ + 1); // Zero the updates matrix again. layer->Update(0.0, 0.0, 0.0, 0); // Train again on the same sample, again holding back the updates. @@ -729,9 +765,10 @@ int LSTMTrainer::ReduceLayerLearningRates(double factor, int num_samples, // Count the sign changes in the updates in layer vs in copy_trainer. float before_bad = bad_sums[ww][i]; float before_ok = ok_sums[ww][i]; - layer->CountAlternators(*copy_trainer.GetLayer(layers[i]), &ok_sums[ww][i], - &bad_sums[ww][i]); - float bad_frac = bad_sums[ww][i] + ok_sums[ww][i] - before_bad - before_ok; + layer->CountAlternators(*copy_trainer.GetLayer(layers[i]), + &ok_sums[ww][i], &bad_sums[ww][i]); + float bad_frac = + bad_sums[ww][i] + ok_sums[ww][i] - before_bad - before_ok; if (bad_frac > 0.0f) { bad_frac = (bad_sums[ww][i] - before_bad) / bad_frac; } @@ -746,12 +783,12 @@ int LSTMTrainer::ReduceLayerLearningRates(double factor, int num_samples, } Network *layer = GetLayer(layers[i]); float lr = GetLayerLearningRate(layers[i]); - double total_down = bad_sums[LR_DOWN][i] + ok_sums[LR_DOWN][i]; - double total_same = bad_sums[LR_SAME][i] + ok_sums[LR_SAME][i]; - double frac_down = bad_sums[LR_DOWN][i] / total_down; - double frac_same = bad_sums[LR_SAME][i] / total_same; - tprintf("Layer %d=%s: lr %g->%g%%, lr %g->%g%%", i, layer->name().c_str(), lr * factor, - 100.0 * frac_down, lr, 100.0 * frac_same); + TFloat total_down = bad_sums[LR_DOWN][i] + ok_sums[LR_DOWN][i]; + TFloat total_same = bad_sums[LR_SAME][i] + ok_sums[LR_SAME][i]; + TFloat frac_down = bad_sums[LR_DOWN][i] / total_down; + TFloat frac_same = bad_sums[LR_SAME][i] / total_same; + tprintf("Layer %d=%s: lr %g->%g%%, lr %g->%g%%", i, layer->name().c_str(), + lr * factor, 100.0 * frac_down, lr, 100.0 * frac_same); if (frac_down < frac_same * kImprovementFraction) { tprintf(" REDUCED\n"); 
ScaleLayerLearningRate(layers[i], factor); @@ -775,9 +812,10 @@ int LSTMTrainer::ReduceLayerLearningRates(double factor, int num_samples, // Converts the string to integer class labels, with appropriate null_char_s // in between if not in SimpleTextOutput mode. Returns false on failure. /* static */ -bool LSTMTrainer::EncodeString(const std::string &str, const UNICHARSET &unicharset, - const UnicharCompress *recoder, bool simple_text, int null_char, - std::vector *labels) { +bool LSTMTrainer::EncodeString(const std::string &str, + const UNICHARSET &unicharset, + const UnicharCompress *recoder, bool simple_text, + int null_char, std::vector *labels) { if (str.c_str() == nullptr || str.length() <= 0) { tprintf("Empty truth string!\n"); return false; @@ -789,7 +827,8 @@ bool LSTMTrainer::EncodeString(const std::string &str, const UNICHARSET &unichar labels->push_back(null_char); } std::string cleaned = unicharset.CleanupString(str.c_str()); - if (unicharset.encode_string(cleaned.c_str(), true, &internal_labels, nullptr, &err_index)) { + if (unicharset.encode_string(cleaned.c_str(), true, &internal_labels, nullptr, + &err_index)) { bool success = true; for (auto internal_label : internal_labels) { if (recoder != nullptr) { @@ -829,19 +868,23 @@ bool LSTMTrainer::EncodeString(const std::string &str, const UNICHARSET &unichar // Performs forward-backward on the given trainingdata. // Returns a Trainability enum to indicate the suitability of the sample. -Trainability LSTMTrainer::TrainOnLine(const ImageData *trainingdata, bool batch) { +Trainability LSTMTrainer::TrainOnLine(const ImageData *trainingdata, + bool batch) { NetworkIO fwd_outputs, targets; - Trainability trainable = PrepareForBackward(trainingdata, &fwd_outputs, &targets); + Trainability trainable = + PrepareForBackward(trainingdata, &fwd_outputs, &targets); ++sample_iteration_; if (trainable == UNENCODABLE || trainable == NOT_BOXED) { return trainable; // Sample was unusable. 
} - bool debug = debug_interval_ > 0 && training_iteration() % debug_interval_ == 0; + bool debug = + debug_interval_ > 0 && training_iteration() % debug_interval_ == 0; // Run backprop on the output. NetworkIO bp_deltas; if (network_->IsTraining() && (trainable != PERFECT || - training_iteration() > last_perfect_training_iteration_ + perfect_delay_)) { + training_iteration() > + last_perfect_training_iteration_ + perfect_delay_)) { network_->Backward(debug, targets, &scratch_space_, &bp_deltas); network_->Update(learning_rate_, batch ? -1.0f : momentum_, adam_beta_, training_iteration_ + 1); @@ -858,18 +901,21 @@ Trainability LSTMTrainer::TrainOnLine(const ImageData *trainingdata, bool batch) // Prepares the ground truth, runs forward, and prepares the targets. // Returns a Trainability enum to indicate the suitability of the sample. -Trainability LSTMTrainer::PrepareForBackward(const ImageData *trainingdata, NetworkIO *fwd_outputs, +Trainability LSTMTrainer::PrepareForBackward(const ImageData *trainingdata, + NetworkIO *fwd_outputs, NetworkIO *targets) { if (trainingdata == nullptr) { tprintf("Null trainingdata.\n"); return UNENCODABLE; } // Ensure repeatability of random elements even across checkpoints. 
- bool debug = debug_interval_ > 0 && training_iteration() % debug_interval_ == 0; + bool debug = + debug_interval_ > 0 && training_iteration() % debug_interval_ == 0; std::vector truth_labels; if (!EncodeString(trainingdata->transcription(), &truth_labels)) { tprintf("Can't encode transcription: '%s' in language '%s'\n", - trainingdata->transcription().c_str(), trainingdata->language().c_str()); + trainingdata->transcription().c_str(), + trainingdata->language().c_str()); return UNENCODABLE; } bool upside_down = false; @@ -879,7 +925,7 @@ Trainability LSTMTrainer::PrepareForBackward(const ImageData *trainingdata, Netw upside_down = randomizer_.SignedRand(1.0) > 0.0; if (upside_down) { // Modify the truth labels to match the rotation: - // Apart from space and null, increment the label. This is changes the + // Apart from space and null, increment the label. This changes the // script-id to the same script-id but upside-down. // The labels need to be reversed in order, as the first is now the last. 
for (auto truth_label : truth_labels) { @@ -902,8 +948,8 @@ Trainability LSTMTrainer::PrepareForBackward(const ImageData *trainingdata, Netw float image_scale; NetworkIO inputs; bool invert = trainingdata->boxes().empty(); - if (!RecognizeLine(*trainingdata, invert, debug, invert, upside_down, &image_scale, &inputs, - fwd_outputs)) { + if (!RecognizeLine(*trainingdata, invert, debug, invert, upside_down, + &image_scale, &inputs, fwd_outputs)) { tprintf("Image %s not trainable\n", trainingdata->imagefilename().c_str()); return UNENCODABLE; } @@ -911,12 +957,14 @@ Trainability LSTMTrainer::PrepareForBackward(const ImageData *trainingdata, Netw LossType loss_type = OutputLossType(); if (loss_type == LT_SOFTMAX) { if (!ComputeTextTargets(*fwd_outputs, truth_labels, targets)) { - tprintf("Compute simple targets failed for %s!\n", trainingdata->imagefilename().c_str()); + tprintf("Compute simple targets failed for %s!\n", + trainingdata->imagefilename().c_str()); return UNENCODABLE; } } else if (loss_type == LT_CTC) { if (!ComputeCTCTargets(truth_labels, fwd_outputs, targets)) { - tprintf("Compute CTC targets failed for %s!\n", trainingdata->imagefilename().c_str()); + tprintf("Compute CTC targets failed for %s!\n", + trainingdata->imagefilename().c_str()); return UNENCODABLE; } } else { @@ -930,7 +978,8 @@ Trainability LSTMTrainer::PrepareForBackward(const ImageData *trainingdata, Netw if (loss_type != LT_CTC) { LabelsFromOutputs(*targets, &truth_labels, &xcoords); } - if (!DebugLSTMTraining(inputs, *trainingdata, *fwd_outputs, truth_labels, *targets)) { + if (!DebugLSTMTraining(inputs, *trainingdata, *fwd_outputs, truth_labels, + *targets)) { tprintf("Input width was %d\n", inputs.Width()); return UNENCODABLE; } @@ -939,7 +988,8 @@ Trainability LSTMTrainer::PrepareForBackward(const ImageData *trainingdata, Netw targets->SubtractAllFromFloat(*fwd_outputs); if (debug_interval_ != 0) { if (truth_text != ocr_text) { - tprintf("Iteration %d: BEST OCR TEXT : %s\n", 
training_iteration(), ocr_text.c_str()); + tprintf("Iteration %d: BEST OCR TEXT : %s\n", training_iteration(), + ocr_text.c_str()); } } double char_error = ComputeCharError(truth_labels, ocr_labels); @@ -962,7 +1012,8 @@ Trainability LSTMTrainer::PrepareForBackward(const ImageData *trainingdata, Netw // restored. *this must always be the master trainer that retains the only // copy of the training data and language model. trainer is the model that is // actually serialized. -bool LSTMTrainer::SaveTrainingDump(SerializeAmount serialize_amount, const LSTMTrainer &trainer, +bool LSTMTrainer::SaveTrainingDump(SerializeAmount serialize_amount, + const LSTMTrainer &trainer, std::vector *data) const { TFile fp; fp.OpenWrite(data); @@ -970,7 +1021,8 @@ bool LSTMTrainer::SaveTrainingDump(SerializeAmount serialize_amount, const LSTMT } // Restores the model to *this. -bool LSTMTrainer::ReadLocalTrainingDump(const TessdataManager *mgr, const char *data, int size) { +bool LSTMTrainer::ReadLocalTrainingDump(const TessdataManager *mgr, + const char *data, int size) { if (size == 0) { tprintf("Warning: data size is 0 in LSTMTrainer::ReadLocalTrainingDump\n"); return false; @@ -984,7 +1036,8 @@ bool LSTMTrainer::ReadLocalTrainingDump(const TessdataManager *mgr, const char * bool LSTMTrainer::SaveTraineddata(const char *filename) { std::vector recognizer_data; SaveRecognitionDump(&recognizer_data); - mgr_.OverwriteEntry(TESSDATA_LSTM, &recognizer_data[0], recognizer_data.size()); + mgr_.OverwriteEntry(TESSDATA_LSTM, &recognizer_data[0], + recognizer_data.size()); return mgr_.SaveFile(filename, SaveDataToFile); } @@ -1019,8 +1072,8 @@ void LSTMTrainer::FillErrorBuffer(double new_error, ErrorTypes type) { // Helper generates a map from each current recoder_ code (ie softmax index) // to the corresponding old_recoder code, or -1 if there isn't one. 
-std::vector LSTMTrainer::MapRecoder(const UNICHARSET &old_chset, - const UnicharCompress &old_recoder) const { +std::vector LSTMTrainer::MapRecoder( + const UNICHARSET &old_chset, const UnicharCompress &old_recoder) const { int num_new_codes = recoder_.code_range(); int num_new_unichars = GetUnicharset().size(); std::vector code_map(num_new_codes, -1); @@ -1039,9 +1092,10 @@ std::vector LSTMTrainer::MapRecoder(const UNICHARSET &old_chset, continue; } // The old unicharset must have the same unichar. - int old_uid = uid < num_new_unichars - ? old_chset.unichar_to_id(GetUnicharset().id_to_unichar(uid)) - : old_chset.size() - 1; + int old_uid = + uid < num_new_unichars + ? old_chset.unichar_to_id(GetUnicharset().id_to_unichar(uid)) + : old_chset.size() - 1; if (old_uid == INVALID_UNICHAR_ID) { continue; } @@ -1073,7 +1127,8 @@ void LSTMTrainer::InitCharSet() { // Helper computes and sets the null_char_. void LSTMTrainer::SetNullChar() { - null_char_ = GetUnicharset().has_special_codes() ? UNICHAR_BROKEN : GetUnicharset().size(); + null_char_ = GetUnicharset().has_special_codes() ? UNICHAR_BROKEN + : GetUnicharset().size(); RecodedCharID code; recoder_.EncodeUnichar(null_char_, &code); null_char_ = code(0); @@ -1097,7 +1152,8 @@ void LSTMTrainer::EmptyConstructor() { // as an image in the given window, and the corresponding labels at the // corresponding x_starts. // Returns false if the truth string is empty. 
-bool LSTMTrainer::DebugLSTMTraining(const NetworkIO &inputs, const ImageData &trainingdata, +bool LSTMTrainer::DebugLSTMTraining(const NetworkIO &inputs, + const ImageData &trainingdata, const NetworkIO &fwd_outputs, const std::vector &truth_labels, const NetworkIO &outputs) { @@ -1112,12 +1168,15 @@ bool LSTMTrainer::DebugLSTMTraining(const NetworkIO &inputs, const ImageData &tr std::vector xcoords; LabelsFromOutputs(outputs, &labels, &xcoords); std::string text = DecodeLabels(labels); - tprintf("Iteration %d: GROUND TRUTH : %s\n", training_iteration(), truth_text.c_str()); + tprintf("Iteration %d: GROUND TRUTH : %s\n", training_iteration(), + truth_text.c_str()); if (truth_text != text) { - tprintf("Iteration %d: ALIGNED TRUTH : %s\n", training_iteration(), text.c_str()); + tprintf("Iteration %d: ALIGNED TRUTH : %s\n", training_iteration(), + text.c_str()); } if (debug_interval_ > 0 && training_iteration() % debug_interval_ == 0) { - tprintf("TRAINING activation path for truth string %s\n", truth_text.c_str()); + tprintf("TRAINING activation path for truth string %s\n", + truth_text.c_str()); DebugActivationPath(outputs, labels, xcoords); #ifndef GRAPHICS_DISABLED DisplayForward(inputs, labels, xcoords, "LSTMTraining", &align_win_); @@ -1134,11 +1193,12 @@ bool LSTMTrainer::DebugLSTMTraining(const NetworkIO &inputs, const ImageData &tr #ifndef GRAPHICS_DISABLED // Displays the network targets as line a line graph. 
-void LSTMTrainer::DisplayTargets(const NetworkIO &targets, const char *window_name, - ScrollView **window) { +void LSTMTrainer::DisplayTargets(const NetworkIO &targets, + const char *window_name, ScrollView **window) { int width = targets.Width(); int num_features = targets.NumFeatures(); - Network::ClearWindow(true, window_name, width * kTargetXScale, kTargetYScale, window); + Network::ClearWindow(true, window_name, width * kTargetXScale, kTargetYScale, + window); for (int c = 0; c < num_features; ++c) { int color = c % (ScrollView::GREEN_YELLOW - 1) + 2; (*window)->Pen(static_cast(color)); @@ -1170,7 +1230,8 @@ void LSTMTrainer::DisplayTargets(const NetworkIO &targets, const char *window_na // Builds a no-compromises target where the first positions should be the // truth labels and the rest is padded with the null_char_. -bool LSTMTrainer::ComputeTextTargets(const NetworkIO &outputs, const std::vector &truth_labels, +bool LSTMTrainer::ComputeTextTargets(const NetworkIO &outputs, + const std::vector &truth_labels, NetworkIO *targets) { if (truth_labels.size() > targets->Width()) { tprintf("Error: transcription %s too long to fit into target of width %d\n", @@ -1191,18 +1252,19 @@ bool LSTMTrainer::ComputeTextTargets(const NetworkIO &outputs, const std::vector // Builds a target using standard CTC. truth_labels should be pre-padded with // nulls wherever desired. They don't have to be between all labels. // outputs is input-output, as it gets clipped to minimum probability. -bool LSTMTrainer::ComputeCTCTargets(const std::vector &truth_labels, NetworkIO *outputs, - NetworkIO *targets) { +bool LSTMTrainer::ComputeCTCTargets(const std::vector &truth_labels, + NetworkIO *outputs, NetworkIO *targets) { // Bottom-clip outputs to a minimum probability. 
CTC::NormalizeProbs(outputs); - return CTC::ComputeCTCTargets(truth_labels, null_char_, outputs->float_array(), targets); + return CTC::ComputeCTCTargets(truth_labels, null_char_, + outputs->float_array(), targets); } // Computes network errors, and stores the results in the rolling buffers, // along with the supplied text_error. // Returns the delta error of the current sample (not running average.) -double LSTMTrainer::ComputeErrorRates(const NetworkIO &deltas, double char_error, - double word_error) { +double LSTMTrainer::ComputeErrorRates(const NetworkIO &deltas, + double char_error, double word_error) { UpdateErrorBuffer(ComputeRMSError(deltas), ET_RMS); // Delta error is the fraction of timesteps with >0.5 error in the top choice // score. If zero, then the top choice characters are guaranteed correct, @@ -1247,7 +1309,7 @@ double LSTMTrainer::ComputeWinnerError(const NetworkIO &deltas) { for (int t = 0; t < width; ++t) { const float *class_errs = deltas.f(t); for (int c = 0; c < num_classes; ++c) { - float abs_delta = fabs(class_errs[c]); + float abs_delta = std::fabs(class_errs[c]); // TODO(rays) Filtering cases where the delta is very large to cut out // GT errors doesn't work. Find a better way or get better truth. if (0.5 <= abs_delta) { @@ -1261,9 +1323,8 @@ double LSTMTrainer::ComputeWinnerError(const NetworkIO &deltas) { // Computes a very simple bag of chars char error rate. 
double LSTMTrainer::ComputeCharError(const std::vector &truth_str, const std::vector &ocr_str) { - std::vector label_counts; - label_counts.resize(NumOutputs(), 0); - int truth_size = 0; + std::vector label_counts(NumOutputs()); + unsigned truth_size = 0; for (auto ch : truth_str) { if (ch != null_char_) { ++label_counts[ch]; @@ -1275,11 +1336,12 @@ double LSTMTrainer::ComputeCharError(const std::vector &truth_str, --label_counts[ch]; } } - int char_errors = 0; + unsigned char_errors = 0; for (auto label_count : label_counts) { char_errors += abs(label_count); } - if (truth_size == 0) { + // Limit BCER to interval [0,1] and avoid division by zero. + if (truth_size <= char_errors) { return (char_errors == 0) ? 0.0 : 1.0; } return static_cast(char_errors) / truth_size; @@ -1287,7 +1349,8 @@ double LSTMTrainer::ComputeCharError(const std::vector &truth_str, // Computes word recall error rate using a very simple bag of words algorithm. // NOTE that this is destructive on both input strings. 
-double LSTMTrainer::ComputeWordError(std::string *truth_str, std::string *ocr_str) { +double LSTMTrainer::ComputeWordError(std::string *truth_str, + std::string *ocr_str) { using StrMap = std::unordered_map>; std::vector truth_words = split(*truth_str, ' '); if (truth_words.empty()) { @@ -1295,7 +1358,7 @@ double LSTMTrainer::ComputeWordError(std::string *truth_str, std::string *ocr_st } std::vector ocr_words = split(*ocr_str, ' '); StrMap word_counts; - for (auto truth_word : truth_words) { + for (const auto &truth_word : truth_words) { std::string truth_word_string(truth_word.c_str()); auto it = word_counts.find(truth_word_string); if (it == word_counts.end()) { @@ -1304,7 +1367,7 @@ double LSTMTrainer::ComputeWordError(std::string *truth_str, std::string *ocr_st ++it->second; } } - for (auto ocr_word : ocr_words) { + for (const auto &ocr_word : ocr_words) { std::string ocr_word_string(ocr_word.c_str()); auto it = word_counts.find(ocr_word_string); if (it == word_counts.end()) { @@ -1328,7 +1391,8 @@ void LSTMTrainer::UpdateErrorBuffer(double new_error, ErrorTypes type) { int index = training_iteration_ % kRollingBufferSize_; error_buffers_[type][index] = new_error; // Compute the mean error. 
- int mean_count = std::min(training_iteration_ + 1, error_buffers_[type].size()); + int mean_count = + std::min(training_iteration_ + 1, error_buffers_[type].size()); double buffer_sum = 0.0; for (int i = 0; i < mean_count; ++i) { buffer_sum += error_buffers_[type][i]; @@ -1348,8 +1412,9 @@ void LSTMTrainer::RollErrorBuffers() { } ++training_iteration_; if (debug_interval_ != 0) { - tprintf("Mean rms=%g%%, delta=%g%%, train=%g%%(%g%%), skip ratio=%g%%\n", error_rates_[ET_RMS], - error_rates_[ET_DELTA], error_rates_[ET_CHAR_ERROR], error_rates_[ET_WORD_RECERR], + tprintf("Mean rms=%g%%, delta=%g%%, train=%g%%(%g%%), skip ratio=%g%%\n", + error_rates_[ET_RMS], error_rates_[ET_DELTA], + error_rates_[ET_CHAR_ERROR], error_rates_[ET_WORD_RECERR], error_rates_[ET_SKIP_RATIO]); } } @@ -1359,11 +1424,14 @@ void LSTMTrainer::RollErrorBuffers() { // Tester is an externally supplied callback function that tests on some // data set with a given model and records the error rates in a graph. std::string LSTMTrainer::UpdateErrorGraph(int iteration, double error_rate, - const std::vector &model_data, TestCallback tester) { - if (error_rate > best_error_rate_ && iteration < best_iteration_ + kErrorGraphInterval) { + const std::vector &model_data, + const TestCallback &tester) { + if (error_rate > best_error_rate_ && + iteration < best_iteration_ + kErrorGraphInterval) { // Too soon to record a new point. if (tester != nullptr && !worst_model_data_.empty()) { - mgr_.OverwriteEntry(TESSDATA_LSTM, &worst_model_data_[0], worst_model_data_.size()); + mgr_.OverwriteEntry(TESSDATA_LSTM, &worst_model_data_[0], + worst_model_data_.size()); return tester(worst_iteration_, nullptr, mgr_, CurrentTrainingStage()); } else { return ""; @@ -1379,8 +1447,10 @@ std::string LSTMTrainer::UpdateErrorGraph(int iteration, double error_rate, if (error_rate < best_error_rate_) { // This is a new (global) minimum. 
if (tester != nullptr && !worst_model_data_.empty()) { - mgr_.OverwriteEntry(TESSDATA_LSTM, &worst_model_data_[0], worst_model_data_.size()); - result = tester(worst_iteration_, worst_error_rates_, mgr_, CurrentTrainingStage()); + mgr_.OverwriteEntry(TESSDATA_LSTM, &worst_model_data_[0], + worst_model_data_.size()); + result = tester(worst_iteration_, worst_error_rates_, mgr_, + CurrentTrainingStage()); worst_model_data_.clear(); best_model_data_ = model_data; } @@ -1392,23 +1462,28 @@ std::string LSTMTrainer::UpdateErrorGraph(int iteration, double error_rate, // Compute 2% decay time. double two_percent_more = error_rate + 2.0; int i; - for (i = best_error_history_.size() - 1; i >= 0 && best_error_history_[i] < two_percent_more; - --i) { + for (i = best_error_history_.size() - 1; + i >= 0 && best_error_history_[i] < two_percent_more; --i) { } int old_iteration = i >= 0 ? best_error_iterations_[i] : 0; improvement_steps_ = iteration - old_iteration; - tprintf("2 Percent improvement time=%d, best error was %g @ %d\n", improvement_steps_, - i >= 0 ? best_error_history_[i] : 100.0, old_iteration); + tprintf("2 Percent improvement time=%d, best error was %g @ %d\n", + improvement_steps_, i >= 0 ? best_error_history_[i] : 100.0, + old_iteration); } else if (error_rate > best_error_rate_) { // This is a new (local) maximum. if (tester != nullptr) { if (!best_model_data_.empty()) { - mgr_.OverwriteEntry(TESSDATA_LSTM, &best_model_data_[0], best_model_data_.size()); - result = tester(best_iteration_, best_error_rates_, mgr_, CurrentTrainingStage()); + mgr_.OverwriteEntry(TESSDATA_LSTM, &best_model_data_[0], + best_model_data_.size()); + result = tester(best_iteration_, best_error_rates_, mgr_, + CurrentTrainingStage()); } else if (!worst_model_data_.empty()) { // Allow for multiple data points with "worst" error rate. 
- mgr_.OverwriteEntry(TESSDATA_LSTM, &worst_model_data_[0], worst_model_data_.size()); - result = tester(worst_iteration_, worst_error_rates_, mgr_, CurrentTrainingStage()); + mgr_.OverwriteEntry(TESSDATA_LSTM, &worst_model_data_[0], + worst_model_data_.size()); + result = tester(worst_iteration_, worst_error_rates_, mgr_, + CurrentTrainingStage()); } if (result.length() > 0) { best_model_data_.clear(); diff --git a/src/training/unicharset/lstmtrainer.h b/src/training/unicharset/lstmtrainer.h index a57c81908..be309195e 100644 --- a/src/training/unicharset/lstmtrainer.h +++ b/src/training/unicharset/lstmtrainer.h @@ -73,7 +73,8 @@ class LSTMTrainer; // Function to compute and record error rates on some external test set(s). // Args are: iteration, mean errors, model, training stage. // Returns a string containing logging information about the tests. -using TestCallback = std::function; +using TestCallback = std::function; // Trainer class for LSTM networks. Most of the effort is in creating the // ideal target outputs from the transcription. A box file is used if it is @@ -82,8 +83,8 @@ using TestCallback = std::function &filenames, CachingStrategy cache_strategy, + bool LoadAllTrainingData(const std::vector &filenames, + CachingStrategy cache_strategy, bool randomly_rotate); // Keeps track of best and locally worst error rate, using internally computed // values. See MaintainCheckpointsSpecific for more detail. - bool MaintainCheckpoints(TestCallback tester, std::string &log_msg); + bool MaintainCheckpoints(const TestCallback &tester, std::string &log_msg); // Keeps track of best and locally worst error_rate (whatever it is) and // launches tests using rec_model, when a new min or max is reached. // Writes checkpoints using train_model at appropriate times and builds and // returns a log message to indicate progress. Returns false if nothing // interesting happened. 
- bool MaintainCheckpointsSpecific(int iteration, const std::vector *train_model, - const std::vector *rec_model, TestCallback tester, - std::string &log_msg); + bool MaintainCheckpointsSpecific(int iteration, + const std::vector *train_model, + const std::vector *rec_model, + TestCallback tester, std::string &log_msg); // Builds a string containing a progress message with current error rates. void PrepareLogMsg(std::string &log_msg) const; // Appends iteration learning_iteration()/training_iteration()/ @@ -211,7 +218,8 @@ public: } // Writes to the given file. Returns false in case of error. - bool Serialize(SerializeAmount serialize_amount, const TessdataManager *mgr, TFile *fp) const; + bool Serialize(SerializeAmount serialize_amount, const TessdataManager *mgr, + TFile *fp) const; // Reads from the given file. Returns false in case of error. bool DeSerialize(const TessdataManager *mgr, TFile *fp); @@ -237,18 +245,20 @@ public: // Even if it looks like all weights should remain the same, an adjustment // will be made to guarantee a different result when reverting to an old best. // Returns the number of layer learning rates that were reduced. - int ReduceLayerLearningRates(double factor, int num_samples, LSTMTrainer *samples_trainer); + int ReduceLayerLearningRates(TFloat factor, int num_samples, + LSTMTrainer *samples_trainer); // Converts the string to integer class labels, with appropriate null_char_s // in between if not in SimpleTextOutput mode. Returns false on failure. bool EncodeString(const std::string &str, std::vector *labels) const { - return EncodeString(str, GetUnicharset(), IsRecoding() ? &recoder_ : nullptr, - SimpleTextOutput(), null_char_, labels); + return EncodeString(str, GetUnicharset(), + IsRecoding() ? &recoder_ : nullptr, SimpleTextOutput(), + null_char_, labels); } // Static version operates on supplied unicharset, encoder, simple_text. 
static bool EncodeString(const std::string &str, const UNICHARSET &unicharset, - const UnicharCompress *recoder, bool simple_text, int null_char, - std::vector *labels); + const UnicharCompress *recoder, bool simple_text, + int null_char, std::vector *labels); // Performs forward-backward on the given trainingdata. // Returns the sample that was used or nullptr if the next sample was deemed @@ -256,7 +266,8 @@ public: // holds the training samples. const ImageData *TrainOnLine(LSTMTrainer *samples_trainer, bool batch) { int sample_index = sample_iteration(); - const ImageData *image = samples_trainer->training_data_.GetPageBySerial(sample_index); + const ImageData *image = + samples_trainer->training_data_.GetPageBySerial(sample_index); if (image != nullptr) { Trainability trainable = TrainOnLine(image, batch); if (trainable == UNENCODABLE || trainable == NOT_BOXED) { @@ -271,30 +282,34 @@ public: // Prepares the ground truth, runs forward, and prepares the targets. // Returns a Trainability enum to indicate the suitability of the sample. - Trainability PrepareForBackward(const ImageData *trainingdata, NetworkIO *fwd_outputs, - NetworkIO *targets); + Trainability PrepareForBackward(const ImageData *trainingdata, + NetworkIO *fwd_outputs, NetworkIO *targets); // Writes the trainer to memory, so that the current training state can be // restored. *this must always be the master trainer that retains the only // copy of the training data and language model. trainer is the model that is // actually serialized. - bool SaveTrainingDump(SerializeAmount serialize_amount, const LSTMTrainer &trainer, + bool SaveTrainingDump(SerializeAmount serialize_amount, + const LSTMTrainer &trainer, std::vector *data) const; // Reads previously saved trainer from memory. *this must always be the // master trainer that retains the only copy of the training data and // language model. trainer is the model that is restored. 
- bool ReadTrainingDump(const std::vector &data, LSTMTrainer &trainer) const { + bool ReadTrainingDump(const std::vector &data, + LSTMTrainer &trainer) const { if (data.empty()) { return false; } return ReadSizedTrainingDump(&data[0], data.size(), trainer); } - bool ReadSizedTrainingDump(const char *data, int size, LSTMTrainer &trainer) const { + bool ReadSizedTrainingDump(const char *data, int size, + LSTMTrainer &trainer) const { return trainer.ReadLocalTrainingDump(&mgr_, data, size); } // Restores the model to *this. - bool ReadLocalTrainingDump(const TessdataManager *mgr, const char *data, int size); + bool ReadLocalTrainingDump(const TessdataManager *mgr, const char *data, + int size); // Sets up the data for MaintainCheckpoints from a light ReadTrainingDump. void SetupCheckpointInfo(); @@ -331,26 +346,30 @@ protected: // corresponding x_starts. // Returns false if the truth string is empty. bool DebugLSTMTraining(const NetworkIO &inputs, const ImageData &trainingdata, - const NetworkIO &fwd_outputs, const std::vector &truth_labels, + const NetworkIO &fwd_outputs, + const std::vector &truth_labels, const NetworkIO &outputs); // Displays the network targets as line a line graph. - void DisplayTargets(const NetworkIO &targets, const char *window_name, ScrollView **window); + void DisplayTargets(const NetworkIO &targets, const char *window_name, + ScrollView **window); // Builds a no-compromises target where the first positions should be the // truth labels and the rest is padded with the null_char_. - bool ComputeTextTargets(const NetworkIO &outputs, const std::vector &truth_labels, + bool ComputeTextTargets(const NetworkIO &outputs, + const std::vector &truth_labels, NetworkIO *targets); // Builds a target using standard CTC. truth_labels should be pre-padded with // nulls wherever desired. They don't have to be between all labels. // outputs is input-output, as it gets clipped to minimum probability. 
- bool ComputeCTCTargets(const std::vector &truth_labels, NetworkIO *outputs, - NetworkIO *targets); + bool ComputeCTCTargets(const std::vector &truth_labels, + NetworkIO *outputs, NetworkIO *targets); // Computes network errors, and stores the results in the rolling buffers, // along with the supplied text_error. // Returns the delta error of the current sample (not running average.) - double ComputeErrorRates(const NetworkIO &deltas, double char_error, double word_error); + double ComputeErrorRates(const NetworkIO &deltas, double char_error, + double word_error); // Computes the network activation RMS error rate. double ComputeRMSError(const NetworkIO &deltas); @@ -363,7 +382,8 @@ protected: double ComputeWinnerError(const NetworkIO &deltas); // Computes a very simple bag of chars char error rate. - double ComputeCharError(const std::vector &truth_str, const std::vector &ocr_str); + double ComputeCharError(const std::vector &truth_str, + const std::vector &ocr_str); // Computes a very simple bag of words word recall error rate. // NOTE that this is destructive on both input strings. double ComputeWordError(std::string *truth_str, std::string *ocr_str); @@ -377,8 +397,9 @@ protected: // Given that error_rate is either a new min or max, updates the best/worst // error rates, and record of progress. 
- std::string UpdateErrorGraph(int iteration, double error_rate, const std::vector &model_data, - TestCallback tester); + std::string UpdateErrorGraph(int iteration, double error_rate, + const std::vector &model_data, + const TestCallback &tester); protected: #ifndef GRAPHICS_DISABLED diff --git a/src/training/unicharset/unicharset_training_utils.cpp b/src/training/unicharset/unicharset_training_utils.cpp index aeaa30e91..63075345c 100644 --- a/src/training/unicharset/unicharset_training_utils.cpp +++ b/src/training/unicharset/unicharset_training_utils.cpp @@ -128,7 +128,7 @@ void SetupBasicProperties(bool report_errors, bool decompose, UNICHARSET *unicha std::string normed_str; if (unichar_id != 0 && tesseract::NormalizeUTF8String( - decompose ? tesseract::UnicodeNormMode::kNFKD : tesseract::UnicodeNormMode::kNFKC, + decompose ? tesseract::UnicodeNormMode::kNFD : tesseract::UnicodeNormMode::kNFC, tesseract::OCRNorm::kNormalize, tesseract::GraphemeNorm::kNone, unichar_str, &normed_str) && !normed_str.empty()) { @@ -189,7 +189,7 @@ void SetPropertiesForInputFile(const std::string &script_dir, // Load the input unicharset unicharset.load_from_file(input_unicharset_file.c_str()); - tprintf("Loaded unicharset of size %d from file %s\n", unicharset.size(), + tprintf("Loaded unicharset of size %zu from file %s\n", unicharset.size(), input_unicharset_file.c_str()); // Set unichar properties diff --git a/src/training/unicharset/unicharset_training_utils.h b/src/training/unicharset/unicharset_training_utils.h index 7a070bbc4..622e7b186 100644 --- a/src/training/unicharset/unicharset_training_utils.h +++ b/src/training/unicharset/unicharset_training_utils.h @@ -2,7 +2,6 @@ // File: unicharset_training_utils.h // Description: Training utilities for UNICHARSET. // Author: Ray Smith -// Created: Fri Oct 17 17:14:01 PDT 2014 // // (C) Copyright 2014, Google Inc. 
// Licensed under the Apache License, Version 2.0 (the "License"); @@ -28,7 +27,6 @@ namespace tesseract { -class STATS; class UNICHARSET; // Helper sets the character attribute properties and sets up the script table. diff --git a/src/training/unicharset/validate_indic.cpp b/src/training/unicharset/validate_indic.cpp index f0c40d918..1ee5322ed 100644 --- a/src/training/unicharset/validate_indic.cpp +++ b/src/training/unicharset/validate_indic.cpp @@ -35,7 +35,8 @@ bool ValidateIndic::ConsumeGraphemeIfValid() { return true; default: if (report_errors_) { - tprintf("Invalid start of grapheme sequence:%c=0x%x\n", codes_[codes_used_].first, + tprintf("Invalid start of grapheme sequence:%c=0x%x\n", + static_cast(codes_[codes_used_].first), codes_[codes_used_].second); } return false; diff --git a/src/training/unicharset/validate_javanese.cpp b/src/training/unicharset/validate_javanese.cpp index 44c8c5878..422c0e7c4 100644 --- a/src/training/unicharset/validate_javanese.cpp +++ b/src/training/unicharset/validate_javanese.cpp @@ -56,7 +56,8 @@ bool ValidateJavanese::ConsumeGraphemeIfValid() { return true; default: if (report_errors_) { - tprintf("Invalid start of grapheme sequence:%c=0x%x\n", codes_[codes_used_].first, + tprintf("Invalid start of grapheme sequence:%c=0x%x\n", + static_cast(codes_[codes_used_].first), codes_[codes_used_].second); } return false; diff --git a/src/viewer/scrollview.cpp b/src/viewer/scrollview.cpp index 1d572036b..66fdd1df7 100644 --- a/src/viewer/scrollview.cpp +++ b/src/viewer/scrollview.cpp @@ -33,6 +33,7 @@ #include #include #include +#include // for std::unique_ptr #include // for std::mutex #include #include // for std::thread @@ -59,7 +60,7 @@ static std::map, std::paircommand_id = command_id; any->counter = counter; @@ -97,7 +98,7 @@ void ScrollView::MessageReceiver() { // the events accordingly. while (true) { // The new event we create. 
- auto *cur = new SVEvent; + std::unique_ptr cur(new SVEvent); // The ID of the corresponding window. int window_id; @@ -113,10 +114,11 @@ void ScrollView::MessageReceiver() { cur->window = svmap[window_id]; if (cur->window != nullptr) { - cur->parameter = new char[strlen(p) + 1]; + auto length = strlen(p); + cur->parameter = new char[length + 1]; strcpy(cur->parameter, p); - if (strlen(p) > 0) { // remove the last \n - cur->parameter[strlen(p)] = '\0'; + if (length > 0) { // remove the last \n + cur->parameter[length - 1] = '\0'; } cur->type = static_cast(ev_type); // Correct selection coordinates so x,y is the min pt and size is +ve. @@ -142,11 +144,12 @@ void ScrollView::MessageReceiver() { // In case of an SVET_EXIT event, quit the whole application. if (ev_type == SVET_EXIT) { - ScrollView::Exit(); + SendRawMessage("svmain:exit()"); + break; } // Place two copies of it in the table for the window. - cur->window->SetEvent(cur); + cur->window->SetEvent(cur.get()); // Check if any of the threads currently waiting want it. std::pair awaiting_list(cur->window, cur->type); @@ -155,17 +158,14 @@ void ScrollView::MessageReceiver() { SVET_ANY); waiting_for_events_mu->lock(); if (waiting_for_events.count(awaiting_list) > 0) { - waiting_for_events[awaiting_list].second = cur; + waiting_for_events[awaiting_list].second = cur.get(); waiting_for_events[awaiting_list].first->Signal(); } else if (waiting_for_events.count(awaiting_list_any) > 0) { - waiting_for_events[awaiting_list_any].second = cur; + waiting_for_events[awaiting_list_any].second = cur.get(); waiting_for_events[awaiting_list_any].first->Signal(); } else if (waiting_for_events.count(awaiting_list_any_window) > 0) { - waiting_for_events[awaiting_list_any_window].second = cur; + waiting_for_events[awaiting_list_any_window].second = cur.get(); waiting_for_events[awaiting_list_any_window].first->Signal(); - } else { - // No one wanted it, so delete it. 
- delete cur; } waiting_for_events_mu->unlock(); // Signal the corresponding semaphore twice (for both copies). @@ -174,8 +174,6 @@ void ScrollView::MessageReceiver() { sv->Signal(); sv->Signal(); } - } else { - delete cur; // Applied to no window. } svmap_mu->unlock(); @@ -425,7 +423,7 @@ void ScrollView::Signal() { semaphore_->Signal(); } -void ScrollView::SetEvent(SVEvent *svevent) { +void ScrollView::SetEvent(const SVEvent *svevent) { // Copy event SVEvent *any = svevent->copy(); SVEvent *specific = svevent->copy(); @@ -463,26 +461,6 @@ SVEvent *ScrollView::AwaitEvent(SVEventType type) { return ret; } -// Block until any event on any window is received. -// No event is returned here! -SVEvent *ScrollView::AwaitEventAnyWindow() { - // Initialize the waiting semaphore. - auto *sem = new SVSemaphore(); - std::pair ea((ScrollView *)nullptr, SVET_ANY); - waiting_for_events_mu->lock(); - waiting_for_events[ea] = std::pair(sem, (SVEvent *)nullptr); - waiting_for_events_mu->unlock(); - // Wait on it. - stream_->Flush(); - sem->Wait(); - // Process the event we got woken up for (its in waiting_for_events pair). - waiting_for_events_mu->lock(); - SVEvent *ret = waiting_for_events[ea].second; - waiting_for_events.erase(ea); - waiting_for_events_mu->unlock(); - return ret; -} - // Send the current buffered polygon (if any) and clear it. void ScrollView::SendPolygon() { if (!points_->empty) { @@ -565,7 +543,16 @@ void ScrollView::AlwaysOnTop(bool b) { } // Adds a message entry to the message box. -void ScrollView::AddMessage(const char *format, ...) { +void ScrollView::AddMessage(const char *message) { + char form[kMaxMsgSize]; + snprintf(form, sizeof(form), "w%u:%s", window_id_, message); + + char *esc = AddEscapeChars(form); + SendMsg("addMessage(\"%s\")", esc); + delete[] esc; +} + +void ScrollView::AddMessageF(const char *format, ...) { va_list args; char message[kMaxMsgSize - 4]; @@ -573,12 +560,7 @@ void ScrollView::AddMessage(const char *format, ...) 
{ vsnprintf(message, sizeof(message), format, args); va_end(args); - char form[kMaxMsgSize]; - snprintf(form, sizeof(form), "w%u:%s", window_id_, message); - - char *esc = AddEscapeChars(form); - SendMsg("addMessage(\"%s\")", esc); - delete[] esc; + AddMessage(message); } // Set a messagebox. diff --git a/src/viewer/scrollview.h b/src/viewer/scrollview.h index 69f054cd1..a11ebed91 100644 --- a/src/viewer/scrollview.h +++ b/src/viewer/scrollview.h @@ -40,6 +40,10 @@ namespace tesseract { +#if !defined(__GNUC__) && !defined(__attribute__) +# define __attribute__(attr) // compiler without support for __attribute__ +#endif + class ScrollView; class SVNetwork; class SVSemaphore; @@ -65,7 +69,7 @@ struct SVEvent { ~SVEvent() { delete[] parameter; } - SVEvent *copy(); + SVEvent *copy() const; SVEventType type = SVET_DESTROY; // What kind of event. ScrollView *window = nullptr; // Window event relates to. char *parameter = nullptr; // Any string that might have been passed as argument. @@ -184,9 +188,6 @@ public: // Block until an event of the given type is received. SVEvent *AwaitEvent(SVEventType type); - // Block until any event on any window is received. - SVEvent *AwaitEventAnyWindow(); - /******************************************************************************* * Getters and Setters *******************************************************************************/ @@ -295,7 +296,8 @@ public: // ...which can be added by this command. // This is intended as an "debug" output window. - void AddMessage(const char *format, ...); + void AddMessage(const char *message); + void AddMessageF(const char *format, ...) __attribute__((format(printf, 2, 3))); // Zoom the window to the rectangle given upper left corner and // lower right corner. @@ -308,7 +310,7 @@ public: // this just for fun will likely break your application! // It is public so you can actually take use of the LUA functionalities, but // be careful! 
- void SendMsg(const char *msg, ...); + void SendMsg(const char* msg, ...) __attribute__((format(printf, 2, 3))); // Custom messages (manipulating java code directly) can be send through this. // Send a message to the server without adding the @@ -370,7 +372,7 @@ private: static void MessageReceiver(); // Place an event into the event_table (synchronized). - void SetEvent(SVEvent *svevent); + void SetEvent(const SVEvent *svevent); // Wake up the semaphore. void Signal(); diff --git a/src/viewer/svutil.cpp b/src/viewer/svutil.cpp index e57814cef..eec8627e9 100644 --- a/src/viewer/svutil.cpp +++ b/src/viewer/svutil.cpp @@ -74,9 +74,9 @@ void SVSync::StartProcess(const char *executable, const char *args) { STARTUPINFO start_info; PROCESS_INFORMATION proc_info; GetStartupInfo(&start_info); - if (!CreateProcess(nullptr, const_cast(proc.c_str()), nullptr, nullptr, FALSE, - CREATE_NO_WINDOW | DETACHED_PROCESS, nullptr, nullptr, &start_info, - &proc_info)) + if (!CreateProcess(nullptr, const_cast(proc.c_str()), nullptr, + nullptr, FALSE, CREATE_NO_WINDOW | DETACHED_PROCESS, + nullptr, nullptr, &start_info, &proc_info)) return; # else int pid = fork(); @@ -131,13 +131,13 @@ SVSemaphore::SVSemaphore() { } SVSemaphore::~SVSemaphore() { -#ifdef _WIN32 +# ifdef _WIN32 CloseHandle(semaphore_); -#elif defined(__APPLE__) +# elif defined(__APPLE__) sem_close(semaphore_); -#else +# else sem_close(&semaphore_); -#endif +# endif } void SVSemaphore::Signal() { @@ -243,14 +243,15 @@ static const char *ScrollViewProg() { } // The arguments to the program to invoke to start ScrollView -static std::string ScrollViewCommand(std::string scrollview_path) { +static std::string ScrollViewCommand(const std::string &scrollview_path) { // The following ugly ifdef is to enable the output of the java runtime // to be sent down a black hole on non-windows to ignore all the // exceptions in piccolo. Ideally piccolo would be debugged to make // this unnecessary. 
// Also the path has to be separated by ; on windows and : otherwise. # ifdef _WIN32 - const char cmd_template[] = "-Djava.library.path=\"%s\" -jar \"%s/ScrollView.jar\""; + const char cmd_template[] = + "-Djava.library.path=\"%s\" -jar \"%s/ScrollView.jar\""; # else const char cmd_template[] = @@ -289,14 +290,15 @@ SVNetwork::SVNetwork(const char *hostname, int port) { # endif // _WIN32 if (getaddrinfo(hostname, port_string.c_str(), nullptr, &addr_info) != 0) { - std::cerr << "Error resolving name for ScrollView host " << std::string(hostname) << ":" << port - << std::endl; + std::cerr << "Error resolving name for ScrollView host " + << std::string(hostname) << ":" << port << std::endl; # ifdef _WIN32 WSACleanup(); # endif // _WIN32 } - stream_ = socket(addr_info->ai_family, addr_info->ai_socktype, addr_info->ai_protocol); + stream_ = socket(addr_info->ai_family, addr_info->ai_socktype, + addr_info->ai_protocol); if (stream_ < 0) { std::cerr << "Failed to open socket" << std::endl; @@ -324,7 +326,8 @@ SVNetwork::SVNetwork(const char *hostname, int port) { Close(); for (;;) { - stream_ = socket(addr_info->ai_family, addr_info->ai_socktype, addr_info->ai_protocol); + stream_ = socket(addr_info->ai_family, addr_info->ai_socktype, + addr_info->ai_protocol); if (stream_ >= 0) { if (connect(stream_, addr_info->ai_addr, addr_info->ai_addrlen) == 0) { break; diff --git a/src/wordrec/chop.cpp b/src/wordrec/chop.cpp index 78a042406..64db4a29b 100644 --- a/src/wordrec/chop.cpp +++ b/src/wordrec/chop.cpp @@ -111,7 +111,7 @@ int Wordrec::angle_change(EDGEPT *point1, EDGEPT *point2, EDGEPT *point3) { if (static_cast(length) == 0) { return (0); } - angle = static_cast(floor(asin(vector1.cross(vector2) / length) / M_PI * 180.0 + 0.5)); + angle = static_cast(floor(std::asin(vector1.cross(vector2) / length) / M_PI * 180.0 + 0.5)); /* Use dot product */ if (vector1.dot(vector2) < 0) { diff --git a/src/wordrec/chopper.cpp b/src/wordrec/chopper.cpp index cc57c1f23..030735f8c 100644 
--- a/src/wordrec/chopper.cpp +++ b/src/wordrec/chopper.cpp @@ -264,7 +264,7 @@ SEAM *Wordrec::chop_numbered_blob(TWERD *word, int32_t blob_number, bool italic_ } SEAM *Wordrec::chop_overlapping_blob(const std::vector &boxes, bool italic_blob, - WERD_RES *word_res, int *blob_number) { + WERD_RES *word_res, unsigned *blob_number) { TWERD *word = word_res->chopped_word; for (*blob_number = 0; *blob_number < word->NumBlobs(); ++*blob_number) { TBLOB *blob = word->blobs[*blob_number]; @@ -301,7 +301,7 @@ SEAM *Wordrec::chop_overlapping_blob(const std::vector &boxes, bool italic } } - *blob_number = -1; + *blob_number = UINT_MAX; return nullptr; } @@ -319,24 +319,25 @@ SEAM *Wordrec::chop_overlapping_blob(const std::vector &boxes, bool italic */ SEAM *Wordrec::improve_one_blob(const std::vector &blob_choices, DANGERR *fixpt, bool split_next_to_fragment, bool italic_blob, WERD_RES *word, - int *blob_number) { + unsigned *blob_number) { float rating_ceiling = FLT_MAX; SEAM *seam = nullptr; do { - *blob_number = select_blob_to_split_from_fixpt(fixpt); + auto blob = select_blob_to_split_from_fixpt(fixpt); if (chop_debug) { - tprintf("blob_number from fixpt = %d\n", *blob_number); + tprintf("blob_number from fixpt = %d\n", blob); } - bool split_point_from_dict = (*blob_number != -1); + bool split_point_from_dict = (blob != -1); if (split_point_from_dict) { fixpt->clear(); } else { - *blob_number = select_blob_to_split(blob_choices, rating_ceiling, split_next_to_fragment); + blob = select_blob_to_split(blob_choices, rating_ceiling, split_next_to_fragment); } if (chop_debug) { - tprintf("blob_number = %d\n", *blob_number); + tprintf("blob_number = %d\n", blob); } - if (*blob_number == -1) { + *blob_number = blob; + if (blob == -1) { return nullptr; } @@ -365,7 +366,7 @@ SEAM *Wordrec::improve_one_blob(const std::vector &blob_choices, */ SEAM *Wordrec::chop_one_blob(const std::vector &boxes, const std::vector &blob_choices, WERD_RES *word_res, - int *blob_number) { + unsigned 
*blob_number) { if (prioritize_division) { return chop_overlapping_blob(boxes, true, word_res, blob_number); } else { @@ -445,7 +446,7 @@ void Wordrec::improve_by_chopping(float rating_cert_scale, WERD_RES *word, BestChoiceBundle *best_choice_bundle, BlamerBundle *blamer_bundle, LMPainPoints *pain_points, std::vector *pending) { - int blob_number; + unsigned blob_number; do { // improvement loop. // Make a simple vector of BLOB_CHOICEs to make it easy to pick which // one to chop. @@ -522,12 +523,11 @@ void Wordrec::improve_by_chopping(float rating_cert_scale, WERD_RES *word, int Wordrec::select_blob_to_split(const std::vector &blob_choices, float rating_ceiling, bool split_next_to_fragment) { BLOB_CHOICE *blob_choice; - int x; float worst = -FLT_MAX; int worst_index = -1; float worst_near_fragment = -FLT_MAX; int worst_index_near_fragment = -1; - const CHAR_FRAGMENT **fragments = nullptr; + std::vector fragments; if (chop_debug) { if (rating_ceiling < FLT_MAX) { @@ -538,7 +538,7 @@ int Wordrec::select_blob_to_split(const std::vector &blob_choices } if (split_next_to_fragment && blob_choices.size() > 0) { - fragments = new const CHAR_FRAGMENT *[blob_choices.size()]; + fragments.resize(blob_choices.size()); if (blob_choices[0] != nullptr) { fragments[0] = getDict().getUnicharset().get_fragment(blob_choices[0]->unichar_id()); } else { @@ -546,9 +546,8 @@ int Wordrec::select_blob_to_split(const std::vector &blob_choices } } - for (x = 0; x < blob_choices.size(); ++x) { + for (unsigned x = 0; x < blob_choices.size(); ++x) { if (blob_choices[x] == nullptr) { - delete[] fragments; return x; } else { blob_choice = blob_choices[x]; @@ -591,7 +590,6 @@ int Wordrec::select_blob_to_split(const std::vector &blob_choices } } } - delete[] fragments; // TODO(daria): maybe a threshold of badness for // worst_near_fragment would be useful. return worst_index_near_fragment != -1 ? 
worst_index_near_fragment : worst_index; diff --git a/src/wordrec/drawfx.cpp b/src/wordrec/drawfx.cpp index 32918fc8e..98fc062cc 100644 --- a/src/wordrec/drawfx.cpp +++ b/src/wordrec/drawfx.cpp @@ -1,5 +1,5 @@ /********************************************************************** - * File: drawfx.cpp (Formerly drawfx.c) + * File: drawfx.cpp * Description: Draw things to do with feature extraction. * Author: Ray Smith * @@ -39,10 +39,7 @@ namespace tesseract { // title of window # define DEBUG_WIN_NAME "FXDebug" -STRING_VAR(fx_debugfile, DEBUG_WIN_NAME, "Name of debugfile"); - ScrollView *fx_win = nullptr; -FILE *fx_debug = nullptr; /********************************************************************** * create_fx_win diff --git a/src/wordrec/drawfx.h b/src/wordrec/drawfx.h index ea208ca4a..f670fafaa 100644 --- a/src/wordrec/drawfx.h +++ b/src/wordrec/drawfx.h @@ -1,5 +1,5 @@ /********************************************************************** - * File: drawfx.h (Formerly drawfx.h) + * File: drawfx.h * Description: Draw things to do with feature extraction. * Author: Ray Smith * @@ -24,11 +24,9 @@ namespace tesseract { -extern STRING_VAR_H(fx_debugfile, DEBUG_WIN_NAME, "Name of debugfile"); #ifndef GRAPHICS_DISABLED extern ScrollView *fx_win; #endif // !GRAPHICS_DISABLED -extern FILE *fx_debug; void create_fx_win(); // make features win void clear_fx_win(); // make features win void create_fxdebug_win(); // make gradients win diff --git a/src/wordrec/findseam.cpp b/src/wordrec/findseam.cpp index 745c07190..74a0578c1 100644 --- a/src/wordrec/findseam.cpp +++ b/src/wordrec/findseam.cpp @@ -46,7 +46,7 @@ /* How many to keep */ #define MAX_NUM_SEAMS 150 /* How many to keep */ -#define NO_FULL_PRIORITY -1 /* Special marker for pri. */ +#define NO_FULL_PRIORITY (-1) // Special marker for pri. 
/* Evaluate right away */ #define BAD_PRIORITY 9999.0 diff --git a/src/wordrec/gradechop.cpp b/src/wordrec/gradechop.cpp index bd7f60284..6f8c98fd0 100644 --- a/src/wordrec/gradechop.cpp +++ b/src/wordrec/gradechop.cpp @@ -51,7 +51,7 @@ PRIORITY Wordrec::grade_split_length(SPLIT *split) { if (split_length <= 0) { grade = 0; } else { - grade = sqrt(split_length) * chop_split_dist_knob; + grade = std::sqrt(split_length) * chop_split_dist_knob; } return (std::max(0.0f, grade)); diff --git a/src/wordrec/language_model.cpp b/src/wordrec/language_model.cpp index d1418bb89..50747607f 100644 --- a/src/wordrec/language_model.cpp +++ b/src/wordrec/language_model.cpp @@ -254,7 +254,7 @@ bool LanguageModel::UpdateState(bool just_classified, int curr_col, int curr_row tprintf("\nUpdateState: col=%d row=%d %s", curr_col, curr_row, just_classified ? "just_classified" : ""); if (language_model_debug_level > 5) { - tprintf("(parent=%p)\n", parent_node); + tprintf("(parent=%p)\n", static_cast(parent_node)); } else { tprintf("\n"); } @@ -588,7 +588,7 @@ bool LanguageModel::AddViterbiStateEntry(LanguageModelFlagsType top_choice_flags dict_->getUnicharset().id_to_unichar(b->unichar_id()), b->rating(), b->certainty(), top_choice_flags); if (language_model_debug_level > 5) { - tprintf(" parent_vse=%p\n", parent_vse); + tprintf(" parent_vse=%p\n", static_cast(parent_vse)); } else { tprintf("\n"); } @@ -828,10 +828,9 @@ LanguageModelDawgInfo *LanguageModel::GenerateDawgInfo(bool word_end, int curr_c return nullptr; } - int i; - // Check a that the path terminated before the current character is a word. + // Check that the path terminated before the current character is a word. bool has_word_ending = false; - for (i = 0; i < parent_vse->dawg_info->active_dawgs.size(); ++i) { + for (unsigned i = 0; i < parent_vse->dawg_info->active_dawgs.size(); ++i) { const DawgPosition &pos = parent_vse->dawg_info->active_dawgs[i]; const Dawg *pdawg = pos.dawg_index < 0 ? 
nullptr : dict_->GetDawg(pos.dawg_index); if (pdawg == nullptr || pos.back_to_punc) { @@ -860,7 +859,7 @@ LanguageModelDawgInfo *LanguageModel::GenerateDawgInfo(bool word_end, int curr_c // like don't. const auto &normed_ids = dict_->getUnicharset().normed_ids(b.unichar_id()); DawgPositionVector tmp_active_dawgs; - for (int i = 0; i < normed_ids.size(); ++i) { + for (unsigned i = 0; i < normed_ids.size(); ++i) { if (language_model_debug_level > 2) { tprintf("Test Letter OK for unichar %d, normed %d\n", b.unichar_id(), normed_ids[i]); } @@ -985,8 +984,8 @@ float LanguageModel::ComputeNgramCost(const char *unichar, float certainty, floa *found_small_prob = true; prob = language_model_ngram_small_prob; } - *ngram_cost = -1.0 * log2(prob); - float ngram_and_classifier_cost = -1.0 * log2(CertaintyScore(certainty) / denom) + + *ngram_cost = -1 * std::log2(prob); + float ngram_and_classifier_cost = -1 * std::log2(CertaintyScore(certainty) / denom) + *ngram_cost * language_model_ngram_scale_factor; if (language_model_debug_level > 1) { tprintf("-log [ p(%s) * p(%s | %s) ] = -log2(%g*%g) = %g\n", unichar, unichar, context_ptr, @@ -1342,24 +1341,24 @@ void LanguageModel::ExtractFeaturesFromPath(const ViterbiStateEntry &vse, float int permuter = vse.dawg_info->permuter; if (permuter == NUMBER_PERM || permuter == USER_PATTERN_PERM) { if (vse.consistency_info.num_digits == vse.length) { - features[PTRAIN_DIGITS_SHORT + len] = 1.0; + features[PTRAIN_DIGITS_SHORT + len] = 1.0f; } else { - features[PTRAIN_NUM_SHORT + len] = 1.0; + features[PTRAIN_NUM_SHORT + len] = 1.0f; } } else if (permuter == DOC_DAWG_PERM) { - features[PTRAIN_DOC_SHORT + len] = 1.0; + features[PTRAIN_DOC_SHORT + len] = 1.0f; } else if (permuter == SYSTEM_DAWG_PERM || permuter == USER_DAWG_PERM || permuter == COMPOUND_PERM) { - features[PTRAIN_DICT_SHORT + len] = 1.0; + features[PTRAIN_DICT_SHORT + len] = 1.0f; } else if (permuter == FREQ_DAWG_PERM) { - features[PTRAIN_FREQ_SHORT + len] = 1.0; + 
features[PTRAIN_FREQ_SHORT + len] = 1.0f; } } // Record shape cost feature (normalized by path length). features[PTRAIN_SHAPE_COST_PER_CHAR] = vse.associate_stats.shape_cost / static_cast(vse.length); // Record ngram cost. (normalized by the path length). - features[PTRAIN_NGRAM_COST_PER_CHAR] = 0.0; + features[PTRAIN_NGRAM_COST_PER_CHAR] = 0.0f; if (vse.ngram_info != nullptr) { features[PTRAIN_NGRAM_COST_PER_CHAR] = vse.ngram_info->ngram_cost / static_cast(vse.length); @@ -1370,7 +1369,7 @@ void LanguageModel::ExtractFeaturesFromPath(const ViterbiStateEntry &vse, float features[PTRAIN_NUM_BAD_CASE] = vse.consistency_info.NumInconsistentCase(); features[PTRAIN_XHEIGHT_CONSISTENCY] = vse.consistency_info.xht_decision; features[PTRAIN_NUM_BAD_CHAR_TYPE] = - vse.dawg_info == nullptr ? vse.consistency_info.NumInconsistentChartype() : 0.0; + vse.dawg_info == nullptr ? vse.consistency_info.NumInconsistentChartype() : 0.0f; features[PTRAIN_NUM_BAD_SPACING] = vse.consistency_info.NumInconsistentSpaces(); // Disabled this feature for now due to its poor performance. // features[PTRAIN_NUM_BAD_FONT] = vse.consistency_info.inconsistent_font; diff --git a/src/wordrec/language_model.h b/src/wordrec/language_model.h index f71700b44..e4ba1aaf6 100644 --- a/src/wordrec/language_model.h +++ b/src/wordrec/language_model.h @@ -289,43 +289,30 @@ protected: public: // Parameters. 
- INT_VAR_H(language_model_debug_level, 0, "Language model debug level"); - BOOL_VAR_H(language_model_ngram_on, false, "Turn on/off the use of character ngram model"); - INT_VAR_H(language_model_ngram_order, 8, "Maximum order of the character ngram model"); - INT_VAR_H(language_model_viterbi_list_max_num_prunable, 10, - "Maximum number of prunable (those for which PrunablePath() is" - " true) entries in each viterbi list recorded in BLOB_CHOICEs"); - INT_VAR_H(language_model_viterbi_list_max_size, 500, - "Maximum size of viterbi lists recorded in BLOB_CHOICEs"); - double_VAR_H(language_model_ngram_small_prob, 0.000001, - "To avoid overly small denominators use this as the floor" - " of the probability returned by the ngram model"); - double_VAR_H(language_model_ngram_nonmatch_score, -40.0, - "Average classifier score of a non-matching unichar"); - BOOL_VAR_H(language_model_ngram_use_only_first_uft8_step, false, - "Use only the first UTF8 step of the given string" - " when computing log probabilities"); - double_VAR_H(language_model_ngram_scale_factor, 0.03, - "Strength of the character ngram model relative to the" - " character classifier "); - double_VAR_H(language_model_ngram_rating_factor, 16.0, - "Factor to bring log-probs into the same range as ratings" - " when multiplied by outline length "); - BOOL_VAR_H(language_model_ngram_space_delimited_language, true, "Words are delimited by space"); - INT_VAR_H(language_model_min_compound_length, 3, "Minimum length of compound words"); + INT_VAR_H(language_model_debug_level); + BOOL_VAR_H(language_model_ngram_on); + INT_VAR_H(language_model_ngram_order); + INT_VAR_H(language_model_viterbi_list_max_num_prunable); + INT_VAR_H(language_model_viterbi_list_max_size); + double_VAR_H(language_model_ngram_small_prob); + double_VAR_H(language_model_ngram_nonmatch_score); + BOOL_VAR_H(language_model_ngram_use_only_first_uft8_step); + double_VAR_H(language_model_ngram_scale_factor); + 
double_VAR_H(language_model_ngram_rating_factor); + BOOL_VAR_H(language_model_ngram_space_delimited_language); + INT_VAR_H(language_model_min_compound_length); // Penalties used for adjusting path costs and final word rating. - double_VAR_H(language_model_penalty_non_freq_dict_word, 0.1, - "Penalty for words not in the frequent word dictionary"); - double_VAR_H(language_model_penalty_non_dict_word, 0.15, "Penalty for non-dictionary words"); - double_VAR_H(language_model_penalty_punc, 0.2, "Penalty for inconsistent punctuation"); - double_VAR_H(language_model_penalty_case, 0.1, "Penalty for inconsistent case"); - double_VAR_H(language_model_penalty_script, 0.5, "Penalty for inconsistent script"); - double_VAR_H(language_model_penalty_chartype, 0.3, "Penalty for inconsistent character type"); - double_VAR_H(language_model_penalty_font, 0.00, "Penalty for inconsistent font"); - double_VAR_H(language_model_penalty_spacing, 0.05, "Penalty for inconsistent spacing"); - double_VAR_H(language_model_penalty_increment, 0.01, "Penalty increment"); - INT_VAR_H(wordrec_display_segmentations, 0, "Display Segmentations"); - BOOL_VAR_H(language_model_use_sigmoidal_certainty, false, "Use sigmoidal score for certainty"); + double_VAR_H(language_model_penalty_non_freq_dict_word); + double_VAR_H(language_model_penalty_non_dict_word); + double_VAR_H(language_model_penalty_punc); + double_VAR_H(language_model_penalty_case); + double_VAR_H(language_model_penalty_script); + double_VAR_H(language_model_penalty_chartype); + double_VAR_H(language_model_penalty_font); + double_VAR_H(language_model_penalty_spacing); + double_VAR_H(language_model_penalty_increment); + INT_VAR_H(wordrec_display_segmentations); + BOOL_VAR_H(language_model_use_sigmoidal_certainty); protected: // Member Variables. 
diff --git a/src/wordrec/params_model.cpp b/src/wordrec/params_model.cpp index 340723e99..3b57dc34e 100644 --- a/src/wordrec/params_model.cpp +++ b/src/wordrec/params_model.cpp @@ -94,7 +94,7 @@ bool ParamsModel::Equivalent(const ParamsModel &that) const { } for (unsigned i = 0; i < weights_vec_[p].size(); i++) { if (weights_vec_[p][i] != that.weights_vec_[p][i] && - fabs(weights_vec_[p][i] - that.weights_vec_[p][i]) > epsilon) { + std::fabs(weights_vec_[p][i] - that.weights_vec_[p][i]) > epsilon) { return false; } } @@ -110,6 +110,7 @@ bool ParamsModel::LoadFromFp(const char *lang, TFile *fp) { lang_ = lang; // Load weights for passes with adaption on. std::vector &weights = weights_vec_[pass_]; + weights.clear(); weights.resize(PTRAIN_NUM_FEATURE_TYPES, 0.0f); while (fp->FGets(line, kMaxLineSize) != nullptr) { @@ -153,7 +154,7 @@ bool ParamsModel::SaveToFile(const char *full_path) const { return false; } bool all_good = true; - for (int i = 0; i < weights.size(); i++) { + for (unsigned i = 0; i < weights.size(); i++) { if (fprintf(fp, "%s %f\n", kParamsTrainingFeatureTypeName[i], weights[i]) < 0) { all_good = false; } diff --git a/src/wordrec/pieces.cpp b/src/wordrec/pieces.cpp index c02a24a25..d90a4a7cd 100644 --- a/src/wordrec/pieces.cpp +++ b/src/wordrec/pieces.cpp @@ -1,6 +1,6 @@ /****************************************************************************** * - * File: pieces.cpp (Formerly pieces.c) + * File: pieces.cpp * Description: * Author: Mark Seaman, OCR Technology * @@ -86,239 +86,4 @@ int SortByRating(const void *void1, const void *void2) { return -1; } -/********************************************************************** - * fill_filtered_fragment_list - * - * Filter the fragment list so that the filtered_choices only contain - * fragments that are in the correct position. choices is the list - * that we are going to filter. 
fragment_pos is the position in the - * fragment that we are looking for and num_frag_parts is the the - * total number of pieces. The result will be appended to - * filtered_choices. - **********************************************************************/ -void Wordrec::fill_filtered_fragment_list(BLOB_CHOICE_LIST *choices, int fragment_pos, - int num_frag_parts, BLOB_CHOICE_LIST *filtered_choices) { - BLOB_CHOICE_IT filtered_choices_it(filtered_choices); - BLOB_CHOICE_IT choices_it(choices); - - for (choices_it.mark_cycle_pt(); !choices_it.cycled_list(); choices_it.forward()) { - UNICHAR_ID choice_unichar_id = choices_it.data()->unichar_id(); - const CHAR_FRAGMENT *frag = unicharset.get_fragment(choice_unichar_id); - - if (frag != nullptr && frag->get_pos() == fragment_pos && frag->get_total() == num_frag_parts) { - // Recover the unichar_id of the unichar that this fragment is - // a part of - auto *b = new BLOB_CHOICE(*choices_it.data()); - int original_unichar = unicharset.unichar_to_id(frag->get_unichar()); - b->set_unichar_id(original_unichar); - filtered_choices_it.add_to_end(b); - } - } - - filtered_choices->sort(SortByUnicharID); -} - -/********************************************************************** - * merge_and_put_fragment_lists - * - * Merge the fragment lists in choice_lists and append it to the - * ratings matrix. 
- **********************************************************************/ -void Wordrec::merge_and_put_fragment_lists(int16_t row, int16_t column, int16_t num_frag_parts, - BLOB_CHOICE_LIST *choice_lists, MATRIX *ratings) { - auto *choice_lists_it = new BLOB_CHOICE_IT[num_frag_parts]; - - for (int i = 0; i < num_frag_parts; i++) { - choice_lists_it[i].set_to_list(&choice_lists[i]); - choice_lists_it[i].mark_cycle_pt(); - } - - BLOB_CHOICE_LIST *merged_choice = ratings->get(row, column); - if (merged_choice == nullptr) { - merged_choice = new BLOB_CHOICE_LIST; - } - - bool end_of_list = false; - BLOB_CHOICE_IT merged_choice_it(merged_choice); - while (!end_of_list) { - // Find the maximum unichar_id of the current entry the iterators - // are pointing at - UNICHAR_ID max_unichar_id = choice_lists_it[0].data()->unichar_id(); - for (int i = 0; i < num_frag_parts; i++) { - UNICHAR_ID unichar_id = choice_lists_it[i].data()->unichar_id(); - if (max_unichar_id < unichar_id) { - max_unichar_id = unichar_id; - } - } - - // Move the each iterators until it gets to an entry that has a - // value greater than or equal to max_unichar_id - for (int i = 0; i < num_frag_parts; i++) { - UNICHAR_ID unichar_id = choice_lists_it[i].data()->unichar_id(); - while (!choice_lists_it[i].cycled_list() && unichar_id < max_unichar_id) { - choice_lists_it[i].forward(); - unichar_id = choice_lists_it[i].data()->unichar_id(); - } - if (choice_lists_it[i].cycled_list()) { - end_of_list = true; - break; - } - } - - if (end_of_list) { - break; - } - - // Checks if the fragments are parts of the same character - UNICHAR_ID first_unichar_id = choice_lists_it[0].data()->unichar_id(); - bool same_unichar = true; - for (int i = 1; i < num_frag_parts; i++) { - UNICHAR_ID unichar_id = choice_lists_it[i].data()->unichar_id(); - if (unichar_id != first_unichar_id) { - same_unichar = false; - break; - } - } - - if (same_unichar) { - // Add the merged character to the result - UNICHAR_ID merged_unichar_id = 
first_unichar_id; - auto merged_fonts = choice_lists_it[0].data()->fonts(); - float merged_min_xheight = choice_lists_it[0].data()->min_xheight(); - float merged_max_xheight = choice_lists_it[0].data()->max_xheight(); - float positive_yshift = 0, negative_yshift = 0; - int merged_script_id = choice_lists_it[0].data()->script_id(); - BlobChoiceClassifier classifier = choice_lists_it[0].data()->classifier(); - - float merged_rating = 0, merged_certainty = 0; - for (int i = 0; i < num_frag_parts; i++) { - float rating = choice_lists_it[i].data()->rating(); - float certainty = choice_lists_it[i].data()->certainty(); - - if (i == 0 || certainty < merged_certainty) { - merged_certainty = certainty; - } - merged_rating += rating; - - choice_lists_it[i].forward(); - if (choice_lists_it[i].cycled_list()) { - end_of_list = true; - } - IntersectRange(choice_lists_it[i].data()->min_xheight(), - choice_lists_it[i].data()->max_xheight(), &merged_min_xheight, - &merged_max_xheight); - float yshift = choice_lists_it[i].data()->yshift(); - if (yshift > positive_yshift) { - positive_yshift = yshift; - } - if (yshift < negative_yshift) { - negative_yshift = yshift; - } - // Use the min font rating over the parts. - // TODO(rays) font lists are unsorted. Need to be faster? - const auto &frag_fonts = choice_lists_it[i].data()->fonts(); - for (auto frag_font : frag_fonts) { - int merged_f = 0; - for (; merged_f < merged_fonts.size() && - merged_fonts[merged_f].fontinfo_id != frag_font.fontinfo_id; - ++merged_f) { - } - if (merged_f == merged_fonts.size()) { - merged_fonts.push_back(frag_font); - } else if (merged_fonts[merged_f].score > frag_font.score) { - merged_fonts[merged_f].score = frag_font.score; - } - } - } - - float merged_yshift = - positive_yshift != 0 ? (negative_yshift != 0 ? 
0 : positive_yshift) : negative_yshift; - auto *choice = - new BLOB_CHOICE(merged_unichar_id, merged_rating, merged_certainty, merged_script_id, - merged_min_xheight, merged_max_xheight, merged_yshift, classifier); - choice->set_fonts(merged_fonts); - merged_choice_it.add_to_end(choice); - } - } - - if (classify_debug_level) { - print_ratings_list("Merged Fragments", merged_choice, unicharset); - } - - if (merged_choice->empty()) { - delete merged_choice; - } else { - ratings->put(row, column, merged_choice); - } - - delete[] choice_lists_it; -} - -/********************************************************************** - * get_fragment_lists - * - * Recursively go through the ratings matrix to find lists of fragments - * to be merged in the function merge_and_put_fragment_lists. - * current_frag is the position of the piece we are looking for. - * current_row is the row in the rating matrix we are currently at. - * start is the row we started initially, so that we can know where - * to append the results to the matrix. num_frag_parts is the total - * number of pieces we are looking for and num_blobs is the size of the - * ratings matrix. 
- **********************************************************************/ -void Wordrec::get_fragment_lists(int16_t current_frag, int16_t current_row, int16_t start, - int16_t num_frag_parts, int16_t num_blobs, MATRIX *ratings, - BLOB_CHOICE_LIST *choice_lists) { - if (current_frag == num_frag_parts) { - merge_and_put_fragment_lists(start, current_row - 1, num_frag_parts, choice_lists, ratings); - return; - } - - for (int16_t x = current_row; x < num_blobs; x++) { - BLOB_CHOICE_LIST *choices = ratings->get(current_row, x); - if (choices == nullptr) { - continue; - } - - fill_filtered_fragment_list(choices, current_frag, num_frag_parts, &choice_lists[current_frag]); - if (!choice_lists[current_frag].empty()) { - get_fragment_lists(current_frag + 1, x + 1, start, num_frag_parts, num_blobs, ratings, - choice_lists); - choice_lists[current_frag].clear(); - } - } -} - -/********************************************************************** - * merge_fragments - * - * Try to merge fragments in the ratings matrix and put the result in - * the corresponding row and column - **********************************************************************/ -void Wordrec::merge_fragments(MATRIX *ratings, int16_t num_blobs) { - BLOB_CHOICE_LIST choice_lists[CHAR_FRAGMENT::kMaxChunks]; - for (int16_t start = 0; start < num_blobs; start++) { - for (int frag_parts = 2; frag_parts <= CHAR_FRAGMENT::kMaxChunks; frag_parts++) { - get_fragment_lists(0, start, start, frag_parts, num_blobs, ratings, choice_lists); - } - } - - // Delete fragments from the rating matrix - for (int16_t x = 0; x < num_blobs; x++) { - for (int16_t y = x; y < num_blobs; y++) { - BLOB_CHOICE_LIST *choices = ratings->get(x, y); - if (choices != nullptr) { - BLOB_CHOICE_IT choices_it(choices); - for (choices_it.mark_cycle_pt(); !choices_it.cycled_list(); choices_it.forward()) { - UNICHAR_ID choice_unichar_id = choices_it.data()->unichar_id(); - const CHAR_FRAGMENT *frag = unicharset.get_fragment(choice_unichar_id); - if 
(frag != nullptr) { - delete choices_it.extract(); - } - } - } - } - } -} - } // namespace tesseract diff --git a/src/wordrec/render.h b/src/wordrec/render.h index bc0c99b26..268e85ac5 100644 --- a/src/wordrec/render.h +++ b/src/wordrec/render.h @@ -34,9 +34,9 @@ struct TESSLINE; extern ScrollView *blob_window; // Window for blobs extern ScrollView::Color color_list[]; // Colors for outlines -extern BOOL_VAR_H(wordrec_display_all_blobs, 0, "Display Blobs"); +extern BOOL_VAR_H(wordrec_display_all_blobs); -extern BOOL_VAR_H(wordrec_blob_pause, 0, "Blob pause"); +extern BOOL_VAR_H(wordrec_blob_pause); #define NUM_COLORS 6 diff --git a/src/wordrec/segsearch.cpp b/src/wordrec/segsearch.cpp index 05841a28d..4e5ac6ecf 100644 --- a/src/wordrec/segsearch.cpp +++ b/src/wordrec/segsearch.cpp @@ -30,12 +30,6 @@ namespace tesseract { -void Wordrec::DoSegSearch(WERD_RES *word_res) { - BestChoiceBundle best_choice_bundle(word_res->ratings->dimension()); - // Run Segmentation Search. - SegSearch(word_res, &best_choice_bundle, nullptr); -} - void Wordrec::SegSearch(WERD_RES *word_res, BestChoiceBundle *best_choice_bundle, BlamerBundle *blamer_bundle) { LMPainPoints pain_points(segsearch_max_pain_points, segsearch_max_char_wh_ratio, @@ -156,6 +150,7 @@ void Wordrec::InitialSegSearch(WERD_RES *word_res, LMPainPoints *pain_points, // children are considered in the non-decreasing order of their column, since // this guarantees that all the parents would be up to date before an update // of a child is done. + pending->clear(); pending->resize(word_res->ratings->dimension(), SegSearchPending()); // Search the ratings matrix for the initial best path. 
@@ -169,8 +164,8 @@ void Wordrec::UpdateSegSearchNodes(float rating_cert_scale, int starting_col, LMPainPoints *pain_points, BestChoiceBundle *best_choice_bundle, BlamerBundle *blamer_bundle) { MATRIX *ratings = word_res->ratings; - ASSERT_HOST(ratings->dimension() == pending->size()); - ASSERT_HOST(ratings->dimension() == best_choice_bundle->beam.size()); + ASSERT_HOST(static_cast(ratings->dimension()) == pending->size()); + ASSERT_HOST(static_cast(ratings->dimension()) == best_choice_bundle->beam.size()); for (int col = starting_col; col < ratings->dimension(); ++col) { if (!(*pending)[col].WorkToDo()) { continue; diff --git a/src/wordrec/wordrec.h b/src/wordrec/wordrec.h index b2aed84be..2ddf4be4a 100644 --- a/src/wordrec/wordrec.h +++ b/src/wordrec/wordrec.h @@ -42,9 +42,8 @@ class TESS_API Wordrec : public Classify { public: // config parameters - BOOL_VAR_H(wordrec_debug_blamer, false, "Print blamer debug messages"); - - BOOL_VAR_H(wordrec_run_blamer, false, "Try to set the blame for errors"); + BOOL_VAR_H(wordrec_debug_blamer); + BOOL_VAR_H(wordrec_run_blamer); // methods Wordrec(); @@ -187,52 +186,41 @@ ELISTIZEH(FRAGMENT) class TESS_API Wordrec : public Classify { public: // config parameters ******************************************************* - BOOL_VAR_H(merge_fragments_in_matrix, true, - "Merge the fragments in the ratings matrix and delete them " - "after merging"); - BOOL_VAR_H(wordrec_enable_assoc, true, "Associator Enable"); - BOOL_VAR_H(force_word_assoc, false, - "force associator to run regardless of what enable_assoc is." 
- "This is used for CJK where component grouping is necessary."); - INT_VAR_H(repair_unchopped_blobs, 1, "Fix blobs that aren't chopped"); - double_VAR_H(tessedit_certainty_threshold, -2.25, "Good blob limit"); - INT_VAR_H(chop_debug, 0, "Chop debug"); - BOOL_VAR_H(chop_enable, 1, "Chop enable"); - BOOL_VAR_H(chop_vertical_creep, 0, "Vertical creep"); - INT_VAR_H(chop_split_length, 10000, "Split Length"); - INT_VAR_H(chop_same_distance, 2, "Same distance"); - INT_VAR_H(chop_min_outline_points, 6, "Min Number of Points on Outline"); - INT_VAR_H(chop_seam_pile_size, 150, "Max number of seams in seam_pile"); - BOOL_VAR_H(chop_new_seam_pile, 1, "Use new seam_pile"); - INT_VAR_H(chop_inside_angle, -50, "Min Inside Angle Bend"); - INT_VAR_H(chop_min_outline_area, 2000, "Min Outline Area"); - double_VAR_H(chop_split_dist_knob, 0.5, "Split length adjustment"); - double_VAR_H(chop_overlap_knob, 0.9, "Split overlap adjustment"); - double_VAR_H(chop_center_knob, 0.15, "Split center adjustment"); - INT_VAR_H(chop_centered_maxwidth, 90, - "Width of (smaller) chopped blobs " - "above which we don't care that a chop is not near the center."); - double_VAR_H(chop_sharpness_knob, 0.06, "Split sharpness adjustment"); - double_VAR_H(chop_width_change_knob, 5.0, "Width change adjustment"); - double_VAR_H(chop_ok_split, 100.0, "OK split limit"); - double_VAR_H(chop_good_split, 50.0, "Good split limit"); - INT_VAR_H(chop_x_y_weight, 3, "X / Y length weight"); - BOOL_VAR_H(assume_fixed_pitch_char_segment, false, - "include fixed-pitch heuristics in char segmentation"); - INT_VAR_H(wordrec_debug_level, 0, "Debug level for wordrec"); - INT_VAR_H(wordrec_max_join_chunks, 4, "Max number of broken pieces to associate"); - BOOL_VAR_H(wordrec_skip_no_truth_words, false, - "Only run OCR for words that had truth recorded in BlamerBundle"); - BOOL_VAR_H(wordrec_debug_blamer, false, "Print blamer debug messages"); - BOOL_VAR_H(wordrec_run_blamer, false, "Try to set the blame for errors"); - 
INT_VAR_H(segsearch_debug_level, 0, "SegSearch debug level"); - INT_VAR_H(segsearch_max_pain_points, 2000, "Maximum number of pain points stored in the queue"); - INT_VAR_H(segsearch_max_futile_classifications, 10, - "Maximum number of pain point classifications per word."); - double_VAR_H(segsearch_max_char_wh_ratio, 2.0, "Maximum character width-to-height ratio"); - BOOL_VAR_H(save_alt_choices, true, - "Save alternative paths found during chopping " - "and segmentation search"); + BOOL_VAR_H(merge_fragments_in_matrix); + BOOL_VAR_H(wordrec_enable_assoc); + BOOL_VAR_H(force_word_assoc); + INT_VAR_H(repair_unchopped_blobs); + double_VAR_H(tessedit_certainty_threshold); + INT_VAR_H(chop_debug); + BOOL_VAR_H(chop_enable); + BOOL_VAR_H(chop_vertical_creep); + INT_VAR_H(chop_split_length); + INT_VAR_H(chop_same_distance); + INT_VAR_H(chop_min_outline_points); + INT_VAR_H(chop_seam_pile_size); + BOOL_VAR_H(chop_new_seam_pile); + INT_VAR_H(chop_inside_angle); + INT_VAR_H(chop_min_outline_area); + double_VAR_H(chop_split_dist_knob); + double_VAR_H(chop_overlap_knob); + double_VAR_H(chop_center_knob); + INT_VAR_H(chop_centered_maxwidth); + double_VAR_H(chop_sharpness_knob); + double_VAR_H(chop_width_change_knob); + double_VAR_H(chop_ok_split); + double_VAR_H(chop_good_split); + INT_VAR_H(chop_x_y_weight); + BOOL_VAR_H(assume_fixed_pitch_char_segment); + INT_VAR_H(wordrec_debug_level); + INT_VAR_H(wordrec_max_join_chunks); + BOOL_VAR_H(wordrec_skip_no_truth_words); + BOOL_VAR_H(wordrec_debug_blamer); + BOOL_VAR_H(wordrec_run_blamer); + INT_VAR_H(segsearch_debug_level); + INT_VAR_H(segsearch_max_pain_points); + INT_VAR_H(segsearch_max_futile_classifications); + double_VAR_H(segsearch_max_char_wh_ratio); + BOOL_VAR_H(save_alt_choices); // methods from wordrec/*.cpp *********************************************** Wordrec(); @@ -330,10 +318,6 @@ public: std::vector *pending, BestChoiceBundle *best_choice_bundle, BlamerBundle *blamer_bundle); - // Runs SegSearch() function 
(above) without needing a best_choice_bundle - // or blamer_bundle. Used for testing. - void DoSegSearch(WERD_RES *word_res); - // chop.cpp PRIORITY point_priority(EDGEPT *point); void add_point_to_list(PointHeap *point_heap, EDGEPT *point); @@ -354,13 +338,13 @@ public: SEAM *chop_numbered_blob(TWERD *word, int32_t blob_number, bool italic_blob, const std::vector &seams); SEAM *chop_overlapping_blob(const std::vector &boxes, bool italic_blob, WERD_RES *word_res, - int *blob_number); + unsigned *blob_number); SEAM *improve_one_blob(const std::vector &blob_choices, DANGERR *fixpt, bool split_next_to_fragment, bool italic_blob, WERD_RES *word, - int *blob_number); + unsigned *blob_number); SEAM *chop_one_blob(const std::vector &boxes, const std::vector &blob_choices, WERD_RES *word_res, - int *blob_number); + unsigned *blob_number); void chop_word_main(WERD_RES *word); void improve_by_chopping(float rating_cert_scale, WERD_RES *word, BestChoiceBundle *best_choice_bundle, BlamerBundle *blamer_bundle, @@ -392,32 +376,6 @@ public: virtual BLOB_CHOICE_LIST *classify_piece(const std::vector &seams, int16_t start, int16_t end, const char *description, TWERD *word, BlamerBundle *blamer_bundle); - // Try to merge fragments in the ratings matrix and put the result in - // the corresponding row and column - void merge_fragments(MATRIX *ratings, int16_t num_blobs); - // Recursively go through the ratings matrix to find lists of fragments - // to be merged in the function merge_and_put_fragment_lists. - // current_frag is the position of the piece we are looking for. - // current_row is the row in the rating matrix we are currently at. - // start is the row we started initially, so that we can know where - // to append the results to the matrix. num_frag_parts is the total - // number of pieces we are looking for and num_blobs is the size of the - // ratings matrix. 
- void get_fragment_lists(int16_t current_frag, int16_t current_row, int16_t start, - int16_t num_frag_parts, int16_t num_blobs, MATRIX *ratings, - BLOB_CHOICE_LIST *choice_lists); - // Merge the fragment lists in choice_lists and append it to the - // ratings matrix - void merge_and_put_fragment_lists(int16_t row, int16_t column, int16_t num_frag_parts, - BLOB_CHOICE_LIST *choice_lists, MATRIX *ratings); - // Filter the fragment list so that the filtered_choices only contain - // fragments that are in the correct position. choices is the list - // that we are going to filter. fragment_pos is the position in the - // fragment that we are looking for and num_frag_parts is the the - // total number of pieces. The result will be appended to - // filtered_choices. - void fill_filtered_fragment_list(BLOB_CHOICE_LIST *choices, int fragment_pos, int num_frag_parts, - BLOB_CHOICE_LIST *filtered_choices); // Member variables. diff --git a/sw.cpp b/sw.cpp index 117072d30..05dc89c0e 100644 --- a/sw.cpp +++ b/sw.cpp @@ -20,7 +20,7 @@ void build(Solution &s) libtesseract -= "src/training/.*"_rr; libtesseract -= - "src/api/tesseractmain.cpp", + "src/tesseract.cpp", "src/viewer/svpaint.cpp"; libtesseract.Public += "include"_idir; @@ -69,6 +69,8 @@ void build(Solution &s) // check fma flags libtesseract -= "src/arch/dotproductfma.cpp"; + // check arch (arm) + libtesseract -= "src/arch/dotproductneon.cpp"; if (libtesseract.getBuildSettings().TargetOS.Type != OSType::Windows) { @@ -107,7 +109,7 @@ void build(Solution &s) auto &tesseract = tess.addExecutable("tesseract"); { tesseract += cppstd; - tesseract += "src/api/tesseractmain.cpp"; + tesseract += "src/tesseract.cpp"; tesseract += libtesseract; } @@ -203,6 +205,7 @@ void build(Solution &s) auto &t = test.addTarget(name); t += cppstd; t += FileRegex("unittest", name + "_test.*", false); + t += "unittest"_idir; t += "SW_TESTING"_def; @@ -222,7 +225,6 @@ void build(Solution &s) t += pango_training; t += 
"org.sw.demo.google.googletest.gmock.main"_dep; t += "org.sw.demo.google.googletest.gtest.main"_dep; - t += "org.sw.demo.google.abseil"_dep; if (t.getCompilerType() == CompilerType::MSVC) t.CompileOptions.push_back("-utf-8"); @@ -301,7 +303,6 @@ void build(Solution &s) "tablefind", "tablerecog", "tabvector", - "tatweel", "textlineprojection", "tfile", "unichar", @@ -318,6 +319,11 @@ void build(Solution &s) auto &dt = add_test("dawg"); dt += Definition("wordlist2dawg_prog=\"" + to_printable_string(normalize_path(wordlist2dawg.getOutputFile())) + "\""); dt += Definition("dawg2wordlist_prog=\"" + to_printable_string(normalize_path(dawg2wordlist.getOutputFile())) + "\""); + + auto &tw = add_test("tatweel"); + tw += "unittest/util/.*"_rr; + tw += "unittest/third_party/.*"_rr; + tw -= "unittest/third_party/googletest/.*"_rr; } } diff --git a/test b/test index ebaee164b..3ea109966 160000 --- a/test +++ b/test @@ -1 +1 @@ -Subproject commit ebaee164bb39fe55b601b95b92db686d3c7da265 +Subproject commit 3ea1099664211958cb5c66c2bc69fb6652254a37 diff --git a/unittest/README.md b/unittest/README.md index f047f028c..02303b04e 100644 --- a/unittest/README.md +++ b/unittest/README.md @@ -60,10 +60,10 @@ │   └── script │   └── Latin.traineddata └── tesseract - ├── abseil ... 
├── test ├── unittest + │   └── third_party/googletest └── VERSION ``` diff --git a/unittest/baseapi_test.cc b/unittest/baseapi_test.cc index 84cf92979..7c70b13dd 100644 --- a/unittest/baseapi_test.cc +++ b/unittest/baseapi_test.cc @@ -19,8 +19,6 @@ #include #include -#include "absl/strings/ascii.h" -#include "absl/strings/str_cat.h" #include "gmock/gmock-matchers.h" #include @@ -49,7 +47,7 @@ std::string GetCleanedTextResult(tesseract::TessBaseAPI *tess, Image pix) { char *result = tess->GetUTF8Text(); std::string ocr_result = result; delete[] result; - absl::StripAsciiWhitespace(&ocr_result); + trim(ocr_result); return ocr_result; } @@ -64,6 +62,12 @@ protected: } }; +// Test static TessBaseAPI (like it is used by tesserocr). +TEST_F(TesseractTest, StaticTessBaseAPI) { + static tesseract::TessBaseAPI api; + api.End(); +} + // Tests that Tesseract gets exactly the right answer on phototest. TEST_F(TesseractTest, BasicTesseractTest) { tesseract::TessBaseAPI api; @@ -75,7 +79,7 @@ TEST_F(TesseractTest, BasicTesseractTest) { ocr_text = GetCleanedTextResult(&api, src_pix); CHECK_OK( file::GetContents(TestDataNameToPath("phototest.gold.txt"), &truth_text, file::Defaults())); - absl::StripAsciiWhitespace(&truth_text); + trim(truth_text); EXPECT_STREQ(truth_text.c_str(), ocr_text.c_str()); src_pix.destroy(); } else { @@ -196,7 +200,7 @@ TEST_F(TesseractTest, AdaptToWordStrTest) { Image src_pix = pixRead(TestDataNameToPath(kTestPages[i]).c_str()); CHECK(src_pix); ocr_text = GetCleanedTextResult(&api, src_pix); - absl::StripAsciiWhitespace(&truth_text); + trim(truth_text); EXPECT_STREQ(kTestText[i], ocr_text.c_str()); src_pix.destroy(); } @@ -218,7 +222,7 @@ TEST_F(TesseractTest, BasicLSTMTest) { ocr_text = GetCleanedTextResult(&api, src_pix); CHECK_OK( file::GetContents(TestDataNameToPath("phototest.gold.txt"), &truth_text, file::Defaults())); - absl::StripAsciiWhitespace(&truth_text); + trim(truth_text); EXPECT_STREQ(truth_text.c_str(), ocr_text.c_str()); 
src_pix.destroy(); } @@ -317,7 +321,9 @@ TEST(TesseractInstanceTest, TestMultipleTessInstances) { // Preload images and verify that OCR is correct on them individually. std::vector pix(num_langs); for (int i = 0; i < num_langs; ++i) { - SCOPED_TRACE(absl::StrCat("Single instance test with lang = ", langs[i])); + std::string tracestring = "Single instance test with lang = "; + tracestring += langs[i]; + SCOPED_TRACE(tracestring); std::string path = file::JoinPath(TESTING_DIR, image_files[i]); pix[i] = pixRead(path.c_str()); QCHECK(pix[i] != nullptr) << "Could not read " << path; diff --git a/unittest/baseapi_thread_test.cc b/unittest/baseapi_thread_test.cc index 94557463b..dc3b9e943 100644 --- a/unittest/baseapi_thread_test.cc +++ b/unittest/baseapi_thread_test.cc @@ -28,7 +28,6 @@ #endif #include #include -#include "absl/strings/ascii.h" // for absl::StripAsciiWhitespace #include "commandlineflags.h" #include "include_gunit.h" #include "log.h" @@ -148,14 +147,15 @@ static void InitTessInstance(TessBaseAPI *tess, const std::string &lang) { EXPECT_EQ(0, tess->Init(TESSDATA_DIR, lang.c_str())); } -static void GetCleanedText(TessBaseAPI *tess, Image pix, std::string *ocr_text) { +static void GetCleanedText(TessBaseAPI *tess, Image pix, std::string &ocr_text) { tess->SetImage(pix); char *result = tess->GetUTF8Text(); - *ocr_text = result; + ocr_text = result; delete[] result; - absl::StripAsciiWhitespace(ocr_text); + trim(ocr_text); } +#ifdef INCLUDE_TENSORFLOW static void VerifyTextResult(TessBaseAPI *tess, Image pix, const std::string &lang, const std::string &expected_text) { TessBaseAPI *tess_local = nullptr; @@ -166,12 +166,13 @@ static void VerifyTextResult(TessBaseAPI *tess, Image pix, const std::string &la InitTessInstance(tess_local, lang); } std::string ocr_text; - GetCleanedText(tess_local, pix, &ocr_text); + GetCleanedText(tess_local, pix, ocr_text); EXPECT_STREQ(expected_text.c_str(), ocr_text.c_str()); if (tess_local != tess) { delete tess_local; } } 
+#endif // Check that Tesseract/Cube produce the correct results in single-threaded // operation. If not, it is pointless to run the real multi-threaded tests. @@ -180,7 +181,7 @@ TEST_F(BaseapiThreadTest, TestBasicSanity) { TessBaseAPI tess; InitTessInstance(&tess, langs_[i]); std::string ocr_text; - GetCleanedText(&tess, pix_[i], &ocr_text); + GetCleanedText(&tess, pix_[i], ocr_text); CHECK(strcmp(gt_text_[i].c_str(), ocr_text.c_str()) == 0) << "Failed with lang = " << langs_[i]; } } diff --git a/unittest/cycletimer.h b/unittest/cycletimer.h index 6a61a86b8..65947fd1d 100644 --- a/unittest/cycletimer.h +++ b/unittest/cycletimer.h @@ -13,10 +13,16 @@ #ifndef TESSERACT_UNITTEST_CYCLETIMER_H #define TESSERACT_UNITTEST_CYCLETIMER_H -#include "absl/time/clock.h" // for GetCurrentTimeNanos +#include // for std::chrono // See https://github.com/google/or-tools/blob/master/ortools/base/timer.h class CycleTimer { +private: + static int64_t now() { + return std::chrono::duration_cast( + std::chrono::steady_clock::now().time_since_epoch()).count(); + } + public: CycleTimer() { Reset(); @@ -31,7 +37,7 @@ public: // When Start() is called multiple times, only the most recent is used. void Start() { running_ = true; - start_ = absl::GetCurrentTimeNanos(); + start_ = now(); } void Restart() { @@ -41,17 +47,12 @@ public: void Stop() { if (running_) { - sum_ += absl::GetCurrentTimeNanos() - start_; + sum_ += now() - start_; running_ = false; } } int64_t GetInMs() const { - return GetNanos() / 1000000; - } - -protected: - int64_t GetNanos() const { - return running_ ? absl::GetCurrentTimeNanos() - start_ + sum_ : sum_; + return running_ ? 
now() - start_ + sum_ : sum_; } private: diff --git a/unittest/fileio_test.cc b/unittest/fileio_test.cc index 630311ad7..798b4e9cf 100644 --- a/unittest/fileio_test.cc +++ b/unittest/fileio_test.cc @@ -12,8 +12,6 @@ #include #include -#include "absl/strings/str_split.h" - #include "fileio.h" #include "include_gunit.h" @@ -58,7 +56,7 @@ TEST(InputBufferTest, Read) { std::string str; auto input = std::make_unique(fp); EXPECT_TRUE(input->Read(&str)); - std::vector lines = absl::StrSplit(str, '\n', absl::SkipEmpty()); + std::vector lines = split(str, '\n'); EXPECT_EQ(2, lines.size()); EXPECT_EQ("Hello", lines[0]); EXPECT_EQ(" world!", lines[1]); diff --git a/unittest/imagedata_test.cc b/unittest/imagedata_test.cc index 77c0c5baa..2b185ef9f 100644 --- a/unittest/imagedata_test.cc +++ b/unittest/imagedata_test.cc @@ -12,9 +12,6 @@ #include #include -#include "absl/strings/str_cat.h" -#include "absl/strings/str_format.h" - #include "imagedata.h" #include "include_gunit.h" #include "log.h" @@ -42,7 +39,9 @@ protected: DocumentData write_doc("My document"); for (int p = 0; p < num_pages; ++p) { // Make some fake text that is different for each page and save it. - page_texts->push_back(absl::StrFormat("Page %d of %d in doc %u", p, num_pages, doc_id)); + char text[80]; + snprintf(text, sizeof(text), "Page %d of %d in doc %u", p, num_pages, doc_id); + page_texts->push_back(text); // Make an imagedata and put it in the document. ImageData *imagedata = ImageData::Build("noname", p, "eng", fake_image.data(), fake_image.size(), (*page_texts)[p].c_str(), nullptr); @@ -51,7 +50,8 @@ protected: } // Write it to a file. 
std::string filename = - file::JoinPath(FLAGS_test_tmpdir, absl::StrCat("documentdata", doc_id, ".lstmf")); + file::JoinPath(FLAGS_test_tmpdir, "documentdata"); + filename += std::to_string(doc_id) + ".lstmf"; EXPECT_TRUE(write_doc.SaveDocument(filename.c_str(), nullptr)); return filename; } diff --git a/unittest/include_gunit.h b/unittest/include_gunit.h index 47914a040..812eadb3b 100644 --- a/unittest/include_gunit.h +++ b/unittest/include_gunit.h @@ -18,7 +18,20 @@ #include "gtest/gtest.h" #include "log.h" // for LOG -const char *FLAGS_test_tmpdir = "./tmp"; +static const char *FLAGS_test_tmpdir = "./tmp"; + +namespace tesseract { + +static inline void trim(std::string &s) { + s.erase(s.begin(), std::find_if(s.begin(), s.end(), [](unsigned char ch) { + return !std::isspace(ch); + })); + s.erase(std::find_if(s.rbegin(), s.rend(), [](unsigned char ch) { + return !std::isspace(ch); + }).base(), s.end()); +} + +} // namespace tesseract class file : public tesseract::File { public: @@ -64,6 +77,7 @@ public: if (!(condition)) \ LOG(FATAL) << "Check failed: " #condition " " # define CHECK_EQ(test, value) CHECK((test) == (value)) +# define CHECK_GE(test, value) CHECK((test) >= (value)) # define CHECK_GT(test, value) CHECK((test) > (value)) # define CHECK_LT(test, value) CHECK((test) < (value)) # define CHECK_LE(test, value) CHECK((test) <= (value)) diff --git a/unittest/intsimdmatrix_test.cc b/unittest/intsimdmatrix_test.cc index 048ceb422..95688eed5 100644 --- a/unittest/intsimdmatrix_test.cc +++ b/unittest/intsimdmatrix_test.cc @@ -52,8 +52,8 @@ protected: return v; } // Makes a random scales vector of the given size. - std::vector RandomScales(int size) { - std::vector v(size); + std::vector RandomScales(int size) { + std::vector v(size); for (int i = 0; i < size; ++i) { v[i] = (1.0 + random_.SignedRand(1.0)) / INT8_MAX; } @@ -61,25 +61,23 @@ protected: } // Tests a range of sizes and compares the results against the generic version. 
void ExpectEqualResults(const IntSimdMatrix &matrix) { - double total = 0.0; + TFloat total = 0.0; for (int num_out = 1; num_out < 130; ++num_out) { for (int num_in = 1; num_in < 130; ++num_in) { GENERIC_2D_ARRAY w = InitRandom(num_out, num_in + 1); std::vector u = RandomVector(num_in, matrix); - std::vector scales = RandomScales(num_out); + std::vector scales = RandomScales(num_out); int ro = num_out; if (IntSimdMatrix::intSimdMatrix) { ro = IntSimdMatrix::intSimdMatrix->RoundOutputs(ro); } - std::vector base_result(ro); - base_result.resize(num_out); + std::vector base_result(num_out); IntSimdMatrix::MatrixDotVector(w, scales, u.data(), base_result.data()); - std::vector test_result(ro); - test_result.resize(num_out); + std::vector test_result(ro); std::vector shaped_wi; int32_t rounded_num_out; matrix.Init(w, shaped_wi, rounded_num_out); - scales.reserve(rounded_num_out); + scales.resize(rounded_num_out); if (matrix.matrixDotVectorFunction) { matrix.matrixDotVectorFunction(w.dim1(), w.dim2(), &shaped_wi[0], &scales[0], &u[0], &test_result[0]); @@ -93,7 +91,11 @@ protected: } } // Compare sum of all results with expected value. +#ifdef FAST_FLOAT + EXPECT_FLOAT_EQ(total, 337852.16f); +#else EXPECT_FLOAT_EQ(total, 337849.39354684710); +#endif } TRand random_; diff --git a/unittest/lang_model_test.cc b/unittest/lang_model_test.cc index e0d96abc3..048ee13e8 100644 --- a/unittest/lang_model_test.cc +++ b/unittest/lang_model_test.cc @@ -11,8 +11,6 @@ #include // for std::string -#include "absl/strings/str_cat.h" - #include "gmock/gmock.h" // for testing::ElementsAreArray #include "include_gunit.h" @@ -59,7 +57,7 @@ TEST(LangModelTest, AddACharacter) { pass_through_recoder, words, puncs, numbers, lang_is_rtl, nullptr, nullptr)); // Init a trainer with it, and encode kTestString. 
- std::string traineddata1 = file::JoinPath(output_dir, lang1, absl::StrCat(lang1, ".traineddata")); + std::string traineddata1 = file::JoinPath(output_dir, lang1, lang1) + ".traineddata"; LSTMTrainer trainer1; trainer1.InitCharSet(traineddata1); std::vector labels1; @@ -79,7 +77,7 @@ TEST(LangModelTest, AddACharacter) { pass_through_recoder, words, puncs, numbers, lang_is_rtl, nullptr, nullptr)); // Init a trainer with it, and encode kTestString. - std::string traineddata2 = file::JoinPath(output_dir, lang2, absl::StrCat(lang2, ".traineddata")); + std::string traineddata2 = file::JoinPath(output_dir, lang2, lang2) + ".traineddata"; LSTMTrainer trainer2; trainer2.InitCharSet(traineddata2); std::vector labels2; @@ -144,7 +142,7 @@ TEST(LangModelTest, AddACharacterHindi) { pass_through_recoder, words, puncs, numbers, lang_is_rtl, nullptr, nullptr)); // Init a trainer with it, and encode kTestString. - std::string traineddata1 = file::JoinPath(output_dir, lang1, absl::StrCat(lang1, ".traineddata")); + std::string traineddata1 = file::JoinPath(output_dir, lang1, lang1) + ".traineddata"; LSTMTrainer trainer1; trainer1.InitCharSet(traineddata1); std::vector labels1; @@ -164,7 +162,7 @@ TEST(LangModelTest, AddACharacterHindi) { pass_through_recoder, words, puncs, numbers, lang_is_rtl, nullptr, nullptr)); // Init a trainer with it, and encode kTestString. 
- std::string traineddata2 = file::JoinPath(output_dir, lang2, absl::StrCat(lang2, ".traineddata")); + std::string traineddata2 = file::JoinPath(output_dir, lang2, lang2) + ".traineddata"; LSTMTrainer trainer2; trainer2.InitCharSet(traineddata2); std::vector labels2; diff --git a/unittest/ligature_table_test.cc b/unittest/ligature_table_test.cc index 44ada6869..404c2b128 100644 --- a/unittest/ligature_table_test.cc +++ b/unittest/ligature_table_test.cc @@ -17,6 +17,7 @@ namespace tesseract { +#if 0 // not with NFC normalization const char kEngNonLigatureText[] = "fidelity effigy ſteep"; // Same as above text, but with "fi" in the first word and "ffi" in the second // word replaced with their respective ligatures. @@ -24,6 +25,7 @@ const char kEngLigatureText[] = "fidelity effigy ſteep"; // Same as kEngLigatureText but with "fi" in both words replaced with their // ligature. The test Verdana font does not support the "ffi" or "ſt" ligature. const char kRenderableEngLigatureText[] = "fidelity effigy ſteep"; +#endif static PangoFontMap *font_map; @@ -54,6 +56,7 @@ TEST_F(LigatureTableTest, DoesFillLigatureTables) { EXPECT_GT(lig_table_->lig_to_norm_table().size(), 0); } +#if 0 // not with NFC normalization TEST_F(LigatureTableTest, DoesAddLigatures) { EXPECT_STREQ(kEngLigatureText, lig_table_->AddLigatures(kEngNonLigatureText, nullptr).c_str()); } @@ -76,6 +79,7 @@ TEST_F(LigatureTableTest, DoesNotAddLigaturesWithUnsupportedFont) { TEST_F(LigatureTableTest, DoesRemoveLigatures) { EXPECT_STREQ(kEngNonLigatureText, lig_table_->RemoveLigatures(kEngLigatureText).c_str()); } +#endif TEST_F(LigatureTableTest, TestCustomLigatures) { const char *kTestCases[] = { @@ -89,6 +93,7 @@ TEST_F(LigatureTableTest, TestCustomLigatures) { } } +#if 0 // not with NFC normalization TEST_F(LigatureTableTest, TestRemovesCustomLigatures) { const char *kTestCases[] = { "fiction", @@ -100,4 +105,6 @@ TEST_F(LigatureTableTest, TestRemovesCustomLigatures) { EXPECT_STREQ(kTestCases[i + 2], 
lig_table_->RemoveCustomLigatures(kTestCases[i + 1]).c_str()); } } +#endif + } // namespace tesseract diff --git a/unittest/lstm_test.h b/unittest/lstm_test.h index 585f56e34..0b0ff6f58 100644 --- a/unittest/lstm_test.h +++ b/unittest/lstm_test.h @@ -18,7 +18,6 @@ #include "include_gunit.h" -#include "absl/strings/str_cat.h" #include "helpers.h" #include "tprintf.h" @@ -81,7 +80,7 @@ protected: std::string checkpoint_path = model_path + "_checkpoint"; trainer_ = std::make_unique(model_path.c_str(), checkpoint_path.c_str(), 0, 0); trainer_->InitCharSet( - file::JoinPath(FLAGS_test_tmpdir, kLang, absl::StrCat(kLang, ".traineddata"))); + file::JoinPath(FLAGS_test_tmpdir, kLang, kLang) + ".traineddata"); int net_mode = adam ? NF_ADAM : 0; // Adam needs a higher learning rate, due to not multiplying the effective // rate by 1/(1-momentum). diff --git a/unittest/mastertrainer_test.cc b/unittest/mastertrainer_test.cc index 7694359e2..b6dcc5bb5 100644 --- a/unittest/mastertrainer_test.cc +++ b/unittest/mastertrainer_test.cc @@ -32,9 +32,6 @@ #include "trainingsample.h" #include "unicharset.h" -#include "absl/strings/numbers.h" // for safe_strto32 -#include "absl/strings/str_split.h" // for absl::StrSplit - #include #include #include @@ -176,12 +173,11 @@ protected: FLAGS_X = TestDataNameToPath("eng.xheights").c_str(); FLAGS_U = TestDataNameToPath("eng.unicharset").c_str(); std::string tr_file_name(TestDataNameToPath("eng.Arial.exp0.tr")); - const char *argv[] = {tr_file_name.c_str()}; - int argc = 1; + const char *filelist[] = {tr_file_name.c_str(), nullptr}; std::string file_prefix; delete shape_table_; shape_table_ = nullptr; - master_trainer_ = LoadTrainingData(argc, argv, false, &shape_table_, file_prefix); + master_trainer_ = LoadTrainingData(filelist, false, &shape_table_, file_prefix); EXPECT_TRUE(master_trainer_ != nullptr); EXPECT_TRUE(shape_table_ != nullptr); } @@ -268,7 +264,7 @@ TEST_F(MasterTrainerTest, ErrorCounterTest) { shape_classifier.get(), 
&accuracy_report); LOG(INFO) << accuracy_report.c_str(); std::string result_string = accuracy_report.c_str(); - std::vector results = absl::StrSplit(result_string, '\t', absl::SkipEmpty()); + std::vector results = split(result_string, '\t'); EXPECT_EQ(tesseract::CT_SIZE + 1, results.size()); int result_values[tesseract::CT_SIZE]; for (int i = 0; i < tesseract::CT_SIZE; ++i) { diff --git a/unittest/normstrngs_test.cc b/unittest/normstrngs_test.cc index bb2ca5aad..26b050f6f 100644 --- a/unittest/normstrngs_test.cc +++ b/unittest/normstrngs_test.cc @@ -11,7 +11,6 @@ #include "normstrngs.h" #include -#include "absl/strings/str_format.h" // for absl::StrFormat #include "include_gunit.h" #include "normstrngs_test.h" #ifdef INCLUDE_TENSORFLOW @@ -315,7 +314,9 @@ TEST(NormstrngsTest, IsWhitespace) { EXPECT_TRUE(IsWhitespace('\n')); // U+2000 through U+200A for (char32 ch = 0x2000; ch <= 0x200A; ++ch) { - SCOPED_TRACE(absl::StrFormat("Failed at U+%x", ch)); + char text[80]; + snprintf(text, sizeof(text), "Failed at U+%x", ch); + SCOPED_TRACE(text); EXPECT_TRUE(IsWhitespace(ch)); } // U+3000 is whitespace @@ -354,7 +355,9 @@ TEST(NormstrngsTest, IsInterchangeValid) { const int32_t kMinUnicodeValue = 33; const int32_t kMaxUnicodeValue = 0x10FFFF; for (int32_t ch = kMinUnicodeValue; ch <= kMaxUnicodeValue; ++ch) { - SCOPED_TRACE(absl::StrFormat("Failed at U+%x", ch)); + char text[80]; + snprintf(text, sizeof(text), "Failed at U+%x", ch); + SCOPED_TRACE(text); EXPECT_EQ(UniLib::IsInterchangeValid(ch), IsInterchangeValid(ch)); } #else @@ -369,7 +372,9 @@ TEST(NormstrngsTest, IsInterchangeValid7BitAscii) { const int32_t kMinUnicodeValue = 33; const int32_t kMaxUnicodeValue = 0x10FFFF; for (int32_t ch = kMinUnicodeValue; ch <= kMaxUnicodeValue; ++ch) { - SCOPED_TRACE(absl::StrFormat("Failed at U+%x", ch)); + char text[80]; + snprintf(text, sizeof(text), "Failed at U+%x", ch); + SCOPED_TRACE(text); std::string str = EncodeAsUTF8(ch); 
EXPECT_EQ(UniLib::IsInterchangeValid7BitAscii(str), IsInterchangeValid7BitAscii(ch)); } @@ -396,7 +401,9 @@ TEST(NormstrngsTest, FullwidthToHalfwidth) { for (int32_t ch = kMinUnicodeValue; ch <= kMaxUnicodeValue; ++ch) { if (!IsValidCodepoint(ch)) continue; - SCOPED_TRACE(absl::StrFormat("Failed at U+%x", ch)); + char text[80]; + snprintf(text, sizeof(text), "Failed at U+%x", ch); + SCOPED_TRACE(text); std::string str = EncodeAsUTF8(ch); const std::string expected_half_str = UniLib::FullwidthToHalfwidth(str.c_str(), str.length(), true); diff --git a/unittest/normstrngs_test.h b/unittest/normstrngs_test.h index 80577f734..df12fc0cd 100644 --- a/unittest/normstrngs_test.h +++ b/unittest/normstrngs_test.h @@ -16,8 +16,6 @@ #include // for std::stringstream #include #include -#include "absl/strings/str_cat.h" -#include "absl/strings/str_join.h" namespace tesseract { @@ -33,7 +31,9 @@ inline std::string CodepointList(const std::vector &str32) { inline std::string PrintString32WithUnicodes(const std::string &str) { std::vector str32 = UNICHAR::UTF8ToUTF32(str.c_str()); - return absl::StrCat("\"", str, "\" ", CodepointList(str32)); + std::string s = "\""; + s += "\" " + CodepointList(str32); + return s; } inline std::string PrintStringVectorWithUnicodes(const std::vector &glyphs) { @@ -49,18 +49,30 @@ inline void ExpectGraphemeModeResults(const std::string &str, UnicodeNormMode u_ int unicode_count, int glyph_count, int grapheme_count, const std::string &target_str) { std::vector glyphs; + std::string s; EXPECT_TRUE(NormalizeCleanAndSegmentUTF8( u_mode, OCRNorm::kNone, GraphemeNormMode::kIndividualUnicodes, true, str.c_str(), &glyphs)); EXPECT_EQ(glyphs.size(), unicode_count) << PrintStringVectorWithUnicodes(glyphs); - EXPECT_EQ(target_str, absl::StrJoin(glyphs.begin(), glyphs.end(), "")); + for (auto &glyph : glyphs) { + s += glyph; + } + EXPECT_EQ(target_str, s); EXPECT_TRUE(NormalizeCleanAndSegmentUTF8(u_mode, OCRNorm::kNone, GraphemeNormMode::kGlyphSplit, true, 
str.c_str(), &glyphs)); EXPECT_EQ(glyphs.size(), glyph_count) << PrintStringVectorWithUnicodes(glyphs); - EXPECT_EQ(target_str, absl::StrJoin(glyphs.begin(), glyphs.end(), "")); + s.clear(); + for (auto &glyph : glyphs) { + s += glyph; + } + EXPECT_EQ(target_str, s); EXPECT_TRUE(NormalizeCleanAndSegmentUTF8(u_mode, OCRNorm::kNone, GraphemeNormMode::kCombined, true, str.c_str(), &glyphs)); EXPECT_EQ(glyphs.size(), grapheme_count) << PrintStringVectorWithUnicodes(glyphs); - EXPECT_EQ(target_str, absl::StrJoin(glyphs.begin(), glyphs.end(), "")); + s.clear(); + for (auto &glyph : glyphs) { + s += glyph; + } + EXPECT_EQ(target_str, s); EXPECT_TRUE(NormalizeCleanAndSegmentUTF8(u_mode, OCRNorm::kNone, GraphemeNormMode::kSingleString, true, str.c_str(), &glyphs)); EXPECT_EQ(glyphs.size(), 1) << PrintStringVectorWithUnicodes(glyphs); diff --git a/unittest/pagesegmode_test.cc b/unittest/pagesegmode_test.cc index 87f72d965..9689e407e 100644 --- a/unittest/pagesegmode_test.cc +++ b/unittest/pagesegmode_test.cc @@ -97,8 +97,13 @@ TEST_F(PageSegModeTest, WordTest) { VerifyRectText(tesseract::PSM_SINGLE_WORD, "183", 1396, 218, 114, 102); // Test a random pair of words as a line VerifyRectText(tesseract::PSM_SINGLE_LINE, "What should", 237, 393, 256, 36); + #ifdef DISABLED_LEGACY_ENGINE + // Skip check as LSTM mode adds a space. + LOG(INFO) << "Skip `Whatshould` test in LSTM Mode\n"; + #else // Test a random pair of words as a word VerifyRectText(tesseract::PSM_SINGLE_WORD, "Whatshould", 237, 393, 256, 36); + #endif // Test single block mode. VerifyRectText(tesseract::PSM_SINGLE_BLOCK, "both the\nfrom the", 237, 450, 172, 94); // But doesn't work in line or word mode. 
diff --git a/unittest/pango_font_info_test.cc b/unittest/pango_font_info_test.cc index b3862e504..279388433 100644 --- a/unittest/pango_font_info_test.cc +++ b/unittest/pango_font_info_test.cc @@ -13,7 +13,6 @@ #include #include #include -#include "absl/strings/str_cat.h" // for absl::StrCat #include "commandlineflags.h" #include "fileio.h" #include "gmock/gmock-matchers.h" // for EXPECT_THAT @@ -316,7 +315,8 @@ TEST_F(FontUtilsTest, GetAllRenderableCharacters) { // Check that none of the included fonts cover the Mongolian or Ogham space // characters. for (size_t f = 0; f < countof(kExpectedFontNames); ++f) { - SCOPED_TRACE(absl::StrCat("Testing ", kExpectedFontNames[f])); + std::string tracestring = "Testing " + kExpectedFontNames[f]; + SCOPED_TRACE(tracestring); FontUtils::GetAllRenderableCharacters(kExpectedFontNames[f], &unicode_mask); # if 0 // TODO: check fails because DejaVu Sans Ultra-Light supports ogham EXPECT_FALSE(unicode_mask[kOghamChar]); diff --git a/unittest/paragraphs_test.cc b/unittest/paragraphs_test.cc index c30f859a3..fcc54b00e 100644 --- a/unittest/paragraphs_test.cc +++ b/unittest/paragraphs_test.cc @@ -11,10 +11,6 @@ #include // for std::string -#include "absl/strings/str_cat.h" // for absl::StrCat -#include "absl/strings/str_join.h" // for absl::StrJoin -#include "absl/strings/str_split.h" // for absl::StrSplit - #include "include_gunit.h" // for TEST #include "log.h" // for LOG @@ -62,7 +58,7 @@ void AsciiToRowInfo(const char *text, int row_number, RowInfo *info) { info->lword_text = info->rword_text = ""; info->ltr = true; - std::vector words = absl::StrSplit(text, ' ', absl::SkipEmpty()); + std::vector words = split(text, ' '); info->num_words = words.size(); if (info->num_words < 1) { return; @@ -156,10 +152,11 @@ void EvaluateParagraphDetection(const TextAndModel *correct, int n, dbg_lines.emplace_back("# =========================="); for (int i = 0; i < n; i++) { if (correct[i].model_type != PCONT) { - 
dbg_lines.push_back(absl::StrCat(correct[i].ascii, " # ", - correct[i].model.ToString().c_str(), - correct[i].is_very_first_or_continuation ? " crown" : "", - correct[i].is_list_item ? " li" : "")); + std::string s = std::string(correct[i].ascii) + " # " + + correct[i].model.ToString() + + (correct[i].is_very_first_or_continuation ? " crown" : "") + + (correct[i].is_list_item ? " li" : ""); + dbg_lines.push_back(s); } else { dbg_lines.emplace_back(correct[i].ascii); } @@ -173,16 +170,21 @@ void EvaluateParagraphDetection(const TextAndModel *correct, int n, if (i == 0 || (detector_output[i - 1] != detector_output[i])) { if (detector_output[i] && detector_output[i]->model) { annotation += - absl::StrCat(" # ", detector_output[i]->model->ToString().c_str(), - detector_output[i]->is_very_first_or_continuation ? " crown" : "", - detector_output[i]->is_list_item ? " li" : ""); + " # " + detector_output[i]->model->ToString() + + (detector_output[i]->is_very_first_or_continuation ? " crown" : "") + + (detector_output[i]->is_list_item ? " li" : ""); } else { annotation = " # Unmodeled paragraph."; } } - dbg_lines.push_back(absl::StrCat(correct[i].ascii, annotation)); + std::string s = correct[i].ascii + annotation; + dbg_lines.push_back(s); } - LOG(INFO) << "Discrepancy!\n" << absl::StrJoin(dbg_lines, "\n"); + std::string s; + for (auto &dbg_line : dbg_lines) { + s += dbg_line + "\n"; + } + LOG(INFO) << "Discrepancy!\n" << s; } } diff --git a/unittest/recodebeam_test.cc b/unittest/recodebeam_test.cc index cda970b45..0fc738576 100644 --- a/unittest/recodebeam_test.cc +++ b/unittest/recodebeam_test.cc @@ -22,8 +22,6 @@ #include "helpers.h" -#include "absl/strings/str_format.h" // for absl::StrFormat - namespace tesseract { // Number of characters to test beam search with. 
@@ -160,9 +158,9 @@ protected: if (u_decoded.size() < truth_utf8.size()) { const char *str = ccutil_.unicharset.id_to_unichar(unichar_ids[u]); total_rating += ratings[u]; - LOG(INFO) << absl::StrFormat("%d:u_id=%d=%s, c=%g, r=%g, r_sum=%g @%d", u, unichar_ids[u], - str, certainties[u], ratings[u], total_rating, xcoords[u]) - << "\n"; + LOG(INFO) << u << ":u_id=" << unichar_ids[u] << "=" << str << ", c=" + << certainties[u] << ", r=" << ratings[u] << "r_sum=" + << total_rating << " @" << xcoords[u] << "\n"; if (str[0] == ' ') { total_rating = 0.0f; } @@ -184,11 +182,9 @@ protected: } w_decoded += word->best_choice->unichar_string().c_str(); } - LOG(INFO) << absl::StrFormat("Word:%d = %s, c=%g, r=%g, perm=%d", w, - word->best_choice->unichar_string().c_str(), - word->best_choice->certainty(), word->best_choice->rating(), - word->best_choice->permuter()) - << "\n"; + LOG(INFO) << "Word:" << w << " = " << word->best_choice->unichar_string() + << ", c=" << word->best_choice->certainty() << ", r=" << word->best_choice->rating() + << ", perm=" << word->best_choice->permuter() << "\n"; } std::string w_trunc(w_decoded.data(), truth_utf8.size()); if (truth_utf8 != w_trunc) { diff --git a/unittest/resultiterator_test.cc b/unittest/resultiterator_test.cc index cf9cd2505..0914e5e9d 100644 --- a/unittest/resultiterator_test.cc +++ b/unittest/resultiterator_test.cc @@ -5,7 +5,6 @@ #include #include "scrollview.h" -#include "absl/strings/str_format.h" // for absl::StrFormat #include "include_gunit.h" #include "log.h" // for LOG @@ -99,7 +98,7 @@ protected: pixWrite(outfile.c_str(), pix, IFF_PNG); } pix.destroy(); - LOG(INFO) << absl::StrFormat("At level %d: pix diff = %d\n", level, pixcount); + LOG(INFO) << "At level " << level << ": pix diff = " << pixcount << "\n"; EXPECT_LE(pixcount, max_diff); // if (base::GetFlag(FLAGS_v) > 1) CHECK_LE(pixcount, max_diff); } @@ -131,12 +130,14 @@ protected: } void VerifyRebuilds(int block_limit, int para_limit, int line_limit, int word_limit, 
- int symbol_limit, PageIterator *it) { + int symbol_limit, PageIterator *it, PageIteratorLevel maxlevel=tesseract::RIL_SYMBOL) { VerifyRebuild(block_limit, tesseract::RIL_BLOCK, it); VerifyRebuild(para_limit, tesseract::RIL_PARA, it); VerifyRebuild(line_limit, tesseract::RIL_TEXTLINE, it); VerifyRebuild(word_limit, tesseract::RIL_WORD, it); - VerifyRebuild(symbol_limit, tesseract::RIL_SYMBOL, it); + if (maxlevel == tesseract::RIL_SYMBOL) { + VerifyRebuild(symbol_limit, maxlevel, it); + } } void VerifyAllText(const std::string &truth, ResultIterator *it) { @@ -278,7 +279,7 @@ TEST_F(ResultIteratorTest, EasyTest) { // The images should rebuild almost perfectly. LOG(INFO) << "Verifying image rebuilds 2a (resultiterator)" << "\n"; - VerifyRebuilds(8, 8, 0, 0, 40, r_it); + VerifyRebuilds(8, 8, 0, 0, 40, r_it, tesseract::RIL_WORD); // Test the text. LOG(INFO) << "Verifying text rebuilds 1 (resultiterator)" << "\n"; @@ -287,13 +288,14 @@ TEST_F(ResultIteratorTest, EasyTest) { // The images should rebuild almost perfectly. LOG(INFO) << "Verifying image rebuilds 2b (resultiterator)" << "\n"; - VerifyRebuilds(8, 8, 0, 0, 40, r_it); + VerifyRebuilds(8, 8, 0, 0, 40, r_it, tesseract::RIL_WORD); r_it->Begin(); // Test baseline of the first line. int x1, y1, x2, y2; r_it->Baseline(tesseract::RIL_TEXTLINE, &x1, &y1, &x2, &y2); - LOG(INFO) << absl::StrFormat("Baseline (%d,%d)->(%d,%d)", x1, y1, x2, y2) << "\n"; + LOG(INFO) << "Baseline (" + << x1 << ',' << y1 << ")->(" << x2 << ',' << y2 << ")\n"; // Make sure we have a decent vector. EXPECT_GE(x2, x1 + 400); // The point 200,116 should be very close to the baseline. @@ -308,17 +310,25 @@ TEST_F(ResultIteratorTest, EasyTest) { // Test font attributes for each word. 
do { - bool bold, italic, underlined, monospace, serif, smallcaps; + float confidence = r_it->Confidence(tesseract::RIL_WORD); +#ifndef DISABLED_LEGACY_ENGINE int pointsize, font_id; + bool bold, italic, underlined, monospace, serif, smallcaps; const char *font = r_it->WordFontAttributes(&bold, &italic, &underlined, &monospace, &serif, &smallcaps, &pointsize, &font_id); - float confidence = r_it->Confidence(tesseract::RIL_WORD); EXPECT_GE(confidence, 80.0f); +#endif char *word_str = r_it->GetUTF8Text(tesseract::RIL_WORD); - LOG(INFO) << absl::StrFormat("Word %s in font %s, id %d, size %d, conf %g", word_str, font, - font_id, pointsize, confidence) - << "\n"; + +#ifdef DISABLED_LEGACY_ENGINE + LOG(INFO) << "Word " << word_str << ", conf " << confidence << "\n"; +#else + LOG(INFO) << "Word " << word_str << " in font " << font + << ", id " << font_id << ", size " << pointsize + << ", conf " << confidence << "\n"; +#endif // def DISABLED_LEGACY_ENGINE delete[] word_str; +#ifndef DISABLED_LEGACY_ENGINE EXPECT_FALSE(bold); EXPECT_FALSE(italic); EXPECT_FALSE(underlined); @@ -329,6 +339,7 @@ TEST_F(ResultIteratorTest, EasyTest) { // 31 pixels / textline * (72 pts / inch) / (200 pixels / inch) = 11.16 pts EXPECT_GE(pointsize, 11.16 - 1.50); EXPECT_LE(pointsize, 11.16 + 1.50); +#endif // def DISABLED_LEGACY_ENGINE } while (r_it->Next(tesseract::RIL_WORD)); delete r_it; } @@ -357,6 +368,10 @@ TEST_F(ResultIteratorTest, GreyTest) { // Tests that Tesseract gets smallcaps and dropcaps. TEST_F(ResultIteratorTest, SmallCapDropCapTest) { +#ifdef DISABLED_LEGACY_ENGINE + // Skip test as LSTM mode does not recognize smallcaps & dropcaps attributes. 
+ GTEST_SKIP(); +#else SetImage("8071_093.3B.tif"); char *result = api_.GetUTF8Text(); delete[] result; @@ -372,8 +387,8 @@ TEST_F(ResultIteratorTest, SmallCapDropCapTest) { &pointsize, &font_id); char *word_str = r_it->GetUTF8Text(tesseract::RIL_WORD); if (word_str != nullptr) { - LOG(INFO) << absl::StrFormat("Word %s is %s", word_str, smallcaps ? "SMALLCAPS" : "Normal") - << "\n"; + LOG(INFO) << "Word " << word_str + << " is " << (smallcaps ? "SMALLCAPS" : "Normal") << "\n"; if (r_it->SymbolIsDropcap()) { ++found_dropcaps; } @@ -392,7 +407,7 @@ TEST_F(ResultIteratorTest, SmallCapDropCapTest) { while (s_it.Next(tesseract::RIL_SYMBOL) && !s_it.IsAtBeginningOf(tesseract::RIL_WORD)) { if (s_it.SymbolIsDropcap()) { char *sym_str = s_it.GetUTF8Text(tesseract::RIL_SYMBOL); - LOG(ERROR) << absl::StrFormat("Symbol %s of word %s is dropcap", sym_str, word_str); + LOG(ERROR) << "Symbol " << sym_str << " of word " << word_str << " is dropcap"; delete[] sym_str; } EXPECT_FALSE(s_it.SymbolIsDropcap()); @@ -404,6 +419,7 @@ TEST_F(ResultIteratorTest, SmallCapDropCapTest) { EXPECT_EQ(1, found_dropcaps); EXPECT_GE(4, found_smallcaps); EXPECT_LE(false_positives, 3); +#endif // DISABLED_LEGACY_ENGINE } #if 0 @@ -433,8 +449,7 @@ TEST_F(ResultIteratorTest, SubSuperTest) { result = r_it->GetUTF8Text(tesseract::RIL_SYMBOL); if (strchr(kAllowedSupers, result[0]) == nullptr) { char* word = r_it->GetUTF8Text(tesseract::RIL_WORD); - LOG(ERROR) << absl::StrFormat("Char %s in word %s is unexpected super!", - result, word); + LOG(ERROR) << "Char " << result << " in word " << word << " is unexpected super!"; delete [] word; EXPECT_TRUE(strchr(kAllowedSupers, result[0]) != nullptr); } @@ -445,8 +460,8 @@ TEST_F(ResultIteratorTest, SubSuperTest) { } } while (r_it->Next(tesseract::RIL_SYMBOL)); delete r_it; - LOG(INFO) << absl::StrFormat("Subs = %d, supers= %d, normal = %d", - found_subs, found_supers, found_normal) << "\n"; + LOG(INFO) << "Subs = " << found_subs << ", supers= " << found_supers + 
<< ", normal = " << found_normal << "\n"; EXPECT_GE(found_subs, 25); EXPECT_GE(found_supers, 25); EXPECT_GE(found_normal, 1350); @@ -528,7 +543,7 @@ TEST_F(ResultIteratorTest, DISABLED_NonNullChoicesTest) { do { char *word_str = r_it->GetUTF8Text(tesseract::RIL_WORD); if (word_str != nullptr) { - LOG(INFO) << absl::StrFormat("Word %s:", word_str) << "\n"; + LOG(INFO) << "Word " << word_str << ":\n"; ResultIterator s_it = *r_it; do { tesseract::ChoiceIterator c_it(s_it); @@ -571,7 +586,7 @@ TEST_F(ResultIteratorTest, NonNullConfidencesTest) { const char *char_str = s_it.GetUTF8Text(tesseract::RIL_SYMBOL); CHECK(char_str != nullptr); float confidence = s_it.Confidence(tesseract::RIL_SYMBOL); - LOG(INFO) << absl::StrFormat("Char %s has confidence %g\n", char_str, confidence); + LOG(INFO) << "Char " << char_str << " has confidence " << confidence << "\n"; delete[] char_str; } while (!s_it.IsAtFinalElement(tesseract::RIL_WORD, tesseract::RIL_SYMBOL) && s_it.Next(tesseract::RIL_SYMBOL)); diff --git a/unittest/shapetable_test.cc b/unittest/shapetable_test.cc index 094d4367c..842f55058 100644 --- a/unittest/shapetable_test.cc +++ b/unittest/shapetable_test.cc @@ -12,8 +12,6 @@ #include #include -#include "absl/strings/str_format.h" // for absl::StrFormat - #include "include_gunit.h" #include "serialis.h" @@ -139,8 +137,9 @@ TEST_F(ShapeTableTest, FullTest) { UNICHARSET unicharset; unicharset.unichar_insert(" "); for (int i = 1; i <= 10; ++i) { - std::string class_str = absl::StrFormat("class%d", i); - unicharset.unichar_insert(class_str.c_str()); + char class_str[20]; + snprintf(class_str, sizeof(class_str), "class%d", i); + unicharset.unichar_insert(class_str); } ShapeTable st(unicharset); EXPECT_EQ(0, st.AddShape(3, 101)); diff --git a/unittest/stats_test.cc b/unittest/stats_test.cc index 7bab779c5..b0e03ab38 100644 --- a/unittest/stats_test.cc +++ b/unittest/stats_test.cc @@ -40,6 +40,13 @@ TEST_F(STATSTest, BasicStats) { EXPECT_EQ(12, stats_.pile_count(2)); } 
+TEST_F(STATSTest, InitStats) { + STATS stats; + EXPECT_EQ(0, stats.get_total()); + EXPECT_EQ(0, stats.mode()); + EXPECT_EQ(0, stats.pile_count(2)); +} + // Tests the top_n_modes function. TEST_F(STATSTest, TopNModes) { std::vector > modes; diff --git a/unittest/stringrenderer_test.cc b/unittest/stringrenderer_test.cc index 59be6338e..34a8e180f 100644 --- a/unittest/stringrenderer_test.cc +++ b/unittest/stringrenderer_test.cc @@ -17,7 +17,6 @@ #include "stringrenderer.h" #include -#include "absl/strings/str_split.h" // for absl::StrSplit #include #include @@ -314,10 +313,12 @@ TEST_F(StringRendererTest, DoesLigatureTextForRendering) { EXPECT_EQ(strlen(kEngNonLigatureText), renderer_->RenderToImage(kEngNonLigatureText, strlen(kEngNonLigatureText), &pix)); pix.destroy(); +#if 0 // not with NFC normalization // There should be one less box than letters due to the 'fi' ligature. EXPECT_EQ(strlen(kEngNonLigatureText) - 1, renderer_->GetBoxes().size()); // The output box text should be ligatured. 
EXPECT_STREQ("fi", renderer_->GetBoxes()[0]->ch().c_str()); +#endif } TEST_F(StringRendererTest, DoesRetainInputLigatureForRendering) { @@ -348,7 +349,7 @@ TEST_F(StringRendererTest, DoesRenderWordBoxes) { EXPECT_EQ(strlen(kEngText), renderer_->RenderToImage(kEngText, strlen(kEngText), &pix)); pix.destroy(); // Verify #boxchars = #words + #spaces - std::vector words = absl::StrSplit(kEngText, ' ', absl::SkipEmpty()); + std::vector words = split(kEngText, ' '); const int kNumSpaces = words.size() - 1; const int kExpectedNumBoxes = words.size() + kNumSpaces; const std::vector &boxchars = renderer_->GetBoxes(); @@ -371,8 +372,12 @@ TEST_F(StringRendererTest, DoesRenderWordBoxesFromMultiLineText) { EXPECT_EQ(strlen(kMultlineText), renderer_->RenderToImage(kMultlineText, strlen(kEngText), &pix)); pix.destroy(); // Verify #boxchars = #words + #spaces + #newlines - std::vector words = - absl::StrSplit(kMultlineText, absl::ByAnyChar(" \n"), absl::SkipEmpty()); + std::vector words; + for (auto &line : split(kMultlineText, '\n')) { + for (auto &word : split(line, ' ')) { + words.push_back(word); + } + } const int kNumSeparators = words.size() - 1; const int kExpectedNumBoxes = words.size() + kNumSeparators; const std::vector &boxchars = renderer_->GetBoxes(); diff --git a/unittest/syntaxnet/base.h b/unittest/syntaxnet/base.h index 76fb0fbb3..9ac03c6d1 100644 --- a/unittest/syntaxnet/base.h +++ b/unittest/syntaxnet/base.h @@ -16,12 +16,15 @@ limitations under the License. #ifndef SYNTAXNET_BASE_H_ #define SYNTAXNET_BASE_H_ +#include #include #include #include #include #include +#ifdef INCLUDE_TENSORFLOW + #include "google/protobuf/util/message_differencer.h" #include "tensorflow/core/lib/core/status.h" @@ -31,11 +34,14 @@ limitations under the License. 
#include "tensorflow/core/platform/mutex.h" #include "tensorflow/core/platform/protobuf.h" +#endif + using std::map; using std::pair; using std::unordered_map; using std::unordered_set; using std::vector; +#ifdef INCLUDE_TENSORFLOW using tensorflow::int16; using tensorflow::int32; using tensorflow::int64; @@ -47,10 +53,13 @@ using tensorflow::uint32; using tensorflow::uint64; using tensorflow::uint8; using tensorflow::protobuf::TextFormat; +#endif typedef signed int char32; using std::string; +#ifdef INCLUDE_TENSORFLOW using tensorflow::StringPiece; +#endif // namespace syntaxnet diff --git a/unittest/tatweel_test.cc b/unittest/tatweel_test.cc index 01da60c4d..d0d8f2ae6 100644 --- a/unittest/tatweel_test.cc +++ b/unittest/tatweel_test.cc @@ -19,9 +19,7 @@ #include "include_gunit.h" #include "trie.h" #include "unicharset.h" -#ifdef INCLUDE_TENSORFLOW -# include "util/utf8/unicodetext.h" // for UnicodeText -#endif +#include "util/utf8/unicodetext.h" // for UnicodeText namespace tesseract { @@ -42,10 +40,9 @@ protected: } TatweelTest() { -#ifdef INCLUDE_TENSORFLOW std::string filename = TestDataNameToPath("ara.wordlist"); if (file_exists(filename.c_str())) { - std::string wordlist(u8"\u0640"); + std::string wordlist("\u0640"); CHECK_OK(file::GetContents(filename, &wordlist, file::Defaults())); // Put all the unicodes in the unicharset_. 
UnicodeText text; @@ -53,14 +50,13 @@ protected: int num_tatweel = 0; for (auto it = text.begin(); it != text.end(); ++it) { std::string utf8 = it.get_utf8_string(); - if (utf8.find(u8"\u0640") != std::string::npos) + if (utf8.find("\u0640") != std::string::npos) ++num_tatweel; unicharset_.unichar_insert(utf8.c_str()); } LOG(INFO) << "Num tatweels in source data=" << num_tatweel; EXPECT_GT(num_tatweel, 0); } -#endif } std::string TestDataNameToPath(const std::string &name) { diff --git a/unittest/textlineprojection_test.cc b/unittest/textlineprojection_test.cc index 2595b5747..90ed8cc33 100644 --- a/unittest/textlineprojection_test.cc +++ b/unittest/textlineprojection_test.cc @@ -12,7 +12,6 @@ #include #include // for std::string -#include "absl/strings/str_format.h" // for absl::StrFormat #include "include_gunit.h" #include @@ -114,13 +113,12 @@ protected: const char *message) { int value = projection_->EvaluateBox(box, denorm_, false); if (greater_or_equal != (value > target_value)) { - LOG(INFO) << absl::StrFormat( - "EvaluateBox too %s:%d vs %d for %s word '%s' at:", greater_or_equal ? "low" : "high", - value, target_value, message, text); + LOG(INFO) << "EvaluateBox too " << (greater_or_equal ? 
"low" : "high") + << ":" << value << " vs " << target_value << " for " << message << " word '" << text << "' at:"; box.print(); value = projection_->EvaluateBox(box, denorm_, true); } else { - LOG(INFO) << absl::StrFormat("EvaluateBox OK(%d) for %s word '%s'", value, message, text); + LOG(INFO) << "EvaluateBox OK(" << value << ") for " << message << " word '" << text << "'"; } if (greater_or_equal) { EXPECT_GE(value, target_value); @@ -136,14 +134,14 @@ protected: int true_dist = projection_->DistanceOfBoxFromBox(box, true_box, true, denorm_, false); int false_dist = projection_->DistanceOfBoxFromBox(box, false_box, true, denorm_, false); if (false_dist <= true_dist) { - LOG(INFO) << absl::StrFormat("Distance wrong:%d vs %d for %s word '%s' at:", false_dist, - true_dist, message, text); + LOG(INFO) << "Distance wrong:" << false_dist << " vs " << true_dist + << " for " << message << " word '" << text << "' at:"; true_box.print(); projection_->DistanceOfBoxFromBox(box, true_box, true, denorm_, true); projection_->DistanceOfBoxFromBox(box, false_box, true, denorm_, true); } else { - LOG(INFO) << absl::StrFormat("Distance OK(%d vs %d) for %s word '%s'", false_dist, true_dist, - message, text); + LOG(INFO) << "Distance OK(" << false_dist << " vs " << true_dist + << ") for " << message << " word '" << text << "'"; } } diff --git a/unittest/third_party/googletest b/unittest/third_party/googletest new file mode 160000 index 000000000..e2239ee60 --- /dev/null +++ b/unittest/third_party/googletest @@ -0,0 +1 @@ +Subproject commit e2239ee6043f73722e7aa812a459f54a28552929 diff --git a/unittest/unicharcompress_test.cc b/unittest/unicharcompress_test.cc index 9787da105..31fcf3a26 100644 --- a/unittest/unicharcompress_test.cc +++ b/unittest/unicharcompress_test.cc @@ -12,9 +12,6 @@ #include #include -#include "absl/strings/ascii.h" -#include "absl/strings/str_cat.h" -#include "absl/strings/str_split.h" #include "include_gunit.h" #include "log.h" // for LOG @@ -46,7 +43,7 @@ 
protected: compressed_.EncodeUnichar(null_char_, &code); encoded_null_char_ = code(0); std::string output_name = - file::JoinPath(FLAGS_test_tmpdir, absl::StrCat(unicharset_name, ".encoding.txt")); + file::JoinPath(FLAGS_test_tmpdir, unicharset_name) + ".encoding.txt"; std::string encoding = compressed_.GetEncodingAsString(unicharset_); std::string encoding_str(&encoding[0], encoding.size()); CHECK_OK(file::SetContents(output_name, encoding_str, file::Defaults())); @@ -233,7 +230,7 @@ TEST_F(UnicharcompressTest, GetEncodingAsString) { ExpectCorrect("trivial"); std::string encoding = compressed_.GetEncodingAsString(unicharset_); std::string encoding_str(&encoding[0], encoding.length()); - std::vector lines = absl::StrSplit(encoding_str, "\n", absl::SkipEmpty()); + std::vector lines = split(encoding_str, '\n'); EXPECT_EQ(5, lines.size()); // The first line is always space. EXPECT_EQ("0\t ", lines[0]); diff --git a/unittest/util/utf8/unicodetext.cc b/unittest/util/utf8/unicodetext.cc index d1174d1ac..a0f5e5718 100644 --- a/unittest/util/utf8/unicodetext.cc +++ b/unittest/util/utf8/unicodetext.cc @@ -14,6 +14,7 @@ * limitations under the License. */ +#include "include_gunit.h" #include "util/utf8/unicodetext.h" #include // for memcpy, NULL, memcmp, etc @@ -172,10 +173,12 @@ void UnicodeText::Repr::append(const char *bytes, int byte_length) { size_ += byte_length; } +#ifdef INCLUDE_TENSORFLOW string UnicodeText::Repr::DebugString() const { return tensorflow::strings::Printf("{Repr %p data=%p size=%d capacity=%d %s}", this, data_, size_, capacity_, ours_ ? "Owned" : "Alias"); } +#endif // *************** UnicodeText ****************** @@ -310,17 +313,24 @@ UnicodeText::const_iterator UnicodeText::UnsafeFind(const UnicodeText &look, const_iterator start_pos) const { // Due to the magic of the UTF8 encoding, searching for a sequence of // letters is equivalent to substring search. 
+#ifdef INCLUDE_TENSORFLOW StringPiece searching(utf8_data(), utf8_length()); StringPiece look_piece(look.utf8_data(), look.utf8_length()); +#endif LOG(FATAL) << "Not implemented"; +#ifdef INCLUDE_TENSORFLOW // StringPiece::size_type found = // searching.find(look_piece, start_pos.utf8_data() - utf8_data()); StringPiece::size_type found = StringPiece::npos; if (found == StringPiece::npos) return end(); return const_iterator(utf8_data() + found); +#else + return end(); +#endif } +#ifdef INCLUDE_TENSORFLOW bool UnicodeText::HasReplacementChar() const { // Equivalent to: // UnicodeText replacement_char; @@ -332,6 +342,7 @@ bool UnicodeText::HasReplacementChar() const { // return searching.find(looking_for) != StringPiece::npos; return false; } +#endif // ----- other methods ----- @@ -371,10 +382,12 @@ bool operator==(const UnicodeText &lhs, const UnicodeText &rhs) { return memcmp(lhs.repr_.data_, rhs.repr_.data_, lhs.repr_.size_) == 0; } +#ifdef INCLUDE_TENSORFLOW string UnicodeText::DebugString() const { return tensorflow::strings::Printf("{UnicodeText %p chars=%d repr=%s}", this, size(), repr_.DebugString().c_str()); } +#endif // ******************* UnicodeText::const_iterator ********************* @@ -479,6 +492,7 @@ UnicodeText::const_iterator UnicodeText::MakeIterator(const char *p) const { return const_iterator(p); } +#ifdef INCLUDE_TENSORFLOW string UnicodeText::const_iterator::DebugString() const { return tensorflow::strings::Printf("{iter %p}", it_); } @@ -492,3 +506,4 @@ string CodepointString(const UnicodeText &t) { tensorflow::strings::Appendf(&s, "%X ", *it++); return s; } +#endif diff --git a/unittest/util/utf8/unilib_utf8_utils.h b/unittest/util/utf8/unilib_utf8_utils.h index f2d1520cb..e2f48bb3a 100644 --- a/unittest/util/utf8/unilib_utf8_utils.h +++ b/unittest/util/utf8/unilib_utf8_utils.h @@ -29,13 +29,14 @@ namespace UniLib { // (i.e., is not a surrogate codepoint). See also // IsValidCodepoint(const char* src) in util/utf8/public/unilib.h. 
inline bool IsValidCodepoint(char32 c) { - return (static_cast(c) < 0xD800) || (c >= 0xE000 && c <= 0x10FFFF); + return (static_cast(c) < 0xD800) || (c >= 0xE000 && c <= 0x10FFFF); } // Returns true if 'str' is the start of a structurally valid UTF-8 // sequence and is not a surrogate codepoint. Returns false if str.empty() // or if str.length() < UniLib::OneCharLen(str[0]). Otherwise, this function // will access 1-4 bytes of src, where n is UniLib::OneCharLen(src[0]). +#ifdef INCLUDE_TENSORFLOW inline bool IsUTF8ValidCodepoint(StringPiece str) { char32 c; int consumed; @@ -43,6 +44,7 @@ inline bool IsUTF8ValidCodepoint(StringPiece str) { return !str.empty() && isvalidcharntorune(str.data(), str.size(), &c, &consumed) && IsValidCodepoint(c); } +#endif // Returns the length (number of bytes) of the Unicode code point // starting at src, based on inspecting just that one byte. This