GSoC 2016 - Adding ALIASES for tutorial (#7041)

* GSoC 2016 - Adding toggle files to be used by tutorials.

Add a toggle option for tutorials.
* adds a button on the HTML tutorial pages to switch between blocks
* the default use is for languages: one can write a block
for C++ and another one for Python without rewriting the tutorial

Add aliases to the Doxyfile.
* adding aliases to link to the previous and next tutorial.
* adding an alias to specify the toggle options in the tutorials index.
* adding an alias to embed a YouTube video directly from a link.

Add a sample tutorial (mat_mask_operations) using the developed aliases:
* YouTube alias
* previous and next tutorial aliases
* buttons
* languages info for the tutorial table of contents
* code references with snippets (and associated sample code files)

* Remove the automatic ordering.
Add specific toggles for cpp, java and python.
Move all the code to the footer / header and the Doxyfile.
Update the documentation.
Authored by Cartucho on 2016-12-15 12:32:12 +00:00; committed by Maksim Shabunin
parent 36b5abf6b7
commit fcddfa4f86
13 changed files with 564 additions and 89 deletions

View File

@@ -31,10 +31,17 @@ MULTILINE_CPP_IS_BRIEF = NO
INHERIT_DOCS = YES
SEPARATE_MEMBER_PAGES = NO
TAB_SIZE = 4
ALIASES =
ALIASES += add_toggle{1}="@htmlonly[block] <div class='newInnerHTML'>\1</div><div> <script type='text/javascript'> addToggle(); </script>@endhtmlonly"
ALIASES += add_toggle_cpp="@htmlonly[block] <div class='newInnerHTML' title='cpp' style='display: none;'>C++</div><div class='toggleable_div label_cpp' style='display: none;'>@endhtmlonly"
ALIASES += add_toggle_java="@htmlonly[block] <div class='newInnerHTML' title='java' style='display: none;'>Java</div><div class='toggleable_div label_java' style='display: none;'>@endhtmlonly"
ALIASES += add_toggle_python="@htmlonly[block] <div class='newInnerHTML' title='python' style='display: none;'>Python</div><div class='toggleable_div label_python' style='display: none;'>@endhtmlonly"
ALIASES += end_toggle="@htmlonly[block] </div> @endhtmlonly"
ALIASES += prev_tutorial{1}="**Prev Tutorial:** \ref \1 \n"
ALIASES += next_tutorial{1}="**Next Tutorial:** \ref \1 \n"
ALIASES += youtube{1}="@htmlonly[block]<div align='center'><iframe title='my title' width='560' height='349' src='http://www.youtube.com/embed/\1?rel=0' frameborder='0' align='middle' allowfullscreen></iframe></div>@endhtmlonly"
TCL_SUBST =
OPTIMIZE_OUTPUT_FOR_C = NO
OPTIMIZE_OUTPUT_JAVA = NO
OPTIMIZE_OUTPUT_JAVA = YES
OPTIMIZE_FOR_FORTRAN = NO
OPTIMIZE_OUTPUT_VHDL = NO
EXTENSION_MAPPING =
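For illustration, a tutorial page could then invoke these aliases roughly as below (the page
identifiers and the video ID are placeholders, not taken from this commit):
@verbatim
@prev_tutorial{tutorial_previous_page}
@next_tutorial{tutorial_next_page}

@add_toggle_cpp
C++-specific text and @snippet blocks go here.
@end_toggle

@youtube{VIDEO_ID}
@endverbatim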

View File

@@ -17,5 +17,74 @@ $generatedby &#160;<a href="http://www.doxygen.org/index.html">
</a> $doxygenversion
</small></address>
<!--END !GENERATE_TREEVIEW-->
<script type="text/javascript">
//<![CDATA[
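// addButton(label, buttonName): build one toggle button; clicking it highlights all
// buttons of that label and shows only the '.toggleable_div.label_<label>' content blocks.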
function addButton(label, buttonName) {
var b = document.createElement("BUTTON");
b.innerHTML = buttonName;
b.setAttribute('class', 'toggleable_button label_' + label);
b.onclick = function() {
$('.toggleable_button').css({
border: '2px outset',
'border-radius': '4px'
});
$('.toggleable_button.label_' + label).css({
border: '2px inset',
'border-radius': '4px'
});
$('.toggleable_div').css('display', 'none');
$('.toggleable_div.label_' + label).css('display', 'block');
};
b.style.border = '2px outset';
b.style.borderRadius = '4px';
b.style.margin = '2px';
return b;
}
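// buttonsToAdd(): gather the hidden caption divs that belong to a heading, de-duplicate
// them by caption text and append one button per distinct label under that heading.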
function buttonsToAdd($elements, $heading, $type) {
if ($elements.length === 0) {
$elements = $("" + $type + ":contains(" + $heading.html() + ")").parent().prev("div.newInnerHTML");
}
var arr = jQuery.makeArray($elements);
var seen = {};
arr.forEach(function(e) {
var txt = e.innerHTML;
if (!seen[txt]) {
$button = addButton(e.title, txt);
if (Object.keys(seen).length == 0) {
var linebreak1 = document.createElement("br");
var linebreak2 = document.createElement("br");
($heading).append(linebreak1);
($heading).append(linebreak2);
}
($heading).append($button);
seen[txt] = true;
}
});
return;
}
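// Attach the buttons under every h2 (or its h3 sub-headings), then pick a default view:
// the first button, overridden by Python and finally by C++ when those labels exist.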
$("h2").each(function() {
$heading = $(this);
$smallerHeadings = $(this).nextUntil("h2").filter("h3").add($(this).nextUntil("h2").find("h3"));
if ($smallerHeadings.length) {
$smallerHeadings.each(function() {
var $elements = $(this).nextUntil("h3").filter("div.newInnerHTML");
buttonsToAdd($elements, $(this), "h3");
});
} else {
var $elements = $(this).nextUntil("h2").filter("div.newInnerHTML");
buttonsToAdd($elements, $heading, "h2");
}
});
$(".toggleable_button").first().click();
var $clickDefault = $('.toggleable_button.label_python').first();
if ($clickDefault.length) {
$clickDefault.click();
}
$clickDefault = $('.toggleable_button.label_cpp').first();
if ($clickDefault.length) {
$clickDefault.click();
}
//]]>
</script>
</body>
</html>

View File

@@ -54,4 +54,34 @@ $extrastylesheet
</table>
</div>
<!--END TITLEAREA-->
<script type="text/javascript">
//<![CDATA[
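// getLabelName(): normalise a caption such as 'C++' into a CSS-safe label such as 'cpp'
// ('+' -> 'p', ' ' -> '_', '#' -> 'sharp', any other special character -> 'ascii' + its code).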
function getLabelName(innerHTML) {
var str = innerHTML.toLowerCase();
// Replace all '+' with 'p'
str = str.split('+').join('p');
// Replace all ' ' with '_'
str = str.split(' ').join('_');
// Replace all '#' with 'sharp'
str = str.split('#').join('sharp');
// Replace other special characters with 'ascii' + code
for (var i = 0; i < str.length; i++) {
var charCode = str.charCodeAt(i);
if (!(charCode == 95 || (charCode > 96 && charCode < 123) || (charCode > 47 && charCode < 58)))
str = str.substr(0, i) + 'ascii' + charCode + str.substr(i + 1);
}
return str;
}
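// addToggle(): called from the add_toggle{1} Doxygen alias; derives a label from the last
// emitted caption div, hides it and marks the following div as the matching toggleable block.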
function addToggle() {
var $getDiv = $('div.newInnerHTML').last();
var buttonName = $getDiv.html();
var label = getLabelName(buttonName.trim());
$getDiv.attr("title", label);
$getDiv.hide();
$getDiv = $getDiv.next();
$getDiv.attr("class", "toggleable_div label_" + label);
$getDiv.hide();
}
//]]>
</script>
<!-- end header part -->

View File

@@ -1,4 +1,4 @@
//<![CDATA[
MathJax.Hub.Config(
{
TeX: {
@@ -15,3 +15,4 @@ MathJax.Hub.Config(
}
}
);
//]]>

View File

@@ -1,6 +1,9 @@
Mask operations on matrices {#tutorial_mat_mask_operations}
===========================
@prev_tutorial{tutorial_how_to_scan_images}
@next_tutorial{tutorial_mat_operations}
Mask operations on matrices are quite simple. The idea is that we recalculate each pixels value in
an image according to a mask matrix (also known as kernel). This mask holds values that will adjust
how much influence neighboring pixels (and the current pixel) have on the new pixel value. From a
@@ -25,113 +28,150 @@ the zero-zero index) on the pixel you want to calculate and sum up the pixel val
the overlapped matrix values. It's the same thing, however in case of large matrices the latter
notation is a lot easier to look over.
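For instance, with the sharpening kernel used in this tutorial, a pixel of value 100 whose four
direct neighbours all have value 90 becomes (illustrative numbers)
\f[5 \cdot 100 - (90 + 90 + 90 + 90) = 140,\f]
so the centre pixel is pushed above its smoother neighbourhood.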
@add_toggle_cpp
Now let us see how we can make this happen by using the basic pixel access method or by using the
@ref cv::filter2D function.
@end_toggle
@add_toggle_java
Now let us see how we can make this happen by using the basic pixel access method or by using the
**Imgproc.filter2D()** function.
@end_toggle
@add_toggle_python
Now let us see how we can make this happen by using the basic pixel access method or by using the
**cv2.filter2D()** function.
@end_toggle
The Basic Method
----------------
Here's a function that will do this:
@code{.cpp}
void Sharpen(const Mat& myImage, Mat& Result)
{
CV_Assert(myImage.depth() == CV_8U); // accept only uchar images
@add_toggle_cpp
@snippet samples/cpp/tutorial_code/core/mat_mask_operations/mat_mask_operations.cpp basic_method
Result.create(myImage.size(), myImage.type());
const int nChannels = myImage.channels();
for(int j = 1; j < myImage.rows - 1; ++j)
{
const uchar* previous = myImage.ptr<uchar>(j - 1);
const uchar* current = myImage.ptr<uchar>(j );
const uchar* next = myImage.ptr<uchar>(j + 1);
uchar* output = Result.ptr<uchar>(j);
for(int i = nChannels; i < nChannels * (myImage.cols - 1); ++i)
{
*output++ = saturate_cast<uchar>(5 * current[i]
-current[i - nChannels] - current[i + nChannels] - previous[i] - next[i]);
}
}
Result.row(0).setTo(Scalar(0));
Result.row(Result.rows - 1).setTo(Scalar(0));
Result.col(0).setTo(Scalar(0));
Result.col(Result.cols - 1).setTo(Scalar(0));
}
@endcode
At first we make sure that the input image's data is in unsigned char format. For this we use the
@ref cv::CV_Assert function that throws an error when the expression inside it is false.
@code{.cpp}
CV_Assert(myImage.depth() == CV_8U); // accept only uchar images
@snippet samples/cpp/tutorial_code/core/mat_mask_operations/mat_mask_operations.cpp 8_bit
@end_toggle
@add_toggle_java
@snippet samples/java/tutorial_code/core/mat_mask_operations/MatMaskOperations.java basic_method
At first we make sure that the input image's data is in unsigned 8-bit format.
@snippet samples/java/tutorial_code/core/mat_mask_operations/MatMaskOperations.java 8_bit
@end_toggle
@add_toggle_python
@snippet samples/python/tutorial_code/core/mat_mask_operations/mat_mask_operations.py basic_method
At first we make sure that the input image's data is in unsigned 8-bit format.
@code{.py}
my_image = cv2.cvtColor(my_image, cv2.CV_8U)
@endcode
@end_toggle
We create an output image with the same size and the same type as our input. As you can see in the
@ref tutorial_how_to_scan_images_storing "storing" section, depending on the number of channels we may have one or more
subcolumns. We will iterate through them via pointers so the total number of elements depends from
subcolumns.
@add_toggle_cpp
We will iterate through them via pointers so the total number of elements depends on
this number.
@code{.cpp}
Result.create(myImage.size(), myImage.type());
const int nChannels = myImage.channels();
@snippet samples/cpp/tutorial_code/core/mat_mask_operations/mat_mask_operations.cpp create_channels
@end_toggle
@add_toggle_java
@snippet samples/java/tutorial_code/core/mat_mask_operations/MatMaskOperations.java create_channels
@end_toggle
@add_toggle_python
@code{.py}
height, width, n_channels = my_image.shape
result = np.zeros(my_image.shape, my_image.dtype)
@endcode
@end_toggle
@add_toggle_cpp
We'll use the plain C [] operator to access pixels. Because we need to access multiple rows at the
same time we'll acquire the pointers for each of them (a previous, a current and a next line). We
need another pointer to where we're going to save the calculation. Then simply access the right
items with the [] operator. For moving the output pointer ahead we simply increase this (with one
byte) after each operation:
@code{.cpp}
for(int j = 1; j < myImage.rows - 1; ++j)
{
const uchar* previous = myImage.ptr<uchar>(j - 1);
const uchar* current = myImage.ptr<uchar>(j );
const uchar* next = myImage.ptr<uchar>(j + 1);
@snippet samples/cpp/tutorial_code/core/mat_mask_operations/mat_mask_operations.cpp basic_method_loop
uchar* output = Result.ptr<uchar>(j);
for(int i = nChannels; i < nChannels * (myImage.cols - 1); ++i)
{
*output++ = saturate_cast<uchar>(5 * current[i]
-current[i - nChannels] - current[i + nChannels] - previous[i] - next[i]);
}
}
@endcode
On the borders of the image the upper notation results in non-existent pixel locations (like minus
one, minus one). At these points our formula is undefined. A simple solution is not to apply the
kernel at these points and, for example, set the pixels on the borders to zero:
@code{.cpp}
Result.row(0).setTo(Scalar(0)); // The top row
Result.row(Result.rows - 1).setTo(Scalar(0)); // The bottom row
Result.col(0).setTo(Scalar(0)); // The left column
Result.col(Result.cols - 1).setTo(Scalar(0)); // The right column
@endcode
@snippet samples/cpp/tutorial_code/core/mat_mask_operations/mat_mask_operations.cpp borders
@end_toggle
@add_toggle_java
We need to access multiple rows and columns which can be done by adding or subtracting 1 to the current center (i,j).
Then we apply the sum and put the new value in the Result matrix.
@snippet samples/java/tutorial_code/core/mat_mask_operations/MatMaskOperations.java basic_method_loop
On the borders of the image the upper notation results in non-existent pixel locations (like (-1,-1)).
At these points our formula is undefined. A simple solution is not to apply the kernel
at these points and, for example, set the pixels on the borders to zero:
@snippet samples/java/tutorial_code/core/mat_mask_operations/MatMaskOperations.java borders
@end_toggle
@add_toggle_python
We need to access multiple rows and columns which can be done by adding or subtracting 1 to the current center (i,j).
Then we apply the sum and put the new value in the Result matrix.
@snippet samples/python/tutorial_code/core/mat_mask_operations/mat_mask_operations.py basic_method_loop
@end_toggle
The filter2D function
---------------------
Applying such filters is so common in image processing that in OpenCV there exists a function that
will take care of applying the mask (also called a kernel in some places). For this you first need
to define a *Mat* object that holds the mask:
@code{.cpp}
Mat kern = (Mat_<char>(3,3) << 0, -1, 0,
-1, 5, -1,
0, -1, 0);
@endcode
Then call the @ref cv::filter2D function specifying the input, the output image and the kernell to
to define an object that holds the mask:
@add_toggle_cpp
@snippet samples/cpp/tutorial_code/core/mat_mask_operations/mat_mask_operations.cpp kern
Then call the @ref cv::filter2D function specifying the input, the output image and the kernel to
use:
@code{.cpp}
filter2D(I, K, I.depth(), kern);
@endcode
The function even has a fifth optional argument to specify the center of the kernel, and a sixth one
for determining what to do in the regions where the operation is undefined (borders). Using this
function has the advantage that it's shorter, less verbose and because there are some optimization
techniques implemented it is usually faster than the *hand-coded method*. For example in my test
while the second one took only 13 milliseconds the first took around 31 milliseconds. Quite some
difference.
@snippet samples/cpp/tutorial_code/core/mat_mask_operations/mat_mask_operations.cpp filter2D
The function even has a fifth optional argument to specify the center of the kernel, a sixth
for adding an optional value to the filtered pixels before storing them in K and a seventh one
for determining what to do in the regions where the operation is undefined (borders).
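As an illustrative sketch (reusing the variable names of the snippet above, and spelling out the
default values of those optional parameters), the full call would look like:
@code{.cpp}
// anchor Point(-1,-1) keeps the kernel centred on the current pixel, delta = 0 adds nothing
// to the filtered values, and BORDER_DEFAULT chooses how out-of-image pixels are extrapolated.
filter2D(src, dst1, src.depth(), kernel, Point(-1, -1), 0, BORDER_DEFAULT);
@endcode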
@end_toggle
@add_toggle_java
@snippet samples/java/tutorial_code/core/mat_mask_operations/MatMaskOperations.java kern
Then call the **Imgproc.filter2D()** function specifying the input, the output image and the kernel to
use:
@snippet samples/java/tutorial_code/core/mat_mask_operations/MatMaskOperations.java filter2D
The function even has a fifth optional argument to specify the center of the kernel, a sixth
for adding an optional value to the filtered pixels before storing them in K and a seventh one
for determining what to do in the regions where the operation is undefined (borders).
@end_toggle
@add_toggle_python
@snippet samples/python/tutorial_code/core/mat_mask_operations/mat_mask_operations.py kern
Then call the **cv2.filter2D()** function specifying the input, the output image and the kernel to
use:
@snippet samples/python/tutorial_code/core/mat_mask_operations/mat_mask_operations.py filter2D
@end_toggle
This function is shorter, less verbose and, because there are some optimizations, it is usually faster
than the *hand-coded method*. For example, in my test the built-in filter2D took only about 13
milliseconds while the hand-coded method took around 31 milliseconds. Quite a difference.
For example:
![](images/resultMatMaskFilter2D.png)
@add_toggle_cpp
You can download this source code from [here
](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/core/mat_mask_operations/mat_mask_operations.cpp) or look in the
OpenCV source code libraries sample directory at
@@ -139,9 +179,15 @@ OpenCV source code libraries sample directory at
Check out an instance of running the program on our [YouTube
channel](http://www.youtube.com/watch?v=7PF1tAU9se4) .
@youtube{7PF1tAU9se4}
@end_toggle
\htmlonly
<div align="center">
<iframe width="560" height="349" src="https://www.youtube.com/embed/7PF1tAU9se4?hd=1" frameborder="0" allowfullscreen></iframe>
</div>
\endhtmlonly
@add_toggle_java
You can look in the OpenCV source code libraries sample directory at
`samples/java/tutorial_code/core/mat_mask_operations/MatMaskOperations.java`.
@end_toggle
@add_toggle_python
You can look in the OpenCV source code libraries sample directory at
`samples/python/tutorial_code/core/mat_mask_operations/mat_mask_operations.py`.
@end_toggle

View File

@@ -25,6 +25,8 @@ understanding how to manipulate the images on a pixel level.
- @subpage tutorial_mat_mask_operations
*Languages:* C++, Java, Python
*Compatibility:* \> OpenCV 2.0
*Author:* Bernát Gábor

View File

@@ -439,6 +439,83 @@ Then include this snippet into documentation:
compatibility with the old rST documentation. But newly created samples should be included with the
_snippet_ command, since this method is less affected by changes in the processed file.
### Toggle buttons inclusion commands {#tutorial_documentation_toggle_buttons_commands_include}
Toggle buttons are used to display the selected configuration (e.g. programming language, OS, IDE).
To use the buttons in documentation, _add_toggle_ and _end_toggle_ commands are used.
The command _add_toggle_ can be
- general: _add_toggle{Button Name}_
- for C++: _add_toggle_cpp_
- for Java: _add_toggle_java_
- for Python: _add_toggle_python_
Example:
@verbatim
@add_toggle{Button Name}
text / code / doxygen commands
@end_toggle
@endverbatim
For example using toggle buttons with text and [code](@ref tutorial_documentation_commands_include) snippets:
@verbatim
### Buttons Example
@add_toggle_cpp
Text for C++ button
@snippet samples/cpp/tutorial_code/introduction/documentation/documentation.cpp hello_world
@end_toggle
@add_toggle_java
Text for Java button
@snippet samples/java/tutorial_code/introduction/documentation/Documentation.java hello_world
@end_toggle
@add_toggle_python
Text for Python button
@snippet samples/python/tutorial_code/introduction/documentation/documentation.py hello_world
@end_toggle
@endverbatim
Result looks like this:
### Buttons Example
@add_toggle_cpp
Text for C++ button
@snippet samples/cpp/tutorial_code/introduction/documentation/documentation.cpp hello_world
@end_toggle
@add_toggle_java
Text for Java button
@snippet samples/java/tutorial_code/introduction/documentation/Documentation.java hello_world
@end_toggle
@add_toggle_python
Text for Python button
@snippet samples/python/tutorial_code/introduction/documentation/documentation.py hello_world
@end_toggle
As you can see, the buttons are added automatically under the previous heading.
### Grouping commands {#tutorial_documentation_commands_group}
All code entities should be put into named groups representing OpenCV modules and their internal
@@ -536,6 +613,8 @@ Write the tutorial {#tutorial_documentation_steps_tutorial}
If you want to insert code blocks from this file into your tutorial, mark them with special doxygen comments (see [here](@ref tutorial_documentation_commands_include)).
If you want to write the tutorial in more than one programming language, use the toggle buttons for alternative comments and code (see [here](@ref tutorial_documentation_toggle_buttons_commands_include)).
3. Collect results of the application work. It can be "before/after" images or some numbers
representing performance or even a video.
@@ -552,22 +631,21 @@ Write the tutorial {#tutorial_documentation_steps_tutorial}
5. Modify your new page:
- Add page title and identifier, usually prefixed with <em>"tutorial_"</em> (see [here](@ref tutorial_documentation_md_page)).
You can add a link to the previous and next tutorial using the identifier
@verbatim
@prev_tutorial{identifier}
@next_tutorial{identifier}
@endverbatim
@warning Do **not** write the **hashtag (#)**, example: \n Incorrect: @verbatim @prev_tutorial{#tutorial_documentation} @endverbatim Correct: @verbatim @prev_tutorial{tutorial_documentation} @endverbatim
- Add brief description of your idea and tutorial goals.
- Describe your program and/or its interesting pieces.
- Describe your results, insert previously added images or other results.
To add a video use _htmlonly_, _endhtmlonly_ commands with raw html block inside:
To add a YouTube video, e.g. www.youtube.com/watch?v= **ViPN810E0SU**, use _youtube_{**Video ID**}:
@verbatim
@htmlonly
<div align="center">
<iframe
title="my title" width="560" height="349"
src="http://www.youtube.com/embed/ViPN810E0SU?rel=0&loop=1"
frameborder="0" allowfullscreen align="middle">
</iframe>
</div>
@endhtmlonly
@youtube{ViPN810E0SU}
@endverbatim
- Add bibliographic references if any (see [here](@ref tutorial_documentation_commands_cite)).
6. Add newly created tutorial to the corresponding table of contents. Just find
@@ -576,6 +654,8 @@ Write the tutorial {#tutorial_documentation_steps_tutorial}
@verbatim
- @subpage tutorial_windows_visual_studio_image_watch
_Languages:_ C++, Java, Python
_Compatibility:_ \>= OpenCV 2.4
_Author:_ Wolf Kienzle

View File

@@ -50,11 +50,17 @@ int main( int argc, char* argv[])
imshow( "Output", dst0 );
waitKey();
//![kern]
Mat kernel = (Mat_<char>(3,3) << 0, -1, 0,
-1, 5, -1,
0, -1, 0);
//![kern]
t = (double)getTickCount();
//![filter2D]
filter2D( src, dst1, src.depth(), kernel );
//![filter2D]
t = ((double)getTickCount() - t)/getTickFrequency();
cout << "Built-in filter2D time passed in seconds: " << t << endl;
@@ -63,13 +69,19 @@ int main( int argc, char* argv[])
waitKey();
return 0;
}
//! [basic_method]
void Sharpen(const Mat& myImage,Mat& Result)
{
//! [8_bit]
CV_Assert(myImage.depth() == CV_8U); // accept only uchar images
//! [8_bit]
//! [create_channels]
const int nChannels = myImage.channels();
Result.create(myImage.size(),myImage.type());
//! [create_channels]
//! [basic_method_loop]
for(int j = 1 ; j < myImage.rows-1; ++j)
{
const uchar* previous = myImage.ptr<uchar>(j - 1);
@@ -84,9 +96,13 @@ void Sharpen(const Mat& myImage,Mat& Result)
-current[i-nChannels] - current[i+nChannels] - previous[i] - next[i]);
}
}
//! [basic_method_loop]
//! [borders]
Result.row(0).setTo(Scalar(0));
Result.row(Result.rows-1).setTo(Scalar(0));
Result.col(0).setTo(Scalar(0));
Result.col(Result.cols-1).setTo(Scalar(0));
//! [borders]
}
//! [basic_method]

View File

@@ -0,0 +1,14 @@
#include <iostream>
/**
* @function main
* @brief Main function
*/
int main( void )
{
//! [hello_world]
std::cout << "Hello World!";
//! [hello_world]
return 0;
}

View File

@@ -0,0 +1,139 @@
import org.opencv.core.*;
import org.opencv.imgcodecs.Imgcodecs;
import org.opencv.imgproc.Imgproc;
import javax.swing.*;
import java.awt.Image;
import java.awt.image.BufferedImage;
import java.awt.image.DataBufferByte;
class MatMaskOperationsRun {
public void run() {
//! [load_image]
Mat I = Imgcodecs.imread("../data/lena.jpg");
if(I.empty())
    System.out.println("Error opening image");
//! [load_image]
Image img = toBufferedImage( I );
displayImage("Input Image" , img, 0, 200 );
double t = System.currentTimeMillis();
Mat J = sharpen(I, new Mat());
t = ((double)System.currentTimeMillis() - t)/1000;
System.out.println("Hand written function times passed in seconds: " + t);
Image img2 = toBufferedImage( J );
displayImage("Output Image" , img2, 400, 400 );
Mat K = new Mat();
//![kern]
Mat kern = new Mat( 3, 3, CvType.CV_8S );
int row = 0, col = 0;
kern.put(row ,col, 0, -1, 0, -1, 5, -1, 0, -1, 0 );
//![kern]
System.out.println("kern = \n" + kern.dump());
t = System.currentTimeMillis();
//![filter2D]
Imgproc.filter2D(I, K, I.depth(), kern );
//![filter2D]
t = ((double)System.currentTimeMillis() - t)/1000;
System.out.println("Built-in filter2D time passed in seconds: " + t);
Image img3 = toBufferedImage( K ); // show the filter2D result (K), not the hand-sharpened image
displayImage("filter2D Output Image" , img3, 800, 400 );
}
//! [basic_method]
public static double saturateCastUchar(double x) {
return x > 255.0 ? 255.0 : (x < 0.0 ? 0.0 : x);
}
public Mat sharpen(Mat myImage, Mat Result)
{
//! [8_bit]
myImage.convertTo(myImage, CvType.CV_8U);
//! [8_bit]
//! [create_channels]
int nChannels = myImage.channels();
Result.create(myImage.size(),myImage.type());
//! [create_channels]
//! [basic_method_loop]
for(int j = 1 ; j < myImage.rows()-1; ++j)
{
for(int i = 1 ; i < myImage.cols()-1; ++i)
{
double sum[] = new double[nChannels];
for(int k = 0; k < nChannels; ++k) {
double top = -myImage.get(j - 1, i)[k];
double bottom = -myImage.get(j + 1, i)[k];
double center = (5 * myImage.get(j, i)[k]);
double left = -myImage.get(j , i - 1)[k];
double right = -myImage.get(j , i + 1)[k];
sum[k] = saturateCastUchar(top + bottom + center + left + right);
}
Result.put(j, i, sum);
}
}
//! [basic_method_loop]
//! [borders]
Result.row(0).setTo(new Scalar(0));
Result.row(Result.rows()-1).setTo(new Scalar(0));
Result.col(0).setTo(new Scalar(0));
Result.col(Result.cols()-1).setTo(new Scalar(0));
//! [borders]
return Result;
}
//! [basic_method]
public Image toBufferedImage(Mat m) {
int type = BufferedImage.TYPE_BYTE_GRAY;
if ( m.channels() > 1 ) {
type = BufferedImage.TYPE_3BYTE_BGR;
}
int bufferSize = m.channels()*m.cols()*m.rows();
byte [] b = new byte[bufferSize];
m.get(0,0,b); // get all the pixels
BufferedImage image = new BufferedImage(m.cols(),m.rows(), type);
final byte[] targetPixels = ((DataBufferByte) image.getRaster().getDataBuffer()).getData();
System.arraycopy(b, 0, targetPixels, 0, b.length);
return image;
}
public void displayImage(String title, Image img, int x, int y)
{
ImageIcon icon=new ImageIcon(img);
JFrame frame=new JFrame(title);
JLabel lbl=new JLabel(icon);
frame.add(lbl);
frame.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE);
frame.pack();
frame.setLocation(x, y);
frame.setVisible(true);
}
}
public class MatMaskOperations {
public static void main(String[] args) {
// Load the native library.
System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
new MatMaskOperationsRun().run();
}
}

View File

@@ -0,0 +1,9 @@
public class Documentation {
public static void main (String[] args) {
//! [hello_world]
System.out.println ("Hello World!");
//! [hello_world]
}
}

View File

@@ -0,0 +1,57 @@
import time
import numpy as np
import cv2
## [basic_method]
def sharpen(my_image):
    my_image = cv2.cvtColor(my_image, cv2.CV_8U)
    height, width, n_channels = my_image.shape
    result = np.zeros(my_image.shape, my_image.dtype)

    ## [basic_method_loop]
    for j in range(1, height - 1):
        for i in range(1, width - 1):
            for k in range(0, n_channels):
                sum = 5 * my_image[j, i, k] - my_image[j + 1, i, k] - my_image[j - 1, i, k] \
                      - my_image[j, i + 1, k] - my_image[j, i - 1, k]

                if sum > 255:
                    sum = 255
                if sum < 0:
                    sum = 0

                result[j, i, k] = sum
    ## [basic_method_loop]

    return result
## [basic_method]
I = cv2.imread("../data/lena.jpg")
cv2.imshow('Input Image', I)
t = time.time()
J = sharpen(I)
t = time.time() - t
print "Hand written function time passed in seconds: %s" % t
cv2.imshow('Output Image', J)
t = time.time()
## [kern]
kernel = np.array([ [0,-1,0],
[-1,5,-1],
[0,-1,0] ],np.float32) # kernel should be floating point type
## [kern]
## [filter2D]
K = cv2.filter2D(I, -1, kernel) # ddepth = -1, means destination image has depth same as input image.
## [filter2D]
t = time.time() - t
print "Built-in filter2D time passed in seconds: %s" % t
cv2.imshow('filter2D Output Image', K)
cv2.waitKey(0)
cv2.destroyAllWindows()

View File

@@ -0,0 +1,5 @@
print('Not showing this text because it is outside the snippet')
## [hello_world]
print('Hello world!')
## [hello_world]