Thursday 5 May 2016

OpenCV C++ Code for Split and Merge -II

In the previous tutorial we split the R,G,B channels of a color image using opencv's function called split().


Refer this article:
http://opencv-code.blogspot.in/2016/12/how-to-split-color-images-merge-single-channel-images-opencv-tutorials.html


First of all why there is a need to split the channels of a color image?
As explained in the previous articles, it helps us to gauge the individual contribution of the respective channel in the color image. It also has other applications, such as object detection based on color recognition, i.e. we can select a green object from the background and track it.


So the process of splitting color images without using split() function in opencv is as shown below:

// OpenCV Channel Splitting  Tutorial 
// Splits a BGR color image into its three single-channel planes pixel by
// pixel, i.e. without using OpenCV's built-in split() function.
#include <opencv2/core/core.hpp> 
#include <opencv2/highgui/highgui.hpp> 
#include <iostream>
 
using namespace cv;
using namespace std;
 
int main()
{
 Mat src1, src2, src3, src4;
 src1 = imread("C:\\Users\\arjun\\Desktop\\opencv.png",CV_LOAD_IMAGE_COLOR);

 // Validate the load BEFORE using src1.rows / src1.cols below.
 if( !src1.data ) { printf("Error loading src1 \n"); return -1;}

 // Single-channel (CV_8UC1) destination planes. Mat::zeros replaces the
 // original Mat::eye: an identity matrix is misleading here, even though
 // every pixel is overwritten in the loop anyway.
 src2 = Mat::zeros(src1.rows, src1.cols, CV_8UC1); // Blue plane
 src3 = Mat::zeros(src1.rows, src1.cols, CV_8UC1); // Green plane
 src4 = Mat::zeros(src1.rows, src1.cols, CV_8UC1); // Red plane
 
for (int i=0; i<src1.cols ; i++){
for (int j=0 ; j<src1.rows ; j++)
 { 
  // OpenCV stores color pixels in B,G,R channel order.
  Vec3b color1 = src1.at<Vec3b>(Point(i,j));

  src2.at<uchar>(Point(i,j)) = color1.val[0]; // Blue channel
  src3.at<uchar>(Point(i,j)) = color1.val[1]; // Green channel
  src4.at<uchar>(Point(i,j)) = color1.val[2]; // Red channel
 }
 }
namedWindow("Original Image",CV_WINDOW_AUTOSIZE); 
imshow("Original Image", src1);
 
namedWindow("Red Channel Image",CV_WINDOW_AUTOSIZE); 
imshow("Red Channel Image", src4);
imwrite("C:\\Users\\arjun\\Desktop\\opencv-red.png",src4);
 
namedWindow("Green Channel Image",CV_WINDOW_AUTOSIZE); 
imshow("Green Channel Image", src3); 
imwrite("C:\\Users\\arjun\\Desktop\\opencv-green.png",src3);
 
namedWindow("Blue Channel Image",CV_WINDOW_AUTOSIZE); 
imshow("Blue Channel Image", src2); 
imwrite("C:\\Users\\arjun\\Desktop\\opencv-blue.png",src2);
 
 waitKey(0);
 return 0;
}



Input:


Output:
Red:

Green:

Blue:



Similarly the process of merging the channels in opencv again so that only individual color channels are displayed can be done as:
// OpenCV Channel Merging  Tutorial 
// Builds three 3-channel images in which exactly one of the B, G, R
// channels of the source is kept, so each channel is displayed in its own
// color instead of as a grayscale plane.
#include <opencv2/core/core.hpp> 
#include <opencv2/highgui/highgui.hpp> 
#include <iostream>
 
using namespace cv;
using namespace std;
 
int main()
{
 Mat src1, src2, src3, src4;
 src1 = imread("C:\\Users\\arjun\\Desktop\\opencv.png",CV_LOAD_IMAGE_COLOR);

 // Validate the load BEFORE using src1.rows / src1.cols below.
 if( !src1.data ) { printf("Error loading src1 \n"); return -1;}

 // Three-channel (CV_8UC3) destinations — unlike the CV_8UC1 planes of
 // the splitting example, the outputs here must remain color images.
 // Mat::zeros replaces the original Mat::eye (identity is misleading,
 // even though every pixel is overwritten below).
 src2 = Mat::zeros(src1.rows, src1.cols, CV_8UC3); // Blue only
 src3 = Mat::zeros(src1.rows, src1.cols, CV_8UC3); // Green only
 src4 = Mat::zeros(src1.rows, src1.cols, CV_8UC3); // Red only
 
for (int i=0; i<src1.cols ; i++){
for (int j=0 ; j<src1.rows ; j++)
 { 
  // Pixels are stored in B,G,R order; keep one channel, zero the others.
  Vec3b color1 = src1.at<Vec3b>(Point(i,j));

  src2.at<Vec3b>(Point(i,j)) = Vec3b(color1.val[0], 0, 0); // Blue channel
  src3.at<Vec3b>(Point(i,j)) = Vec3b(0, color1.val[1], 0); // Green channel
  src4.at<Vec3b>(Point(i,j)) = Vec3b(0, 0, color1.val[2]); // Red channel
 }
 }
namedWindow("Original Image",CV_WINDOW_AUTOSIZE); 
imshow("Original Image", src1);
 
namedWindow("Red Channel Image",CV_WINDOW_AUTOSIZE); 
imshow("Red Channel Image", src4);
imwrite("C:\\Users\\arjun\\Desktop\\opencv-red.png",src4);
 
namedWindow("Green Channel Image",CV_WINDOW_AUTOSIZE); 
imshow("Green Channel Image", src3); 
imwrite("C:\\Users\\arjun\\Desktop\\opencv-green.png",src3);
 
namedWindow("Blue Channel Image",CV_WINDOW_AUTOSIZE); 
imshow("Blue Channel Image", src2); 
imwrite("C:\\Users\\arjun\\Desktop\\opencv-blue.png",src2);
 
 waitKey(0);
 return 0;
}



Input:

Output:
Red:

Green:

Blue:



Note the difference between the two codes:
In channel splitting we have taken 8UC1 i.e a 8 bit unsigned single channel image.
In channel merging we have taken 8UC3 i.e a 8 bit unsigned three channel image.

Saturday 30 April 2016

OpenCV C++ Code for Split and Merge

This tutorial gives a deep insight of splitting and merging function of opencv. Thus enabling us to split a color image into their respective RGB channels:

Here we want to split a color image into its three channels called "Red" ,"Green" and "Blue".

Splitting a color image into its respective RGB channels gives us an idea about the component of color which is present in an original image.

OpenCV provides built in function called “split()” for this purpose.


Syntax:
C++:void split(const Mat& src, Mat* mvbegin)

Parameters:
src input multi-channel array.
mv output array or vector of arrays.

In the first variant of the function the number of arrays must match src.channels();
the arrays themselves are reallocated, if needed.



The function “merge()” does just the opposite to that of split. It creates one multichannel array out of several single-channel ones.


Syntax: C++: void merge(const Mat* mv, size_t count, OutputArray dst)

Parameters:
mv – input array or vector of matrices to be merged; all the matrices in mv must have the same size and the same depth.
count – number of input matrices when mv is a plain C array; it must be greater than zero.
dst – output array of the same size and the same depth as mv[0]. The number of channels will be the total number of channels in the matrix array. The function merge() merges several arrays to make a single multi-channel array.


Here is the code below:
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream>
 
using namespace cv;
using namespace std;
 
// Demonstrates cv::split(): a BGR image is separated into its three
// single-channel planes, each displayed as a grayscale image.
int main()
{
 
    Mat image;
    image = imread("C:\\Users\\arjun\\Desktop\\rgbimage.png", CV_LOAD_IMAGE_COLOR);   // Read the file
 
    if(! image.data )                              // Check for invalid input
    {
        cout <<  "Could not open or find the image" << std::endl ;
        return -1;
    }
 
    
 namedWindow( "Original Image", CV_WINDOW_AUTOSIZE );
 imshow( "Original Image", image );
 
    Mat rgbchannel[3];
    // The actual splitting. imread loads in B,G,R order, so
    // rgbchannel[0] = Blue, [1] = Green, [2] = Red.
    split(image, rgbchannel);
 
    // BUGFIX: the window created by namedWindow and the title passed to
    // imshow must match, and the labels must follow the BGR channel order
    // (the original showed channel 0 — Blue — in a window titled "Red").
 namedWindow("Blue",CV_WINDOW_AUTOSIZE);
 imshow("Blue", rgbchannel[0]);
 
 namedWindow("Green",CV_WINDOW_AUTOSIZE);
 imshow("Green", rgbchannel[1]);
 
 namedWindow("Red",CV_WINDOW_AUTOSIZE);
 imshow("Red", rgbchannel[2]);
 
    waitKey(0);//Wait for a keystroke in the window
    return 0;
}



Input:
Output:





Note:
You might have observed that we obtain the grayscale images after splitting the color images into Red,Green and Blue colors.
Reason:
Split function splits the multichannel image into single channel arrays containing the identical pixel value of the original image.
So since we have created single channel images,opencv imshow function treats it as a grayscale image.
For a colour image, we need to create a three channel image.

The OpenCV C++ code is given below:-
#include "opencv2/objdetect/objdetect.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream>
#include <stdio.h>
 
using namespace std;
using namespace cv;
 
// Splits a BGR image, then re-merges each channel with two zero planes so
// every channel is displayed in its own color instead of grayscale.
int main()
{
    Mat image=imread("C:\\Users\\arjun\\Desktop\\aaa.png",1);    

    // Validate the load before any further use of the image.
    if (!image.data)
    {
        cout << "Could not open or find the image" << endl;
        return -1;
    }

    namedWindow("Original Image",1);
 imshow("Original Image",image);
 
    // Split the image into different channels (B, G, R order).
    // BUGFIX: the original called split(src, ...) but 'src' was never
    // declared — the loaded image is named 'image'.
    vector<Mat> rgbChannels(3);
    split(image, rgbChannels);
 
    // Show individual channels
    Mat g, fin_img;
    // Zero single-channel plane, reused for the two suppressed channels.
    g = Mat::zeros(Size(image.cols, image.rows), CV_8UC1);
      
    // Showing Red Channel
    // G and B channels are kept as zero matrix for visual perception
    {
    vector<Mat> channels;
    channels.push_back(g);
    channels.push_back(g);
    channels.push_back(rgbChannels[2]);
 
    /// Merge the three channels
    merge(channels, fin_img);
    namedWindow("Red",1);
 imshow("Red", fin_img);
    }
 
    // Showing Green Channel
    {
    vector<Mat> channels;
    channels.push_back(g);
    channels.push_back(rgbChannels[1]);
    channels.push_back(g);    
    merge(channels, fin_img);
    namedWindow("Green",1);
 imshow("Green", fin_img);
    }
 
    // Showing Blue Channel
    {
    vector<Mat> channels;
    channels.push_back(rgbChannels[0]);
    channels.push_back(g);
    channels.push_back(g);
    merge(channels, fin_img);
    namedWindow("Blue",1);
    imshow("Blue", fin_img);
    }
 
    waitKey(0);
    return 0;
 
}


Input:

Output:





Here after splitting the image by split(image, rgbChannels)
We get three channels of which
  • Rgbchannel[0] corresponds to that of “Blue” color image.
  • Rgbchannel[1] corresponds to that of “Green” color image
  • Rgbchannel[2] corresponds to that of “Red” color image
Since the split function splits the multi-channel image into single channel ,if we display these channels directly we would get the gray-scale image of RGB channels.
Thus we need to create a matrix of zeros and push that into other channels.

Mat::zeros(Size(image.cols, image.rows), CV_8UC1) :
Creates a matrix of Zeros of single channel whose dimension is same as that of the original image.

Then we have initialized channels as the vector and push_back always puts a new element at the end of the vector.
Here the new element is a 8 bit single channel matrix of Zeros.


The BGR color ordering is the default order of OpenCV.
Refer:
http://opencv-code.blogspot.in/2016/12/how-to-access-extract-pixel-value-particular-location-image.html

Thus for displaying the red channels… we need to make the first two channels as Zeros and create a 3channel image with merge function to get the colored image.
Similar is the case with other channels of image.

Monday 25 April 2016

OpenCV ImageBlender using Addweighted function


The task of blending or mixing two images linearly can be achieved by addWeighted function provided by OpenCV.



The syntax of OpenCV addWeighted function goes as:
C++:void addWeighted(src1, alpha, src2, beta, gamma, dst, int dtype=-1)

Parameters:

src1first input array.
alphaweight of the first array elements.
src2second input array of the same size and channel number as src1.
betaweight of the second array elements.
dstoutput array that has the same size and number of channels as the input arrays.
gammascalar added to each sum.
dtypeoptional depth of the output array; when both input arrays have the same depth, dtype can be set to -1, which will be equivalent to src1.depth().


Linear Blending means adding two images pixel by pixel.
Thus we can use the function
c(x)=(1-α)*a(x)+α*b(x)
where a(x) and b(x) are the two source images.
c(x) is the resultant blended image.

addWeighted( src1, alpha, src2, beta, 0.0, dst);
Thus addWeighted function performs the same thing as dst = α*src1+β*src2+γ
Here γ=0 and β=1-α



Why do we choose β=1-α?
Since we are dealing with 8-bit images, pixel values can range from 0 to 255. Thus while adding two images, the pixel value of the resultant image should also lie between 0 and 255. Hence if we multiply a particular co-ordinate of one image by α, the other image's respective co-ordinate needs to be multiplied by 1-α. The sum of the weights is then α+(1-α)=1, so the resulting pixel value stays within the 0-255 range.

Example:-
Consider that pixel value of src1 at particular co-ordinate is 230.
 And that of src2 is 215.
Now, we need to blend these two images linearly, for that we need to blend the pixel values of the two images.
Thus if we choose α=0.5 and β=0.7 .
The pixel value at that particular co-ordinate would be
         c(x)=α*a(x)+β*b(x) =0.5*230+0.7*215 =265.5

Thus the β value needs to be less than or equal to 1-α. Hence here we have chosen it equal to 1-α to be on the safer side, but it can be less than 1-α too. 


 The code for it goes as below:
// OpenCV Image Blending Tutorial using addWeighted function
// Linear blend: dst = alpha*src1 + beta*src2, with beta = 1 - alpha so the
// result stays inside the 0..255 range of an 8-bit image.
#include <opencv2/core/core.hpp> 
#include <opencv2/highgui/highgui.hpp> 
#include <iostream>
 
using namespace cv;
using namespace std;
 
int main()
{
 double alpha = 0.5;  // default weight, used if the user input is out of range
 double beta; 
 double input;
 
 Mat src1, src2, dst;
 /// Read image ( same size, same type )
 src1 = imread("C:\\Users\\arjun\\Desktop\\green.jpg");
 src2 = imread("C:\\Users\\arjun\\Desktop\\blue.jpg");
 
 if( !src1.data ) { printf("Error loading src1 \n"); return -1; }
 if( !src2.data ) { printf("Error loading src2 \n"); return -1; }
  
 ///Comparing whether the two images are of same size or not
 // BUGFIX: reject the pair when EITHER dimension differs. The original
 // used &&, which accepted images where only one dimension matched.
 if (src1.cols != src2.cols || src1.rows != src2.rows)
 {
  printf("Error:Images must be of the same size \n");
  return -1;
 }
 /// Ask the user enter alpha
 std::cout<<" Simple Linear Blender "<<std::endl;
 std::cout<<"-----------------------"<<std::endl;
 std::cout<<"* Enter alpha [0-1]: ";
 std::cin>>input;
 
 /// We use the alpha provided by the user if it is between 0 and 1
 if( input >= 0.0 && input <= 1.0 )
   { 
    alpha = input;
   }
 
 beta = ( 1.0 - alpha );
 addWeighted( src1, alpha, src2, beta, 0.0, dst);
 
 /// Create Windows
 namedWindow("Linear Blend", 1);
 imshow( "Linear Blend", dst );
 
 namedWindow("Original Image1", 1);
 imshow( "Original Image1", src1 );
 
 namedWindow("Original Image2", 1);
 imshow( "Original Image2", src2 );
 waitKey(0);
 return 0;
}



Input:
Blue


Green
Output:
Cyan


The above code of blending two images linearly by using Trackbar is as shown below:
// OpenCV Image blending Tutorial using addWeighted function and trackbar
#include <opencv2/core/core.hpp> 
#include <opencv2/highgui/highgui.hpp> 
#include <iostream>
 
using namespace cv;
using namespace std;
 
// State shared between main() and the trackbar callback.
double alpha;                      // weight of src1 (slider position / max)
double beta;                       // weight of src2, always 1 - alpha
const int blend_slider_max = 100;  // slider range 0..100 maps to alpha 0..1
int alpha_slider;                  // current slider position
Mat src1, src2, dst;
 
// Trackbar callback: recompute the blend for the current slider position
// and refresh the display window.
void blend_trackbar( int , void* )
{
 alpha = (double) alpha_slider/blend_slider_max;
 beta = (double)( 1.0 - alpha );
    addWeighted( src1, alpha, src2, beta, 0.0, dst);
    imshow( "Linear Blend", dst );
}
 
int main()
{
 // Read image ( same size, same type )
 src1 = imread("C:\\Users\\arjun\\Desktop\\opencv_image1.jpg");
 src2 = imread("C:\\Users\\arjun\\Desktop\\opencv_image2.jpg");
 
 if( !src1.data ) { printf("Error loading src1 \n"); return -1; }
 if( !src2.data ) { printf("Error loading src2 \n"); return -1; }
  
 ///Comparing whether the two images are of same size or not
 // BUGFIX: reject the pair when EITHER dimension differs. The original
 // used &&, which accepted images where only one dimension matched.
 if (src1.cols != src2.cols || src1.rows != src2.rows)
 {
  printf("Error:Images must be of the same size \n");
  return -1;
 }
 
 // Create Windows 
 namedWindow("Linear Blend",CV_WINDOW_AUTOSIZE); 
 createTrackbar( "Blending", "Linear Blend", &alpha_slider, blend_slider_max, blend_trackbar );
 blend_trackbar( alpha_slider, 0 );  // render the initial blend once
 
 namedWindow("Original Image1", 1);
 imshow( "Original Image1", src1 );
 
 namedWindow("Original Image2", 1);
 imshow( "Original Image2", src2 );
 
 waitKey(0);
 return 0;
}



Input Image1:-
Input Image2:-
Output:-

Wednesday 20 April 2016

OpenCV Image Blending Tutorial

Images are basically matrices of pixel values. Thus image blending, or image merging in layman's terms, simply means adding the pixel values at particular co-ordinates of two images.


Note:-Images should be of the same size.


For e.g if the pixel value of two gray-scale images at a particular location are 120 and 35 respectively.Then after blending the pixel value at that particular co-ordinate would become 155.


Note:
Grayscale image is the one where each pixel is stored as a single byte(8 bits). Thus the pixel values can range from 0 to 255. Where 0 denotes black and 255 denotes white.

So,What would happen if the pixel value of the two gray-scale images when merged exceed 255?
For e.g
Let the pixel value at a particular co-ordinate be 250 (would appear white) and that of the other image at the same co-ordinate be 120 (would appear dark). After merging, the image would appear white at that point, because the pixel value at the respective co-ordinate of the merged image is clamped to 255 (since 250+120=370>255).

Thus the lowest value of the pixel is 0. So even if we multiply a pixel value by -1.
The pixel value of the modified image would be 0.
i.e. If the pixel value at a particular co-ordinate is 255 (white) and if we multiply it by -1.Then the pixel at that point of image would become 0 (black).

We can check the above concept by accessing the pixel value of the merged image at a particular point.
Refer:
http://opencv-code.blogspot.in/2016/12/how-to-access-extract-pixel-value-particular-location-image.html


The code for merging/blending the two images are as shown below:
#include <opencv2/core/core.hpp> 
#include <opencv2/highgui/highgui.hpp> 
#include <iostream>
 
 
using namespace cv;
using namespace std;
 
// Blends two images by direct per-pixel addition. cv::Mat operator+
// saturates each 8-bit channel at 255.
int main()
{
  
 Mat src1, src2, src3;
 /// Read image ( same size, same type )
 src1 = imread("C:\\Users\\arjun\\Desktop\\red.jpg");
 src2 = imread("C:\\Users\\arjun\\Desktop\\green.jpg");
 
 // BUGFIX: validate the loads BEFORE reading .cols/.rows below (the
 // original checked !data only after the size comparison and the add).
 if( !src1.data ) { printf("Error loading src1 \n"); return -1; }
 if( !src2.data ) { printf("Error loading src2 \n"); return -1; }
  
 ///Comparing whether the two images are of same size or not
 // BUGFIX: reject the pair when EITHER dimension differs (was &&).
 if (src1.cols != src2.cols || src1.rows != src2.rows)
 {
  printf("Error:Images must be of the same size \n");
  return -1;
 }
  
 //Merging two images (saturating per-pixel addition)
 src3=src1 + src2;
   
 /// Create Windows
 namedWindow("First Image", 1);
 imshow( "First Image", src1 );
 
 namedWindow("Second Image", 1);
 imshow( "Second Image", src2 );
 
 namedWindow("Blend1 Image", 1);
 imshow( "Blend1 Image", src3 );
 
 waitKey(0);
 return 0;
}


Input Images:
Red

Green

Output:


Multiplying Pixel Value By -1:

What would happen if we multiply the pixels by -1?
Since the minimum value of the pixel can be 0.Thus whole of the image would appear black.
Note:Here again we have assumed that the images are of same size.Hence we have not included the code for comparing the size of images which are to be merged.
#include <opencv2/core/core.hpp> 
#include <opencv2/highgui/highgui.hpp> 
#include <iostream>
 
 
using namespace cv;
using namespace std;
 
// Multiplying an 8-bit image by -1 drives every product negative, and
// cv::Mat arithmetic saturates below 0 — so the result is all black.
int main()
{
  
 Mat src1, src2;
 /// Read image ( same size, same type )
 src1 = imread("C:\\Users\\arjun\\Desktop\\red.jpg");
  
 // BUGFIX: validate the load BEFORE operating on src1 (the original
 // multiplied first and checked !data afterwards).
 if( !src1.data ) { printf("Error loading src1 \n"); return -1; }

 //Merging two images
 src2=src1 * (-1);
  
 //src2 = imwrite( "C:\\Users\\arjun\\Desktop\\new1.jpg",src2);
 //src2  = imread("C:\\Users\\arjun\\Desktop\\new1.jpg");
 
 /// Create Windows
 namedWindow("First Image", 1);
 imshow( "First Image", src1 );
 
 namedWindow("Second Image", 1);
 imshow( "Second Image", src2 );
 
 waitKey(0);
 return 0;
}

Output:


Dont you think by including a for loop we can achieve a smooth transition effect between two images.
#include <opencv2/core/core.hpp> 
#include <opencv2/highgui/highgui.hpp> 
#include <iostream>
 
 
using namespace cv;
using namespace std;
 
// Cross-fades between two images by sweeping the blend weight over time.
int main()
{
 Mat first, second, blended;

 /// Read image ( same size, same type )
 first  = imread("C:\\Users\\arjun\\Desktop\\red.jpg");
 second = imread("C:\\Users\\arjun\\Desktop\\green.jpg");

 //Checking whether images are loaded or not
 if( !first.data )  { printf("Error loading src1 \n"); return -1; }
 if( !second.data ) { printf("Error loading src2 \n"); return -1; }

 // Weight w runs 0, 5, ..., 255: each frame is w/255 of the first image
 // plus (255-w)/255 of the second, displayed for 250 ms.
 for (double w = 0; w <= 255; w += 5)
 {
  blended = (w * first) / 255 + ((255 - w) * second) / 255;

  /// Create Windows
  namedWindow("Blend Image", 1);
  imshow("Blend Image", blended);

  waitKey(250);
 }
 return 0;
}

Friday 15 April 2016

Digital Negative of an Image in OpenCV

Digital negative, as the name suggests, means inverting the pixel values of an image so that the bright pixels appear dark and the dark ones appear bright.

Thus the darkest pixel in the original image would be the brightest in that of its negative. A good example of it can be an X-ray image.


Now, Considering an 8 bit image.
The pixel value can range from 0 to 255.
Thus to obtain the negative we need to subtract each pixel values of an image by 255.

Hence for an k-bit image.
The pixel value will range from 0 to [(2^k)-1].
Thus we would have to subtract each pixel of an image by [(2^k)-1].



The below code is in opencv for digital negative of an 8-bit grayscale image:
// OpenCV Digital Negative Tutorial 
// Produces the negative of an 8-bit grayscale image: every pixel value v
// is replaced by 255 - v (for a k-bit image, by (2^k - 1) - v).
#include <opencv2/core/core.hpp> 
#include <opencv2/highgui/highgui.hpp> 
#include <iostream>
 
using namespace cv;
using namespace std;
 
int main()
{
 Mat src1, src2;
 src1 = imread("C:\\Users\\arjun\\Desktop\\image_opencv.jpg",CV_LOAD_IMAGE_GRAYSCALE);
 
 // Validate the load BEFORE using src1.rows / src1.cols below.
 // (Also fixes the "loadind" typo of the original message.)
 if( !src1.data ) { printf("Error loading src1 \n"); return -1;}

 src2 = Mat::zeros(src1.rows,src1.cols, CV_8UC1);
 
for (int i=0; i<src1.cols ; i++)
{
for (int j=0 ; j<src1.rows ; j++)
 { 
 // Invert the pixel. (The original also read a redundant 'color2'
 // from src1 that was immediately overwritten — removed.)
 Scalar color1 = src1.at<uchar>(Point(i, j));
 src2.at<uchar>(Point(i,j)) = 255 - color1.val[0]; 
 }
 }
namedWindow("Digital Negative Image",CV_WINDOW_AUTOSIZE); 
imshow("Digital Negative Image", src2); 
//imwrite("C:\\Users\\arjun\\Desktop\\digitalnegative.jpg",src2);
 
namedWindow("Original Image",CV_WINDOW_AUTOSIZE); 
imshow("Original Image", src1);
 
 waitKey(0);
 return 0;
}


Input:

Output:


Applications:
It has various immense application in the field of medical in finding the minute details of a tissue. Also in the field of astronomy for observing distant stars.


Input:

Output:

Sunday 10 April 2016

Accessing all the pixels of an Image

To access full pixel value of an image,
We can use :
    Vec3b imagepixel = image.at(x,y);
in for loop to change the value of the co-ordinates (x,y) to cover each row and column.


/*Displaying the Pixel value of the whole Image using Loops*/
 
#include <opencv2/core/core.hpp>  
#include <opencv2/highgui/highgui.hpp>  
#include <iostream> 
 
  using namespace std;  
  using namespace cv;  
 
  int main() 
  {  
    // Load the color image, then walk every (row, col) position and
    // stream each BGR pixel triplet to standard output.
    Mat image = imread("C:\\Users\\arjun\\Desktop\\image003.png", CV_LOAD_IMAGE_COLOR);

    // Bail out when the file could not be read.
    if (!image.data)
    {
      cout << "No image data \n";
      return -1;
    }

    for (int row = 0; row < image.rows; ++row)
    {
      for (int col = 0; col < image.cols; ++col)
      {
        Vec3b px = image.at<Vec3b>(row, col);
        cout << px;   // print this pixel's value
      }
    }

    // Show the source image as well.
    namedWindow("Display Image");
    imshow("Display Image", image);

    waitKey(0);
    return 0;
  }


Input:


Output:



What will happen if we put ,
     cout<<image;
Does it print the whole pixel array of the image?


/*Displaying the Pixel value of the whole Image*/
#include <opencv2/core/core.hpp>  
#include <opencv2/highgui/highgui.hpp>  
#include <iostream> 
 
  using namespace std;  
  using namespace cv;  
 
  int main() 
  {  
    // Streaming a cv::Mat to cout prints the entire pixel matrix at once.
    Mat image = imread("C:\\Users\\arjun\\Desktop\\image003.png", CV_LOAD_IMAGE_COLOR);

    // Bail out when the file could not be read.
    if (!image.data)
    {
      cout << "No image data \n";
      return -1;
    }

    // Print every pixel of the image in one shot.
    cout << image;

    // Show the source image as well.
    namedWindow("Display Image");
    imshow("Display Image", image);

    waitKey(0);
    return 0;
 }

Input:


Output:



Notice the difference in output of both the pixel arrays

Tuesday 5 April 2016

Modifying a particular pixel value of an Image

In the previous tutorials we learnt how to access a pixel value of a particular co-ordinate,
Refer :
http://opencv-code.blogspot.in/2016/12/how-to-access-extract-pixel-value-particular-location-image.html

This,

OpenCV C++ tutorial

is about accessing and changing the pixel value at a particular co-ordinate of an Image.
Here is the code below:
/*Displaying the Pixel value of the whole Image using Loops*/
 
#include <opencv2/core/core.hpp>  
#include <opencv2/highgui/highgui.hpp>  
#include <iostream> 
 
  using namespace std;  
  using namespace cv;  
 
int main() 
  {  
    // Change one pixel of an image, save the result to disk, reload it,
    // and show both versions side by side.
    Mat original, modified;
    original = imread("C:\\Users\\arjun\\Desktop\\image003.png", CV_LOAD_IMAGE_COLOR);

    // Bail out when the file could not be read.
    if (!original.data)
    {
     cout << "No image data \n";
     return -1;
    }

    namedWindow("Original Image");
    imshow("Original Image", original);

    // Overwrite the BGR value at (100,200) with (100, 0, 0).
    Vec3b px = original.at<Vec3b>(Point(100,200));
    px.val[0] = 100;   // Blue
    px.val[1] = 0;     // Green
    px.val[2] = 0;     // Red
    original.at<Vec3b>(Point(100,200)) = px;

    // Persist the change, then read it back to display the saved file.
    imwrite("C:\\Users\\arjun\\Desktop\\mod_image.png", original);
    modified = imread("C:\\Users\\arjun\\Desktop\\mod_image.png", CV_LOAD_IMAGE_COLOR);

    if (!modified.data)
    {
     cout << "No image data \n";
     return -1;
    }

    namedWindow("Modified Image");
    imshow("Modified Image", modified);
    waitKey(0);
    return 0;
   }

Input:


Modified Image:


Wednesday 30 March 2016

Accessing Pixel Value at a Location(x,y)

Let us consider a 3 channel image of BGR color ordering
(The BGR color ordering is the default order returned  by imread)
Here the order of the channel is reverse

(We generally use RGB color model while describing about an image.In BGR the color model is same except the order of the channel is reverse)


We use :
Vec3b imagepixel = image.at(x,y);


/*Reading the pixel value of an image at a particular location*/
#include <opencv2/core/core.hpp>  
#include <opencv2/highgui/highgui.hpp>  
#include <iostream> 
 
  using namespace std;  
  using namespace cv;  
 
  int main() 
  {  
    // Fetch and print the BGR pixel stored at row 250, column 500.
    Mat image = imread("C:\\Users\\arjun\\Desktop\\image003.png", CV_LOAD_IMAGE_COLOR);

    // Bail out when the file could not be read.
    if (!image.data)
    {
      cout << "No image data \n";
      return -1;
    }

    // at<Vec3b>(row, col): the channels come back in B,G,R order.
    Vec3b imagepixel = image.at<Vec3b>(250, 500);
    cout << "imagepixel(BGR)=" << imagepixel << "\n";

    // Show the source image as well.
    namedWindow("Display Image");
    imshow("Display Image", image);

    waitKey(0);
    return 0;
   }



Input:


Output:





/*Reading the pixel value of an image at a particular location*/
 
 
#include <opencv2/core/core.hpp>  
#include <opencv2/highgui/highgui.hpp>  
#include <iostream> 
 
  using namespace std;  
  using namespace cv;  
 
int main() 
  {  
    Mat image; 
    //Reading the color image 
    image = imread("C:\\Users\\arjun\\Desktop\\image003.png", CV_LOAD_IMAGE_COLOR);  
 
     //If image not found  
     if (!image.data)
     {  
      cout << "No image data \n";  
      return -1;  
     } 
 
    // Interactive loop: keep asking for co-ordinates and print the pixel
    // value found there.
    while(1)
    {
     //Taking inputs from the user for the co-ordinates of the image 
      int i,j;
      cout<<"Enter the co-ordinates of the image where you want to find the pixel value (i,j): \n";
      cout<<"i<"<<image.rows<<"\t"<<"&"<<"\t"<<"j<"<<image.cols<<"\n";
      
     cout<<"i= ";  cin>>i;
     cout<<"j= ";  cin>>j;
     
     // BUGFIX: the original nested check printed nothing when i was valid
     // but j was out of range, and it accepted negative values (an
     // out-of-bounds at<> access). Validate both co-ordinates at once.
     if (i >= 0 && i < image.rows && j >= 0 && j < image.cols)
      {
       //Reading pixel value at location (i,j)
       Vec3b imagepixel = image.at<Vec3b>(i,j);
       //Displaying the pixel value
       cout<<"imagepixel(BGR)="<<imagepixel<<"\n" ;
      }
     else
      {
       cout<<"Image Co-ordinates value out of range \n";
      }
 
     }
        return 0; 
  }


Input:


Output:



Friday 25 March 2016

OpenCV C++ Code for Drawing a Chessboard Pattern

This OpenCV Tutorial is about drawing a Chess Board Pattern.
Refer the Code Below:



//Opencv Example of Drawing a Chess Board Pattern
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
using namespace cv;
  
int main( )
{ 
  // Each square is one eighth of the 400-pixel board.
  int cell = 400 / 8;

  // Start from an all-black canvas.
  Mat board = Mat::zeros( 400, 400, CV_8UC3 );

  // First pass: filled white squares whose top-left corner sits on an
  // even (row, col) cell, animated one square at a time.
  for (int row = 0; row <= 3; ++row)
  {
    for (int col = 0; col <= 3; ++col)
    {
      rectangle( board, Point( col * cell * 2, row * cell * 2 ),
                 Point( cell * (2 * col + 1), cell * (2 * row + 1) ),
                 Scalar( 255, 255, 255 ), -1, 4 );
      imshow("Image1", board);
      waitKey( 250 );
    }
  }

  // Second pass: the complementary set of white squares.
  for (int row = 0; row <= 3; ++row)
  {
    for (int col = 0; col <= 3; ++col)
    {
      rectangle( board, Point( cell * (2 * col + 1), cell * (2 * row + 1) ),
                 Point( (col + 1) * cell * 2, (row + 1) * cell * 2 ),
                 Scalar( 255, 255, 255 ), -1, 4 );
      imshow("Image1", board);
      waitKey( 250 );
    }
  }

  waitKey( 0 );
  return 0;
}



Output:

Sunday 20 March 2016

Opencv C++ Code for drawing Rectangle

Draws a simple, thick, or filled up-right rectangle.

Syntax:
C++ :void rectangle(Mat& img, Point pt1, Point pt2, const Scalar& color, int thickness=1, int lineType=8, int shift=0)

Parameters:
img Image.
pt1 Vertex of the rectangle.
pt2 Vertex of the rectangle opposite to pt1.
rec Alternative specification of the drawn rectangle.
color Rectangle color or brightness (grayscale image).
thickness Thickness of lines that make up the rectangle. Negative values, like CV_FILLED , mean that the function has to draw a filled rectangle.
lineType Type of the line. See the line() description.
shift Number of fractional bits in the point coordinates.



#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
using namespace cv;
  
int main( )
{    
  // Start from an all-black 400x400 canvas.
  Mat canvas = Mat::zeros( 400, 400, CV_8UC3 );

  // Outline a red square from (100,100) to (300,300),
  // 1 px thick, 8-connected line type.
  rectangle( canvas, Point( 100, 100 ), Point( 300, 300 ),
             Scalar( 0, 0, 255 ), 1, 8 );
  imshow("Image", canvas);

  waitKey( 0 );
  return 0;
}

Tuesday 15 March 2016

OpenCV C++ Code for Drawing a Semi-Circle

This Opencv Tutorial is about drawing a Semi-Circle

You might have wondered that how to draw a Semi-Circle in Opencv when we have no direct syntax available for it.
Even in the Syntax of drawing a circle in Opencv, we dont have any such parameters which can be modified for drawing a semicircle.


C++ :void circle(Mat& img, Point center, int radius, const Scalar& color, int thickness=1, int lineType=8, int shift=0)

But, we know that a circle is a special case of an ellipse whose eccentricity is 1. And in the Opencv Ellipse Syntax:

C++ :void ellipse(Mat& img, Point center, Size axes, double angle, double startAngle, double endAngle, const Scalar& color, int thickness=1, int lineType=8, int shift=0)

We can find the parameters like "Start Angle" and "End Angle".
And if we want to draw an Circle From an Ellipse we just need to mention the size of both the axes as same.
Thus,Here is the Opencv Code for drawing a Semi-Circle:



//Opencv C++ Tutorial for drawing a Semi-Circle
#include <opencv2 core.hpp="" core="">
#include <opencv2 highgui.hpp="" highgui="">
using namespace cv;
int main( )
{
 // Create black empty images
 Mat image = Mat::zeros( 500, 500, CV_8UC3 );
 // Draw a ellipse
 for(int i=10;i<=250;i=i+10)
 {
 ellipse( image, Point( 250, 250 ), Size( i, i ), 0, 0, 180, Scalar( 255, 255, 0 ), 2, 8 );
 imshow("Image",image);
 waitKey( 250 );
 }
 waitKey( 0 );
 
 return(0);
}


Output:

Thursday 10 March 2016

OpenCV C++ Code for drawing a Square Spiral

In the previous tutorial we learn about drawing an Line.
http://opencv-code.blogspot.in/2016/12/how-to-draw-line-opencv-cplusplus-example.html
Thus this opencv tutorial will be an extension of that tutorial with some added mathematical logic for drawing a square spiral.



Here is the Opencv Code Below:
//Drawing a Square Spiral
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <iostream>
using namespace cv;
using namespace std;
  
// Draws a square spiral: successive perpendicular segments whose
// length grows by 5 px per step, animated on a 500x500 canvas.
int main( )
{   
  // count: segment index (its parity picks horizontal vs vertical moves);
  // (a,b): current pen position; (i,j): endpoint of the next segment.
  int count=0,a,b=250,i,j;
  // Create black empty images
  Mat image = Mat::zeros( 500, 500, CV_8UC3 );
  // p and q track the parity of horizontal / vertical moves respectively,
  // so moves along the same axis alternate direction
  // (right, down, left, up, ...), producing the spiral.
  int p=0;int q=1;
  // Keep spiralling until the pen's x co-ordinate leaves the canvas.
  for(a=250;a<500 && a>0;)
  {
     
   count++;
   if(count%2!=0)
   { 
      // Odd segment: move horizontally (y stays at b).
      p++;
    j=b;
    if(p%2!=0) 
    {i=a+5*count;}  // shift right by 5*count px
    else
    {i=a-5*count;}  // shift left by 5*count px
 }
   else
  {
     
      // Even segment: move vertically (x stays at a).
      q++;
    i=a;
    if(q%2==0 )
    {j=b+5*count;}  // shift down by 5*count px (image y grows downward)
    else
    {j=b-5*count;}  // shift up by 5*count px
 
    }
    // Draw a line 
  line( image, Point( a, b ), Point( i, j), Scalar( 255, 255, 0 ), 2, 8 );
   
     imshow("Image",image);
     waitKey( 100 );
  // The segment's endpoint becomes the start of the next one.
  a=i;
  b=j;
  
  }
  waitKey( 0 );
  return(0);
}

Output:-

Saturday 5 March 2016

OpenCV C++ Tutorial for drawing a Star

In the previous tutorial we learnt about drawing a LINE:
http://opencv-code.blogspot.in/2016/12/how-to-draw-line-opencv-cplusplus-example.html

Thus this opencv tutorial will be an extension of that tutorial with some added mathematical logic for drawing a star.

To begin with we first start by drawing a pentagon:
And name the vertex as a ,b ,c ,d, e.
The co-ordinates of which can be obtained by mathematical rules as explained before:

a=( 2*r*cos(36)*cos(72) , x )
b=( x-2*r*cos(36)*cos(72) , x )
c=( x , 2*r*cos(36)*sin(72) )
d=( x/2 , 0 )
e=( 0 , 2*r*cos(36)*sin(72) )




Now, The magic begins. 1. Join vertex a with d. 2. Join vertex d with b. 3. Join vertex b with e. 4. Join vertex e with c. 5. Join vertex c with a.



Here is the opencv code for drawing a Star:
//Opencv Example of drawing a Star 
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <iostream>
#include <math.h>
using namespace cv;
using namespace std;
  
int main( )
{
  // Use an accurate value of pi: the original 3.14 is coarse enough to
  // visibly shift the star's vertices, all of which are computed from
  // cos/sin below.
  const double pi = 3.14159265358979323846;

  // Side length related to the enclosing regular pentagon, derived from
  // the 500 px window width (see the pentagon tutorial for the geometry).
  int a = 500/(1+cos(pi*54/180));

  // Create a black 500x500 canvas.
  Mat image = Mat::zeros( 500, 500, CV_8UC3 );

  // Join the pentagon's vertices in the order a->d, d->b, b->e, e->c, c->a,
  // pausing half a second after each chord so the star appears stroke by stroke.
  line( image, Point((a*cos(pi*72/180)), 500),  Point(250, 0), Scalar( 255, 255, 0 ), 2, 8 );
  imshow("Image",image);
  waitKey( 500 );

  line( image, Point(250, 0), Point(500-(a*cos(pi*72/180)),500), Scalar( 255, 255, 0 ), 2, 8 );
  imshow("Image",image);
  waitKey( 500 );

  line( image, Point(500-(a*cos(pi*72/180)),500), Point(0, 500-(a*sin(pi*72/180))), Scalar( 255, 255, 0 ), 2, 8 );
  imshow("Image",image);
  waitKey( 500 );

  line( image, Point(0, 500-(a*sin(pi*72/180))), Point( 500, 500-(a*sin(pi*72/180)) ), Scalar( 255, 255, 0 ), 2, 8 );
  imshow("Image",image);
  waitKey( 500 );

  line( image, Point( 500, 500-(a*sin(pi*72/180)) ), Point((a*cos(pi*72/180)), 500), Scalar( 255, 255, 0 ), 2, 8 );
  imshow("Image",image);
  waitKey( 0 );
  return 0;
}




Output:-

Tuesday 1 March 2016

OpenCV C++ Code for Drawing a Pentagon

This Opencv C++ Tutorial is about drawing a Pentagon.

In the previous tutorials we learn about drawing a Rectangle and a Line.
To draw the Square we obtained the Co-ordinates of the vertices of the square and then joined those vertices with a Line.

Similarly in order to draw the Pentagon we first need to obtain the Co-ordinates of its Vertices.



Refer the Figure Below:



ϴ=72º (∵ ϴ = 360º/5)
Now, In ∆OBC,
Seg OB=Seg OC;
Thus, m∠OBC=m∠OCB=x;
x+x+72º=180º ( Since Sum of All angles of a Triangle is 180º )
2x=180º - 72º ;
x=54º
i.e. m∠OBC=m∠OCB=54º;
Also,
m∠ABC=108º;
Thus, m∠ABQ=72º; & Seg BC=a;
Seg QB=a*Cos(72º);
∴ QR=QB + BC + CR;
& QB=CR;
Thus, 2*a*Cos(72º) + a =QR
where QR is the Length of the Side of the window.
Here QR=500
Thus a=500/(1+2*Cos(72º));



//Opencv C++ Example for drawing a Pentagon 
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <iostream>
#include <math.h>
using namespace cv;
using namespace std;
  
int main( )
{
  // Accurate value of pi: the original 3.14 introduces visible error in
  // the trigonometry that places the pentagon's vertices.
  const double pi = 3.14159265358979323846;

  // Length of a Side of Regular Pentagon, from the window width:
  // 2*a*cos(72 deg) + a = 500  =>  a = 500 / (1 + 2*cos(72 deg)).
  int a = 500/(1+2*cos(pi*72/180));

  // Create a black 500x500 canvas.
  Mat image = Mat::zeros( 500, 500, CV_8UC3 );

  // Join the five vertices in order, pausing half a second per side.
  line( image, Point((a*cos(pi*72/180)), 500), Point(500-(a*cos(pi*72/180)),500), Scalar( 255, 255, 0 ), 2, 8 );
  imshow("Image",image);
  waitKey( 500 );
  line( image, Point(500-(a*cos(pi*72/180)),500), Point( 500, 500-(a*sin(pi*72/180)) ), Scalar( 255, 255, 0 ), 2, 8 );
  imshow("Image",image);
  waitKey( 500 );
  line( image, Point(500, 500-(a*sin(pi*72/180))), Point(250, 0), Scalar( 255, 255, 0 ), 2, 8 );
  imshow("Image",image);
  waitKey( 500 );
  line( image, Point(250, 0), Point(0, 500-(a*sin(pi*72/180))), Scalar( 255, 255, 0 ), 2, 8 );
  imshow("Image",image);
  waitKey( 500 );
  line( image, Point(0, 500-(a*sin(pi*72/180))), Point((a*cos(pi*72/180)), 500), Scalar( 255, 255, 0 ), 2, 8 );
  imshow("Image",image);
  waitKey( 0 );
  return 0;
}


Output:-


Thursday 25 February 2016

OpenCV C++ Code for drawing an Ellipse

Syntax:
C++ :void ellipse(Mat& img, Point center, Size axes, double angle, double startAngle, double endAngle, const Scalar& color, int thickness=1, int lineType=8, int shift=0)

Parameters:
img Image.
center Center of the ellipse.
axes Half of the size of the ellipse main axes.
angle Ellipse rotation angle in degrees.
startAngle Starting angle of the elliptic arc in degrees.
endAngle Ending angle of the elliptic arc in degrees.
box Alternative ellipse representation via RotatedRect or CvBox2D. This means that the function draws an ellipse inscribed in the rotated rectangle.
color Ellipse color.
thickness Thickness of the ellipse arc outline, if positive. Otherwise, this indicates that a filled ellipse sector is to be drawn.
lineType Type of the ellipse boundary. See the line() description.
shift Number of fractional bits in the coordinates of the center and values of axes.

Note: If you want to draw the whole ellipse and not an arc, choose startAngle 0 and endAngle 360.

Note: CIRCLE is a special case of ELLIPSE whose ECCENTRICITY is equal to ZERO,
i.e. one whose both axes are of equal length.



//Code for drawing an ellipse
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
using namespace cv;
int main( )
{
Mat image = Mat::zeros( 400, 400, CV_8UC3 );
//void ellipse(Mat& img, Point center, Size axes, double angle, double startAngle, double endAngle, const Scalar& color, int thickness=1*, int lineType=8*, int shift=0);
ellipse( image, Point( 200, 200 ), Size( 100.0, 150.0 ), 45, 0, 360, Scalar( 255, 0, 0 ), 3, 8 );
ellipse( image, Point( 200, 200 ), Size( 100.0, 150.0 ), 90, 0, 360, Scalar( 0,255, 0 ), 3, 8 );
ellipse( image, Point( 200, 200 ), Size( 100.0, 150.0 ), 135, 0, 360, Scalar( 0, 0, 255 ), 3, 8 );
ellipse( image, Point( 200, 200 ), Size( 100.0, 150.0 ), 180, 0, 360, Scalar( 255,255, 0 ), 3, 8 );
imshow("Image",image);
waitKey( 0 );
return(0);
}

Saturday 20 February 2016

OpenCV C++ Code for drawing Circles

Syntax:
C++ :void circle(Mat& img, Point center, int radius, const Scalar& color, int thickness=1, int lineType=8, int shift=0)

Parameters:
img Image where the circle is drawn.
center Center of the circle.
radius Radius of the circle.
color Circle color.
thickness Thickness of the circle outline, if positive. Negative thickness means that a filled circle is to be drawn.
lineType Type of the circle boundary. See the line() description.
shift Number of fractional bits in the coordinates of the center and in the radius value.
The function circle draws a simple or filled circle with a given center and radius.

Note: To draw a filled circle in Opencv, take the value of thickness as Negative (e.g. -2, or CV_FILLED).


#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
using namespace cv;
  
int main( )
{    
  // Create black empty images
  Mat image = Mat::zeros( 400, 400, CV_8UC3 );
    
  // Draw a circle 
  circle( image, Point( 200, 200 ), 32.0, Scalar( 0, 0, 255 ), 1, 8 );
  imshow("Image",image);
  
  waitKey( 0 );
  return(0);
}



Drawing a Circle by taking values from the user.
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <iostream>
using namespace cv;
using namespace std;
 
int main( )
{
  // 400x400 black canvas; circles accumulate on it across iterations.
  /*It basically defines the size of the window where circle would be drawn*/
  Mat image = Mat::zeros( 400, 400, CV_8UC3 );
  int radius, x, y;

  // Draw a circle per set of user-supplied parameters.
  while ( true )
  {
    cout << "Enter the co-ordinates of the centre" << endl;
    // Centre should satisfy 0<x<400 and 0<y<400 (the window is 400x400).
    if ( !( cin >> x >> y ) )
    {
      // FIX: non-numeric input puts cin in a fail state; the original
      // code then looped forever on garbage values. Exit cleanly instead.
      cout << "Invalid input - exiting." << endl;
      break;
    }
    // Echo the chosen centre in the form (x,y).
    cout << "Co-ordinates of the centre is (" << x << "," << y << ")" << endl;
    cout << "Enter the value of radius" << endl;
    if ( !( cin >> radius ) )
    {
      cout << "Invalid input - exiting." << endl;
      break;
    }
    // Draw the requested circle in red with a 1 px outline.
    circle( image, Point( x, y ), radius, Scalar( 0, 0, 255 ), 1, 8 );
    imshow( "Image", image );
    waitKey( 1000 );
  }
  return 0;
}



Opencv Code for drawing Concentric Circles.
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
using namespace cv;
  
int main( )
{
  // Black 400x400 canvas.
  Mat image = Mat::zeros( 400, 400, CV_8UC3 );

  // Red concentric circles centred at (200,200) with radii
  // 0, 30, 60, ..., 180, drawn one second apart.
  for ( int radius = 0; radius < 200; radius += 30 )
  {
    circle( image, Point( 200, 200 ), radius, Scalar( 0, 0, 255 ), 1, 8 );
    imshow( "Image", image );
    waitKey( 1000 );
  }
  return 0;
}

Monday 15 February 2016

OpenCV C++ Code for drawing a Line

Syntax:
C++:void line(Mat& img, Point pt1, Point pt2, const Scalar& color, int thickness=1, int lineType=8, int shift=0)

Parameters:
img Image.
pt1 First point of the line segment.
pt2 Second point of the line segment.
color Line color.
thickness Line thickness.
lineType Type of the line:
                 8 (or omitted) - 8-connected line.
                 4 - 4-connected line.
                 CV_AA - antialiased line.
shift Number of fractional bits in the point coordinates.



//Opencv Code for drawing a Line
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
using namespace cv;
  
int main( )
{    
  // Create black empty images
  Mat image = Mat::zeros( 400, 400, CV_8UC3 );
    
  // Draw a line 
  line( image, Point( 15, 20 ), Point( 70, 50), Scalar( 110, 220, 0 ),  2, 8 );
  imshow("Image",image);
  
  waitKey( 0 );
  return(0);
}

Wednesday 10 February 2016

Zeros,Ones and Eyes in OpenCV


Matlab style Initializers
Mat::zeros
Each element of the matrix is zero of the specified size.
Mat A;
A = Mat::zeros(3, 3, CV_32F);


Mat::ones
Each element of the matrix is one of the specified size
Mat A;
A = Mat::ones(3, 3, CV_32F);


Mat::eye
It returns an identity matrix of the specified size.
Mat A;
A = Mat::eye(3, 3, CV_32F);


Note:
We can also mention the scale factor of the matrix.
e.g:
A = Mat::ones(3, 3, CV_32F)* 5;
Here each element of the matrix is 5, because each element of the ones matrix is multiplied by 5.


#include <opencv2/core/core.hpp>
#include <iostream>
#include <opencv2/highgui/highgui.hpp> 
 
using namespace cv;
using namespace std;
 
int main()
{
    Mat imgA = Mat::eye(5, 5, CV_8UC1);
cout << "imgA = \n " << imgA << "\n\n";
 
Mat imgB = Mat::ones(4, 4, CV_8UC1);
cout << "imgB = \n " << imgB << "\n\n";
 
Mat imgC = Mat::zeros(3,3, CV_8UC1);
cout << "imgC = \n " << imgC << "\n\n";
 
return 0;
}

Output:



Note:
Here we have selected the single channel matrix.(CV_8UC1)
For 3 channel matrices:


Code:
#include <opencv2/core/core.hpp>
#include <iostream>
#include <opencv2/highgui/highgui.hpp> 
 
using namespace cv;
using namespace std;
 
int main()
{
    Mat imgA = Mat::eye(5, 5, CV_8UC3);
cout << "imgA = \n " << imgA << "\n\n";
 
Mat imgB = Mat::ones(4, 4, CV_8UC3);
cout << "imgB = \n " << imgB << "\n\n";
 
Mat imgC = Mat::zeros(3,3, CV_8UC3);
cout << "imgC = \n " << imgC << "\n\n";
 
return 0;
}





See the difference in the output.Here the zeros,ones and eyes operator is applied only to 1 channel of the matrix.Rest of the other channel elements are taken 0.Thus two columns of 0 can be seen in between.

Also, if we don't mention the number of channels, by default it is taken as 1,
i.e. CV_8U is equivalent to CV_8UC1.

Saturday 30 January 2016

Capturing a Video from a File/Webcam

Video is a series of images displayed sequentially in quick succession.
Thus in other words we can say that a video is a continuous frame of images.Here by continuous we mean that each image frame is played in a rapid succession such that it appears continuous frames to our eyes.
(Due to persistence of vision)

Thus processing a video is analogous to processing each frame of still images.
There are two ways to process a video:
1. Load it from a file
2. Capture it from a webcam i.e real time recording of video.

Thus if we need to capture a video from a webcam we need to just replace the line of the code by
VideoCapture capture(0);
where the parameter 0 indicates that we are using the default camera for capturing the video.

Thus if attach external camera other than the one which we have with our laptop we need to give that index as our parameter.

e.g VideoCapture capture(1);

Reading from a File:


//OpenCv C++ Code for reading video from a File
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <iostream>
  using namespace std;
  using namespace cv;
 
  int main()
  {
       //Capturing the Video
       VideoCapture capture("D:\\MyVideo.avi");

       
       //Check whether video is Opening
       if (!capture.isOpened())
       throw "Error when reading file";

       namedWindow("window", 1);
       
       //Reading frames of Video
       for (;;)
     {
            Mat frame;
            capture >> frame;
            if (frame.empty())
              break;
            imshow("window", frame);
            waitKey(1);
       }
   }  


Reading from a Webcam:


#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <iostream>
  using namespace std;
  using namespace cv;
 
  int main()
  {
       //Capturing the Video
       VideoCapture capture(0);

       
       //Check whether video is Opening
       if (!capture.isOpened())
       throw "Error when reading file";

       namedWindow("window", 1);
       
       //Reading frames of Video
       for (;;)
     {
            Mat frame;
            capture >> frame;
            if (frame.empty())
              break;
            imshow("window", frame);
            waitKey(1);
       }
   }  

Monday 25 January 2016

RGB to Other Color Space Conversion

cvtcolor() converts an image from one color space to another.



Syntax :
C++:void cvtColor(InputArray src, OutputArray dst, int code, int dstCn=0 )

Parameter:
src   :– input image: 8-bit unsigned, 16-bit unsigned ( CV_16UC... ), or single-precision floating-point.
dst    :– output image of the same size and depth as src.
code  :– color space conversion code (see the description below).
dstCn :– number of channels in the destination image; if the parameter is 0, the number of the channels is derived automatically from src and code.



As it has already been mentioned that the functions converts an image form one color space to another, there are various types of conversion possible.

Transformation Syntax
RGB to YCrCb CV_RGB2YCrCb
BGR to YCrCb CV_BGR2YCrCb
YCrCb to RGB CV_YCrCb2RGB
YCrCb to BGR CV_YCrCb2BGR

Transformation Syntax
RGB to HSV CV_RGB2HSV
BGR to HSV CV_BGR2HSV
HSV to RGB CV_HSV2RGB
HSV to BGR CV_HSV2BGR

Transformation Syntax
RGB to CIE L*a*b* CV_RGB2Lab
BGR to CIE L*a*b* CV_BGR2Lab
CIE L*a*b* to RGB CV_Lab2RGB
CIE L*a*b* to BGR CV_Lab2BGR

Transformation Syntax
RGB to CIE L*u*v* CV_RGB2Luv
BGR to CIE L*u*v* CV_BGR2Luv
CIE L*u*v* to RGB CV_Luv2RGB
CIE L*u*v* to BGR CV_Luv2BGR

Transformation Syntax
RGB to CIE XYZ CV_RGB2XYZ
BGR to CIE XYZ CV_BGR2XYZ
CIE XYZ to RGB CV_XYZ2RGB
CIE XYZ to BGR CV_XYZ2BGR


//OpenCV C++ Code for ColorSpace Conversion
#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "iostream"
 
using namespace cv;
 
int main( )
{
// char* imageName = argv[1];
 
 Mat image;
 image = imread( "C:\\Users\\arjun\\Desktop\\color-image.png", 1 );
 
 Mat RGB2GRAY_image;
 cvtColor( image, RGB2GRAY_image, CV_RGB2GRAY );
 
 Mat BGR2GRAY_image;
 cvtColor( image, BGR2GRAY_image, CV_BGR2GRAY );
 
 Mat RGB2YCrCb_image;
 cvtColor( image, RGB2YCrCb_image, CV_RGB2YCrCb );
 
 Mat BGR2YCrCb_image;
 cvtColor( image, BGR2YCrCb_image, CV_BGR2YCrCb );
 
 Mat RGB2HSV_image;
 cvtColor( image, RGB2HSV_image, CV_RGB2HSV );
 
 Mat BGR2HSV_image;
 cvtColor( image, BGR2HSV_image, CV_BGR2HSV );
 
 Mat RGB2Lab_image;
 cvtColor( image, RGB2Lab_image, CV_RGB2Lab );
 
 Mat BGR2Lab_image;
 cvtColor( image, BGR2Lab_image, CV_BGR2Lab );
 
 Mat RGB2Luv_image;
 cvtColor( image, RGB2Luv_image, CV_RGB2Luv );
 
  Mat BGR2Luv_image;
 cvtColor( image, BGR2Luv_image, CV_BGR2Luv );
 
 Mat RGB2XYZ_image;
 cvtColor( image, RGB2XYZ_image, CV_RGB2XYZ );
 
 Mat BGR2XYZ_image;
 cvtColor( image, BGR2XYZ_image, CV_BGR2XYZ );
 
 namedWindow( "original image", CV_WINDOW_AUTOSIZE );
imshow( "original image", image );
 
namedWindow( "RGB2GRAY image", CV_WINDOW_AUTOSIZE );
imshow( "RGB2GRAY image",RGB2GRAY_image );
imwrite( "C:\\Users\\arjun\\Desktop\\RGB2GRAY.jpg", RGB2GRAY_image );
 
namedWindow( "BGR2GRAY image", CV_WINDOW_AUTOSIZE );
imshow( "BGR2GRAY image", BGR2GRAY_image );
imwrite( "C:\\Users\\arjun\\Desktop\\BGR2GRAY.jpg", BGR2GRAY_image );
 
namedWindow( "RGB2YCrCb image", CV_WINDOW_AUTOSIZE );
imshow( "RGB2YCrCb image", RGB2YCrCb_image );
imwrite( "C:\\Users\\arjun\\Desktop\\RGB2YCrCb.jpg", RGB2YCrCb_image );
 
 
namedWindow( "BGR2YCrCb image", CV_WINDOW_AUTOSIZE );
imshow( "BGR2YCrCb image", BGR2YCrCb_image );
imwrite( "C:\\Users\\arjun\\Desktop\\BGR2YCrCb.jpg", BGR2YCrCb_image );
 
namedWindow( "RGB2HSV image", CV_WINDOW_AUTOSIZE );
imshow( "RGB2HSV image", RGB2HSV_image );
imwrite( "C:\\Users\\arjun\\Desktop\\RGB2HSV.jpg", RGB2HSV_image );
 
namedWindow( "BGR2HSV image", CV_WINDOW_AUTOSIZE );
imshow( "BGR2HSV image", BGR2HSV_image );
imwrite( "C:\\Users\\arjun\\Desktop\\BGR2HSV.jpg", BGR2HSV_image );
 
namedWindow( "RGB2Lab image", CV_WINDOW_AUTOSIZE );
imshow( "RGB2Lab image", RGB2Lab_image );
imwrite( "C:\\Users\\arjun\\Desktop\\RGB2Lab.jpg", RGB2Lab_image );
 
namedWindow( "BGR2Lab image", CV_WINDOW_AUTOSIZE );
imshow( "BGR2Lab image", BGR2Lab_image );
imwrite( "C:\\Users\\arjun\\Desktop\\BGR2Lab.jpg", BGR2Lab_image );
 
namedWindow( "RGB2Luv image", CV_WINDOW_AUTOSIZE );
imshow( "RGB2Luv image", RGB2Luv_image );
imwrite( "C:\\Users\\arjun\\Desktop\\RGB2Luv.jpg", RGB2Luv_image );
 
namedWindow( "BGR2Luv image", CV_WINDOW_AUTOSIZE );
imshow( "BGR2Luv image", BGR2Luv_image );
imwrite( "C:\\Users\\arjun\\Desktop\\BGR2Luv.jpg", BGR2Luv_image );
 
namedWindow( "RGB2XYZ image", CV_WINDOW_AUTOSIZE );
imshow( "RGB2XYZ image", RGB2XYZ_image );
imwrite( "C:\\Users\\arjun\\Desktop\\RGB2XYZ.jpg", RGB2XYZ_image );
 
namedWindow( "BGR2XYZ image", CV_WINDOW_AUTOSIZE );
imshow( "BGR2XYZ image", BGR2XYZ_image );
imwrite( "C:\\Users\\arjun\\Desktop\\BGR2XYZ.jpg", BGR2XYZ_image );
 
 waitKey(0);
 
 return 0;
}


Original Image:



RGB to Grey Image:



BGR to Grey Image:



RGB to YCrCb Image:



BGR to YCrCb Image:



RGB to HSV Image:



BGR to HSV Image:



RGB to Lab Image:



BGR to Lab Image:



RGB to Luv Image:



BGR to Luv Image:



RGB to XYZ Image:



BGR to XYZ Image: