I don’t know how to achieve Stitching ,How to use opencV for Unity function implementation.and partial area display of captured image after completion of Stitching.And my feature extraction display and matching graph are mirror display.
C++ code:
Mat homo = findHomography(leftimagePoints, rightimagePoints, RANSAC);
CalcCorners(homo, frame_R);
Mat imageTransform1, imageTransform2;
warpPerspective(imageR, imageTransform1, homo, Size(MAX(corners.right_top.x, corners.right_bottom.x), imageL.rows));
int dst_width = imageTransform1.cols;
int dst_height = frame_L.rows;
if (imageTransform1.cols < frame_L.cols)
dst_width = frame_L.cols;
if (imageTransform1.rows < frame_L.rows)
dst_height = frame_L.rows;
Mat outimage(dst_height, dst_width, CV_8UC1);
outimage.setTo(0);
imageTransform1.copyTo(outimage(Rect(0, 0, imageTransform1.cols, imageTransform1.rows)));
imageL.copyTo(outimage(Rect(0, 0, frame_L.cols, frame_L.rows)));
this my code:
float angle = UnityEngine.Random.Range(0, 0), scale = 1.0f;
Point center = new Point(RgrayMat.cols() * 0.5f, RgrayMat.rows() * 0.5f);
Mat affine_matrix = Imgproc.getRotationMatrix2D(center, angle, scale);
Imgproc.warpAffine(LgrayMat, RgrayMat, affine_matrix, RgrayMat.size());
ORB detector = ORB.create();
ORB extractor = ORB.create();
MatOfKeyPoint keypoints1 = new MatOfKeyPoint();
Mat descriptors1 = new Mat(LgrayMat.rows(), LgrayMat.cols(), CvType.CV_8UC1);
detector.detect(LgrayMat, keypoints1);
extractor.compute(LgrayMat, keypoints1, descriptors1);
MatOfKeyPoint keypoints2 = new MatOfKeyPoint();
Mat descriptors2 = new Mat(RgrayMat.rows(), RgrayMat.cols(), CvType.CV_8UC1);
detector.detect(RgrayMat, keypoints2);
extractor.compute(RgrayMat, keypoints2, descriptors2);
DescriptorMatcher matcher = DescriptorMatcher.create(DescriptorMatcher.BRUTEFORCE_HAMMINGLUT);
MatOfDMatch matches = new MatOfDMatch();
List knnMatches = new List();
matcher.knnMatch(descriptors1, descriptors2, knnMatches, 2);
var arrayKnnMatches = knnMatches.ToArray();
List goodMatches = new List();
for (int i = 0; i < arrayKnnMatches.Length; ++i)
{
var array = arrayKnnMatches*.toArray();*
if (array[0].distance < 0.7 * array[1].distance)
{
goodMatches.Add(array[0]);
}
}
List objList = new List();
List sceneList = new List();
List keypoints_objectList = keypoints1.toList();
List keypoints_sceneList = keypoints2.toList();
for (int i = 0; i < goodMatches.Count; i++)
{
objList.Add(keypoints_objectList[goodMatches*.queryIdx].pt);
sceneList.Add(keypoints_sceneList[goodMatches.trainIdx].pt);
//Debug.Log(“keypoints_objectList=” + keypoints_objectList[goodMatches.queryIdx].pt);
//Debug.Log(“keypoints_sceneList=” + keypoints_objectList[goodMatches.queryIdx].pt);
_}
//Debug.Log(“objList.Count=” + objList.Count);
//Debug.Log(“sceneList.Count=” + sceneList.Count);
if (objList.Count == 0)
return;
MatOfPoint2f obj = new MatOfPoint2f();
MatOfPoint2f scene = new MatOfPoint2f();
obj.fromList(objList);
scene.fromList(sceneList);
//Debug.Log(“obj=” + obj.dump());
//Debug.Log(“scene=” + scene.dump());*_
Mat H = Calib3d.findHomography(obj, scene, Calib3d.RANSAC);
//Debug.Log(“H=” + H.dump());
Mat Mixcamera1 = new Mat(2160, 7680, CvType.CV_8UC1);
matches.fromArray(goodMatches.ToArray());
//Debug.Log(“keypoints1=” + keypoints1.dump());
Features2d.drawMatches(LgrayMat, keypoints1, RgrayMat, keypoints2, matches, Mixcamera1);
Texture2D texture = new Texture2D(Mixcamera1.cols(), Mixcamera1.rows(), TextureFormat.RGBA32, false);
Utils.matToTexture2D(Mixcamera1, texture);
Mixcamera.texture = texture;
//cal top point
int Rvalue = SortCorners(H, LgrayMat);
Debug.Log(“Rvalue=” + Rvalue);
if (Rvalue == 0)
return;
Mat imageTransform1 = new Mat();
Mat imageTransform2 = new Mat();
Debug.Log(“right_top=” + cornersXY.right_top[0].x);
Debug.Log(“right_bottom=” + cornersXY.right_bottom[0].x);
Imgproc.warpPerspective(RgrayMat, imageTransform1, H, new Size(Mathf.Max((float)cornersXY.right_top[0].x, (float)cornersXY.right_bottom[0].x), LgrayMat.rows()));
//
//Features2d.drawMatches(LgrayMat, keypoints1, RgrayMat, keypoints2, matches, Mresult);
int dst_width = imageTransform1.cols();
int dst_height = LgrayMat.rows();
Debug.Log(“dst_width=” + dst_width);
Debug.Log(“dst_height=” + dst_height);
if (imageTransform1.cols() < LgrayMat.cols())
dst_width = LgrayMat.cols();
if (imageTransform1.rows() < LgrayMat.rows())
dst_height = LgrayMat.rows();
Mat outimage = new Mat(dst_height, dst_width, CvType.CV_8UC1);
//Mat outimage = new Mat();
outimage.setTo(new Scalar(0));
OpenCVForUnity.CoreModule.Rect rect = new OpenCVForUnity.CoreModule.Rect(0, 0, imageTransform1.cols(), imageTransform1.rows());
imageTransform1.copyTo(outimage);
LgrayMat.copyTo(outimage);
Mat Mresult = new Mat();
Texture2D resulttexture = new Texture2D(outimage.cols(), outimage.rows(), TextureFormat.RGBA32, false);
Utils.matToTexture2D(outimage, resulttexture);
MresultImage.texture = resulttexture;