[RELEASED] OpenCV for Unity

By changing the sample code as follows, you can show correct results in portrait mode.
Objects can be detected from another orientation by rotating the Mat converted from the WebCamTexture.

Edit: fixed the memory leak that occurs in rgbaMat.t().

using UnityEngine;
using System.Collections;

using OpenCVForUnity;

namespace OpenCVForUnitySample
{
    /// <summary>
    /// WebCamTexture detect face sample.
    /// </summary>
    public class RoteteWebCamTextureDetectFaceSample : MonoBehaviour
    {
   
        WebCamTexture webCamTexture;
        Color32[] colors;
        #if (UNITY_ANDROID || UNITY_IPHONE) && !UNITY_EDITOR
        bool isFront = false;
        #endif
        int width = 640;
        int height = 480;
        Mat rgbaMat;
        Mat RotatedRgbaMat;
        Mat grayMat;
        Texture2D texture;
        CascadeClassifier cascade;
        MatOfRect faces;
        bool initDone = false;
   
        // Use this for initialization
        void Start ()
        {
            // Checks how many and which cameras are available on the device
            for (int cameraIndex = 0; cameraIndex < WebCamTexture.devices.Length; cameraIndex++) {
           
                #if (UNITY_ANDROID || UNITY_IPHONE) && !UNITY_EDITOR
                if (WebCamTexture.devices [cameraIndex].isFrontFacing == isFront) {
                    #endif
               
                    Debug.Log (cameraIndex + " name " + WebCamTexture.devices [cameraIndex].name + " isFrontFacing " + WebCamTexture.devices [cameraIndex].isFrontFacing);
               
                    //Set the appropriate fps
                    webCamTexture = new WebCamTexture (WebCamTexture.devices [cameraIndex].name, width, height, 3);
               
                    #if (UNITY_ANDROID || UNITY_IPHONE) && !UNITY_EDITOR
                    break;
                }
                #endif
           
            }
       
            Debug.Log ("width " + webCamTexture.width + " height " + webCamTexture.height + " fps " + webCamTexture.requestedFPS);
       
       
            // Starts the camera
            webCamTexture.Play ();
       
       
            StartCoroutine (init ());
       
       
        }
   
        private IEnumerator init ()
        {
            while (true) {
                //If you want to use webcamTexture.width and webcamTexture.height on iOS, you have to wait until webcamTexture.didUpdateThisFrame == 1, otherwise these two values will be equal to 16. (http://forum.unity3d.com/threads/webcamtexture-and-error-0x0502.123922/)
                if (webCamTexture.didUpdateThisFrame) {
                    Debug.Log ("width " + webCamTexture.width + " height " + webCamTexture.height + " fps " + webCamTexture.requestedFPS);
               
                    colors = new Color32[webCamTexture.width * webCamTexture.height];
               
                    rgbaMat = new Mat (webCamTexture.height, webCamTexture.width, CvType.CV_8UC4);

                    //replace width and height
                    RotatedRgbaMat = new Mat (webCamTexture.width, webCamTexture.height, CvType.CV_8UC4);
                    grayMat = new Mat (webCamTexture.width, webCamTexture.height, CvType.CV_8UC1);
               
                    texture = new Texture2D (webCamTexture.height, webCamTexture.width, TextureFormat.RGBA32, false);
               
                    gameObject.transform.eulerAngles = new Vector3 (0, 0, 0);
                    gameObject.transform.localScale = new Vector3 (webCamTexture.height, webCamTexture.width, 1);
               
               
                    cascade = new CascadeClassifier (Utils.getFilePath ("haarcascade_frontalface_alt.xml"));
                    faces = new MatOfRect ();
               
                    gameObject.GetComponent<Renderer> ().material.mainTexture = texture;
               
                    initDone = true;
               
                    break;
                } else {
                    yield return 0;
                }
            }
        }
   
        // Update is called once per frame
        void Update ()
        {
            if (!initDone)
                return;
       
            if (webCamTexture.didUpdateThisFrame) {
           
                Utils.WebCamTextureToMat (webCamTexture, rgbaMat, colors);

//                //rotate by -90 degrees.
//                Core.flip (rgbaMat.t (), RotatedRgbaMat, 1);

                //fix the memory leak that occurs in rgbaMat.t().
                //rotate by -90 degrees.
                using (Mat transposeRgbaMat = rgbaMat.t ()) {
                    Core.flip (transposeRgbaMat, RotatedRgbaMat, 1);
                }
           
           
                Imgproc.cvtColor (RotatedRgbaMat, grayMat, Imgproc.COLOR_RGBA2GRAY);
                Imgproc.equalizeHist (grayMat, grayMat);
           
           
                if (cascade != null)
                    cascade.detectMultiScale (grayMat, faces, 1.1, 2, 2, // TODO: objdetect.CV_HAAR_SCALE_IMAGE
                                             new Size (50, 50), new Size ());
           
                OpenCVForUnity.Rect[] rects = faces.toArray ();
                for (int i = 0; i < rects.Length; i++) {
                    //                Debug.Log ("detect faces " + rects [i]);

                    Core.rectangle (RotatedRgbaMat, new Point (rects [i].x, rects [i].y), new Point (rects [i].x + rects [i].width, rects [i].y + rects [i].height), new Scalar (255, 0, 0, 255), 2);
                }

             
                Utils.matToTexture2D (RotatedRgbaMat, texture, colors);
             
             
            }
        }
     
        void OnDisable ()
        {
            webCamTexture.Stop ();
        }
     
        void OnGUI ()
        {
            float screenScale = Screen.width / 240.0f;
            Matrix4x4 scaledMatrix = Matrix4x4.Scale (new Vector3 (screenScale, screenScale, screenScale));
            GUI.matrix = scaledMatrix;
         
         
            GUILayout.BeginVertical ();
            if (GUILayout.Button ("back")) {
                Application.LoadLevel ("OpenCVForUnitySample");
            }
         
         
            GUILayout.EndVertical ();
        }
    }
}

In order to read a file from the Android NDK, files in the StreamingAssets folder are copied to the "/data/data/******/files/" folder.

The copy from the "assets" folder of the APK to the "/data/data/******/files/" folder is performed when "Utils.getFilePath ()" is called.
cascade = new CascadeClassifier (Utils.getFilePath ("haarcascade_frontalface_alt.xml"));

Copy processing procedure (see the sketch after this list):

  • Check whether the file specified in "Utils.getFilePath ()" is already present in the "/data/data/******/files/" folder.

  • If it is not, copy the file.

  • If it is, check whether the file has been updated. If the file has been updated, copy it again.
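Something like the following (hypothetical names, not the plugin's actual implementation) illustrates the check-and-copy logic:

using System;
using System.IO;

// Hypothetical sketch of the check-and-copy procedure described above.
// destDir stands in for "/data/data/******/files/"; srcBytes and
// srcLastWriteTime would come from the asset bundled in the APK.
public static class FileCopySketch
{
    public static string GetFilePath (string destDir, string fileName,
                                      byte[] srcBytes, DateTime srcLastWriteTime)
    {
        string destPath = Path.Combine (destDir, fileName);

        // Copy when the file is missing or older than the bundled asset.
        bool needCopy = !File.Exists (destPath) ||
                        srcLastWriteTime > File.GetLastWriteTime (destPath);

        if (needCopy)
            File.WriteAllBytes (destPath, srcBytes);

        return destPath;
    }
}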

Though I'm not sure whether it is possible to reduce the size,
it might be possible if you decompress a compressed file placed in the "StreamingAssets" folder before calling "Utils.getFilePath ()".

Thanks for the explanation, that helps a lot!

"Though I'm not sure whether it is possible to reduce the size,
it might be possible if you decompress a compressed file placed in the "StreamingAssets" folder before calling "Utils.getFilePath ()"."

I think the StreamingAssets "folder" is read-only on the device… (I'll double check). So, I think if I wanted to compress the xmls in StreamingAssets, I'd have to decompress straight to /data/data/***/files/ with a custom function and then not use Utils.getFilePath() in the CascadeClassifier ctor.
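A rough sketch of that custom function, assuming the gzipped cascade bytes have already been read from StreamingAssets (e.g. with WWW in a coroutine on Android), that GZipStream is available on the target platform, and that Application.persistentDataPath points at the app's writable files folder; the class and method names are hypothetical:

using System.IO;
using System.IO.Compression;
using UnityEngine;
using OpenCVForUnity;

// Hypothetical helper: decompress a gzipped cascade into a writable folder
// and build the CascadeClassifier from that path, bypassing Utils.getFilePath ().
public static class CascadeDecompressor
{
    public static CascadeClassifier Load (byte[] gzippedXmlBytes, string xmlFileName)
    {
        string destPath = Path.Combine (Application.persistentDataPath, xmlFileName);

        if (!File.Exists (destPath)) {
            using (var input = new GZipStream (new MemoryStream (gzippedXmlBytes),
                                               CompressionMode.Decompress))
            using (var output = File.Create (destPath)) {
                byte[] buffer = new byte[4096];
                int read;
                while ((read = input.Read (buffer, 0, buffer.Length)) > 0)
                    output.Write (buffer, 0, read);
            }
        }

        return new CascadeClassifier (destPath);
    }
}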

Thank you.
If you manage "/data/data/***/files/" directly without using "Utils.getFilePath ()", as you wrote, I think that it will work correctly.

Released Version 1.0.6

Version changes
1.0.6
[Android] Support for x86 build target. (Unity 4.6 or higher)

Hi,
I am trying to combine OpenCV for Unity with the Kinect v2 example that Microsoft provides.
In general it works, but I have some performance issues, since with the current API my only option is to convert the data to and from OpenCV via a Texture2D.
Microsoft's example code provides the Kinect data as buffers (byte[], ushort[]).
I extended the OpenCV for Unity API with a method Utils.intPtrToMat(IntPtr, Mat) to create a Mat this way:

var pColorHandle = GCHandle.Alloc(pColorBuffer, GCHandleType.Pinned);
Utils.intPtrToMat(pColorHandle.AddrOfPinnedObject(), rgbMat);
// do some Imgproc stuff with rgbMat ...

The main performance problem seems to be the re-conversion from a Mat to a buffer (byte[]).
I have two versions.
The first version works as expected but is slow:

Utils.matToTexture2D(rgbMat, myTexture2D);
var handle = GCHandle.Alloc(myTexture2D.GetPixels32(), GCHandleType.Pinned);
Marshal.Copy(handle.AddrOfPinnedObject(), pColorBuffer, 0, pColorBuffer.Length);
handle.Free();

The second version is fast, but the image that I get from the buffer is flipped vertically:

Marshal.Copy((IntPtr)rgbMat.dataAddr(),pColorBuffer,0,pColorBuffer.Length);

It seems that the Mat.dataAddr() method provides the data in a different orientation.

Is there a better or faster way to convert the data with IntPtr?

And my feature request is: an extended API to exchange data via IntPtr directly, in the right orientation (without hacking it in myself).

Thanks

Thank you for the information about combining "Kinect" and "OpenCV for Unity".

Since the image origin differs between an OpenCV Mat and a Unity Texture2D, cv::flip(flipcode = 0) is called in OpenCVForUnity_MatToTexture() and OpenCVForUnity_TextureToMat() on the C++ side.

To copy an OpenCV Mat to a pixel data array IntPtr, please refer to the code below.

/**
 * Copy an OpenCV Mat to a pixel data array IntPtr.
 * <p>
 * This function copies the OpenCV Mat to the pixel data array IntPtr. The copied byte array is flipped upside down.
 * The input Mat object has to be of type 'CV_8UC4' (RGBA), 'CV_8UC3' (RGB) or 'CV_8UC1' (GRAY).
 * The pixel data array has to be 4 bytes per pixel.
 * The pixel data array has to be of the same size as the input Mat.
 *
 * @param mat The input Mat object has to be of type 'CV_8UC4' (RGBA), 'CV_8UC3' (RGB) or 'CV_8UC1' (GRAY).
 * @param intPtr The pixel data array has to be 4 bytes per pixel. The pixel data array has to be of the same size as the input Mat.
 */
public static void matToIntPtr (Mat mat, IntPtr intPtr)
{
    if (mat != null)
        mat.ThrowIfDisposed ();

    if (mat == null)
        throw new ArgumentNullException ("mat");
    if (intPtr == null)
        throw new ArgumentNullException ("intPtr");

    #if UNITY_PRO_LICENSE || ((UNITY_ANDROID || UNITY_IPHONE) && !UNITY_EDITOR)

    OpenCVForUnity_MatToTexture (mat.nativeObj, intPtr);

    #else
    return;
    #endif
}

/**
 * Copy the pixel data array IntPtr to an OpenCV Mat.
 * <p>
 * This function copies the pixel data array IntPtr to the OpenCV Mat. The copied byte array is flipped upside down.
 * The pixel data array has to be 4 bytes per pixel.
 * The output Mat object has to be of the same size as the pixel data array.
 * The output Mat object has to be of type 'CV_8UC4' (RGBA), 'CV_8UC3' (RGB) or 'CV_8UC1' (GRAY).
 *
 * @param intPtr The pixel data array has to be 4 bytes per pixel.
 * @param mat The output Mat object has to be of the same size as the pixel data array.
 * The output Mat object has to be of type 'CV_8UC4' (RGBA), 'CV_8UC3' (RGB) or 'CV_8UC1' (GRAY).
 */
public static void intPtrToMat (IntPtr intPtr, Mat mat)
{
    if (mat != null)
        mat.ThrowIfDisposed ();

    if (intPtr == null)
        throw new ArgumentNullException ("intPtr == null");
    if (mat == null)
        throw new ArgumentNullException ("mat == null");

    #if UNITY_PRO_LICENSE || ((UNITY_ANDROID || UNITY_IPHONE) && !UNITY_EDITOR)

    OpenCVForUnity_TextureToMat (intPtr, mat.nativeObj);

    #else
    return;
    #endif
}

If rgbaMat.isContinuous() is true, I think that the code below is faster than matToIntPtr().

Core.flip (rgbaMat, rgbaMat, 0);
Marshal.Copy ((IntPtr)rgbaMat.dataAddr (), colorBytes, 0, colorBytes.Length);

In the next version I would like to add a method as described above.
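For reference, a minimal sketch of such a helper (the name MatToByteArray is hypothetical and not an actual OpenCV for Unity API; it assumes an 8-bit Mat with continuous data):

using System;
using System.Runtime.InteropServices;
using OpenCVForUnity;

public static class MatInteropSketch
{
    // Hypothetical helper wrapping the flip-and-copy approach shown above.
    // Flips the Mat vertically to match Texture2D's origin, then copies its
    // raw bytes into dst. Assumes an 8-bit Mat with continuous data.
    public static void MatToByteArray (Mat rgbaMat, byte[] dst)
    {
        if (!rgbaMat.isContinuous ())
            throw new ArgumentException ("rgbaMat must be continuous");
        if (dst.Length < rgbaMat.total () * rgbaMat.channels ())
            throw new ArgumentException ("dst is too small");

        // Match the Texture2D origin by flipping upside down first.
        Core.flip (rgbaMat, rgbaMat, 0);
        Marshal.Copy ((IntPtr)rgbaMat.dataAddr (), dst, 0, dst.Length);
    }
}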

Many thanks for the info and your code!
I made a test with both versions and it works. (As you said, the direct Marshal.Copy method is a tiny bit faster in my case.)
The only thing that seems to need a change is your (intPtr == null) check. The compiler (VS) complains about it. It should be (intPtr == IntPtr.Zero).
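For reference, the corrected check inside matToIntPtr()/intPtrToMat() would look like this (IntPtr is a value type, so it can never be null):

if (intPtr == IntPtr.Zero)
    throw new ArgumentNullException ("intPtr");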

Can this be used to detect whether a Texture2D contains a human standing in a particular position? The research I have done makes me think a HOG algorithm could do this, perhaps?

By using "OpenCV for Unity", you can detect humans by using the HOGDescriptor class.
http://docs.opencv.org/java/org/opencv/objdetect/HOGDescriptor.html
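A minimal sketch of people detection with the HOGDescriptor class, following the OpenCV Java-style API that OpenCV for Unity mirrors (grayMat and rgbaMat are assumed to be a CV_8UC1 frame and a CV_8UC4 frame to draw on):

using OpenCVForUnity;

public static class HogPeopleDetectSketch
{
    public static void Detect (Mat grayMat, Mat rgbaMat)
    {
        // Default people detector trained for pedestrian detection.
        HOGDescriptor hog = new HOGDescriptor ();
        hog.setSVMDetector (HOGDescriptor.getDefaultPeopleDetector ());

        MatOfRect locations = new MatOfRect ();
        MatOfDouble weights = new MatOfDouble ();
        hog.detectMultiScale (grayMat, locations, weights);

        // Draw a rectangle around each detected person.
        foreach (OpenCVForUnity.Rect r in locations.toArray ()) {
            Core.rectangle (rgbaMat, new Point (r.x, r.y),
                            new Point (r.x + r.width, r.y + r.height),
                            new Scalar (0, 255, 0, 255), 2);
        }
    }
}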


Hi,
I have another problem with the intPtrToMat conversion. It only works with CV_8UC2 & CV_8UC4. When I try to use a Mat of type CV_8U, CV_8UC1 or CV_8UC3, the application crashes when I call Utils.intPtrToMat().

Here my testcode:

const int cDepthWidth  = 512;
const int cDepthHeight = 424;
Mat testMat1 = new Mat(cDepthHeight, cDepthWidth, CvType.CV_8UC1);
Mat testMat4 = new Mat(cDepthHeight, cDepthWidth, CvType.CV_8UC4);

byte[] buffer1 = Enumerable.Repeat((byte)255, cDepthWidth * cDepthHeight * 1).ToArray();
byte[] buffer4 = Enumerable.Repeat((byte)255, cDepthWidth * cDepthHeight * 4).ToArray();

var handle1 = GCHandle.Alloc(buffer1, GCHandleType.Pinned);
Utils.intPtrToMat(handle1.AddrOfPinnedObject(), testMat1); // crash

var handle4 = GCHandle.Alloc(buffer4, GCHandleType.Pinned);
Utils.intPtrToMat(handle4.AddrOfPinnedObject(), testMat4); // works

The src pixel data array of Utils.intPtrToMat() has to be 4 bytes per pixel.
In the next version I plan to improve the method.

If one pixel is not 4 bytes, you can handle it with code such as the following.

// testMat1.isContinuous() == true
Marshal.Copy(buffer1, 0, (IntPtr)testMat1.dataAddr(), buffer1.Length);

Many thanks for the info!

When I use the Marshal.Copy way for input and output, it now works with CV_8U.

Marshal.Copy(buffer1, 0, (IntPtr)testMat1.dataAddr(), buffer1.Length); // buffer to Mat
// Imgproc ...
Marshal.Copy((IntPtr)testMat1.dataAddr(), buffer1, 0, buffer1.Length); // Mat to buffer

(The matToIntPtr method for reading back also crashes.)

  1. I have one question: can an add-on be used (Non-Rigid Face Tracking)?
  2. Also, running on PC is OK, but on my Mac it does not work (missing material and webcam capture)?

  1. A simple method for "Non-Rigid Face Tracking" is not implemented.

  2. Does "WebCamTextureToMatSample.scene" not work?
    Please tell me your system environment.

Please check whether "***.unity" is added to "Scenes In Build".

I have one PC and one iMac! The same steps work on the PC, but they do not work on the Mac. Why?

Looking at the screenshot that you attached,
doesn't the error "Level 'Texture2DtoMatSample' (-1) could not be loaded because it has not been added to the build settings." appear in the Console?

Successful iOS 64-bit build.

I have confirmed that a project using "OpenCV for Unity" version 1.0.6 in Unity 4.6.1p3 builds successfully for iOS 64-bit.