[OpenCV] cv::Mat to Texture2D issue

Hello. I’m trying to capture an image from a webcam and use it on a plane at runtime. My idea is to use OpenCV to capture it, and now I’m trying to convert the cv::Mat to a Texture2D so I can use it with a material. For some reason, however, I’m losing half of the image in the process. Any idea why?

Here is an example of the issue:

.CPP:

// Fill out your copyright notice in the Description page of Project Settings.


#include "FaceDetector.h"
#include "Engine/Texture.h"
#include "ImageUtils.h"

// Default constructor: configures actor-level tick behavior.
AFaceDetector::AFaceDetector()
{
	// Enable per-frame ticking. Turn this off to save performance if the
	// actor ever stops needing Tick().
	PrimaryActorTick.bCanEverTick = true;
}

// Called when the game starts or when spawned
void AFaceDetector::BeginPlay()
{
	Super::BeginPlay();

    // Start Video..1) 0 for WebCam 2) "Path to Video" for a Local Video
    Vcapture.open(2);

    if (Vcapture.isOpened())
    {
        // Change path before execution 
        if (cascade.load("C:/Program Files/Epic Games/UnrealProjects/CreazyHead/ThirdParty/OpenCV/haarcascades/haarcascade_frontalcatface.xml"))
        {
            GEngine->AddOnScreenDebugMessage(-1, 15.0f, FColor::Yellow, TEXT("DB Faces loaded!"));
            UE_LOG(LogTemp, Warning, TEXT("DB Faces loaded!"));
            FTimerHandle UnusedTimer;
            GetWorldTimerManager().SetTimer(UnusedTimer, this, &AFaceDetector::DetectAndDraw, 0.05f, true, 1.f);
        }
    }
}

// Called every frame
// All per-frame work is driven by the timer set in BeginPlay(), so this
// only forwards to the base-class update.
void AFaceDetector::Tick(float DeltaTime)
{
	Super::Tick(DeltaTime);
}

void AFaceDetector::DetectAndDraw()
{
    // Capture frames from video and detect faces
    Vcapture >> frame;
        
    AuxFaceDetector face_detector;

    std::vector<cv::Rect> CurrentRectangles = face_detector.detect_face_rectangles(frame);
    cv::Scalar color(0, 105, 205);
    int frame_thickness = 4;
    for (const auto& r : CurrentRectangles)
        cv::rectangle(frame, r, color, frame_thickness);
    ConvertMatToOpenCV();
}

// Loads the Caffe SSD face-detection model from disk.
// Throws std::invalid_argument if the network cannot be read.
AuxFaceDetector::AuxFaceDetector()
    // Members are listed in declaration order (width, height, scale,
    // means, confidence) so the mem-init list matches the order in which
    // they are actually initialized (fixes -Wreorder warnings; the old
    // list started with confidence_threshold_, which is declared last).
    : input_image_width_(300), input_image_height_(300), scale_factor_(1.0),
      mean_values_({ 104., 177.0, 123.0 }), confidence_threshold_(0.5)
{
    // Name the model paths once so the error message below cannot drift
    // out of sync with the load call (previously duplicated three times).
    const std::string config_path =
        "C:/Program Files/Epic Games/UnrealProjects/CreazyHead/ThirdParty/OpenCV/Includes/opencvAssets/deploy.prototxt";
    const std::string weights_path =
        "C:/Program Files/Epic Games/UnrealProjects/CreazyHead/ThirdParty/OpenCV/Includes/opencvAssets/res10_300x300_ssd_iter_140000_fp16.caffemodel";

    network_ = cv::dnn::readNetFromCaffe(config_path, weights_path);

    if (network_.empty())
    {
        std::ostringstream ss;
        ss << "Failed to load network with the following settings:\n"
           << "Configuration: " << config_path << "\n"
           << "Binary: " << weights_path << "\n";
        throw std::invalid_argument(ss.str());
    }
}

// Runs the SSD network on `frame` and returns one cv::Rect per detection
// whose confidence is at or above confidence_threshold_. Returns an empty
// vector for an empty frame.
std::vector<cv::Rect> AuxFaceDetector::detect_face_rectangles(const cv::Mat& frame) 
{
    std::vector<cv::Rect> faces;

    // An empty frame would make blobFromImage throw; report "no faces".
    if (frame.empty())
    {
        return faces;
    }

    // Build a 300x300 input blob (the size this SSD model expects) and
    // run one forward pass.
    cv::Mat input_blob = cv::dnn::blobFromImage(frame, scale_factor_, cv::Size(input_image_width_, input_image_height_),
        mean_values_, false, false);
    network_.setInput(input_blob, "data");
    cv::Mat detection = network_.forward("detection_out");

    // View the 1x1xNx7 output tensor as an Nx7 matrix; each row is
    // [image_id, label, confidence, x_min, y_min, x_max, y_max] with the
    // coordinates normalized to [0, 1].
    cv::Mat detection_matrix(detection.size[2], detection.size[3], CV_32F, detection.ptr<float>());

    for (int i = 0; i < detection_matrix.rows; i++) 
    {
        const float confidence = detection_matrix.at<float>(i, 2);
        if (confidence < confidence_threshold_) {
            continue;
        }

        // Scale the normalized corners back to pixel coordinates. These
        // are the top-left (min) and bottom-right (max) corners — the
        // old names ("left_bottom"/"right_top") were misleading.
        const int x_min = static_cast<int>(detection_matrix.at<float>(i, 3) * frame.cols);
        const int y_min = static_cast<int>(detection_matrix.at<float>(i, 4) * frame.rows);
        const int x_max = static_cast<int>(detection_matrix.at<float>(i, 5) * frame.cols);
        const int y_max = static_cast<int>(detection_matrix.at<float>(i, 6) * frame.rows);

        faces.emplace_back(x_min, y_min, x_max - x_min, y_max - y_min);
    }

    return faces;
}

void AFaceDetector::ConvertMatToOpenCV()
{
    cv::Mat resized;
    cv::resize(frame, resized, cv::Size(frame.cols/3, frame.rows/3));
    cv::imshow("img", resized);
    const int32 SrcWidth = resized.cols;
    const int32 SrcHeight = resized.rows;
    const bool UseAlpha = false;
    // Create the texture
    FrameAsTexture = UTexture2D::CreateTransient(
        SrcWidth,
        SrcHeight,
        PF_B8G8R8A8
    );

    // Getting SrcData
    uint8_t* pixelPtr = (uint8_t*)resized.data;
    const int NumberOfChannels = resized.channels();
    TArray<FColor*> ImageColor;

    for (int i = 0; i < SrcHeight; i++)
    {
        for (int j = 0; j < SrcWidth; j++)
        {
            // Getting pixel rgb values
            uint8 ImageR = pixelPtr[i * SrcWidth * NumberOfChannels + j * NumberOfChannels + 2]; // R
            uint8 ImageG = pixelPtr[i * SrcWidth * NumberOfChannels + j * NumberOfChannels + 1]; // G
            uint8 ImageB = pixelPtr[i * SrcWidth * NumberOfChannels + j * NumberOfChannels + 0]; // B

            // Storing RGB values
            ImageColor.Add(new FColor(ImageR, ImageG, ImageB, 1));
        }
    }

    // Lock the texture so it can be modified
    uint8* MipData = static_cast<uint8*>(FrameAsTexture->PlatformData->Mips[0].BulkData.Lock(LOCK_READ_WRITE));

    // Create base mip.
    uint8* DestPtr = NULL;
    const FColor* SrcPtr = NULL;
    for (int32 y = 1; y <= SrcHeight; y++)
    {
        int CurrentIndex = (SrcHeight - y) * (SrcWidth);
        DestPtr = &MipData[CurrentIndex * sizeof(FColor)];
        SrcPtr = const_cast<FColor*>(ImageColor[CurrentIndex]);
        for (int32 x = 0; x < ImageColor.Num()/ SrcHeight; x++)
        {
            *DestPtr++ = SrcPtr->B;
            *DestPtr++ = SrcPtr->G;
            *DestPtr++ = SrcPtr->R;
            *DestPtr++ = (UseAlpha ? SrcPtr->A : 0xFF);

            SrcPtr++;
        }
    }

    // Unlock the texture
    FrameAsTexture->PlatformData->Mips[0].BulkData.Unlock();
    FrameAsTexture->UpdateResource();
}

.H

// Fill out your copyright notice in the Description page of Project Settings.

#pragma once

#include "CoreMinimal.h"
#include "GameFramework/Actor.h"
#include "opencv2/objdetect.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/dnn/dnn.hpp"
#include "opencv2/opencv.hpp"
#include "FaceDetector.generated.h"

class UTexture2D;

// Actor that captures webcam frames via OpenCV, runs face detection on
// them, and exposes the annotated frame as a UTexture2D for materials.
UCLASS()
class CRAZY_HEAD_DANCE_API AFaceDetector : public AActor
{
	GENERATED_BODY()
private:
	// VideoCapture class for playing video for which faces to be detected
	cv::VideoCapture Vcapture;
	// Most recent frame grabbed from Vcapture; written by DetectAndDraw()
	// and consumed by ConvertMatToOpenCV().
	cv::Mat frame;

	// PreDefined trained XML classifiers with facial features
	// NOTE(review): loaded in BeginPlay() but not referenced anywhere else
	// in the visible code — detection is done by AuxFaceDetector's DNN.
	cv::CascadeClassifier cascade;

public:
	// Texture rebuilt from the latest annotated webcam frame; assign it
	// to a material to display the camera image at runtime.
    UPROPERTY(EditAnywhere, BlueprintReadWrite)
        UTexture2D* FrameAsTexture;

private:
	// Timer callback: grab a frame, detect faces, draw rectangles, then
	// refresh FrameAsTexture.
	void DetectAndDraw();
	// Converts the current `frame` Mat into FrameAsTexture.
    void ConvertMatToOpenCV();

protected:
	// Called when the game starts or when spawned
	virtual void BeginPlay() override;

public:
    // Sets default values for this actor's properties
    AFaceDetector();

	// Called every frame
	virtual void Tick(float DeltaTime) override;

};

// Wraps OpenCV's Caffe SSD face-detection network. Construction loads the
// model from disk and throws std::invalid_argument if it cannot be read.
class AuxFaceDetector 
{
public:
    explicit AuxFaceDetector();

    /// Detect faces in an image frame
    /// \param frame Image to detect faces in
    /// \return Vector of detected faces
    std::vector<cv::Rect> detect_face_rectangles(const cv::Mat& frame);

private:
    /// Face detection network
    cv::dnn::Net network_;
    // NOTE(review): members are initialized in this declaration order,
    // not in the order written in the constructor's mem-init list (which
    // lists confidence_threshold_ first). Harmless here because every
    // initializer is an independent constant, but it triggers -Wreorder
    // warnings.
    /// Input image width
    const int input_image_width_;
    /// Input image height
    const int input_image_height_;
    /// Scale factor when creating image blob
    const double scale_factor_;
    /// Mean normalization values network was trained with
    const cv::Scalar mean_values_;
    /// Face detection confidence threshold
    const float confidence_threshold_;

};

Hi,
This is not a concrete answer to your problem, but as I guess the source of the problem lies in your mat to texture conversion (i.e. in ConvertMatToOpenCV), I would like to show a way simpler method to fill the Texture2D with the Mat data. Most of the conversion is done automatically, no reading of individual pixel data necessary.

This is my MatToTexture2D function. Some code was added to show what to do in certain conditions and can be omitted if you know they don’t show up:

// Converts an OpenCV Mat into a transient PF_B8G8R8A8 UTexture2D.
// Fixes over the posted version:
//  - CV8UC3 / CV_8U4C were typos for CV_8UC3 / CV_8UC4 (did not compile).
//  - the color conversion no longer writes through the const parameter;
//    it works on a local Mat header instead.
//  - PostEditChange() is editor-only, so it is wrapped in WITH_EDITOR
//    (calling it unconditionally breaks packaged builds).
//  - non-continuous Mats (row padding) are copied row by row instead of
//    assuming one contiguous block.
UTexture2D* AVideoActor::MatToTexture2D(const cv::Mat InMat)
{
	//create new texture, set its values
	UTexture2D* Texture = UTexture2D::CreateTransient(InMat.cols, InMat.rows, PF_B8G8R8A8);

	/**check if Mat actually has the right format. The format has to be the same as the Texture2D format or converted in the needed format, or the texture will be crooked.
	* Other formats than shown here are, of course, also possible. Some combinations are:
	* (Texture2D format;Mat format)
	* (PF_R8/PF_G8/PF_B8; CV_8U) for a one channel Mat
	* (PF_R8G8B8A8/PF_A8R8G8B8/PF_B8G8R8A8; CV_8UC4) for a four channel Mat
	* (PF_R8G8; CV_8UC2) for a two channel Mat
	* ... and so on. Other color depths besides eight bit are also possible.
	*/
	// Work on a local Mat header so the (logically) const input is not
	// converted in place.
	cv::Mat Pixels = InMat;
	if (Pixels.type() == CV_8UC3)//example for pre-conversion of Mat
	{
		//if the Mat is in BGR space, convert it to BGRA. There is no three channel texture in UE (at least with eight bit)
		cv::cvtColor(Pixels, Pixels, cv::COLOR_BGR2BGRA);
	}
	else if (Pixels.type() != CV_8UC4)
	{
		//if the texture hasnt the right pixel format, abort.
#if WITH_EDITOR
		Texture->PostEditChange();
#endif
		Texture->UpdateResource();
		return Texture;
	}
	Texture->SRGB = 0;//set to 0 if Mat is not in srgb (which is likely when coming from a webcam)
	//other settings of the texture can also be changed here
	Texture->UpdateResource();

	//actually copy the data to the new texture
	FTexture2DMipMap& Mip = Texture->GetPlatformData()->Mips[0];
	void* Data = Mip.BulkData.Lock(LOCK_READ_WRITE);//lock the texture data
	if (Pixels.isContinuous())
	{
		FMemory::Memcpy(Data, Pixels.data, Pixels.total() * Pixels.elemSize());//copy the data in one block
	}
	else
	{
		//padded rows: copy each row via the Mat's row pointer
		const size_t RowBytes = Pixels.cols * Pixels.elemSize();
		for (int y = 0; y < Pixels.rows; ++y)
		{
			FMemory::Memcpy(static_cast<uint8*>(Data) + y * RowBytes, Pixels.ptr(y), RowBytes);
		}
	}

	Mip.BulkData.Unlock();

#if WITH_EDITOR
	//PostEditChange() only exists in editor builds; guarding it keeps
	//packaged/shipping builds compiling.
	Texture->PostEditChange();
#endif
	Texture->UpdateResource();

	return Texture;
}

With this function, you still need to know which color format your mat has, which can be annoying to find out. E.g. the difference between RGB and BGR cannot really be read from a Mat to my knowledge, both are in the format CV_8UC3 (eight bit, three channels).
It is also annoying that Texture2D can have one channel, two channels and four channels, but not three channels, so converting an RGB Mat to a Texture2D is not possible without first converting it to a RGBA Mat.

EDIT: Such a function as the one I wrote actually exists in the OpenCV Plugin that is integrated into Unreal Engine 5, inside the OpenCVHelper class (TextureFromCvMat).
Also, as it took me some time to find that out, I will just add how to include the OpenCV headers:

//headers have to be included with these wrappers
#include "PreOpenCVHeaders.h"
#include "OpenCVHelper.h"
#include "opencv2/aruco.hpp"
#include "opencv2/opencv.hpp"
//...
#include "PostOpenCVHeaders.h"

I changed CV8UC3 to CV_8UC3 and CV_8U4C to CV_8UC4; after that it compiled successfully and the code worked.

You need to comment out “Texture->PostEditChange();”, otherwise packaging fails.
Thank you.