Hello! I’m not new to Unreal but fairly new to Unreal C++ and loading NNEs this way. I’ve been trying to get a model I made added in by following the tutorial (it’s being loaded into my C++ blueprint as a UAsset) without much success. I know it’s an issue with how I’m loading in the tensor input/output but nothing I try seems to fix it.
I left all of my comments and old commented-out code in case that’s helpful at all. If any of my notes are wrong, I’m also happy to hear about that.
Any help would be very much appreciated!!
ANNEE.cpp
// Fill out your copyright notice in the Description page of Project Settings.
#include "ANNEE.h"
#include "Engine/AssetManager.h"
// NOTE(review): file-scope globals. These duplicate FMyModelHelper's members of
// the same names, which is confusing: BeginPlay reads/writes THESE (the uint32
// arrays hold the tensor dimensions, then get resized and bound as if they were
// the float data buffers), while Tick passes ModelHelper's own — never
// populated — InputBindings/OutputBindings to RunSync. Prefer keeping all of
// this state inside FMyModelHelper and deleting these globals.
TArray<uint32> InputData = { 64, 3, 200, 200 };
TArray<uint32> OutputData = { 64, 3, 100, 100 };
// Bindings built in BeginPlay land here, not in ModelHelper, so the bindings
// that Tick actually submits to the model remain empty.
TArray<UE::NNE::FTensorBindingCPU> InputBindings;
TArray<UE::NNE::FTensorBindingCPU> OutputBindings;
// Constructor: configures actor defaults only; all NNE setup happens in BeginPlay.
AANNEE::AANNEE()
{
	// Tick() drives the per-frame inference dispatch, so ticking must stay enabled.
	PrimaryActorTick.bCanEverTick = true;
}
//TODO: Set tensor size to match original model
// Called when the game starts or when spawned
void AANNEE::BeginPlay()
{
UE_LOG(LogTemp, Display, TEXT("!!!!!AANNEE is beginning play!!!!!"));
Super::BeginPlay();
//Running on CPU works for in editor and runtime. Can be called synchronously on game thread or asynchronously (starting with this approach)
//INNERuntimeRDG <-- Run frame aligned. If you can't call model when needed with INNERuntimeCPU, might have to switch to this. Needs FRDGBuilder and Render Dependency Graph knowledge (https://docs.unrealengine.com/5.2/en-US/render-dependency-graph-in-unreal-engine/)
//!!!!!IMPORTANT!!!!!
//!!!!!NNERuntimeORTCpu = In editor model running. Will NOT run IN GAME after building. Have to change this to *something*. Can't find other command rn
TWeakInterfacePtr<INNERuntimeCPU> Runtime = UE::NNE::GetRuntime<INNERuntimeCPU>(FString("NNERuntimeORTCpu"));
if (Runtime.IsValid())
{
TUniquePtr<UE::NNE::IModelCPU> Model = Runtime->CreateModel(PreLoadedModelData);
UE_LOG(LogTemp, Display, TEXT("PreLoadedModelData loaded %s"), *PreLoadedModelData->GetName());
if (Model.IsValid())
{
//Needed if preloading from asset?
//ModelHelper->ModelInstance = Model->CreateModelInstance();
TUniquePtr<UE::NNE::IModelInstanceCPU> ModelInstance = Model->CreateModelInstance();
if (ModelInstance.IsValid())
{
//Use if tensor size is known
//bool bIsRunning;
TArray<UE::NNE::FTensorShape> InputTensorShapes = { UE::NNE::FTensorShape::Make(InputData) };
InputData.SetNumZeroed(InputTensorShapes[0].Volume());
InputBindings.SetNumZeroed(1);
InputBindings[0].Data = InputData.GetData();
InputBindings[0].SizeInBytes = InputData.Num() * sizeof(float);
TArray<UE::NNE::FTensorShape> OutputTensorShapes = { UE::NNE::FTensorShape::Make(OutputData) };
OutputData.SetNumZeroed(OutputTensorShapes[0].Volume());
OutputBindings.SetNumZeroed(1);
OutputBindings[0].Data = OutputData.GetData();
OutputBindings[0].SizeInBytes = OutputData.Num() * sizeof(float);
//Figure out how much memory to allocate to input and output
//Input can be populated before model runs. Output can be populated after inference completes
//IMPORTANT: Have to match input/output of tensor shapes to input/output dimensions of original model
//Input shape = (64, 3, 200, 200)
TConstArrayView<UE::NNE::FTensorDesc> InputTensorDescs = ModelInstance->GetInputTensorDescs();
//checkf(InputTensorDescs.Num() == 1, TEXT("The current example supports only models with a single input tensor"));
//UE::NNE::FSymbolicTensorShape SymbolicInputTensorShape = InputTensorDescs[0].GetShape();
//////IsConcrete = Tests if any dimensions set to -1 (which means model accepts any size of input/output tensor)
////checkf(SymbolicInputTensorShape.IsConcrete(), TEXT("The current example supports only models without variable input tensor dimensions"));
//checkf(SymbolicInputTensorShape.Rank() == 4, TEXT("Neural Post Processing requires models with input shape 64 x 3 x 200 x 200!"));
//checkf(SymbolicInputTensorShape.GetData()[0] == 64, TEXT("Neural Post Processing requires models with input shape 64 x 3 x 200 x 200!"));
//checkf(SymbolicInputTensorShape.GetData()[1] == 3, TEXT("Neural Post Processing requires models with input shape 64 x 3 x 200 x 200!"));
//checkf(SymbolicInputTensorShape.GetData()[2] == 200, TEXT("Neural Post Processing requires models with input shape 64 x 3 x 200 x 200!"));
//checkf(SymbolicInputTensorShape.GetData()[3] == 200, TEXT("Neural Post Processing requires models with input shape 64 x 3 x 200 x 200!"));
////TArray<UE::NNE::FTensorShape> InputTensorShapes = { UE::NNE::FTensorShape::MakeFromSymbolic(SymbolicInputTensorShape) };
//Set the input tensor dimension. Must be called each time size would change
//Input shape = (64, 3, 200, 200)
ModelInstance->SetInputTensorShapes(InputTensorShapes);
//Output shape of (64,32,100,100)
TConstArrayView<UE::NNE::FTensorDesc> OutputTensorDescs = ModelInstance->GetOutputTensorDescs();
FString OutputTensorDescs_String = FString(UTF8_TO_TCHAR(reinterpret_cast<const char*>(OutputTensorDescs.GetData())));
UE_LOG(LogTemp, Display, TEXT("OutputTensorDescs: %s"), *OutputTensorDescs_String);
//checkf(OutputTensorDescs.Num() == 1, TEXT("The current example supports only models with a single output tensor"));
//UE::NNE::FSymbolicTensorShape SymbolicOutputTensorShape = OutputTensorDescs[0].GetShape();
////checkf(SymbolicOutputTensorShape.IsConcrete(), TEXT("The current example supports only models without variable output tensor dimensions"));
//checkf(SymbolicOutputTensorShape.Rank() == 4, TEXT("Neural Post Processing requires models with input shape 64 x 3 x 100 x 100!"));
////IMPORTANT: Unreal is crashing after this line
////Try feeding it an image/input to see if that's the only issue. If it can process the input, proceed. If it can't, debug or try starting over
////a little bit with basing new stuff after sample project that should be first tab of UE5 NNE tab group
//checkf(SymbolicOutputTensorShape.GetData()[0] == 64, TEXT("Neural Post Processing requires models with input shape 64 x 3 x 100 x 100!"));
//checkf(SymbolicOutputTensorShape.GetData()[1] == 3, TEXT("Neural Post Processing requires models with input shape 64 x 3 x 100 x 100!"));
//checkf(SymbolicOutputTensorShape.GetData()[2] == 100, TEXT("Neural Post Processing requires models with input shape 64 x 3 x 100 x 100!"));
//checkf(SymbolicOutputTensorShape.GetData()[3] == 100, TEXT("Neural Post Processing requires models with input shape 64 x 3 x 100 x 100!"));
//TArray<UE::NNE::FTensorShape> OutputTensorShapes = { UE::NNE::FTensorShape::MakeFromSymbolic(SymbolicOutputTensorShape) };
}
else
{
UE_LOG(LogTemp, Error, TEXT("Failed to create the model instance"));
}
}
else
{
UE_LOG(LogTemp, Error, TEXT("Failed to create the model"));
}
}
else
{
UE_LOG(LogTemp, Error, TEXT("Cannot find runtime NNERuntimeORTCpu, please enable the corresponding plugin"));
}
}
// Called every frame.
// Dispatches one asynchronous inference at a time: input can be written into
// ModelHelper->InputData before a run, and ModelHelper->OutputData holds the
// result of the previous run.
void AANNEE::Tick(float DeltaTime)
{
	Super::Tick(DeltaTime);
	UE_LOG(LogTemp, Display, TEXT("!!!!!AANNEE is ticking!!!!!"));

	// FIX: the original dereferenced ModelHelper->ModelInstance unconditionally.
	// If BeginPlay failed to create (or never stored) the instance, that was a
	// null-pointer crash on the worker thread. Bail out until it exists.
	if (!ModelHelper.IsValid() || !ModelHelper->ModelInstance.IsValid())
	{
		return;
	}

	// Never call RunSync twice concurrently (or re-entrantly) on one instance.
	if (ModelHelper->bIsRunning)
	{
		return;
	}

	// Process ModelHelper->OutputData from the previous run here.
	// Pass new data into ModelHelper->InputData here.
	ModelHelper->bIsRunning = true;

	// Capture the shared pointer BY VALUE so the helper (instance + buffers)
	// outlives this actor even if it is destroyed mid-inference.
	TSharedPtr<FMyModelHelper> ModelHelperPtr = ModelHelper;
	AsyncTask(ENamedThreads::AnyNormalThreadNormalTask, [ModelHelperPtr]()
	{
		// Run the model off the game thread; bindings were set up in BeginPlay.
		if (ModelHelperPtr->ModelInstance->RunSync(ModelHelperPtr->InputBindings, ModelHelperPtr->OutputBindings) != 0)
		{
			UE_LOG(LogTemp, Error, TEXT("Failed to run the model"));
		}
		// Hop back to the game thread to clear the busy flag.
		AsyncTask(ENamedThreads::GameThread, [ModelHelperPtr]()
		{
			ModelHelperPtr->bIsRunning = false;
		});
	});
}
ANNEE.h
// Fill out your copyright notice in the Description page of Project Settings.
#pragma once
#include "CoreMinimal.h"
#include "GameFramework/Actor.h"
#include "NNE.h"
#include "NNERuntimeCPU.h"
#include "NNEModelData.h"
#include "ANNEE.generated.h"
// Shared state for asynchronous inference, held via TSharedPtr so the worker
// thread keeps it alive even if the owning actor is destroyed mid-run.
// Without this, the game could free memory the inference is still using
// (typically a crash); it also avoids copying tensor data around.
class FMyModelHelper
{
public:
	// Compiled per-instance model; created in BeginPlay, run from Tick's async task.
	TUniquePtr<UE::NNE::IModelInstanceCPU> ModelInstance;

	// CPU-side tensor buffers. NOTE(review): the initial four values look like
	// tensor DIMENSIONS (64,3,200,200 / 64,3,100,100), not contents — these
	// arrays must be resized to the full tensor volume before binding.
	TArray<float> InputData = { 64.0f, 3.0f, 200.0f, 200.0f };
	TArray<float> OutputData = { 64.0f, 3.0f, 100.0f, 100.0f };

	// Bindings handed to IModelInstanceCPU::RunSync; must point at the buffers above.
	TArray<UE::NNE::FTensorBindingCPU> InputBindings;
	TArray<UE::NNE::FTensorBindingCPU> OutputBindings;

	// FIX: was uninitialized — reading it in Tick before any write was
	// undefined behavior and could permanently skip inference.
	bool bIsRunning = false;
};
// Actor that owns a preloaded NNE model asset and runs CPU inference on it.
UCLASS()
class SHADOWSHAPES_CNN_API AANNEE : public AActor
{
GENERATED_BODY()
public:
// Sets default values for this actor's properties
AANNEE();
// Called every frame; dispatches async inference via ModelHelper
virtual void Tick(float DeltaTime) override;
//Automated loading, loads on actor spawn and unloads on despawn but model remains in memory for actor lifetime
//Synchronous call (will only make this one call until it finishes loading which could block game starting)
//Downside of preloaded = model will last for lifetime of actor. If model gets too big, might not be ideal
// Assign the UNNEModelData asset in the editor; BeginPlay reads it to build the model.
UPROPERTY(EditAnywhere)
TObjectPtr<UNNEModelData> PreLoadedModelData;
//Delayed loading, must be triggered by func (like BeginPlay() ) in actor cpp file
//Asynchronous call (can be a background call and can check when loading is finished)
//UPROPERTY(EditAnywhere)
//TSoftObjectPtr<UNNEModelData> LazyLoadedModelData;
protected:
// Called when the game starts or when spawned; sets up the model instance and tensor bindings
virtual void BeginPlay() override;
private:
//Pointer to model helper used to pass model data around
// Shared so async tasks capturing it keep the inference state alive past actor destruction.
TSharedPtr<FMyModelHelper> ModelHelper = MakeShared<FMyModelHelper>();
//Use if tensor size is known
/*TArray<float> InputData;
TArray<float> OutputData;
TArray<UE::NNE::FTensorShape> InputTensorShapes;
TArray<UE::NNE::FTensorShape> OutputTensorShapes;
TArray<UE::NNE::FTensorBindingCPU> InputBindings;
TArray<UE::NNE::FTensorBindingCPU> OutputBindings;*/
};